diff --git a/.circleci/config.yml b/.circleci/config.yml index 530631281c80..36f0774131a5 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -9,7 +9,7 @@ _defaults: &defaults docker: # CircleCI maintains a library of pre-built images # documented at https://circleci.com/developer/images/image/cimg/python - - image: cimg/python:3.11.8 + - image: cimg/python:3.12.12 working_directory: ~/repo @@ -17,7 +17,8 @@ jobs: build: <<: *defaults steps: - - checkout + - checkout: + method: blobless - run: name: check skip @@ -52,31 +53,24 @@ jobs: - run: name: build NumPy command: | - python3.11 -m venv venv + python3.12 -m venv venv . venv/bin/activate - pip install --progress-bar=off -r requirements/test_requirements.txt + pip install --progress-bar=off -r requirements/test_requirements.txt \ + -r requirements/build_requirements.txt \ + -r requirements/ci_requirements.txt # get newer, pre-release versions of critical packages - pip install --progress-bar=off --pre --extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple -r requirements/doc_requirements.txt + pip install --progress-bar=off --pre -r requirements/doc_requirements.txt # then install numpy HEAD, which will override the version installed above - pip install . --config-settings=setup-args="-Dallow-noblas=true" - - - run: - name: create release notes - command: | - . venv/bin/activate - VERSION=$(pip show numpy | grep Version: | cut -d ' ' -f 2 | cut -c 1-5) - towncrier build --version $VERSION --yes - ./tools/ci/test_all_newsfragments_used.py + spin build --with-scipy-openblas=64 -j 2 - run: name: build devdocs w/ref warnings command: | . 
venv/bin/activate - cd doc # Don't use -q, show warning summary" - SPHINXOPTS="-W -n" make -e html - if [[ $(find build/html -type f | wc -l) -lt 1000 ]]; then - echo "doc build failed: build/html is empty" + SPHINXOPTS="-W -n" spin docs + if [[ $(find doc/build/html -type f | wc -l) -lt 1000 ]]; then + echo "doc build failed: doc/build/html is empty" exit -1 fi @@ -95,14 +89,17 @@ jobs: # destination: neps - run: - name: run doctests on documentation + name: check doctests command: | . venv/bin/activate - # Note: keep these two checks separate, because they seem to - # influence each other through changing global state (e.g., via - # `np.polynomial.set_default_printstyle`) - python tools/refguide_check.py --rst - python tools/refguide_check.py --doctests + spin check-docs -v + spin check-tutorials -v + # Currently, this does two checks not done by check-docs: + # - validates ReST blocks (via validate_rst_syntax) + # - checks that all of a module's `__all__` is reflected in the + # module-level docstring autosummary + echo calling python3 tools/refguide_check.py -v + python3 tools/refguide_check.py -v - persist_to_workspace: root: ~/repo @@ -114,7 +111,8 @@ jobs: deploy: <<: *defaults steps: - - checkout + - checkout: + method: blobless - attach_workspace: at: ~/repo diff --git a/.cirrus.star b/.cirrus.star index c503f25720a7..3de5ce97b0e8 100644 --- a/.cirrus.star +++ b/.cirrus.star @@ -9,17 +9,12 @@ load("cirrus", "env", "fs", "http") def main(ctx): ###################################################################### - # Should wheels be built? # Only test on the numpy/numpy repository ###################################################################### if env.get("CIRRUS_REPO_FULL_NAME") != "numpy/numpy": return [] - # only run the wheels entry on a cron job - if env.get("CIRRUS_CRON", "") == "nightly": - return fs.read("tools/ci/cirrus_wheels.yml") - # Obtain commit message for the event. 
Unfortunately CIRRUS_CHANGE_MESSAGE # only contains the actual commit message on a non-PR trigger event. # For a PR event it contains the PR title and description. @@ -31,23 +26,10 @@ def main(ctx): if "[skip cirrus]" in commit_msg or "[skip ci]" in commit_msg: return [] - wheel = False labels = env.get("CIRRUS_PR_LABELS", "") pr_number = env.get("CIRRUS_PR", "-1") tag = env.get("CIRRUS_TAG", "") - if "[wheel build]" in commit_msg: - wheel = True - - # if int(pr_number) > 0 and ("14 - Release" in labels or "36 - Build" in labels): - # wheel = True - - if tag.startswith("v") and "dev0" not in tag: - wheel = True - - if wheel: - return fs.read("tools/ci/cirrus_wheels.yml") - if int(pr_number) < 0: return [] diff --git a/.clang-format b/.clang-format index 60b1066bcff7..7e94a6fdb47c 100644 --- a/.clang-format +++ b/.clang-format @@ -10,7 +10,7 @@ AllowShortEnumsOnASingleLine: false AllowShortIfStatementsOnASingleLine: false AlwaysBreakAfterReturnType: TopLevel BreakBeforeBraces: Stroustrup -ColumnLimit: 79 +ColumnLimit: 88 ContinuationIndentWidth: 8 DerivePointerAlignment: false IndentWidth: 4 @@ -27,6 +27,7 @@ IncludeCategories: Priority: 1 - Regex: '^<[[:alnum:]_.]+"' Priority: 2 +IndentPPDirectives: AfterHash Language: Cpp PointerAlignment: Right ReflowComments: true diff --git a/.editorconfig b/.editorconfig index 5fdaee55c25d..1431a93063b4 100644 --- a/.editorconfig +++ b/.editorconfig @@ -4,5 +4,23 @@ root = true # https://numpy.org/neps/nep-0045-c_style_guide.html indent_size = 4 indent_style = space -max_line_length = 80 +max_line_length = 88 trim_trailing_whitespace = true + +[*.{py,pyi,pxd}] +# https://peps.python.org/pep-0008/ +charset = utf-8 +end_of_line = lf +indent_size = 4 +indent_style = space +insert_final_newline = true +trim_trailing_whitespace = true + +[*.py] +# Keep in sync with `tools/lint_diff.ini` and `tools/linter.py` +# https://pycodestyle.pycqa.org/en/latest/intro.html#configuration +max_line_length = 88 + +[*.pyi] +# 
https://typing.readthedocs.io/en/latest/guides/writing_stubs.html#style-guide +max_line_length = 130 diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index b237d52424ac..bb88ed20b8ba 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -64,10 +64,12 @@ body: - type: textarea attributes: - label: "Context for the issue:" + label: "How does this issue affect you or how did you find it:" description: | - Please explain how this issue affects your work or why it should be prioritized. + Please explain how this issue concretely affects you or others. + Especially if it does not impact you how did you find it? + (If an issue has no concrete impact this is also helpful to know.) placeholder: | - << your explanation here >> + << description of how the issue affects you >> validations: required: false diff --git a/.github/ISSUE_TEMPLATE/typing.yml b/.github/ISSUE_TEMPLATE/typing.yml new file mode 100644 index 000000000000..17eedfae1c6c --- /dev/null +++ b/.github/ISSUE_TEMPLATE/typing.yml @@ -0,0 +1,68 @@ +name: Static Typing +description: Report an issue with the NumPy typing hints. +title: "TYP: " +labels: [41 - Static typing] + +body: +- type: markdown + attributes: + value: > + Thank you for taking the time to report this issue. + Please make sure that this issue hasn't already been reported before. + +- type: textarea + attributes: + label: "Describe the issue:" + validations: + required: true + +- type: textarea + attributes: + label: "Reproduce the code example:" + description: > + A short code example that reproduces the error in your type-checker. It + should be self-contained, i.e., can be run as-is via e.g. + `mypy myproblem.py` or `pyright myproblem.py`. 
+ placeholder: | + import numpy as np + import numpy.typing as npt + << your code here >> + render: python + validations: + required: true + +- type: textarea + attributes: + label: "Error message:" + description: > + Please include all relevant error messages from your type-checker or IDE. + render: shell + +- type: textarea + attributes: + label: "Python and NumPy Versions:" + description: > + Output from `import sys, numpy; print(numpy.__version__); print(sys.version)`. + validations: + required: true + +- type: textarea + attributes: + label: "Type-checker version and settings:" + description: > + Please include the exact version of the type-checker you are using. + Popular (static) type checkers include Mypy, Pyright / Pylance, Pytype, + Pyre, PyCharm, etc. + Also include the full CLI command used to run the type-checker, and + all of the relevant configuration options. + validations: + required: true + +- type: textarea + attributes: + label: "Additional typing packages." + description: | + If you are using `typing-extensions` or typing-stub packages, please + list their versions here. + validations: + required: false diff --git a/.github/check-warnings/action.yml b/.github/check-warnings/action.yml new file mode 100644 index 000000000000..f3f6778e229b --- /dev/null +++ b/.github/check-warnings/action.yml @@ -0,0 +1,38 @@ +name: "Check Warnings" +description: "Filter build warnings against an allowlist" + +inputs: + log-file: + description: "Path to build log file" + required: true + allowlist: + description: "Path to allowed warnings regex file" + required: true + warning-regex: + description: "Regex to extract warnings from the log" + required: true + +runs: + using: "composite" + steps: + - name: Extract warnings + shell: bash + run: | + echo "Extracting warnings from ${{ inputs.log-file }} using regex: ${{ inputs['warning-regex'] }}" + grep -E "${{ inputs['warning-regex'] }}" "${{ inputs.log-file }}" | tee warnings.log || true + + if [ ! 
-s warnings.log ]; then + echo "No warnings found." + exit 0 + fi + + echo "Filtering against allowlist ${{ inputs.allowlist }}" + grep -v -F -f "${{ inputs.allowlist }}" warnings.log | tee disallowed.log || true + + if [ -s disallowed.log ]; then + echo "::error::Disallowed warnings detected:" + cat disallowed.log + exit 1 + else + echo "All warnings are allowed." + fi diff --git a/.github/check-warnings/msvc-allowed-warnings.txt b/.github/check-warnings/msvc-allowed-warnings.txt new file mode 100644 index 000000000000..e3da83492e16 --- /dev/null +++ b/.github/check-warnings/msvc-allowed-warnings.txt @@ -0,0 +1,21 @@ +../numpy/_core/src/common/npy_cpu_features.c(451): warning C4098: 'npy__cpu_cpuid': 'void' function returning a value +../numpy/linalg/lapack_lite/f2c_c_lapack.c(1530): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_c_lapack.c(230): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_c_lapack.c(250): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_config.c(1368): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_s_lapack.c(1625): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_s_lapack.c(1645): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_s_lapack.c(2865): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_s_lapack.c(2882): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_s_lapack.c(2894): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/random/src/mt19937/mt19937.c(88): warning C4146: unary minus operator applied 
to unsigned type, result still unsigned +../numpy/random/src/mt19937/mt19937.c(92): warning C4146: unary minus operator applied to unsigned type, result still unsigned +../numpy/random/src/mt19937/mt19937.c(95): warning C4146: unary minus operator applied to unsigned type, result still unsigned +..\numpy\random\src/pcg64/pcg64.h(342): warning C4146: unary minus operator applied to unsigned type, result still unsigned +C:\a\numpy\numpy\numpy\random\src\pcg64\pcg64.h(342): warning C4146: unary minus operator applied to unsigned type, result still unsigned +D:\a\numpy\numpy\numpy\random\src\pcg64\pcg64.h(342): warning C4146: unary minus operator applied to unsigned type, result still unsigned +cl : Command line warning D9025 : overriding '/arch:SSE2' with '/arch:AVX2' +numpy/random/_generator.cp312-win32.pyd.p/numpy/random/_generator.pyx.c(26345): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data +numpy/random/_generator.cp312-win32.pyd.p/numpy/random/_generator.pyx.c(38369): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data +numpy/random/_generator.cp312-win_arm64.pyd.p/numpy/random/_generator.pyx.c(26345): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data +numpy/random/_generator.cp312-win_arm64.pyd.p/numpy/random/_generator.pyx.c(38369): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data diff --git a/.github/dependabot.yml b/.github/dependabot.yml index b59fe181d119..76e3f31a96e2 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -4,7 +4,27 @@ updates: directory: / schedule: interval: daily + cooldown: + default-days: 7 commit-message: prefix: "MAINT" labels: - "03 - Maintenance" + ignore: + - dependency-name: "bus1/cabuild" +updates: + - package-ecosystem: pip + directory: /requirements + schedule: + interval: daily + cooldown: + default-days: 7 + commit-message: + prefix: "MAINT" + labels: + - "03 
- Maintenance" + ignore: + - dependency-name: "scipy-openblas32" + - dependency-name: "scipy-openblas64" + - dependency-name: "jupyterlite-pyodide-kernel" + - dependency-name: "sphinx" diff --git a/.github/meson_actions/action.yml b/.github/meson_actions/action.yml index 05d263dc7a73..476c0bbd7950 100644 --- a/.github/meson_actions/action.yml +++ b/.github/meson_actions/action.yml @@ -30,8 +30,8 @@ runs: TERM: xterm-256color run: | echo "::group::Installing Test Dependencies" - pip install pytest pytest-xdist hypothesis typing_extensions setuptools + python -m pip install -r requirements/test_requirements.txt echo "::endgroup::" echo "::group::Test NumPy" - spin test + spin test -- --durations=10 --timeout=600 echo "::endgroup::" diff --git a/.github/pr-prefix-labeler.yml b/.github/pr-prefix-labeler.yml index 4905b502045d..65ed35aa1a11 100644 --- a/.github/pr-prefix-labeler.yml +++ b/.github/pr-prefix-labeler.yml @@ -12,5 +12,5 @@ "REV": "34 - Reversion" "STY": "03 - Maintenance" "TST": "05 - Testing" -"TYP": "static typing" +"TYP": "41 - Static typing" "WIP": "25 - WIP" diff --git a/.github/windows_arm64_steps/action.yml b/.github/windows_arm64_steps/action.yml new file mode 100644 index 000000000000..8ecb3b8a0cdd --- /dev/null +++ b/.github/windows_arm64_steps/action.yml @@ -0,0 +1,22 @@ +name: Build Dependencies(Win-ARM64) +description: "Setup LLVM for Win-ARM64 builds" + +runs: + using: "composite" + steps: + - name: Install LLVM with checksum verification + shell: pwsh + run: | + Invoke-WebRequest https://github.com/llvm/llvm-project/releases/download/llvmorg-20.1.6/LLVM-20.1.6-woa64.exe -UseBasicParsing -OutFile LLVM-woa64.exe + $expectedHash = "92f69a1134e32e54b07d51c6e24d9594852f6476f32c3d70471ae00fffc2d462" + $fileHash = (Get-FileHash -Path "LLVM-woa64.exe" -Algorithm SHA256).Hash + if ($fileHash -ne $expectedHash) { + Write-Error "Checksum verification failed. The downloaded file may be corrupted or tampered with." 
+ exit 1 + } + Start-Process -FilePath ".\LLVM-woa64.exe" -ArgumentList "/S" -Wait + echo "C:\Program Files\LLVM\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append + echo "CC=clang-cl" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + echo "CXX=clang-cl" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + echo "FC=flang-new" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + diff --git a/.github/workflows/circleci.yml b/.github/workflows/circleci.yml index c0c8876b6bbe..eafe61098588 100644 --- a/.github/workflows/circleci.yml +++ b/.github/workflows/circleci.yml @@ -17,7 +17,7 @@ jobs: statuses: write steps: - name: GitHub Action step - uses: larsoner/circleci-artifacts-redirector-action@4e13a10d89177f4bfc8007a7064bdbeda848d8d1 # master + uses: larsoner/circleci-artifacts-redirector-action@5d358ff96e96429a5c64a969bb4a574555439f4f # master with: repo-token: ${{ secrets.GITHUB_TOKEN }} api-token: ${{ secrets.CIRCLE_TOKEN }} diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index deeb3e08e300..99c7afcabec7 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -41,11 +41,13 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + persist-credentials: false # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8 + uses: github/codeql-action/init@cdefb33c0f6224e58673d9004f47f7cb3e328b89 # v4.31.10 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -55,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
# If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8 + uses: github/codeql-action/autobuild@cdefb33c0f6224e58673d9004f47f7cb3e328b89 # v4.31.10 # â„šī¸ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -68,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8 + uses: github/codeql-action/analyze@cdefb33c0f6224e58673d9004f47f7cb3e328b89 # v4.31.10 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml new file mode 100644 index 000000000000..013b1a3b7831 --- /dev/null +++ b/.github/workflows/compiler_sanitizers.yml @@ -0,0 +1,143 @@ +name: Test with compiler sanitizers + +on: + push: + branches: + - main + pull_request: + branches: + - main + - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' + - 'tools/stubtest/**' + +defaults: + run: + shell: bash + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +permissions: + contents: read # to fetch code (actions/checkout) + +jobs: + clang_ASAN_UBSAN: + # To enable this workflow on a fork, comment out: + if: github.repository == 'numpy/numpy' + runs-on: macos-latest + steps: + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + submodules: recursive + fetch-tags: true + persist-credentials: false + - name: Set up pyenv + run: | + git clone https://github.com/pyenv/pyenv.git "$HOME/.pyenv" + PYENV_ROOT="$HOME/.pyenv" + PYENV_BIN="$PYENV_ROOT/bin" + PYENV_SHIMS="$PYENV_ROOT/shims" + echo "$PYENV_BIN" >> $GITHUB_PATH + echo 
"$PYENV_SHIMS" >> $GITHUB_PATH + echo "PYENV_ROOT=$PYENV_ROOT" >> $GITHUB_ENV + - name: Check pyenv is working + run: + pyenv --version + - name: Set up LLVM + run: | + brew install llvm@20 + LLVM_PREFIX=$(brew --prefix llvm@20) + echo CC="$LLVM_PREFIX/bin/clang" >> $GITHUB_ENV + echo CXX="$LLVM_PREFIX/bin/clang++" >> $GITHUB_ENV + echo LDFLAGS="-L$LLVM_PREFIX/lib" >> $GITHUB_ENV + echo CPPFLAGS="-I$LLVM_PREFIX/include" >> $GITHUB_ENV + - name: Build Python with address sanitizer + run: | + CONFIGURE_OPTS="--with-address-sanitizer" pyenv install 3.14 + pyenv global 3.14 + - name: Install dependencies + run: | + pip install -r requirements/build_requirements.txt + pip install -r requirements/ci_requirements.txt + pip install -r requirements/test_requirements.txt + # xdist captures stdout/stderr, but we want the ASAN output + pip uninstall -y pytest-xdist + - name: Build + run: + python -m spin build -j2 -- -Db_sanitize=address,undefined -Db_lundef=false + - name: Test + run: | + # pass -s to pytest to see ASAN errors and warnings, otherwise pytest captures them + # Ignore test_casting_floatingpoint_errors on macOS for now - causes crash inside UBSAN + ASAN_OPTIONS=detect_leaks=0:symbolize=1:strict_init_order=true:allocator_may_return_null=1 \ + UBSAN_OPTIONS=halt_on_error=1:suppressions=${GITHUB_WORKSPACE}/tools/ci/ubsan_suppressions_arm64.txt \ + python -m spin test -- -k "not test_casting_floatingpoint_errors" -v -s --timeout=600 --durations=10 + + clang_TSAN: + # To enable this workflow on a fork, comment out: + if: github.repository == 'numpy/numpy' + runs-on: ubuntu-latest + container: + image: ghcr.io/nascheme/numpy-tsan:3.14t + options: --shm-size=2g # increase memory for large matrix ops + + steps: + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - name: Trust working directory and initialize submodules + run: | + git config --global --add safe.directory /__w/numpy/numpy + git submodule update --init --recursive + - name: 
Uninstall pytest-xdist (conflicts with TSAN) + run: pip uninstall -y pytest-xdist + + - name: Upgrade spin (gh-29777) + run: pip install -U spin + + - name: Build NumPy with ThreadSanitizer + run: python -m spin build -j2 -- -Db_sanitize=thread + + - name: Run tests under prebuilt TSAN container + run: | + export TSAN_OPTIONS="halt_on_error=0:allocator_may_return_null=1:suppressions=$GITHUB_WORKSPACE/tools/ci/tsan_suppressions.txt" + echo "TSAN_OPTIONS=$TSAN_OPTIONS" + python -m spin test \ + `find numpy -name "test*.py" | xargs grep -E -l "import threading|ThreadPoolExecutor" | tr '\n' ' '` \ + -- -v -s --timeout=600 --durations=10 + + clang_ASAN: + # To enable this workflow on a fork, comment out: + if: github.repository == 'numpy/numpy' + runs-on: ubuntu-latest + container: + image: ghcr.io/nascheme/cpython-asan:3.14 + options: --shm-size=2g # increase memory for large matrix ops + + steps: + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - name: Trust working directory and initialize submodules + run: | + git config --global --add safe.directory /__w/numpy/numpy + git submodule update --init --recursive + + - name: Install dependencies + run: | + pip install -r requirements/build_requirements.txt + pip install -r requirements/ci_requirements.txt + pip install -r requirements/test_requirements.txt + # xdist captures stdout/stderr, but we want the ASAN output + pip uninstall -y pytest-xdist + + - name: Build NumPy with AddressSanitizer & LeakSanitizer + run: python -m spin build -j4 -- -Db_sanitize=address,leak + + - name: Test + run: | + # pass -s to pytest to see ASAN errors and warnings, otherwise pytest captures them + export ASAN_OPTIONS="detect_leaks=1:symbolize=1:strict_init_order=true:allocator_may_return_null=1:use_sigaltstack=0" + export LSAN_OPTIONS="suppressions=$GITHUB_WORKSPACE/tools/ci/lsan_suppressions.txt" + python -m spin test -- -v -s --timeout=600 --durations=10 diff --git a/.github/workflows/cygwin.yml 
b/.github/workflows/cygwin.yml index ce43d807f8f0..eebb6de405ab 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -4,6 +4,11 @@ on: branches: - main - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' + - 'tools/stubtest/**' concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} @@ -18,12 +23,13 @@ jobs: # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true + persist-credentials: false - name: Install Cygwin - uses: egor-tensin/setup-cygwin@d2c752bab416d4b0662591bd366fc2686297c82d # v4 + uses: egor-tensin/setup-cygwin@fca9069f92361187d4abfaa5d8a7490e435d8349 # v4 with: platform: x86_64 install-dir: 'C:\tools\cygwin' @@ -32,7 +38,7 @@ jobs: python-setuptools-wheel liblapack-devel liblapack0 gcc-fortran gcc-g++ git dash cmake ninja - name: Set Windows PATH - uses: egor-tensin/cleanup-path@f04bc953e6823bf491cc0bdcff959c630db1b458 # v4.0.1 + uses: egor-tensin/cleanup-path@64ef0b5036b30ce7845058a1d7a8d0830db39b94 # v4.0.2 with: dirs: 'C:\tools\cygwin\bin;C:\tools\cygwin\lib\lapack' - name: Verify that bash is Cygwin bash @@ -62,7 +68,7 @@ jobs: cd tools /usr/bin/python3.9 -m pytest --pyargs numpy -n2 -m "not slow" - name: Upload wheel if tests fail - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 if: failure() with: name: numpy-cygwin-wheel diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index 95a99d6dcf9b..a4bec8af6d82 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -15,6 +15,10 @@ jobs: runs-on: ubuntu-latest steps: - name: 'Checkout 
Repository' - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + persist-credentials: false - name: 'Dependency Review' - uses: actions/dependency-review-action@72eb03d02c7872a771aacd928f3123ac62ad6d3a # v4.3.3 + uses: actions/dependency-review-action@3c4e3dcb1aa7874d2c16be7d79418e9b7efd6261 # v4.8.2 + with: + allow-ghsas: GHSA-cx63-2mw6-8hw5 diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index ad2f08a9348b..d1c730f5a732 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -5,28 +5,14 @@ on: branches: - main - maintenance/** - # Note: this workflow gets triggered on the same schedule as the - # wheels.yml workflow, with the exception that this workflow runs - # the test suite for the Pyodide wheel too, prior to uploading it. - # - # Run on schedule to upload to Anaconda.org - schedule: - # ┌───────────── minute (0 - 59) - # │ ┌───────────── hour (0 - 23) - # │ │ ┌───────────── day of the month (1 - 31) - # │ │ │ ┌───────────── month (1 - 12 or JAN-DEC) - # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT) - # │ │ │ │ │ - - cron: "42 2 * * SUN,WED" - workflow_dispatch: - inputs: - push_wheels: - # Can be 'true' or 'false'. Default is 'false'. - # Warning: this will overwrite existing wheels. 
- description: > - Push wheels to Anaconda.org if the build succeeds - required: false - default: 'false' + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' + - 'tools/stubtest/**' + +permissions: + contents: read # to fetch code (actions/checkout) env: FORCE_COLOR: 3 @@ -35,96 +21,22 @@ concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true -permissions: - contents: read # to fetch code (actions/checkout) jobs: build-wasm-emscripten: - name: Build NumPy distribution for Pyodide + name: Pyodide test runs-on: ubuntu-22.04 # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' - env: - PYODIDE_VERSION: 0.26.0 - # PYTHON_VERSION and EMSCRIPTEN_VERSION are determined by PYODIDE_VERSION. - # The appropriate versions can be found in the Pyodide repodata.json - # "info" field, or in Makefile.envs: - # https://github.com/pyodide/pyodide/blob/main/Makefile.envs#L2 - PYTHON_VERSION: 3.12.1 - EMSCRIPTEN_VERSION: 3.1.58 - NODE_VERSION: 18 steps: - name: Checkout NumPy - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive - # This input shall fetch tags without the need to fetch the - # entire VCS history, see https://github.com/actions/checkout#usage fetch-tags: true + persist-credentials: false - - name: Set up Python ${{ env.PYTHON_VERSION }} - id: setup-python - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 - with: - python-version: ${{ env.PYTHON_VERSION }} - - - name: Set up Emscripten toolchain - uses: mymindstorm/setup-emsdk@6ab9eb1bda2574c4ddb79809fc9247783eaf9021 # v14 - with: - version: ${{ env.EMSCRIPTEN_VERSION }} - actions-cache-folder: emsdk-cache - - - name: Install pyodide-build - run: pip install pyodide-build==${{ env.PYODIDE_VERSION }} - - - name: Find installation for pyodide-build - shell: python - run: | - import 
os - import pyodide_build - from pathlib import Path - - pyodide_build_path = Path(pyodide_build.__file__).parent - - env_file = os.getenv('GITHUB_ENV') - - with open(env_file, "a") as myfile: - myfile.write(f"PYODIDE_BUILD_PATH={pyodide_build_path}\n") - - - name: Build NumPy for Pyodide - run: | - pyodide build \ - -Cbuild-dir=build \ - -Csetup-args="--cross-file=$PWD/tools/ci/emscripten/emscripten.meson.cross" \ - -Csetup-args="-Dblas=none" \ - -Csetup-args="-Dlapack=none" - - - name: Set up Node.js - uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 - with: - node-version: ${{ env.NODE_VERSION }} - - - name: Set up Pyodide virtual environment - run: | - pyodide venv .venv-pyodide - source .venv-pyodide/bin/activate - pip install dist/*.whl - pip install -r requirements/emscripten_test_requirements.txt - - - name: Test NumPy for Pyodide - run: | - source .venv-pyodide/bin/activate - cd .. - pytest --pyargs numpy -m "not slow" - - # Push to https://anaconda.org/scientific-python-nightly-wheels/numpy - # WARNING: this job will overwrite any existing WASM wheels. - - name: Push to Anaconda PyPI index - if: >- - (github.repository == 'numpy/numpy') && - (github.event_name == 'workflow_dispatch' && github.event.inputs.push_wheels == 'true') || - (github.event_name == 'schedule') - uses: scientific-python/upload-nightly-action@b67d7fcc0396e1128a474d1ab2b48aa94680f9fc # v0.5.0 - with: - artifacts_path: dist/ - anaconda_nightly_upload_token: ${{ secrets.NUMPY_NIGHTLY_UPLOAD_TOKEN }} + - uses: pypa/cibuildwheel@298ed2fb2c105540f5ed055e8a6ad78d82dd3a7e # v3.3.1 + env: + CIBW_PLATFORM: pyodide + CIBW_BUILD: cp312-* diff --git a/.github/workflows/free-threaded-wheels.yml b/.github/workflows/free-threaded-wheels.yml deleted file mode 100644 index edbe8dcc2387..000000000000 --- a/.github/workflows/free-threaded-wheels.yml +++ /dev/null @@ -1,178 +0,0 @@ -# Workflow to build and test wheels for the free-threaded Python build. 
-# -# This should be merged back into wheels.yml when free-threaded wheel -# builds can be uploaded to pypi along with the rest of numpy's release -# artifacts. -# -# To work on the wheel building infrastructure on a fork, comment out: -# -# if: github.repository == 'numpy/numpy' -# -# in the get_commit_message job. Be sure to include [wheel build] in your commit -# message to trigger the build. All files related to wheel building are located -# at tools/wheels/ -name: Free-Threaded Wheel Builder - -on: - schedule: - # ┌───────────── minute (0 - 59) - # │ ┌───────────── hour (0 - 23) - # │ │ ┌───────────── day of the month (1 - 31) - # │ │ │ ┌───────────── month (1 - 12 or JAN-DEC) - # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT) - # │ │ │ │ │ - - cron: "42 2 * * SUN,WED" - pull_request: - branches: - - main - - maintenance/** - # we don't want to upload free-threaded wheels to pypi yet - # so we don't build on tags - workflow_dispatch: - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -permissions: - contents: read # to fetch code (actions/checkout) - -jobs: - get_commit_message: - name: Get commit message - runs-on: ubuntu-latest - # To enable this job and subsequent jobs on a fork, comment out: - if: github.repository == 'numpy/numpy' - outputs: - message: ${{ steps.commit_message.outputs.message }} - steps: - - name: Checkout numpy - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - # Gets the correct commit message for pull request - with: - ref: ${{ github.event.pull_request.head.sha }} - - name: Get commit message - id: commit_message - run: | - set -xe - COMMIT_MSG=$(git log --no-merges -1 --oneline) - echo "message=$COMMIT_MSG" >> $GITHUB_OUTPUT - echo github.ref ${{ github.ref }} - - build_wheels: - name: Build wheel ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} - needs: get_commit_message - if: >- - 
contains(needs.get_commit_message.outputs.message, '[wheel build]') || - github.event_name == 'schedule' || - github.event_name == 'workflow_dispatch' - runs-on: ${{ matrix.buildplat[0] }} - strategy: - # Ensure that a wheel builder finishes even if another fails - fail-fast: false - matrix: - # Github Actions doesn't support pairing matrix values together, let's improvise - # https://github.com/github/feedback/discussions/7835#discussioncomment-1769026 - buildplat: - - [ubuntu-20.04, manylinux_x86_64, ""] - - [ubuntu-20.04, musllinux_x86_64, ""] - # TODO: build numpy and set up Windows and MacOS - # cibuildwheel does not yet support Mac for free-threaded python - # windows is supported but numpy doesn't build on the image yet - python: ["cp313t"] - env: - IS_32_BIT: ${{ matrix.buildplat[1] == 'win32' }} - IS_SCHEDULE_DISPATCH: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }} - steps: - - name: Checkout numpy - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - submodules: true - - - name: Setup MSVC (32-bit) - if: ${{ matrix.buildplat[1] == 'win32' }} - uses: bus1/cabuild/action/msdevshell@e22aba57d6e74891d059d66501b6b5aed8123c4d # v1 - with: - architecture: 'x86' - - - name: pkg-config-for-win - run: | - choco install -y --no-progress --stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite - $CIBW = "${{ github.workspace }}/.openblas" - # pkgconfig needs a complete path, and not just "./openblas since the - # build is run in a tmp dir (?) 
- # It seems somewhere in the env passing, `\` is not - # passed through, so convert it to '/' - $CIBW = $CIBW.replace("\","/") - echo "CIBW_ENVIRONMENT_WINDOWS=PKG_CONFIG_PATH=$CIBW" >> $env:GITHUB_ENV - if: runner.os == 'windows' - - # Used to push the built wheels - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 - with: - python-version: "3.x" - - - name: Setup macOS - if: matrix.buildplat[0] == 'macos-13' || matrix.buildplat[0] == 'macos-14' - run: | - if [[ ${{ matrix.buildplat[2] }} == 'accelerate' ]]; then - # macosx_arm64 and macosx_x86_64 with accelerate - # only target Sonoma onwards - CIBW="MACOSX_DEPLOYMENT_TARGET=14.0 INSTALL_OPENBLAS=false RUNNER_OS=macOS" - echo "CIBW_ENVIRONMENT_MACOS=$CIBW" >> "$GITHUB_ENV" - - # the macos-13 image that's used for building the x86_64 wheel can't test - # a wheel with deployment target >= 14 without further work - echo "CIBW_TEST_SKIP=*-macosx_x86_64" >> "$GITHUB_ENV" - else - # macosx_x86_64 with OpenBLAS - # if INSTALL_OPENBLAS isn't specified then scipy-openblas is automatically installed - CIBW="RUNNER_OS=macOS" - PKG_CONFIG_PATH="$PWD/.openblas" - DYLD="$DYLD_LIBRARY_PATH:/$PWD/.openblas/lib" - echo "CIBW_ENVIRONMENT_MACOS=$CIBW PKG_CONFIG_PATH=$PKG_CONFIG_PATH DYLD_LIBRARY_PATH=$DYLD" >> "$GITHUB_ENV" - fi - - - name: Build wheels - uses: pypa/cibuildwheel@ba8be0d98853f5744f24e7f902c8adef7ae2e7f3 # v2.18.1 - env: - CIBW_PRERELEASE_PYTHONS: True - CIBW_FREE_THREADED_SUPPORT: True - CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} - # TODO: remove along with installing build deps in - # cibw_before_build.sh when a released cython can build numpy - CIBW_BUILD_FRONTEND: "pip; args: --no-build-isolation" - - - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 - with: - name: ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} - path: ./wheelhouse/*.whl - - - uses: 
mamba-org/setup-micromamba@f8b8a1e23a26f60a44c853292711bacfd3eac822 - with: - # for installation of anaconda-client, required for upload to - # anaconda.org - # Note that this step is *after* specific pythons have been used to - # build and test the wheel - # for installation of anaconda-client, for upload to anaconda.org - # environment will be activated after creation, and in future bash steps - init-shell: bash - environment-name: upload-env - create-args: >- - anaconda-client - - - name: Upload wheels - if: success() - shell: bash -el {0} - # see https://github.com/marketplace/actions/setup-miniconda for why - # `-el {0}` is required. - env: - NUMPY_NIGHTLY_UPLOAD_TOKEN: ${{ secrets.NUMPY_NIGHTLY_UPLOAD_TOKEN }} - run: | - source tools/wheels/upload_wheels.sh - set_upload_vars - # trigger an upload to - # https://anaconda.org/scientific-python-nightly-wheels/numpy - # for cron jobs or "Run workflow" (restricted to main branch). - # The tokens were originally generated at anaconda.org - upload_wheels diff --git a/.github/workflows/linux-ppc64le.yml b/.github/workflows/linux-ppc64le.yml new file mode 100644 index 000000000000..c12165287a65 --- /dev/null +++ b/.github/workflows/linux-ppc64le.yml @@ -0,0 +1,72 @@ +name: Native ppc64le Linux Test + +on: + pull_request: + branches: + - main + - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' + - 'tools/stubtest/**' + workflow_dispatch: + +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + native_ppc64le: + # This job runs only in the main NumPy repository. + # It requires a native ppc64le GHA runner, which is not available on forks. 
+ # For more details, see: https://github.com/numpy/numpy/issues/29125 + if: github.repository == 'numpy/numpy' + runs-on: ubuntu-24.04-ppc64le-p10 + + strategy: + fail-fast: false + matrix: + config: + - name: "GCC" + args: "-Dallow-noblas=false" + - name: "clang" + args: "-Dallow-noblas=false" + + name: "${{ matrix.config.name }}" + steps: + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + submodules: recursive + fetch-tags: true + + - name: Install dependencies + run: | + sudo apt update + sudo apt install -y python3.12 python3-pip python3-dev ninja-build gfortran \ + build-essential libopenblas-dev liblapack-dev pkg-config + pip install --upgrade pip + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt + echo "/home/runner/.local/bin" >> $GITHUB_PATH + + - name: Install clang + if: matrix.config.name == 'clang' + run: | + sudo apt install -y clang + export CC=clang + export CXX=clang++ + + - name: Meson Build + run: | + spin build -- ${{ matrix.config.args }} + + - name: Meson Log + if: always() + run: cat build/meson-logs/meson-log.txt + + - name: Run Tests + run: | + spin test -- --timeout=60 --durations=10 diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index aef580c00e30..7522638e01ad 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -1,7 +1,7 @@ name: Linux tests # This file is meant for testing across supported Python versions, build types -# and interpreters (PyPy, python-dbg, a pre-release Python in summer time), +# and interpreters (python-dbg, a pre-release Python in summer time), # build-via-sdist, run benchmarks, measure code coverage, and other build # options. 
@@ -14,6 +14,11 @@ on: branches: - main - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' + - 'tools/stubtest/**' defaults: run: @@ -33,19 +38,26 @@ jobs: runs-on: ubuntu-latest continue-on-error: true steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-depth: 0 - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + persist-credentials: false + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.10' + python-version: '3.12' - name: Install linter requirements run: python -m pip install -r requirements/linter_requirements.txt - - name: Run linter on PR diff + - name: Run linter on PR + env: + BASE_REF: ${{ github.base_ref }} run: - python tools/linter.py --branch origin/${{ github.base_ref }} + spin lint + - name: Check Python.h is first file included + run: | + python tools/check_python_h_first.py + smoke_test: # To enable this job on a fork, comment out: @@ -55,76 +67,81 @@ jobs: MESON_ARGS: "-Dallow-noblas=true -Dcpu-baseline=none -Dcpu-dispatch=none" strategy: matrix: - version: ["3.10", "3.11", "3.12", "3.13-dev"] + version: ["3.12", "3.14t"] steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + persist-credentials: false + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: ${{ matrix.version }} - uses: ./.github/meson_actions - pypy: + debug: needs: [smoke_test] - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 if: github.event_name != 'push' steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: 
actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + persist-credentials: false + - name: Install debug Python + uses: deadsnakes/action@e640ac8743173a67cca4d7d77cd837e514bf98e8 # v3.2.0 with: - python-version: 'pypy3.10-v7.3.15' - - name: Setup using scipy-openblas + python-version: '3.14' + debug: true + - name: Install dependencies run: | - python -m pip install -r requirements/ci_requirements.txt - spin config-openblas --with-scipy-openblas=32 - - uses: ./.github/meson_actions + python --version + pip install -U pip + pip install -r requirements/build_requirements.txt + pip install -r requirements/test_requirements.txt + - name: Build NumPy debug + run: | + spin build -- -Dbuildtype=debug -Dallow-noblas=true + - name: Run test suite + run: | + spin test -- --timeout=600 --durations=10 - debug: + all_versions: + # like the smoke tests but runs on more Python versions needs: [smoke_test] + # To enable this job on a fork, comment out: + if: github.repository == 'numpy/numpy' runs-on: ubuntu-latest - if: github.event_name != 'push' + env: + MESON_ARGS: "-Dallow-noblas=true -Dcpu-baseline=none -Dcpu-dispatch=none" + strategy: + matrix: + version: ["3.13", "3.14", "3.15-dev", "3.15t-dev"] steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true - - name: Install debug Python - run: | - sudo apt-get update - sudo apt-get install python3-dbg ninja-build - - name: Build NumPy and install into venv - run: | - python3-dbg -m venv venv - source venv/bin/activate - pip install -U pip - pip install . 
-v -Csetup-args=-Dbuildtype=debug -Csetup-args=-Dallow-noblas=true - - name: Install test dependencies - run: | - source venv/bin/activate - pip install -r requirements/test_requirements.txt - - name: Run test suite - run: | - source venv/bin/activate - cd tools - pytest --pyargs numpy -m "not slow" + persist-credentials: false + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + with: + python-version: ${{ matrix.version }} + - uses: ./.github/meson_actions full: - # Build a wheel, install it, then run the full test suite with code coverage + # Install as editable, then run the full test suite with code coverage needs: [smoke_test] runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + persist-credentials: false + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.10' + python-version: '3.12' - name: Install build and test dependencies from PyPI run: | pip install -r requirements/build_requirements.txt @@ -138,35 +155,83 @@ jobs: mkdir -p ./.openblas python -c"import scipy_openblas32 as ob32; print(ob32.get_pkg_config())" > ./.openblas/scipy-openblas.pc - - name: Build a wheel + - name: Install as editable env: PKG_CONFIG_PATH: ${{ github.workspace }}/.openblas run: | - python -m build --wheel --no-isolation --skip-dependency-check - pip install dist/numpy*.whl + pip install -e . 
--no-build-isolation - name: Run full test suite run: | - cd tools - pytest --pyargs numpy --cov-report=html:build/coverage + pytest numpy --durations=10 --timeout=600 --cov-report=html:build/coverage # TODO: gcov + env: + PYTHONOPTIMIZE: 2 + + armhf_test: + # Tests NumPy on 32-bit ARM hard-float (armhf) via compatibility mode + # running on aarch64 (ARM 64-bit) GitHub runners. + needs: [smoke_test] + if: github.repository == 'numpy/numpy' + runs-on: ubuntu-24.04-arm + steps: + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + submodules: recursive + fetch-tags: true + persist-credentials: false + + - name: Creates new container + run: | + docker run --name the_container --interactive \ + -v $(pwd):/numpy arm32v7/ubuntu:24.04 /bin/linux32 /bin/bash -c " + apt update && + apt install -y ninja-build cmake git python3 python-is-python3 python3-dev python3-pip python3-venv + " + docker commit the_container the_container + + - name: Meson Build + run: | + docker run --rm -e "TERM=xterm-256color" \ + -v $(pwd):/numpy the_container \ + /bin/script -e -q -c "/bin/linux32 /bin/bash --noprofile --norc -eo pipefail -c ' + cd /numpy && + python -m venv venv && + source venv/bin/activate && + python -m pip install -r /numpy/requirements/build_requirements.txt && + python -m pip install -r /numpy/requirements/test_requirements.txt && + spin build + '" + + - name: Meson Log + if: always() + run: 'cat build/meson-logs/meson-log.txt' + + - name: Run Tests + run: | + docker run --rm -e "TERM=xterm-256color" \ + -v $(pwd):/numpy the_container \ + /bin/script -e -q -c "/bin/linux32 /bin/bash --noprofile --norc -eo pipefail -c ' + cd /numpy && source venv/bin/activate && spin test -m full -- --timeout=600 --durations=10 + '" benchmark: needs: [smoke_test] runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: 
actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + persist-credentials: false + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.10' + python-version: '3.12' - name: Install build and benchmarking dependencies run: | sudo apt-get update sudo apt-get install libopenblas-dev ninja-build - pip install asv virtualenv packaging -r requirements/build_requirements.txt + pip install "asv<0.6.5" virtualenv packaging -r requirements/build_requirements.txt - name: Install NumPy run: | spin build -- -Dcpu-dispatch=none @@ -180,19 +245,27 @@ jobs: shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' run: | spin bench --quick + # These are run on CircleCI + # - name: Check docstests + # shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' + # run: | + # pip install -r requirements/doc_requirements.txt -r requirements/test_requirements.txt + # spin check-docs -v + # spin check-tutorials -v sdist: needs: [smoke_test] runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + persist-credentials: false + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install gfortran and setup OpenBLAS (sdist build) run: | set -xe @@ -213,6 +286,12 @@ jobs: run: | cd tools pytest --pyargs numpy -m "not slow" + - name: Test SWIG binding + run: | + sudo apt update + sudo apt install make swig + pip install setuptools + make -C tools/swig/test test array_api_tests: needs: [smoke_test] @@ -220,21 
+299,23 @@ jobs: if: github.event_name != 'push' steps: - name: Checkout NumPy - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true + persist-credentials: false - name: Checkout array-api-tests - uses: actions/checkout@v4 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: repository: data-apis/array-api-tests - ref: '3cf8ef654c456d9fd1633d64e67b4470465940e9' # Latest commit as of 2024-04-09 + ref: '3c273cd34d51c64ed893737306d36adab23a94a1' # v2025.05.23 submodules: 'true' path: 'array-api-tests' + persist-credentials: false - name: Set up Python - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install build and test dependencies from PyPI run: | python -m pip install -r requirements/build_requirements.txt @@ -249,20 +330,29 @@ jobs: PYTHONWARNINGS: 'ignore::UserWarning::,ignore::DeprecationWarning::,ignore::RuntimeWarning::' run: | cd ${GITHUB_WORKSPACE}/array-api-tests - pytest array_api_tests -v -c pytest.ini --ci --max-examples=2 --derandomize --disable-deadline --skips-file ${GITHUB_WORKSPACE}/tools/ci/array-api-skips.txt + pytest array_api_tests -v -c pytest.ini -n 4 --max-examples=1000 --derandomize --disable-deadline --xfails-file ${GITHUB_WORKSPACE}/tools/ci/array-api-xfails.txt custom_checks: needs: [smoke_test] runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + persist-credentials: false + - uses: 
actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + with: + python-version: '3.12' + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: - python-version: '3.11' + repository: numpy/numpy-release + path: numpy-release + persist-credentials: false + - name: Check scipy-openblas version in release pipelines + run: | + python tools/check_openblas_version.py --req-files numpy-release/requirements/openblas_requirements.txt - name: Install build and test dependencies from PyPI run: | pip install -r requirements/build_requirements.txt @@ -270,7 +360,7 @@ jobs: pip install vulture - name: Build and install NumPy run: | - # Install using the fastests way to build (no BLAS, no SIMD) + # Install using the fastest way to build (no BLAS, no SIMD) spin build -j2 -- -Dallow-noblas=true -Dcpu-baseline=none -Dcpu-dispatch=none - name: Check build-internal dependencies run: | @@ -281,31 +371,53 @@ jobs: - name: Check for unreachable code paths in Python modules run: | # Need the explicit `bash -c` here because `grep` returns exit code 1 for no matches - bash -c "! vulture . --min-confidence 100 --exclude doc/,numpy/distutils/,vendored-meson/ | grep 'unreachable'" + bash -c "! vulture . 
--min-confidence 100 --exclude doc/,vendored-meson/ | grep 'unreachable'" - name: Check usage of install_tag run: | rm -rf build-install ./vendored-meson/meson/meson.py install -C build --destdir ../build-install --tags=runtime,python-runtime,devel python tools/check_installed_files.py $(find ./build-install -path '*/site-packages/numpy') --no-tests - free-threaded: + + Linux_Python_312_32bit_full: + name: i686, cp312, full needs: [smoke_test] runs-on: ubuntu-latest - if: github.event_name != 'push' + container: + # There are few options for i686 images at https://quay.io/organization/pypa, + # use the glibc2.28 one + image: quay.io/pypa/manylinux_2_28_i686 + steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - submodules: recursive - fetch-tags: true - # TODO: replace with setup-python when there is support - - uses: deadsnakes/action@6c8b9b82fe0b4344f4b98f2775fcc395df45e494 # v3.1.0 - with: - python-version: '3.13-dev' - nogil: true - # TODO: remove cython nightly install when cython does a release - - name: Install nightly Cython + - name: Checkout and initialize submodules + # actions/checkout doesn't work in a container image run: | - pip install git+https://github.com/cython/cython - - uses: ./.github/meson_actions - env: - PYTHON_GIL: 0 + git config --global --add safe.directory $PWD + if [ $GITHUB_EVENT_NAME != pull_request ]; then + git clone --recursive --branch=$GITHUB_REF_NAME https://github.com/${GITHUB_REPOSITORY}.git $GITHUB_WORKSPACE + git reset --hard $GITHUB_SHA + else + git clone --recursive https://github.com/${GITHUB_REPOSITORY}.git $GITHUB_WORKSPACE + git fetch origin $GITHUB_REF:my_ref_name + git checkout $GITHUB_BASE_REF + git -c user.email="you@example.com" merge --no-commit my_ref_name + fi + git submodule update --init --recursive + + - name: build + run: | + python3.12 -m venv venv + source venv/bin/activate + pip install --upgrade pip + pip install -r requirements/ci32_requirements.txt + pip 
install -r requirements/test_requirements.txt + + spin config-openblas --with-scipy-openblas=32 + export PKG_CONFIG_PATH=$(pwd)/.openblas + python -m pip install . -v -Csetup-args="-Dallow-noblas=false" + + - name: test + run: | + source venv/bin/activate + cd tools + python -m pytest --pyargs numpy diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index 3b23072dccfa..26d98fe813d7 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -40,6 +40,11 @@ on: branches: - main - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' + - 'tools/stubtest/**' defaults: run: @@ -65,17 +70,18 @@ jobs: USE_NIGHTLY_OPENBLAS: ${{ matrix.USE_NIGHTLY_OPENBLAS }} name: "Test Linux (${{ matrix.USE_NIGHTLY_OPENBLAS && 'nightly' || 'stable' }} OpenBLAS)" steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + persist-credentials: false + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | - pip install -r requirements/build_requirements.txt + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt # Install OpenBLAS if [[ $USE_NIGHTLY_OPENBLAS == "true" ]]; then python -m pip install -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple scipy-openblas32 @@ -84,16 +90,13 @@ jobs: fi mkdir -p ./.openblas python -c"import scipy_openblas32 as ob32; print(ob32.get_pkg_config())" > ./.openblas/scipy-openblas.pc - echo "PKG_CONFIG_PATH=${{ github.workspace }}/.openblas" >> $GITHUB_ENV - ld_library_path=$(python -c"import scipy_openblas32 as ob32; print(ob32.get_lib_dir())") - echo "LD_LIBRARY_PATH=$ld_library_path" >> 
$GITHUB_ENV - name: Build shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' env: TERM: xterm-256color run: - spin build -- --werror -Dallow-noblas=false + spin build -- --werror -Dallow-noblas=false -Dpkg_config_path=${PWD}/.openblas - name: Check build-internal dependencies run: @@ -105,16 +108,14 @@ jobs: - name: Ensure scipy-openblas run: | set -ex - spin python tools/check_openblas_version.py 0.3.26 + spin python tools/check_openblas_version.py -- --min-version 0.3.30 - name: Test shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' env: TERM: xterm-256color - run: | - pip install pytest pytest-xdist hypothesis typing_extensions - spin test -j auto + spin test -j auto -- --timeout=600 --durations=10 openblas_no_pkgconfig_fedora: @@ -127,21 +128,21 @@ jobs: run: | dnf install git gcc-gfortran g++ python3-devel openblas-devel -y - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true + persist-credentials: false - name: Install dependencies run: | - pip install -r requirements/build_requirements.txt - pip install pytest hypothesis typing_extensions + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt - name: Build (LP64) run: spin build -- -Dblas=openblas -Dlapack=openblas -Ddisable-optimization=true -Dallow-noblas=false - name: Test - run: spin test -- numpy/linalg + run: spin test -- numpy/linalg --timeout=600 --durations=10 - name: Build (ILP64) run: | @@ -149,7 +150,7 @@ jobs: spin build -- -Duse-ilp64=true -Ddisable-optimization=true -Dallow-noblas=false - name: Test - run: spin test -- numpy/linalg + run: spin test -- numpy/linalg --timeout=600 --durations=10 flexiblas_fedora: @@ -162,21 +163,21 @@ jobs: run: | dnf install git gcc-gfortran g++ python3-devel flexiblas-devel -y - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 
v4.1.1 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true + persist-credentials: false - name: Install dependencies run: | - pip install -r requirements/build_requirements.txt - pip install pytest hypothesis typing_extensions + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt - name: Build run: spin build -- -Ddisable-optimization=true -Dallow-noblas=false - name: Test - run: spin test -- numpy/linalg + run: spin test -- numpy/linalg --timeout=600 --durations=10 - name: Build (ILP64) run: | @@ -184,7 +185,7 @@ jobs: spin build -- -Ddisable-optimization=true -Duse-ilp64=true -Dallow-noblas=false - name: Test (ILP64) - run: spin test -- numpy/linalg + run: spin test -- numpy/linalg --timeout=600 --durations=10 openblas_cmake: @@ -192,18 +193,18 @@ jobs: runs-on: ubuntu-latest name: "OpenBLAS with CMake" steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + persist-credentials: false + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | - pip install -r requirements/build_requirements.txt - pip install pytest pytest-xdist hypothesis typing_extensions + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt sudo apt-get update sudo apt-get install libopenblas-dev cmake sudo apt-get remove pkg-config @@ -212,25 +213,26 @@ jobs: run: spin build -- -Ddisable-optimization=true -Dallow-noblas=false - name: Test - run: spin test -j auto -- numpy/linalg + run: spin test -j auto -- numpy/linalg --timeout=600 --durations=10 + - netlib-debian: if: github.repository == 'numpy/numpy' 
runs-on: ubuntu-latest name: "Debian libblas/liblapack" steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + persist-credentials: false + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | - pip install -r requirements/build_requirements.txt + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt sudo apt-get update sudo apt-get install liblapack-dev pkg-config @@ -240,8 +242,7 @@ jobs: - name: Test run: | - pip install pytest pytest-xdist hypothesis typing_extensions - spin test -j auto -- numpy/linalg + spin test -j auto -- numpy/linalg --timeout=600 --durations=10 netlib-split: @@ -256,10 +257,11 @@ jobs: # If it is needed in the future, use install name `pkgconf-pkg-config` zypper install -y git gcc-c++ python3-pip python3-devel blas cblas lapack - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true + persist-credentials: false - name: Install PyPI dependencies run: | @@ -271,8 +273,10 @@ jobs: - name: Test run: | - pip install --break-system-packages pytest pytest-xdist hypothesis typing_extensions - spin test -j auto -- numpy/linalg + # do not use test_requirements.txt, it includes coverage which requires + # sqlite3, which is not available on OpenSUSE python + pip install --break-system-packages pytest pytest-xdist hypothesis pytest-timeout + spin test -j auto -- numpy/linalg --timeout=600 --durations=10 mkl: @@ -280,18 +284,19 @@ jobs: runs-on: ubuntu-latest name: "MKL (LP64, ILP64, SDL)" steps: - - uses: 
actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + persist-credentials: false + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | pip install -r requirements/build_requirements.txt - pip install pytest pytest-xdist hypothesis typing_extensions + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt pip install mkl mkl-devel - name: Repair MKL pkg-config files and symlinks @@ -315,7 +320,7 @@ jobs: spin build -- -Ddisable-optimization=true -Dallow-noblas=false - name: Test - run: spin test -- numpy/linalg + run: spin test -- numpy/linalg --timeout=600 --durations=10 - name: Build with ILP64 run: | @@ -324,7 +329,7 @@ jobs: spin build -- -Duse-ilp64=true -Ddisable-optimization=true -Dallow-noblas=false - name: Test - run: spin test -- numpy/linalg + run: spin test -- numpy/linalg --timeout=600 --durations=10 - name: Build without pkg-config (default options, SDL) run: | @@ -336,25 +341,26 @@ jobs: spin build -- -Ddisable-optimization=true -Dallow-noblas=false - name: Test - run: spin test -- numpy/linalg + run: spin test -- numpy/linalg --timeout=600 --durations=10 blis: if: github.repository == 'numpy/numpy' runs-on: ubuntu-latest name: "BLIS" steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + persist-credentials: false + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: 
Install dependencies run: | pip install -r requirements/build_requirements.txt - pip install pytest pytest-xdist hypothesis typing_extensions + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt sudo apt-get update sudo apt-get install libblis-dev libopenblas-dev pkg-config @@ -372,25 +378,26 @@ jobs: run: spin build -- -Dblas=blis -Ddisable-optimization=true -Dallow-noblas=false - name: Test - run: spin test -- numpy/linalg + run: spin test -- numpy/linalg --timeout=600 --durations=10 atlas: if: github.repository == 'numpy/numpy' runs-on: ubuntu-latest name: "ATLAS" steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + persist-credentials: false + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | pip install -r requirements/build_requirements.txt - pip install pytest pytest-xdist hypothesis typing_extensions + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt sudo apt-get update sudo apt-get install libatlas-base-dev pkg-config @@ -399,4 +406,3 @@ jobs: - name: Test run: spin test -- numpy/linalg - diff --git a/.github/workflows/linux_compiler_sanitizers.yml b/.github/workflows/linux_compiler_sanitizers.yml deleted file mode 100644 index d54dd1415950..000000000000 --- a/.github/workflows/linux_compiler_sanitizers.yml +++ /dev/null @@ -1,58 +0,0 @@ -name: Test with compiler sanitizers (Linux) - -on: - pull_request: - branches: - - main - - maintenance/** - -defaults: - run: - shell: bash - -env: - PYTHON_VERSION: 3.11 - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - 
-permissions: - contents: read # to fetch code (actions/checkout) - -jobs: - clang_sanitizers: - # To enable this workflow on a fork, comment out: - if: github.repository == 'numpy/numpy' - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - submodules: recursive - fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 - with: - python-version: ${{ env.PYTHON_VERSION }} - - name: Install dependencies - run: | - sudo apt update - sudo apt install -y llvm libstdc++-12-dev - pip install -r requirements/build_requirements.txt - pip install -r requirements/ci_requirements.txt - - name: Build - shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' - env: - TERM: xterm-256color - PKG_CONFIG_PATH: ${{ github.workspace }}/.openblas - run: - CC=clang CXX=clang++ spin build --with-scipy-openblas=32 -- -Db_sanitize=address,undefined - - name: Test - shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' - env: - TERM: xterm-256color - run: | - pip install pytest pytest-xdist hypothesis typing_extensions - ASAN_OPTIONS=detect_leaks=0:symbolize=1:strict_init_order=true:allocator_may_return_null=1:halt_on_error=1 \ - UBSAN_OPTIONS=halt_on_error=0 \ - LD_PRELOAD=$(clang --print-file-name=libclang_rt.asan-x86_64.so) \ - python -m spin test -- -v -s diff --git a/.github/workflows/linux_musl.yml b/.github/workflows/linux_musl.yml deleted file mode 100644 index ee33632c2343..000000000000 --- a/.github/workflows/linux_musl.yml +++ /dev/null @@ -1,71 +0,0 @@ -name: Test musllinux_x86_64 - -on: - pull_request: - branches: - - main - - maintenance/** - - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - - -permissions: - contents: read # to fetch code (actions/checkout) - - -jobs: - musllinux_x86_64: - runs-on: ubuntu-latest - # To enable this workflow on a fork, comment out: - if: 
github.repository == 'numpy/numpy' - container: - # Use container used for building musllinux wheels - # it has git installed, all the pythons, etc - image: quay.io/pypa/musllinux_1_1_x86_64 - - steps: - - name: setup - run: | - apk update --quiet - - # using git commands to clone because versioneer doesn't work when - # actions/checkout is used for the clone step in a container - - git config --global --add safe.directory $PWD - - if [ $GITHUB_EVENT_NAME != pull_request ]; then - git clone --recursive --branch=$GITHUB_REF_NAME https://github.com/${GITHUB_REPOSITORY}.git $GITHUB_WORKSPACE - git reset --hard $GITHUB_SHA - else - git clone --recursive https://github.com/${GITHUB_REPOSITORY}.git $GITHUB_WORKSPACE - git fetch origin $GITHUB_REF:my_ref_name - git checkout $GITHUB_BASE_REF - git -c user.email="you@example.com" merge --no-commit my_ref_name - fi - git submodule update --init - - ln -s /usr/local/bin/python3.10 /usr/local/bin/python - - - name: test-musllinux_x86_64 - env: - PKG_CONFIG_PATH: ${{ github.workspace }}/.openblas - run: | - python -m venv test_env - source test_env/bin/activate - - pip install -r requirements/ci_requirements.txt - pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt - - # use meson to build and test - # the Duse-ilp64 is not needed with scipy-openblas wheels > 0.3.24.95.0 - # spin build --with-scipy-openblas=64 -- -Duse-ilp64=true - spin build --with-scipy-openblas=64 - spin test -j auto - - - name: Meson Log - shell: bash - run: | - cat build/meson-logs/meson-log.txt diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index d4d6fe4a4989..318f5591c2ac 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -14,6 +14,12 @@ on: branches: - main - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' + - 'tools/stubtest/**' + workflow_dispatch: defaults: run: @@ -28,38 +34,15 @@ permissions: jobs: linux_qemu: - # To 
enable this workflow on a fork, comment out: - if: github.repository == 'numpy/numpy' + # Only workflow_dispatch is enabled on forks. + # To enable this job and subsequent jobs on a fork for other events, comment out: + if: github.repository == 'numpy/numpy' || github.event_name == 'workflow_dispatch' runs-on: ubuntu-22.04 continue-on-error: true strategy: fail-fast: false matrix: BUILD_PROP: - - [ - "armhf", - "arm-linux-gnueabihf", - "arm32v7/ubuntu:22.04", - "-Dallow-noblas=true", - # test_unary_spurious_fpexception is currently skipped - # FIXME(@seiko2plus): Requires confirmation for the following issue: - # The presence of an FP invalid exception caused by sqrt. Unsure if this is a qemu bug or not. - "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_unary_spurious_fpexception" - ] - - [ - "ppc64le", - "powerpc64le-linux-gnu", - "ppc64le/ubuntu:22.04", - "-Dallow-noblas=true", - "test_kind or test_multiarray or test_simd or test_umath or test_ufunc", - ] - - [ - "ppc64le - baseline(Power9)", - "powerpc64le-linux-gnu", - "ppc64le/ubuntu:22.04", - "-Dallow-noblas=true -Dcpu-baseline=vsx3", - "test_kind or test_multiarray or test_simd or test_umath or test_ufunc", - ] - [ "s390x", "s390x-linux-gnu", @@ -68,39 +51,45 @@ jobs: # Skipping TestRationalFunctions.test_gcd_overflow test # because of a possible qemu bug that appears to be related to int64 overflow in absolute operation. # TODO(@seiko2plus): Confirm the bug and provide a minimal reproducer, then report it to upstream. 
- "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_gcd_overflow" - ] + "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_gcd_overflow", + "s390x" + ] - [ "s390x - baseline(Z13)", "s390x-linux-gnu", "s390x/ubuntu:22.04", "-Dallow-noblas=true -Dcpu-baseline=vx", - "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_gcd_overflow" - ] + "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_gcd_overflow", + "s390x" + ] - [ "riscv64", "riscv64-linux-gnu", "riscv64/ubuntu:22.04", "-Dallow-noblas=true", - "test_kind or test_multiarray or test_simd or test_umath or test_ufunc" - ] + "test_kind or test_multiarray or test_simd or test_umath or test_ufunc", + "riscv64" + ] env: TOOLCHAIN_NAME: ${{ matrix.BUILD_PROP[1] }} DOCKER_CONTAINER: ${{ matrix.BUILD_PROP[2] }} MESON_OPTIONS: ${{ matrix.BUILD_PROP[3] }} RUNTIME_TEST_FILTER: ${{ matrix.BUILD_PROP[4] }} + ARCH: ${{ matrix.BUILD_PROP[5] }} TERM: xterm-256color name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true + persist-credentials: false - name: Initialize binfmt_misc for qemu-user-static run: | - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes + # see https://hub.docker.com/r/tonistiigi/binfmt for available versions + docker run --rm --privileged tonistiigi/binfmt:qemu-v9.2.2-52 --install all - name: Install GCC cross-compilers run: | @@ -108,7 +97,7 @@ jobs: sudo apt install -y ninja-build gcc-${TOOLCHAIN_NAME} g++-${TOOLCHAIN_NAME} gfortran-${TOOLCHAIN_NAME} - name: Cache docker container - uses: actions/cache@v4.0.2 + uses: actions/cache@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v5.0.2 id: container-cache with: path: ~/docker_${{ matrix.BUILD_PROP[1] }} @@ -117,9 +106,12 
@@ jobs: - name: Creates new container if: steps.container-cache.outputs.cache-hit != 'true' run: | - docker run --name the_container --interactive -v /:/host -v $(pwd):/numpy ${DOCKER_CONTAINER} /bin/bash -c " + docker run --platform=linux/${ARCH} --name the_container --interactive \ + -v /:/host -v $(pwd):/numpy ${DOCKER_CONTAINER} /bin/bash -c " apt update && - apt install -y cmake git python3 python-is-python3 python3-dev python3-pip && + apt install -y cmake git curl ca-certificates && + curl -LsSf https://astral.sh/uv/install.sh | sh && + export PATH="/root/.local/bin:$PATH" && mkdir -p /lib64 && ln -s /host/lib64/ld-* /lib64/ && ln -s /host/lib/x86_64-linux-gnu /lib/x86_64-linux-gnu && rm -rf /usr/${TOOLCHAIN_NAME} && ln -s /host/usr/${TOOLCHAIN_NAME} /usr/${TOOLCHAIN_NAME} && @@ -133,8 +125,11 @@ jobs: rm -f /usr/bin/ld.bfd && ln -s /host/usr/bin/${TOOLCHAIN_NAME}-ld.bfd /usr/bin/ld.bfd && rm -f /usr/bin/ninja && ln -s /host/usr/bin/ninja /usr/bin/ninja && git config --global --add safe.directory /numpy && - python -m pip install -r /numpy/requirements/build_requirements.txt && - python -m pip install pytest pytest-xdist hypothesis typing_extensions && + # No need to build ninja from source, the host ninja is used for the build + grep -v ninja /numpy/requirements/build_requirements.txt > /tmp/build_requirements.txt && + uv venv --python 3.12 .venv && + source .venv/bin/activate && + uv pip install -r /tmp/build_requirements.txt pytest pytest-xdist hypothesis pytest-timeout rm -f /usr/local/bin/ninja && mkdir -p /usr/local/bin && ln -s /host/usr/bin/ninja /usr/local/bin/ninja " docker commit the_container the_container @@ -147,10 +142,11 @@ jobs: - name: Meson Build run: | - docker run --rm -e "TERM=xterm-256color" -v $(pwd):/numpy -v /:/host the_container \ - /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' - cd /numpy && spin build --clean -- ${MESON_OPTIONS} - '" + docker run --rm --platform=linux/${ARCH} -e "TERM=xterm-256color" \ + 
-v $(pwd):/numpy -v /:/host the_container \ + /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' + source .venv/bin/activate && cd /numpy && spin build --clean -- ${MESON_OPTIONS} + '" - name: Meson Log if: always() @@ -158,9 +154,116 @@ jobs: - name: Run Tests run: | - docker run --rm -e "TERM=xterm-256color" -v $(pwd):/numpy -v /:/host the_container \ + docker run --rm --platform=linux/${ARCH} -e "TERM=xterm-256color" \ + -v $(pwd):/numpy -v /:/host the_container \ + /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' + export F90=/usr/bin/gfortran + source .venv/bin/activate && cd /numpy && spin test -- --timeout=600 --durations=10 -k \"${RUNTIME_TEST_FILTER}\" + '" + + + linux_loongarch64_qemu: + # Only workflow_dispatch is enabled on forks. + # To enable this job and subsequent jobs on a fork for other events, comment out: + if: github.repository == 'numpy/numpy' || github.event_name == 'workflow_dispatch' + runs-on: ubuntu-24.04 + continue-on-error: true + strategy: + fail-fast: false + matrix: + BUILD_PROP: + - [ + "loongarch64", + "loongarch64-linux-gnu", + "cnclarechen/numpy-loong64-debian:v1", + "-Dallow-noblas=true", + "test_kind or test_multiarray or test_simd or test_umath or test_ufunc", + "loong64" + ] + env: + TOOLCHAIN_NAME: ${{ matrix.BUILD_PROP[1] }} + DOCKER_CONTAINER: ${{ matrix.BUILD_PROP[2] }} + MESON_OPTIONS: ${{ matrix.BUILD_PROP[3] }} + RUNTIME_TEST_FILTER: ${{ matrix.BUILD_PROP[4] }} + ARCH: ${{ matrix.BUILD_PROP[5] }} + TERM: xterm-256color + + name: "${{ matrix.BUILD_PROP[0] }}" + steps: + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + submodules: recursive + fetch-tags: true + + - name: Initialize binfmt_misc for qemu-user-static + run: | + docker run --rm --privileged tonistiigi/binfmt:qemu-v10.0.4-56 --install all + + - name: Install GCC cross-compilers + run: | + sudo apt update + sudo apt install -y ninja-build gcc-14-${TOOLCHAIN_NAME} g++-14-${TOOLCHAIN_NAME} 
gfortran-14-${TOOLCHAIN_NAME} + + - name: Cache docker container + uses: actions/cache@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v5.0.2 + id: container-cache + with: + path: ~/docker_${{ matrix.BUILD_PROP[1] }} + key: container-${{ runner.os }}-${{ matrix.BUILD_PROP[1] }}-${{ matrix.BUILD_PROP[2] }}-${{ hashFiles('requirements/build_requirements.txt') }} + + - name: Creates new container + if: steps.container-cache.outputs.cache-hit != 'true' + run: | + docker run --platform=linux/${ARCH} --name the_container --interactive \ + -v /:/host -v $(pwd):/numpy ${DOCKER_CONTAINER} /bin/bash -c " + mkdir -p /lib64 && ln -s /host/lib64/ld-* /lib64/ && + ln -s /host/lib/x86_64-linux-gnu /lib/x86_64-linux-gnu && + ln -s /host/usr/${TOOLCHAIN_NAME} /usr/${TOOLCHAIN_NAME} && + ln -s /host/usr/lib/gcc-cross/${TOOLCHAIN_NAME} /usr/lib/gcc/${TOOLCHAIN_NAME} && + mkdir -p /usr/libexec/gcc && + rm -rf /usr/libexec/gcc/${TOOLCHAIN_NAME} && ln -s /host/usr/libexec/gcc/${TOOLCHAIN_NAME} /usr/libexec/gcc/${TOOLCHAIN_NAME} && + rm -f /usr/bin/gcc && ln -s /host/usr/bin/${TOOLCHAIN_NAME}-gcc-14 /usr/bin/gcc && + rm -f /usr/bin/g++ && ln -s /host/usr/bin/${TOOLCHAIN_NAME}-g++-14 /usr/bin/g++ && + rm -f /usr/bin/gfortran && ln -s /host/usr/bin/${TOOLCHAIN_NAME}-gfortran-14 /usr/bin/gfortran && + rm -f /usr/bin/ar && ln -s /host/usr/bin/${TOOLCHAIN_NAME}-ar /usr/bin/ar && + rm -f /usr/bin/as && ln -s /host/usr/bin/${TOOLCHAIN_NAME}-as /usr/bin/as && + rm -f /usr/bin/ld && ln -s /host/usr/bin/${TOOLCHAIN_NAME}-ld /usr/bin/ld && + rm -f /usr/bin/ld.bfd && ln -s /host/usr/bin/${TOOLCHAIN_NAME}-ld.bfd /usr/bin/ld.bfd && + rm -f /usr/bin/ninja && ln -s /host/usr/bin/ninja /usr/bin/ninja && + git config --global --add safe.directory /numpy && + # No need to build ninja from source, the host ninja is used for the build + grep -v ninja /numpy/requirements/build_requirements.txt > /tmp/build_requirements.txt && + python -m pip install --break-system-packages uv --extra-index-url 
https://mirrors.loong64.com/pypi/simple && + export PATH="/root/.local/bin:$PATH" && + uv venv --python 3.12 .venv && + source .venv/bin/activate && + uv pip install -r /tmp/build_requirements.txt pytest pytest-xdist hypothesis && + rm -f /usr/local/bin/ninja && mkdir -p /usr/local/bin && ln -s /host/usr/bin/ninja /usr/local/bin/ninja + " + docker commit the_container the_container + mkdir -p "~/docker_${TOOLCHAIN_NAME}" + docker save -o "~/docker_${TOOLCHAIN_NAME}/the_container.tar" the_container + + - name: Load container from cache + if: steps.container-cache.outputs.cache-hit == 'true' + run: docker load -i "~/docker_${TOOLCHAIN_NAME}/the_container.tar" + + - name: Meson Build + run: | + docker run --rm --platform=linux/${ARCH} -e "TERM=xterm-256color" \ + -v $(pwd):/numpy -v /:/host the_container \ + /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' + source .venv/bin/activate && cd /numpy/ && spin build --clean -- ${MESON_OPTIONS} + '" + + - name: Meson Log + if: always() + run: 'cat build/meson-logs/meson-log.txt' + + - name: Run Tests + run: | + docker run --rm --platform=linux/${ARCH} -e "TERM=xterm-256color" \ + -v $(pwd):/numpy -v /:/host the_container \ /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' - export F90=/usr/bin/gfortran - cd /numpy && spin test -- -k \"${RUNTIME_TEST_FILTER}\" + source .venv/bin/activate && cd /numpy && spin test -- -k \"${RUNTIME_TEST_FILTER}\" '" - diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index aa4fe75f14cf..46feb4abb3e4 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -7,7 +7,7 @@ name: Linux SIMD tests # # - baseline_only: # Focuses on completing as quickly as possible and acts as a filter for other, more resource-intensive jobs. -# Utilizes only the default baseline targets (e.g., SSE3 on X86_64) without enabling any runtime dispatched features. 
+# Utilizes only the default baseline targets (e.g., X86_V2 on X86_64) without enabling any runtime dispatched features. # # - old_gcc: # Tests the oldest supported GCC version with default CPU/baseline/dispatch settings. @@ -17,11 +17,7 @@ name: Linux SIMD tests # # - native: # Tests against the host CPU features set as the baseline without enabling any runtime dispatched features. -# Intended to assess the entire NumPy codebase against host flags, even for code sections lacking handwritten SIMD intrincis. -# -# - without_avx512/avx2/fma3: -# Uses runtime SIMD dispatching but disables AVX2, FMA3, and AVX512. -# Intended to evaluate 128-bit SIMD extensions without FMA support. +# Intended to assess the entire NumPy codebase against host flags, even for code sections lacking handwritten SIMD intrinsics. # # - without_avx512: # Uses runtime SIMD dispatching but disables AVX512. @@ -35,6 +31,11 @@ on: branches: - main - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' + - 'tools/stubtest/**' defaults: run: @@ -58,13 +59,14 @@ jobs: env: MESON_ARGS: "-Dallow-noblas=true -Dcpu-dispatch=none" steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + persist-credentials: false + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.10' + python-version: '3.12' - uses: ./.github/meson_actions name: Build/Test @@ -75,35 +77,69 @@ jobs: env: MESON_ARGS: "-Dallow-noblas=true" steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + 
persist-credentials: false + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.10' + python-version: '3.12' - - name: Install GCC/8/9 + - name: Install GCC9/10 run: | echo "deb http://archive.ubuntu.com/ubuntu focal main universe" | sudo tee /etc/apt/sources.list.d/focal.list sudo apt update - sudo apt install -y g++-8 g++-9 + sudo apt install -y g++-9 g++-10 - - name: Enable gcc-8 + - name: Enable gcc-9 run: | - sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-8 1 - sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-8 1 + sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-9 1 + sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-9 1 - uses: ./.github/meson_actions - name: Build/Test against gcc-8 + name: Build/Test against gcc-9 - - name: Enable gcc-9 + - name: Enable gcc-10 run: | - sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-9 2 - sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-9 2 + sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 2 + sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-10 2 - uses: ./.github/meson_actions - name: Build/Test against gcc-9 + name: Build/Test against gcc-10 + + arm64_simd: + if: github.repository == 'numpy/numpy' + needs: [baseline_only] + runs-on: ubuntu-22.04-arm + strategy: + fail-fast: false + matrix: + config: + - name: "baseline only" + args: "-Dallow-noblas=true -Dcpu-dispatch=none" + - name: "with ASIMD" + args: "-Dallow-noblas=true -Dcpu-baseline=asimd" + - name: "native" + args: "-Dallow-noblas=true -Dcpu-baseline=native -Dcpu-dispatch=none" + name: "ARM64 SIMD - ${{ matrix.config.name }}" + steps: + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + submodules: recursive + fetch-tags: true + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + with: + python-version: '3.12' + - 
name: Install dependencies + run: | + python -m pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt + - name: Build + run: | + spin build -- ${{ matrix.config.args }} + - name: Test + run: | + spin test -- --timeout=600 --durations=10 specialize: needs: [baseline_only] @@ -122,17 +158,12 @@ jobs: - [ "native", "-Dallow-noblas=true -Dcpu-baseline=native -Dcpu-dispatch=none", - "3.11" + "3.12" ] - [ "without avx512", - "-Dallow-noblas=true -Dcpu-dispatch=SSSE3,SSE41,POPCNT,SSE42,AVX,F16C,AVX2,FMA3", - "3.10" - ] - - [ - "without avx512/avx2/fma3", - "-Dallow-noblas=true -Dcpu-dispatch=SSSE3,SSE41,POPCNT,SSE42,AVX,F16C", - "3.10" + "-Dallow-noblas=true -Dcpu-dispatch=max-x86_v4", + "3.12" ] env: @@ -140,11 +171,12 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + persist-credentials: false + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "${{ matrix.BUILD_PROP[2] }}" - uses: ./.github/meson_actions @@ -154,27 +186,27 @@ jobs: needs: [baseline_only] runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + persist-credentials: false + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install Intel SDE run: | - curl -o /tmp/sde.tar.xz https://downloadmirror.intel.com/788820/sde-external-9.27.0-2023-09-13-lin.tar.xz + curl -o /tmp/sde.tar.xz 
https://downloadmirror.intel.com/859732/sde-external-9.58.0-2025-06-16-lin.tar.xz mkdir /tmp/sde && tar -xvf /tmp/sde.tar.xz -C /tmp/sde/ sudo mv /tmp/sde/* /opt/sde && sudo ln -s /opt/sde/sde64 /usr/bin/sde - name: Install dependencies run: | - python -m pip install -r requirements/build_requirements.txt - python -m pip install pytest pytest-xdist hypothesis typing_extensions + python -m pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt - name: Build - run: CC=gcc-13 CXX=g++-13 spin build -- -Dallow-noblas=true -Dcpu-baseline=avx512_skx -Dtest-simd='BASELINE,AVX512_KNL,AVX512_KNM,AVX512_SKX,AVX512_CLX,AVX512_CNL,AVX512_ICL,AVX512_SPR' + run: CC=gcc-13 CXX=g++-13 spin build -- -Denable-openmp=true -Dallow-noblas=true -Dcpu-baseline=X86_V4 -Dtest-simd='BASELINE,AVX512_ICL,AVX512_SPR' - name: Meson Log if: always() @@ -204,27 +236,27 @@ jobs: needs: [baseline_only] runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + persist-credentials: false + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install Intel SDE run: | - curl -o /tmp/sde.tar.xz https://downloadmirror.intel.com/788820/sde-external-9.27.0-2023-09-13-lin.tar.xz + curl -o /tmp/sde.tar.xz https://downloadmirror.intel.com/859732/sde-external-9.58.0-2025-06-16-lin.tar.xz mkdir /tmp/sde && tar -xvf /tmp/sde.tar.xz -C /tmp/sde/ sudo mv /tmp/sde/* /opt/sde && sudo ln -s /opt/sde/sde64 /usr/bin/sde - name: Install dependencies run: | - python -m pip install -r requirements/build_requirements.txt - python -m pip install pytest pytest-xdist hypothesis typing_extensions + python -m pip install -r requirements/build_requirements.txt 
-r requirements/test_requirements.txt - name: Build - run: CC=gcc-13 CXX=g++-13 spin build -- -Dallow-noblas=true -Dcpu-baseline=avx512_spr + run: CC=gcc-13 CXX=g++-13 spin build -- -Denable-openmp=true -Dallow-noblas=true -Dcpu-baseline=avx512_spr - name: Meson Log if: always() diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 9e622f2221d4..689e775b6aa3 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -1,10 +1,15 @@ -name: macOS tests (meson) +name: macOS tests on: pull_request: branches: - main - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' + - 'tools/stubtest/**' permissions: contents: read # to fetch code (actions/checkout) @@ -21,17 +26,18 @@ jobs: name: macOS x86-64 conda # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' - runs-on: macos-13 + runs-on: macos-15-intel strategy: fail-fast: false matrix: - python-version: ["3.11"] + python-version: ["3.12"] steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true + persist-credentials: false - name: Prepare cache dirs and timestamps id: prep-ccache @@ -44,7 +50,7 @@ jobs: echo "today=$(/bin/date -u '+%Y%m%d')" >> $GITHUB_OUTPUT - name: Setup compiler cache - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 + uses: actions/cache@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v5.0.2 id: cache-ccache with: path: ${{ steps.prep-ccache.outputs.dir }} @@ -52,15 +58,15 @@ jobs: restore-keys: | ${{ github.workflow }}-${{ matrix.python-version }}-ccache-macos- - - name: Setup Mambaforge - uses: conda-incubator/setup-miniconda@a4260408e20b96e80095f42ff7f1a15b27dd94ca # v3.0.4 + - name: Setup Miniforge + uses: conda-incubator/setup-miniconda@fc2d68f6413eb2d87b895e92f8584b5b94a10167 # v3.3.0 with: python-version: ${{ matrix.python-version }} 
channels: conda-forge channel-priority: true activate-environment: numpy-dev use-only-tar-bz2: false - miniforge-variant: Mambaforge + miniforge-variant: Miniforge3 miniforge-version: latest use-mamba: true @@ -68,7 +74,7 @@ jobs: # ensure we re-solve once a day (since we don't lock versions). Could be # replaced by a conda-lock based approach in the future. - name: Cache conda environment - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 + uses: actions/cache@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v5.0.2 env: # Increase this value to reset cache if environment.yml has not changed CACHE_NUMBER: 1 @@ -103,46 +109,50 @@ jobs: accelerate: - name: Accelerate (LP64, ILP64) - ${{ matrix.build_runner[1] }} + name: Accelerate - ${{ matrix.build_runner[1] }} - ${{ matrix.version }} + # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' runs-on: ${{ matrix.build_runner[0] }} strategy: fail-fast: false matrix: build_runner: - - [ macos-13, "macos_x86_64" ] + - [ macos-15-intel, "macos_x86_64" ] - [ macos-14, "macos_arm64" ] + version: ["3.12", "3.14t"] steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true + persist-credentials: false - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 - with: - python-version: '3.10' - - - uses: maxim-lobanov/setup-xcode@60606e260d2fc5762a71e64e74b2174e8ea3c8bd # v1.6.0 - if: ${{ matrix.build_runner[0] == 'macos-13' }} + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - xcode-version: '14.3' + python-version: ${{ matrix.version }} - name: Install dependencies run: | - pip install -r requirements/build_requirements.txt - pip install pytest pytest-xdist hypothesis + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt - name: Build 
against Accelerate (LP64) run: spin build -- -Ddisable-optimization=true -Dallow-noblas=false - name: Test (linalg only) - run: spin test -j2 -- numpy/linalg + run: spin test -j2 -- numpy/linalg --timeout=600 --durations=10 - name: Build NumPy against Accelerate (ILP64) run: | - git clean -xdf + rm -r build build-install spin build -- -Duse-ilp64=true -Ddisable-optimization=true -Dallow-noblas=false - name: Test (fast tests) - run: spin test -j2 + if: ${{ matrix.version != '3.14t' || matrix.build_runner[0] != 'macos-14' }} + run: spin test -j2 -- --timeout=600 --durations=10 + + - name: Test in multiple threads + if: ${{ matrix.version == '3.14t' && matrix.build_runner[0] == 'macos-14' }} + run: | + pip install pytest-run-parallel==0.8.2 + spin test -p 4 -- -sv --timeout=600 --durations=10 diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml deleted file mode 100644 index be9874a9f7eb..000000000000 --- a/.github/workflows/mypy.yml +++ /dev/null @@ -1,69 +0,0 @@ -name: Run MyPy - -# Mypy is too slow to run as part of regular CI. The purpose of the jobs in -# this file is to cover running Mypy across: -# -# - OSes: Linux, Windows and macOS -# - Python versions: lowest/highest supported versions, and an intermediate one -# -# The build matrix aims for sparse coverage across those two dimensions. -# Use of BLAS/LAPACK and SIMD is disabled on purpose, because those things -# don't matter for static typing and this speeds up the builds. -# -# This is a separate job file so it's easy to trigger by hand. 
- -on: - pull_request: - branches: - - main - - maintenance/** - paths-ignore: - - 'benchmarks/' - - '.circlecl/' - - 'docs/' - - 'meson_cpu/' - - 'tools/' - workflow_dispatch: - -defaults: - run: - shell: bash - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -permissions: - contents: read # to fetch code (actions/checkout) - -jobs: - mypy: - # To enable this workflow on a fork, comment out: - if: github.repository == 'numpy/numpy' - name: "MyPy" - runs-on: ${{ matrix.os_python[0] }} - strategy: - fail-fast: false - matrix: - os_python: - - [ubuntu-latest, '3.12'] - - [windows-2019, '3.11'] - - [macos-12, '3.10'] - steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - submodules: recursive - fetch-tags: true - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 - with: - python-version: ${{ matrix.os_python[1] }} - - name: Install dependencies - run: | - pip install -r requirements/build_requirements.txt - pip install -r requirements/test_requirements.txt - - name: Build - run: | - spin build -j2 -- -Dallow-noblas=true -Ddisable-optimization=true --vsenv - - name: Run Mypy - run: | - spin mypy diff --git a/.github/workflows/mypy_primer.yml b/.github/workflows/mypy_primer.yml new file mode 100644 index 000000000000..852f36a966ea --- /dev/null +++ b/.github/workflows/mypy_primer.yml @@ -0,0 +1,104 @@ +name: Run mypy_primer + +on: + # Only run on PR, since we diff against main + pull_request: + paths: + - ".github/workflows/mypy_primer.yml" + - ".github/workflows/mypy_primer_comment.yml" + - "numpy/**/*.pyi" + - "numpy/_typing/*.py" + - "numpy/typing/*.py" + - "!numpy/typing/tests/**" + - "numpy/py.typed" + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +permissions: + contents: read + +jobs: + mypy_primer: + name: Run + runs-on: ubuntu-latest + strategy: + matrix: + 
shard-index: [0] # e.g. change this to [0, 1, 2] and --num-shards below to 3 + fail-fast: false + steps: + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + path: numpy_to_test + fetch-depth: 0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + with: + python-version: "3.12" + - name: Install dependencies + run: pip install git+https://github.com/hauntsaninja/mypy_primer.git + - name: Run mypy_primer + shell: bash + run: | + cd numpy_to_test + MYPY_VERSION=$(grep mypy== requirements/typing_requirements.txt | sed -n 's/mypy==\([^;]*\).*/\1/p') + + echo "new commit" + git checkout $GITHUB_SHA + git rev-list --format=%s --max-count=1 HEAD + + MERGE_BASE=$(git merge-base $GITHUB_SHA origin/$GITHUB_BASE_REF) + git worktree add ../numpy_base $MERGE_BASE + cd ../numpy_base + + echo "base commit" + git rev-list --format=%s --max-count=1 HEAD + + echo '' + cd .. + # fail action if exit code isn't zero or one + # TODO: note that we don't build numpy, so if a project attempts to use the + # numpy mypy plugin, we may see some issues involving version skew. + ( + mypy_primer \ + --new v${MYPY_VERSION} --old v${MYPY_VERSION} \ + --known-dependency-selector numpy \ + --old-prepend-path numpy_base --new-prepend-path numpy_to_test \ + --num-shards 1 --shard-index ${{ matrix.shard-index }} \ + --additional-flags="--python-version=3.12" \ + --debug \ + --output concise \ + | tee diff_${{ matrix.shard-index }}.txt + ) || [ $? 
-eq 1 ] + - if: ${{ matrix.shard-index == 0 }} + name: Save PR number + run: | + echo ${{ github.event.pull_request.number }} | tee pr_number.txt + - name: Upload mypy_primer diff + PR number + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 + if: ${{ matrix.shard-index == 0 }} + with: + name: mypy_primer_diffs-${{ matrix.shard-index }} + path: | + diff_${{ matrix.shard-index }}.txt + pr_number.txt + - name: Upload mypy_primer diff + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 + if: ${{ matrix.shard-index != 0 }} + with: + name: mypy_primer_diffs-${{ matrix.shard-index }} + path: diff_${{ matrix.shard-index }}.txt + + join_artifacts: + name: Join artifacts + runs-on: ubuntu-latest + needs: [mypy_primer] + permissions: + contents: read + steps: + - name: Merge artifacts + uses: actions/upload-artifact/merge@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 + with: + name: mypy_primer_diffs + pattern: mypy_primer_diffs-* + delete-merged: true diff --git a/.github/workflows/mypy_primer_comment.yml b/.github/workflows/mypy_primer_comment.yml new file mode 100644 index 000000000000..7a83cdb53d88 --- /dev/null +++ b/.github/workflows/mypy_primer_comment.yml @@ -0,0 +1,103 @@ +name: Comment with mypy_primer diff + +on: + workflow_run: + workflows: + - Run mypy_primer + types: + - completed + +permissions: + contents: read + pull-requests: write + +jobs: + comment: + name: Comment PR from mypy_primer + runs-on: ubuntu-latest + if: ${{ github.event.workflow_run.conclusion == 'success' }} + steps: + - name: Download diffs + uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 + with: + script: | + const fs = require('fs'); + const artifacts = await github.rest.actions.listWorkflowRunArtifacts({ + owner: context.repo.owner, + repo: context.repo.repo, + run_id: ${{ github.event.workflow_run.id }}, + }); + const [matchArtifact] = artifacts.data.artifacts.filter((artifact) => + 
artifact.name == "mypy_primer_diffs"); + + const download = await github.rest.actions.downloadArtifact({ + owner: context.repo.owner, + repo: context.repo.repo, + artifact_id: matchArtifact.id, + archive_format: "zip", + }); + fs.writeFileSync("diff.zip", Buffer.from(download.data)); + + - run: unzip diff.zip + + - name: Get PR number + id: get-pr-number + uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 + with: + script: | + const fs = require('fs'); + return parseInt(fs.readFileSync("pr_number.txt", { encoding: "utf8" })) + + - name: Hide old comments + uses: int128/hide-comment-action@a218e276fb47d0d526ee989fe02e935a5095417b # v1.50.0 + with: + token: ${{ secrets.GITHUB_TOKEN }} + issue-number: ${{ steps.get-pr-number.outputs.result }} + + - run: cat diff_*.txt | tee fulldiff.txt + + - name: Post comment + id: post-comment + uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + const MAX_CHARACTERS = 50000 + const MAX_CHARACTERS_PER_PROJECT = MAX_CHARACTERS / 3 + + const fs = require('fs') + let data = fs.readFileSync('fulldiff.txt', { encoding: 'utf8' }) + + function truncateIfNeeded(original, maxLength) { + if (original.length <= maxLength) { + return original + } + let truncated = original.substring(0, maxLength) + // further, remove last line that might be truncated + truncated = truncated.substring(0, truncated.lastIndexOf('\n')) + let lines_truncated = original.split('\n').length - truncated.split('\n').length + return `${truncated}\n\n... 
(truncated ${lines_truncated} lines) ...` + } + + const projects = data.split('\n\n') + // don't let one project dominate + data = projects.map(project => truncateIfNeeded(project, MAX_CHARACTERS_PER_PROJECT)).join('\n\n') + // posting comment fails if too long, so truncate + data = truncateIfNeeded(data, MAX_CHARACTERS) + + console.log("Diff from mypy_primer:") + console.log(data) + + let body + if (data.trim()) { + body = 'Diff from [mypy_primer](https://github.com/hauntsaninja/mypy_primer), ' + body += 'showing the effect of this PR on type check results on a corpus of open source code:\n```diff\n' + body += data + '```' + const prNumber = parseInt(fs.readFileSync("pr_number.txt", { encoding: "utf8" })) + await github.rest.issues.createComment({ + issue_number: prNumber, + owner: context.repo.owner, + repo: context.repo.repo, + body + }) + } diff --git a/.github/workflows/pixi-packages.yml b/.github/workflows/pixi-packages.yml new file mode 100644 index 000000000000..400af28084e9 --- /dev/null +++ b/.github/workflows/pixi-packages.yml @@ -0,0 +1,39 @@ +name: Pixi packages tests + +on: + pull_request: + branches: + - main + - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' + +permissions: + contents: read # to fetch code (actions/checkout) + +jobs: + build_packages: + name: Build Pixi packages + runs-on: ${{ matrix.runs-on }} + strategy: + fail-fast: false + matrix: + runs-on: [ubuntu-latest, macos-15] + package_variant: [asan, default] + if: github.repository == 'numpy/numpy' + steps: + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + submodules: recursive + fetch-tags: true + persist-credentials: false + + - uses: prefix-dev/setup-pixi@82d477f15f3a381dbcc8adc1206ce643fe110fb7 # v0.9.3 + with: + pixi-version: v0.60.0 + run-install: false + + - name: Build + run: pixi build --path="pixi-packages/${{ matrix.package_variant }}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml 
index 5bd8f6cd0fce..02a4614c2177 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -11,7 +11,7 @@ on: branches: ["main"] # Declare default permissions as read only. -permissions: read-all +permissions: {} jobs: analysis: @@ -25,12 +25,12 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v3.1.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v3.1.0 with: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@dc50aa9510b46c811795eb24b2f1ba02a914e534 # v2.3.3 + uses: ossf/scorecard-action@4eaacf0543bb3f2c246792bd56e8cdeffafb205a # v2.4.3 with: results_file: results.sarif results_format: sarif @@ -42,7 +42,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable # uploads of run results in SARIF format to the repository Actions tab. - name: "Upload artifact" - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: SARIF file path: results.sarif @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v2.1.27 + uses: github/codeql-action/upload-sarif@cdefb33c0f6224e58673d9004f47f7cb3e328b89 # v2.1.27 with: sarif_file: results.sarif diff --git a/.github/workflows/stubtest.yml b/.github/workflows/stubtest.yml new file mode 100644 index 000000000000..a25e6c54cfb8 --- /dev/null +++ b/.github/workflows/stubtest.yml @@ -0,0 +1,64 @@ +name: stubtest +permissions: read-all + +# Stubtest depends on different branches and paths than mypy does, so we have a separate workflow. 
+ +on: + pull_request: + branches: + - "main" + - "maintenance/2.**" + # Stubtest requires numpy>=2.4 + - "!maintenance/2.[0-3].x" + paths: + - ".github/workflows/stubtest.yml" + - "numpy/**" + - "!numpy/**/tests/**" + - "requirements/typing_requirements.txt" + - "tools/stubtest/**" + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + mypy: + # To enable this workflow on a fork, comment out: + if: github.repository == 'numpy/numpy' + + name: stubtest + runs-on: ${{ matrix.os }}-latest + strategy: + fail-fast: false + matrix: + # TODO: consider including macos and windows + os: [ubuntu] + py: ["3.12", "3.14"] + + steps: + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + submodules: recursive + fetch-tags: true + persist-credentials: false + + - uses: astral-sh/setup-uv@61cb8a9741eeb8a550a1b8544337180c0fc8476b # v7.2.0 + with: + python-version: ${{ matrix.py }} + activate-environment: true + cache-dependency-glob: | + requirements/build_requirements.txt + requirements/typing_requirements.txt + + - name: uv pip install + run: >- + uv pip install + -r requirements/build_requirements.txt + -r requirements/typing_requirements.txt + + - name: spin build + run: spin build -j2 -- -Dallow-noblas=true -Ddisable-optimization=true --vsenv + + - name: spin stubtest + run: spin stubtest diff --git a/.github/workflows/typecheck.yml b/.github/workflows/typecheck.yml new file mode 100644 index 000000000000..321530993b82 --- /dev/null +++ b/.github/workflows/typecheck.yml @@ -0,0 +1,105 @@ +name: Type-checking + +# Mypy is too slow to run as part of regular CI. The purpose of the jobs in +# this file is to cover running Mypy across: +# +# - OSes: Linux, Windows and macOS +# - Python versions: lowest/highest supported versions, and an intermediate one +# +# The build matrix aims for sparse coverage across those two dimensions. 
+# Use of BLAS/LAPACK and SIMD is disabled on purpose, because those things +# don't matter for static typing and this speeds up the builds. +# +# This is a separate job file so it's easy to trigger by hand. + +on: + pull_request: + branches: + - main + - maintenance/** + paths-ignore: + - '**.md' + - '**.rst' + - '.circleci/**' + - '.devcontainer/**' + - '.spin/**' + - 'benchmarks/**' + - 'branding/**' + - 'doc/**' + - 'meson_cpu/**' + - 'tools/**' + - 'vendored-meson/**' + workflow_dispatch: + +defaults: + run: + shell: bash + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +permissions: + contents: read # to fetch code (actions/checkout) + +jobs: + mypy: + # To enable this workflow on a fork, comment out: + if: github.repository == 'numpy/numpy' + name: "MyPy" + runs-on: ${{ matrix.os_python[0] }} + strategy: + fail-fast: false + matrix: + os_python: + - [macos-latest, '3.14'] + - [ubuntu-latest, '3.13'] + - [windows-latest, '3.12'] + steps: + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + submodules: recursive + fetch-tags: true + persist-credentials: false + - uses: astral-sh/setup-uv@61cb8a9741eeb8a550a1b8544337180c0fc8476b # v7.2.0 + with: + python-version: ${{ matrix.os_python[1] }} + activate-environment: true + cache-dependency-glob: | + requirements/build_requirements.txt + requirements/typing_requirements.txt + - name: Install dependencies + # orjson makes mypy faster but the default requirements.txt + # can't install it because orjson doesn't support 32 bit Linux + run: >- + uv pip install + -r requirements/build_requirements.txt + -r requirements/typing_requirements.txt + orjson + basedpyright + - name: Build + run: | + spin build -j2 -- -Dallow-noblas=true -Ddisable-optimization=true --vsenv + - name: Run Mypy + run: | + spin mypy + - name: Check basedpyright's type completeness is at least 100% + run: >- + spin run python
tools/pyright_completeness.py + --verifytypes numpy + --ignoreexternal + --exclude-like '*.tests.*' '*.conftest.*' + + pyrefly: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: astral-sh/setup-uv@61cb8a9741eeb8a550a1b8544337180c0fc8476b # v7.2.0 + with: + activate-environment: true + - name: Install dependencies + run: >- + uv pip install + -r requirements/typing_requirements.txt + - name: Run pyrefly + run: pyrefly check diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 50453bef6ee1..bb15b79da4e8 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -1,34 +1,19 @@ -# Workflow to build and test wheels. -# To work on the wheel building infrastructure on a fork, comment out: +# Workflow to build and test wheels, similarly to numpy/numpy-release. +# To work on these jobs in a fork, comment out: # -# if: github.repository == 'numpy/numpy' -# -# in the get_commit_message job. Be sure to include [wheel build] in your commit -# message to trigger the build. All files related to wheel building are located -# at tools/wheels/ -# Alternatively, you can add labels to the pull request in order to trigger wheel -# builds. 
-# The labels that trigger builds are: -# 36 - Build(for changes to the building process, -# 14 - Release(ensure wheels build before release) +# if: github.repository == 'numpy/numpy' name: Wheel builder on: - schedule: - # ┌───────────── minute (0 - 59) - # │ ┌───────────── hour (0 - 23) - # │ │ ┌───────────── day of the month (1 - 31) - # │ │ │ ┌───────────── month (1 - 12 or JAN-DEC) - # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT) - # │ │ │ │ │ - - cron: "42 2 * * SUN,WED" pull_request: branches: - main - maintenance/** - push: - tags: - - v* + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' + - 'tools/stubtest/**' workflow_dispatch: concurrency: @@ -39,70 +24,35 @@ permissions: contents: read # to fetch code (actions/checkout) jobs: - get_commit_message: - name: Get commit message - runs-on: ubuntu-latest - # To enable this job and subsequent jobs on a fork, comment out: - if: github.repository == 'numpy/numpy' - outputs: - message: ${{ steps.commit_message.outputs.message }} - steps: - - name: Checkout numpy - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - # Gets the correct commit message for pull request - with: - ref: ${{ github.event.pull_request.head.sha }} - - name: Get commit message - id: commit_message - run: | - set -xe - COMMIT_MSG=$(git log --no-merges -1 --oneline) - echo "message=$COMMIT_MSG" >> $GITHUB_OUTPUT - echo github.ref ${{ github.ref }} - build_wheels: name: Build wheel ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} - needs: get_commit_message - if: >- - contains(needs.get_commit_message.outputs.message, '[wheel build]') || - github.event_name == 'schedule' || - github.event_name == 'workflow_dispatch' || - (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') && ( ! 
endsWith(github.ref, 'dev0'))) + # To enable this job on a fork, comment out: + if: github.repository == 'numpy/numpy' runs-on: ${{ matrix.buildplat[0] }} strategy: - # Ensure that a wheel builder finishes even if another fails fail-fast: false matrix: # Github Actions doesn't support pairing matrix values together, let's improvise # https://github.com/github/feedback/discussions/7835#discussioncomment-1769026 buildplat: - - [ubuntu-20.04, manylinux_x86_64, ""] - - [ubuntu-20.04, musllinux_x86_64, ""] - - [macos-13, macosx_x86_64, openblas] + - [ubuntu-22.04, manylinux_x86_64, ""] + - [ubuntu-22.04, musllinux_x86_64, ""] + - [ubuntu-22.04-arm, manylinux_aarch64, ""] + - [ubuntu-22.04-arm, musllinux_aarch64, ""] + - [macos-15-intel, macosx_x86_64, openblas] + - [macos-14, macosx_arm64, openblas] + - [windows-2022, win_amd64, ""] + - [windows-11-arm, win_arm64, ""] + python: ["cp312"] - # targeting macos >= 14. Could probably build on macos-14, but it would be a cross-compile - - [macos-13, macosx_x86_64, accelerate] - - [macos-14, macosx_arm64, accelerate] # always use accelerate - - [windows-2019, win_amd64, ""] - - [windows-2019, win32, ""] - python: ["cp310", "cp311", "cp312", "pp310", "cp313"] - exclude: - # Don't build PyPy 32-bit windows - - buildplat: [windows-2019, win32, ""] - python: "pp310" - - buildplat: [ ubuntu-20.04, musllinux_x86_64, "" ] - python: "pp310" - - buildplat: [ macos-14, macosx_arm64, accelerate ] - python: "pp310" env: - IS_32_BIT: ${{ matrix.buildplat[1] == 'win32' }} - IS_PUSH: ${{ github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') }} - IS_SCHEDULE_DISPATCH: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }} + IS_32_BIT: ${{ matrix.buildplat[1] == 'win32' }} # used in cibw_test_command.sh steps: - name: Checkout numpy - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: 
true + persist-credentials: false - name: Setup MSVC (32-bit) if: ${{ matrix.buildplat[1] == 'win32' }} @@ -110,7 +60,12 @@ jobs: with: architecture: 'x86' + - name: Setup LLVM for Windows ARM64 + if: ${{ matrix.buildplat[1] == 'win_arm64' }} + uses: ./.github/windows_arm64_steps + - name: pkg-config-for-win + if: runner.os == 'windows' run: | choco install -y --no-progress --stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite $CIBW = "${{ github.workspace }}/.openblas" @@ -120,151 +75,35 @@ jobs: # passed through, so convert it to '/' $CIBW = $CIBW.replace("\","/") echo "CIBW_ENVIRONMENT_WINDOWS=PKG_CONFIG_PATH=$CIBW" >> $env:GITHUB_ENV - if: runner.os == 'windows' - - # Used to push the built wheels - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 - with: - python-version: "3.x" - name: Setup macOS - if: matrix.buildplat[0] == 'macos-13' || matrix.buildplat[0] == 'macos-14' + if: matrix.buildplat[0] == 'macos-15-intel' || matrix.buildplat[0] == 'macos-14' run: | + # Needed due to https://github.com/actions/runner-images/issues/3371 + # Supported versions: https://github.com/actions/runner-images/blob/main/images/macos/macos-14-arm64-Readme.md + echo "FC=gfortran-13" >> "$GITHUB_ENV" + echo "F77=gfortran-13" >> "$GITHUB_ENV" + echo "F90=gfortran-13" >> "$GITHUB_ENV" if [[ ${{ matrix.buildplat[2] }} == 'accelerate' ]]; then # macosx_arm64 and macosx_x86_64 with accelerate # only target Sonoma onwards CIBW="MACOSX_DEPLOYMENT_TARGET=14.0 INSTALL_OPENBLAS=false RUNNER_OS=macOS" echo "CIBW_ENVIRONMENT_MACOS=$CIBW" >> "$GITHUB_ENV" - - # the macos-13 image that's used for building the x86_64 wheel can't test - # a wheel with deployment target >= 14 without further work - echo "CIBW_TEST_SKIP=*-macosx_x86_64" >> "$GITHUB_ENV" else # macosx_x86_64 with OpenBLAS # if INSTALL_OPENBLAS isn't specified then scipy-openblas is automatically installed CIBW="RUNNER_OS=macOS" PKG_CONFIG_PATH="$PWD/.openblas" 
DYLD="$DYLD_LIBRARY_PATH:/$PWD/.openblas/lib" - echo "CIBW_ENVIRONMENT_MACOS=$CIBW PKG_CONFIG_PATH=$PKG_CONFIG_PATH DYLD_LIBRARY_PATH=$DYLD" >> "$GITHUB_ENV" + echo "CIBW_ENVIRONMENT_MACOS=$CIBW PKG_CONFIG_PATH=$PKG_CONFIG_PATH DYLD_LIBRARY_PATH=$DYLD" >> "$GITHUB_ENV" fi - name: Build wheels - uses: pypa/cibuildwheel@ba8be0d98853f5744f24e7f902c8adef7ae2e7f3 # v2.18.1 + uses: pypa/cibuildwheel@298ed2fb2c105540f5ed055e8a6ad78d82dd3a7e # v3.3.1 env: - CIBW_PRERELEASE_PYTHONS: True CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} - - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + - uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} path: ./wheelhouse/*.whl - - - uses: mamba-org/setup-micromamba@f8b8a1e23a26f60a44c853292711bacfd3eac822 - with: - # for installation of anaconda-client, required for upload to - # anaconda.org - # Note that this step is *after* specific pythons have been used to - # build and test the wheel - # for installation of anaconda-client, for upload to anaconda.org - # environment will be activated after creation, and in future bash steps - init-shell: bash - environment-name: upload-env - create-args: >- - anaconda-client - - - name: Upload wheels - if: success() - shell: bash -el {0} - # see https://github.com/marketplace/actions/setup-miniconda for why - # `-el {0}` is required. - env: - NUMPY_STAGING_UPLOAD_TOKEN: ${{ secrets.NUMPY_STAGING_UPLOAD_TOKEN }} - NUMPY_NIGHTLY_UPLOAD_TOKEN: ${{ secrets.NUMPY_NIGHTLY_UPLOAD_TOKEN }} - run: | - source tools/wheels/upload_wheels.sh - set_upload_vars - # trigger an upload to - # https://anaconda.org/scientific-python-nightly-wheels/numpy - # for cron jobs or "Run workflow" (restricted to main branch). 
- # Tags will upload to - # https://anaconda.org/multibuild-wheels-staging/numpy - # The tokens were originally generated at anaconda.org - upload_wheels - - build_sdist: - name: Build sdist - needs: get_commit_message - if: >- - contains(needs.get_commit_message.outputs.message, '[wheel build]') || - github.event_name == 'schedule' || - github.event_name == 'workflow_dispatch' || - (github.event_name == 'pull_request' && - (contains(github.event.pull_request.labels.*.name, '36 - Build') || - contains(github.event.pull_request.labels.*.name, '14 - Release'))) || - (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') && ( ! endsWith(github.ref, 'dev0'))) - runs-on: ubuntu-latest - env: - IS_PUSH: ${{ github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') }} - # commented out so the sdist doesn't upload to nightly - # IS_SCHEDULE_DISPATCH: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }} - steps: - - name: Checkout numpy - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - submodules: true - # Used to push the built wheels - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 - with: - # Build sdist on lowest supported Python - python-version: "3.10" - - name: Build sdist - run: | - python -m pip install -U pip build - python -m build --sdist -Csetup-args=-Dallow-noblas=true - - name: Test the sdist - run: | - # TODO: Don't run test suite, and instead build wheels from sdist - # Depends on pypa/cibuildwheel#1020 - python -m pip install dist/*.gz -Csetup-args=-Dallow-noblas=true - pip install -r requirements/test_requirements.txt - cd .. 
# Can't import numpy within numpy src directory - python -c "import numpy, sys; print(numpy.__version__); sys.exit(numpy.test() is False)" - - - name: Check README rendering for PyPI - run: | - python -mpip install twine - twine check dist/* - - - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 - with: - name: sdist - path: ./dist/* - - - uses: conda-incubator/setup-miniconda@a4260408e20b96e80095f42ff7f1a15b27dd94ca # v3.0.4 - with: - # for installation of anaconda-client, required for upload to - # anaconda.org - # default (and activated) environment name is test - # Note that this step is *after* specific pythons have been used to - # build and test - auto-update-conda: true - python-version: "3.10" - - - name: Upload sdist - if: success() - shell: bash -el {0} - env: - NUMPY_STAGING_UPLOAD_TOKEN: ${{ secrets.NUMPY_STAGING_UPLOAD_TOKEN }} - # commented out so the sdist doesn't upload to nightly - # NUMPY_NIGHTLY_UPLOAD_TOKEN: ${{ secrets.NUMPY_NIGHTLY_UPLOAD_TOKEN }} - run: | - conda install -y anaconda-client - source tools/wheels/upload_wheels.sh - set_upload_vars - # trigger an upload to - # https://anaconda.org/scientific-python-nightly-wheels/numpy - # for cron jobs or "Run workflow" (restricted to main branch). 
- # Tags will upload to - # https://anaconda.org/multibuild-wheels-staging/numpy - # The tokens were originally generated at anaconda.org - upload_wheels diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 4d6c811b1409..a1a76d363be3 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -5,6 +5,11 @@ on: branches: - main - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' + - 'tools/stubtest/**' concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} @@ -14,50 +19,34 @@ permissions: contents: read # to fetch code (actions/checkout) jobs: - python64bit_openblas: - name: x86-64, LP64 OpenBLAS - runs-on: windows-2019 + clangcl_python64bit_openblas32: + name: Clang-cl, x86-64, fast, openblas32 + runs-on: windows-2022 # To enable this job on a fork, comment out: if: github.repository == 'numpy/numpy' - strategy: - fail-fast: false - matrix: - compiler: ["MSVC", "Clang-cl"] steps: - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true + persist-credentials: false - name: Setup Python - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: "3.14t" - name: Install build dependencies from PyPI run: | - python -m pip install -r requirements/build_requirements.txt + pip install -r requirements/build_requirements.txt - name: Install pkg-config run: | - choco install -y --stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite + python -m pip install pkgconf echo "PKG_CONFIG_PATH=${{ github.workspace }}/.openblas" >> $env:GITHUB_ENV - - - - name: Install Clang-cl - if: matrix.compiler == 'Clang-cl' - run: | - choco install llvm -y --version=16.0.6 - - - 
name: Install NumPy (MSVC) - if: matrix.compiler == 'MSVC' - run: | - pip install -r requirements/ci_requirements.txt - spin build --with-scipy-openblas=32 -j2 -- --vsenv - name: Install NumPy (Clang-cl) - if: matrix.compiler == 'Clang-cl' run: | "[binaries]","c = 'clang-cl'","cpp = 'clang-cl'","ar = 'llvm-lib'","c_ld = 'lld-link'","cpp_ld = 'lld-link'" | Out-File $PWD/clang-cl-build.ini -Encoding ascii pip install -r requirements/ci_requirements.txt @@ -76,34 +65,52 @@ jobs: - name: Run test suite run: | - spin test + spin test -- --timeout=600 --durations=10 - msvc_32bit_python_no_openblas: - name: MSVC, 32-bit Python, no BLAS - runs-on: windows-2019 + + #======================================================================================= + msvc_python32bit_no_openblas: + name: MSVC, ${{ matrix.architecture }}, fast, no BLAS + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + include: + - os: windows-2022 + architecture: x86 + - os: windows-11-arm + architecture: arm64 # To enable this job on a fork, comment out: if: github.repository == 'numpy/numpy' steps: - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true + persist-credentials: false - - name: Setup Python (32-bit) - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + - name: Setup Python + uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.10' - architecture: 'x86' + python-version: '3.12' + architecture: ${{ matrix.architecture }} - - name: Setup MSVC (32-bit) + - name: Setup MSVC uses: bus1/cabuild/action/msdevshell@e22aba57d6e74891d059d66501b6b5aed8123c4d # v1 with: - architecture: 'x86' + architecture: ${{ matrix.architecture }} - name: Build and install run: | - python -m pip install . 
-v -Ccompile-args="-j2" -Csetup-args="-Dallow-noblas=true" + python -m pip install . -v -Ccompile-args="-j2" -Csetup-args="-Dallow-noblas=true" 2>&1 | tee build.log + + - name: Check warnings + uses: ./.github/check-warnings + with: + log-file: ./build.log + allowlist: ./.github/check-warnings/msvc-allowed-warnings.txt + warning-regex: "warning C|Command line warning" - name: Install test dependencies run: | @@ -112,4 +119,61 @@ jobs: - name: Run test suite (fast) run: | cd tools - python -m pytest --pyargs numpy -m "not slow" -n2 + python -m pytest --pyargs numpy -m "not slow" -n2 --timeout=600 --durations=10 + + #======================================================================================= + msvc_python64bit_openblas: + name: MSVC, x86_64, ${{ matrix.TEST_MODE }}, openblas${{ matrix.BLAS }} + runs-on: windows-2022 + strategy: + fail-fast: false + matrix: + include: + - BLAS: 64 + TEST_MODE: full + pyver: '3.14' + - BLAS: 32 + TEST_MODE: fast + pyver: '3.12' + + # To enable this job on a fork, comment out: + if: github.repository == 'numpy/numpy' + steps: + - name: Checkout + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + submodules: recursive + fetch-tags: true + persist-credentials: false + + - name: Setup Python + uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + with: + python-version: ${{ matrix.pyver }} + + - name: pkg-config + run: | + python -m pip install pkgconf + + - name: Dependencies + run: | + python -m pip install -r requirements/test_requirements.txt + python -m pip install -r requirements/build_requirements.txt + + - name: Build and install + run: | + pip install -r requirements/ci_requirements.txt + spin config-openblas --with-scipy-openblas=${{ matrix.BLAS }} + $env:PKG_CONFIG_PATH="$pwd/.openblas" + python -m pip install --no-build-isolation . 
-v -Csetup-args="--vsenv" -Csetup-args="-Dallow-noblas=false" + + - name: Run test suite ${{ matrix.TEST_MODE }} + run: | + cd tools + # Get a gfortran onto the path for f2py tests + $env:PATH = "c:\\rtools45\\x86_64-w64-mingw32.static.posix\\bin;$env:PATH" + If ( "${{ matrix.TEST_MODE }}" -eq "full" ) { + python -m pytest --pyargs numpy -rsx -n2 --durations=10 + } else { + python -m pytest --pyargs numpy -m "not slow" -n2 --timeout=600 --durations=10 -rsx + } diff --git a/.gitignore b/.gitignore index e90cccc46642..b54de4091bf3 100644 --- a/.gitignore +++ b/.gitignore @@ -43,28 +43,11 @@ GTAGS *.so *.mod -# Packages # -############ -# it's better to unpack these files and commit the raw source -# git has its own built in compression methods -*.7z -*.bz2 -*.bzip2 -*.dmg -*.gz -*.iso -*.jar -*.rar -*.tar -*.tbz2 -*.tgz -*.zip - # Python files # ################ # meson build/installation directories build -build-install +build-* # meson python output .mesonpy-native-file.ini # sphinx build directory @@ -81,6 +64,9 @@ doc/cdoc/build .cache pip-wheel-metadata .python-version +# virtual envs +numpy-dev/ +venv/ # Paver generated files # ######################### @@ -121,6 +107,7 @@ Thumbs.db doc/source/savefig/ doc/source/**/generated/ doc/source/release/notes-towncrier.rst +doc/source/.jupyterlite.doit.db # Things specific to this project # ################################### diff --git a/.mailmap b/.mailmap index 2d910fe98fea..1a906d065f47 100644 --- a/.mailmap +++ b/.mailmap @@ -7,47 +7,88 @@ # # This file is up-to-date if the command git log --format="%aN <%aE>" | sort -u # gives no duplicates. 
- -@8bitmp3 <19637339+8bitmp3@users.noreply.github.com> -@DWesl <22566757+DWesl@users.noreply.github.com> -@Endolith -@GalaxySnail -@Illviljan <14371165+Illviljan@users.noreply.github.com> -@LSchroefl <65246829+LSchroefl@users.noreply.github.com> -@Lbogula -@Lisa <34400837+lyzlisa@users.noreply.github.com> -@Patrick <39380924+xamm@users.noreply.github.com> -@Scian <65375075+hoony6134@users.noreply.github.com> -@amagicmuffin <2014wcheng@gmail.com> -@code-review-doctor -@dg3192 <113710955+dg3192@users.noreply.github.com> -@h-vetinari -@h6197627 <44726212+h6197627@users.noreply.github.com> -@jbCodeHub -@juztamau5 -@legoffant <58195095+legoffant@users.noreply.github.com> -@luzpaz -@luzpaz -@partev -@pkubaj -@pmvz -@pratiklp00 -@sfolje0 -@spacescientist -@tajbinjohn -@tautaus -@xoviat <49173759+xoviat@users.noreply.github.com> -@xoviat <49173759+xoviat@users.noreply.github.com> -@yan-wyb -@yetanothercheer +!8bitmp3 <19637339+8bitmp3@users.noreply.github.com> +!Algorithmist-Girl <36552319+Algorithmist-Girl@users.noreply.github.com> +!DWesl <22566757+DWesl@users.noreply.github.com> +!Dreamge +!EarlMilktea <66886825+EarlMilktea@users.noreply.github.com> +!Endolith +!GalaxySnail +!Illviljan <14371165+Illviljan@users.noreply.github.com> +!LSchroefl <65246829+LSchroefl@users.noreply.github.com> +!Lbogula +!Lisa <34400837+lyzlisa@users.noreply.github.com> +!MyUserNameWasTakenLinux +!Patrick <39380924+xamm@users.noreply.github.com> +!Scian <65375075+hoony6134@users.noreply.github.com> +!Searchingdays +!amagicmuffin <2014wcheng@gmail.com> +!amotzop +!bersbersbers <12128514+bersbersbers@users.noreply.github.com> +!code-review-doctor +!cook-1229 <70235336+cook-1229@users.noreply.github.com> +!dg3192 <113710955+dg3192@users.noreply.github.com> +!ellaella12 +!ellaella12 <120079323+ellaella12@users.noreply.github.com> +!fengluoqiuwu +!fengluoqiuwu <163119756+fengluoqiuwu@users.noreply.github.com> +!h-vetinari +!h6197627 <44726212+h6197627@users.noreply.github.com> +!hutauf +!ianlv 
<168640168+ianlv@users.noreply.github.com> +!jbCodeHub +!juztamau5 +!karl3wm +!kostayScr <11485271+kostayScr@users.noreply.github.com> +!legoffant <58195095+legoffant@users.noreply.github.com> +!liang3zy22 <35164941+liang3zy22@users.noreply.github.com> +!luzpaz +!luzpaz +!matoro +!mcp292 +!mgunyho <20118130+mgunyho@users.noreply.github.com> +!msavinash <73682349+msavinash@users.noreply.github.com> +!musvaage +!mykykh <49101849+mykykh@users.noreply.github.com> +!nullSoup <34267803+nullSoup@users.noreply.github.com> +!ogidig5 <82846833+ogidig5@users.noreply.github.com> +!olivier +!partev +!pkubaj +!pmvz +!pojaghi <36278217+pojaghi@users.noreply.github.com> +!pratiklp00 +!samir539 +!sfolje0 +!spacescientist +!stefan6419846 +!stefan6419846 <96178532+stefan6419846@users.noreply.github.com> +!tajbinjohn +!tautaus +!undermyumbrella1 +!vahidmech +!wenlong2 +!xoviat <49173759+xoviat@users.noreply.github.com> +!xoviat <49173759+xoviat@users.noreply.github.com> +!yan-wyb +!yetanothercheer Aaron Baecker +Abhishek Kumar +Abhishek Kumar <142383124+abhishek-iitmadras@users.noreply.github.com> +Abhishek Tiwari <27881020+Abhi210@users.noreply.github.com> +Abraham Medina +Adrin Jalali +Akhil Kannan +Akhil Kannan <143798318+Alverok@users.noreply.github.com> Arun Kota Arun Kota Arun Kota Aarthi Agurusa Adarsh Singh ADARSH SINGH +Aditi Saluja <136131452+salujaditi14@users.noreply.github.com> Andrei Batomunkuev Ajay DS Ajay DS +Ajay Kumar Janapareddi Alan Fontenot Alan Fontenot <36168460+logeaux@users.noreply.github.com> Abdul Muneer @@ -60,6 +101,7 @@ Aerik Pawson <45904740+aerikpawson@users.noreply.github.com> Ahmet Can Solak Amrit Krishnan Amrit Krishnan +Ankit Ahlawat Alban Desmaison Albert Jornet Puig Alberto Rubiales @@ -87,8 +129,11 @@ Alok Singhal Alok Singhal Alyssa Quek Andrea Bianchi Andrea Bianchi andrea-bia +Anik Chand <161185149+anikchand461@users.noreply.github.com> Ankit Dwivedi Ankit Dwivedi +Ankur Singh +Ankur Singh <98346896+ankur0904@users.noreply.github.com> Amir 
Sarabadani Anas Khan Anatoly Techtonik @@ -99,7 +144,9 @@ Andrea Sangalli <53617841+and-sang@users.noreply.github.c Andreas KlÃļckner Andreas Schwab Andrei Kucharavy +Andrej Zhilenkov Andrew Lawson +Aniket Singh Yadav <148300120+Aniketsy@users.noreply.github.com> Anirudh Subramanian Anne Archibald Anne Archibald @@ -109,16 +156,22 @@ Antoine Pitrou Anton Prosekin AnÅže Starič Arfy Slowy +Arnaud Ma +Arnaud Tremblay +Arnaud Tremblay <59627629+Msa360@users.noreply.github.com> Aron Ahmadia Arun Kota Arun Kota +Arun Pa Arun Palaniappen Arun Persaud Ashutosh Singh Ashutosh Singh <55102089+Ashutosh619-sudo@users.noreply.github.com> Åsmund Hjulstad Auke Wiggers +Austin Ran <504977925@qq.com> Badhri Narayanan Krishnakumar +Baskar Gopinath Bhavuk Kalra Bhavuk Kalra Bangcheng Yang @@ -126,9 +179,11 @@ Bhargav V <12525622+brpy@users.noreply.github.com> Bas van Beek <43369155+BvB93@users.noreply.github.com> Behzad Nouri Ben Nathanson +Ben Woodruff Benjamin Root Benjamin Root weathergod Bernardt Duvenhage +Benoit Prabel Bernie Gray Bertrand Lefebvre Bharat Raghunathan @@ -152,6 +207,10 @@ Bui Duc Minh <41239569+Mibu287@users.noreply.github.co Caio Agiani Carl Kleffner Carl Leake +Carlos Henrique Hermanny Moreira da Silva +Carlos Henrique Hermanny Moreira da Silva <121122527+carlosilva10260@users.noreply.github.com> +Carlos Martin +Carlos Martin CÊdric Hannotier Charles Stern <62192187+cisaacstern@users.noreply.github.com> Chiara Marmo @@ -162,10 +221,15 @@ Chris Burns Chris Fu (傅įĢ‹ä¸š) <17433201@qq.com> Chris Holland <41524756+ChrisAHolland@users.noreply.github.com> Chris Kerr +Chris Navarro +Chris Navarro <24905907+lvllvl@users.noreply.github.com> Chris Vavaliaris +Christian Barbia Christian Clauss +Christine P. 
Chai Christopher Dahlin Christopher Hanley +Christoph Buchner Christoph Gohlke Christoph Gohlke Christoph Gohlke cgholke @@ -173,6 +237,7 @@ Chun-Wei Chen Chunlin Fang Chunlin Fang <834352945@qq.com> Chunlin Fang +Cobalt Yang Colin Snyder <8csnyder@gmail.com> <47012605+colinsnyder@users.noreply.github.com> Constanza Fierro Dahyun Kim @@ -199,31 +264,41 @@ David Pitchford David Prosin Davide Dal Bosco <62077652+davidedalbosco@users.noreply.github.com> Dawid Zych +Dennis Van de Vorst <87502756+dvorst@users.noreply.github.com> Dennis Zollo Derek Homeier Derek Homeier Derek Homeier Derrick Williams Devin Shanahan +Daval Parmar <53395856+DhavalParmar61@users.noreply.github.com> +Diego Atencia <53157128+alektebel@users.noreply.github.com> Digya Acharya Dima Pasechnik Dima Pasechnik Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Ding Liu Ding Liu +D.J. Ramones +D.J. Ramones <50655786+djramones@users.noreply.github.com> Dmitriy Shalyga Dmitry Belov Dustan Levenstein <43019642+dustanlevenstein@users.noreply.github.com> +Diya Singh Dylan Cutler Ed Schofield Egor Zindy +Élie Goudout +Élie Goudout <114467748+eliegoudout@users.noreply.github.com> Elliott M. Forney Erik M. Bray Erik M. Bray Erik M. Bray Eric Fode Eric Fode Eric Quintero +Eric Xie <161030123+EngineerEricXie@users.noreply.github.com> Ernest N. Mamikonyan +Ernst Peng Eskild Eriksen Eskild Eriksen <42120229+iameskild@users.noreply.github.com> Eskild Eriksen @@ -252,8 +327,16 @@ Greg Young Greg Young Gregory R. Lee Gregory R. Lee +Gubaydullin Danis +Gubaydullin Danis <96629796+DanisNone@users.noreply.github.com> +Guido Imperiale Guo Ci guoci Guo Shuai +Gyeongjae Choi +Habiba Hye +Habiba Hye <145866308+HabibiHye@users.noreply.github.com> +Halle Loveday +Halle Loveday Hameer Abbasi Hannah Aizenman Han Genuit @@ -263,13 +346,20 @@ Helder Oliveira Hemil Desai Himanshu Hiroyuki V. 
Yamazaki +Daniel Hrisca +Daniel Hrisca +François de Coatpont +François de Coatpont <93073405+Chevali2004@users.noreply.github.com> Hugo van Kemenade Iantra Solari I-Shen Leong +Ishan Purekar Imen Rajhi Inessa Pawson Irina Maria Mocan <28827042+IrinaMaria@users.noreply.github.com> Irvin Probst +Ishan Koradia +Ishan Koradia <39583356+Ishankoradia@users.noreply.github.com> Ivan Meleshko Isabela Presedo-Floyd Ganesh Kathiresan @@ -277,13 +367,21 @@ Gerhard Hobler Giannis Zapantis Guillaume Peillex Jack J. Woehr +Jacob M. Casey +Jakob Stevens Haas <37048747+Jacob-Stevens-Haas@users.noreply.github.com> Jaime Fernandez Jaime Fernandez Jaime Fernandez Jake Close +Jake VanderPlas +Jake VanderPlas +Jake VanderPlas Jakob Jakobson Jakob Jakobson <43045863+jakobjakobson13@users.noreply.github.com> James Bourbeau +James Joseph Thomas +James Joseph Thomas quotuva +James Oliver <46758370+jamesoliverh@users.noreply.github.com> James Webber Jamie Macey Jan SchlÃŧter @@ -302,31 +400,38 @@ JÊrôme Richard JessÊ Pires Jessi J Zhao <35235453+jessijzhao@users.noreply.github.com> -JoÃŖo Fontes Gonçalves -Johann Rohwer -Johann Rohwer jmrohwer -Johnathon Cusick Jhong-Ken Chen (険äģ˛č‚¯) Jhong-Ken Chen (険äģ˛č‚¯) <37182101+kennychenfs@users.noreply.github.com> +Jingu Kang +Jiuding Tan (谭九éŧŽ) <109224573@qq.com> +Johann Faouzi +Johann Rohwer +Johann Rohwer jmrohwer Johannes Hampp <42553970+euronion@users.noreply.github.com> +Johannes Kaisinger +Johannes Kaisinger Johannes SchÃļnberger -Johann Faouzi John Darbyshire <24256554+attack68@users.noreply.github.com> <24256554+attack68@users.noreply.github.com> John Hagen John Kirkham John Kirkham +Johnathon Cusick Johnson Sun <20457146+j3soon@users.noreply.github.com> Jonas I. Liechti Jonas I. Liechti Jonas I. 
Liechti +Joren Hammudoglu +Jory Klaverstijn +Jory Klaverstijn <63673224+JoryKlaverstijn@users.noreply.github.com> Joseph Fox-Rabinovitz Joseph Fox-Rabinovitz Joseph Fox-Rabinovitz Joseph Martinot-Lagarde Joshua Himmens Joyce Brum -Jory Klaverstijn -Jory Klaverstijn <63673224+JoryKlaverstijn@users.noreply.github.com> +JoÃŖo Fontes Gonçalves +Julia Poo +Julia Poo <57632293+JuliaPoo@users.noreply.github.com> Julian Taylor Julian Taylor Julian Taylor @@ -335,11 +440,16 @@ Julien Schueller Junyan Ou Justus Magin Justus Magin +Kai Germaschewski Kai Striega Kai Striega Kasia Leszek Kasia Leszek <39829548+katleszek@users.noreply.github.com> Karan Dhir +Karel Planken <71339309+kplanken@users.noreply.github.com> +Karthik Gangula <56480632+gangula-karthik@users.noreply.github.com> +Karthik Kaiplody +Khelf Mohamed Keller Meier Kenny Huynh Kevin Granados @@ -350,7 +460,11 @@ Kerem Hallaç Khaled Ben Abdallah Okuda Kiko Correoso kikocorreoso Kiko Correoso kikocorreoso +Kira Prokopenko +Koki Watanabe Konrad Kapp +Kristoffer Pedersen +Kristoffer Pedersen Kriti Singh Kmol Yuan Kumud Lakara <55556183+kumudlakara@users.noreply.github.com> @@ -362,19 +476,27 @@ Lars GrÃŧter Leona Taric Leona Taric <92495067+LeonaTaric@users.noreply.github.com> Leonardus Chen +Liangyu Zhang Licht Takeuchi Lorenzo Mammana Lillian Zha Lillian Zha +Linus Sommer +Linus Sommer <95619282+linus-md@users.noreply.github.com> Lu Yun Chi <32014765+LuYunChi@users.noreply.github.com> Luis Pedro Coelho +Lucas Colley Luke Zoltan Kelley Madhulika Jain Chambers <53166646+madhulikajc@users.noreply.github.com> Magdalena Proszewska Magdalena Proszewska <38814059+mproszewska@users.noreply.github.com> +Makima C. 
Yang Malik Idrees Hasan Khan <77000356+MalikIdreesHasanKhan@users.noreply.github.com>C Manoj Kumar +Marcel Loose Marcin Podhajski <36967358+m-podhajski@users.noreply.github.com> +Marco Edward Gorelli +Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Margret Pax Margret Pax <13646646+paxcodes@users.noreply.github.com> Mark DePristo @@ -386,6 +508,8 @@ Mark Wiebe Mark Wiebe Mars Lee Mars Lee <46167686+MarsBarLee@users.noreply.github.com> +Marten van Kerkwijk +Marten van Kerkwijk Martin Goodson Martin Reinecke Martin Teichmann @@ -395,18 +519,24 @@ Matheus Vieira Portela Matheus Santana Patriarca Mathieu Lamarre Matías Ríos +Matt Hancock Matt Ord Matt Ord <55235095+Matt-Ord@users.noreply.github.com> -Matt Hancock +Matt Thompson +Matthias Bussonnier Martino Sorbaro MÃĄrton GunyhÃŗ Mattheus Ueckermann Matthew Barber Matthew Harrigan Matthias Bussonnier +Matthias Schaufelberger +Matthias Schaufelberger <45293673+maisevector@users.noreply.github.com> Matthieu Darbois Matti Picus Matti Picus mattip +Maya Anderson +Maya Anderson <63074550+andersonm-ibm@users.noreply.github.com> Maximilian Konrad Melissa Weber Mendonça Melissa Weber Mendonça @@ -424,18 +554,31 @@ Michael Schnaitter Michael Seifert Michel Fruchart +Mike O'Brien Mike Toews Miki Watanabe (æ¸Ąé‚‰ įžŽå¸Œ) Miles Cranmer +Milica Dančuk +Milica Dančuk love-bees <33499899+love-bees@users.noreply.github.com> Mircea Akos Bruma Mircea Akos Bruma Mitchell Faas <35742861+Mitchell-Faas@users.noreply.github.com> +Mohammed Abdul Rahman +Mohammed Abdul Rahman <130785777+that-ar-guy@users.noreply.github.com> +Muhammad Maaz +Muhammad Maaz <76714503+mmaaz-git@users.noreply.github.com> +Mohammed Zuhaib <56065368+zuhu2195@users.noreply.github.com> +Mohaned Qunaibit Muhammad Kasim +Muhammed Muhsin +Mugundan Selvanayagam Mukulika Pahari Mukulika Pahari <60316606+Mukulikaa@users.noreply.github.com> Munira Alduraibi Namami Shanker Namami Shanker NamamiShanker +Nathan Goldbaum +Nathan Goldbaum Nathaniel J. 
Smith Naveen Arunachalam naveenarun Neil Girdhar @@ -445,32 +588,45 @@ Nicolas Scheffer Nicolas Scheffer nickdg Nicholas McKibben Nick Minkyu Lee fivemok <9394929+fivemok@users.noreply.github.com> +Nyakku Shigure Norwid Behrnd Norwid Behrnd -Oliver Eberle Oleksiy Kononenko Oleksiy Kononenko <35204136+oleksiyskononenko@users.noreply.github.com> +Oliver Eberle +Olivier Barthelemy +Olivier Mattelaer Omar Ali Omid Rajaei Omid Rajaei <89868505+rajaeinet@users.noreply.github.com> Ondřej Čertík +Oscar Armas-Luy Óscar Villellas GuillÊn +Pablo Losada +Pablo Losada <48804010+TheHawz@users.noreply.github.com> Panos Mavrogiorgos Pantelis Antonoudiou Pantelis Antonoudiou Pat Miller patmiller +Paul Caprioli Paul Ivanov Paul Ivanov -Paul YS Lee Paul Paul Jacobson +Paul Juma Otieno +Paul Juma Otieno <103896399+otieno-juma@users.noreply.github.com> +Paul Reece +Paul YS Lee Paul Pey Lian Lim Pey Lian Lim <2090236+pllim@users.noreply.github.com> Pearu Peterson Pete Peeradej Tanruangporn Peter Bell Peter J Cock +Peter Kämpf Peyton Murray Phil Elson +Phoenix Studio <59125767+phoenixstudiodz@users.noreply.github.com> +Filipe Laíns Pierre GM Pierre GM pierregm Piotr Gaiński @@ -481,15 +637,22 @@ Prathmesh Shirsat <55539563+Fayyr@users.noreply.github.com> Prithvi Singh Prithvi Singh <42640176+prithvitewatia@users.noreply.github.com> Przemyslaw Bartosik +Raghuveer Devulapalli Raghuveer Devulapalli -Raghuveer Devulapalli <44766858+r-devulap@users.noreply.github.com> +Raghuveer Devulapalli <447668+r-devulap@users.noreply.github.com> Rajas Rade lkdmttg7 Rakesh Vasudevan +Ralf BÃŧrkle <214435818+polaris-3@users.noreply.github.com> Ralf Gommers Ralf Gommers rgommers Rehas Sachdeva +Richard Howe <45905457+rmhowe425@users.noreply.github.com> +Riku Sakamoto +Riku Sakamoto <46015196+riku-sakamoto@users.noreply.github.com> Ritta Narita Riya Sharma +Rob Timpe +Rob Timpe Robert Kern Robert LU Robert T. 
McGibbon @@ -499,7 +662,9 @@ Rohit Goswami Roland Kaufmann Roman Yurchak Ronan Lamy Ronan Lamy +Rostan Tabet Roy Jacobson +Rupesh Sharma <206439536+Rupeshhsharma@users.noreply.github.com> Russell Hewett Ryan Blakemore Ryan Polley @@ -514,22 +679,29 @@ Sam Radhakrishnan = <=> # committed without an email address Samesh Lakhotia Samesh Lakhotia <43701530+sameshl@users.noreply.github.com> Sami Salonen +Samuel Albanie Sanchez Gonzalez Alvaro Sanya Sinha <83265366+ssanya942@users.noreply.github.com> Saransh Chopra Saullo Giovani Saurabh Mehta Sayantika Banik +Sayed Awad Schrijvers Luc +Sean Cheah +Sean Cheah <67928790+thalassemia@users.noreply.github.com> Sebastian Berg +Sebastian Berg Sebastian Schleehauf Serge Guelton Sergei Vorfolomeev <39548292+vorfol@users.noreply.github.com> Shuangchi He +Shaurya Barkund <64537538+Shaurya19@users.noreply.github.com> Shubham Gupta Shubham Gupta <63910248+shubham11941140@users.noreply.github.com> Shekhar Prasad Rajak Shen Zhou +Shirong Wang Shreya Singh Shota Kawabuchi Siavash Eliasi @@ -539,9 +711,11 @@ Simon Gasse Simon Gasse Sista Seetaram Sista Seetaram <65669128+sistaseetaram@users.noreply.github.com> +Slava Gorloff <31761951+gorloffslava@users.noreply.github.com> Søren Rasmussen <47032123+sorenrasmussenai@users.noreply.github.com> Spencer Hill Srimukh Sripada +Stan Ulbrych <89152624+StanFromIreland@users.noreply.github.com> Stefan Behnel Stefan van der Walt Stefan van der Walt @@ -552,7 +726,10 @@ Steve Stagg Steven J Kern Stuart Archibald Stuart Archibald +SUMIT SRIMANI <2301109104@ptuniv.edu.in SuryaChand P +Swayam Singh +Swayam Singh Sylvain Ferriol Takanori Hirano Theodoros Nikolaou @@ -561,6 +738,7 @@ Talha Mohsin <131553190+talhabm@users.noreply.github.com Thomas A Caswell Thomas Kluyver Thomas Orgis +Timileyin Daso Tim Cera Tim Teichmann Tim Teichmann <44259103+tteichmann@users.noreply.github.com> @@ -576,16 +754,24 @@ Toshiki Kataoka Travis Oliphant Travis Oliphant Travis Oliphant +Vahid Tavanashad 
<120411540+vtavana@users.noreply.github.com> +Varad Raj Singh Valentin Haenel Valentin Haenel Vardhaman Kalloli <83634399+cyai@users.noreply.github.com> Varun Nayyar +Victor Herdeiro +Vijayakumar Z Vinith Kishore Vinith Kishore <85550536+vinith2@users.noreply.github.com> Vrinda Narayan Vrinda Narayan Vrinda Narayan <48102157+vrindaaa@users.noreply.github.com> +Wang Yang (杨æ—ē) +Wang Yang (杨æ—ē) <1113177880@qq.com> Wansoo Kim +Warrick Ball +Warrick Ball Warren Weckesser Warren Weckesser Weitang Li @@ -594,11 +780,19 @@ William Spotz Wim Glenn Wojtek Ruszczewski Wojciech Rzadkowski <33913808+wrzadkow@users.noreply.github.com> +Xiangyi Wang +Xiaoyu +Xiaoyu Yamada Fuyuka Yang Hau Yang Hau +Yash Pethe +Yash Pethe <83630710+patient74@users.noreply.github.com> Yashasvi Misra Yashasvi Misra <54177363+yashasvimisra2798@users.noreply.github.com> +Yasir Ashfaq +Yasir Ashfaq <107119183+yasiribmcon@users.noreply.github.com> +Yichi Zhang Yogesh Raisinghani <46864533+raisinghanii@users.noreply.github.com> Younes Sandi Younes Sandi <65843206+Unessam@users.noreply.github.com> @@ -606,7 +800,10 @@ Yu Feng Yuji Kanagawa Yuki K Yury Kirienko +Yuvraj Pradhan +Yuvraj Pradhan Zac Hatfield-Dodds +Zach Brugh <111941670+zachbrugh@users.noreply.github.com> ZÊ Vinícius Zhang Na Zixu Zhao @@ -614,4 +811,5 @@ Ziyan Zhou Zieji Pohz Zieji Pohz <8103276+zjpoh@users.noreply.github.com> Zolboo Erdenebaatar +Zolisa Bleki Zolisa Bleki <44142765+zoj613@users.noreply.github.com> diff --git a/.spin/cmds.py b/.spin/cmds.py index b78c0393e708..4dcafb3ff1f1 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -1,22 +1,20 @@ +import importlib import os -import shutil import pathlib import shutil -import pathlib -import importlib import subprocess +import sys import click -from spin import util +import spin from spin.cmds import meson - # Check that the meson git submodule is present curdir = pathlib.Path(__file__).parent meson_import_dir = curdir.parent / 'vendored-meson' / 'meson' / 'mesonbuild' if not 
meson_import_dir.exists(): raise RuntimeError( - 'The `vendored-meson/meson` git submodule does not exist! ' + + 'The `vendored-meson/meson` git submodule does not exist! ' 'Run `git submodule update --init` to fix this problem.' ) @@ -38,8 +36,7 @@ def _get_numpy_tools(filename): "revision-range", required=True ) -@click.pass_context -def changelog(ctx, token, revision_range): +def changelog(token, revision_range): """👩 Get change log for provided revision range \b @@ -49,8 +46,8 @@ def changelog(ctx, token, revision_range): $ spin authors -t $GH_TOKEN --revision-range v1.25.0..v1.26.0 """ try: - from github.GithubException import GithubException from git.exc import GitError + from github.GithubException import GithubException changelog = _get_numpy_tools(pathlib.Path('changelog.py')) except ModuleNotFoundError as e: raise click.ClickException( @@ -74,71 +71,20 @@ def changelog(ctx, token, revision_range): ) -@click.command() -@click.option( - "-j", "--jobs", - help="Number of parallel tasks to launch", - type=int -) -@click.option( - "--clean", is_flag=True, - help="Clean build directory before build" -) -@click.option( - "-v", "--verbose", is_flag=True, - help="Print all build output, even installation" -) @click.option( "--with-scipy-openblas", type=click.Choice(["32", "64"]), default=None, help="Build with pre-installed scipy-openblas32 or scipy-openblas64 wheel" ) -@click.argument("meson_args", nargs=-1) -@click.pass_context -def build(ctx, meson_args, with_scipy_openblas, jobs=None, clean=False, verbose=False, quiet=False, *args, **kwargs): - """🔧 Build package with Meson/ninja and install - - MESON_ARGS are passed through e.g.: - - spin build -- -Dpkg_config_path=/lib64/pkgconfig - - The package is installed to build-install - - By default builds for release, to be able to use a debugger set CFLAGS - appropriately. 
For example, for linux use - - CFLAGS="-O0 -g" spin build - """ - # XXX keep in sync with upstream build +@spin.util.extend_command(spin.cmds.meson.build) +def build(*, parent_callback, with_scipy_openblas, **kwargs): if with_scipy_openblas: _config_openblas(with_scipy_openblas) - ctx.params.pop("with_scipy_openblas", None) - ctx.forward(meson.build) + parent_callback(**kwargs) -@click.command() -@click.argument("sphinx_target", default="html") -@click.option( - "--clean", is_flag=True, - default=False, - help="Clean previously built docs before building" -) -@click.option( - "--build/--no-build", - "first_build", - default=True, - help="Build numpy before generating docs", -) -@click.option( - '--jobs', '-j', - metavar='N_JOBS', - # Avoids pydata_sphinx_theme extension warning from default="auto". - default="1", - help=("Number of parallel build jobs." - "Can be set to `auto` to use all cores.") -) -@click.pass_context -def docs(ctx, sphinx_target, clean, first_build, jobs, *args, **kwargs): +@spin.util.extend_command(spin.cmds.meson.docs) +def docs(*, parent_callback, **kwargs): """📖 Build Sphinx documentation By default, SPHINXOPTS="-W", raising errors on warnings. @@ -159,7 +105,12 @@ def docs(ctx, sphinx_target, clean, first_build, jobs, *args, **kwargs): spin docs dist """ - meson.docs.ignore_unknown_options = True + kwargs['clean_dirs'] = [ + './doc/build/', + './doc/source/reference/generated', + './doc/source/reference/random/bit_generators/generated', + './doc/source/reference/random/generated', + ] # Run towncrier without staging anything for commit. This is the way to get # release notes snippets included in a local doc build. 
@@ -169,93 +120,155 @@ def docs(ctx, sphinx_target, clean, first_build, jobs, *args, **kwargs): with open(outfile, 'w') as f: f.write(p.stdout) - ctx.forward(meson.docs) + parent_callback(**kwargs) -@click.command() -@click.argument("pytest_args", nargs=-1) +# Override default jobs to 1 +jobs_param = next(p for p in docs.params if p.name == 'jobs') +jobs_param.default = 1 + +default = "not slow" + @click.option( "-m", "markexpr", metavar='MARKEXPR', - default="not slow", + default=default, help="Run tests with the given markers" ) @click.option( - "-j", - "n_jobs", - metavar='N_JOBS', + "-p", + "--parallel-threads", + metavar='PARALLEL_THREADS', default="1", - help=("Number of parallel jobs for testing. " - "Can be set to `auto` to use all cores.") -) -@click.option( - "--tests", "-t", - metavar='TESTS', - help=(""" -Which tests to run. Can be a module, function, class, or method: - - \b - numpy.random - numpy.random.tests.test_generator_mt19937 - numpy.random.tests.test_generator_mt19937::TestMultivariateHypergeometric - numpy.random.tests.test_generator_mt19937::TestMultivariateHypergeometric::test_edge_cases - \b -""") -) -@click.option( - '--verbose', '-v', is_flag=True, default=False + help="Run tests many times in number of parallel threads under pytest-run-parallel." + " Can be set to `auto` to use all cores. Use `spin test -p -- " + "--skip-thread-unsafe=true` to only run tests that can run in parallel. " + "pytest-run-parallel must be installed to use." ) -@click.pass_context -def test(ctx, pytest_args, markexpr, n_jobs, tests, verbose, *args, **kwargs): - """🔧 Run tests +@spin.util.extend_command(spin.cmds.meson.test) +def test(*, parent_callback, pytest_args, tests, markexpr, parallel_threads, **kwargs): + """ + By default, spin will run `-m 'not slow'`. To run the full test suite, use + `spin test -m full` + + When pytest-run-parallel is available, use `spin test -p auto` or + `spin test -p ` to run tests sequentially in parallel threads.
+ """ + if (not pytest_args) and (not tests): + pytest_args = ('--pyargs', 'numpy') + + if '-m' not in pytest_args: + if markexpr != "full": + pytest_args = ('-m', markexpr) + pytest_args + + if parallel_threads != "1": + pytest_args = ('--parallel-threads', parallel_threads) + pytest_args + + kwargs['pytest_args'] = pytest_args + parent_callback(**{'pytest_args': pytest_args, 'tests': tests, **kwargs}) + + +@spin.util.extend_command(test, doc='') +def check_docs(*, parent_callback, pytest_args, **kwargs): + """🔧 Run doctests of objects in the public API. PYTEST_ARGS are passed through directly to pytest, e.g.: - spin test -- --pdb + spin check-docs -- --pdb - To run tests on a directory or file: + To run tests on a directory: \b - spin test numpy/linalg - spin test numpy/linalg/tests/test_linalg.py + spin check-docs numpy/linalg - To report the durations of the N slowest tests: + To report the durations of the N slowest doctests: - spin test -- --durations=N + spin check-docs -- --durations=N - To run tests that match a given pattern: + To run doctests that match a given pattern: \b - spin test -- -k "geometric" - spin test -- -k "geometric and not rgeometric" + spin check-docs -- -k "slogdet" + spin check-docs numpy/linalg -- -k "det and not slogdet" - By default, spin will run `-m 'not slow'`. To run the full test suite, use - `spin -m full` + \b + Note: + ----- - For more, see `pytest --help`. - """ # noqa: E501 - if (not pytest_args) and (not tests): - pytest_args = ('numpy',) + \b + - This command only runs doctests and skips everything under tests/ + - This command only doctests public objects: those which are accessible + from the top-level `__init__.py` file. 
- if '-m' not in pytest_args: - if markexpr != "full": - pytest_args = ('-m', markexpr) + pytest_args + """ + try: + # prevent obscure error later + import scipy_doctest + except ModuleNotFoundError as e: + raise ModuleNotFoundError("scipy-doctest not installed") from e + if scipy_doctest.__version__ < '1.8.0': + raise ModuleNotFoundError("please update scipy_doctests to >= 1.8.0") + + if (not pytest_args): + pytest_args = ('--pyargs', 'numpy') + + # turn doctesting on: + doctest_args = ( + '--doctest-modules', + '--doctest-only-doctests=true', + '--doctest-collect=api' + ) - if (n_jobs != "1") and ('-n' not in pytest_args): - pytest_args = ('-n', str(n_jobs)) + pytest_args + pytest_args = pytest_args + doctest_args - if tests and not ('--pyargs' in pytest_args): - pytest_args = ('--pyargs', tests) + pytest_args + parent_callback(**{'pytest_args': pytest_args, **kwargs}) - if verbose: - pytest_args = ('-v',) + pytest_args - ctx.params['pytest_args'] = pytest_args +@spin.util.extend_command(test, doc='') +def check_tutorials(*, parent_callback, pytest_args, **kwargs): + """🔧 Run doctests of user-facing rst tutorials. + + To test all tutorials in the numpy doc/source/user/ directory, use + + spin check-tutorials + + To run tests on a specific RST file: + + \b + spin check-tutorials doc/source/user/absolute-beginners.rst + + \b + Note: + ----- + + \b + - This command only runs doctests and skips everything under tests/ + - This command only doctests public objects: those which are accessible + from the top-level `__init__.py` file. + + """ + # handle all of + # - `spin check-tutorials` (pytest_args == ()) + # - `spin check-tutorials path/to/rst`, and + # - `spin check-tutorials path/to/rst -- --durations=3` + if (not pytest_args) or all(arg.startswith('-') for arg in pytest_args): + pytest_args = ('doc/source/user',) + pytest_args + + # make all paths relative to the numpy source folder + pytest_args = tuple( + str(curdir / '..' 
/ arg) if not arg.startswith('-') else arg + for arg in pytest_args + ) + + # turn doctesting on: + doctest_args = ( + '--doctest-glob=*rst', + ) - for extra_param in ('markexpr', 'n_jobs', 'tests', 'verbose'): - del ctx.params[extra_param] - ctx.forward(meson.test) + pytest_args = pytest_args + doctest_args + + parent_callback(**{'pytest_args': pytest_args, **kwargs}) # From scipy: benchmarks/benchmarks/common.py @@ -264,6 +277,7 @@ def _set_mem_rlimit(max_mem=None): Set address space rlimit """ import resource + import psutil mem = psutil.virtual_memory() @@ -282,9 +296,9 @@ def _set_mem_rlimit(max_mem=None): def _commit_to_sha(commit): - p = util.run(['git', 'rev-parse', commit], output=False, echo=False) + p = spin.util.run(['git', 'rev-parse', commit], output=False, echo=False) if p.returncode != 0: - raise( + raise ( click.ClickException( f'Could not find SHA matching commit `{commit}`' ) @@ -295,10 +309,10 @@ def _commit_to_sha(commit): def _dirty_git_working_dir(): # Changes to the working directory - p0 = util.run(['git', 'diff-files', '--quiet']) + p0 = spin.util.run(['git', 'diff-files', '--quiet']) # Staged changes - p1 = util.run(['git', 'diff-index', '--quiet', '--cached', 'HEAD']) + p1 = spin.util.run(['git', 'diff-index', '--quiet', '--cached', 'HEAD']) return (p0.returncode != 0 or p1.returncode != 0) @@ -323,40 +337,24 @@ def _run_asv(cmd): except (ImportError, RuntimeError): pass - util.run(cmd, cwd='benchmarks', env=env) + spin.util.run(cmd, cwd='benchmarks', env=env) @click.command() @click.option( - "-b", "--branch", - metavar='branch', - default="main", -) -@click.option( - '--uncommitted', + '--fix', is_flag=True, default=False, required=False, ) @click.pass_context -def lint(ctx, branch, uncommitted): - """đŸ”Ļ Run lint checks on diffs. 
- Provide target branch name or `uncommitted` to check changes before committing: +def lint(ctx, fix): + """đŸ”Ļ Run lint checks with Ruff \b - Examples: - - \b - For lint checks of your development brach with `main` or a custom branch: + To run automatic fixes use: \b - $ spin lint # defaults to main - $ spin lint --branch custom_branch - - \b - To check just the uncommitted changes before committing - - \b - $ spin lint --uncommitted + $ spin lint --fix """ try: linter = _get_numpy_tools(pathlib.Path('linter.py')) @@ -365,7 +363,7 @@ def lint(ctx, branch, uncommitted): f"{e.msg}. Install using requirements/linter_requirements.txt" ) - linter.DiffLinter(branch).run_lint(uncommitted) + linter.DiffLinter().run_lint(fix) @click.command() @click.option( @@ -389,13 +387,25 @@ def lint(ctx, branch, uncommitted): '--quick', '-q', is_flag=True, default=False, help="Run each benchmark only once (timings won't be accurate)" ) +@click.option( + '--factor', '-f', default=1.05, + help="The factor above or below which a benchmark result is " + "considered reportable. This is passed on to the asv command." +) +@click.option( + '--cpu-affinity', default=None, multiple=False, + help="Set CPU affinity for running the benchmark, in format: 0 or 0,1,2 or 0-3." + "Default: not set" +) @click.argument( 'commits', metavar='', required=False, nargs=-1 ) +@meson.build_dir_option @click.pass_context -def bench(ctx, tests, compare, verbose, quick, commits): +def bench(ctx, tests, compare, verbose, quick, factor, cpu_affinity, + commits, build_dir): """🏋 Run benchmarks. 
\b @@ -438,6 +448,9 @@ def bench(ctx, tests, compare, verbose, quick, commits): if quick: bench_args = ['--quick'] + bench_args + if cpu_affinity: + bench_args += ['--cpu-affinity', cpu_affinity] + if not compare: # No comparison requested; we build and benchmark the current version @@ -447,10 +460,10 @@ def bench(ctx, tests, compare, verbose, quick, commits): ) ctx.invoke(build) - meson._set_pythonpath() + meson._set_pythonpath(build_dir) - p = util.run( - ['python', '-c', 'import numpy as np; print(np.__version__)'], + p = spin.util.run( + [sys.executable, '-c', 'import numpy as np; print(np.__version__)'], cwd='benchmarks', echo=False, output=False @@ -467,7 +480,7 @@ def bench(ctx, tests, compare, verbose, quick, commits): ] + bench_args _run_asv(cmd) else: - # Ensure that we don't have uncommited changes + # Ensure that we don't have uncommitted changes commit_a, commit_b = [_commit_to_sha(c) for c in commits] if commit_b == 'HEAD' and _dirty_git_working_dir(): @@ -478,34 +491,25 @@ def bench(ctx, tests, compare, verbose, quick, commits): ) cmd_compare = [ - 'asv', 'continuous', '--factor', '1.05', + 'asv', 'continuous', '--factor', str(factor), ] + bench_args + [commit_a, commit_b] _run_asv(cmd_compare) -@click.command(context_settings={ - 'ignore_unknown_options': True -}) -@click.argument("python_args", metavar='', nargs=-1) -@click.pass_context -def python(ctx, python_args, *args, **kwargs): - """🐍 Launch Python shell with PYTHONPATH set - - OPTIONS are passed through directly to Python, e.g.: - - spin python -c 'import sys; print(sys.path)' - """ +@spin.util.extend_command(meson.python) +def python(*, parent_callback, **kwargs): env = os.environ env['PYTHONWARNINGS'] = env.get('PYTHONWARNINGS', 'all') - ctx.forward(meson.python) + + parent_callback(**kwargs) @click.command(context_settings={ 'ignore_unknown_options': True }) @click.argument("ipython_args", metavar='', nargs=-1) -@click.pass_context -def ipython(ctx, ipython_args): +@meson.build_dir_option 
+def ipython(*, ipython_args, build_dir): """đŸ’ģ Launch IPython shell with PYTHONPATH set OPTIONS are passed through directly to IPython, e.g.: @@ -515,16 +519,19 @@ def ipython(ctx, ipython_args): env = os.environ env['PYTHONWARNINGS'] = env.get('PYTHONWARNINGS', 'all') + ctx = click.get_current_context() ctx.invoke(build) - ppath = meson._set_pythonpath() + ppath = meson._set_pythonpath(build_dir) print(f'đŸ’ģ Launching IPython with PYTHONPATH="{ppath}"') + + # In spin >= 0.13.1, can replace with extended command, setting `pre_import` preimport = (r"import numpy as np; " r"print(f'\nPreimported NumPy {np.__version__} as np')") - util.run(["ipython", "--ignore-cwd", - f"--TerminalIPythonApp.exec_lines={preimport}"] + - list(ipython_args)) + spin.util.run(["ipython", "--ignore-cwd", + f"--TerminalIPythonApp.exec_lines={preimport}"] + + list(ipython_args)) @click.command(context_settings={"ignore_unknown_options": True}) @@ -532,12 +539,59 @@ def ipython(ctx, ipython_args): def mypy(ctx): """đŸĻ† Run Mypy tests for NumPy """ + ctx.invoke(build) env = os.environ env['NPY_RUN_MYPY_IN_TESTSUITE'] = '1' ctx.params['pytest_args'] = [os.path.join('numpy', 'typing')] ctx.params['markexpr'] = 'full' ctx.forward(test) + +@click.command() +def pyrefly() -> None: + """đŸĒ˛ Type-check the stubs with Pyrefly + """ + spin.util.run(['pyrefly', 'check']) + + +@click.command() +@click.option( + '--concise', + is_flag=True, + default=False, + help="Concise output format", +) +@meson.build_dir_option +def stubtest(*, concise: bool, build_dir: str) -> None: + """🧐 Run stubtest on NumPy's .pyi stubs + + Requires mypy to be installed + """ + click.get_current_context().invoke(build) + meson._set_pythonpath(build_dir) + print(f"{build_dir = !r}") + + import sysconfig + purellib = sysconfig.get_paths()["purelib"] + print(f"{purellib = !r}") + + stubtest_dir = curdir.parent / 'tools' / 'stubtest' + mypy_config = stubtest_dir / 'mypy.ini' + allowlist = stubtest_dir / 'allowlist.txt' + + cmd 
= [ + 'stubtest', + '--ignore-disjoint-bases', + f'--mypy-config-file={mypy_config}', + f'--allowlist={allowlist}', + ] + if concise: + cmd.append('--concise') + cmd.append('numpy') + + spin.util.run(cmd) + + @click.command(context_settings={ 'ignore_unknown_options': True }) @@ -583,8 +637,7 @@ def _config_openblas(blas_variant): help="NumPy version of release", required=False ) -@click.pass_context -def notes(ctx, version_override): +def notes(version_override): """🎉 Generate release notes and validate \b @@ -599,7 +652,7 @@ def notes(ctx, version_override): \b $ spin notes """ - project_config = util.get_config() + project_config = spin.util.get_config() version = version_override or project_config['project.version'] click.secho( @@ -610,7 +663,7 @@ def notes(ctx, version_override): # Check if `towncrier` is installed if not shutil.which("towncrier"): raise click.ClickException( - f"please install `towncrier` to use this command" + "please install `towncrier` to use this command" ) click.secho( @@ -619,7 +672,7 @@ def notes(ctx, version_override): ) # towncrier build --version 2.1 --yes cmd = ["towncrier", "build", "--version", version, "--yes"] - p = util.run(cmd=cmd, sys_exit=False, output=True, encoding="utf-8") + p = spin.util.run(cmd=cmd, sys_exit=False, output=True, encoding="utf-8") if p.returncode != 0: raise click.ClickException( f"`towncrier` failed returned {p.returncode} with error `{p.stderr}`" @@ -637,7 +690,8 @@ def notes(ctx, version_override): ) try: - test_notes = _get_numpy_tools(pathlib.Path('ci', 'test_all_newsfragments_used.py')) + cmd = pathlib.Path('ci', 'test_all_newsfragments_used.py') + test_notes = _get_numpy_tools(cmd) except ModuleNotFoundError as e: raise click.ClickException( f"{e.msg}. Install the missing packages to use this command." 
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst new file mode 100644 index 000000000000..15e8f7546cc2 --- /dev/null +++ b/CONTRIBUTING.rst @@ -0,0 +1,19 @@ +=============================== +NumPy's Contributing guidelines +=============================== + +Welcome to the NumPy community! We're excited to have you here. +Whether you're new to open source or experienced, your contributions +help us grow. + +Pull requests (PRs) are always welcome, but making a PR is just the +start. Please respond to comments and requests for changes to help move the process forward. +Skip asking for an issue to be assigned to you on GitHub—send in your PR, explain what you did and ask for a review. It makes collaboration and support much easier. +Please follow our +`Code of Conduct `__, which applies +to all interactions, including issues and PRs. + +For more, please read https://www.numpy.org/devdocs/dev/index.html + +Thank you for contributing, and happy coding! + diff --git a/INSTALL.rst b/INSTALL.rst index eea2e3c9d7de..72caf98380b7 100644 --- a/INSTALL.rst +++ b/INSTALL.rst @@ -14,7 +14,7 @@ Prerequisites Building NumPy requires the following installed software: -1) Python__ 3.10.x or newer. +1) Python__ 3.12.x or newer. Please note that the Python development headers also need to be installed, e.g., on Debian/Ubuntu one needs to install both `python3` and @@ -82,7 +82,7 @@ Choosing compilers NumPy needs C and C++ compilers, and for development versions also needs Cython. A Fortran compiler isn't needed to build NumPy itself; the ``numpy.f2py`` tests will be skipped when running the test suite if no Fortran -compiler is available. +compiler is available. For more options including selecting compilers, setting custom compiler flags and controlling parallelism, see @@ -135,12 +135,8 @@ For best performance, a development package providing BLAS and CBLAS should be installed. 
Some of the options available are: - ``libblas-dev``: reference BLAS (not very optimized) -- ``libatlas-base-dev``: generic tuned ATLAS, it is recommended to tune it to - the available hardware, see /usr/share/doc/libatlas3-base/README.Debian for - instructions -- ``libopenblas-base``: fast and runtime detected so no tuning required but a - very recent version is needed (>=0.2.15 is recommended). Older versions of - OpenBLAS suffered from correctness issues on some CPUs. +- ``libopenblas-base``: (recommended) OpenBLAS is performant, and used + in the NumPy wheels on PyPI except where Apple's Accelerate is tuned better for Apple hardware The package linked to when numpy is loaded can be chosen after installation via the alternatives mechanism:: @@ -148,10 +144,6 @@ the alternatives mechanism:: update-alternatives --config libblas.so.3 update-alternatives --config liblapack.so.3 -Or by preloading a specific BLAS library with:: - - LD_PRELOAD=/usr/lib/atlas-base/atlas/libblas.so.3 python ... - Build issues ============ diff --git a/LICENSE.txt b/LICENSE.txt index 6ccec6824b65..f37a12cc4ccc 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -1,4 +1,4 @@ -Copyright (c) 2005-2024, NumPy Developers. +Copyright (c) 2005-2025, NumPy Developers. All rights reserved. Redistribution and use in source and binary forms, with or without diff --git a/LICENSES_bundled.txt b/LICENSES_bundled.txt deleted file mode 100644 index 815c9a1dba33..000000000000 --- a/LICENSES_bundled.txt +++ /dev/null @@ -1,31 +0,0 @@ -The NumPy repository and source distributions bundle several libraries that are -compatibly licensed. We list these here. 
- -Name: lapack-lite -Files: numpy/linalg/lapack_lite/* -License: BSD-3-Clause - For details, see numpy/linalg/lapack_lite/LICENSE.txt - -Name: dragon4 -Files: numpy/_core/src/multiarray/dragon4.c -License: MIT - For license text, see numpy/_core/src/multiarray/dragon4.c - -Name: libdivide -Files: numpy/_core/include/numpy/libdivide/* -License: Zlib - For license text, see numpy/_core/include/numpy/libdivide/LICENSE.txt - - -Note that the following files are vendored in the repository and sdist but not -installed in built numpy packages: - -Name: Meson -Files: vendored-meson/meson/* -License: Apache 2.0 - For license text, see vendored-meson/meson/COPYING - -Name: spin -Files: .spin/cmds.py -License: BSD-3 - For license text, see .spin/LICENSE diff --git a/README.md b/README.md index 51eb0785192d..344631bc5601 100644 --- a/README.md +++ b/README.md @@ -13,18 +13,20 @@ https://anaconda.org/conda-forge/numpy) https://stackoverflow.com/questions/tagged/numpy) [![Nature Paper](https://img.shields.io/badge/DOI-10.1038%2Fs41586--020--2649--2-blue)]( https://doi.org/10.1038/s41586-020-2649-2) +[![LFX Health Score](https://insights.linuxfoundation.org/api/badge/health-score?project=numpy)](https://insights.linuxfoundation.org/project/numpy) [![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/numpy/numpy/badge)](https://securityscorecards.dev/viewer/?uri=github.com/numpy/numpy) +[![Typing](https://img.shields.io/pypi/types/numpy)](https://pypi.org/project/numpy/) NumPy is the fundamental package for scientific computing with Python. 
-- **Website:** https://www.numpy.org +- **Website:** https://numpy.org - **Documentation:** https://numpy.org/doc - **Mailing list:** https://mail.python.org/mailman/listinfo/numpy-discussion - **Source code:** https://github.com/numpy/numpy -- **Contributing:** https://www.numpy.org/devdocs/dev/index.html +- **Contributing:** https://numpy.org/devdocs/dev/index.html - **Bug reports:** https://github.com/numpy/numpy/issues -- **Report a security vulnerability:** https://tidelift.com/docs/security +- **Report a security vulnerability:** https://github.com/numpy/numpy/security/policy (via Tidelift) It provides: diff --git a/azure-pipelines.yml b/azure-pipelines.yml deleted file mode 100644 index 2393a96d3f86..000000000000 --- a/azure-pipelines.yml +++ /dev/null @@ -1,100 +0,0 @@ -trigger: - # start a new build for every push - batch: False - branches: - include: - - main - - maintenance/* - - -pr: - branches: - include: - - '*' # must quote since "*" is a YAML reserved character; we want a string - - -stages: - -- stage: Check - jobs: - - job: Skip - pool: - vmImage: 'ubuntu-20.04' - variables: - DECODE_PERCENTS: 'false' - RET: 'true' - steps: - - bash: | - git_log=`git log --max-count=1 --skip=1 --pretty=format:"%B" | tr "\n" " "` - echo "##vso[task.setvariable variable=log]$git_log" - - bash: echo "##vso[task.setvariable variable=RET]false" - condition: or(contains(variables.log, '[skip azp]'), contains(variables.log, '[azp skip]'), contains(variables.log, '[skip ci]'), contains(variables.log, '[ci skip]')) - - bash: echo "##vso[task.setvariable variable=start_main;isOutput=true]$RET" - name: result - -- stage: ComprehensiveTests - condition: and(succeeded(), eq(dependencies.Check.outputs['Skip.result.start_main'], 'true')) - dependsOn: Check - jobs: - - - job: Lint - condition: and(succeeded(), eq(variables['Build.Reason'], 'PullRequest')) - pool: - vmImage: 'ubuntu-20.04' - steps: - - task: UsePythonVersion@0 - inputs: - versionSpec: '3.10' - addToPath: true - 
architecture: 'x64' - - script: >- - python -m pip install -r requirements/linter_requirements.txt - displayName: 'Install tools' - # pip 21.1 emits a pile of garbage messages to annoy users :) - # failOnStderr: true - - script: | - python tools/linter.py --branch origin/$(System.PullRequest.TargetBranch) - displayName: 'Run Lint Checks' - failOnStderr: true - - - job: Linux_Python_310_32bit_full_with_asserts - pool: - vmImage: 'ubuntu-20.04' - steps: - - script: | - git submodule update --init - displayName: 'Fetch submodules' - - script: | - # yum does not have a ninja package, so use the PyPI one - docker run -v $(pwd):/numpy -e CFLAGS="-msse2 -std=c99 -UNDEBUG" \ - -e F77=gfortran-5 -e F90=gfortran-5 quay.io/pypa/manylinux2014_i686 \ - /bin/bash -xc "source /numpy/tools/ci/run_32_bit_linux_docker.sh" - displayName: 'Run 32-bit manylinux2014 Docker Build / Tests' - - - job: Windows - timeoutInMinutes: 120 - pool: - vmImage: 'windows-2019' - strategy: - maxParallel: 3 - matrix: - Python310-64bit-fast: - PYTHON_VERSION: '3.10' - PYTHON_ARCH: 'x64' - TEST_MODE: fast - BITS: 64 - Python311-64bit-full: - PYTHON_VERSION: '3.11' - PYTHON_ARCH: 'x64' - TEST_MODE: full - BITS: 64 - _USE_BLAS_ILP64: '1' - PyPy310-64bit-fast: - PYTHON_VERSION: 'pypy3.10' - PYTHON_ARCH: 'x64' - TEST_MODE: fast - BITS: 64 - _USE_BLAS_ILP64: '1' - - steps: - - template: azure-steps-windows.yml diff --git a/azure-steps-windows.yml b/azure-steps-windows.yml deleted file mode 100644 index 0baf374e1e3f..000000000000 --- a/azure-steps-windows.yml +++ /dev/null @@ -1,55 +0,0 @@ -steps: -- script: git submodule update --init - displayName: 'Fetch submodules' -- task: UsePythonVersion@0 - inputs: - versionSpec: $(PYTHON_VERSION) - addToPath: true - architecture: $(PYTHON_ARCH) - -- script: python -m pip install --upgrade pip wheel - displayName: 'Install tools' - -- script: python -m pip install -r requirements/test_requirements.txt - displayName: 'Install dependencies; some are optional to avoid 
test skips' - -- powershell: | - choco install -y --stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite - displayName: 'Install utilities' - -- powershell: | - # Note: ensure the `pip install .` command remains the last one here, - # to avoid "green on failure" issues - If ( Test-Path env:DISABLE_BLAS ) { - python -m pip install . -v -Csetup-args="--vsenv" -Csetup-args="-Dblas=none" -Csetup-args="-Dlapack=none" -Csetup-args="-Dallow-noblas=true" - } - elseif ( Test-Path env:_USE_BLAS_ILP64 ) { - pip install -r requirements/ci_requirements.txt - spin config-openblas --with-scipy-openblas=64 - $env:PKG_CONFIG_PATH="$pwd/.openblas" - python -m pip install . -v -Csetup-args="--vsenv" - } else { - pip install -r requirements/ci_requirements.txt - spin config-openblas --with-scipy-openblas=32 - $env:PKG_CONFIG_PATH="$pwd/.openblas" - python -m pip install . -v -Csetup-args="--vsenv" - } - displayName: 'Build NumPy' - -- powershell: | - cd tools # avoid root dir to not pick up source tree - # Get a gfortran onto the path for f2py tests - $env:PATH = "c:\\rtools43\\x86_64-w64-mingw32.static.posix\\bin;$env:PATH" - If ( $env:TEST_MODE -eq "full" ) { - pytest --pyargs numpy -rsx --junitxml=junit/test-results.xml - } else { - pytest --pyargs numpy -m "not slow" -rsx --junitxml=junit/test-results.xml - } - displayName: 'Run NumPy Test Suite' - -- task: PublishTestResults@2 - condition: succeededOrFailed() - inputs: - testResultsFiles: '**/test-*.xml' - failTaskOnFailedTests: true - testRunTitle: 'Publish test results for Python $(PYTHON_VERSION) $(BITS)-bit $(TEST_MODE) Windows' diff --git a/benchmarks/README.rst b/benchmarks/README.rst index e44f8fe02f1e..e7e42a377819 100644 --- a/benchmarks/README.rst +++ b/benchmarks/README.rst @@ -127,4 +127,4 @@ Some things to consider: you are benchmarking an algorithm, it is unlikely that a user will be executing said algorithm on a newly created empty/zero array. 
One can force pagefaults to occur in the setup phase either by calling ``np.ones`` or - ``arr.fill(value)`` after creating the array, + ``arr.fill(value)`` after creating the array. diff --git a/benchmarks/asv_pip_nopep517.py b/benchmarks/asv_pip_nopep517.py index 085cbff1f4ee..fc231d1db5d0 100644 --- a/benchmarks/asv_pip_nopep517.py +++ b/benchmarks/asv_pip_nopep517.py @@ -1,7 +1,9 @@ """ This file is used by asv_compare.conf.json.tpl. """ -import subprocess, sys +import subprocess +import sys + # pip ignores '--global-option' when pep517 is enabled therefore we disable it. cmd = [sys.executable, '-mpip', 'wheel', '--no-use-pep517'] try: diff --git a/benchmarks/benchmarks/__init__.py b/benchmarks/benchmarks/__init__.py index 8efa67de33eb..9be15825edda 100644 --- a/benchmarks/benchmarks/__init__.py +++ b/benchmarks/benchmarks/__init__.py @@ -1,11 +1,13 @@ -from . import common -import sys import os +import sys + +from . import common + def show_cpu_features(): from numpy.lib._utils_impl import _opt_info info = _opt_info() - info = "NumPy CPU features: " + (info if info else 'nothing enabled') + info = "NumPy CPU features: " + (info or 'nothing enabled') # ASV wrapping stdout & stderr, so we assume having a tty here if 'SHELL' in os.environ and sys.platform != 'win32': # to avoid the red color that imposed by ASV @@ -42,7 +44,7 @@ def dirty_lock(lock_name, lock_on_count=1): count = 0 f.seek(0) f.truncate() - f.write(f"{str(count)} {str(ppid)}") + f.write(f"{count} {ppid}") except OSError: pass return False diff --git a/benchmarks/benchmarks/bench_app.py b/benchmarks/benchmarks/bench_app.py index d22aa2e09604..06a9401b02f5 100644 --- a/benchmarks/benchmarks/bench_app.py +++ b/benchmarks/benchmarks/bench_app.py @@ -1,7 +1,7 @@ -from .common import Benchmark - import numpy as np +from .common import Benchmark + class LaplaceInplace(Benchmark): params = ['inplace', 'normal'] diff --git a/benchmarks/benchmarks/bench_array_coercion.py 
b/benchmarks/benchmarks/bench_array_coercion.py index ca1f3cc83a3f..ae9c040970d8 100644 --- a/benchmarks/benchmarks/bench_array_coercion.py +++ b/benchmarks/benchmarks/bench_array_coercion.py @@ -1,7 +1,7 @@ -from .common import Benchmark - import numpy as np +from .common import Benchmark + class ArrayCoercionSmall(Benchmark): # More detailed benchmarks for array coercion, @@ -38,7 +38,7 @@ def time_asarray(self, array_like): def time_asarray_dtype(self, array_like): np.asarray(array_like, dtype=self.int64) - def time_asarray_dtype(self, array_like): + def time_asarray_dtype_order(self, array_like): np.asarray(array_like, dtype=self.int64, order="F") def time_asanyarray(self, array_like): @@ -47,7 +47,7 @@ def time_asanyarray(self, array_like): def time_asanyarray_dtype(self, array_like): np.asanyarray(array_like, dtype=self.int64) - def time_asanyarray_dtype(self, array_like): + def time_asanyarray_dtype_order(self, array_like): np.asanyarray(array_like, dtype=self.int64, order="F") def time_ascontiguousarray(self, array_like): diff --git a/benchmarks/benchmarks/bench_clip.py b/benchmarks/benchmarks/bench_clip.py index ce0511da82a4..953fc383e20b 100644 --- a/benchmarks/benchmarks/bench_clip.py +++ b/benchmarks/benchmarks/bench_clip.py @@ -1,7 +1,7 @@ -from .common import Benchmark - import numpy as np +from .common import Benchmark + class ClipFloat(Benchmark): param_names = ["dtype", "size"] diff --git a/benchmarks/benchmarks/bench_core.py b/benchmarks/benchmarks/bench_core.py index 632318d61084..ea7aae007fdc 100644 --- a/benchmarks/benchmarks/bench_core.py +++ b/benchmarks/benchmarks/bench_core.py @@ -1,7 +1,7 @@ -from .common import Benchmark - import numpy as np +from .common import Benchmark + class Core(Benchmark): def setup(self): @@ -14,6 +14,7 @@ def setup(self): self.l_view = [memoryview(a) for a in self.l] self.l10x10 = np.ones((10, 10)) self.float64_dtype = np.dtype(np.float64) + self.arr = np.arange(10000).reshape(100, 100) def time_array_1(self): 
np.array(1) @@ -48,6 +49,9 @@ def time_array_l_view(self): def time_can_cast(self): np.can_cast(self.l10x10, self.float64_dtype) + def time_tobytes_noncontiguous(self): + self.arr.T.tobytes() + def time_can_cast_same_kind(self): np.can_cast(self.l10x10, self.float64_dtype, casting="same_kind") @@ -137,7 +141,7 @@ class CorrConv(Benchmark): def setup(self, size1, size2, mode): self.x1 = np.linspace(0, 1, num=size1) - self.x2 = np.cos(np.linspace(0, 2*np.pi, num=size2)) + self.x2 = np.cos(np.linspace(0, 2 * np.pi, num=size2)) def time_correlate(self, size1, size2, mode): np.correlate(self.x1, self.x2, mode=mode) @@ -151,7 +155,8 @@ class CountNonzero(Benchmark): params = [ [1, 2, 3], [100, 10000, 1000000], - [bool, np.int8, np.int16, np.int32, np.int64, str, object] + [bool, np.int8, np.int16, np.int32, np.int64, np.float32, + np.float64, str, object] ] def setup(self, numaxes, size, dtype): @@ -170,9 +175,34 @@ def time_count_nonzero_multi_axis(self, numaxes, size, dtype): self.x.ndim - 1, self.x.ndim - 2)) +class Nonzero(Benchmark): + params = [ + [bool, np.uint8, np.uint64, np.int64, np.float32, np.float64], + [(1_000_000,), (1000, 1000), (100, ), (2, )] + ] + param_names = ["dtype", "shape"] + + def setup(self, dtype, size): + self.x = np.random.randint(0, 3, size=size).astype(dtype) + self.x_sparse = np.zeros(size).astype(dtype) + self.x_sparse[1] = 1 + self.x_sparse[-1] = 1 + self.x_dense = np.ones(size).astype(dtype) + + def time_nonzero(self, dtype, size): + np.nonzero(self.x) + + def time_nonzero_sparse(self, dtype, size): + np.nonzero(self.x_sparse) + + def time_nonzero_dense(self, dtype, size): + np.nonzero(self.x_dense) + + class PackBits(Benchmark): param_names = ['dtype'] params = [[bool, np.uintp]] + def setup(self, dtype): self.d = np.ones(10000, dtype=dtype) self.d2 = np.ones((200, 1000), dtype=dtype) @@ -251,10 +281,10 @@ def time_sum(self, dtype, size): class NumPyChar(Benchmark): def setup(self): - self.A = np.array([100*'x', 100*'y']) + self.A = 
np.array([100 * 'x', 100 * 'y']) self.B = np.array(1000 * ['aa']) - self.C = np.array([100*'x' + 'z', 100*'y' + 'z' + 'y', 100*'x']) + self.C = np.array([100 * 'x' + 'z', 100 * 'y' + 'z' + 'y', 100 * 'x']) self.D = np.array(1000 * ['ab'] + 1000 * ['ac']) def time_isalpha_small_list_big_string(self): diff --git a/benchmarks/benchmarks/bench_creation.py b/benchmarks/benchmarks/bench_creation.py index 76d871e2d411..f76a9c78f867 100644 --- a/benchmarks/benchmarks/bench_creation.py +++ b/benchmarks/benchmarks/bench_creation.py @@ -1,7 +1,7 @@ -from .common import Benchmark, TYPES1, get_squares_ - import numpy as np +from .common import TYPES1, Benchmark, get_squares_ + class MeshGrid(Benchmark): """ Benchmark meshgrid generation @@ -13,7 +13,8 @@ class MeshGrid(Benchmark): timeout = 10 def setup(self, size, ndims, ind, ndtype): - self.grid_dims = [(np.random.ranf(size)).astype(ndtype) for + rnd = np.random.RandomState(1864768776) + self.grid_dims = [(rnd.random_sample(size)).astype(ndtype) for x in range(ndims)] def time_meshgrid(self, size, ndims, ind, ndtype): diff --git a/benchmarks/benchmarks/bench_function_base.py b/benchmarks/benchmarks/bench_function_base.py index 761c56a1691a..f72d50eb74ce 100644 --- a/benchmarks/benchmarks/bench_function_base.py +++ b/benchmarks/benchmarks/bench_function_base.py @@ -1,7 +1,7 @@ -from .common import Benchmark - import numpy as np +from .common import Benchmark + try: # SkipNotImplemented is available since 6.0 from asv_runner.benchmarks.mark import SkipNotImplemented @@ -35,7 +35,7 @@ def time_fine_binning(self): class Histogram2D(Benchmark): def setup(self): - self.d = np.linspace(0, 100, 200000).reshape((-1,2)) + self.d = np.linspace(0, 100, 200000).reshape((-1, 2)) def time_full_coverage(self): np.histogramdd(self.d, (200, 200), ((0, 100), (0, 100))) @@ -64,7 +64,7 @@ class Mean(Benchmark): params = [[1, 10, 100_000]] def setup(self, size): - self.array = np.arange(2*size).reshape(2, size) + self.array = np.arange(2 * 
size).reshape(2, size) def time_mean(self, size): np.mean(self.array) @@ -136,6 +136,7 @@ def time_select_larger(self): def memoize(f): _memoized = {} + def wrapped(*args): if args not in _memoized: _memoized[args] = f(*args) @@ -154,18 +155,19 @@ class SortGenerator: @staticmethod @memoize - def random(size, dtype): + def random(size, dtype, rnd): """ Returns a randomly-shuffled array. """ arr = np.arange(size, dtype=dtype) rnd = np.random.RandomState(1792364059) + np.random.shuffle(arr) rnd.shuffle(arr) return arr @staticmethod @memoize - def ordered(size, dtype): + def ordered(size, dtype, rnd): """ Returns an ordered array. """ @@ -173,22 +175,22 @@ def ordered(size, dtype): @staticmethod @memoize - def reversed(size, dtype): + def reversed(size, dtype, rnd): """ Returns an array that's in descending order. """ dtype = np.dtype(dtype) try: with np.errstate(over="raise"): - res = dtype.type(size-1) + res = dtype.type(size - 1) except (OverflowError, FloatingPointError): raise SkipNotImplemented("Cannot construct arange for this size.") - return np.arange(size-1, -1, -1, dtype=dtype) + return np.arange(size - 1, -1, -1, dtype=dtype) @staticmethod @memoize - def uniform(size, dtype): + def uniform(size, dtype, rnd): """ Returns an array that has the same value everywhere. """ @@ -196,20 +198,7 @@ def uniform(size, dtype): @staticmethod @memoize - def swapped_pair(size, dtype, swap_frac): - """ - Returns an ordered array, but one that has ``swap_frac * size`` - pairs swapped. - """ - a = np.arange(size, dtype=dtype) - for _ in range(int(size * swap_frac)): - x, y = np.random.randint(0, size, 2) - a[x], a[y] = a[y], a[x] - return a - - @staticmethod - @memoize - def sorted_block(size, dtype, block_size): + def sorted_block(size, dtype, block_size, rnd): """ Returns an array with blocks that are all sorted. 
""" @@ -222,35 +211,6 @@ def sorted_block(size, dtype, block_size): b.extend(a[i::block_num]) return np.array(b) - @classmethod - @memoize - def random_unsorted_area(cls, size, dtype, frac, area_size=None): - """ - This type of array has random unsorted areas such that they - compose the fraction ``frac`` of the original array. - """ - if area_size is None: - area_size = cls.AREA_SIZE - - area_num = int(size * frac / area_size) - a = np.arange(size, dtype=dtype) - for _ in range(area_num): - start = np.random.randint(size-area_size) - end = start + area_size - np.random.shuffle(a[start:end]) - return a - - @classmethod - @memoize - def random_bubble(cls, size, dtype, bubble_num, bubble_size=None): - """ - This type of array has ``bubble_num`` random unsorted areas. - """ - if bubble_size is None: - bubble_size = cls.BUBBLE_SIZE - frac = bubble_size * bubble_num / size - - return cls.random_unsorted_area(size, dtype, frac, bubble_size) class Sort(Benchmark): """ @@ -271,26 +231,18 @@ class Sort(Benchmark): ('sorted_block', 10), ('sorted_block', 100), ('sorted_block', 1000), - # ('swapped_pair', 0.01), - # ('swapped_pair', 0.1), - # ('swapped_pair', 0.5), - # ('random_unsorted_area', 0.5), - # ('random_unsorted_area', 0.1), - # ('random_unsorted_area', 0.01), - # ('random_bubble', 1), - # ('random_bubble', 5), - # ('random_bubble', 10), ], ] param_names = ['kind', 'dtype', 'array_type'] # The size of the benchmarked arrays. - ARRAY_SIZE = 10000 + ARRAY_SIZE = 1000000 def setup(self, kind, dtype, array_type): - np.random.seed(1234) + rnd = np.random.RandomState(507582308) array_class = array_type[0] - self.arr = getattr(SortGenerator, array_class)(self.ARRAY_SIZE, dtype, *array_type[1:]) + generate_array_method = getattr(SortGenerator, array_class) + self.arr = generate_array_method(self.ARRAY_SIZE, dtype, *array_type[1:], rnd) def time_sort(self, kind, dtype, array_type): # Using np.sort(...) instead of arr.sort(...) because it makes a copy. 
@@ -322,10 +274,10 @@ class Partition(Benchmark): ARRAY_SIZE = 100000 def setup(self, dtype, array_type, k): - np.random.seed(1234) + rnd = np.random.seed(2136297818) array_class = array_type[0] - self.arr = getattr(SortGenerator, array_class)(self.ARRAY_SIZE, - dtype, *array_type[1:]) + self.arr = getattr(SortGenerator, array_class)( + self.ARRAY_SIZE, dtype, *array_type[1:], rnd) def time_partition(self, dtype, array_type, k): temp = np.partition(self.arr, k) @@ -425,4 +377,3 @@ def time_interleaved_ones_x4(self): def time_interleaved_ones_x8(self): np.where(self.rep_ones_8) - diff --git a/benchmarks/benchmarks/bench_indexing.py b/benchmarks/benchmarks/bench_indexing.py index 5d270f788164..f1153489f515 100644 --- a/benchmarks/benchmarks/bench_indexing.py +++ b/benchmarks/benchmarks/bench_indexing.py @@ -1,12 +1,12 @@ -from .common import ( - Benchmark, get_square_, get_indexes_, get_indexes_rand_, TYPES1) - -from os.path import join as pjoin import shutil -from numpy import memmap, float32, array -import numpy as np +from os.path import join as pjoin from tempfile import mkdtemp +import numpy as np +from numpy import array, float32, memmap + +from .common import TYPES1, Benchmark, get_indexes_, get_indexes_rand_, get_square_ + class Indexing(Benchmark): params = [TYPES1 + ["object", "O,i"], @@ -84,6 +84,22 @@ def time_assign_cast(self, ndim): arr[indx] = val +class BooleanAssignmentOrder(Benchmark): + params = ['C', 'F'] + param_names = ['order'] + + def setup(self, order): + shape = (64, 64, 64) + # emulate gh-30156: boolean assignment into a Fortran/C array + self.base = np.zeros(shape, dtype=np.uint32, order=order) + mask = np.random.RandomState(0).rand(*self.base.shape) > 0.5 + self.mask = mask.copy(order) + self.value = np.uint32(7) + + def time_boolean_assign_scalar(self, order): + self.base[self.mask] = self.value + + class IndexingSeparate(Benchmark): def setup(self): self.tmp_dir = mkdtemp() @@ -134,6 +150,7 @@ def setup(self): self.m_half = 
np.copy(self.m_all) self.m_half[::2] = False self.m_none = np.repeat(False, 200 * 50000) + self.m_index_2d = np.arange(200 * 50000).reshape((100, 100000)) def time_flat_bool_index_none(self): self.a.flat[self.m_none] @@ -143,3 +160,21 @@ def time_flat_bool_index_half(self): def time_flat_bool_index_all(self): self.a.flat[self.m_all] + + def time_flat_fancy_index_2d(self): + self.a.flat[self.m_index_2d] + + def time_flat_empty_tuple_index(self): + self.a.flat[()] + + def time_flat_ellipsis_index(self): + self.a.flat[...] + + def time_flat_bool_index_0d(self): + self.a.flat[True] + + def time_flat_int_index(self): + self.a.flat[1_000_000] + + def time_flat_slice_index(self): + self.a.flat[1_000_000:2_000_000] diff --git a/benchmarks/benchmarks/bench_io.py b/benchmarks/benchmarks/bench_io.py index e316d07f3582..eea4a4ed4309 100644 --- a/benchmarks/benchmarks/bench_io.py +++ b/benchmarks/benchmarks/bench_io.py @@ -1,7 +1,8 @@ -from .common import Benchmark, get_squares, get_squares_ +from io import SEEK_SET, BytesIO, StringIO import numpy as np -from io import SEEK_SET, StringIO, BytesIO + +from .common import Benchmark, get_squares, get_squares_ class Copy(Benchmark): @@ -88,7 +89,7 @@ def setup(self, num_lines): # unfortunately, timeit will only run setup() # between repeat events, but not for iterations # within repeats, so the StringIO object - # will have to be rewinded in the benchmark proper + # will have to be rewound in the benchmark proper self.data_comments = StringIO('\n'.join(data)) def time_comment_loadtxt_csv(self, num_lines): diff --git a/benchmarks/benchmarks/bench_itemselection.py b/benchmarks/benchmarks/bench_itemselection.py index c6c74da569c7..90f9efc77d90 100644 --- a/benchmarks/benchmarks/bench_itemselection.py +++ b/benchmarks/benchmarks/bench_itemselection.py @@ -1,7 +1,7 @@ -from .common import Benchmark, TYPES1 - import numpy as np +from .common import TYPES1, Benchmark + class Take(Benchmark): params = [ diff --git 
a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index f792116a6b9c..11d454ae41bf 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -1,10 +1,13 @@ """Benchmarks for `numpy.lib`.""" +import string -from .common import Benchmark +from asv_runner.benchmarks.mark import SkipNotImplemented import numpy as np +from .common import Benchmark + class Pad(Benchmark): """Benchmarks for `numpy.pad`. @@ -66,10 +69,10 @@ class Nan(Benchmark): ] def setup(self, array_size, percent_nans): - np.random.seed(123) + rnd = np.random.RandomState(1819780348) # produce a randomly shuffled array with the # approximate desired percentage np.nan content - base_array = np.random.uniform(size=array_size) + base_array = rnd.uniform(size=array_size) base_array[base_array < percent_nans / 100.] = np.nan self.arr = base_array @@ -119,37 +122,116 @@ def time_nanpercentile(self, array_size, percent_nans): class Unique(Benchmark): """Benchmark for np.unique with np.nan values.""" - param_names = ["array_size", "percent_nans"] + param_names = ["array_size", "percent_nans", "percent_unique_values", "dtype"] params = [ # sizes of the 1D arrays [200, int(2e5)], # percent of np.nan in arrays - [0, 0.1, 2., 50., 90.], + [0.0, 10., 90.], + # percent of unique values in arrays + [0.2, 20.], + # dtypes of the arrays + [np.float64, np.complex128, np.dtypes.StringDType(na_object=np.nan)], ] - def setup(self, array_size, percent_nans): - np.random.seed(123) + def setup(self, array_size, percent_nans, percent_unique_values, dtype): + rng = np.random.default_rng(123) # produce a randomly shuffled array with the # approximate desired percentage np.nan content - base_array = np.random.uniform(size=array_size) - n_nan = int(percent_nans * array_size) - nan_indices = np.random.choice(np.arange(array_size), size=n_nan) + unique_values_size = max(int(percent_unique_values / 100. 
* array_size), 2) + match dtype: + case np.float64: + unique_array = rng.uniform(size=unique_values_size).astype(dtype) + case np.complex128: + unique_array = np.array( + [ + complex(*rng.uniform(size=2)) + for _ in range(unique_values_size) + ], + dtype=dtype, + ) + case np.dtypes.StringDType(): + chars = string.ascii_letters + string.digits + unique_array = np.array( + [ + ''.join(rng.choice(list(chars), size=rng.integers(4, 8))) + for _ in range(unique_values_size) + ], + dtype=dtype, + ) + case _: + raise ValueError(f"Unsupported dtype {dtype}") + + base_array = np.resize(unique_array, array_size) + rng.shuffle(base_array) + # insert nans in random places + n_nan = int(percent_nans / 100. * array_size) + nan_indices = rng.choice(np.arange(array_size), size=n_nan, replace=False) base_array[nan_indices] = np.nan self.arr = base_array - def time_unique_values(self, array_size, percent_nans): + def time_unique_values(self, array_size, percent_nans, + percent_unique_values, dtype): + np.unique(self.arr, return_index=False, + return_inverse=False, return_counts=False) + + def time_unique_counts(self, array_size, percent_nans, + percent_unique_values, dtype): + np.unique(self.arr, return_index=False, + return_inverse=False, return_counts=True,) + + def time_unique_inverse(self, array_size, percent_nans, + percent_unique_values, dtype): + np.unique(self.arr, return_index=False, + return_inverse=True, return_counts=False) + + def time_unique_all(self, array_size, percent_nans, + percent_unique_values, dtype): + np.unique(self.arr, return_index=True, + return_inverse=True, return_counts=True) + + +class UniqueIntegers(Benchmark): + """Benchmark for np.unique with integer dtypes.""" + + param_names = ["array_size", "num_unique_values", "dtype"] + params = [ + # sizes of the 1D arrays + [200, 100000, 1000000], + # number of unique values in arrays + [25, 125, 5000, 50000, 250000], + # dtypes of the arrays + [np.uint8, np.int16, np.uint32, np.int64], + ] + + def setup(self, 
array_size, num_unique_values, dtype): + unique_array = np.arange(num_unique_values, dtype=dtype) + base_array = np.resize(unique_array, array_size) + rng = np.random.default_rng(121263137472525314065) + rng.shuffle(base_array) + self.arr = base_array + + def time_unique_values(self, array_size, num_unique_values, dtype): + if num_unique_values >= np.iinfo(dtype).max or num_unique_values > array_size: + raise SkipNotImplemented("skipped") np.unique(self.arr, return_index=False, return_inverse=False, return_counts=False) - def time_unique_counts(self, array_size, percent_nans): + def time_unique_counts(self, array_size, num_unique_values, dtype): + if num_unique_values >= np.iinfo(dtype).max or num_unique_values > array_size: + raise SkipNotImplemented("skipped") np.unique(self.arr, return_index=False, - return_inverse=False, return_counts=True) + return_inverse=False, return_counts=True,) - def time_unique_inverse(self, array_size, percent_nans): + def time_unique_inverse(self, array_size, num_unique_values, dtype): + if num_unique_values >= np.iinfo(dtype).max or num_unique_values > array_size: + raise SkipNotImplemented("skipped") np.unique(self.arr, return_index=False, return_inverse=True, return_counts=False) - def time_unique_all(self, array_size, percent_nans): + def time_unique_all(self, array_size, num_unique_values, dtype): + if num_unique_values >= np.iinfo(dtype).max or num_unique_values > array_size: + raise SkipNotImplemented("skipped") np.unique(self.arr, return_index=True, return_inverse=True, return_counts=True) diff --git a/benchmarks/benchmarks/bench_linalg.py b/benchmarks/benchmarks/bench_linalg.py index f3eb819c1803..49a7ae84fde6 100644 --- a/benchmarks/benchmarks/bench_linalg.py +++ b/benchmarks/benchmarks/bench_linalg.py @@ -1,7 +1,7 @@ -from .common import Benchmark, get_squares_, get_indexes_rand, TYPES1 - import numpy as np +from .common import TYPES1, Benchmark, get_indexes_rand, get_squares_ + class Eindot(Benchmark): def setup(self): @@ 
-72,7 +72,7 @@ def time_tensordot_a_b_axes_1_0_0_1(self): class Linalg(Benchmark): - params = sorted(list(set(TYPES1) - set(['float16']))) + params = sorted(set(TYPES1) - {'float16'}) param_names = ['dtype'] def setup(self, typename): @@ -103,6 +103,8 @@ def time_norm(self, typename): class LinalgSmallArrays(Benchmark): """ Test overhead of linalg methods for small arrays """ def setup(self): + self.array_3_3 = np.eye(3) + np.arange(9.).reshape((3, 3)) + self.array_3 = np.arange(3.) self.array_5 = np.arange(5.) self.array_5_5 = np.reshape(np.arange(25.), (5, 5)) @@ -111,7 +113,17 @@ def time_norm_small_array(self): def time_det_small_array(self): np.linalg.det(self.array_5_5) - + + def time_det_3x3(self): + np.linalg.det(self.array_3_3) + + def time_solve_3x3(self): + np.linalg.solve(self.array_3_3, self.array_3) + + def time_eig_3x3(self): + np.linalg.eig(self.array_3_3) + + class Lstsq(Benchmark): def setup(self): self.a = get_squares_()['float64'] @@ -123,19 +135,22 @@ def time_numpy_linalg_lstsq_a__b_float64(self): class Einsum(Benchmark): param_names = ['dtype'] params = [[np.float32, np.float64]] + def setup(self, dtype): self.one_dim_small = np.arange(600, dtype=dtype) self.one_dim = np.arange(3000, dtype=dtype) self.one_dim_big = np.arange(480000, dtype=dtype) self.two_dim_small = np.arange(1200, dtype=dtype).reshape(30, 40) self.two_dim = np.arange(240000, dtype=dtype).reshape(400, 600) - self.three_dim_small = np.arange(10000, dtype=dtype).reshape(10,100,10) + self.three_dim_small = np.arange(10000, dtype=dtype).reshape(10, 100, 10) self.three_dim = np.arange(24000, dtype=dtype).reshape(20, 30, 40) # non_contiguous arrays self.non_contiguous_dim1_small = np.arange(1, 80, 2, dtype=dtype) self.non_contiguous_dim1 = np.arange(1, 4000, 2, dtype=dtype) self.non_contiguous_dim2 = np.arange(1, 2400, 2, dtype=dtype).reshape(30, 40) - self.non_contiguous_dim3 = np.arange(1, 48000, 2, dtype=dtype).reshape(20, 30, 40) + + non_contiguous_dim3 = np.arange(1, 48000, 2, 
dtype=dtype) + self.non_contiguous_dim3 = non_contiguous_dim3.reshape(20, 30, 40) # outer(a,b): trigger sum_of_products_contig_stride0_outcontig_two def time_einsum_outer(self, dtype): @@ -143,7 +158,7 @@ def time_einsum_outer(self, dtype): # multiply(a, b):trigger sum_of_products_contig_two def time_einsum_multiply(self, dtype): - np.einsum("..., ...", self.two_dim_small, self.three_dim , optimize=True) + np.einsum("..., ...", self.two_dim_small, self.three_dim, optimize=True) # sum and multiply:trigger sum_of_products_contig_stride0_outstride0_two def time_einsum_sum_mul(self, dtype): @@ -167,11 +182,13 @@ def time_einsum_contig_outstride0(self, dtype): # outer(a,b): non_contiguous arrays def time_einsum_noncon_outer(self, dtype): - np.einsum("i,j", self.non_contiguous_dim1, self.non_contiguous_dim1, optimize=True) + np.einsum("i,j", self.non_contiguous_dim1, + self.non_contiguous_dim1, optimize=True) # multiply(a, b):non_contiguous arrays def time_einsum_noncon_multiply(self, dtype): - np.einsum("..., ...", self.non_contiguous_dim2, self.non_contiguous_dim3, optimize=True) + np.einsum("..., ...", self.non_contiguous_dim2, + self.non_contiguous_dim3, optimize=True) # sum and multiply:non_contiguous arrays def time_einsum_noncon_sum_mul(self, dtype): @@ -187,9 +204,10 @@ def time_einsum_noncon_mul(self, dtype): # contig_contig_outstride0_two: non_contiguous arrays def time_einsum_noncon_contig_contig(self, dtype): - np.einsum("ji,i->", self.non_contiguous_dim2, self.non_contiguous_dim1_small, optimize=True) + np.einsum("ji,i->", self.non_contiguous_dim2, + self.non_contiguous_dim1_small, optimize=True) - # sum_of_products_contig_outstride0_oneīŧšnon_contiguous arrays + # sum_of_products_contig_outstride0_one: non_contiguous arrays def time_einsum_noncon_contig_outstride0(self, dtype): np.einsum("i->", self.non_contiguous_dim1, optimize=True) @@ -208,11 +226,49 @@ def setup(self, shape, npdtypes): self.x2arg = np.random.uniform(-1, 1, np.dot(*shape)).reshape(shape) 
self.x2arg = self.x2arg.astype(npdtypes) if npdtypes.startswith('complex'): - self.xarg += self.xarg.T*1j - self.x2arg += self.x2arg.T*1j + self.xarg += self.xarg.T * 1j + self.x2arg += self.x2arg.T * 1j def time_transpose(self, shape, npdtypes): np.transpose(self.xarg) def time_vdot(self, shape, npdtypes): np.vdot(self.xarg, self.x2arg) + + +class MatmulStrided(Benchmark): + # some interesting points selected from + # https://github.com/numpy/numpy/pull/23752#issuecomment-2629521597 + # (m, p, n, batch_size) + args = [ + (2, 2, 2, 1), (2, 2, 2, 10), (5, 5, 5, 1), (5, 5, 5, 10), + (10, 10, 10, 1), (10, 10, 10, 10), (20, 20, 20, 1), (20, 20, 20, 10), + (50, 50, 50, 1), (50, 50, 50, 10), + (150, 150, 100, 1), (150, 150, 100, 10), + (400, 400, 100, 1), (400, 400, 100, 10) + ] + + param_names = ['configuration'] + + def __init__(self): + self.args_map = { + 'matmul_m%03d_p%03d_n%03d_bs%02d' % arg: arg for arg in self.args + } + + self.params = [list(self.args_map.keys())] + + def setup(self, configuration): + m, p, n, batch_size = self.args_map[configuration] + + self.a1raw = np.random.rand(batch_size * m * 2 * n).reshape( + (batch_size, m, 2 * n) + ) + + self.a1 = self.a1raw[:, :, ::2] + + self.a2 = np.random.rand(batch_size * n * p).reshape( + (batch_size, n, p) + ) + + def time_matmul(self, configuration): + return np.matmul(self.a1, self.a2) diff --git a/benchmarks/benchmarks/bench_ma.py b/benchmarks/benchmarks/bench_ma.py index f17da1a9ebe1..e815f5fc0cdb 100644 --- a/benchmarks/benchmarks/bench_ma.py +++ b/benchmarks/benchmarks/bench_ma.py @@ -1,7 +1,7 @@ -from .common import Benchmark - import numpy as np +from .common import Benchmark + class MA(Benchmark): def setup(self): @@ -31,17 +31,18 @@ class Indexing(Benchmark): params = [[True, False], [1, 2], [10, 100, 1000]] + def setup(self, masked, ndim, size): x = np.arange(size**ndim).reshape(ndim * (size,)) if masked: - self.m = np.ma.array(x, mask=x%2 == 0) + self.m = np.ma.array(x, mask=x % 2 == 0) else: self.m 
= np.ma.array(x) - self.idx_scalar = (size//2,) * ndim - self.idx_0d = (size//2,) * ndim + (Ellipsis,) - self.idx_1d = (size//2,) * (ndim - 1) + self.idx_scalar = (size // 2,) * ndim + self.idx_0d = (size // 2,) * ndim + (Ellipsis,) + self.idx_1d = (size // 2,) * (ndim - 1) def time_scalar(self, masked, ndim, size): self.m[self.idx_scalar] @@ -65,8 +66,8 @@ def setup(self, a_masked, b_masked, size): self.a_scalar = np.ma.masked if a_masked else 5 self.b_scalar = np.ma.masked if b_masked else 3 - self.a_1d = np.ma.array(x, mask=x%2 == 0 if a_masked else np.ma.nomask) - self.b_1d = np.ma.array(x, mask=x%3 == 0 if b_masked else np.ma.nomask) + self.a_1d = np.ma.array(x, mask=x % 2 == 0 if a_masked else np.ma.nomask) + self.b_1d = np.ma.array(x, mask=x % 3 == 0 if b_masked else np.ma.nomask) self.a_2d = self.a_1d.reshape(1, -1) self.b_2d = self.a_1d.reshape(-1, 1) @@ -130,7 +131,7 @@ class MAFunctions1v(Benchmark): def setup(self, mtype, func, msize): xs = 2.0 + np.random.uniform(-1, 1, 6).reshape(2, 3) m1 = [[True, False, False], [False, False, True]] - xl = 2.0 + np.random.uniform(-1, 1, 100*100).reshape(100, 100) + xl = 2.0 + np.random.uniform(-1, 1, 100 * 100).reshape(100, 100) maskx = xl > 2.8 self.nmxs = np.ma.array(xs, mask=m1) self.nmxl = np.ma.array(xl, mask=maskx) @@ -152,7 +153,7 @@ class MAMethod0v(Benchmark): def setup(self, method, msize): xs = np.random.uniform(-1, 1, 6).reshape(2, 3) m1 = [[True, False, False], [False, False, True]] - xl = np.random.uniform(-1, 1, 100*100).reshape(100, 100) + xl = np.random.uniform(-1, 1, 100 * 100).reshape(100, 100) maskx = xl > 0.8 self.nmxs = np.ma.array(xs, mask=m1) self.nmxl = np.ma.array(xl, mask=maskx) @@ -180,8 +181,8 @@ def setup(self, mtype, func, msize): self.nmxs = np.ma.array(xs, mask=m1) self.nmys = np.ma.array(ys, mask=m2) # Big arrays - xl = 2.0 + np.random.uniform(-1, 1, 100*100).reshape(100, 100) - yl = 2.0 + np.random.uniform(-1, 1, 100*100).reshape(100, 100) + xl = 2.0 + np.random.uniform(-1, 1, 100 
* 100).reshape(100, 100) + yl = 2.0 + np.random.uniform(-1, 1, 100 * 100).reshape(100, 100) maskx = xl > 2.8 masky = yl < 1.8 self.nmxl = np.ma.array(xl, mask=maskx) @@ -203,7 +204,7 @@ class MAMethodGetItem(Benchmark): def setup(self, margs, msize): xs = np.random.uniform(-1, 1, 6).reshape(2, 3) m1 = [[True, False, False], [False, False, True]] - xl = np.random.uniform(-1, 1, 100*100).reshape(100, 100) + xl = np.random.uniform(-1, 1, 100 * 100).reshape(100, 100) maskx = xl > 0.8 self.nmxs = np.ma.array(xs, mask=m1) self.nmxl = np.ma.array(xl, mask=maskx) @@ -213,7 +214,7 @@ def time_methods_getitem(self, margs, msize): mdat = self.nmxs elif msize == 'big': mdat = self.nmxl - getattr(mdat, '__getitem__')(margs) + mdat.__getitem__(margs) class MAMethodSetItem(Benchmark): @@ -225,7 +226,7 @@ class MAMethodSetItem(Benchmark): def setup(self, margs, mset, msize): xs = np.random.uniform(-1, 1, 6).reshape(2, 3) m1 = [[True, False, False], [False, False, True]] - xl = np.random.uniform(-1, 1, 100*100).reshape(100, 100) + xl = np.random.uniform(-1, 1, 100 * 100).reshape(100, 100) maskx = xl > 0.8 self.nmxs = np.ma.array(xs, mask=m1) self.nmxl = np.ma.array(xl, mask=maskx) @@ -235,7 +236,7 @@ def time_methods_setitem(self, margs, mset, msize): mdat = self.nmxs elif msize == 'big': mdat = self.nmxl - getattr(mdat, '__setitem__')(margs, mset) + mdat.__setitem__(margs, mset) class Where(Benchmark): @@ -252,8 +253,8 @@ def setup(self, mtype, msize): self.nmxs = np.ma.array(xs, mask=m1) self.nmys = np.ma.array(ys, mask=m2) # Big arrays - xl = np.random.uniform(-1, 1, 100*100).reshape(100, 100) - yl = np.random.uniform(-1, 1, 100*100).reshape(100, 100) + xl = np.random.uniform(-1, 1, 100 * 100).reshape(100, 100) + yl = np.random.uniform(-1, 1, 100 * 100).reshape(100, 100) maskx = xl > 0.8 masky = yl < -0.8 self.nmxl = np.ma.array(xl, mask=maskx) diff --git a/benchmarks/benchmarks/bench_manipulate.py b/benchmarks/benchmarks/bench_manipulate.py index d74f1b7123d3..5bb867c10e89 
100644 --- a/benchmarks/benchmarks/bench_manipulate.py +++ b/benchmarks/benchmarks/bench_manipulate.py @@ -1,7 +1,9 @@ -from .common import Benchmark, get_squares_, TYPES1, DLPACK_TYPES +from collections import deque import numpy as np -from collections import deque + +from .common import TYPES1, Benchmark + class BroadcastArrays(Benchmark): params = [[(16, 32), (128, 256), (512, 1024)], @@ -10,10 +12,10 @@ class BroadcastArrays(Benchmark): timeout = 10 def setup(self, shape, ndtype): - self.xarg = np.random.ranf(shape[0]*shape[1]).reshape(shape) + self.xarg = np.random.ranf(shape[0] * shape[1]).reshape(shape) self.xarg = self.xarg.astype(ndtype) if ndtype.startswith('complex'): - self.xarg += np.random.ranf(1)*1j + self.xarg += np.random.ranf(1) * 1j def time_broadcast_arrays(self, shape, ndtype): np.broadcast_arrays(self.xarg, np.ones(1)) @@ -30,7 +32,7 @@ def setup(self, size, ndtype): self.xarg = self.rng.random(size) self.xarg = self.xarg.astype(ndtype) if ndtype.startswith('complex'): - self.xarg += self.rng.random(1)*1j + self.xarg += self.rng.random(1) * 1j def time_broadcast_to(self, size, ndtype): np.broadcast_to(self.xarg, (size, size)) @@ -44,11 +46,11 @@ class ConcatenateStackArrays(Benchmark): timeout = 10 def setup(self, shape, narrays, ndtype): - self.xarg = [np.random.ranf(shape[0]*shape[1]).reshape(shape) + self.xarg = [np.random.ranf(shape[0] * shape[1]).reshape(shape) for x in range(narrays)] self.xarg = [x.astype(ndtype) for x in self.xarg] if ndtype.startswith('complex'): - [x + np.random.ranf(1)*1j for x in self.xarg] + [x + np.random.ranf(1) * 1j for x in self.xarg] def time_concatenate_ax0(self, size, narrays, ndtype): np.concatenate(self.xarg, axis=0) diff --git a/benchmarks/benchmarks/bench_ndindex.py b/benchmarks/benchmarks/bench_ndindex.py new file mode 100644 index 000000000000..132d4eeed472 --- /dev/null +++ b/benchmarks/benchmarks/bench_ndindex.py @@ -0,0 +1,54 @@ +from itertools import product + +import numpy as np + +from .common 
import Benchmark + + +class NdindexBenchmark(Benchmark): + """ + Benchmark comparing numpy.ndindex() and itertools.product() + for different multi-dimensional shapes. + """ + + # Fix: Define each dimension separately, not as tuples + # ASV will pass each parameter list element to setup() + params = [ + [(10, 10), (20, 20), (50, 50), (10, 10, 10), (20, 30, 40), (50, 60, 90)] + ] + param_names = ["shape"] + + def setup(self, shape): + """Setup method called before each benchmark run.""" + # Access ndindex through NumPy's main namespace + self.ndindex = np.ndindex + + def time_ndindex(self, shape): + """ + Measure time taken by np.ndindex. + It creates an iterator that goes over each index. + """ + for _ in self.ndindex(*shape): + pass # Just loop through, no work inside + + def time_itertools_product(self, shape): + """ + Measure time taken by itertools.product. + Same goal: iterate over all index positions. + """ + for _ in product(*(range(s) for s in shape)): + pass + + def peakmem_ndindex(self, shape): + """ + Measure peak memory used when fully consuming + np.ndindex iterator by converting it to a list. + """ + return list(self.ndindex(*shape)) + + def peakmem_itertools_product(self, shape): + """ + Measure peak memory used when fully consuming + itertools.product iterator by converting it to a list. 
+ """ + return list(product(*(range(s) for s in shape))) diff --git a/benchmarks/benchmarks/bench_polynomial.py b/benchmarks/benchmarks/bench_polynomial.py new file mode 100644 index 000000000000..7bd7334e3c14 --- /dev/null +++ b/benchmarks/benchmarks/bench_polynomial.py @@ -0,0 +1,27 @@ +import numpy as np + +from .common import Benchmark + + +class Polynomial(Benchmark): + + def setup(self): + self.polynomial_degree2 = np.polynomial.Polynomial(np.array([1, 2])) + self.array3 = np.linspace(0, 1, 3) + self.array1000 = np.linspace(0, 1, 10_000) + self.float64 = np.float64(1.0) + + def time_polynomial_evaluation_scalar(self): + self.polynomial_degree2(self.float64) + + def time_polynomial_evaluation_python_float(self): + self.polynomial_degree2(1.0) + + def time_polynomial_evaluation_array_3(self): + self.polynomial_degree2(self.array3) + + def time_polynomial_evaluation_array_1000(self): + self.polynomial_degree2(self.array1000) + + def time_polynomial_addition(self): + _ = self.polynomial_degree2 + self.polynomial_degree2 diff --git a/benchmarks/benchmarks/bench_random.py b/benchmarks/benchmarks/bench_random.py index 9482eb04de97..d15d25941f93 100644 --- a/benchmarks/benchmarks/bench_random.py +++ b/benchmarks/benchmarks/bench_random.py @@ -1,7 +1,7 @@ -from .common import Benchmark - import numpy as np +from .common import Benchmark + try: from numpy.random import Generator except ImportError: @@ -84,6 +84,7 @@ def time_permutation_2d(self): def time_permutation_int(self): np.random.permutation(self.n) + nom_size = 100000 class RNG(Benchmark): @@ -147,28 +148,29 @@ class Bounded(Benchmark): ]] def setup(self, bitgen, args): + seed = 707250673 if bitgen == 'numpy': - self.rg = np.random.RandomState() + self.rg = np.random.RandomState(seed) else: - self.rg = Generator(getattr(np.random, bitgen)()) + self.rg = Generator(getattr(np.random, bitgen)(seed)) self.rg.random() def time_bounded(self, bitgen, args): - """ - Timer for 8-bit bounded values. 
- - Parameters (packed as args) - ---------- - dt : {uint8, uint16, uint32, unit64} - output dtype - max : int - Upper bound for range. Lower is always 0. Must be <= 2**bits. - """ - dt, max = args - if bitgen == 'numpy': - self.rg.randint(0, max + 1, nom_size, dtype=dt) - else: - self.rg.integers(0, max + 1, nom_size, dtype=dt) + """ + Timer for 8-bit bounded values. + + Parameters (packed as args) + ---------- + dt : {uint8, uint16, uint32, unit64} + output dtype + max : int + Upper bound for range. Lower is always 0. Must be <= 2**bits. + """ + dt, max = args + if bitgen == 'numpy': + self.rg.randint(0, max + 1, nom_size, dtype=dt) + else: + self.rg.integers(0, max + 1, nom_size, dtype=dt) class Choice(Benchmark): params = [1e3, 1e6, 1e8] diff --git a/benchmarks/benchmarks/bench_records.py b/benchmarks/benchmarks/bench_records.py index 35743038a74a..8c24a4715709 100644 --- a/benchmarks/benchmarks/bench_records.py +++ b/benchmarks/benchmarks/bench_records.py @@ -1,7 +1,7 @@ -from .common import Benchmark - import numpy as np +from .common import Benchmark + class Records(Benchmark): def setup(self): @@ -12,11 +12,11 @@ def setup(self): self.formats_str = ','.join(self.formats) self.dtype_ = np.dtype( [ - ('field_{}'.format(i), self.l50.dtype.str) + (f'field_{i}', self.l50.dtype.str) for i in range(self.fields_number) ] ) - self.buffer = self.l50.tostring() * self.fields_number + self.buffer = self.l50.tobytes() * self.fields_number def time_fromarrays_w_dtype(self): np._core.records.fromarrays(self.arrays, dtype=self.dtype_) @@ -30,11 +30,11 @@ def time_fromarrays_formats_as_list(self): def time_fromarrays_formats_as_string(self): np._core.records.fromarrays(self.arrays, formats=self.formats_str) - def time_fromstring_w_dtype(self): + def time_frombytes_w_dtype(self): np._core.records.fromstring(self.buffer, dtype=self.dtype_) - def time_fromstring_formats_as_list(self): + def time_frombytes_formats_as_list(self): np._core.records.fromstring(self.buffer, 
formats=self.formats) - def time_fromstring_formats_as_string(self): + def time_frombytes_formats_as_string(self): np._core.records.fromstring(self.buffer, formats=self.formats_str) diff --git a/benchmarks/benchmarks/bench_reduce.py b/benchmarks/benchmarks/bench_reduce.py index 53016f238b45..1d78e1bba03a 100644 --- a/benchmarks/benchmarks/bench_reduce.py +++ b/benchmarks/benchmarks/bench_reduce.py @@ -1,7 +1,7 @@ -from .common import Benchmark, TYPES1, get_squares - import numpy as np +from .common import TYPES1, Benchmark, get_squares + class AddReduce(Benchmark): def setup(self): @@ -52,7 +52,7 @@ class StatsReductions(Benchmark): def setup(self, dtype): self.data = np.ones(200, dtype=dtype) if dtype.startswith('complex'): - self.data = self.data * self.data.T*1j + self.data = self.data * self.data.T * 1j def time_min(self, dtype): np.min(self.data) diff --git a/benchmarks/benchmarks/bench_scalar.py b/benchmarks/benchmarks/bench_scalar.py index 638f66df5bde..40164926ade3 100644 --- a/benchmarks/benchmarks/bench_scalar.py +++ b/benchmarks/benchmarks/bench_scalar.py @@ -1,13 +1,14 @@ -from .common import Benchmark, TYPES1 - import numpy as np +from .common import TYPES1, Benchmark + class ScalarMath(Benchmark): # Test scalar math, note that each of these is run repeatedly to offset # the function call overhead to some degree. 
params = [TYPES1] param_names = ["type"] + def setup(self, typename): self.num = np.dtype(typename).type(2) self.int32 = np.int32(2) diff --git a/benchmarks/benchmarks/bench_searchsorted.py b/benchmarks/benchmarks/bench_searchsorted.py new file mode 100644 index 000000000000..86cc625161d8 --- /dev/null +++ b/benchmarks/benchmarks/bench_searchsorted.py @@ -0,0 +1,35 @@ +import numpy as np + +from .common import Benchmark + + +class SearchSorted(Benchmark): + params = [ + [100, 10_000, 1_000_000, 100_000_000], # array sizes + [1, 10, 100_000], # number of query elements + ['ordered', 'random'], # query order + [False, True], # use sorter + [42, 18122022], # seed + ] + param_names = ['array_size', 'n_queries', 'query_order', 'use_sorter', 'seed'] + + def setup(self, array_size, n_queries, query_order, use_sorter, seed): + self.arr = np.arange(array_size, dtype=np.int32) + + rng = np.random.default_rng(seed) + + low = -array_size // 10 + high = array_size + array_size // 10 + + self.queries = rng.integers(low, high, size=n_queries, dtype=np.int32) + if query_order == 'ordered': + self.queries.sort() + + if use_sorter: + rng.shuffle(self.arr) + self.sorter = self.arr.argsort() + else: + self.sorter = None + + def time_searchsorted(self, array_size, n_queries, query_order, use_sorter, seed): + np.searchsorted(self.arr, self.queries, sorter=self.sorter) diff --git a/benchmarks/benchmarks/bench_shape_base.py b/benchmarks/benchmarks/bench_shape_base.py index eb13ff969353..db66fa46371e 100644 --- a/benchmarks/benchmarks/bench_shape_base.py +++ b/benchmarks/benchmarks/bench_shape_base.py @@ -1,7 +1,7 @@ -from .common import Benchmark - import numpy as np +from .common import Benchmark + class Block(Benchmark): params = [1, 10, 100] @@ -76,7 +76,7 @@ class Block2D(Benchmark): def setup(self, shape, dtype, n_chunks): self.block_list = [ - [np.full(shape=[s//n_chunk for s, n_chunk in zip(shape, n_chunks)], + [np.full(shape=[s // n_chunk for s, n_chunk in zip(shape, n_chunks)], 
fill_value=1, dtype=dtype) for _ in range(n_chunks[1])] for _ in range(n_chunks[0]) ] diff --git a/benchmarks/benchmarks/bench_strings.py b/benchmarks/benchmarks/bench_strings.py index 88d20069e75b..8df866f273c0 100644 --- a/benchmarks/benchmarks/bench_strings.py +++ b/benchmarks/benchmarks/bench_strings.py @@ -1,8 +1,8 @@ -from .common import Benchmark +import operator import numpy as np -import operator +from .common import Benchmark _OPERATORS = { '==': operator.eq, diff --git a/benchmarks/benchmarks/bench_trim_zeros.py b/benchmarks/benchmarks/bench_trim_zeros.py index 4e25a8b021b7..4a9751681e9e 100644 --- a/benchmarks/benchmarks/bench_trim_zeros.py +++ b/benchmarks/benchmarks/bench_trim_zeros.py @@ -1,7 +1,7 @@ -from .common import Benchmark - import numpy as np +from .common import Benchmark + _FLOAT = np.dtype('float64') _COMPLEX = np.dtype('complex128') _INT = np.dtype('int64') diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py index ca96d8c22775..ac978981faba 100644 --- a/benchmarks/benchmarks/bench_ufunc.py +++ b/benchmarks/benchmarks/bench_ufunc.py @@ -1,10 +1,11 @@ -from .common import Benchmark, get_squares_, TYPES1, DLPACK_TYPES - -import numpy as np import itertools -from packaging import version import operator +from packaging import version + +import numpy as np + +from .common import DLPACK_TYPES, TYPES1, Benchmark, get_squares_ ufuncs = ['abs', 'absolute', 'add', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'bitwise_and', 'bitwise_count', 'bitwise_not', @@ -16,18 +17,28 @@ 'isinf', 'isnan', 'isnat', 'lcm', 'ldexp', 'left_shift', 'less', 'less_equal', 'log', 'log10', 'log1p', 'log2', 'logaddexp', 'logaddexp2', 'logical_and', 'logical_not', 'logical_or', - 'logical_xor', 'matmul', 'maximum', 'minimum', 'mod', 'modf', - 'multiply', 'negative', 'nextafter', 'not_equal', 'positive', + 'logical_xor', 'matmul', 'matvec', 'maximum', 'minimum', 'mod', + 'modf', 'multiply', 'negative', 
'nextafter', 'not_equal', 'positive', 'power', 'rad2deg', 'radians', 'reciprocal', 'remainder', 'right_shift', 'rint', 'sign', 'signbit', 'sin', 'sinh', 'spacing', 'sqrt', 'square', 'subtract', 'tan', 'tanh', - 'true_divide', 'trunc'] + 'true_divide', 'trunc', 'vecdot', 'vecmat'] arrayfuncdisp = ['real', 'round'] +for name in ufuncs: + f = getattr(np, name, None) + if not isinstance(f, np.ufunc): + raise ValueError(f"Bench target `np.{name}` is not a ufunc") -for name in dir(np): - if isinstance(getattr(np, name, None), np.ufunc) and name not in ufuncs: - print("Missing ufunc %r" % (name,)) +all_ufuncs = (getattr(np, name, None) for name in dir(np)) +all_ufuncs = set(filter(lambda f: isinstance(f, np.ufunc), all_ufuncs)) +bench_ufuncs = {getattr(np, name, None) for name in ufuncs} + +missing_ufuncs = all_ufuncs - bench_ufuncs +if len(missing_ufuncs) > 0: + missing_ufunc_names = [f.__name__ for f in missing_ufuncs] + raise NotImplementedError( + f"Missing benchmarks for ufuncs {missing_ufunc_names!r}") class ArrayFunctionDispatcher(Benchmark): @@ -40,9 +51,9 @@ def setup(self, ufuncname): try: self.afdn = getattr(np, ufuncname) except AttributeError: - raise NotImplementedError() + raise NotImplementedError self.args = [] - for _, aarg in get_squares_().items(): + for aarg in get_squares_().values(): arg = (aarg,) * 1 # no nin try: self.afdn(*arg) @@ -87,9 +98,9 @@ def setup(self, ufuncname): try: self.ufn = getattr(np, ufuncname) except AttributeError: - raise NotImplementedError() + raise NotImplementedError self.args = [] - for _, aarg in get_squares_().items(): + for aarg in get_squares_().values(): arg = (aarg,) * self.ufn.nin try: self.ufn(*arg) @@ -241,14 +252,14 @@ class NDArrayGetItem(Benchmark): def setup(self, margs, msize): self.xs = np.random.uniform(-1, 1, 6).reshape(2, 3) - self.xl = np.random.uniform(-1, 1, 50*50).reshape(50, 50) + self.xl = np.random.uniform(-1, 1, 50 * 50).reshape(50, 50) def time_methods_getitem(self, margs, msize): if msize == 
'small': mdat = self.xs elif msize == 'big': mdat = self.xl - getattr(mdat, '__getitem__')(margs) + mdat.__getitem__(margs) class NDArraySetItem(Benchmark): @@ -258,7 +269,7 @@ class NDArraySetItem(Benchmark): def setup(self, margs, msize): self.xs = np.random.uniform(-1, 1, 6).reshape(2, 3) - self.xl = np.random.uniform(-1, 1, 100*100).reshape(100, 100) + self.xl = np.random.uniform(-1, 1, 100 * 100).reshape(100, 100) def time_methods_setitem(self, margs, msize): if msize == 'small': @@ -293,7 +304,7 @@ def time_ndarray_dlp(self, methname, npdtypes): class NDArrayAsType(Benchmark): """ Benchmark for type conversion """ - params = [list(itertools.combinations(TYPES1, 2))] + params = [list(itertools.product(TYPES1, TYPES1))] param_names = ['typeconv'] timeout = 10 @@ -322,7 +333,7 @@ def setup(self, ufuncname): try: self.f = getattr(np, ufuncname) except AttributeError: - raise NotImplementedError() + raise NotImplementedError self.array_5 = np.array([1., 2., 10., 3., 4.]) self.array_int_3 = np.array([1, 2, 3]) self.float64 = np.float64(1.1) @@ -332,7 +343,7 @@ def time_ufunc_small_array(self, ufuncname): self.f(self.array_5) def time_ufunc_small_array_inplace(self, ufuncname): - self.f(self.array_5, out = self.array_5) + self.f(self.array_5, out=self.array_5) def time_ufunc_small_int_array(self, ufuncname): self.f(self.array_int_3) @@ -422,7 +433,7 @@ def time_divide_scalar2_inplace(self, dtype): class CustomComparison(Benchmark): - params = (np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, + params = (np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64, np.float32, np.float64, np.bool) param_names = ['dtype'] @@ -487,7 +498,7 @@ def time_floor_divide_int(self, dtype, size): class Scalar(Benchmark): def setup(self): self.x = np.asarray(1.0) - self.y = np.asarray((1.0 + 1j)) + self.y = np.asarray(1.0 + 1j) self.z = complex(1.0, 1.0) def time_add_scalar(self): @@ -502,13 +513,15 @@ def time_add_scalar_conv_complex(self): class 
ArgPack: __slots__ = ['args', 'kwargs'] + def __init__(self, *args, **kwargs): self.args = args self.kwargs = kwargs + def __repr__(self): return '({})'.format(', '.join( [repr(a) for a in self.args] + - ['{}={}'.format(k, repr(v)) for k, v in self.kwargs.items()] + [f'{k}={v!r}' for k, v in self.kwargs.items()] )) @@ -576,6 +589,12 @@ def time_pow_2(self, dtype): def time_pow_half(self, dtype): np.power(self.a, 0.5) + def time_pow_2_op(self, dtype): + self.a ** 2 + + def time_pow_half_op(self, dtype): + self.a ** 0.5 + def time_atan2(self, dtype): np.arctan2(self.a, self.b) @@ -587,7 +606,7 @@ def setup(self, dtype): N = 1000000 self.a = np.random.randint(20, size=N).astype(dtype) self.b = np.random.randint(4, size=N).astype(dtype) - + def time_pow(self, dtype): np.power(self.a, self.b) diff --git a/benchmarks/benchmarks/bench_ufunc_strides.py b/benchmarks/benchmarks/bench_ufunc_strides.py index 183c7c4fb75e..0c80b1877b3a 100644 --- a/benchmarks/benchmarks/bench_ufunc_strides.py +++ b/benchmarks/benchmarks/bench_ufunc_strides.py @@ -1,23 +1,23 @@ -from .common import Benchmark, get_data - import numpy as np +from .common import Benchmark, get_data + UFUNCS = [obj for obj in np._core.umath.__dict__.values() if isinstance(obj, np.ufunc)] UFUNCS_UNARY = [uf for uf in UFUNCS if "O->O" in uf.types] class _AbstractBinary(Benchmark): params = [] - param_names = ['ufunc', 'stride_in0', 'stride_in1' 'stride_out', 'dtype'] + param_names = ['ufunc', 'stride_in0', 'stride_in1', 'stride_out', 'dtype'] timeout = 10 - arrlen = 10000 + arrlen = 1000000 data_finite = True data_denormal = False data_zeros = False def setup(self, ufunc, stride_in0, stride_in1, stride_out, dtype): ufunc_insig = f'{dtype}{dtype}->' - if ufunc_insig+dtype not in ufunc.types: + if ufunc_insig + dtype not in ufunc.types: for st_sig in (ufunc_insig, dtype): test = [sig for sig in ufunc.types if sig.startswith(st_sig)] if test: @@ -35,14 +35,14 @@ def setup(self, ufunc, stride_in0, stride_in1, stride_out, 
dtype): self.ufunc_args = [] for i, (dt, stride) in enumerate(zip(tin, (stride_in0, stride_in1))): self.ufunc_args += [get_data( - self.arrlen*stride, dt, i, + self.arrlen * stride, dt, i, zeros=self.data_zeros, finite=self.data_finite, denormal=self.data_denormal, )[::stride]] for dt in tout: self.ufunc_args += [ - np.empty(stride_out*self.arrlen, dt)[::stride_out] + np.empty(stride_out * self.arrlen, dt)[::stride_out] ] np.seterr(all='ignore') @@ -63,14 +63,14 @@ class _AbstractUnary(Benchmark): params = [] param_names = ['ufunc', 'stride_in', 'stride_out', 'dtype'] timeout = 10 - arrlen = 10000 + arrlen = 1000000 data_finite = True data_denormal = False data_zeros = False def setup(self, ufunc, stride_in, stride_out, dtype): arr_in = get_data( - stride_in*self.arrlen, dtype, + stride_in * self.arrlen, dtype, zeros=self.data_zeros, finite=self.data_finite, denormal=self.data_denormal, @@ -78,7 +78,7 @@ def setup(self, ufunc, stride_in, stride_out, dtype): self.ufunc_args = [arr_in[::stride_in]] ufunc_insig = f'{dtype}->' - if ufunc_insig+dtype not in ufunc.types: + if ufunc_insig + dtype not in ufunc.types: test = [sig for sig in ufunc.types if sig.startswith(ufunc_insig)] if not test: raise NotImplementedError( @@ -91,7 +91,7 @@ def setup(self, ufunc, stride_in, stride_out, dtype): for dt in tout: self.ufunc_args += [ - np.empty(stride_out*self.arrlen, dt)[::stride_out] + np.empty(stride_out * self.arrlen, dt)[::stride_out] ] np.seterr(all='ignore') @@ -172,10 +172,10 @@ class UnaryIntContig(_AbstractUnary): ] class Mandelbrot(Benchmark): - def f(self,z): + def f(self, z): return np.abs(z) < 4.0 - def g(self,z,c): + def g(self, z, c): return np.sum(np.multiply(z, z) + c) def mandelbrot_numpy(self, c, maxiter): @@ -184,43 +184,45 @@ def mandelbrot_numpy(self, c, maxiter): for it in range(maxiter): notdone = self.f(z) output[notdone] = it - z[notdone] = self.g(z[notdone],c[notdone]) - output[output == maxiter-1] = 0 + z[notdone] = self.g(z[notdone], c[notdone]) + 
output[output == maxiter - 1] = 0 return output - def mandelbrot_set(self,xmin,xmax,ymin,ymax,width,height,maxiter): + def mandelbrot_set(self, xmin, xmax, ymin, ymax, width, height, maxiter): r1 = np.linspace(xmin, xmax, width, dtype=np.float32) r2 = np.linspace(ymin, ymax, height, dtype=np.float32) - c = r1 + r2[:,None]*1j - n3 = self.mandelbrot_numpy(c,maxiter) - return (r1,r2,n3.T) + c = r1 + r2[:, None] * 1j + n3 = self.mandelbrot_numpy(c, maxiter) + return (r1, r2, n3.T) def time_mandel(self): - self.mandelbrot_set(-0.74877,-0.74872,0.06505,0.06510,1000,1000,2048) + self.mandelbrot_set(-0.74877, -0.74872, 0.06505, 0.06510, 1000, 1000, 2048) class LogisticRegression(Benchmark): param_names = ['dtype'] params = [np.float32, np.float64] timeout = 1000 + def train(self, max_epoch): for epoch in range(max_epoch): z = np.matmul(self.X_train, self.W) - A = 1 / (1 + np.exp(-z)) # sigmoid(z) - loss = -np.mean(self.Y_train * np.log(A) + (1-self.Y_train) * np.log(1-A)) - dz = A - self.Y_train - dw = (1/self.size) * np.matmul(self.X_train.T, dz) - self.W = self.W - self.alpha*dw + A = 1 / (1 + np.exp(-z)) # sigmoid(z) + Y_train = self.Y_train + loss = -np.mean(Y_train * np.log(A) + (1 - Y_train) * np.log(1 - A)) + dz = A - Y_train + dw = (1 / self.size) * np.matmul(self.X_train.T, dz) + self.W = self.W - self.alpha * dw def setup(self, dtype): np.random.seed(42) self.size = 250 features = 16 - self.X_train = np.random.rand(self.size,features).astype(dtype) - self.Y_train = np.random.choice(2,self.size).astype(dtype) + self.X_train = np.random.rand(self.size, features).astype(dtype) + self.Y_train = np.random.choice(2, self.size).astype(dtype) # Initialize weights - self.W = np.zeros((features,1), dtype=dtype) - self.b = np.zeros((1,1), dtype=dtype) + self.W = np.zeros((features, 1), dtype=dtype) + self.b = np.zeros((1, 1), dtype=dtype) self.alpha = 0.1 def time_train(self, dtype): diff --git a/benchmarks/benchmarks/common.py b/benchmarks/benchmarks/common.py index 
d4c1540ff203..7ed528e8d518 100644 --- a/benchmarks/benchmarks/common.py +++ b/benchmarks/benchmarks/common.py @@ -1,9 +1,9 @@ -import numpy as np import random -import os from functools import lru_cache from pathlib import Path +import numpy as np + # Various pre-crafted datasets/variables for testing # !!! Must not be changed -- only appended !!! # while testing numpy we better not rely on numpy to produce random @@ -21,14 +21,14 @@ TYPES1 = [ 'int16', 'float16', 'int32', 'float32', - 'int64', 'float64', 'complex64', + 'int64', 'float64', 'complex64', 'complex128', ] DLPACK_TYPES = [ 'int16', 'float16', 'int32', 'float32', - 'int64', 'float64', 'complex64', + 'int64', 'float64', 'complex64', 'complex128', 'bool', ] @@ -41,8 +41,8 @@ @lru_cache(typed=True) def get_values(): - rnd = np.random.RandomState(1) - values = np.tile(rnd.uniform(0, 100, size=nx*ny//10), 10) + rnd = np.random.RandomState(1804169117) + values = np.tile(rnd.uniform(0, 100, size=nx * ny // 10), 10) return values @@ -54,13 +54,13 @@ def get_square(dtype): # adjust complex ones to have non-degenerated imagery part -- use # original data transposed for that if arr.dtype.kind == 'c': - arr += arr.T*1j + arr += arr.T * 1j return arr @lru_cache(typed=True) def get_squares(): - return {t: get_square(t) for t in TYPES1} + return {t: get_square(t) for t in sorted(TYPES1)} @lru_cache(typed=True) @@ -72,14 +72,7 @@ def get_square_(dtype): @lru_cache(typed=True) def get_squares_(): # smaller squares - return {t: get_square_(t) for t in TYPES1} - - -@lru_cache(typed=True) -def get_vectors(): - # vectors - vectors = {t: s[0] for t, s in get_squares().items()} - return vectors + return {t: get_square_(t) for t in sorted(TYPES1)} @lru_cache(typed=True) @@ -211,7 +204,7 @@ def get_data(size, dtype, ip_num=0, zeros=False, finite=True, denormal=False): rands += [np.zeros(lsize, dtype)] stride = len(rands) for start, r in enumerate(rands): - array[start:len(r)*stride:stride] = r + array[start:len(r) * 
stride:stride] = r if not CACHE_ROOT.exists(): CACHE_ROOT.mkdir(parents=True) diff --git a/doc/BRANCH_WALKTHROUGH.rst b/doc/BRANCH_WALKTHROUGH.rst index 5767fb6e6a10..3f9db71a0282 100644 --- a/doc/BRANCH_WALKTHROUGH.rst +++ b/doc/BRANCH_WALKTHROUGH.rst @@ -1,6 +1,6 @@ -This guide contains a walkthrough of branching NumPy 1.21.x on Linux. The -commands can be copied into the command line, but be sure to replace 1.21 and -1.22 by the correct versions. It is good practice to make ``.mailmap`` as +This guide contains a walkthrough of branching NumPy 2.3.x on Linux. The +commands can be copied into the command line, but be sure to replace 2.3 and +2.4 by the correct versions. It is good practice to make ``.mailmap`` as current as possible before making the branch, that may take several weeks. This should be read together with the @@ -12,14 +12,13 @@ Branching Make the branch --------------- -This is only needed when starting a new maintenance branch. Because -NumPy now depends on tags to determine the version, the start of a new -development cycle in the main branch needs an annotated tag. That is done +This is only needed when starting a new maintenance branch. The start of a new +development cycle in the main branch should get an annotated tag. That is done as follows:: $ git checkout main $ git pull upstream main - $ git commit --allow-empty -m'REL: Begin NumPy 1.22.0 development' + $ git commit --allow-empty -m'REL: Begin NumPy 2.4.0 development' $ git push upstream HEAD If the push fails because new PRs have been merged, do:: @@ -28,20 +27,20 @@ If the push fails because new PRs have been merged, do:: and repeat the push. 
Once the push succeeds, tag it:: - $ git tag -a -s v1.22.0.dev0 -m'Begin NumPy 1.22.0 development' - $ git push upstream v1.22.0.dev0 + $ git tag -a -s v2.4.0.dev0 -m'Begin NumPy 2.4.0 development' + $ git push upstream v2.4.0.dev0 then make the new branch and push it:: - $ git branch maintenance/1.21.x HEAD^ - $ git push upstream maintenance/1.21.x + $ git branch maintenance/2.3.x HEAD^ + $ git push upstream maintenance/2.3.x Prepare the main branch for further development ----------------------------------------------- -Make a PR branch to prepare main for further development:: +Make a PR branch to prepare ``main`` for further development:: - $ git checkout -b 'prepare-main-for-1.22.0-development' v1.22.0.dev0 + $ git checkout -b 'prepare-main-for-2.4.0-development' v2.4.0.dev0 Delete the release note fragments:: @@ -49,18 +48,12 @@ Delete the release note fragments:: Create the new release notes skeleton and add to index:: - $ cp doc/source/release/template.rst doc/source/release/1.22.0-notes.rst - $ gvim doc/source/release/1.22.0-notes.rst # put the correct version - $ git add doc/source/release/1.22.0-notes.rst + $ cp doc/source/release/template.rst doc/source/release/2.4.0-notes.rst + $ gvim doc/source/release/2.4.0-notes.rst # put the correct version + $ git add doc/source/release/2.4.0-notes.rst $ gvim doc/source/release.rst # add new notes to notes index $ git add doc/source/release.rst -Update ``pavement.py`` and update the ``RELEASE_NOTES`` variable to point to -the new notes:: - - $ gvim pavement.py - $ git add pavement.py - Update ``cversions.txt`` to add current release. There should be no new hash to worry about at this early point, just add a comment following previous practice:: @@ -71,7 +64,7 @@ practice:: Check your work, commit it, and push:: $ git status # check work - $ git commit -m'REL: Prepare main for NumPy 1.22.0 development' + $ git commit -m'REL: Prepare main for NumPy 2.4.0 development' $ git push origin HEAD Now make a pull request. 
diff --git a/doc/C_STYLE_GUIDE.rst b/doc/C_STYLE_GUIDE.rst index 60d2d7383510..486936ac594e 100644 --- a/doc/C_STYLE_GUIDE.rst +++ b/doc/C_STYLE_GUIDE.rst @@ -1,3 +1,3 @@ The "NumPy C Style Guide" at this page has been superseded by -"NEP 45 — C Style Guide" at https://numpy.org/neps/nep-0045-c_style_guide.html +:external+nep:doc:`nep-0045-c_style_guide` diff --git a/doc/DISTUTILS.rst b/doc/DISTUTILS.rst deleted file mode 100644 index 142c15a7124a..000000000000 --- a/doc/DISTUTILS.rst +++ /dev/null @@ -1,622 +0,0 @@ -.. -*- rest -*- - -NumPy distutils - users guide -============================= - -.. contents:: - -SciPy structure -''''''''''''''' - -Currently SciPy project consists of two packages: - -- NumPy --- it provides packages like: - - + numpy.distutils - extension to Python distutils - + numpy.f2py - a tool to bind Fortran/C codes to Python - + numpy._core - future replacement of Numeric and numarray packages - + numpy.lib - extra utility functions - + numpy.testing - numpy-style tools for unit testing - + etc - -- SciPy --- a collection of scientific tools for Python. - -The aim of this document is to describe how to add new tools to SciPy. - - -Requirements for SciPy packages -''''''''''''''''''''''''''''''' - -SciPy consists of Python packages, called SciPy packages, that are -available to Python users via the ``scipy`` namespace. Each SciPy package -may contain other SciPy packages. And so on. Therefore, the SciPy -directory tree is a tree of packages with arbitrary depth and width. -Any SciPy package may depend on NumPy packages but the dependence on other -SciPy packages should be kept minimal or zero. - -A SciPy package contains, in addition to its sources, the following -files and directories: - -+ ``setup.py`` --- building script -+ ``__init__.py`` --- package initializer -+ ``tests/`` --- directory of unittests - -Their contents are described below. 
- -The ``setup.py`` file -''''''''''''''''''''' - -In order to add a Python package to SciPy, its build script (``setup.py``) -must meet certain requirements. The most important requirement is that the -package define a ``configuration(parent_package='',top_path=None)`` function -which returns a dictionary suitable for passing to -``numpy.distutils.core.setup(..)``. To simplify the construction of -this dictionary, ``numpy.distutils.misc_util`` provides the -``Configuration`` class, described below. - -SciPy pure Python package example ---------------------------------- - -Below is an example of a minimal ``setup.py`` file for a pure SciPy package:: - - #!/usr/bin/env python3 - def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('mypackage',parent_package,top_path) - return config - - if __name__ == "__main__": - from numpy.distutils.core import setup - #setup(**configuration(top_path='').todict()) - setup(configuration=configuration) - -The arguments of the ``configuration`` function specify the name of -parent SciPy package (``parent_package``) and the directory location -of the main ``setup.py`` script (``top_path``). These arguments, -along with the name of the current package, should be passed to the -``Configuration`` constructor. - -The ``Configuration`` constructor has a fourth optional argument, -``package_path``, that can be used when package files are located in -a different location than the directory of the ``setup.py`` file. - -Remaining ``Configuration`` arguments are all keyword arguments that will -be used to initialize attributes of ``Configuration`` -instance. Usually, these keywords are the same as the ones that -``setup(..)`` function would expect, for example, ``packages``, -``ext_modules``, ``data_files``, ``include_dirs``, ``libraries``, -``headers``, ``scripts``, ``package_dir``, etc. 
However, the direct -specification of these keywords is not recommended as the content of -these keyword arguments will not be processed or checked for the -consistency of SciPy building system. - -Finally, ``Configuration`` has ``.todict()`` method that returns all -the configuration data as a dictionary suitable for passing on to the -``setup(..)`` function. - -``Configuration`` instance attributes -------------------------------------- - -In addition to attributes that can be specified via keyword arguments -to ``Configuration`` constructor, ``Configuration`` instance (let us -denote as ``config``) has the following attributes that can be useful -in writing setup scripts: - -+ ``config.name`` - full name of the current package. The names of parent - packages can be extracted as ``config.name.split('.')``. - -+ ``config.local_path`` - path to the location of current ``setup.py`` file. - -+ ``config.top_path`` - path to the location of main ``setup.py`` file. - -``Configuration`` instance methods ----------------------------------- - -+ ``config.todict()`` --- returns configuration dictionary suitable for - passing to ``numpy.distutils.core.setup(..)`` function. - -+ ``config.paths(*paths) --- applies ``glob.glob(..)`` to items of - ``paths`` if necessary. Fixes ``paths`` item that is relative to - ``config.local_path``. - -+ ``config.get_subpackage(subpackage_name,subpackage_path=None)`` --- - returns a list of subpackage configurations. Subpackage is looked in the - current directory under the name ``subpackage_name`` but the path - can be specified also via optional ``subpackage_path`` argument. - If ``subpackage_name`` is specified as ``None`` then the subpackage - name will be taken the basename of ``subpackage_path``. - Any ``*`` used for subpackage names are expanded as wildcards. - -+ ``config.add_subpackage(subpackage_name,subpackage_path=None)`` --- - add SciPy subpackage configuration to the current one. 
The meaning - and usage of arguments is explained above, see - ``config.get_subpackage()`` method. - -+ ``config.add_data_files(*files)`` --- prepend ``files`` to ``data_files`` - list. If ``files`` item is a tuple then its first element defines - the suffix of where data files are copied relative to package installation - directory and the second element specifies the path to data - files. By default data files are copied under package installation - directory. For example, - - :: - - config.add_data_files('foo.dat', - ('fun',['gun.dat','nun/pun.dat','/tmp/sun.dat']), - 'bar/car.dat'. - '/full/path/to/can.dat', - ) - - will install data files to the following locations - - :: - - / - foo.dat - fun/ - gun.dat - pun.dat - sun.dat - bar/ - car.dat - can.dat - - Path to data files can be a function taking no arguments and - returning path(s) to data files -- this is a useful when data files - are generated while building the package. (XXX: explain the step - when this function are called exactly) - -+ ``config.add_data_dir(data_path)`` --- add directory ``data_path`` - recursively to ``data_files``. The whole directory tree starting at - ``data_path`` will be copied under package installation directory. - If ``data_path`` is a tuple then its first element defines - the suffix of where data files are copied relative to package installation - directory and the second element specifies the path to data directory. - By default, data directory are copied under package installation - directory under the basename of ``data_path``. For example, - - :: - - config.add_data_dir('fun') # fun/ contains foo.dat bar/car.dat - config.add_data_dir(('sun','fun')) - config.add_data_dir(('gun','/full/path/to/fun')) - - will install data files to the following locations - - :: - - / - fun/ - foo.dat - bar/ - car.dat - sun/ - foo.dat - bar/ - car.dat - gun/ - foo.dat - bar/ - car.dat - -+ ``config.add_include_dirs(*paths)`` --- prepend ``paths`` to - ``include_dirs`` list. 
This list will be visible to all extension - modules of the current package. - -+ ``config.add_headers(*files)`` --- prepend ``files`` to ``headers`` - list. By default, headers will be installed under - ``/include/pythonX.X//`` - directory. If ``files`` item is a tuple then it's first argument - specifies the installation suffix relative to - ``/include/pythonX.X/`` path. This is a Python distutils - method; its use is discouraged for NumPy and SciPy in favour of - ``config.add_data_files(*files)``. - -+ ``config.add_scripts(*files)`` --- prepend ``files`` to ``scripts`` - list. Scripts will be installed under ``/bin/`` directory. - -+ ``config.add_extension(name,sources,**kw)`` --- create and add an - ``Extension`` instance to ``ext_modules`` list. The first argument - ``name`` defines the name of the extension module that will be - installed under ``config.name`` package. The second argument is - a list of sources. ``add_extension`` method takes also keyword - arguments that are passed on to the ``Extension`` constructor. - The list of allowed keywords is the following: ``include_dirs``, - ``define_macros``, ``undef_macros``, ``library_dirs``, ``libraries``, - ``runtime_library_dirs``, ``extra_objects``, ``extra_compile_args``, - ``extra_link_args``, ``export_symbols``, ``swig_opts``, ``depends``, - ``language``, ``f2py_options``, ``module_dirs``, ``extra_info``, - ``extra_f77_compile_args``, ``extra_f90_compile_args``. - - Note that ``config.paths`` method is applied to all lists that - may contain paths. ``extra_info`` is a dictionary or a list - of dictionaries that content will be appended to keyword arguments. - The list ``depends`` contains paths to files or directories - that the sources of the extension module depend on. If any path - in the ``depends`` list is newer than the extension module, then - the module will be rebuilt. - - The list of sources may contain functions ('source generators') - with a pattern ``def (ext, build_dir): return - ``. 
If ``funcname`` returns ``None``, no sources - are generated. And if the ``Extension`` instance has no sources - after processing all source generators, no extension module will - be built. This is the recommended way to conditionally define - extension modules. Source generator functions are called by the - ``build_src`` sub-command of ``numpy.distutils``. - - For example, here is a typical source generator function:: - - def generate_source(ext,build_dir): - import os - from distutils.dep_util import newer - target = os.path.join(build_dir,'somesource.c') - if newer(target,__file__): - # create target file - return target - - The first argument contains the Extension instance that can be - useful to access its attributes like ``depends``, ``sources``, - etc. lists and modify them during the building process. - The second argument gives a path to a build directory that must - be used when creating files to a disk. - -+ ``config.add_library(name, sources, **build_info)`` --- add a - library to ``libraries`` list. Allowed keywords arguments are - ``depends``, ``macros``, ``include_dirs``, ``extra_compiler_args``, - ``f2py_options``, ``extra_f77_compile_args``, - ``extra_f90_compile_args``. See ``.add_extension()`` method for - more information on arguments. - -+ ``config.have_f77c()`` --- return True if Fortran 77 compiler is - available (read: a simple Fortran 77 code compiled successfully). - -+ ``config.have_f90c()`` --- return True if Fortran 90 compiler is - available (read: a simple Fortran 90 code compiled successfully). - -+ ``config.get_version()`` --- return version string of the current package, - ``None`` if version information could not be detected. This methods - scans files ``__version__.py``, ``_version.py``, - ``version.py``, ``__svn_version__.py`` for string variables - ``version``, ``__version__``, ``_version``. 
- -+ ``config.make_svn_version_py()`` --- appends a data function to - ``data_files`` list that will generate ``__svn_version__.py`` file - to the current package directory. The file will be removed from - the source directory when Python exits. - -+ ``config.get_build_temp_dir()`` --- return a path to a temporary - directory. This is the place where one should build temporary - files. - -+ ``config.get_distribution()`` --- return distutils ``Distribution`` - instance. - -+ ``config.get_config_cmd()`` --- returns ``numpy.distutils`` config - command instance. - -+ ``config.get_info(*names)`` --- - - -.. _templating: - -Conversion of ``.src`` files using templates --------------------------------------------- - -NumPy distutils supports automatic conversion of source files named -.src. This facility can be used to maintain very similar -code blocks requiring only simple changes between blocks. During the -build phase of setup, if a template file named .src is -encountered, a new file named is constructed from the -template and placed in the build directory to be used instead. Two -forms of template conversion are supported. The first form occurs for -files named .ext.src where ext is a recognized Fortran -extension (f, f90, f95, f77, for, ftn, pyf). The second form is used -for all other cases. - -.. index:: - single: code generation - -Fortran files -------------- - -This template converter will replicate all **function** and -**subroutine** blocks in the file with names that contain '<...>' -according to the rules in '<...>'. The number of comma-separated words -in '<...>' determines the number of times the block is repeated. What -these words are indicates what that repeat rule, '<...>', should be -replaced with in each block. All of the repeat rules in a block must -contain the same number of comma-separated words indicating the number -of times that block should be repeated. 
If the word in the repeat rule -needs a comma, leftarrow, or rightarrow, then prepend it with a -backslash ' \'. If a word in the repeat rule matches ' \\' then -it will be replaced with the -th word in the same repeat -specification. There are two forms for the repeat rule: named and -short. - -Named repeat rule -^^^^^^^^^^^^^^^^^ - -A named repeat rule is useful when the same set of repeats must be -used several times in a block. It is specified using , where N is the number of times the block -should be repeated. On each repeat of the block, the entire -expression, '<...>' will be replaced first with item1, and then with -item2, and so forth until N repeats are accomplished. Once a named -repeat specification has been introduced, the same repeat rule may be -used **in the current block** by referring only to the name -(i.e. ). - - -Short repeat rule -^^^^^^^^^^^^^^^^^ - -A short repeat rule looks like . The -rule specifies that the entire expression, '<...>' should be replaced -first with item1, and then with item2, and so forth until N repeats -are accomplished. - - -Pre-defined names -^^^^^^^^^^^^^^^^^ - -The following predefined named repeat rules are available: - -- - -- <_c=s,d,c,z> - -- <_t=real, double precision, complex, double complex> - -- - -- - -- - -- - - -Other files ------------- - -Non-Fortran files use a separate syntax for defining template blocks -that should be repeated using a variable expansion similar to the -named repeat rules of the Fortran-specific repeats. - -NumPy Distutils preprocesses C source files (extension: :file:`.c.src`) written -in a custom templating language to generate C code. The ``@`` symbol is -used to wrap macro-style variables to empower a string substitution mechanism -that might describe (for instance) a set of data types. 
- -The template language blocks are delimited by ``/**begin repeat`` -and ``/**end repeat**/`` lines, which may also be nested using -consecutively numbered delimiting lines such as ``/**begin repeat1`` -and ``/**end repeat1**/``: - -1. ``/**begin repeat`` on a line by itself marks the beginning of - a segment that should be repeated. - -2. Named variable expansions are defined using ``#name=item1, item2, item3, - ..., itemN#`` and placed on successive lines. These variables are - replaced in each repeat block with corresponding word. All named - variables in the same repeat block must define the same number of - words. - -3. In specifying the repeat rule for a named variable, ``item*N`` is short- - hand for ``item, item, ..., item`` repeated N times. In addition, - parenthesis in combination with ``*N`` can be used for grouping several - items that should be repeated. Thus, ``#name=(item1, item2)*4#`` is - equivalent to ``#name=item1, item2, item1, item2, item1, item2, item1, - item2#``. - -4. ``*/`` on a line by itself marks the end of the variable expansion - naming. The next line is the first line that will be repeated using - the named rules. - -5. Inside the block to be repeated, the variables that should be expanded - are specified as ``@name@``. - -6. ``/**end repeat**/`` on a line by itself marks the previous line - as the last line of the block to be repeated. - -7. A loop in the NumPy C source code may have a ``@TYPE@`` variable, targeted - for string substitution, which is preprocessed to a number of otherwise - identical loops with several strings such as ``INT``, ``LONG``, ``UINT``, - ``ULONG``. The ``@TYPE@`` style syntax thus reduces code duplication and - maintenance burden by mimicking languages that have generic type support. - -The above rules may be clearer in the following template source example: - -.. 
code-block:: NumPyC - :linenos: - :emphasize-lines: 3, 13, 29, 31 - - /* TIMEDELTA to non-float types */ - - /**begin repeat - * - * #TOTYPE = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG, DATETIME, - * TIMEDELTA# - * #totype = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint, - * npy_long, npy_ulong, npy_longlong, npy_ulonglong, - * npy_datetime, npy_timedelta# - */ - - /**begin repeat1 - * - * #FROMTYPE = TIMEDELTA# - * #fromtype = npy_timedelta# - */ - static void - @FROMTYPE@_to_@TOTYPE@(void *input, void *output, npy_intp n, - void *NPY_UNUSED(aip), void *NPY_UNUSED(aop)) - { - const @fromtype@ *ip = input; - @totype@ *op = output; - - while (n--) { - *op++ = (@totype@)*ip++; - } - } - /**end repeat1**/ - - /**end repeat**/ - -The preprocessing of generically-typed C source files (whether in NumPy -proper or in any third party package using NumPy Distutils) is performed -by `conv_template.py`_. -The type-specific C files generated (extension: ``.c``) -by these modules during the build process are ready to be compiled. This -form of generic typing is also supported for C header files (preprocessed -to produce ``.h`` files). - -.. _conv_template.py: https://github.com/numpy/numpy/blob/main/numpy/distutils/conv_template.py - -Useful functions in ``numpy.distutils.misc_util`` -------------------------------------------------- - -+ ``get_numpy_include_dirs()`` --- return a list of NumPy base - include directories. NumPy base include directories contain - header files such as ``numpy/arrayobject.h``, ``numpy/funcobject.h`` - etc. For installed NumPy the returned list has length 1 - but when building NumPy the list may contain more directories, - for example, a path to ``config.h`` file that - ``numpy/base/setup.py`` file generates and is used by ``numpy`` - header files. - -+ ``append_path(prefix,path)`` --- smart append ``path`` to ``prefix``. 
- -+ ``gpaths(paths, local_path='')`` --- apply glob to paths and prepend - ``local_path`` if needed. - -+ ``njoin(*path)`` --- join pathname components + convert ``/``-separated path - to ``os.sep``-separated path and resolve ``..``, ``.`` from paths. - Ex. ``njoin('a',['b','./c'],'..','g') -> os.path.join('a','b','g')``. - -+ ``minrelpath(path)`` --- resolves dots in ``path``. - -+ ``rel_path(path, parent_path)`` --- return ``path`` relative to ``parent_path``. - -+ ``def get_cmd(cmdname,_cache={})`` --- returns ``numpy.distutils`` - command instance. - -+ ``all_strings(lst)`` - -+ ``has_f_sources(sources)`` - -+ ``has_cxx_sources(sources)`` - -+ ``filter_sources(sources)`` --- return ``c_sources, cxx_sources, - f_sources, fmodule_sources`` - -+ ``get_dependencies(sources)`` - -+ ``is_local_src_dir(directory)`` - -+ ``get_ext_source_files(ext)`` - -+ ``get_script_files(scripts)`` - -+ ``get_lib_source_files(lib)`` - -+ ``get_data_files(data)`` - -+ ``dot_join(*args)`` --- join non-zero arguments with a dot. - -+ ``get_frame(level=0)`` --- return frame object from call stack with given level. - -+ ``cyg2win32(path)`` - -+ ``mingw32()`` --- return ``True`` when using mingw32 environment. - -+ ``terminal_has_colors()``, ``red_text(s)``, ``green_text(s)``, - ``yellow_text(s)``, ``blue_text(s)``, ``cyan_text(s)`` - -+ ``get_path(mod_name,parent_path=None)`` --- return path of a module - relative to parent_path when given. Handles also ``__main__`` and - ``__builtin__`` modules. - -+ ``allpath(name)`` --- replaces ``/`` with ``os.sep`` in ``name``. 
- -+ ``cxx_ext_match``, ``fortran_ext_match``, ``f90_ext_match``, - ``f90_module_name_match`` - -``numpy.distutils.system_info`` module --------------------------------------- - -+ ``get_info(name,notfound_action=0)`` -+ ``combine_paths(*args,**kws)`` -+ ``show_all()`` - -``numpy.distutils.cpuinfo`` module ----------------------------------- - -+ ``cpuinfo`` - -``numpy.distutils.log`` module ------------------------------- - -+ ``set_verbosity(v)`` - - -``numpy.distutils.exec_command`` module ---------------------------------------- - -+ ``get_pythonexe()`` -+ ``find_executable(exe, path=None)`` -+ ``exec_command( command, execute_in='', use_shell=None, use_tee=None, **env )`` - -The ``__init__.py`` file -'''''''''''''''''''''''' - -The header of a typical SciPy ``__init__.py`` is:: - - """ - Package docstring, typically with a brief description and function listing. - """ - - # import functions into module namespace - from .subpackage import * - ... - - __all__ = [s for s in dir() if not s.startswith('_')] - - from numpy.testing import Tester - test = Tester().test - bench = Tester().bench - -Extra features in NumPy Distutils -''''''''''''''''''''''''''''''''' - -Specifying config_fc options for libraries in setup.py script -------------------------------------------------------------- - -It is possible to specify config_fc options in setup.py scripts. -For example, using:: - - config.add_library('library', - sources=[...], - config_fc={'noopt':(__file__,1)}) - -will compile the ``library`` sources without optimization flags. - -It's recommended to specify only those config_fc options in such a way -that are compiler independent. - -Getting extra Fortran 77 compiler options from source ------------------------------------------------------ - -Some old Fortran codes need special compiler options in order to -work correctly. 
In order to specify compiler options per source -file, ``numpy.distutils`` Fortran compiler looks for the following -pattern:: - - CF77FLAGS() = - -in the first 20 lines of the source and use the ``f77flags`` for -specified type of the fcompiler (the first character ``C`` is optional). - -TODO: This feature can be easily extended for Fortran 90 codes as -well. Let us know if you would need such a feature. diff --git a/doc/HOWTO_RELEASE.rst b/doc/HOWTO_RELEASE.rst index 850e0a9344e9..d756a75a6bce 100644 --- a/doc/HOWTO_RELEASE.rst +++ b/doc/HOWTO_RELEASE.rst @@ -4,147 +4,113 @@ releases for NumPy. Current build and release info ============================== -Useful info can be found in the following locations: - -* **Source tree** - - - `INSTALL.rst `_ - - `pavement.py `_ - -* **NumPy docs** - - - https://github.com/numpy/numpy/blob/main/doc/HOWTO_RELEASE.rst - - https://github.com/numpy/numpy/blob/main/doc/RELEASE_WALKTHROUGH.rst - - https://github.com/numpy/numpy/blob/main/doc/BRANCH_WALKTHROUGH.rst - -* **Release scripts** - - - https://github.com/numpy/numpy-vendor +Useful info can be found in `building-from-source` in the docs as well as in +these three files: +- `HOWTO_RELEASE.rst `_ +- `RELEASE_WALKTHROUGH.rst `_ +- `BRANCH_WALKTHROUGH.rst `_ Supported platforms and versions ================================ -:ref:`NEP 29 ` outlines which Python versions -are supported; For the first half of 2020, this will be Python >= 3.6. We test -NumPy against all these versions every time we merge code to main. Binary -installers may be available for a subset of these versions (see below). +:ref:`NEP 29 ` outlines which Python versions are supported *at a +minimum*. We usually decide to keep support for a given Python version slightly +longer than that minimum, to avoid giving other projects issues - this is at +the discretion of the release manager. -* **OS X** +* **macOS** - OS X versions >= 10.9 are supported, for Python version support see - :ref:`NEP 29 `. 
We build binary wheels for OSX that are compatible with - Python.org Python, system Python, homebrew and macports - see this - `OSX wheel building summary `_ - for details. + We aim to support the same set of macOS versions as are supported by + Python.org and `cibuildwheel`_ for any given Python version. + We build binary wheels for macOS that are compatible with common Python + installation methods, e.g., from python.org, ``python-build-standalone`` (the + ones ``uv`` installs), system Python, conda-forge, Homebrew and MacPorts. * **Windows** We build 32- and 64-bit wheels on Windows. Windows 7, 8 and 10 are supported. - We build NumPy using the `mingw-w64 toolchain`_, `cibuildwheels`_ and GitHub - actions. + We build NumPy using the most convenient compilers, which are (as of Aug + 2025) MSVC for x86/x86-64 and Clang-cl for arm64, `cibuildwheel`_ and GitHub + Actions. -.. _cibuildwheels: https://cibuildwheel.readthedocs.io/en/stable/ +.. _cibuildwheel: https://cibuildwheel.readthedocs.io/en/stable/ * **Linux** - We build and ship `manylinux2014 `_ - wheels for NumPy. Many Linux distributions include their own binary builds - of NumPy. + We build and ship ``manylinux`` and ``musllinux`` wheels for x86-64 and + aarch64 platforms on PyPI. Wheels for 32-bit platforms are not currently + provided. We aim to support the lowest non-EOL versions, and upgrade roughly + in sync with `cibuildwheel`_. See + `pypa/manylinux `__ and + `this distro compatibility table `__ + for more details. -* **BSD / Solaris** +* **BSD / Solaris / AIX** - No binaries are provided, but successful builds on Solaris and BSD have been - reported. + No binary wheels are provided on PyPI, however we expect building from source + on these platforms to work fine. -Tool chain +Toolchains ========== -We build all our wheels on cloud infrastructure - so this list of compilers is -for information and debugging builds locally. 
See the ``.travis.yml`` script -in the `numpy wheels`_ repo for an outdated source of the build recipes using -multibuild. - -.. _numpy wheels : https://github.com/MacPython/numpy-wheels - -Compilers ---------- -The same gcc version is used as the one with which Python itself is built on -each platform. At the moment this means: - -- OS X builds on travis currently use `clang`. It appears that binary wheels - for OSX >= 10.6 can be safely built from the travis-ci OSX 10.9 VMs - when building against the Python from the Python.org installers; -- Windows builds use the `mingw-w64 toolchain`_; -- Manylinux2014 wheels use the gcc provided on the Manylinux docker images. - -You will need Cython for building the binaries. Cython compiles the ``.pyx`` -files in the NumPy distribution to ``.c`` files. - -.. _mingw-w64 toolchain : https://mingwpy.github.io +For building wheels, we use the following toolchains: + +- Linux: we use the default compilers in the ``manylinux``/``musllinux`` Docker + images, which is usually a relatively recent GCC version. +- macOS: we use the Apple Clang compilers and XCode version installed on the + GitHub Actions runner image. +- Windows: for x86 and x86-64 we use the default MSVC and Visual Studio + toolchain installed on the relevant GitHub actions runner image. Note that in + the past it has sometimes been necessary to use an older toolchain to avoid + causing problems through the static ``libnpymath`` library for SciPy - please + inspect the `numpy/numpy-release `__ + code and CI logs in case the exact version numbers need to be determined. + +For building from source, minimum compiler versions are tracked in the top-level +``meson.build`` file. OpenBLAS -------- -All the wheels link to a version of OpenBLAS_ supplied via the openblas-libs_ repo. -The shared object (or DLL) is shipped with in the wheel, renamed to prevent name +Most wheels link to a version of OpenBLAS_ supplied via the openblas-libs_ repo. 
+The shared object (or DLL) is shipped within the wheel, renamed to prevent name collisions with other OpenBLAS shared objects that may exist in the filesystem. -.. _OpenBLAS: https://github.com/xianyi/OpenBLAS +.. _OpenBLAS: https://github.com/OpenMathLib/OpenBLAS .. _openblas-libs: https://github.com/MacPython/openblas-libs - -Building source archives and wheels ------------------------------------ -The NumPy wheels and sdist are now built using cibuildwheel with -github actions. - - Building docs ------------- -We are no longer building ``PDF`` files. All that will be needed is - -- virtualenv (pip). - -The other requirements will be filled automatically during the documentation -build process. - +We are no longer building ``pdf`` files. The requirements for building the +``html`` docs are no different than for regular development. See the README of +the `numpy/doc `__ repository and the step by +step instructions in ``doc/RELEASE_WALKTHROUGH.rst`` for more details. Uploading to PyPI ----------------- -The only application needed for uploading is - -- twine (pip). - -You will also need a PyPI token, which is best kept on a keyring. See the -twine keyring_ documentation for how to do that. - -.. _keyring: https://twine.readthedocs.io/en/stable/#keyring-support - +Creating a release on PyPI and uploading wheels and sdist is automated in CI +and uses `PyPI's trusted publishing `__. +See the README in the `numpy/numpy-release `__ +repository and the step by step instructions in ``doc/RELEASE_WALKTHROUGH.rst`` +for more details. Generating author/PR lists -------------------------- You will need a personal access token ``_ -so that scripts can access the github NumPy repository. - -- gitpython (pip) -- pygithub (pip) +so that scripts can access the GitHub NumPy repository. With that token, the +author/PR changelog content can be generated by running ``spin changelog``. It +may require a few extra packages, like ``gitpython`` and ``pygithub``. 
What is released ================ -* **Wheels** - We currently support Python 3.8-3.10 on Windows, OSX, and Linux. - - * Windows: 32-bit and 64-bit wheels built using Github actions; - * OSX: x64_86 and arm64 OSX wheels built using Github actions; - * Linux: x64_86 and aarch64 Manylinux2014 wheels built using Github actions. +On PyPI we release wheels for a number of platforms (as discussed higher up), +and an sdist. -* **Other** - Release notes and changelog - -* **Source distribution** - We build source releases in the .tar.gz format. +On GitHub Releases we release the same sdist (because the source archives which +are autogenerated by GitHub itself aren't complete), as well as the release +notes and changelog. Release process @@ -152,30 +118,11 @@ Release process Agree on a release schedule --------------------------- -A typical release schedule is one beta, two release candidates and a final -release. It's best to discuss the timing on the mailing list first, in order -for people to get their commits in on time, get doc wiki edits merged, etc. -After a date is set, create a new maintenance/x.y.z branch, add new empty -release notes for the next version in the main branch and update the Trac -Milestones. - - -Make sure current branch builds a package correctly ---------------------------------------------------- -The CI builds wheels when a PR header begins with ``REL``. Your last -PR before releasing should be so marked and all the tests should pass. -You can also do:: - - git clean -fxdq - python setup.py bdist_wheel - python setup.py sdist - -For details of the build process itself, it is best to read the -Step-by-Step Directions below. - -.. note:: The following steps are repeated for the beta(s), release - candidates(s) and the final release. - +A typical release schedule for a feature release is two release candidates and +a final release. It's best to discuss the timing on the mailing list first, in +order for people to get their commits in on time. 
After a date is set, create a +new ``maintenance/x.y.z`` branch, add new empty release notes for the next version +in the main branch and update the Milestones on the issue tracker. Check deprecations ------------------ diff --git a/doc/Makefile b/doc/Makefile index eccd40b1adef..e6e0689481ca 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -17,7 +17,7 @@ PAPER ?= DOXYGEN ?= doxygen # For merging a documentation archive into a git checkout of numpy/doc # Turn a tag like v1.18.0 into 1.18 -# Use sed -n -e 's/patttern/match/p' to return a blank value if no match +# Use sed -n -e 's/pattern/match/p' to return a blank value if no match TAG ?= $(shell git describe --tag | sed -n -e's,v\([1-9]\.[0-9]*\)\.[0-9].*,\1,p') FILES= @@ -39,7 +39,7 @@ help: @echo " clean to remove generated doc files and start fresh" @echo " docenv make a virtual environment in which to build docs" @echo " html to make standalone HTML files" - @echo " htmlhelp to make HTML files and a HTML help project" + @echo " htmlhelp to make HTML files and an HTML help project" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " changes to make an overview over all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @@ -50,6 +50,8 @@ help: clean: -rm -rf build/* + -rm -rf source/.jupyterlite.doit.db + -rm -rf source/contents/*.ipynb find . -name generated -type d -prune -exec rm -rf "{}" ";" gitwash-update: @@ -118,12 +120,14 @@ endif tar -C build/merge/$(TAG) -xf build/dist.tar.gz git -C build/merge add $(TAG) @# For now, the user must do this. If it is onerous, automate it and change - @# the instructions in doc/HOWTO_RELEASE.rst + @# the instructions in doc/RELEASE_WALKTHROUGH.rst @echo " " @echo New documentation archive added to ./build/merge. 
@echo Now add/modify the appropriate section after @echo " " @echo in build/merge/index.html, + @echo change _static/versions.json, + @echo and run \"python3 update.py\" @echo then \"git commit\", \"git push\" diff --git a/doc/Py3K.rst b/doc/Py3K.rst deleted file mode 100644 index 3f312f7ec53a..000000000000 --- a/doc/Py3K.rst +++ /dev/null @@ -1,903 +0,0 @@ -.. -*-rst-*- - -********************************************* -Developer notes on the transition to Python 3 -********************************************* - -:date: 2010-07-11 -:author: Charles R. Harris -:author: Pauli Virtanen - -General -======= - -NumPy has now been ported to Python 3. - -Some glitches may still be present; however, we are not aware of any -significant ones, the test suite passes. - - -Resources ---------- - -Information on porting to 3K: - -- https://wiki.python.org/moin/cporting -- https://wiki.python.org/moin/PortingExtensionModulesToPy3k - - -Prerequisites -------------- - -The Nose test framework has currently (Nov 2009) no released Python 3 -compatible version. Its 3K SVN branch, however, works quite well: - -- http://python-nose.googlecode.com/svn/branches/py3k - - -Known semantic changes on Py2 -============================= - -As a side effect, the Py3 adaptation has caused the following semantic -changes that are visible on Py2. - -* Objects (except bytes and str) that implement the PEP 3118 array interface - will behave as ndarrays in `array(...)` and `asarray(...)`; the same way - as if they had ``__array_interface__`` defined. - -* Otherwise, there are no known semantic changes. - - -Known semantic changes on Py3 -============================= - -The following semantic changes have been made on Py3: - -* Division: integer division is by default true_divide, also for arrays. - -* Dtype field names are Unicode. - -* Only unicode dtype field titles are included in fields dict. 
- -* :pep:`3118` buffer objects will behave differently from Py2 buffer objects - when used as an argument to `array(...)`, `asarray(...)`. - - In Py2, they would cast to an object array. - - In Py3, they cast similarly as objects having an - ``__array_interface__`` attribute, ie., they behave as if they were - an ndarray view on the data. - - - -Python code -=========== - - -2to3 in setup.py ----------------- - -Currently, setup.py calls 2to3 automatically to convert Python sources -to Python 3 ones, and stores the results under:: - - build/py3k - -Only changed files will be re-converted when setup.py is called a second -time, making development much faster. - -Currently, this seems to handle all of the necessary Python code -conversion. - -Not all of the 2to3 transformations are appropriate for all files. -Especially, 2to3 seems to be quite trigger-happy in replacing e.g. -``unicode`` by ``str`` which causes problems in ``defchararray.py``. -For files that need special handling, add entries to -``tools/py3tool.py``. - - - -numpy.compat.py3k ------------------ - -There are some utility functions needed for 3K compatibility in -``numpy.compat.py3k`` -- they can be imported from ``numpy.compat``: - -- bytes, unicode: bytes and unicode constructors -- asbytes: convert string to bytes (no-op on Py2) -- asbytes_nested: convert strings in lists to Bytes -- asunicode: convert string to unicode -- asunicode_nested: convert strings in lists to Unicode -- asstr: convert item to the str type -- getexception: get current exception (see below) -- isfileobj: detect Python file objects -- strchar: character for Unicode (Py3) or Strings (Py2) -- open_latin1: open file in the latin1 text mode - -More can be added as needed. - - -numpy.f2py ----------- - -F2py is ported to Py3. - - -Bytes vs. strings ------------------ - -At many points in NumPy, bytes literals are needed. These can be created via -numpy.compat.asbytes and asbytes_nested. 
- - -Exception syntax ----------------- - -Syntax change: "except FooException, bar:" -> "except FooException as bar:" - -This is taken care by 2to3, however. - - -Relative imports ----------------- - -The new relative import syntax, - - from . import foo - -is not available on Py2.4, so we can't simply use it. - -Using absolute imports everywhere is probably OK, if they just happen -to work. - -2to3, however, converts the old syntax to new syntax, so as long as we -use the converter, it takes care of most parts. - - -Print ------ - -The Print statement changed to a builtin function in Py3. - -Also this is taken care of by 2to3. - -``types`` module ----------------- - -The following items were removed from `types` module in Py3: - -- StringType (Py3: `bytes` is equivalent, to some degree) -- InstanceType (Py3: ???) -- IntType (Py3: no equivalent) -- LongType (Py3: equivalent `long`) -- FloatType (Py3: equivalent `float`) -- BooleanType (Py3: equivalent `bool`) -- ComplexType (Py3: equivalent `complex`) -- UnicodeType (Py3: equivalent `str`) -- BufferType (Py3: more-or-less equivalent `memoryview`) - -In ``numerictypes.py``, the "common" types were replaced by their -plain equivalents, and `IntType` was dropped. - - -numpy._core.numerictypes ------------------------ - -In numerictypes, types on Python 3 were changed so that: - -=========== ============ -Scalar type Value -=========== ============ -str_ This is the basic Unicode string type on Py3 -bytes_ This is the basic Byte-string type on Py3 -string_ bytes_ alias -unicode_ str_ alias -=========== ============ - - -numpy.loadtxt et al -------------------- - -These routines are difficult to duck-type to read both Unicode and -Bytes input. - -I assumed they are meant for reading Bytes streams -- this is probably -the far more common use case with scientific data. - - -Cyclic imports --------------- - -Python 3 is less forgiving about cyclic imports than Python 2. 
Cycles -need to be broken to have the same code work both on Python 2 and 3. - - -C code -====== - - -NPY_PY3K --------- - -A #define in config.h, defined when building for Py3. - -.. todo:: - - Currently, this is generated as a part of the config. - Is this sensible (we could also use Py_VERSION_HEX)? - - -private/npy_3kcompat.h ----------------------- - -Convenience macros for Python 3 support: - -- PyInt -> PyLong on Py3 -- PyString -> PyBytes on Py3 -- PyUString -> PyUnicode on Py3 and PyString on Py2 -- PyBytes on Py2 -- PyUnicode_ConcatAndDel, PyUnicode_Concat2 -- Py_SIZE et al., for older Python versions -- npy_PyFile_Dup, etc. to get FILE* from Py3 file objects -- PyObject_Cmp, convenience comparison function on Py3 -- NpyCapsule_* helpers: PyCObject - -Any new ones that need to be added should be added in this file. - -.. todo:: - - Remove PyString_* eventually -- having a call to one of these in NumPy - sources is a sign of an error... - - -ob_type, ob_size ----------------- - -These use Py_SIZE, etc. macros now. The macros are also defined in -npy_3kcompat.h for the Python versions that don't have them natively. - - -Py_TPFLAGS_CHECKTYPES ---------------------- - -Python 3 no longer supports type coercion in arithmetic. - -Py_TPFLAGS_CHECKTYPES is now on by default, and so the C-level -interface, ``nb_*`` methods, still unconditionally receive whatever -types as their two arguments. - -However, this will affect Python-level code: previously if you -inherited from a Py_TPFLAGS_CHECKTYPES enabled class that implemented -a ``__mul__`` method, the same ``__mul__`` method would still be -called also as when a ``__rmul__`` was required, but with swapped -arguments (see Python/Objects/typeobject.c:wrap_binaryfunc_r). -However, on Python 3, arguments are swapped only if both are of same -(sub-)type, and otherwise things fail. 
- -This means that ``ndarray``-derived subclasses must now implement all -relevant ``__r*__`` methods, since they cannot any more automatically -fall back to ndarray code. - - -PyNumberMethods ---------------- - -The structures have been converted to the new format: - -- number.c -- scalartypes.c.src -- scalarmathmodule.c.src - -The slots np_divide, np_long, np_oct, np_hex, and np_inplace_divide -have gone away. The slot np_int is what np_long used to be, tp_divide -is now tp_floor_divide, and np_inplace_divide is now -np_inplace_floor_divide. - -These have simply been #ifdef'd out on Py3. - -The Py2/Py3 compatible structure definition looks like:: - - static PyNumberMethods @name@_as_number = { - (binaryfunc)0, /*nb_add*/ - (binaryfunc)0, /*nb_subtract*/ - (binaryfunc)0, /*nb_multiply*/ - #if defined(NPY_PY3K) - #else - (binaryfunc)0, /*nb_divide*/ - #endif - (binaryfunc)0, /*nb_remainder*/ - (binaryfunc)0, /*nb_divmod*/ - (ternaryfunc)0, /*nb_power*/ - (unaryfunc)0, - (unaryfunc)0, /*nb_pos*/ - (unaryfunc)0, /*nb_abs*/ - #if defined(NPY_PY3K) - (inquiry)0, /*nb_bool*/ - #else - (inquiry)0, /*nb_nonzero*/ - #endif - (unaryfunc)0, /*nb_invert*/ - (binaryfunc)0, /*nb_lshift*/ - (binaryfunc)0, /*nb_rshift*/ - (binaryfunc)0, /*nb_and*/ - (binaryfunc)0, /*nb_xor*/ - (binaryfunc)0, /*nb_or*/ - #if defined(NPY_PY3K) - #else - 0, /*nb_coerce*/ - #endif - (unaryfunc)0, /*nb_int*/ - #if defined(NPY_PY3K) - (unaryfunc)0, /*nb_reserved*/ - #else - (unaryfunc)0, /*nb_long*/ - #endif - (unaryfunc)0, /*nb_float*/ - #if defined(NPY_PY3K) - #else - (unaryfunc)0, /*nb_oct*/ - (unaryfunc)0, /*nb_hex*/ - #endif - 0, /*inplace_add*/ - 0, /*inplace_subtract*/ - 0, /*inplace_multiply*/ - #if defined(NPY_PY3K) - #else - 0, /*inplace_divide*/ - #endif - 0, /*inplace_remainder*/ - 0, /*inplace_power*/ - 0, /*inplace_lshift*/ - 0, /*inplace_rshift*/ - 0, /*inplace_and*/ - 0, /*inplace_xor*/ - 0, /*inplace_or*/ - (binaryfunc)0, /*nb_floor_divide*/ - (binaryfunc)0, /*nb_true_divide*/ - 0, 
/*nb_inplace_floor_divide*/ - 0, /*nb_inplace_true_divide*/ - (unaryfunc)NULL, /*nb_index*/ - }; - - - -PyBuffer (provider) -------------------- - -PyBuffer usage is widely spread in multiarray: - -1) The void scalar makes use of buffers -2) Multiarray has methods for creating buffers etc. explicitly -3) Arrays can be created from buffers etc. -4) The .data attribute of an array is a buffer - -Py3 introduces the PEP 3118 buffer protocol as the *only* protocol, -so we must implement it. - -The exporter parts of the PEP 3118 buffer protocol are currently -implemented in ``buffer.c`` for arrays, and in ``scalartypes.c.src`` -for generic array scalars. The generic array scalar exporter, however, -doesn't currently produce format strings, which needs to be fixed. - -Also some code also stops working when ``bf_releasebuffer`` is -defined. Most importantly, ``PyArg_ParseTuple("s#", ...)`` refuses to -return a buffer if ``bf_releasebuffer`` is present. For this reason, -the buffer interface for arrays is implemented currently *without* -defining ``bf_releasebuffer`` at all. This forces us to go through -some additional work. - -There are a couple of places that need further attention: - -- VOID_getitem - - In some cases, this returns a buffer object on Python 2. On Python 3, - there is no stand-alone buffer object, so we return a byte array instead. 
- -The Py2/Py3 compatible PyBufferMethods definition looks like:: - - NPY_NO_EXPORT PyBufferProcs array_as_buffer = { - #if !defined(NPY_PY3K) - #if PY_VERSION_HEX >= 0x02050000 - (readbufferproc)array_getreadbuf, /*bf_getreadbuffer*/ - (writebufferproc)array_getwritebuf, /*bf_getwritebuffer*/ - (segcountproc)array_getsegcount, /*bf_getsegcount*/ - (charbufferproc)array_getcharbuf, /*bf_getcharbuffer*/ - #else - (getreadbufferproc)array_getreadbuf, /*bf_getreadbuffer*/ - (getwritebufferproc)array_getwritebuf, /*bf_getwritebuffer*/ - (getsegcountproc)array_getsegcount, /*bf_getsegcount*/ - (getcharbufferproc)array_getcharbuf, /*bf_getcharbuffer*/ - #endif - #endif - #if PY_VERSION_HEX >= 0x02060000 - (getbufferproc)array_getbuffer, /*bf_getbuffer*/ - (releasebufferproc)array_releasebuffer, /*bf_releasebuffer*/ - #endif - }; - -.. todo:: - - Produce PEP 3118 format strings for array scalar objects. - -.. todo:: - - There's stuff to clean up in numarray/_capi.c - - -PyBuffer (consumer) -------------------- - -There are two places in which we may want to be able to consume buffer -objects and cast them to ndarrays: - -1) `multiarray.frombuffer`, ie., ``PyArray_FromAny`` - - The frombuffer returns only arrays of a fixed dtype. It does not - make sense to support PEP 3118 at this location, since not much - would be gained from that -- the backward compatibility functions - using the old array interface still work. - - So no changes needed here. - -2) `multiarray.array`, ie., ``PyArray_FromAny`` - - In general, we would like to handle :pep:`3118` buffers in the same way - as ``__array_interface__`` objects. Hence, we want to be able to cast - them to arrays already in ``PyArray_FromAny``. - - Hence, ``PyArray_FromAny`` needs additions. - -There are a few caveats in allowing :pep:`3118` buffers in -``PyArray_FromAny``: - -a) `bytes` (and `str` on Py2) objects offer a buffer interface that - specifies them as 1-D array of bytes. 
- - Previously ``PyArray_FromAny`` has cast these to 'S#' dtypes. We - don't want to change this, since will cause problems in many places. - - We do, however, want to allow other objects that provide 1-D byte arrays - to be cast to 1-D ndarrays and not 'S#' arrays -- for instance, 'S#' - arrays tend to strip trailing NUL characters. - -So what is done in ``PyArray_FromAny`` currently is that: - -- Presence of :pep:`3118` buffer interface is checked before checking - for array interface. If it is present *and* the object is not - `bytes` object, then it is used for creating a view on the buffer. - -- We also check in ``discover_depth`` and ``_array_find_type`` for the - 3118 buffers, so that:: - - array([some_3118_object]) - - will treat the object similarly as it would handle an `ndarray`. - - However, again, bytes (and unicode) have priority and will not be - handled as buffer objects. - -This amounts to possible semantic changes: - -- ``array(buffer)`` will no longer create an object array - ``array([buffer], dtype='O')``, but will instead expand to a view - on the buffer. - -.. todo:: - - Take a second look at places that used PyBuffer_FromMemory and - PyBuffer_FromReadWriteMemory -- what can be done with these? - -.. todo:: - - There's some buffer code in numarray/_capi.c that needs to be addressed. - - -PyBuffer (object) ------------------ - -Since there is a native buffer object in Py3, the `memoryview`, the -`newbuffer` and `getbuffer` functions are removed from `multiarray` in -Py3: their functionality is taken over by the new `memoryview` object. - - -PyString --------- - -There is no PyString in Py3, everything is either Bytes or Unicode. -Unicode is also preferred in many places, e.g., in __dict__. - -There are two issues related to the str/bytes change: - -1) Return values etc. should prefer unicode -2) The 'S' dtype - -This entry discusses return values etc. only, the 'S' dtype is a -separate topic. 
- -All uses of PyString in NumPy should be changed to one of - -- PyBytes: one-byte character strings in Py2 and Py3 -- PyUString (defined in npy_3kconfig.h): PyString in Py2, PyUnicode in Py3 -- PyUnicode: UCS in Py2 and Py3 - -In many cases the conversion only entails replacing PyString with -PyUString. - -PyString is currently defined to PyBytes in npy_3kcompat.h, for making -things to build. This definition will be removed when Py3 support is -finished. - -Where ``*_AsStringAndSize`` is used, more care needs to be taken, as -encoding Unicode to Bytes may needed. If this cannot be avoided, the -encoding should be ASCII, unless there is a very strong reason to do -otherwise. Especially, I don't believe we should silently fall back to -UTF-8 -- raising an exception may be a better choice. - -Exceptions should use PyUnicode_AsUnicodeEscape -- this should result -to an ASCII-clean string that is appropriate for the exception -message. - -Some specific decisions that have been made so far: - -* descriptor.c: dtype field names are UString - - At some places in NumPy code, there are some guards for Unicode field - names. However, the dtype constructor accepts only strings as field names, - so we should assume field names are *always* UString. - -* descriptor.c: field titles can be arbitrary objects. - If they are UString (or, on Py2, Bytes or Unicode), insert to fields dict. - -* descriptor.c: dtype strings are Unicode. - -* descriptor.c: datetime tuple contains Bytes only. - -* repr() and str() should return UString - -* comparison between Unicode and Bytes is not defined in Py3 - -* Type codes in numerictypes.typeInfo dict are Unicode - -* Func name in errobj is Bytes (should be forced to ASCII) - -.. todo:: - - tp_doc -- it's a char* pointer, but what is the encoding? - Check esp. lib/src/_compiled_base - - Currently, UTF-8 is assumed. - -.. todo:: - - ufunc names -- again, what's the encoding? - -.. 
todo:: - - Cleanup to do later on: Replace all occurrences of PyString by - PyBytes, PyUnicode, or PyUString. - -.. todo:: - - Revise errobj decision? - -.. todo:: - - Check that non-UString field names are not accepted anywhere. - - -PyUnicode ---------- - -PyUnicode in Py3 is pretty much as it was in Py2, except that it is -now the only "real" string type. - -In Py3, Unicode and Bytes are not comparable, ie., 'a' != b'a'. NumPy -comparison routines were handled to act in the same way, leaving -comparison between Unicode and Bytes undefined. - -.. todo:: - - Check that indeed all comparison routines were changed. - - -Fate of the 'S' dtype ---------------------- - -On Python 3, the 'S' dtype will still be Bytes. - -However,:: - - str, str_ == unicode_ - - -PyInt ------ - -There is no limited-range integer type any more in Py3. It makes no -sense to inherit NumPy ints from Py3 ints. - -Currently, the following is done: - -1) NumPy's integer types no longer inherit from Python integer. -2) int is taken dtype-equivalent to NPY_LONG -3) ints are converted to NPY_LONG - -PyInt methods are currently replaced by PyLong, via macros in npy_3kcompat.h. - -Dtype decision rules were changed accordingly, so that NumPy understands -Py3 int translate to NPY_LONG as far as dtypes are concerned. - -array([1]).dtype will be the default NPY_LONG integer. - -.. todo:: - - Not inheriting from `int` on Python 3 makes the following not work: - ``np.intp("0xff", 16)`` -- because the NumPy type does not take - the second argument. This could perhaps be fixed... - - -Divide ------- - -The Divide operation is no more. - -Calls to PyNumber_Divide were replaced by FloorDivide or TrueDivide, -as appropriate. - -The PyNumberMethods entry is #ifdef'd out on Py3, see above. - - -tp_compare, PyObject_Compare ----------------------------- - -The compare method has vanished, and is replaced with richcompare. -We just #ifdef the compare methods out on Py3. 
- -New richcompare methods were implemented for: - -* flagsobject.c - -On the consumer side, we have a convenience wrapper in npy_3kcompat.h -providing PyObject_Cmp also on Py3. - - -Pickling --------- - -The ndarray and dtype __setstate__ were modified to be -backward-compatible with Py3: they need to accept a Unicode endian -character, and Unicode data since that's what Py2 str is unpickled to -in Py3. - -An encoding assumption is required for backward compatibility: the user -must do - - loads(f, encoding='latin1') - -to successfully read pickles created by Py2. - -.. todo:: - - Forward compatibility? Is it even possible? - For sure, we are not knowingly going to store data in PyUnicode, - so probably the only way for forward compatibility is to implement - a custom Unpickler for Py2? - -.. todo:: - - If forward compatibility is not possible, aim to store also the endian - character as Bytes... - - -Module initialization ---------------------- - -The module initialization API changed in Python 3.1. - -Most NumPy modules are now converted. - - -PyTypeObject ------------- - -The PyTypeObject of py3k is binary compatible with the py2k version and the -old initializers should work. However, there are several considerations to -keep in mind. - -1) Because the first three slots are now part of a struct some compilers issue - warnings if they are initialized in the old way. - -2) The compare slot has been made reserved in order to preserve binary - compatibility while the tp_compare function went away. The tp_richcompare - function has replaced it and we need to use that slot instead. This will - likely require modifications in the searchsorted functions and generic sorts - that currently use the compare function. - -3) The previous numpy practice of initializing the COUNT_ALLOCS slots was - bogus. They are not supposed to be explicitly initialized and were out of - place in any case because an extra base slot was added in python 2.6. 
- -Because of these facts it is better to use #ifdefs to bring the old -initializers up to py3k snuff rather than just fill the tp_richcompare -slot. They also serve to mark the places where changes have been -made. Note that explicit initialization can stop once none of the -remaining entries are non-zero, because zero is the default value that -variables with non-local linkage receive. - -The Py2/Py3 compatible TypeObject definition looks like:: - - NPY_NO_EXPORT PyTypeObject Foo_Type = { - #if defined(NPY_PY3K) - PyVarObject_HEAD_INIT(0,0) - #else - PyObject_HEAD_INIT(0) - 0, /* ob_size */ - #endif - "numpy.foo" /* tp_name */ - 0, /* tp_basicsize */ - 0, /* tp_itemsize */ - /* methods */ - 0, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - #if defined(NPY_PY3K) - (void *)0, /* tp_reserved */ - #else - 0, /* tp_compare */ - #endif - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - 0, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ - 0 /* tp_version_tag (2.6) */ - }; - - - -PySequenceMethods ------------------ - -Types with tp_as_sequence defined - -* multiarray/descriptor.c -* multiarray/scalartypes.c.src -* multiarray/arrayobject.c - -PySequenceMethods in py3k are binary compatible with py2k, but some of the -slots have gone away. 
I suspect this means some functions need redefining so -the semantics of the slots needs to be checked:: - - PySequenceMethods foo_sequence_methods = { - (lenfunc)0, /* sq_length */ - (binaryfunc)0, /* sq_concat */ - (ssizeargfunc)0, /* sq_repeat */ - (ssizeargfunc)0, /* sq_item */ - (void *)0, /* nee sq_slice */ - (ssizeobjargproc)0, /* sq_ass_item */ - (void *)0, /* nee sq_ass_slice */ - (objobjproc)0, /* sq_contains */ - (binaryfunc)0, /* sq_inplace_concat */ - (ssizeargfunc)0 /* sq_inplace_repeat */ - }; - - -PyMappingMethods ----------------- - -Types with tp_as_mapping defined - -* multiarray/descriptor.c -* multiarray/iterators.c -* multiarray/scalartypes.c.src -* multiarray/flagsobject.c -* multiarray/arrayobject.c - -PyMappingMethods in py3k look to be the same as in py2k. The semantics -of the slots needs to be checked:: - - PyMappingMethods foo_mapping_methods = { - (lenfunc)0, /* mp_length */ - (binaryfunc)0, /* mp_subscript */ - (objobjargproc)0 /* mp_ass_subscript */ - }; - - -PyFile ------- - -Many of the PyFile items have disappeared: - -1) PyFile_Type -2) PyFile_AsFile -3) PyFile_FromString - -Most importantly, in Py3 there is no way to extract a FILE* pointer -from the Python file object. There are, however, new PyFile_* functions -for writing and reading data from the file. - -Compatibility wrappers that return a dup-ed `fdopen` file pointer are -in private/npy_3kcompat.h. This causes more flushing to be necessary, -but it appears there is no alternative solution. The FILE pointer so -obtained must be closed with fclose after use. - -.. todo:: - - Should probably be done much later on... - - Adapt all NumPy I/O to use the PyFile_* methods or the low-level - IO routines. In any case, it's unlikely that C stdio can be used any more. - - Perhaps using PyFile_* makes numpy.tofile e.g. to a gzip to work? - - -READONLY --------- - -The RO alias for READONLY is no more. - -These were replaced, as READONLY is present also on Py2. 
- - -PyOS ----- - -Deprecations: - -1) PyOS_ascii_strtod -> PyOS_double_from_string; - curiously enough, PyOS_ascii_strtod is not only deprecated but also - causes segfaults - - -PyInstance ----------- - -There are some checks for PyInstance in ``common.c`` and ``ctors.c``. - -Currently, ``PyInstance_Check`` is just #ifdef'd out for Py3. This is, -possibly, not the correct thing to do. - -.. todo:: - - Do the right thing for PyInstance checks. - - -PyCObject / PyCapsule ---------------------- - -The PyCObject API is removed in Python 3.2, so we need to rewrite it -using PyCapsule. - -NumPy was changed to use the Capsule API, using NpyCapsule* wrappers. diff --git a/doc/RELEASE_WALKTHROUGH.rst b/doc/RELEASE_WALKTHROUGH.rst index c82adf221057..2abc89fcd5aa 100644 --- a/doc/RELEASE_WALKTHROUGH.rst +++ b/doc/RELEASE_WALKTHROUGH.rst @@ -1,8 +1,9 @@ -This is a walkthrough of the NumPy 1.21.0 release on Linux, modified for -building with GitHub Actions and cibuildwheels and uploading to the -`anaconda.org staging repository for NumPy `_. -The commands can be copied into the command line, but be sure to replace 1.21.0 -by the correct version. This should be read together with the +This is a walkthrough of the NumPy 2.4.0 release on Linux, which will be the +first feature release using the `numpy/numpy-release +`__ repository. + +The commands can be copied into the command line, but be sure to replace 2.4.0 +with the correct version. This should be read together with the :ref:`general release guide `. Facility preparation @@ -16,8 +17,6 @@ documentation. There are a few ways to streamline things: - Git can be set up to use a keyring to store your GitHub personal access token. Search online for the details. -- You can use the ``keyring`` app to store the PyPI password for twine. See the - online twine documentation for details. 
Prior to release @@ -26,105 +25,121 @@ Prior to release Add/drop Python versions ------------------------ -When adding or dropping Python versions, three files need to be edited: - -- .github/workflows/wheels.yml # for github cibuildwheel -- .travis.yml # for cibuildwheel aarch64 builds -- setup.py # for classifier and minimum version check. - +When adding or dropping Python versions, multiple config and CI files need to +be edited in addition to changing the minimum version in ``pyproject.toml``. Make these changes in an ordinary PR against main and backport if necessary. -Using the `BLD:` prefix (build label) for the commit summary will cause the -wheel builds to be run so that the changes will be tested, We currently release -wheels for new Python versions after the first Python rc once manylinux and -cibuildwheel support it. For Python 3.11 we were able to release within a week -of the rc1 announcement. +We currently release wheels for new Python versions after the first Python RC +once manylinux and cibuildwheel support that new Python version. Backport pull requests ---------------------- Changes that have been marked for this release must be backported to the -maintenance/1.21.x branch. +maintenance/2.4.x branch. + + +Update 2.4.0 milestones +----------------------- + +Look at the issues/prs with 2.4.0 milestones and either push them off to a +later version, or maybe remove the milestone. You may need to add a milestone. + +Check the numpy-release repo +---------------------------- + +The things to check are the ``cibuildwheel`` version in +``.github/workflows/wheels.yml`` and the ``openblas`` versions in +``openblas_requirements.txt``. 
Make a release PR ================= -Five documents usually need to be updated or created for the release PR: +Four documents usually need to be updated or created for the release PR: - The changelog -- The release-notes +- The release notes - The ``.mailmap`` file - The ``pyproject.toml`` file -- The ``pyproject.toml.setuppy`` file # 1.26.x only These changes should be made in an ordinary PR against the maintenance branch. -The commit message should contain a ``[wheel build]`` directive to test if the -wheels build. Other small, miscellaneous fixes may be part of this PR. The -commit message might be something like:: +Other small, miscellaneous fixes may be part of this PR. The commit message +might be something like:: - REL: Prepare for the NumPy 1.20.0 release + REL: Prepare for the NumPy 2.4.0 release - - Create 1.20.0-changelog.rst. - - Update 1.20.0-notes.rst. + - Create 2.4.0-changelog.rst. + - Update 2.4.0-notes.rst. - Update .mailmap. - Update pyproject.toml - - Update pyproject.toml.setuppy - - [wheel build] -Generate the changelog ----------------------- - -The changelog is generated using the changelog tool:: +Set the release version +----------------------- - $ spin changelog $GITHUB v1.20.0..maintenance/1.21.x > doc/changelog/1.21.0-changelog.rst +Check the ``pyproject.toml`` file and set the release version and update the +classifier if needed:: -where ``GITHUB`` contains your GitHub access token. The text will need to be -checked for non-standard contributor names and dependabot entries removed. It -is also a good idea to remove any links that may be present in the PR titles -as they don't translate well to markdown, replace them with monospaced text. The -non-standard contributor names should be fixed by updating the ``.mailmap`` -file, which is a lot of work. It is best to make several trial runs before -reaching this point and ping the malefactors using a GitHub issue to get the -needed information. 
+ $ gvim pyproject.toml -Finish the release notes ------------------------- +Check the ``doc/source/release.rst`` file +----------------------------------------- -If there are any release notes snippets in ``doc/release/upcoming_changes/``, -run ``spin docs`` to build the docs, incorporate the contents of the generated -``doc/source/release/notes-towncrier.rst`` file into the release notes file -(e.g., ``doc/source/release/2.3.4-notes.rst``), and delete the now-processed -snippets in ``doc/release/upcoming_changes/``. This is safe to do multiple -times during a release cycle. +make sure that the release notes have an entry in the ``release.rst`` file:: -The generated release note will always need some fixups, the introduction will -need to be written, and significant changes should be called out. For patch -releases the changelog text may also be appended, but not for the initial -release as it is too long. Check previous release notes to see how this is -done. + $ gvim doc/source/release.rst -Set the release version ------------------------ +Generate the changelog +---------------------- -Check the ``pyproject.toml`` and ``pyproject.toml.setuppy`` files and set the -release version if needed:: +The changelog is generated using the changelog tool (``spin changelog``), +which collects merged pull requests and formats them into a release-ready +changelog:: - $ gvim pyproject.toml pyproject.toml.setuppy + $ spin changelog $GITHUB v2.3.0..maintenance/2.4.x > doc/changelog/2.4.0-changelog.rst +where ``GITHUB`` contains your GitHub access token. The text will need to be +checked for non-standard contributor names. It is also a good idea to remove +any links that may be present in the PR titles as they don't translate well to +Markdown, replace them with monospaced text. The non-standard contributor names +should be fixed by updating the ``.mailmap`` file, which is a lot of work. 
It +is best to make several trial runs before reaching this point and ping the +malefactors using a GitHub issue to get the needed information. -Check the ``pavement.py`` and ``doc/source/release.rst`` files --------------------------------------------------------------- -Check that the ``pavement.py`` file points to the correct release notes. It should -have been updated after the last release, but if not, fix it now. Also make -sure that the notes have an entry in the ``release.rst`` file:: +Finish the release notes +------------------------ - $ gvim pavement.py doc/source/release.rst +If there are any release notes snippets in ``doc/release/upcoming_changes/``, +run ``spin notes``, which will incorporate the snippets into the +``doc/source/release/notes-towncrier.rst`` file and delete the snippets:: + + $ spin notes + $ gvim doc/source/release/notes-towncrier.rst doc/source/release/2.4.0-notes.rst + +Once the ``notes-towncrier`` contents have been incorporated into release notes +the ``.. include:: notes-towncrier.rst`` directive can be removed. The notes +will always need some fixups, the introduction will need to be written, and +significant changes should be called out. For patch releases the changelog text +may also be appended, but not for the initial release as it is too long. Check +previous release notes to see how this is done. + + +Test the wheel builds +--------------------- + +After the release PR is merged, go to the ``numpy-release`` repository in your +browser and manually trigger the workflow on the ``maintenance/2.4.x`` branch +using the ``Run workflow`` button in ``actions``. Make sure that the upload +target is ``none`` in the *environment* dropdown. The wheels take about 1 hour +to build, but sometimes GitHub is very slow. If some wheel builds fail for +unrelated reasons, you can re-run them as normal in the GitHub Actions UI with +``re-run failed``. 
After the wheels are built review the results, checking that +the number of artifacts are correct, the wheel names are as expected, etc. If +everything looks good, proceed with the release. Release walkthrough @@ -137,14 +152,14 @@ cloned it locally. You can also edit ``.git/config`` and add ``upstream`` if it isn't already present. -1. Prepare the release commit ------------------------------ +1. Tag the release commit +------------------------- Checkout the branch for the release, make sure it is up to date, and clean the repository:: - $ git checkout maintenance/1.21.x - $ git pull upstream maintenance/1.21.x + $ git checkout maintenance/2.4.x + $ git pull upstream maintenance/2.4.x $ git submodule update $ git clean -xdfq @@ -155,103 +170,60 @@ Sanity check:: Tag the release and push the tag. This requires write permission for the numpy repository:: - $ git tag -a -s v1.21.0 -m"NumPy 1.21.0 release" - $ git push upstream v1.21.0 + $ git tag -a -s v2.4.0 -m"NumPy 2.4.0 release" + $ git push upstream v2.4.0 If you need to delete the tag due to error:: - $ git tag -d v1.21.0 - $ git push --delete upstream v1.21.0 - - -2. Build wheels ---------------- - -Tagging the build at the beginning of this process will trigger a wheel build -via cibuildwheel and upload wheels and an sdist to the staging repo. The CI run -on github actions (for all x86-based and macOS arm64 wheels) takes about 1 1/4 -hours. The CI runs on cirrus (for aarch64 and M1) take less time. You can check -for uploaded files at the `staging repository`_, but note that it is not -closely synched with what you see of the running jobs. - -If you wish to manually trigger a wheel build, you can do so: - -- On github actions -> `Wheel builder`_ there is a "Run workflow" button, click - on it and choose the tag to build -- On Cirrus we don't currently have an easy way to manually trigger builds and - uploads. 
- -If a wheel build fails for unrelated reasons, you can rerun it individually: - -- On github actions select `Wheel builder`_ click on the commit that contains - the build you want to rerun. On the left there is a list of wheel builds, - select the one you want to rerun and on the resulting page hit the - counterclockwise arrows button. -- On cirrus we haven't figured it out. - -.. _`staging repository`: https://anaconda.org/multibuild-wheels-staging/numpy/files -.. _`Wheel builder`: https://github.com/numpy/numpy/actions/workflows/wheels.yml - - -3. Download wheels ------------------- - -When the wheels have all been successfully built and staged, download them from the -Anaconda staging directory using the ``tools/download-wheels.py`` script:: - - $ cd ../numpy - $ mkdir -p release/installers - $ python3 tools/download-wheels.py 1.21.0 - - -4. Generate the README files ----------------------------- - -This needs to be done after all installers are downloaded, but before the pavement -file is updated for continued development:: - - $ paver write_release + $ git tag -d v2.4.0 + $ git push --delete upstream v2.4.0 -5. Upload to PyPI ------------------ +2. Build wheels and sdist +------------------------- -Upload to PyPI using ``twine``. A recent version of ``twine`` of is needed -after recent PyPI changes, version ``3.4.1`` was used here:: +Go to the ``numpy-release`` repository in your browser and manually trigger the +workflow on the ``maintenance/2.4.x`` branch using the ``Run workflow`` button +in ``actions``. Make sure that the upload target is ``pypi`` in the +*environment* dropdown. The wheels take about 1 hour to build, but sometimes +GitHub is very slow. If some wheel builds fail for unrelated reasons, you can +re-run them as normal in the GitHub Actions UI with ``re-run failed``. After +the wheels are built review the results, checking that the number of artifacts +are correct, the wheel names are as expected, etc. 
If everything looks good +trigger the upload. - $ cd ../numpy - $ twine upload release/installers/*.whl - $ twine upload release/installers/numpy-1.21.0.tar.gz # Upload last. -If one of the commands breaks in the middle, you may need to selectively upload -the remaining files because PyPI does not allow the same file to be uploaded -twice. The source file should be uploaded last to avoid synchronization -problems that might occur if pip users access the files while this is in -process, causing pip to build from source rather than downloading a binary -wheel. PyPI only allows a single source distribution, here we have -chosen the zip archive. +3. Upload files to GitHub Releases +---------------------------------- +Go to ``_, there should be a ``v2.4.0`` +tag, click on it and hit the edit button for that tag and update the title to +"v2.4.0 ()". There are two ways to add files, using an editable text +window and as binary uploads. The text window needs markdown, so translate the +release notes from rst to md:: -6. Upload files to GitHub -------------------------- + $ python tools/write_release.py 2.4.0 -Go to ``_, there should be a ``v1.21.0 -tag``, click on it and hit the edit button for that tag. There are two ways to -add files, using an editable text window and as binary uploads. Start by -editing the ``release/README.md`` that is translated from the rst version using -pandoc. Things that will need fixing: PR lines from the changelog, if included, -are wrapped and need unwrapping, links should be changed to monospaced text. -Then copy the contents to the clipboard and paste them into the text window. It -may take several tries to get it look right. Then +this will create a ``release/README.md`` file that you can edit. Check the +result to see that it looks correct. Things that may need fixing: wrapped lines +that need unwrapping and links that should be changed to monospaced text. Then +copy the contents to the clipboard and paste them into the text window. 
It may +take several tries to get it look right. Then -- Upload ``release/installers/numpy-1.21.0.tar.gz`` as a binary file. +- Download the sdist (``numpy-2.4.0.tar.gz``) from PyPI and upload it to GitHub + as a binary file. You cannot do this using pip. - Upload ``release/README.rst`` as a binary file. -- Upload ``doc/changelog/1.21.0-changelog.rst`` as a binary file. +- Upload ``doc/changelog/2.4.0-changelog.rst`` as a binary file. - Check the pre-release button if this is a pre-releases. -- Hit the ``{Publish,Update} release`` button at the bottom. +- Hit the ``Publish release`` button at the bottom. + +.. note:: + Please ensure that all 3 files are uploaded are present and the + release text is complete. Releases are configured to be immutable, so + mistakes can't (easily) be fixed anymore. -7. Upload documents to numpy.org (skip for prereleases) +4. Upload documents to numpy.org (skip for prereleases) ------------------------------------------------------- .. note:: You will need a GitHub personal access token to push the update. @@ -261,7 +233,7 @@ and most patch releases. ``make merge-doc`` clones the ``numpy/doc`` repo into ``doc/build/merge`` and updates it with the new documentation:: $ git clean -xdfq - $ git co v1.21.0 + $ git co v2.4.0 $ rm -rf doc/build # want version to be current $ python -m spin docs merge-doc --build $ pushd doc/build/merge @@ -272,62 +244,62 @@ If the release series is a new one, you will need to add a new section to the $ gvim index.html +/'insert here' Further, update the version-switcher json file to add the new release and -update the version marked `(stable)`:: +update the version marked ``(stable)`` and ``preferred``:: $ gvim _static/versions.json -Otherwise, only the ``zip`` link should be updated with the new tag name. 
Since -we are no longer generating ``pdf`` files, remove the line for the ``pdf`` -files if present:: +Then run ``update.py`` to update the version in ``_static``:: - $ gvim index.html +/'tag v1.21' + $ python3 update.py You can "test run" the new documentation in a browser to make sure the links -work:: +work, although the version dropdown will not change, it pulls its information +from ``numpy.org``:: $ firefox index.html # or google-chrome, etc. Update the stable link and update:: - $ ln -sfn 1.21 stable + $ ln -sfn 2.4 stable $ ls -l # check the link Once everything seems satisfactory, update, commit and upload the changes:: - $ python3 update.py - $ git commit -a -m"Add documentation for v1.21.0" - $ git push + $ git commit -a -m"Add documentation for v2.4.0" + $ git push git@github.com:numpy/doc $ popd -8. Reset the maintenance branch into a development state (skip for prereleases) +5. Reset the maintenance branch into a development state (skip for prereleases) ------------------------------------------------------------------------------- Create release notes for next release and edit them to set the version. 
These notes will be a skeleton and have little content:: - $ cp doc/source/release/template.rst doc/source/release/1.21.1-notes.rst - $ gvim doc/source/release/1.21.1-notes.rst - $ git add doc/source/release/1.21.1-notes.rst + $ git checkout -b begin-2.4.1 maintenance/2.4.x + $ cp doc/source/release/template.rst doc/source/release/2.4.1-notes.rst + $ gvim doc/source/release/2.4.1-notes.rst + $ git add doc/source/release/2.4.1-notes.rst -Add new release notes to the documentation release list and update the -``RELEASE_NOTES`` variable in ``pavement.py``:: +Add a link to the new release notes:: - $ gvim doc/source/release.rst pavement.py + $ gvim doc/source/release.rst -Update the ``version`` in ``pyproject.toml`` and ``pyproject.toml.setuppy``:: +Update the ``version`` in ``pyproject.toml``:: - $ gvim pyproject.toml pyproject.toml.setuppy + $ gvim pyproject.toml -Commit the result:: +Commit the result, edit the commit message, note the files in the commit, and +add a line ``[skip cirrus] [skip actions]``, then push:: - $ git commit -a -m"MAINT: prepare 1.21.x for further development" + $ git commit -a -m"MAINT: Prepare 2.4.x for further development" + $ git rebase -i HEAD^ $ git push origin HEAD Go to GitHub and make a PR. It should be merged quickly. -9. Announce the release on numpy.org (skip for prereleases) +6. Announce the release on numpy.org (skip for prereleases) ----------------------------------------------------------- This assumes that you have forked ``_:: @@ -335,55 +307,50 @@ This assumes that you have forked ``_:: $ cd ../numpy.org $ git checkout main $ git pull upstream main - $ git checkout -b announce-numpy-1.21.0 + $ git checkout -b announce-numpy-2.4.0 $ gvim content/en/news.md - For all releases, go to the bottom of the page and add a one line link. Look to the previous links for example. -- For the ``*.0`` release in a cycle, add a new section at the top with a short - description of the new features and point the news link to it. 
+- For the ``*.0`` release in a cycle: + + - Add a new section at the top with a short description of the new + features and point the news link to it. + - Edit the newsHeader and date fields at the top of news.md + - Also edit the buttonText on line 14 in content/en/config.yaml commit and push:: - $ git commit -a -m"announce the NumPy 1.21.0 release" + $ git commit -a -m"announce the NumPy 2.4.0 release" $ git push origin HEAD Go to GitHub and make a PR. -10. Announce to mailing lists ------------------------------ +7. Announce to mailing lists +---------------------------- -The release should be announced on the numpy-discussion, scipy-devel, and +The release should be announced on the numpy-discussion and python-announce-list mailing lists. Look at previous announcements for the basic template. The contributor and PR lists are the same as generated for the -release notes above. If you crosspost, make sure that python-announce-list is +release notes above. If you cross-post, make sure that python-announce-list is BCC so that replies will not be sent to that list. -11. Post-release update main (skip for prereleases) ---------------------------------------------------- +8. Post-release update main (skip for prereleases) +-------------------------------------------------- -Checkout main and forward port the documentation changes:: +Checkout main and forward port the documentation changes. You may also want +to update these notes if procedures have changed or improved:: - $ git checkout -b post-1.21.0-release-update - $ git checkout maintenance/1.21.x doc/source/release/1.21.0-notes.rst - $ git checkout maintenance/1.21.x doc/changelog/1.21.0-changelog.rst - $ git checkout maintenance/1.21.x .mailmap # only if updated for release. 
+ $ git checkout -b post-2.4.0-release-update main + $ git checkout maintenance/2.4.x doc/source/release/2.4.0-notes.rst + $ git checkout maintenance/2.4.x doc/changelog/2.4.0-changelog.rst + $ git checkout maintenance/2.4.x .mailmap # only if updated for release. $ gvim doc/source/release.rst # Add link to new notes $ git status # check status before commit - $ git commit -a -m"MAINT: Update main after 1.21.0 release." + $ git commit -a -m"MAINT: Update main after 2.4.0 release." $ git push origin HEAD Go to GitHub and make a PR. - -12. Update oldest-supported-numpy ---------------------------------- - -If this release is the first one to support a new Python version, or the first -to provide wheels for a new platform or PyPy version, the version pinnings -in https://github.com/scipy/oldest-supported-numpy should be updated. -Either submit a PR with changes to ``setup.cfg`` there, or open an issue with -info on needed changes. - diff --git a/doc/TESTS.rst b/doc/TESTS.rst index 195935ccf380..f0cd063687fd 100644 --- a/doc/TESTS.rst +++ b/doc/TESTS.rst @@ -63,8 +63,10 @@ example, the ``_core`` module, use the following:: Running tests from the command line ----------------------------------- -If you want to build NumPy in order to work on NumPy itself, use the ``spin`` -utility. To run NumPy's full test suite:: +If you want to build NumPy in order to work on NumPy itself, use the +:ref:`spin utility `. + +To run NumPy's full test suite:: $ spin test -m full @@ -74,6 +76,49 @@ Testing a subset of NumPy:: For detailed info on testing, see :ref:`testing-builds` +Running tests in multiple threads +--------------------------------- + +To help with stress testing NumPy for thread safety, the test suite can be run under +`pytest-run-parallel`_. 
To install ``pytest-run-parallel``:: + + $ pip install pytest-run-parallel + +To run the test suite in multiple threads:: + + $ spin test -p auto # have pytest-run-parallel detect the number of available cores + $ spin test -p 4 # run each test under 4 threads + $ spin test -p auto -- --skip-thread-unsafe=true # run ONLY tests that are thread-safe + +When you write new tests, it is worth testing to make sure they do not fail +under ``pytest-run-parallel``, since the CI jobs make use of it. Some tips on how to +write thread-safe tests can be found `here <#writing-thread-safe-tests>`_. + +.. note:: + + Ideally you should run ``pytest-run-parallel`` using a `free-threaded build of Python + `_ that is 3.14 or + higher. If you decide to use a version of Python that is not free-threaded, you will + need to set the environment variables ``PYTHON_CONTEXT_AWARE_WARNINGS`` and + ``PYTHON_THREAD_INHERIT_CONTEXT`` to 1. + +Running doctests +---------------- + +NumPy documentation contains code examples, "doctests". To check that the examples +are correct, install the ``scipy-doctest`` package:: + + $ pip install scipy-doctest + +and run one of:: + + $ spin check-docs -v + $ spin check-docs numpy/linalg + $ spin check-docs -- -k 'det and not slogdet' + +Note that the doctests are not run when you use ``spin test``. + + Other methods of running tests ------------------------------ @@ -100,8 +145,6 @@ module called ``test_yyy.py``. 
If you only need to test one aspect of More often, we need to group a number of tests together, so we create a test class:: - import pytest - # import xxx symbols from numpy.xxx.yyy import zzz import pytest @@ -191,36 +234,34 @@ Similarly for methods:: def test_simple(self): assert_(zzz() == 'Hello from zzz') -Easier setup and teardown functions / methods ---------------------------------------------- - -Testing looks for module-level or class method-level setup and teardown -functions by name; thus:: - - def setup_module(): - """Module-level setup""" - print('doing setup') - - def teardown_module(): - """Module-level teardown""" - print('doing teardown') +Setup and teardown methods +-------------------------- +NumPy originally used xunit setup and teardown, a feature of `pytest`. We now encourage +the usage of setup and teardown methods that are called explicitly by the tests that +need them:: class TestMe: - def setup_method(self): - """Class-level setup""" + def setup(self): print('doing setup') + return 1 - def teardown_method(): - """Class-level teardown""" + def teardown(self): print('doing teardown') + def test_xyz(self): + x = self.setup() + assert x == 1 + self.teardown() + +This approach is thread-safe, ensuring tests can run under ``pytest-run-parallel``. +Using pytest setup fixtures (such as xunit setup methods) is generally not thread-safe +and will likely cause thread-safety test failures. -Setup and teardown functions to functions and methods are known as "fixtures", -and they should be used sparingly. ``pytest`` supports more general fixture at various scopes which may be used -automatically via special arguments. For example, the special argument name -``tmpdir`` is used in test to create a temporary directory. +automatically via special arguments. For example, the special argument name +``tmp_path`` is used in tests to create temporary directories. However, +fixtures should be used sparingly. 
Parametric tests ---------------- @@ -363,9 +404,9 @@ Tests on random data Tests on random data are good, but since test failures are meant to expose new bugs or regressions, a test that passes most of the time but fails occasionally with no code changes is not helpful. Make the random data -deterministic by setting the random number seed before generating it. Use -either Python's ``random.seed(some_number)`` or NumPy's -``numpy.random.seed(some_number)``, depending on the source of random numbers. +deterministic by setting the random number seed before generating it. +Use ``rng = numpy.random.RandomState(some_number)`` to set a seed on a +local instance of `numpy.random.RandomState`. Alternatively, you can use `Hypothesis`_ to generate arbitrary data. Hypothesis manages both Python's and Numpy's random seeds for you, and @@ -376,6 +417,44 @@ The advantages over random generation include tools to replay and share failures without requiring a fixed seed, reporting *minimal* examples for each failure, and better-than-naive-random techniques for triggering bugs. +Writing thread-safe tests +------------------------- + +Writing thread-safe tests may require some trial-and-error. Generally you should +follow the guidelines stated so far, especially when it comes to `setup methods +<#setup-and-teardown-methods>`_ and `seeding random data <#tests-on-random-data>`_. +Explicit setup and the usage of local RNG are thread-safe practices. Here are tips +for some other common problems you may run into. + +Using ``pytest.mark.parametrize`` may occasionally cause thread-safety issues. +To fix this, you can use ``copy()``:: + + @pytest.mark.parametrize('dimensionality', [3, 10, 25]) + @pytest.mark.parametrize('dtype', [np.float32, np.float64]) + def test_solve(dimensionality, dtype): + dimen = dimensionality.copy() + d = dtype.copy() + # use these copied variables instead + ... 
+ +If you are testing something that is inherently thread-unsafe, you can label your +test with ``pytest.mark.thread_unsafe`` so that it will run under a single thread +and not cause test failures:: + + @pytest.mark.thread_unsafe(reason="reason this test is thread-unsafe") + def test_thread_unsafe(): + ... + +Some examples of what should be labeled as thread-unsafe: + +- Usage of ``sys.stdout`` and ``sys.stderr`` +- Mutation of global data, like docstrings, modules, garbage collectors, etc. +- Tests that require a lot of memory, since they could cause crashes. + +Additionally, some ``pytest`` fixtures are thread-unsafe, such as ``monkeypatch`` and +``capsys``. However, ``pytest-run-parallel`` will automatically mark these as +thread-unsafe if you decide to use them. Some fixtures have been patched to be +thread-safe, like ``tmp_path``. Documentation for ``numpy.test`` -------------------------------- @@ -388,3 +467,4 @@ Documentation for ``numpy.test`` .. _Hypothesis: https://hypothesis.readthedocs.io/en/latest/ .. _vscode: https://code.visualstudio.com/docs/python/testing#_enable-a-test-framework .. _pycharm: https://www.jetbrains.com/help/pycharm/testing-your-first-python-application.html +.. _pytest-run-parallel: https://github.com/Quansight-Labs/pytest-run-parallel diff --git a/doc/changelog/1.21.5-changelog.rst b/doc/changelog/1.21.5-changelog.rst index acd3599d48ef..04ff638d42a3 100644 --- a/doc/changelog/1.21.5-changelog.rst +++ b/doc/changelog/1.21.5-changelog.rst @@ -22,7 +22,7 @@ A total of 11 pull requests were merged for this release. * `#20462 `__: BUG: Fix float16 einsum fastpaths using wrong tempvar * `#20463 `__: BUG, DIST: Print os error message when the executable not exist * `#20464 `__: BLD: Verify the ability to compile C++ sources before initiating... 
-* `#20465 `__: BUG: Force ``npymath` ` to respect ``npy_longdouble`` +* `#20465 `__: BUG: Force ``npymath`` to respect ``npy_longdouble`` * `#20466 `__: BUG: Fix failure to create aligned, empty structured dtype * `#20467 `__: ENH: provide a convenience function to replace npy_load_module * `#20495 `__: MAINT: update wheel to version that supports python3.10 diff --git a/doc/changelog/2.0.0-changelog.rst b/doc/changelog/2.0.0-changelog.rst new file mode 100644 index 000000000000..78e250f508d9 --- /dev/null +++ b/doc/changelog/2.0.0-changelog.rst @@ -0,0 +1,1304 @@ + +Contributors +============ + +A total of 212 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* @Algorithmist-Girl + +* @DWesl +* @Illviljan +* @Searchingdays +* @ellaella12 + +* @liang3zy22 + +* @matoro + +* @mcp292 + +* @mgunyho + +* @msavinash + +* @mykykh + +* @pojaghi + +* @pratiklp00 + +* @stefan6419846 + +* @undermyumbrella1 + +* Aaron Meurer +* Aditi Saluja + +* Adrin Jalali + +* Agriya Khetarpal + +* Albert Steppi + +* Alex Cabrera + +* Alexander Grund +* Andrea Bianchi + +* Andreas Florath + +* Andrew Ardill + +* Andrew Ho + +* Andrew Nelson +* Andrey Rybakov + +* Ankur Singh + +* Anton Prosekin + +* Antony Lee +* Arun Kannawadi + +* Bas van Beek +* Ben Woodruff + +* Bharat Raghunathan +* Bhavya Alekhya + +* Brandon Smith + +* Brian Walshe + +* Brigitta SipoĖ‹cz +* Brock Mendel +* Carl Meyer + +* Charles Bousseau + +* Charles Harris +* Chris Sidebottom +* Christian Lorentzen +* Christian Veenhuis +* Christoph Reiter +* Christopher Sidebottom +* ClÊment Robert +* CÊdric Hannotier +* Cobalt Yang + +* Gonçalo BÃĄrias + +* D.J. 
Ramones + +* DanShatford + +* Daniel Li + +* Daniel Vanzo +* Daval Parmar +* Developer-Ecosystem-Engineering +* Dhruv Rawat + +* Dimitri Papadopoulos Orfanos +* Edward E +* Edward Yang + +* Eisuke Kawashima + +* Eliah Kagan + +* Élie Goudout + +* Elliott Sales de Andrade +* Emil Olszewski + +* Emily Hunt + +* Éric Piel + +* Eric Wieser +* Eric Xie + +* Even Rouault + +* Evgeni Burovski +* Filipe Laíns + +* Francisco Sousa + +* Ganesh Kathiresan +* Gonçalo BÃĄrias + +* Gonzalo Tornaría + +* Hans Meine +* Heberto Mayorquin + +* Heinz-Alexander Fuetterer + +* Hood Chatham +* Hugo van Kemenade +* Ivan A. Melnikov + +* Jacob M. Casey + +* Jake Lishman + +* Jake VanderPlas +* James Oliver + +* Jan Wassenberg + +* Janukan Sivajeyan + +* Johann Rohwer + +* Johannes Kaisinger + +* John Muradeli + +* Joris Van den Bossche +* Justus Magin +* Jyn Spring ᐴæ˜Ĩ +* Kai Striega +* Kevin Sheppard +* Kevin Wu + +* Khawaja Junaid + +* Kit Lee + +* Kristian Minchev + +* Kristoffer Pedersen + +* Kuan-Wei Chiu + +* Lane Votapka + +* Larry Bradley +* Leo Singer +* Liang Yan + +* Linus Sommer + +* Logan Thomas +* Lucas Colley + +* Luiz Eduardo Amaral + +* Lukas Geiger +* Lysandros Nikolaou + +* Maanas Arora + +* Maharshi Basu + +* Mahder Gebremedhin + +* Marcel Bargull + +* Marcel Loose + +* Mark Mentovai + +* Mark Ryan + +* Marten van Kerkwijk +* Mateusz SokÃŗÅ‚ +* Matt Haberland +* Matt Thompson + +* Matthew Barber +* Matthew Thompson + +* Matthias Bussonnier +* Matthias Koeppe +* Matthias Schaufelberger + +* Matti Picus +* Maxwell Aladago +* Maya Anderson + +* Melissa Weber Mendonça +* Meng Xiangzhuo + +* Michael Kiffer +* Miki Watanabe (æ¸Ąé‚‰ įžŽå¸Œ) +* Milan Curcic + +* Miles Cranmer +* Miro Hrončok + +* Mohamed E. 
BRIKI + +* Mohaned Qunaibit + +* Mohit Kumar + +* Muhammed Muhsin + +* Mukulika Pahari +* Munira Alduraibi + +* Namami Shanker +* Nathan Goldbaum +* Nyakku Shigure + +* Ola x Nilsson + +* Olivier Mattelaer + +* Olivier Grisel +* Omid Rajaei +* Pablo Losada + +* Pamphile Roy +* Paul Reece + +* Pedro Kaj Kjellerup Nacht + +* Peiyuan Liu + +* Peter Hawkins +* Pierre +* Pieter Eendebak +* Quentin BarthÊlemy + +* Raghuveer Devulapalli +* Ralf Gommers +* Randy Eckenrode + +* Raquel Braunschweig + +* Richard Howe + +* Robert Kern +* Rohit Goswami +* Romain Geissler + +* Ronald van Elburg + +* Ross Barnowski +* Sam James + +* Sam Van Kooten + +* Samuel Albanie + +* Sarah Wang + +* Sarah Zwiep + +* Sarah-Yifei-Wang + +* Sarthak Dawar + +* Sayantika Banik +* Sayed Adel +* Sean Cheah + +* Sebastian Berg +* Serge Guelton +* Shalini Roy + +* Shen Zhou +* Shubhal Gupta + +* Stefan van der Walt +* Stefano Rivera + +* Takumasa N. + +* Taras Tsugrii +* Thomas A Caswell +* Thomas Grainger + +* Thomas Li +* Tim Hoffmann +* Tim Paine + +* Timo RÃļhling + +* Trey Woodlief + +* Tyler Reddy +* Victor Tang + +* Vladimir Fokow + +* Warren Weckesser +* Warrick Ball + +* Will Ayd +* William Andrea + +* William Ayd + +* Xiangyi Wang + +* Yash Pethe + +* Yuki K +* Zach Brugh + +* Zach Rottman + +* Zolisa Bleki + +Pull requests merged +==================== + +A total of 1078 pull requests were merged for this release. + +* `#15457 `__: BUG: Adds support for array parameter declaration in fortran... 
+* `#21199 `__: ENH: expose datetime.c functions to cython +* `#21429 `__: ENH: Added ``bitwise_count`` UFuncs +* `#21760 `__: MAINT: Make output of Polynomial representations consistent +* `#21975 `__: ENH: Add binding for random pyx files +* `#22449 `__: ENH: Update scalar representations as per NEP 51 +* `#22657 `__: BUG: Fix common block handling in f2py +* `#23096 `__: BLD, SIMD: The meson CPU dispatcher implementation +* `#23282 `__: BUG: Fix data stmt handling for complex values in f2py +* `#23347 `__: DOC: changed formula in random.Generator.pareto doc #22701 +* `#23351 `__: ENH: Use AVX512-FP16 SVML content for float16 umath functions +* `#23508 `__: DOC: Update scalar types in ``Py{TYPE}ArrType_Type`` +* `#23537 `__: NEP: add NEP on a Python API cleanup for NumPy 2.0 +* `#23611 `__: DOC: Make input/output type consistent and add more examples... +* `#23729 `__: ENH: allow int sequences as shape arguments in numpy.memmap +* `#23762 `__: API: Add .mT attribute for arrays +* `#23764 `__: CI,TYP: Bump mypy to 1.4.1 +* `#23780 `__: BUG: Create complex scalars from real and imaginary parts +* `#23785 `__: DOC: tweak NEP 50 examples +* `#23787 `__: DOC: Add brief note about custom converters to genfromtext. +* `#23789 `__: ENH: add copy parameter for api.reshape function +* `#23795 `__: Use tuple instead of string for (LOWER|UPPER)_TABLEs. +* `#23804 `__: REL: Prepare main for NumPy 2.0.0 development +* `#23809 `__: MAINT: removing the deprecated submodule +* `#23810 `__: MAINT: Bump github/codeql-action from 2.3.3 to 2.3.4 +* `#23813 `__: DOC: Clean up errstate handling in our tests +* `#23814 `__: DOC: switching to use the plot directive +* `#23817 `__: MAINT: Bump github/codeql-action from 2.3.4 to 2.3.5 +* `#23819 `__: BUG: Doctest doesn't have a SHOW_WARNINGS directive. 
+* `#23822 `__: DOC: Added ``pathlib.Path`` where applicable +* `#23825 `__: BLD: use cython3 for one CI run +* `#23826 `__: MAINT: io.open → open +* `#23828 `__: MAINT: fix typos found by codespell +* `#23830 `__: API: deprecate compat and selected lib utils +* `#23831 `__: DOC: use float64 instead of float128 in docstring +* `#23832 `__: REL: Prepare for the NumPy 1.25.0rc1 release +* `#23834 `__: MAINT: IOError → OSError +* `#23835 `__: MAINT: Update versioneer: 0.26 → 0.28 +* `#23836 `__: DOC: update distutils migration guide +* `#23838 `__: BLD: switch to meson-python as the default build backend +* `#23840 `__: REL: Prepare for the NumPy 1.25.0rc1 release +* `#23841 `__: MAINT: Bump pypa/cibuildwheel from 2.12.3 to 2.13.0 +* `#23843 `__: MAINT: Update download-wheels +* `#23845 `__: MAINT: Do not call PyArray_Item_XDECREF in PyArray_Pack +* `#23846 `__: TST: Add tests for np.argsort +* `#23847 `__: MAINT: const correctness for the generalized ufunc C API +* `#23850 `__: MAINT: Bump actions/dependency-review-action from 3.0.4 to 3.0.6 +* `#23851 `__: CI: Update cirrus nightly wheel upload token +* `#23852 `__: CI: Change "weekly" to "nightly" in cirrus +* `#23854 `__: DOC:removed examples which refers to a non existent function +* `#23855 `__: BUG: make use of locals() in a comprehension fully compatible... +* `#23856 `__: CI: bump nightly upload frequency to twice a week +* `#23857 `__: BUG: fix cron syntax +* `#23859 `__: DOC: Note that f2py isn't consiered safe +* `#23861 `__: MAINT: Remove all "NumPy 2" as that should be main now +* `#23865 `__: MAINT: Bump github/codeql-action from 2.3.5 to 2.3.6 +* `#23868 `__: DOC: Fix ``NPY_OUT_ARRAY`` to ``NPY_ARRAY_OUT_ARRAY`` in how-to-extend... 
+* `#23871 `__: NEP: Fix NEP 53 file format and minor formatting issue +* `#23878 `__: TST: Add tests for np.argsort +* `#23881 `__: ENH: Add array API standard v2022.12 support to numpy.array_api +* `#23887 `__: TYP,DOC: Annotate and document the ``metadata`` parameter of... +* `#23897 `__: DOC: Fix transpose() description with a correct reference to... +* `#23898 `__: API: Change string to bool conversions to be consistent with... +* `#23902 `__: MAINT: Use ``--allow-downgrade`` option for rtools. +* `#23906 `__: MAINT: Use vectorcall for call forwarding in methods +* `#23907 `__: MAINT: Bump github/codeql-action from 2.3.6 to 2.13.4 +* `#23908 `__: MAINT: Bump actions/checkout from 3.5.2 to 3.5.3 +* `#23911 `__: BUG: Allow np.info on non-hashable objects with a dtype +* `#23912 `__: API: Switch to NEP 50 behavior by default +* `#23913 `__: ENH: let zeros, empty, and empty_like accept dtype classes +* `#23914 `__: DOC: Fix reference ``ComplexWarning`` in release note +* `#23915 `__: DOC: Update development_environment doc. +* `#23916 `__: ABI: Bump C-ABI to 2 but accept older NumPy if compiled against... +* `#23917 `__: ENH: Speed up boolean indexing of flatiters +* `#23918 `__: DOC: Fix references to ``AxisError`` in docstrings +* `#23919 `__: API: Remove interrupt handling and ``noprefix.h`` +* `#23920 `__: DOC: fix DOI on badge +* `#23921 `__: DEP: Expire the PyDataMem_SetEventHook deprecation and remove... +* `#23922 `__: API: Remove ``seterrobj``/``geterrobj``/``extobj=`` and related C-API... 
+* `#23923 `__: BUG:Fix for call to 'vec_st' is ambiguous +* `#23924 `__: MAINT: Bump pypa/cibuildwheel from 2.13.0 to 2.13.1 +* `#23925 `__: MAINT: Disable SIMD version of float64 sin and cos +* `#23927 `__: DOC: Fix references to ``r_`` in ``mr_class`` docstring +* `#23935 `__: MAINT: Update to latest x86-simd-sort +* `#23936 `__: ENH,API: Make the errstate/extobj a contextvar +* `#23941 `__: BUG: Fix NpyIter cleanup in einsum error path +* `#23942 `__: BUG: Fixup for win64 fwrite issue +* `#23943 `__: DOC: Update required C++ version in building.rst (and copy-edit). +* `#23944 `__: DOC: const correctness in PyUFunc_FromFuncAndData... functions +* `#23950 `__: MAINT: Upgrade install-rtools version +* `#23952 `__: Replace a divider with a colon for _monotonicity +* `#23953 `__: BUG: Fix AVX2 intrinsic npyv_store2_till_s64 on MSVC > 19.29 +* `#23960 `__: DOC: adding release note for 23809 +* `#23961 `__: BLD: update pypy in CI to latest version +* `#23962 `__: TEST: change subprocess call to capture stderr too +* `#23964 `__: MAINT: Remove references to removed functions +* `#23965 `__: MAINT: Simplify codespaces conda environment activation +* `#23967 `__: DOC: Fix references to ``trimseq`` in docstrings +* `#23969 `__: MAINT: Update main after 1.25.0 release. +* `#23971 `__: BUG: Fix private procedures in ``f2py`` modules +* `#23977 `__: MAINT: pipes.quote → shlex.quote +* `#23979 `__: MAINT: Fix typos found by codespell +* `#23980 `__: MAINT: use ``yield from`` where applicable +* `#23982 `__: BLD: Port long double identification to C for meson +* `#23983 `__: BLD: change file extension for installed static libraries back... 
+* `#23984 `__: BLD: improve handling of CBLAS, add ``-Duse-ilp64`` build option +* `#23985 `__: Revert "TST: disable longdouble string/print tests on Linux aarch64" +* `#23990 `__: DOC: Fix np.vectorize Doc +* `#23991 `__: CI: BLD: build wheels and fix test suite for Python 3.12 +* `#23995 `__: MAINT: Do not use ``--side-by-side`` choco option +* `#23997 `__: MAINT: make naming of C aliases for dtype classes consistent +* `#23998 `__: DEP: Expire ``set_numeric_ops`` and the corresponding C functions... +* `#24004 `__: BUG: Fix reduction ``return NULL`` to be ``goto fail`` +* `#24006 `__: ENH: Use high accuracy SVML for double precision umath functions +* `#24009 `__: DOC: Update __array__ description +* `#24011 `__: API: Remove ``old_defines.h`` (part of NumPy 1.7 deprecated C-API) +* `#24012 `__: MAINT: Remove hardcoded f2py numeric/numarray compatibility switch +* `#24014 `__: BUG: Make errstate decorator compatible with threading +* `#24017 `__: MAINT: Further cleanups for errstate +* `#24018 `__: ENH: Use Highway's VQSort on AArch64 +* `#24020 `__: Fix typo in random sampling documentation +* `#24021 `__: BUG: Fix error message for nanargmin/max of empty sequence +* `#24025 `__: TST: improve test for Cholesky decomposition +* `#24026 `__: DOC: Add note for installing ``asv`` library to run benchmark tests +* `#24027 `__: DOC: Fix reference to ``__array_struct__`` in ``arrays.interface.rst`` +* `#24029 `__: DOC: Add link to NEPs in top navbar +* `#24030 `__: BUG: Avoid undefined behavior in array.astype() +* `#24031 `__: BUG: Ensure ``__array_ufunc__`` works without any kwargs passed +* `#24046 `__: DOC: Fix reference to python module ``string`` in ``routines.char.rst`` +* `#24047 `__: DOC: Fix reference to ``array()`` in release note +* `#24049 `__: MAINT: Update main after 1.24.4 release. +* `#24051 `__: MAINT: Pin urllib3 to avoid anaconda-client bug. 
+* `#24052 `__: MAINT: Bump ossf/scorecard-action from 2.1.3 to 2.2.0 +* `#24053 `__: ENH: Adopt new macOS Accelerate BLAS/LAPACK Interfaces, including... +* `#24054 `__: BUG: Multiply or divides using SIMD without a full vector can... +* `#24058 `__: DOC: Remove references to ``PyArray_SetNumericOps`` and ``PyArray_GetNumericOps`` in release note +* `#24059 `__: MAINT: Remove ability to enter errstate twice (sequentially) +* `#24060 `__: BLD: use ``-ftrapping-math`` with Clang on macOS in Meson build +* `#24061 `__: DOC: PR adds casting option's description to Glossary and ``numpy.concatenate``. +* `#24068 `__: DOC: Add NpzFile class documentation. +* `#24071 `__: MAINT: Overwrite previous wheels when uploading to anaconda. +* `#24073 `__: API: expose PyUFunc_GiveFloatingpointErrors in the dtype API +* `#24075 `__: DOC: Add missing indentation in ``ma.mT`` docstring +* `#24076 `__: DOC: Fix incorrect reST markups in ``numpy.void`` docstring +* `#24077 `__: DOC: Fix documentation for ``ndarray.mT`` +* `#24082 `__: MAINT: testing for IS_MUSL closes #24074 +* `#24083 `__: ENH: Add ``spin`` command ``gdb``; customize ``docs`` and ``test`` +* `#24085 `__: ENH: Replace npy complex structs with native complex types +* `#24087 `__: NEP: Mark NEP 51 as accepted +* `#24090 `__: MAINT: print error from verify_c_api_version.py failing +* `#24092 `__: TST: Pin pydantic<2 in Pyodide workflow +* `#24094 `__: ENH: Added compiler ``args`` and ``link_args`` +* `#24097 `__: DOC: Add reference to dtype parameter in NDArray +* `#24098 `__: ENH: raise early exception if 0d array is used in np.cross +* `#24100 `__: DOC: Clarify correlate function definition +* `#24101 `__: BUG: Fix empty structured array dtype alignment +* `#24102 `__: DOC: fix rst formatting in datetime C API docs +* `#24103 `__: BUG: Only replace dtype temporarily if dimensions changed +* `#24105 `__: DOC: Correctly use savez_compressed in examples for that function. 
+* `#24107 `__: ENH: Add ``spin benchmark`` command +* `#24112 `__: DOC: Fix warnings and errors caused by reference/c-api/datetimes +* `#24113 `__: DOC: Fix the reference in the docstring of numpy.meshgrid +* `#24123 `__: BUG: ``spin gdb``: launch Python directly so that breakpoint... +* `#24124 `__: MAINT: Bump actions/setup-node from 3.6.0 to 3.7.0 +* `#24125 `__: MAINT: import numpy as ``np`` in ``spin ipython`` +* `#24126 `__: ENH: add mean keyword to std and var +* `#24130 `__: DOC: Fix warning for PyArray_MapIterNew. +* `#24133 `__: DOC: Update python as glue doc. +* `#24135 `__: DOC: Fix string types in ``arrays.dtypes.rst`` +* `#24138 `__: DOC: add NEP 54 on SIMD - moving to C++ and adopting Highway... +* `#24142 `__: ENH: Allow NEP 42 dtypes to use np.save and np.load +* `#24143 `__: Corrected a grammatical error in doc/source/user/absolute_beginners.rst +* `#24144 `__: API: Remove several niche objects for numpy 2.0 python API cleanup +* `#24149 `__: MAINT: Update main after 1.25.1 release. +* `#24150 `__: BUG: properly handle negative indexes in ufunc_at fast path +* `#24152 `__: DOC: Fix reference warning for recarray. +* `#24153 `__: BLD, TST: refactor test to use meson not setup.py, improve spin... +* `#24154 `__: API: deprecate undocumented functions +* `#24158 `__: MAINT: Bump larsoner/circleci-artifacts-redirector-action from... 
+* `#24159 `__: MAINT: Bump pypa/cibuildwheel from 2.13.1 to 2.14.0 +* `#24160 `__: MAINT: Update cibuildwheel to 2.14.0 +* `#24161 `__: BUG: histogram small range robust +* `#24162 `__: ENH: Improve clang-cl compliance +* `#24163 `__: MAINT: update pytest, hypothesis, pytest-cov, and pytz in test_requirements.txt +* `#24172 `__: DOC: Add note that NEP 29 is superseded by SPEC 0 +* `#24173 `__: MAINT: Bump actions/setup-python from 4.6.1 to 4.7.0 +* `#24176 `__: MAINT: do not use copyswap in flatiter internals +* `#24178 `__: BUG: PyObject_IsTrue and PyObject_Not error handling in setflags +* `#24187 `__: BUG: Fix the signature for np.array_api.take +* `#24188 `__: BUG: fix choose refcount leak +* `#24191 `__: BUG: array2string does not add signs for positive integers. Fixes... +* `#24193 `__: DEP: Remove datetime64 deprecation warning when constructing... +* `#24196 `__: MAINT: Remove versioneer +* `#24199 `__: BLD: update OpenBLAS to an intermediate commit +* `#24201 `__: ENH: Vectorize np.partition and np.argpartition using AVX-512 +* `#24202 `__: MAINT: Bump pypa/cibuildwheel from 2.14.0 to 2.14.1 +* `#24204 `__: BUG: random: Fix check for both uniform variates being 0 in random_beta() +* `#24205 `__: MAINT: Fix new or residual typos found by codespell +* `#24206 `__: TST: convert remaining setup.py tests to meson instead +* `#24208 `__: CI: Add a sanitizer CI job +* `#24211 `__: BUG: Fix reference count leak in str(scalar). +* `#24212 `__: BUG: fix invalid function pointer conversion error +* `#24214 `__: ENH: Create helper for conversion to arrays +* `#24219 `__: MAINT: Bump larsoner/circleci-artifacts-redirector-action from... +* `#24220 `__: BUG: random: Fix generation of nan by dirichlet. +* `#24222 `__: BUG: Fix cblas detection for the wheel builds +* `#24223 `__: BUG: Fix undefined behavior in complex pow(). +* `#24224 `__: API: Make 64bit default integer on 64bit windows +* `#24225 `__: DOC: Fix doc build warning for random. 
+* `#24227 `__: DOC: Update year in doc/source/conf.py to 2023 +* `#24228 `__: DOC: fix some double includes in f2py.getting-started.rst +* `#24231 `__: API: expose NPY_DTYPE macro in the dtype API +* `#24235 `__: BLD: only install the ``f2py`` command, not ``f2py3`` or ``f2py3.X`` +* `#24236 `__: BLD: update requirements to use cython>3.0 +* `#24237 `__: BUG: Added missing PyObject_IsTrue error check (return -1) #24177 +* `#24238 `__: BLD/CI: re-enable ILP64 usage and PyPy job in Azure +* `#24240 `__: BUG: Fix C types in scalartypes +* `#24248 `__: BUG: Factor out slow ``getenv`` call used for memory policy warning +* `#24249 `__: TST: enable test that checks for ``numpy.array_api`` entry point +* `#24250 `__: CI: Test NumPy against OpenBLAS weekly builds +* `#24254 `__: ENH: add weighted quantile for inverted_cdf +* `#24256 `__: DEV: Use ``exec_lines`` and not profile dir for ``spin ipython`` +* `#24257 `__: BUG: Add size check for threaded array assignment +* `#24258 `__: DEP: Remove PyArray complex macros and move PyArray_MIN/MAX +* `#24262 `__: DOC: Fix links to random.Generator methods in quickstart +* `#24263 `__: BUG: Fix use of renamed variable. +* `#24267 `__: BUG: random: Fix generation of nan by beta. +* `#24268 `__: CI: Enable running intel_spr_sde_test with Intel SDE +* `#24270 `__: BUG: Move legacy check for void printing +* `#24271 `__: API: Remove legacy-inner-loop-selector +* `#24272 `__: BUG: do not modify the input to ufunc_at +* `#24273 `__: TYP: Trim down the ``_NestedSequence.__getitem__`` signature +* `#24276 `__: DOC: Remove ``np.source`` and ``np.lookfor`` +* `#24277 `__: DOC: inconsistency between doc and code +* `#24278 `__: DOC: fix a couple typos and rst formatting errors in NEP 0053 +* `#24279 `__: CI/BLD: fail by default if no BLAS/LAPACK, add 32-bit Python... 
+* `#24281 `__: BUG: Further fixes to indexing loop and added tests +* `#24285 `__: CI: correct URL in cirrus.star +* `#24286 `__: CI: only build cirrus wheels when requested +* `#24287 `__: DOC: Fix some incorrectly formatted documents +* `#24289 `__: DOC: update code comment about ``NPY_USE_BLAS_ILP64`` environment... +* `#24291 `__: CI: improve test suite runtime via pytest parallelism and disabling... +* `#24298 `__: DOC: update stride reference doc. +* `#24299 `__: BUG: Fix assumed length f2py regression +* `#24303 `__: CI: apt update before apt install on cirrus +* `#24304 `__: MAINT: Update main after 1.25.2 release. +* `#24307 `__: CI: Cannot run ``intel_spr_sde_test`` on Intel SDE +* `#24311 `__: BLD: update openblas to newer version +* `#24312 `__: DEP: Finalize ``fastCopyAndTranpose`` and other old C-funcs/members... +* `#24315 `__: DOC: Fix some links in documents +* `#24316 `__: API: Cleaning ``numpy/__init__.py`` and main namespace - Part 1... +* `#24320 `__: DOC: Remove promoting twitter in heading +* `#24321 `__: DEP: Remove deprecated numpy.who +* `#24331 `__: DOC: Fix reference warning for buffer. +* `#24332 `__: DOC: Refactor description of ``PyArray_FromAny/PyArray_CheckFromAny`` +* `#24346 `__: DOC: use nightly dependencies [skip actions] [azp skip] [skip... +* `#24347 `__: DOC: Update to release upcoming change document +* `#24349 `__: BUG: polynomial: Handle non-array inputs in polynomial class... +* `#24354 `__: TST: fix distutils tests for deprecations in recent setuptools... +* `#24357 `__: API: Cleaning numpy/__init__.py and main namespace - Part 2 [NEP... +* `#24358 `__: BUG: flexible inheritance segfault +* `#24360 `__: BENCH: fix small array det benchmark +* `#24362 `__: DOC: Add release notes for complex types changes in 2.x +* `#24364 `__: BUG: Remove #undef complex from npy_common.h +* `#24369 `__: ENH: assert_array_less should report max violations instead of... 
+* `#24370 `__: BLD: Clean up build for complex +* `#24371 `__: MAINT: Fix codespaces setup.sh script +* `#24372 `__: MAINT: Bump pypa/cibuildwheel from 2.14.1 to 2.15.0 +* `#24373 `__: MAINT: Bump actions/dependency-review-action from 3.0.6 to 3.0.7 +* `#24374 `__: MAINT: Update cibuildwheel for cirrus builds +* `#24376 `__: API: Cleaning ``numpy/__init__.py`` and main namespace - Part 3... +* `#24379 `__: ENH: Vendor meson for multi-target build support +* `#24380 `__: DOC: Remove extra indents in documents +* `#24383 `__: DOC: Fix reference warning for ABCPolyBase. +* `#24393 `__: DOC: Add missing sphinx reference roles +* `#24396 `__: BLD: vendor meson-python to make the Windows builds with SIMD... +* `#24400 `__: TST: revert xfail in ``test_umath.py`` +* `#24402 `__: DOC: Fix reference warning for routines.polynomials.rst. +* `#24407 `__: DOC: add warning to ``allclose``, revise "Notes" in ``isclose`` +* `#24412 `__: [BUG] Return value of use_hugepage in hugepage_setup +* `#24413 `__: BUG: cleanup warnings [skip azp][skip circle][skip travis][skip... +* `#24414 `__: BLD: allow specifying the long double format to avoid the runtime... +* `#24415 `__: MAINT: Bump actions/setup-node from 3.7.0 to 3.8.0 +* `#24419 `__: CI/BUG: add Python 3.12 CI job and fix ``numpy.distutils`` AttributeError +* `#24420 `__: ENH: Introduce tracer for enabled CPU targets on each optimized... +* `#24421 `__: DOC: Remove mixed capitalization +* `#24422 `__: MAINT: Remove unused variable ``i`` +* `#24423 `__: MAINT: Bump actions/dependency-review-action from 3.0.7 to 3.0.8 +* `#24425 `__: CI: only run cirrus on commit to PR [skip actions] +* `#24427 `__: MAINT: revert adding ``distutils`` and ``array_api`` to ``np.__all__`` +* `#24434 `__: DOC: Fix reference warning for types-and-structures.rst. +* `#24435 `__: CI: cirrus run linux_aarch64 first +* `#24437 `__: MAINT: Bump actions/setup-node from 3.8.0 to 3.8.1 +* `#24439 `__: MAINT: Pin upper version of sphinx. 
+* `#24442 `__: DOC: Fix reference warning in Arrayterator and recfunctions. +* `#24445 `__: API: Cleaning ``numpy/__init__.py`` and main namespace - Part 4... +* `#24452 `__: ENH: Add prefix to _ALIGN Macro +* `#24457 `__: MAINT: Upgrade to spin 0.5 +* `#24461 `__: MAINT: Refactor partial load workaround for Clang +* `#24463 `__: MAINT: Fix broken link in runtests.py +* `#24468 `__: BUG: Fix meson build failure due to unchanged inplace auto-generated... +* `#24469 `__: DEP: Replace deprecation warning for non-integral arguments in... +* `#24471 `__: DOC: Fix some incorrect markups +* `#24473 `__: MAINT: Improve docstring and performance of trimseq +* `#24476 `__: MAINT: Move ``RankWarning`` to exceptions module +* `#24477 `__: MAINT: Remove deprecated functions [NEP 52] +* `#24479 `__: CI: Implements Cross-Compile Builds for armhf, ppc64le, and s390x +* `#24481 `__: DOC: Rm np.who from autosummary. +* `#24483 `__: NEP: add NEP 55 for a variable width string dtype +* `#24484 `__: BUG: fix NPY_cast_info error handling in choose +* `#24485 `__: DOC: Fix some broken links +* `#24486 `__: BUG: ``asv dev`` has been removed, use ``asv run`` instead. +* `#24487 `__: DOC: Fix reference warning in some rst and code files. +* `#24488 `__: MAINT: Stop testing on ppc64le. 
+* `#24493 `__: CI: GitHub Actions CI job restructuring +* `#24494 `__: API: Remove deprecated ``msort`` function +* `#24498 `__: MAINT: Re-write 16-bit qsort dispatch +* `#24504 `__: DOC: Remove extra indents in docstrings +* `#24505 `__: DOC: Fix mentions in ``isin`` docs +* `#24510 `__: DOC: Add missing changelogs for NEP 52 PRs +* `#24511 `__: BUG: Use a default assignment for git_hash [skip ci] +* `#24513 `__: API: Update ``lib.histograms`` namespace +* `#24515 `__: BUG: fix issue with git-version script, needs a shebang to run +* `#24516 `__: DOC: unpin sphinx +* `#24517 `__: MAINT: Harmonize fortranobject, drop C99 style for loop +* `#24518 `__: MAINT: Add expiration notes for NumPy 2.0 removals +* `#24519 `__: MAINT: remove ``setup.py`` and other files for distutils builds +* `#24520 `__: CI: remove obsolete jobs, and move macOS and conda Azure jobs... +* `#24523 `__: CI: switch the Cygwin job to Meson +* `#24527 `__: TYP: add kind argument to numpy.isin type specification +* `#24528 `__: MAINT: Bump actions/checkout from 3.5.3 to 3.6.0 +* `#24532 `__: ENH: ``meson`` backend for ``f2py`` +* `#24535 `__: CI: remove spurious wheel build action runs +* `#24536 `__: API: Update ``lib.nanfunctions`` namespace +* `#24537 `__: API: Update ``lib.type_check`` namespace +* `#24538 `__: API: Update ``lib.function_base`` namespace +* `#24539 `__: CI: fix CircleCI job for move to Meson +* `#24540 `__: API: Add ``lib.array_utils`` namespace +* `#24543 `__: DOC: re-pin sphinx<7.2 +* `#24547 `__: DOC: Cleanup removed objects +* `#24549 `__: DOC: fix typos in percentile documentation +* `#24551 `__: Update .mailmap 2 +* `#24555 `__: BUG, ENH: Fix ``iso_c_binding`` type maps and fix ``bind(c)``... 
+* `#24556 `__: BUG: fix comparisons between masked and unmasked structured arrays +* `#24559 `__: BUG: ensure nomask in comparison result is not broadcast +* `#24560 `__: CI/BENCH: move more jobs to Meson and fix all broken benchmarks +* `#24562 `__: DOC: Fix typos +* `#24564 `__: API: Readd ``add_docstring`` and ``add_newdoc`` to ``np.lib`` +* `#24566 `__: API: Update ``lib.shape_base`` namespace +* `#24567 `__: API: Update ``arraypad``,``arraysetops``, ``ufunclike`` and ``utils``... +* `#24570 `__: CI: Exclude import libraries from list of DLLs on Cygwin. +* `#24571 `__: MAINT: Add tests for Polynomial with fractions.Fraction coefficients +* `#24573 `__: DOC: Update building docs to use Meson +* `#24577 `__: API: Update ``lib.twodim_base`` namespace +* `#24578 `__: API: Update ``lib.polynomial`` and ``lib.npyio`` namespaces +* `#24579 `__: DOC: fix ``import mat`` warning. +* `#24580 `__: API: Update ``lib.stride_tricks`` namespace +* `#24581 `__: API: Update ``lib.index_tricks`` namespace +* `#24582 `__: DOC: fix typos in ndarray.setflags doc +* `#24584 `__: BLD: fix ``_umath_linalg`` dependencies +* `#24587 `__: API: Cleaning ``numpy/__init__.py`` and main namespace - Part 5... 
+* `#24589 `__: NEP: fix typos and formatting in NEP 55 +* `#24596 `__: BUG: Fix hash of user-defined dtype +* `#24598 `__: DOC: fix two misspellings in documentation +* `#24599 `__: DOC: unpin sphinx to pick up 7.2.5 +* `#24600 `__: DOC: wrong name in docs +* `#24601 `__: BLD: meson-cpu: fix SIMD support on platforms with no features +* `#24605 `__: DOC: fix isreal docstring (complex -> imaginary) +* `#24607 `__: DOC: Fix import find_common_type warning[skip actions][skip cirrus][sâ€Ļ +* `#24610 `__: MAINT: Avoid creating an intermediate array in np.quantile +* `#24611 `__: TYP: Add the missing ``casting`` keyword to ``np.clip`` +* `#24612 `__: DOC: Replace "cube cube-root" with "cube root" in cbrt docstring +* `#24618 `__: DOC: Fix markups for code blocks +* `#24620 `__: DOC: Update NEP 52 file +* `#24623 `__: TYP: Explicitly declare ``dtype`` and ``generic`` as hashable +* `#24625 `__: CI: Switch SIMD tests to meson +* `#24626 `__: DOC: add release notes link to PyPI. +* `#24628 `__: TYP: Allow ``binary_repr`` to accept any object implementing... +* `#24631 `__: DOC: Clarify usage of --include-paths as an f2py CLI argument +* `#24634 `__: API: Rename ``numpy/core`` to ``numpy/_core`` [NEP 52] +* `#24635 `__: ENH: Refactor the typing "reveal" tests using ``typing.assert_type`` +* `#24636 `__: MAINT: Bump actions/checkout from 3.6.0 to 4.0.0 +* `#24643 `__: TYP, MAINT: General type annotation maintenance +* `#24644 `__: MAINT: remove the ``oldnumeric.h`` header +* `#24657 `__: Add read-only token to linux_qemu.yml +* `#24658 `__: BUG, ENH: Access ``PyArrayMultiIterObject`` fields using macros. 
+* `#24663 `__: ENH: optimisation of array_equal +* `#24664 `__: BLD: fix bug in random.mtrand extension, don't link libnpyrandom +* `#24666 `__: MAINT: Bump actions/upload-artifact from 3.1.2 to 3.1.3 +* `#24667 `__: DOC: TEST.rst: add example with ``pytest.mark.parametrize`` +* `#24671 `__: BLD: build wheels for 32-bit Python on Windows, using MSVC +* `#24672 `__: MAINT: Bump actions/dependency-review-action from 3.0.8 to 3.1.0 +* `#24674 `__: DOC: Remove extra indents in documents +* `#24677 `__: DOC: improve the docstring's examples for np.searchsorted +* `#24679 `__: MAINT: Refactor of ``numpy/core/_type_aliases.py`` +* `#24680 `__: ENH: add parameter ``strict`` to ``assert_allclose`` +* `#24681 `__: BUG: Fix weak promotion with some mixed float/int dtypes +* `#24682 `__: API: Remove ``ptp``, ``itemset`` and ``newbyteorder`` from ``np.ndarray``... +* `#24690 `__: DOC: Fix reference warning in some rst files +* `#24691 `__: ENH: Add the Array Iterator API to Cython +* `#24693 `__: DOC: NumPy 2.0 migration guide +* `#24695 `__: CI: enable use of Cirrus CI compute credits by collaborators +* `#24696 `__: DOC: Updated the f2py docs to remove a note on ``-fimplicit-none`` +* `#24697 `__: API: Readd ``sctypeDict`` to the main namespace +* `#24698 `__: BLD: fix issue with compiler selection during cross compilation +* `#24702 `__: DOC: Fix typos +* `#24705 `__: TYP: Add annotations for the py3.12 buffer protocol +* `#24710 `__: BUG: Fix np.quantile([0, 1], 0, method='weibull') +* `#24711 `__: BUG: Fix np.quantile([Fraction(2,1)], 0.5) +* `#24714 `__: DOC: Update asarray docstring to use shares_memory +* `#24715 `__: DOC: Fix trailing backticks characters. +* `#24716 `__: CI: do apt update before apt install +* `#24717 `__: MAINT: remove relaxed strides debug build setting +* `#24721 `__: DOC: Doc fixes and updates. +* `#24725 `__: MAINT: Update main after 1.26.0 release. +* `#24733 `__: BLD, BUG: Fix build failure for host flags e.g. ``-march=native``... 
+* `#24735 `__: MAINT: Update RELEASE_WALKTHROUGH +* `#24740 `__: MAINT: Bump pypa/cibuildwheel from 2.15.0 to 2.16.0 +* `#24741 `__: MAINT: Remove cibuildwheel pin in cirrus_wheels +* `#24745 `__: ENH: Change default values in polynomial package +* `#24752 `__: DOC: Fix reference warning in some rst files +* `#24753 `__: BLD: add libquadmath to licences and other tweaks +* `#24758 `__: ENH: fix printing structured dtypes with a non-legacy dtype member +* `#24762 `__: BUG: Fix order of Windows OS detection macros. +* `#24766 `__: DOC: add a note on the ``.c.src`` format to the distutils migration... +* `#24770 `__: ENH: add parameter ``strict`` to ``assert_equal`` +* `#24772 `__: MAINT: align test_dispatcher s390x targets with _umath_tests_mtargets +* `#24775 `__: ENH: add parameter ``strict`` to ``assert_array_less`` +* `#24777 `__: BUG: ``numpy.array_api``: fix ``linalg.cholesky`` upper decomp... +* `#24778 `__: BUG: Fix DATA statements for f2py +* `#24780 `__: DOC: Replace http:// by https:// +* `#24781 `__: MAINT, DOC: fix typos found by codespell +* `#24787 `__: DOC: Closes issue #24730, 'sigma' to 'signum' in piecewise example +* `#24791 `__: BUG: Fix f2py to enable use of string optional inout argument +* `#24792 `__: TYP,DOC: Document the ``np.number`` parameter type as invariant +* `#24793 `__: MAINT: fix licence path win +* `#24795 `__: MAINT : fix spelling mistake for "imaginary" param in _read closes... 
+* `#24798 `__: MAINT: Bump actions/checkout from 4.0.0 to 4.1.0 +* `#24799 `__: MAINT: Bump maxim-lobanov/setup-xcode from 1.5.1 to 1.6.0 +* `#24802 `__: BLD: updated vendored-meson/meson for mips64 fix +* `#24805 `__: DOC: Fix reference warning in some rst files +* `#24806 `__: BUG: Fix build on ppc64 when the baseline set to Power9 or higher +* `#24807 `__: API: Remove zero names from dtype aliases +* `#24811 `__: DOC: explain why we avoid string.ascii_letters +* `#24812 `__: MAINT: Bump pypa/cibuildwheel from 2.16.0 to 2.16.1 +* `#24816 `__: MAINT: Upgrade to spin 0.7 +* `#24817 `__: DOC: Fix markups for emphasis +* `#24818 `__: API: deprecate size-2 inputs for ``np.cross`` [Array API] +* `#24820 `__: MAINT: remove ``wheel`` as a build dependency +* `#24825 `__: DOC: Fix docstring of matrix class +* `#24828 `__: BUG, SIMD: use scalar cmul on bad Apple clang x86_64 +* `#24834 `__: DOC: Update debugging section +* `#24835 `__: ENH: Add ufunc for np.char.isalpha +* `#24839 `__: BLD: use scipy-openblas wheel +* `#24845 `__: MAINT: Bump actions/setup-python from 4.7.0 to 4.7.1 +* `#24847 `__: DOC: Fix reference warning in some rst files +* `#24848 `__: DOC: TESTS.rst: suggest np.testing assertion function strict=True +* `#24854 `__: MAINT: Remove 'a' dtype alias +* `#24858 `__: ENH: Extend np.add ufunc to work with unicode and byte dtypes +* `#24860 `__: MAINT: Bump pypa/cibuildwheel from 2.16.1 to 2.16.2 +* `#24864 `__: MAINT: Xfail test failing on PyPy. +* `#24866 `__: API: Add ``NumpyUnpickler`` +* `#24867 `__: DOC: Update types table +* `#24868 `__: ENH: Add find/rfind ufuncs for unicode and byte dtypes +* `#24869 `__: BUG: Fix ma.convolve if propagate_mask=False +* `#24875 `__: DOC: testing.assert_array_equal: distinguish from assert_equal +* `#24876 `__: BLD: fix math func feature checks, fix FreeBSD build, add CI... +* `#24877 `__: ENH: testing: argument ``err_msg`` of assertion functions can be... 
+* `#24878 `__: ENH: isclose/allclose: support array_like ``atol``/``rtol`` +* `#24880 `__: BUG: Fix memory leak in timsort's buffer resizing +* `#24883 `__: BLD: fix "Failed to guess install tag" in meson-log.txt, add... +* `#24884 `__: DOC: replace 'a' dtype with 'S' in format_parser docs +* `#24886 `__: DOC: Fix eigenvector typo in linalg.py docs +* `#24887 `__: API: Add ``diagonal`` and ``trace`` to ``numpy.linalg`` [Array API] +* `#24888 `__: API: Make ``intp`` ``ssize_t`` and introduce characters nN +* `#24891 `__: MAINT: Bump ossf/scorecard-action from 2.2.0 to 2.3.0 +* `#24893 `__: ENH: meson: implement BLAS/LAPACK auto-detection and many CI... +* `#24896 `__: API: Add missing deprecation and release note files +* `#24901 `__: MAINT: Bump actions/setup-python from 4.7.0 to 4.7.1 +* `#24904 `__: BUG: loongarch doesn't use REAL(10) +* `#24910 `__: BENCH: Fix benchmark bug leading to failures +* `#24913 `__: DOC: fix typos +* `#24915 `__: API: Allow comparisons with and between any python integers +* `#24920 `__: MAINT: Reenable PyPy wheel builds. +* `#24922 `__: API: Add ``np.long`` and ``np.ulong`` +* `#24923 `__: ENH: Add Cython enumeration for NPY_FR_GENERIC +* `#24925 `__: DOC: Fix parameter markups in ``c-api/ufunc.rst`` +* `#24927 `__: DOC: how-to-io.rst: document solution for NumPy JSON serialization +* `#24930 `__: MAINT: Update main after 1.26.1 release. 
+* `#24931 `__: ENH: testing: consistent names for actual and desired results +* `#24935 `__: DOC: Update lexsort docstring for axis kwargs +* `#24938 `__: DOC: Add warning about ill-conditioning to linalg.inv docstring +* `#24939 `__: DOC: Add legacy directive to mark outdated objects +* `#24940 `__: API: Add ``svdvals`` to ``numpy.linalg`` [Array API] +* `#24941 `__: MAINT: Bump actions/checkout from 4.1.0 to 4.1.1 +* `#24943 `__: MAINT: don't warn for symbols needed by import_array() +* `#24945 `__: MAINT: Make ``numpy.fft.helper`` private +* `#24946 `__: MAINT: Make ``numpy.linalg.linalg`` private +* `#24947 `__: ENH: Add startswith & endswith ufuncs for unicode and bytes dtypes +* `#24949 `__: API: Enforce ABI version and print info when compiled against... +* `#24950 `__: TEST: Add test for checking functions' one location rule +* `#24951 `__: ENH: Add isdigit/isspace/isdecimal/isnumeric ufuncs for string... +* `#24953 `__: DOC: Indicate shape param of ndarray.reshape is position-only +* `#24958 `__: MAINT: Remove unhelpful error replacements from ``import_array()`` +* `#24959 `__: MAINT: Python API cleanup nitpicks +* `#24967 `__: BLD: use classic linker on macOS, the new one in XCode 15 has... +* `#24968 `__: BLD: mingw-w64 build fixes +* `#24969 `__: MAINT: fix a few issues with CPython main/3.13.0a1 +* `#24970 `__: BLD: Use the correct Python interpreter when running tempita.py +* `#24975 `__: DOC: correct Logo SVG files rendered in dark by Figma +* `#24978 `__: MAINT: testing: rename parameters x/y to actual/desired +* `#24979 `__: BLD: clean up incorrect-but-hardcoded define for ``strtold_l``... +* `#24980 `__: BLD: remove ``NPY_USE_BLAS_ILP64`` environment variable [wheel... 
+* `#24981 `__: DOC: revisions to "absolute beginners" tutorial +* `#24983 `__: ENH: Added a ``lint`` spin command +* `#24984 `__: DOC: fix reference in user/basics.rec.html#record-arrays +* `#24985 `__: MAINT: Disable warnings for items imported by pybind11 +* `#24986 `__: ENH: Added ``changelog`` spin command +* `#24988 `__: ENH: DType API slot for descriptor finalization before array... +* `#24990 `__: MAINT: Bump ossf/scorecard-action from 2.3.0 to 2.3.1 +* `#24991 `__: DOC: add note to default_rng about requiring non-negative seed +* `#24993 `__: BLD: musllinux_aarch64 [wheel build] +* `#24995 `__: DOC: update vectorize docstring for proper rendering of decorator... +* `#24996 `__: DOC: Clarify a point in basic indexing user guide +* `#24997 `__: DOC: Use ``spin`` to generate changelog +* `#25001 `__: DOC: Visually divide main license and bundled licenses in wheels +* `#25005 `__: MAINT: remove LGTM.com configuration file +* `#25006 `__: DOC: update ndarray.item docstring +* `#25008 `__: BLD: unvendor meson-python +* `#25010 `__: MAINT: test-refactor of ``numpy/_core/numeric.py`` +* `#25016 `__: DOC: standardize capitalization of headings +* `#25017 `__: ENH: Added ``notes`` command for spin +* `#25019 `__: Update .mailmap +* `#25022 `__: TYP: add None to ``__getitem__`` in ``numpy.array_api`` +* `#25029 `__: DOC: "What is NumPy?" 
section of the documentation +* `#25030 `__: DOC: Include ``np.long`` in ``arrays.scalars.rst`` +* `#25032 `__: MAINT: Add missing ``noexcept`` to shuffle helpers +* `#25037 `__: MAINT: Unpin urllib3 for anaconda-client install +* `#25039 `__: MAINT: Adjust typing for readded ``np.long`` +* `#25040 `__: BLD: make macOS version check for Accelerate NEWLAPACK more robust +* `#25042 `__: BUG: ensure passing ``np.dtype`` to itself doesn't crash +* `#25045 `__: ENH: Vectorize np.sort and np.partition with AVX2 +* `#25050 `__: TST: Ensure test is not run on 32bit platforms +* `#25051 `__: MAINT: Make bitfield integers unsigned +* `#25054 `__: API: Introduce ``np.isdtype`` function [Array API] +* `#25055 `__: BLD: improve detection of Netlib libblas/libcblas/liblapack +* `#25056 `__: DOC: Small fixes for NEP 52 +* `#25057 `__: MAINT: Add ``npy_2_compat.h`` which is designed to work also if... +* `#25059 `__: MAINT: ``np.long`` typing nitpick +* `#25060 `__: DOC: standardize capitalization of NEP headings +* `#25062 `__: ENH: Change add/isalpha ufuncs to use buffer class & general... +* `#25063 `__: BLD: change default of the ``allow-noblas`` option to true +* `#25064 `__: DOC: Fix description of auto bin_width +* `#25067 `__: DOC: add missing word to internals.rst +* `#25068 `__: TST: skip flaky test in test_histogram +* `#25072 `__: MAINT: default to C11 rather than C99, fix most build warnings... +* `#25073 `__: BLD,BUG: quadmath required where available [f2py] +* `#25078 `__: BUG: alpha doesn't use REAL(10) +* `#25079 `__: API: Introduce ``np.astype`` [Array API] +* `#25080 `__: API: Add and redefine ``numpy.bool`` [Array API] +* `#25081 `__: DOC: Provide migration notes for scalar inspection functions +* `#25082 `__: MAINT: Bump actions/dependency-review-action from 3.1.0 to 3.1.1 +* `#25085 `__: BLD: limit scipy-openblas32 wheel to 0.3.23.293.2 +* `#25086 `__: API: Add Array API aliases (math, bitwise, linalg, misc) [Array... 
+* `#25088 `__: API: Add Array API setops [Array API] +* `#25089 `__: BUG, BLD: Fixed VSX4 feature check +* `#25090 `__: BUG: Make n a long int for np.random.multinomial +* `#25091 `__: MAINT: Bump actions/dependency-review-action from 3.1.1 to 3.1.2 +* `#25092 `__: BLD: Fix features.h detection and blocklist complex trig funcs... +* `#25094 `__: BUG: Avoid intp conversion regression in Cython 3 +* `#25099 `__: DOC: Fix license identifier for OpenBLAS +* `#25101 `__: API: Add ``outer`` to ``numpy.linalg`` [Array API] +* `#25102 `__: MAINT: Print towncrier output file location +* `#25104 `__: ENH: Add str_len & count ufuncs for unicode and bytes dtypes +* `#25105 `__: API: Remove ``__array_prepare__`` +* `#25111 `__: TST: Use ``meson`` for testing ``f2py`` +* `#25123 `__: MAINT,BUG: Never import distutils above 3.12 [f2py] +* `#25124 `__: DOC: ``f2py`` CLI documentation enhancements +* `#25127 `__: DOC: angle: update documentation of convention when magnitude... +* `#25129 `__: BUG: Fix FP overflow error in division when the divisor is scalar +* `#25131 `__: MAINT: Update main after 1.26.2 release. +* `#25133 `__: DOC: std/var: improve documentation of ``ddof`` +* `#25136 `__: BUG: Fix -fsanitize=alignment issue in numpy/_core/src/multiarray/arraytypes.c.src +* `#25138 `__: API: Remove The MapIter API from public +* `#25139 `__: MAINT: Bump actions/dependency-review-action from 3.1.2 to 3.1.3 +* `#25140 `__: DOC: clarify boolean index error message +* `#25141 `__: TST: Explicitly pass NumPy path to cython during tests (also... 
+* `#25144 `__: DOC: Fix typo in NumPy 2.0 migration guide +* `#25145 `__: API: Add ``cross`` to ``numpy.linalg`` [Array API] +* `#25146 `__: BUG: fix issues with ``newaxis`` and ``linalg.solve`` in ``numpy.array_api`` +* `#25149 `__: API: bump MAXDIMS/MAXARGS to 64 introduce NPY_AXIS_RAVEL +* `#25151 `__: BLD, CI: revert pinning scipy-openblas +* `#25152 `__: ENH: Add strip/lstrip/rstrip ufuncs for unicode and bytes +* `#25154 `__: MAINT: Cleanup mapiter struct a bit +* `#25155 `__: API: Add ``matrix_norm``, ``vector_norm``, ``vecdot`` and ``matrix_transpose`` [Array API] +* `#25156 `__: API: Remove PyArray_REFCNT and NPY_REFCOUNT +* `#25157 `__: DOC: ``np.sort`` doc fix contiguous axis +* `#25158 `__: API: Make ``encoding=None`` the default in loadtxt +* `#25160 `__: BUG: Fix moving compiled executable to root with f2py -c on Windows +* `#25161 `__: API: Remove ``PyArray_GetCastFunc`` and any guarantee that ``->castfuncs``... +* `#25162 `__: NEP: Update NEP 55 +* `#25165 `__: DOC: mention submodule init in source install instructions +* `#25167 `__: MAINT: Add ``array-api-tests`` CI stage, add ``ndarray.__array_namespace__`` +* `#25168 `__: API: Introduce ``copy`` argument for ``np.asarray`` [Array API] +* `#25169 `__: API: Introduce ``correction`` argument for ``np.var`` and ``np.std``... 
+* `#25171 `__: ENH: Add replace ufunc for bytes and unicode dtypes +* `#25176 `__: DOC: replace integer overflow example +* `#25181 `__: BUG: Disallow shadowed modulenames +* `#25184 `__: MAINT,DOC: Fix inline licenses ``f2py`` +* `#25185 `__: MAINT: Fix sneaky typo [f2py] +* `#25186 `__: BUG: Handle ``common`` blocks with ``kind`` specifications from modules +* `#25193 `__: MAINT: Kill all instances of f2py.compile +* `#25194 `__: DOC: try to be nicer about f2py.compile +* `#25195 `__: BUG: Fix single to half-precision conversion on PPC64/VSX3 +* `#25196 `__: DOC: ``f2py`` rewrite with ``meson`` details +* `#25198 `__: MAINT: Replace deprecated ctypes.ARRAY(item_type, size) with... +* `#25209 `__: ENH: Expose abstract DType classes in the experimental DType... +* `#25212 `__: BUG: Don't try to grab callback modules +* `#25221 `__: TST: f2py: fix issue in test skip condition +* `#25222 `__: DOC: Fix wrong return type for PyArray_CastScalarToCType +* `#25223 `__: MAINT: Bump mymindstorm/setup-emsdk from 12 to 13 +* `#25226 `__: BUG: Handle ``iso_c_type`` mappings more consistently +* `#25228 `__: DOC: Improve description of ``axis`` parameter for ``np.median`` +* `#25230 `__: BUG: Raise error in ``np.einsum_path`` when output subscript is... +* `#25232 `__: DEV: Enable the ``spin lldb`` +* `#25233 `__: API: Add ``device`` and ``to_device`` to ``numpy.ndarray`` [Array... 
+* `#25238 `__: MAINT: do not use ``long`` type +* `#25243 `__: BUG: Fix non-contiguous 32-bit memory load when ARM/Neon is enabled +* `#25246 `__: CI: Add CI test for riscv64 +* `#25247 `__: ENH: Enable SVE detection for Highway VQSort +* `#25248 `__: DOC: Add release note for Highway VQSort on AArch64 +* `#25250 `__: DOC: fix typo (alignment) +* `#25253 `__: CI: streamline macos_arm64 test +* `#25254 `__: BUG: mips doesn't use REAL(10) +* `#25255 `__: ENH: add new wheel builds using Accelerate on macOS >=14 +* `#25257 `__: TST: PyPy needs another gc.collect on latest versions +* `#25259 `__: BUG: Fix output dtype when calling np.char methods with empty... +* `#25261 `__: MAINT: Bump conda-incubator/setup-miniconda from 2.2.0 to 3.0.0 +* `#25264 `__: MAINT: Bump actions/dependency-review-action from 3.1.3 to 3.1.4 +* `#25267 `__: BUG: Fix module name bug in signature files [urgent] [f2py] +* `#25271 `__: API: Shrink MultiIterObject and make ``NPY_MAXARGS`` a runtime... +* `#25272 `__: DOC: Mention installing threadpoolctl in issue template [skip... +* `#25276 `__: MAINT: Bump actions/checkout from 3 to 4 +* `#25280 `__: TST: Fix fp_noncontiguous and fpclass on riscv64 +* `#25282 `__: MAINT: Bump conda-incubator/setup-miniconda from 3.0.0 to 3.0.1 +* `#25284 `__: CI: Install Lapack runtime on Cygwin. 
+* `#25287 `__: BUG: Handle .pyf.src and fix SciPy [urgent] +* `#25291 `__: MAINT: Allow initializing new-style dtypes inside numpy +* `#25292 `__: API: C-API removals +* `#25295 `__: MAINT: expose and use dtype classes in internal API +* `#25297 `__: BUG: enable linking of external libraries in the f2py Meson backend +* `#25299 `__: MAINT: Performance improvement of polyutils.as_series +* `#25300 `__: DOC: Document how to check for a specific dtype +* `#25302 `__: DOC: Clarify virtualenv setup and dependency installation +* `#25308 `__: MAINT: Update environment.yml to match *_requirements.txt +* `#25309 `__: DOC: Fix path to svg logo files +* `#25310 `__: DOC: Improve documentation for fill_diagonal +* `#25313 `__: BUG: Don't use the _Complex extension in C++ mode +* `#25314 `__: MAINT: Bump actions/setup-python from 4.7.1 to 4.8.0 +* `#25315 `__: MAINT: expose PyUFunc_AddPromoter in the internal ufunc API +* `#25316 `__: CI: remove no-blas=true from spin command on macos_arm64 ci [skip... +* `#25317 `__: ENH: Add fft optional extension submodule to numpy.array_api +* `#25321 `__: MAINT: Run f2py's meson backend with the same python that runs... 
+* `#25322 `__: DOC: Add examples for ``np.char`` functions +* `#25324 `__: DOC: Add examples for ``np.polynomial.polynomial`` functions +* `#25326 `__: DOC: Add examples to functions in ``np.polynomial.hermite`` +* `#25328 `__: DOC: Add ``np.polynomial.laguerre`` examples +* `#25329 `__: BUG: fix refcounting for dtypemeta aliases +* `#25331 `__: MAINT: Bump actions/setup-python from 4.8.0 to 5.0.0 +* `#25335 `__: BUG: Fix np.char for scalars and add tests +* `#25336 `__: API: make arange ``start`` argument positional-only +* `#25338 `__: BLD: update vendored Meson for AIX shared library fix +* `#25339 `__: DOC: fix some rendering and formatting issues in ``unique_*`` docstrings +* `#25340 `__: DOC: devguide cleanup: remove Gitwash and too verbose Git details +* `#25342 `__: DOC: Add more ``np.char`` documentation +* `#25346 `__: ENH: Enable 16-bit VQSort routines on AArch64 +* `#25347 `__: API: Introduce stringdtype [NEP 55] +* `#25350 `__: DOC: add "building from source" docs +* `#25354 `__: DOC: Add example for ``np.random.default_rng().binomial()`` +* `#25355 `__: DOC: Fix typo in ``np.random.default_rng().logistic()`` +* `#25356 `__: DOC: Add example for ``np.random.default_rng().exponential()`` +* `#25357 `__: DOC: Add example for ``np.random.default_rng().geometric()`` +* `#25361 `__: BUG: Fix regression with ``f2py`` wrappers when modules and subroutines... 
+* `#25364 `__: ENH,BUG: Handle includes for meson backend +* `#25367 `__: DOC: Fix refguide check script +* `#25368 `__: MAINT: add npy_gil_error to acquire the GIL and set an error +* `#25369 `__: DOC: Correct documentation for polyfit() +* `#25370 `__: ENH: Make numpy.array_api more portable +* `#25372 `__: BUG: Fix failing test_features on SapphireRapids +* `#25376 `__: BUG: Fix build issues on SPR and avx512_qsort float16 +* `#25383 `__: MAINT: Init ``base`` in cpu_avx512_kn +* `#25384 `__: MAINT: Add missing modules to refguide test +* `#25388 `__: API: Adjust ``linalg.pinv`` and ``linalg.cholesky`` to Array... +* `#25389 `__: BUG: ufunc api: update multiarray_umath import path +* `#25394 `__: MAINT: Bump actions/upload-artifact from 3.1.3 to 4.0.0 +* `#25397 `__: BUG, SIMD: Fix quicksort build error when Highway/SVE is enabled +* `#25398 `__: DOC: Plot exact distributions in logistic, logseries and weibull... +* `#25404 `__: DOC: Improve ``np.histogram`` docs +* `#25409 `__: API,MAINT: Reorganize array-wrap calling and introduce ``return_scalar`` +* `#25412 `__: DOC: Clean up of ``_generator.pyx`` +* `#25413 `__: DOC: Add example to ``rng.beta(...)`` +* `#25414 `__: DOC: Add missing examples to ``np.ma`` +* `#25416 `__: ENH: define a gufunc for vecdot (with BLAS support) +* `#25417 `__: MAINT: Bump actions/setup-node from 3.8.1 to 4.0.1 +* `#25418 `__: MAINT: Bump larsoner/circleci-artifacts-redirector-action from... +* `#25425 `__: BUG: Fix two errors related to not checking for failed allocations +* `#25426 `__: BUG: avoid seg fault from OOB access in RandomState.set_state() +* `#25430 `__: TST: Fix test_numeric on riscv64 +* `#25431 `__: DOC: Improve ``np.mean`` documentation of the out argument +* `#25432 `__: DOC: Add ``numpy.lib`` docs page +* `#25434 `__: API,BUG,DEP: treat trailing comma as a tuple and thus a structured... 
+* `#25437 `__: API: Add ``rtol`` to ``matrix_rank`` and ``stable`` [Array API] +* `#25438 `__: DEV: add ``ninja`` to ``test_requirements.txt`` and clean up... +* `#25439 `__: BLD: remove ``-fno-strict-aliasing``, ``--strip-debug`` from cibuildwheel... +* `#25440 `__: CI: show meson-log.txt in Cirrus wheel builds +* `#25441 `__: API,ENH: Change definition of complex sign +* `#25443 `__: TST: fix issue with dtype conversion in ``test_avx_based_ufunc`` +* `#25444 `__: TST: remove ``TestNewBufferProtocol.test_error_too_many_dims`` +* `#25446 `__: Downgrade Highway to latest released version (1.0.7) +* `#25448 `__: TYP: Adjust type annotations for Numpy 2.0 changes +* `#25449 `__: TYP,CI: bump mypy from 1.5.1 to 1.7.1 +* `#25450 `__: MAINT: make the import-time check for old Accelerate more specific +* `#25451 `__: DOC: Fix names of subroutines. +* `#25453 `__: TYP,MAINT: Change more overloads to play nice with pyright +* `#25454 `__: DOC: fix typo ``v_stack`` in 2.0 migration guide +* `#25455 `__: BUG: fix macOS version checks for Accelerate support +* `#25456 `__: BLD: optimize BLAS and LAPACK search order +* `#25459 `__: BLD: fix uninitialized variable warnings from simd/neon/memory.h +* `#25462 `__: TST: skip two tests in aarch64 linux wheel builds +* `#25463 `__: ENH: Add np.strings namespace +* `#25473 `__: MAINT: use cholesky_up gufunc for upper Cholesky decomposition +* `#25484 `__: BUG: handle scalar input in np.char.replace +* `#25492 `__: DOC: update signature of PyArray_Conjugate +* `#25495 `__: API: adjust nD fft ``s`` param to array API +* `#25501 `__: DOC: Update a few interpreted text to verbatim/code. +* `#25503 `__: BLD: unpin cibuildwheel [wheel build] +* `#25504 `__: DOC: add pickleshare to doc dependencies +* `#25505 `__: BLD: replace uses of openblas_support with openblas wheels [wheel... 
+* `#25507 `__: DOC: mention string, bytes, and void dtypes in dtype intro +* `#25510 `__: BUG:Fix incorrect 'inner' method type annotation in __array_ufunc_ +* `#25511 `__: DOC: np.any: add multidimensional example +* `#25512 `__: DOC: add a section for dealing with NumPy 2.0 for downstream... +* `#25515 `__: BUG: three string ufunc bugs, one leading to segfault +* `#25516 `__: MAINT,BUG: Fix ``--dep`` when ``-L -l`` are present +* `#25520 `__: DOC: unambiguous np.histogram dtype description +* `#25521 `__: DOC: Improve error messages for random.choice +* `#25522 `__: BUG: fix incorrect strcmp implementation for unequal length strings +* `#25524 `__: MAINT: Update main after 1.26.3 release. +* `#25525 `__: MAINT: optimization and broadcasting for .replace() method for... +* `#25527 `__: DOC: Improve ``polynomial`` docs +* `#25528 `__: DOC: Add notes to ``rng.bytes()`` +* `#25529 `__: DOC: Add ``rng.f()`` plot +* `#25530 `__: DOC: Add ``rng.chisquare()`` plot +* `#25531 `__: API: allow building in cython with Py_LIMITED_API +* `#25533 `__: DOC: Improve ``poisson`` plot +* `#25534 `__: DOC: Indicate order is kwarg-only for ndarray.reshape. +* `#25535 `__: MAINT: fix ufunc debug tracing +* `#25536 `__: MAINT, ENH: Implement calling pocketfft via gufunc and allow... 
+* `#25538 `__: MAINT: Bump actions/dependency-review-action from 3.1.4 to 3.1.5 +* `#25540 `__: DOC: Fix typo in random.geometric docstring +* `#25542 `__: NEP: add NEP 56 on array API standard support in main namespace +* `#25545 `__: MAINT: Update copyright to 2024 (LICENSE & DOC) +* `#25549 `__: DOC: Using ``f2py`` with ``fypp`` +* `#25553 `__: BUG: Fix return shape of inverse_indices in unique_inverse +* `#25554 `__: BUG: support axes argument in np.linalg.tensordot +* `#25555 `__: MAINT, BLD: Fix unused inline functions warnings on clang +* `#25558 `__: ENH: Add replace ufunc to np.strings +* `#25560 `__: BUG: np.linalg.vector_norm: return correct shape for keepdims +* `#25563 `__: SIMD: Extend the enabled targets for Google Highway quicksort +* `#25569 `__: DOC: Fix a typo +* `#25570 `__: ENH: change list-of-array to tuple-of-array returns (Numba compat) +* `#25571 `__: MAINT: Return size_t from num_codepoints in string ufuncs Buffer... +* `#25573 `__: MAINT: add a C alias for the default integer DType +* `#25574 `__: DOC: ensure that docstrings for np.ndarray.copy, np.copy and... +* `#25575 `__: ENH: Wrap string ufuncs in np.strings to allow default arguments +* `#25579 `__: MAINT: Bump actions/upload-artifact from 4.0.0 to 4.1.0 +* `#25582 `__: CI: Bump azure pipeline timeout to 120 minutes +* `#25592 `__: BUG: Fix undefined behavior when converting NaN float16 to datetime... +* `#25593 `__: DOC: fix typos in 2.0 migration guide +* `#25594 `__: MAINT: replace uses of cython numpy.math.pxd with native routines +* `#25595 `__: BUG: Allow ``None`` as ``api_version`` in ``__array_namespace__``... 
+* `#25598 `__: BLD: include fix for MinGW platform detection +* `#25603 `__: DOC: Update tensordot documentation +* `#25608 `__: MAINT: skip installing rtools on azure +* `#25609 `__: DOC: fft: correct docs about recent deprecations +* `#25610 `__: ENH: Vectorize argsort and argselect with AVX2 +* `#25613 `__: BLD: fix building for windows ARM64 +* `#25614 `__: MAINT: Bump actions/dependency-review-action from 3.1.5 to 4.0.0 +* `#25615 `__: MAINT: add ``newaxis`` to ``__all__`` in ``numpy.array_api`` +* `#25625 `__: NEP: update NEP 55 text to match current stringdtype implementation +* `#25627 `__: TST: Fix f2py doc test collection in editable installs +* `#25628 `__: TST: Fix test_warning_calls on Python 3.12 +* `#25629 `__: TST: Bump pytz to 2023.3.post1 +* `#25631 `__: BUG: Use large file fallocate on 32 bit linux platforms +* `#25636 `__: MAINT: Move np.char methods to np.strings +* `#25638 `__: MAINT: Bump actions/upload-artifact from 4.1.0 to 4.2.0 +* `#25641 `__: DOC: Remove a duplicated argument ``shape`` in ``empty_like`` +* `#25646 `__: DOC: Fix links to f2py codes +* `#25648 `__: DOC: fix syntax highlighting issues in added f2py docs +* `#25650 `__: DOC: improve structure of reference guide +* `#25651 `__: ENH: Allow strings in logical ufuncs +* `#25652 `__: BUG: Fix AVX512 build flags on Intel Classic Compiler +* `#25656 `__: DOC: add autosummary API reference for DType classes. +* `#25657 `__: MAINT: fix warning about visibility tag on clang +* `#25660 `__: MAINT: Bump mymindstorm/setup-emsdk from 13 to 14 +* `#25662 `__: BUG: Allow NumPy int scalars to be divided by out-of-bound Python... 
+* `#25664 `__: DOC: minor improvement to the partition() docstrings +* `#25668 `__: BUG: correct irfft with n=1 on larger input +* `#25669 `__: BLD: fix potential issue with escape sequences in ``__config__.py`` +* `#25671 `__: MAINT: Bump actions/upload-artifact from 4.2.0 to 4.3.0 +* `#25672 `__: BUG: check for overflow when converting a string to an int scalar +* `#25673 `__: BUG: Ensure meson updates generated umath doc correctly. +* `#25674 `__: DOC: add a section on NumPy's module structure to the refguide +* `#25676 `__: NEP: add note on Python integer "exceptions" to NEP 50 +* `#25678 `__: DOC: fix docstring of quantile and percentile +* `#25680 `__: DOC: replace autosummary for numpy.dtypes with enumerated list +* `#25683 `__: DOC: Try add a section on NEP 50 to migration guide +* `#25687 `__: Update to OpenBLAS 0.3.26 +* `#25689 `__: MAINT: Simplify scalar int division a bit (no need for helper... +* `#25692 `__: DOC: Clarify deprecated width Parameter in numpy.binary_repr... +* `#25695 `__: DOC: empty: standardize notes about uninitialized values +* `#25697 `__: CI: add pinning for scipy-openblas wheels +* `#25699 `__: DOC: Fix some references in document +* `#25707 `__: DOC: fix a small np.einsum example +* `#25709 `__: MAINT: Include header defining backtrace +* `#25710 `__: TST: marks on a fixture have no effect +* `#25711 `__: ENH: support float and longdouble in FFT using C++ pocketfft... +* `#25712 `__: API: Make any and all return booleans by default +* `#25715 `__: [MAINT] Add regression test for np.geomspace +* `#25716 `__: CI: pin cygwin python to 3.9.16-1 [skip cirrus][skip azp][skip... 
+* `#25717 `__: DOC: Fix some minor formatting errors in NEPs +* `#25721 `__: DEP: Finalize future warning move in lstsq default +* `#25723 `__: NEP: Mark NEP 55 accepted +* `#25727 `__: DOC: Remove function name without signature in ``ma`` +* `#25730 `__: ENH: add a pkg-config file and a ``numpy-config`` script +* `#25732 `__: CI: use version 0.3.26.0.2 of scipy-openblas wheels +* `#25734 `__: DOC: Fix markups of code literals in ``polynomial`` +* `#25735 `__: MAINT: Bump pypa/cibuildwheel from 2.16.4 to 2.16.5 +* `#25736 `__: MAINT: Bump actions/cache from 3 to 4 +* `#25738 `__: MAINT: add ``trapezoid`` as the new name for ``trapz`` +* `#25739 `__: TST: run macos_arm64 test on Github Actions +* `#25740 `__: DOC: Fix doctest failure in ``polynomial`` +* `#25745 `__: DEV: add .editorconfig for C/C++ +* `#25751 `__: DOC: Update ruff rule instruction +* `#25753 `__: DOC: Fix ``ufunc.reduceat`` doc for ``dtype`` +* `#25754 `__: API: Expose the dtype C API +* `#25758 `__: DOC: Fix summary table in linalg routines document +* `#25761 `__: DEP: Finalize future warning for shape=1 descriptor dropping... +* `#25763 `__: CI/BLD: fix bash script tests for cibw +* `#25768 `__: DOC: in ufuncs ``dtype`` is not ignored when ``out`` is passed +* `#25772 `__: MAINT: Update main after 1.26.4 release. +* `#25774 `__: DOC: Update docs build dependencies install cmd +* `#25775 `__: ENH: Add index/rindex ufuncs for unicode and bytes dtypes +* `#25776 `__: DOC: Add missing ``np.size`` entry to routines +* `#25779 `__: MAINT: Bump actions/upload-artifact from 4.3.0 to 4.3.1 +* `#25780 `__: MAINT: Bump larsoner/circleci-artifacts-redirector-action from... +* `#25783 `__: DOC: Remove references to ``distutils`` in simd document +* `#25785 `__: MAINT: Bump actions/setup-node from 4.0.1 to 4.0.2 +* `#25788 `__: ENH: Improve performance of np.tensordot +* `#25789 `__: MAINT,API: Always export static inline version of array accessor. 
+* `#25790 `__: MAINT: Private device struct shouldn't be in public header +* `#25791 `__: ENH: Add rest of unary ufuncs for unicode/bytes dtypes +* `#25792 `__: API: Create ``PyArray_DescrProto`` for legacy descriptor registration +* `#25793 `__: MAINT: update docstrings of string ufuncs to mention StringDType +* `#25794 `__: DEP: expire some deprecations +* `#25795 `__: DOC: fix docstring example in f2py.get_include +* `#25796 `__: MAINT: combine string ufuncs by passing on auxiliary data +* `#25797 `__: MAINT: Move ``NPY_VSTRING`` and make ``NPY_NTYPES NPY_TYPES_LEGACY`` +* `#25800 `__: REV: revert tuple/list return type changes for ``*split`` functions +* `#25801 `__: DOC: Update ``np.char.array`` docstring +* `#25802 `__: MAINT,API: Make metadata, c_metadata, fields, and names only... +* `#25803 `__: BLD: restore 'setup-args=-Duse-ilp64=true' in cibuildwheel [wheel... +* `#25804 `__: MAINT: Use preprocessor directive rather than code when adding... +* `#25806 `__: DOC: Update the CPU build options document +* `#25807 `__: DOC: Fix code-block formatting for new PyArray_RegisterDataType... +* `#25812 `__: API: Make ``descr->f`` only accessible through ``PyDataType_GetArrFuncs`` +* `#25813 `__: DOC: Update genfromtxt documentation +* `#25814 `__: MAINT: Use ``_ITEMSIZE`` rather than ``_DESCR(arr)->elsize`` +* `#25816 `__: API: Introduce ``PyDataType_FLAGS`` accessor for public access +* `#25817 `__: ENH: Add more const qualifiers to C API arguments +* `#25821 `__: BUG: ensure that FFT routines can deal with integer and bool... +* `#25822 `__: BLD: use homebrew gfortran +* `#25825 `__: MAINT: Bump actions/dependency-review-action from 4.0.0 to 4.1.0 +* `#25827 `__: DOC: run towncrier to consolidate the 2.0.0 release notes to... 
+* `#25828 `__: DOC: two minor fixes for DType API doc formatting +* `#25830 `__: DOC: Fix typo in nep 0052 +* `#25832 `__: DOC: add back 2.0.0 release note snippets that went missing +* `#25833 `__: DOC: Fix some reference warnings +* `#25834 `__: BUG: ensure static_string.buf is never NULL for a non-null string +* `#25837 `__: DEP: removed deprecated product/cumproduct/alltrue/sometrue +* `#25838 `__: MAINT: Update pinned setuptools for Python < 3.12 +* `#25839 `__: TST: fix Cython compile test which invokes ``meson`` +* `#25842 `__: DOC: Fix some incorrect rst markups +* `#25843 `__: BUG: ensure empty cholesky upper does not hang. +* `#25845 `__: DOC: Fix some typos +* `#25847 `__: MAINT: Adjust rest of string ufuncs to static_data approach +* `#25851 `__: DOC: Fix some reference warnings +* `#25852 `__: ENH: Support exotic installation of nvfortran +* `#25854 `__: BUG: Correctly refcount array descr in empty_like +* `#25855 `__: MAINT: Bump actions/dependency-review-action from 4.1.0 to 4.1.2 +* `#25856 `__: MAINT: Remove unnnecessary size argument in StringDType initializer +* `#25861 `__: CI: make chocolatey fail when a dependency doesn't install +* `#25862 `__: Revert "API: Make ``descr->f`` only accessible through ``PyDataType_GetArrFuncs``" +* `#25864 `__: ENH: Implement multiply ufunc for unicode & bytes +* `#25865 `__: ENH: print traceback after printing ABI mismatch error +* `#25866 `__: API: Fix compat header and add new import helpers +* `#25868 `__: MAINT: Bump actions/dependency-review-action from 4.1.2 to 4.1.3 +* `#25870 `__: BUG: use print to actually output something +* `#25873 `__: Update Highway to 1.1.0 +* `#25874 `__: MAINT: Bump conda-incubator/setup-miniconda from 3.0.1 to 3.0.2 +* `#25876 `__: API: Remove no-op C API functions +* `#25877 `__: BUG: Include broadcasting for ``rtol`` argument in ``matrix_rank`` +* `#25879 `__: DOC: Add a document entry of ``PyArray_DescrProto`` +* `#25880 `__: DOC: README.md: point to user-friendly OpenSSF 
ScoreCard display +* `#25881 `__: BUG: Fix gh-25867 for used functions and subroutines +* `#25883 `__: BUG: fix typo in 'message' static variable of TestDeprecatedDTypeParenthesizedRepeatCount +* `#25884 `__: BUG: Fix typo in LEGACY_CONS_NON_NEGATVE_INBOUNDS_LONG +* `#25885 `__: DOC: fix typos +* `#25886 `__: MAINT: fix code comment typos in numpy/ directory +* `#25887 `__: BUG: Fix ``PyArray_FILLWBYTE`` Cython declaration +* `#25889 `__: CI: run apt update before apt-install in linux-blas workflow +* `#25890 `__: MAINT: refactor StringDType static_string implementation a bit. +* `#25891 `__: ENH: Add expandtabs ufunc for string & unicode dtypes +* `#25894 `__: CI, BLD, TST: Re-enable Emscripten/Pyodide CI job for NumPy +* `#25896 `__: ENH: implement stringdtype <-> timedelta roundtrip casts +* `#25897 `__: API: Make descr->f only accessible through ``PyDataType_GetArrFuncs`` +* `#25900 `__: CI, MAINT: use ``fetch-tags: true`` to speed up NumPy checkouts +* `#25901 `__: BLD: Add meson check to test presence of pocketfft git submodule +* `#25902 `__: MAINT: Bump conda-incubator/setup-miniconda from 3.0.2 to 3.0.3 +* `#25905 `__: CI: allow job matrixes to run all jobs even when one fails +* `#25911 `__: MAINT: remove ``numpy.array_api`` module +* `#25912 `__: MAINT: Bump actions/cache from 4.0.0 to 4.0.1 +* `#25914 `__: API: Remove broadcasting ambiguity from np.linalg.solve +* `#25915 `__: DOC: Fix some document build errors about rst markups +* `#25919 `__: BUG: Ensure non-array logspace base does not influence dtype... 
+* `#25920 `__: NEP: update status fields of many NEPs +* `#25921 `__: DOC: update and copy-edit 2.0.0 release notes +* `#25922 `__: BUG: fix handling of copy keyword argument when calling __array__ +* `#25924 `__: BUG: remove vestiges of array_api [wheel build] +* `#25928 `__: DOC: Add note about np.char & np.strings in 2.0 migration guide +* `#25929 `__: DOC: Add mention of complex number changes to migration guide +* `#25931 `__: BUG: fix reference leak in PyArray_FromArrayAttr_int +* `#25932 `__: TST: skip rather than xfail a few tests to address CI log pollution +* `#25933 `__: MAINT: ensure towncrier can be run >1x, and is included in ``spin``... +* `#25937 `__: DOC: 2.0 release highlights and compat notes changes +* `#25939 `__: DOC: Add entries of ``npy_datetime`` and ``npy_timedelta`` +* `#25943 `__: API: Restructure the dtype struct to be new dtype friendly +* `#25944 `__: BUG: avoid incorrect stringdtype allocator sharing from array... +* `#25945 `__: BLD: try to build most macOS wheels on GHA +* `#25946 `__: DOC: Add and fixup/move docs for descriptor changes +* `#25947 `__: DOC: Fix incorrect rst markups of c function directives +* `#25948 `__: MAINT: Introduce NPY_FEATURE_VERSION_STRING and report it in... +* `#25950 `__: BUG: Fix reference leak in niche user old user dtypes +* `#25952 `__: BLD: use hash for mamba action +* `#25954 `__: API: Expose ``PyArray_Pack`` +* `#25955 `__: API: revert position-only 'start' in 'np.arange' +* `#25956 `__: Draft: [BUG] Fix Polynomial representation tests +* `#25958 `__: BUG: avoid incorrect type punning in NpyString_acquire_allocators +* `#25961 `__: TST, MAINT: Loosen tolerance in fft test. 
+* `#25962 `__: DOC: fix typos and rearrange CI +* `#25965 `__: CI: fix wheel tags for Cirrus macOS arm64 +* `#25973 `__: DOC: Backport gh-25971 and gh-25972 +* `#25977 `__: REL: Prepare for the NumPy 2.0.0b1 release [wheel build] +* `#25983 `__: CI: fix last docbuild warnings +* `#25986 `__: BLD: push a tag builds a wheel +* `#25987 `__: REL: Prepare for the NumPy 2.0.0b1 release (2) [wheel build] +* `#25994 `__: DOC: remove reverted release blurb [skip actions][skip azp][skip... +* `#25996 `__: CI: don't use ``fetch-tags`` in wheel build jobs +* `#25997 `__: REL: Prepare for the NumPy 2.0.0b1 release (3) +* `#26008 `__: DOC: mention the ``exceptions`` namespace in the 2.0.0 release... +* `#26009 `__: MAINT: Remove sdist task from pavement.py +* `#26022 `__: BUG: Fixes np.put receiving empty array causes endless loop +* `#26023 `__: MAINT: Bump pypa/cibuildwheel from 2.16.5 to 2.17.0 +* `#26034 `__: MAINT: remove now-unused ``NPY_USE_C99_FORMAT`` +* `#26035 `__: MAINT: remove the now-unused ``NPY_NO_SIGNAL`` +* `#26036 `__: MAINT: handle ``NPY_ALLOW_THREADS`` and related build option... +* `#26040 `__: BUG: Filter out broken Highway platform +* `#26041 `__: BLD: omit pp39-macosx_arm64 from matrix [wheel build] +* `#26042 `__: BUG: fix kwarg handling in assert_warn [skip cirrus][skip azp] +* `#26047 `__: ENH: install StringDType promoter for add +* `#26048 `__: MAINT: avoid use of flexible array member in public header +* `#26049 `__: BUG: raise error trying to coerce object arrays containing timedelta64('NaT')... 
+* `#26050 `__: BUG: fix reference count leak in __array__ internals +* `#26051 `__: BUG: add missing error handling in string to int cast internals +* `#26052 `__: MAINT: Remove partition and split-like functions from numpy.strings +* `#26053 `__: CI: clean up some unused ``choco install`` invocations +* `#26068 `__: DOC: Backport np.strings docstrings +* `#26073 `__: DOC clarifications on debugging numpy +* `#26074 `__: BUG: fix logic error in stringdtype maximum/minimum ufunc +* `#26075 `__: BUG: Allow the new string dtype summation to work +* `#26076 `__: MAINT: Make PyArrayMultiIterObject struct "smaller" +* `#26085 `__: MAINT: Bump actions/cache from 4.0.1 to 4.0.2 +* `#26109 `__: BUG: adapt cython files to new complex declarations (#26080) +* `#26110 `__: TYP: Adjust ``np.random.integers`` and ``np.random.randint`` +* `#26111 `__: API: Require reduce promoters to start with None to match +* `#26118 `__: MAINT: install all-string promoter for multiply +* `#26122 `__: BUG: fix reference counting error in stringdtype setup +* `#26124 `__: MAINT,API: Const qualify some new API (mostly new DType API) +* `#26127 `__: BUG: update pocketfft to unconditionally disable use of aligned_alloc +* `#26131 `__: MAINT: add missing noexcept clauses +* `#26154 `__: MAINT: Bump actions/setup-python from 5.0.0 to 5.1.0 +* `#26167 `__: MAINT: Escalate import warning to an import error +* `#26169 `__: BUG,MAINT: Fix __array__ bugs and simplify code +* `#26170 `__: DOC: mention np.lib.NumPyVersion in the 2.0 migration guide +* `#26171 `__: ENH: inherit numerical dtypes from abstract ones. +* `#26173 `__: DOC, TST: make ``numpy.version`` officially public +* `#26186 `__: MAINT: Update Pyodide to 0.25.1 +* `#26192 `__: BUG: Infinite Loop in numpy.base_repr +* `#26193 `__: BUG: fix reference counting error in wrapping_method_resolve_descriptors +* `#26194 `__: DOC: Mention ``copy=True`` for ``__array__`` method in the migration... 
+* `#26205 `__: BUG: introduce PyArray_SafeCast to fix issues around stringdtype... +* `#26231 `__: API: Readd np.bool_ typing stub +* `#26256 `__: MAINT: Update array-api-tests job +* `#26259 `__: DOC: Backport various documentation fixes +* `#26262 `__: BLD: update to OpenBLAS 0.3.27.0.1 +* `#26265 `__: MAINT: Fix some typos +* `#26272 `__: BUG: Fixes for ``np.vectorize``. +* `#26283 `__: DOC: correct PR referenced in __array_wraps__ change note +* `#26293 `__: BUG: Ensure seed sequences are restored through pickling (#26260) +* `#26297 `__: BUG: Workaround for Intel Compiler mask conversion bug +* `#26305 `__: DOC: Bump pydata-sphinx-theme version +* `#26306 `__: MAINT: Robust string meson template substitution +* `#26307 `__: BLD: use newer openblas wheels [wheel build] +* `#26312 `__: DOC: Follow-up fixes for new theme +* `#26330 `__: BUG: Fix invalid constructor in string_fastsearch.h with C++... +* `#26331 `__: MAINT: address improper error handling and cleanup for ``spin`` +* `#26332 `__: BUG: use PyArray_SafeCast in array_astype +* `#26334 `__: MAINT: Disable compiler sanitizer tests on 2.0.x +* `#26351 `__: ENH: introduce a notion of "compatible" stringdtype instances... +* `#26357 `__: DOC: Added small clarification note, based on discussion in issue... +* `#26358 `__: BUG: Fix rfft for even input length. +* `#26360 `__: MAINT: Simplify bugfix for even rfft +* `#26373 `__: DOC: fix np.unique release notes [skip cirrus] +* `#26374 `__: ENH: add support for nan-like null strings in string replace +* `#26393 `__: BUG: Make sure that NumPy scalars are supported by can_cast +* `#26400 `__: MNT: more gracefully handle spin adding arguments to functions... +* `#26402 `__: DOC: Add missing methods to numpy.strings docs +* `#26403 `__: DOC: Fix links in random documentation. 
+* `#26417 `__: BUG: support nan-like null strings in [l,r]strip +* `#26423 `__: DOC: Fix some typos and incorrect markups +* `#26424 `__: DOC: add reference docs for NpyString C API +* `#26425 `__: REL: Prepare for the NumPy 2.0.0rc2 release [wheel build] +* `#26427 `__: TYP: Fix ``fromrecords`` type hint and bump mypy to 1.10.0. +* `#26457 `__: MAINT: Various CI fixes +* `#26458 `__: BUG: Use Python pickle protocol version 4 for np.save (#26388) +* `#26459 `__: BUG: fixes for three related stringdtype issues (#26436) +* `#26460 `__: MAINT: Bump pypa/cibuildwheel from 2.17.0 to 2.18.0 +* `#26461 `__: BUG: int32 and intc should both appear in sctypes +* `#26482 `__: DOC: Skip API documentation for numpy.distutils with Python 3.12... +* `#26527 `__: DOC: fix NEP 50 reference +* `#26536 `__: BUG: cast missing in PyPy-specific f2py code, pin spin in CI... +* `#26539 `__: ENH: improve the error raised by ``numpy.isdtype`` +* `#26540 `__: BLD: Make NumPy build reproducibly +* `#26541 `__: BUG: fix incorrect error handling for dtype('a') deprecation +* `#26543 `__: BUG: fix assert in PyArry_ConcatenateArrays with StringDType +* `#26544 `__: BUG: Fix handling of size=() in Generator.choice when a.ndim... +* `#26554 `__: BUG: Fix in1d fast-path range +* `#26555 `__: BUG: Fix typo in array-wrap code that lead to memory leak +* `#26569 `__: MAINT: Avoid by-pointer parameter passing for LINEARIZE_DATA_t... +* `#26583 `__: BUG: Fix memory leaks found with valgrind +* `#26584 `__: MAINT: Unpin pydata-sphinx-theme +* `#26587 `__: DOC: Added web docs for missing ma and strings routines +* `#26591 `__: BUG: Fix memory leaks found by valgrind +* `#26592 `__: DOC: Various documentation updates +* `#26635 `__: DOC: update 2.0 docs +* `#26651 `__: DOC: Update 2.0 migration guide +* `#26652 `__: BUG: Disallow string inputs for copy keyword in np.array and... +* `#26653 `__: BUG: Fix F77 ! comment handling +* `#26654 `__: DOC: Set default as ``-j 1`` for spin docs and move ``-W`` to... 
+* `#26657 `__: BUG: fix memory leaks found with valgrind (next) +* `#26659 `__: BUG: Replace dots with underscores in f2py meson backend for... +* `#26673 `__: CI: upgrade FreeBSD Cirrus job from FreeBSD 13.2 to 14.0 +* `#26674 `__: MNT: catch invalid fixed-width dtype sizes +* `#26677 `__: CI: Use default llvm on Windows. +* `#26694 `__: DOC: document workaround for deprecation of dim-2 inputs to `cross` +* `#26695 `__: BUG: Adds asanyarray to start of linalg.cross (#26667) +* `#26696 `__: BUG: weighted nanpercentile, nanquantile and multi-dim q +* `#26697 `__: BUG: Fix bug in numpy.pad() + diff --git a/doc/changelog/2.0.1-changelog.rst b/doc/changelog/2.0.1-changelog.rst new file mode 100644 index 000000000000..5a0b9dd207fc --- /dev/null +++ b/doc/changelog/2.0.1-changelog.rst @@ -0,0 +1,52 @@ + +Contributors +============ + +A total of 15 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* @vahidmech + +* Alex Herbert + +* Charles Harris +* Giovanni Del Monte + +* Leo Singer +* Lysandros Nikolaou +* Matti Picus +* Nathan Goldbaum +* Patrick J. Roddy + +* Raghuveer Devulapalli +* Ralf Gommers +* Rostan Tabet + +* Sebastian Berg +* Tyler Reddy +* Yannik Wicke + + +Pull requests merged +==================== + +A total of 24 pull requests were merged for this release. 
+ +* `#26711 `__: MAINT: prepare 2.0.x for further development +* `#26792 `__: TYP: fix incorrect import in ``ma/extras.pyi`` stub +* `#26793 `__: DOC: Mention '1.25' legacy printing mode in ``set_printoptions`` +* `#26794 `__: DOC: Remove mention of NaN and NAN aliases from constants +* `#26821 `__: BLD: Fix x86-simd-sort build failure on openBSD +* `#26822 `__: BUG: Ensure output order follows input in numpy.fft +* `#26823 `__: TYP: fix missing sys import in numeric.pyi +* `#26832 `__: DOC: remove hack to override _add_newdocs_scalars (#26826) +* `#26835 `__: BUG: avoid side-effect of 'include complex.h' +* `#26836 `__: BUG: fix max_rows and chunked string/datetime reading in ``loadtxt`` +* `#26837 `__: BUG: fix PyArray_ImportNumPyAPI under -Werror=strict-prototypes +* `#26856 `__: DOC: Update some documentation +* `#26868 `__: BUG: fancy indexing copy +* `#26869 `__: BUG: Mismatched allocation domains in ``PyArray_FillWithScalar`` +* `#26870 `__: BUG: Handle --f77flags and --f90flags for meson [wheel build] +* `#26887 `__: BUG: Fix new DTypes and new string promotion when signature is... +* `#26888 `__: BUG: remove numpy.f2py from excludedimports +* `#26959 `__: BUG: Quantile closest_observation to round to nearest even order +* `#26960 `__: BUG: Fix off-by-one error in amount of characters in strip +* `#26961 `__: API: Partially revert unique with return_inverse +* `#26962 `__: BUG,MAINT: Fix utf-8 character stripping memory access +* `#26963 `__: BUG: Fix out-of-bound minimum offset for in1d table method +* `#26971 `__: BUG: fix f2py tests to work with v2 API +* `#26995 `__: BUG: Add object cast to avoid warning with limited API diff --git a/doc/changelog/2.0.2-changelog.rst b/doc/changelog/2.0.2-changelog.rst new file mode 100644 index 000000000000..6622407dd8f6 --- /dev/null +++ b/doc/changelog/2.0.2-changelog.rst @@ -0,0 +1,45 @@ + +Contributors +============ + +A total of 13 people contributed to this release. 
People with a "+" by their +names contributed a patch for the first time. + +* Bruno Oliveira + +* Charles Harris +* Chris Sidebottom +* Christian Heimes + +* Christopher Sidebottom +* Mateusz SokÃŗÅ‚ +* Matti Picus +* Nathan Goldbaum +* Pieter Eendebak +* Raghuveer Devulapalli +* Ralf Gommers +* Sebastian Berg +* Yair Chuchem + + +Pull requests merged +==================== + +A total of 19 pull requests were merged for this release. + +* `#27000 `__: REL: Prepare for the NumPy 2.0.1 release [wheel build] +* `#27001 `__: MAINT: prepare 2.0.x for further development +* `#27021 `__: BUG: cfuncs.py: fix crash when sys.stderr is not available +* `#27022 `__: DOC: Fix migration note for ``alltrue`` and ``sometrue`` +* `#27061 `__: BUG: use proper input and output descriptor in array_assign_subscript... +* `#27073 `__: BUG: Mirror VQSORT_ENABLED logic in Quicksort +* `#27074 `__: BUG: Bump Highway to latest master +* `#27077 `__: BUG: Off by one in memory overlap check +* `#27122 `__: BUG: Use the new ``npyv_loadable_stride_`` functions for ldexp and... 
+* `#27126 `__: BUG: Bump Highway to latest +* `#27128 `__: BUG: add missing error handling in public_dtype_api.c +* `#27129 `__: BUG: fix another cast setup in array_assign_subscript +* `#27130 `__: BUG: Fix building NumPy in FIPS mode +* `#27131 `__: BLD: update vendored Meson for cross-compilation patches +* `#27146 `__: MAINT: Scipy openblas 0.3.27.44.4 +* `#27151 `__: BUG: Do not accidentally store dtype metadata in ``np.save`` +* `#27195 `__: REV: Revert undef I and document it +* `#27213 `__: BUG: Fix NPY_RAVEL_AXIS on backwards compatible NumPy 2 builds +* `#27279 `__: BUG: Fix array_equal for numeric and non-numeric scalar types diff --git a/doc/changelog/2.1.0-changelog.rst b/doc/changelog/2.1.0-changelog.rst new file mode 100644 index 000000000000..af7f5a3b07c7 --- /dev/null +++ b/doc/changelog/2.1.0-changelog.rst @@ -0,0 +1,592 @@ + +Contributors +============ + +A total of 110 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. 
+ +* !ogidig5 + +* !partev +* !vahidmech + +* !h-vetinari +* Aaron Meurer +* Adrin Jalali + +* Agriya Khetarpal +* Ajay Kumar Janapareddi + +* Alex Herbert + +* Andras Deak +* Andrej Zhilenkov + +* Andrew Nelson +* Anne Gunn + +* Antony Lee +* Arnaud Ma + +* Arun Kannawadi + +* Arun Pa + +* Bas van Beek +* Ben Woodruff + +* Bruno Oliveira + +* Carlos Henrique Hermanny Moreira da Silva + +* Charles Harris +* Chris Sidebottom +* Christian Heimes + +* Christian Lorentzen +* Christopher Sidebottom +* Christopher Titchen + +* ClÊment Robert +* Cobalt Yang + +* Devyani Chavan + +* Dimitri Papadopoulos Orfanos +* Ebigide Jude + +* Eric Xie + +* Evgeni Burovski +* Fabian Vogt + +* Francisco Sousa + +* GUAN MING + +* Gabriel Fougeron + +* Gagandeep Singh +* Giovanni Del Monte + +* Gonzalo Tornaría + +* Gonçalo BÃĄrias + +* Hugo van Kemenade +* Jakob Stevens Haas + +* Jakob Unfried + +* James Joseph Thomas + +* Jean Lecordier + +* Joren Hammudoglu + +* Joris Van den Bossche +* Julia Poo + +* Justus Magin +* Jyn Spring ᐴ春 +* KIU Shueng Chuan +* Karthik Gangula + +* Karthik Kaiplody + +* Kevin Sheppard +* Kristoffer Pedersen + +* Leo Singer +* Liang Yan +* Liangyu Zhang + +* Lucas Colley +* Luiz Eduardo Amaral + +* Lysandros Nikolaou +* Marcel Loose + +* Marten van Kerkwijk +* Mateusz SokÃŗÅ‚ +* Matt Haberland +* Matt Thompson + +* Matthew Roeschke + +* Matthew Thompson + +* Matthias Bussonnier +* Matti Picus +* Melissa Weber Mendonça +* Milica Dančuk + +* Moritz Schreiber + +* Nathan Goldbaum +* Olivier Grisel +* Patrick J. 
Roddy + +* Paul Juma Otieno + +* Pieter Eendebak +* Raghuveer Devulapalli +* Ralf Gommers +* Raquel Braunschweig + +* Robert Kern +* Rohit Goswami +* Romain Geissler + +* Ross Barnowski +* Rostan Tabet + +* Sam Morley + +* Sayed Adel +* Sean Cheah +* Sebastian Berg +* Serge Guelton +* Slobodan + +* Stefan van der Walt +* Thomas A Caswell +* Thomas Li +* Timo RÃļhling + +* Tsvika Shapira + +* Tuhin Sharma + +* Tyler Reddy +* Victor Eijkhout + +* Warren Weckesser +* Warrick Ball +* Xiangyi Wang + +* Yair Chuchem + +* Yang Liu + +* Yannik Wicke + +* Yevhen Amelin + +* Yuki K + +Pull requests merged +==================== + +A total of 469 pull requests were merged for this release. + +* `#12150 `__: ENH: When histogramming data with integer dtype, force bin width... +* `#24448 `__: TST: add some tests of np.log for complex input. +* `#25704 `__: DOC: quantile: correct/simplify documentation +* `#25705 `__: DOC: Add documentation explaining our promotion rules +* `#25781 `__: ENH: Convert fp32 sin/cos from C universal intrinsics to C++... 
+* `#25908 `__: ENH: Add center/ljust/rjust/zfill ufuncs for unicode and bytes +* `#25913 `__: NEP: NEP 55 updates and add @mhvk as an author +* `#25963 `__: BUG: Fix bug in numpy.pad() +* `#25964 `__: CI: fix last docbuild warnings +* `#25970 `__: MAINT: Prepare main for NumPy 2.1.0 development +* `#25971 `__: DOC: Fix a note section markup in ``dtype.rst`` +* `#25972 `__: DOC: Fix module setting of ``MaskedArray`` +* `#25974 `__: BUG: Raise error for negative-sized fixed-width dtype +* `#25975 `__: BUG: Fixes np.put receiving empty array causes endless loop +* `#25981 `__: BLD: push a tag builds a wheel +* `#25985 `__: BLD: omit pp39-macosx_arm64 from matrix +* `#25988 `__: DOC: Remove unused parameter description +* `#25990 `__: CI: clean up some unused `choco install` invocations +* `#25995 `__: CI: don't use ``fetch-tags`` in wheel build jobs +* `#25999 `__: BUG: fix kwarg handling in assert_warn [skip cirrus][skip azp] +* `#26000 `__: BUG: Filter out broken Highway platform +* `#26003 `__: MAINT: Bump pypa/cibuildwheel from 2.16.5 to 2.17.0 +* `#26005 `__: DOC: indicate stringdtype support in docstrings for string operations +* `#26006 `__: TST: remove usage of ProcessPoolExecutor in stringdtype tests +* `#26007 `__: MAINT: Remove sdist task from pavement.py +* `#26011 `__: DOC: mention the ``exceptions`` namespace in the 2.0.0 release... +* `#26012 `__: ENH: install StringDType promoter for add +* `#26014 `__: MAINT: remove the now-unused ``NPY_NO_SIGNAL`` +* `#26015 `__: MAINT: remove now-unused ``NPY_USE_C99_FORMAT`` +* `#26016 `__: MAINT: handle ``NPY_ALLOW_THREADS`` and related build option... +* `#26017 `__: MAINT: avoid use of flexible array member in public header +* `#26024 `__: BUG: raise error trying to coerce object arrays containing timedelta64('NaT')... 
+* `#26025 `__: BUG: fix reference count leak in __array__ internals +* `#26027 `__: BUG: add missing error handling in string to int cast internals +* `#26033 `__: MAINT: Remove partition and split-like functions from numpy.strings +* `#26045 `__: ENH: Optimize np.power for integer type +* `#26055 `__: ENH: Optimize np.power(x, 2) for double and float type +* `#26063 `__: MAINT,API: Const qualify some new API (mostly new DType API) +* `#26064 `__: MAINT: Make PyArrayMultiIterObject struct "smaller" +* `#26066 `__: BUG: Allow the new string dtype summation to work +* `#26067 `__: DOC: note stringdtype output support in np.strings docstrings +* `#26070 `__: DOC clarifications on debugging numpy +* `#26071 `__: BUG: fix logic error in stringdtype maximum/minimum ufunc +* `#26080 `__: BUG: adapt cython files to new complex declarations +* `#26081 `__: TYP: Make array _ShapeType bound and covariant +* `#26082 `__: ENH: Add partition/rpartition ufunc for string dtypes +* `#26083 `__: MAINT: Bump actions/cache from 4.0.1 to 4.0.2 +* `#26089 `__: TYP: Adjust typing for ``np.random.integers`` and ``np.random.randint`` +* `#26090 `__: API: Require reduce promoters to start with None to match +* `#26095 `__: MAINT: Bump actions/dependency-review-action from 4.1.3 to 4.2.3 +* `#26097 `__: DOC: Mention ``copy=True`` for ``__array__`` method in the migration... +* `#26099 `__: DOC: fix typo in doc/source/user/absolute_beginners.rst +* `#26103 `__: API: Default to hidden visibility for API tables +* `#26105 `__: MAINT: install all-string promoter for multiply +* `#26108 `__: MAINT: Remove unnecessarily defensive code from dlpack deleter +* `#26112 `__: TST: fix incorrect dtype in test +* `#26113 `__: BLD: Do not use -O3 flag when building in debug mode +* `#26116 `__: ENH: inherit numerical dtypes from abstract ones. 
+* `#26119 `__: BUG: fix reference counting error in stringdtype setup +* `#26123 `__: BUG: update pocketfft to unconditionaly disable use of aligned_alloc +* `#26125 `__: DOC: Bump pydata-sphinx-theme version +* `#26128 `__: DOC: Update absolute_beginners.rst +* `#26129 `__: MAINT: add missing noexcept clauses +* `#26130 `__: ENH: Optimize performance of np.atleast_1d +* `#26133 `__: MAINT: Bump actions/dependency-review-action from 4.2.3 to 4.2.4 +* `#26134 `__: CI, BLD: Push NumPy's Emscripten/Pyodide wheels nightly to Anaconda.org... +* `#26135 `__: BUG: masked array division should ignore all FPEs in mask calculation +* `#26136 `__: BUG: fixed datetime64[ns] conversion issue in numpy.vectorize,... +* `#26138 `__: MAINT: Bump actions/setup-python from 5.0.0 to 5.1.0 +* `#26139 `__: MAINT: Bump actions/dependency-review-action from 4.2.4 to 4.2.5 +* `#26142 `__: BUG,MAINT: Fix __array__ bugs and simplify code +* `#26147 `__: BUG: introduce PyArray_SafeCast to fix issues around stringdtype... 
+* `#26149 `__: MAINT: Escalate import warning to an import error +* `#26151 `__: BUG: Fix test_impossible_feature_enable failing without BASELINE_FEAT +* `#26155 `__: NEP: add NEP 56 mailing list resolution +* `#26160 `__: ENH: Improve performance of np.broadcast_arrays and np.broadcast_shapes +* `#26162 `__: BUG: Infinite Loop in numpy.base_repr +* `#26168 `__: DOC: mention np.lib.NumPyVersion in the 2.0 migration guide +* `#26172 `__: DOC, TST: make ``numpy.version`` officially public +* `#26174 `__: MAINT: Fix failure in routines.version.rst +* `#26182 `__: DOC: Update absolute_beginners.rst +* `#26185 `__: MAINT: Update Pyodide to 0.25.1 +* `#26187 `__: TST: Use platform.machine() for improved portability on riscv64 +* `#26189 `__: MNT: use pythoncapi_compat.h in npy_compat.h +* `#26190 `__: BUG: fix reference counting error in wrapping_method_resolve_descriptors +* `#26207 `__: TST: account for immortal objects in test_iter_refcount +* `#26210 `__: API: Readd ``np.bool_`` typing stub +* `#26212 `__: BENCH: Add benchmarks for np.power(x,2) and np.power(x,0.5) +* `#26213 `__: MNT: try updating pythoncapi-compat +* `#26215 `__: API: Enforce one copy for ``__array__`` when ``copy=True`` +* `#26219 `__: ENH: Enable RVV CPU feature detection +* `#26222 `__: MAINT: Drop Python 3.9 +* `#26227 `__: MAINT: utilize ufunc API const correctness internally +* `#26229 `__: TST: skip limited API test on nogil python build +* `#26232 `__: MAINT: fix typo in _add_newdoc_ufunc docstring +* `#26235 `__: Update numpy.any documentation example +* `#26237 `__: MAINT: Update ``array-api-tests`` job +* `#26239 `__: DOC: add versionadded for copy keyword in np.asarray docstring +* `#26241 `__: DOC: Fixup intp/uintp documentation for ssize_t/size_t changes +* `#26245 `__: DOC: Update ``__array__`` ``copy`` keyword docs +* `#26246 `__: MNT: migrate PyList_GetItem usages to PyList_GetItemRef +* `#26248 `__: MAINT,BUG: Robust string meson template substitution +* `#26251 `__: MNT: disable 
the allocator cache for nogil builds +* `#26258 `__: BLD: update to OpenBLAS 0.3.27 +* `#26260 `__: BUG: Ensure seed sequences are restored through pickling +* `#26261 `__: ENH: introduce a notion of "compatible" stringdtype instances +* `#26263 `__: MAINT: fix typo +* `#26264 `__: MAINT: fix typo in #include example +* `#26267 `__: MAINT: Update URL in nep 0014 - domain change +* `#26268 `__: API: Disallow 0D input arrays in ``nonzero`` +* `#26270 `__: BUG: ensure np.vectorize doesn't truncate fixed-width strings +* `#26273 `__: ENH: Bump Highway to HEAD and remove platform filter +* `#26274 `__: BLD: use install-tags to optionally install tests +* `#26280 `__: ENH: Speedup clip for floating point +* `#26281 `__: BUG: Workaround for Intel Compiler mask conversion bug +* `#26282 `__: MNT: replace _PyDict_GetItemStringWithError with PyDict_GetItemStringRef +* `#26284 `__: TST: run the smoke tests on more python versions +* `#26285 `__: ENH: Decrease wall time of ``ma.cov`` and ``ma.corrcoef`` +* `#26286 `__: BLD: ensure libnpymath and highway static libs use hidden visibility +* `#26292 `__: API: Add ``shape`` and ``copy`` arguments to ``numpy.reshape`` +* `#26294 `__: MNT: disable the coercion cache for the nogil build +* `#26295 `__: CI: add llvm/clang sanitizer tests +* `#26299 `__: MAINT: Pin sphinx to version 7.2.6 +* `#26302 `__: BLD: use newer openblas wheels [wheel build] +* `#26303 `__: DOC: add explanation of dtype to parameter values for np.append +* `#26304 `__: MAINT: address improper error handling and cleanup for ``spin`` +* `#26309 `__: MAINT: Bump actions/upload-artifact from 4.3.1 to 4.3.2 +* `#26311 `__: DOC: Follow-up fixes for new theme +* `#26313 `__: MAINT: Cleanup ``vecdot``'s signature, typing, and importing +* `#26317 `__: BUG: use PyArray_SafeCast in array_astype +* `#26319 `__: BUG: fix spin bench not running on Windows +* `#26320 `__: DOC: Add replacement NEP links in superseded, replaced-by fields +* `#26322 `__: DOC: Documentation and 
examples for conversion of np.timedelta64... +* `#26324 `__: BUG: Fix invalid constructor in string_fastsearch.h with C++... +* `#26325 `__: TST: Skip Cython test for editable install +* `#26329 `__: MAINT: Bump actions/upload-artifact from 4.3.2 to 4.3.3 +* `#26338 `__: MAINT: update x86-simd-sort to latest +* `#26340 `__: DOC: Added small clarification note, based on discussion in issue... +* `#26347 `__: MAINT: Bump conda-incubator/setup-miniconda from 3.0.3 to 3.0.4 +* `#26348 `__: NOGIL: Make loop data cache and dispatch cache thread-safe in... +* `#26353 `__: BUG: ensure text padding ufuncs handle stringdtype nan-like nulls +* `#26354 `__: BUG: Fix rfft for even input length. +* `#26355 `__: ENH: add support for nan-like null strings in string replace +* `#26359 `__: MAINT: Simplify bugfix for even rfft +* `#26362 `__: MAINT: Bump actions/dependency-review-action from 4.2.5 to 4.3.1 +* `#26363 `__: MAINT: Bump actions/dependency-review-action from 4.3.1 to 4.3.2 +* `#26364 `__: TST: static types are now immortal in the default build too +* `#26368 `__: [NOGIL] thread local promotion state +* `#26369 `__: DOC: fix np.unique release notes [skip cirrus] +* `#26372 `__: BUG: Make sure that NumPy scalars are supported by can_cast +* `#26377 `__: TYP: Fix incorrect type hint for creating a recarray from fromrecords +* `#26378 `__: DOC: Update internal links for generator.rst and related +* `#26384 `__: BUG: Fix incorrect return type of item with length 0 from chararray.__getitem__ +* `#26385 `__: DOC: Updated remaining links in random folder +* `#26386 `__: DOC: Improve example on array broadcasting +* `#26388 `__: BUG: Use Python pickle protocol version 4 for np.save +* `#26391 `__: DOC: Add missing methods to numpy.strings docs +* `#26392 `__: BUG: support nan-like null strings in [l,r]strip +* `#26396 `__: MNT: more gracefully handle spin adding arguments to functions... 
+* `#26399 `__: DOC: Update INSTALL.rst +* `#26413 `__: DOC: Fix some typos and incorrect markups +* `#26415 `__: MAINT: updated instructions to get MachAr byte pattern +* `#26416 `__: MAINT: Bump ossf/scorecard-action from 2.3.1 to 2.3.3 +* `#26418 `__: DOC: add reference docs for NpyString C API +* `#26419 `__: MNT: clean up references to array_owned==2 case in StringDType +* `#26426 `__: TYP,TST: Bump mypy to 1.10.0 +* `#26428 `__: MAINT: Bump pypa/cibuildwheel from 2.17.0 to 2.18.0 +* `#26429 `__: TYP: npyio: loadtxt: usecols: add None type +* `#26431 `__: TST: skip test_frompyfunc_leaks in the free-threaded build +* `#26432 `__: MAINT: Add some PR prefixes to the labeler. +* `#26436 `__: BUG: fixes for three related stringdtype issues +* `#26441 `__: BUG: int32 and intc should both appear in sctypes +* `#26442 `__: DOC: Adding links to polynomial table. +* `#26443 `__: TST: temporarily pin spin to work around issue in 0.9 release +* `#26444 `__: DOC: Remove outdated authentication instructions +* `#26445 `__: TST: fix xfailed tests on pypy 7.3.16 +* `#26447 `__: TST: attempt to fix intel SDE SIMD CI +* `#26449 `__: MAINT: fix typo +* `#26452 `__: DEP: Deprecate 'fix_imports' flag in numpy.save +* `#26456 `__: ENH: improve the error raised by ``numpy.isdtype`` +* `#26463 `__: TST: add basic free-threaded CI testing +* `#26464 `__: BLD: update vendored-meson to current Meson master (1.4.99) +* `#26469 `__: MAINT: Bump github/codeql-action from 2.13.4 to 3.25.5 +* `#26471 `__: BLD: cp313 [wheel build] +* `#26474 `__: BLD: Make NumPy build reproducibly +* `#26476 `__: DOC: Skip API documentation for numpy.distutils with Python 3.12... 
+* `#26478 `__: DOC: Set default as ``-j 1`` for spin docs and move ``-W`` to SPHINXOPTS +* `#26480 `__: TYP: fix type annotation for ``newbyteorder`` +* `#26481 `__: Improve documentation of numpy.ma.filled +* `#26486 `__: MAINT: Bump github/codeql-action from 3.25.5 to 3.25.6 +* `#26487 `__: MAINT: Bump pypa/cibuildwheel from 2.18.0 to 2.18.1 +* `#26488 `__: DOC: add examples to get_printoptions +* `#26489 `__: DOC: add example to get_include +* `#26492 `__: DOC: fix rng.random example in numpy-for-matlab-users +* `#26501 `__: ENH: Implement DLPack version 1 +* `#26503 `__: TST: work around flaky test on free-threaded build +* `#26504 `__: DOC: Copy-edit numpy 2.0 migration guide. +* `#26505 `__: DOC: update the NumPy Roadmap +* `#26507 `__: MAINT: mark temp elision address cache as thread local +* `#26511 `__: MAINT: Bump mamba-org/setup-micromamba from 1.8.1 to 1.9.0 +* `#26512 `__: CI: enable free-threaded wheel builds [wheel build] +* `#26514 `__: MAINT: Avoid gcc compiler warning +* `#26515 `__: MAINT: Fix GCC -Wmaybe-uninitialized warning +* `#26517 `__: DOC: Add missing functions to the migration guide +* `#26519 `__: MAINT: Avoid by-pointer parameter passing for LINEARIZE_DATA_t... +* `#26520 `__: BUG: Fix handling of size=() in Generator.choice when a.ndim... +* `#26524 `__: BUG: fix incorrect error handling for dtype('a') deprecation +* `#26526 `__: BUG: fix assert in PyArry_ConcatenateArrays with StringDType +* `#26529 `__: BUG: ``PyDataMem_SetHandler`` check capsule name +* `#26531 `__: BUG: Fix entry-point of Texinfo docs +* `#26534 `__: BUG: cast missing in PyPy-specific f2py code, pin spin in CI +* `#26537 `__: BUG: Fix F77 ! 
comment handling +* `#26538 `__: DOC: Update ``gradient`` docstrings +* `#26546 `__: MAINT: Remove redundant print from bug report issue template +* `#26548 `__: BUG: Fix typo in array-wrap code that lead to memory leak +* `#26550 `__: BUG: Make Polynomial evaluation adhere to nep 50 +* `#26552 `__: BUG: Fix in1d fast-path range +* `#26558 `__: BUG: fancy indexing copy +* `#26559 `__: BUG: fix setxor1d when input arrays aren't 1D +* `#26562 `__: MAINT: Bump mamba-org/setup-micromamba from 1.8.1 to 1.9.0 +* `#26563 `__: BUG: Fix memory leaks found with valgrind +* `#26564 `__: CI, BLD: Upgrade to Pyodide 0.26.0 for Emscripten/Pyodide CI... +* `#26566 `__: DOC: update ufunc tutorials to use setuptools +* `#26567 `__: BUG: fix memory leaks found with valgrind (next) +* `#26568 `__: MAINT: Unpin pydata-sphinx-theme +* `#26571 `__: DOC: Added web docs for missing ma and strings routines +* `#26572 `__: ENH: Add array API inspection functions +* `#26579 `__: ENH: Add unstack() +* `#26580 `__: ENH: Add copy and device keyword to np.asanyarray to match np.asarray +* `#26582 `__: BUG: weighted nanpercentile, nanquantile and multi-dim q +* `#26585 `__: MAINT: Bump github/codeql-action from 3.25.6 to 3.25.7 +* `#26586 `__: BUG: Fix memory leaks found by valgrind +* `#26589 `__: BUG: catch invalid fixed-width dtype sizes +* `#26594 `__: DOC: Update constants.rst: fix URL redirect +* `#26597 `__: ENH: Better error message for axis=None in ``np.put_along_axis``... +* `#26599 `__: ENH: use size-zero dtype for broadcast-shapes +* `#26602 `__: TST: Re-enable int8/uint8 einsum tests +* `#26603 `__: BUG: Disallow string inputs for ``copy`` keyword in ``np.array``... +* `#26604 `__: refguide-check with pytest as a runner +* `#26605 `__: DOC: fix typos in numpy v2.0 documentation +* `#26606 `__: DOC: Update randn() to use rng.standard_normal() +* `#26607 `__: MNT: Reorganize non-constant global statics into structs +* `#26609 `__: DOC: Updated notes and examples for np.insert. 
+* `#26610 `__: BUG: np.take handle 64-bit indices on 32-bit platforms +* `#26611 `__: MNT: Remove ``set_string_function`` +* `#26614 `__: MAINT: Bump github/codeql-action from 3.25.7 to 3.25.8 +* `#26619 `__: TST: Re-enable ``test_shift_all_bits`` on clang-cl +* `#26626 `__: DOC: add ``getbufsize`` example +* `#26627 `__: DOC: add ``setbufsize`` example +* `#26628 `__: DOC: add ``matrix_transpose`` example +* `#26629 `__: DOC: add ``unique_all`` example +* `#26630 `__: DOC: add ``unique_counts`` example +* `#26631 `__: DOC: add ``unique_inverse`` example +* `#26632 `__: DOC: add ``unique_values`` example +* `#26633 `__: DOC: fix ``matrix_transpose`` doctest +* `#26634 `__: BUG: Replace dots with underscores in f2py meson backend for... +* `#26636 `__: MAINT: Bump actions/dependency-review-action from 4.3.2 to 4.3.3 +* `#26637 `__: BUG: fix incorrect randomized parameterization in bench_linalg +* `#26638 `__: MNT: use reproducible RNG sequences in benchmarks +* `#26639 `__: MNT: more benchmark cleanup +* `#26641 `__: DOC: Update 2.0 migration guide +* `#26644 `__: DOC: Added clean_dirs to spin docs to remove generated folders +* `#26645 `__: DOC: Enable web docs for numpy.trapezoid and add back links +* `#26646 `__: DOC: Update docstring for invert function +* `#26655 `__: CI: modified CI job to test editable install +* `#26658 `__: MAINT: Bump pypa/cibuildwheel from 2.18.1 to 2.19.0 +* `#26662 `__: DOC: add CI and NEP commit acronyms +* `#26664 `__: CI: build and upload free-threaded nightly wheels for macOS +* `#26667 `__: BUG: Adds asanyarray to start of linalg.cross +* `#26670 `__: MAINT: Bump github/codeql-action from 3.25.8 to 3.25.9 +* `#26672 `__: CI: upgrade FreeBSD Cirrus job from FreeBSD 13.2 to 14.0 +* `#26675 `__: CI: Use default llvm on Windows. 
+* `#26676 `__: MAINT: mark evil_global_disable_warn_O4O8_flag as thread-local +* `#26679 `__: DOC: add ``np.linalg`` examples +* `#26680 `__: remove doctesting from refguide-check, add ``spin check-tutorials`` +* `#26684 `__: MAINT: Bump pypa/cibuildwheel from 2.19.0 to 2.19.1 +* `#26685 `__: MAINT: Bump github/codeql-action from 3.25.9 to 3.25.10 +* `#26686 `__: MAINT: Add comment lost in previous PR. +* `#26691 `__: BUILD: check for scipy-doctest, remove it from requirements +* `#26692 `__: DOC: document workaround for deprecation of dim-2 inputs to ``cross`` +* `#26693 `__: BUG: allow replacement in the dispatch cache +* `#26702 `__: DOC: Added missing See Also sections in Polynomial module +* `#26703 `__: BUG: Handle ``--f77flags`` and ``--f90flags`` for ``meson`` +* `#26706 `__: TST: Skip an f2py module test on Windows +* `#26714 `__: MAINT: Update main after 2.0.0 release. +* `#26716 `__: DOC: Add clarifications np.argpartition +* `#26717 `__: DOC: Mention more error paths and try to consolidate import errors +* `#26721 `__: DOC, MAINT: Turn on version warning banner provided by PyData... +* `#26722 `__: DOC: Update roadmap a bit more +* `#26724 `__: ENH: Add Array API 2023.12 version support +* `#26737 `__: DOC: Extend release notes for #26611 +* `#26739 `__: DOC: Update NEPs statuses +* `#26741 `__: DOC: Remove mention of NaN and NAN aliases from constants +* `#26742 `__: DOC: Mention '1.25' legacy printing mode in ``set_printoptions`` +* `#26744 `__: BUG: Fix new DTypes and new string promotion when signature is... +* `#26750 `__: ENH: Add locking to umath_linalg if no lapack is detected at... 
+* `#26760 `__: TYP: fix incorrect import in ``ma/extras.pyi`` stub +* `#26762 `__: BUG: fix max_rows and chunked string/datetime reading in ``loadtxt`` +* `#26766 `__: ENH: Support integer dtype inputs in rounding functions +* `#26769 `__: BUG: Quantile closest_observation to round to nearest even order +* `#26770 `__: DOC, NEP: Update NEP44 +* `#26771 `__: BUG: fix PyArray_ImportNumPyAPI under -Werror=strict-prototypes +* `#26776 `__: BUG: remove numpy.f2py from excludedimports +* `#26780 `__: MAINT: use an atomic load/store and a mutex to initialize the... +* `#26788 `__: TYP: fix missing ``sys`` import in numeric.pyi +* `#26789 `__: BUG: avoid side-effect of 'include complex.h' +* `#26790 `__: DOC: Update link to Python stdlib random. +* `#26795 `__: BUG: add order to out array of ``numpy.fft`` +* `#26797 `__: BLD: Fix x86-simd-sort build failure on openBSD +* `#26799 `__: MNT: Update dlpack docs and typing stubs +* `#26802 `__: Missing meson pass-through argument +* `#26805 `__: DOC: Update 2.0 migration guide and release note +* `#26808 `__: DOC: Change selected hardlinks to NEPs to intersphinx mappings +* `#26811 `__: DOC: update notes on sign for complex numbers +* `#26812 `__: CI,TST: Fix meson tests needing gfortran [wheel build] +* `#26813 `__: TST: fix 'spin test single_test' for future versions of spin +* `#26814 `__: DOC: Add ``>>> import numpy as np`` stubs everywhere +* `#26815 `__: MAINT: Bump github/codeql-action from 3.25.10 to 3.25.11 +* `#26826 `__: DOC: remove hack to override _add_newdocs_scalars +* `#26827 `__: DOC: AI-Gen examples ctypeslib.as_ctypes_types +* `#26828 `__: DOC: AI generated examples for ma.left_shift. +* `#26829 `__: DOC: AI-Gen examples for ma.put +* `#26830 `__: DOC: AI generated examples for ma.reshape +* `#26831 `__: DOC: AI generated examples for ma.correlate. 
+* `#26833 `__: MAINT: Bump pypa/cibuildwheel from 2.19.1 to 2.19.2 +* `#26841 `__: BENCH: Missing ufunc in benchmarks +* `#26842 `__: BUILD: clean out py2 stuff from npy_3kcompat.h +* `#26846 `__: MAINT: back printoptions with a true context variable +* `#26847 `__: TYP: fix ``ufunc`` method type annotations +* `#26848 `__: TYP: include the ``|`` prefix for ``dtype`` char codes +* `#26849 `__: BUG: Mismatched allocation domains in ``PyArray_FillWithScalar`` +* `#26858 `__: TYP: Annotate type aliases as ``typing.TypeAlias`` +* `#26866 `__: MAINT: Bump actions/upload-artifact from 4.3.3 to 4.3.4 +* `#26867 `__: TYP,BUG: fix ``numpy.__dir__`` annotations +* `#26871 `__: TYP: adopt ``typing.LiteralString`` and use more of ``typing.Literal`` +* `#26872 `__: TYP: use ``types.CapsuleType`` on python>=3.13 +* `#26873 `__: TYP: improved ``numpy._array_api_info`` typing +* `#26875 `__: TYP,BUG: Replace ``numpy._typing._UnknownType`` with ``typing.Never`` +* `#26877 `__: BUG: start applying ruff/flake8-implicit-str-concat rules (ISC) +* `#26879 `__: MAINT: start applying ruff/flake8-simplify rules (SIM) +* `#26880 `__: DOC: Fix small incorrect markup +* `#26881 `__: DOC, MAINT: fix typos found by codespell +* `#26882 `__: MAINT: start applying ruff/pyupgrade rules (UP) +* `#26883 `__: BUG: Make issctype always return bool. +* `#26884 `__: MAINT: Remove a redundant import from the generated __ufunc_api.h. +* `#26889 `__: API: Add ``device`` and ``to_device`` to scalars +* `#26891 `__: DOC: Add a note that one should free the proto struct +* `#26892 `__: ENH: Allow use of clip with Python integers to always succeed +* `#26894 `__: MAINT: Bump actions/setup-node from 4.0.2 to 4.0.3 +* `#26895 `__: DOC: Change documentation copyright strings to use a dynamic... +* `#26896 `__: DOC: Change NEP hardlinks to intersphinx mappings. +* `#26897 `__: TYP: type hint ``numpy.polynomial`` +* `#26901 `__: BUG: ``np.loadtxt`` return F_CONTIGUOUS ndarray if row size is... 
+* `#26902 `__: Apply some ruff/flake8-bugbear rules (B004 and B005) +* `#26903 `__: BUG: Fix off-by-one error in amount of characters in strip +* `#26904 `__: BUG,ENH: Fix generic scalar infinite recursion issues +* `#26905 `__: API: Do not consider subclasses for NEP 50 weak promotion +* `#26906 `__: MAINT: Bump actions/setup-python from 5.1.0 to 5.1.1 +* `#26908 `__: ENH: Provide a hook for gufuncs to process core dimensions. +* `#26913 `__: MAINT: declare that NumPy's C extensions support running without... +* `#26914 `__: API: Partially revert unique with return_inverse +* `#26919 `__: BUG,MAINT: Fix utf-8 character stripping memory access +* `#26923 `__: MAINT: Bump actions/dependency-review-action from 4.3.3 to 4.3.4 +* `#26924 `__: MAINT: Bump github/codeql-action from 3.25.11 to 3.25.12 +* `#26927 `__: TYP: Transparent ``__array__`` shape-type +* `#26928 `__: TYP: Covariant ``numpy.flatiter`` type parameter +* `#26929 `__: TYP: Positional-only dunder binop method parameters +* `#26930 `__: BUG: Fix out-of-bound minimum offset for in1d table method +* `#26931 `__: DOC, BUG: Fix running full test command in docstring +* `#26934 `__: MAINT: add PyArray_ZeroContiguousBuffer helper and use it in... +* `#26935 `__: BUG: fix ``f2py`` tests to work with v2 API +* `#26937 `__: TYP,BUG: Remove ``numpy.cast`` and ``numpy.disp`` from the typing... +* `#26938 `__: TYP,BUG: Fix ``dtype`` type alias specialization issue in ``__init__.pyi`` +* `#26942 `__: TYP: Improved ``numpy.generic`` rich comparison operator type... +* `#26943 `__: TYP,BUG: Remove non-existant ``numpy.__git_version__`` in the... +* `#26946 `__: TYP: Add missing typecodes in ``numpy._core.numerictypes.typecodes`` +* `#26950 `__: MAINT: add freethreading_compatible directive to cython build +* `#26953 `__: TYP: Replace ``typing.Union`` with ``|`` in ``numpy._typing`` +* `#26954 `__: TYP: Replace ``typing.Optional[T]`` with ``T | None`` in the... 
+* `#26964 `__: DOC: Issue template for static typing +* `#26968 `__: MAINT: add a 'tests' install tag to the `numpy._core._simd` extension... +* `#26969 `__: BUG: Fix unicode strip +* `#26972 `__: BUG: Off by one in memory overlap check +* `#26975 `__: TYP: Use ``Final`` and ``LiteralString`` for the constants in... +* `#26980 `__: DOC: add sphinx-copybutton +* `#26981 `__: ENH: add support in f2py to declare gil-disabled support +* `#26983 `__: TYP,BUG: Type annotations for ``numpy.trapezoid`` +* `#26984 `__: TYP,BUG: Fix potentially unresolved typevar in ``median`` and... +* `#26985 `__: BUG: Add object cast to avoid warning with limited API +* `#26989 `__: DOC: fix ctypes example +* `#26991 `__: MAINT: mark scipy-openblas nightly tests as allowed to fail +* `#26992 `__: TYP: Covariant ``numpy.ndenumerate`` type parameter +* `#26993 `__: TYP,BUG: FIx ``numpy.ndenumerate`` annotations for ``object_``... +* `#26996 `__: ENH: Add ``__slots__`` to private (sub-)classes in ``numpy.lib._index_tricks_impl`` +* `#27002 `__: MAINT: Update main after 2.0.1 release. +* `#27008 `__: TYP,BUG: Complete type stubs for ``numpy.dtypes`` +* `#27009 `__: TST, MAINT: Loosen required test precision +* `#27010 `__: DOC: update tutorials link +* `#27011 `__: MAINT: replace PyThread_type_lock with PyMutex on Python >= 3.13.0b3 +* `#27013 `__: BUG: cfuncs.py: fix crash when sys.stderr is not available +* `#27014 `__: BUG: fix gcd inf +* `#27015 `__: DOC: Fix migration note for ``alltrue`` and ``sometrue`` +* `#27017 `__: DOC: Release note for feature added in gh-26908. +* `#27019 `__: TYP: improved ``numpy.array`` type hints for array-like input +* `#27025 `__: DOC: Replace np.matrix in .view() docstring example. +* `#27026 `__: DOC: fix tiny typo +* `#27027 `__: BUG: Fix simd loadable stride logic +* `#27031 `__: DOC: document 'floatmode' and 'legacy' keys from np.get_printoptions'... +* `#27034 `__: BUG: random: Fix edge case of Johnk's algorithm for the beta... 
+* `#27041 `__: MAINT: Bump github/codeql-action from 3.25.12 to 3.25.14 +* `#27043 `__: CI: unify free-threaded wheel builds with other builds +* `#27046 `__: BUG: random: prevent zipf from hanging when parameter is large. +* `#27047 `__: BUG: use proper input and output descriptor in array_assign_subscript... +* `#27048 `__: BUG: random: Fix long delays/hangs with zipf(a) when a near 1. +* `#27050 `__: BUG: Mirror VQSORT_ENABLED logic in Quicksort +* `#27051 `__: TST: Refactor to consistently use CompilerChecker +* `#27052 `__: TST: fix issues with tests that use numpy.testing.extbuild +* `#27055 `__: MAINT: Bump ossf/scorecard-action from 2.3.3 to 2.4.0 +* `#27056 `__: MAINT: Bump github/codeql-action from 3.25.14 to 3.25.15 +* `#27057 `__: BUG: fix another cast setup in array_assign_subscript +* `#27058 `__: DOC: Add some missing examples for ``np.strings`` methods +* `#27059 `__: ENH: Disable name suggestions on some AttributeErrors +* `#27060 `__: MAINT: linalg: Simplify some linalg gufuncs. +* `#27070 `__: BUG: Bump Highway to latest master +* `#27076 `__: DEP: lib: Deprecate acceptance of float (and more) in bincount. +* `#27079 `__: MAINT: 3.9/10 cleanups +* `#27081 `__: CI: Upgrade ``array-api-tests`` +* `#27085 `__: ENH: fixes for warnings on free-threaded wheel builds +* `#27087 `__: ENH: mark the dragon4 scratch space as thread-local +* `#27090 `__: DOC: update np.shares_memory() docs +* `#27091 `__: API,BUG: Fix copyto (and ufunc) handling of scalar cast safety +* `#27094 `__: DOC: Add release note about deprecation introduced in gh-27076. +* `#27095 `__: DOC: Fix indentation of a few release notes. 
+* `#27096 `__: BUG: Complex printing tests fail on Windows ARM64 +* `#27097 `__: MAINT: Bump actions/upload-artifact from 4.3.4 to 4.3.5 +* `#27098 `__: BUG: add missing error handling in public_dtype_api.c +* `#27102 `__: DOC: Fixup promotion doc +* `#27104 `__: BUG: Fix building NumPy in FIPS mode +* `#27108 `__: DOC: remove incorrect docstring comment +* `#27110 `__: BLD: cp313 cp313t linux_aarch64 [wheel build] +* `#27112 `__: BUG: Fix repr for integer scalar subclasses +* `#27113 `__: DEV: make linter.py runnable from outside the root of the repo +* `#27114 `__: MAINT: Bump pypa/cibuildwheel from 2.19.2 to 2.20.0 +* `#27115 `__: BUG: Use the new ``npyv_loadable_stride_`` functions for ldexp and... +* `#27117 `__: BUG: Ensure that scalar binops prioritize __array_ufunc__ +* `#27118 `__: BLD: update vendored Meson for cross-compilation patches +* `#27123 `__: BUG: Bump Highway to latest +* `#27124 `__: MAINT: Bump github/codeql-action from 3.25.15 to 3.26.0 +* `#27125 `__: MAINT: Bump actions/upload-artifact from 4.3.5 to 4.3.6 +* `#27127 `__: BUG: Fix missing error return in copyto +* `#27144 `__: MAINT: Scipy openblas 0.3.27.44.4 +* `#27149 `__: BUG: Do not accidentally store dtype metadata in ``np.save`` +* `#27162 `__: BLD: use smaller scipy-openblas builds +* `#27166 `__: ENH: fix thread-unsafe C API usages +* `#27173 `__: MAINT: Bump pythoncapi-compat version. +* `#27176 `__: REL: Prepare for the NumPy 2.1.0rc1 release [wheel build] +* `#27180 `__: DOC: Add release notes for #26897 +* `#27181 `__: DOC: Add release notes for #27008 +* `#27190 `__: BUILD: use a shrunken version of scipy-openblas wheels [wheel... 
+* `#27193 <https://github.com/numpy/numpy/pull/27193>`__: REV: Revert undef I and document it
+* `#27196 <https://github.com/numpy/numpy/pull/27196>`__: BUILD: improve download script
+* `#27197 <https://github.com/numpy/numpy/pull/27197>`__: MAINT: update default NPY_FEATURE_VERSION after dropping py39
+* `#27200 <https://github.com/numpy/numpy/pull/27200>`__: DOC: add free-threading release notes
+* `#27209 <https://github.com/numpy/numpy/pull/27209>`__: BUG: Fix NPY_RAVEL_AXIS on backwards compatible NumPy 2 builds
+* `#27216 <https://github.com/numpy/numpy/pull/27216>`__: TYP: Fixed & improved type hints for ``numpy.histogram2d``
+* `#27217 <https://github.com/numpy/numpy/pull/27217>`__: TYP: Fix incompatible overrides in the ``numpy._typing._ufunc``...
+* `#27229 <https://github.com/numpy/numpy/pull/27229>`__: BUG: Fix ``PyArray_ZeroContiguousBuffer`` (resize) with struct...
+* `#27233 <https://github.com/numpy/numpy/pull/27233>`__: DOC: add docs on thread safety in NumPy
+* `#27234 <https://github.com/numpy/numpy/pull/27234>`__: BUG: Allow fitting of degree zero polynomials with Polynomial.fit
diff --git a/doc/changelog/2.1.1-changelog.rst b/doc/changelog/2.1.1-changelog.rst
new file mode 100644
index 000000000000..d18636771e1a
--- /dev/null
+++ b/doc/changelog/2.1.1-changelog.rst
@@ -0,0 +1,30 @@
+
+Contributors
+============
+
+A total of 7 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Andrew Nelson
+* Charles Harris
+* Mateusz Sokół
+* Maximilian Weigand +
+* Nathan Goldbaum
+* Pieter Eendebak
+* Sebastian Berg
+
+Pull requests merged
+====================
+
+A total of 10 pull requests were merged for this release.
+
+* `#27236 <https://github.com/numpy/numpy/pull/27236>`__: REL: Prepare for the NumPy 2.1.0 release [wheel build]
+* `#27252 <https://github.com/numpy/numpy/pull/27252>`__: MAINT: prepare 2.1.x for further development
+* `#27259 <https://github.com/numpy/numpy/pull/27259>`__: BUG: revert unintended change in the return value of set_printoptions
+* `#27266 <https://github.com/numpy/numpy/pull/27266>`__: BUG: fix reference counting bug in __array_interface__ implementation…
+* `#27267 <https://github.com/numpy/numpy/pull/27267>`__: TST: Add regression test for missing descr in array-interface
+* `#27276 <https://github.com/numpy/numpy/pull/27276>`__: BUG: Fix #27256 and #27257
+* `#27278 <https://github.com/numpy/numpy/pull/27278>`__: BUG: Fix array_equal for numeric and non-numeric scalar types
+* `#27287 <https://github.com/numpy/numpy/pull/27287>`__: MAINT: Update maintenance/2.1.x after the 2.0.2 release
+* `#27303 <https://github.com/numpy/numpy/pull/27303>`__: BLD: cp311- macosx_arm64 wheels [wheel build]
+* `#27304 <https://github.com/numpy/numpy/pull/27304>`__: BUG: f2py: better handle filtering of public/private subroutines
diff --git a/doc/changelog/2.1.2-changelog.rst b/doc/changelog/2.1.2-changelog.rst
new file mode 100644
index 000000000000..bd0f7bd2422c
--- /dev/null
+++ b/doc/changelog/2.1.2-changelog.rst
@@ -0,0 +1,38 @@
+
+Contributors
+============
+
+A total of 11 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Charles Harris
+* Chris Sidebottom
+* Ishan Koradia +
+* João Eiras +
+* Katie Rust +
+* Marten van Kerkwijk
+* Matti Picus
+* Nathan Goldbaum
+* Peter Hawkins
+* Pieter Eendebak
+* Slava Gorloff +
+
+Pull requests merged
+====================
+
+A total of 14 pull requests were merged for this release.
+
+* `#27333 <https://github.com/numpy/numpy/pull/27333>`__: MAINT: prepare 2.1.x for further development
+* `#27400 <https://github.com/numpy/numpy/pull/27400>`__: BUG: apply critical sections around populating the dispatch cache
+* `#27406 <https://github.com/numpy/numpy/pull/27406>`__: BUG: Stub out get_build_msvc_version if distutils.msvccompiler...
+* `#27416 <https://github.com/numpy/numpy/pull/27416>`__: BUILD: fix missing include for std::ptrdiff_t for C++23 language...
+* `#27433 <https://github.com/numpy/numpy/pull/27433>`__: BLD: pin setuptools to avoid breaking numpy.distutils
+* `#27437 <https://github.com/numpy/numpy/pull/27437>`__: BUG: Allow unsigned shift argument for np.roll
+* `#27439 <https://github.com/numpy/numpy/pull/27439>`__: BUG: Disable SVE VQSort
+* `#27471 <https://github.com/numpy/numpy/pull/27471>`__: BUG: rfftn axis bug
+* `#27479 <https://github.com/numpy/numpy/pull/27479>`__: BUG: Fix extra decref of PyArray_UInt8DType.
+* `#27480 `__: CI: use PyPI not scientific-python-nightly-wheels for CI doc... +* `#27481 `__: MAINT: Check for SVE support on demand +* `#27484 `__: BUG: initialize the promotion state to be weak +* `#27501 `__: MAINT: Bump pypa/cibuildwheel from 2.20.0 to 2.21.2 +* `#27506 `__: BUG: avoid segfault on bad arguments in ndarray.__array_function__ diff --git a/doc/changelog/2.1.3-changelog.rst b/doc/changelog/2.1.3-changelog.rst new file mode 100644 index 000000000000..073bd002e7ca --- /dev/null +++ b/doc/changelog/2.1.3-changelog.rst @@ -0,0 +1,49 @@ + +Contributors +============ + +A total of 15 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Abhishek Kumar + +* Austin + +* Benjamin A. Beasley + +* Charles Harris +* Christian Lorentzen +* Marcel Telka + +* Matti Picus +* Michael Davidsaver + +* Nathan Goldbaum +* Peter Hawkins +* Raghuveer Devulapalli +* Ralf Gommers +* Sebastian Berg +* dependabot[bot] +* kp2pml30 + + +Pull requests merged +==================== + +A total of 21 pull requests were merged for this release. + +* `#27512 `__: MAINT: prepare 2.1.x for further development +* `#27537 `__: MAINT: Bump actions/cache from 4.0.2 to 4.1.1 +* `#27538 `__: MAINT: Bump pypa/cibuildwheel from 2.21.2 to 2.21.3 +* `#27539 `__: MAINT: MSVC does not support #warning directive +* `#27543 `__: BUG: Fix user dtype can-cast with python scalar during promotion +* `#27561 `__: DEV: bump ``python`` to 3.12 in environment.yml +* `#27562 `__: BLD: update vendored Meson to 1.5.2 +* `#27563 `__: BUG: weighted quantile for some zero weights (#27549) +* `#27565 `__: MAINT: Use miniforge for macos conda test. 
+* `#27566 `__: BUILD: satisfy gcc-13 pendantic errors +* `#27569 `__: BUG: handle possible error for PyTraceMallocTrack +* `#27570 `__: BLD: start building Windows free-threaded wheels [wheel build] +* `#27571 `__: BUILD: vendor tempita from Cython +* `#27574 `__: BUG: Fix warning "differs in levels of indirection" in npy_atomic.h... +* `#27592 `__: MAINT: Update Highway to latest +* `#27593 `__: BUG: Adjust numpy.i for SWIG 4.3 compatibility +* `#27616 `__: BUG: Fix Linux QEMU CI workflow +* `#27668 `__: BLD: Do not set __STDC_VERSION__ to zero during build +* `#27669 `__: ENH: fix wasm32 runtime type error in numpy._core +* `#27672 `__: BUG: Fix a reference count leak in npy_find_descr_for_scalar. +* `#27673 `__: BUG: fixes for StringDType/unicode promoters diff --git a/doc/changelog/2.2.0-changelog.rst b/doc/changelog/2.2.0-changelog.rst new file mode 100644 index 000000000000..b82a3d03b4fc --- /dev/null +++ b/doc/changelog/2.2.0-changelog.rst @@ -0,0 +1,437 @@ + +Contributors +============ + +A total of 106 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* !Dreamge + +* !bersbersbers + +* !fengluoqiuwu + +* !h-vetinari +* !hutauf + +* !musvaage + +* !nullSoup + +* Aarni Koskela + +* Abhishek Kumar + +* Abraham Medina + +* Aditi Juneja + +* Adrien Corenflos + +* Agriya Khetarpal +* Ajay Kumar Janapareddi +* Akula Guru Datta + +* Amit Subhash Chejara + +* Andrew Nelson +* Anne Gunn +* Austin Ran + +* Ben Walsh +* Benjamin A. 
Beasley +
+* Benoit Prabel +
+* Charles Harris
+* Chris Fu (傅立业)
+* Chris Sidebottom
+* Christian Lorentzen
+* Christopher Sidebottom
+* Clément Robert
+* Dane Reimers +
+* Dimitri Papadopoulos Orfanos
+* Evgeni Burovski
+* GUAN MING
+* Habiba Hye +
+* Harry Zhang +
+* Hugo van Kemenade
+* Ian Harris +
+* Isaac Warren +
+* Ishan Koradia +
+* Ishan Purekar +
+* Jake VanderPlas
+* Jianyu Wen +
+* Johannes Kaisinger
+* John Kirkham
+* Joren Hammudoglu
+* João Eiras +
+* KM Khalid Saifullah +
+* Karel Planken +
+* Katie Rust +
+* Khem Raj
+* Kira Prokopenko +
+* Lars Grüter
+* Linus Sommer
+* Lucas Colley
+* Luiz Eduardo Amaral
+* Luke Aarohi +
+* Marcel Telka +
+* Mark Harfouche
+* Marten van Kerkwijk
+* Maryanne Wachter +
+* Mateusz Sokół
+* Matt Haberland
+* Matthias Diener +
+* Matthieu Darbois
+* Matti Picus
+* Maximilian Weigand +
+* Melissa Weber Mendonça
+* Michael Davidsaver +
+* Nathan Goldbaum
+* Nicolas Tessore +
+* Nitish Satyavolu +
+* Oscar Armas-Luy +
+* Peter Hawkins
+* Peter Kämpf +
+* Pieter Eendebak
+* Raghu Rajan +
+* Raghuveer Devulapalli
+* Ralf Gommers
+* Robert Kern
+* Rohit Goswami
+* Ross Barnowski
+* Ryan Teoh +
+* Santhana Mikhail Antony S +
+* Sayed Adel
+* Sebastian Berg
+* Sebastian Vittersø +
+* Sebin Thomas +
+* Serge Panev +
+* Shaurya Barkund +
+* Shiv Katira +
+* Simon Altrogge
+* Slava Gorloff +
+* Slobodan Miletic +
+* Soutrik Bandyopadhyay +
+* Stan Ulbrych +
+* Stefan van der Walt
+* Tim Hoffmann
+* Timo Röhling
+* Tyler Reddy
+* Vahid Tavanashad +
+* Victor Herdeiro +
+* Vijayakumar Z +
+* Warren Weckesser
+* Xiao Yuan +
+* Yashasvi Misra
+* bilderbuchi +
+* dependabot[bot]
+
+Pull requests merged
+====================
+
+A total of 317 pull requests were merged for this release.
+ +* `#14622 `__: BUG: fix datetime64/timedelta64 hash and match Python +* `#15181 `__: ENH: Add nd-support to trim_zeros +* `#17780 `__: ENH, BLD: Define RISCV-32 support +* `#23547 `__: DOC: Fix a typo in description and add an example of ``numpy.tensordot`` +* `#25984 `__: BUG: Allow fitting of degree zero polynomials with Polynomial.fit +* `#26398 `__: DOC: order of indices returned in tril_indices and triu_indices +* `#26406 `__: DOC: Changed vdot docs as suggested +* `#26570 `__: CI, BLD: Use ``cibuildwheel`` to build WASM NumPy wheels +* `#26642 `__: DOC: Add examples to ``np.char`` +* `#26855 `__: TYP: improved ``numpy.frompyfunc`` type hints +* `#26857 `__: MAINT: Start applying ruff/Pycodestyle rules +* `#26865 `__: TYP: add missing annotations for ``numpy.object_.__new__`` +* `#26941 `__: TYP: Non-distributive ``numpy.generic`` type args. +* `#26944 `__: TYP: Annotate ``numpy._core._type_aliases`` . +* `#26979 `__: TYP: Explicit ``numpy.__all__`` in the stubs +* `#26994 `__: TYP: Typing fixes for ``numpy.iinfo`` & ``numpy.finfo`` +* `#27049 `__: BUG: f2py: better handle filtering of public/private subroutines +* `#27088 `__: WHL: bump (musl) linux image [wheel build] +* `#27100 `__: TYP: Fixed & improved type hints for ``numpy.histogram2d`` +* `#27101 `__: TST, DOC: add doc and test for transpose axes with negative indices +* `#27116 `__: DOC: update NEP 50 draft status to "Final" +* `#27119 `__: ENH: Use ``PyObject_GetOptionalAttr`` +* `#27132 `__: TYP: Assume that ``typing_extensions`` is always available in... 
+* `#27134 `__: REL: Prepare main for 2.2.0 development +* `#27139 `__: TYP: Fixed & improved ``numpy.dtype.__new__`` +* `#27140 `__: MAINT: Scipy openblas 0.3.27.44.4 +* `#27143 `__: BUG: Do not accidentally store dtype metadata in ``np.save`` +* `#27145 `__: ENH: fix thread-unsafe C API usages +* `#27147 `__: BLD: use smaller scipy-openblas builds +* `#27148 `__: BUG: Raise if histogram cannot create finite bin sizes +* `#27150 `__: TYP: Sane defaults for the platform-specific ``NBitBase`` types. +* `#27152 `__: TYP: Simplified ufunc imports in ``numpy._typing`` +* `#27153 `__: TYP: Fix incompatible overrides in the ``numpy._typing._ufunc``... +* `#27154 `__: TYP: Use ``typing_extensions.Self`` in the ``numpy`` stubs +* `#27156 `__: MAINT: Remove any promotion-state switching logic +* `#27157 `__: TYP: add td64 overload for ``np.mean`` +* `#27158 `__: CI: Re-enable nightly OpenBLAS test runs +* `#27160 `__: DEP: Finalize ``bool(empty_array)`` deprecation +* `#27164 `__: MAINT: use npy_argparse for einsum +* `#27168 `__: DOC: add td64 example in ``np.mean`` +* `#27171 `__: TYP: Shape-typed array constructors: ``numpy.{empty,zeros,ones,full}`` +* `#27177 `__: TYP: 1-d ``numpy.arange`` return shape-type +* `#27178 `__: TYP,TST: Bump mypy to 1.11.1 +* `#27179 `__: TYP: Improved ``numpy.piecewise`` type-hints +* `#27182 `__: REV: Revert undef I and document it +* `#27184 `__: BUILD: update to OpenBLAS 0.3.28 +* `#27187 `__: MAINT: update default NPY_FEATURE_VERSION after dropping py39 +* `#27189 `__: MAINT: improve download script +* `#27202 `__: BUG: Fix NPY_RAVEL_AXIS on backwards compatible NumPy 2 builds +* `#27203 `__: DOC: update PyArray_CheckAxis doc +* `#27207 `__: TYP: Deprecate calling ``numpy.save`` with ``fix_imports`` (PEP... +* `#27208 `__: TYP: Disallow scalars and 0d-arrays in ``numpy.nonzero`` +* `#27210 `__: TYP: Semi-transparent ``numpy.shape`` shape-type annotations. 
+* `#27211 `__: TYP: Stop using ``Any`` as shape-type default +* `#27215 `__: MAINT: Bump github/codeql-action from 3.26.0 to 3.26.2 +* `#27218 `__: DEV: Add ``.editorconfig`` rules for Python +* `#27219 `__: TYP: Replace ``ellipsis`` with ``types.EllipsisType`` +* `#27220 `__: TYP: Fixed & improved ``TypeVar`` use for ``numpy.char.chararray`` +* `#27221 `__: MAINT: Bump actions/upload-artifact from 4.3.3 to 4.3.6 +* `#27223 `__: DOC: add docs on thread safety in NumPy +* `#27226 `__: BUG: Fix ``PyArray_ZeroContiguousBuffer`` (resize) with struct... +* `#27228 `__: DOC: Remove obsolete note from the top of the 2.0.0 release notes. +* `#27235 `__: MAINT: MSVC does not support #warning directive +* `#27237 `__: TYP: Fix several typing issues in ``numpy.polynomial`` +* `#27238 `__: DOC: update ``np.unique`` docstring +* `#27242 `__: MAINT: Update main after 2.1.0 release. +* `#27246 `__: MAINT: Bump github/codeql-action from 3.26.2 to 3.26.3 +* `#27247 `__: DOC: update documentation release process +* `#27249 `__: BUG: fix reference counting bug in __array_interface__ implementation +* `#27255 `__: BUG: revert unintended change in the return value of set_printoptions +* `#27261 `__: TST: Add regression test for missing descr in array-interface +* `#27262 `__: BUG: Fix #27256 and #27257 +* `#27268 `__: MAINT: Bump github/codeql-action from 3.26.3 to 3.26.4 +* `#27272 `__: ENH: make check-{docs,tutorials} fail on dtype mismatch +* `#27275 `__: BUG: Fix array_equal for numeric and non-numeric scalar types +* `#27277 `__: DOC/DEV/CI: mambaforge -> miniforge +* `#27281 `__: MAINT: Bump github/codeql-action from 3.26.4 to 3.26.5 +* `#27284 `__: BLD: cp311- macosx_arm64 wheels [wheel build] +* `#27286 `__: MAINT: Update main after the 2.0.2 release +* `#27289 `__: MAINT: Start applying ruff rules (RUF) +* `#27290 `__: MAINT: Keep applying ruff/pyupgrade rules (UP) +* `#27291 `__: DOC, MAINT: Fix new typos found by codespell +* `#27292 `__: MAINT: Start applying 
ruff/flake8-type-checking rules (TCH) +* `#27293 `__: MAINT: Keep applying ruff/flake8-bugbear rules (B) +* `#27294 `__: BUILD: refactor circleci to use spin [skip actions][skip azp][skip... +* `#27295 `__: MAINT: Start applying rruff/flake8-pie rules (PIE) +* `#27296 `__: MAINT: Start applying ruff/flake8-comprehensions rules (C4) +* `#27297 `__: MAINT: Apply ruff/flake8-raise rules (RSE) +* `#27298 `__: MAINT: Apply ruff/flynt rules (FLY) +* `#27302 `__: BUG: Fix bug in ``doc/neps/tools/build_index.py`` +* `#27307 `__: MAINT: Apply ruff/pycodestyle warning rules (W) +* `#27311 `__: MAINT: Bump actions/setup-python from 5.1.1 to 5.2.0 +* `#27312 `__: MAINT: Bump github/codeql-action from 3.26.5 to 3.26.6 +* `#27316 `__: BUILD: update pypy test version +* `#27320 `__: MAINT: increase max line length from 79 to 88, upgrade pycodestyle +* `#27322 `__: DOC: Removed reference to deprecated "newshape" parameter in... +* `#27323 `__: TYP: add ``ma.zeros_like`` and ``ma.ones_like`` typing +* `#27326 `__: MAINT: Bump actions/upload-artifact from 4.3.6 to 4.4.0 +* `#27330 `__: BLD: Win-arm64 cross compile workflow +* `#27331 `__: MAINT: GitHub Actions: Replace deprecated macos-12 with macos-latest +* `#27332 `__: MAINT: Update main after 2.1.1 release. +* `#27334 `__: TYP: Concrete ``float64`` and ``complex128`` scalar types with... +* `#27335 `__: ENH: Add ``allow_pickle`` flag to ``savez`` +* `#27344 `__: MAINT: fix typos +* `#27346 `__: BUG,TYP: Allow subscripting ``iinfo`` and ``finfo`` generic types... 
+* `#27347 `__: DOC: Mention that c is reassigned but still points to a (quickstart) +* `#27353 `__: MNT, CI: Use separate jobs for WASM wheel builds/uploads +* `#27355 `__: MAINT: Bump actions/setup-python from 5.1.1 to 5.2.0 +* `#27356 `__: MAINT: Bump actions/upload-artifact from 4.3.6 to 4.4.0 +* `#27359 `__: MAINT: fix typo in random.binomial +* `#27360 `__: BUG: fix _shrink edge case in np.ma.mask_or +* `#27361 `__: BUILD: fix missing include for std::ptrdiff_t for C++23 language... +* `#27363 `__: DOC: Remove reshape from appearing twice in toctree +* `#27364 `__: DOC: Update np.\*stack doc to reflect behavior +* `#27365 `__: MAINT: Bump deadsnakes/action from 3.1.0 to 3.2.0 +* `#27369 `__: DOC: fix incorrect definitions +* `#27372 `__: CI: Update cirrus nightly token +* `#27376 `__: MAINT: Fix a few typos - and sometimes improve wording +* `#27381 `__: DOC: add vecdot to 'See also' of np.dot and np.inner +* `#27384 `__: MAINT: Fix a few more typos +* `#27385 `__: DOC: Update np.unique_all example to demonstrate namedtuple output +* `#27387 `__: DOC: Clarify np.searchsorted documentation and add example for... +* `#27390 `__: MAINT: Bump github/codeql-action from 3.26.6 to 3.26.7 +* `#27391 `__: MAINT: Bump pypa/cibuildwheel from 2.20.0 to 2.21.0 +* `#27392 `__: BUG: apply critical sections around populating the dispatch cache +* `#27403 `__: DOC: Fix minor issues in arrays.promotion.rst +* `#27406 `__: BUG: Stub out ``get_build_msvc_version`` if ``distutils.msvccompiler``... +* `#27408 `__: DOC: more informative _excluded_ argument explanation in np.vectorize +* `#27412 `__: MAINT: Bump pypa/cibuildwheel from 2.21.0 to 2.21.1 +* `#27414 `__: MAINT: add Python 3.13 to classifiers +* `#27417 `__: TYP: Allow callable ``converters`` arg in ``numpy.loadtxt`` +* `#27418 `__: TYP: Fix default return dtype of ``numpy.random.Generator.integers``... 
+* `#27419 `__: TYP: Modernized ``numpy.dtypes`` annotations +* `#27420 `__: TYP: Optional 2nd ``numpy.complexfloating`` type parameter +* `#27421 `__: BUG: Add regression test for gh-27273 +* `#27423 `__: TYP: Add missing type arguments +* `#27424 `__: DOC: Add release notes for #27334 +* `#27425 `__: MAINT: Use correct Python interpreter in tests +* `#27426 `__: MAINT: Bump github/codeql-action from 3.26.7 to 3.26.8 +* `#27427 `__: TYP: Fixed & improved type-hinting for ``any`` and ``all`` +* `#27429 `__: BLD: pin setuptools to avoid breaking numpy.distutils +* `#27430 `__: TYP: Fix type of ``copy`` argument in ``ndarray.reshape`` +* `#27431 `__: BUG: Allow unsigned shift argument for np.roll +* `#27434 `__: ENH: make np.dtype(scalar_type) return the default dtype instance +* `#27438 `__: BUG: Disable SVE VQSort +* `#27440 `__: DOC: Add a link to the migration guide for the deprecation warning... +* `#27441 `__: DOC: remove old versionadded comments from arrays.classes.rst +* `#27442 `__: DOC: Remove old versionchanged directives from config.rst +* `#27443 `__: updated the version of mean param from the release notes (2.0.0) +* `#27444 `__: TST: Added the test case for masked array tofile failing +* `#27445 `__: DOC: removed older versionadded directives to ufuncs.rst +* `#27448 `__: DOC: Example for char.array +* `#27453 `__: DOC: Added docstring for numpy.ma.take() function. +* `#27454 `__: DOC: Remove outdated versionadded/changed directives +* `#27458 `__: MAINT: Bump github/codeql-action from 3.26.8 to 3.26.9 +* `#27464 `__: DOC: Fix a copy-paste mistake in the cumulative_sum docstring. +* `#27465 `__: DOC: update ndindex reference in np.choose docstring +* `#27466 `__: BUG: rfftn axis bug +* `#27469 `__: DOC: Added ``CONTRIBUTING.rst`` +* `#27470 `__: TYP: Add type stubs for stringdtype in np.char and np.strings +* `#27472 `__: MAINT: Check for SVE support on demand +* `#27475 `__: CI: use PyPI not scientific-python-nightly-wheels for CI doc... 
+* `#27478 `__: BUG: Fix extra decref of PyArray_UInt8DType. +* `#27482 `__: Show shape any time it cannot be inferred in repr +* `#27485 `__: MAINT: Bump github/codeql-action from 3.26.9 to 3.26.10 +* `#27486 `__: MAINT: Bump scientific-python/upload-nightly-action from 0.5.0... +* `#27490 `__: API: register NEP 35 functions as array_functions +* `#27491 `__: MAINT: Bump mamba-org/setup-micromamba from 1.9.0 to 1.10.0 +* `#27495 `__: MAINT: Bump pypa/cibuildwheel from 2.21.1 to 2.21.2 +* `#27496 `__: MAINT: Bump mamba-org/setup-micromamba from 1.10.0 to 2.0.0 +* `#27497 `__: DOC: Correct selected C docstrings to eliminate warnings +* `#27499 `__: DOC: fix missing arguments (copy and device) from asanyarray's... +* `#27502 `__: MAINT: Bump github/codeql-action from 3.26.10 to 3.26.11 +* `#27503 `__: BUG: avoid segfault on bad arguments in ndarray.__array_function__ +* `#27504 `__: ENH: Allow ``ndarray.__array_function__`` to dispatch functions... +* `#27508 `__: MAINT: Pin setuptools for testing [wheel build] +* `#27510 `__: TYP: Mark stub-only classes as ``@type_check_only`` +* `#27511 `__: TYP: Annotate type aliases without annotation +* `#27513 `__: MAINT: Update main after NumPy 2.1.2 release +* `#27517 `__: BENCH: Add benchmarks for np.non_zero +* `#27518 `__: TST: Add tests for np.nonzero with different input types +* `#27520 `__: TYP: Remove unused imports in the stubs +* `#27521 `__: TYP: Fill in the missing ``__all__`` exports +* `#27524 `__: MAINT: Bump actions/cache from 4.0.2 to 4.1.0 +* `#27525 `__: MAINT: Bump actions/upload-artifact from 4.4.0 to 4.4.1 +* `#27526 `__: MAINT: Bump github/codeql-action from 3.26.11 to 3.26.12 +* `#27532 `__: MAINT: Bump actions/cache from 4.1.0 to 4.1.1 +* `#27534 `__: BUG: Fix user dtype can-cast with python scalar during promotion +* `#27535 `__: MAINT: Bump pypa/cibuildwheel from 2.21.2 to 2.21.3 +* `#27536 `__: MAINT: Bump actions/upload-artifact from 4.4.1 to 4.4.3 +* `#27549 `__: BUG: weighted quantile for some 
zero weights +* `#27550 `__: BLD: update vendored Meson to 1.5.2 +* `#27551 `__: MAINT: Bump github/codeql-action from 3.26.12 to 3.26.13 +* `#27553 `__: BLD: rename ``meson_options.txt`` to ``meson.options`` +* `#27555 `__: DEV: bump ``python`` to 3.12 in environment.yml +* `#27556 `__: DOC: Clarify use of standard deviation in mtrand.pyx +* `#27557 `__: BUG: Fix warning "differs in levels of indirection" in npy_atomic.h... +* `#27558 `__: MAINT: distutils: remove obsolete search for ``ecc`` executable +* `#27560 `__: CI: start building Windows free-threaded wheels +* `#27564 `__: BUILD: satisfy gcc-13 pendantic errors +* `#27567 `__: BUG: handle possible error for PyTraceMallocTrack +* `#27568 `__: BUILD: vendor tempita from Cython +* `#27579 `__: BUG: Adjust numpy.i for SWIG 4.3 compatibility +* `#27586 `__: MAINT: Update Highway to latest +* `#27587 `__: BLD: treat SVML object files better to avoid compiler warnings +* `#27595 `__: DOC: Clarify obj parameter types in numpy.delete documentation +* `#27598 `__: DOC: add examples to ctypeslib +* `#27602 `__: Update documentation for floating-point precision and determinant... +* `#27604 `__: DOC: Fix rendering in docstring of nan_to_num +* `#27612 `__: ENH: Add comments to ``string_fastsearch.h`` , rename some C-methods +* `#27613 `__: BUG: Fix Linux QEMU CI workflow +* `#27615 `__: ENH: Fix np.insert to handle boolean arrays as masks +* `#27617 `__: DOC: Update the RELEASE_WALKTHROUGH.rst file. 
+* `#27619 `__: MAINT: Bump actions/cache from 4.1.1 to 4.1.2 +* `#27620 `__: MAINT: Bump actions/dependency-review-action from 4.3.4 to 4.3.5 +* `#27621 `__: MAINT: Bump github/codeql-action from 3.26.13 to 3.27.0 +* `#27627 `__: ENH: Re-enable VSX from build targets for sin/cos +* `#27630 `__: ENH: Extern memory management to Cython +* `#27634 `__: MAINT: Bump actions/setup-python from 5.2.0 to 5.3.0 +* `#27636 `__: BUG: fixes for StringDType/unicode promoters +* `#27643 `__: BUG : avoid maximum fill value of datetime and timedelta return... +* `#27644 `__: DOC: Remove ambiguity in docs for ndarray.byteswap() +* `#27650 `__: BLD: Do not set __STDC_VERSION__ to zero during build +* `#27652 `__: TYP,TST: Bump ``mypy`` from ``1.11.1`` to ``1.13.0`` +* `#27653 `__: TYP: Fix Array API method signatures +* `#27659 `__: TYP: Transparent ``ndarray`` unary operator method signatures +* `#27661 `__: BUG: np.cov transpose control +* `#27663 `__: MAINT: fix wasm32 runtime type error in numpy._core +* `#27664 `__: MAINT: Bump actions/dependency-review-action from 4.3.5 to 4.4.0 +* `#27665 `__: ENH: Re-enable VXE from build targets for sin/cos +* `#27666 `__: BUG: Fix a reference count leak in npy_find_descr_for_scalar. +* `#27667 `__: TYP: Allow returning non-array-likes from the ``apply_along_axis``... +* `#27676 `__: CI: Attempt to fix CI on 32 bit linux +* `#27678 `__: DOC: fix incorrect versionadded for np.std +* `#27680 `__: MAINT: fix typo / copy paste error +* `#27681 `__: TYP: Fix some inconsistencies in the scalar methods and properties +* `#27683 `__: TYP: Improve ``np.sum`` and ``np.mean`` return types with given... +* `#27684 `__: DOC: fix spelling of "reality" in ``_nanfunctions_impl.pyi`` +* `#27685 `__: MAINT: Drop useless shebang +* `#27691 `__: TYP: Use ``_typeshed`` to clean up the stubs +* `#27693 `__: MAINT: Update main after 2.1.3 release. 
+* `#27695 `__: BUG: Fix multiple modules in F2PY and COMMON handling +* `#27702 `__: MAINT: Bump conda-incubator/setup-miniconda from 3.0.4 to 3.1.0 +* `#27705 `__: MAINT: Bump mamba-org/setup-micromamba from 2.0.0 to 2.0.1 +* `#27706 `__: DOC: Remove empty notes +* `#27707 `__: CI: Set up free-threaded CI using quansight-labs/setup-python +* `#27708 `__: DOC: Remove version notes +* `#27714 `__: DOC: fix a mistake in the docstring of vector_norm +* `#27715 `__: BUG: fix incorrect output descriptor in fancy indexing +* `#27716 `__: ENH: Make ``__module__`` attribute coherent across API +* `#27721 `__: DOC: fix name of shape parameter kappa of von Mises distribution +* `#27723 `__: BUG: Allow empty memmaps in most situations +* `#27724 `__: MAINT: Bump github/codeql-action from 3.27.0 to 3.27.1 +* `#27728 `__: BUG: Handle ``--lower`` for F2PY directives and callbacks +* `#27729 `__: BUG: f2py: fix issues with thread-local storage define +* `#27730 `__: TST: Add an F2PY check for exposing variables without functions +* `#27731 `__: BUG: Fix ``fortranname`` for functions +* `#27734 `__: Fix documentation for the chi-square distribution +* `#27735 `__: ENH: Add a ``__dict__`` to ufunc objects and allow overriding... +* `#27736 `__: TYP: Optional ``numpy.number`` type parameters +* `#27742 `__: MAINT: Bump github/codeql-action from 3.27.1 to 3.27.2 +* `#27743 `__: DOC: Fix typos in subclassing documentation +* `#27746 `__: DOC: Added additional guidance for compiling in Windows +* `#27750 `__: TYP: Fix ``ndarray.item()`` and improve ``ndarray.tolist()`` +* `#27753 `__: TYP: Fix the annotations of ``ndarray.real`` and ``ndarray.imag`` +* `#27754 `__: MAINT: Bump github/codeql-action from 3.27.2 to 3.27.3 +* `#27755 `__: TYP: Annotate ``__setitem__`` , ``__contains__`` and ``__iter__``... +* `#27756 `__: TYP: 1-d shape-typing for ``ndarray.flatten`` and ``ravel`` +* `#27757 `__: TYP: Remove the non-existent ``bitwise_count`` methods of ``ndarray``... 
+* `#27758 `__: TYP: Remove ``ndarray`` binop overloads for ``NDArray[Never]`` +* `#27763 `__: DOC: Note that allow-pickle is not safe also in error +* `#27765 `__: TYP: Shape-typed ``ndarray`` inplace binary operator methods. +* `#27766 `__: MAINT: Bump github/codeql-action from 3.27.3 to 3.27.4 +* `#27767 `__: TYP: Support shape-typing in ``reshape`` and ``resize`` +* `#27769 `__: TYP: Towards a less messy ``__init__.pyi`` +* `#27770 `__: TYP: Fix incorrect baseclass of ``linalg.LinAlgError`` +* `#27771 `__: ENH: ``default_rng`` coerces ``RandomState`` to ``Generator`` +* `#27773 `__: BUG: Fix repeat, accumulate for strings and accumulate API logic +* `#27775 `__: TYP: Fix undefined type-parameter name +* `#27776 `__: TYP: Fix method overload issues in ``ndarray`` and ``generic`` +* `#27778 `__: TYP: Generic ``numpy.generic`` type parameter for the ``item()``... +* `#27779 `__: TYP: Type hints for ``numpy.__config__`` +* `#27788 `__: DOC: Make wording in absolute beginners guide more beginner friendly +* `#27790 `__: TYP: Generic ``timedelta64`` and ``datetime64`` scalar types +* `#27792 `__: TYP: Generic ``numpy.bool`` and statically typed boolean logic +* `#27794 `__: MAINT: Upgrade to spin 0.13 +* `#27795 `__: update pythoncapi-compat to latest HEAD +* `#27800 `__: BUG: Ensure context path is taken in masked array array-wrap +* `#27802 `__: BUG: Ensure that same-kind casting works for uints (mostly) +* `#27803 `__: MAINT: Bump github/codeql-action from 3.27.4 to 3.27.5 +* `#27806 `__: DOC: Improve choice() documentation about return types +* `#27807 `__: BUG,ENH: Fix internal ``__array_wrap__`` for direct calls +* `#27808 `__: ENH: Ensure hugepages are also indicated for calloc allocations +* `#27809 `__: BUG: Fix array flags propagation in boolean indexing +* `#27810 `__: MAINT: Bump actions/dependency-review-action from 4.4.0 to 4.5.0 +* `#27812 `__: BUG: ``timedelta64.__[r]divmod__`` segfaults for incompatible... 
+* `#27813 `__: DOC: fix broken reference in arrays.classes.rst +* `#27815 `__: DOC: Add a release fragment for gh-14622 +* `#27816 `__: MAINT: Fixup that spin can be installed via conda too now +* `#27817 `__: DEV: changelog: make title processing more robust +* `#27828 `__: CI: skip ninja installation in linux_qemu workflows +* `#27829 `__: CI: update circleci to python3.11.10, limit parallel builds.... +* `#27831 `__: BUG: Fix mismatch in definition and declaration for a couple... +* `#27843 `__: DOC: Correct version-added for mean arg for nanvar and nanstd +* `#27845 `__: BUG: Never negate strides in reductions (for now) +* `#27846 `__: ENH: add matvec and vecmat gufuncs +* `#27852 `__: DOC: Correct versionadded for vecmat and matvec. +* `#27853 `__: REL: Prepare for the NumPy 2.2.0rc1 release [wheel build] +* `#27874 `__: BUG: fix importing numpy in Python's optimized mode (#27868) +* `#27895 `__: DOC: Fix double import in docs (#27878) +* `#27904 `__: MAINT: Ensure correct handling for very large unicode strings +* `#27906 `__: MAINT: Use mask_store instead of store for compiler workaround +* `#27908 `__: MAINT: Update highway from main. +* `#27911 `__: ENH: update __module__ in numpy.random module +* `#27912 `__: ENH: Refactor ``__qualname__`` across API +* `#27913 `__: PERF: improve multithreaded ufunc scaling +* `#27916 `__: MAINT: Bump actions/cache from 4.1.2 to 4.2.0 + diff --git a/doc/changelog/2.2.1-changelog.rst b/doc/changelog/2.2.1-changelog.rst new file mode 100644 index 000000000000..ba3c4f19eb3f --- /dev/null +++ b/doc/changelog/2.2.1-changelog.rst @@ -0,0 +1,34 @@ + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. 
+ +* Charles Harris +* Joren Hammudoglu +* Matti Picus +* Nathan Goldbaum +* Peter Hawkins +* Simon Altrogge +* Thomas A Caswell +* Warren Weckesser +* Yang Wang + + +Pull requests merged +==================== + +A total of 12 pull requests were merged for this release. + +* `#27935 `__: MAINT: Prepare 2.2.x for further development +* `#27950 `__: TEST: cleanups [skip cirrus][skip azp] +* `#27958 `__: BUG: fix use-after-free error in npy_hashtable.cpp (#27955) +* `#27959 `__: BLD: add missing include +* `#27982 `__: BUG:fix compile error libatomic link test to meson.build +* `#27990 `__: TYP: Fix falsely rejected value types in ``ndarray.__setitem__`` +* `#27991 `__: MAINT: Don't wrap ``#include `` with ``extern "C"`` +* `#27993 `__: BUG: Fix segfault in stringdtype lexsort +* `#28006 `__: MAINT: random: Tweak module code in mtrand.pyx to fix a Cython... +* `#28007 `__: BUG: Cython API was missing NPY_UINTP. +* `#28021 `__: CI: pin scipy-doctest to 1.5.1 +* `#28044 `__: TYP: allow ``None`` in operand sequence of nditer diff --git a/doc/changelog/2.2.2-changelog.rst b/doc/changelog/2.2.2-changelog.rst new file mode 100644 index 000000000000..ac856c97174c --- /dev/null +++ b/doc/changelog/2.2.2-changelog.rst @@ -0,0 +1,37 @@ + +Contributors +============ + +A total of 8 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Alicia Boya García + +* Charles Harris +* Joren Hammudoglu +* Kai Germaschewski + +* Nathan Goldbaum +* PTUsumit + +* Rohit Goswami +* Sebastian Berg + +Pull requests merged +==================== + +A total of 16 pull requests were merged for this release. + +* `#28050 `__: MAINT: Prepare 2.2.x for further development +* `#28055 `__: TYP: fix ``void`` arrays not accepting ``str`` keys in ``__setitem__`` +* `#28066 `__: TYP: fix unnecessarily broad ``integer`` binop return types (#28065) +* `#28112 `__: TYP: Better ``ndarray`` binop return types for ``float64`` &... 
+* `#28113 `__: TYP: Return the correct ``bool`` from ``issubdtype`` +* `#28114 `__: TYP: Always accept ``date[time]`` in the ``datetime64`` constructor +* `#28120 `__: BUG: Fix auxdata initialization in ufunc slow path +* `#28131 `__: BUG: move reduction initialization to ufunc initialization +* `#28132 `__: TYP: Fix ``interp`` to accept and return scalars +* `#28137 `__: BUG: call PyType_Ready in f2py to avoid data races +* `#28145 `__: BUG: remove unnecessary call to PyArray_UpdateFlags +* `#28160 `__: BUG: Avoid data race in PyArray_CheckFromAny_int +* `#28175 `__: BUG: Fix f2py directives and --lower casing +* `#28176 `__: TYP: Fix overlapping overloads issue in 2->1 ufuncs +* `#28177 `__: TYP: preserve shape-type in ndarray.astype() +* `#28178 `__: TYP: Fix missing and spurious top-level exports diff --git a/doc/changelog/2.2.3-changelog.rst b/doc/changelog/2.2.3-changelog.rst new file mode 100644 index 000000000000..2cb6e99eec51 --- /dev/null +++ b/doc/changelog/2.2.3-changelog.rst @@ -0,0 +1,43 @@ + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* !amotzop +* Charles Harris +* Chris Sidebottom +* Joren Hammudoglu +* Matthew Brett +* Nathan Goldbaum +* Raghuveer Devulapalli +* Sebastian Berg +* Yakov Danishevsky + + +Pull requests merged +==================== + +A total of 21 pull requests were merged for this release. + +* `#28185 `__: MAINT: Prepare 2.2.x for further development +* `#28201 `__: BUG: fix data race in a more minimal way on stable branch +* `#28208 `__: BUG: Fix ``from_float_positional`` errors for huge pads +* `#28209 `__: BUG: fix data race in np.repeat +* `#28212 `__: MAINT: Use VQSORT_COMPILER_COMPATIBLE to determine if we should... 
+* `#28224 `__: MAINT: update highway to latest +* `#28236 `__: BUG: Add cpp atomic support (#28234) +* `#28237 `__: BLD: Compile fix for clang-cl on WoA +* `#28243 `__: TYP: Avoid upcasting ``float64`` in the set-ops +* `#28249 `__: BLD: better fix for clang / ARM compiles +* `#28266 `__: TYP: Fix ``timedelta64.__divmod__`` and ``timedelta64.__mod__``... +* `#28274 `__: TYP: Fixed missing typing information of set_printoptions +* `#28278 `__: BUG: backport resource cleanup bugfix from gh-28273 +* `#28282 `__: BUG: fix incorrect bytes to stringdtype coercion +* `#28283 `__: TYP: Fix scalar constructors +* `#28284 `__: TYP: stub ``numpy.matlib`` +* `#28285 `__: TYP: stub the missing ``numpy.testing`` modules +* `#28286 `__: CI: Fix the github label for ``TYP:`` PR's and issues +* `#28305 `__: TYP: Backport typing updates from main +* `#28321 `__: BUG: fix race initializing legacy dtype casts +* `#28324 `__: CI: update test_moderately_small_alpha diff --git a/doc/changelog/2.2.4-changelog.rst b/doc/changelog/2.2.4-changelog.rst new file mode 100644 index 000000000000..1e2664ebde48 --- /dev/null +++ b/doc/changelog/2.2.4-changelog.rst @@ -0,0 +1,45 @@ + +Contributors +============ + +A total of 15 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Abhishek Kumar +* Andrej Zhilenkov +* Andrew Nelson +* Charles Harris +* Giovanni Del Monte +* Guan Ming(Wesley) Chiu + +* Jonathan Albrecht + +* Joren Hammudoglu +* Mark Harfouche +* Matthieu Darbois +* Nathan Goldbaum +* Pieter Eendebak +* Sebastian Berg +* Tyler Reddy +* lvllvl + + +Pull requests merged +==================== + +A total of 17 pull requests were merged for this release. + +* `#28333 `__: MAINT: Prepare 2.2.x for further development. +* `#28348 `__: TYP: fix positional- and keyword-only params in astype, cross... 
+* `#28377 `__: MAINT: Update FreeBSD version and fix test failure +* `#28379 `__: BUG: numpy.loadtxt reads only 50000 lines when skip_rows >= max_rows +* `#28385 `__: BUG: Make np.nonzero threading safe +* `#28420 `__: BUG: safer bincount casting (backport to 2.2.x) +* `#28422 `__: BUG: Fix building on s390x with clang +* `#28423 `__: CI: use QEMU 9.2.2 for Linux Qemu tests +* `#28424 `__: BUG: skip legacy dtype multithreaded test on 32 bit runners +* `#28435 `__: BUG: Fix searchsorted and CheckFromAny byte-swapping logic +* `#28449 `__: BUG: sanity check ``__array_interface__`` number of dimensions +* `#28510 `__: MAINT: Hide decorator from pytest traceback +* `#28512 `__: TYP: Typing fixes backported from #28452, #28491, #28494 +* `#28521 `__: TYP: Backport fixes from #28505, #28506, #28508, and #28511 +* `#28533 `__: TYP: Backport typing fixes from main (2) +* `#28534 `__: TYP: Backport typing fixes from main (3) +* `#28542 `__: TYP: Backport typing fixes from main (4) diff --git a/doc/changelog/2.2.5-changelog.rst b/doc/changelog/2.2.5-changelog.rst new file mode 100644 index 000000000000..409c243d148e --- /dev/null +++ b/doc/changelog/2.2.5-changelog.rst @@ -0,0 +1,39 @@ + +Contributors +============ + +A total of 7 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Joren Hammudoglu +* Baskar Gopinath + +* Nathan Goldbaum +* Nicholas Christensen + +* Sayed Adel +* karl + + +Pull requests merged +==================== + +A total of 19 pull requests were merged for this release. 
+ +* `#28545 `__: MAINT: Prepare 2.2.x for further development +* `#28582 `__: BUG: Fix return type of NpyIter_GetIterNext in Cython declarations +* `#28583 `__: BUG: avoid deadlocks with C++ shared mutex in dispatch cache +* `#28585 `__: TYP: fix typing errors in ``_core.strings`` +* `#28631 `__: MAINT, CI: Update Ubuntu to 22.04 in azure-pipelines +* `#28632 `__: BUG: Set writeable flag for writeable dlpacks. +* `#28633 `__: BUG: Fix crackfortran parsing error when a division occurs within... +* `#28650 `__: TYP: fix ``ndarray.tolist()`` and ``.item()`` for unknown dtype +* `#28654 `__: BUG: fix deepcopying StringDType arrays (#28643) +* `#28661 `__: TYP: Accept objects that ``write()`` to ``str`` in ``savetxt`` +* `#28663 `__: CI: Replace QEMU armhf with native (32-bit compatibility mode) +* `#28682 `__: SIMD: Resolve Highway QSort symbol linking error on aarch32/ASIMD +* `#28683 `__: TYP: add missing ``"b1"`` literals for ``dtype[bool]`` +* `#28705 `__: TYP: Fix false rejection of ``NDArray[object_].__abs__()`` +* `#28706 `__: TYP: Fix inconsistent ``NDArray[float64].__[r]truediv__`` return... +* `#28723 `__: TYP: fix string-like ``ndarray`` rich comparison operators +* `#28758 `__: TYP: some ``[arg]partition`` fixes +* `#28772 `__: TYP: fix incorrect ``random.Generator.integers`` return type +* `#28774 `__: TYP: fix ``count_nonzero`` signature diff --git a/doc/changelog/2.2.6-changelog.rst b/doc/changelog/2.2.6-changelog.rst new file mode 100644 index 000000000000..16c62da4a927 --- /dev/null +++ b/doc/changelog/2.2.6-changelog.rst @@ -0,0 +1,32 @@ + +Contributors +============ + +A total of 8 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Ilhan Polat +* Joren Hammudoglu +* Marco Gorelli + +* Matti Picus +* Nathan Goldbaum +* Peter Hawkins +* Sayed Adel + +Pull requests merged +==================== + +A total of 11 pull requests were merged for this release. 
+ +* `#28778 `__: MAINT: Prepare 2.2.x for further development +* `#28851 `__: BLD: Update vendor-meson to fix module_feature conflicts arguments... +* `#28852 `__: BUG: fix heap buffer overflow in np.strings.find +* `#28853 `__: TYP: fix ``NDArray[floating] + float`` return type +* `#28864 `__: BUG: fix stringdtype singleton thread safety +* `#28865 `__: MAINT: use OpenBLAS 0.3.29 +* `#28889 `__: MAINT: from_dlpack thread safety fixes (#28883) +* `#28913 `__: TYP: Fix non-existent ``CanIndex`` annotation in ``ndarray.setfield`` +* `#28915 `__: MAINT: Avoid dereferencing/strict aliasing warnings +* `#28916 `__: BUG: Fix missing check for PyErr_Occurred() in _pyarray_correlate. +* `#28966 `__: TYP: reject complex scalar types in ndarray.__ifloordiv__ diff --git a/doc/changelog/2.3.0-changelog.rst b/doc/changelog/2.3.0-changelog.rst new file mode 100644 index 000000000000..7ca672ba8dbf --- /dev/null +++ b/doc/changelog/2.3.0-changelog.rst @@ -0,0 +1,704 @@ + +Contributors +============ + +A total of 134 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* !DWesl +* !EarlMilktea + +* !amotzop + +* !fengluoqiuwu +* !h-vetinari +* !karl3wm + +* !partev +* !samir539 + +* !wenlong2 + +* Aarni Koskela + +* Abdu Zoghbi + +* Abhishek Kumar +* Adam J. Stewart +* Aditi Juneja +* Adrin Jalali +* Agriya Khetarpal +* Alicia Boya García + +* Andrej Zhilenkov +* Andrew Nelson +* Angus Gibson + +* Antonio Rech Santos + +* Ari Gato + +* Arnaud Tremblay + +* Arvid Bessen + +* Baskar Gopinath + +* Carlos Martin + +* Charles Harris +* Chris Navarro + +* Chris Sidebottom +* Christian Lorentzen +* Christine P. 
Chai + +* Christopher Sidebottom +* ClÊment Robert +* Colin Gilgenbach + +* Craig Peters + +* CÊdric Hannotier +* Daniel Hrisca +* Derek Homeier +* Diego Baldassar + +* Dimitri Papadopoulos Orfanos +* Eoghan O'Connell + +* Eric Larson +* Ernst Peng + +* Evgeni Burovski +* Filipe Laíns +* François Rozet + +* François de Coatpont + +* GUAN MING +* Giovanni Del Monte +* Guan Ming(Wesley) Chiu + +* Guido Imperiale + +* Gyeongjae Choi + +* Halle Loveday + +* Hannah Wheeler + +* Hao Chen + +* Harmen Stoppels + +* Hin-Tak Leung + +* Ian DesJardin + +* Ihar Hrachyshka + +* Ilhan Polat +* Inessa Pawson +* J. Steven Dodge + +* Jake VanderPlas +* Jiachen An + +* Jiuding Tan (谭九éŧŽ) +* Joe Rickerby + +* John Kirkham +* John Stilley + +* Jonathan Albrecht + +* Joren Hammudoglu +* Kai Germaschewski + +* Krishna Bindumadhavan + +* Lucas Colley +* Luka Krmpotić + +* Lysandros Nikolaou +* Maanas Arora +* Makima C. Yang + +* Marco Barbosa + +* Marco Edward Gorelli + +* Mark Harfouche +* Marten van Kerkwijk +* Mateusz SokÃŗÅ‚ +* Matt Haberland +* Matthew Brett +* Matthew Goldsberry + +* Matthew Sterrett +* Matthias Diener +* Matthieu Darbois +* Matti Picus +* Melissa Weber Mendonça +* Michael Siebert +* Mike O'Brien + +* Mohammed Abdul Rahman + +* Mugundan Selvanayagam + +* Musharaf Aijaz Baba + +* Musharraffaijaz + +* Nathan Goldbaum +* Nicholas Christensen + +* Nitish Satyavolu + +* Omid Rajaei +* PTUsumit + +* Peter Hawkins +* Peyton Murray +* Pieter Eendebak +* Raghuveer Devulapalli +* Ralf Gommers +* Rob Timpe + +* Robert Kern +* Rohit Goswami +* Ross Barnowski +* Roy Smart +* Saransh Chopra +* Saraswathy Kalaiselvan + +* Sayed Adel +* Sebastian Berg +* Shantanu Jain + +* Shashwat Pandey + +* Shi Entong + +* Simon Altrogge +* Stan Ulbrych +* Thomas A Caswell +* ThÊotime Grohens + +* Tyler Reddy +* WANG Xuerui + +* WEN Hao + +* Wang Yang (杨æ—ē) + +* Warren Weckesser +* Warrick Ball +* William Andrea +* Yakov Danishevsky + +* Yichi Zhang + +* Yuvraj Pradhan + +* dependabot[bot] +* 
hfloveday12 + + +Pull requests merged +==================== + +A total of 556 pull requests were merged for this release. + +* `#22718 `__: DOC: Add docs on using GitHub Codespaces for NumPy development +* `#25675 `__: ENH: add matvec and vecmat gufuncs +* `#25934 `__: ENH: Convert tanh from C universal intrinsics to C++ using Highway +* `#25991 `__: ENH: Optimize polyutils as_series +* `#26018 `__: ENH add hash based unique +* `#26745 `__: ENH, DOC: Add support for interactive examples for NumPy with... +* `#26958 `__: BUG: index overlap copy +* `#27288 `__: BUG: Scalar array comparison should return np.bool +* `#27300 `__: CI: pycodestyle → ruff +* `#27309 `__: MNT: Enforce ruff/Pyflakes rules (F) +* `#27324 `__: DOC: Removing module name from by-topic docs +* `#27343 `__: ENH: Add support for flat indexing on flat iterator +* `#27404 `__: DOC: document type promotion with Python types +* `#27522 `__: ENH: Cleanup npy_find_array_wrap +* `#27523 `__: ENH: Improve performance of np.count_nonzero for float arrays +* `#27648 `__: MAINT: Fix the code style to our C-Style-Guide +* `#27738 `__: DEP: testing: disable deprecated use of keywords x/y +* `#27784 `__: BUG: ``sinc``\ : fix underflow for float16 +* `#27789 `__: ENH: Implement np.strings.slice as a gufunc +* `#27819 `__: CI: add windows free-threaded CI +* `#27823 `__: BEG, MAINT: Begin NumPy 2.3.0 development. +* `#27824 `__: BUG: Fix mismatch in definition and declaration for a couple... +* `#27826 `__: CI: update circleci to python3.11.10, limit parallel builds. +* `#27827 `__: CI: skip ninja installation in linux_qemu workflows +* `#27830 `__: ENH: speedup evaluation of numpy.polynomial.legendre.legval. 
+* `#27839 `__: DOC: Correct version-added for mean arg for nanvar and nanstd +* `#27841 `__: BUG: Never negate strides in reductions (for now) +* `#27847 `__: MAINT: Bump pypa/cibuildwheel from 2.21.3 to 2.22.0 +* `#27848 `__: MAINT: Bump mamba-org/setup-micromamba from 2.0.1 to 2.0.2 +* `#27850 `__: DOC: Correct versionadded for vecmat and matvec. +* `#27854 `__: MAINT: Use mask_store instead of store for compiler bug workaround +* `#27856 `__: SIMD: add lsx optimization for loongarch, and add Qemu tests +* `#27858 `__: DOC: Fix typo +* `#27860 `__: MAINT: Add helper for static or heap allocated scratch space +* `#27862 `__: MAINT: Drop Python 3.10 support. +* `#27864 `__: ENH: stack custom multiarray import exception with the original... +* `#27868 `__: BUG: fix importing numpy in Python's optimized mode +* `#27869 `__: TYP: Fix ``np.interp`` signature for scalar types +* `#27875 `__: MAINT: Ensure correct handling for very large unicode strings +* `#27877 `__: ENH: Refactor ``__qualname__`` across API +* `#27878 `__: DOC: Fix double import in docs +* `#27879 `__: DEV: Add venv files to .gitignore +* `#27883 `__: MAINT,ENH: Reorganize buffered iteration setup +* `#27884 `__: ENH: Remove unnecessary list collection +* `#27886 `__: MAINT: Move uint aligned check to actual transfer function setup +* `#27887 `__: MAINT: A few other small nditer fixes +* `#27896 `__: PERF: improve multithreaded ufunc scaling +* `#27897 `__: MAINT: Bump github/codeql-action from 3.27.5 to 3.27.6 +* `#27898 `__: MAINT: Remove ``25675.new_feature.rst`` snippet. +* `#27899 `__: TST: add timeouts for github actions tests and wheel builds. +* `#27901 `__: MAINT: simplify power fast path logic +* `#27910 `__: MAINT: Make qualname tests more specific and fix code where needed +* `#27914 `__: DOC: Remove 27896-snippet. +* `#27915 `__: MAINT: Bump actions/cache from 4.1.2 to 4.2.0 +* `#27917 `__: CI: Use hashes in specifying some actions. 
+* `#27920 `__: DOC: Fix invalid URL in the index.rst file. +* `#27921 `__: MAINT: Bump actions/checkout from 4.1.1 to 4.2.2 +* `#27922 `__: MAINT: Move user pointers out of axisdata and simplify iternext +* `#27923 `__: ENH: Add cython wrappers for NpyString API +* `#27927 `__: DOC: Use internal/intersphinx links for neps. +* `#27930 `__: MAINT: Fix cirrus MacOs wheel builds [wheel build] +* `#27931 `__: CI: audit with zizmor +* `#27933 `__: BUG: fix building numpy on musl s390x +* `#27936 `__: MAINT: Update main after 2.2.0 release. +* `#27940 `__: BUG: Fix potential inconsistent behaviour for high-dimensional... +* `#27943 `__: TEST: cleanups +* `#27947 `__: BUG: fix compile error libatomic link test to meson.build +* `#27955 `__: BUG: fix use-after-free error in npy_hashtable.cpp +* `#27956 `__: BLD: add missing include to fix build with freethreading +* `#27962 `__: MAINT: Bump github/codeql-action from 3.27.6 to 3.27.7 +* `#27963 `__: MAINT: Bump mamba-org/setup-micromamba from 2.0.2 to 2.0.3 +* `#27967 `__: TYP: Fix falsely rejected value types in ``ndarray.__setitem__`` +* `#27973 `__: MAINT: Apply assorted ruff/flake8-pie rules (PIE) +* `#27974 `__: MAINT: Apply ruff/flake8-implicit-str-concat rules (ISC) +* `#27975 `__: MAINT: Apply ruff/flake8-comprehensions rules (C4) +* `#27976 `__: MAINT: Apply assorted ruff/flake8-pyi rules (PYI) +* `#27978 `__: MAINT: Apply assorted ruff/flake8-simplify rules (SIM) +* `#27981 `__: DOC: Document abi3 compat +* `#27992 `__: BUG: Fix segfault in stringdtype lexsort +* `#27996 `__: MAINT: Bump github/codeql-action from 3.27.7 to 3.27.9 +* `#27997 `__: MAINT: Remove unnecessary (and not safe in free-threaded) 1-D... +* `#27998 `__: API,MAINT: Make ``NpyIter_GetTransferFlags`` public and avoid... +* `#27999 `__: DOC, MAINT: Fix typos found by codespell +* `#28001 `__: DOC: Fix documentation for np.dtype.kind to include 'T' for StringDType +* `#28003 `__: MAINT: random: Tweak module code in mtrand.pyx to fix a Cython... 
+* `#28004 `__: DOC: fix several doctests in dtype method docstrings +* `#28005 `__: BUG: Cython API was missing NPY_UINTP. +* `#28008 `__: BUG: Fix handling of matrix class in np.unique. +* `#28009 `__: TST: lib: Test average with object dtype and no weights. +* `#28013 `__: DOC: Fixed typos in development_advanced_debugging.rst +* `#28015 `__: MAINT: run ruff from the repository root +* `#28020 `__: CI: pin scipy-doctest to 1.5.1 +* `#28022 `__: MAINT: Add all submodules to ruff exclusion list. +* `#28023 `__: DOC: update to scipy-doctest 1.6.0 and fix tests +* `#28029 `__: MAINT: Bump actions/upload-artifact from 4.4.3 to 4.5.0 +* `#28032 `__: BUG,MAINT: Fix size bug in new alloc helper and use it in one... +* `#28033 `__: MAINT: Use userpointers to avoid NPY_MAXARGS in iternext() +* `#28035 `__: MAINT: Move ``lib.format`` and ``ctypeslib`` to submodules/private... +* `#28036 `__: Replace Twitter with X +* `#28039 `__: TYP: allow ``None`` in operand sequence of nditer +* `#28043 `__: BUG: Ensure einsum uses chunking (now that nditer doesn't) +* `#28051 `__: MAINT: Update main after 2.2.1 release. 
+* `#28053 `__: TYP: fix ``void`` arrays not accepting ``str`` keys in ``__setitem__`` +* `#28056 `__: BUG: Fix f2py directives and ``--lower`` casing +* `#28058 `__: MAINT: Update ``spin lint`` command +* `#28060 `__: CI: fix check that GIL remains disabled for free-threaded wheels +* `#28065 `__: TYP: fix unnecessarily broad ``integer`` binop return types +* `#28069 `__: MAINT: update NEP 29 +* `#28073 `__: TYP: use mypy_primer to surface type checking regressions +* `#28074 `__: DOC: clarify np.gradient varargs requirement for axis parameter +* `#28075 `__: MAINT: Replace usage of fixed strides with inner strides in einsum +* `#28080 `__: ENH: Allow an arbitrary number of operands in nditer +* `#28081 `__: DOC: Add release snippets for iteration changes +* `#28083 `__: MAINT: Update LICENSE Copyright to 2025 +* `#28088 `__: BUG: update check for highway compiler support +* `#28089 `__: MAINT: bump ``mypy`` to ``1.14.1`` +* `#28090 `__: DOC: Fixed docstring with example use of np.select +* `#28091 `__: MAINT: Refactor stringdtype casts.c to use cpp templates +* `#28092 `__: MAINT: LoongArch: switch away from the __loongarch64 preprocessor... +* `#28094 `__: DOC: Fix documentation example for numpy.ma.masked +* `#28100 `__: DOC: Move linalg.outer from Decompositions to Matrix and vector... +* `#28101 `__: DOC: Fix sphinx markup in source/reference/random/extending.rst +* `#28102 `__: MAINT: update oldest supported GCC version from 8.4 to 9.3 +* `#28103 `__: MAINT: random: Call np.import_array() in _examples/cython/extending_distribution... +* `#28105 `__: ENH: support no-copy pickling for any array that can be transposed... +* `#28108 `__: TYP: Better ``ndarray`` binop return types for ``float64`` &... +* `#28109 `__: TYP: Fix the incorrect ``bool`` return type of ``issubdtype`` +* `#28110 `__: TYP: Always accept ``date[time]`` in the ``datetime64`` constructor +* `#28116 `__: MAINT: random: Explicitly cast RAND_INT_MAX to double to avoid... 
+* `#28118 `__: BUG: Fix auxdata initialization in ufunc slow path +* `#28121 `__: MAINT: Correct NumPy 2.3 C-API versioning and version information +* `#28123 `__: BUG: move reduction initialization to ufunc initialization +* `#28127 `__: DOC: Improve slice docstrings +* `#28128 `__: BUG: Don't use C99 construct in import_array +* `#28129 `__: DEP: Deprecate ``numpy.typing.mypy_plugin`` +* `#28130 `__: CI: Fix mypy_primer comment workflow +* `#28133 `__: BUG: call PyType_Ready in f2py to avoid data races +* `#28134 `__: DEP: Deprecate ``numpy.typing.mypy_plugin``\ : The sequel +* `#28141 `__: DOC: Add instructions to build NumPy on WoA +* `#28142 `__: ENH: inline UTF-8 byte counter and make it branchless +* `#28144 `__: BUG: remove unnecessary call to PyArray_UpdateFlags +* `#28148 `__: MAINT: Replace usage of outdated fixed strides with inner strides... +* `#28149 `__: BUG: Fix ``from_float_positional`` errors for huge pads +* `#28154 `__: BUG: Avoid data race in PyArray_CheckFromAny_int +* `#28161 `__: DOC: Clarify ``np.loadtxt`` encoding argument default value in... +* `#28163 `__: MAINT: Avoid a redundant copy on ``a[...] = b`` +* `#28167 `__: DOC: fix formatting typo in basics.copies.rst +* `#28168 `__: TYP: Fix overlapping overloads issue in "2 in, 1 out" ufuncs +* `#28169 `__: TYP: preserve shape-type in ``ndarray.astype()`` +* `#28170 `__: TYP: Fix missing and spurious top-level exports +* `#28172 `__: BUG: Include Python-including headers first +* `#28179 `__: DOC: Remove duplicate wishlist tab in NEPs. +* `#28180 `__: DOC: Update links in HOWTO_RELEASE.rst +* `#28181 `__: CI: replace quansight-labs/setup-python with astral-sh/setup-uv +* `#28183 `__: MAINT: testing: specify python executable to use in extbuild +* `#28186 `__: MAINT: Update main after 2.2.2 release. +* `#28189 `__: MAINT, DOC: Add sphinx extension to allow svg images in PDF docs... +* `#28202 `__: MAINT: Use VQSORT_COMPILER_COMPATIBLE to determine if we should... 
+* `#28203 `__: BUG: fix data race in ``np.repeat`` +* `#28206 `__: BUG: Remove unnecessary copying and casting from out array in... +* `#28210 `__: corrected the numpy logo visibility issues on darkmode with the... +* `#28211 `__: MAINT: Hide decorator from pytest traceback +* `#28214 `__: ENH: add pkg_config entrypoint +* `#28219 `__: DOC: Add versionadded directive for axis argument in trim_zeros... +* `#28221 `__: BUG: allclose does not warn for invalid value encountered in... +* `#28222 `__: MAINT: Update highway to latest +* `#28223 `__: MAINT: Add [[maybe_unused] to silence some warnings +* `#28226 `__: DOC: Clarify ``__array__`` protocol arguments +* `#28228 `__: BUG: handle case when StringDType na_object is nan in float to... +* `#28229 `__: DOC: Fix a typo in doc/source/dev/development_workflow.rst +* `#28230 `__: DOC: FIx a link in Roadmap +* `#28231 `__: DOC: Fix external links in the navbar of neps webpage +* `#28232 `__: BUG: Fix float128 FPE handling on ARM64 with Clang compiler +* `#28234 `__: BUG: Add cpp atomic support +* `#28235 `__: MAINT: Compile fix for clang-cl on WoA +* `#28241 `__: TYP: Avoid upcasting ``float64`` in the set-ops +* `#28242 `__: CI: Fix the github label for ``TYP:`` PR's and issues +* `#28246 `__: BLD: better fix for clang / ARM compiles +* `#28250 `__: dtype.__repr__: prefer __name__ for user-defined types. +* `#28252 `__: test_casting_unittests.py: remove tuple +* `#28254 `__: MAINT: expire deprecations +* `#28258 `__: DOC: Change the scientific page link in NumPy/MATLAB +* `#28259 `__: TYP: Fix ``timedelta64.__divmod__`` and ``timedelta64.__mod__``... +* `#28262 `__: TYP: expire deprecations +* `#28263 `__: ENH: Add ARM64 (aarch64) CI testing +* `#28264 `__: DOC: Remove an invalid link in f2py-examples.rst +* `#28270 `__: TYP: Fixed missing typing information of set_printoptions +* `#28273 `__: CI: update sanitizer CI to use python compiled with ASAN and... 
+* `#28276 `__: BUG: fix incorrect bytes to StringDType coercion +* `#28279 `__: TYP: Fix scalar constructors +* `#28280 `__: TYP: stub ``numpy.matlib`` +* `#28281 `__: TYP: stub the missing ``numpy.testing`` modules +* `#28288 `__: DOC: Correct a typo in Intel License URL +* `#28290 `__: BUG: fix race initializing legacy dtype casts +* `#28291 `__: BUG: Prevent class-bound attr mutation in ``lib._iotools.NameValidator`` +* `#28294 `__: MAINT: Enable building tanh on vector length agnostic architectures +* `#28295 `__: TYP: stub ``numpy._globals`` +* `#28296 `__: TYP: stub ``numpy._expired_attrs_2_0`` +* `#28297 `__: TYP: stub ``numpy._configtool`` and ``numpy._distributor_init`` +* `#28298 `__: TYP: stub ``numpy.lib._iotools`` +* `#28299 `__: TYP: stub ``lib.user_array`` and ``lib._user_array_impl`` +* `#28300 `__: TYP: stub ``lib.introspect`` +* `#28301 `__: TYP: stub ``lib.recfunctions`` +* `#28302 `__: TYP: fix and improve ``numpy._core.arrayprint`` +* `#28303 `__: TYP: stub ``lib._datasource`` and fix ``lib._npyio_impl`` +* `#28304 `__: DOC: Remove reference to python2 +* `#28307 `__: MAINT: bump ``mypy`` to ``1.15.0`` +* `#28312 `__: DOC: remove references to Python 2 +* `#28319 `__: BUG: numpy.loadtxt reads only 50000 lines when skip_rows >= max_rows +* `#28320 `__: MAINT: Update actions/cache and use hash. +* `#28323 `__: DOC: Correct a typo in Exception TooHardError +* `#28327 `__: TYP: fix positional- and keyword-only params in ``astype``\ ,... +* `#28328 `__: CI: Update FreeBSD base image in ``cirrus_arm.yml`` +* `#28330 `__: ENH: Ensure ``lib._format_impl.read_array`` handles file reading... +* `#28332 `__: BUG: avoid segfault in np._core.multiarray.scalar +* `#28335 `__: MAINT: Update main after 2.2.3 release. +* `#28336 `__: DOC: Update link to Anaconda Eclipse/PyDev documentation +* `#28338 `__: MAINT: use OpenBLAS 0.3.29 +* `#28339 `__: MAIN: Update c,c++ line length to 88 +* `#28343 `__: BUG: Fix ``linalg.norm`` to handle empty matrices correctly. 
+* `#28350 `__: DOC: fix typo +* `#28353 `__: DOC: Make numpy.fft a clickable link to module +* `#28355 `__: BUG: safer bincount casting +* `#28358 `__: MAINT: No need to check for check for FPEs in casts to/from object +* `#28359 `__: DOC: Make the first paragraph more concise in internals.rst +* `#28361 `__: BUG: Make np.nonzero threading safe +* `#28370 `__: DOC: Revise bullet point formatting in ``arrays.promotions.rst`` +* `#28382 `__: DOC: fix C API docs for ``PyArray_Size`` +* `#28383 `__: DOC: Added links to CTypes and CFFI in Numba +* `#28386 `__: MAINT: Extend the default ruff exclude files +* `#28387 `__: DOC: fix expected exception from StringDType without string coercion +* `#28390 `__: MAINT: speed up slow test under TSAN +* `#28391 `__: CI: use free-threaded build for ASAN tests +* `#28392 `__: CI: build Linux aarch64 wheels on GitHub Actions +* `#28393 `__: BUG: Fix building on s390x with clang +* `#28396 `__: BUG: Fix crackfortran parsing error when a division occurs within... +* `#28404 `__: MAINT: remove legacy ucsnarrow module +* `#28406 `__: BUG: Include Python.h first +* `#28407 `__: BUG: sanity check ``__array_interface__`` number of dimensions +* `#28408 `__: DOC: Update link to Nix in Cross Compilation +* `#28411 `__: CI: use QEMU 9.2.2 for Linux Qemu tests +* `#28413 `__: DOC: add scimath in np.lib submodules listing +* `#28414 `__: DOC: Add missing punctuation to the random sampling page +* `#28415 `__: BLD: update cibuildwheel and build PyPy 3.11 wheels [wheel build] +* `#28421 `__: BUG: skip legacy dtype multithreaded test on 32 bit runners +* `#28426 `__: BUG: Limit the maximal number of bins for automatic histogram... 
+* `#28427 `__: DOC: remove mention of Poly.nickname +* `#28431 `__: MAINT: PY_VERSION_HEX simplify +* `#28436 `__: BUILD: move to manylinux_2_28 wheel builds +* `#28437 `__: DOC: fix documentation for Flag checking functions and macros +* `#28442 `__: ENH: Check for floating point exceptions in dot +* `#28444 `__: DOC: fix URL redirects +* `#28447 `__: DOC: repositioned bitwise_count under bit-wise operations +* `#28451 `__: DOC: Add -avx512_spr to disable AVX512 in build options +* `#28452 `__: TYP: stub ``random._pickle`` +* `#28453 `__: BUG: Fix return type of NpyIter_GetIterNext in Cython declarations +* `#28455 `__: MAINT: Bump mamba-org/setup-micromamba from 2.0.3 to 2.0.4 +* `#28456 `__: MAINT: Bump actions/cache from 4.2.0 to 4.2.2 +* `#28458 `__: MAINT: Bump actions/upload-artifact from 4.5.0 to 4.6.1 +* `#28459 `__: MAINT: Bump github/codeql-action from 3.27.9 to 3.28.11 +* `#28460 `__: MAINT: Bump astral-sh/setup-uv from 5.2.1 to 5.3.1 +* `#28461 `__: MAINT: Update dependabot.yml file +* `#28462 `__: TYP: Add specializations to meshgrid stubs +* `#28464 `__: MAINT: Bump actions/setup-python from 5.3.0 to 5.4.0 +* `#28465 `__: MAINT: Bump ossf/scorecard-action from 2.4.0 to 2.4.1 +* `#28466 `__: MAINT: Bump actions/checkout from 4.1.1 to 4.2.2 +* `#28467 `__: MAINT: Bump conda-incubator/setup-miniconda from 3.1.0 to 3.1.1 +* `#28468 `__: MAINT: Bump actions/download-artifact from 4.1.8 to 4.1.9 +* `#28473 `__: DOC: add typing badge to README +* `#28475 `__: MAINT: skip slow_pypy tests on pypy +* `#28477 `__: MAINT: fix typo in normal distribution functions docstrings +* `#28480 `__: ENH: Convert logical from C universal intrinsics to C++ using... +* `#28483 `__: DOC: only change tp_name on CPython +* `#28485 `__: MAINT: Bump actions/setup-python from 5.3.0 to 5.4.0 +* `#28488 `__: fix aarch64 CI run +* `#28489 `__: MAINT: Enable building loop_logical on vector length agnostic... 
+* `#28491 `__: TYP: fix typing errors in ``_core.shape_base`` +* `#28492 `__: TYP: fix typing errors in ``_core.strings`` +* `#28494 `__: TYP: fix typing errors in ``_core.records`` +* `#28495 `__: DOC: let docstring mention that unique_values is now unsorted +* `#28497 `__: TYP: don't use literals in shape-types +* `#28498 `__: TYP: accept non-integer shapes in array constructor without a... +* `#28499 `__: TYP: remove unneseccary cast +* `#28500 `__: TYP: stub ``numpy.random._bounded_integers`` +* `#28502 `__: TYP: stub ``numpy.random._common`` +* `#28503 `__: API: Always allow ``sorted=False`` and make a note about it +* `#28505 `__: TYP: stub ``numpy._core.umath`` +* `#28506 `__: TYP: fix typing errors in ``numpy.lib._arrayterator_impl`` +* `#28507 `__: MAINT: remove ``ma.timer_comparison`` +* `#28508 `__: TYP: fix signatures of ``ndarray.put`` and ``ndarray.view`` +* `#28509 `__: TYP: annotate the missing ``ufunc.resolve_dtypes`` method +* `#28511 `__: TYP: fix stubtest errors in ``numpy._core`` +* `#28513 `__: TYP: stub ``numpy._core.overrides`` +* `#28514 `__: TYP: stub ``numpy._utils`` +* `#28515 `__: TYP: stub ``numpy._core._dtype[_ctypes]`` +* `#28517 `__: TYP: stub the remaining ``numpy._core.\*`` modules +* `#28518 `__: TYP: stub the missing submodules of ``numpy.linalg`` +* `#28519 `__: TYP: stub ``numpy._pyinstaller`` +* `#28520 `__: TYP: stub ``numpy.fft.helper`` (deprecated) +* `#28522 `__: TYP: stub ``numpy.f2py`` +* `#28523 `__: TYP: annotate the missing deprecated ``row_stack`` function +* `#28524 `__: CI, TST: Bump to cibuildwheel 2.23 (Pyodide 0.27.0) for WASM... 
+* `#28525 `__: TYP: fix stubtest errors in ``numpy.dtype`` and ``numpy.dtypes.\*`` +* `#28526 `__: TYP: fix stubtest errors in ``timedelta64`` and ``object_`` +* `#28527 `__: TYP: fix stubtest errors in ``numpy.lib._function_base_impl`` +* `#28528 `__: TYP: fix stubtest errors in ``numpy.lib._arraysetops_impl`` +* `#28529 `__: TYP: fix stubtest errors in ``numpy.lib._index_tricks_impl`` +* `#28530 `__: TYP: fix stubtest errors in ``numpy.lib._twodim_base_impl`` +* `#28531 `__: ENH: Add Cygwin extensions to list to copy to CWD in f2py meson... +* `#28532 `__: DOC: minor editorial change +* `#28535 `__: TYP: fix stubtest errors in ``numpy._core`` +* `#28536 `__: TYP: fix stubtest errors in ``numpy._globals`` +* `#28537 `__: TYP: fix stubtest errors in ``numpy.mat[rix]lib`` +* `#28538 `__: TYP: fix stubtest errors in ``numpy.random`` +* `#28539 `__: TYP: fix stubtest errors in ``numpy.testing`` +* `#28540 `__: TYP: fix typing errors in ``numpy.ndarray`` +* `#28541 `__: TYP: fix stubtest error in ``numpy.ma`` +* `#28546 `__: MAINT: Update main after NumPy 2.2.4 release. 
+* `#28547 `__: MAINT: Bump pypa/cibuildwheel from 2.23.0 to 2.23.1 +* `#28555 `__: MAINT: Bump actions/download-artifact from 4.1.9 to 4.2.0 +* `#28556 `__: NEP 54: Change status to Accepted +* `#28560 `__: MAINT: Bump actions/download-artifact from 4.2.0 to 4.2.1 +* `#28561 `__: MAINT: Bump github/codeql-action from 3.28.11 to 3.28.12 +* `#28562 `__: MAINT: Bump actions/upload-artifact from 4.6.1 to 4.6.2 +* `#28563 `__: MAINT: Bump actions/cache from 4.2.2 to 4.2.3 +* `#28568 `__: MAINT: Bump astral-sh/setup-uv from 5.3.1 to 5.4.0 +* `#28569 `__: Fixing various spelling errors +* `#28571 `__: BLD: use ``manylinux_2_28:2025.03.23-1`` [wheel build] +* `#28576 `__: API,ENH: Allow forcing an array result in ufuncs +* `#28577 `__: BUG: avoid deadlocks with C++ shared mutex in dispatch cache +* `#28581 `__: MAINT: Bump github/codeql-action from 3.28.12 to 3.28.13 +* `#28586 `__: MAINT: Bump pypa/cibuildwheel from 2.23.1 to 2.23.2 +* `#28587 `__: MAINT: Bump actions/setup-python from 5.4.0 to 5.5.0 +* `#28591 `__: TYP: Type masked array shape, dtype, __int__, and __float__ +* `#28593 `__: TYP: Type ``numpy.ma.min`` +* `#28600 `__: BUG: Set writeable flag for writeable dlpacks. +* `#28601 `__: MAINT: Bump scientific-python/upload-nightly-action from 0.6.1... 
+* `#28607 `__: CI: fix cirrus config [wheel build] +* `#28611 `__: MAINT: Bump astral-sh/setup-uv from 5.4.0 to 5.4.1 +* `#28612 `__: TYP: Type ``ma.max`` and ``ma.ptp`` +* `#28615 `__: ENH: Upgrade Array API version to 2024.12 +* `#28616 `__: TYP: Type ``ma.MaskedArray.min`` +* `#28617 `__: MAINT: Bump actions/dependency-review-action from 4.5.0 to 4.6.0 +* `#28618 `__: MAINT, CI: Update Ubuntu to 22.04 in azure-pipelines +* `#28619 `__: ENH: Use openmp on x86-simd-sort to speed up np.sort and np.argsort +* `#28621 `__: DOC: Fix typo in ``numpy/typing/__init__.py`` +* `#28623 `__: TYP: Type ``ma.MaskedArray.max`` and ``ma.MaskedArray.ptp`` +* `#28624 `__: BUG: fix ``np.vectorize`` for object dtype +* `#28626 `__: DOC: update array API standard version in compatibility page +* `#28627 `__: MAINT: replace string.format() with f-strings +* `#28635 `__: BLD: Update vendor-meson to fix module_feature conflicts arguments... +* `#28637 `__: TYP: Fix overload for ``ma.MaskedArray.{min,max,ptp}`` and ``ma.{min,max,ptp}`` ... +* `#28638 `__: TYP: Type ``MaskedArray.{argmin, argmax}`` and ``np.ma.{argmin,``... +* `#28643 `__: BUG: fix deepcopying StringDType arrays +* `#28644 `__: TYP: fix ``ndarray.tolist()`` and ``.item()`` for unknown dtype +* `#28645 `__: DOC: fixes classes decorated with set_module not showing its... 
+* `#28647 `__: DOC: Fix typos found by codespell +* `#28649 `__: ENH: Improve np.linalg.det performance +* `#28653 `__: CI: Replace QEMU armhf with native (32-bit compatibility mode) +* `#28657 `__: TYP: simplified type-aliases in ``numpy._typing`` +* `#28660 `__: TYP: Accept objects that ``write()`` to ``str`` in ``savetxt`` +* `#28662 `__: MAINT: Remove distutils CPU dispatcher compatibility code +* `#28664 `__: TYP: Type ``MaskedArray.sort`` +* `#28666 `__: MAINT: Bump github/codeql-action from 3.28.13 to 3.28.14 +* `#28667 `__: TYP: replace ``_ScalarType`` with ``_SCT`` +* `#28668 `__: TYP: replace ``_ArrayType`` with ``_ArrayT`` +* `#28669 `__: TYP: default to ``dtype[Any]`` +* `#28671 `__: SIMD: Fix Highway QSort symbol linking error on aarch32/ASIMD +* `#28672 `__: MAINT: Bump github/codeql-action from 3.28.14 to 3.28.15 +* `#28674 `__: TYP: add missing ``"b1"`` literals for ``dtype[bool]`` +* `#28675 `__: TYP: fix and improve ``numpy.lib._type_check_impl`` +* `#28676 `__: TYP: fix mypy test failures +* `#28677 `__: TYP: Type ``MaskedArray.partition`` and ``MaskedArray.argpartition`` +* `#28678 `__: DEP: Deprecate ``.T`` property for non-2dim arrays and scalars +* `#28680 `__: TYP: Type ``MaskedArray.take`` and ``np.ma.take`` +* `#28684 `__: TYP: replace ``_DType`` with ``_DTypeT`` +* `#28688 `__: TYP: rename ``_ShapeType`` TypeVar to ``_ShapeT`` +* `#28689 `__: TYP: Type ``MaskedArray.__{ge,gt,le,lt}__`` +* `#28690 `__: TYP: replace ``_SCT`` with ``_ScalarT`` +* `#28693 `__: BLD: fix meson_version warning +* `#28695 `__: DOC: linalg.matrix_transpose: add alias note +* `#28699 `__: TYP: Fix false rejection of ``NDArray[object_].__abs__()`` +* `#28702 `__: TYP: Fix inconsistent ``NDArray[float64].__[r]truediv__`` return... 
+* `#28703 `__: MAINT: Improve float16 and float32 printing +* `#28710 `__: ENH: Improve performance for np.result_type +* `#28712 `__: MAINT: ``%i`` → ``%d`` +* `#28715 `__: TYP: Type ``np.ma.{is_masked,ndim,size,ids,iscontiguous}`` +* `#28717 `__: TYP: fix string-like ``ndarray`` rich comparison operators +* `#28719 `__: MAINT: switching from ``%i`` to ``fstrings`` +* `#28720 `__: TYP: drop py310 support +* `#28724 `__: STY: Apply assorted ruff rules (RUF) +* `#28725 `__: STY: Enforce ruff/pycodestyle warnings (W) +* `#28726 `__: STY: Apply assorted ruff/refurb rules (FURB) +* `#28728 `__: STY: Apply assorted ruff/pyupgrade rules (UP) +* `#28731 `__: BUG: Prevent nanmax/nanmin from copying memmap arrays +* `#28733 `__: TYP: remove ``_typing._UnknownType`` and ``_ArrayLikeUnknown`` +* `#28735 `__: TYP: Type ``MaskedArray.count`` and ``np.ma.count`` +* `#28738 `__: TYP: fix incorrect ``random.Generator.integers`` return type +* `#28739 `__: MNT: get rid of references to Python 3.10 +* `#28740 `__: MAINT: Bump astral-sh/setup-uv from 5.4.1 to 5.4.2 +* `#28741 `__: BUG: Re-enable overriding functions in the ``np.strings`` module. 
+* `#28742 `__: TYP: Type ``MaskedArray.filled`` and ``np.ma.filled`` +* `#28743 `__: MNT: Enforce ruff/pygrep-hooks rules (PGH) +* `#28744 `__: STY: Apply more ruff rules (RUF) +* `#28745 `__: TYP: Type ``MaskedArray.put``\ , ``np.ma.put``\ , ``np.ma.putmask`` +* `#28746 `__: TYP: ``numpy.ma`` squiggly line cleanup +* `#28747 `__: TYP: some ``[arg]partition`` fixes +* `#28748 `__: ENH: Support Python 3.14 +* `#28750 `__: TYP: fix ``count_nonzero`` signature +* `#28751 `__: MNT: discard Python 2 leftover +* `#28752 `__: MNT: Apply ruff/Pylint rule PLW0129 (assertions that never fail) +* `#28754 `__: MNT: Enforce ruff/Pylint Error rules (PLE) +* `#28755 `__: MNT: Apply assorted ruff/Pylint Refactor rules (PLR) +* `#28756 `__: MNT: Apply assorted ruff/Pylint Warning rules (PLW) +* `#28757 `__: BUG: Fix AVX512_SPR dispatching for SVML half-precision operations +* `#28760 `__: STY: Apply ruff/pyupgrade rule UP032 +* `#28763 `__: STY: Use f-string instead of ``format`` call +* `#28764 `__: MNT: Enforce ruff rules: Flynt (FLY) and flake8-pie (PIE) +* `#28765 `__: MNT: Enforce ruff/flake8-bugbear rules (B) +* `#28766 `__: TYP: Type ``MaskedArray.compressed`` and ``np.ma.compressed`` +* `#28768 `__: MAINT: getting rid of old ``%`` and ``.format(...)`` strings... 
+* `#28769 `__: ENH: Improve Floating Point Cast Performance on ARM +* `#28770 `__: MNT: Enforce ruff/pyupgrade rules (UP) +* `#28771 `__: ENH: Include offset in error message when fallocate() fails +* `#28775 `__: STY: Partially apply ruff/pycodestyle rules (E) +* `#28779 `__: MAINT: Update main after Numpy 2.2.5 release +* `#28789 `__: BUG: Re-enable GCC function-specific optimization attributes +* `#28793 `__: TYP: Type ``np.ma.allclose`` and ``np.ma.allequal`` +* `#28798 `__: TST: skip test if spawning threads triggers a RuntimeError +* `#28803 `__: MAINT: Bump github/codeql-action from 3.28.15 to 3.28.16 +* `#28804 `__: BUG: fix heap buffer overflow in np.strings.find +* `#28806 `__: BUG: Fix `` __array__(None)`` to preserve dtype +* `#28807 `__: TYP: fix ``NDArray[floating] + float`` return type +* `#28808 `__: CI: Make clang_TSAN CI job use cpython_sanity docker image +* `#28809 `__: TYP: write ``dtype[Any]`` as ``dtype`` +* `#28810 `__: TYP: replace ``_Self`` type parameters with ``typing.Self`` +* `#28811 `__: TYP: remove unnecessary scalar-type ``Any`` type-args +* `#28816 `__: MAINT: Bump actions/setup-python from 5.5.0 to 5.6.0 +* `#28817 `__: MAINT: Bump astral-sh/setup-uv from 5.4.2 to 6.0.0 +* `#28818 `__: MAINT: Bump actions/download-artifact from 4.2.1 to 4.3.0 +* `#28819 `__: TYP: simplify redundant unions of builtin scalar types +* `#28820 `__: TYP: ``None`` at the end of a union +* `#28821 `__: BUG: Use unrotated companion matrix in polynomial.polyroots. 
+* `#28831 `__: TYP: Fix type annotations for ``np.ma.nomask`` and ``np.ma.MaskType`` +* `#28832 `__: TYP: Type ``np.ma.getmask`` +* `#28833 `__: TYP: Type ``np.ma.is_mask`` +* `#28836 `__: ENH: Provide Windows 11 ARM64 wheels (#22530) +* `#28841 `__: BUG: Fix Clang warning in loops_half.dispatch.c.src +* `#28845 `__: TYP: Type ``MaskedArray.nonzero`` +* `#28847 `__: TYP: Use _Array1D alias in ``numpy.ma.core.pyi`` +* `#28848 `__: TYP: Type ``MaskedArray.ravel`` +* `#28849 `__: TYP: Type ``MaskedArray.repeat``\ , improve overloads for ``NDArray.repeat``\... +* `#28850 `__: TYP: Type ``MaskedArray.swapaxes`` +* `#28854 `__: MAINT: Bump pypa/cibuildwheel from 2.23.2 to 2.23.3 +* `#28855 `__: TYP: add missing ``mod`` params to ``__[r]pow__`` +* `#28856 `__: TYP: generic ``StringDType`` +* `#28857 `__: TYP: implicit ``linalg`` private submodule re-exports +* `#28858 `__: TYP: fix the ``set_module`` signature +* `#28859 `__: DOC: Replace http:// with https:// +* `#28860 `__: BLD: update vendored Meson: v1.6.1 and iOS support +* `#28862 `__: BUG: fix stringdtype singleton thread safety +* `#28863 `__: TYP: Improve consistency of (masked) array typing aliases +* `#28867 `__: TYP: Type ``MaskedArray.{__setmask__,mask,harden_mask,soften_mask,hardmask,unsha``... 
+* `#28868 `__: TYP: Type ``MaskedArray.{imag, real, baseclass, mT}`` +* `#28869 `__: MAINT: Bump astral-sh/setup-uv from 6.0.0 to 6.0.1 +* `#28870 `__: MNT: retire old script for SVN repositories +* `#28871 `__: MNT: retire script superseded by ruff rule W605 +* `#28872 `__: DOC: consistent and updated LICENSE files for wheels +* `#28874 `__: DOC: ``numpy.i`` will not be included as part of SWIG +* `#28876 `__: MNT: discard unused function using os.system() +* `#28877 `__: DOC: update content of cross compilation build docs +* `#28878 `__: STY: Enforce more ruff rules +* `#28879 `__: STY: Apply assorted ruff/refurb rules (FURB) +* `#28880 `__: TYP: Type ``MaskedArray.all`` and ``MaskedArray.any`` +* `#28882 `__: MAINT: address warning in SWIG tests +* `#28883 `__: MAINT: from_dlpack thread safety fixes +* `#28884 `__: DEP: deprecate ``numpy.typing.NBitBase`` +* `#28887 `__: MAINT: Bump github/codeql-action from 3.28.16 to 3.28.17 +* `#28888 `__: DOC: math mode x to \times in docstring for numpy.linalg.multi_dot +* `#28892 `__: MAINT: Avoid dereferencing/strict aliasing warnings +* `#28893 `__: TYP: remove non-existent extended-precision scalar types +* `#28898 `__: BUG: Fix missing check for PyErr_Occurred() in _pyarray_correlate. +* `#28904 `__: BLD: update vendored Meson to include iOS fix +* `#28905 `__: TYP: Test ``MaskedArray.transpose`` and ``MaskedArray.T``\ ,... +* `#28906 `__: TYP: np.argmin and np.argmax overload changes +* `#28908 `__: TYP: Fix non-existent ``CanIndex`` annotation in ``ndarray.setfield`` +* `#28912 `__: TYP: add ``float64`` overloads to ``{lin,log,geom}space`` +* `#28918 `__: DOC: Fixes absent line numbers on link to classes decorated with... 
+* `#28923 `__: BUG: Use string conversion defined on dtype for .str +* `#28927 `__: MAINT: Remove outdated ``MaskedArray.__div__`` and ``MaskedArray.__idiv__`` +* `#28928 `__: MNT: add support for 3.14.0b1 +* `#28929 `__: MAINT: remove py2 ``__div__`` methods from ``poly1d`` and ``ABCPolyBase`` +* `#28930 `__: MAINT: remove py2 ``__div__`` remnants from the tests +* `#28931 `__: MAINT: remove py2 ``__div__`` methods from ``lib.user_array.container`` +* `#28932 `__: MAINT: remove references to 256-bits extended precision types +* `#28933 `__: MAINT: Use consistent naming for ``numpy/typing/tests/data/fail/ma.pyi`` +* `#28934 `__: TYP, TST: improved type-testing +* `#28935 `__: MAINT: Enable ruff E251 +* `#28936 `__: TST: Prevent import error when tests are not included in the... +* `#28937 `__: CI: fix TSAN CI by using a different docker image +* `#28938 `__: MNT: clean up free-threaded CI configuration +* `#28939 `__: MAINT: Bump actions/dependency-review-action from 4.6.0 to 4.7.0 +* `#28940 `__: TYP: optional type parameters for ``ndarray`` and ``flatiter`` +* `#28941 `__: DOC: Fix titles in ``development_ghcodespaces.rst`` +* `#28945 `__: MAINT: Enable linting with ruff E501 +* `#28952 `__: MAINT: Bump actions/dependency-review-action from 4.7.0 to 4.7.1 +* `#28954 `__: MAINT: Enable linting with ruff E501 for numpy._core +* `#28956 `__: DOC: Remove references to Python 2/3 +* `#28958 `__: TYP: reject complex scalar types in ``ndarray.__ifloordiv__`` +* `#28959 `__: TYP: remove redundant ``ndarray`` inplace operator overloads +* `#28960 `__: TYP: fix mypy & pyright errors in ``np.matrix`` +* `#28961 `__: DEP: finalize removal of ``numpy.compat`` +* `#28962 `__: TYP: type-testing without the mypy plugin +* `#28963 `__: MAINT: Update ruff to 0.11.9 in linting requirements +* `#28969 `__: MNT: Enforce ruff/isort rules (I) +* `#28971 `__: MAINT: Enable linting with ruff E501 +* `#28972 `__: MNT: Get rid of ``# pylint: `` pragma controls +* `#28974 `__: MNT: Get rid 
of ``version: $Id`` CVS tags +* `#28975 `__: MNT: import numpy as np +* `#28976 `__: MNT: Get rid of Pyflakes / flake8 +* `#28977 `__: MNT: Enforce ruff/flake8-implicit-str-concat rules (ISC) +* `#28978 `__: MNT: Enforce ruff/pandas-vet rules (PD) +* `#28981 `__: STY: reformat the ``_typing`` imports without trailing commas +* `#28982 `__: TYP: Gradual shape type defaults +* `#28984 `__: MNT: Use isinstance() instead of comparing type() +* `#28986 `__: TYP: Type ``MaskedArray.__{iadd,isub,imul,itruediv,ifloordiv,ipow}__`` +* `#28987 `__: MNT: Align ruff pin between ``requirements/linter_requirements.txt``... +* `#28988 `__: TYP: add missing ``ndarray.__{add,mul}__`` ``character`` type... +* `#28989 `__: MAINT: Bump github/codeql-action from 3.28.17 to 3.28.18 +* `#28990 `__: Revert "DEP: Deprecate ``.T`` property for non-2dim arrays and... +* `#28993 `__: MAINT: update NPY_FEATURE_VERSION after dropping python 3.10 +* `#28994 `__: TYP: allow inplace division of ``NDArray[timedelta64]`` by floats +* `#28995 `__: TYP: remove ``from __future__ import annotations`` +* `#28998 `__: MAINT: Update main after 2.2.6 release. +* `#29002 `__: MAINT: Update download-wheels for multiple pages +* `#29006 `__: ENH: Disable the alloc cache under address and memory sanitizers +* `#29008 `__: MNT: fix CI issues on main +* `#29018 `__: MAINT: Bump larsoner/circleci-artifacts-redirector-action from... +* `#29033 `__: BUG: Fix workflow error +* `#29042 `__: MNT: constant string arrays instead of pointers in C +* `#29043 `__: BUG: Avoid compile errors in f2py modules +* `#29044 `__: BUG: Fix f2py derived types in modules +* `#29046 `__: BUG: Fix cache use regression +* `#29047 `__: REL: Prepare for the NumPy 2.3.0rc1 release [wheel build] +* `#29070 `__: TYP: Various typing fixes. 
+* `#29072 `__: MAINT: use pypy 3.11 nightly which has a fix for ctypeslib +* `#29073 `__: BLD: use sonoma image on Cirrus for wheel build +* `#29074 `__: BUG: add bounds-checking to in-place string multiply +* `#29082 `__: BLD: bump OpenBLAS version, use OpenBLAS for win-arm64 [wheel... +* `#29089 `__: MNT: Avoid use of deprecated _PyDict_GetItemStringWithError in... +* `#29099 `__: BUG: f2py: thread-safe forcomb (#29091) +* `#29100 `__: TYP: fix NDArray[integer] inplace operator mypy issue +* `#29101 `__: PERF: Make NpzFile member existence constant time +* `#29116 `__: MAINT: Update to vs2022 in NumPy 2.3.x [wheel build] +* `#29118 `__: MAINT: fix SPDX license expressions for LAPACK, GCC runtime libs +* `#29132 `__: MAINT: Fix for segfaults with GCC 15 + diff --git a/doc/changelog/2.3.1-changelog.rst b/doc/changelog/2.3.1-changelog.rst new file mode 100644 index 000000000000..a1c840f8beda --- /dev/null +++ b/doc/changelog/2.3.1-changelog.rst @@ -0,0 +1,34 @@ + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Brad Smith + +* Charles Harris +* Developer-Ecosystem-Engineering +* François Rozet +* Joren Hammudoglu +* Matti Picus +* Mugundan Selvanayagam +* Nathan Goldbaum +* Sebastian Berg + +Pull requests merged +==================== + +A total of 12 pull requests were merged for this release. + +* `#29140 `__: MAINT: Prepare 2.3.x for further development +* `#29191 `__: BUG: fix matmul with transposed out arg (#29179) +* `#29192 `__: TYP: Backport typing fixes and improvements. +* `#29205 `__: BUG: Revert ``np.vectorize`` casting to legacy behavior (#29196) +* `#29222 `__: TYP: Backport typing fixes +* `#29233 `__: BUG: avoid negating unsigned integers in resize implementation... 
+* `#29234 `__: TST: Fix test that uses uninitialized memory (#29232) +* `#29235 `__: BUG: Address interaction between SME and FPSR (#29223) +* `#29237 `__: BUG: Enforce integer limitation in concatenate (#29231) +* `#29238 `__: CI: Add support for building NumPy with LLVM for Win-ARM64 +* `#29241 `__: ENH: Detect CPU features on OpenBSD ARM and PowerPC64 +* `#29242 `__: ENH: Detect CPU features on FreeBSD / OpenBSD RISC-V64. diff --git a/doc/changelog/2.3.2-changelog.rst b/doc/changelog/2.3.2-changelog.rst new file mode 100644 index 000000000000..5c893a510ae7 --- /dev/null +++ b/doc/changelog/2.3.2-changelog.rst @@ -0,0 +1,38 @@ + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* !DWesl +* Charles Harris +* Joren Hammudoglu +* Maanas Arora +* Marco Edward Gorelli +* Matti Picus +* Nathan Goldbaum +* Sebastian Berg +* kostayScr + + +Pull requests merged +==================== + +A total of 16 pull requests were merged for this release. + +* `#29256 `__: MAINT: Prepare 2.3.x for further development +* `#29283 `__: TYP: Work around a mypy issue with bool arrays (#29248) +* `#29284 `__: BUG: fix fencepost error in StringDType internals +* `#29287 `__: BUG: handle case in mapiter where descriptors might get replaced... +* `#29350 `__: BUG: Fix shape error path in array-interface +* `#29412 `__: BUG: Allow reading non-npy files in npz and add test +* `#29413 `__: TST: Avoid uninitialized values in test (#29341) +* `#29414 `__: BUG: Fix reference leakage for output arrays in reduction functions +* `#29415 `__: BUG: fix casting issue in center, ljust, rjust, and zfill (#29369) +* `#29416 `__: TYP: Fix overloads in ``np.char.array`` and ``np.char.asarray``... 
+* `#29417 `__: BUG: Any dtype should call ``square`` on ``arr \*\* 2`` (#29392) +* `#29424 `__: MAINT: use a stable pypy release in CI +* `#29425 `__: MAINT: Support python 314rc1 +* `#29429 `__: MAINT: Update highway to match main. +* `#29430 `__: BLD: use github to build macos-arm64 wheels with OpenBLAS and... +* `#29437 `__: BUG: fix datetime/timedelta hash memory leak (#29411) diff --git a/doc/changelog/2.3.3-changelog.rst b/doc/changelog/2.3.3-changelog.rst new file mode 100644 index 000000000000..0398b30072af --- /dev/null +++ b/doc/changelog/2.3.3-changelog.rst @@ -0,0 +1,50 @@ + +Contributors +============ + +A total of 13 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Aleksandr A. Voyt + +* Bernard Roesler + +* Charles Harris +* Hunter Hogan + +* Joren Hammudoglu +* Maanas Arora +* Matti Picus +* Nathan Goldbaum +* Raghuveer Devulapalli +* Sanjay Kumar Sakamuri Kamalakar + +* Tobias Markus + +* Warren Weckesser +* Zebreus + + +Pull requests merged +==================== + +A total of 23 pull requests were merged for this release. + +* `#29440 `__: MAINT: Prepare 2.3.x for further development. +* `#29446 `__: BUG: Fix test_configtool_pkgconfigdir to resolve PKG_CONFIG_DIR... +* `#29447 `__: BLD: allow targeting webassembly without emscripten +* `#29460 `__: MAINT: Backport write_release.py +* `#29473 `__: MAINT: Bump pypa/cibuildwheel from 3.1.0 to 3.1.2 +* `#29500 `__: BUG: Always return a real dtype from linalg.cond (gh-18304) (#29333) +* `#29501 `__: MAINT: Add .file entry to all .s SVML files +* `#29556 `__: BUG: Casting from one timedelta64 to another didn't handle NAT. 
+* `#29562 `__: BLD: update vendored Meson to 1.8.3 [wheel build] +* `#29563 `__: BUG: Fix metadata not roundtripping when pickling datetime (#29555) +* `#29587 `__: TST: update link and version for Intel SDE download +* `#29593 `__: TYP: add ``sorted`` kwarg to ``unique`` +* `#29672 `__: MAINT: Update pythoncapi-compat from main. +* `#29673 `__: MAINT: Update cibuildwheel. +* `#29674 `__: MAINT: Fix typo in wheels.yml +* `#29683 `__: BUG, BLD: Correct regex for ppc64 VSX3/VSX4 feature detection +* `#29684 `__: TYP: ndarray.fill() takes no keyword arguments +* `#29685 `__: BUG: avoid thread-unsafe refcount check in temp elision +* `#29687 `__: CI: replace comment-hider action in mypy_primer workflow +* `#29689 `__: BLD: Add missing include +* `#29691 `__: BUG: use correct input dtype in flatiter assignment +* `#29700 `__: TYP: fix np.bool method declarations +* `#29701 `__: BUG: Correct ambiguous logic for s390x CPU feature detection + diff --git a/doc/changelog/2.3.4-changelog.rst b/doc/changelog/2.3.4-changelog.rst new file mode 100644 index 000000000000..f94b46a07573 --- /dev/null +++ b/doc/changelog/2.3.4-changelog.rst @@ -0,0 +1,61 @@ + +Contributors +============ + +A total of 17 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* !DWesl +* Charles Harris +* Christian Barbia + +* Evgeni Burovski +* Joren Hammudoglu +* Maaz + +* Mateusz SokÃŗÅ‚ +* Matti Picus +* Nathan Goldbaum +* Ralf Gommers +* Riku Sakamoto + +* Sandeep Gupta + +* Sayed Awad +* Sebastian Berg +* Sergey Fedorov + +* Warren Weckesser +* dependabot[bot] + +Pull requests merged +==================== + +A total of 30 pull requests were merged for this release. 
+* `#29725 `__: MAINT: Prepare 2.3.x for further development +* `#29781 `__: MAINT: Pin some upstream dependencies +* `#29782 `__: BLD: enable x86-simd-sort to build on KNL with -mavx512f +* `#29783 `__: BUG: Include python-including headers first (#29281) +* `#29784 `__: TYP: fix np.number and np.\*integer method declaration +* `#29785 `__: TYP: mypy 1.18.1 +* `#29788 `__: TYP: replace scalar type __init__ with __new__ +* `#29790 `__: BUG: Fix ``dtype`` refcount in ``__array__`` (#29715) +* `#29791 `__: TYP: fix method declarations in floating, timedelta64, and datetime64 (backport) +* `#29792 `__: MAINT: delete unused variables in unary logical dispatch +* `#29797 `__: BUG: Fix pocketfft umath strides for AIX compatibility (#29768) +* `#29798 `__: BUG: np.setbufsize should raise ValueError for negative input +* `#29799 `__: BUG: Fix assert in nditer buffer setup +* `#29800 `__: BUG: Stable ScalarType ordering +* `#29838 `__: TST: Pin pyparsing to avoid matplotlib errors. +* `#29839 `__: BUG: linalg: emit a MemoryError on a malloc failure (#29811) +* `#29840 `__: BLD: change file extension for libnpymath on win-arm64 from .a... +* `#29864 `__: CI: Fix loongarch64 CI (#29856) +* `#29865 `__: TYP: Various typing fixes +* `#29910 `__: BUG: Fix float16-sort failures on 32-bit x86 MSVC (#29908) +* `#29911 `__: TYP: add missing ``__slots__`` (#29901) +* `#29913 `__: TYP: wrong argument defaults in ``testing._private`` (#29902) +* `#29920 `__: BUG: avoid segmentation fault in string_expandtabs_length_promoter +* `#29921 `__: BUG: Fix INT_MIN % -1 to return 0 for all signed integer types... 
+* `#29922 `__: TYP: minor fixes related to ``errstate`` (#29914) +* `#29923 `__: TST: use requirements/test_requirements across CI (#29919) +* `#29926 `__: BUG: fix negative samples generated by Wald distribution (#29609) +* `#29940 `__: MAINT: Bump pypa/cibuildwheel from 3.1.4 to 3.2.1 +* `#29949 `__: STY: rename @classmethod arg to cls +* `#29950 `__: MAINT: Simplify string arena growth strategy (#29885) + diff --git a/doc/changelog/2.3.5-changelog.rst b/doc/changelog/2.3.5-changelog.rst new file mode 100644 index 000000000000..123e1e9d0453 --- /dev/null +++ b/doc/changelog/2.3.5-changelog.rst @@ -0,0 +1,40 @@ + +Contributors +============ + +A total of 10 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Aaron Kollasch + +* Charles Harris +* Joren Hammudoglu +* Matti Picus +* Nathan Goldbaum +* Rafael Laboissière + +* Sayed Awad +* Sebastian Berg +* Warren Weckesser +* Yasir Ashfaq + + +Pull requests merged +==================== + +A total of 16 pull requests were merged for this release. + +* `#29979 `__: MAINT: Prepare 2.3.x for further development +* `#30026 `__: SIMD, BLD: Backport FPMATH mode on x86-32 and filter successor... +* `#30029 `__: MAINT: Backport write_release.py +* `#30041 `__: TYP: Various typing updates +* `#30059 `__: BUG: Fix np.strings.slice if stop=None or start and stop >= len... +* `#30063 `__: BUG: Fix np.strings.slice if start > stop +* `#30076 `__: BUG: avoid negating INT_MIN in PyArray_Round implementation (#30071) +* `#30090 `__: BUG: Fix resize when it contains references (#29970) +* `#30129 `__: BLD: update scipy-openblas, use -Dpkg_config_path (#30049) +* `#30130 `__: BUG: Avoid compilation error of wrapper file generated with SWIG... +* `#30157 `__: BLD: use scipy-openblas 0.3.30.7 (#30132) +* `#30158 `__: DOC: Remove nonexistent ``order`` parameter docs of ``ma.asanyarray``... +* `#30185 `__: BUG: Fix check of PyMem_Calloc return value. 
(#30176) +* `#30217 `__: DOC: fix links for newly rebuilt numpy-tutorials site +* `#30218 `__: BUG: Fix build on s390x with clang (#30214) +* `#30237 `__: ENH: Make FPE blas check a runtime check for all apple arm systems + diff --git a/doc/changelog/2.4.0-changelog.rst b/doc/changelog/2.4.0-changelog.rst new file mode 100644 index 000000000000..472811f6be62 --- /dev/null +++ b/doc/changelog/2.4.0-changelog.rst @@ -0,0 +1,828 @@ + +Contributors +============ + +A total of 142 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* !DWesl +* !MyUserNameWasTakenLinux + +* !ianlv + +* !kostayScr + +* !olivier + +* Aadya Chinubhai + +* Aaron Kollasch + +* Abdu Zoghbi + +* Abhishek Kumar +* Abhishek Tiwari + +* Adam Turner + +* Akhil Kannan + +* Aleksandr A. Voyt + +* Amelia Thurdekoos + +* Andrew Nelson +* Angus Gibson + +* Anik Chand + +* Aniket Singh Yadav + +* Ankit Ahlawat + +* Arthur Lacote + +* Ben Woodruff +* Bernard Roesler + +* Brad Smith + +* Britney Whittington + +* Carlos Martin +* Charles Harris +* Charlie Lin + +* Chris Navarro +* Christian Barbia + +* Christian Bourjau + +* Christine P. 
Chai +* Christopher Sidebottom +* Clément Robert +* Copilot + +* Dan Raviv + +* Daniel Bertalan + +* David Seifert + +* Dennis Van de Vorst + +* Developer-Ecosystem-Engineering +* Diego Atencia + +* Dillon Niederhut +* Dimitri Papadopoulos Orfanos +* Diya Singh + +* Evgeni Burovski +* Faizan-Ul Huda + +* François Rozet +* Germán Godoy Gutierrez + +* Gubaydullin Danis + +* Guido Imperiale +* Hamza Meel + +* Hannah Aizenman +* Henry Schreiner +* Hunter Hogan + +* Iason Krommydas + +* Inessa Pawson +* Jake VanderPlas +* Jingu Kang + +* Joe Rickerby + +* Johnnie Gray + +* Jonathan Reimer + +* Joren Hammudoglu +* Kaiyuan Yang + +* Kelvin Li + +* Khelf Mohamed + +* Koki Watanabe + +* Kumar Aditya + +* Leonardo Paredes + +* Lucas Colley +* Lysandros Nikolaou +* Maanas Arora +* Marc Redemske + +* Marco Barbosa +* Marco Edward Gorelli +* Mark Ryan +* Marten van Kerkwijk +* Maryanne Wachter +* Mateusz Sokół +* Matthias Bussonnier +* Matti Picus +* Melissa Weber Mendonça +* Michael Davidsaver +* Michael Siebert +* Michał Górny +* Mohammed Abdul Rahman +* Mohammed Zuhaib + +* Mohit Deoli + +* Moritz Groß + +* Mugundan Selvanayagam +* Muhammad Maaz + +* Mukulika Pahari +* Nathan Goldbaum +* Nicholas Bidler + +* Paresh Joshi + +* Parsa Shemirani + +* Paul Caprioli + +* Phoenix Studio + +* Pieter Eendebak +* Rafael Laboissière + +* Raghuveer Devulapalli +* Ralf Bürkle + +* Ralf Gommers +* Richard Smythe + +* Riku Sakamoto + +* Rohit Goswami +* Ross Barnowski +* Rupesh Sharma + +* Sachin Shah + +* Samruddhi Baviskar + +* Sandeep Gupta + +* Sandro + +* Sanjay Kumar Sakamuri Kamalakar + +* Sarang Joshi + +* Sayed Awad +* Sebastian Berg +* Sergey Fedorov + +* Shirong Wang + +* Shyok Mutsuddi + +* Simola Nayak + +* Stan Ulbrych +* Steven Hur + +* Swayam Singh + +* T.Yamada + +* Tim Hoffmann +* Timileyin Daso + +* Tobias Markus + +* Tontonio3 + +* Toshaksha + +* Trey Cole + +* Tyler Reddy +* Varad Raj Singh + +* Veit Heller + +* Vineet Kumar + +* Wang Yang (杨旺) +* Warren 
Weckesser +* William Pursell + +* Xiaoyu + +* Yasir Ashfaq + +* Yuki K +* Yuvraj Pradhan +* Zebreus + +* Zhi Li + +* dependabot[bot] + +Pull requests merged +==================== + +A total of 673 pull requests were merged for this release. + +* `#23513 `__: ENH: speed up einsum with optimize using batched matmul +* `#24501 `__: DOC: add description of dtype b1 in arrays.dtypes.rst +* `#25245 `__: ENH: Enable native half-precision scalar conversion operations... +* `#28147 `__: DOC: Fix ambiguity in polyfit description +* `#28158 `__: DOC: Update CONTRIBUTING.rst +* `#28590 `__: ENH: Use array indexing preparation routines for flatiter objects +* `#28595 `__: BUG: quantile should error when weights are all zeros +* `#28622 `__: ENH, SIMD: Initial implementation of Highway wrapper +* `#28767 `__: ENH: np.unique: support hash based unique for string dtype +* `#28826 `__: DOC: Add flat examples to argmax and argmin +* `#28896 `__: ENH: Modulate dispatched x86 CPU features +* `#28925 `__: DEP: Deprecate setting the strides attribute of a numpy array +* `#28955 `__: MNT: Update windows-2019 to windows-2022[wheel build] +* `#28970 `__: MNT: Enforce ruff/Perflint rules (PERF) +* `#28979 `__: DOC: improves np.fromfile file description (#28840) +* `#28983 `__: MAINT: Options to catch more issues reported by pytest +* `#28985 `__: MNT: constant string arrays instead of pointers in C +* `#28996 `__: ENH: add __array_function__ protocol in polynomial +* `#29007 `__: CI: update cibuildwheel to 3.0.0b1 and enable cp314 and cp314t... +* `#29012 `__: TYP: Type ``MaskedArray.__{add,radd,sub,rsub}__`` +* `#29019 `__: BEG, MAINT: Begin NumPy 2.4.0 development. 
+* `#29022 `__: MAINT: Convert multiarray to multi-phase init (PEP 489) +* `#29028 `__: MAINT: Convert pocketfft_umath to multi-phase init (PEP 489) +* `#29032 `__: BUG: Fix workflow bug +* `#29034 `__: BUG: Avoid compile errors in f2py modules +* `#29036 `__: DOC: Expand/clean up extension module import error +* `#29039 `__: BLD: bump OpenBLAS version, use OpenBLAS for win-arm64 +* `#29040 `__: BUG: Fix f2py derived types in modules +* `#29041 `__: BUG: Fix cache use regression +* `#29048 `__: TYP: annotate ``strings.slice`` +* `#29050 `__: TYP: remove expired ``tostring`` methods +* `#29051 `__: MNT: use pypy 3.11 nightly which has a fix for ctypeslib +* `#29052 `__: ENH: show warning when np.maximum receives more than 2 inputs +* `#29053 `__: BLD: allow targeting webassembly without emscripten +* `#29057 `__: TYP: fix invalid overload definition in ``_core.defchararray.add`` +* `#29058 `__: TYP: fill in some of the missing annotations in the stubs +* `#29060 `__: BUG: add bounds-checking to in-place string multiply +* `#29061 `__: BLD: use sonoma image on Cirrus for wheel build +* `#29066 `__: DOC: fix typo in documentation of vecmat +* `#29068 `__: MAINT: Enforce ruff E501 +* `#29078 `__: CI: clean up cibuildwheel config a bit +* `#29080 `__: CI: bump to cibuildwheel 3.0.0b4 +* `#29083 `__: MAINT: Avoid use of deprecated _PyDict_GetItemStringWithError... 
+* `#29084 `__: BENCH: Increase array sizes for ufunc and sort benchmarks +* `#29085 `__: MAINT: Bump ``scipy-doctest`` to 1.8 +* `#29088 `__: MAINT: Add ``build-\*`` directories to ``.gitignore`` +* `#29091 `__: BUG: f2py: thread-safe forcomb +* `#29092 `__: TYP: fix ``NDArray[integer]`` inplace operator mypy issue +* `#29093 `__: MAINT: Bump ossf/scorecard-action from 2.4.1 to 2.4.2 +* `#29094 `__: BUG: remove ``NPY_ALIGNMENT_REQUIRED`` +* `#29095 `__: MAINT: bump ``mypy`` to ``1.16.0`` +* `#29097 `__: TYP: run mypy in strict mode +* `#29098 `__: PERF: Make NpzFile member existence checks constant-time +* `#29105 `__: BUG: Allow np.percentile to operate on float16 data +* `#29106 `__: DOC: Fix some incorrect reST markups +* `#29111 `__: MAINT: fix SPDX license expressions for LAPACK, GCC runtime libs +* `#29112 `__: ENH: Improve error message in numpy.testing.assert_array_compare +* `#29115 `__: MAINT: cleanup from finalized concatenate deprecation +* `#29119 `__: DOC: remove very outdated info on ATLAS +* `#29120 `__: TYP: minor ufunc alias fixes in ``__init__.pyi`` +* `#29121 `__: MAINT: Bump github/codeql-action from 3.28.18 to 3.28.19 +* `#29122 `__: DOC: fix typo in Numpy's module structure +* `#29128 `__: MAINT: Bump conda-incubator/setup-miniconda from 3.1.1 to 3.2.0 +* `#29129 `__: ENH: add a casting option 'same_value' and use it in np.astype +* `#29133 `__: MAINT: Bump mamba-org/setup-micromamba from 2.0.4 to 2.0.5 +* `#29137 `__: BUG: make round consistently return a copy +* `#29141 `__: MAINT: Update main after 2.3.0 release. +* `#29142 `__: TYP: update typing stubs for ``_pyinstaller/hook-numpy.py`` +* `#29143 `__: DOC: Document assertion comparison behavior between scalar and... 
+* `#29145 `__: TYP: add missing ``numpy.lib`` exports +* `#29146 `__: TYP: fix minor ``f2py`` stub inconsistencies +* `#29147 `__: BUG: Missing array-api ``capabilities()`` key +* `#29148 `__: TST: migrating from pytz to zoneinfo + tzdata (where needed) +* `#29149 `__: CI: Bump ``array-api-tests`` to ``v2025.05.23`` +* `#29154 `__: DOC: Remove version switcher colors +* `#29155 `__: TYP: ``double = float64`` and ``cdouble = complex128`` +* `#29156 `__: CI: Run mypy with Python 3.13 +* `#29158 `__: DOC: tweak release walkthrough for numpy.org news blurb +* `#29160 `__: DOC: Suppress distutils doc build warnings for python 3.12+ +* `#29165 `__: ENH: Use itertools.product for ndindex to improve performance +* `#29166 `__: TYP: Fix missing ``_core.numeric`` (re-)exports +* `#29168 `__: TYP: Simplified ``dtype.__new__`` overloads +* `#29169 `__: TYP: ``out=...`` in ufuncs +* `#29170 `__: TYP: ``numpy._NoValue`` +* `#29171 `__: TYP: Accept dispatcher function with optional returns in ``_core.overrides`` +* `#29175 `__: TYP: Fix invalid inline annotations in ``lib._function_base_impl`` +* `#29176 `__: TYP: ``any(None)`` and ``all(None)`` +* `#29177 `__: TYP: ``lib._iotools`` annotation improvements +* `#29179 `__: BUG: fix matmul with transposed out arg +* `#29180 `__: MAINT: Bump pypa/cibuildwheel from 3.0.0b4 to 3.0.0 +* `#29181 `__: TYP: fix and improve ``numpy.lib._utils_impl`` +* `#29183 `__: STY: ruff/isort config tweaks +* `#29184 `__: TYP: fix ``ravel_multi_index`` false rejections +* `#29185 `__: STY: ruff/isort config tweaks - episode 2 +* `#29186 `__: MAINT: bump ``ruff`` to ``0.11.13`` +* `#29187 `__: TYP: add ``__all__`` in ``numpy._core.__init__`` +* `#29188 `__: MAINT: strides comparison performance fix, compare discussion... +* `#29195 `__: MAINT: Bump github/codeql-action from 3.28.19 to 3.29.0 +* `#29196 `__: BUG: Revert ``np.vectorize`` casting to legacy behavior +* `#29197 `__: TST: additional tests for matmul with non-contiguous input and... 
+* `#29204 `__: TYP: fix ``ndarray.__array__`` annotation for ``copy`` +* `#29208 `__: ENH: improve Timsort with powersort merge-policy +* `#29210 `__: BUG: fix linting +* `#29212 `__: CI: Add native ``ppc64le`` CI job using GitHub Actions +* `#29215 `__: CI: Add support for building NumPy with LLVM for Win-ARM64 +* `#29216 `__: MAINT: Fix some undef warnings +* `#29218 `__: TYP: Workaround for a mypy issue in ``ndarray.__iter__`` +* `#29219 `__: MAINT: bump ``mypy`` to ``1.16.1`` +* `#29220 `__: MAINT: bump ``ruff`` to ``0.12.0`` +* `#29221 `__: ENH: Detect CPU features on OpenBSD ARM and PowerPC64 +* `#29223 `__: BUG: Address interaction between SME and FPSR +* `#29224 `__: ENH: Detect CPU features on FreeBSD / OpenBSD RISC-V64. +* `#29227 `__: TYP: Support iteration of ``StringDType`` arrays +* `#29230 `__: BUG: avoid negating unsigned integers in resize implementation +* `#29231 `__: BUG: Enforce integer limitation in concatenate +* `#29232 `__: TST: Fix test that uses uninitialized memory +* `#29240 `__: ENH: Let numpy.size accept multiple axes. +* `#29248 `__: TYP: Work around a mypy issue with bool arrays +* `#29250 `__: MAINT: Enable linting with ruff E501 +* `#29252 `__: DOC: Fix some markup errors +* `#29254 `__: DOC: Clarify dtype argument for __array__ in custom container... +* `#29257 `__: MAINT: Update main after 2.3.1 release. +* `#29265 `__: TYP: Type ``MaskedArray.__{mul,rmul}__`` +* `#29269 `__: BUG: fix fencepost error in StringDType internals +* `#29271 `__: TYP: Add overloads for ``MaskedArray.__{div,rdiv,floordiv,rfloordiv}__`` +* `#29272 `__: MAINT: Fix ``I001`` ruff error on main +* `#29273 `__: ENH: Extend numpy.pad to handle pad_width dictionary argument. +* `#29275 `__: DOC: avoid searching some directories for doxygen-commented source... +* `#29277 `__: TYP: Add type annotations for ``MaskedArray.__{pow,rpow}__`` +* `#29278 `__: TYP: fix overloads where ``out: _ArrayT`` was typed as being... 
+* `#29281 `__: BUG: Include python-including headers first +* `#29285 `__: MAINT: Bump github/codeql-action from 3.29.0 to 3.29.1 +* `#29286 `__: BUG: handle case in mapiter where descriptors might get replaced +* `#29289 `__: BUG: Fix macro redefinition +* `#29290 `__: BUG: Fix version check in blas_utils.c +* `#29291 `__: MAINT: Enable linting with ruff E501 +* `#29296 `__: MAINT: Bump github/codeql-action from 3.29.1 to 3.29.2 +* `#29300 `__: MAINT: Enable linting with ruff E501 +* `#29301 `__: DEP: Give a visible warning when ``align=`` to dtype is a non-bool +* `#29302 `__: DOCS: Remove incorrect "Returns" section from ``MaskedArray.sort`` +* `#29303 `__: TYP: Add shape typing to return values of ``np.nonzero`` and... +* `#29305 `__: TYP: add explicit types for np.quantile +* `#29306 `__: DOC: remove redundant words +* `#29307 `__: TYP: Type ``MaskedArray.{trace,round,cumsum,cumprod}`` +* `#29308 `__: Fix incorrect grammar in TypeError message for ufunc argument... +* `#29309 `__: TYP: Type ``MaskedArray.dot`` and ``MaskedArray.anom`` +* `#29310 `__: TYP: rename ``_T`` to ``_ScalarT`` in ``matlib.pyi`` for consistency +* `#29311 `__: DOCS: Fix rendering of ``MaskedArray.anom`` ``dtype`` +* `#29312 `__: BLD: remove unused github workflow +* `#29313 `__: BUG: Allow reading non-npy files in npz and add test +* `#29314 `__: MAINT: Replace setting of array shape by reshape operation +* `#29316 `__: MAINT: remove out-of-date comment +* `#29318 `__: BUG: Fix np.testing utils failing for masked scalar vs. scalar... +* `#29320 `__: DOC: Fix spelling +* `#29321 `__: MNT: Cleanup infs handling in np.testing assertion utilities +* `#29322 `__: MAINT: remove internal uses of assert_warns and suppress_warnings +* `#29325 `__: DOC: Clarify assert_allclose differences vs. allclose +* `#29327 `__: MAINT: Rename nep-0049.rst. +* `#29329 `__: BLD: update ``highway`` submodule to latest master +* `#29331 `__: TYP: ``svd`` overload incorrectly noted ``Literal[False]`` to... 
+* `#29332 `__: TYP: Allow passing ``dtype=None`` to ``trace`` +* `#29333 `__: BUG: Always return a real dtype from linalg.cond (gh-18304) +* `#29334 `__: MAINT: Bump pypa/cibuildwheel from 3.0.0 to 3.0.1 +* `#29335 `__: DOC: vectorize with signature doesn't pre-call function +* `#29338 `__: API,BUG: Fix scalar handling in array-interface allowing NULL... +* `#29340 `__: TYP: correct default value of ``unicode`` in ``chararray.__new__``... +* `#29341 `__: TST: Avoid uninitialized values in test +* `#29343 `__: DOC: Add missing ``self`` in ``__array_ufunc__`` signature +* `#29347 `__: DOC: Fix NEP 49 Resolution Link Formatting (part of #29328) +* `#29351 `__: BLD: print long double format used +* `#29356 `__: BUG: fix test_npy_uintp_type_enum +* `#29358 `__: BUG: Fix reference leakage for output arrays in reduction functions +* `#29362 `__: DOC: specify that ``numpy.nan_to_num`` supports array like arguments +* `#29364 `__: TST: refactor typing check for @ +* `#29368 `__: BUG: avoid segmentation fault in ``string_expandtabs_length_promoter`` +* `#29369 `__: BUG: fix casting issue in center, ljust, rjust, and zfill +* `#29370 `__: ENH: Allow subscript access for ``np.bool`` by adding ``__class_getitem__`` +* `#29371 `__: MNT: add linter for thread-unsafe C API uses +* `#29372 `__: BUG: Fix np.unique with axis=0 and 1D input not collapsing NaNs... +* `#29374 `__: DEV: remove "packages" from ``.gitignore`` +* `#29375 `__: STY: Fix typo in npy_cpu_dispatch.c +* `#29377 `__: TYP: Fix overloads in ``np.char.array`` and ``np.char.asarray``... +* `#29380 `__: BUG: Fix repeatability issues in test suite +* `#29381 `__: TYP: Type ``MaskedArray.{sum,std,var,mean,prod}`` +* `#29383 `__: TYP: Type ``MaskedArray.view`` +* `#29385 `__: BLD: Add sw_64 support +* `#29386 `__: DOC: Fix ``PyArrayMapIterObject`` document +* `#29387 `__: DOC: document ``mean`` parameter in ``ndarray.std`` and ``ndarray.var``... 
+* `#29390 `__: DOC: better differentiate arrays in dstack docstring +* `#29392 `__: BUG: Any dtype should call ``square`` on ``arr \*\* 2`` +* `#29394 `__: ENH: avoid thread safety issues around uses of ``PySequence_Fast`` +* `#29396 `__: ENH: Show unit information in repr for datetime64("NaT") +* `#29401 `__: TYP: Type ``MaskedArray.resize``\ , wrap ``NoReturn`` tests in... +* `#29402 `__: DOC: Correct more ndarray defaults +* `#29403 `__: MAINT: remove unnecessary ``kwargs`` update in ``MaskedArray.reshape`` +* `#29404 `__: TYP: Type ``MaskedArray.reshape`` +* `#29405 `__: MAINT/BUG: Followups for PySequence_Fast locking +* `#29406 `__: MAINT: Bump github/codeql-action from 3.29.2 to 3.29.3 +* `#29407 `__: MAINT: use a stable pypy release in CI +* `#29411 `__: BUG: fix datetime/timedelta hash memory leak +* `#29418 `__: TYP: Type ``MaskedArray.__deepcopy__`` and ``MaskedArray.argsort`` +* `#29419 `__: DOC: Fix index name in notes for np.take +* `#29423 `__: BUG: allow ``MaskedArray.fill_value`` be a string when ``dtype=StringDType`` +* `#29426 `__: MAINT: Bump github/codeql-action from 3.29.3 to 3.29.4 +* `#29427 `__: DOC: Remove outdated ``numpy.exceptions`` compatibility note. +* `#29428 `__: TYP: Add test which hits ``np.array`` constructor overload with... +* `#29431 `__: ENH: Enable RVV acceleration for auto-vectorization in RISC-V +* `#29432 `__: MAINT: Bump larsoner/circleci-artifacts-redirector-action from... +* `#29433 `__: MAINT: Bump pypa/cibuildwheel from 3.0.1 to 3.1.0 +* `#29435 `__: BUG: Fix test_configtool_pkgconfigdir to resolve PKG_CONFIG_DIR... +* `#29436 `__: BLD: allow targeting webassembly without emscripten +* `#29441 `__: MAINT: Update main after 2.3.2 release. +* `#29444 `__: MAINT: Add Python 3.14 to classifier. +* `#29445 `__: DOC: Update RELEASE_WALKTHROUGH.rst +* `#29450 `__: DOC: Clarify that ``numpy.printoptions`` applies only to ``ndarray``\... 
+* `#29456 `__: MAINT: Replace pavement.py +* `#29457 `__: TYP: Type ``MaskedArray.__new__`` +* `#29459 `__: BLD: provide explicit control over cpu-baseline detection +* `#29466 `__: TYP: Type ``MaskedArray.flat`` +* `#29467 `__: TYP: Type ``MaskedArray.recordmask`` +* `#29468 `__: TYP: Type ``MaskedArray.fill_value`` +* `#29470 `__: TYP: Remove ``MaskedArray.__reduce__``\ , and punt on ``MaskedArray.__{eq,ne}__```... +* `#29471 `__: MAINT: Bump pypa/cibuildwheel from 3.1.0 to 3.1.2 +* `#29472 `__: TYP: Type ``MaskedArray.__getitem__`` +* `#29478 `__: TYP: Type ``MaskedArray.__setitem__`` +* `#29479 `__: MAINT: Do not exclude ``typing/tests/data`` from ruff +* `#29480 `__: TYP: Type ``MaskedArray.compress`` +* `#29481 `__: MAINT: Add .file entry to all .s SVML files +* `#29483 `__: TYP: Type ``MaskedArray.__array_finalize__`` and ``MaskedArray.__array_wrap__`` +* `#29484 `__: MAINT: Bump github/codeql-action from 3.29.4 to 3.29.5 +* `#29487 `__: CI: Add UBSAN CI jobs for macOS arm64 and Linux x86-64 +* `#29489 `__: DOC: Add 'See Also' refs for sign, copysign and signbit. +* `#29493 `__: MAINT: bump ``mypy`` to ``1.17.1`` +* `#29495 `__: MAINT: Bump pypa/cibuildwheel from 3.1.2 to 3.1.3 +* `#29502 `__: DOC: Add narrative documentation for printing NumPy arrays +* `#29504 `__: ENH: Use extern C in arraytypes.h.src file for cpp files +* `#29505 `__: BUG: Casting from one timedelta64 to another didn't handle NAT. +* `#29509 `__: BLD: update vendored Meson to 1.8.3 +* `#29512 `__: TST: don't explicitly specify -j in TSAN build +* `#29513 `__: MAINT: Bump hypothesis to 6.137.1 +* `#29514 `__: DOC: Add 'today' string to datetime64 documentation +* `#29521 `__: MAINT: Bump actions/download-artifact from 4.3.0 to 5.0.0 +* `#29522 `__: BUG: random: Fix handling of very small p in Generator.binomial. 
+* `#29526 `__: TYP: Type ``MaskedIterator`` +* `#29529 `__: MAINT: Bump actions/cache from 4.2.3 to 4.2.4 +* `#29530 `__: MAINT: Bump github/codeql-action from 3.29.5 to 3.29.6 +* `#29531 `__: TYP: Type default values in stubs in ``numpy/ma`` +* `#29532 `__: DOC:Clarify build compatibility to dev depending page +* `#29533 `__: MAINT: Bump github/codeql-action from 3.29.6 to 3.29.8 +* `#29534 `__: DOC: Add 'now' string to datetime64 documentation +* `#29535 `__: BLD: update licensing metadata to use PEP 639 +* `#29537 `__: ENH: np.unique: support hash based unique for complex dtype +* `#29539 `__: BUG: left bit shift undefined behavior +* `#29540 `__: CI: run some wheel build jobs by default, and clean up the rest +* `#29541 `__: STY: fix typo in dtypemeta.c [skip azp][skip actions] +* `#29542 `__: MAINT: Bump actions/checkout from 4.2.2 to 5.0.0 +* `#29546 `__: fix: File exists error on macOS when running spin lint +* `#29548 `__: MAINT: Use double quotes (ruff rule ``Q``\ ) (only on ``.pyi``... +* `#29550 `__: DEP: Deprecate NumPy warning control utilities +* `#29551 `__: BUG: resolve invalid grep with env neutral script +* `#29553 `__: MAINT: Bump github/codeql-action from 3.29.8 to 3.29.9 +* `#29555 `__: BUG: Fix metadata not roundtripping when pickling datetime +* `#29557 `__: DOC: Document datetime and timedelta to python's object type... +* `#29564 `__: TYP: use ``TypeAliasType`` for ``ArrayLike`` and ``DTypeLike``... 
+* `#29565 `__: STY: ruff rule name comments +* `#29569 `__: ENH: Add ndmax parameter to np.array to control recursion depth +* `#29572 `__: ENH: enable processing object file for f2py meson backend +* `#29574 `__: TYP: add ``ndmax`` parameter to ``np.array`` +* `#29579 `__: BLD: wire up ``ASIMDDP`` feature to ``ARM_FEATURES`` +* `#29582 `__: DOC: Add link to homepage in doc landing page +* `#29585 `__: TST: update link and version for Intel SDE download +* `#29586 `__: TYP: add ``sorted`` kwarg to ``unique`` +* `#29588 `__: DOC: Make the image credit author link clickable +* `#29589 `__: MAINT: Bump actions/dependency-review-action from 4.7.1 to 4.7.2 +* `#29590 `__: MAINT: Bump github/codeql-action from 3.29.9 to 3.29.10 +* `#29594 `__: TYP: Add defaults to ``numpy/core`` and ``numpy/__init__.py`` +* `#29596 `__: TST: Replace xunit setup with methods +* `#29598 `__: BUG: fix for evaluation of random_f and random_standard_cauchy... +* `#29601 `__: DOC: fix for f2py migrating-to-meson page +* `#29602 `__: MAINT: Bump pypa/cibuildwheel from 3.1.3 to 3.1.4 +* `#29604 `__: DOC: Fix typo in tril_indices and triu_indices docstrings +* `#29605 `__: TST: Replace xunit setup with methods +* `#29607 `__: TST: Enable unit tests for RISC-V CPU dispatcher utilities +* `#29608 `__: TYP: ndarray.fill() takes no keyword arguments +* `#29609 `__: BUG: fix negative samples generated by Wald distribution +* `#29611 `__: CI: more specific mypy_primer ``on:`` paths +* `#29612 `__: CI: replace comment-hider action in mypy_primer workflow +* `#29615 `__: MAINT: Bump github/codeql-action from 3.29.10 to 3.29.11 +* `#29616 `__: TST: Replace xunit setup with methods +* `#29617 `__: DOC: Correct a few formatting issues +* `#29618 `__: MAINT: fix typo in cmds.py +* `#29621 `__: ENH: Extend coverage for benchmark of np.unique +* `#29628 `__: TST: Replace xunit setup with methods +* `#29629 `__: TYP: replace scalar type ``__init__`` with ``__new__`` +* `#29630 `__: TYP: fix slightly incorrect 
``memoryview`` type argument in ``ScalarType`` +* `#29631 `__: TYP: Make ``datetime64`` a generic type at runtime +* `#29633 `__: TYP: add missing ``_NoValue`` annotations in ``_core.fromnumeric`` +* `#29634 `__: TYP: Add missing defaults to stubs +* `#29636 `__: MAINT: Bump actions/dependency-review-action from 4.7.2 to 4.7.3 +* `#29641 `__: TST: Replace xunit setup with methods +* `#29642 `__: ENH: Add extended sorting APIs +* `#29646 `__: DOC: Fix typo in basics.strings.rst +* `#29648 `__: TST: delete global env_setup fixture +* `#29649 `__: BUG: avoid thread-unsafe refcount check in temp elision +* `#29653 `__: MAINT: Bump github/codeql-action from 3.29.11 to 3.30.0 +* `#29654 `__: MAINT: Add Linux Foundation Health Badge to README +* `#29655 `__: DOC: clarify numpy.asarray, numpy.asanyarray, numpy.asarray_chkfinite... +* `#29656 `__: ENH: Improve performance of numpy scalar __copy__ and __deepcopy__ +* `#29657 `__: TST: Replace xunit setup with methods +* `#29658 `__: MAINT: Optimize the logical implementation for RISC-V based on... 
+* `#29662 `__: BLD: Add missing include +* `#29665 `__: BUG: use correct input dtype in flatiter indexed assignment +* `#29666 `__: TST: Replace xunit setup with methods +* `#29667 `__: MAINT: Bump mamba-org/setup-micromamba from 2.0.5 to 2.0.6 +* `#29669 `__: MAINT: Bump actions/github-script from 7.0.1 to 8.0.0 +* `#29670 `__: MAINT: Bump actions/setup-python from 5.6.0 to 6.0.0 +* `#29671 `__: TST: Replace test_smoke xunit setup with methods +* `#29678 `__: BUG, BLD: Correct regex for ppc64 VSX3/VSX4 feature detection +* `#29686 `__: MAINT: Bump github/codeql-action from 3.30.0 to 3.30.1 +* `#29692 `__: TST: Replace test_deprecations setup/teardown with context manager +* `#29693 `__: TST: xfail test_kind::test_quad_precision on AIX/PPC +* `#29695 `__: TYP: fix ``np.bool`` method declarations +* `#29697 `__: BUG: Correct ambiguous logic for ``s390x`` CPU feature detection +* `#29704 `__: BLD: Add missing +* `#29706 `__: TYP: fix ``np.number`` and ``np.\*integer`` method declaration +* `#29713 `__: MAINT: update spin to 0.14 in requirements files +* `#29714 `__: TST: update test_regression::test_gph25784 +* `#29715 `__: BUG: Fix ``dtype`` refcount in ``__array__`` +* `#29716 `__: BUG: standardize 'Mean of empty slice' inconsistent message #29711 +* `#29718 `__: TST: not to include the LONGDOUBLE test on AIX +* `#29723 `__: MAINT: Bump github/codeql-action from 3.30.1 to 3.30.2 +* `#29726 `__: MAINT: Update main after 2.3.3 release. +* `#29729 `__: TST: Fix np.random thread test failures +* `#29730 `__: BLD: enable x86-simd-sort to build on KNL with -mavx512f +* `#29732 `__: DOC: update documentation on how to prepare and do a release +* `#29733 `__: TYP: fix method declarations in ``floating``\ , ``timedelta64``\... 
+* `#29734 `__: TYP: fix ``ndarray.strides`` decorator order +* `#29735 `__: MAINT: Bump actions/checkout from 4.2.2 to 5.0.0 +* `#29736 `__: TYP: sort out some of the ``# type: ignore`` comments in ``__init__.pyi`` +* `#29737 `__: ENH, API: New sorting slots for DType API +* `#29739 `__: TYP: Remove ``None`` from definition of ``DTypeLike`` type alias +* `#29740 `__: TST: disable overflow exception test of numpy.power on AIX +* `#29741 `__: MAINT: Bump github/codeql-action from 3.30.2 to 3.30.3 +* `#29743 `__: MAINT: delete unused variables in unary logical dispatch +* `#29744 `__: TST: Simplify and clarify StringDType testing support utilities +* `#29745 `__: BUG: Fix max_depth validation condition in PyArray_FromAny_int +* `#29749 `__: TYP: mypy 1.18.1 +* `#29750 `__: BLD: change file extension for libnpymath on win-arm64 from .a... +* `#29751 `__: ENH: implement powersort merge-policy for argsort +* `#29753 `__: DOC: Fix typo in absolute_beginners.rst +* `#29754 `__: MAINT: pin asv<0.6.5 +* `#29755 `__: DOC: Clarify description of diagonal covariance in multivariate_normal... +* `#29757 `__: DOC: add dev docs on C debuggers and compiler sanitizers +* `#29760 `__: DOC: Improve documentation for f2py and Meson usage, add ufunc... +* `#29761 `__: BUG: Stable ``ScalarType`` ordering +* `#29764 `__: MAINT: Bump larsoner/circleci-artifacts-redirector-action from... 
+* `#29767 `__: DOC: add another mention of 'same_value' +* `#29768 `__: BUG: Fix pocketfft umath strides for powerpc compatibility +* `#29773 `__: DOC: Correct a typo in Troubleshooting guidelines +* `#29774 `__: BUG: np.setbufsize should raise ValueError for negative input +* `#29775 `__: DOC: Link cross references in numpy documentation +* `#29776 `__: TYP: fix and improve ``{f,i}info`` stubs in ``_core.getlimits`` +* `#29777 `__: BLD: Upgrade spin requirement to version 0.15 +* `#29780 `__: BUG: Fix assert in nditer buffer setup +* `#29794 `__: MAINT: Bump pypa/cibuildwheel from 3.1.4 to 3.2.0 +* `#29796 `__: TST: clarify logic in float_alias_names test +* `#29802 `__: MAINT: Remove xfail and deprecation filter from a test. +* `#29803 `__: DOC: Improve the formatting of Random Generator documentation +* `#29806 `__: MAINT: Bump actions/cache from 4.2.4 to 4.3.0 +* `#29811 `__: BUG: linalg: emit a MemoryError on a malloc failure +* `#29812 `__: BLD: refactor to avoid 'unused function' warnings' +* `#29813 `__: ENH: add warning when calling ufunc with 'where' and without... +* `#29814 `__: MAINT: Bump github/codeql-action from 3.30.3 to 3.30.4 +* `#29815 `__: DOC: Update docstring for ``count_nonzero`` +* `#29816 `__: TST: Mark thread-unsafe tests +* `#29817 `__: MAINT: Bump actions/dependency-review-action from 4.7.3 to 4.8.0 +* `#29819 `__: ENH: Add fast path in ufuncs for numerical scalars. +* `#29823 `__: ENH: cleanup warning generation and unmark xfailed tests +* `#29831 `__: MAINT: Bump github/codeql-action from 3.30.4 to 3.30.5 +* `#29832 `__: MAINT: Bump int128/hide-comment-action from 1.43.0 to 1.44.0 +* `#29833 `__: TST: Pin pyparsing to avoid matplotlib errors. +* `#29836 `__: ENH, FEAT: Reorganize finfo and add new constant slot +* `#29837 `__: ENH: speedup numpy.quantile when weights are provided +* `#29841 `__: DEP: Raise ``TypeError`` on attempt to convert array with ``ndim``... 
+* `#29842 `__: TYP: Fix ``generic.__new__`` return type +* `#29843 `__: TYP: remove unused ``# type: ignore``\ s +* `#29844 `__: TYP: fix ``testing.assert_warns`` decorator order +* `#29845 `__: TYP: Add missing ``rtol`` kwarg to ``linalg.pinv`` +* `#29846 `__: TYP: Fix signatures of ``linalg.matmul`` and ``linalg.outer`` +* `#29847 `__: TYP: Fix incompatible defaults in ``polyfit``\ , ``histogram``\... +* `#29848 `__: MAINT, TYP: bump ``mypy`` to ``1.18.2`` +* `#29849 `__: CI: Use ``uv`` instead of ``pip`` in the mypy workflow +* `#29852 `__: DOC: Add a few missing commas in math operations +* `#29854 `__: MAINT: Bump ossf/scorecard-action from 2.4.2 to 2.4.3 +* `#29856 `__: CI: Try to fix loongarch64 CI +* `#29858 `__: TST: Make temporary file usage thread safe +* `#29861 `__: MAINT: Add Cython linter to spin +* `#29862 `__: TYP: Improved ``ndarray`` augmented assignment operators +* `#29866 `__: MAINT: Bump github/codeql-action from 3.30.5 to 3.30.6 +* `#29867 `__: TST: Fix misc thread unsafe data races +* `#29868 `__: BLD: Fix MSVC warnings and add CI check with allowlist +* `#29869 `__: DOC: Add warning and examples for sliding_window_view +* `#29872 `__: ENH: Set DLPack tensor ``shape`` and ``strides`` to NULL iff... +* `#29875 `__: PERF: Intern strings used to build global tuples. +* `#29880 `__: MAINT: Rewrite setitem to use the new API (mostly) +* `#29882 `__: DOC: Remove unused arrays from the structured dtype ufunc example. 
+* `#29883 `__: DOC: Add note on meson buildtype for debug builds +* `#29885 `__: MAINT: Simplify string arena growth strategy +* `#29886 `__: CI: macos-13 --> macos-15-intel +* `#29889 `__: DOC: Documentation related finfo refactors and new slot addition +* `#29891 `__: MAINT: Bump github/codeql-action from 3.30.6 to 4.30.7 +* `#29892 `__: DOC: Add Plausible analytics to the NumPy documentation +* `#29893 `__: BUG: Fix INT_MIN % -1 to return 0 for all signed integer types +* `#29895 `__: MAINT: Bump astral-sh/setup-uv from 6.8.0 to 7.0.0 +* `#29897 `__: BUG: Fixup float16 conversion error path and add tests +* `#29899 `__: BUG: Ensure backwards compatibility for patching finfo +* `#29900 `__: ENH: Add registration for sorting loops using new ufunc convenience... +* `#29901 `__: TYP: add missing ``__slots__`` +* `#29902 `__: TYP: wrong argument defaults in ``testing._private`` +* `#29903 `__: TYP: fix incorrect ``ma.sort`` arg default for ``stable`` +* `#29904 `__: MAINT: bump ``ruff`` from ``0.12.0`` to ``0.14.0`` +* `#29905 `__: TYP: Parameters with missing default value +* `#29906 `__: TST: do not use matplotlib 3.10.6 +* `#29908 `__: BUG: Fix float16-sort failures on 32-bit x86 MSVC +* `#29909 `__: DEP: finalize deprecation of numpy/fft.helpers and numpy.linalg.linalg... +* `#29912 `__: DOC: Add URL to valgrind tool in Advanced Debugging Page +* `#29914 `__: TYP: minor fixes related to ``errstate`` +* `#29915 `__: TYP: move ``matrix`` from ``__init__.pyi`` to ``matrixlib/defmatrix.pyi`` +* `#29917 `__: Fix memory leak in import_array() +* `#29919 `__: TST: use requirements/test_requirements across CI +* `#29924 `__: MAINT: Bump github/codeql-action from 4.30.7 to 4.30.8 +* `#29925 `__: MAINT: Avoid assumptions about how memory is allocated +* `#29927 `__: TST: Add unit test for RISC-V CPU features +* `#29930 `__: DOC: Completed and fixed PR #29578 +* `#29931 `__: ENH: In spec registration, allow looking up ufuncs in any module. 
+* `#29934 `__: CI: Use POWER10 GHA runner for NumPy test jobs +* `#29935 `__: CI: Run mypy on Python 3.14 and ignore more paths +* `#29936 `__: MAINT: Bump astral-sh/setup-uv from 7.0.0 to 7.1.0 +* `#29937 `__: MAINT: Bump pypa/cibuildwheel from 3.2.0 to 3.2.1 +* `#29938 `__: MAINT: Bump int128/hide-comment-action from 1.44.0 to 1.46.0 +* `#29939 `__: MAINT: Bump actions/dependency-review-action from 4.8.0 to 4.8.1 +* `#29942 `__: TST: Convert mixed_types_structured to method +* `#29944 `__: BUG: Fix np.strings.slice if stop=None or start and stop >= len +* `#29947 `__: BUG: support axis sequence in ``np.trim_zeros`` +* `#29948 `__: STY: rename ``@classmethod`` arg to ``cls`` +* `#29951 `__: MAINT: replace use of ``asanyarray`` with ``out=...`` to keep... +* `#29952 `__: TYP: add ``__class_getitem__`` to ``bool`` and ``datetime64`` +* `#29954 `__: TYP: fix inconsistent ``float64.__getformat__`` stub +* `#29956 `__: TYP: fix return type annotation for normalize_axis_tuple utility +* `#29957 `__: TST: Remove recwarn from tests +* `#29958 `__: TYP: Fix inconsistent ``__all__`` stubs +* `#29959 `__: TYP: stub ``numpy.ma.testutils`` +* `#29960 `__: DOC: fix formatting in ``np.percentile`` docstring +* `#29961 `__: TYP: update the ``finfo`` stubs +* `#29962 `__: MAINT: remove obsolete ``generic.tostring`` method descriptor... 
+* `#29963 `__: MAINT: Remove removed array methods +* `#29964 `__: TYP: add missing ``generic`` methods +* `#29965 `__: TYP: mark ``flexible`` as ``@final`` +* `#29966 `__: TYP: minor fixes in ``__pow__`` methods +* `#29967 `__: TYP: improved ``busdaycalendar`` annotations +* `#29968 `__: TYP: missing ``vectorize`` default argument +* `#29969 `__: TYP: fix stubtest errors in ``lib._function_base_impl`` +* `#29970 `__: BUG: Fix resize when it contains references +* `#29971 `__: TYP: update ``ScalarType`` type +* `#29972 `__: TYP: expand ``TypedDict`` kwargs in ``full`` to appease stubtest +* `#29973 `__: DEP: Remove deprecated ``interpolation`` parameter from quantile/percentile +* `#29976 `__: TYP: fix ``random.Generator.shuffle`` input type +* `#29978 `__: DEP: remove ``in1d`` +* `#29980 `__: DEP: remove ``ndindex.ndincr`` (deprecated since 1.20) +* `#29981 `__: TYP: change ``ndenumerate.__new__`` into ``__init__`` +* `#29982 `__: TYP: change ``nditer.__new__`` into ``__init__`` and tighten... +* `#29983 `__: TYP: minor fixes and improvements in ``record`` and ``recarray`` +* `#29984 `__: DEP: remove the ``fix_imports`` parameter from ``save()`` +* `#29985 `__: MAINT: Remove ``_core.MachAr`` remnants +* `#29986 `__: DEP: Remove ``ndarray.ctypes.get_\*`` methods (deprecated since... +* `#29988 `__: MAINT: remove remnants of ``linalg.linalg`` and ``fft.helper`` +* `#29989 `__: BUG: Fix np.strings.slice if start > stop +* `#29991 `__: TYP: some minor fixes for the constants in ``_core.multiarray`` +* `#29992 `__: DOC: update SIMD build options to cover riscv64 +* `#29993 `__: MAINT: avoid namespace pollution in ``_core._type_aliases`` +* `#29994 `__: DEP: remove the ``newshape`` parameter from ``reshape()`` +* `#29996 `__: MAINT: Update main after the NumPy 2.3.4 release. 
+* `#29997 `__: MAINT: remove deprecated in numpy/lib/_function_base_impl.py +* `#29998 `__: MAINT: Update write_release.py +* `#29999 `__: TYP: fix ``char.startswith`` signature +* `#30000 `__: ENH: Add ``stable`` kwarg to ``chararray.argsort`` +* `#30001 `__: TYP: fix ``ndarray.sort(stable=True)`` +* `#30002 `__: TYP: inconsistent ``strings.slice`` default argument for ``stop`` +* `#30003 `__: TYP: remove implicit re-export in ``_core._exceptions`` +* `#30004 `__: TYP: stub ``MesonTemplate.objects_substitution()`` in ``f2py._backends._meson`` +* `#30005 `__: CI, TST: Enable parallel threads testing in macOS CI job +* `#30006 `__: TYP: fix stubtest errors in ``numpy.lib.\*`` +* `#30007 `__: MAINT: Remove ``NDArrayOperatorsMixin.um`` class attribute ``umath``... +* `#30008 `__: DOC: Add concrete Meson build example for NumPy C ufunc extension +* `#30009 `__: TYP: update ``corrcoef`` signature +* `#30011 `__: TYP: ``linalg.svdvals``\ : fix inconsistent signature and add... +* `#30012 `__: MAINT: remove confusing parameter default for ``shape`` in ``reshape`` +* `#30013 `__: TYP: ``linalg.tensordot``\ : fix inconsistent signature and simplify... +* `#30014 `__: TYP: stub ``linalg.lapack_lite.LapackError`` +* `#30015 `__: MAINT: Bump github/codeql-action from 4.30.8 to 4.30.9 +* `#30018 `__: TYP: fix stubtest errors in ``numpy.ma`` +* `#30019 `__: MAINT, TST: Increase tolerance in fft test. +* `#30020 `__: DOC: Correct typos in numpy API documentation +* `#30021 `__: DEP: Remove ``delimitor`` kwarg from ``ma.mrecords.fromtextfile`` +* `#30030 `__: MAINT: Bump astral-sh/setup-uv from 7.1.0 to 7.1.1 +* `#30031 `__: TYP: fix stubtest errors in ``numpy.polynomial.\*`` +* `#30032 `__: TYP: ``testing.check_support_sve``\ : fix inconsistent parameter... 
+* `#30033 `__: TYP: fix stubtest error in ``numpy.typing`` +* `#30034 `__: TYP: Add type annotations for ASIMD, NEON, and RVV targets +* `#30035 `__: DEV: add a ``spin stubtest`` command +* `#30036 `__: TYP: restore abstract scalar type constructor parameters +* `#30039 `__: DEV: Set correct ``PYTHONPATH`` in ``spin stubtest`` +* `#30040 `__: DOC: Clarify signed vs unsigned ``intptr_t`` vs ``uintptr_t``... +* `#30043 `__: CI, TYP: stubtest +* `#30044 `__: MAINT: bump ``hypothesis`` to ``6.142.2`` +* `#30045 `__: DEV: separate stubtest allowlist for py312+ +* `#30049 `__: BLD: update scipy-openblas, use -Dpkg_config_path +* `#30050 `__: CI: Skip test runs if all changes are docs or stubs +* `#30051 `__: CI: Python 3.14 stable +* `#30052 `__: TYP, STY: ``polynomial``\ : reformat the stubs +* `#30053 `__: TYP: Type-checking the stubs +* `#30054 `__: BUG: allow division between object-dtype arrays and timedelta... +* `#30055 `__: TYP: Annotate ``ma.array``\ , ``ma.asarray``\ , and ``ma.asanyarray`` +* `#30057 `__: DOC: Remove nonexistent ``order`` parameter docs of ``ma.asanyarray`` +* `#30058 `__: BUG: fix int left shift UB in CPU feature detection +* `#30060 `__: TYP: ``polynomial.polyutils``\ : fix callable type signatures +* `#30061 `__: CI, TYP: Fix stubtest CI failures on py311 +* `#30064 `__: TST: Remove unnecessary test__datasource thread-unsafe markers +* `#30065 `__: TYP: ``polynomial``\ : Simplify ``chebpts{1,2}`` function stubs +* `#30067 `__: TYP: ``numpy.ma``\ : Annotate 27 functions related to masks and... +* `#30068 `__: MAINT: remove deprecated ``style`` argument and deprecations... +* `#30071 `__: BUG: avoid negating INT_MIN in PyArray_Round implementation +* `#30073 `__: DOC: Correct a typo: an 1d -> a 1d +* `#30074 `__: DOC: Fix a couple typos in generalized-ufuncs.rst. +* `#30077 `__: BUG: prefer passing a pointer to the helper function to avoid... 
+* `#30080 `__: MAINT: Bump actions/upload-artifact from 4.6.2 to 5.0.0 +* `#30081 `__: MAINT: Bump github/codeql-action from 4.30.9 to 4.31.0 +* `#30082 `__: MAINT: Bump astral-sh/setup-uv from 7.1.1 to 7.1.2 +* `#30083 `__: DOC: Fix the first small 'process_core_dims()' example. +* `#30084 `__: TYP: ``numpy.ma``\ : Annotate the callable wrapper classes +* `#30091 `__: BUG, TYP: Fix ``ma.core._frommethod`` function signatures +* `#30093 `__: DOC: Correct grammatical usage like a/an +* `#30097 `__: CI: Update ARM job (armhf_test) to use Ubuntu 24.04 +* `#30099 `__: BUG, TYP: Fix ``ma.core._convert2ma`` function signatures +* `#30100 `__: BLD: use blobless checkout on CircleCI +* `#30101 `__: TST: Add thread-safe testing guidelines +* `#30102 `__: ENH: Make FPE blas check a runtime check for all apple arm systems +* `#30104 `__: BUG, TYP: ufunc method signatures +* `#30106 `__: MAINT: Bump github/codeql-action from 4.31.0 to 4.31.2 +* `#30108 `__: TYP: shape-type-aware ``swapaxes`` +* `#30111 `__: DOC: Add a plot to the 'unwrap' docstring. +* `#30114 `__: BUG, TYP: ``ndarray`` method runtime signatures +* `#30118 `__: CI: disable flaky ubuntu UBsan CI job +* `#30121 `__: BUG, TYP: scalar-type constructor runtime signatures +* `#30124 `__: BUG, TYP: ``flatiter`` method runtime signatures, and better... +* `#30125 `__: BUG: Fix handling by ``unique`` of signed zero in complex types. +* `#30126 `__: BUG: ``nditer`` runtime signatures +* `#30127 `__: DOC: remove outdated notes on how to build against numpy in conda-forge +* `#30128 `__: BUG: Avoid compilation error of wrapper file generated with SWIG... +* `#30132 `__: BLD: use scipy-openblas 0.3.30.7 +* `#30137 `__: BUG: ``broadcast`` runtime signatures +* `#30138 `__: BUG: array construction function runtime signatures +* `#30139 `__: MAINT,BUG: make later arguments in array2string keyword only. +* `#30140 `__: BUG, DOC, TYP: ``empty`` and ``zeros`` runtime signatures, and... 
+* `#30141 `__: MAINT: fix math markup (\times -> \\times) in numpy.linalg.multidot... +* `#30142 `__: MAINT: Migrate einsum.c.src to C++ (einsum.cpp) +* `#30143 `__: BUG, TYP: ``_core.multiarray.\*`` function runtime signatures +* `#30147 `__: BUG, TYP: add the remaining ``_core.multiarray`` function runtime... +* `#30148 `__: DOC: Fix Returns section formatting in linalg.qr and linalg.svd +* `#30149 `__: MAINT: Not show signature in git_version +* `#30153 `__: BUG: decref on error in PyArray_NewFromDescr (#30152) +* `#30154 `__: BUG: update requires to requirements in numpy.multiarray see... +* `#30155 `__: BUG, DOC: ``ndarray`` dunder method runtime signatures and missing... +* `#30160 `__: TYP: fix an invalid default value for ``array``\ 's ``ndmax``... +* `#30161 `__: ENH: Run SWIG unit tests in CI action +* `#30163 `__: ENH: Add ``order`` parameter to ``np.ma.asanyarray`` +* `#30164 `__: BUG: ``numpy.random.\*`` class runtime signatures +* `#30165 `__: MAINT: some ``numpy.polynomial.\*`` namespace pollution cleanup +* `#30166 `__: CI: add check for numpy-release version of scipy-openblas +* `#30168 `__: TYP, DEP: ``numpy.fix`` pending deprecation +* `#30169 `__: BUG: ``np.dtype`` and ``np.dtypes.\*`` runtime signatures +* `#30170 `__: ENH: Reduce compute time for ``tobytes`` in non-contiguos paths +* `#30175 `__: ENH: Updates for the ``spin bench`` command. +* `#30176 `__: BUG: Fix check of PyMem_Calloc return value. +* `#30179 `__: MAINT,API: Introduce __numpy_dtype__ and fix dtype attribute... +* `#30183 `__: DOC: Corrected grammatical issues in code comments +* `#30190 `__: MAINT: ``ma.asanyarray``\ : use ``order=None`` as default +* `#30191 `__: MAINT: Bump int128/hide-comment-action from 1.46.0 to 1.47.0 +* `#30193 `__: BUG, DOC: ``np.generic`` missing method runtime signatures and... +* `#30196 `__: DOC: Fix some broken refs and Typos. 
+* `#30197 `__: ENH,MAINT: rewrite np.fix to use np.trunc internally +* `#30199 `__: DOC: update result_type docs to link to promotion rules +* `#30201 `__: ENH: Detect Fortran vs C order in array_assign_boolean_subscript +* `#30202 `__: MAINT: Bump actions/dependency-review-action from 4.8.1 to 4.8.2 +* `#30203 `__: MAINT: Bump astral-sh/setup-uv from 7.1.2 to 7.1.3 +* `#30206 `__: DOC: an Mercurial -> a Mercurial +* `#30208 `__: DOC: Release notes for the runtime signature changes +* `#30209 `__: MAINT: Bump pypa/cibuildwheel from 3.2.1 to 3.3.0 +* `#30211 `__: ENH: ``ufunc.__signature__`` +* `#30213 `__: DOC: fix links for newly rebuilt numpy-tutorials site +* `#30214 `__: BUG: Fix build on s390x with clang +* `#30219 `__: MAINT: Bump github/codeql-action from 4.31.2 to 4.31.3 +* `#30221 `__: TYP: Annotate remaining ``ma.MaskedArray`` methods +* `#30222 `__: CI: remove (mainly windows) jobs from Azure pipelines +* `#30223 `__: STY: fix ``ma.MaskedAArray.tolist`` docstring indentation +* `#30224 `__: TYP: ``ravel``\ : less awkward return types +* `#30226 `__: TYP: stub ``ma.core.get_masked_subclass`` +* `#30227 `__: CI: fixes https://github.com/numpy/numpy/security/code-scanning/364 +* `#30228 `__: BUG: fix data race in ``wrapping_auxdata_freelist`` by making... +* `#30229 `__: ENH, TYP: transparent ``ma.extras._fromnxfunction`` runtime signatures +* `#30231 `__: TYP: Shape-typing in ``lib._twodim_base_impl`` +* `#30232 `__: CI: removes azure pipelines +* `#30233 `__: TYP: ``_core.numeric``\ : shape-typing and fixed overlapping... 
+* `#30234 `__: BUG: fix data race in ``PyArray_DescrHash`` +* `#30235 `__: MAINT: undo change to ``fromstring`` text signature for 2.4.0 +* `#30239 `__: DOC: Correct an equation error in ``numpy.random.Generator.pareto`` +* `#30242 `__: BUG: fix einsum ``optimize=True`` parsing error +* `#30243 `__: BUG: Add missing ``PyErr_Occurred()`` check to fast-path +* `#30246 `__: TYP: ``lib._function_base_impl``\ : many typing improvements +* `#30247 `__: DOC: Update wording in numpy.coremath +* `#30248 `__: DOC: remove mention of 'skip azp' since we no longer use azure +* `#30252 `__: MAINT: Bump actions/checkout from 5.0.0 to 5.0.1 +* `#30253 `__: MAINT: Bump github/codeql-action from 4.31.3 to 4.31.4 +* `#30255 `__: BUG: always ignore FPE when Accelerate is the BLAS backend +* `#30256 `__: CI: update ``paths-ignore`` for mypy and wheels workflows +* `#30259 `__: TST: mark tests which call ``gc.collect()`` as thread unsafe +* `#30261 `__: TYP: fix shape-type of structured array fields +* `#30263 `__: TST: scalar fast path multithreaded test +* `#30266 `__: ENH: New-style sorting for StringDType +* `#30270 `__: ENH: Use descriptor rather than custom ``tp_getattro`` +* `#30271 `__: TST: Join threads in ``test_printoptions_thread_safety`` +* `#30273 `__: MAINT: Bump actions/checkout from 5.0.1 to 6.0.0 +* `#30276 `__: MAINT: Bump astral-sh/setup-uv from 7.1.3 to 7.1.4 +* `#30277 `__: BUG: Fix misleading ValueError in convolve on empty inputs due... 
+* `#30278 `__: BUG: fix np.resize refchk on python 3.14 +* `#30279 `__: MAINT: refactor unary temporary elision check +* `#30282 `__: DEP, TYP: ``ndarray.shape`` setter pending deprecation +* `#30284 `__: DEP: deprecate ``numpy.lib.user_array.container`` +* `#30286 `__: MAINT: add ``matmul`` to ``_core.umath.__all__`` +* `#30288 `__: MAINT: Bump github/codeql-action from 4.31.4 to 4.31.5 +* `#30289 `__: TYP: ``_core.overrides.set_module`` implicit re-export +* `#30290 `__: TYP: move the ``normalize_axis_\*`` function definitions from... +* `#30291 `__: TYP: ``lib._function_base_impl._quantile_ureduce_func`` inline... +* `#30293 `__: TYP: move ``vectorize`` stubs to ``lib._function_base_impl`` +* `#30294 `__: TYP: ``_core.\*``\ : stubs for some private functions and constants +* `#30295 `__: MAINT: remove ``lib._shape_base_impl._replace_zero_by_x_arrays`` +* `#30296 `__: TYP: ``lib.\*``\ : stubs for some private functions used by ``_function_base_imp``... +* `#30297 `__: MAINT: ``broadcast_shapes``\ : update presumed ``NPY_MAXARGS``... +* `#30300 `__: BUG: Fix RecursionError and raise ValueError for unmatched parentheses +* `#30303 `__: MAINT: Bump actions/setup-python from 6.0.0 to 6.1.0 +* `#30310 `__: MAINT: avoid unused variable warnings in dtype tests +* `#30312 `__: MAINT: Implement some RAII classes and use them in stringdtype/casts.cpp +* `#30313 `__: DOC: record a data -> record a data point +* `#30314 `__: BUG: Fix descriptor changes related build/parse value issues... +* `#30318 `__: DOC: Fix duplicate ``import pytest`` in testing documentation... 
+* `#30321 `__: TYP: ``__numpy_dtype__`` +* `#30324 `__: TYP: ``ndenumerate`` generic type parameter default +* `#30325 `__: DOC, TYP: Expand the 2.3 ``numpy.typing`` deprecation docs +* `#30326 `__: TYP: ``ma.mrecords.MaskedRecords`` generic type parameter defaults +* `#30327 `__: TYP: ``_core._umath_tests`` module stubs +* `#30347 `__: REL: Prepare for the NumPy 2.4.0rc1 release +* `#30377 `__: MAINT: don't assert RecursionError in monster dtype test (#30375) +* `#30378 `__: CI: bump FreeBSD from 14.2 to 14.3 +* `#30398 `__: MAINT: Use RAII objects in unique.cpp to ensure safe resource... +* `#30399 `__: BUG: raise BufferError when creating dlpack with wrong device... +* `#30400 `__: BUG: fix free-threaded races in RandomState +* `#30401 `__: BUG: fix reduction issue in weighted quantile (#30070) +* `#30403 `__: SIMD, BLD: Fix Highway target attribute build failure on ppc64... +* `#30408 `__: BUG: Add missing return status check of NpyIter_EnableExternalLoop()... +* `#30419 `__: DOC: Improve cross-links in thread safety documentation (#30373) +* `#30420 `__: BUG: fix double evaluation in PyArrayScalar_RETURN_BOOL_FROM_LONG... +* `#30432 `__: BUG: fix remaining data races in mtrand.pyx (#30426) +* `#30459 `__: TYP: restore ``generic.__hash__`` (#30456) diff --git a/doc/changelog/2.4.1-changelog.rst b/doc/changelog/2.4.1-changelog.rst new file mode 100644 index 000000000000..3cf0d8ad0ec5 --- /dev/null +++ b/doc/changelog/2.4.1-changelog.rst @@ -0,0 +1,37 @@ + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Alexander Shadchin +* Bill Tompkins + +* Charles Harris +* Joren Hammudoglu +* Marten van Kerkwijk +* Nathan Goldbaum +* Raghuveer Devulapalli +* Ralf Gommers +* Sebastian Berg + +Pull requests merged +==================== + +A total of 15 pull requests were merged for this release. 
+ +* `#30490 `__: MAINT: Prepare 2.4.x for further development +* `#30503 `__: DOC: ``numpy.select``\ : fix ``default`` parameter docstring... +* `#30504 `__: REV: Revert part of #30164 (#30500) +* `#30506 `__: TYP: ``numpy.select``\ : allow passing array-like ``default``... +* `#30507 `__: MNT: use if constexpr for compile-time branch selection +* `#30513 `__: BUG: Fix leak in flat assignment iterator +* `#30516 `__: BUG: fix heap overflow in fixed-width string multiply (#30511) +* `#30523 `__: BUG: Ensure summed weights returned by np.average always are... +* `#30527 `__: TYP: Fix return type of histogram2d +* `#30594 `__: MAINT: avoid passing ints to random functions that take double... +* `#30595 `__: BLD: Avoiding conflict with pygit2 for static build +* `#30596 `__: MAINT: Fix msvccompiler missing error on FreeBSD +* `#30608 `__: BLD: update vendored Meson to 1.9.2 +* `#30620 `__: ENH: use more fine-grained critical sections in array coercion... +* `#30623 `__: BUG: Undo result type change of quantile/percentile but keep... diff --git a/doc/changelog/2.4.2-changelog.rst b/doc/changelog/2.4.2-changelog.rst new file mode 100644 index 000000000000..06d50fa5e8f0 --- /dev/null +++ b/doc/changelog/2.4.2-changelog.rst @@ -0,0 +1,35 @@ + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Daniel Tang + +* Joren Hammudoglu +* Kumar Aditya +* Matti Picus +* Nathan Goldbaum +* Ralf Gommers +* Sebastian Berg +* Vikram Kumar + + +Pull requests merged +==================== + +A total of 12 pull requests were merged for this release. + +* `#30629 `__: MAINT: Prepare 2.4.x for further development +* `#30636 `__: TYP: ``arange``\ : accept datetime strings +* `#30657 `__: MAINT: avoid possible race condition by not touching ``os.environ``... 
+* `#30700 `__: BUG: validate contraction axes in tensordot (#30521) +* `#30701 `__: DOC: __array_namespace__info__: set_module not __module__ (#30679) +* `#30702 `__: BUG: fix free-threaded PyObject layout in replace_scalar_type_names... +* `#30703 `__: TST: fix limited API example in tests for latest Cython +* `#30709 `__: BUG: Fix some bugs found via valgrind (#30680) +* `#30712 `__: MAINT: replace ob_type access with Py_TYPE in PyArray_CheckExact +* `#30713 `__: BUG: Fixup the quantile promotion fixup +* `#30736 `__: BUG: fix thread safety of ``array_getbuffer`` (#30667) +* `#30737 `__: backport scipy-openblas version change + diff --git a/doc/conftest.py b/doc/conftest.py index 5e00b1e127fe..99d6797d8a06 100644 --- a/doc/conftest.py +++ b/doc/conftest.py @@ -1,10 +1,12 @@ """ Pytest configuration and fixtures for the Numpy test suite. """ +import doctest + +import matplotlib import pytest + import numpy -import matplotlib -import doctest matplotlib.use('agg', force=True) @@ -29,4 +31,3 @@ def check_output(self, want, got, optionflags): def add_np(doctest_namespace): numpy.random.seed(1) doctest_namespace['np'] = numpy - diff --git a/doc/neps/conf.py b/doc/neps/conf.py index 6cf97ddfe59f..056002135dbd 100644 --- a/doc/neps/conf.py +++ b/doc/neps/conf.py @@ -15,7 +15,8 @@ # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # -import os +from datetime import datetime + # import sys # sys.path.insert(0, os.path.abspath('.')) @@ -38,17 +39,18 @@ templates_path = ['../source/_templates/'] # The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: +# You can specify multiple suffix as a dict mapping suffixes to parsers: # -# source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +# source_suffix = {'.rst': 'restructuredtext', '.md': 'markdown'} +source_suffix = {'.rst': 'restructuredtext'} # The master toctree document. 
master_doc = 'content' # General information about the project. project = 'NumPy Enhancement Proposals' -copyright = '2017-2018, NumPy Developers' +year = datetime.now().year +copyright = f'2017-{year}, NumPy Developers' author = 'NumPy Developers' title = 'NumPy Enhancement Proposals Documentation' @@ -85,21 +87,18 @@ html_theme = 'pydata_sphinx_theme' -html_logo = '../source/_static/numpylogo.svg' - html_favicon = '../source/_static/favicon/favicon.ico' html_theme_options = { + "logo": { + "image_light": "_static/numpylogo.svg", + "image_dark": "_static/numpylogo_dark.svg", + }, "github_url": "https://github.com/numpy/numpy", - "external_links": [ - {"name": "Wishlist", - "url": "https://github.com/numpy/numpy/issues?q=is%3Aopen+is%3Aissue+label%3A%2223+-+Wish+List%22", - }, - ], "show_prev_next": False, } -html_title = "%s" % (project) +html_title = f"{project}" html_static_path = ['../source/_static'] html_last_updated_fmt = '%b %d, %Y' @@ -116,7 +115,6 @@ plot_html_show_source_link = False - # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. diff --git a/doc/neps/content.rst b/doc/neps/content.rst index a188deae2ab2..ddc445e462fd 100644 --- a/doc/neps/content.rst +++ b/doc/neps/content.rst @@ -16,10 +16,7 @@ Roadmap Index The Scope of NumPy Current roadmap - Wishlist (opens new window) |wishlist_link| + Wishlist -.. |wishlist_link| raw:: html - - WishList diff --git a/doc/neps/index.rst b/doc/neps/index.rst index 609eeef61b2c..1891641cbafd 100644 --- a/doc/neps/index.rst +++ b/doc/neps/index.rst @@ -5,8 +5,8 @@ Roadmap & NumPy enhancement proposals This page provides an overview of development priorities for NumPy. Specifically, it contains a roadmap with a higher-level overview, as well as NumPy Enhancement Proposals (NEPs)—suggested changes -to the library—in various stages of discussion or completion (see `NEP -0 `__). +to the library—in various stages of discussion or completion. 
+See :doc:`nep-0000` for more information about NEPs. Roadmap ------- @@ -15,7 +15,6 @@ Roadmap The Scope of NumPy Current roadmap - Wish list NumPy enhancement proposals (NEPs) ---------------------------------- diff --git a/doc/neps/nep-0013-ufunc-overrides.rst b/doc/neps/nep-0013-ufunc-overrides.rst index d69af6924940..eea7bfb91949 100644 --- a/doc/neps/nep-0013-ufunc-overrides.rst +++ b/doc/neps/nep-0013-ufunc-overrides.rst @@ -526,7 +526,7 @@ multiplication:: def __init__(self, value): self.value = value def __repr__(self): - return "MyObject({!r})".format(self.value) + return f"MyObject({self.value!r})" def __mul__(self, other): return MyObject(1234) def __rmul__(self, other): diff --git a/doc/neps/nep-0016-abstract-array.rst b/doc/neps/nep-0016-abstract-array.rst index a7a43b29bb99..9ef148b358d9 100644 --- a/doc/neps/nep-0016-abstract-array.rst +++ b/doc/neps/nep-0016-abstract-array.rst @@ -13,8 +13,7 @@ NEP 16 — An abstract base class for identifying "duck arrays" .. note:: This NEP has been withdrawn in favor of the protocol based approach - described in - `NEP 22 `__ + described in :doc:`nep-0022-ndarray-duck-typing-overview` Abstract -------- diff --git a/doc/neps/nep-0016-benchmark.py b/doc/neps/nep-0016-benchmark.py index ec8e44726876..e3783baa2de5 100644 --- a/doc/neps/nep-0016-benchmark.py +++ b/doc/neps/nep-0016-benchmark.py @@ -1,14 +1,17 @@ -import perf import abc + +import perf + import numpy as np + class NotArray: pass class AttrArray: __array_implementer__ = True -class ArrayBase(abc.ABC): +class ArrayBase(abc.ABC): # noqa: B024 pass class ABCArray1(ArrayBase): @@ -17,6 +20,7 @@ class ABCArray1(ArrayBase): class ABCArray2: pass + ArrayBase.register(ABCArray2) not_array = NotArray() @@ -33,6 +37,7 @@ class ABCArray2: def t(name, statement): runner.timeit(name, statement, globals=globals()) + t("np.asarray([])", "np.asarray([])") arrobj = np.array([]) t("np.asarray(arrobj)", "np.asarray(arrobj)") @@ -45,4 +50,3 @@ def t(name, statement): 
t("ABC, False", "isinstance(not_array, ArrayBase)") t("ABC, True, via inheritance", "isinstance(abc_array_1, ArrayBase)") t("ABC, True, via register", "isinstance(abc_array_2, ArrayBase)") - diff --git a/doc/neps/nep-0018-array-function-protocol.rst b/doc/neps/nep-0018-array-function-protocol.rst index a1682435272f..8eec748e3be1 100644 --- a/doc/neps/nep-0018-array-function-protocol.rst +++ b/doc/neps/nep-0018-array-function-protocol.rst @@ -141,7 +141,7 @@ The type of ``types`` is intentionally vague: instead for performance reasons. In any case, ``__array_function__`` implementations should not rely on the iteration order of ``types``, which would violate a well-defined "Type casting hierarchy" (as described in -`NEP-13 `_). +:ref:`NEP-13 `). Example for a project implementing the NumPy API ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -300,7 +300,7 @@ are valid then which has precedence? For the most part, the rules for dispatch with ``__array_function__`` match those for ``__array_ufunc__`` (see -`NEP-13 `_). +:ref:`NEP-13 `). In particular: - NumPy will gather implementations of ``__array_function__`` from all @@ -819,7 +819,7 @@ don't think it makes sense to do so now, because code generation involves tradeoffs and NumPy's experience with type annotations is still `quite limited `_. Even if NumPy was Python 3 only (which will happen -`sometime in 2019 `_), +:ref:`sometime in 2019 `), we aren't ready to annotate NumPy's codebase directly yet. Support for implementation-specific arguments diff --git a/doc/neps/nep-0021-advanced-indexing.rst b/doc/neps/nep-0021-advanced-indexing.rst index 849ed874c21b..11ef238d6179 100644 --- a/doc/neps/nep-0021-advanced-indexing.rst +++ b/doc/neps/nep-0021-advanced-indexing.rst @@ -123,7 +123,7 @@ with shape ``(1,)``, not a 2D sub-matrix with shape ``(1, 1)``. Mixed indexing seems so tricky that it is tempting to say that it never should be used. 
However, it is not easy to avoid, because NumPy implicitly adds full slices if there are fewer indices than the full dimensionality of the indexed -array. This means that indexing a 2D array like `x[[0, 1]]`` is equivalent to +array. This means that indexing a 2D array like ``x[[0, 1]]`` is equivalent to ``x[[0, 1], :]``. These cases are not surprising, but they constrain the behavior of mixed indexing. @@ -219,7 +219,7 @@ be deduced: no transposing should be done. The axes created by the integer array indices are always inserted at the front, even for a single index. -4. Boolean indexing is conceptionally outer indexing. Broadcasting +4. Boolean indexing is conceptually outer indexing. Broadcasting together with other advanced indices in the manner of legacy indexing is generally not helpful or well defined. A user who wishes the "``nonzero``" plus broadcast behaviour can thus @@ -236,7 +236,7 @@ be deduced: For the beginning, this probably means cases where ``arr[ind]`` and ``arr.oindex[ind]`` return different results give deprecation warnings. This includes every use of vectorized indexing with multiple integer arrays. - Due to the transposing behaviour, this means that``arr[0, :, index_arr]`` + Due to the transposing behaviour, this means that ``arr[0, :, index_arr]`` will be deprecated, but ``arr[:, 0, index_arr]`` will not for the time being. 7. To ensure that existing subclasses of `ndarray` that override indexing @@ -285,7 +285,7 @@ Open Questions Copying always "fixes" this possible inconsistency. * The final state to morph plain indexing in is not fixed in this PEP. - It is for example possible that `arr[index]`` will be equivalent to + It is for example possible that ``arr[index]`` will be equivalent to ``arr.oindex`` at some point in the future. Since such a change will take years, it seems unnecessary to make specific decisions at this time. @@ -649,7 +649,7 @@ eventualities. 
Copyright --------- -This document is placed under the CC0 1.0 Universell (CC0 1.0) Public Domain Dedication [1]_. +This document is placed under the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication [1]_. References and footnotes @@ -659,5 +659,4 @@ References and footnotes with this work has waived all copyright and related or neighboring rights to this work. The CC0 license may be found at https://creativecommons.org/publicdomain/zero/1.0/ -.. [2] e.g., see NEP 18, - http://www.numpy.org/neps/nep-0018-array-function-protocol.html +.. [2] e.g., see :doc:`nep-0018-array-function-protocol` diff --git a/doc/neps/nep-0022-ndarray-duck-typing-overview.rst b/doc/neps/nep-0022-ndarray-duck-typing-overview.rst index bed956ce735e..f4efd130387f 100644 --- a/doc/neps/nep-0022-ndarray-duck-typing-overview.rst +++ b/doc/neps/nep-0022-ndarray-duck-typing-overview.rst @@ -256,8 +256,7 @@ It’s tempting to try to define cleaned up versions of ndarray methods with a more minimal interface to allow for easier implementation. For example, ``__array_reshape__`` could drop some of the strange arguments accepted by ``reshape`` and ``__array_basic_getitem__`` -could drop all the `strange edge cases -`__ of +could drop all the :doc:`strange edge cases ` of NumPy’s advanced indexing. But as discussed above, we don’t really know what APIs we need for diff --git a/doc/neps/nep-0023-backwards-compatibility.rst b/doc/neps/nep-0023-backwards-compatibility.rst index 660c626e9278..a3879f550e3c 100644 --- a/doc/neps/nep-0023-backwards-compatibility.rst +++ b/doc/neps/nep-0023-backwards-compatibility.rst @@ -224,8 +224,7 @@ Functionality with more strict deprecation policies ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - ``numpy.random`` has its own backwards compatibility policy with additional - requirements on top of the ones in this NEP, see - `NEP 19 `_. + requirements on top of the ones in this NEP, see :doc:`nep-0019-rng-policy`. 
- The file format of ``.npy`` and ``.npz`` files is strictly versioned independent of the NumPy version; existing format versions must remain backwards compatible even if a newer format version is introduced. diff --git a/doc/neps/nep-0029-deprecation_policy.rst b/doc/neps/nep-0029-deprecation_policy.rst index b725711c58a3..8b7c300edb24 100644 --- a/doc/neps/nep-0029-deprecation_policy.rst +++ b/doc/neps/nep-0029-deprecation_policy.rst @@ -133,7 +133,14 @@ Apr 05, 2024 3.10+ 1.23+ Jun 22, 2024 3.10+ 1.24+ Dec 18, 2024 3.10+ 1.25+ Apr 04, 2025 3.11+ 1.25+ -Apr 24, 2026 3.12+ 1.25+ +Jun 17, 2025 3.11+ 1.26+ +Sep 16, 2025 3.11+ 2.0+ +Apr 24, 2026 3.12+ 2.0+ +Jun 16, 2026 3.12+ 2.1+ +Aug 19, 2026 3.12+ 2.2+ +Dec 09, 2026 3.12+ 2.3+ +Apr 02, 2027 3.13+ 2.3+ +Apr 07, 2028 3.14+ 2.3+ ============ ====== ===== @@ -151,7 +158,7 @@ Drop Schedule On Dec 22, 2021 drop support for NumPy 1.18 (initially released on Dec 22, 2019) On Dec 26, 2021 drop support for Python 3.7 (initially released on Jun 27, 2018) On Jun 21, 2022 drop support for NumPy 1.19 (initially released on Jun 20, 2020) - On Jan 31, 2023 drop support for NumPy 1.20 (initially released on Jan 31, 2021) + On Jan 31, 2023 drop support for NumPy 1.20 (initially released on Jan 30, 2021) On Apr 14, 2023 drop support for Python 3.8 (initially released on Oct 14, 2019) On Jun 23, 2023 drop support for NumPy 1.21 (initially released on Jun 22, 2021) On Jan 01, 2024 drop support for NumPy 1.22 (initially released on Dec 31, 2021) @@ -159,7 +166,14 @@ Drop Schedule On Jun 22, 2024 drop support for NumPy 1.23 (initially released on Jun 22, 2022) On Dec 18, 2024 drop support for NumPy 1.24 (initially released on Dec 18, 2022) On Apr 04, 2025 drop support for Python 3.10 (initially released on Oct 04, 2021) + On Jun 17, 2025 drop support for NumPy 1.25 (initially released on Jun 17, 2023) + On Sep 16, 2025 drop support for NumPy 1.26 (initially released on Sep 16, 2023) On Apr 24, 2026 drop support for Python 3.11 
(initially released on Oct 24, 2022) + On Jun 16, 2026 drop support for NumPy 2.0 (initially released on Jun 15, 2024) + On Aug 19, 2026 drop support for NumPy 2.1 (initially released on Aug 18, 2024) + On Dec 09, 2026 drop support for NumPy 2.2 (initially released on Dec 08, 2024) + On Apr 02, 2027 drop support for Python 3.12 (initially released on Oct 02, 2023) + On Apr 07, 2028 drop support for Python 3.13 (initially released on Oct 07, 2024) Implementation @@ -296,6 +310,13 @@ Code to generate support and drop schedule tables :: Jun 22, 2022: NumPy 1.23 Oct 24, 2022: Python 3.11 Dec 18, 2022: NumPy 1.24 + Jun 17, 2023: NumPy 1.25 + Sep 16, 2023: NumPy 1.26 + Oct 2, 2023: Python 3.12 + Jun 15, 2024: NumPy 2.0 + Aug 18, 2024: NumPy 2.1 + Oct 7, 2024: Python 3.13 + Dec 8, 2024: NumPy 2.2 """ releases = [] diff --git a/doc/neps/nep-0030-duck-array-protocol.rst b/doc/neps/nep-0030-duck-array-protocol.rst index bb58eaf4fa24..4a3d268697a2 100644 --- a/doc/neps/nep-0030-duck-array-protocol.rst +++ b/doc/neps/nep-0030-duck-array-protocol.rst @@ -102,14 +102,14 @@ a complete implementation would look like the following: The implementation above exemplifies the simplest case, but the overall idea is that libraries will implement a ``__duckarray__`` method that returns the original object, and an ``__array__`` method that either creates and returns an -appropriate NumPy array, or raises a``TypeError`` to prevent unintentional use +appropriate NumPy array, or raises a ``TypeError`` to prevent unintentional use as an object in a NumPy array (if ``np.asarray`` is called on an arbitrary object that does not implement ``__array__``, it will create a NumPy array scalar). In case of existing libraries that don't already implement ``__array__`` but would like to use duck array typing, it is advised that they introduce -both ``__array__`` and``__duckarray__`` methods. +both ``__array__`` and ``__duckarray__`` methods. 
Usage ----- @@ -176,7 +176,7 @@ Previous proposals and discussion --------------------------------- The duck typing protocol proposed here was described in a high level in -`NEP 22 `_. +:ref:`NEP 22 `. Additionally, longer discussions about the protocol and related proposals took place in diff --git a/doc/neps/nep-0031-uarray.rst b/doc/neps/nep-0031-uarray.rst index cf06d1109c11..cb906248fde6 100644 --- a/doc/neps/nep-0031-uarray.rst +++ b/doc/neps/nep-0031-uarray.rst @@ -319,7 +319,7 @@ It has been formally realized (at least in part) that a backend system is needed for this, in the `NumPy roadmap `_. For ``numpy.random``, it's still necessary to make the C-API fit the one -proposed in `NEP-19 `_. +proposed in :ref:`NEP-19 `. This is impossible for `mkl-random`, because then it would need to be rewritten to fit that framework. The guarantees on stream compatibility will be the same as before, but if there's a backend that affects @@ -620,8 +620,8 @@ Discussion ---------- * ``uarray`` blogpost: https://labs.quansight.org/blog/2019/07/uarray-update-api-changes-overhead-and-comparison-to-__array_function__/ -* The discussion section of NEP-18: https://numpy.org/neps/nep-0018-array-function-protocol.html#discussion -* NEP-22: https://numpy.org/neps/nep-0022-ndarray-duck-typing-overview.html +* The discussion section of :ref:`NEP18` +* :ref:`NEP22` * Dask issue #4462: https://github.com/dask/dask/issues/4462 * PR #13046: https://github.com/numpy/numpy/pull/13046 * Dask issue #4883: https://github.com/dask/dask/issues/4883 @@ -636,11 +636,11 @@ References and footnotes .. [1] uarray, A general dispatch mechanism for Python: https://uarray.readthedocs.io -.. [2] NEP 18 — A dispatch mechanism for NumPy’s high level array functions: https://numpy.org/neps/nep-0018-array-function-protocol.html +.. [2] :ref:`NEP18` -.. [3] NEP 22 — Duck typing for NumPy arrays – high level overview: https://numpy.org/neps/nep-0022-ndarray-duck-typing-overview.html +.. [3] :ref:`NEP22` -.. 
[4] NEP 13 — A Mechanism for Overriding Ufuncs: https://numpy.org/neps/nep-0013-ufunc-overrides.html +.. [4] :ref:`NEP13` .. [5] Reply to Adding to the non-dispatched implementation of NumPy methods: https://mail.python.org/archives/list/numpy-discussion@python.org/thread/5GUDMALWDIRHITG5YUOCV343J66QSX3U/#5GUDMALWDIRHITG5YUOCV343J66QSX3U @@ -650,7 +650,7 @@ References and footnotes .. [8] unumpy: NumPy, but implementation-independent: https://unumpy.readthedocs.io -.. [9] NEP 30 — Duck Typing for NumPy Arrays - Implementation: https://www.numpy.org/neps/nep-0030-duck-array-protocol.html +.. [9] :ref:`NEP30` .. [10] http://scipy.github.io/devdocs/fft.html#backend-control diff --git a/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst b/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst index 63593277dd9a..09a376298245 100644 --- a/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst +++ b/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst @@ -430,17 +430,17 @@ Discussion References ---------- -.. [1] `NEP 18 - A dispatch mechanism for NumPy's high level array functions `_. +.. [1] :ref:`NEP18`. .. [2] `PEP 3102 — Keyword-Only Arguments `_. -.. [3] `NEP 30 — Duck Typing for NumPy Arrays - Implementation `_. +.. [3] :ref:`NEP30`. -.. [4] `NEP 31 — Context-local and global overrides of the NumPy API `_. +.. [4] :ref:`NEP31`. .. [5] `Array creation routines `_. -.. [6] `NEP 37 — A dispatch protocol for NumPy-like modules `_. +.. [6] :ref:`NEP37`. .. [7] `Implementation's pull request on GitHub `_ diff --git a/doc/neps/nep-0036-fair-play.rst b/doc/neps/nep-0036-fair-play.rst index 5d55c8aa25d5..022bf9435513 100644 --- a/doc/neps/nep-0036-fair-play.rst +++ b/doc/neps/nep-0036-fair-play.rst @@ -121,10 +121,8 @@ Fair play rules 4. *DO* use official mechanism to engage with the API. 
- Protocols such as `__array_ufunc__ - `__ and - `__array_function__ - `__ + Protocols such as :ref:`__array_ufunc__ ` and + :ref:`__array_function__ ` were designed to help external packages interact more easily with NumPy. E.g., the latter allows objects from foreign libraries to pass through NumPy. We actively encourage using any of diff --git a/doc/neps/nep-0037-array-module.rst b/doc/neps/nep-0037-array-module.rst index 653141661421..7777cc73c2a6 100644 --- a/doc/neps/nep-0037-array-module.rst +++ b/doc/neps/nep-0037-array-module.rst @@ -29,7 +29,7 @@ expect will make it easier to adopt. Why ``__array_function__`` hasn't been enough --------------------------------------------- -There are two broad ways in which NEP-18 has fallen short of its goals: +There are two broad ways in which :ref:`NEP-18 ` has fallen short of its goals: 1. **Backwards compatibility concerns**. `__array_function__` has significant implications for libraries that use it: @@ -64,7 +64,7 @@ There are two broad ways in which NEP-18 has fallen short of its goals: - **Array creation** routines (e.g., ``np.arange`` and those in ``np.random``) need some other mechanism for indicating what type of - arrays to create. `NEP 35 `_ + arrays to create. :ref:`NEP 35 ` proposed adding optional ``like=`` arguments to functions without existing array arguments. However, we still lack any mechanism to override methods on objects, such as those needed by @@ -72,8 +72,7 @@ There are two broad ways in which NEP-18 has fallen short of its goals: - **Array conversion** can't reuse the existing coercion functions like ``np.asarray``, because ``np.asarray`` sometimes means "convert to an exact ``np.ndarray``" and other times means "convert to something _like_ - a NumPy array." This led to the `NEP 30 - `_ proposal for + a NumPy array." 
This led to the :ref:`NEP 30 ` proposal for a separate ``np.duckarray`` function, but this still does not resolve how to cast one duck array into a type matching another duck array. @@ -144,8 +143,8 @@ we can simply pull out the appropriate submodule: noise = module.random.randn(*array.shape) return array + noise -We can also write the duck-array ``stack`` function from `NEP 30 -`_, without the need +We can also write the duck-array ``stack`` function from +:ref:`NEP 30 `, without the need for a new ``np.duckarray`` function: .. code:: python diff --git a/doc/neps/nep-0038-SIMD-optimizations.rst b/doc/neps/nep-0038-SIMD-optimizations.rst index eb1157342948..445c008a76c3 100644 --- a/doc/neps/nep-0038-SIMD-optimizations.rst +++ b/doc/neps/nep-0038-SIMD-optimizations.rst @@ -8,7 +8,7 @@ NEP 38 — Using SIMD optimization instructions for performance :Status: Final :Type: Standards :Created: 2019-11-25 -:Resolution: https://mail.python.org/archives/list/numpy-discussion@python.org/thread/PVWJ74UVBRZ5ZWF6MDU7EUSJXVNILAQB/#PVWJ74UVBRZ5ZWF6MDU7EUSJXVNILAQB +:Resolution: `NumPy Discussion `_ Abstract diff --git a/doc/neps/nep-0043-extensible-ufuncs.rst b/doc/neps/nep-0043-extensible-ufuncs.rst index 4bac8d7a3282..1370e14d6c4e 100644 --- a/doc/neps/nep-0043-extensible-ufuncs.rst +++ b/doc/neps/nep-0043-extensible-ufuncs.rst @@ -241,7 +241,7 @@ to define string equality, will be added to a ufunc. nin = 1 nout = 1 # DTypes are stored on the BoundArrayMethod and not on the internal - # ArrayMethod, to reference cyles. + # ArrayMethod, to reference cycles. 
DTypes = (String, String, Bool) def resolve_descriptors(self: ArrayMethod, DTypes, given_descrs): diff --git a/doc/neps/nep-0044-restructuring-numpy-docs.rst b/doc/neps/nep-0044-restructuring-numpy-docs.rst index 9c4721664fd4..d1a1a0827ad7 100644 --- a/doc/neps/nep-0044-restructuring-numpy-docs.rst +++ b/doc/neps/nep-0044-restructuring-numpy-docs.rst @@ -86,7 +86,8 @@ up-to-date official documentation that can be easily updated. Status and ideas of each type of doc content -------------------------------------------- -**Reference guide** +Reference guide +^^^^^^^^^^^^^^^ NumPy has a quite complete reference guide. All functions are documented, most have examples, and most are cross-linked well with *See Also* sections. Further @@ -94,7 +95,8 @@ improving the reference guide is incremental work that can be done (and is being done) by many people. There are, however, many explanations in the reference guide. These can be moved to a more dedicated Explanations section on the docs. -**How-to guides** +How-to guides +^^^^^^^^^^^^^ NumPy does not have many how-to's. The subclassing and array ducktyping section may be an example of a how-to. Others that could be added are: @@ -106,7 +108,8 @@ may be an example of a how-to. Others that could be added are: - Performance (memory layout, profiling, use with Numba, Cython, or Pythran) - Writing generic code that works with NumPy, Dask, CuPy, pydata/sparse, etc. -**Explanations** +Explanations +^^^^^^^^^^^^ There is a reasonable amount of content on fundamental NumPy concepts such as indexing, vectorization, broadcasting, (g)ufuncs, and dtypes. This could be @@ -114,7 +117,7 @@ organized better and clarified to ensure it's really about explaining the concep and not mixed with tutorial or how-to like content. There are few explanations about anything other than those fundamental NumPy -concepts. +concepts. 
Some examples of concepts that could be expanded: @@ -125,7 +128,8 @@ Some examples of concepts that could be expanded: In addition, there are many explanations in the Reference Guide, which should be moved to this new dedicated Explanations section. -**Tutorials** +Tutorials +^^^^^^^^^ There's a lot of scope for writing better tutorials. We have a new *NumPy for absolute beginners tutorial* [3]_ (GSoD project of Anne Bonner). In addition we @@ -154,19 +158,15 @@ propose a *How to write a tutorial* document, which would help users contribute new high-quality content to the documentation. Data sets ---------- +~~~~~~~~~ Using interesting data in the NumPy docs requires giving all users access to that data, either inside NumPy or in a separate package. The former is not the best idea, since it's hard to do without increasing the size of NumPy -significantly. Even for SciPy there has so far been no consensus on this (see -`scipy PR 8707 `_ on adding a new -``scipy.datasets`` subpackage). - -So we'll aim for a new (pure Python) package, named ``numpy-datasets`` or -``scipy-datasets`` or something similar. That package can take some lessons from -how, e.g., scikit-learn ships data sets. Small data sets can be included in the -repo, large data sets can be accessed via a downloader class or function. +significantly. + +Whenever possible, documentation pages should use examples from the +:mod:`scipy.datasets` package. 
Related work ============ diff --git a/doc/neps/nep-0046-sponsorship-guidelines.rst b/doc/neps/nep-0046-sponsorship-guidelines.rst index a4c0da29d367..4986dab9bfe0 100644 --- a/doc/neps/nep-0046-sponsorship-guidelines.rst +++ b/doc/neps/nep-0046-sponsorship-guidelines.rst @@ -85,8 +85,7 @@ Sponsors will get acknowledged through: - a small logo displayed on the front page of the NumPy website - prominent logo placement on https://numpy.org/about/ - logos displayed in talks about NumPy by maintainers -- announcements of the sponsorship on the NumPy mailing list and the numpy-team - Twitter account +- announcements of the sponsorship on the NumPy mailing list In addition to Sponsors, we already have the concept of Institutional Partner (defined in NumPy's diff --git a/doc/neps/nep-0048-spending-project-funds.rst b/doc/neps/nep-0048-spending-project-funds.rst index f2071587ce28..8e58d1a3ba04 100644 --- a/doc/neps/nep-0048-spending-project-funds.rst +++ b/doc/neps/nep-0048-spending-project-funds.rst @@ -125,7 +125,7 @@ a volunteer in a reasonable amount of time. There are also many tasks, activities, and projects outside of development work that are important and could enhance the project or community - think of, for example, user surveys, translations, outreach, dedicated -mentoring of newcomers, community organizating, website improvements, and +mentoring of newcomers, community organizing, website improvements, and administrative tasks. 
Time of people to perform tasks is also not the only thing that funds can be diff --git a/doc/neps/nep-0049.rst b/doc/neps/nep-0049-data-allocation-strategies.rst similarity index 98% rename from doc/neps/nep-0049.rst rename to doc/neps/nep-0049-data-allocation-strategies.rst index 180cfea17156..ec18f7a315d9 100644 --- a/doc/neps/nep-0049.rst +++ b/doc/neps/nep-0049-data-allocation-strategies.rst @@ -8,8 +8,7 @@ NEP 49 — Data allocation strategies :Status: Final :Type: Standards Track :Created: 2021-04-18 -:Resolution: https://mail.python.org/archives/list/numpy-discussion@python.org/thread/YZ3PNTXZUT27B6ITFAD3WRSM3T3SRVK4/#PKYXCTG4R5Q6LIRZC4SEWLNBM6GLRF26 - +:Resolution: `NumPy Discussion `_ Abstract -------- diff --git a/doc/neps/nep-0050-scalar-promotion.rst b/doc/neps/nep-0050-scalar-promotion.rst index fc161ef9629f..974f6691d363 100644 --- a/doc/neps/nep-0050-scalar-promotion.rst +++ b/doc/neps/nep-0050-scalar-promotion.rst @@ -4,7 +4,7 @@ NEP 50 — Promotion rules for Python scalars =========================================== :Author: Sebastian Berg -:Status: Draft +:Status: Final :Type: Standards Track :Created: 2021-05-25 @@ -214,7 +214,7 @@ arrays that are not 0-D, such as ``array([2])``. - ``int64(301)`` - *Exception* [T5]_ * - ``uint8(100) + 200`` - - ``int64(301)`` + - ``int64(300)`` - ``uint8(44)`` *and* ``RuntimeWarning`` [T6]_ * - ``float32(1) + 3e100`` - ``float64(3e100)`` @@ -509,9 +509,9 @@ will be ignored. This means, that operations will never silently use the The user will have to write one of:: np.array([3]) + np.array(2**100) - np.array([3]) + np.array(2**100, dtype=object) + np.array([3]) + np.array(2**100, dtype=np.object_) -As such implicit conversion to ``object`` should be rare and the work-around +As such implicit conversion to ``object_`` should be rare and the work-around is clear, we expect that the backwards compatibility concerns are fairly small. 
diff --git a/doc/neps/nep-0052-python-api-cleanup.rst b/doc/neps/nep-0052-python-api-cleanup.rst index a161dbd91b8f..870877a91bf6 100644 --- a/doc/neps/nep-0052-python-api-cleanup.rst +++ b/doc/neps/nep-0052-python-api-cleanup.rst @@ -8,7 +8,7 @@ NEP 52 — Python API cleanup for NumPy 2.0 :Author: StÊfan van der Walt :Author: Nathan Goldbaum :Author: Mateusz SokÃŗÅ‚ -:Status: Accepted +:Status: Final :Type: Standards Track :Created: 2023-03-28 :Resolution: https://mail.python.org/archives/list/numpy-discussion@python.org/thread/QLMPFTWA67DXE3JCUQT2RIRLQ44INS4F/ diff --git a/doc/neps/nep-0053-c-abi-evolution.rst b/doc/neps/nep-0053-c-abi-evolution.rst index 6abdb1d854cf..16744dc0fde3 100644 --- a/doc/neps/nep-0053-c-abi-evolution.rst +++ b/doc/neps/nep-0053-c-abi-evolution.rst @@ -253,7 +253,7 @@ a user to: yet compatible. The import of ``numpy2_compat`` (and an error when it is missing) will be -inserted by the NumPy eaders as part of the ``import_array()`` call. +inserted by the NumPy headers as part of the ``import_array()`` call. Alternatives ============ diff --git a/doc/neps/nep-0054-simd-cpp-highway.rst b/doc/neps/nep-0054-simd-cpp-highway.rst index f06de05ca036..11452fc9b5a3 100644 --- a/doc/neps/nep-0054-simd-cpp-highway.rst +++ b/doc/neps/nep-0054-simd-cpp-highway.rst @@ -1,11 +1,11 @@ .. _NEP54: =================================================================================== -NEP 54 — SIMD infrastructure evolution: adopting Google Highway when moving to C++? +NEP 54 — SIMD infrastructure evolution: adopting Google Highway when moving to C++ =================================================================================== :Author: Sayed Adel, Jan Wassenberg, Matti Picus, Ralf Gommers, Chris Sidebottom -:Status: Draft +:Status: Accepted :Type: Standards Track :Created: 2023-07-06 :Resolution: TODO @@ -17,7 +17,7 @@ Abstract We are moving the SIMD intrinsic framework, Universal Intrinsics, from C to C++. We have also moved to Meson as the build system. 
The Google Highway intrinsics project is proposing we use Highway instead of our Universal -Intrinsics as described in `NEP 38`_. This is a complex and multi-faceted +Intrinsics as described in :ref:`NEP 38 `. This is a complex and multi-faceted decision - this NEP is an attempt to describe the trade-offs involved and what would need to be done. @@ -350,7 +350,6 @@ References and Footnotes this NEP as an example) or licensed under the `Open Publication License`_. .. _Open Publication License: https://www.opencontent.org/openpub/ -.. _`NEP 38`: https://numpy.org/neps/nep-0038-SIMD-optimizations.html .. _`gh-20866`: https://github.com/numpy/numpy/pull/20866 .. _`gh-21057`: https://github.com/numpy/numpy/pull/21057 .. _`gh-23096`: https://github.com/numpy/numpy/pull/23096 diff --git a/doc/neps/nep-0055-string_dtype.rst b/doc/neps/nep-0055-string_dtype.rst index 2e3f3cbf03c4..28dc9572ed6a 100644 --- a/doc/neps/nep-0055-string_dtype.rst +++ b/doc/neps/nep-0055-string_dtype.rst @@ -51,9 +51,7 @@ needs and then changes in the Python ecosystem. Support for strings was added to NumPy to support users of the NumArray ``chararray`` type. Remnants of this are still visible in the NumPy API: string-related functionality lives in ``np.char``, to support the -``np.char.chararray`` class. This class is not formally deprecated, but has a -had comment in the module docstring suggesting to use string dtypes instead -since NumPy 1.4. +``np.char.chararray`` class, which was deprecated in NumPy 2.5. NumPy's ``bytes_`` DType was originally used to represent the Python 2 ``str`` type before Python 3 support was added to NumPy. The bytes DType makes the most @@ -224,7 +222,7 @@ to fixed-width unicode arrays:: In [3]: data = [str(i) * 10 for i in range(100_000)] - In [4]: %timeit arr_object = np.array(data, dtype=object) + In [4]: %timeit arr_object = np.array(data, dtype=np.object_) 3.15 ms Âą 74.4 Âĩs per loop (mean Âą std. dev. 
of 7 runs, 100 loops each) In [5]: %timeit arr_stringdtype = np.array(data, dtype=StringDType()) @@ -242,7 +240,7 @@ for strings, the string loading performance of ``StringDType`` should improve. String operations have similar performance:: - In [7]: %timeit np.array([s.capitalize() for s in data], dtype=object) + In [7]: %timeit np.array([s.capitalize() for s in data], dtype=np.object_) 31.6 ms Âą 728 Âĩs per loop (mean Âą std. dev. of 7 runs, 10 loops each) In [8]: %timeit np.char.capitalize(arr_stringdtype) @@ -990,7 +988,7 @@ in the array buffer as a short string. No matter where it is stored, once a string is initialized it is marked with the ``NPY_STRING_INITIALIZED`` flag. This lets us clearly distinguish between an -unitialized empty string and a string that has been mutated into the empty +uninitialized empty string and a string that has been mutated into the empty string. The size of the allocation is stored in the arena to allow reuse of the arena diff --git a/doc/neps/nep-0056-array-api-main-namespace.rst b/doc/neps/nep-0056-array-api-main-namespace.rst index 5fb8ad250a81..41e070444e81 100644 --- a/doc/neps/nep-0056-array-api-main-namespace.rst +++ b/doc/neps/nep-0056-array-api-main-namespace.rst @@ -302,7 +302,7 @@ three types of behavior rather than two - ``copy=None`` means "copy if needed". an exception because they use* ``copy=False`` *explicitly in their copy but a copy was previously made anyway, they have to inspect their code and determine whether the intent of the code was the old or the new semantics (both seem -rougly equally likely), and adapt the code as appropriate. We expect most cases +roughly equally likely), and adapt the code as appropriate. We expect most cases to be* ``np.array(..., copy=False)``, *because until a few years ago that had lower overhead than* ``np.asarray(...)``. 
*This was solved though, and* ``np.asarray(...)`` *is idiomatic NumPy usage.* diff --git a/doc/neps/nep-0057-numpy-platform-support.rst b/doc/neps/nep-0057-numpy-platform-support.rst new file mode 100644 index 000000000000..570287d4d0d4 --- /dev/null +++ b/doc/neps/nep-0057-numpy-platform-support.rst @@ -0,0 +1,328 @@ +.. _NEP57: + +=============================== +NEP 57 — NumPy platform support +=============================== + +:Author: Ralf Gommers +:Status: Draft +:Type: Process +:Created: 2026-01-30 +:Resolution: - + + +Abstract +-------- + +This NEP documents how a platform - i.e., a specific operating system, CPU +architecture and CPython interpreter - becomes supported in NumPy, what +platforms are currently supported, and were supported in the (recent) past. + + +Motivation and scope +-------------------- + +This policy is drafted now (early 2026) because there is a lot of interest in +extending the number of platforms NumPy supports through wheels in particular. +It is a policy specific to NumPy - even though other projects may possibly want +to refer to it - for several reasons: + +* It involves committing to a nontrivial amount of maintainer effort, +* Personal commitment from a maintainer may make the difference between a + yes and a no of supporting a platform (e.g., NumPy supported PyPy for a + long time because of the efforts of one maintainer) +* Support for a platform being possible at all may depend on features of the + code base (e.g., NumPy supports 32-bit Python on Windows while SciPy does + not because there's no suitable compiler toolchain for it). 
+* The number of wheels depends on whether the Stable ABI can be used (NumPy + is more performance-sensitive for small arrays, so can't use it) + + +The scope of this NEP includes: + +- The definition of tiers of support for platforms by NumPy +- Policies and decision making for moving a platform to a different tier + +Out of scope for this NEP are: + +- Binary distributions of NumPy outside of PyPI +- Partial testing in CI (e.g., testing only SIMD-specific code under QEMU) +- More detailed breakdowns of wheels and support matrices, like compiler flavor + and minimum version, or the BLAS library that is used in the build. + + +Support tiers +------------- + +*This section is inspired by PEP 11 (CPython platform support), although +definitions are not matching, because NumPy is not nearly as large a project as +CPython.* + +Platform support is broken down into tiers. Each tier comes with different +requirements which lead to different promises being made about support. + +To be promoted to a tier, +`Steering council +`__ +support is required and is expected to be driven by team consensus. Demotion to +a lower tier occurs when the requirements of the current tier are no longer met +for a platform for an extended period of time based on the judgment of the +Steering Council. For platforms which no longer meet the requirements of any +tier by the middle of a new feature release cycle, an announcement will be made +to warn the community of the pending removal of support for the platform. If +the platform is not brought into line for at least one of the tiers by the +first release candidate, it will be listed as unsupported in this NEP. + + +General principles +~~~~~~~~~~~~~~~~~~ + +1. Maintainer effort is expensive, and we collectively have limited bandwidth - + hence platform support is strongly influenced by the willingness of one or + more maintainers to put in that effort. + + - Maintainers are trusted by the whole team. 
We generally do not question + *why* a maintainer is motivated to put in the effort. If they are being + paid for their effort or doing it as part of their job, that is fine - + however they should disclose this to the Steering Council, and indicate + whether long-term support is conditional on their employment or contractor + status for the support tiers that include releasing wheels to PyPI. + + *Rationale: releasing wheels to PyPI is a long-term commitment by the + project as a whole, see the backwards compatibility section below.* + +2. CI support for the platform is required, preferably with native runners. + Free is best, however decisions on paid CI are up to the Steering Council. + Emulation for running the test suite (e.g., under QEMU) or self-hosted + buildbots are slower and less reliable, hence not preferred. + +3. There should be broad enough demand for support for the platform for the + tiers that include releasing wheels to PyPI. + + - A previously used rule of thumb: >=0.5% of the user base should be on this + platform. There may be reasons to deviate from this rule of thumb. + + *Note: finding clean data sources isn't always easy. If wheels are already + being shipped, for NumPy or for a comparable project, then download data + from PyPI may be obtained through BigQuery. For new platforms, sources + like the* + `Steam Hardware & Software Survey `__ + *may have to be used.* + +4. Adding a regular CI job (i.e., not aimed at uploading wheels to PyPI) for a + platform to the NumPy CI matrix is much cheaper, and easily reverted in case + of problems. The bar for adding such jobs is low, and assessed on a + case-by-case basis. + +5. For all platforms in any supported tier: the relevant prerequisites in our + dependencies must be met. E.g., build tools have support, and for wheels + there is support in CPython, PyPI, cibuildwheel, manylinux, and + ``scipy-openblas64`` or another easily-integrated BLAS library. + +6. 
Decision making: + + - Moving a platform to a lower support tier must be discussed on the mailing list. + The circumstances for each platform are unique so the community will + evaluate each proposal to demote a platform on a case-by-case basis. + - Moving a platform to a higher support tier, if that higher tier includes + releasing wheels on PyPI for that platform, must be discussed on the + mailing list. + - Adding an entry to a support tier in this NEP for (a) an unsupported + platform or (b) a tier which does not include uploading wheels to PyPI can + be done on GitHub through a regular pull request (assuming it's clear from + the discussion that the relevant maintainers agree it doesn't need to hit + the mailing list). + + +Releasing wheels to PyPI +'''''''''''''''''''''''' + +The wheels that the NumPy team releases on PyPI for the ``numpy`` package get +hundreds of millions of downloads a month. We therefore highly value both +reliability and supply chain security of those release artifacts. Compromising +on those aspects is unlikely to be acceptable for the NumPy team. + +The details of how wheels are produced, tested and distributed can be found in +the `numpy/numpy-release `__ +repository. Some key requirements of the current setup, which aren't likely to +change soon, are: + +1. Must be buildable on publicly-visible CI infrastructure (i.e., GitHub). +2. Must be tested well enough (meaning native runners are preferred; QEMU is quite slow). +3. Must be publishable to PyPI automatically, through PyPI's trusted publishing + mechanism. + + +Tier 1 +~~~~~~ + +- Must have regular CI support on GitHub or (exceptionally) through another + well-integrated CI platform that the release team and Steering Council deem + acceptable. +- The NumPy team releases wheels on PyPI for this platform. +- CI failures (either regular CI or wheel build CI) block releases. +- All maintainers are responsible to keep the ``main`` branch and wheel builds + working. 
+ +Tier 1 platforms: + ++---------------------------+--------------------------------------------------------------------------+ +| Platform | Notes | ++===========================+==========================================================================+ +| Windows x86-64 | | ++---------------------------+--------------------------------------------------------------------------+ +| Windows arm64 | | ++---------------------------+--------------------------------------------------------------------------+ +| Windows x86 | 32-bit Python: note this is shipped without BLAS, it's legacy | ++---------------------------+--------------------------------------------------------------------------+ +| Linux x86-64 (manylinux) | | ++---------------------------+--------------------------------------------------------------------------+ +| Linux aarch64 (manylinux) | | ++---------------------------+--------------------------------------------------------------------------+ +| macOS arm64 | | ++---------------------------+--------------------------------------------------------------------------+ +| macOS x86-64 | Expected to move to unsupported by 2027/28 once the platform is dropped | +| | by GitHub | ++---------------------------+--------------------------------------------------------------------------+ + + +Tier 2 +~~~~~~ + +- Must have regular CI support, either as defined for Tier 1 or through a + reliable self-hosted service. +- The NumPy team releases wheels on PyPI for this platform. +- CI failures block releases. +- Must have at least one maintainer who commits to take primary and long-term + responsibility for keeping the ``main`` branch and wheel builds working. 
+ +Tier 2 platforms: + ++---------------------------+-------+------------------------------------------+ +| Platform | Notes | Contacts | ++===========================+=======+==========================================+ +| Linux x86-64 (musllinux) | | Ralf Gommers | ++---------------------------+-------+------------------------------------------+ +| Linux aarch64 (musllinux) | | Ralf Gommers | ++---------------------------+-------+------------------------------------------+ +| Free-threaded CPython | | Nathan Goldbaum, Kumar Aditya, | +| | | Ralf Gommers | ++---------------------------+-------+------------------------------------------+ + + +Tier 3 +~~~~~~ + +- Is supported as part of NumPy's regular CI setup for the ``main`` branch. CI + support as defined for Tier 2. +- No wheels are released on PyPI for this platform. +- CI failures block releases (skips may be applied when the failure is clearly + platform-specific and does not indicate a regression in core functionality). +- Must have at least one maintainer or a regular contributor trusted by the + NumPy maintainers who commits to take responsibility for CI on the ``main`` + branch working. 
+ +Tier 3 platforms: + ++--------------------+----------------------------------------+----------------------------------+ +| Platform | Notes | Contacts | ++====================+========================================+==================================+ +| FreeBSD | Runs on Cirrus CI | Ralf Gommers | ++--------------------+----------------------------------------+----------------------------------+ +| Linux ppc64le | Runs on IBM-provided self-hosted | Sandeep Gupta | +| | runners, see gh-22318_ | | ++--------------------+----------------------------------------+----------------------------------+ +| Emscripten/Pyodide | We currently provide nightly wheels, | Agriya Khetarpal, Gyeongjae Choi | +| | used for interactive docs | | ++--------------------+----------------------------------------+----------------------------------+ + + +Unsupported platforms +~~~~~~~~~~~~~~~~~~~~~ + +All platforms not listed in the above tiers are unsupported by the NumPy team. +We do not develop and test on such platforms, and so cannot provide any +promises that NumPy will work on them. + +However, the code base does include unsupported code – that is, code specific +to unsupported platforms. Contributions in this area are welcome as long as +they: + +- pose a minimal maintenance burden to the core team, and +- benefit substantially more people than the contributor. 
+ +Unsupported platforms (previously in a supported tier, may be an incomplete +list): + ++------------------------------------+--------------------------------------------------+ +| Platform | Notes | ++====================================+==================================================+ +| PyPy | Was Tier 2 for releases <=2.4.x, see gh-30416_ | ++------------------------------------+--------------------------------------------------+ +| macOS ppc64, universal, universal2 | | ++------------------------------------+--------------------------------------------------+ +| Linux i686 | Dropped in 1.22.0, low demand | ++------------------------------------+--------------------------------------------------+ +| Linux on IBM Z (s390x) | CI jobs used to run on TravisCI | ++------------------------------------+--------------------------------------------------+ + +Unsupported platforms (known interest in moving to a higher tier): + ++----------+------------------+ +| Platform | Notes | ++==========+==================+ +| iOS | See gh-28759_ | ++----------+------------------+ +| Android | See gh-30412_ | ++----------+------------------+ +| RISC-V | See gh-30216_ | ++----------+------------------+ +| WASI | See gh-25859_ | ++----------+------------------+ + + +Backward compatibility +---------------------- + +Moving a platform to a lower tier of support is generally backwards compatible. +The exception is stopping to release wheels on PyPI for a platform. That causes +significant disruption for existing users on that platform. Their install commands +(e.g., ``pip install numpy``) may stop working because if a new release no longer +has wheels for the platform, by default ``pip`` will try to build from source rather +than using a wheel from an older version of ``numpy``. Therefore, we should be very +reluctant to drop wheels for any platform. + + +Discussion +---------- + +- `ENH: Provide Windows ARM64 wheels (numpy#22530) `__ +- `Releasing PowerPC (ppc64le) wheels? 
(numpy#22318) `__ +- `MAINT: drop support for PyPy (numpy#30416) `__ +- `ENH: Build and distribute manylinux wheels for riscv64 `__ +- `BLD: Add support for building iOS wheels (numpy#28759) `__ +- `BLD: Add Android support `__ +- `ENH: WASI Build `__ +- `PEP 11 - CPython platform support `__ +- `Debian's supported architectures `__ +- `Discussion about supported platforms for wheels (scientific-python issue/discussion (Nov 2025) `__ +- `What platforms should wheels be provided for by default? (Packaging Discourse thread, 2026) `__ +- `Expectations that projects provide ever more wheels (pypackaging-native) `__ + + +References and footnotes +------------------------ + +.. _gh-22318: https://github.com/numpy/numpy/issues/22318 +.. _gh-22530: https://github.com/numpy/numpy/issues/22530 +.. _gh-25859: https://github.com/numpy/numpy/issues/25859 +.. _gh-28759: https://github.com/numpy/numpy/pull/28759 +.. _gh-30216: https://github.com/numpy/numpy/issues/30216 +.. _gh-30412: https://github.com/numpy/numpy/pull/30412 +.. _gh-30416: https://github.com/numpy/numpy/issues/30416 + + +Copyright +--------- + +This document has been placed in the public domain. diff --git a/doc/neps/roadmap.rst b/doc/neps/roadmap.rst index 6a9761b05230..f4a9907dcc7e 100644 --- a/doc/neps/roadmap.rst +++ b/doc/neps/roadmap.rst @@ -18,25 +18,17 @@ may include (among other things) interoperability protocols, better duck typing support and ndarray subclass handling. The key goal is: *make it easy for code written for NumPy to also work with -other NumPy-like projects.* This will enable GPU support via, e.g, CuPy or JAX, +other NumPy-like projects.* This will enable GPU support via, e.g, CuPy, JAX or PyTorch, distributed array support via Dask, and writing special-purpose arrays (either from scratch, or as a ``numpy.ndarray`` subclass) that work well with SciPy, -scikit-learn and other such packages. +scikit-learn and other such packages. 
A large step forward in this area was +made in NumPy 2.0, with adoption of and compliance with the array API standard +(v2022.12, see :ref:`NEP47`). Future work in this direction will include +support for newer versions of the array API standard, and adding features as +needed based on real-world experience and needs. -The ``__array_ufunc__`` and ``__array_function__`` protocols are stable, but -do not cover the whole API. New protocols for overriding other functionality -in NumPy are needed. Work in this area aims to bring to completion one or more -of the following proposals: - -- :ref:`NEP30` -- :ref:`NEP31` -- :ref:`NEP35` -- :ref:`NEP37` - -In addition we aim to provide ways to make it easier for other libraries to -implement a NumPy-compatible API. This may include defining consistent subsets -of the API, as discussed in `this section of NEP 37 -`__. +In addition, the ``__array_ufunc__`` and ``__array_function__`` protocols +fulfill a role here - they are stable and used by several downstream projects. Performance @@ -46,17 +38,31 @@ Improvements to NumPy's performance are important to many users. We have focused this effort on Universal SIMD (see :ref:`NEP38`) intrinsics which provide nice improvements across various hardware platforms via an abstraction layer. The infrastructure is in place, and we welcome follow-on PRs to add -SIMD support across all relevant NumPy functions. +SIMD support across relevant NumPy functionality. + +Transitioning from C to C++, both in the SIMD infrastructure and in NumPy +internals more widely, is in progress. We have also started to make use of +Google Highway (see :ref:`NEP54`), and that usage is likely to expand. Work +towards support for newer SIMD instruction sets, like SVE on arm64, is ongoing. Other performance improvement ideas include: -- A better story around parallel execution. +- A better story around parallel execution (related is support for free-threaded + CPython, see further down). 
+- Enable the ability to allow NumPy to use faster, but less precise, + implementations for ufuncs. + Until now, the only state modifying ufunc behavior has been ``np.errstate``. + But, with NumPy 2.0 improvements in the ``np.errstate`` and the ufunc C + implementation make this type of addition easier. - Optimizations in individual functions. -- Reducing ufunc and ``__array_function__`` overhead. Furthermore we would like to improve the benchmarking system, in terms of coverage, -easy of use, and publication of the results (now -`here `__) as part of the docs or website. +easy of use, and publication of the results. Benchmarking PRs/branches compared +to the `main` branch is a primary purpose, and required for PRs that are +performance-focused (e.g., adding SIMD acceleration to a function). In +addition, we'd like a performance overview like the one we had `here +`__, set up in a way that is more +maintainable long-term. Documentation and website @@ -68,69 +74,154 @@ documentation on many topics are missing or outdated. See :ref:`NEP44` for planned improvements. Adding more tutorials is underway in the `numpy-tutorials repo `__. -Our website (https://numpy.org) was completely redesigned recently. We aim to -further improve it by adding translations, more case studies and other -high-level content, and more (see `this tracking issue `__). +We also intend to make all the example code in our documentation interactive - +work is underway to do so via ``jupyterlite-sphinx`` and Pyodide. NumPy 2.3.0 +provides interactive documentation for examples as a pilot for this effort. + +Our website (https://numpy.org) is in good shape. Further work on expanding the +number of languages that the website is translated in is desirable. As are +improvements to the interactive notebook widget, through JupyterLite. Extensibility ------------- -We aim to make it much easier to extend NumPy. 
The primary topic here is to -improve the dtype system - see :ref:`NEP41` and related NEPs linked from it. -Concrete goals for the dtype system rewrite are: - -- Easier custom dtypes: +We aim to continue making it easier to extend NumPy. The primary topic here is to +improve the dtype system - see for example :ref:`NEP41` and related NEPs linked +from it. In NumPy 2.0, a `new C API for user-defined dtypes `__ +was made public. We aim to encourage its usage and improve this API further, +including support for writing a dtype in Python. - - Simplify and/or wrap the current C-API - - More consistent support for dtype metadata - - Support for writing a dtype in Python +Ideas for new dtypes that may be developed outside of the main NumPy repository +first, and that could potentially be upstreamed into NumPy later, include: -- Allow adding (a) new string dtype(s). This could be encoded strings with - fixed-width storage (e.g., ``utf8`` or ``latin1``), and/or a variable length - string dtype. The latter could share an implementation with ``dtype=object``, - but be explicitly type-checked. - One of these should probably be the default for text data. The current - string dtype support is neither efficient nor user friendly. +- A quad-precision (128-bit) dtype +- A ``bfloat16`` dtype +- A fixed-width string dtype which supports encodings (e.g., ``utf8`` or + ``latin1``) +- A unit dtype +We further plan to extend the ufunc C API as needs arise. +One possibility here is creating a new, more powerful, API to allow hooking +into existing NumPy ufunc implementations. User experience --------------- Type annotations ```````````````` -NumPy 1.20 adds type annotations for most NumPy functionality, so users can use -tools like `mypy`_ to type check their code and IDEs can improve their support +Type annotations for NumPy functionality are complete, so users can use tools +like `mypy`_ to type check their code and IDEs can improve their support for NumPy. 
Improving those type annotations, for example to support annotating -array shapes and dtypes, is ongoing. +array shapes (see `gh-16544 `__), +is ongoing. Platform support ```````````````` We aim to increase our support for different hardware architectures. This includes adding CI coverage when CI services are available, providing wheels on -PyPI for POWER8/9 (``ppc64le``), providing better build and install -documentation, and resolving build issues on other platforms like AIX. +PyPI for platforms that are in high enough demand (e.g., we added ``musllinux`` +ones for NumPy 2.0), and resolving build issues on platforms that we don't test +in CI (e.g., AIX). + +We intend to write a NEP covering the support levels we provide and what is +required for a platform to move to a higher tier of support, similar to +`PEP 11 `__. + +Further consistency fixes to promotion and scalar logic +``````````````````````````````````````````````````````` +NumPy 2.0 fixed many issues around promotion especially with respect to scalars. +We plan to continue fixing remaining inconsistencies. +For example, NumPy converts 0-D objects to scalars, and some promotions +still allowed by NumPy are problematic. + +Support for free-threaded CPython +````````````````````````````````` +CPython 3.13 will be the first release to offer a free-threaded build (i.e., +a CPython build with the GIL disabled). Work is in progress to support this +well in NumPy. After that is stable and complete, there may be opportunities to +actually make use of the potential for performance improvements from +free-threaded CPython, or make it easier to do so for NumPy's users. + +Binary size reduction +````````````````````` +The number of downloads of NumPy from PyPI and other platforms continues to +increase - as of May 2024 we're at >200 million downloads/month from PyPI +alone. 
Reducing the size of an installed NumPy package has many benefits: +faster installs, lower disk space usage, smaller load on PyPI, less +environmental impact, easier to fit more packages on top of NumPy in +resource-constrained environments and platforms like AWS Lambda, lower latency +for Pyodide users, and so on. We aim for significant reductions, as well as +making it easier for end users and packagers to produce smaller custom builds +(e.g., we added support for stripping tests before 2.1.0). See +`gh-25737 `__ for details. + +Support use of CPython's limited C API +`````````````````````````````````````` +Use of the CPython limited C API, allowing producing ``abi3`` wheels that use +the stable ABI and are hence independent of CPython feature releases, has +benefits for both downstream packages that use NumPy's C API and for NumPy +itself. In NumPy 2.0, work was done to enable using the limited C API with +the Cython support in NumPy (see `gh-25531 `__). +More work and testing is needed to ensure full support for downstream packages. + +We also want to explore what is needed for NumPy itself to use the limited +C API - this would make testing new CPython dev and pre-release versions across +the ecosystem easier, and significantly reduce the maintenance effort for CI +jobs in NumPy itself. + +Create a header-only package for NumPy +`````````````````````````````````````` +We have reduced the platform-dependent content in the public NumPy headers to +almost nothing. It is now feasible to create a separate package with only +NumPy headers and a discovery mechanism for them, in order to enable downstream +packages to build against the NumPy C API without having NumPy installed. +This will make it easier/cheaper to use NumPy's C API, especially on more +niche platforms for which we don't provide wheels. + + +NumPy 2.0 stabilization & downstream usage +------------------------------------------ + +We made a very large amount of changes (and improvements!) in NumPy 2.0. 
The +release process has taken a very long time, and part of the ecosystem is still +catching up. We may need to slow down for a while, and possibly help the rest +of the ecosystem with adapting to the ABI and API changes. + +We will need to assess the costs and benefits to NumPy itself, +downstream package authors, and end users. Based on that assessment, we need to +come to a conclusion on whether it's realistic to do another ABI-breaking +release again in the future or not. This will also inform the future evolution +of our C API. + + +Security +-------- + +NumPy is quite secure - we get only a limited number of reports about potential +vulnerabilities, and most of those are incorrect. We have made strides with a +documented security policy, a private disclosure method, and maintaining an +OpenSSF scorecard (with a high score). However, we have not changed much in how +we approach supply chain security in quite a while. We aim to make improvements +here, for example achieving fully reproducible builds for all the build +artifacts we publish - and providing full provenance information for them. Maintenance ----------- -- ``MaskedArray`` needs to be improved, ideas include: +- ``numpy.ma`` is still in poor shape and under-maintained. It needs to be + improved, ideas include: - - Rewrite masked arrays to not be a ndarray subclass -- maybe in a separate project? + - Rewrite masked arrays to not be an ndarray subclass -- maybe in a separate project? - MaskedArray as a duck-array type, and/or - dtypes that support missing values -- Fortran integration via ``numpy.f2py`` requires a number of improvements, see - `this tracking issue `__. -- A backend system for ``numpy.fft`` (so that e.g. ``fft-mkl`` doesn't need to monkeypatch numpy). - Write a strategy on how to deal with overlap between NumPy and SciPy for ``linalg``. -- Deprecate ``np.matrix`` (very slowly). 
+- Deprecate ``np.matrix`` (very slowly) - this is feasible once the switch-over + from sparse matrices to sparse arrays in SciPy is complete. - Add new indexing modes for "vectorized indexing" and "outer indexing" (see :ref:`NEP21`). - Make the polynomial API easier to use. -- Integrate an improved text file loader. -- Ufunc and gufunc improvements, see `gh-8892 `__ - and `gh-11492 `__. .. _`mypy`: https://mypy.readthedocs.io diff --git a/doc/neps/scope.rst b/doc/neps/scope.rst index 93887c4b12ff..ffa3d8655ad8 100644 --- a/doc/neps/scope.rst +++ b/doc/neps/scope.rst @@ -36,10 +36,10 @@ Here, we describe aspects of N-d array computation that are within scope for Num - NumPy provides some **infrastructure for other packages in the scientific Python ecosystem**: - - numpy.distutils (build support for C++, Fortran, BLAS/LAPACK, and other - relevant libraries for scientific computing) + - numpy.distutils (removed in NumPy 2.5.0, was providing build support for C++, Fortran, + BLAS/LAPACK, and other relevant libraries for scientific computing) - f2py (generating bindings for Fortran code) - - testing utilities + - testing utilities (mostly deprecated, pytest does a good job) - **Speed**: we take performance concerns seriously and aim to execute operations on large arrays with similar performance as native C diff --git a/doc/neps/tools/build_index.py b/doc/neps/tools/build_index.py index e8ca86e68c13..f727f0b0cc81 100644 --- a/doc/neps/tools/build_index.py +++ b/doc/neps/tools/build_index.py @@ -4,11 +4,12 @@ categories. 
""" -import os -import jinja2 import glob +import os import re +import jinja2 + def render(tpl_path, context): path, filename = os.path.split(tpl_path) @@ -19,7 +20,7 @@ def render(tpl_path, context): def nep_metadata(): ignore = ('nep-template.rst') sources = sorted(glob.glob(r'nep-*.rst')) - sources = [s for s in sources if not s in ignore] + sources = [s for s in sources if s not in ignore] meta_re = r':([a-zA-Z\-]*): (.*)' @@ -45,7 +46,7 @@ def nep_metadata(): else: raise RuntimeError("Unable to find NEP title.") - tags['Title'] = lines[i+1].strip() + tags['Title'] = lines[i + 1].strip() tags['Filename'] = source if not tags['Title'].startswith(f'NEP {nr} — '): @@ -55,7 +56,7 @@ def nep_metadata(): f' {tags["Title"]!r}') if tags['Status'] in ('Accepted', 'Rejected', 'Withdrawn'): - if not 'Resolution' in tags: + if 'Resolution' not in tags: raise RuntimeError( f'NEP {nr} is Accepted/Rejected/Withdrawn but ' 'has no Resolution tag' @@ -70,7 +71,7 @@ def nep_metadata(): for nr, tags in neps.items(): if tags['Status'] == 'Superseded': - if not 'Replaced-By' in tags: + if 'Replaced-By' not in tags: raise RuntimeError( f'NEP {nr} has been Superseded, but has no Replaced-By tag' ) @@ -78,7 +79,7 @@ def nep_metadata(): replaced_by = int(re.findall(r'\d+', tags['Replaced-By'])[0]) replacement_nep = neps[replaced_by] - if not 'Replaces' in replacement_nep: + if 'Replaces' not in replacement_nep: raise RuntimeError( f'NEP {nr} is superseded by {replaced_by}, but that NEP has ' f"no Replaces tag." 
@@ -117,7 +118,7 @@ def parse_replaces_metadata(replacement_nep): "open", "rejected", ): infile = f"{nepcat}.rst.tmpl" - outfile =f"{nepcat}.rst" + outfile = f"{nepcat}.rst" print(f'Compiling {infile} -> {outfile}') genf = render(infile, meta) diff --git a/doc/postprocess.py b/doc/postprocess.py index 4b48fa443149..3415c9afb711 100755 --- a/doc/postprocess.py +++ b/doc/postprocess.py @@ -34,16 +34,17 @@ def process_tex(lines): """ new_lines = [] for line in lines: - if (line.startswith(r'\section{numpy.') - or line.startswith(r'\subsection{numpy.') - or line.startswith(r'\subsubsection{numpy.') - or line.startswith(r'\paragraph{numpy.') - or line.startswith(r'\subparagraph{numpy.') - ): - pass # skip! + if line.startswith(("\\section{numpy.", + "\\subsection{numpy.", + "\\subsubsection{numpy.", + "\\paragraph{numpy.", + "\\subparagraph{numpy.", + )): + pass else: new_lines.append(line) return new_lines + if __name__ == "__main__": main() diff --git a/doc/preprocess.py b/doc/preprocess.py index 83980bb2fed5..bc43e89764f8 100755 --- a/doc/preprocess.py +++ b/doc/preprocess.py @@ -1,11 +1,10 @@ #!/usr/bin/env python3 -import subprocess import os -import sys from string import Template + def main(): - doxy_gen(os.path.abspath(os.path.join('..'))) + doxy_gen(os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) def doxy_gen(root_path): """ @@ -25,27 +24,28 @@ def doxy_gen(root_path): class DoxyTpl(Template): delimiter = '@' + def doxy_config(root_path): """ Fetch all Doxygen sub-config files and gather it with the main config file. 
""" confs = [] dsrc_path = os.path.join(root_path, "doc", "source") - sub = dict(ROOT_DIR=root_path) + sub = {'ROOT_DIR': root_path} with open(os.path.join(dsrc_path, "doxyfile")) as fd: conf = DoxyTpl(fd.read()) confs.append(conf.substitute(CUR_DIR=dsrc_path, **sub)) - for dpath, _, files in os.walk(root_path): - if ".doxyfile" not in files: - continue - conf_path = os.path.join(dpath, ".doxyfile") - with open(conf_path) as fd: - conf = DoxyTpl(fd.read()) - confs.append(conf.substitute(CUR_DIR=dpath, **sub)) + for subdir in ["doc", "numpy"]: + for dpath, _, files in os.walk(os.path.join(root_path, subdir)): + if ".doxyfile" not in files: + continue + conf_path = os.path.join(dpath, ".doxyfile") + with open(conf_path) as fd: + conf = DoxyTpl(fd.read()) + confs.append(conf.substitute(CUR_DIR=dpath, **sub)) return confs if __name__ == "__main__": main() - diff --git a/doc/release/upcoming_changes/12150.improvement.rst b/doc/release/upcoming_changes/12150.improvement.rst deleted file mode 100644 index f73a6d2aaa28..000000000000 --- a/doc/release/upcoming_changes/12150.improvement.rst +++ /dev/null @@ -1,5 +0,0 @@ -``histogram`` auto-binning now returns bin sizes >=1 for integer input data ---------------------------------------------------------------------------- -For integer input data, bin sizes smaller than 1 result in spurious empty -bins. This is now avoided when the number of bins is computed using one of the -algorithms provided by `histogram_bin_edges`. diff --git a/doc/release/upcoming_changes/26103.c_api.rst b/doc/release/upcoming_changes/26103.c_api.rst deleted file mode 100644 index 9d0d998e2dfc..000000000000 --- a/doc/release/upcoming_changes/26103.c_api.rst +++ /dev/null @@ -1,15 +0,0 @@ -API symbols now hidden but customizable ---------------------------------------- -NumPy now defaults to hide the API symbols it adds to allow all NumPy API -usage. 
-This means that by default you cannot dynamically fetch the NumPy API from -another library (this was never possible on windows). - -If you are experiencing linking errors related to ``PyArray_API`` or -``PyArray_RUNTIME_VERSION``, you can define the -:c:macro:`NPY_API_SYMBOL_ATTRIBUTE` to opt-out of this change. - -If you are experiencing problems due to an upstream header including NumPy, -the solution is to make sure you ``#include "numpy/ndarrayobject.h"`` before -their header and import NumPy yourself based on :ref:`including-the-c-api`. - diff --git a/doc/release/upcoming_changes/26268.expired.rst b/doc/release/upcoming_changes/26268.expired.rst deleted file mode 100644 index 932fdbfae6d7..000000000000 --- a/doc/release/upcoming_changes/26268.expired.rst +++ /dev/null @@ -1 +0,0 @@ -* Scalars and 0D arrays are disallowed for `numpy.nonzero` and `numpy.ndarray.nonzero`. diff --git a/doc/release/upcoming_changes/26285.change.rst b/doc/release/upcoming_changes/26285.change.rst deleted file mode 100644 index d652c58dc799..000000000000 --- a/doc/release/upcoming_changes/26285.change.rst +++ /dev/null @@ -1,13 +0,0 @@ -``ma.corrcoef`` may return a slightly different result ------------------------------------------------------- -A pairwise observation approach is currently used in `ma.corrcoef` to -calculate the standard deviations for each pair of variables. This has been -changed as it is being used to normalise the covariance, estimated using -`ma.cov`, which does not consider the observations for each variable in a -pairwise manner, rendering it unnecessary. The normalisation has been -replaced by the more appropriate standard deviation for each variable, -which significantly reduces the wall time, but will return slightly different -estimates of the correlation coefficients in cases where the observations -between a pair of variables are not aligned. 
However, it will return the same -estimates in all other cases, including returning the same correlation matrix -as `corrcoef` when using a masked array with no masked values. \ No newline at end of file diff --git a/doc/release/upcoming_changes/26285.performance.rst b/doc/release/upcoming_changes/26285.performance.rst deleted file mode 100644 index 79009f662a0f..000000000000 --- a/doc/release/upcoming_changes/26285.performance.rst +++ /dev/null @@ -1,5 +0,0 @@ -``ma.cov`` and ``ma.corrcoef`` are now significantly faster ------------------------------------------------------------ -The private function has been refactored along with `ma.cov` and -`ma.corrcoef`. They are now significantly faster, particularly on large, -masked arrays. \ No newline at end of file diff --git a/doc/release/upcoming_changes/26292.new_feature.rst b/doc/release/upcoming_changes/26292.new_feature.rst deleted file mode 100644 index fc2c33571d77..000000000000 --- a/doc/release/upcoming_changes/26292.new_feature.rst +++ /dev/null @@ -1 +0,0 @@ -* `numpy.reshape` and `numpy.ndarray.reshape` now support ``shape`` and ``copy`` arguments. diff --git a/doc/release/upcoming_changes/26313.change.rst b/doc/release/upcoming_changes/26313.change.rst deleted file mode 100644 index 99c8b1d879f9..000000000000 --- a/doc/release/upcoming_changes/26313.change.rst +++ /dev/null @@ -1,2 +0,0 @@ -* As `numpy.vecdot` is now a ufunc it has a less precise signature. - This is due to the limitations of ufunc's typing stub. diff --git a/doc/release/upcoming_changes/26388.performance.rst b/doc/release/upcoming_changes/26388.performance.rst deleted file mode 100644 index 885bc28c4a78..000000000000 --- a/doc/release/upcoming_changes/26388.performance.rst +++ /dev/null @@ -1,3 +0,0 @@ - * `numpy.save` now uses pickle protocol version 4 for saving arrays with - object dtype, which allows for pickle objects larger than 4GB and improves - saving speed by about 5% for large arrays. 
\ No newline at end of file diff --git a/doc/release/upcoming_changes/26452.deprecation.rst b/doc/release/upcoming_changes/26452.deprecation.rst deleted file mode 100644 index 146b50af048c..000000000000 --- a/doc/release/upcoming_changes/26452.deprecation.rst +++ /dev/null @@ -1,4 +0,0 @@ - * The `fix_imports` keyword argument in `numpy.save` is deprecated. Since - NumPy 1.17, `numpy.save` uses a pickle protocol that no longer supports - Python 2, and ignored `fix_imports` keyword. This keyword is kept only - for backward compatibility. It is now deprecated. \ No newline at end of file diff --git a/doc/release/upcoming_changes/26501.new_feature.rst b/doc/release/upcoming_changes/26501.new_feature.rst deleted file mode 100644 index c7465925295c..000000000000 --- a/doc/release/upcoming_changes/26501.new_feature.rst +++ /dev/null @@ -1,2 +0,0 @@ -* NumPy now supports DLPack v1, support for older versions will - be deprecated in the future. diff --git a/doc/release/upcoming_changes/26580.new_feature.rst b/doc/release/upcoming_changes/26580.new_feature.rst deleted file mode 100644 index c625e9b9d8a2..000000000000 --- a/doc/release/upcoming_changes/26580.new_feature.rst +++ /dev/null @@ -1 +0,0 @@ -* `numpy.asanyarray` now supports ``copy`` and ``device`` arguments, matching `numpy.asarray`. diff --git a/doc/release/upcoming_changes/29536.deprecation.rst b/doc/release/upcoming_changes/29536.deprecation.rst new file mode 100644 index 000000000000..7367a135cdd1 --- /dev/null +++ b/doc/release/upcoming_changes/29536.deprecation.rst @@ -0,0 +1,11 @@ +Setting the ``shape`` attribute is deprecated +--------------------------------------------- +Setting the shape attribute is now deprecated since mutating +an array is unsafe if an array is shared, especially by multiple +threads. As an alternative, you can create a new view via +`np.reshape` or `np.ndarray.reshape`. For example: ``x = np.arange(15); x = np.reshape(x, (3, 5))``. 
+To ensure no copy is made from the data, one can use ``np.reshape(..., copy=False)``.
+
+Directly setting the shape on an array is discouraged, but for cases where it is difficult to work
+around, e.g., in ``__array_finalize__``, it is possible with the private method `np.ndarray._set_shape`.
+
diff --git a/doc/release/upcoming_changes/29929.improvement.rst b/doc/release/upcoming_changes/29929.improvement.rst
new file mode 100644
index 000000000000..633d8768a03f
--- /dev/null
+++ b/doc/release/upcoming_changes/29929.improvement.rst
@@ -0,0 +1,19 @@
+For ``f2py``, the behaviour of ``intent(inplace)`` has improved.
+Previously, if an input array did not have the right dtype or order,
+the input array was modified in-place, changing its dtype and
+replacing its data by a corrected copy. Now, instead, the corrected
+copy is kept as a separate array, which, after being passed and
+presumably modified by the Fortran routine, is copied back to the
+input array. The above means one no longer has the risk that
+pre-existing views or slices of the input array start pointing to
+unallocated memory (at the price of increased overhead for the
+write-back copy at the end of the call).
+
+A potential problem would be that one might get very different results
+if one, e.g., previously passed in an integer array where a double
+array was expected: the writeback to integer would likely give wrong
+results. To avoid such situations, ``intent(inplace)`` will now only
+allow arrays that have equivalent type to that used in the Fortran
+routine, i.e., ``dtype.kind`` is the same. For instance, a routine
+expecting double would be able to receive float, but would raise on
+integer input.
diff --git a/doc/release/upcoming_changes/30181.deprecation.rst b/doc/release/upcoming_changes/30181.deprecation.rst new file mode 100644 index 000000000000..c9ca61dd67f9 --- /dev/null +++ b/doc/release/upcoming_changes/30181.deprecation.rst @@ -0,0 +1,6 @@ +Resizing a Numpy array inplace is deprecated +-------------------------------------------- +Resizing a Numpy array inplace is deprecated since mutating +an array is unsafe if an array is shared, especially by multiple +threads. As an alternative, you can create a resized array via ``np.resize``. + diff --git a/doc/release/upcoming_changes/30340.expired.rst b/doc/release/upcoming_changes/30340.expired.rst new file mode 100644 index 000000000000..79dd57dde737 --- /dev/null +++ b/doc/release/upcoming_changes/30340.expired.rst @@ -0,0 +1 @@ +* ``numpy.distutils`` has been removed diff --git a/doc/release/upcoming_changes/30381.new_feature.rst b/doc/release/upcoming_changes/30381.new_feature.rst new file mode 100644 index 000000000000..8dc3ce34e3bc --- /dev/null +++ b/doc/release/upcoming_changes/30381.new_feature.rst @@ -0,0 +1,11 @@ +Pixi package definitions +------------------------ +Pixi package definitions have been added for different kinds +of from-source builds of NumPy. These can be used in +downstream Pixi workspaces via the ``pixi-build`` feature. + +Definitions for both ``default`` and AddressSanitizer-instrumented +(``asan``) builds are available in the source code under the +``pixi-packages/`` directory. + +``linux-64`` and ``osx-arm64`` platforms are supported. 
diff --git a/doc/release/upcoming_changes/30411.compatibility.rst b/doc/release/upcoming_changes/30411.compatibility.rst
new file mode 100644
index 000000000000..54ba1b1fd32d
--- /dev/null
+++ b/doc/release/upcoming_changes/30411.compatibility.rst
@@ -0,0 +1,18 @@
+``linalg.eig`` and ``linalg.eigvals`` now always return complex arrays
+----------------------------------------------------------------------
+
+Previously, the return values depended on whether the eigenvalues happened to lie
+on the real line (which, for a general, non-symmetric matrix, is not guaranteed).
+
+The change makes consistent what was a value-dependent result. To retain the
+previous behavior, do::
+
+    w = eigvals(a)
+    if np.all(w.imag == 0):  # this is what NumPy used to do
+        w = w.real
+
+If your matrix is symmetric/hermitian, use ``eigh`` and ``eigvalsh`` instead of
+``eig`` and ``eigvals``. These are guaranteed to return real values. A common
+case is covariance matrices, which are symmetric and positive definite by
+construction.
+ diff --git a/doc/release/upcoming_changes/30460.expired.rst b/doc/release/upcoming_changes/30460.expired.rst new file mode 100644 index 000000000000..5fb6bf470866 --- /dev/null +++ b/doc/release/upcoming_changes/30460.expired.rst @@ -0,0 +1 @@ +* Passing ``None`` as dtype to ``np.finfo`` will now raise a ``TypeError`` (deprecated since 1.25) diff --git a/doc/release/upcoming_changes/30461.expired.rst b/doc/release/upcoming_changes/30461.expired.rst new file mode 100644 index 000000000000..e9d05eda1b7b --- /dev/null +++ b/doc/release/upcoming_changes/30461.expired.rst @@ -0,0 +1 @@ +* ``numpy.cross`` no longer supports 2-dimensional vectors (deprecated since 2.0) diff --git a/doc/release/upcoming_changes/30462.expired.rst b/doc/release/upcoming_changes/30462.expired.rst new file mode 100644 index 000000000000..ee8b62796640 --- /dev/null +++ b/doc/release/upcoming_changes/30462.expired.rst @@ -0,0 +1 @@ +* ``numpy._core.numerictypes.maximum_sctype`` has been removed (deprecated since 2.0) diff --git a/doc/release/upcoming_changes/30463.expired.rst b/doc/release/upcoming_changes/30463.expired.rst new file mode 100644 index 000000000000..232448966104 --- /dev/null +++ b/doc/release/upcoming_changes/30463.expired.rst @@ -0,0 +1,2 @@ +* ``numpy.row_stack`` has been removed in favor of ``numpy.vstack`` (deprecated since 2.0). +* ``get_array_wrap`` has been removed (deprecated since 2.0). diff --git a/doc/release/upcoming_changes/30467.expired.rst b/doc/release/upcoming_changes/30467.expired.rst new file mode 100644 index 000000000000..3474787b2e1f --- /dev/null +++ b/doc/release/upcoming_changes/30467.expired.rst @@ -0,0 +1 @@ +* ``recfromtxt`` and ``recfromcsv`` have been removed from ``numpy.lib._npyio`` in favor of ``numpy.genfromtxt`` (deprecated since 2.0). 
diff --git a/doc/release/upcoming_changes/30480.typing.rst b/doc/release/upcoming_changes/30480.typing.rst
new file mode 100644
index 000000000000..fa27a8ecbe37
--- /dev/null
+++ b/doc/release/upcoming_changes/30480.typing.rst
@@ -0,0 +1,8 @@
+``numpy.linalg`` typing improvements and preliminary shape-typing support
+-------------------------------------------------------------------------
+Input and output dtypes for ``numpy.linalg`` functions are now more precise. Several of these
+functions also gain preliminary shape-typing support while remaining backward compatible.
+For example, the return type of ``numpy.linalg.matmul`` now depends on the shape-type of its inputs,
+or falls back to the backward-compatible return type if the shape-types are unknown at type-checking
+time. Because of limitations in Python's type system and current type-checkers, shape-typing cannot
+cover every situation and is often only implemented for the most common lower-rank cases.
diff --git a/doc/release/upcoming_changes/30489.compatibility.rst b/doc/release/upcoming_changes/30489.compatibility.rst
new file mode 100644
index 000000000000..6eb1387fab6b
--- /dev/null
+++ b/doc/release/upcoming_changes/30489.compatibility.rst
@@ -0,0 +1,5 @@
+MSVC support
+------------
+NumPy now requires minimum MSVC 19.35 toolchain version on
+Windows platforms. This corresponds to Visual Studio 2022
+version 17.5 Preview 2 or newer.
\ No newline at end of file
diff --git a/doc/release/upcoming_changes/30517.performance.rst b/doc/release/upcoming_changes/30517.performance.rst
new file mode 100644
index 000000000000..df15498f7470
--- /dev/null
+++ b/doc/release/upcoming_changes/30517.performance.rst
@@ -0,0 +1,8 @@
+Improved performance of ``numpy.searchsorted``
+----------------------------------------------
+The C++ binary search implementation used by ``numpy.searchsorted`` now has
+a much better performance when searching for multiple keys.
The new
+implementation batches binary search steps across all keys to leverage cache
+locality and out-of-order execution. Benchmarks show the new implementation can
+be up to 20 times faster for hundreds of thousands of keys while single-key
+performance remains comparable to previous versions.
\ No newline at end of file
diff --git a/doc/release/upcoming_changes/30538.change.rst b/doc/release/upcoming_changes/30538.change.rst
new file mode 100644
index 000000000000..8372e94b9bd8
--- /dev/null
+++ b/doc/release/upcoming_changes/30538.change.rst
@@ -0,0 +1,18 @@
+``numpy.ctypeslib.as_ctypes`` now does not support scalar types
+----------------------------------------------------------------
+The function ``numpy.ctypeslib.as_ctypes`` has been updated to only accept ``numpy.ndarray``.
+Passing a scalar type (e.g., ``numpy.int32(5)``) will now raise a ``TypeError``.
+This change was made to avoid the issue `gh-30354 `__
+and to enforce the readonly nature of scalar types in NumPy.
+The previous behavior relied on undocumented implicit temporary arrays and was not well-defined.
+Users who need to convert scalar types to ctypes should first convert them to an array
+(e.g., ``numpy.asarray``) before passing them to ``numpy.ctypeslib.as_ctypes``.
+
+
+``__array_interface__`` changes on scalars
+------------------------------------------
+Scalars now export the ``__array_interface__`` directly rather than including
+an array copy as a ``__ref`` entry. This means that scalars are now exported
+as read-only while they were previously exported as writeable.
+The path via ``__ref`` was undocumented and not consistently used even
+within NumPy itself.
diff --git a/doc/release/upcoming_changes/30566.typing.rst b/doc/release/upcoming_changes/30566.typing.rst new file mode 100644 index 000000000000..fd9aabf85b6f --- /dev/null +++ b/doc/release/upcoming_changes/30566.typing.rst @@ -0,0 +1,5 @@ +``numpy.ma`` typing annotations +------------------------------- +The ``numpy.ma`` module is now fully covered by typing annotations. +This includes annotations for masked arrays, masks, and various functions and methods. +With this, NumPy has achieved 100% typing coverage across all its submodules. diff --git a/doc/release/upcoming_changes/30604.expired.rst b/doc/release/upcoming_changes/30604.expired.rst new file mode 100644 index 000000000000..50cab89f3c3a --- /dev/null +++ b/doc/release/upcoming_changes/30604.expired.rst @@ -0,0 +1 @@ +* The ``numpy.chararray`` re-export of ``numpy.char.chararray`` has been removed (deprecated since 2.0). diff --git a/doc/release/upcoming_changes/30605.deprecation.rst b/doc/release/upcoming_changes/30605.deprecation.rst new file mode 100644 index 000000000000..062160f210ef --- /dev/null +++ b/doc/release/upcoming_changes/30605.deprecation.rst @@ -0,0 +1 @@ +* ``numpy.char.chararray`` is deprecated. Use an ``ndarray`` with a string or bytes dtype instead. diff --git a/doc/release/upcoming_changes/30610.expired.rst b/doc/release/upcoming_changes/30610.expired.rst new file mode 100644 index 000000000000..eb806c954b16 --- /dev/null +++ b/doc/release/upcoming_changes/30610.expired.rst @@ -0,0 +1 @@ +* ``bincount`` now raises a ``TypeError`` for non-integer inputs (deprecated since 2.1). diff --git a/doc/release/upcoming_changes/30612.expired.rst b/doc/release/upcoming_changes/30612.expired.rst new file mode 100644 index 000000000000..1e29d3c96d53 --- /dev/null +++ b/doc/release/upcoming_changes/30612.expired.rst @@ -0,0 +1 @@ +* The ``numpy.lib.math`` alias for the standard library ``math`` module has been removed (deprecated since 1.25). 
diff --git a/doc/release/upcoming_changes/30613.expired.rst b/doc/release/upcoming_changes/30613.expired.rst new file mode 100644 index 000000000000..89610f3577e6 --- /dev/null +++ b/doc/release/upcoming_changes/30613.expired.rst @@ -0,0 +1 @@ +* Data type alias ``'a'`` was removed in favor of ``'S'`` (deprecated since 2.0). diff --git a/doc/release/upcoming_changes/30614.expired.rst b/doc/release/upcoming_changes/30614.expired.rst new file mode 100644 index 000000000000..e0d95d2a75fc --- /dev/null +++ b/doc/release/upcoming_changes/30614.expired.rst @@ -0,0 +1 @@ +* ``_add_newdoc_ufunc(ufunc, newdoc)`` has been removed in favor of ``ufunc.__doc__ = newdoc`` (deprecated in 2.2) diff --git a/doc/release/upcoming_changes/30644.deprecation.rst b/doc/release/upcoming_changes/30644.deprecation.rst new file mode 100644 index 000000000000..41219eca7e94 --- /dev/null +++ b/doc/release/upcoming_changes/30644.deprecation.rst @@ -0,0 +1,6 @@ +``numpy.fix`` is deprecated +--------------------------- + +`numpy.fix` is deprecated. Use `numpy.trunc` instead, which is faster +and follows the Array API standard. Both functions provide identical +functionality: rounding array elements towards zero. diff --git a/doc/release/upcoming_changes/30653.new_feature.rst b/doc/release/upcoming_changes/30653.new_feature.rst new file mode 100644 index 000000000000..0b79fd25fc77 --- /dev/null +++ b/doc/release/upcoming_changes/30653.new_feature.rst @@ -0,0 +1,6 @@ +``numpy.ndarray`` now supports structural pattern matching +---------------------------------------------------------- +`numpy.ndarray` and its subclasses now have the ``Py_TPFLAGS_SEQUENCE`` flag +set, enabling structural pattern matching (PEP 634) with ``match``/``case`` +statements. This also enables Cython to optimize integer indexing operations. +See :ref:`arrays.ndarray.pattern-matching` for details. 
diff --git a/doc/release/upcoming_changes/30707.change.rst b/doc/release/upcoming_changes/30707.change.rst new file mode 100644 index 000000000000..7f3846b6de97 --- /dev/null +++ b/doc/release/upcoming_changes/30707.change.rst @@ -0,0 +1,4 @@ +``meshgrid`` now always returns a tuple +--------------------------------------- +``np.meshgrid`` previously used to return a list when ``sparse`` was true and ``copy`` was false. +Now, it always returns a tuple regardless of the arguments. diff --git a/doc/release/upcoming_changes/30738.deprecation.rst b/doc/release/upcoming_changes/30738.deprecation.rst new file mode 100644 index 000000000000..381117ec84cc --- /dev/null +++ b/doc/release/upcoming_changes/30738.deprecation.rst @@ -0,0 +1,4 @@ +``numpy.ma.round_`` is deprecated +--------------------------------- +``numpy.ma.round_`` is deprecated. +``numpy.ma.round`` can be used as a replacement. diff --git a/doc/release/upcoming_changes/30770.compatibility.rst b/doc/release/upcoming_changes/30770.compatibility.rst new file mode 100644 index 000000000000..a1987f77e4c5 --- /dev/null +++ b/doc/release/upcoming_changes/30770.compatibility.rst @@ -0,0 +1,28 @@ +Cython support +-------------- + +NumPy's Cython headers (accessed via ``cimport numpy``) now require +Cython 3.0 or newer to build. If you try to compile a project that depends on +NumPy's Cython headers using Cython 0.29 or older, you will see a message like +this: + +:: + + Error compiling Cython file: + ------------------------------------------------------------ + ... + # versions. + # + # See __init__.cython-30.pxd for the real Cython header + # + + DEF err = int('Build aborted: the NumPy Cython headers require Cython 3.0.0 or newer.') + ------------------------------------------------------------ + + /path/to/site-packages/numpy/__init__.pxd:11:13: Error in compile-time expression: ValueError: invalid literal for int() with base 10: 'Build aborted: the NumPy Cython headers require Cython 3.0.0 or newer.' 
+ + +Note that the invalid integer is not a bug in NumPy - we are intentionally +generating this error to avoid triggering a more obscure error later in the +build when an older Cython version tries to use a Cython feature that was not +available in the old Cython version. diff --git a/doc/release/upcoming_changes/30774.deprecation.rst b/doc/release/upcoming_changes/30774.deprecation.rst new file mode 100644 index 000000000000..00b941ef3a8c --- /dev/null +++ b/doc/release/upcoming_changes/30774.deprecation.rst @@ -0,0 +1,4 @@ +``typename`` is deprecated +-------------------------- +``numpy.typename`` is deprecated because the names returned by it were outdated and inconsistent. +``numpy.dtype.name`` can be used as a replacement. diff --git a/doc/release/upcoming_changes/30802.deprecation.rst b/doc/release/upcoming_changes/30802.deprecation.rst new file mode 100644 index 000000000000..82fe6672b885 --- /dev/null +++ b/doc/release/upcoming_changes/30802.deprecation.rst @@ -0,0 +1 @@ +* The ``numpy.char.[as]array`` functions are deprecated. Use an ``numpy.[as]array`` with a string or bytes dtype instead. diff --git a/doc/release/upcoming_changes/30846.compatibility.rst b/doc/release/upcoming_changes/30846.compatibility.rst new file mode 100644 index 000000000000..68a6685f6673 --- /dev/null +++ b/doc/release/upcoming_changes/30846.compatibility.rst @@ -0,0 +1,4 @@ +Default memory allocator change +------------------------------- +NumPy now uses ``PyMem_RawMalloc`` and ``PyMem_RawFree`` as the default memory allocator, +instead of system's ``malloc`` and ``free`` directly. 
diff --git a/doc/release/upcoming_changes/30846.performance.rst b/doc/release/upcoming_changes/30846.performance.rst new file mode 100644 index 000000000000..39d2d68cbda3 --- /dev/null +++ b/doc/release/upcoming_changes/30846.performance.rst @@ -0,0 +1,18 @@ +Improved scaling of ufuncs on free-threading +-------------------------------------------- + +NumPy's ufuncs now scale significantly better on free-threading builds +of CPython due to the following optimizations: + +* **Lock-free dispatch table:** The ufuncs dispatch table is now + implemented as a lock-free concurrent hash map, allowing multiple threads + to call ufuncs without contention. + +* **Immortal shared objects:** Certain shared objects, such as global memory + handlers, have been made immortal. This effectively reduces reference + counting contention across threads. + +* **Optimized memory allocation:** NumPy now utilizes ``PyMem_RawMalloc`` and + ``PyMem_RawFree`` for memory allocation. On Python 3.15 and newer, this + leverages ``mimalloc`` and significantly reduces memory allocation overhead + in multi-threaded workloads. \ No newline at end of file diff --git a/doc/release/upcoming_changes/README.rst b/doc/release/upcoming_changes/README.rst index 91b7f7e000a0..c1b9a91dd3c1 100644 --- a/doc/release/upcoming_changes/README.rst +++ b/doc/release/upcoming_changes/README.rst @@ -24,6 +24,7 @@ Each file should be named like ``..rst``, where * ``improvement``: General improvements and edge-case changes which are not new features or compatibility related. * ``performance``: Performance changes that should not affect other behaviour. +* ``typing``: Improvements and changes related to static typing. * ``change``: Other changes * ``highlight``: Adds a highlight bullet point to use as a possibly highlight of the release. @@ -40,7 +41,7 @@ So for example: ``123.new_feature.rst`` would have the content:: The ``my_new_feature`` option is now available for `my_favorite_function`. 
To use it, write ``np.my_favorite_function(..., my_new_feature=True)``. -``highlight`` is usually formatted as bulled points making the fragment +``highlight`` is usually formatted as bullet points making the fragment ``* This is a highlight``. Note the use of single-backticks to get an internal link (assuming @@ -59,4 +60,3 @@ will look in the final release notes. This README was adapted from the pytest changelog readme under the terms of the MIT licence. - diff --git a/doc/source/_static/numpy.css b/doc/source/_static/numpy.css index 180dec530649..1555dafb5539 100644 --- a/doc/source/_static/numpy.css +++ b/doc/source/_static/numpy.css @@ -1,10 +1,11 @@ @import url('https://fonts.googleapis.com/css2?family=Lato:ital,wght@0,400;0,700;0,900;1,400;1,700;1,900&family=Open+Sans:ital,wght@0,400;0,600;1,400;1,600&display=swap'); .navbar-brand img { - height: 75px; + height: 75px; } + .navbar-brand { - height: 75px; + height: 75px; } body { @@ -19,19 +20,7 @@ body { width: 15%; } -/* Version switcher colors from PyData Sphinx Theme */ - -.version-switcher__button[data-active-version-name*="devdocs"] { - background-color: var(--pst-color-warning); - border-color: var(--pst-color-warning); - opacity: 0.9; -} - -.version-switcher__button:not([data-active-version-name*="stable"]):not([data-active-version-name*="dev"]):not([data-active-version-name*="pull"]) { - background-color: var(--pst-color-danger); - border-color: var(--pst-color-danger); - opacity: 0.9; -} +/* Version switcher from PyData Sphinx Theme */ .version-switcher__menu a.list-group-item { font-size: small; @@ -71,4 +60,43 @@ div.admonition-legacy>.admonition-title::after { div.admonition-legacy>.admonition-title { background-color: var(--pst-color-warning-bg); -} \ No newline at end of file +} + +/* Buttons for JupyterLite-enabled interactive examples */ + +.try_examples_button { + color: white; + background-color: var(--pst-color-info); + border: none; + padding: 5px 10px; + border-radius: 0.25rem; + 
margin-top: 3px; /* better alignment under admonitions */ + margin-bottom: 5px !important; /* fix uneven button sizes under admonitions */ + box-shadow: 0 2px 5px rgba(108, 108, 108, 0.2); + font-weight: bold; + font-size: small; +} + +/* Use more accessible colours for text in dark mode */ +[data-theme=dark] .try_examples_button { + color: black; +} + +.try_examples_button:hover { + transform: scale(1.02); + box-shadow: 0 2px 5px rgba(0, 0, 0, 0.2); + cursor: pointer; +} + +.try_examples_button_container { + display: flex; + justify-content: flex-start; + gap: 10px; + margin-bottom: 20px; +} + +/* Better gaps for examples buttons under admonitions */ + +.try_examples_outer_iframe { + margin-top: 0.4em; +} diff --git a/doc/source/building/blas_lapack.rst b/doc/source/building/blas_lapack.rst index 6ae5f3f78a82..c00b3646d84e 100644 --- a/doc/source/building/blas_lapack.rst +++ b/doc/source/building/blas_lapack.rst @@ -16,20 +16,20 @@ plain ``libblas``/``liblapack``. This may vary per platform or over releases. That order, and which libraries are tried, can be changed through the ``blas-order`` and ``lapack-order`` build options, for example:: - $ python -m pip install . -C-Dblas-order=openblas,mkl,blis -C-Dlapack-order=openblas,mkl,lapack + $ python -m pip install . -Csetup-args=-Dblas-order=openblas,mkl,blis -Csetup-args=-Dlapack-order=openblas,mkl,lapack The first suitable library that is found will be used. In case no suitable library is found, the NumPy build will print a warning and then use (slow!) NumPy-internal fallback routines. In order to disallow use of those slow routines, the ``allow-noblas`` build option can be used:: - $ python -m pip install . -C-Dallow-noblas=false + $ python -m pip install . -Csetup-args=-Dallow-noblas=false By default the LP64 (32-bit integer) interface to BLAS and LAPACK will be used. For building against the ILP64 (64-bit integer) interface, one must use the ``use-ilp64`` build option:: - $ python -m pip install . 
-C-Duse-ilp64=true + $ python -m pip install . -Csetup-args=-Duse-ilp64=true .. _accelerated-blas-lapack-libraries: @@ -96,7 +96,7 @@ Full list of BLAS and LAPACK related build options -------------------------------------------------- BLAS and LAPACK are complex dependencies. Some libraries have more options that -are exposed via build options (see ``meson_options.txt`` in the root of the +are exposed via build options (see ``meson.options`` in the root of the repo for all of NumPy's build options). - ``blas``: name of the BLAS library to use (default: ``auto``), diff --git a/doc/source/building/cpu_simd.rst b/doc/source/building/cpu_simd.rst new file mode 100644 index 000000000000..f99a68b0c28a --- /dev/null +++ b/doc/source/building/cpu_simd.rst @@ -0,0 +1,26 @@ +CPU support & SIMD +================== + +NumPy supports a wide range of platforms and CPUs, and includes a significant +amount of code optimized for specific CPUs. By default, NumPy targets a +baseline with the minimum required SIMD instruction sets that are needed +(e.g., SSE4.2 on x86-64 CPUs) and uses dynamic dispatch to use newer instruction +sets (e.g., AVX2 and AVX512 on x86-64) when those are detected at runtime. + +There are a number of build options that can be used to modify that behavior. +The default build settings are chosen for both portability and performance, and +should be reasonably close to optimal for creating redistributable binaries as +well as local installs. That said, there are reasons one may want to change the +default behavior, for example to obtain smaller binaries, to install on very old +hardware, to work around bugs, or for testing. + +To detect and use all CPU features available on your local machine:: + + $ python -m pip install . -Csetup-args=-Dcpu-baseline="native" -Csetup-args=-Dcpu-dispatch="none" + +To use a lower baseline without any SIMD optimizations, useful for very old CPUs:: + + $ python -m pip install . 
-Csetup-args=-Dcpu-baseline="none" + +For more usage scenarios and more in-depth information about NumPy's SIMD support, +see :ref:`cpu-build-options`. diff --git a/doc/source/building/cross_compilation.rst b/doc/source/building/cross_compilation.rst index a162eb1d2f1a..f03b620ff031 100644 --- a/doc/source/building/cross_compilation.rst +++ b/doc/source/building/cross_compilation.rst @@ -2,10 +2,10 @@ Cross compilation ================= Cross compilation is a complex topic, we only add some hopefully helpful hints -here (for now). As of May 2023, cross-compilation based on ``crossenv`` is -known to work, as used (for example) in conda-forge. Cross-compilation without -``crossenv`` requires some manual overrides. You instruct these overrides by -passing options to ``meson setup`` via `meson-python`_. +here (for now). As of May 2025, cross-compilation with a Meson cross file as +well as cross-compilation based on ``crossenv`` are known to work. Conda-forge +uses the latter method. Cross-compilation without ``crossenv`` requires passing +build options to ``meson setup`` via `meson-python`_. .. _meson-python: https://meson-python.readthedocs.io/en/latest/how-to-guides/meson-args.html @@ -15,7 +15,7 @@ possible as well. Here are links to the NumPy "build recipes" on those distros: - `Void Linux `_ -- `Nix `_ +- `Nix `_ - `Conda-forge `_ See also `Meson's documentation on cross compilation @@ -24,7 +24,7 @@ may need to pass to Meson to successfully cross compile. One possible hiccup is that the build requires running a compiled executable in order to determine the ``long double`` format for the host platform. This may be -an obstable, since it requires ``crossenv`` or QEMU to run the host (cross) +an obstacle, since it requires ``crossenv`` or QEMU to run the host (cross) Python. 
To avoid this problem, specify the paths to the relevant directories in your *cross file*: @@ -33,9 +33,18 @@ your *cross file*: [properties] longdouble_format = 'IEEE_DOUBLE_LE' +For an example of a cross file needed to cross-compile NumPy, see +`numpy#288861 `__. +Putting that together, invoking a cross build with such a cross file, looks like: + +.. code:: bash + + $ python -m build --wheel -Csetup-args="--cross-file=aarch64-myos-cross-file.txt" + For more details and the current status around cross compilation, see: - The state of cross compilation in Python: `pypackaging-native key issue page `__ +- The `set of NumPy issues with the "Cross compilation" label `__ - Tracking issue for SciPy cross-compilation needs and issues: `scipy#14812 `__ diff --git a/doc/source/building/distutils_equivalents.rst b/doc/source/building/distutils_equivalents.rst index 156174d02358..65821bfec9d9 100644 --- a/doc/source/building/distutils_equivalents.rst +++ b/doc/source/building/distutils_equivalents.rst @@ -3,7 +3,7 @@ Meson and ``distutils`` ways of doing things -------------------------------------------- -*Old workflows (numpy.distutils based):* +*Old workflows (numpy.distutils based, no longer relevant):* 1. ``python runtests.py`` 2. ``python setup.py build_ext -i`` + ``export diff --git a/doc/source/building/index.rst b/doc/source/building/index.rst index 54a58a7999d8..2a89fefde4f9 100644 --- a/doc/source/building/index.rst +++ b/doc/source/building/index.rst @@ -52,7 +52,7 @@ your system. * BLAS and LAPACK libraries. `OpenBLAS `__ is the NumPy default; other variants include Apple Accelerate, `MKL `__, - `ATLAS `__ and + `ATLAS `__ and `Netlib `__ (or "Reference") BLAS and LAPACK. @@ -161,7 +161,8 @@ your system. This is needed even if you use the MinGW-w64 or Intel compilers, in order to ensure you have the Windows Universal C Runtime (the other components of Visual Studio are not needed when using Mingw-w64, and can be deselected if - desired, to save disk space). 
+ desired, to save disk space). The recommended version of the UCRT is + >= 10.0.22621.0. .. tab-set:: @@ -174,6 +175,12 @@ your system. run a ``.bat`` file for the correct bitness and architecture (e.g., for 64-bit Intel CPUs, use ``vcvars64.bat``). + If using a Conda environment while a version of Visual Studio 2019+ is + installed that includes the MSVC v142 package (VS 2019 C++ x86/x64 + build tools), activating the conda environment should cause Visual + Studio to be found and the appropriate .bat file executed to set + these variables. + For detailed guidance, see `Use the Microsoft C++ toolset from the command line `__. @@ -213,6 +220,68 @@ your system. try again. The Fortran compiler should be installed as described in this section. + .. tab-item:: Windows on ARM64 + :sync: Windows on ARM64 + + In Windows on ARM64, the set of compiler options that are available for + building NumPy is limited. Compilers such as GCC and GFortran are not yet + supported for Windows on ARM64. Currently, the NumPy build for Windows on ARM64 + is supported with MSVC and LLVM toolchains. The use of a Fortran compiler is + more tricky than on other platforms, because MSVC does not support Fortran, and + gfortran and MSVC can't be used together. If you don't need to run the ``f2py`` + tests, simply using MSVC is easiest. Otherwise, you will need the following + set of compilers: + + 1. MSVC + flang (``cl``, ``flang``) + 2. LLVM + flang (``clang-cl``, ``flang``) + + First, install Microsoft Visual Studio - the 2022 Community Edition will + work (see the `Visual Studio download site `__). + Ensure that you have installed the necessary Visual Studio components for building NumPy + on WoA from `here `__. + + To use the flang compiler for Windows on ARM64, install the latest LLVM + toolchain for WoA from `here `__. + + .. tab-set:: + + .. tab-item:: MSVC + + The MSVC installer does not put the compilers on the system path, and + the install location may change.
To query the install location, MSVC + comes with a ``vswhere.exe`` command-line utility. And to make the + C/C++ compilers available inside the shell you are using, you need to + run a ``.bat`` file for the correct bitness and architecture (e.g., for + ARM64-based CPUs, use ``vcvarsarm64.bat``). + + For detailed guidance, see `Use the Microsoft C++ toolset from the command line + `__. + + .. tab-item:: LLVM + + Similar to MSVC, LLVM does not put the compilers on the system path. + To set system path for LLVM compilers, users may need to use ``set`` + command to put compilers on the system path. To check compiler's path + for LLVM's clang-cl, try invoking LLVM's clang-cl compiler in the shell you use + (``clang-cl --version``). + + .. note:: + + Compilers should be on the system path (i.e., the ``PATH`` environment + variable should contain the directory in which the compiler executables + can be found) in order to be found, with the exception of MSVC which + will be found automatically if and only if there are no other compilers + on the ``PATH``. You can use any shell (e.g., Powershell, ``cmd`` or + Git Bash) to invoke a build. To check that this is the case, try + invoking a Fortran compiler in the shell you use (e.g., ``flang + --version``). + + .. warning:: + + Currently, Conda environment is not yet supported officially on `Windows + on ARM64 `__. + The present approach uses virtualenv for building NumPy from source on + Windows on ARM64. Building NumPy from source -------------------------- @@ -224,7 +293,7 @@ Otherwise, conda is recommended. .. note:: If you don't have a conda installation yet, we recommend using - Mambaforge_; any conda flavor will work though. + Miniforge_; any conda flavor will work though. Building from source to use NumPy ````````````````````````````````` @@ -256,6 +325,12 @@ Building from source to use NumPy git submodule update --init pip install . --no-build-isolation + .. 
warning:: + + On Windows, the AR, LD, and LDFLAGS environment variables may be set, + which will cause the pip install command to fail. These variables are only + needed for flang and can be safely unset prior to running pip install. + .. tab-item:: Virtual env or system Python :sync: pip @@ -289,7 +364,7 @@ Then you want to do the following: 1. Create a dedicated development environment (virtual environment or conda environment), 2. Install all needed dependencies (*build*, and also *test*, *doc* and - *optional* dependencies), + *optional* dependencies), 3. Build NumPy with the ``spin`` developer interface. Step (3) is always the same, steps (1) and (2) are different between conda and @@ -348,9 +423,25 @@ virtual environments: python -m venv venv .\venv\Scripts\activate + .. tab-item:: Windows on ARM64 + :sync: Windows on ARM64 + + :: + + python -m venv venv + .\venv\Scripts\activate + + .. note:: + + Building NumPy with BLAS and LAPACK functions requires OpenBLAS + library at Runtime. In Windows on ARM64, this can be done by setting + up pkg-config for OpenBLAS dependency. The build steps for OpenBLAS + for Windows on ARM64 can be found `here `__. + + Then install the Python-level dependencies from PyPI with:: - python -m pip install -r requirements/all_requirements.txt + python -m pip install -r requirements/build_requirements.txt To build NumPy in an activated development environment, run:: @@ -363,6 +454,13 @@ like build the html documentation or running benchmarks. The ``spin`` interface is self-documenting, so please see ``spin --help`` and ``spin --help`` for detailed guidance. +.. warning:: + + In an activated conda environment on Windows, the AR, LD, and LDFLAGS + environment variables may be set, which will cause the build to fail. + These variables are only needed for flang and can be safely unset + for build. + .. _meson-editable-installs: .. 
admonition:: IDE support & editable installs @@ -375,7 +473,7 @@ interface is self-documenting, so please see ``spin --help`` and install"). Editable installs are supported. It is important to understand that **you - may use either an editable install or ``spin`` in a given repository clone, + may use either an editable install or** ``spin`` **in a given repository clone, but not both**. If you use editable installs, you have to use ``pytest`` and other development tools directly instead of using ``spin``. @@ -417,6 +515,7 @@ Customizing builds compilers_and_options blas_lapack + cpu_simd cross_compilation redistributable_binaries @@ -432,5 +531,5 @@ Background information distutils_equivalents -.. _Mambaforge: https://github.com/conda-forge/miniforge#mambaforge +.. _Miniforge: https://github.com/conda-forge/miniforge .. _meson-python: https://mesonbuild.com/meson-python/ diff --git a/doc/source/building/introspecting_a_build.rst b/doc/source/building/introspecting_a_build.rst index f23628bf3ffd..268365f595bf 100644 --- a/doc/source/building/introspecting_a_build.rst +++ b/doc/source/building/introspecting_a_build.rst @@ -19,4 +19,4 @@ These things are all available after the configure stage of the build (i.e., information, rather than running the build and reading the full build log. For more details on this topic, see the -`SciPy doc page on build introspection `__. +`SciPy doc page on build introspection `__. 
diff --git a/doc/source/building/understanding_meson.rst b/doc/source/building/understanding_meson.rst index b990ff283271..0c29302c9abb 100644 --- a/doc/source/building/understanding_meson.rst +++ b/doc/source/building/understanding_meson.rst @@ -87,11 +87,11 @@ that's just an arbitrary name we picked here):: meson install -C build -It will then install to ``build-install/lib/python3.11/site-packages/numpy``, +It will then install to ``build-install/lib/python3.12/site-packages/numpy``, which is not on your Python path, so to add it do (*again, this is for learning purposes, using ``PYTHONPATH`` explicitly is typically not the best idea*):: - export PYTHONPATH=$PWD/build-install/lib/python3.11/site-packages/ + export PYTHONPATH=$PWD/build-install/lib/python3.12/site-packages/ Now we should be able to import ``numpy`` and run the tests. Remembering that we need to move out of the root of the repo to ensure we pick up the package diff --git a/doc/source/conf.py b/doc/source/conf.py index 83c58c2c3c2d..3ae97041a5ea 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -1,7 +1,10 @@ +import importlib import os import re import sys -import importlib +import sysconfig +from datetime import datetime + from docutils import nodes from docutils.parsers.rst import Directive @@ -15,11 +18,15 @@ # must be kept alive to hold the patched names _name_cache = {} +FREE_THREADED_BUILD = sysconfig.get_config_var('Py_GIL_DISABLED') + + def replace_scalar_type_names(): """ Rename numpy types to use the canonical names to make sphinx behave """ import ctypes - Py_ssize_t = ctypes.c_int64 if ctypes.sizeof(ctypes.c_void_p) == 8 else ctypes.c_int32 + sizeof_void_p = ctypes.sizeof(ctypes.c_void_p) + Py_ssize_t = ctypes.c_int64 if sizeof_void_p == 8 else ctypes.c_int32 class PyObject(ctypes.Structure): pass @@ -27,11 +34,19 @@ class PyObject(ctypes.Structure): class PyTypeObject(ctypes.Structure): pass - PyObject._fields_ = [ - ('ob_refcnt', Py_ssize_t), - ('ob_type', 
ctypes.POINTER(PyTypeObject)), - ] - + if not FREE_THREADED_BUILD: + PyObject._fields_ = [ + ('ob_refcnt', Py_ssize_t), + ('ob_type', ctypes.POINTER(PyTypeObject)), + ] + else: + # As of Python 3.14 + PyObject._fields_ = [ + ('ob_refcnt_full', ctypes.c_int64), + # an anonymous struct that we don't try to model + ('__private', ctypes.c_int64), + ('ob_type', ctypes.POINTER(PyTypeObject)), + ] PyTypeObject._fields_ = [ # varhead @@ -41,10 +56,6 @@ class PyTypeObject(ctypes.Structure): ('tp_name', ctypes.c_char_p), ] - # prevent numpy attaching docstrings to the scalar types - assert 'numpy._core._add_newdocs_scalars' not in sys.modules - sys.modules['numpy._core._add_newdocs_scalars'] = object() - import numpy # change the __name__ of the scalar types @@ -56,11 +67,13 @@ class PyTypeObject(ctypes.Structure): ]: typ = getattr(numpy, name) c_typ = PyTypeObject.from_address(id(typ)) - c_typ.tp_name = _name_cache[typ] = b"numpy." + name.encode('utf8') + if sys.implementation.name == 'cpython': + c_typ.tp_name = _name_cache[typ] = b"numpy." + name.encode('utf8') + else: + # It is not guaranteed that the c_typ has this model on other + # implementations + _name_cache[typ] = b"numpy." + name.encode('utf8') - # now generate the docstrings as usual - del sys.modules['numpy._core._add_newdocs_scalars'] - import numpy._core._add_newdocs_scalars replace_scalar_type_names() @@ -68,6 +81,7 @@ class PyTypeObject(ctypes.Structure): # As of NumPy 1.25, a deprecation of `str`/`bytes` attributes happens. # For some reasons, the doc build accesses these, so ignore them.
import warnings + warnings.filterwarnings("ignore", "In the future.*NumPy scalar", FutureWarning) @@ -93,7 +107,10 @@ class PyTypeObject(ctypes.Structure): 'IPython.sphinxext.ipython_console_highlighting', 'IPython.sphinxext.ipython_directive', 'sphinx.ext.mathjax', + 'sphinx_copybutton', 'sphinx_design', + 'sphinx.ext.imgconverter', + 'jupyterlite_sphinx', ] skippable_extensions = [ @@ -110,22 +127,24 @@ class PyTypeObject(ctypes.Structure): templates_path = ['_templates'] # The suffix of source filenames. -source_suffix = '.rst' +source_suffix = {'.rst': 'restructuredtext'} # General substitutions. project = 'NumPy' -copyright = '2008-2024, NumPy Developers' +year = datetime.now().year +copyright = f'2008-{year}, NumPy Developers' # The default replacements for |version| and |release|, also used in various # other places throughout the built documents. # import numpy + # The short X.Y version (including .devXXXX, rcX, b1 suffixes if present) version = re.sub(r'(\d+\.\d+)\.\d+(.*)', r'\1\2', numpy.__version__) version = re.sub(r'(\.dev\d+).*?$', r'\1', version) # The full version, including alpha/beta/rc tags. release = numpy.__version__ -print("%s %s" % (version, release)) +print(f"{version} {release}") # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: @@ -139,14 +158,6 @@ class PyTypeObject(ctypes.Structure): # The reST default role (used for this markup: `text`) to use for all documents. default_role = "autolink" -# List of directories, relative to source directories, that shouldn't be searched -# for source files. -exclude_dirs = [] - -exclude_patterns = [] -if sys.version_info[:2] >= (3, 12): - exclude_patterns += ["reference/distutils.rst"] - # If true, '()' will be appended to :func: etc. cross-reference text. 
add_function_parentheses = False @@ -182,7 +193,7 @@ def run(self): "NumPy versions.") try: - self.content[0] = text+" "+self.content[0] + self.content[0] = text + " " + self.content[0] except IndexError: # Content is empty; use the default text source, lineno = self.state_machine.get_source_and_line( @@ -233,8 +244,7 @@ def setup(app): html_favicon = '_static/favicon/favicon.ico' # Set up the version switcher. The versions.json is stored in the doc repo. -if os.environ.get('CIRCLE_JOB', False) and \ - os.environ.get('CIRCLE_BRANCH', '') != 'main': +if os.environ.get('CIRCLE_JOB') and os.environ['CIRCLE_BRANCH'] != 'main': # For PR, name is set to its ref switcher_version = os.environ['CIRCLE_BRANCH'] elif ".dev" in version: @@ -266,9 +276,14 @@ def setup(app): "version_match": switcher_version, "json_url": "https://numpy.org/doc/_static/versions.json", }, + "show_version_warning_banner": True, + "analytics": { + "plausible_analytics_domain": "numpy.org/doc/stable/", + "plausible_analytics_url": ("https://views.scientific-python.org/js/script.js"), + }, } -html_title = "%s v%s Manual" % (project, version) +html_title = f"{project} v{version} Manual" html_static_path = ['_static'] html_last_updated_fmt = '%b %d, %Y' html_css_files = ["numpy.css"] @@ -289,6 +304,9 @@ def setup(app): plot_html_show_formats = False plot_html_show_source_link = False +# sphinx-copybutton configurations +copybutton_prompt_text = r">>> |\.\.\. 
|\$ |In \[\d*\]: | {2,5}\.\.\.: | {5,8}: " +copybutton_prompt_is_regexp = True # ----------------------------------------------------------------------------- # LaTeX output # ----------------------------------------------------------------------------- @@ -433,14 +451,11 @@ def setup(app): # ----------------------------------------------------------------------------- # Coverage checker # ----------------------------------------------------------------------------- -coverage_ignore_modules = r""" - """.split() -coverage_ignore_functions = r""" - test($|_) (some|all)true bitwise_not cumproduct pkgload - generic\. - """.split() -coverage_ignore_classes = r""" - """.split() +coverage_ignore_modules = [] +coverage_ignore_functions = [ + 'test($|_)', '(some|all)true', 'bitwise_not', 'cumproduct', 'pkgload', 'generic\\.' +] +coverage_ignore_classes = [] coverage_c_path = [] coverage_c_regexes = {} @@ -458,7 +473,8 @@ def setup(app): plot_formats = [('png', 100), 'pdf'] import math -phi = (math.sqrt(5) + 1)/2 + +phi = (math.sqrt(5) + 1) / 2 plot_rcparams = { 'font.size': 8, @@ -467,7 +483,7 @@ def setup(app): 'xtick.labelsize': 8, 'ytick.labelsize': 8, 'legend.fontsize': 8, - 'figure.figsize': (3*phi, 3), + 'figure.figsize': (3 * phi, 3), 'figure.subplot.bottom': 0.2, 'figure.subplot.left': 0.2, 'figure.subplot.right': 0.9, @@ -482,7 +498,7 @@ def setup(app): # ----------------------------------------------------------------------------- import inspect -from os.path import relpath, dirname +from os.path import dirname, relpath for name in ['sphinx.ext.linkcode', 'numpydoc.linkcode']: try: @@ -504,7 +520,6 @@ def _get_c_source_file(obj): # todo: come up with a better way to generate these return None - def linkcode_resolve(domain, info): """ Determine the URL corresponding to Python object @@ -538,9 +553,14 @@ def linkcode_resolve(domain, info): fn = None lineno = None - # Make a poor effort at linking C extension types - if isinstance(obj, type) and obj.__module__ == 
'numpy': - fn = _get_c_source_file(obj) + if isinstance(obj, type): + # Make a poor effort at linking C extension types + if obj.__module__ == 'numpy': + fn = _get_c_source_file(obj) + + # This can be removed when removing the decorator set_module. Fix issue #28629 + if hasattr(obj, '_module_source'): + obj.__module__, obj._module_source = obj._module_source, obj.__module__ if fn is None: try: @@ -567,21 +587,25 @@ def linkcode_resolve(domain, info): else: linespec = "" + if isinstance(obj, type) and hasattr(obj, '_module_source'): + obj.__module__, obj._module_source = obj._module_source, obj.__module__ + if 'dev' in numpy.__version__: - return "https://github.com/numpy/numpy/blob/main/numpy/%s%s" % ( - fn, linespec) + return f"https://github.com/numpy/numpy/blob/main/numpy/{fn}{linespec}" else: return "https://github.com/numpy/numpy/blob/v%s/numpy/%s%s" % ( numpy.__version__, fn, linespec) + +from pygments.lexer import inherit from pygments.lexers import CLexer -from pygments.lexer import inherit, bygroups from pygments.token import Comment + class NumPyLexer(CLexer): name = 'NUMPYLEXER' - tokens = { + tokens = { # noqa: RUF012 'statements': [ (r'@[a-zA-Z_]*@', Comment.Preproc, 'macro'), inherit, @@ -592,7 +616,7 @@ class NumPyLexer(CLexer): # ----------------------------------------------------------------------------- # Breathe & Doxygen # ----------------------------------------------------------------------------- -breathe_projects = dict(numpy=os.path.join("..", "build", "doxygen", "xml")) +breathe_projects = {'numpy': os.path.join("..", "build", "doxygen", "xml")} breathe_default_project = "numpy" breathe_default_members = ("members", "undoc-members", "protected-members") @@ -603,4 +627,17 @@ class NumPyLexer(CLexer): ('c:identifier', 'PyHeapTypeObject'), ] +# ----------------------------------------------------------------------------- +# Interactive documentation examples via JupyterLite +# 
----------------------------------------------------------------------------- +global_enable_try_examples = True +try_examples_global_button_text = "Try it in your browser!" +try_examples_global_warning_text = ( + "NumPy's interactive examples are experimental and may not always work" + " as expected, with high load times especially on low-resource platforms," + " and the version of NumPy might not be in sync with the one you are" + " browsing the documentation for. If you encounter any issues, please" + " report them on the" + " [NumPy issue tracker](https://github.com/numpy/numpy/issues)." +) diff --git a/doc/source/dev/depending_on_numpy.rst b/doc/source/dev/depending_on_numpy.rst index aa970405d2fc..98dc552a779e 100644 --- a/doc/source/dev/depending_on_numpy.rst +++ b/doc/source/dev/depending_on_numpy.rst @@ -26,13 +26,21 @@ well known scientific Python projects, does **not** use semantic versioning. Instead, backwards incompatible API changes require deprecation warnings for at least two releases. For more details, see :ref:`NEP23`. -NumPy has both a Python API and a C API. The C API can be used directly or via -Cython, f2py, or other such tools. If your package uses the C API, then ABI -(application binary interface) stability of NumPy is important. NumPy's ABI is -forward but not backward compatible. This means: binaries compiled against a -given target version of NumPy's C API will still run correctly with newer NumPy -versions, but not with older versions. - +NumPy provides both a Python API and a C-API. The C-API can be accessed +directly or through tools like Cython or f2py. If your package uses the +C-API, it's important to understand NumPy's application binary interface +(ABI) compatibility: NumPy's ABI is forward compatible but not backward +compatible. This means that binaries compiled against an older version of +NumPy will still work with newer versions, but binaries compiled against a +newer version will not necessarily work with older ones. 
+ +Modules can also be safely built against NumPy 2.0 or later in +:ref:`CPython's abi3 mode `, which allows +building against a single (minimum-supported) version of Python but be +forward compatible with higher versions in the same series (e.g., ``3.x``). +This can greatly reduce the number of wheels that need to be built and +distributed. For more information and examples, see the +`cibuildwheel docs `__. .. _testing-prereleases: @@ -80,16 +88,16 @@ Build-time dependency `__. -If a package either uses the NumPy C API directly or it uses some other tool +If a package either uses the NumPy C-API directly or it uses some other tool that depends on it like Cython or Pythran, NumPy is a *build-time* dependency -of the package. +of the package. -By default, NumPy will expose an API that is backwards compatible with the -oldest NumPy version that supports the currently oldest compatible Python -version. NumPy 1.25.0 supports Python 3.9 and higher and NumPy 1.19 is the -first version to support Python 3.9. Thus, we guarantee that, when using -defaults, NumPy 1.25 will expose a C-API compatible with NumPy 1.19. -(the exact version is set within NumPy-internal header files). +By default, NumPy exposes an API that is backward compatible with the earliest +NumPy version that supports the oldest Python version currently supported by +NumPy. For example, NumPy 1.25.0 supports Python 3.9 and above; and the +earliest NumPy version to support Python 3.9 was 1.19. Therefore we guarantee +NumPy 1.25 will, when using defaults, expose a C-API compatible with NumPy +1.19. (the exact version is set within NumPy-internal header files). NumPy is also forward compatible for all minor releases, but a major release will require recompilation (see NumPy 2.0-specific advice further down). @@ -120,14 +128,9 @@ compatible with a new major release of NumPy and may not be compatible with very old versions. For conda-forge packages, please see -`here `__. 
- -as of now, it is usually as easy as including:: - - host: - - numpy - run: - - {{ pin_compatible('numpy') }} +`here `__ +for instructions on how to declare a dependency on ``numpy`` when using the C +API. Runtime dependency & version ranges @@ -138,9 +141,7 @@ for dropping support for old Python and NumPy versions: :ref:`NEP29`. We recommend all packages depending on NumPy to follow the recommendations in NEP 29. -For *run-time dependencies*, specify version bounds using -``install_requires`` in ``setup.py`` (assuming you use ``numpy.distutils`` or -``setuptools`` to build). +For *run-time dependencies*, specify version bounds in `pyproject.toml`. Most libraries that rely on NumPy will not need to set an upper version bound: NumPy is careful to preserve backward-compatibility. @@ -150,7 +151,7 @@ frequently, (b) use a large part of NumPy's API surface, and (c) is worried that changes in NumPy may break your code, you can set an upper bound of ``=2.0`` (or go. We'll focus on the "keep compatibility with 1.xx and 2.x" now, which is a little more involved. -*Example for a package using the NumPy C API (via C/Cython/etc.) which wants to support +*Example for a package using the NumPy C-API (via C/Cython/etc.) which wants to support NumPy 1.23.5 and up*: .. code:: ini diff --git a/doc/source/dev/development_advanced_debugging.rst b/doc/source/dev/development_advanced_debugging.rst index 286e24389d60..eb81b335f56a 100644 --- a/doc/source/dev/development_advanced_debugging.rst +++ b/doc/source/dev/development_advanced_debugging.rst @@ -5,12 +5,16 @@ Advanced debugging tools ======================== If you reached here, you want to dive into, or use, more advanced tooling. -This is usually not necessary for first time contributors and most +This is usually not necessary for first-time contributors and most day-to-day development. These are used more rarely, for example close to a new NumPy release, or when a large or particular complex change was made. 
-Since not all of these tools are used on a regular bases and only available +Some of these tools are used in NumPy's continuous integration tests. If you +see a test failure that only happens under a debugging tool, these instructions +should hopefully enable you to reproduce the test failure locally. + +Since not all of these tools are used on a regular basis and only available on some systems, please expect differences, issues, or quirks; we will be happy to help if you get stuck and appreciate any improvements or suggestions to these workflows. @@ -20,7 +24,7 @@ Finding C errors with additional tooling ######################################## Most development will not require more than a typical debugging toolchain -as shown in :ref:`Debugging `. +as shown in :ref:`Debugging `. But for example memory leaks can be particularly subtle or difficult to narrow down. @@ -32,7 +36,8 @@ However, you can ensure that we can track down such issues more easily: consider creating an additional simpler test as well. This can be helpful, because often it is only easy to find which test triggers an issue and not which line of the test. -* Never use ``np.empty`` if data is read/used. ``valgrind`` will notice this +* Never use ``np.empty`` if data is read/used. + `Valgrind `_ will notice this and report an error. When you do not care about values, you can generate random values instead. @@ -49,10 +54,10 @@ manager on Linux systems, but are also available on other platforms, possibly in a less convenient format. If you cannot easily install a debug build of Python from a system package manager, you can build one yourself using `pyenv `_. 
For example, to install and globally -activate a debug build of Python 3.10.8, one would do:: +activate a debug build of Python 3.13.3, one would do:: - pyenv install -g 3.10.8 - pyenv global 3.10.8 + pyenv install -g 3.13.3 + pyenv global 3.13.3 Note that ``pyenv install`` builds Python from source, so you must ensure that Python's dependencies are installed before building, see the pyenv documentation @@ -127,7 +132,8 @@ to mark them, but expect some false positives. ``valgrind`` ============ -Valgrind is a powerful tool to find certain memory access problems and should +`Valgrind `_ is a powerful tool +to find certain memory access problems and should be run on complicated C code. Basic use of ``valgrind`` usually requires no more than:: @@ -166,7 +172,7 @@ Valgrind helps: Python allocators.) Even though using valgrind for memory leak detection is slow and less sensitive -it can be a convenient: you can run most programs with valgrind without +it can be convenient: you can run most programs with valgrind without modification. Things to be aware of: @@ -188,7 +194,7 @@ Use together with ``pytest`` You can run the test suite with valgrind which may be sufficient when you are only interested in a few tests:: - PYTHOMMALLOC=malloc valgrind python runtests.py \ + PYTHONMALLOC=malloc valgrind python runtests.py \ -t numpy/_core/tests/test_multiarray.py -- --continue-on-collection-errors Note the ``--continue-on-collection-errors``, which is currently necessary due to @@ -213,3 +219,267 @@ command for NumPy). .. _pytest-valgrind: https://github.com/seberg/pytest-valgrind + +C debuggers +=========== + +Whenever NumPy crashes or when working on changes to NumPy's low-level C or C++ +code, it's often convenient to run Python under a C debugger to get more +information. A debugger can aid in understanding an interpreter crash (e.g. due +to a segmentation fault) by providing a C call stack at the site of the +crash. 
The call stack often provides valuable context to understand the nature +of a crash. C debuggers are also very useful during development, allowing +interactive debugging in the C implementation of NumPy. + +The NumPy developers often use both ``gdb`` and ``lldb`` to debug NumPy. As a +rule of thumb, ``gdb`` is often easier to use on Linux while ``lldb`` is easier +to use on a Mac environment. They have disjoint user interfaces, so you will need to +learn how to use whichever one you land on. The ``gdb`` to ``lldb`` `command map +`_ is a convenient reference for how to +accomplish common recipes in both debuggers. + + +Building With Debug Symbols +--------------------------- + +The ``spin`` `development workflow tool +`_ has built-in support for working +with both ``gdb`` and ``lldb`` via the ``spin gdb`` and ``spin lldb`` commands. + +.. note:: + + Building with ``-Dbuildtype=debug`` has a couple of important effects to + be aware of: + + * **Assertions are enabled**: This build type does not define the ``NDEBUG`` + macro, which means that any C-level assertions in the code will be + active. This is very useful for debugging, as it can help pinpoint + where an unexpected condition occurs. + + * **Compiler flags may need overriding**: Some compiler toolchains, + particularly those from ``conda-forge``, may set optimization flags + like ``-O2`` by default. These can override the ``debug`` build type. + To ensure a true debug build in such environments, you may need to + manually unset or override this flag. + + For more details on both points, see the `meson-python guide on + debug builds `_. + +For both debuggers, it's advisable to build NumPy in either the ``debug`` or +``debugoptimized`` meson build profile. To use ``debug`` you can pass the option +via ``spin build``: + +.. code-block:: bash + + spin build -- -Dbuildtype=debug + +to use ``debugoptimized`` you'd pass ``-Dbuildtype=debugoptimized`` instead. 
+ +You can pass additional arguments to `meson setup +`_ besides ``buildtype`` using the +same positional argument syntax for ``spin build``. + +Running a Test Script +--------------------- + +Let's say you have a test script named `test.py` that lives in a ``test`` folder +in the same directory as the NumPy source checkout. You could execute the test +script using the ``spin`` build of NumPy with the following incantation: + +.. code-block:: bash + + spin gdb ../test/test.py + +This will launch into gdb. If all you care about is a call stack for a crash, +type "r" and hit enter. Your test script will run and if a crash happens, you +type "bt" to get a traceback. For ``lldb``, the instructions are similar, just +replace ``spin gdb`` with ``spin lldb``. + +You can also set breakpoints and use other more advanced techniques. See the +documentation for your debugger for more details. + +One common issue with breakpoints in NumPy is that some code paths get hit +repeatedly during the import of the ``numpy`` module. This can make it tricky or +tedious to find the first "real" call after the NumPy import has completed and +the ``numpy`` module is fully initialized. + +One workaround is to use a script like this: + +.. code-block:: python + + import os + import signal + + import numpy as np + + PID = os.getpid() + + def do_nothing(*args): + pass + + signal.signal(signal.SIGUSR1, do_nothing) + + os.kill(PID, signal.SIGUSR1) + + # the code to run under a debugger follows + + +This example installs a signal handler for the ``SIGUSR1`` signal that does +nothing and then calls ``os.kill`` on the Python process with the ``SIGUSR1`` +signal. This causes the signal handler to fire and critically also causes both +``gdb`` and ``lldb`` to halt execution inside of the ``kill`` syscall. + +If you run ``lldb`` you should see output something like this: + +.. 
code-block:: + + Process 67365 stopped + * thread #1, queue = 'com.apple.main-thread', stop reason = signal SIGUSR1 + frame #0: 0x000000019c4b9da4 libsystem_kernel.dylib`__kill + 8 + libsystem_kernel.dylib`__kill: + -> 0x19c4b9da4 <+8>: b.lo 0x19c4b9dc4 ; <+40> + 0x19c4b9da8 <+12>: pacibsp + 0x19c4b9dac <+16>: stp x29, x30, [sp, #-0x10]! + 0x19c4b9db0 <+20>: mov x29, sp + Target 0: (python3.13) stopped. + (lldb) bt + * thread #1, queue = 'com.apple.main-thread', stop reason = signal SIGUSR1 + * frame #0: 0x000000019c4b9da4 libsystem_kernel.dylib`__kill + 8 + frame #1: 0x000000010087f5c4 libpython3.13.dylib`os_kill + 104 + frame #2: 0x000000010071374c libpython3.13.dylib`cfunction_vectorcall_FASTCALL + 276 + frame #3: 0x00000001006c1e3c libpython3.13.dylib`PyObject_Vectorcall + 88 + frame #4: 0x00000001007edd1c libpython3.13.dylib`_PyEval_EvalFrameDefault + 23608 + frame #5: 0x00000001007e7e6c libpython3.13.dylib`PyEval_EvalCode + 252 + frame #6: 0x0000000100852944 libpython3.13.dylib`run_eval_code_obj + 180 + frame #7: 0x0000000100852610 libpython3.13.dylib`run_mod + 220 + frame #8: 0x000000010084fa4c libpython3.13.dylib`_PyRun_SimpleFileObject + 868 + frame #9: 0x000000010084f400 libpython3.13.dylib`_PyRun_AnyFileObject + 160 + frame #10: 0x0000000100874ab8 libpython3.13.dylib`pymain_run_file + 336 + frame #11: 0x0000000100874324 libpython3.13.dylib`Py_RunMain + 1516 + frame #12: 0x000000010087459c libpython3.13.dylib`pymain_main + 324 + frame #13: 0x000000010087463c libpython3.13.dylib`Py_BytesMain + 40 + frame #14: 0x000000019c152b98 dyld`start + 6076 + (lldb) + +As you can see, the C stack trace is inside of the ``kill`` syscall and an +``lldb`` prompt is active, allowing interactively setting breakpoints. Since the +``os.kill`` call happens after the ``numpy`` module is already fully +initialized, this means any breakpoints set inside of ``kill`` will happen +*after* ``numpy`` is finished initializing. 
+ +Use together with ``pytest`` +---------------------------- + +You can also run ``pytest`` tests under a debugger. This requires using +the debugger in a slightly more manual fashion, since ``spin`` does not yet +automate this process. First, run ``spin build`` to ensure there is a fully +built copy of NumPy managed by ``spin``. Then, to run the tests under ``lldb`` +you would do something like this: + +.. code-block:: bash + + spin lldb $(which python) $(which pytest) build-install/usr/lib/python3.13/site-packages/numpy/_core/tests/test_multiarray.py + +This will execute the tests in ``test_multiarray.py`` under lldb after typing +'r' and hitting enter. Note that this command comes from a session using Python +3.13 on a Mac. If you are using a different Python version or operating system, +the directory layout inside ``build-install`` may be slightly different. + +You can set breakpoints as described above. The issue about breakpoints +commonly being hit during NumPy import also applies - consider refactoring your +test workflow into a test script so you can adopt the workaround using +``os.kill`` described above. + +Note the use of ``$(which python)`` to ensure the debugger receives a path to a +Python executable. If you are using ``pyenv``, you may need to replace ``which +python`` with ``pyenv which python``, since ``pyenv`` relies on shim scripts +that ``which`` doesn't know about. + + +Compiler Sanitizers +=================== + +The `compiler sanitizer `_ suites +shipped by both GCC and LLVM offer a means to detect many common programming +errors at runtime. The sanitizers work by instrumenting the application code at +build time so additional runtime checks fire. Typically, sanitizers are run +during the course of regular testing and if a sanitizer check fails, this leads +to a test failure or crash, along with a report about the nature of the failure. 
+ +While it is possible to use sanitizers with a "regular" build of CPython - it is +best if you can set up a Python environment based on a from-source Python build +with sanitizer instrumentation, and then use the instrumented Python to build +NumPy and run the tests. If the entire Python stack is instrumented using the +same sanitizer runtime, it becomes possible to identify issues that happen +across the Python stack. This enables detecting memory leaks in NumPy due to +misuse of memory allocated in CPython, for example. + +Build Python with Sanitizer Instrumentation +------------------------------------------- + +See the `section in the Python developer's guide +`_ on this topic for +more information about building Python from source. To enable address sanitizer, +you will need to pass ``--with-address-sanitizer`` to the ``configure`` script +invocation when you build Python. + +You can also use `pyenv `_ to automate the +process of building Python and quickly activate or deactivate a Python +installation using a command-line interface similar to virtual +environments. With ``pyenv`` you could install an ASAN-instrumented build of +Python 3.13 like this: + +.. code-block:: bash + + CONFIGURE_OPTS="--with-address-sanitizer" pyenv install 3.13 + +If you are interested in thread sanitizer, the ``cpython_sanity`` `docker images +`_ might also be a quicker choice +that bypasses building Python from source, although it may be annoying to do +debugging work inside of a docker image. + +Use together with ``spin`` +-------------------------- + +However you build Python, once you have an instrumented Python build, you can +install NumPy's development and test dependencies and build NumPy with address +sanitizer instrumentation. For example, to build NumPy with the ``debug`` +profile and address sanitizer, you would pass additional build options to +``meson`` like this: + +.. 
 code-block:: bash + + spin build -- -Dbuildtype=debug -Db_sanitize=address + + +Once the build is finished, you can use other ``spin`` commands like ``spin +test`` and ``spin gdb`` as with any other Python build. + +Special considerations +---------------------- + +Some NumPy tests intentionally lead to ``malloc`` returning ``NULL``. In its +default configuration, some of the compiler sanitizers flag this as an +error. You can disable that check by passing ``allocator_may_return_null=1`` to +the sanitizer as an option. For example, with address sanitizer: + +.. code-block:: bash + + ASAN_OPTIONS=allocator_may_return_null=1 spin test + +You may see memory leaks coming from the Python interpreter, particularly on +MacOS. If the memory leak reports are not useful, you can disable leak detection +by passing ``detect_leaks=0`` in ``ASAN_OPTIONS``. You can pass more than one +option using a colon-delimited list, like this: + +.. code-block:: bash + + ASAN_OPTIONS=allocator_may_return_null=1:halt_on_error=1:detect_leaks=1 spin test + +The ``halt_on_error`` option can be particularly useful -- it hard-crashes the +Python executable whenever it detects an error, along with a report about the +error that includes a stack trace. + +You can also take a look at the ``compiler_sanitizers.yml`` GitHub actions +workflow configuration. It describes several different CI jobs that are run as +part of the NumPy tests using Thread, Address, and Undefined Behavior sanitizer. 
 diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst index b1cc7d96ffe2..5d77509b43dc 100644 --- a/doc/source/dev/development_environment.rst +++ b/doc/source/dev/development_environment.rst @@ -92,6 +92,7 @@ one of:: $ spin test -v $ spin test numpy/random # to run the tests in a specific module $ spin test -v -t numpy/_core/tests/test_nditer.py::test_iter_c_order + $ spin test -p auto # to run tests in parallel threads using pytest-run-parallel This builds NumPy first, so the first time it may take a few minutes. @@ -114,6 +115,15 @@ argument to pytest:: $ spin test -v -t numpy/_core/tests/test_multiarray.py -- -k "MatMul and not vector" +To run "doctests" -- to check that the code examples in the documentation are correct -- +use the `check-docs` spin command. It relies on the `scipy-doctest` package, which +provides several additional features on top of the standard library ``doctest`` +package. Install ``scipy-doctest`` and run one of:: + + $ spin check-docs -v + $ spin check-docs numpy/linalg + $ spin check-docs -v -- -k 'det and not slogdet' + .. note:: Remember that all tests of NumPy should pass before committing your changes. @@ -176,6 +186,16 @@ For more extensive information, see :ref:`testing-guidelines`. Note: do not run the tests from the root directory of your numpy git repo without ``spin``, that will result in strange test errors. +Running type checks +------------------- +Changes that involve static type declarations are also executed using ``spin``. +The invocation will look like the following:: + + $ spin mypy + +This will look in the ``typing/tests`` directory for sets of operations to +test for type incompatibility. + Running linting --------------- Lint checks can be performed on newly added lines of Python code. 
@@ -186,18 +206,82 @@ Install all dependent packages using pip:: To run lint checks before committing new code, run:: - $ python tools/linter.py - -To check all changes in newly added Python code of current branch with target branch, run:: - - $ python tools/linter.py --branch main - -If there are no errors, the script exits with no message. In case of errors, -check the error message for details:: - - $ python tools/linter.py --branch main - ./numpy/_core/tests/test_scalarmath.py:34:5: E303 too many blank lines (3) - 1 E303 too many blank lines (3) + $ spin lint + +If there are no errors, the output will look like:: + + $ spin lint + Running Ruff Check... + All checks passed! + + Running C API borrow-reference linter... + Scanning 548 C/C++ source files... + + All checks passed! C API borrow-ref linter found no issues. + + + Running cython-lint... + +In case of errors, check the error message for details:: + + $ spin lint + Running Ruff Check... + I001 [*] Import block is un-sorted or un-formatted + --> numpy/matlib.py:12:1 + | + 10 | PendingDeprecationWarning, stacklevel=2) + 11 | + 12 | / import numpy as np + 13 | | + 14 | | # Matlib.py contains all functions in the numpy namespace with a few + 15 | | # replacements. See doc/source/reference/routines.matlib.rst for details. + 16 | | # Need * as we're copying the numpy namespace. + 17 | | from numpy import * # noqa: F403 + 18 | | from numpy.matrixlib.defmatrix import matrix, asmatrix + | |______________________________________________________^ + 19 | + 20 | __version__ = np.__version__ + | + help: Organize imports + 15 | # replacements. See doc/source/reference/routines.matlib.rst for details. + 16 | # Need * as we're copying the numpy namespace. 
+ 17 | from numpy import * # noqa: F403 + - from numpy.matrixlib.defmatrix import matrix, asmatrix + 18 + from numpy.matrixlib.defmatrix import asmatrix, matrix + 19 | + 20 | __version__ = np.__version__ + 21 | + + E501 Line too long (127 > 88) + --> numpy/matlib.py:214:89 + | + 212 | ------- + 213 | I : matrix + 214 | A `n` x `M` matrix where all elements are equal to zero, except for the `k`-th diagonal, whose values are equal to one. + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + 215 | + 216 | See Also + | + + Found 2 errors. + [*] 1 fixable with the `--fix` option. + +To automatically fix issues that can be fixed, run:: + + $ spin lint --fix + Running Ruff Check... + E501 Line too long (127 > 88) + --> numpy/matlib.py:214:89 + | + 212 | ------- + 213 | I : matrix + 214 | A `n` x `M` matrix where all elements are equal to zero, except for the `k`-th diagonal, whose values are equal to one. + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + 215 | + 216 | See Also + | + + Found 2 errors (1 fixed, 1 remaining). It is advisable to run lint checks before pushing commits to a remote branch since the linter runs as part of the CI pipeline. @@ -205,7 +289,7 @@ since the linter runs as part of the CI pipeline. For more details on Style Guidelines: - `Python Style Guide`_ -- `C Style Guide`_ +- :ref:`NEP45` Rebuilding & cleaning the workspace ----------------------------------- @@ -327,7 +411,6 @@ typically packaged as ``python-dbg``) is highly recommended. .. _Waf: https://code.google.com/p/waf/ .. _`match test names using python operators`: https://docs.pytest.org/en/latest/usage.html#specifying-tests-selecting-tests .. _`Python Style Guide`: https://www.python.org/dev/peps/pep-0008/ -.. 
_`C Style Guide`: https://numpy.org/neps/nep-0045-c_style_guide.html Understanding the code & getting started ---------------------------------------- diff --git a/doc/source/dev/development_ghcodespaces.rst b/doc/source/dev/development_ghcodespaces.rst new file mode 100644 index 000000000000..b6c8f0d5f0f4 --- /dev/null +++ b/doc/source/dev/development_ghcodespaces.rst @@ -0,0 +1,104 @@ +.. _development_ghcodespaces: + + +Using GitHub Codespaces for NumPy development +============================================= + +This section of the documentation will guide you through: + +* using GitHub Codespaces for your NumPy development environment +* creating a personal fork of the NumPy repository on GitHub +* a quick tour of GitHub Codespaces and VSCode desktop application +* working on the NumPy documentation in GitHub Codespaces + +GitHub Codespaces +----------------- + +`GitHub Codespaces`_ is a service that provides cloud based +development environments so that you don't have to install anything +on your local machine or worry about configuration. + +What is a codespace? +-------------------- + +A codespace is an instance of Codespaces - and thus a development environment +that is hosted in the cloud. Each codespace runs on a virtual machine hosted by +GitHub. You can choose the type of machine you want to use, depending on the +resources you need. Various types of machine are available, starting with a +2-core processor, 4 GB of RAM, and 32 GB of storage. You can connect to a +codespace from your browser, from Visual Studio Code, from the JetBrains +Gateway application, or by using GitHub CLI. + +Forking the NumPy repository +---------------------------- + +The best way to work on the NumPy codebase as a contributor is by making a fork +of the repository first. + +#. Browse to the `NumPy repository on GitHub`_ and `create your own fork`_. +#. Browse to your fork. 
Your fork will have a URL like + https://github.com/inessapawson/numpy, except with your GitHub username in place of ``inessapawson``. + +Starting GitHub Codespaces +-------------------------- + +You can create a codespace from the green "<> Code" button on the repository +home page and choose "Codespaces", or click this link `open`_. + +Quick workspace tour +-------------------- + +You can develop code in a codespace using your choice of tool: + +* a `command shell, via an SSH connection initiated using GitHub CLI`_ +* `one of the JetBrains IDEs, via the JetBrains Gateway`_ +* `the Visual Studio Code desktop application`_ +* `a browser-based version of Visual Studio Code`_ + +In this quickstart, we will be using the VSCode desktop application as the +editor. If you have not used it before, see the Getting started `VSCode docs`_ +to familiarize yourself with this tool. + +Your workspace will look similar to the image below: + +Development workflow with GitHub Codespaces +------------------------------------------- + +The :ref:`development-workflow` section of this documentation contains +information regarding the NumPy development workflow. Make sure to check this +before you start working on your contributions. + +Rendering the NumPy documentation +--------------------------------- + +You can find the detailed documentation on how the rendering of the +documentation with Sphinx works in the :ref:`howto-build-docs` section. + +The documentation is pre-built during your codespace initialization. So once +this task is completed, you have two main options to render the documentation +in GitHub Codespaces. + +FAQs and troubleshooting +------------------------ + +**How long does my codespace stay active if I'm not using it?** +If you leave your codespace running without interaction, or if you exit your +codespace without explicitly stopping it, by default the codespace will timeout +after 30 minutes of inactivity. 
You can customize the duration of the timeout +period for new codespaces that you create. + +**Can I come back to a previous codespace?** +The lifecycle of a codespace begins when you create a codespace and ends when +you delete it. You can disconnect and reconnect to an active codespace without +affecting its running processes. You may stop and restart a codespace without +losing changes that you have made to your project. + +.. _GitHub Codespaces: https://github.com/features/codespaces +.. _NumPy repository on GitHub: https://github.com/NumPy/NumPy +.. _create your own fork: https://help.github.com/en/articles/fork-a-repo +.. _open: https://github.com/codespaces/new?hide_repo_select=true&ref=main&repo=908607 +.. _VSCode docs: https://code.visualstudio.com/docs/getstarted/tips-and-tricks +.. _command shell, via an SSH connection initiated using GitHub CLI: https://docs.github.com/en/authentication/connecting-to-github-with-ssh +.. _one of the JetBrains IDEs, via the JetBrains Gateway: https://docs.github.com/en/codespaces/developing-in-codespaces/using-github-codespaces-in-your-jetbrains-ide +.. _the Visual Studio Code desktop application: https://docs.github.com/en/codespaces/developing-in-codespaces/using-github-codespaces-in-visual-studio-code +.. 
_a browser-based version of Visual Studio Code: https://docs.github.com/en/codespaces/developing-in-codespaces/developing-in-a-codespace diff --git a/doc/source/dev/development_workflow.rst b/doc/source/dev/development_workflow.rst index 1af4521482e0..10b07cc1f437 100644 --- a/doc/source/dev/development_workflow.rst +++ b/doc/source/dev/development_workflow.rst @@ -166,12 +166,14 @@ Standard acronyms to start the commit message with are:: BENCH: changes to the benchmark suite BLD: change related to building numpy BUG: bug fix + CI: continuous integration DEP: deprecate something, or remove a deprecated object DEV: development tool or utility DOC: documentation ENH: enhancement MAINT: maintenance commit (refactoring, typos, etc.) MNT: alias for MAINT + NEP: NumPy enhancement proposals REL: related to releasing numpy REV: revert an earlier commit STY: style fix (whitespace, PEP8) @@ -203,13 +205,6 @@ these fragments in each commit message of a PR: settings. `See the configuration files for these checks. `__ -* ``[skip azp]``: skip Azure jobs - - `Azure `__ is - where all comprehensive tests are run. This is an expensive run, and one you - could typically skip if you do documentation-only changes, for example. - `See the main configuration file for these checks. `__ - * ``[skip circle]``: skip CircleCI jobs `CircleCI `__ is where we build the documentation and @@ -228,7 +223,7 @@ these fragments in each commit message of a PR: Test building wheels ~~~~~~~~~~~~~~~~~~~~ -Numpy currently uses `cibuildwheel `_ +Numpy currently uses `cibuildwheel `_ in order to build wheels through continuous integration services. To save resources, the cibuildwheel wheel builders are not run by default on every single PR or commit to main. 
diff --git a/doc/source/dev/ghcodespaces-imgs/codespaces-codebutton.png b/doc/source/dev/ghcodespaces-imgs/codespaces-codebutton.png new file mode 100644 index 000000000000..da3e7e3bde2f Binary files /dev/null and b/doc/source/dev/ghcodespaces-imgs/codespaces-codebutton.png differ diff --git a/doc/source/dev/howto-docs.rst b/doc/source/dev/howto-docs.rst index 097456fad0b4..1eea77041740 100644 --- a/doc/source/dev/howto-docs.rst +++ b/doc/source/dev/howto-docs.rst @@ -79,7 +79,7 @@ ideas and feedback. If you want to alert us to a gap, If you're looking for subjects, our formal roadmap for documentation is a *NumPy Enhancement Proposal (NEP)*, -`NEP 44 - Restructuring the NumPy Documentation `__. +:ref:`NEP44`. It identifies areas where our docs need help and lists several additions we'd like to see, including :ref:`Jupyter notebooks `. diff --git a/doc/source/dev/index.rst b/doc/source/dev/index.rst index d2846f48b833..625e69712030 100644 --- a/doc/source/dev/index.rst +++ b/doc/source/dev/index.rst @@ -19,6 +19,21 @@ we list them in alphabetical order): - Website design and development - Writing technical documentation +We understand that everyone has a different level of experience, +also NumPy is a pretty well-established project, so it's hard to +make assumptions about an ideal "first-time-contributor". +So, that's why we don't mark issues with the "good-first-issue" +label. Instead, you'll find `issues labeled "Sprintable" `__. +These issues can either be: + +- **Easily fixed** when you have guidance from an experienced + contributor (perfect for working in a sprint). +- **A learning opportunity** for those ready to dive deeper, + even if you're not in a sprint. + +Additionally, depending on your prior experience, some "Sprintable" +issues might be easy, while others could be more challenging for you. + The rest of this document discusses working on the NumPy code base and documentation. 
We're in the process of updating our descriptions of other activities and roles. If you are interested in these other activities, please contact us! @@ -168,6 +183,8 @@ Guidelines * No changes are ever committed without review and approval by a core team member. Please ask politely on the PR or on the `mailing list`_ if you get no response to your pull request within a week. +* Do not include copyright notices in source code without explicitly discussing the need first. + In general, any code you contribute to the project is under the project `license `_. .. _stylistic-guidelines: @@ -175,8 +192,8 @@ Stylistic guidelines -------------------- * Set up your editor to follow `PEP 8 `_ (remove trailing white space, no tabs, etc.). Check code with - pyflakes / flake8. + pep-0008/>`_ (remove trailing white space, no tabs, etc.). Check code + with ruff. * Use NumPy data types instead of strings (``np.uint8`` instead of ``"uint8"``). @@ -238,9 +255,11 @@ The rest of the story :maxdepth: 2 development_environment + spin howto_build_docs development_workflow development_advanced_debugging + development_ghcodespaces reviewer_guidelines ../benchmarking NumPy C style guide diff --git a/doc/source/dev/internals.code-explanations.rst b/doc/source/dev/internals.code-explanations.rst index b1ee9b114aa8..1bb8f60528c1 100644 --- a/doc/source/dev/internals.code-explanations.rst +++ b/doc/source/dev/internals.code-explanations.rst @@ -401,7 +401,7 @@ Iterators for the output arguments are then processed. Finally, the decision is made about how to execute the looping mechanism to ensure that all elements of the input arrays are combined to produce the output arrays of the correct type. 
The options for loop -execution are one-loop (for :term`contiguous`, aligned, and correct data +execution are one-loop (for :term:`contiguous`, aligned, and correct data type), strided-loop (for non-contiguous but still aligned and correct data type), and a buffered loop (for misaligned or incorrect data type situations). Depending on which execution method is called for, diff --git a/doc/source/dev/internals.rst b/doc/source/dev/internals.rst index 439645c374c4..df31d8406ca4 100644 --- a/doc/source/dev/internals.rst +++ b/doc/source/dev/internals.rst @@ -6,10 +6,10 @@ Internal organization of NumPy arrays ************************************* -It helps to understand a bit about how NumPy arrays are handled under the covers -to help understand NumPy better. This section will not go into great detail. -Those wishing to understand the full details are requested to refer to Travis -Oliphant's book `Guide to NumPy `_. +It helps to learn a bit about how NumPy arrays are handled under the covers +to understand NumPy better. This section provides a brief explanation. +More details are available in Travis Oliphant's book +`Guide to NumPy `_. NumPy arrays consist of two major components: the raw array data (from now on, referred to as the data buffer), and the information about the raw array data. diff --git a/doc/source/dev/spin.rst b/doc/source/dev/spin.rst new file mode 100644 index 000000000000..c1a00c337c30 --- /dev/null +++ b/doc/source/dev/spin.rst @@ -0,0 +1,28 @@ +.. _spin_tool: + +Spin: NumPy’s developer tool +---------------------------- + +NumPy uses a command-line tool called ``spin`` to support common development +tasks such as building from source, running tests, building documentation, +and managing other +developer workflows. + +The ``spin`` tool provides a consistent interface for contributors working on +NumPy itself, wrapping multiple underlying tools and configurations into a +single command that follows NumPy’s development conventions. 
+Running the full test suite:: + + $ spin test -m full + +Running a subset of tests:: + + $ spin test -t numpy/_core/tests + +Running tests with coverage:: + + $ spin test --coverage + +Building the documentation:: + + $ spin docs \ No newline at end of file diff --git a/doc/source/doxyfile b/doc/source/doxyfile index ea45b9578309..60ab1058dbba 100644 --- a/doc/source/doxyfile +++ b/doc/source/doxyfile @@ -1,4 +1,4 @@ -# Doxyfile 1.8.18 +# Doxyfile 1.13.2 #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- @@ -11,7 +11,6 @@ OUTPUT_DIRECTORY = @ROOT_DIR/doc/build/doxygen CREATE_SUBDIRS = NO ALLOW_UNICODE_NAMES = NO OUTPUT_LANGUAGE = English -OUTPUT_TEXT_DIRECTION = None BRIEF_MEMBER_DESC = YES REPEAT_BRIEF = YES ABBREVIATE_BRIEF = "The $name class" \ @@ -145,15 +144,11 @@ REFERENCES_LINK_SOURCE = YES SOURCE_TOOLTIPS = YES USE_HTAGS = NO VERBATIM_HEADERS = YES -CLANG_ASSISTED_PARSING = NO -CLANG_OPTIONS = -CLANG_DATABASE_PATH = #--------------------------------------------------------------------------- # Configuration options related to the alphabetical class index #--------------------------------------------------------------------------- ALPHABETICAL_INDEX = YES COLS_IN_ALPHA_INDEX = 5 -IGNORE_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the HTML output #--------------------------------------------------------------------------- @@ -168,7 +163,6 @@ HTML_EXTRA_FILES = HTML_COLORSTYLE_HUE = 220 HTML_COLORSTYLE_SAT = 100 HTML_COLORSTYLE_GAMMA = 80 -HTML_TIMESTAMP = NO HTML_DYNAMIC_MENUS = YES HTML_DYNAMIC_SECTIONS = NO HTML_INDEX_NUM_ENTRIES = 100 @@ -201,7 +195,6 @@ TREEVIEW_WIDTH = 250 EXT_LINKS_IN_WINDOW = NO HTML_FORMULA_FORMAT = png FORMULA_FONTSIZE = 10 -FORMULA_TRANSPARENT = YES FORMULA_MACROFILE = USE_MATHJAX = NO MATHJAX_FORMAT = HTML-CSS @@ 
-234,9 +227,7 @@ PDF_HYPERLINKS = YES USE_PDFLATEX = YES LATEX_BATCHMODE = NO LATEX_HIDE_INDICES = NO -LATEX_SOURCE_CODE = NO LATEX_BIB_STYLE = plain -LATEX_TIMESTAMP = NO LATEX_EMOJI_DIRECTORY = #--------------------------------------------------------------------------- # Configuration options related to the RTF output @@ -247,7 +238,6 @@ COMPACT_RTF = NO RTF_HYPERLINKS = NO RTF_STYLESHEET_FILE = RTF_EXTENSIONS_FILE = -RTF_SOURCE_CODE = NO #--------------------------------------------------------------------------- # Configuration options related to the man page output #--------------------------------------------------------------------------- @@ -268,7 +258,6 @@ XML_NS_MEMB_FILE_SCOPE = NO #--------------------------------------------------------------------------- GENERATE_DOCBOOK = NO DOCBOOK_OUTPUT = docbook -DOCBOOK_PROGRAMLISTING = NO #--------------------------------------------------------------------------- # Configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- @@ -303,13 +292,10 @@ EXTERNAL_PAGES = YES #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- -CLASS_DIAGRAMS = YES DIA_PATH = HIDE_UNDOC_RELATIONS = YES HAVE_DOT = NO DOT_NUM_THREADS = 0 -DOT_FONTNAME = Helvetica -DOT_FONTSIZE = 10 DOT_FONTPATH = CLASS_GRAPH = YES COLLABORATION_GRAPH = YES @@ -334,7 +320,6 @@ PLANTUML_CFG_FILE = PLANTUML_INCLUDE_PATH = DOT_GRAPH_MAX_NODES = 50 MAX_DOT_GRAPH_DEPTH = 0 -DOT_TRANSPARENT = NO DOT_MULTI_TARGETS = NO GENERATE_LEGEND = YES DOT_CLEANUP = YES diff --git a/doc/source/f2py/buildtools/distutils-to-meson.rst b/doc/source/f2py/buildtools/distutils-to-meson.rst index bf5da973e9fa..920848b9d0d3 100644 --- a/doc/source/f2py/buildtools/distutils-to-meson.rst +++ b/doc/source/f2py/buildtools/distutils-to-meson.rst @@ -5,12 +5,11 @@ 
------------------------ As per the timeline laid out in :ref:`distutils-status-migration`, -``distutils`` has ceased to be the default build backend for ``f2py``. This page -collects common workflows in both formats. +``distutils`` has been removed. This page collects common workflows. .. note:: - This is a ****living**** document, `pull requests `_ are very welcome! + This is a **living** document, `pull requests `_ are very welcome! 1.1 Baseline ~~~~~~~~~~~~ @@ -44,8 +43,6 @@ This will not win any awards, but can be a reasonable starting point. 1.2.1 Basic Usage ^^^^^^^^^^^^^^^^^ -This is unchanged: - .. code:: bash python -m numpy.f2py -c fib.f90 -m fib @@ -57,46 +54,21 @@ This is unchanged: 1.2.2 Specify the backend ^^^^^^^^^^^^^^^^^^^^^^^^^ -.. tab-set:: - - .. tab-item:: Distutils - :sync: distutils - - .. code-block:: bash - - python -m numpy.f2py -c fib.f90 -m fib --backend distutils +.. code-block:: bash - This is the default for Python versions before 3.12. + python -m numpy.f2py -c fib.f90 -m fib - .. tab-item:: Meson - :sync: meson - - .. code-block:: bash - - python -m numpy.f2py -c fib.f90 -m fib --backend meson - - This is the only option for Python versions after 3.12. +This is the only option. There used to be a ``distutils`` backend but it was +removed in NumPy 2.5.0. 1.2.3 Pass a compiler name ^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. tab-set:: - - .. tab-item:: Distutils - :sync: distutils +.. code-block:: bash - .. code-block:: bash + FC=gfortran python -m numpy.f2py -c fib.f90 -m fib - python -m numpy.f2py -c fib.f90 -m fib --backend distutils --fcompiler=gfortran - - .. tab-item:: Meson - :sync: meson - - .. code-block:: bash - - FC="gfortran" python -m numpy.f2py -c fib.f90 -m fib --backend meson - - Native files can also be used. +Native files can also be used. Similarly, ``CC`` can be used in both cases to set the ``C`` compiler. 
Since the environment variables are generally pretty common across both, so a small @@ -117,14 +89,12 @@ sample is included below. +------------------------------------+-------------------------------+ | LDFLAGS | Linker options | +------------------------------------+-------------------------------+ - | LD\ :sub:`LIBRARY`\ \ :sub:`PATH`\ | Library file locations (Unix) | + | LD_LIBRARY_PATH | Library file locations (Unix) | +------------------------------------+-------------------------------+ | LIBS | Libraries to link against | +------------------------------------+-------------------------------+ | PATH | Search path for executables | +------------------------------------+-------------------------------+ - | LDFLAGS | Linker flags | - +------------------------------------+-------------------------------+ | CXX | C++ compiler | +------------------------------------+-------------------------------+ | CXXFLAGS | C++ compiler options | @@ -139,73 +109,31 @@ sample is included below. 1.2.4 Dependencies ^^^^^^^^^^^^^^^^^^ -Here, ``meson`` can actually be used to set dependencies more robustly. - -.. tab-set:: - - .. tab-item:: Distutils - :sync: distutils - - .. code-block:: bash +.. code-block:: bash - python -m numpy.f2py -c fib.f90 -m fib --backend distutils -llapack + python -m numpy.f2py -c fib.f90 -m fib --dep lapack - Note that this approach in practice is error prone. - - .. tab-item:: Meson - :sync: meson - - .. code-block:: bash - - python -m numpy.f2py -c fib.f90 -m fib --backend meson --dep lapack - - This maps to ``dependency("lapack")`` and so can be used for a wide variety - of dependencies. They can be `customized further `_ - to use CMake or other systems to resolve dependencies. +This maps to ``dependency("lapack")`` and so can be used for a wide variety +of dependencies. They can be `customized further `_ +to use CMake or other systems to resolve dependencies. 
1.2.5 Libraries ^^^^^^^^^^^^^^^ -Both ``meson`` and ``distutils`` are capable of linking against libraries. - -.. tab-set:: - - .. tab-item:: Distutils - :sync: distutils - - .. code-block:: bash - - python -m numpy.f2py -c fib.f90 -m fib --backend distutils -lmylib -L/path/to/mylib +``meson`` is capable of linking against libraries. - .. tab-item:: Meson - :sync: meson +.. code-block:: bash - .. code-block:: bash - - python -m numpy.f2py -c fib.f90 -m fib --backend meson -lmylib -L/path/to/mylib + python -m numpy.f2py -c fib.f90 -m fib -lmylib -L/path/to/mylib 1.3 Customizing builds ~~~~~~~~~~~~~~~~~~~~~~ -.. tab-set:: - - .. tab-item:: Distutils - :sync: distutils - - .. code-block:: bash - - python -m numpy.f2py -c fib.f90 -m fib --backend distutils --build-dir blah - - This can be technically integrated with other codes, see :ref:`f2py-distutils`. - - .. tab-item:: Meson - :sync: meson - - .. code-block:: bash +.. code-block:: bash - python -m numpy.f2py -c fib.f90 -m fib --backend meson --build-dir blah + python -m numpy.f2py -c fib.f90 -m fib --build-dir blah - The resulting build can be customized via the - `Meson Build How-To Guide `_. - In fact, the resulting set of files can even be committed directly and used - as a meson subproject in a separate codebase. +The resulting build can be customized via the +`Meson Build How-To Guide `_. +In fact, the resulting set of files can even be committed directly and used +as a meson subproject in a separate codebase. diff --git a/doc/source/f2py/buildtools/distutils.rst b/doc/source/f2py/buildtools/distutils.rst deleted file mode 100644 index 87e17a811cd0..000000000000 --- a/doc/source/f2py/buildtools/distutils.rst +++ /dev/null @@ -1,84 +0,0 @@ -.. _f2py-distutils: - -============================= -Using via `numpy.distutils` -============================= - -.. legacy:: - - ``distutils`` has been removed in favor of ``meson`` see - :ref:`distutils-status-migration`. - - -.. 
currentmodule:: numpy.distutils.core - -:mod:`numpy.distutils` is part of NumPy, and extends the standard Python -``distutils`` module to deal with Fortran sources and F2PY signature files, e.g. -compile Fortran sources, call F2PY to construct extension modules, etc. - -.. topic:: Example - - Consider the following ``setup_file.py`` for the ``fib`` and ``scalar`` - examples from :ref:`f2py-getting-started` section: - - .. literalinclude:: ./../code/setup_example.py - :language: python - - Running - - .. code-block:: bash - - python setup_example.py build - - will build two extension modules ``scalar`` and ``fib2`` to the - build directory. - -Extensions to ``distutils`` -=========================== - -:mod:`numpy.distutils` extends ``distutils`` with the following features: - -* :class:`Extension` class argument ``sources`` may contain Fortran source - files. In addition, the list ``sources`` may contain at most one - F2PY signature file, and in this case, the name of an Extension module must - match with the ```` used in signature file. It is - assumed that an F2PY signature file contains exactly one ``python - module`` block. - - If ``sources`` do not contain a signature file, then F2PY is used to scan - Fortran source files to construct wrappers to the Fortran codes. - - Additional options to the F2PY executable can be given using the - :class:`Extension` class argument ``f2py_options``. - -* The following new ``distutils`` commands are defined: - - ``build_src`` - to construct Fortran wrapper extension modules, among many other things. - ``config_fc`` - to change Fortran compiler options. - - Additionally, the ``build_ext`` and ``build_clib`` commands are also enhanced - to support Fortran sources. - - Run - - .. code-block:: bash - - python config_fc build_src build_ext --help - - to see available options for these commands. 
- -* When building Python packages containing Fortran sources, one - can choose different Fortran compilers by using the ``build_ext`` - command option ``--fcompiler=``. Here ```` can be one of the - following names (on ``linux`` systems):: - - absoft compaq fujitsu g95 gnu gnu95 intel intele intelem lahey nag nagfor nv pathf95 pg vast - - See ``numpy_distutils/fcompiler.py`` for an up-to-date list of - supported compilers for different platforms, or run - - .. code-block:: bash - - python -m numpy.f2py -c --backend distutils --help-fcompiler diff --git a/doc/source/f2py/buildtools/index.rst b/doc/source/f2py/buildtools/index.rst index 37782e5ca74b..671fd5b6d2cf 100644 --- a/doc/source/f2py/buildtools/index.rst +++ b/doc/source/f2py/buildtools/index.rst @@ -11,7 +11,7 @@ with ``f2py``. The default build system for ``f2py`` has traditionally been through the enhanced ``numpy.distutils`` module. This module is based on ``distutils`` - which was removed in ``Python 3.12.0`` in **October 2023**. Like the rest of + which was removed in ``NumPy 2.5.0`` in **June 2026**. Like the rest of NumPy and SciPy, ``f2py`` uses ``meson`` now, see :ref:`distutils-status-migration` for some more details. @@ -107,7 +107,6 @@ Build systems .. toctree:: :maxdepth: 2 - distutils meson cmake skbuild diff --git a/doc/source/f2py/buildtools/meson.rst b/doc/source/f2py/buildtools/meson.rst index c17c5d2ddc87..44560bef8c5f 100644 --- a/doc/source/f2py/buildtools/meson.rst +++ b/doc/source/f2py/buildtools/meson.rst @@ -15,11 +15,6 @@ Using via ``meson`` The default build system for ``f2py`` is now ``meson``, see :ref:`distutils-status-migration` for some more details.. -The key advantage gained by leveraging ``meson`` over the techniques described -in :ref:`f2py-distutils` is that this feeds into existing systems and larger -projects with ease. ``meson`` has a rather pythonic syntax which makes it more -comfortable and amenable to extension for ``python`` users. 
- Fibonacci walkthrough (F77) =========================== diff --git a/doc/source/f2py/code/setup_example.py b/doc/source/f2py/code/setup_example.py deleted file mode 100644 index 479acc004d60..000000000000 --- a/doc/source/f2py/code/setup_example.py +++ /dev/null @@ -1,16 +0,0 @@ -from numpy.distutils.core import Extension - -ext1 = Extension(name = 'scalar', - sources = ['scalar.f']) -ext2 = Extension(name = 'fib2', - sources = ['fib2.pyf', 'fib1.f']) - -if __name__ == "__main__": - from numpy.distutils.core import setup - setup(name = 'f2py_example', - description = "F2PY Users Guide examples", - author = "Pearu Peterson", - author_email = "pearu@cens.ioc.ee", - ext_modules = [ext1, ext2] - ) -# End of setup_example.py diff --git a/doc/source/f2py/code/var.pyf b/doc/source/f2py/code/var.pyf index 8275ff3afe21..b7c080682a62 100644 --- a/doc/source/f2py/code/var.pyf +++ b/doc/source/f2py/code/var.pyf @@ -5,7 +5,7 @@ python module var ''' interface usercode ''' - PyDict_SetItemString(d,"BAR",PyInt_FromLong(BAR)); + PyDict_SetItemString(d,"BAR",PyLong_FromLong(BAR)); ''' end interface end python module diff --git a/doc/source/f2py/f2py-examples.rst b/doc/source/f2py/f2py-examples.rst index 6a580b19cd68..ea9366ff6e65 100644 --- a/doc/source/f2py/f2py-examples.rst +++ b/doc/source/f2py/f2py-examples.rst @@ -241,7 +241,6 @@ Read more * `Wrapping C codes using f2py `_ * `F2py section on the SciPy Cookbook `_ -* `F2py example: Interactive System for Ice sheet Simulation `_ * `"Interfacing With Other Languages" section on the SciPy Cookbook. `_ diff --git a/doc/source/f2py/f2py-testing.rst b/doc/source/f2py/f2py-testing.rst index 945b4ccaa338..687b414975ee 100644 --- a/doc/source/f2py/f2py-testing.rst +++ b/doc/source/f2py/f2py-testing.rst @@ -45,11 +45,22 @@ class present in ``util.py``. This class many helper functions for parsing and compiling test source files. Its child classes can override its ``sources`` data member to provide their own source files. 
-This superclass will then compile the added source files upon object creation andtheir +This superclass will then compile the added source files upon object creation and their functions will be appended to ``self.module`` data member. Thus, the child classes will be able to access the fortran functions specified in source file by calling ``self.module.[fortran_function_name]``. +.. versionadded:: v2.0.0b1 + +Each of the ``f2py`` tests should run without failure if no Fortran compilers +are present on the host machine. To facilitate this, the ``CompilerChecker`` is +used, essentially providing a ``meson`` dependent set of utilities namely +``has_{c,f77,f90,fortran}_compiler()``. + +For the CLI tests in ``test_f2py2e``, flags which are expected to call ``meson`` +or otherwise depend on a compiler need to call ``compiler_check_f2pycli()`` +instead of ``f2pycli()``. + Example ~~~~~~~ @@ -77,4 +88,4 @@ A test can be implemented as follows:: We override the ``sources`` data member to provide the source file. The source files are compiled and subroutines are attached to module data member when the class object -is created. The ``test_module`` function calls the subroutines and tests their results. \ No newline at end of file +is created. The ``test_module`` function calls the subroutines and tests their results. diff --git a/doc/source/f2py/f2py.getting-started.rst b/doc/source/f2py/f2py.getting-started.rst index dd1349979a39..e5df85b93a75 100644 --- a/doc/source/f2py/f2py.getting-started.rst +++ b/doc/source/f2py/f2py.getting-started.rst @@ -22,15 +22,12 @@ following steps: * F2PY compiles all sources and builds an extension module containing the wrappers. - * In building the extension modules, F2PY uses ``meson`` and used to use - ``numpy.distutils`` For different build systems, see :ref:`f2py-bldsys`. + * In building the extension modules, F2PY uses ``meson``. For different + build systems, see :ref:`f2py-bldsys`. .. 
note:: - See :ref:`f2py-meson-distutils` for migration information. - - * Depending on your operating system, you may need to install the Python development headers (which provide the file ``Python.h``) separately. In Linux Debian-based distributions this package should be called ``python3-dev``, @@ -157,9 +154,8 @@ Fortran subroutine ``FIB`` is accessible via ``fib1.fib``:: Clearly, this is unexpected, as Fortran typically passes by reference. That the above example worked with ``dtype=float`` is considered accidental. - F2PY provides an ``intent(inplace)`` attribute that modifies the attributes - of an input array so that any changes made by the Fortran routine will be - reflected in the input argument. For example, if one specifies the + F2PY provides an ``intent(inplace)`` attribute that ensures that changes + are copied back to the input argument. For example, if one specifies the ``intent(inplace) a`` directive (see :ref:`f2py-attributes` for details), then the example above would read:: @@ -224,7 +220,7 @@ Fortran code, we can apply the wrapping steps one by one. .. literalinclude:: ./code/fib2.pyf :language: fortran -* Finally, we build the extension module with ``numpy.distutils`` by running: +* Finally, we build the extension module by running: :: @@ -308,4 +304,4 @@ the previous case:: >>> print(fib3.fib(8)) [ 0. 1. 1. 2. 3. 5. 8. 13.] -.. _`system dependencies panel`: http://scipy.github.io/devdocs/building/index.html#system-level-dependencies +.. _`system dependencies panel`: https://scipy.github.io/devdocs/building/index.html#system-level-dependencies diff --git a/doc/source/f2py/index.rst b/doc/source/f2py/index.rst index b5cfb168073a..46f1de0212d6 100644 --- a/doc/source/f2py/index.rst +++ b/doc/source/f2py/index.rst @@ -45,6 +45,8 @@ end matches the NumPy version printed from ``python -m numpy.f2py``, then you can use the shorter version. 
If not, or if you cannot run ``f2py``, you should replace all calls to ``f2py`` mentioned in this guide with the longer version. +For Meson build examples, see :doc:`usage`. + .. toctree:: :maxdepth: 3 diff --git a/doc/source/f2py/python-usage.rst b/doc/source/f2py/python-usage.rst index 54f74f02b6bf..ecffd695e05a 100644 --- a/doc/source/f2py/python-usage.rst +++ b/doc/source/f2py/python-usage.rst @@ -115,8 +115,9 @@ two notable exceptions: * ``intent(inout)`` array arguments must always be :term:`proper-contiguous ` and have a compatible ``dtype``, otherwise an exception is raised. -* ``intent(inplace)`` array arguments will be changed *in situ* if the argument - has a different type than expected (see the ``intent(inplace)`` +* ``intent(inplace)`` array arguments must be arrays. If these have + incompatible order or size, a converted copy is passed in, which is + copied back into the original array on exit (see the ``intent(inplace)`` :ref:`attribute ` for more information). In general, if a NumPy array is :term:`proper-contiguous ` and has @@ -243,6 +244,13 @@ In Python: .. literalinclude:: ./code/results/extcallback_session.dat :language: python +.. note:: + + When using modified Fortran code via ``callstatement`` or other directives, + the wrapped Python function must be called as a callback, otherwise only the + bare Fortran routine will be used. For more details, see + https://github.com/numpy/numpy/issues/26681#issuecomment-2466460943 + Resolving arguments to call-back functions ------------------------------------------ diff --git a/doc/source/f2py/signature-file.rst b/doc/source/f2py/signature-file.rst index ba370d73582b..3ac47b113745 100644 --- a/doc/source/f2py/signature-file.rst +++ b/doc/source/f2py/signature-file.rst @@ -392,18 +392,15 @@ The following attributes can be used by F2PY. * ``inplace`` The corresponding argument is considered to be an input/output or *in situ* output argument. 
``intent(inplace)`` arguments must be NumPy arrays of a proper - size. If the type of an array is not "proper" or the array is - non-contiguous then the array will be modified in-place to fix the type and - make it contiguous. + size. If the size of an array is not "proper" or the array is + non-contiguous then the routine will be passed a fixed copy of array, + which has the :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` flag set, so that the + result will be copied back to the original array on exit. .. note:: - Using ``intent(inplace)`` is generally not recommended either. - - For example, when slices have been taken from an ``intent(inplace)`` argument - then after in-place changes, the data pointers for the slices may point to - an unallocated memory area. - + Since copies may be made, ``intent(inplace)`` can be slower than expected. + It is recommended over ``inout``, but not over ``in,out``. * ``out`` The corresponding argument is considered to be a return variable. It is appended to the diff --git a/doc/source/f2py/usage.rst b/doc/source/f2py/usage.rst index 859a2c38be5f..a1fd38d57b9d 100644 --- a/doc/source/f2py/usage.rst +++ b/doc/source/f2py/usage.rst @@ -86,6 +86,13 @@ Here ```` may also contain signature files. Among other options ``--wrap-functions`` is default because it ensures maximum portability and compiler independence. +``--[no-]freethreading-compatible`` + Create a module that declares it does or doesn't require the GIL. The default + is ``--no-freethreading-compatible`` for backwards compatibility. Inspect the + fortran code you are wrapping for thread safety issues before passing + ``--freethreading-compatible``, as ``f2py`` does not analyze fortran code for + thread safety issues. + ``--include-paths ":..."`` Search include files from given directories. @@ -94,10 +101,6 @@ Here ```` may also contain signature files. Among other options and ``;`` on Windows. In ``CMake`` this corresponds to using ``$``. 
-``--help-link []`` - List system resources found by ``numpy_distutils/system_info.py``. For - example, try ``f2py --help-link lapack_opt``. - 3. Building a module ~~~~~~~~~~~~~~~~~~~~ @@ -120,7 +123,7 @@ module is constructed by scanning all Fortran source codes for routine signatures, before proceeding to build the extension module. .. warning:: - From Python 3.12 onwards, ``distutils`` has been removed. Use environment + ``distutils`` has been removed. Use environment variables or native files to interact with ``meson`` instead. See its `FAQ `__ for more information. @@ -128,17 +131,13 @@ Among other options (see below) and options described for previous modes, the fo .. note:: - .. versionchanged:: 1.26.0 - There are now two separate build backends which can be used, ``distutils`` - and ``meson``. Users are **strongly** recommended to switch to ``meson`` - since it is the default above Python ``3.12``. + .. versionchanged:: 2.5.0 + The ``distutils`` backend has been removed. Common build flags: ``--backend `` - Specify the build backend for the compilation process. The supported backends - are ``meson`` and ``distutils``. If not specified, defaults to ``distutils``. - On Python 3.12 or higher, the default is ``meson``. + Legacy option, only ``meson`` is supported. ``--f77flags=`` Specify F77 compiler flags ``--f90flags=`` @@ -158,39 +157,13 @@ Common build flags: Add directory ```` to the list of directories to be searched for ``-l``. -The ``meson`` specific flags are: - -``--dep `` **meson only** +``--dep `` Specify a meson dependency for the module. This may be passed multiple times for multiple dependencies. Dependencies are stored in a list for further processing. Example: ``--dep lapack --dep scalapack`` This will identify "lapack" and "scalapack" as dependencies and remove them from argv, leaving a dependencies list containing ["lapack", "scalapack"]. 
-The older ``distutils`` flags are: - -``--help-fcompiler`` **no meson** - List the available Fortran compilers. -``--fcompiler=`` **no meson** - Specify a Fortran compiler type by vendor. -``--f77exec=`` **no meson** - Specify the path to a F77 compiler -``--f90exec=`` **no meson** - Specify the path to a F90 compiler -``--opt=`` **no meson** - Specify optimization flags -``--arch=`` **no meson** - Specify architecture specific optimization flags -``--noopt`` **no meson** - Compile without optimization flags -``--noarch`` **no meson** - Compile without arch-dependent optimization flags -``link-`` **no meson** - Link the extension module with as defined by - ``numpy_distutils/system_info.py``. E.g. to link with optimized LAPACK - libraries (vecLib on MacOSX, ATLAS elsewhere), use ``--link-lapack_opt``. - See also ``--help-link`` switch. - .. note:: The ``f2py -c`` option must be applied either to an existing ``.pyf`` file @@ -288,35 +261,64 @@ When using ``numpy.f2py`` as a module, the following functions can be invoked. .. automodule:: numpy.f2py :members: -Automatic extension module generation -===================================== +Building with Meson (Examples) +============================== + +Using f2py with Meson +~~~~~~~~~~~~~~~~~~~~~ + +Meson is a modern build system recommended for building Python extension +modules, especially starting with Python 3.12 and NumPy 2.x. Meson provides +a robust and maintainable way to build Fortran extensions with f2py. + +To build a Fortran extension using f2py and Meson, you can use Meson's +``custom_target`` to invoke f2py and generate the extension module. The +following minimal example demonstrates how to do this: + +This example shows how to build the ``add`` extension from the ``add.f`` and ``add.pyf`` +files described in the :ref:`f2py-examples` (note that you do not always need +a ``.pyf`` file: in many cases ``f2py`` can figure out the annotations by itself). 
+ +Project layout:: + + f2py_examples/ + meson.build + add.f + add.pyf (optional) + __init__.py (can be empty) + +Example ``meson.build``: + +.. code-block:: meson -If you want to distribute your f2py extension module, then you only -need to include the .pyf file and the Fortran code. The distutils -extensions in NumPy allow you to define an extension module entirely -in terms of this interface file. A valid ``setup.py`` file allowing -distribution of the ``add.f`` module (as part of the package -``f2py_examples`` so that it would be loaded as ``f2py_examples.add``) is: + project('f2py_examples', 'fortran') -.. code-block:: python + py = import('python').find_installation() - def configuration(parent_package='', top_path=None) - from numpy.distutils.misc_util import Configuration - config = Configuration('f2py_examples',parent_package, top_path) - config.add_extension('add', sources=['add.pyf','add.f']) - return config + # List your Fortran source files + sources = files('add.pyf', 'add.f') - if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) + # Build the extension by invoking f2py via a custom target + add_mod = custom_target( + 'add_extension', + input: sources, + output: ['add' + py.extension_suffix()], + command: [ + py.full_path(), '-m', 'numpy.f2py', + '-c', 'add.pyf', 'add.f', + '-m', 'add' + ], + build_by_default: true + ) -Installation of the new package is easy using:: + # Install into site-packages under the f2py_examples package + install_subdir('.', install_dir: join_paths(py.site_packages_dir(), 'f2py_examples'), + strip_directory: false, + exclude_files: ['meson.build']) - pip install . + # Also install the built extension (place it beside __init__.py) + install_data(add_mod, install_dir: join_paths(py.site_packages_dir(), 'f2py_examples')) -assuming you have the proper permissions to write to the main site- -packages directory for the version of Python you are using. 
For the -resulting package to work, you need to create a file named ``__init__.py`` -(in the same directory as ``add.pyf``). Notice the extension module is -defined entirely in terms of the ``add.pyf`` and ``add.f`` files. The -conversion of the .pyf file to a .c file is handled by `numpy.distutils`. +For more details and advanced usage, see the Meson build guide in the +user documentation or refer to SciPy's Meson build files for real-world +examples: https://github.com/scipy/scipy/tree/main/meson.build diff --git a/doc/source/f2py/windows/index.rst b/doc/source/f2py/windows/index.rst index 8d95c4bbce46..aa7851da5dd2 100644 --- a/doc/source/f2py/windows/index.rst +++ b/doc/source/f2py/windows/index.rst @@ -9,7 +9,7 @@ F2PY and Windows F2PY support for Windows is not always at par with Linux support .. note:: - `ScPy's documentation`_ has some information on system-level dependencies + `SciPy's documentation`_ has some information on system-level dependencies which are well tested for Fortran as well. Broadly speaking, there are two issues working with F2PY on Windows: @@ -71,12 +71,6 @@ Cygwin (FOSS) Cygwin is meant to compile UNIX software on Windows, instead of building native Windows programs. This means cross compilation is required. -The compilation suites described so far are compatible with the `now -deprecated`_ ``np.distutils`` build backend which is exposed by the F2PY CLI. -Additional build system usage (``meson``, ``cmake``) as described in -:ref:`f2py-bldsys` allows for a more flexible set of compiler -backends including: - Intel oneAPI The newer Intel compilers (``ifx``, ``icx``) are based on LLVM and can be used for native compilation. Licensing requirements can be onerous. @@ -217,4 +211,4 @@ path using a hash. This needs to be added to the ``PATH`` variable. .. _are outdated: https://github.com/conda-forge/conda-forge.github.io/issues/1044 .. _now deprecated: https://github.com/numpy/numpy/pull/20875 .. 
_LLVM Flang: https://releases.llvm.org/11.0.0/tools/flang/docs/ReleaseNotes.html -.. _ScPy's documentation: http://scipy.github.io/devdocs/building/index.html#system-level-dependencies +.. _SciPy's documentation: https://scipy.github.io/devdocs/building/index.html#system-level-dependencies diff --git a/doc/source/f2py/windows/intel.rst b/doc/source/f2py/windows/intel.rst index ab0cea219e70..c28b27d4bffe 100644 --- a/doc/source/f2py/windows/intel.rst +++ b/doc/source/f2py/windows/intel.rst @@ -52,6 +52,6 @@ Powershell usage is a little less pleasant, and this configuration now works wit Note that the actual path to your local installation of `ifort` may vary, and the command above will need to be updated accordingly. .. _have been relaxed: https://www.intel.com/content/www/us/en/developer/articles/release-notes/oneapi-fortran-compiler-release-notes.html -.. _disassembly of components and liability: https://software.sintel.com/content/www/us/en/develop/articles/end-user-license-agreement.html +.. _disassembly of components and liability: https://www.intel.com/content/www/us/en/developer/articles/license/end-user-license-agreement.html .. _Intel Fortran Compilers: https://www.intel.com/content/www/us/en/developer/articles/tool/oneapi-standalone-components.html#inpage-nav-6-1 -.. _Classic Intel C/C++ Compiler: https://www.intel.com/content/www/us/en/developer/articles/tool/oneapi-standalone-components.html#inpage-nav-6-undefined \ No newline at end of file +.. _Classic Intel C/C++ Compiler: https://www.intel.com/content/www/us/en/developer/articles/tool/oneapi-standalone-components.html#inpage-nav-6-undefined diff --git a/doc/source/glossary.rst b/doc/source/glossary.rst index ae2ab6ea4247..838963eb9ccc 100644 --- a/doc/source/glossary.rst +++ b/doc/source/glossary.rst @@ -472,7 +472,8 @@ Glossary Strides are computed automatically from an array's dtype and shape, but can be directly specified using - :doc:`as_strided. ` + :doc:`as_strided `. 
For details, see :doc:`numpy.ndarray.strides `. diff --git a/doc/source/index.rst b/doc/source/index.rst index 02f3a8dc12b0..00d1bb62e6b3 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -21,6 +21,7 @@ NumPy documentation `Historical versions of documentation `_ **Useful links**: +`Home `_ | `Installation `_ | `Source Repository `_ | `Issue Tracker `_ | diff --git a/doc/source/jupyter_lite_config.json b/doc/source/jupyter_lite_config.json new file mode 100644 index 000000000000..6b25be20912a --- /dev/null +++ b/doc/source/jupyter_lite_config.json @@ -0,0 +1,5 @@ +{ + "LiteBuildConfig": { + "no_sourcemaps": true + } +} diff --git a/doc/source/numpy_2_0_migration_guide.rst b/doc/source/numpy_2_0_migration_guide.rst index 7de294bb8d86..3155bd2d78d4 100644 --- a/doc/source/numpy_2_0_migration_guide.rst +++ b/doc/source/numpy_2_0_migration_guide.rst @@ -23,7 +23,7 @@ guide can be automatically adapted in downstream code with a dedicated `Ruff `__ rule, namely rule `NPY201 `__. -You should install ``ruff>=0.2.0`` and add the ``NPY201`` rule to your +You should install ``ruff>=0.4.8`` and add the ``NPY201`` rule to your ``pyproject.toml``:: [tool.ruff.lint] @@ -149,8 +149,8 @@ Please do not hesitate to open a NumPy issue, if you require assistance or the provided functions are not sufficient. **Custom User DTypes:** -Existing user dtypes must now use ``PyArray_DescrProto`` to define their -dtype and slightly modify the code. See note in `PyArray_RegisterDataType`. +Existing user dtypes must now use :c:type:`PyArray_DescrProto` to define +their dtype and slightly modify the code. See note in :c:func:`PyArray_RegisterDataType`. Functionality moved to headers requiring ``import_array()`` ----------------------------------------------------------- @@ -202,24 +202,37 @@ native C99 types.
While the memory layout of those types remains identical to the types used in NumPy 1.x, the API is slightly different, since direct field access (like ``c.real`` or ``c.imag``) is no longer possible. -It is recommended to use the functions `npy_creal` and `npy_cimag` (and the -corresponding float and long double variants) to retrieve +It is recommended to use the functions ``npy_creal`` and ``npy_cimag`` +(and the corresponding float and long double variants) to retrieve the real or imaginary part of a complex number, as these will work with both -NumPy 1.x and with NumPy 2.x. New functions `npy_csetreal` and `npy_csetimag`, -along with compatibility macros `NPY_CSETREAL` and `NPY_CSETIMAG` (and the -corresponding float and long double variants), have been -added for setting the real or imaginary part. +NumPy 1.x and with NumPy 2.x. New functions ``npy_csetreal`` and +``npy_csetimag``, along with compatibility macros ``NPY_CSETREAL`` and +``NPY_CSETIMAG`` (and the corresponding float and long double variants), +have been added for setting the real or imaginary part. The underlying type remains a struct under C++ (all of the above still remains valid). -This has implications for Cython. It is recommened to always use the native +This has implications for Cython. It is recommended to always use the native typedefs ``cfloat_t``, ``cdouble_t``, ``clongdouble_t`` rather than the NumPy types ``npy_cfloat``, etc, unless you have to interface with C code written using the NumPy types. You can still write cython code using the ``c.real`` and ``c.imag`` attributes (using the native typedefs), but you can no longer use in-place operators ``c.imag += 1`` in Cython's c++ mode. +Because NumPy 2 now includes ``complex.h``, code that uses a variable named +``I`` may see an error such as + +.. code-block:: c + error: expected ‘)’ before ‘__extension__’ + double I, + +to use the name ``I`` requires an ``#undef I`` now. + +.. 
note:: + NumPy 2.0.1 briefly included the ``#undef I`` to help users not already + including ``complex.h``. + Changes to namespaces ===================== @@ -231,7 +244,7 @@ private. Please see the tables below for guidance on migration. For most changes this means replacing it with a backwards compatible alternative. -Please refer to `NEP 52 `_ for more details. +Please refer to :ref:`NEP52` for more details. Main namespace -------------- @@ -246,12 +259,14 @@ removed member migration guideline add_docstring It's still available as ``np.lib.add_docstring``. add_newdoc It's still available as ``np.lib.add_newdoc``. add_newdoc_ufunc It's an internal function and doesn't have a replacement. -alltrue Use ``all`` instead. +alltrue Use ``np.all`` instead. asfarray Use ``np.asarray`` with a float dtype instead. byte_bounds Now it's available under ``np.lib.array_utils.byte_bounds`` cast Use ``np.asarray(arr, dtype=dtype)`` instead. cfloat Use ``np.complex128`` instead. +chararray It's still available as ``np.char.chararray``. clongfloat Use ``np.clongdouble`` instead. +compare_chararrays It's still available as ``np.char.compare_chararrays``. compat There's no replacement, as Python 2 is no longer supported. complex\_ Use ``np.complex128`` instead. cumproduct Use ``np.cumprod`` instead. @@ -266,6 +281,7 @@ find_common_type Use ``numpy.promote_types`` or ``numpy.result_type`` ins To achieve semantics for the ``scalar_types`` argument, use ``numpy.result_type`` and pass the Python values ``0``, ``0.0``, or ``0j``. +format_parser It's still available as ``np.rec.format_parser``. get_array_wrap float\_ Use ``np.float64`` instead. geterrobj Use the np.errstate context manager instead. @@ -304,7 +320,7 @@ set_string_function Use ``np.set_printoptions`` instead with a formatter for custom printing of NumPy objects. singlecomplex Use ``np.complex64`` instead. string\_ Use ``np.bytes_`` instead. -sometrue Use ``any`` instead. +sometrue Use ``np.any`` instead. 
source Use ``inspect.getsource`` instead. tracemalloc_domain It's now available from ``np.lib``. unicode\_ Use ``np.str_`` instead. @@ -343,7 +359,7 @@ namespace, which is their primary location. To make it unambiguous how to access public function, ``np.lib`` is now empty and contains only a handful of specialized submodules, classes and functions: -- ``array_utils``, ``format``, ``introspect``, ``mixins``, ``npyio`` +- ``array_utils``, ``format``, ``introspect``, ``mixins``, ``npyio``, ``scimath`` and ``stride_tricks`` submodules, - ``Arrayterator`` and ``NumpyVersion`` classes, @@ -382,7 +398,6 @@ expired member migration guideline newbyteorder Use ``arr.view(arr.dtype.newbyteorder(order))`` instead. ptp Use ``np.ptp(arr, ...)`` instead. setitem Use ``arr[index] = value`` instead. -... ... ====================== ======================================================== diff --git a/doc/source/reference/array_api.rst b/doc/source/reference/array_api.rst index 6f130efc8ca8..66a607f12286 100644 --- a/doc/source/reference/array_api.rst +++ b/doc/source/reference/array_api.rst @@ -4,21 +4,19 @@ Array API standard compatibility ******************************** -NumPy's main namespace as well as the `numpy.fft` and `numpy.linalg` namespaces -are compatible [1]_ with the -`2022.12 version `__ +The NumPy 2.3.0 main namespace as well as the `numpy.fft` and `numpy.linalg` +namespaces are compatible with the +`2024.12 version `__ of the Python array API standard. -NumPy aims to implement support for the -`2023.12 version `__ -and future versions of the standard - assuming that those future versions can be -upgraded to given NumPy's -`backwards compatibility policy `__. +NumPy aims to implement support for the future versions of the standard +- assuming that those future versions can be upgraded to given NumPy's +:ref:`backwards compatibility policy `. 
For usage guidelines for downstream libraries and end users who want to write code that will work with both NumPy and other array libraries, we refer to the documentation of the array API standard itself and to code and -developer-focused documention in SciPy and scikit-learn. +developer-focused documentation in SciPy and scikit-learn. Note that in order to use standard-complaint code with older NumPy versions (< 2.0), the `array-api-compat @@ -32,9 +30,10 @@ rather than anything NumPy-specific, the `array-api-strict NumPy 1.22.0 was the first version to include support for the array API standard, via a separate ``numpy.array_api`` submodule. This module was marked as experimental (it emitted a warning on import) and removed in - NumPy 2.0 because full support was included in the main namespace. - `NEP 47 `__ and - `NEP 56 `__ + NumPy 2.0 because full support (2022.12 version [1]_) was included in + the main namespace. + :ref:`NEP 47 ` and + :ref:`NEP 56 ` describe the motivation and scope for implementing the array API standard in NumPy. @@ -57,7 +56,7 @@ an entry point. .. rubric:: Footnotes .. [1] With a few very minor exceptions, as documented in - `NEP 56 `__. + :ref:`NEP 56 `. The ``sum``, ``prod`` and ``trace`` behavior adheres to the 2023.12 version instead, as do function signatures; the only known incompatibility that may remain is that the standard forbids unsafe casts for in-place operators diff --git a/doc/source/reference/arrays.classes.rst b/doc/source/reference/arrays.classes.rst index 04bced806587..d38043661a52 100644 --- a/doc/source/reference/arrays.classes.rst +++ b/doc/source/reference/arrays.classes.rst @@ -32,7 +32,7 @@ Note that :func:`asarray` always returns the base-class ndarray. If you are confident that your use of the array object can handle any subclass of an ndarray, then :func:`asanyarray` can be used to allow subclasses to propagate more cleanly through your subroutine. 
In -principal a subclass could redefine any aspect of the array and +principle, a subclass could redefine any aspect of the array and therefore, under strict guidelines, :func:`asanyarray` would rarely be useful. However, most subclasses of the array object will not redefine certain aspects of the array object such as the buffer @@ -52,8 +52,6 @@ NumPy provides several hooks that classes can customize: .. py:method:: class.__array_ufunc__(ufunc, method, *inputs, **kwargs) - .. versionadded:: 1.13 - Any class, ndarray subclass or not, can define this method or set it to None in order to override the behavior of NumPy's ufuncs. This works quite similarly to Python's ``__mul__`` and other binary operation routines. @@ -156,8 +154,6 @@ NumPy provides several hooks that classes can customize: .. py:method:: class.__array_function__(func, types, args, kwargs) - .. versionadded:: 1.16 - - ``func`` is an arbitrary callable exposed by NumPy's public API, which was called in the form ``func(*args, **kwargs)``. - ``types`` is a collection :py:class:`collections.abc.Collection` @@ -292,7 +288,7 @@ NumPy provides several hooks that classes can customize: .. note:: It is hoped to eventually deprecate this method in favour of - func:`__array_ufunc__` for ufuncs (and :func:`__array_function__` + :func:`__array_ufunc__` for ufuncs (and :func:`__array_function__` for a few other functions like :func:`numpy.squeeze`). .. py:attribute:: class.__array_priority__ @@ -307,19 +303,31 @@ NumPy provides several hooks that classes can customize: .. py:method:: class.__array__(dtype=None, copy=None) - If defined on an object, should return an ``ndarray``. - This method is called by array-coercion functions like np.array() + If defined on an object, it must return a NumPy ``ndarray``. + This method is called by array-coercion functions like ``np.array()`` if an object implementing this interface is passed to those functions. 
- The third-party implementations of ``__array__`` must take ``dtype`` and - ``copy`` keyword arguments, as ignoring them might break third-party code - or NumPy itself. - - ``dtype`` is a data type of the returned array. - - ``copy`` is an optional boolean that indicates whether a copy should be - returned. For ``True`` a copy should always be made, for ``None`` only - if required (e.g. due to passed ``dtype`` value), and for ``False`` a copy - should never be made (if a copy is still required, an appropriate exception - should be raised). + Third-party implementations of ``__array__`` must take ``dtype`` and + ``copy`` arguments. + + .. deprecated:: NumPy 2.0 + Not implementing ``copy`` and ``dtype`` is deprecated as of NumPy 2. + When adding them, you must ensure correct behavior for ``copy``. + + - ``dtype`` is the requested data type of the returned array and is passed + by NumPy positionally (only if requested by the user). + It is acceptable to ignore the ``dtype`` because NumPy will check the + result and cast to ``dtype`` if necessary. If it is more efficient to + coerce the data to the requested dtype without relying on NumPy, + you should handle it in your library. + - ``copy`` is a boolean passed by keyword. If ``copy=True`` you *must* + return a copy. Returning a view into existing data will lead to incorrect + user code. + If ``copy=False`` the user requested that a copy is never made and you *must* + raise an error unless no copy is made and the returned array is a view into + existing data. It is valid to always raise an error for ``copy=False``. + The default ``copy=None`` (not passed) allows for the result to either be a + view or a copy. However, a view return should be preferred when possible. 
Please refer to :ref:`Interoperability with NumPy ` for the protocol hierarchy, of which ``__array__`` is the oldest and least @@ -330,6 +338,66 @@ NumPy provides several hooks that classes can customize: `, results will *not* be written to the object returned by :func:`__array__`. This practice will return ``TypeError``. + **Example** + + Use ``__array__`` to create a diagonal array of fixed size and value: + + >>> import numpy as np + >>> class DiagonalArray: + ... def __init__(self, N, value): + ... self._N = N + ... self._i = value + ... def __repr__(self): + ... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" + ... def __array__(self, dtype=None, copy=None): + ... if copy is False: + ... raise ValueError( + ... "`copy=False` isn't supported. A copy is always created." + ... ) + ... return self._i * np.eye(self._N, dtype=dtype) + + Our custom array can be instantiated like: + + >>> arr = DiagonalArray(5, 1) + >>> arr + DiagonalArray(N=5, value=1) + + We can convert to a numpy array using :func:`numpy.array` or + :func:`numpy.asarray`, which will call its ``__array__`` method to obtain a + standard ``numpy.ndarray``. + + >>> np.asarray(arr) + array([[1., 0., 0., 0., 0.], + [0., 1., 0., 0., 0.], + [0., 0., 1., 0., 0.], + [0., 0., 0., 1., 0.], + [0., 0., 0., 0., 1.]]) + + Using ``dtype`` should return an appropriate ndarray or raise an error: + + >>> np.asarray(arr, dtype=np.float32) + array([[1., 0., 0., 0., 0.], + [0., 1., 0., 0., 0.], + [0., 0., 1., 0., 0.], + [0., 0., 0., 1., 0.], + [0., 0., 0., 0., 1.]], dtype=float32) + + If we operate on ``arr`` with a numpy function, numpy will again use the + ``__array__`` interface to convert it to an array and then apply the function + in the usual way. + + >>> np.multiply(arr, 2) + array([[2., 0., 0., 0., 0.], + [0., 2., 0., 0., 0.], + [0., 0., 2., 0., 0.], + [0., 0., 0., 2., 0.], + [0., 0., 0., 0., 2.]]) + + Notice that the return type is a standard ``numpy.ndarray``. 
+ + >>> type(np.multiply(arr, 2)) + <class 'numpy.ndarray'> + + .. _matrix-objects: Matrix objects @@ -409,24 +477,33 @@ alias for "matrix "in NumPy. Example 1: Matrix creation from a string ->>> a = np.asmatrix('1 2 3; 4 5 3') ->>> print((a*a.T).I) +.. try_examples:: + + >>> import numpy as np + >>> a = np.asmatrix('1 2 3; 4 5 3') + >>> print((a*a.T).I) [[ 0.29239766 -0.13450292] - [-0.13450292 0.08187135]] + [-0.13450292 0.08187135]] -Example 2: Matrix creation from nested sequence +Example 2: Matrix creation from a nested sequence ->>> np.asmatrix([[1,5,10],[1.0,3,4j]]) -matrix([[ 1.+0.j, 5.+0.j, 10.+0.j], - [ 1.+0.j, 3.+0.j, 0.+4.j]]) +.. try_examples:: + + >>> import numpy as np + >>> np.asmatrix([[1,5,10],[1.0,3,4j]]) + matrix([[ 1.+0.j, 5.+0.j, 10.+0.j], - [ 1.+0.j, 3.+0.j, 0.+4.j]]) wait
try_examples:: + + >>> import numpy as np + + >>> a = np.memmap('newfile.dat', dtype=np.float64, mode='w+', shape=1000) + >>> a[10] = 10.0 + >>> a[30] = 30.0 + >>> del a + + >>> b = np.fromfile('newfile.dat', dtype=np.float64) + >>> print(b[10], b[30]) + 10.0 30.0 + + >>> a = np.memmap('newfile.dat', dtype=np.float64) + >>> print(a[10], a[30]) + 10.0 30.0 Character arrays (:mod:`numpy.char`) @@ -485,6 +568,10 @@ Character arrays (:mod:`numpy.char`) `dtype` `object_`, `bytes_` or `str_`, and use the free functions in the `numpy.char` module for fast vectorized string operations. +.. deprecated:: 2.5 + ``numpy.char.chararray`` is deprecated. Use an ``ndarray`` with a string or + bytes dtype instead. + These are enhanced arrays of either :class:`str_` type or :class:`bytes_` type. These arrays inherit from the :class:`ndarray`, but specially-define the operations ``+``, ``*``, @@ -602,15 +689,18 @@ This default iterator selects a sub-array of dimension :math:`N-1` from the array. This can be a useful construct for defining recursive algorithms. To loop over the entire array requires :math:`N` for-loops. ->>> a = np.arange(24).reshape(3,2,4)+10 ->>> for val in a: -... print('item:', val) -item: [[10 11 12 13] - [14 15 16 17]] -item: [[18 19 20 21] - [22 23 24 25]] -item: [[26 27 28 29] - [30 31 32 33]] +.. try_examples:: + + >>> import numpy as np + >>> a = np.arange(24).reshape(3,2,4) + 10 + >>> for val in a: + ... print('item:', val) + item: [[10 11 12 13] + [14 15 16 17]] + item: [[18 19 20 21] + [22 23 24 25]] + item: [[26 27 28 29] + [30 31 32 33]] Flat iteration @@ -625,13 +715,16 @@ As mentioned previously, the flat attribute of ndarray objects returns an iterator that will cycle over the entire array in C-style contiguous order. ->>> for i, val in enumerate(a.flat): -... if i%5 == 0: print(i, val) -0 10 -5 15 -10 20 -15 25 -20 30 +.. try_examples:: + + >>> import numpy as np + >>> for i, val in enumerate(a.flat): + ... 
if i%5 == 0: print(i, val) + 0 10 + 5 15 + 10 20 + 15 25 + 20 30 Here, I've used the built-in enumerate iterator to return the iterator index as well as the value. @@ -648,12 +741,16 @@ N-dimensional enumeration Sometimes it may be useful to get the N-dimensional index while iterating. The ndenumerate iterator can achieve this. ->>> for i, val in np.ndenumerate(a): -... if sum(i)%5 == 0: print(i, val) -(0, 0, 0) 10 -(1, 1, 3) 25 -(2, 0, 3) 29 -(2, 1, 2) 32 +.. try_examples:: + + >>> import numpy as np + >>> for i, val in np.ndenumerate(a): + ... if sum(i)%5 == 0: + print(i, val) + (0, 0, 0) 10 + (1, 1, 3) 25 + (2, 0, 3) 29 + (2, 1, 2) 32 Iterator for broadcasting @@ -670,9 +767,12 @@ objects as inputs and returns an iterator that returns tuples providing each of the input sequence elements in the broadcasted result. ->>> for val in np.broadcast([[1, 0], [2, 3]], [0, 1]): -... print(val) -(np.int64(1), np.int64(0)) -(np.int64(0), np.int64(1)) -(np.int64(2), np.int64(0)) -(np.int64(3), np.int64(1)) +.. try_examples:: + + >>> import numpy as np + >>> for val in np.broadcast([[1, 0], [2, 3]], [0, 1]): + ... print(val) + (np.int64(1), np.int64(0)) + (np.int64(0), np.int64(1)) + (np.int64(2), np.int64(0)) + (np.int64(3), np.int64(1)) diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index ea76425e0160..9cb7f59db78b 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -6,8 +6,6 @@ Datetimes and timedeltas ************************ -.. versionadded:: 1.7.0 - Starting in NumPy 1.7, there are core array data types which natively support datetime functionality. The data type is called :class:`datetime64`, so named because :class:`~datetime.datetime` is already taken by the Python standard library. 
@@ -55,12 +53,23 @@ months ('M'), weeks ('W'), and days ('D'), while the time units are hours ('h'), minutes ('m'), seconds ('s'), milliseconds ('ms'), and some additional SI-prefix seconds-based units. The `datetime64` data type also accepts the string "NAT", in any combination of lowercase/uppercase -letters, for a "Not A Time" value. +letters, for a "Not A Time" value. The string "now" is also supported and +returns the current UTC time. By default, it uses second ('s') precision, but +you can specify a different unit (e.g., 'M', 'D', 'h') to truncate the result +to that precision. Units finer than seconds (such as 'ms' or 'ns') are +supported but will show fractional parts as zeros, effectively truncating to +whole seconds. The string "today" is also supported and returns the current UTC +date with day precision. It also supports the same precision specifiers +as ``now``. .. admonition:: Example + .. try_examples:: + A simple ISO date: + >>> import numpy as np + >>> np.datetime64('2005-02-25') np.datetime64('2005-02-25') @@ -89,16 +98,36 @@ letters, for a "Not A Time" value. >>> np.datetime64('nat') np.datetime64('NaT') + The current time (UTC, default second precision): + + >>> np.datetime64('now') + np.datetime64('2025-08-05T02:22:14') # result will depend on the current time + + >>> np.datetime64('now', 'D') + np.datetime64('2025-08-05') + + >>> np.datetime64('now', 'ms') + np.datetime64('2025-08-05T02:22:14.000') + + The current date: + + >>> np.datetime64('today') + np.datetime64('2025-08-05') # result will depend on the current date + When creating an array of datetimes from a string, it is still possible to automatically select the unit from the inputs, by using the datetime type with generic units. .. admonition:: Example - >>> np.array(['2007-07-13', '2006-01-13', '2010-08-13'], dtype='datetime64') + .. 
try_examples:: + + >>> import numpy as np + + >>> np.array(['2007-07-13', '2006-01-13', '2010-08-13'], dtype=np.datetime64) array(['2007-07-13', '2006-01-13', '2010-08-13'], dtype='datetime64[D]') - >>> np.array(['2001-01-01T12:00', '2002-02-03T13:56:03.172'], dtype='datetime64') + >>> np.array(['2001-01-01T12:00', '2002-02-03T13:56:03.172'], dtype=np.datetime64) array(['2001-01-01T12:00:00.000', '2002-02-03T13:56:03.172'], dtype='datetime64[ms]') @@ -107,6 +136,10 @@ POSIX timestamps with the given unit. .. admonition:: Example + .. try_examples:: + + >>> import numpy as np + >>> np.array([0, 1577836800], dtype='datetime64[s]') array(['1970-01-01T00:00:00', '2020-01-01T00:00:00'], dtype='datetime64[s]') @@ -120,8 +153,12 @@ example :func:`arange` can be used to generate ranges of dates. .. admonition:: Example + .. try_examples:: + All the dates for one month: + >>> import numpy as np + >>> np.arange('2005-02', '2005-03', dtype='datetime64[D]') array(['2005-02-01', '2005-02-02', '2005-02-03', '2005-02-04', '2005-02-05', '2005-02-06', '2005-02-07', '2005-02-08', @@ -140,6 +177,10 @@ because the moment of time is still being represented exactly. .. admonition:: Example + .. try_examples:: + + >>> import numpy as np + >>> np.datetime64('2005') == np.datetime64('2005-01-01') True @@ -167,6 +208,10 @@ data type also accepts the string "NAT" in place of the number for a "Not A Time .. admonition:: Example + .. try_examples:: + + >>> import numpy as np + >>> np.timedelta64(1, 'D') np.timedelta64(1,'D') @@ -181,6 +226,10 @@ simple datetime calculations. .. admonition:: Example + .. try_examples:: + + >>> import numpy as np + >>> np.datetime64('2009-01-01') - np.datetime64('2008-01-01') np.timedelta64(366,'D') @@ -214,6 +263,10 @@ calculating the averaged values from the 400 year leap-year cycle. .. admonition:: Example + .. 
try_examples:: + + >>> import numpy as np + >>> a = np.timedelta64(1, 'Y') >>> np.timedelta64(a, 'M') @@ -273,6 +326,66 @@ us / Îŧs microsecond +/- 2.9e5 years [290301 BC, 294241 AD] as attosecond +/- 9.2 seconds [ 1969 AD, 1970 AD] ======== ================ ======================= ========================== + +Converting datetime and timedelta to Python Object +================================================== + +NumPy follows a strict protocol when converting `datetime64` and/or `timedelta64` to Python Objects (e.g., ``tuple``, ``list``, `datetime.datetime`). + +The protocol is described in the following table: + +================================ ================================= ================================== + Input Type for `datetime64` for `timedelta64` +================================ ================================= ================================== + ``NaT`` ``None`` ``None`` + ns/ps/fs/as ``int`` ``int`` + Îŧs/ms/s/m/h `datetime.datetime` `datetime.timedelta` + D/W (Linear units) `datetime.date` `datetime.timedelta` + Y/M (Non-linear units) `datetime.date` ``int`` + Generic units `datetime.date` ``int`` +================================ ================================= ================================== + +.. admonition:: Example + + .. try_examples:: + + >>> import numpy as np + + >>> type(np.datetime64('NaT').item()) + + + >>> type(np.timedelta64('NaT').item()) + + + >>> type(np.timedelta64(123, 'ns').item()) + + + >>> type(np.datetime64('2025-01-01T12:00:00.123456').item()) + + + >>> type(np.timedelta64(10, 'D').item()) + + + +In the case where conversion of `datetime64` and/or `timedelta64` is done against Python types like ``int``, ``float``, and ``str`` the corresponding return types will be ``np.str_``, ``np.int64`` and ``np.float64``. + + +.. admonition:: Example + + .. 
try_examples:: + + >>> import numpy as np + + >>> type(np.timedelta64(1, 'D').astype(int)) + + + >>> type(np.datetime64('2025-01-01T12:00:00.123456').astype(float)) + + + >>> type(np.timedelta64(123, 'ns').astype(str)) + + + Business day functionality ========================== @@ -293,6 +406,10 @@ specified in business days to datetimes with a unit of 'D' (day). .. admonition:: Example + .. try_examples:: + + >>> import numpy as np + >>> np.busday_offset('2011-06-23', 1) np.datetime64('2011-06-24') @@ -307,6 +424,10 @@ The rules most typically used are 'forward' and 'backward'. .. admonition:: Example + .. try_examples:: + + >>> import numpy as np + >>> np.busday_offset('2011-06-25', 2) Traceback (most recent call last): File "", line 1, in @@ -329,8 +450,12 @@ is necessary to get a desired answer. .. admonition:: Example + .. try_examples:: + The first business day on or after a date: + >>> import numpy as np + >>> np.busday_offset('2011-03-20', 0, roll='forward') np.datetime64('2011-03-21') >>> np.busday_offset('2011-03-22', 0, roll='forward') @@ -350,6 +475,10 @@ weekmask. .. admonition:: Example + .. try_examples:: + + >>> import numpy as np + >>> np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun') np.datetime64('2012-05-13') @@ -364,6 +493,10 @@ To test a `datetime64` value to see if it is a valid day, use :func:`is_busday`. .. admonition:: Example + .. try_examples:: + + >>> import numpy as np + >>> np.is_busday(np.datetime64('2011-07-15')) # a Friday True >>> np.is_busday(np.datetime64('2011-07-16')) # a Saturday @@ -381,6 +514,10 @@ dates, use :func:`busday_count`: .. admonition:: Example + .. try_examples:: + + >>> import numpy as np + >>> np.busday_count(np.datetime64('2011-07-11'), np.datetime64('2011-07-18')) 5 >>> np.busday_count(np.datetime64('2011-07-18'), np.datetime64('2011-07-11')) @@ -391,6 +528,10 @@ how many of them are valid dates, you can do this: .. admonition:: Example + .. 
try_examples:: + + >>> import numpy as np + >>> a = np.arange(np.datetime64('2011-07-11'), np.datetime64('2011-07-18')) >>> np.count_nonzero(np.is_busday(a)) 5 @@ -438,6 +579,10 @@ given below. 23:59:60.450 UTC" is a valid timestamp which is not parseable by `datetime64`: + .. try_examples:: + + >>> import numpy as np + >>> np.datetime64("2016-12-31 23:59:60.450") Traceback (most recent call last): File "", line 1, in @@ -451,14 +596,18 @@ given below. Compute the number of SI seconds between "2021-01-01 12:56:23.423 UTC" and "2001-01-01 00:00:00.000 UTC": + .. try_examples:: + + >>> import numpy as np + >>> ( ... np.datetime64("2021-01-01 12:56:23.423") ... - np.datetime64("2001-01-01") ... ) / np.timedelta64(1, "s") 631198583.423 - however correct answer is `631198588.423` SI seconds because there were 5 - leap seconds between 2001 and 2021. + However, the correct answer is `631198588.423` SI seconds, because there were + 5 leap seconds between 2001 and 2021. - Timedelta64 computations for dates in the past do not return SI seconds, as one would expect. @@ -469,16 +618,20 @@ given below. where UT is `universal time `_: + .. try_examples:: + + >>> import numpy as np + >>> a = np.datetime64("0000-01-01", "us") >>> b = np.datetime64("1600-01-01", "us") >>> b - a numpy.timedelta64(50491123200000000,'us') - The computed results, `50491123200` seconds, is obtained as the elapsed - number of days (`584388`) times `86400` seconds; this is the number of - seconds of a clock in sync with earth rotation. The exact value in SI - seconds can only be estimated, e.g using data published in `Measurement of - the Earth's rotation: 720 BC to AD 2015, 2016, Royal Society's Proceedings - A 472, by Stephenson et.al. `_. A - sensible estimate is `50491112870 Âą 90` seconds, with a difference of 10330 - seconds. 
+ The computed results, `50491123200` seconds, are obtained as the elapsed + number of days (`584388`) times `86400` seconds; this is the number of + seconds of a clock in sync with the Earth's rotation. The exact value in SI + seconds can only be estimated, e.g., using data published in `Measurement of + the Earth's rotation: 720 BC to AD 2015, 2016, Royal Society's Proceedings + A 472, by Stephenson et.al. `_. A + sensible estimate is `50491112870 Âą 90` seconds, with a difference of 10330 + seconds. diff --git a/doc/source/reference/arrays.dtypes.rst b/doc/source/reference/arrays.dtypes.rst index b2a6f5ab8a2d..2a5b8ce50fc5 100644 --- a/doc/source/reference/arrays.dtypes.rst +++ b/doc/source/reference/arrays.dtypes.rst @@ -68,15 +68,19 @@ Sub-arrays always have a C-contiguous memory layout. A simple data type containing a 32-bit big-endian integer: (see :ref:`arrays.dtypes.constructing` for details on construction) - >>> dt = np.dtype('>i4') - >>> dt.byteorder - '>' - >>> dt.itemsize - 4 - >>> dt.name - 'int32' - >>> dt.type is np.int32 - True + .. try_examples:: + + >>> import numpy as np + + >>> dt = np.dtype('>i4') + >>> dt.byteorder + '>' + >>> dt.itemsize + 4 + >>> dt.name + 'int32' + >>> dt.type is np.int32 + True The corresponding array scalar type is :class:`int32`. @@ -85,24 +89,32 @@ Sub-arrays always have a C-contiguous memory layout. 
A structured data type containing a 16-character string (in field 'name') and a sub-array of two 64-bit floating-point number (in field 'grades'): - >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) - >>> dt['name'] - dtype('>> dt['grades'] - dtype(('>> import numpy as np + + >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + >>> dt['name'] + dtype('>> dt['grades'] + dtype(('` type that also has two fields: - >>> x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt) - >>> x[1] - ('John', [6., 7.]) - >>> x[1]['grades'] - array([6., 7.]) - >>> type(x[1]) - - >>> type(x[1]['grades']) - + .. try_examples:: + + >>> import numpy as np + + >>> x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt) + >>> x[1] + ('John', [6., 7.]) + >>> x[1]['grades'] + array([6., 7.]) + >>> type(x[1]) + + >>> type(x[1]['grades']) + .. _arrays.dtypes.constructing: @@ -148,8 +160,12 @@ Array-scalar types .. admonition:: Example - >>> dt = np.dtype(np.int32) # 32-bit integer - >>> dt = np.dtype(np.complex128) # 128-bit complex floating-point number + .. try_examples:: + + >>> import numpy as np + + >>> dt = np.dtype(np.int32) # 32-bit integer + >>> dt = np.dtype(np.complex128) # 128-bit complex floating-point number Generic types The generic hierarchical type objects convert to corresponding @@ -171,11 +187,14 @@ Generic types to an array of ``float64``, even though ``float32`` is a subdtype of ``np.floating``. +.. _dtype-constructing-from-python-types: Built-in Python types - Several python types are equivalent to a corresponding + Several Python types are equivalent to a corresponding array scalar when used to generate a :class:`dtype` object: + =================== =============== + Python type NumPy type =================== =============== :class:`int` :class:`int\_` :class:`bool` :class:`bool\_` @@ -191,9 +210,13 @@ Built-in Python types .. 
admonition:: Example - >>> dt = np.dtype(float) # Python-compatible floating-point number - >>> dt = np.dtype(int) # Python-compatible integer - >>> dt = np.dtype(object) # Python object + .. try_examples:: + + >>> import numpy as np + + >>> dt = np.dtype(float) # Python-compatible floating-point number + >>> dt = np.dtype(int) # Python-compatible integer + >>> dt = np.dtype(object) # Python object .. note:: @@ -219,48 +242,64 @@ One-character strings .. admonition:: Example - >>> dt = np.dtype('b') # byte, native byte order - >>> dt = np.dtype('>H') # big-endian unsigned short - >>> dt = np.dtype('>> dt = np.dtype('d') # double-precision floating-point number + .. try_examples:: + + >>> import numpy as np + + >>> dt = np.dtype('b') # byte, native byte order + >>> dt = np.dtype('>H') # big-endian unsigned short + >>> dt = np.dtype('>> dt = np.dtype('d') # double-precision floating-point number Array-protocol type strings (see :ref:`arrays.interface`) The first character specifies the kind of data and the remaining characters specify the number of bytes per item, except for Unicode, - where it is interpreted as the number of characters. The item size - must correspond to an existing type, or an error will be raised. The - supported kinds are - - ================ ======================== - ``'?'`` boolean - ``'b'`` (signed) byte - ``'B'`` unsigned byte - ``'i'`` (signed) integer - ``'u'`` unsigned integer - ``'f'`` floating-point - ``'c'`` complex-floating point - ``'m'`` timedelta - ``'M'`` datetime - ``'O'`` (Python) objects - ``'S'``, ``'a'`` zero-terminated bytes (not recommended) - ``'U'`` Unicode string - ``'V'`` raw data (:class:`void`) - ================ ======================== + where it is interpreted as the number of characters, and except ``b1`` + which represents boolean. The item size must correspond to an existing + type, or an error will be raised. 
The supported kinds are + + ================== ======================== + ``'?'`` boolean + ``'b'`` (signed) byte + ``'B'`` unsigned byte + ``'h'`` (signed) short + ``'H'`` unsigned short + ``'i'`` (signed) integer + ``'I'`` unsigned integer + ``'l'`` (signed) long integer + ``'L'`` unsigned long integer + ``'q'`` (signed) long long integer + ``'Q'`` unsigned long long integer + ``'f'`` single precision + ``'F'`` complex single precision + ``'d'`` double precision + ``'D'`` complex double precision + ``'g'`` long precision + ``'G'`` complex long double precision + ``'O'`` (Python) objects + ``'S'`` zero-terminated bytes (not recommended) + ``'U'`` Unicode string + ``'V'`` raw data (:class:`void`) + ================== ======================== .. admonition:: Example - >>> dt = np.dtype('i4') # 32-bit signed integer - >>> dt = np.dtype('f8') # 64-bit floating-point number - >>> dt = np.dtype('c16') # 128-bit complex floating-point number - >>> dt = np.dtype('S25') # 25-length zero-terminated bytes - >>> dt = np.dtype('U25') # 25-character string + .. try_examples:: + + >>> import numpy as np + + >>> dt = np.dtype('i4') # 32-bit signed integer + >>> dt = np.dtype('f8') # 64-bit floating-point number + >>> dt = np.dtype('c16') # 128-bit complex floating-point number + >>> dt = np.dtype('S25') # 25-length zero-terminated bytes + >>> dt = np.dtype('U25') # 25-character string .. _string-dtype-note: .. admonition:: Note on string types For backward compatibility with existing code originally written to support - Python 2, ``S`` and ``a`` typestrings are zero-terminated bytes. + Python 2, ``S`` and ``a`` typestrings are zero-terminated bytes. For unicode strings, use ``U``, `numpy.str_`. For signed bytes that do not need zero-termination ``b`` or ``i1`` can be used. @@ -280,12 +319,15 @@ String with comma-separated fields .. admonition:: Example + .. 
try_examples:: + - field named ``f0`` containing a 32-bit integer - field named ``f1`` containing a 2 x 3 sub-array of 64-bit floating-point numbers - field named ``f2`` containing a 32-bit floating-point number - >>> dt = np.dtype("i4, (2,3)f8, f4") + >>> import numpy as np + >>> dt = np.dtype("i4, (2,3)f8, f4") - field named ``f0`` containing a 3-character string - field named ``f1`` containing a sub-array of shape (3,) @@ -293,15 +335,20 @@ String with comma-separated fields - field named ``f2`` containing a 3 x 4 sub-array containing 10-character strings - >>> dt = np.dtype("S3, 3u8, (3,4)S10") + >>> import numpy as np + >>> dt = np.dtype("S3, 3u8, (3,4)S10") Type strings Any string name of a NumPy dtype, e.g.: .. admonition:: Example - >>> dt = np.dtype('uint32') # 32-bit unsigned integer - >>> dt = np.dtype('float64') # 64-bit floating-point number + .. try_examples:: + + >>> import numpy as np + + >>> dt = np.dtype('uint32') # 32-bit unsigned integer + >>> dt = np.dtype('float64') # 64-bit floating-point number .. index:: triple: dtype; construction; from tuple @@ -313,8 +360,12 @@ Type strings .. admonition:: Example - >>> dt = np.dtype((np.void, 10)) # 10-byte wide data block - >>> dt = np.dtype(('U', 10)) # 10-character unicode string + .. try_examples:: + + >>> import numpy as np + + >>> dt = np.dtype((np.void, 10)) # 10-byte wide data block + >>> dt = np.dtype(('U', 10)) # 10-character unicode string ``(fixed_dtype, shape)`` .. index:: @@ -330,8 +381,12 @@ Type strings .. admonition:: Example - >>> dt = np.dtype((np.int32, (2,2))) # 2 x 2 integer sub-array - >>> dt = np.dtype(('i4, (2,3)f8, f4', (2,3))) # 2 x 3 structured sub-array + .. try_examples:: + + >>> import numpy as np + + >>> dt = np.dtype((np.int32, (2,2))) # 2 x 2 integer sub-array + >>> dt = np.dtype(('i4, (2,3)f8, f4', (2,3))) # 2 x 3 structured sub-array .. index:: triple: dtype; construction; from list @@ -362,15 +417,19 @@ Type strings .. 
admonition:: Example - Data-type with fields ``big`` (big-endian 32-bit integer) and - ``little`` (little-endian 32-bit integer): + .. try_examples:: + + Data-type with fields ``big`` (big-endian 32-bit integer) and + ``little`` (little-endian 32-bit integer): + + >>> import numpy as np - >>> dt = np.dtype([('big', '>i4'), ('little', '>> dt = np.dtype([('big', '>i4'), ('little', '>> dt = np.dtype([('R','u1'), ('G','u1'), ('B','u1'), ('A','u1')]) + >>> dt = np.dtype([('R','u1'), ('G','u1'), ('B','u1'), ('A','u1')]) .. index:: triple: dtype; construction; from dict @@ -401,19 +460,23 @@ Type strings .. admonition:: Example - Data type with fields ``r``, ``g``, ``b``, ``a``, each being - an 8-bit unsigned integer: + .. try_examples:: - >>> dt = np.dtype({'names': ['r','g','b','a'], - ... 'formats': [np.uint8, np.uint8, np.uint8, np.uint8]}) + Data type with fields ``r``, ``g``, ``b``, ``a``, each being + an 8-bit unsigned integer: - Data type with fields ``r`` and ``b`` (with the given titles), - both being 8-bit unsigned integers, the first at byte position - 0 from the start of the field and the second at position 2: + >>> import numpy as np - >>> dt = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'], - ... 'offsets': [0, 2], - ... 'titles': ['Red pixel', 'Blue pixel']}) + >>> dt = np.dtype({'names': ['r','g','b','a'], + ... 'formats': [np.uint8, np.uint8, np.uint8, np.uint8]}) + + Data type with fields ``r`` and ``b`` (with the given titles), + both being 8-bit unsigned integers, the first at byte position + 0 from the start of the field and the second at position 2: + + >>> dt = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'], + ... 'offsets': [0, 2], + ... 'titles': ['Red pixel', 'Blue pixel']}) ``{'field1': ..., 'field2': ..., ...}`` @@ -430,12 +493,16 @@ Type strings .. 
admonition:: Example - Data type containing field ``col1`` (10-character string at - byte position 0), ``col2`` (32-bit float at byte position 10), - and ``col3`` (integers at byte position 14): + .. try_examples:: + + Data type containing field ``col1`` (10-character string at + byte position 0), ``col2`` (32-bit float at byte position 10), + and ``col3`` (integers at byte position 14): + + >>> import numpy as np - >>> dt = np.dtype({'col1': ('U10', 0), 'col2': (np.float32, 10), - ... 'col3': (int, 14)}) + >>> dt = np.dtype({'col1': ('U10', 0), 'col2': (np.float32, 10), + ... 'col3': (int, 14)}) ``(base_dtype, new_dtype)`` In NumPy 1.7 and later, this form allows `base_dtype` to be interpreted as @@ -453,20 +520,24 @@ Type strings .. admonition:: Example - 32-bit integer, whose first two bytes are interpreted as an integer - via field ``real``, and the following two bytes via field ``imag``. + .. try_examples:: - >>> dt = np.dtype((np.int32,{'real':(np.int16, 0),'imag':(np.int16, 2)})) + 32-bit integer, whose first two bytes are interpreted as an integer + via field ``real``, and the following two bytes via field ``imag``. 
- 32-bit integer, which is interpreted as consisting of a sub-array - of shape ``(4,)`` containing 8-bit integers: + >>> import numpy as np - >>> dt = np.dtype((np.int32, (np.int8, 4))) + >>> dt = np.dtype((np.int32,{'real':(np.int16, 0),'imag':(np.int16, 2)})) - 32-bit integer, containing fields ``r``, ``g``, ``b``, ``a`` that - interpret the 4 bytes in the integer as four unsigned integers: + 32-bit integer, which is interpreted as consisting of a sub-array + of shape ``(4,)`` containing 8-bit integers: - >>> dt = np.dtype(('i4', [('r','u1'),('g','u1'),('b','u1'),('a','u1')])) + >>> dt = np.dtype((np.int32, (np.int8, 4))) + + 32-bit integer, containing fields ``r``, ``g``, ``b``, ``a`` that + interpret the 4 bytes in the integer as four unsigned integers: + + >>> dt = np.dtype(('i4', [('r','u1'),('g','u1'),('b','u1'),('a','u1')])) Checking the data type @@ -475,11 +546,15 @@ When checking for a specific data type, use ``==`` comparison. .. admonition:: Example - >>> a = np.array([1, 2], dtype=np.float32) - >>> a.dtype == np.float32 - True + .. try_examples:: + + >>> import numpy as np -As opposed to python types, a comparison using ``is`` should not be used. + >>> a = np.array([1, 2], dtype=np.float32) + >>> a.dtype == np.float32 + True + +As opposed to Python types, a comparison using ``is`` should not be used. First, NumPy treats data type specifications (everything that can be passed to the :class:`dtype` constructor) as equivalent to the data type object itself. @@ -487,31 +562,39 @@ This equivalence can only be handled through ``==``, not through ``is``. .. admonition:: Example - A :class:`dtype` object is equal to all data type specifications that are - equivalent to it. - - >>> a = np.array([1, 2], dtype=float) - >>> a.dtype == np.dtype(np.float64) - True - >>> a.dtype == np.float64 - True - >>> a.dtype == float - True - >>> a.dtype == "float64" - True - >>> a.dtype == "d" - True + .. 
try_examples:: + + A :class:`dtype` object is equal to all data type specifications that are + equivalent to it. + + >>> import numpy as np + + >>> a = np.array([1, 2], dtype=np.float64) + >>> a.dtype == np.dtype(np.float64) + True + >>> a.dtype == np.float64 + True + >>> a.dtype == float + True + >>> a.dtype == "float64" + True + >>> a.dtype == "d" + True Second, there is no guarantee that data type objects are singletons. .. admonition:: Example - Do not use ``is`` because data type objects may or may not be singletons. + .. try_examples:: + + Do not use ``is`` because data type objects may or may not be singletons. + + >>> import numpy as np - >>> np.dtype(float) is np.dtype(float) - True - >>> np.dtype([('a', float)]) is np.dtype([('a', float)]) - False + >>> np.dtype(float) is np.dtype(float) + True + >>> np.dtype([('a', float)]) is np.dtype([('a', float)]) + False :class:`dtype` ============== diff --git a/doc/source/reference/arrays.interface.rst b/doc/source/reference/arrays.interface.rst index b78e8e75cb1f..75c17060c8fc 100644 --- a/doc/source/reference/arrays.interface.rst +++ b/doc/source/reference/arrays.interface.rst @@ -13,13 +13,9 @@ The array interface protocol This page describes the NumPy-specific API for accessing the contents of a NumPy array from other C extensions. :pep:`3118` -- :c:func:`The Revised Buffer Protocol ` introduces - similar, standardized API to Python 2.6 and 3.0 for any extension - module to use. Cython__'s buffer array support - uses the :pep:`3118` API; see the `Cython NumPy - tutorial`__. Cython provides a way to write code that supports the buffer - protocol with Python versions older than 2.6 because it has a - backward-compatible implementation utilizing the array interface - described here. + similar, standardized API for any extension module to use. Cython__'s + buffer array support uses the :pep:`3118` API; see the `Cython NumPy + tutorial`__. 
__ https://cython.org/ __ https://github.com/cython/cython/wiki/tutorials-numpy @@ -124,7 +120,7 @@ This approach to the interface consists of the object having an **Default**: ``[('', typestr)]`` - **data** (optional) + **data** A 2-tuple whose first argument is a :doc:`Python integer ` that points to the data-area storing the array contents. @@ -140,15 +136,23 @@ This approach to the interface consists of the object having an This attribute can also be an object exposing the :ref:`buffer interface ` which - will be used to share the data. If this key is not present (or - returns None), then memory sharing will be done - through the buffer interface of the object itself. In this + will be used to share the data. If this key is ``None``, then memory sharing + will be done through the buffer interface of the object itself. In this case, the offset key can be used to indicate the start of the buffer. A reference to the object exposing the array interface must be stored by the new object if the memory area is to be secured. - **Default**: ``None`` + .. note:: + Not specifying this field uses a "scalar" path that we may remove in the future + as we are not aware of any users. In this case, NumPy assigns the original object + as a scalar into the array. + + .. versionchanged:: 2.4 + Prior to NumPy 2.4 a ``NULL`` pointer used the undocumented "scalar" path + and was thus usually not accepted (and triggered crashes on some paths). + After NumPy 2.4, ``NULL`` is accepted, although NumPy will create a 1-byte sized + new allocation for the array. **strides** (optional) Either ``None`` to indicate a C-style contiguous array or diff --git a/doc/source/reference/arrays.ndarray.rst b/doc/source/reference/arrays.ndarray.rst index 5429a272569d..17922bbb7a44 100644 --- a/doc/source/reference/arrays.ndarray.rst +++ b/doc/source/reference/arrays.ndarray.rst @@ -32,35 +32,39 @@ objects implementing the :class:`memoryview` or :ref:`array .. 
admonition:: Example - A 2-dimensional array of size 2 x 3, composed of 4-byte integer - elements: + .. try_examples:: - >>> x = np.array([[1, 2, 3], [4, 5, 6]], np.int32) - >>> type(x) - - >>> x.shape - (2, 3) - >>> x.dtype - dtype('int32') + A 2-dimensional array of size 2 x 3, composed of 4-byte integer + elements: - The array can be indexed using Python container-like syntax: + >>> import numpy as np - >>> # The element of x in the *second* row, *third* column, namely, 6. - >>> x[1, 2] - 6 + >>> x = np.array([[1, 2, 3], [4, 5, 6]], np.int32) + >>> type(x) + + >>> x.shape + (2, 3) + >>> x.dtype + dtype('int32') - For example :ref:`slicing ` can produce views of - the array: + The array can be indexed using Python container-like syntax: - >>> y = x[:,1] - >>> y - array([2, 5], dtype=int32) - >>> y[0] = 9 # this also changes the corresponding element in x - >>> y - array([9, 5], dtype=int32) - >>> x - array([[1, 9, 3], - [4, 5, 6]], dtype=int32) + >>> # The element of x in the *second* row, *third* column, namely, 6. + >>> x[1, 2] + 6 + + For example :ref:`slicing ` can produce views of + the array: + + >>> y = x[:,1] + >>> y + array([2, 5], dtype=int32) + >>> y[0] = 9 # this also changes the corresponding element in x + >>> y + array([9, 5], dtype=int32) + >>> x + array([[1, 9, 3], + [4, 5, 6]], dtype=int32) Constructing arrays @@ -287,7 +291,6 @@ Array conversion ndarray.item ndarray.tolist - ndarray.tostring ndarray.tobytes ndarray.tofile ndarray.dump @@ -360,36 +363,40 @@ Many of these methods take an argument named *axis*. In such cases, .. 
admonition:: Example of the *axis* argument - A 3-dimensional array of size 3 x 3 x 3, summed over each of its - three axes - - >>> x = np.arange(27).reshape((3,3,3)) - >>> x - array([[[ 0, 1, 2], - [ 3, 4, 5], - [ 6, 7, 8]], - [[ 9, 10, 11], - [12, 13, 14], - [15, 16, 17]], - [[18, 19, 20], - [21, 22, 23], - [24, 25, 26]]]) - >>> x.sum(axis=0) - array([[27, 30, 33], - [36, 39, 42], - [45, 48, 51]]) - >>> # for sum, axis is the first keyword, so we may omit it, - >>> # specifying only its value - >>> x.sum(0), x.sum(1), x.sum(2) - (array([[27, 30, 33], - [36, 39, 42], - [45, 48, 51]]), - array([[ 9, 12, 15], - [36, 39, 42], - [63, 66, 69]]), - array([[ 3, 12, 21], - [30, 39, 48], - [57, 66, 75]])) + .. try_examples:: + + A 3-dimensional array of size 3 x 3 x 3, summed over each of its + three axes: + + >>> import numpy as np + + >>> x = np.arange(27).reshape((3,3,3)) + >>> x + array([[[ 0, 1, 2], + [ 3, 4, 5], + [ 6, 7, 8]], + [[ 9, 10, 11], + [12, 13, 14], + [15, 16, 17]], + [[18, 19, 20], + [21, 22, 23], + [24, 25, 26]]]) + >>> x.sum(axis=0) + array([[27, 30, 33], + [36, 39, 42], + [45, 48, 51]]) + >>> # for sum, axis is the first keyword, so we may omit it, + >>> # specifying only its value + >>> x.sum(0), x.sum(1), x.sum(2) + (array([[27, 30, 33], + [36, 39, 42], + [45, 48, 51]]), + array([[ 9, 12, 15], + [36, 39, 42], + [63, 66, 69]]), + array([[ 3, 12, 21], + [30, 39, 48], + [57, 66, 75]])) The parameter *dtype* specifies the data type over which a reduction operation (like summing) should take place. The default reduce data @@ -463,11 +470,11 @@ Truth value of an array (:class:`bool() `): Truth-value testing of an array invokes :meth:`ndarray.__bool__`, which raises an error if the number of - elements in the array is larger than 1, because the truth value + elements in the array is not 1, because the truth value of such arrays is ambiguous. Use :meth:`.any() ` and :meth:`.all() ` instead to be clear about what is meant - in such cases. 
(If the number of elements is 0, the array evaluates - to ``False``.) + in such cases. (If you wish to check for whether an array is empty, + use for example ``.size > 0``.) Unary operations: @@ -611,3 +618,27 @@ Utility method for typing: :toctree: generated/ ndarray.__class_getitem__ + +.. _arrays.ndarray.pattern-matching: + +Structural pattern matching +=========================== + +Arrays support :pep:`structural pattern matching <634>`. The array is matched +as a sequence, so you can unpack arrays along the first dimension in +``match``/``case`` statements:: + + >>> arr = np.array([[1, 2], [3, 4]]) + >>> match arr: + ... case [row1, row2]: + ... print(f"row1={row1}, row2={row2}") + row1=[1 2], row2=[3 4] + +Nested patterns work too, matching inner dimensions:: + + >>> match arr: + ... case [[a, b], [c, d]]: + ... print(f"a={a}, b={b}, c={c}, d={d}") + a=1, b=2, c=3, d=4 + +All ndarray subclasses inherit this behavior. diff --git a/doc/source/reference/arrays.nditer.rst b/doc/source/reference/arrays.nditer.rst index d5d19d244e94..add33f4a2b46 100644 --- a/doc/source/reference/arrays.nditer.rst +++ b/doc/source/reference/arrays.nditer.rst @@ -32,11 +32,15 @@ using the standard Python iterator interface. .. admonition:: Example - >>> a = np.arange(6).reshape(2,3) - >>> for x in np.nditer(a): - ... print(x, end=' ') - ... - 0 1 2 3 4 5 + .. try_examples:: + + >>> import numpy as np + + >>> a = np.arange(6).reshape(2,3) + >>> for x in np.nditer(a): + ... print(x, end=' ') + ... + 0 1 2 3 4 5 An important thing to be aware of for this iteration is that the order is chosen to match the memory layout of the array instead of using a @@ -48,16 +52,20 @@ of that transpose in C order. .. admonition:: Example - >>> a = np.arange(6).reshape(2,3) - >>> for x in np.nditer(a.T): - ... print(x, end=' ') - ... - 0 1 2 3 4 5 + .. try_examples:: + + >>> import numpy as np - >>> for x in np.nditer(a.T.copy(order='C')): - ... print(x, end=' ') - ... 
- 0 3 1 4 2 5 + >>> a = np.arange(6).reshape(2,3) + >>> for x in np.nditer(a.T): + ... print(x, end=' ') + ... + 0 1 2 3 4 5 + + >>> for x in np.nditer(a.T.copy(order='C')): + ... print(x, end=' ') + ... + 0 3 1 4 2 5 The elements of both `a` and `a.T` get traversed in the same order, namely the order they are stored in memory, whereas the elements of @@ -76,15 +84,19 @@ order='C' for C order and order='F' for Fortran order. .. admonition:: Example - >>> a = np.arange(6).reshape(2,3) - >>> for x in np.nditer(a, order='F'): - ... print(x, end=' ') - ... - 0 3 1 4 2 5 - >>> for x in np.nditer(a.T, order='C'): - ... print(x, end=' ') - ... - 0 3 1 4 2 5 + .. try_examples:: + + >>> import numpy as np + + >>> a = np.arange(6).reshape(2,3) + >>> for x in np.nditer(a, order='F'): + ... print(x, end=' ') + ... + 0 3 1 4 2 5 + >>> for x in np.nditer(a.T, order='C'): + ... print(x, end=' ') + ... + 0 3 1 4 2 5 .. _nditer-context-manager: @@ -111,17 +123,21 @@ context is exited. .. admonition:: Example - >>> a = np.arange(6).reshape(2,3) - >>> a - array([[0, 1, 2], - [3, 4, 5]]) - >>> with np.nditer(a, op_flags=['readwrite']) as it: - ... for x in it: - ... x[...] = 2 * x - ... - >>> a - array([[ 0, 2, 4], - [ 6, 8, 10]]) + .. try_examples:: + + >>> import numpy as np + + >>> a = np.arange(6).reshape(2,3) + >>> a + array([[0, 1, 2], + [3, 4, 5]]) + >>> with np.nditer(a, op_flags=['readwrite']) as it: + ... for x in it: + ... x[...] = 2 * x + ... + >>> a + array([[ 0, 2, 4], + [ 6, 8, 10]]) If you are writing code that needs to support older versions of numpy, note that prior to 1.15, :class:`nditer` was not a context manager and @@ -150,16 +166,20 @@ elements each. .. admonition:: Example - >>> a = np.arange(6).reshape(2,3) - >>> for x in np.nditer(a, flags=['external_loop']): - ... print(x, end=' ') - ... - [0 1 2 3 4 5] + .. try_examples:: + + >>> import numpy as np - >>> for x in np.nditer(a, flags=['external_loop'], order='F'): - ... print(x, end=' ') - ... 
- [0 3] [1 4] [2 5] + >>> a = np.arange(6).reshape(2,3) + >>> for x in np.nditer(a, flags=['external_loop']): + ... print(x, end=' ') + ... + [0 1 2 3 4 5] + + >>> for x in np.nditer(a, flags=['external_loop'], order='F'): + ... print(x, end=' ') + ... + [0 3] [1 4] [2 5] Tracking an index or multi-index -------------------------------- @@ -176,26 +196,30 @@ progression of the index: .. admonition:: Example - >>> a = np.arange(6).reshape(2,3) - >>> it = np.nditer(a, flags=['f_index']) - >>> for x in it: - ... print("%d <%d>" % (x, it.index), end=' ') - ... - 0 <0> 1 <2> 2 <4> 3 <1> 4 <3> 5 <5> - - >>> it = np.nditer(a, flags=['multi_index']) - >>> for x in it: - ... print("%d <%s>" % (x, it.multi_index), end=' ') - ... - 0 <(0, 0)> 1 <(0, 1)> 2 <(0, 2)> 3 <(1, 0)> 4 <(1, 1)> 5 <(1, 2)> - - >>> with np.nditer(a, flags=['multi_index'], op_flags=['writeonly']) as it: - ... for x in it: - ... x[...] = it.multi_index[1] - it.multi_index[0] - ... - >>> a - array([[ 0, 1, 2], - [-1, 0, 1]]) + .. try_examples:: + + >>> import numpy as np + + >>> a = np.arange(6).reshape(2,3) + >>> it = np.nditer(a, flags=['f_index']) + >>> for x in it: + ... print("%d <%d>" % (x, it.index), end=' ') + ... + 0 <0> 1 <2> 2 <4> 3 <1> 4 <3> 5 <5> + + >>> it = np.nditer(a, flags=['multi_index']) + >>> for x in it: + ... print("%d <%s>" % (x, it.multi_index), end=' ') + ... + 0 <(0, 0)> 1 <(0, 1)> 2 <(0, 2)> 3 <(1, 0)> 4 <(1, 1)> 5 <(1, 2)> + + >>> with np.nditer(a, flags=['multi_index'], op_flags=['writeonly']) as it: + ... for x in it: + ... x[...] = it.multi_index[1] - it.multi_index[0] + ... + >>> a + array([[ 0, 1, 2], + [-1, 0, 1]]) Tracking an index or multi-index is incompatible with using an external loop, because it requires a different index value per element. If @@ -204,11 +228,15 @@ raise an exception. .. 
admonition:: Example - >>> a = np.zeros((2,3)) - >>> it = np.nditer(a, flags=['c_index', 'external_loop']) - Traceback (most recent call last): - File "", line 1, in - ValueError: Iterator flag EXTERNAL_LOOP cannot be used if an index or multi-index is being tracked + .. try_examples:: + + >>> import numpy as np + + >>> a = np.zeros((2,3)) + >>> it = np.nditer(a, flags=['c_index', 'external_loop']) + Traceback (most recent call last): + File "", line 1, in + ValueError: Iterator flag EXTERNAL_LOOP cannot be used if an index or multi-index is being tracked Alternative looping and element access -------------------------------------- @@ -222,29 +250,33 @@ produce identical results to the ones in the previous section. .. admonition:: Example - >>> a = np.arange(6).reshape(2,3) - >>> it = np.nditer(a, flags=['f_index']) - >>> while not it.finished: - ... print("%d <%d>" % (it[0], it.index), end=' ') - ... is_not_finished = it.iternext() - ... - 0 <0> 1 <2> 2 <4> 3 <1> 4 <3> 5 <5> - - >>> it = np.nditer(a, flags=['multi_index']) - >>> while not it.finished: - ... print("%d <%s>" % (it[0], it.multi_index), end=' ') - ... is_not_finished = it.iternext() - ... - 0 <(0, 0)> 1 <(0, 1)> 2 <(0, 2)> 3 <(1, 0)> 4 <(1, 1)> 5 <(1, 2)> - - >>> with np.nditer(a, flags=['multi_index'], op_flags=['writeonly']) as it: - ... while not it.finished: - ... it[0] = it.multi_index[1] - it.multi_index[0] - ... is_not_finished = it.iternext() - ... - >>> a - array([[ 0, 1, 2], - [-1, 0, 1]]) + .. try_examples:: + + >>> import numpy as np + + >>> a = np.arange(6).reshape(2,3) + >>> it = np.nditer(a, flags=['f_index']) + >>> while not it.finished: + ... print("%d <%d>" % (it[0], it.index), end=' ') + ... is_not_finished = it.iternext() + ... + 0 <0> 1 <2> 2 <4> 3 <1> 4 <3> 5 <5> + + >>> it = np.nditer(a, flags=['multi_index']) + >>> while not it.finished: + ... print("%d <%s>" % (it[0], it.multi_index), end=' ') + ... is_not_finished = it.iternext() + ... 
+ 0 <(0, 0)> 1 <(0, 1)> 2 <(0, 2)> 3 <(1, 0)> 4 <(1, 1)> 5 <(1, 2)> + + >>> with np.nditer(a, flags=['multi_index'], op_flags=['writeonly']) as it: + ... while not it.finished: + ... it[0] = it.multi_index[1] - it.multi_index[0] + ... is_not_finished = it.iternext() + ... + >>> a + array([[ 0, 1, 2], + [-1, 0, 1]]) Buffering the array elements ---------------------------- @@ -263,16 +295,20 @@ is enabled. .. admonition:: Example - >>> a = np.arange(6).reshape(2,3) - >>> for x in np.nditer(a, flags=['external_loop'], order='F'): - ... print(x, end=' ') - ... - [0 3] [1 4] [2 5] + .. try_examples:: - >>> for x in np.nditer(a, flags=['external_loop','buffered'], order='F'): - ... print(x, end=' ') - ... - [0 3 1 4 2 5] + >>> import numpy as np + + >>> a = np.arange(6).reshape(2,3) + >>> for x in np.nditer(a, flags=['external_loop'], order='F'): + ... print(x, end=' ') + ... + [0 3] [1 4] [2 5] + + >>> for x in np.nditer(a, flags=['external_loop','buffered'], order='F'): + ... print(x, end=' ') + ... + [0 3 1 4 2 5] Iterating as a specific data type --------------------------------- @@ -305,13 +341,17 @@ data type doesn't match precisely. .. admonition:: Example - >>> a = np.arange(6).reshape(2,3) - 3 - >>> for x in np.nditer(a, op_dtypes=['complex128']): - ... print(np.sqrt(x), end=' ') - ... - Traceback (most recent call last): - File "", line 1, in - TypeError: Iterator operand required copying or buffering, but neither copying nor buffering was enabled + .. try_examples:: + + >>> import numpy as np + + >>> a = np.arange(6).reshape(2,3) - 3 + >>> for x in np.nditer(a, op_dtypes=['complex128']): + ... print(np.sqrt(x), end=' ') + ... + Traceback (most recent call last): + File "", line 1, in + TypeError: Iterator operand required copying or buffering, but neither copying nor buffering was enabled In copying mode, 'copy' is specified as a per-operand flag. This is done to provide control in a per-operand fashion. 
Buffering mode is @@ -319,17 +359,21 @@ specified as an iterator flag. .. admonition:: Example - >>> a = np.arange(6).reshape(2,3) - 3 - >>> for x in np.nditer(a, op_flags=['readonly','copy'], - ... op_dtypes=['complex128']): - ... print(np.sqrt(x), end=' ') - ... - 1.7320508075688772j 1.4142135623730951j 1j 0j (1+0j) (1.4142135623730951+0j) + .. try_examples:: + + >>> import numpy as np - >>> for x in np.nditer(a, flags=['buffered'], op_dtypes=['complex128']): - ... print(np.sqrt(x), end=' ') - ... - 1.7320508075688772j 1.4142135623730951j 1j 0j (1+0j) (1.4142135623730951+0j) + >>> a = np.arange(6).reshape(2,3) - 3 + >>> for x in np.nditer(a, op_flags=['readonly','copy'], + ... op_dtypes=['complex128']): + ... print(np.sqrt(x), end=' ') + ... + 1.7320508075688772j 1.4142135623730951j 1j 0j (1+0j) (1.4142135623730951+0j) + + >>> for x in np.nditer(a, flags=['buffered'], op_dtypes=['complex128']): + ... print(np.sqrt(x), end=' ') + ... + 1.7320508075688772j 1.4142135623730951j 1j 0j (1+0j) (1.4142135623730951+0j) The iterator uses NumPy's casting rules to determine whether a specific @@ -342,26 +386,30 @@ complex to float. .. admonition:: Example - >>> a = np.arange(6.) - >>> for x in np.nditer(a, flags=['buffered'], op_dtypes=['float32']): - ... print(x, end=' ') - ... - Traceback (most recent call last): - File "", line 1, in - TypeError: Iterator operand 0 dtype could not be cast from dtype('float64') to dtype('float32') according to the rule 'safe' - - >>> for x in np.nditer(a, flags=['buffered'], op_dtypes=['float32'], - ... casting='same_kind'): - ... print(x, end=' ') - ... - 0.0 1.0 2.0 3.0 4.0 5.0 - - >>> for x in np.nditer(a, flags=['buffered'], op_dtypes=['int32'], casting='same_kind'): - ... print(x, end=' ') - ... - Traceback (most recent call last): - File "", line 1, in - TypeError: Iterator operand 0 dtype could not be cast from dtype('float64') to dtype('int32') according to the rule 'same_kind' + .. 
try_examples:: + + >>> import numpy as np + + >>> a = np.arange(6.) + >>> for x in np.nditer(a, flags=['buffered'], op_dtypes=['float32']): + ... print(x, end=' ') + ... + Traceback (most recent call last): + File "", line 1, in + TypeError: Iterator operand 0 dtype could not be cast from dtype('float64') to dtype('float32') according to the rule 'safe' + + >>> for x in np.nditer(a, flags=['buffered'], op_dtypes=['float32'], + ... casting='same_kind'): + ... print(x, end=' ') + ... + 0.0 1.0 2.0 3.0 4.0 5.0 + + >>> for x in np.nditer(a, flags=['buffered'], op_dtypes=['int32'], casting='same_kind'): + ... print(x, end=' ') + ... + Traceback (most recent call last): + File "", line 1, in + TypeError: Iterator operand 0 dtype could not be cast from dtype('float64') to dtype('int32') according to the rule 'same_kind' One thing to watch out for is conversions back to the original data type when using a read-write or write-only operand. A common case is @@ -373,14 +421,18 @@ would violate the casting rule. .. admonition:: Example - >>> a = np.arange(6) - >>> for x in np.nditer(a, flags=['buffered'], op_flags=['readwrite'], - ... op_dtypes=['float64'], casting='same_kind'): - ... x[...] = x / 2.0 - ... - Traceback (most recent call last): - File "", line 2, in - TypeError: Iterator requested dtype could not be cast from dtype('float64') to dtype('int64'), the operand 0 dtype, according to the rule 'same_kind' + .. try_examples:: + + >>> import numpy as np + + >>> a = np.arange(6) + >>> for x in np.nditer(a, flags=['buffered'], op_flags=['readwrite'], + ... op_dtypes=['float64'], casting='same_kind'): + ... x[...] = x / 2.0 + ... + Traceback (most recent call last): + File "", line 2, in + TypeError: Iterator requested dtype could not be cast from dtype('float64') to dtype('int64'), the operand 0 dtype, according to the rule 'same_kind' Broadcasting array iteration ============================ @@ -396,26 +448,34 @@ a two dimensional array together. .. 
admonition:: Example - >>> a = np.arange(3) - >>> b = np.arange(6).reshape(2,3) - >>> for x, y in np.nditer([a,b]): - ... print("%d:%d" % (x,y), end=' ') - ... - 0:0 1:1 2:2 0:3 1:4 2:5 + .. try_examples:: + + >>> import numpy as np + + >>> a = np.arange(3) + >>> b = np.arange(6).reshape(2,3) + >>> for x, y in np.nditer([a,b]): + ... print("%d:%d" % (x,y), end=' ') + ... + 0:0 1:1 2:2 0:3 1:4 2:5 When a broadcasting error occurs, the iterator raises an exception which includes the input shapes to help diagnose the problem. .. admonition:: Example - >>> a = np.arange(2) - >>> b = np.arange(6).reshape(2,3) - >>> for x, y in np.nditer([a,b]): - ... print("%d:%d" % (x,y), end=' ') - ... - Traceback (most recent call last): - ... - ValueError: operands could not be broadcast together with shapes (2,) (2,3) + .. try_examples:: + + >>> import numpy as np + + >>> a = np.arange(2) + >>> b = np.arange(6).reshape(2,3) + >>> for x, y in np.nditer([a,b]): + ... print("%d:%d" % (x,y), end=' ') + ... + Traceback (most recent call last): + ... + ValueError: operands could not be broadcast together with shapes (2,) (2,3) Iterator-allocated output arrays -------------------------------- @@ -432,14 +492,18 @@ parameter support. .. admonition:: Example - >>> def square(a): - ... with np.nditer([a, None]) as it: - ... for x, y in it: - ... y[...] = x*x - ... return it.operands[1] - ... - >>> square([1,2,3]) - array([1, 4, 9]) + .. try_examples:: + + >>> import numpy as np + + >>> def square(a): + ... with np.nditer([a, None]) as it: + ... for x, y in it: + ... y[...] = x*x + ... return it.operands[1] + ... + >>> square([1,2,3]) + array([1, 4, 9]) By default, the :class:`nditer` uses the flags 'allocate' and 'writeonly' for operands that are passed in as None. This means we were able to provide @@ -469,31 +533,35 @@ reasons. .. admonition:: Example - >>> def square(a, out=None): - ... it = np.nditer([a, out], - ... flags = ['external_loop', 'buffered'], - ... 
op_flags = [['readonly'], - ... ['writeonly', 'allocate', 'no_broadcast']]) - ... with it: - ... for x, y in it: - ... y[...] = x*x - ... return it.operands[1] - ... - - >>> square([1,2,3]) - array([1, 4, 9]) - - >>> b = np.zeros((3,)) - >>> square([1,2,3], out=b) - array([1., 4., 9.]) - >>> b - array([1., 4., 9.]) - - >>> square(np.arange(6).reshape(2,3), out=b) - Traceback (most recent call last): - ... - ValueError: non-broadcastable output operand with shape (3,) doesn't - match the broadcast shape (2,3) + .. try_examples:: + + >>> import numpy as np + + >>> def square(a, out=None): + ... it = np.nditer([a, out], + ... flags = ['external_loop', 'buffered'], + ... op_flags = [['readonly'], + ... ['writeonly', 'allocate', 'no_broadcast']]) + ... with it: + ... for x, y in it: + ... y[...] = x*x + ... return it.operands[1] + ... + + >>> square([1,2,3]) + array([1, 4, 9]) + + >>> b = np.zeros((3,)) + >>> square([1,2,3], out=b) + array([1., 4., 9.]) + >>> b + array([1., 4., 9.]) + + >>> square(np.arange(6).reshape(2,3), out=b) + Traceback (most recent call last): + ... + ValueError: non-broadcastable output operand with shape (3,) doesn't + match the broadcast shape (2,3) Outer product iteration ----------------------- @@ -525,22 +593,26 @@ Everything to do with the outer product is handled by the iterator setup. .. admonition:: Example - >>> a = np.arange(3) - >>> b = np.arange(8).reshape(2,4) - >>> it = np.nditer([a, b, None], flags=['external_loop'], - ... op_axes=[[0, -1, -1], [-1, 0, 1], None]) - >>> with it: - ... for x, y, z in it: - ... z[...] = x*y - ... result = it.operands[2] # same as z - ... - >>> result - array([[[ 0, 0, 0, 0], - [ 0, 0, 0, 0]], - [[ 0, 1, 2, 3], - [ 4, 5, 6, 7]], - [[ 0, 2, 4, 6], - [ 8, 10, 12, 14]]]) + .. try_examples:: + + >>> import numpy as np + + >>> a = np.arange(3) + >>> b = np.arange(8).reshape(2,4) + >>> it = np.nditer([a, b, None], flags=['external_loop'], + ... op_axes=[[0, -1, -1], [-1, 0, 1], None]) + >>> with it: + ... 
for x, y, z in it: + ... z[...] = x*y + ... result = it.operands[2] # same as z + ... + >>> result + array([[[ 0, 0, 0, 0], + [ 0, 0, 0, 0]], + [[ 0, 1, 2, 3], + [ 4, 5, 6, 7]], + [[ 0, 2, 4, 6], + [ 8, 10, 12, 14]]]) Note that once the iterator is closed we can not access :func:`operands ` and must use a reference created inside the context manager. @@ -557,17 +629,21 @@ For a simple example, consider taking the sum of all elements in an array. .. admonition:: Example - >>> a = np.arange(24).reshape(2,3,4) - >>> b = np.array(0) - >>> with np.nditer([a, b], flags=['reduce_ok'], - ... op_flags=[['readonly'], ['readwrite']]) as it: - ... for x,y in it: - ... y[...] += x - ... - >>> b - array(276) - >>> np.sum(a) - 276 + .. try_examples:: + + >>> import numpy as np + + >>> a = np.arange(24).reshape(2,3,4) + >>> b = np.array(0) + >>> with np.nditer([a, b], flags=['reduce_ok'], + ... op_flags=[['readonly'], ['readwrite']]) as it: + ... for x,y in it: + ... y[...] += x + ... + >>> b + array(276) + >>> np.sum(a) + 276 Things are a little bit more tricky when combining reduction and allocated operands. Before iteration is started, any reduction operand must be @@ -576,22 +652,26 @@ sums along the last axis of `a`. .. admonition:: Example - >>> a = np.arange(24).reshape(2,3,4) - >>> it = np.nditer([a, None], flags=['reduce_ok'], - ... op_flags=[['readonly'], ['readwrite', 'allocate']], - ... op_axes=[None, [0,1,-1]]) - >>> with it: - ... it.operands[1][...] = 0 - ... for x, y in it: - ... y[...] += x - ... result = it.operands[1] - ... - >>> result - array([[ 6, 22, 38], - [54, 70, 86]]) - >>> np.sum(a, axis=2) - array([[ 6, 22, 38], - [54, 70, 86]]) + .. try_examples:: + + >>> import numpy as np + + >>> a = np.arange(24).reshape(2,3,4) + >>> it = np.nditer([a, None], flags=['reduce_ok'], + ... op_flags=[['readonly'], ['readwrite', 'allocate']], + ... op_axes=[None, [0,1,-1]]) + >>> with it: + ... it.operands[1][...] = 0 + ... for x, y in it: + ... y[...] += x + ... 
result = it.operands[1] + ... + >>> result + array([[ 6, 22, 38], + [54, 70, 86]]) + >>> np.sum(a, axis=2) + array([[ 6, 22, 38], + [54, 70, 86]]) To do buffered reduction requires yet another adjustment during the setup. Normally the iterator construction involves copying the first @@ -610,21 +690,25 @@ buffering. .. admonition:: Example - >>> a = np.arange(24).reshape(2,3,4) - >>> it = np.nditer([a, None], flags=['reduce_ok', - ... 'buffered', 'delay_bufalloc'], - ... op_flags=[['readonly'], ['readwrite', 'allocate']], - ... op_axes=[None, [0,1,-1]]) - >>> with it: - ... it.operands[1][...] = 0 - ... it.reset() - ... for x, y in it: - ... y[...] += x - ... result = it.operands[1] - ... - >>> result - array([[ 6, 22, 38], - [54, 70, 86]]) + .. try_examples:: + + >>> import numpy as np + + >>> a = np.arange(24).reshape(2,3,4) + >>> it = np.nditer([a, None], flags=['reduce_ok', + ... 'buffered', 'delay_bufalloc'], + ... op_flags=[['readonly'], ['readwrite', 'allocate']], + ... op_axes=[None, [0,1,-1]]) + >>> with it: + ... it.operands[1][...] = 0 + ... it.reset() + ... for x, y in it: + ... y[...] += x + ... result = it.operands[1] + ... + >>> result + array([[ 6, 22, 38], + [54, 70, 86]]) .. for doctests Include Cython section separately. Those tests are skipped entirely via an diff --git a/doc/source/reference/arrays.promotion.rst b/doc/source/reference/arrays.promotion.rst new file mode 100644 index 000000000000..32e503383217 --- /dev/null +++ b/doc/source/reference/arrays.promotion.rst @@ -0,0 +1,282 @@ +.. currentmodule:: numpy + +.. _arrays.promotion: + +**************************** +Data type promotion in NumPy +**************************** + +When mixing two different data types, NumPy has to determine the appropriate +dtype for the result of the operation. This step is referred to as *promotion* +or *finding the common dtype*. 
+ +In typical cases, the user does not need to worry about the details of +promotion, since the promotion step usually ensures that the result will +either match or exceed the precision of the input. + +For example, when the inputs are of the same dtype, the dtype of the result +matches the dtype of the inputs: + + >>> np.int8(1) + np.int8(1) + np.int8(2) + +Mixing two different dtypes normally produces a result with the dtype of the +higher precision input: + + >>> np.int8(4) + np.int64(8) # 64 > 8 + np.int64(12) + >>> np.float32(3) + np.float16(3) # 32 > 16 + np.float32(6.0) + +In typical cases, this does not lead to surprises. However, if you work with +non-default dtypes like unsigned integers and low-precision floats, or if you +mix NumPy integers, NumPy floats, and Python scalars, some +details of NumPy promotion rules may be relevant. Note that these detailed +rules do not always match those of other languages [#hist-reasons]_. + +Numerical dtypes come in four "kinds" with a natural hierarchy. + +1. unsigned integers (``uint``) +2. signed integers (``int``) +3. float (``float``) +4. complex (``complex``) + +In addition to kind, NumPy numerical dtypes also have an associated precision, specified +in bits. Together, the kind and precision specify the dtype. For example, a +``uint8`` is an unsigned integer stored using 8 bits. + +The result of an operation will always be of an equal or higher kind than any of +the inputs. Furthermore, the result will always have a precision greater than +or equal to those of the inputs. Already, this can lead to some examples which +may be unexpected: + +1. When mixing floating point numbers and integers, the precision of the + integer may force the result to a higher precision floating point. For + example, the result of an operation involving ``int64`` and ``float16`` + is ``float64``. +2. When mixing unsigned and signed integers with the same precision, the + result will have *higher* precision than either input. 
Additionally, + if one of them has 64bit precision already, no higher precision integer + is available and for example an operation involving ``int64`` and ``uint64`` + gives ``float64``. + +Please see the `Numerical promotion` section and image below for details +on both. + +Detailed behavior of Python scalars +----------------------------------- +Since NumPy 2.0 [#NEP50]_, an important point in our promotion rules is +that although operations involving two NumPy dtypes never lose precision, +operations involving a NumPy dtype and a Python scalar (``int``, ``float``, +or ``complex``) *can* lose precision. For instance, it is probably intuitive +that the result of an operation between a Python integer and a NumPy integer +should be a NumPy integer. However, Python integers have arbitrary precision +whereas all NumPy dtypes have fixed precision, so the arbitrary precision +of Python integers cannot be preserved. + +More generally, NumPy considers the "kind" of Python scalars, but ignores +their precision when determining the result dtype. This is often convenient. +For instance, when working with arrays of a low precision dtype, it is usually +desirable for simple operations with Python scalars to preserve the dtype. + + >>> arr_float32 = np.array([1, 2.5, 2.1], dtype=np.float32) + >>> arr_float32 + 10.0 # undesirable to promote to float64 + array([11. , 12.5, 12.1], dtype=float32) + >>> arr_int16 = np.array([3, 5, 7], dtype=np.int16) + >>> arr_int16 + 10 # undesirable to promote to int64 + array([13, 15, 17], dtype=int16) + +In both cases, the result precision is dictated by the NumPy dtype. +Because of this, ``arr_float32 + 3.0`` behaves the same as +``arr_float32 + np.float32(3.0)``, and ``arr_int16 + 10`` behaves as +``arr_int16 + np.int16(10.)``. 
+ +As another example, when mixing NumPy integers with a Python ``float`` +or ``complex``, the result always has type ``float64`` or ``complex128``: + + >>> np.int16(1) + 1.0 + np.float64(2.0) + +However, these rules can also lead to surprising behavior when working with +low precision dtypes. + +First, since the Python value is converted to a NumPy one before the operation +can be performed, operations can fail with an error when the result seems +obvious. For instance, ``np.int8(1) + 1000`` cannot continue because ``1000`` +exceeds the maximum value of an ``int8``. When the Python scalar +cannot be coerced to the NumPy dtype, an error is raised: + + >>> np.int8(1) + 1000 + Traceback (most recent call last): + ... + OverflowError: Python integer 1000 out of bounds for int8 + >>> np.int64(1) * 10**100 + Traceback (most recent call last): + ... + OverflowError: Python int too large to convert to C long + >>> np.float32(1) + 1e300 + np.float32(inf) + ... RuntimeWarning: overflow encountered in cast + +Second, since the Python float or integer precision is always ignored, a low +precision NumPy scalar will keep using its lower precision unless explicitly +converted to a higher precision NumPy dtype or Python scalar (e.g. via ``int()``, +``float()``, or ``scalar.item()``). This lower precision may be detrimental to +some calculations or lead to incorrect results, especially in the case of integer +overflows: + + >>> np.int8(100) + 100 # the result exceeds the capacity of int8 + np.int8(-56) + ... RuntimeWarning: overflow encountered in scalar add + +Note that NumPy warns when overflows occur for scalars, but not for arrays; +e.g., ``np.array(100, dtype=np.uint8) + 100`` will *not* warn. + +Numerical promotion +------------------- + +The following image shows the numerical promotion rules with the kinds +on the vertical axis and the precision on the horizontal axis. + +.. 
figure:: figures/nep-0050-promotion-no-fonts.svg + :figclass: align-center + +The input dtype with the higher kind determines the kind of the result dtype. +The result dtype has a precision as low as possible without appearing to the +left of either input dtype in the diagram. + +Note the following specific rules and observations: + +1. When a Python ``float`` or ``complex`` interacts with a NumPy integer + the result will be ``float64`` or ``complex128`` (yellow border). + NumPy booleans will also be cast to the default integer [#default-int]_. + This is not relevant when additionally NumPy floating point values are + involved. +2. The precision is drawn such that ``float16 < int16 < uint16`` because + large ``uint16`` do not fit ``int16`` and large ``int16`` will lose precision + when stored in a ``float16``. + This pattern however is broken since NumPy always considers ``float64`` + and ``complex128`` to be acceptable promotion results for any integer + value. +3. A special case is that NumPy promotes many combinations of signed and + unsigned integers to ``float64``. A higher kind is used here because no + signed integer dtype is sufficiently precise to hold a ``uint64``. + + +Exceptions to the general promotion rules +----------------------------------------- + +In NumPy promotion refers to what specific functions do with the result and +in some cases, this means that NumPy may deviate from what the `np.result_type` +would give. + +Behavior of ``sum`` and ``prod`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +``np.sum`` and ``np.prod`` will always return the default integer type +when summing over integer values (or booleans). This is usually an ``int64``. +The reason for this is that integer summations are otherwise very likely +to overflow and give confusing results. +This rule also applies to the underlying ``np.add.reduce`` and +``np.multiply.reduce``. 
+ +Notable behavior with NumPy or Python integer scalars +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +NumPy promotion refers to the result dtype and operation precision, +but the operation will sometimes dictate that result. +Division always returns floating point values and comparison always booleans. + +This leads to what may appear as "exceptions" to the rules: + +* NumPy comparisons with Python integers or mixed precision integers always + return the correct result. The inputs will never be cast in a way which + loses precision. +* Equality comparisons between types which cannot be promoted will be + considered all ``False`` (equality) or all ``True`` (not-equal). +* Unary math functions like ``np.sin`` that always return floating point + values, accept any Python integer input by converting it to ``float64``. +* Division always returns floating point values and thus also allows divisions + between any NumPy integer with any Python integer value by casting both + to ``float64``. + +In principle, some of these exceptions may make sense for other functions. +Please raise an issue if you feel this is the case. + +Notable behavior with Python builtin type classes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +When combining Python's builtin scalar *types* (i.e., ``float``, ``int``, +or ``complex``, not scalar *values*), the promotion rules can appear +surprising: + + >>> np.result_type(7, np.array([1], np.float32)) + dtype('float32') # The scalar value '7' does not impact type promotion + >>> np.result_type(type(7), np.array([1], np.float32)) + dtype('float64') # The *type* of the scalar value '7' does impact promotion + # Similar situations happen with Python's float and complex types + +The reason for this behavior is that NumPy converts ``int`` to its default +integer type, and uses that type for promotion: + + >>> np.result_type(int) + dtype('int64') + +See also :ref:`dtype-constructing-from-python-types` for more details. 
+ +Promotion of non-numerical datatypes +------------------------------------ + +NumPy extends the promotion to non-numerical types, although in many cases +promotion is not well defined and simply rejected. + +The following rules apply: + +* NumPy byte strings (``np.bytes_``) can be promoted to unicode strings + (``np.str_``). However, casting the bytes to unicode will fail for + non-ascii characters. +* For some purposes NumPy will promote almost any other datatype to strings. + This applies to array creation or concatenation. +* The array constructors like ``np.array()`` will use ``object`` dtype when + there is no viable promotion. +* Structured dtypes can promote when their field names and order matches. + In that case all fields are promoted individually. +* NumPy ``timedelta`` can in some cases promote with integers. + +.. note:: + Some of these rules are somewhat surprising, and are being considered for + change in the future. However, any backward-incompatible changes have to + be weighed against the risks of breaking existing code. Please raise an + issue if you have particular ideas about how promotion should work. + +Details of promoted ``dtype`` instances +--------------------------------------- +The above discussion has mainly dealt with the behavior when mixing different +DType classes. +A ``dtype`` instance attached to an array can carry additional information +such as byte-order, metadata, string length, or exact structured dtype layout. + +While the string length or field names of a structured dtype are important, +NumPy considers byte-order, metadata, and the exact layout of a structured +dtype as storage details. + +During promotion NumPy does *not* take these storage details into account: + +* Byte-order is converted to native byte-order. +* Metadata attached to the dtype may or may not be preserved. +* Resulting structured dtypes will be packed (but aligned if inputs were). 
+ +This behavior is the best behavior for most programs where storage details +are not relevant to the final results and where the use of incorrect byte-order +could drastically slow down evaluation. + + +.. [#hist-reasons] To a large degree, this may just be for choices made early + on in NumPy's predecessors. For more details, see :ref:`NEP 50 `. + +.. [#NEP50] See also :ref:`NEP 50 ` which changed the rules for + NumPy 2.0. Previous versions of NumPy would sometimes return higher + precision results based on the input value of Python scalars. + Further, previous versions of NumPy would typically ignore the higher + precision of NumPy scalars or 0-D arrays for promotion purposes. + +.. [#default-int] The default integer is marked as ``int64`` in the schema + but is ``int32`` on 32bit platforms. However, most modern systems are 64bit. diff --git a/doc/source/reference/arrays.rst b/doc/source/reference/arrays.rst index e8c9bc348f31..2e5869dc5379 100644 --- a/doc/source/reference/arrays.rst +++ b/doc/source/reference/arrays.rst @@ -41,6 +41,7 @@ of also more complicated arrangements of data. arrays.ndarray arrays.scalars arrays.dtypes + arrays.promotion arrays.nditer arrays.classes maskedarray diff --git a/doc/source/reference/arrays.scalars.rst b/doc/source/reference/arrays.scalars.rst index 11b3bdc16c6c..f859db4620d4 100644 --- a/doc/source/reference/arrays.scalars.rst +++ b/doc/source/reference/arrays.scalars.rst @@ -65,10 +65,10 @@ Some of the scalar types are essentially equivalent to fundamental Python types and therefore inherit from them as well as from the generic array scalar type: -==================== =========================== ============= Array scalar type Related Python type Inherits? 
-==================== =========================== ============= -:class:`int_` :class:`int` Python 2 only +==================== =========================== ========= +:class:`int_` :class:`int` no :class:`double` :class:`float` yes :class:`cdouble` :class:`complex` yes :class:`bytes_` :class:`bytes` yes @@ -76,7 +76,7 @@ Array scalar type Related Python type Inherits? :class:`bool_` :class:`bool` no :class:`datetime64` :class:`datetime.datetime` no :class:`timedelta64` :class:`datetime.timedelta` no -==================== =========================== ============= +==================== =========================== ========= The :class:`bool_` data type is very similar to the Python :class:`bool` but does not inherit from it because Python's @@ -86,9 +86,9 @@ Python Boolean scalar. .. warning:: - The :class:`int_` type does **not** inherit from the - :class:`int` built-in under Python 3, because type :class:`int` is no - longer a fixed-width integer type. + The :class:`int_` type does **not** inherit from the built-in + :class:`int`, because type :class:`int` is not a fixed-width + integer type. .. tip:: The default data type in NumPy is :class:`double`. @@ -189,31 +189,35 @@ Inexact types `format_float_positional` and `format_float_scientific`. This means that variables with equal binary values but whose datatypes are of - different precisions may display differently:: - - >>> f16 = np.float16("0.1") - >>> f32 = np.float32(f16) - >>> f64 = np.float64(f32) - >>> f16 == f32 == f64 - True - >>> f16, f32, f64 - (0.1, 0.099975586, 0.0999755859375) - - Note that none of these floats hold the exact value :math:`\frac{1}{10}`; - ``f16`` prints as ``0.1`` because it is as close to that value as possible, - whereas the other types do not as they have more precision and therefore have - closer values. 
- - Conversely, floating-point scalars of different precisions which approximate - the same decimal value may compare unequal despite printing identically: - - >>> f16 = np.float16("0.1") - >>> f32 = np.float32("0.1") - >>> f64 = np.float64("0.1") - >>> f16 == f32 == f64 - False - >>> f16, f32, f64 - (0.1, 0.1, 0.1) + different precisions may display differently: + + .. try_examples:: + + >>> import numpy as np + + >>> f16 = np.float16("0.1") + >>> f32 = np.float32(f16) + >>> f64 = np.float64(f32) + >>> f16 == f32 == f64 + True + >>> f16, f32, f64 + (0.1, 0.099975586, 0.0999755859375) + + Note that none of these floats hold the exact value :math:`\frac{1}{10}`; + ``f16`` prints as ``0.1`` because it is as close to that value as possible, + whereas the other types do not as they have more precision and therefore have + closer values. + + Conversely, floating-point scalars of different precisions which approximate + the same decimal value may compare unequal despite printing identically: + + >>> f16 = np.float16("0.1") + >>> f32 = np.float32("0.1") + >>> f64 = np.float64("0.1") + >>> f16 == f32 == f64 + False + >>> f16, f32, f64 + (0.1, 0.1, 0.1) Floating-point types ~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 7a2f0cbcda91..d2bdae695933 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -66,15 +66,11 @@ and its sub-types). .. c:function:: void PyArray_ENABLEFLAGS(PyArrayObject* arr, int flags) - .. versionadded:: 1.7 - Enables the specified array flags. This function does no validation, and assumes that you know what you're doing. .. c:function:: void PyArray_CLEARFLAGS(PyArrayObject* arr, int flags) - .. versionadded:: 1.7 - Clears the specified array flags. This function does no validation, and assumes that you know what you're doing. @@ -97,8 +93,6 @@ and its sub-types). .. c:function:: npy_intp *PyArray_SHAPE(PyArrayObject *arr) - .. 
versionadded:: 1.7 - A synonym for :c:func:`PyArray_DIMS`, named to be consistent with the `shape ` usage within Python. @@ -127,7 +121,7 @@ and its sub-types). Returns the total size (in number of elements) of the array. -.. c:function:: npy_intp PyArray_Size(PyArrayObject* obj) +.. c:function:: npy_intp PyArray_Size(PyObject* obj) Returns 0 if *obj* is not a sub-class of ndarray. Otherwise, returns the total number of elements in the array. Safer version @@ -157,8 +151,6 @@ and its sub-types). .. c:function:: PyArray_Descr *PyArray_DTYPE(PyArrayObject* arr) - .. versionadded:: 1.7 - A synonym for PyArray_DESCR, named to be consistent with the 'dtype' usage within Python. @@ -275,8 +267,6 @@ From scratch PyArrayObject* prototype, NPY_ORDER order, PyArray_Descr* descr, \ int subok) - .. versionadded:: 1.6 - This function steals a reference to *descr* if it is not NULL. This array creation routine allows for the convenient creation of a new array matching an existing array's shapes and memory layout, @@ -406,8 +396,6 @@ From scratch .. c:function:: int PyArray_SetBaseObject(PyArrayObject* arr, PyObject* obj) - .. versionadded:: 1.7 - This function **steals a reference** to ``obj`` and sets it as the base property of ``arr``. @@ -688,7 +676,7 @@ From other objects Encapsulate the functionality of functions and methods that take the axis= keyword and work properly with None as the axis argument. The input array is ``obj``, while ``*axis`` is a - converted integer (so that >=MAXDIMS is the None value), and + converted integer (so that ``*axis == NPY_RAVEL_AXIS`` is the None value), and ``requirements`` gives the needed properties of ``obj``. The output is a converted version of the input so that requirements are met and if needed a flattening has occurred. On output @@ -796,7 +784,7 @@ cannot not be accessed directly. Allows setting of the itemsize, this is *only* relevant for string/bytes datatypes as it is the current pattern to define one with a new size. -.. 
c:function:: npy_intp PyDataType_ALIGNENT(PyArray_Descr *descr) +.. c:function:: npy_intp PyDataType_ALIGNMENT(PyArray_Descr *descr) The alignment of the datatype. @@ -823,7 +811,7 @@ cannot not be accessed directly. .. c:function:: PyArray_ArrayDescr *PyDataType_SUBARRAY(PyArray_Descr *descr) - Information about a subarray dtype eqivalent to the Python `np.dtype.base` + Information about a subarray dtype equivalent to the Python `np.dtype.base` and `np.dtype.shape`. If this is non- ``NULL``, then this data-type descriptor is a @@ -934,8 +922,6 @@ argument must be a :c:expr:`PyObject *` that can be directly interpreted as a called on flexible dtypes. Types that are attached to an array will always be sized, hence the array form of this macro not existing. - .. versionchanged:: 1.18 - For structured datatypes with no fields this function now returns False. .. c:function:: int PyTypeNum_ISUSERDEF(int num) @@ -1065,8 +1051,6 @@ Converting data types .. c:function:: int PyArray_CanCastTypeTo( \ PyArray_Descr* fromtype, PyArray_Descr* totype, NPY_CASTING casting) - .. versionadded:: 1.6 - Returns non-zero if an array of data type *fromtype* (which can include flexible types) can be cast safely to an array of data type *totype* (which can include flexible types) according to @@ -1081,23 +1065,18 @@ Converting data types .. c:function:: int PyArray_CanCastArrayTo( \ PyArrayObject* arr, PyArray_Descr* totype, NPY_CASTING casting) - .. versionadded:: 1.6 - Returns non-zero if *arr* can be cast to *totype* according to the casting rule given in *casting*. If *arr* is an array scalar, its value is taken into account, and non-zero is also returned when the value will not overflow or be truncated to an integer when converting to a smaller type. 
- This is almost the same as the result of - PyArray_CanCastTypeTo(PyArray_MinScalarType(arr), totype, casting), - but it also handles a special case arising because the set - of uint values is not a subset of the int values for types with the - same number of bits. - .. c:function:: PyArray_Descr* PyArray_MinScalarType(PyArrayObject* arr) - .. versionadded:: 1.6 + .. note:: + With the adoption of NEP 50 in NumPy 2, this function is not used + internally. It is currently provided for backwards compatibility, + but expected to be eventually deprecated. If *arr* is an array, returns its data type descriptor, but if *arr* is an array scalar (has 0 dimensions), it finds the data type @@ -1111,8 +1090,6 @@ Converting data types .. c:function:: PyArray_Descr* PyArray_PromoteTypes( \ PyArray_Descr* type1, PyArray_Descr* type2) - .. versionadded:: 1.6 - Finds the data type of smallest size and kind to which *type1* and *type2* may be safely converted. This function is symmetric and associative. A string or unicode result will be the proper size for @@ -1122,8 +1099,6 @@ Converting data types npy_intp narrs, PyArrayObject **arrs, npy_intp ndtypes, \ PyArray_Descr **dtypes) - .. versionadded:: 1.6 - This applies type promotion to all the input arrays and dtype objects, using the NumPy rules for combining scalars and arrays, to determine the output type for an operation with the given set of @@ -1134,8 +1109,7 @@ Converting data types .. c:function:: int PyArray_ObjectType(PyObject* op, int mintype) - This function is superseded by :c:func:`PyArray_MinScalarType` and/or - :c:func:`PyArray_ResultType`. + This function is superseded by :c:func:`PyArray_ResultType`. This function is useful for determining a common type that two or more arrays can be converted to. It only works for non-flexible @@ -1163,11 +1137,6 @@ Converting data types ``DECREF`` 'd or a memory-leak will occur. The example template-code below shows a typical usage: - .. 
versionchanged:: 1.18.0 - A mix of scalars and zero-dimensional arrays now produces a type - capable of holding the scalar value. - Previously priority was given to the dtype of the arrays. - .. code-block:: c mps = PyArray_ConvertToCommonType(obj, &n); @@ -1240,6 +1209,11 @@ User-defined data types With these two changes, the code should compile and work on both 1.x and 2.x or later. + In the unlikely case that you are heap allocating the dtype struct you + should free it again on NumPy 2, since a copy is made. + The struct is not a valid Python object, so do not use ``Py_DECREF`` + on it. + Register a data-type as a new user-defined data type for arrays. The type must have most of its entries filled in. This is not always checked and errors can produce segfaults. In @@ -1259,6 +1233,13 @@ User-defined data types registered (checked only by the address of the pointer), then return the previously-assigned type-number. + The number of user DTypes known to numpy is stored in + ``NPY_NUMUSERTYPES``, a static global variable that is public in the + C API. Accessing this symbol is inherently *not* thread-safe. If + for some reason you need to use this API in a multithreaded context, + you will need to add your own locking, NumPy does not ensure new + data types can be added in a thread-safe manner. + .. c:function:: int PyArray_RegisterCastFunc( \ PyArray_Descr* descr, int totype, PyArray_VectorUnaryFunc* castfunc) @@ -1565,7 +1546,7 @@ Flag checking For all of these macros *arr* must be an instance of a (subclass of) :c:data:`PyArray_Type`. -.. c:function:: int PyArray_CHKFLAGS(PyObject *arr, int flags) +.. c:function:: int PyArray_CHKFLAGS(const PyArrayObject *arr, int flags) The first parameter, arr, must be an ndarray or subclass. 
The parameter, *flags*, should be an integer consisting of bitwise @@ -1574,60 +1555,60 @@ For all of these macros *arr* must be an instance of a (subclass of) :c:data:`NPY_ARRAY_OWNDATA`, :c:data:`NPY_ARRAY_ALIGNED`, :c:data:`NPY_ARRAY_WRITEABLE`, :c:data:`NPY_ARRAY_WRITEBACKIFCOPY`. -.. c:function:: int PyArray_IS_C_CONTIGUOUS(PyObject *arr) +.. c:function:: int PyArray_IS_C_CONTIGUOUS(const PyArrayObject *arr) Evaluates true if *arr* is C-style contiguous. -.. c:function:: int PyArray_IS_F_CONTIGUOUS(PyObject *arr) +.. c:function:: int PyArray_IS_F_CONTIGUOUS(const PyArrayObject *arr) Evaluates true if *arr* is Fortran-style contiguous. -.. c:function:: int PyArray_ISFORTRAN(PyObject *arr) +.. c:function:: int PyArray_ISFORTRAN(const PyArrayObject *arr) Evaluates true if *arr* is Fortran-style contiguous and *not* C-style contiguous. :c:func:`PyArray_IS_F_CONTIGUOUS` is the correct way to test for Fortran-style contiguity. -.. c:function:: int PyArray_ISWRITEABLE(PyObject *arr) +.. c:function:: int PyArray_ISWRITEABLE(const PyArrayObject *arr) Evaluates true if the data area of *arr* can be written to -.. c:function:: int PyArray_ISALIGNED(PyObject *arr) +.. c:function:: int PyArray_ISALIGNED(const PyArrayObject *arr) Evaluates true if the data area of *arr* is properly aligned on the machine. -.. c:function:: int PyArray_ISBEHAVED(PyObject *arr) +.. c:function:: int PyArray_ISBEHAVED(const PyArrayObject *arr) Evaluates true if the data area of *arr* is aligned and writeable and in machine byte-order according to its descriptor. -.. c:function:: int PyArray_ISBEHAVED_RO(PyObject *arr) +.. c:function:: int PyArray_ISBEHAVED_RO(const PyArrayObject *arr) Evaluates true if the data area of *arr* is aligned and in machine byte-order. -.. c:function:: int PyArray_ISCARRAY(PyObject *arr) +.. c:function:: int PyArray_ISCARRAY(const PyArrayObject *arr) Evaluates true if the data area of *arr* is C-style contiguous, and :c:func:`PyArray_ISBEHAVED` (*arr*) is true. -.. 
c:function:: int PyArray_ISFARRAY(PyObject *arr) +.. c:function:: int PyArray_ISFARRAY(const PyArrayObject *arr) Evaluates true if the data area of *arr* is Fortran-style contiguous and :c:func:`PyArray_ISBEHAVED` (*arr*) is true. -.. c:function:: int PyArray_ISCARRAY_RO(PyObject *arr) +.. c:function:: int PyArray_ISCARRAY_RO(const PyArrayObject *arr) Evaluates true if the data area of *arr* is C-style contiguous, aligned, and in machine byte-order. -.. c:function:: int PyArray_ISFARRAY_RO(PyObject *arr) +.. c:function:: int PyArray_ISFARRAY_RO(const PyArrayObject *arr) Evaluates true if the data area of *arr* is Fortran-style contiguous, aligned, and in machine byte-order **.** -.. c:function:: int PyArray_ISONESEGMENT(PyObject *arr) +.. c:function:: int PyArray_ISONESEGMENT(const PyArrayObject *arr) Evaluates true if the data area of *arr* consists of a single (C-style or Fortran-style) contiguous segment. @@ -1805,9 +1786,9 @@ the functions that must be implemented for each slot. - ``0.0`` is the default for ``sum([])``. But ``-0.0`` is the correct identity otherwise as it preserves the sign for ``sum([-0.0])``. - We use no identity for object, but return the default of ``0`` and - ``1`` for the empty ``sum([], dtype=object)`` and - ``prod([], dtype=object)``. - This allows ``np.sum(np.array(["a", "b"], dtype=object))`` to work. + ``1`` for the empty ``sum([], dtype=np.object_)`` and + ``prod([], dtype=np.object_)``. + This allows ``np.sum(np.array(["a", "b"], dtype=np.object_))`` to work. - ``-inf`` or ``INT_MIN`` for ``max`` is an identity, but at least ``INT_MIN`` not a good *default* when there are no items. @@ -1905,6 +1886,37 @@ with the rest of the ArrayMethod API. the main ufunc registration function. This adds a new implementation/loop to a ufunc. It replaces `PyUFunc_RegisterLoopForType`. +.. c:type:: PyUFunc_LoopSlot + + Structure used to add multiple loops to ufuncs from ArrayMethod specs. + This is used in `PyUFunc_AddLoopsFromSpecs`. + + .. 
c:struct:: PyUFunc_LoopSlot + + .. c:member:: const char *name + + The name of the ufunc to add the loop to, in the form like that of + entry points, ``(module ':')? (object '.')* name``, with ``numpy`` + the default module. Examples: ``sin``, ``strings.str_len``, + ``numpy.strings:str_len``. + + .. c:member:: PyArrayMethod_Spec *spec + + The ArrayMethod spec to use to create the loop. + +.. c:function:: int PyUFunc_AddLoopsFromSpecs( \ + PyUFunc_LoopSlot *slots) + + .. versionadded:: 2.4 + + Add multiple loops to ufuncs from ArrayMethod specs. This also + handles the registration of methods for the ufunc-like functions + ``sort`` and ``argsort``. See :ref:`array-methods-sorting` for details. + + The ``slots`` argument must be a NULL-terminated array of + `PyUFunc_LoopSlot` (see above), which give the name of the + ufunc and spec needed to create the loop. + .. c:function:: int PyUFunc_AddPromoter( \ PyObject *ufunc, PyObject *DType_tuple, PyObject *promoter) @@ -2055,6 +2067,36 @@ code: Py_INCREF(loop_descrs[2]); } +.. _array-methods-sorting: + +Sorting and Argsorting +~~~~~~~~~~~~~~~~~~~~~~~ + +Sorting and argsorting methods for dtypes can be registered using the +ArrayMethod API. This is done by adding an ArrayMethod spec with the name +``"sort"`` or ``"argsort"`` respectively. The spec must have ``nin=1`` +and ``nout=1`` for both sort and argsort. Sorting is inplace, hence we +enforce that ``data[0] == data[1]``. Argsorting returns a new array of +indices, so the output must be of ``NPY_INTP`` type. + +The ``context`` passed to the loop contains the ``parameters`` field which +for these operations is a ``PyArrayMethod_SortParameters *`` struct. This +struct contains a ``flags`` field which is a bitwise OR of ``NPY_SORTKIND`` +values indicating the kind of sort to perform (that is, whether it is a +stable and/or descending sort). 
If the strided loop depends on the flags, +a good way to deal with this is to define :c:macro:`NPY_METH_get_loop`, +and not set any of the other loop slots. + +.. c:struct:: PyArrayMethod_SortParameters + + .. c:member:: NPY_SORTKIND flags + + The flags passed to the sort operation. This is a bitwise OR of + ``NPY_SORTKIND`` values indicating the kind of sort to perform. + +These specs can be registered using :c:func:`PyUFunc_AddLoopsFromSpecs` +along with other ufunc loops. + API for calling array methods ----------------------------- @@ -2206,19 +2248,18 @@ Shape Manipulation PyArrayObject* self, PyArray_Dims* newshape, int refcheck, \ NPY_ORDER fortran) - Equivalent to :meth:`ndarray.resize` (*self*, *newshape*, refcheck - ``=`` *refcheck*, order= fortran ). This function only works on - single-segment arrays. It changes the shape of *self* inplace and - will reallocate the memory for *self* if *newshape* has a - different total number of elements then the old shape. If - reallocation is necessary, then *self* must own its data, have - *self* - ``>base==NULL``, have *self* - ``>weakrefs==NULL``, and - (unless refcheck is 0) not be referenced by any other array. - The fortran argument can be :c:data:`NPY_ANYORDER`, :c:data:`NPY_CORDER`, - or :c:data:`NPY_FORTRANORDER`. It currently has no effect. Eventually - it could be used to determine how the resize operation should view - the data when constructing a differently-dimensioned array. - Returns None on success and NULL on error. + Equivalent to :meth:`ndarray.resize` (*self*, *newshape*, *refcheck*). + This function only works on single-segment arrays. It changes the shape of + *self* inplace and will reallocate the memory for *self* if *newshape* has + a different total number of elements than the old shape. If reallocation is + necessary, then *self* must own its data, have *self* - ``>base==NULL``, + have *self* - ``>weakrefs==NULL``, and (unless refcheck is 0) not be + referenced by any other array. 
The fortran argument can be + :c:data:`NPY_ANYORDER`, :c:data:`NPY_CORDER`, or + :c:data:`NPY_FORTRANORDER`. It currently has no effect. Eventually it + could be used to determine how the resize operation should view the data + when constructing a differently-dimensioned array. Returns None on success + and NULL on error. .. c:function:: PyObject* PyArray_Transpose( \ PyArrayObject* self, PyArray_Dims* permute) @@ -2322,21 +2363,36 @@ Item selection and manipulation .. c:function:: PyObject* PyArray_Sort(PyArrayObject* self, int axis, NPY_SORTKIND kind) - Equivalent to :meth:`ndarray.sort` (*self*, *axis*, *kind*). - Return an array with the items of *self* sorted along *axis*. The array - is sorted using the algorithm denoted by *kind*, which is an integer/enum pointing - to the type of sorting algorithms used. - -.. c:function:: PyObject* PyArray_ArgSort(PyArrayObject* self, int axis) - - Equivalent to :meth:`ndarray.argsort` (*self*, *axis*). - Return an array of indices such that selection of these indices - along the given ``axis`` would return a sorted version of *self*. If *self* ->descr - is a data-type with fields defined, then self->descr->names is used - to determine the sort order. A comparison where the first field is equal - will use the second field and so on. To alter the sort order of a - structured array, create a new data-type with a different order of names - and construct a view of the array with that new data-type. + Return an array with the items of ``self`` sorted along ``axis``. The array + is sorted using an algorithm whose properties are specified by the value of + ``kind``, an integer/enum specifying the requirements of the sorting + algorithm used. If ``self->descr`` is a data-type with fields defined, + then ``self->descr->names`` is used to determine the sort order. A comparison + where the first field is equal will use the second field and so on. 
To + alter the sort order of a structured array, create a new data-type with a + different order of names and construct a view of the array with that new + data-type. + + This is the C level function called by the ndarray method + :meth:`ndarray.sort`, though with a different meaning + of ``kind`` -- see ``NPY_SORTKIND`` below. + +.. c:function:: PyObject* PyArray_ArgSort(PyArrayObject* self, int axis, NPY_SORTKIND kind) + + Return an array of indices such that selection of these indices along the + given ``axis`` would return a sorted version of ``self``. The array is + sorted using an algorithm whose properties are specified by ``kind``, an + integer/enum specifying the requirements of the sorting algorithm used. If + ``self->descr`` is a data-type with fields defined, then + ``self->descr->names`` is used to determine the sort order. A comparison + where the first field is equal will use the second field and so on. To + alter the sort order of a structured array, create a new data-type with a + different order of names and construct a view of the array with that new + data-type. + + This is the C level function called by the ndarray method + :meth:`ndarray.argsort`, though with a different + meaning of ``kind`` -- see ``NPY_SORTKIND`` below. .. c:function:: PyObject* PyArray_LexSort(PyObject* sort_keys, int axis) @@ -2411,8 +2467,6 @@ Item selection and manipulation .. c:function:: npy_intp PyArray_CountNonzero(PyArrayObject* self) - .. versionadded:: 1.6 - Counts the number of non-zero elements in the array object *self*. .. c:function:: PyObject* PyArray_Nonzero(PyArrayObject* self) @@ -2672,8 +2726,6 @@ Array Functions .. c:function:: PyObject* PyArray_MatrixProduct2( \ PyObject* obj1, PyObject* obj, PyArrayObject* out) - .. versionadded:: 1.6 - Same as PyArray_MatrixProduct, but store the result in *out*. The output array must have the correct shape, type, and be C-contiguous, or an exception is raised. 
@@ -2683,8 +2735,6 @@ Array Functions PyArray_Descr* dtype, NPY_ORDER order, NPY_CASTING casting, \ PyArrayObject* out) - .. versionadded:: 1.6 - Applies the Einstein summation convention to the array operands provided, returning a new array or placing the result in *out*. The string in *subscripts* is a comma separated list of index @@ -2776,8 +2826,6 @@ Other functions Auxiliary data with object semantics ------------------------------------ -.. versionadded:: 1.7.0 - .. c:type:: NpyAuxData When working with more complex dtypes which are composed of other dtypes, @@ -2910,7 +2958,7 @@ of this useful approach to looping over an array from C. .. c:function:: void PyArray_ITER_NEXT(PyObject* iterator) - Incremement the index and the dataptr members of the *iterator* to + Increment the index and the dataptr members of the *iterator* to point to the next element of the array. If the array is not (C-style) contiguous, also increment the N-dimensional coordinates array. @@ -3059,8 +3107,6 @@ Broadcasting (multi-iterators) Neighborhood iterator --------------------- -.. versionadded:: 1.4.0 - Neighborhood iterators are subclasses of the iterator object, and can be used to iter over a neighborhood of a point. For example, you may want to iterate over every voxel of a 3d image, and for every such voxel, iterate over an @@ -3238,30 +3284,18 @@ Array scalars .. c:function:: NPY_SCALARKIND PyArray_ScalarKind( \ int typenum, PyArrayObject** arr) - See the function :c:func:`PyArray_MinScalarType` for an alternative - mechanism introduced in NumPy 1.6.0. + Legacy way to query special promotion for scalar values. This is not + used in NumPy itself anymore and is expected to be deprecated eventually. - Return the kind of scalar represented by *typenum* and the array - in *\*arr* (if *arr* is not ``NULL`` ). The array is assumed to be - rank-0 and only used if *typenum* represents a signed integer. 
If - *arr* is not ``NULL`` and the first element is negative then - :c:data:`NPY_INTNEG_SCALAR` is returned, otherwise - :c:data:`NPY_INTPOS_SCALAR` is returned. The possible return values - are the enumerated values in :c:type:`NPY_SCALARKIND`. + New DTypes can define promotion rules specific to Python scalars. .. c:function:: int PyArray_CanCoerceScalar( \ char thistype, char neededtype, NPY_SCALARKIND scalar) - See the function :c:func:`PyArray_ResultType` for details of - NumPy type promotion, updated in NumPy 1.6.0. + Legacy way to query special promotion for scalar values. This is not + used in NumPy itself anymore and is expected to be deprecated eventually. - Implements the rules for scalar coercion. Scalars are only - silently coerced from thistype to neededtype if this function - returns nonzero. If scalar is :c:data:`NPY_NOSCALAR`, then this - function is equivalent to :c:func:`PyArray_CanCastSafely`. The rule is - that scalars of the same KIND can be coerced into arrays of the - same KIND. This rule means that high-precision scalars will never - cause low-precision arrays of the same KIND to be upcast. + Use ``PyArray_ResultType`` for similar purposes. Data-type descriptors @@ -3562,6 +3596,121 @@ member of ``PyArrayDTypeMeta_Spec`` struct. force newly created arrays to have a newly created descriptor instance, no matter what input descriptor is provided by a user. +.. c:macro:: NPY_DT_get_constant + +.. c:type:: int (PyArrayDTypeMeta_GetConstant)( \ + PyArray_Descr *descr, int constant_id, void *out) + + If defined, allows the DType to expose constant values such as machine + limits, special values (infinity, NaN), and floating-point characteristics. + The *descr* is the descriptor instance, *constant_id* is one of the + ``NPY_CONSTANT_*`` macros, and *out* is a pointer to uninitialized memory + where the constant value should be written. The memory pointed to by *out* + may be unaligned and is uninitialized. 
+ Returns 1 on success, 0 if the constant is not available, + or -1 with an error set. + + **Constant IDs**: + + The following constant IDs are defined for retrieving dtype-specific values: + + **Basic constants** (available for all numeric types): + + .. c:macro:: NPY_CONSTANT_zero + + The zero value for the dtype. + + .. c:macro:: NPY_CONSTANT_one + + The one value for the dtype. + + .. c:macro:: NPY_CONSTANT_minimum_finite + + The minimum finite value representable by the dtype. For floating-point types, + this is the most negative finite value (e.g., ``-FLT_MAX``). + + .. c:macro:: NPY_CONSTANT_maximum_finite + + The maximum finite value representable by the dtype. + + **Floating-point special values**: + + .. c:macro:: NPY_CONSTANT_inf + + Positive infinity (only for floating-point types). + + .. c:macro:: NPY_CONSTANT_ninf + + Negative infinity (only for floating-point types). + + .. c:macro:: NPY_CONSTANT_nan + + Not-a-Number (only for floating-point types). + + **Floating-point characteristics** (values of the dtype's native type): + + .. c:macro:: NPY_CONSTANT_finfo_radix + + The radix (base) of the floating-point representation. This is 2 for all + floating-point types. + + .. c:macro:: NPY_CONSTANT_finfo_eps + + Machine epsilon: the difference between 1.0 and the next representable value + greater than 1.0. Corresponds to C macros like ``FLT_EPSILON``, ``DBL_EPSILON``. + + .. note:: + For long double in IBM double-double format (PowerPC), this is defined as + ``0x1p-105L`` (2^-105) based on the ~106 bits of mantissa precision. + + .. c:macro:: NPY_CONSTANT_finfo_epsneg + + The difference between 1.0 and the next representable value less than 1.0. + Typically ``eps / radix`` for binary floating-point types. + + .. c:macro:: NPY_CONSTANT_finfo_smallest_normal + + The smallest positive normalized floating-point number. Corresponds to C + macros like ``FLT_MIN``, ``DBL_MIN``. This is the smallest value with a + leading 1 bit in the mantissa. + + .. 
c:macro:: NPY_CONSTANT_finfo_smallest_subnormal + + The smallest positive subnormal (denormalized) floating-point number. + Corresponds to C macros like ``FLT_TRUE_MIN``, ``DBL_TRUE_MIN``. This is + the smallest representable positive value, with leading 0 bits in the mantissa. + + **Floating-point characteristics** (integer values, type ``npy_intp``): + + These constants return integer metadata about the floating-point representation. + They are marked with the ``1 << 16`` bit to indicate they return ``npy_intp`` + values rather than the dtype's native type. + + .. c:macro:: NPY_CONSTANT_finfo_nmant + + Number of mantissa bits (excluding the implicit leading bit). For example, + IEEE 754 binary64 (double) has 52 explicit mantissa bits, so this returns 52. + Corresponds to ``MANT_DIG - 1`` from C standard macros. + + .. c:macro:: NPY_CONSTANT_finfo_min_exp + + Minimum exponent value. This is the minimum negative integer such that the + radix raised to the power of one less than that integer is a normalized + floating-point number. Corresponds to ``MIN_EXP - 1`` from C standard macros + (e.g., ``FLT_MIN_EXP - 1``). + + .. c:macro:: NPY_CONSTANT_finfo_max_exp + + Maximum exponent value. This is the maximum positive integer such that the + radix raised to the power of one less than that integer is a representable + finite floating-point number. Corresponds to ``MAX_EXP`` from C standard + macros (e.g., ``FLT_MAX_EXP``). + + .. c:macro:: NPY_CONSTANT_finfo_decimal_digits + + The number of decimal digits of precision. Corresponds to ``DIG`` from C + standard macros (e.g., ``FLT_DIG``, ``DBL_DIG``). + PyArray_ArrFuncs slots ^^^^^^^^^^^^^^^^^^^^^^ @@ -3855,7 +4004,7 @@ In this case, the helper C files typically do not have a canonical place where ``PyArray_ImportNumPyAPI`` should be called (although it is OK and fast to call it often). 
-To solve this, NumPy provides the following pattern that the the main +To solve this, NumPy provides the following pattern that the main file is modified to define ``PY_ARRAY_UNIQUE_SYMBOL`` before the include: .. code-block:: c @@ -3975,7 +4124,7 @@ the C-API is needed then some additional steps must be taken. behavior as NumPy 1.x. .. note:: - Windows never had shared visbility although you can use this macro + Windows never had shared visibility although you can use this macro to achieve it. We generally discourage sharing beyond shared boundary lines since importing the array API includes NumPy version checks. @@ -4071,27 +4220,10 @@ extension with the lowest :c:data:`NPY_FEATURE_VERSION` as possible. .. c:function:: unsigned int PyArray_GetNDArrayCFeatureVersion(void) - .. versionadded:: 1.4.0 - This just returns the value :c:data:`NPY_FEATURE_VERSION`. :c:data:`NPY_FEATURE_VERSION` changes whenever the API changes (e.g. a function is added). A changed value does not always require a recompile. -Internal Flexibility -~~~~~~~~~~~~~~~~~~~~ - -.. c:function:: void PyArray_SetStringFunction(PyObject* op, int repr) - - This function allows you to alter the tp_str and tp_repr methods - of the array object to any Python function. Thus you can alter - what happens for all arrays when str(arr) or repr(arr) is called - from Python. The function to be called is passed in as *op*. If - *repr* is non-zero, then this function will be called in response - to repr(arr), otherwise the function will be called in response to - str(arr). No check on whether or not *op* is callable is - performed. The callable passed in to *op* should expect an array - argument and should return a string to be printed. - Memory management ~~~~~~~~~~~~~~~~~ @@ -4102,8 +4234,8 @@ Memory management .. c:function:: char* PyDataMem_RENEW(void * ptr, size_t newbytes) - Macros to allocate, free, and reallocate memory. These macros are used - internally to create arrays. 
+ Functions to allocate, free, and reallocate memory. These are used + internally to manage array data memory unless overridden. .. c:function:: npy_intp* PyDimMem_NEW(int nd) @@ -4140,6 +4272,8 @@ Memory management Returns 0 if nothing was done, -1 on error, and 1 if action was taken. +.. _array.ndarray.capi.threading: + Threading support ~~~~~~~~~~~~~~~~~ @@ -4379,7 +4513,11 @@ Enumerated Types .. c:enum:: NPY_SORTKIND A special variable-type which can take on different values to indicate - the sorting algorithm being used. + the sorting algorithm being used. These algorithm types have not been + treated strictly for some time, but rather treated as stable/not stable. + In NumPy 2.4 they are replaced by requirements (see below), but done in a + backwards compatible way. These values will continue to work, except that + that NPY_HEAPSORT will do the same thing as NPY_QUICKSORT. .. c:enumerator:: NPY_QUICKSORT @@ -4393,11 +4531,32 @@ Enumerated Types .. c:enumerator:: NPY_NSORTS - Defined to be the number of sorts. It is fixed at three by the need for - backwards compatibility, and consequently :c:data:`NPY_MERGESORT` and - :c:data:`NPY_STABLESORT` are aliased to each other and may refer to one - of several stable sorting algorithms depending on the data type. + Defined to be the number of sorts. It is fixed at three by the need for + backwards compatibility, and consequently :c:data:`NPY_MERGESORT` and + :c:data:`NPY_STABLESORT` are aliased to each other and may refer to one + of several stable sorting algorithms depending on the data type. + + In NumPy 2.4 the algorithm names are replaced by requirements. You can still use + the old values, a recompile is not needed, but they are reinterpreted such that + + * NPY_QUICKSORT and NPY_HEAPSORT -> NPY_SORT_DEFAULT + * NPY_MERGESORT and NPY_STABLE -> NPY_SORT_STABLE + + .. c:enumerator:: NPY_SORT_DEFAULT + + The default sort for the type. 
For the NumPy builtin types it may be + stable or not, but will be ascending and sort NaN types to the end. It + is usually chosen for speed and/or low memory. + + .. c:enumerator:: NPY_SORT_STABLE + (Requirement) Specifies that the sort must be stable. + + .. c:enumerator:: NPY_SORT_DESCENDING + + (Requirement) Specifies that the sort must be in descending order. + This functionality is not yet implemented for any of the NumPy types + and cannot yet be set from the Python interface. .. c:enum:: NPY_SCALARKIND @@ -4484,8 +4643,6 @@ Enumerated Types .. c:enum:: NPY_CASTING - .. versionadded:: 1.6 - An enumeration type indicating how permissive data conversions should be. This is used by the iterator added in NumPy 1.6, and is intended to be used more broadly in a future version. @@ -4512,5 +4669,12 @@ Enumerated Types Allow any cast, no matter what kind of data loss may occur. +.. c:macro:: NPY_SAME_VALUE_CASTING + + Error if any values change during a cast. Currently + supported only in ``ndarray.astype(... casting='same_value')`` + + .. versionadded:: 2.4 + .. index:: pair: ndarray; C-API diff --git a/doc/source/reference/c-api/config.rst b/doc/source/reference/c-api/config.rst index 097eba9b7089..939beeefd666 100644 --- a/doc/source/reference/c-api/config.rst +++ b/doc/source/reference/c-api/config.rst @@ -78,8 +78,6 @@ Platform information .. c:macro:: NPY_CPU_S390 .. c:macro:: NPY_CPU_PARISC - .. versionadded:: 1.3.0 - CPU architecture of the platform; only one of the above is defined. @@ -91,8 +89,6 @@ Platform information .. c:macro:: NPY_BYTE_ORDER - .. versionadded:: 1.3.0 - Portable alternatives to the ``endian.h`` macros of GNU Libc. If big endian, :c:data:`NPY_BYTE_ORDER` == :c:data:`NPY_BIG_ENDIAN`, and similarly for little endian architectures. @@ -101,8 +97,6 @@ Platform information .. c:function:: int PyArray_GetEndianness() - .. versionadded:: 1.3.0 - Returns the endianness of the current platform. 
One of :c:data:`NPY_CPU_BIG`, :c:data:`NPY_CPU_LITTLE`, or :c:data:`NPY_CPU_UNKNOWN_ENDIAN`. diff --git a/doc/source/reference/c-api/coremath.rst b/doc/source/reference/c-api/coremath.rst index f8e0efb34d24..b2e3af4c0944 100644 --- a/doc/source/reference/c-api/coremath.rst +++ b/doc/source/reference/c-api/coremath.rst @@ -1,8 +1,7 @@ NumPy core math library ======================= -The numpy core math library (``npymath``) is a first step in this direction. This -library contains most math-related C99 functionality, which can be used on +This library contains most math-related C99 functionality, which can be used on platforms where C99 is not well supported. The core math functions have the same API as the C99 ones, except for the ``npy_*`` prefix. @@ -185,8 +184,6 @@ Those can be useful for precise floating point comparison. * NPY_FPE_UNDERFLOW * NPY_FPE_INVALID - .. versionadded:: 1.15.0 - .. c:function:: int npy_clear_floatstatus() Clears the floating point status. Returns the previous status mask. @@ -201,8 +198,6 @@ Those can be useful for precise floating point comparison. prevent aggressive compiler optimizations from reordering this function call. Returns the previous status mask. - .. versionadded:: 1.15.0 - .. _complex-numbers: Support for complex numbers @@ -322,20 +317,6 @@ The generic steps to take are: machine. Otherwise you pick up a static library built for the wrong architecture. -When you build with ``numpy.distutils`` (deprecated), then use this in your ``setup.py``: - - .. 
hidden in a comment so as to be included in refguide but not rendered documentation - >>> import numpy.distutils.misc_util - >>> config = np.distutils.misc_util.Configuration(None, '', '.') - >>> with open('foo.c', 'w') as f: pass - - >>> from numpy.distutils.misc_util import get_info - >>> info = get_info('npymath') - >>> _ = config.add_extension('foo', sources=['foo.c'], extra_info=info) - -In other words, the usage of ``info`` is exactly the same as when using -``blas_info`` and co. - When you are building with `Meson `__, use:: # Note that this will get easier in the future, when Meson has diff --git a/doc/source/reference/c-api/data_memory.rst b/doc/source/reference/c-api/data_memory.rst index f041c1a6a32a..a542bcf7c713 100644 --- a/doc/source/reference/c-api/data_memory.rst +++ b/doc/source/reference/c-api/data_memory.rst @@ -134,9 +134,8 @@ A better technique would be to use a ``PyCapsule`` as a base object: Example of memory tracing with ``np.lib.tracemalloc_domain`` ------------------------------------------------------------ -Note that since Python 3.6 (or newer), the builtin ``tracemalloc`` module can be used to -track allocations inside NumPy. NumPy places its CPU memory allocations into the -``np.lib.tracemalloc_domain`` domain. +The builtin ``tracemalloc`` module can be used to track allocations inside NumPy. +NumPy places its CPU memory allocations into the ``np.lib.tracemalloc_domain`` domain. For additional information, check: https://docs.python.org/3/library/tracemalloc.html. Here is an example on how to use ``np.lib.tracemalloc_domain``: diff --git a/doc/source/reference/c-api/datetimes.rst b/doc/source/reference/c-api/datetimes.rst index 5e344c7c1b74..34fc81ed1351 100644 --- a/doc/source/reference/c-api/datetimes.rst +++ b/doc/source/reference/c-api/datetimes.rst @@ -194,7 +194,7 @@ Conversion functions Returns the string length to use for converting datetime objects with the given local time and unit settings to strings. 
- Use this when constructings strings to supply to + Use this when constructing strings to supply to ``NpyDatetime_MakeISO8601Datetime``. .. c:function:: int NpyDatetime_MakeISO8601Datetime(\ diff --git a/doc/source/reference/c-api/dtype.rst b/doc/source/reference/c-api/dtype.rst index ce23c51aa9ea..f6b2289ba18a 100644 --- a/doc/source/reference/c-api/dtype.rst +++ b/doc/source/reference/c-api/dtype.rst @@ -1,3 +1,5 @@ + + Data type API ============= @@ -500,3 +502,4 @@ format specifier in printf and related commands. .. c:macro:: NPY_UINTP_FMT .. c:macro:: NPY_LONGDOUBLE_FMT + diff --git a/doc/source/reference/c-api/generalized-ufuncs.rst b/doc/source/reference/c-api/generalized-ufuncs.rst index 44b16f90eed4..b8a37e98b81e 100644 --- a/doc/source/reference/c-api/generalized-ufuncs.rst +++ b/doc/source/reference/c-api/generalized-ufuncs.rst @@ -4,6 +4,8 @@ Generalized universal function API ================================== +.. seealso:: :ref:`ufuncs` + There is a general need for looping over not only functions on scalars but also over functions on vectors (or arrays). This concept is realized in NumPy by generalizing the universal functions @@ -17,7 +19,7 @@ what the "core" dimensionality of the inputs is, as well as the corresponding dimensionality of the outputs (the element-wise ufuncs have zero core dimensions). The list of the core dimensions for all arguments is called the "signature" of a ufunc. For example, the -ufunc numpy.add has signature ``(),()->()`` defining two scalar inputs +ufunc ``numpy.add`` has signature ``(),()->()`` defining two scalar inputs and one scalar output. Another example is the function ``inner1d(a, b)`` with a signature of @@ -57,10 +59,12 @@ taken when calling such a function. An example would be the function ``euclidean_pdist(a)``, with signature ``(n,d)->(p)``, that given an array of ``n`` ``d``-dimensional vectors, computes all unique pairwise Euclidean distances among them. 
The output dimension ``p`` must therefore be equal to -``n * (n - 1) / 2``, but it is the caller's responsibility to pass in an -output array of the right size. If the size of a core dimension of an output +``n * (n - 1) / 2``, but by default, it is the caller's responsibility to pass +in an output array of the right size. If the size of a core dimension of an output cannot be determined from a passed in input or output array, an error will be -raised. +raised. This can be changed by defining a ``PyUFunc_ProcessCoreDimsFunc`` function +and assigning it to the ``proces_core_dims_func`` field of the ``PyUFuncObject`` +structure. See below for more details. Note: Prior to NumPy 1.10.0, less strict checks were in place: missing core dimensions were created by prepending 1's to the shape as necessary, core @@ -77,7 +81,7 @@ Elementary Function (e.g. adding two numbers is the most basic operation in adding two arrays). The ufunc applies the elementary function multiple times on different parts of the arrays. The input/output of elementary - functions can be vectors; e.g., the elementary function of inner1d + functions can be vectors; e.g., the elementary function of ``inner1d`` takes two vectors as input. Signature @@ -214,3 +218,117 @@ input/output arrays ``a``, ``b``, ``c``. Furthermore, ``dimensions`` will be ``[N, I, J]`` to define the size of ``N`` of the loop and the sizes ``I`` and ``J`` for the core dimensions ``i`` and ``j``. Finally, ``steps`` will be ``[a_N, b_N, c_N, a_i, a_j, b_i]``, containing all necessary strides. + +Customizing core dimension size processing +------------------------------------------ + +The optional function of type ``PyUFunc_ProcessCoreDimsFunc``, stored +on the ``process_core_dims_func`` attribute of the ufunc, provides the +author of the ufunc a "hook" into the processing of the core dimensions +of the arrays that were passed to the ufunc. 
The two primary uses of +this "hook" are: + +* Check that constraints on the core dimensions required + by the ufunc are satisfied (and set an exception if they are not). +* Compute output shapes for any output core dimensions that were not + determined by the input arrays. + +As an example of the first use, consider the generalized ufunc ``minmax`` +with signature ``(n)->(2)`` that simultaneously computes the minimum and +maximum of a sequence. It should require that ``n > 0``, because +the minimum and maximum of a sequence with length 0 is not meaningful. +In this case, the ufunc author might define the function like this: + + .. code-block:: c + + int minmax_process_core_dims(PyUFuncObject *ufunc, + npy_intp *core_dim_sizes) + { + npy_intp n = core_dim_sizes[0]; + if (n == 0) { + PyErr_SetString(PyExc_ValueError, + "minmax requires the core dimension to " + "be at least 1."); + return -1; + } + return 0; + } + +In this case, the length of the array ``core_dim_sizes`` will be 2. +The second value in the array will always be 2, so there is no need +for the function to inspect it. The core dimension ``n`` is stored +in the first element. The function sets an exception and returns -1 +if it finds that ``n`` is 0. + +The second use for the "hook" is to compute the size of output arrays +when the output arrays are not provided by the caller and one or more +core dimension of the output is not also an input core dimension. +If the ufunc does not have a function defined on the +``process_core_dims_func`` attribute, an unspecified output core +dimension size will result in an exception being raised. With the +"hook" provided by ``process_core_dims_func``, the author of the ufunc +can set the output size to whatever is appropriate for the ufunc. + +In the array passed to the "hook" function, core dimensions that +were not determined by the input are indicated by having the value -1 +in the ``core_dim_sizes`` array. 
The function can replace the -1 with +whatever value is appropriate for the ufunc, based on the core dimensions +that occurred in the input arrays. + +.. warning:: + The function must never change a value in ``core_dim_sizes`` that + is not -1 on input. Changing a value that was not -1 will generally + result in incorrect output from the ufunc, and could result in the + Python interpreter crashing. + +For example, consider the generalized ufunc ``conv1d`` for which +the elementary function computes the "full" convolution of two +one-dimensional arrays ``x`` and ``y`` with lengths ``m`` and ``n``, +respectively. The output of this convolution has length ``m + n - 1``. +To implement this as a generalized ufunc, the signature is set to +``(m),(n)->(p)``, and in the "hook" function, if the core dimension +``p`` is found to be -1, it is replaced with ``m + n - 1``. If ``p`` +is *not* -1, it must be verified that the given value equals ``m + n - 1``. +If it does not, the function must set an exception and return -1. +For a meaningful result, the operation also requires that ``m + n`` +is at least 1, i.e. both inputs can't have length 0. + +Here's how that might look in code: + + .. code-block:: c + + int conv1d_process_core_dims(PyUFuncObject *ufunc, + npy_intp *core_dim_sizes) + { + // core_dim_sizes will hold the core dimensions [m, n, p]. + // p will be -1 if the caller did not provide the out argument. + npy_intp m = core_dim_sizes[0]; + npy_intp n = core_dim_sizes[1]; + npy_intp p = core_dim_sizes[2]; + npy_intp required_p = m + n - 1; + + if (m == 0 && n == 0) { + // Disallow both inputs having length 0. + PyErr_SetString(PyExc_ValueError, + "conv1d: both inputs have core dimension 0; the function " + "requires that at least one input has size greater than 0."); + return -1; + } + if (p == -1) { + // Output array was not given in the call of the ufunc. + // Set the correct output size here. 
+ core_dim_sizes[2] = required_p; + return 0; + } + // An output array *was* given. Validate its core dimension. + if (p != required_p) { + PyErr_Format(PyExc_ValueError, + "conv1d: the core dimension p of the out parameter " + "does not equal m + n - 1, where m and n are the " + "core dimensions of the inputs x and y; got m=%zd " + "and n=%zd so p must be %zd, but got p=%zd.", + m, n, required_p, p); + return -1; + } + return 0; + } diff --git a/doc/source/reference/c-api/iterator.rst b/doc/source/reference/c-api/iterator.rst index 71bf44f4b239..5ab1d5a7ea7b 100644 --- a/doc/source/reference/c-api/iterator.rst +++ b/doc/source/reference/c-api/iterator.rst @@ -7,8 +7,6 @@ Array iterator API pair: iterator; C-API pair: C-API; iterator -.. versionadded:: 1.6 - Array iterator -------------- @@ -436,6 +434,9 @@ Construction and destruction is enabled, the caller must be sure to check whether ``NpyIter_IterationNeedsAPI(iter)`` is true, in which case it may not release the GIL during iteration. + If you are working with known dtypes `NpyIter_GetTransferFlags` is + a faster and more precise way to check for whether the iterator needs + the API due to buffering. .. c:macro:: NPY_ITER_ZEROSIZE_OK @@ -639,8 +640,6 @@ Construction and destruction .. c:macro:: NPY_ITER_ARRAYMASK - .. versionadded:: 1.7 - Indicates that this operand is the mask to use for selecting elements when writing to operands which have the :c:data:`NPY_ITER_WRITEMASKED` flag applied to them. @@ -663,8 +662,6 @@ Construction and destruction .. c:macro:: NPY_ITER_WRITEMASKED - .. versionadded:: 1.7 - This array is the mask for all `writemasked ` operands. Code uses the ``writemasked`` flag which indicates that only elements where the chosen ARRAYMASK operand is True @@ -715,7 +712,7 @@ Construction and destruction may not be repeated. The following example is how normal broadcasting applies to a 3-D array, a 2-D array, a 1-D array and a scalar. 
- **Note**: Before NumPy 1.8 ``oa_ndim == 0` was used for signalling + **Note**: Before NumPy 1.8 ``oa_ndim == 0`` was used for signalling that ``op_axes`` and ``itershape`` are unused. This is deprecated and should be replaced with -1. Better backward compatibility may be achieved by using :c:func:`NpyIter_MultiNew` for this case. @@ -829,6 +826,20 @@ Construction and destruction Returns ``NPY_SUCCEED`` or ``NPY_FAIL``. +.. c:function:: NPY_ARRAYMETHOD_FLAGS NpyIter_GetTransferFlags(NpyIter *iter) + + .. versionadded:: 2.3 + + Fetches the `NPY_METH_RUNTIME_FLAGS` which provide the information on + whether buffering needs the Python GIL (`NPY_METH_REQUIRES_PYAPI`) or + floating point errors may be set (`NPY_METH_NO_FLOATINGPOINT_ERRORS`). + + Prior to NumPy 2.3, the public function available was + ``NpyIter_IterationNeedsAPI``, which is still available and additionally + checks for object (or similar) dtypes and not exclusively for + buffering/iteration needs itself. + In general, this function should be preferred. + .. c:function:: int NpyIter_Reset(NpyIter* iter, char** errmsg) Resets the iterator back to its initial state, at the beginning @@ -1127,8 +1138,6 @@ Construction and destruction .. c:function:: npy_bool NpyIter_IsFirstVisit(NpyIter* iter, int iop) - .. versionadded:: 1.7 - Checks to see whether this is the first time the elements of the specified reduction operand which the iterator points at are being seen for the first time. The function returns a reasonable answer diff --git a/doc/source/reference/c-api/strings.rst b/doc/source/reference/c-api/strings.rst index 43d280d14e09..2e7dc34a337f 100644 --- a/doc/source/reference/c-api/strings.rst +++ b/doc/source/reference/c-api/strings.rst @@ -6,7 +6,7 @@ NpyString API .. versionadded:: 2.0 This API allows access to the UTF-8 string data stored in NumPy StringDType -arrays. See `NEP-55 `_ for +arrays. See :ref:`NEP-55 ` for more in-depth details into the design of StringDType. 
Examples diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst index b0f274f38a74..a039af130860 100644 --- a/doc/source/reference/c-api/types-and-structures.rst +++ b/doc/source/reference/c-api/types-and-structures.rst @@ -36,10 +36,10 @@ New types are defined in C by two basic steps: Instead of special method names which define behavior for Python classes, there are "function tables" which point to functions that -implement the desired results. Since Python 2.2, the PyTypeObject -itself has become dynamic which allows C types that can be "sub-typed -"from other C-types in C, and sub-classed in Python. The children -types inherit the attributes and methods from their parent(s). +implement the desired results. The PyTypeObject itself is dynamic +which allows C types that can be "sub-typed" from other C-types in C, +and sub-classed in Python. The children types inherit the attributes +and methods from their parent(s). There are two major new types: the ndarray ( :c:data:`PyArray_Type` ) and the ufunc ( :c:data:`PyUFunc_Type` ). Additional types play a @@ -215,12 +215,11 @@ The :c:data:`PyArray_Type` can also be sub-typed. .. tip:: - The ``tp_as_number`` methods use a generic approach to call whatever - function has been registered for handling the operation. When the - ``_multiarray_umath module`` is imported, it sets the numeric operations - for all arrays to the corresponding ufuncs. This choice can be changed with - :c:func:`PyUFunc_ReplaceLoopBySignature` The ``tp_str`` and ``tp_repr`` - methods can also be altered using :c:func:`PyArray_SetStringFunction`. + The :c:member:`tp_as_number ` methods use + a generic approach to call whatever function has been registered for + handling the operation. When the ``_multiarray_umath`` module is imported, + it sets the numeric operations for all arrays to the corresponding ufuncs. + This choice can be changed with :c:func:`PyUFunc_ReplaceLoopBySignature`. 
PyGenericArrType_Type --------------------- @@ -366,7 +365,7 @@ PyArrayDescr_Type and PyArray_Descr places an item of this type: ``offsetof(struct {char c; type v;}, v)`` - See `PyDataType_ALIGNMENT` for a way to access this field in a NumPy 1.x + See :c:func:`PyDataType_ALIGNMENT` for a way to access this field in a NumPy 1.x compatible way. .. c:member:: PyObject *metadata @@ -729,6 +728,7 @@ PyArrayMethod_Context and PyArrayMethod_Spec PyObject *caller; struct PyArrayMethodObject_tag *method; PyArray_Descr *const *descriptors; + void *parameters; } PyArrayMethod_Context .. c:member:: PyObject *caller @@ -745,6 +745,15 @@ PyArrayMethod_Context and PyArrayMethod_Spec An array of descriptors for the ufunc loop, filled in by ``resolve_descriptors``. The length of the array is ``nin`` + ``nout``. + .. c:member:: void *parameters + + A pointer to a structure containing any runtime parameters needed by the + loop. This is ``NULL`` if no parameters are needed. The type of the + struct is specific to the registered function. + + .. versionchanged:: NumPy 2.4 + The `parameters` member was added in NumPy 2.4. + .. c:type:: PyArrayMethod_Spec A struct used to register an ArrayMethod with NumPy. We use the slots @@ -1609,6 +1618,32 @@ for completeness and assistance in understanding the code. The C-structure associated with :c:var:`PyArrayMapIter_Type`. This structure is useful if you are trying to understand the advanced-index mapping code. It is defined in the - ``arrayobject.h`` header. This type is not exposed to Python and + ``multiarray/mapping.h`` header. This type is not exposed to Python and could be replaced with a C-structure. As a Python type it takes advantage of reference- counted memory management. + + +NumPy C-API and C complex +========================= +When you use the NumPy C-API, you will have access to complex real declarations +``npy_cdouble`` and ``npy_cfloat``, which are declared in terms of the C +standard types from ``complex.h``. 
Unfortunately, ``complex.h`` contains +``#define I ...`` (where the actual definition depends on the compiler), which +means that any downstream user that does ``#include `` +could get ``I`` defined, and using something like declaring ``double I;`` in +their code will result in an obscure compiler error like + +.. code-block:: c + error: expected ‘)’ before ‘__extension__’ + double I, + +This error can be avoided by adding:: + + #undef I + +to your code. + +.. versionchanged:: 2.0 + The inclusion of ``complex.h`` was new in NumPy 2, so that code defining + a different ``I`` may not have required the ``#undef I`` on older versions. + NumPy 2.0.1 briefly included the ``#undef I`` diff --git a/doc/source/reference/c-api/ufunc.rst b/doc/source/reference/c-api/ufunc.rst index d0484358cc91..2dbc8cae2fa1 100644 --- a/doc/source/reference/c-api/ufunc.rst +++ b/doc/source/reference/c-api/ufunc.rst @@ -11,6 +11,9 @@ Constants --------- ``UFUNC_{THING}_{ERR}`` + + Deprecated, use ``NPY_{THING}_{ERR}`` instead + .. c:macro:: UFUNC_FPE_DIVIDEBYZERO .. c:macro:: UFUNC_FPE_OVERFLOW diff --git a/doc/source/reference/constants.rst b/doc/source/reference/constants.rst index 2e2795a8b29f..00a2d607b356 100644 --- a/doc/source/reference/constants.rst +++ b/doc/source/reference/constants.rst @@ -8,7 +8,7 @@ NumPy includes several constants: .. data:: e - Euler's constant, base of natural logarithms, Napier's constant. + Euler's number, base of natural logarithms, Napier's constant. ``e = 2.71828182845904523536028747135266249775724709369995...`` @@ -62,6 +62,9 @@ NumPy includes several constants: .. rubric:: Examples +.. try_examples:: + + >>> import numpy as np >>> np.inf inf >>> np.array([1]) / 0. @@ -88,10 +91,11 @@ NumPy includes several constants: NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754). This means that Not a Number is not equivalent to infinity. - `NaN` and `NAN` are aliases of `nan`. - .. rubric:: Examples +.. 
try_examples:: + + >>> import numpy as np >>> np.nan nan >>> np.log(-1) @@ -106,6 +110,9 @@ NumPy includes several constants: .. rubric:: Examples +.. try_examples:: + + >>> import numpy as np >>> np.newaxis is None True >>> x = np.arange(3) @@ -121,16 +128,16 @@ NumPy includes several constants: [[2]]]) >>> x[:, np.newaxis] * x array([[0, 0, 0], - [0, 1, 2], - [0, 2, 4]]) + [0, 1, 2], + [0, 2, 4]]) Outer product, same as ``outer(x, y)``: >>> y = np.arange(3, 6) >>> x[:, np.newaxis] * y array([[ 0, 0, 0], - [ 3, 4, 5], - [ 6, 8, 10]]) + [ 3, 4, 5], + [ 6, 8, 10]]) ``x[np.newaxis, :]`` is equivalent to ``x[np.newaxis]`` and ``x[None]``: diff --git a/doc/source/reference/distutils.rst b/doc/source/reference/distutils.rst deleted file mode 100644 index 72b61e3a94db..000000000000 --- a/doc/source/reference/distutils.rst +++ /dev/null @@ -1,219 +0,0 @@ -.. _numpy-distutils-refguide: - -********************************** -Packaging (:mod:`numpy.distutils`) -********************************** - -.. module:: numpy.distutils - -.. warning:: - - ``numpy.distutils`` is deprecated, and will be removed for - Python >= 3.12. For more details, see :ref:`distutils-status-migration` - -.. warning:: - - Note that ``setuptools`` does major releases often and those may contain - changes that break :mod:`numpy.distutils`, which will *not* be updated anymore - for new ``setuptools`` versions. It is therefore recommended to set an - upper version bound in your build configuration for the last known version - of ``setuptools`` that works with your build. - -NumPy provides enhanced distutils functionality to make it easier to -build and install sub-packages, auto-generate code, and extension -modules that use Fortran-compiled libraries. A useful :class:`Configuration -` class is also provided in -:mod:`numpy.distutils.misc_util` that can make it easier to construct -keyword arguments to pass to the setup function (by passing the -dictionary obtained from the todict() method of the class). 
More -information is available in the :ref:`distutils-user-guide`. - -The choice and location of linked libraries such as BLAS and LAPACK as well as -include paths and other such build options can be specified in a ``site.cfg`` -file located in the NumPy root repository or a ``.numpy-site.cfg`` file in your -home directory. See the ``site.cfg.example`` example file included in the NumPy -repository or sdist for documentation. - -.. index:: - single: distutils - - -Modules in :mod:`numpy.distutils` -================================= -.. toctree:: - :maxdepth: 2 - - distutils/misc_util - - -.. currentmodule:: numpy.distutils - -.. autosummary:: - :toctree: generated/ - - ccompiler - ccompiler_opt - cpuinfo.cpu - core.Extension - exec_command - log.set_verbosity - system_info.get_info - system_info.get_standard_file - - -Configuration class -=================== - -.. currentmodule:: numpy.distutils.misc_util - -.. class:: Configuration(package_name=None, parent_name=None, top_path=None, package_path=None, **attrs) - - Construct a configuration instance for the given package name. If - *parent_name* is not None, then construct the package as a - sub-package of the *parent_name* package. If *top_path* and - *package_path* are None then they are assumed equal to - the path of the file this instance was created in. The setup.py - files in the numpy distribution are good examples of how to use - the :class:`Configuration` instance. - - .. automethod:: todict - - .. automethod:: get_distribution - - .. automethod:: get_subpackage - - .. automethod:: add_subpackage - - .. automethod:: add_data_files - - .. automethod:: add_data_dir - - .. automethod:: add_include_dirs - - .. automethod:: add_headers - - .. automethod:: add_extension - - .. automethod:: add_library - - .. automethod:: add_scripts - - .. automethod:: add_installed_library - - .. automethod:: add_npy_pkg_config - - .. automethod:: paths - - .. automethod:: get_config_cmd - - .. 
automethod:: get_build_temp_dir - - .. automethod:: have_f77c - - .. automethod:: have_f90c - - .. automethod:: get_version - - .. automethod:: make_svn_version_py - - .. automethod:: make_config_py - - .. automethod:: get_info - -Building installable C libraries -================================ - -Conventional C libraries (installed through `add_library`) are not installed, and -are just used during the build (they are statically linked). An installable C -library is a pure C library, which does not depend on the python C runtime, and -is installed such that it may be used by third-party packages. To build and -install the C library, you just use the method `add_installed_library` instead of -`add_library`, which takes the same arguments except for an additional -``install_dir`` argument:: - - .. hidden in a comment so as to be included in refguide but not rendered documentation - >>> import numpy.distutils.misc_util - >>> config = np.distutils.misc_util.Configuration(None, '', '.') - >>> with open('foo.c', 'w') as f: pass - - >>> config.add_installed_library('foo', sources=['foo.c'], install_dir='lib') - -npy-pkg-config files --------------------- - -To make the necessary build options available to third parties, you could use -the `npy-pkg-config` mechanism implemented in `numpy.distutils`. This mechanism is -based on a .ini file which contains all the options. A .ini file is very -similar to .pc files as used by the pkg-config unix utility:: - - [meta] - Name: foo - Version: 1.0 - Description: foo library - - [variables] - prefix = /home/user/local - libdir = ${prefix}/lib - includedir = ${prefix}/include - - [default] - cflags = -I${includedir} - libs = -L${libdir} -lfoo - -Generally, the file needs to be generated during the build, since it needs some -information known at build time only (e.g. prefix). This is mostly automatic if -one uses the `Configuration` method `add_npy_pkg_config`. 
Assuming we have a -template file foo.ini.in as follows:: - - [meta] - Name: foo - Version: @version@ - Description: foo library - - [variables] - prefix = @prefix@ - libdir = ${prefix}/lib - includedir = ${prefix}/include - - [default] - cflags = -I${includedir} - libs = -L${libdir} -lfoo - -and the following code in setup.py:: - - >>> config.add_installed_library('foo', sources=['foo.c'], install_dir='lib') - >>> subst = {'version': '1.0'} - >>> config.add_npy_pkg_config('foo.ini.in', 'lib', subst_dict=subst) - -This will install the file foo.ini into the directory package_dir/lib, and the -foo.ini file will be generated from foo.ini.in, where each ``@version@`` will be -replaced by ``subst_dict['version']``. The dictionary has an additional prefix -substitution rule automatically added, which contains the install prefix (since -this is not easy to get from setup.py). - -Reusing a C library from another package ----------------------------------------- - -Info are easily retrieved from the `get_info` function in -`numpy.distutils.misc_util`:: - - >>> info = np.distutils.misc_util.get_info('npymath') - >>> config.add_extension('foo', sources=['foo.c'], extra_info=info) - - - -An additional list of paths to look for .ini files can be given to `get_info`. - -Conversion of ``.src`` files -============================ - -NumPy distutils supports automatic conversion of source files named -.src. This facility can be used to maintain very similar -code blocks requiring only simple changes between blocks. During the -build phase of setup, if a template file named .src is -encountered, a new file named is constructed from the -template and placed in the build directory to be used instead. Two -forms of template conversion are supported. The first form occurs for -files named .ext.src where ext is a recognized Fortran -extension (f, f90, f95, f77, for, ftn, pyf). The second form is used -for all other cases. See :ref:`templating`. 
diff --git a/doc/source/reference/distutils/misc_util.rst b/doc/source/reference/distutils/misc_util.rst deleted file mode 100644 index bbb83a5ab061..000000000000 --- a/doc/source/reference/distutils/misc_util.rst +++ /dev/null @@ -1,7 +0,0 @@ -distutils.misc_util -=================== - -.. automodule:: numpy.distutils.misc_util - :members: - :undoc-members: - :exclude-members: Configuration diff --git a/doc/source/reference/distutils_guide.rst b/doc/source/reference/distutils_guide.rst deleted file mode 100644 index 0a815797ac30..000000000000 --- a/doc/source/reference/distutils_guide.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. _distutils-user-guide: - -``numpy.distutils`` user guide -============================== - -.. warning:: - - ``numpy.distutils`` is deprecated, and will be removed for - Python >= 3.12. For more details, see :ref:`distutils-status-migration` - - -.. include:: ../../DISTUTILS.rst - :start-line: 6 diff --git a/doc/source/reference/distutils_status_migration.rst b/doc/source/reference/distutils_status_migration.rst index 366b0e67f06a..e4ca4fedcf81 100644 --- a/doc/source/reference/distutils_status_migration.rst +++ b/doc/source/reference/distutils_status_migration.rst @@ -3,16 +3,7 @@ Status of ``numpy.distutils`` and migration advice ================================================== -`numpy.distutils` has been deprecated in NumPy ``1.23.0``. It will be removed -for Python 3.12; for Python <= 3.11 it will not be removed until 2 years after -the Python 3.12 release (Oct 2025). - - -.. warning:: - - ``numpy.distutils`` is only tested with ``setuptools < 60.0``, newer - versions may break. See :ref:`numpy-setuptools-interaction` for details. - +``numpy.distutils`` was removed in NumPy ``2.5.0``. 
Migration advice ---------------- @@ -27,7 +18,7 @@ using a well-designed, modern and reliable build system, we recommend: If you have modest needs (only simple Cython/C extensions; no need for Fortran, BLAS/LAPACK, nested ``setup.py`` files, or other features of -``numpy.distutils``) and have been happy with ``numpy.distutils`` so far, you +``numpy.distutils``) and have been happy with ``numpy.distutils``, you can also consider switching to ``setuptools``. Note that most functionality of ``numpy.distutils`` is unlikely to be ported to ``setuptools``. @@ -47,7 +38,7 @@ migrating. For more details about the SciPy migration, see: - `RFC: switch to Meson as a build system `__ - `Tracking issue for Meson support `__ -NumPy will migrate to Meson for the 1.26 release. +NumPy migrated to Meson for the 1.26 release. Moving to CMake / scikit-build @@ -73,15 +64,12 @@ present in ``setuptools``: - Support for a few other scientific libraries, like FFTW and UMFPACK - Better MinGW support - Per-compiler build flag customization (e.g. `-O3` and `SSE2` flags are default) -- a simple user build config system, see `site.cfg.example `__ +- a simple user build config system, see `site.cfg.example `__ - SIMD intrinsics support - Support for the NumPy-specific ``.src`` templating format for ``.c``/``.h`` files -The most widely used feature is nested ``setup.py`` files. This feature may -perhaps still be ported to ``setuptools`` in the future (it needs a volunteer -though, see `gh-18588 `__ for -status). Projects only using that feature could move to ``setuptools`` after -that is done. In case a project uses only a couple of ``setup.py`` files, it +The most widely used feature is nested ``setup.py`` files. In case a project +uses only a couple of ``setup.py`` files, it also could make sense to simply aggregate all the content of those files into a single ``setup.py`` file and then move to ``setuptools``. 
This involves dropping all ``Configuration`` instances, and using ``Extension`` instead. @@ -100,29 +88,6 @@ E.g.,:: For more details, see the `setuptools documentation `__ - -.. _numpy-setuptools-interaction: - -Interaction of ``numpy.distutils`` with ``setuptools`` ------------------------------------------------------- - -It is recommended to use ``setuptools < 60.0``. Newer versions may work, but -are not guaranteed to. The reason for this is that ``setuptools`` 60.0 enabled -a vendored copy of ``distutils``, including backwards incompatible changes that -affect some functionality in ``numpy.distutils``. - -If you are using only simple Cython or C extensions with minimal use of -``numpy.distutils`` functionality beyond nested ``setup.py`` files (its most -popular feature, see :class:`Configuration `), -then latest ``setuptools`` is likely to continue working. In case of problems, -you can also try ``SETUPTOOLS_USE_DISTUTILS=stdlib`` to avoid the backwards -incompatible changes in ``setuptools``. - -Whatever you do, it is recommended to put an upper bound on your ``setuptools`` -build requirement in ``pyproject.toml`` to avoid future breakage - see -:ref:`for-downstream-package-authors`. - - .. _CMake: https://cmake.org/ .. _Meson: https://mesonbuild.com/ .. 
_meson-python: https://meson-python.readthedocs.io diff --git a/doc/source/reference/figures/nep-0050-promotion-no-fonts.svg b/doc/source/reference/figures/nep-0050-promotion-no-fonts.svg new file mode 100644 index 000000000000..579480132b3d --- /dev/null +++ b/doc/source/reference/figures/nep-0050-promotion-no-fonts.svg @@ -0,0 +1,1471 @@ + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/source/reference/global_state.rst b/doc/source/reference/global_state.rst index e0ab1bb2a7ba..e66c86faf1b3 100644 --- a/doc/source/reference/global_state.rst +++ b/doc/source/reference/global_state.rst @@ -1,14 +1,13 @@ .. _global_state: -************ -Global state -************ - -NumPy has a few import-time, compile-time, or runtime options -which change the global behaviour. -Most of these are related to performance or for debugging -purposes and will not be interesting to the vast majority -of users. +**************************** +Global Configuration Options +**************************** + +NumPy has a few import-time, compile-time, or runtime configuration +options which change the global behaviour. 
Most of these are related to +performance or for debugging purposes and will not be interesting to the +vast majority of users. Performance-related options diff --git a/doc/source/reference/index.rst b/doc/source/reference/index.rst index ed9641409014..2a7ac83a96ca 100644 --- a/doc/source/reference/index.rst +++ b/doc/source/reference/index.rst @@ -40,7 +40,6 @@ Python API :maxdepth: 1 typing - distutils C API ===== @@ -58,10 +57,11 @@ Other topics array_api simd/index + thread_safety global_state security + testing distutils_status_migration - distutils_guide swig diff --git a/doc/source/reference/maskedarray.baseclass.rst b/doc/source/reference/maskedarray.baseclass.rst index 7121914b93e2..398abd4eda63 100644 --- a/doc/source/reference/maskedarray.baseclass.rst +++ b/doc/source/reference/maskedarray.baseclass.rst @@ -1,8 +1,5 @@ .. currentmodule:: numpy.ma -.. for doctests - >>> from numpy import ma - .. _numpy.ma.constants: Constants of the :mod:`numpy.ma` module @@ -18,10 +15,14 @@ defines several constants. specific entry of a masked array is masked, or to mask one or several entries of a masked array:: - >>> x = ma.array([1, 2, 3], mask=[0, 1, 0]) - >>> x[1] is ma.masked + .. 
try_examples:: + + >>> import numpy as np + + >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 0]) + >>> x[1] is np.ma.masked True - >>> x[-1] = ma.masked + >>> x[-1] = np.ma.masked >>> x masked_array(data=[1, --, --], mask=[False, True, True], @@ -136,7 +137,6 @@ Conversion MaskedArray.toflex MaskedArray.tolist MaskedArray.torecords - MaskedArray.tostring MaskedArray.tobytes @@ -264,7 +264,6 @@ Arithmetic: MaskedArray.__rsub__ MaskedArray.__mul__ MaskedArray.__rmul__ - MaskedArray.__div__ MaskedArray.__truediv__ MaskedArray.__rtruediv__ MaskedArray.__floordiv__ @@ -296,7 +295,6 @@ Arithmetic, in-place: MaskedArray.__iadd__ MaskedArray.__isub__ MaskedArray.__imul__ - MaskedArray.__idiv__ MaskedArray.__itruediv__ MaskedArray.__ifloordiv__ MaskedArray.__imod__ diff --git a/doc/source/reference/maskedarray.generic.rst b/doc/source/reference/maskedarray.generic.rst index 161ce14b76d2..4f53c6146b53 100644 --- a/doc/source/reference/maskedarray.generic.rst +++ b/doc/source/reference/maskedarray.generic.rst @@ -20,8 +20,8 @@ What is a masked array? ----------------------- In many circumstances, datasets can be incomplete or tainted by the presence -of invalid data. For example, a sensor may have failed to record a data, or -recorded an invalid value. The :mod:`numpy.ma` module provides a convenient +of invalid data. For example, a sensor may have failed to record a data point, +or recorded an invalid value. The :mod:`numpy.ma` module provides a convenient way to address this issue, by introducing masked arrays. A masked array is the combination of a standard :class:`numpy.ndarray` and a @@ -35,19 +35,21 @@ masked (invalid). The package ensures that masked entries are not used in computations. -As an illustration, let's consider the following dataset:: +.. try_examples:: + + As an illustration, let's consider the following dataset: >>> import numpy as np >>> import numpy.ma as ma >>> x = np.array([1, 2, 3, -1, 5]) -We wish to mark the fourth entry as invalid. 
The easiest is to create a masked -array:: + We wish to mark the fourth entry as invalid. The easiest is to create a masked + array:: >>> mx = ma.masked_array(x, mask=[0, 0, 0, 1, 0]) -We can now compute the mean of the dataset, without taking the invalid data -into account:: + We can now compute the mean of the dataset, without taking the invalid data + into account: >>> mx.mean() 2.75 @@ -62,17 +64,19 @@ class, which is a subclass of :class:`numpy.ndarray`. The class, its attributes and methods are described in more details in the :ref:`MaskedArray class ` section. -The :mod:`numpy.ma` module can be used as an addition to :mod:`numpy`: :: +.. try_examples:: + + The :mod:`numpy.ma` module can be used as an addition to :mod:`numpy`: >>> import numpy as np >>> import numpy.ma as ma -To create an array with the second element invalid, we would do:: + To create an array with the second element invalid, we would do:: >>> y = ma.array([1, 2, 3], mask = [0, 1, 0]) -To create a masked array where all values close to 1.e20 are invalid, we would -do:: + To create a masked array where all values close to 1.e20 are invalid, we would + do: >>> z = ma.masked_values([1.0, 1.e20, 3.0, 4.0], 1.e20) @@ -108,17 +112,20 @@ There are several ways to construct a masked array. mask of the view is set to :attr:`nomask` if the array has no named fields, or an array of boolean with the same structure as the array otherwise. 
- >>> x = np.array([1, 2, 3]) - >>> x.view(ma.MaskedArray) - masked_array(data=[1, 2, 3], - mask=False, - fill_value=999999) - >>> x = np.array([(1, 1.), (2, 2.)], dtype=[('a',int), ('b', float)]) - >>> x.view(ma.MaskedArray) - masked_array(data=[(1, 1.0), (2, 2.0)], - mask=[(False, False), (False, False)], - fill_value=(999999, 1e+20), - dtype=[('a', '>> import numpy as np + >>> x = np.array([1, 2, 3]) + >>> x.view(ma.MaskedArray) + masked_array(data=[1, 2, 3], + mask=False, + fill_value=999999) + >>> x = np.array([(1, 1.), (2, 2.)], dtype=[('a',int), ('b', float)]) + >>> x.view(ma.MaskedArray) + masked_array(data=[(1, 1.0), (2, 2.0)], + mask=[(False, False), (False, False)], + fill_value=(999999, 1e+20), + dtype=[('a', '>> import numpy as np >>> x = ma.array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]]) >>> x[~x.mask] masked_array(data=[1, 4], - mask=[False, False], - fill_value=999999) + mask=[False, False], + fill_value=999999) -Another way to retrieve the valid data is to use the :meth:`compressed` -method, which returns a one-dimensional :class:`~numpy.ndarray` (or one of its -subclasses, depending on the value of the :attr:`~MaskedArray.baseclass` -attribute):: + Another way to retrieve the valid data is to use the :meth:`compressed` + method, which returns a one-dimensional :class:`~numpy.ndarray` (or one of its + subclasses, depending on the value of the :attr:`~MaskedArray.baseclass` + attribute): >>> x.compressed() array([1, 4]) -Note that the output of :meth:`compressed` is always 1D. + Note that the output of :meth:`compressed` is always 1D. @@ -218,7 +228,9 @@ Masking an entry ~~~~~~~~~~~~~~~~ The recommended way to mark one or several specific entries of a masked array -as invalid is to assign the special value :attr:`masked` to them:: +as invalid is to assign the special value :attr:`masked` to them: + +.. try_examples:: >>> x = ma.array([1, 2, 3]) >>> x[0] = ma.masked @@ -257,8 +269,11 @@ but this usage is discouraged. 
All the entries of an array can be masked at once by assigning ``True`` to the -mask:: +mask: +.. try_examples:: + + >>> import numpy.ma as ma >>> x = ma.array([1, 2, 3], mask=[0, 0, 1]) >>> x.mask = True >>> x @@ -267,8 +282,8 @@ mask:: fill_value=999999, dtype=int64) -Finally, specific entries can be masked and/or unmasked by assigning to the -mask a sequence of booleans:: + Finally, specific entries can be masked and/or unmasked by assigning to the + mask a sequence of booleans: >>> x = ma.array([1, 2, 3]) >>> x.mask = [0, 1, 0] @@ -281,8 +296,11 @@ Unmasking an entry ~~~~~~~~~~~~~~~~~~ To unmask one or several specific entries, we can just assign one or several -new valid values to them:: +new valid values to them: + +.. try_examples:: + >>> import numpy.ma as ma >>> x = ma.array([1, 2, 3], mask=[0, 0, 1]) >>> x masked_array(data=[1, 2, --], @@ -300,37 +318,44 @@ new valid values to them:: attribute. This feature was introduced to prevent overwriting the mask. To force the unmasking of an entry where the array has a hard mask, the mask must first to be softened using the :meth:`soften_mask` method - before the allocation. It can be re-hardened with :meth:`harden_mask`:: - - >>> x = ma.array([1, 2, 3], mask=[0, 0, 1], hard_mask=True) - >>> x - masked_array(data=[1, 2, --], - mask=[False, False, True], - fill_value=999999) - >>> x[-1] = 5 - >>> x - masked_array(data=[1, 2, --], - mask=[False, False, True], - fill_value=999999) - >>> x.soften_mask() - masked_array(data=[1, 2, --], - mask=[False, False, True], - fill_value=999999) - >>> x[-1] = 5 - >>> x - masked_array(data=[1, 2, 5], - mask=[False, False, False], - fill_value=999999) - >>> x.harden_mask() - masked_array(data=[1, 2, 5], - mask=[False, False, False], - fill_value=999999) + before the allocation. It can be re-hardened with :meth:`harden_mask` as + follows: + +.. 
try_examples:: + + >>> import numpy.ma as ma + >>> x = ma.array([1, 2, 3], mask=[0, 0, 1], hard_mask=True) + >>> x + masked_array(data=[1, 2, --], + mask=[False, False, True], + fill_value=999999) + >>> x[-1] = 5 + >>> x + masked_array(data=[1, 2, --], + mask=[False, False, True], + fill_value=999999) + >>> x.soften_mask() + masked_array(data=[1, 2, --], + mask=[False, False, True], + fill_value=999999) + >>> x[-1] = 5 + >>> x + masked_array(data=[1, 2, 5], + mask=[False, False, False], + fill_value=999999) + >>> x.harden_mask() + masked_array(data=[1, 2, 5], + mask=[False, False, False], + fill_value=999999) To unmask all masked entries of a masked array (provided the mask isn't a hard mask), the simplest solution is to assign the constant :attr:`nomask` to the -mask:: +mask: + +.. try_examples:: + >>> import numpy.ma as ma >>> x = ma.array([1, 2, 3], mask=[0, 0, 1]) >>> x masked_array(data=[1, 2, --], @@ -352,8 +377,11 @@ its mechanisms for indexing and slicing. When accessing a single entry of a masked array with no named fields, the output is either a scalar (if the corresponding entry of the mask is ``False``) or the special value :attr:`masked` (if the corresponding entry of -the mask is ``True``):: +the mask is ``True``): + +.. try_examples:: + >>> import numpy.ma as ma >>> x = ma.array([1, 2, 3], mask=[0, 0, 1]) >>> x[0] 1 @@ -367,6 +395,9 @@ If the masked array has named fields, accessing a single entry returns a array with the same dtype as the initial array if at least one of the fields is masked. +.. try_examples:: + + >>> import numpy.ma as ma >>> y = ma.masked_array([(1,2), (3, 4)], ... mask=[(0, 0), (0, 1)], ... dtype=[('a', int), ('b', int)]) @@ -382,6 +413,9 @@ mask is either :attr:`nomask` (if there was no invalid entries in the original array) or a view of the corresponding slice of the original mask. The view is required to ensure propagation of any modification of the mask to the original. +.. 
try_examples:: + + >>> import numpy.ma as ma >>> x = ma.array([1, 2, 3, 4, 5], mask=[0, 1, 0, 0, 1]) >>> mx = x[:3] >>> mx @@ -398,6 +432,7 @@ required to ensure propagation of any modification of the mask to the original. >>> x.data array([ 1, -1, 3, 4, 5]) + Accessing a field of a masked array with structured datatype returns a :class:`MaskedArray`. @@ -417,8 +452,11 @@ meaning that the corresponding :attr:`~MaskedArray.data` entries The :mod:`numpy.ma` module comes with a specific implementation of most ufuncs. Unary and binary functions that have a validity domain (such as :func:`~numpy.log` or :func:`~numpy.divide`) return the :data:`masked` -constant whenever the input is masked or falls outside the validity domain:: +constant whenever the input is masked or falls outside the validity domain: + +.. try_examples:: + >>> import numpy.ma as ma >>> ma.log([-1, 0, 1, 2]) masked_array(data=[--, --, 0.0, 0.6931471805599453], mask=[ True, True, False, False], @@ -430,8 +468,11 @@ result of a binary ufunc is masked wherever any of the input is masked. If the ufunc also returns the optional context output (a 3-element tuple containing the name of the ufunc, its arguments and its domain), the context is processed and entries of the output masked array are masked wherever the corresponding -input fall outside the validity domain:: +input fall outside the validity domain: + +.. try_examples:: + >>> import numpy.ma as ma >>> x = ma.array([-1, 1, 0, 2, 3], mask=[0, 0, 0, 0, 1]) >>> np.log(x) masked_array(data=[--, 0.0, --, 0.6931471805599453, --], @@ -447,7 +488,9 @@ Data with a given value representing missing data Let's consider a list of elements, ``x``, where values of -9999. represent missing data. We wish to compute the average value of the data and the vector -of anomalies (deviations from the average):: +of anomalies (deviations from the average): + +.. try_examples:: >>> import numpy.ma as ma >>> x = [0.,1.,-9999.,3.,4.] 
@@ -466,6 +509,10 @@ Filling in the missing data Suppose now that we wish to print that same data, but with the missing values replaced by the average value. +.. try_examples:: + + >>> import numpy.ma as ma + >>> mx = ma.masked_values (x, -9999.) >>> print(mx.filled(mx.mean())) [0. 1. 2. 3. 4.] @@ -474,7 +521,9 @@ Numerical operations -------------------- Numerical operations can be easily performed without worrying about missing -values, dividing by zero, square roots of negative numbers, etc.:: +values, dividing by zero, square roots of negative numbers, etc.: + +.. try_examples:: >>> import numpy.ma as ma >>> x = ma.array([1., -1., 3., 4., 5., 6.], mask=[0,0,0,0,1,0]) @@ -492,8 +541,12 @@ Ignoring extreme values Let's consider an array ``d`` of floats between 0 and 1. We wish to compute the average of the values of ``d`` while ignoring any data outside -the range ``[0.2, 0.9]``:: +the range ``[0.2, 0.9]``: +.. try_examples:: + + >>> import numpy as np + >>> import numpy.ma as ma >>> d = np.linspace(0, 1, 20) >>> print(d.mean() - ma.masked_outside(d, 0.2, 0.9).mean()) -0.05263157894736836 diff --git a/doc/source/reference/module_structure.rst b/doc/source/reference/module_structure.rst index 01a5bcff7fbc..5c6d8139b055 100644 --- a/doc/source/reference/module_structure.rst +++ b/doc/source/reference/module_structure.rst @@ -5,7 +5,7 @@ NumPy's module structure ************************ NumPy has a large number of submodules. Most regular usage of NumPy requires -only the main namespace and a smaller set of submodules. The rest either either +only the main namespace and a smaller set of submodules. The rest either have special-purpose or niche namespaces. Main namespaces @@ -44,7 +44,6 @@ Prefer not to use these namespaces for new code. There are better alternatives and/or this code is deprecated or isn't reliable. 
- :ref:`numpy.char ` - legacy string functionality, only for fixed-width strings -- :ref:`numpy.distutils ` (deprecated) - build system support - :ref:`numpy.f2py ` - Fortran binding generation (usually used from the command line only) - :ref:`numpy.ma ` - masked arrays (not very reliable, needs an overhaul) - :ref:`numpy.matlib ` (pending deprecation) - functions supporting ``matrix`` instances @@ -70,7 +69,6 @@ and/or this code is deprecated or isn't reliable. numpy.rec numpy.version numpy.char - numpy.distutils numpy.f2py <../f2py/index> numpy.ma numpy.matlib diff --git a/doc/source/reference/random/bit_generators/index.rst b/doc/source/reference/random/bit_generators/index.rst index 00f9edb4af59..cb9650b07d85 100644 --- a/doc/source/reference/random/bit_generators/index.rst +++ b/doc/source/reference/random/bit_generators/index.rst @@ -91,7 +91,7 @@ user, which is up to you. # If the user did not provide a seed, it should return `None`. seed = get_user_seed() ss = SeedSequence(seed) - print('seed = {}'.format(ss.entropy)) + print(f'seed = {ss.entropy}') bg = PCG64(ss) .. end_block diff --git a/doc/source/reference/random/c-api.rst b/doc/source/reference/random/c-api.rst index 2819c769cb44..ba719b799866 100644 --- a/doc/source/reference/random/c-api.rst +++ b/doc/source/reference/random/c-api.rst @@ -3,8 +3,6 @@ C API for random .. currentmodule:: numpy.random -.. versionadded:: 1.19.0 - Access to various distributions below is available via Cython or C-wrapper libraries like CFFI. All the functions accept a :c:type:`bitgen_t` as their first argument. To access these from Cython or C, you must link with the diff --git a/doc/source/reference/random/extending.rst b/doc/source/reference/random/extending.rst index 9c7dc86b2825..20c8375d72d6 100644 --- a/doc/source/reference/random/extending.rst +++ b/doc/source/reference/random/extending.rst @@ -11,10 +11,13 @@ small set of required functions. Numba ----- -Numba can be used with either CTypes or CFFI. 
The current iteration of the +Numba can be used with either +`CTypes `_ +or `CFFI `_. +The current iteration of the `BitGenerator`\ s all export a small set of functions through both interfaces. -This example shows how numba can be used to produce gaussian samples using +This example shows how Numba can be used to produce Gaussian samples using a pure Python implementation which is then compiled. The random numbers are provided by ``ctypes.next_double``. @@ -76,7 +79,7 @@ directly from the ``_generator`` shared object, using the `BitGenerator.cffi` in .. literalinclude:: ../../../../numpy/random/_examples/cffi/extending.py :language: python - :start-after: dlopen + :start-at: dlopen New BitGenerators diff --git a/doc/source/reference/random/generator.rst b/doc/source/reference/random/generator.rst index eaa29feae57e..953cf9b3845e 100644 --- a/doc/source/reference/random/generator.rst +++ b/doc/source/reference/random/generator.rst @@ -72,6 +72,9 @@ By default, `Generator.permuted` returns a copy. To operate in-place with `Generator.permuted`, pass the same array as the first argument *and* as the value of the ``out`` parameter. For example, +.. try_examples:: + + >>> import numpy as np >>> rng = np.random.default_rng() >>> x = np.arange(0, 15).reshape(3, 5) >>> x #doctest: +SKIP @@ -84,12 +87,12 @@ the value of the ``out`` parameter. For example, [ 6, 7, 8, 9, 5], [10, 14, 11, 13, 12]]) -Note that when ``out`` is given, the return value is ``out``: + Note that when ``out`` is given, the return value is ``out``: >>> y is x True -.. _generator-handling-axis-parameter: +.. _generator-handling-axis-parameter: Handling the ``axis`` parameter ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -100,6 +103,9 @@ which dimension of the input array to use as the sequence. In the case of a two-dimensional array, ``axis=0`` will, in effect, rearrange the rows of the array, and ``axis=1`` will rearrange the columns. For example +.. 
try_examples:: + + >>> import numpy as np >>> rng = np.random.default_rng() >>> x = np.arange(0, 15).reshape(3, 5) >>> x @@ -119,6 +125,10 @@ how `numpy.sort` treats it. Each slice along the given axis is shuffled independently of the others. Compare the following example of the use of `Generator.permuted` to the above example of `Generator.permutation`: +.. try_examples:: + + >>> import numpy as np + >>> rng = np.random.default_rng() >>> rng.permuted(x, axis=1) #doctest: +SKIP array([[ 1, 0, 2, 4, 3], # random [ 5, 7, 6, 9, 8], @@ -132,8 +142,10 @@ Shuffling non-NumPy sequences ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ `Generator.shuffle` works on non-NumPy sequences. That is, if it is given a sequence that is not a NumPy array, it shuffles that sequence in-place. -For example, +.. try_examples:: + + >>> import numpy as np >>> rng = np.random.default_rng() >>> a = ['A', 'B', 'C', 'D', 'E'] >>> rng.shuffle(a) # shuffle the list in-place diff --git a/doc/source/reference/random/index.rst b/doc/source/reference/random/index.rst index a2f508c58bbf..6da0a8c4e0a0 100644 --- a/doc/source/reference/random/index.rst +++ b/doc/source/reference/random/index.rst @@ -4,8 +4,8 @@ .. currentmodule:: numpy.random -Random sampling (:mod:`numpy.random`) -===================================== +Random sampling +=============== .. _random-quick-start: @@ -18,18 +18,24 @@ probability distributions. In general, users will create a `Generator` instance with `default_rng` and call the various methods on it to obtain samples from different distributions. -:: +.. try_examples:: >>> import numpy as np >>> rng = np.random.default_rng() - # Generate one random float uniformly distributed over the range [0, 1) + + Generate one random float uniformly distributed over the range :math:`[0, 1)`: + >>> rng.random() #doctest: +SKIP 0.06369197489564249 # may vary - # Generate an array of 10 numbers according to a unit Gaussian distribution. 
+ + Generate an array of 10 numbers according to a unit Gaussian distribution: + >>> rng.standard_normal(10) #doctest: +SKIP array([-0.31018314, -1.8922078 , -0.3628523 , -0.63526532, 0.43181166, # may vary 0.51640373, 1.25693945, 0.07779185, 0.84090247, -2.13406828]) - # Generate an array of 5 integers uniformly over the range [0, 10). + + Generate an array of 5 integers uniformly over the range :math:`[0, 10)`: + >>> rng.integers(low=0, high=10, size=5) #doctest: +SKIP array([8, 7, 6, 2, 0]) # may vary @@ -40,14 +46,15 @@ generate different numbers each time. The pseudo-random sequences will be independent for all practical purposes, at least those purposes for which our pseudo-randomness was good for in the first place. -:: +.. try_examples:: - >>> rng1 = np.random.default_rng() - >>> rng1.random() #doctest: +SKIP - 0.6596288841243357 # may vary - >>> rng2 = np.random.default_rng() - >>> rng2.random() #doctest: +SKIP - 0.11885628817151628 # may vary + >>> import numpy as np + >>> rng1 = np.random.default_rng() + >>> rng1.random() #doctest: +SKIP + 0.6596288841243357 # may vary + >>> rng2 = np.random.default_rng() + >>> rng2.random() #doctest: +SKIP + 0.11885628817151628 # may vary .. warning:: @@ -66,18 +73,19 @@ intentionally *trying* to reproduce their result. A convenient way to get such a seed number is to use :py:func:`secrets.randbits` to get an arbitrary 128-bit integer. -:: - >>> import secrets - >>> import numpy as np - >>> secrets.randbits(128) #doctest: +SKIP - 122807528840384100672342137672332424406 # may vary - >>> rng1 = np.random.default_rng(122807528840384100672342137672332424406) - >>> rng1.random() - 0.5363922081269535 - >>> rng2 = np.random.default_rng(122807528840384100672342137672332424406) - >>> rng2.random() - 0.5363922081269535 +.. 
try_examples:: + + >>> import numpy as np + >>> import secrets + >>> secrets.randbits(128) #doctest: +SKIP + 122807528840384100672342137672332424406 # may vary + >>> rng1 = np.random.default_rng(122807528840384100672342137672332424406) + >>> rng1.random() + 0.5363922081269535 + >>> rng2 = np.random.default_rng(122807528840384100672342137672332424406) + >>> rng2.random() + 0.5363922081269535 See the documentation on `default_rng` and `SeedSequence` for more advanced options for controlling the seed in specialized scenarios. @@ -160,9 +168,9 @@ Features Parallel Applications Multithreaded Generation - new-or-different + New or Different Comparing Performance - c-api + C API Examples of using Numba, Cython, CFFI Original Source of the Generator and BitGenerators diff --git a/doc/source/reference/random/multithreading.rst b/doc/source/reference/random/multithreading.rst index 99b7ec781b55..28e045f10dc0 100644 --- a/doc/source/reference/random/multithreading.rst +++ b/doc/source/reference/random/multithreading.rst @@ -9,8 +9,11 @@ well-behaved (writable and aligned). Under normal circumstances, arrays created using the common constructors such as :meth:`numpy.empty` will satisfy these requirements. -This example makes use of Python 3 :mod:`concurrent.futures` to fill an array -using multiple threads. Threads are long-lived so that repeated calls do not +.. seealso:: + :ref:`thread_safety` for general information about thread safety in NumPy. + +This example makes use of :mod:`concurrent.futures` to fill an array using +multiple threads. Threads are long-lived so that repeated calls do not require any additional overheads from thread creation. 
The random numbers generated are reproducible in the sense that the same diff --git a/doc/source/reference/random/performance.py b/doc/source/reference/random/performance.py index 794142836652..87c07c3262a6 100644 --- a/doc/source/reference/random/performance.py +++ b/doc/source/reference/random/performance.py @@ -3,7 +3,7 @@ import pandas as pd import numpy as np -from numpy.random import MT19937, PCG64, PCG64DXSM, Philox, SFC64 +from numpy.random import MT19937, PCG64, PCG64DXSM, SFC64, Philox PRNGS = [MT19937, PCG64, PCG64DXSM, Philox, SFC64] @@ -59,11 +59,11 @@ table = table.T table = table.reindex(columns) table = table.T -table = table.reindex([k for k in funcs], axis=0) +table = table.reindex(list(funcs), axis=0) print(table.to_csv(float_format='%0.1f')) -rel = table.loc[:, ['RandomState']].values @ np.ones( +rel = table.loc[:, ['RandomState']].to_numpy() @ np.ones( (1, table.shape[1])) / table rel.pop('RandomState') rel = rel.T @@ -74,13 +74,11 @@ print(rel.to_csv(float_format='%0d')) # Cross-platform table -rows = ['32-bit Unsigned Ints','64-bit Unsigned Ints','Uniforms','Normals','Exponentials'] +rows = ['32-bit Unsigned Ints', '64-bit Unsigned Ints', 'Uniforms', + 'Normals', 'Exponentials'] xplat = rel.reindex(rows, axis=0) -xplat = 100 * (xplat / xplat.MT19937.values[:,None]) +xplat = 100 * (xplat / xplat.MT19937.to_numpy()[:, None]) overall = np.exp(np.log(xplat).mean(0)) xplat = xplat.T.copy() -xplat['Overall']=overall +xplat['Overall'] = overall print(xplat.T.round(1)) - - - diff --git a/doc/source/reference/routines.array-manipulation.rst b/doc/source/reference/routines.array-manipulation.rst index be2b1120e080..5a2b30b8b0d9 100644 --- a/doc/source/reference/routines.array-manipulation.rst +++ b/doc/source/reference/routines.array-manipulation.rst @@ -18,7 +18,6 @@ Changing array shape .. 
autosummary:: :toctree: generated/ - reshape ravel ndarray.flat @@ -88,6 +87,7 @@ Splitting arrays dsplit hsplit vsplit + unstack Tiling arrays ============= @@ -118,6 +118,5 @@ Rearranging elements flip fliplr flipud - reshape roll rot90 diff --git a/doc/source/reference/routines.bitwise.rst b/doc/source/reference/routines.bitwise.rst index abf91bd269bf..f6c15dd60f34 100644 --- a/doc/source/reference/routines.bitwise.rst +++ b/doc/source/reference/routines.bitwise.rst @@ -17,6 +17,7 @@ Elementwise bit operations bitwise_left_shift right_shift bitwise_right_shift + bitwise_count Bit packing ----------- diff --git a/doc/source/reference/routines.char.rst b/doc/source/reference/routines.char.rst index b62294b9a191..7dfb2e6a18e3 100644 --- a/doc/source/reference/routines.char.rst +++ b/doc/source/reference/routines.char.rst @@ -9,17 +9,19 @@ Legacy fixed-width string functionality .. legacy:: - The string operations in this module, as well as the `numpy.char.chararray` - class, are planned to be deprecated in the future. Use `numpy.strings` - instead. + The string operations in this module are planned to be deprecated in the future, and + the `numpy.char.chararray` class is deprecated in NumPy 2.5. Use `numpy.strings` instead. The `numpy.char` module provides a set of vectorized string operations for arrays of type `numpy.str_` or `numpy.bytes_`. For example - >>> np.char.capitalize(["python", "numpy"]) - array(['Python', 'Numpy'], dtype='>> np.char.add(["num", "doc"], ["py", "umentation"]) - array(['numpy', 'documentation'], dtype='>> import numpy as np + >>> np.char.capitalize(["python", "numpy"]) + array(['Python', 'Numpy'], dtype='>> np.char.add(["num", "doc"], ["py", "umentation"]) + array(['numpy', 'documentation'], dtype='>> import numpy as np + >>> import math + >>> np.emath.log(-math.exp(1)) == (1+1j*math.pi) + True + +Similarly, `sqrt`, other base logarithms, `power` and trig functions +are correctly handled. 
See their respective docstrings for specific examples. + +Functions +--------- + +.. autosummary:: + :toctree: generated/ + + arccos + arcsin + arctanh + log + log2 + logn + log10 + power + sqrt diff --git a/doc/source/reference/routines.err.rst b/doc/source/reference/routines.err.rst index 5272073a3b00..f46634793fa3 100644 --- a/doc/source/reference/routines.err.rst +++ b/doc/source/reference/routines.err.rst @@ -1,8 +1,80 @@ +.. _fp_error_handling: + Floating point error handling ============================= .. currentmodule:: numpy +Error handling settings are stored in :py:mod:`python:contextvars`, +allowing different threads or async tasks to have independent configurations. +For more information, see :ref:`thread_safety`. + +.. _misc-error-handling: + +How numpy handles numerical exceptions +-------------------------------------- + +The default is to ``'warn'`` for ``invalid``, ``divide``, and ``overflow`` +and ``'ignore'`` for ``underflow``. But this can be changed, and it can be +set individually for different kinds of exceptions. The different behaviors +are: + +- ``'ignore'`` : Take no action when the exception occurs. +- ``'warn'`` : Print a :py:exc:`RuntimeWarning` (via the Python :py:mod:`warnings` module). +- ``'raise'`` : Raise a :py:exc:`FloatingPointError`. +- ``'call'`` : Call a specified function. +- ``'print'`` : Print a warning directly to ``stdout``. +- ``'log'`` : Record error in a Log object. + +These behaviors can be set for all kinds of errors or specific ones: + +- ``all`` : apply to all numeric exceptions +- ``invalid`` : when NaNs are generated +- ``divide`` : divide by zero (for integers as well!) +- ``overflow`` : floating point overflows +- ``underflow`` : floating point underflows + +Note that integer divide-by-zero is handled by the same machinery. + +The error handling mode can be configured with the :func:`numpy.errstate` +context manager. + +Examples +-------- + +:: + + >>> with np.errstate(all='warn'): + ... 
np.zeros(5, dtype=np.float32) / 0.0 + :2: RuntimeWarning: invalid value encountered in divide + array([nan, nan, nan, nan, nan], dtype=float32) + +:: + + >>> with np.errstate(under='ignore'): + ... np.array([1.e-100])**10 + array([0.]) + +:: + + >>> with np.errstate(invalid='raise'): + ... np.sqrt(np.array([-1.])) + ... + Traceback (most recent call last): + File "", line 2, in + np.sqrt(np.array([-1.])) + ~~~~~~~^^^^^^^^^^^^^^^^^ + FloatingPointError: invalid value encountered in sqrt + +:: + + >>> def errorhandler(errstr, errflag): + ... print("saw stupid error!") + >>> with np.errstate(call=errorhandler, all='call'): + ... np.zeros(5, dtype=np.int32) / 0 + saw stupid error! + array([nan, nan, nan, nan, nan]) + Setting and getting error handling ---------------------------------- diff --git a/doc/source/reference/routines.io.rst b/doc/source/reference/routines.io.rst index 2b8dd98f36a4..ccd4467af545 100644 --- a/doc/source/reference/routines.io.rst +++ b/doc/source/reference/routines.io.rst @@ -59,8 +59,15 @@ Memory mapping files memmap lib.format.open_memmap +.. _text_formatting_options: + Text formatting options ----------------------- + +Text formatting settings are maintained in a :py:mod:`context variable `, +allowing different threads or async tasks to have independent configurations. +For more information, see :ref:`thread_safety`. + .. autosummary:: :toctree: generated/ diff --git a/doc/source/reference/routines.linalg.rst b/doc/source/reference/routines.linalg.rst index ae9eb629d919..3920e28f9994 100644 --- a/doc/source/reference/routines.linalg.rst +++ b/doc/source/reference/routines.linalg.rst @@ -2,8 +2,8 @@ .. module:: numpy.linalg -Linear algebra (:mod:`numpy.linalg`) -==================================== +Linear algebra +============== The NumPy linear algebra functions rely on BLAS and LAPACK to provide efficient low level implementations of standard linear algebra algorithms. 
Those @@ -60,8 +60,11 @@ Matrix and vector products linalg.vecdot inner outer + linalg.outer matmul linalg.matmul (Array API compatible location) + matvec + vecmat tensordot linalg.tensordot (Array API compatible location) einsum @@ -69,6 +72,7 @@ Matrix and vector products linalg.matrix_power kron linalg.cross + Decompositions -------------- @@ -76,7 +80,6 @@ Decompositions :toctree: generated/ linalg.cholesky - linalg.outer linalg.qr linalg.svd linalg.svdvals @@ -139,8 +142,6 @@ Exceptions Linear algebra on several matrices at once ------------------------------------------ -.. versionadded:: 1.8.0 - Several of the linear algebra routines listed above are able to compute results for several matrices at once, if they are stacked into the same array. diff --git a/doc/source/reference/routines.ma.rst b/doc/source/reference/routines.ma.rst index 2b1b5dac1710..c29ccc4a5f24 100644 --- a/doc/source/reference/routines.ma.rst +++ b/doc/source/reference/routines.ma.rst @@ -247,8 +247,8 @@ Conversion operations ma.masked_where -> to a ndarray -~~~~~~~~~~~~~~ +> to an ndarray +~~~~~~~~~~~~~~~ .. autosummary:: :toctree: generated/ diff --git a/doc/source/reference/routines.math.rst b/doc/source/reference/routines.math.rst index 2c77b2cc1488..f08304e74f51 100644 --- a/doc/source/reference/routines.math.rst +++ b/doc/source/reference/routines.math.rst @@ -63,6 +63,8 @@ Sums, products, differences sum nanprod nansum + cumulative_sum + cumulative_prod cumprod cumsum nancumprod @@ -71,6 +73,7 @@ Sums, products, differences ediff1d gradient cross + trapezoid Exponents and logarithms ------------------------ @@ -190,4 +193,3 @@ Miscellaneous interp - bitwise_count diff --git a/doc/source/reference/routines.polynomials.chebyshev.rst b/doc/source/reference/routines.polynomials.chebyshev.rst index 087b7beb9f06..3256bd52b9cd 100644 --- a/doc/source/reference/routines.polynomials.chebyshev.rst +++ b/doc/source/reference/routines.polynomials.chebyshev.rst @@ -1,5 +1,3 @@ -.. 
versionadded:: 1.4.0 - .. automodule:: numpy.polynomial.chebyshev :no-members: :no-inherited-members: diff --git a/doc/source/reference/routines.polynomials.hermite.rst b/doc/source/reference/routines.polynomials.hermite.rst index c881d9aaf1ea..30c81fb04628 100644 --- a/doc/source/reference/routines.polynomials.hermite.rst +++ b/doc/source/reference/routines.polynomials.hermite.rst @@ -1,5 +1,3 @@ -.. versionadded:: 1.6.0 - .. automodule:: numpy.polynomial.hermite :no-members: :no-inherited-members: diff --git a/doc/source/reference/routines.polynomials.hermite_e.rst b/doc/source/reference/routines.polynomials.hermite_e.rst index bfcb900c8782..edfbee25ffc4 100644 --- a/doc/source/reference/routines.polynomials.hermite_e.rst +++ b/doc/source/reference/routines.polynomials.hermite_e.rst @@ -1,5 +1,3 @@ -.. versionadded:: 1.6.0 - .. automodule:: numpy.polynomial.hermite_e :no-members: :no-inherited-members: diff --git a/doc/source/reference/routines.polynomials.laguerre.rst b/doc/source/reference/routines.polynomials.laguerre.rst index 68c44630077c..35cd84ff9b0b 100644 --- a/doc/source/reference/routines.polynomials.laguerre.rst +++ b/doc/source/reference/routines.polynomials.laguerre.rst @@ -1,5 +1,3 @@ -.. versionadded:: 1.6.0 - .. automodule:: numpy.polynomial.laguerre :no-members: :no-inherited-members: diff --git a/doc/source/reference/routines.polynomials.legendre.rst b/doc/source/reference/routines.polynomials.legendre.rst index e10065b4d5fe..0bf91647ab4e 100644 --- a/doc/source/reference/routines.polynomials.legendre.rst +++ b/doc/source/reference/routines.polynomials.legendre.rst @@ -1,5 +1,3 @@ -.. versionadded:: 1.6.0 - .. 
automodule:: numpy.polynomial.legendre :no-members: :no-inherited-members: diff --git a/doc/source/reference/routines.polynomials.polynomial.rst b/doc/source/reference/routines.polynomials.polynomial.rst index 71000a60db2c..5784b80a2787 100644 --- a/doc/source/reference/routines.polynomials.polynomial.rst +++ b/doc/source/reference/routines.polynomials.polynomial.rst @@ -1,5 +1,3 @@ -.. versionadded:: 1.4.0 - .. automodule:: numpy.polynomial.polynomial :no-members: :no-inherited-members: diff --git a/doc/source/reference/routines.polynomials.rst b/doc/source/reference/routines.polynomials.rst index 8610cb01e7e9..00b4460eae21 100644 --- a/doc/source/reference/routines.polynomials.rst +++ b/doc/source/reference/routines.polynomials.rst @@ -47,23 +47,28 @@ The `~numpy.polynomial.polynomial.Polynomial` class is imported for brevity:: from numpy.polynomial import Polynomial -+------------------------+------------------------------+---------------------------------------+ -| **How to...** | Legacy (`numpy.poly1d`) | `numpy.polynomial` | -+------------------------+------------------------------+---------------------------------------+ -| Create a | ``p = np.poly1d([1, 2, 3])`` | ``p = Polynomial([3, 2, 1])`` | -| polynomial object | | | -| from coefficients [1]_ | | | -+------------------------+------------------------------+---------------------------------------+ -| Create a polynomial | ``r = np.poly([-1, 1])`` | ``p = Polynomial.fromroots([-1, 1])`` | -| object from roots | ``p = np.poly1d(r)`` | | -+------------------------+------------------------------+---------------------------------------+ -| Fit a polynomial of | | | -| degree ``deg`` to data | ``np.polyfit(x, y, deg)`` | ``Polynomial.fit(x, y, deg)`` | -+------------------------+------------------------------+---------------------------------------+ - ++------------------------+----------------------------------------+---------------------------------------+ +| **How to...** | Legacy (`numpy.poly1d`) | 
`numpy.polynomial` | ++------------------------+----------------------------------------+---------------------------------------+ +| Create a | ``p = np.poly1d([1, 2, 3])`` | ``p = Polynomial([3, 2, 1])`` | +| polynomial object | | | +| from coefficients [1]_ | | | ++------------------------+----------------------------------------+---------------------------------------+ +| Create a polynomial | ``r = np.poly([-1, 1])`` | ``p = Polynomial.fromroots([-1, 1])`` | +| object from roots | ``p = np.poly1d(r)`` | | ++------------------------+----------------------------------------+---------------------------------------+ +| Fit a polynomial of | | | +| degree ``deg`` to data | ``np.polyfit(x, y, deg)`` | ``Polynomial.fit(x, y, deg)`` | ++------------------------+----------------------------------------+---------------------------------------+ +| Evaluate a polynomial | ``p(2.0)`` or | ``p(2.0)`` or ``polyval(2.0, p.coef)``| +| at a point [2]_ | ``np.polyval([1, 2, 3], 2.0)`` | (use ``p.convert().coef`` after fit) | ++------------------------+----------------------------------------+---------------------------------------+ .. [1] Note the reversed ordering of the coefficients +.. [2] When evaluating polynomials created with ``fit()``, use ``p(x)`` or + ``polyval(x, p.convert().coef)`` to handle domain/window scaling correctly. + Transition Guide ~~~~~~~~~~~~~~~~ @@ -83,17 +88,21 @@ convert from the legacy polynomial API to the new. For example, the following demonstrates how you would convert a `numpy.poly1d` instance representing the expression :math:`x^{2} + 2x + 3` to a `~numpy.polynomial.polynomial.Polynomial` instance representing the same -expression:: +expression: + +.. try_examples:: + + >>> import numpy as np >>> p1d = np.poly1d([1, 2, 3]) >>> p = np.polynomial.Polynomial(p1d.coef[::-1]) -In addition to the ``coef`` attribute, polynomials from the polynomial -package also have ``domain`` and ``window`` attributes. 
-These attributes are most relevant when fitting -polynomials to data, though it should be noted that polynomials with -different ``domain`` and ``window`` attributes are not considered equal, and -can't be mixed in arithmetic:: + In addition to the ``coef`` attribute, polynomials from the polynomial + package also have ``domain`` and ``window`` attributes. + These attributes are most relevant when fitting + polynomials to data, though it should be noted that polynomials with + different ``domain`` and ``window`` attributes are not considered equal, and + can't be mixed in arithmetic: >>> p1 = np.polynomial.Polynomial([1, 2, 3]) >>> p1 @@ -184,3 +193,4 @@ Documentation for legacy polynomials :maxdepth: 2 routines.polynomials.poly1d + \ No newline at end of file diff --git a/doc/source/reference/routines.rec.rst b/doc/source/reference/routines.rec.rst index 21700332418b..c8c12cc31cef 100644 --- a/doc/source/reference/routines.rec.rst +++ b/doc/source/reference/routines.rec.rst @@ -11,17 +11,20 @@ Record arrays expose the fields of structured arrays as properties. Most commonly, ndarrays contain elements of a single type, e.g. floats, integers, bools etc. However, it is possible for elements to be combinations -of these using structured types, such as:: +of these using structured types, such as: - >>> a = np.array([(1, 2.0), (1, 2.0)], +.. try_examples:: + + >>> import numpy as np + >>> a = np.array([(1, 2.0), (1, 2.0)], ... dtype=[('x', np.int64), ('y', np.float64)]) >>> a array([(1, 2.), (1, 2.)], dtype=[('x', '>> a['x'] array([1, 1]) @@ -29,13 +32,11 @@ one would a dictionary:: >>> a['y'] array([2., 2.]) -Record arrays allow us to access fields as properties:: + Record arrays allow us to access fields as properties: >>> ar = np.rec.array(a) - >>> ar.x array([1, 1]) - >>> ar.y array([2., 2.]) @@ -55,4 +56,3 @@ Functions Also, the `numpy.recarray` class and the `numpy.record` scalar dtype are present in this namespace. 
- diff --git a/doc/source/reference/routines.rst b/doc/source/reference/routines.rst index e4dabd0e60a0..df60405f8030 100644 --- a/doc/source/reference/routines.rst +++ b/doc/source/reference/routines.rst @@ -4,11 +4,9 @@ Routines and objects by topic ***************************** -In this chapter routine docstrings are presented, grouped by functionality. +In this chapter, routine docstrings are presented, grouped by functionality. Many docstrings contain example code, which demonstrates basic usage -of the routine. The examples assume that NumPy is imported with:: - - >>> import numpy as np +of the routine. A convenient way to execute examples is the ``%doctest_mode`` mode of IPython, which allows for pasting of multi-line examples and preserves diff --git a/doc/source/reference/routines.set.rst b/doc/source/reference/routines.set.rst index fbb5afdc1b75..47080f96fff8 100644 --- a/doc/source/reference/routines.set.rst +++ b/doc/source/reference/routines.set.rst @@ -19,7 +19,6 @@ Boolean operations .. autosummary:: :toctree: generated/ - in1d intersect1d isin setdiff1d diff --git a/doc/source/reference/routines.strings.rst b/doc/source/reference/routines.strings.rst index f0af9475d10f..68387aee22ff 100644 --- a/doc/source/reference/routines.strings.rst +++ b/doc/source/reference/routines.strings.rst @@ -9,10 +9,12 @@ String functionality The `numpy.strings` module provides a set of universal functions operating on arrays of type `numpy.str_` or `numpy.bytes_`. -For example +For example, - >>> np.strings.add(["num", "doc"], ["py", "umentation"]) - array(['numpy', 'documentation'], dtype='>> np.strings.add(["num", "doc"], ["py", "umentation"]) + array(['numpy', 'documentation'], dtype='`_. -The `Python security reporting guidelines `_ -are a good resource and its notes apply also to NumPy. NumPy's maintainers are not security experts. However, we are conscientious about security and experts of both the NumPy codebase and how it's used. 
@@ -14,17 +12,31 @@ A security advisory we are not aware of beforehand can lead to a lot of work for all involved parties. -Advice for using NumPy on untrusted data ----------------------------------------- +Important +--------- +NumPy is not designed to be exposed directly to untrusted users. A user who can freely execute NumPy (or Python) functions must be considered -to have the same privilege as the process/Python interpreter. +to have the same privileges as the process/Python interpreter. + +If one can already execute Python code, there are far worse things one can do +than use all available CPU cycles, or provoke a symptom of a bug in Code like +use-after-free or a segfault. Therefore, while such issues may be bugs, they +are not security issues. + +Before reporting a security issue, please consider and describe the attack +vector in detail - and in particular whether that attack vector assumes being +able to freely execute NumPy functions. + + +Advice for using NumPy on untrusted data +---------------------------------------- -That said, NumPy should be generally safe to use on *data* provided by +NumPy should be generally safe to use on *data* provided by unprivileged users and read through safe API functions (e.g. loaded from a text file or ``.npy`` file without pickle support). Malicious *values* or *data sizes* should never lead to privilege escalation. -Note that the above refers to array data. We do not currently consider for +Note that the above refers to *array data*. We do not currently consider for example ``f2py`` to be safe: it is typically used to compile a program that is then run. Any ``f2py`` invocation must thus use the same privilege as the later execution. diff --git a/doc/source/reference/simd/build-options.rst b/doc/source/reference/simd/build-options.rst index b4daf09a5b42..8532ee307fa1 100644 --- a/doc/source/reference/simd/build-options.rst +++ b/doc/source/reference/simd/build-options.rst @@ -1,222 +1,396 @@ +.. 
_cpu-build-options:
+
 *****************
-CPU build options
+CPU Build Options
 *****************
 
-Description
------------
-
-The following options are mainly used to change the default behavior of optimizations
-that target certain CPU features:
+Overview
+--------
 
-- ``cpu-baseline``: minimal set of required CPU features.
-  Default value is ``min`` which provides the minimum CPU features that can
-  safely run on a wide range of platforms within the processor family.
+NumPy provides configuration options to optimize performance based on CPU capabilities.
+These options allow you to specify which CPU features to support, balancing performance, compatibility, and binary size.
+This document explains how to use these options effectively across various CPU architectures.
 
-  .. note::
+Key Configuration Options
+-------------------------
 
-    During the runtime, NumPy modules will fail to load if any of specified features
-    are not supported by the target CPU (raises Python runtime error).
+NumPy uses several build options to control CPU optimizations:
 
-- ``cpu-dispatch``: dispatched set of additional CPU features.
-  Default value is ``max -xop -fma4`` which enables all CPU
-  features, except for AMD legacy features (in case of X86).
+- ``cpu-baseline``: The minimum set of CPU features required to run the compiled NumPy.
+
+  * Default: ``min`` (provides compatibility across a wide range of platforms)
+  * If your target CPU doesn't support all specified baseline features, NumPy will fail to load with a Python runtime error
 
-  .. note::
+- ``cpu-baseline-detect``: controls detection of CPU baseline based on compiler
+  flags. Default value is ``auto`` that enables detection if ``-march=``
+  or a similar compiler flag is used. The other possible values are ``enabled``
+  and ``disabled`` to respectively enable or disable it unconditionally.
 
-    During the runtime, NumPy modules will skip any specified features
-    that are not available in the target CPU.
+- ``cpu-dispatch``: Additional CPU features for which optimized code paths will be generated. + + * Default: ``max`` (enables all available optimizations) + * At runtime, NumPy will automatically select the fastest available code path based on your CPU's capabilities -These options are accessible at build time by passing setup arguments to meson-python -via the build frontend (e.g., ``pip`` or ``build``). -They accept a set of :ref:`CPU features ` -or groups of features that gather several features or -:ref:`special options ` that -perform a series of procedures. +- ``disable-optimization``: Completely disables all CPU optimizations. + + * Default: ``false`` (optimizations are enabled) + * When set to ``true``, disables all CPU optimized code including dispatch, SIMD, and loop unrolling + * Useful for debugging, testing, or in environments where optimization causes issues -To customize CPU/build options:: +These options are specified at build time via meson-python arguments:: - pip install . -Csetup-args=-Dcpu-baseline="avx2 fma3" -Csetup-args=-Dcpu-dispatch="max" + pip install . -Csetup-args=-Dcpu-baseline="min" -Csetup-args=-Dcpu-dispatch="max" + # or through spin + spin build -- -Dcpu-baseline="min" -Dcpu-dispatch="max" -Quick start ------------ +``cpu-baseline`` and ``cpu-dispatch`` can be set to specific :ref:`CPU groups, features`, or :ref:`special options ` +that perform specific actions. The following sections describe these options in detail. -In general, the default settings tend to not impose certain CPU features that -may not be available on some older processors. Raising the ceiling of the -baseline features will often improve performance and may also reduce -binary size. 
+Common Usage Scenarios
+----------------------
+
+Building for Local Use Only
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-The following are the most common scenarios that may require changing
-the default settings:
+When building for your machine only and not planning to distribute::
 
+    python -m build --wheel -Csetup-args=-Dcpu-baseline="native" -Csetup-args=-Dcpu-dispatch="none"
 
-I am building NumPy for my local use
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+This automatically detects and uses all CPU features available on your machine.
 
-And I do not intend to export the build to other users or target a
-different CPU than what the host has.
+.. note::
+    A fatal error will be raised if ``native`` isn't supported by the host platform.
 
-Set ``native`` for baseline, or manually specify the CPU features in case of option
-``native`` isn't supported by your platform::
+Excluding Specific Features
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-    python -m build --wheel -Csetup-args=-Dcpu-baseline="native"
+You may want to exclude certain CPU features from the dispatched features::
 
-Building NumPy with extra CPU features isn't necessary for this case,
-since all supported features are already defined within the baseline features::
+    # For x86-64: exclude all AVX-512 features
+    python -m build --wheel -Csetup-args=-Dcpu-dispatch="max -X86_V4"
 
-    python -m build --wheel -Csetup-args=-Dcpu-baseline="native" \
-           -Csetup-args=-Dcpu-dispatch="none"
+    # For ARM64: exclude SVE
+    python -m build --wheel -Csetup-args=-Dcpu-dispatch="max -SVE"
 
 .. note::
+    Excluding a feature will also exclude any successor features that are
+    implied by the excluded feature. For example, excluding ``X86_V4`` will
+    exclude ``AVX512_ICL`` and ``AVX512_SPR`` as well.
+
+Targeting Older CPUs
+~~~~~~~~~~~~~~~~~~~~
+
+On ``x86-64``, by default the baseline is set to ``min`` which maps to ``X86_V2``.
+This is unsuitable for older CPUs (before 2009) or old virtual machines.
+To address this, set the baseline to ``none``:: + + python -m build --wheel -Csetup-args=-Dcpu-baseline="none" - A fatal error will be raised if ``native`` isn't supported by the host platform. +This will create a build that is compatible with all x86 CPUs, but +without any manual optimizations or SIMD code paths for the baseline. +The build will rely only on dispatched code paths for optimization. -I do not want to support the old processors of the x86 architecture -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Targeting Newer CPUs +~~~~~~~~~~~~~~~~~~~~ -Since most of the CPUs nowadays support at least ``AVX``, ``F16C`` features, you can use:: +Raising the baseline improves performance for two main reasons: - python -m build --wheel -Csetup-args=-Dcpu-baseline="avx f16c" +1. Dispatched kernels don't cover all code paths +2. A higher baseline leads to smaller binary size as the compiler won't generate code paths for excluded dispatched features + +For CPUs from 2015 and newer, setting the baseline to ``X86_V3`` may be suitable:: + + python -m build --wheel -Csetup-args=-Dcpu-baseline="min+X86_V3" + +.. _opt-supported-features: + +Supported CPU Features By Architecture +-------------------------------------- + +NumPy supports optimized code paths for multiple CPU architectures. Below are the supported feature groups for each architecture. +The name of the feature group can be used in the build options ``cpu-baseline`` and ``cpu-dispatch``. + +X86 +~~~ +.. 
list-table:: + :header-rows: 1 + :align: left + + * - Name + - Implies + - Includes + * - ``X86_V2`` + - + - ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE4_1`` ``SSE4_2`` ``POPCNT`` ``CX16`` ``LAHF`` + * - ``X86_V3`` + - ``X86_V2`` + - ``AVX`` ``AVX2`` ``FMA3`` ``BMI`` ``BMI2`` ``LZCNT`` ``F16C`` ``MOVBE`` + * - ``X86_V4`` + - ``X86_V3`` + - ``AVX512F`` ``AVX512CD`` ``AVX512VL`` ``AVX512BW`` ``AVX512DQ`` + * - ``AVX512_ICL`` + - ``X86_V4`` + - ``AVX512VBMI`` ``AVX512VBMI2`` ``AVX512VNNI`` ``AVX512BITALG`` ``AVX512VPOPCNTDQ`` ``AVX512IFMA`` ``VAES`` ``GFNI`` ``VPCLMULQDQ`` + * - ``AVX512_SPR`` + - ``AVX512_ICL`` + - ``AVX512FP16`` + +These groups correspond to CPU generations: + +- ``X86_V2``: x86-64-v2 microarchitectures (CPUs since 2009) +- ``X86_V3``: x86-64-v3 microarchitectures (CPUs since 2015) +- ``X86_V4``: x86-64-v4 microarchitectures (AVX-512 capable CPUs) +- ``AVX512_ICL``: Intel Ice Lake and similar CPUs +- ``AVX512_SPR``: Intel Sapphire Rapids and newer CPUs .. note:: + On 32-bit x86, ``cx16`` is excluded from ``X86_V2``. + +On IBM/POWER big-endian +~~~~~~~~~~~~~~~~~~~~~~~ +.. list-table:: + :header-rows: 1 + :align: left + + * - Name + - Implies + * - ``VSX`` + - + * - ``VSX2`` + - ``VSX`` + * - ``VSX3`` + - ``VSX`` ``VSX2`` + * - ``VSX4`` + - ``VSX`` ``VSX2`` ``VSX3`` + +On IBM/POWER little-endian +~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. list-table:: + :header-rows: 1 + :align: left + + * - Name + - Implies + * - ``VSX`` + - ``VSX2`` + * - ``VSX2`` + - ``VSX`` + * - ``VSX3`` + - ``VSX`` ``VSX2`` + * - ``VSX4`` + - ``VSX`` ``VSX2`` ``VSX3`` + +On ARMv7/A32 +~~~~~~~~~~~~ +.. 
list-table:: + :header-rows: 1 + :align: left + + * - Name + - Implies + * - ``NEON`` + - + * - ``NEON_FP16`` + - ``NEON`` + * - ``NEON_VFPV4`` + - ``NEON`` ``NEON_FP16`` + * - ``ASIMD`` + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` + * - ``ASIMDHP`` + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` + * - ``ASIMDDP`` + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` + * - ``ASIMDFHM`` + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` ``ASIMDHP`` + +On ARMv8/A64 +~~~~~~~~~~~~ +.. list-table:: + :header-rows: 1 + :align: left + + * - Name + - Implies + * - ``NEON`` + - ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` + * - ``NEON_FP16`` + - ``NEON`` ``NEON_VFPV4`` ``ASIMD`` + * - ``NEON_VFPV4`` + - ``NEON`` ``NEON_FP16`` ``ASIMD`` + * - ``ASIMD`` + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` + * - ``ASIMDHP`` + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` + * - ``ASIMDDP`` + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` + * - ``ASIMDFHM`` + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` ``ASIMDHP`` + +On IBM/ZSYSTEM(S390X) +~~~~~~~~~~~~~~~~~~~~~ +.. list-table:: + :header-rows: 1 + :align: left + + * - Name + - Implies + * - ``VX`` + - + * - ``VXE`` + - ``VX`` + * - ``VXE2`` + - ``VX`` ``VXE`` + +On RISCV64 +~~~~~~~~~~~~~~~~~~~~~ +.. list-table:: + :header-rows: 1 + :align: left + + * - Name + - Implies + * - ``RVV`` + - - ``cpu-baseline`` force combine all implied features, so there's no need - to add SSE features. +.. _opt-special-options: +Special Options +--------------- -I'm facing the same case above but with ppc64 architecture -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Beyond specific feature names, you can use these special values: -Then raise the ceiling of the baseline features to Power8:: +``NONE`` +~~~~~~~~ - python -m build --wheel -Csetup-args=-Dcpu-baseline="vsx2" +Enables no features (equivalent to an empty string). -Having issues with AVX512 features? 
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +``NATIVE`` +~~~~~~~~~~ -You may have some reservations about including of ``AVX512`` or -any other CPU feature and you want to exclude from the dispatched features:: +Enables all features supported by the host CPU. - python -m build --wheel -Csetup-args=-Dcpu-dispatch="max -avx512f -avx512cd \ - -avx512_knl -avx512_knm -avx512_skx -avx512_clx -avx512_cnl -avx512_icl" +``DETECT`` +~~~~~~~~~~ -.. _opt-supported-features: +Detects the features enabled by the compiler. This option is appended by default +to ``cpu-baseline`` if ``-march``, ``-mcpu``, ``-xhost``, or ``/QxHost`` is set in +the environment variable ``CFLAGS`` unless ``cpu-baseline-detect`` is ``disabled``. -Supported features ------------------- +``MIN`` +~~~~~~~ -The names of the features can express one feature or a group of features, -as shown in the following tables supported depend on the lowest interest: +Enables the minimum CPU features for each architecture: -.. note:: +.. list-table:: + :header-rows: 1 + :align: left - The following features may not be supported by all compilers, - also some compilers may produce different set of implied features - when it comes to features like ``AVX512``, ``AVX2``, and ``FMA3``. - See :ref:`opt-platform-differences` for more details. + * - For Arch + - Implies + * - x86 (32-bit) + - ``X86_V2`` + * - x86-64 + - ``X86_V2`` + * - IBM/POWER (big-endian) + - ``NONE`` + * - IBM/POWER (little-endian) + - ``VSX`` ``VSX2`` + * - ARMv7/ARMHF + - ``NONE`` + * - ARMv8/AArch64 + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` + * - IBM/ZSYSTEM(S390X) + - ``NONE`` + * - riscv64 + - ``NONE`` -.. include:: generated_tables/cpu_features.inc -.. _opt-special-options: +``MAX`` +~~~~~~~ -Special options ---------------- +Enables all features supported by the compiler and platform. -- ``NONE``: enable no features. 
+Operator Operators (``-``/``+``) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- ``NATIVE``: Enables all CPU features that supported by the host CPU, - this operation is based on the compiler flags (``-march=native``, ``-xHost``, ``/QxHost``) +Remove or add specific features, useful with ``MAX``, ``MIN``, and ``NATIVE``: -- ``MIN``: Enables the minimum CPU features that can safely run on a wide range of platforms: +- Adding a feature (``+``) includes all implied features +- Removing a feature (``-``) excludes all successor features that imply the removed feature - .. table:: - :align: left +Examples:: - ====================================== ======================================= - For Arch Implies - ====================================== ======================================= - x86 (32-bit mode) ``SSE`` ``SSE2`` - x86_64 ``SSE`` ``SSE2`` ``SSE3`` - IBM/POWER (big-endian mode) ``NONE`` - IBM/POWER (little-endian mode) ``VSX`` ``VSX2`` - ARMHF ``NONE`` - ARM64 A.K. AARCH64 ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` - ``ASIMD`` - IBM/ZSYSTEM(S390X) ``NONE`` - ====================================== ======================================= + python -m build --wheel -Csetup-args=-Dcpu-dispatch="max-X86_V4" + python -m build --wheel -Csetup-args=-Dcpu-baseline="min+X86_V4" -- ``MAX``: Enables all supported CPU features by the compiler and platform. +Usage And Behaviors +------------------- -- ``Operators-/+``: remove or add features, useful with options ``MAX``, ``MIN`` and ``NATIVE``. 
+Case Insensitivity +~~~~~~~~~~~~~~~~~~ -Behaviors ---------- +CPU features and options are case-insensitive:: + + python -m build --wheel -Csetup-args=-Dcpu-dispatch="X86_v4" + +Mixing Features across Architectures +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- CPU features and other options are case-insensitive, for example:: +You can mix features from different architectures:: - python -m build --wheel -Csetup-args=-Dcpu-dispatch="SSE41 avx2 FMA3" + python -m build --wheel -Csetup-args=-Dcpu-baseline="X86_V4 VSX4 SVE" -- The order of the requested optimizations doesn't matter:: +Order Independence +~~~~~~~~~~~~~~~~~~ - python -m build --wheel -Csetup-args=-Dcpu-dispatch="SSE41 AVX2 FMA3" - # equivalent to - python -m build --wheel -Csetup-args=-Dcpu-dispatch="FMA3 AVX2 SSE41" +The order of specified features doesn't matter:: -- Either commas or spaces or '+' can be used as a separator, - for example:: + python -m build --wheel -Csetup-args=-Dcpu-dispatch="SVE X86_V4 x86_v3" - python -m build --wheel -Csetup-args=-Dcpu-dispatch="avx2 avx512f" - # or - python -m build --wheel -Csetup-args=-Dcpu-dispatch=avx2,avx512f - # or - python -m build --wheel -Csetup-args=-Dcpu-dispatch="avx2+avx512f" +Separators +~~~~~~~~~~ - all works but arguments should be enclosed in quotes or escaped - by backslash if any spaces are used. 
+You can use spaces or commas as separators:: -- ``cpu-baseline`` combines all implied CPU features, for example:: + # All of these are equivalent + python -m build --wheel -Csetup-args=-Dcpu-dispatch="X86_V2 X86_V4" + python -m build --wheel -Csetup-args=-Dcpu-dispatch=X86_V2,X86_V4 - python -m build --wheel -Csetup-args=-Dcpu-baseline=sse42 - # equivalent to - python -m build --wheel -Csetup-args=-Dcpu-baseline="sse sse2 sse3 ssse3 sse41 popcnt sse42" +Feature Combination +~~~~~~~~~~~~~~~~~~~ -- ``cpu-baseline`` will be treated as "native" if compiler native flag - ``-march=native`` or ``-xHost`` or ``/QxHost`` is enabled through environment variable - ``CFLAGS``:: +Features specified in options are automatically combined with all implied features:: - export CFLAGS="-march=native" - pip install . - # is equivalent to - pip install . -Csetup-args=-Dcpu-baseline=native + python -m build --wheel -Csetup-args=-Dcpu-baseline=X86_V4 -- ``cpu-baseline`` escapes any specified features that aren't supported - by the target platform or compiler rather than raising fatal errors. +Equivalent to:: - .. note:: + python -m build --wheel -Csetup-args=-Dcpu-baseline="X86_V2 X86_V3 X86_V4" - Since ``cpu-baseline`` combines all implied features, the maximum - supported of implied features will be enabled rather than escape all of them. - For example:: +Baseline Overlapping +~~~~~~~~~~~~~~~~~~~~ - # Requesting `AVX2,FMA3` but the compiler only support **SSE** features - python -m build --wheel -Csetup-args=-Dcpu-baseline="avx2 fma3" - # is equivalent to - python -m build --wheel -Csetup-args=-Dcpu-baseline="sse sse2 sse3 ssse3 sse41 popcnt sse42" +Features specified in ``cpu-baseline`` will be excluded from the ``cpu-dispatch`` features, +along with their implied features, but without excluding successor features that imply them. 
-- ``cpu-dispatch`` does not combain any of implied CPU features, - so you must add them unless you want to disable one or all of them:: +For instance, if you specify ``cpu-baseline="X86_V4"``, it will exclude ``X86_V4`` and its +implied features ``X86_V2`` and ``X86_V3`` from the ``cpu-dispatch`` features. - # Only dispatches AVX2 and FMA3 - python -m build --wheel -Csetup-args=-Dcpu-dispatch=avx2,fma3 - # Dispatches AVX and SSE features - python -m build --wheel -Csetup-args=-Dcpu-dispatch=ssse3,sse41,sse42,avx,avx2,fma3 +Compile-time Detection +~~~~~~~~~~~~~~~~~~~~~~ -- ``cpu-dispatch`` escapes any specified baseline features and also escapes - any features not supported by the target platform or compiler without raising - fatal errors. +Specifying features to ``cpu-dispatch`` or ``cpu-baseline`` doesn't explicitly enable them. +Features are detected at compile time, and the maximum available features based on your +specified options will be enabled according to toolchain and platform support. + +This detection occurs by testing feature availability in the compiler through compile-time +source files containing common intrinsics for the specified features. If both the compiler +and assembler support the feature, it will be enabled. + +For example, if you specify ``cpu-dispatch="AVX512_ICL"`` but your compiler doesn't support it, +the feature will be excluded from the build. However, any implied features will still be +enabled if they're supported. -Eventually, you should always check the final report through the build log -to verify the enabled features. See :ref:`opt-build-report` for more details. .. 
_opt-platform-differences: @@ -234,7 +408,7 @@ The need to align certain CPU features that are assured to be supported by successive generations of the same architecture, some cases: - On ppc64le ``VSX(ISA 2.06)`` and ``VSX2(ISA 2.07)`` both imply one another since the - first generation that supports little-endian mode is Power-8`(ISA 2.07)` + first generation that supports little-endian mode is ``Power-8(ISA 2.07)`` - On AArch64 ``NEON NEON_FP16 NEON_VFPV4 ASIMD`` implies each other since they are part of the hardware baseline. @@ -251,43 +425,6 @@ For example:: Please take a deep look at :ref:`opt-supported-features`, in order to determine the features that imply one another. -**Compilation compatibility** - -Some compilers don't provide independent support for all CPU features. For instance -**Intel**'s compiler doesn't provide separated flags for ``AVX2`` and ``FMA3``, -it makes sense since all Intel CPUs that comes with ``AVX2`` also support ``FMA3``, -but this approach is incompatible with other **x86** CPUs from **AMD** or **VIA**. - -For example:: - - # Specify AVX2 will force enables FMA3 on Intel compilers - python -m build --wheel -Csetup-args=-Dcpu-baseline=avx2 - # which is equivalent to - python -m build --wheel -Csetup-args=-Dcpu-baseline="avx2 fma3" - - -The following tables only show the differences imposed by some compilers from the -general context that been shown in the :ref:`opt-supported-features` tables: - -.. note:: - - Features names with strikeout represent the unsupported CPU features. - -.. raw:: html - - - -.. role:: enabled - :class: enabled-feature - -.. role:: disabled - :class: disabled-feature - -.. include:: generated_tables/compilers-diff.inc - .. _opt-build-report: Build report @@ -300,7 +437,7 @@ expected CPU features by the compiler. So we strongly recommend checking the final report log, to be aware of what kind of CPU features are enabled and what are not. 
-You can find the final report of CPU optimizations at the end of the build log, +You can find the final report of CPU optimizations by tracing meson build log, and here is how it looks on x86_64/gcc: .. raw:: html @@ -310,94 +447,63 @@ and here is how it looks on x86_64/gcc: .. literalinclude:: log_example.txt :language: bash -There is a separate report for each of ``build_ext`` and ``build_clib`` -that includes several sections, and each section has several values, representing the following: - -**Platform**: - -- :enabled:`Architecture`: The architecture name of target CPU. It should be one of - ``x86``, ``x64``, ``ppc64``, ``ppc64le``, ``armhf``, ``aarch64``, ``s390x`` or ``unknown``. - -- :enabled:`Compiler`: The compiler name. It should be one of - gcc, clang, msvc, icc, iccw or unix-like. - -**CPU baseline**: - -- :enabled:`Requested`: The specific features and options to ``cpu-baseline`` as-is. -- :enabled:`Enabled`: The final set of enabled CPU features. -- :enabled:`Flags`: The compiler flags that were used to all NumPy C/C++ sources - during the compilation except for temporary sources that have been used for generating - the binary objects of dispatched features. -- :enabled:`Extra checks`: list of internal checks that activate certain functionality - or intrinsics related to the enabled features, useful for debugging when it comes - to developing SIMD kernels. - -**CPU dispatch**: - -- :enabled:`Requested`: The specific features and options to ``cpu-dispatch`` as-is. -- :enabled:`Enabled`: The final set of enabled CPU features. -- :enabled:`Generated`: At the beginning of the next row of this property, - the features for which optimizations have been generated are shown in the - form of several sections with similar properties explained as follows: - - - :enabled:`One or multiple dispatched feature`: The implied CPU features. - - :enabled:`Flags`: The compiler flags that been used for these features. 
- - :enabled:`Extra checks`: Similar to the baseline but for these dispatched features. - - :enabled:`Detect`: Set of CPU features that need be detected in runtime in order to - execute the generated optimizations. - - The lines that come after the above property and end with a ':' on a separate line, - represent the paths of c/c++ sources that define the generated optimizations. .. _runtime-simd-dispatch: -Runtime dispatch +Runtime Dispatch ---------------- + Importing NumPy triggers a scan of the available CPU features from the set -of dispatchable features. This can be further restricted by setting the +of dispatchable features. You can restrict this scan by setting the environment variable ``NPY_DISABLE_CPU_FEATURES`` to a comma-, tab-, or -space-separated list of features to disable. This will raise an error if -parsing fails or if the feature was not enabled. For instance, on ``x86_64`` -this will disable ``AVX2`` and ``FMA3``:: +space-separated list of features to disable. + +For instance, on ``x86_64`` this will disable ``X86_V4``:: - NPY_DISABLE_CPU_FEATURES="AVX2,FMA3" + NPY_DISABLE_CPU_FEATURES="X86_V4" -If the feature is not available, a warning will be emitted. +This will raise an error if parsing fails or if the feature was not enabled through the ``cpu-dispatch`` build option. +If the feature is supported by the build but not available on the current CPU, a warning will be emitted instead. -Tracking dispatched functions +Tracking Dispatched Functions ----------------------------- -Discovering which CPU targets are enabled for different optimized functions is achievable -through the Python function ``numpy.lib.introspect.opt_func_info``. -This function offers the flexibility of applying filters using two optional arguments: -one for refining function names and the other for specifying data types in the signatures. 
+ +You can discover which CPU targets are enabled for different optimized functions using +the Python function ``numpy.lib.introspect.opt_func_info``. + +This function offers two optional arguments for filtering results: + +1. ``func_name`` - For refining function names +2. ``signature`` - For specifying data types in the signatures For example:: >> func_info = numpy.lib.introspect.opt_func_info(func_name='add|abs', signature='float64|complex64') >> print(json.dumps(func_info, indent=2)) { - "absolute": { - "dd": { - "current": "SSE41", - "available": "SSE41 baseline(SSE SSE2 SSE3)" - }, - "Ff": { - "current": "FMA3__AVX2", - "available": "AVX512F FMA3__AVX2 baseline(SSE SSE2 SSE3)" - }, - "Dd": { - "current": "FMA3__AVX2", - "available": "AVX512F FMA3__AVX2 baseline(SSE SSE2 SSE3)" - } - }, - "add": { - "ddd": { - "current": "FMA3__AVX2", - "available": "FMA3__AVX2 baseline(SSE SSE2 SSE3)" - }, - "FFF": { - "current": "FMA3__AVX2", - "available": "FMA3__AVX2 baseline(SSE SSE2 SSE3)" - } + "absolute": { + "dd": { + "current": "baseline(X86_V2)", + "available": "baseline(X86_V2)" + }, + "Ff": { + "current": "X86_V3", + "available": "X86_V3 baseline(X86_V2)" + }, + "Dd": { + "current": "X86_V3", + "available": "X86_V3 baseline(X86_V2)" + } + }, + "add": { + "ddd": { + "current": "X86_V3", + "available": "X86_V3 baseline(X86_V2)" + }, + "FFF": { + "current": "X86_V3", + "available": "X86_V3 baseline(X86_V2)" + } } } diff --git a/doc/source/reference/simd/gen_features.py b/doc/source/reference/simd/gen_features.py deleted file mode 100644 index b141e23d0dd7..000000000000 --- a/doc/source/reference/simd/gen_features.py +++ /dev/null @@ -1,196 +0,0 @@ -""" -Generate CPU features tables from CCompilerOpt -""" -from os import sys, path -from numpy.distutils.ccompiler_opt import CCompilerOpt - -class FakeCCompilerOpt(CCompilerOpt): - # disable caching no need for it - conf_nocache = True - - def __init__(self, arch, cc, *args, **kwargs): - self.fake_info = (arch, cc, '') - 
CCompilerOpt.__init__(self, None, **kwargs) - - def dist_compile(self, sources, flags, **kwargs): - return sources - - def dist_info(self): - return self.fake_info - - @staticmethod - def dist_log(*args, stderr=False): - # avoid printing - pass - - def feature_test(self, name, force_flags=None, macros=[]): - # To speed up - return True - -class Features: - def __init__(self, arch, cc): - self.copt = FakeCCompilerOpt(arch, cc, cpu_baseline="max") - - def names(self): - return self.copt.cpu_baseline_names() - - def serialize(self, features_names): - result = [] - for f in self.copt.feature_sorted(features_names): - gather = self.copt.feature_supported.get(f, {}).get("group", []) - implies = self.copt.feature_sorted(self.copt.feature_implies(f)) - result.append((f, implies, gather)) - return result - - def table(self, **kwargs): - return self.gen_table(self.serialize(self.names()), **kwargs) - - def table_diff(self, vs, **kwargs): - fnames = set(self.names()) - fnames_vs = set(vs.names()) - common = fnames.intersection(fnames_vs) - extra = fnames.difference(fnames_vs) - notavl = fnames_vs.difference(fnames) - iextra = {} - inotavl = {} - idiff = set() - for f in common: - implies = self.copt.feature_implies(f) - implies_vs = vs.copt.feature_implies(f) - e = implies.difference(implies_vs) - i = implies_vs.difference(implies) - if not i and not e: - continue - if e: - iextra[f] = e - if i: - inotavl[f] = e - idiff.add(f) - - def fbold(f): - if f in extra: - return f':enabled:`{f}`' - if f in notavl: - return f':disabled:`{f}`' - return f - - def fbold_implies(f, i): - if i in iextra.get(f, {}): - return f':enabled:`{i}`' - if f in notavl or i in inotavl.get(f, {}): - return f':disabled:`{i}`' - return i - - diff_all = self.serialize(idiff.union(extra)) - diff_all += vs.serialize(notavl) - content = self.gen_table( - diff_all, fstyle=fbold, fstyle_implies=fbold_implies, **kwargs - ) - return content - - def gen_table(self, serialized_features, fstyle=None, 
fstyle_implies=None, - **kwargs): - - if fstyle is None: - fstyle = lambda ft: f'``{ft}``' - if fstyle_implies is None: - fstyle_implies = lambda origin, ft: fstyle(ft) - - rows = [] - have_gather = False - for f, implies, gather in serialized_features: - if gather: - have_gather = True - name = fstyle(f) - implies = ' '.join([fstyle_implies(f, i) for i in implies]) - gather = ' '.join([fstyle_implies(f, i) for i in gather]) - rows.append((name, implies, gather)) - if not rows: - return '' - fields = ["Name", "Implies", "Gathers"] - if not have_gather: - del fields[2] - rows = [(name, implies) for name, implies, _ in rows] - return self.gen_rst_table(fields, rows, **kwargs) - - def gen_rst_table(self, field_names, rows, tab_size=4): - assert(not rows or len(field_names) == len(rows[0])) - rows.append(field_names) - fld_len = len(field_names) - cls_len = [max(len(c[i]) for c in rows) for i in range(fld_len)] - del rows[-1] - cformat = ' '.join('{:<%d}' % i for i in cls_len) - border = cformat.format(*['='*i for i in cls_len]) - - rows = [cformat.format(*row) for row in rows] - # header - rows = [border, cformat.format(*field_names), border] + rows - # footer - rows += [border] - # add left margin - rows = [(' ' * tab_size) + r for r in rows] - return '\n'.join(rows) - -def wrapper_section(title, content, tab_size=4): - tab = ' '*tab_size - if content: - return ( - f"{title}\n{'~'*len(title)}" - f"\n.. table::\n{tab}:align: left\n\n" - f"{content}\n\n" - ) - return '' - -def wrapper_tab(title, table, tab_size=4): - tab = ' '*tab_size - if table: - ('\n' + tab).join(( - '.. tab:: ' + title, - tab + '.. 
table::', - tab + 'align: left', - table + '\n\n' - )) - return '' - - -if __name__ == '__main__': - - pretty_names = { - "PPC64": "IBM/POWER big-endian", - "PPC64LE": "IBM/POWER little-endian", - "S390X": "IBM/ZSYSTEM(S390X)", - "ARMHF": "ARMv7/A32", - "AARCH64": "ARMv8/A64", - "ICC": "Intel Compiler", - # "ICCW": "Intel Compiler msvc-like", - "MSVC": "Microsoft Visual C/C++" - } - gen_path = path.join( - path.dirname(path.realpath(__file__)), "generated_tables" - ) - with open(path.join(gen_path, 'cpu_features.inc'), 'w') as fd: - fd.write(f'.. generated via {__file__}\n\n') - for arch in ( - ("x86", "PPC64", "PPC64LE", "ARMHF", "AARCH64", "S390X") - ): - title = "On " + pretty_names.get(arch, arch) - table = Features(arch, 'gcc').table() - fd.write(wrapper_section(title, table)) - - with open(path.join(gen_path, 'compilers-diff.inc'), 'w') as fd: - fd.write(f'.. generated via {__file__}\n\n') - for arch, cc_names in ( - ("x86", ("clang", "ICC", "MSVC")), - ("PPC64", ("clang",)), - ("PPC64LE", ("clang",)), - ("ARMHF", ("clang",)), - ("AARCH64", ("clang",)), - ("S390X", ("clang",)) - ): - arch_pname = pretty_names.get(arch, arch) - for cc in cc_names: - title = f"On {arch_pname}::{pretty_names.get(cc, cc)}" - table = Features(arch, cc).table_diff(Features(arch, "gcc")) - fd.write(wrapper_section(title, table)) - - diff --git a/doc/source/reference/simd/generated_tables/compilers-diff.inc b/doc/source/reference/simd/generated_tables/compilers-diff.inc deleted file mode 100644 index d5a87da3c617..000000000000 --- a/doc/source/reference/simd/generated_tables/compilers-diff.inc +++ /dev/null @@ -1,35 +0,0 @@ -.. generated via /numpy/numpy/./doc/source/reference/simd/gen_features.py - -On x86::Intel Compiler -~~~~~~~~~~~~~~~~~~~~~~ -.. 
table:: - :align: left - - ====================== ================================================================================================================================================================================================================================================================================================================================== ====================== - Name Implies Gathers - ====================== ================================================================================================================================================================================================================================================================================================================================== ====================== - FMA3 SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C :enabled:`AVX2` - AVX2 SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C :enabled:`FMA3` - AVX512F SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C FMA3 AVX2 :enabled:`AVX512CD` - :disabled:`XOP` :disabled:`SSE` :disabled:`SSE2` :disabled:`SSE3` :disabled:`SSSE3` :disabled:`SSE41` :disabled:`POPCNT` :disabled:`SSE42` :disabled:`AVX` - :disabled:`FMA4` :disabled:`SSE` :disabled:`SSE2` :disabled:`SSE3` :disabled:`SSSE3` :disabled:`SSE41` :disabled:`POPCNT` :disabled:`SSE42` :disabled:`AVX` - :disabled:`AVX512_SPR` :disabled:`SSE` :disabled:`SSE2` :disabled:`SSE3` :disabled:`SSSE3` :disabled:`SSE41` :disabled:`POPCNT` :disabled:`SSE42` :disabled:`AVX` :disabled:`F16C` :disabled:`FMA3` :disabled:`AVX2` :disabled:`AVX512F` :disabled:`AVX512CD` :disabled:`AVX512_SKX` :disabled:`AVX512_CLX` :disabled:`AVX512_CNL` :disabled:`AVX512_ICL` :disabled:`AVX512FP16` - ====================== 
================================================================================================================================================================================================================================================================================================================================== ====================== - -On x86::Microsoft Visual C/C++ -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. table:: - :align: left - - ====================== ================================================================================================================================================================================================================================================================================================================================== ============================================================================= - Name Implies Gathers - ====================== ================================================================================================================================================================================================================================================================================================================================== ============================================================================= - FMA3 SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C :enabled:`AVX2` - AVX2 SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C :enabled:`FMA3` - AVX512F SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C FMA3 AVX2 :enabled:`AVX512CD` :enabled:`AVX512_SKX` - AVX512CD SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C FMA3 AVX2 AVX512F :enabled:`AVX512_SKX` - :disabled:`AVX512_KNL` :disabled:`SSE` :disabled:`SSE2` :disabled:`SSE3` :disabled:`SSSE3` :disabled:`SSE41` :disabled:`POPCNT` :disabled:`SSE42` :disabled:`AVX` :disabled:`F16C` :disabled:`FMA3` :disabled:`AVX2` :disabled:`AVX512F` :disabled:`AVX512CD` :disabled:`AVX512ER` :disabled:`AVX512PF` - :disabled:`AVX512_KNM` 
:disabled:`SSE` :disabled:`SSE2` :disabled:`SSE3` :disabled:`SSSE3` :disabled:`SSE41` :disabled:`POPCNT` :disabled:`SSE42` :disabled:`AVX` :disabled:`F16C` :disabled:`FMA3` :disabled:`AVX2` :disabled:`AVX512F` :disabled:`AVX512CD` :disabled:`AVX512_KNL` :disabled:`AVX5124FMAPS` :disabled:`AVX5124VNNIW` :disabled:`AVX512VPOPCNTDQ` - :disabled:`AVX512_SPR` :disabled:`SSE` :disabled:`SSE2` :disabled:`SSE3` :disabled:`SSSE3` :disabled:`SSE41` :disabled:`POPCNT` :disabled:`SSE42` :disabled:`AVX` :disabled:`F16C` :disabled:`FMA3` :disabled:`AVX2` :disabled:`AVX512F` :disabled:`AVX512CD` :disabled:`AVX512_SKX` :disabled:`AVX512_CLX` :disabled:`AVX512_CNL` :disabled:`AVX512_ICL` :disabled:`AVX512FP16` - ====================== ================================================================================================================================================================================================================================================================================================================================== ============================================================================= - diff --git a/doc/source/reference/simd/generated_tables/cpu_features.inc b/doc/source/reference/simd/generated_tables/cpu_features.inc deleted file mode 100644 index 603370e21545..000000000000 --- a/doc/source/reference/simd/generated_tables/cpu_features.inc +++ /dev/null @@ -1,109 +0,0 @@ -.. generated via /numpy/numpy/./doc/source/reference/simd/gen_features.py - -On x86 -~~~~~~ -.. 
table:: - :align: left - - ============== ========================================================================================================================================================================================== ===================================================== - Name Implies Gathers - ============== ========================================================================================================================================================================================== ===================================================== - ``SSE`` ``SSE2`` - ``SSE2`` ``SSE`` - ``SSE3`` ``SSE`` ``SSE2`` - ``SSSE3`` ``SSE`` ``SSE2`` ``SSE3`` - ``SSE41`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` - ``POPCNT`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` - ``SSE42`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` - ``AVX`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` - ``XOP`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` - ``FMA4`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` - ``F16C`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` - ``FMA3`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` - ``AVX2`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` - ``AVX512F`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` - ``AVX512CD`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` ``AVX512F`` - ``AVX512_KNL`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` ``AVX512F`` ``AVX512CD`` ``AVX512ER`` ``AVX512PF`` - ``AVX512_KNM`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` ``AVX512F`` ``AVX512CD`` ``AVX512_KNL`` ``AVX5124FMAPS`` ``AVX5124VNNIW`` ``AVX512VPOPCNTDQ`` - 
``AVX512_SKX`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` ``AVX512F`` ``AVX512CD`` ``AVX512VL`` ``AVX512BW`` ``AVX512DQ`` - ``AVX512_CLX`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` ``AVX512F`` ``AVX512CD`` ``AVX512_SKX`` ``AVX512VNNI`` - ``AVX512_CNL`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` ``AVX512F`` ``AVX512CD`` ``AVX512_SKX`` ``AVX512IFMA`` ``AVX512VBMI`` - ``AVX512_ICL`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` ``AVX512F`` ``AVX512CD`` ``AVX512_SKX`` ``AVX512_CLX`` ``AVX512_CNL`` ``AVX512VBMI2`` ``AVX512BITALG`` ``AVX512VPOPCNTDQ`` - ``AVX512_SPR`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` ``AVX512F`` ``AVX512CD`` ``AVX512_SKX`` ``AVX512_CLX`` ``AVX512_CNL`` ``AVX512_ICL`` ``AVX512FP16`` - ============== ========================================================================================================================================================================================== ===================================================== - -On IBM/POWER big-endian -~~~~~~~~~~~~~~~~~~~~~~~ -.. table:: - :align: left - - ======== ========================= - Name Implies - ======== ========================= - ``VSX`` - ``VSX2`` ``VSX`` - ``VSX3`` ``VSX`` ``VSX2`` - ``VSX4`` ``VSX`` ``VSX2`` ``VSX3`` - ======== ========================= - -On IBM/POWER little-endian -~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. table:: - :align: left - - ======== ========================= - Name Implies - ======== ========================= - ``VSX`` ``VSX2`` - ``VSX2`` ``VSX`` - ``VSX3`` ``VSX`` ``VSX2`` - ``VSX4`` ``VSX`` ``VSX2`` ``VSX3`` - ======== ========================= - -On ARMv7/A32 -~~~~~~~~~~~~ -.. 
table:: - :align: left - - ============== =========================================================== - Name Implies - ============== =========================================================== - ``NEON`` - ``NEON_FP16`` ``NEON`` - ``NEON_VFPV4`` ``NEON`` ``NEON_FP16`` - ``ASIMD`` ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` - ``ASIMDHP`` ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` - ``ASIMDDP`` ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` - ``ASIMDFHM`` ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` ``ASIMDHP`` - ============== =========================================================== - -On ARMv8/A64 -~~~~~~~~~~~~ -.. table:: - :align: left - - ============== =========================================================== - Name Implies - ============== =========================================================== - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` - ``NEON_FP16`` ``NEON`` ``NEON_VFPV4`` ``ASIMD`` - ``NEON_VFPV4`` ``NEON`` ``NEON_FP16`` ``ASIMD`` - ``ASIMD`` ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` - ``ASIMDHP`` ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` - ``ASIMDDP`` ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` - ``ASIMDFHM`` ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` ``ASIMDHP`` - ============== =========================================================== - -On IBM/ZSYSTEM(S390X) -~~~~~~~~~~~~~~~~~~~~~ -.. 
table:: - :align: left - - ======== ============== - Name Implies - ======== ============== - ``VX`` - ``VXE`` ``VX`` - ``VXE2`` ``VX`` ``VXE`` - ======== ============== - diff --git a/doc/source/reference/simd/how-it-works.rst b/doc/source/reference/simd/how-it-works.rst index 3704efa66147..67fe519ca17d 100644 --- a/doc/source/reference/simd/how-it-works.rst +++ b/doc/source/reference/simd/how-it-works.rst @@ -201,7 +201,7 @@ through ``--cpu-dispatch``, but it can also represent other options such as: #define NPY__CPU_TARGET_AVX2 #define NPY__CPU_TARGET_AVX512F // our dispatch-able source - #include "/the/absuolate/path/of/hello.dispatch.c" + #include "/the/absolute/path/of/hello.dispatch.c" - **(D) Dispatch-able configuration header**: The infrastructure generates a config header for each dispatch-able source, this header @@ -234,7 +234,7 @@ through ``--cpu-dispatch``, but it can also represent other options such as: // the additional optimizations, so it could be SSE42 or AVX512F #define CURRENT_TARGET(X) NPY_CAT(NPY_CAT(X, _), NPY__CPU_TARGET_CURRENT) #endif - // Macro 'CURRENT_TARGET' adding the current target as suffux to the exported symbols, + // Macro 'CURRENT_TARGET' adding the current target as suffix to the exported symbols, // to avoid linking duplications, NumPy already has a macro called // 'NPY_CPU_DISPATCH_CURFX' similar to it, located at // numpy/numpy/_core/src/common/npy_cpu_dispatch.h diff --git a/doc/source/reference/simd/index.rst b/doc/source/reference/simd/index.rst index 8005b9054826..fccef8054a24 100644 --- a/doc/source/reference/simd/index.rst +++ b/doc/source/reference/simd/index.rst @@ -32,12 +32,10 @@ The optimization process in NumPy is carried out in three layers: .. note:: NumPy community had a deep discussion before implementing this work, - please check `NEP-38`_ for more clarification. + please check :external+neps:doc:`nep-0038-SIMD-optimizations` for more + clarification. .. toctree:: build-options how-it-works - -.. 
_`NEP-38`: https://numpy.org/neps/nep-0038-SIMD-optimizations.html - diff --git a/doc/source/reference/simd/log_example.txt b/doc/source/reference/simd/log_example.txt index 79c5c6c253ca..c71306d42aae 100644 --- a/doc/source/reference/simd/log_example.txt +++ b/doc/source/reference/simd/log_example.txt @@ -1,79 +1,64 @@ -########### EXT COMPILER OPTIMIZATION ########### -Platform : - Architecture: x64 - Compiler : gcc +Test features "X86_V2" : Supported +Test features "X86_V3" : Supported +Test features "X86_V4" : Supported +Test features "AVX512_ICL" : Supported +Test features "AVX512_SPR" : Supported +Configuring npy_cpu_dispatch_config.h using configuration +Message: +CPU Optimization Options + baseline: + Requested : min + Enabled : X86_V2 + dispatch: + Requested : max + Enabled : X86_V3 X86_V4 AVX512_ICL AVX512_SPR -CPU baseline : - Requested : 'min' - Enabled : SSE SSE2 SSE3 - Flags : -msse -msse2 -msse3 - Extra checks: none - -CPU dispatch : - Requested : 'max -xop -fma4' - Enabled : SSSE3 SSE41 POPCNT SSE42 AVX F16C FMA3 AVX2 AVX512F AVX512CD AVX512_KNL AVX512_KNM AVX512_SKX AVX512_CLX AVX512_CNL AVX512_ICL - Generated : - : - SSE41 : SSE SSE2 SSE3 SSSE3 - Flags : -msse -msse2 -msse3 -mssse3 -msse4.1 - Extra checks: none - Detect : SSE SSE2 SSE3 SSSE3 SSE41 - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_arithmetic.dispatch.c - : numpy/_core/src/umath/_umath_tests.dispatch.c - : - SSE42 : SSE SSE2 SSE3 SSSE3 SSE41 POPCNT - Flags : -msse -msse2 -msse3 -mssse3 -msse4.1 -mpopcnt -msse4.2 - Extra checks: none - Detect : SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 - : build/src.linux-x86_64-3.9/numpy/_core/src/_simd/_simd.dispatch.c - : - AVX2 : SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C - Flags : -msse -msse2 -msse3 -mssse3 -msse4.1 -mpopcnt -msse4.2 -mavx -mf16c -mavx2 - Extra checks: none - Detect : AVX F16C AVX2 - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_arithm_fp.dispatch.c - : 
build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_arithmetic.dispatch.c - : numpy/_core/src/umath/_umath_tests.dispatch.c - : - (FMA3 AVX2) : SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C - Flags : -msse -msse2 -msse3 -mssse3 -msse4.1 -mpopcnt -msse4.2 -mavx -mf16c -mfma -mavx2 - Extra checks: none - Detect : AVX F16C FMA3 AVX2 - : build/src.linux-x86_64-3.9/numpy/_core/src/_simd/_simd.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_exponent_log.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_trigonometric.dispatch.c - : - AVX512F : SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C FMA3 AVX2 - Flags : -msse -msse2 -msse3 -mssse3 -msse4.1 -mpopcnt -msse4.2 -mavx -mf16c -mfma -mavx2 -mavx512f - Extra checks: AVX512F_REDUCE - Detect : AVX512F - : build/src.linux-x86_64-3.9/numpy/_core/src/_simd/_simd.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_arithm_fp.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_arithmetic.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_exponent_log.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_trigonometric.dispatch.c - : - AVX512_SKX : SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C FMA3 AVX2 AVX512F AVX512CD - Flags : -msse -msse2 -msse3 -mssse3 -msse4.1 -mpopcnt -msse4.2 -mavx -mf16c -mfma -mavx2 -mavx512f -mavx512cd -mavx512vl -mavx512bw -mavx512dq - Extra checks: AVX512BW_MASK AVX512DQ_MASK - Detect : AVX512_SKX - : build/src.linux-x86_64-3.9/numpy/_core/src/_simd/_simd.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_arithmetic.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_exponent_log.dispatch.c -CCompilerOpt.cache_flush[804] : write cache to path -> /home/seiko/work/repos/numpy/build/temp.linux-x86_64-3.9/ccompiler_opt_cache_ext.py - -########### CLIB COMPILER OPTIMIZATION ########### -Platform : - Architecture: x64 - Compiler : gcc - -CPU baseline : - 
Requested : 'min' - Enabled : SSE SSE2 SSE3 - Flags : -msse -msse2 -msse3 - Extra checks: none - -CPU dispatch : - Requested : 'max -xop -fma4' - Enabled : SSSE3 SSE41 POPCNT SSE42 AVX F16C FMA3 AVX2 AVX512F AVX512CD AVX512_KNL AVX512_KNM AVX512_SKX AVX512_CLX AVX512_CNL AVX512_ICL - Generated : none +Generating multi-targets for "_umath_tests.dispatch.h" + Enabled targets: X86_V3, baseline +Generating multi-targets for "argfunc.dispatch.h" + Enabled targets: X86_V4, X86_V3, baseline +Generating multi-targets for "x86_simd_argsort.dispatch.h" + Enabled targets: X86_V4, X86_V3 +Generating multi-targets for "x86_simd_qsort.dispatch.h" + Enabled targets: X86_V4, X86_V3 +Generating multi-targets for "x86_simd_qsort_16bit.dispatch.h" + Enabled targets: AVX512_SPR, AVX512_ICL +Generating multi-targets for "highway_qsort.dispatch.h" + Enabled targets: +Generating multi-targets for "highway_qsort_16bit.dispatch.h" + Enabled targets: +Generating multi-targets for "loops_arithm_fp.dispatch.h" + Enabled targets: X86_V3, baseline +Generating multi-targets for "loops_arithmetic.dispatch.h" + Enabled targets: X86_V4, X86_V3, baseline +Generating multi-targets for "loops_comparison.dispatch.h" + Enabled targets: X86_V4, X86_V3, baseline +Generating multi-targets for "loops_exponent_log.dispatch.h" + Enabled targets: X86_V4, X86_V3, baseline +Generating multi-targets for "loops_hyperbolic.dispatch.h" + Enabled targets: X86_V4, X86_V3, baseline +Generating multi-targets for "loops_logical.dispatch.h" + Enabled targets: X86_V4, X86_V3, baseline +Generating multi-targets for "loops_minmax.dispatch.h" + Enabled targets: X86_V4, X86_V3, baseline +Generating multi-targets for "loops_modulo.dispatch.h" + Enabled targets: baseline +Generating multi-targets for "loops_trigonometric.dispatch.h" + Enabled targets: X86_V4, X86_V3, baseline +Generating multi-targets for "loops_umath_fp.dispatch.h" + Enabled targets: X86_V4, baseline +Generating multi-targets for "loops_unary.dispatch.h" + 
Enabled targets: X86_V4, baseline +Generating multi-targets for "loops_unary_fp.dispatch.h" + Enabled targets: baseline +Generating multi-targets for "loops_unary_fp_le.dispatch.h" + Enabled targets: baseline +Generating multi-targets for "loops_unary_complex.dispatch.h" + Enabled targets: X86_V3, baseline +Generating multi-targets for "loops_autovec.dispatch.h" + Enabled targets: X86_V3, baseline +Generating multi-targets for "loops_half.dispatch.h" + Enabled targets: AVX512_SPR, X86_V4, baseline +WARNING: Project targets '>=1.5.2' but uses feature deprecated since '1.3.0': Source file src/umath/svml/linux/avx512/svml_z0_acos_d_la.s in the 'objects' kwarg is not an object.. +Generating multi-targets for "_simd.dispatch.h" + Enabled targets: X86_V3, X86_V4, baseline diff --git a/doc/source/reference/thread_safety.rst b/doc/source/reference/thread_safety.rst new file mode 100644 index 000000000000..b07419259690 --- /dev/null +++ b/doc/source/reference/thread_safety.rst @@ -0,0 +1,78 @@ +.. _thread_safety: + +************* +Thread Safety +************* + +NumPy supports use in a multithreaded context via the `threading` module in the +standard library. Many NumPy operations release the :term:`python:GIL`, so unlike many +situations in Python, it is possible to improve parallel performance by +exploiting multithreaded parallelism in Python. + +The easiest performance gains happen when each worker thread owns its own array +or set of array objects, with no data directly shared between threads. Because +NumPy releases the GIL for many low-level operations, threads that spend most of +the time in low-level code will run in parallel. + +It is possible to share NumPy arrays between threads, but extreme care must be +taken to avoid creating thread safety issues when mutating arrays that are +shared between multiple threads. 
If two threads simultaneously read from and +write to the same array, they will at best produce inconsistent, racey results that +are not reproducible, let alone correct. It is also possible to crash the Python +interpreter by, for example, resizing an array while another thread is reading +from it to compute a ufunc operation. + +In the future, we may add locking to :class:`~numpy.ndarray` to make writing multithreaded +algorithms using NumPy arrays safer, but for now we suggest focusing on +read-only access of arrays that are shared between threads, or adding your own +locking if you need to mutation and multithreading. + +Note that operations that *do not* release the GIL will see no performance gains +from use of the `threading` module, and instead might be better served with +`multiprocessing`. In particular, operations on arrays with ``dtype=np.object_`` +do not release the GIL. + +Context-local state +------------------- + +NumPy maintains some state for ufuncs context-local basis, which means each +thread in a multithreaded program or task in an asyncio program has its own +independent configuration of the `numpy.errstate` (see +:doc:`/reference/routines.err`), and of :ref:`text_formatting_options`. + +You can update state stored in a context variable by entering a context manager. +As soon as the context manager exits, the state will be reset to its value +before entering the context manager. + +Free-threaded Python +-------------------- + +.. versionadded:: 2.1 + +Starting with NumPy 2.1 and CPython 3.13, NumPy also has experimental support +for python runtimes with the GIL disabled. See +https://py-free-threading.github.io for more information about installing and +using :py:term:`free-threaded ` Python, as well as +information about supporting it in libraries that depend on NumPy. 
+ +Because free-threaded Python does not have a +global interpreter lock to serialize access to Python objects, there are more +opportunities for threads to mutate shared state and create thread safety +issues. In addition to the limitations about locking of the +:class:`~numpy.ndarray` object noted above, this also means that arrays with +``dtype=np.object_`` are not protected by the GIL, creating data races for python +objects that are not possible outside free-threaded python. + +C-API Threading Support +----------------------- + +For developers writing C extensions that interact with NumPy, several parts of +the :doc:`C-API array documentation ` provide detailed +information about multithreading considerations. + +See Also +-------- + +* :doc:`/reference/random/multithreading` - Practical example of using NumPy's + random number generators in a multithreaded context with + :mod:`concurrent.futures`. diff --git a/doc/source/reference/ufuncs.rst b/doc/source/reference/ufuncs.rst index 6df29817b0d8..cac15b66cf14 100644 --- a/doc/source/reference/ufuncs.rst +++ b/doc/source/reference/ufuncs.rst @@ -19,6 +19,10 @@ that takes a fixed number of specific inputs and produces a fixed number of specific outputs. For detailed information on universal functions, see :ref:`ufuncs-basics`. + +There are also :ref:`generalized ufuncs ` which +are functions over vectors (or arrays) instead of only single-element scalars. + :class:`ufunc` ============== @@ -40,31 +44,25 @@ advanced usage and will not typically be used. .. rubric:: *out* -.. versionadded:: 1.6 - The first output can be provided as either a positional or a keyword parameter. Keyword 'out' arguments are incompatible with positional ones. -.. versionadded:: 1.10 - The 'out' keyword argument is expected to be a tuple with one entry per output (which can be None for arrays to be allocated by the ufunc). For ufuncs with a single output, passing a single array (instead of a tuple holding a single array) is also valid. 
-Passing a single array in the 'out' keyword argument to a ufunc with -multiple outputs is deprecated, and will raise a warning in numpy 1.10, -and an error in a future release. +If 'out' is None (the default), a uninitialized output array is created, +which will be filled in the ufunc. At the end, this array is returned +unless it is zero-dimensional, in which case it is converted to a scalar; +this conversion can be avoided by passing in ``out=...``. This can also be +spelled `out=Ellipsis` if you think that is clearer. -If 'out' is None (the default), a uninitialized return array is created. -The output array is then filled with the results of the ufunc in the places -that the broadcast 'where' is True. If 'where' is the scalar True (the -default), then this corresponds to the entire output being filled. -Note that outputs not explicitly filled are left with their -uninitialized values. - -.. versionadded:: 1.13 +Note that the output is filled only in the places that the broadcast +'where' is True. If 'where' is the scalar True (the default), then this +corresponds to all elements of the output, but in other cases, the +elements not explicitly filled are left with their uninitialized values. Operations where ufunc input and output operands have memory overlap are defined to be the same as for equivalent operations where there @@ -79,8 +77,6 @@ can be deduced copies are not necessary. As an example, .. rubric:: *where* -.. versionadded:: 1.7 - Accepts a boolean array which is broadcast together with the operands. Values of True indicate to calculate the ufunc at that position, values of False indicate to leave the value in the output alone. This argument @@ -91,8 +87,6 @@ will leave those values **uninitialized**. .. rubric:: *axes* -.. versionadded:: 1.15 - A list of tuples with indices of axes a generalized ufunc should operate on. 
For instance, for a signature of ``(i,j),(j,k)->(i,k)`` appropriate for matrix multiplication, the base elements are two-dimensional matrices @@ -105,8 +99,6 @@ tuples can be omitted. .. rubric:: *axis* -.. versionadded:: 1.15 - A single axis over which a generalized ufunc should operate. This is a short-cut for ufuncs that operate over a single, shared core dimension, equivalent to passing in ``axes`` with entries of ``(axis,)`` for each @@ -116,8 +108,6 @@ for a signature ``(i),(i)->()``, it is equivalent to passing in .. rubric:: *keepdims* -.. versionadded:: 1.15 - If this is set to `True`, axes which are reduced over will be left in the result as a dimension with size one, so that the result will broadcast correctly against the inputs. This option can only be used for generalized @@ -128,8 +118,6 @@ the dimensions in the output can be controlled with ``axes`` and ``axis``. .. rubric:: *casting* -.. versionadded:: 1.6 - May be 'no', 'equiv', 'safe', 'same_kind', or 'unsafe'. See :func:`can_cast` for explanations of the parameter values. @@ -142,8 +130,6 @@ onwards, the default is 'same_kind'. .. rubric:: *order* -.. versionadded:: 1.6 - Specifies the calculation iteration order/memory layout of the output array. Defaults to 'K'. 'C' means the output should be C-contiguous, 'F' means F-contiguous, 'A' means F-contiguous if the inputs are F-contiguous and @@ -152,8 +138,6 @@ the element ordering of the inputs as closely as possible. .. rubric:: *dtype* -.. versionadded:: 1.6 - Overrides the DType of the output arrays the same way as the *signature*. This should ensure a matching precision of the calculation. The exact calculation DTypes chosen may depend on the ufunc and the inputs may be @@ -161,8 +145,6 @@ cast to this DType to perform the calculation. .. rubric:: *subok* -.. versionadded:: 1.6 - Defaults to true. If set to false, the output will always be a strict array, not a subtype. @@ -205,14 +187,17 @@ possess. None of the attributes can be set. 
pair: ufunc; attributes -============ ================================================================= -**__doc__** A docstring for each ufunc. The first part of the docstring is - dynamically generated from the number of outputs, the name, and - the number of inputs. The second part of the docstring is - provided at creation time and stored with the ufunc. +================= ================================================================= +**__doc__** A docstring for each ufunc. The first part of the docstring is + dynamically generated from the number of outputs, the name, and + the number of inputs. The second part of the docstring is + provided at creation time and stored with the ufunc. + +**__name__** The name of the ufunc. -**__name__** The name of the ufunc. -============ ================================================================= +**__signature__** The call signature of the ufunc, as an :class:`inspect.Signature` + object. +================= ================================================================= .. autosummary:: :toctree: generated/ diff --git a/doc/source/release.rst b/doc/source/release.rst index 5226ef89a764..c0207894ac02 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -5,7 +5,29 @@ Release notes .. toctree:: :maxdepth: 2 + 2.5.0 + 2.4.2 + 2.4.1 + 2.4.0 + 2.3.5 + 2.3.4 + 2.3.3 + 2.3.2 + 2.3.1 + 2.3.0 + 2.2.6 + 2.2.5 + 2.2.4 + 2.2.3 + 2.2.2 + 2.2.1 + 2.2.0 + 2.1.3 + 2.1.2 + 2.1.1 2.1.0 + 2.0.2 + 2.0.1 2.0.0 1.26.4 1.26.3 diff --git a/doc/source/release/1.10.0-notes.rst b/doc/source/release/1.10.0-notes.rst index 88062e4632e9..4a2c4cc5e836 100644 --- a/doc/source/release/1.10.0-notes.rst +++ b/doc/source/release/1.10.0-notes.rst @@ -187,7 +187,7 @@ New Features Reading extra flags from site.cfg --------------------------------- Previously customization of compilation of dependency libraries and numpy -itself was only accomblishable via code changes in the distutils package. 
+itself was only accomplishable via code changes in the distutils package. Now numpy.distutils reads in the following extra flags from each group of the *site.cfg*: diff --git a/doc/source/release/1.11.0-notes.rst b/doc/source/release/1.11.0-notes.rst index 36cd1d65a266..4700e37203ce 100644 --- a/doc/source/release/1.11.0-notes.rst +++ b/doc/source/release/1.11.0-notes.rst @@ -27,11 +27,11 @@ Details of these improvements can be found below. Build System Changes ==================== -* Numpy now uses ``setuptools`` for its builds instead of plain distutils. +* NumPy now uses ``setuptools`` for its builds instead of plain distutils. This fixes usage of ``install_requires='numpy'`` in the ``setup.py`` files of - projects that depend on Numpy (see gh-6551). It potentially affects the way - that build/install methods for Numpy itself behave though. Please report any - unexpected behavior on the Numpy issue tracker. + projects that depend on NumPy (see gh-6551). It potentially affects the way + that build/install methods for NumPy itself behave though. Please report any + unexpected behavior on the NumPy issue tracker. * Bento build support and related files have been removed. * Single file build support and related files have been removed. @@ -39,7 +39,7 @@ Build System Changes Future Changes ============== -The following changes are scheduled for Numpy 1.12.0. +The following changes are scheduled for NumPy 1.12.0. * Support for Python 2.6, 3.2, and 3.3 will be dropped. * Relaxed stride checking will become the default. See the 1.8.0 release @@ -61,7 +61,7 @@ The following changes are scheduled for Numpy 1.12.0. In a future release the following changes will be made. * The ``rand`` function exposed in ``numpy.testing`` will be removed. That - function is left over from early Numpy and was implemented using the + function is left over from early NumPy and was implemented using the Python random module. 
The random number generators from ``numpy.random`` should be used instead. * The ``ndarray.view`` method will only allow c_contiguous arrays to be @@ -124,7 +124,7 @@ non-integers for degree specification. *np.dot* now raises ``TypeError`` instead of ``ValueError`` ----------------------------------------------------------- -This behaviour mimics that of other functions such as ``np.inner``. If the two +This behavior mimics that of other functions such as ``np.inner``. If the two arguments cannot be cast to a common type, it could have raised a ``TypeError`` or ``ValueError`` depending on their order. Now, ``np.dot`` will now always raise a ``TypeError``. @@ -194,7 +194,7 @@ New Features * ``f2py.compile`` has a new ``extension`` keyword parameter that allows the fortran extension to be specified for generated temp files. For instance, - the files can be specifies to be ``*.f90``. The ``verbose`` argument is + the files can be specified to be ``*.f90``. The ``verbose`` argument is also activated, it was previously ignored. * A ``dtype`` parameter has been added to ``np.random.randint`` @@ -205,7 +205,7 @@ New Features - ``np.int16``, ``np.uint16``, - ``np.int32``, ``np.uint32``, - ``np.int64``, ``np.uint64``, - - ``np.int_ ``, ``np.intp`` + - ``np.int_``, ``np.intp`` The specification is by precision rather than by C type. Hence, on some platforms ``np.int64`` may be a ``long`` instead of ``long long`` even if @@ -254,7 +254,7 @@ Memory and speed improvements for masked arrays ----------------------------------------------- Creating a masked array with ``mask=True`` (resp. ``mask=False``) now uses ``np.ones`` (resp. ``np.zeros``) to create the mask, which is faster and -avoid a big memory peak. Another optimization was done to avoid a memory +avoids a big memory peak. Another optimization was done to avoid a memory peak and useless computations when printing a masked array. 
``ndarray.tofile`` now uses fallocate on linux @@ -304,13 +304,13 @@ Instead, ``np.broadcast`` can be used in all cases. ``np.trace`` now respects array subclasses ------------------------------------------ -This behaviour mimics that of other functions such as ``np.diagonal`` and +This behavior mimics that of other functions such as ``np.diagonal`` and ensures, e.g., that for masked arrays ``np.trace(ma)`` and ``ma.trace()`` give the same result. ``np.dot`` now raises ``TypeError`` instead of ``ValueError`` ------------------------------------------------------------- -This behaviour mimics that of other functions such as ``np.inner``. If the two +This behavior is now consistent with other functions such as ``np.inner``. If the two arguments cannot be cast to a common type, it could have raised a ``TypeError`` or ``ValueError`` depending on their order. Now, ``np.dot`` will now always raise a ``TypeError``. diff --git a/doc/source/release/1.13.0-notes.rst b/doc/source/release/1.13.0-notes.rst index 3bfaf1ea5169..400c9553fbd3 100644 --- a/doc/source/release/1.13.0-notes.rst +++ b/doc/source/release/1.13.0-notes.rst @@ -136,7 +136,7 @@ implement ``__*slice__`` on the derived class, as ``__*item__`` will intercept these calls correctly. Any code that did implement these will work exactly as before. Code that -invokes``ndarray.__getslice__`` (e.g. through ``super(...).__getslice__``) will +invokes ``ndarray.__getslice__`` (e.g. through ``super(...).__getslice__``) will now issue a DeprecationWarning - ``.__getitem__(slice(start, end))`` should be used instead. diff --git a/doc/source/release/1.14.0-notes.rst b/doc/source/release/1.14.0-notes.rst index 68040b470caa..055a933291b9 100644 --- a/doc/source/release/1.14.0-notes.rst +++ b/doc/source/release/1.14.0-notes.rst @@ -409,8 +409,8 @@ This new default changes the float output relative to numpy 1.13. The old behavior can be obtained in 1.13 "legacy" printing mode, see compatibility notes above. 
-``hermitian`` option added to``np.linalg.matrix_rank`` ------------------------------------------------------- +``hermitian`` option added to ``np.linalg.matrix_rank`` +------------------------------------------------------- The new ``hermitian`` option allows choosing between standard SVD based matrix rank calculation and the more efficient eigenvalue based method for symmetric/hermitian matrices. diff --git a/doc/source/release/1.15.0-notes.rst b/doc/source/release/1.15.0-notes.rst index e84386f0fa5d..7aa85d167d29 100644 --- a/doc/source/release/1.15.0-notes.rst +++ b/doc/source/release/1.15.0-notes.rst @@ -213,7 +213,7 @@ C API changes New functions ``npy_get_floatstatus_barrier`` and ``npy_clear_floatstatus_barrier`` ----------------------------------------------------------------------------------- Functions ``npy_get_floatstatus_barrier`` and ``npy_clear_floatstatus_barrier`` -have been added and should be used in place of the ``npy_get_floatstatus``and +have been added and should be used in place of the ``npy_get_floatstatus`` and ``npy_clear_status`` functions. Optimizing compilers like GCC 8.1 and Clang were rearranging the order of operations when the previous functions were used in the ufunc SIMD functions, resulting in the floatstatus flags being checked @@ -326,8 +326,8 @@ passed explicitly, and are not yet computed automatically. No longer does an IQR of 0 result in ``n_bins=1``, rather the number of bins chosen is related to the data size in this situation. -The edges returned by `histogram`` and ``histogramdd`` now match the data float type ------------------------------------------------------------------------------------- +The edges returned by ``histogram`` and ``histogramdd`` now match the data float type +------------------------------------------------------------------------------------- When passed ``np.float16``, ``np.float32``, or ``np.longdouble`` data, the returned edges are now of the same dtype. 
Previously, ``histogram`` would only return the same type if explicit bins were given, and ``histogram`` would diff --git a/doc/source/release/1.16.0-notes.rst b/doc/source/release/1.16.0-notes.rst index 07e06ca6e043..7a387629fe46 100644 --- a/doc/source/release/1.16.0-notes.rst +++ b/doc/source/release/1.16.0-notes.rst @@ -271,7 +271,7 @@ via the services of shippable.com. Appending to build flags ------------------------ -`numpy.distutils` has always overridden rather than appended to `LDFLAGS` and +``numpy.distutils`` has always overridden rather than appended to `LDFLAGS` and other similar such environment variables for compiling Fortran extensions. Now, if the `NPY_DISTUTILS_APPEND_FLAGS` environment variable is set to 1, the behavior will be appending. This applied to: `LDFLAGS`, `F77FLAGS`, diff --git a/doc/source/release/1.18.0-notes.rst b/doc/source/release/1.18.0-notes.rst index 15e0ad77f5d1..43d2cdedf4b6 100644 --- a/doc/source/release/1.18.0-notes.rst +++ b/doc/source/release/1.18.0-notes.rst @@ -202,9 +202,9 @@ exception will require adaptation, and code that mistakenly called Moved modules in ``numpy.random`` --------------------------------- As part of the API cleanup, the submodules in ``numpy.random`` -``bit_generator``, ``philox``, ``pcg64``, ``sfc64, ``common``, ``generator``, +``bit_generator``, ``philox``, ``pcg64``, ``sfc64``, ``common``, ``generator``, and ``bounded_integers`` were moved to ``_bit_generator``, ``_philox``, -``_pcg64``, ``_sfc64, ``_common``, ``_generator``, and ``_bounded_integers`` +``_pcg64``, ``_sfc64``, ``_common``, ``_generator``, and ``_bounded_integers`` respectively to indicate that they are not part of the public interface. (`gh-14608 `__) @@ -350,7 +350,7 @@ and load will be addressed in a future release. 
``numpy.distutils`` append behavior changed for LDFLAGS and similar ------------------------------------------------------------------- -`numpy.distutils` has always overridden rather than appended to ``LDFLAGS`` and +``numpy.distutils`` has always overridden rather than appended to ``LDFLAGS`` and other similar such environment variables for compiling Fortran extensions. Now the default behavior has changed to appending - which is the expected behavior in most situations. To preserve the old (overwriting) behavior, set the diff --git a/doc/source/release/1.20.0-notes.rst b/doc/source/release/1.20.0-notes.rst index a2276ac5016d..298d417bb0c2 100644 --- a/doc/source/release/1.20.0-notes.rst +++ b/doc/source/release/1.20.0-notes.rst @@ -735,7 +735,7 @@ checking. Negation of user defined BLAS/LAPACK detection order ---------------------------------------------------- -`~numpy.distutils` allows negation of libraries when determining BLAS/LAPACK +``~numpy.distutils`` allows negation of libraries when determining BLAS/LAPACK libraries. This may be used to remove an item from the library resolution phase, i.e. to disallow NetLIB libraries one could do: diff --git a/doc/source/release/1.21.5-notes.rst b/doc/source/release/1.21.5-notes.rst index c69d26771268..b3e810b51c06 100644 --- a/doc/source/release/1.21.5-notes.rst +++ b/doc/source/release/1.21.5-notes.rst @@ -33,7 +33,7 @@ A total of 11 pull requests were merged for this release. * `#20462 `__: BUG: Fix float16 einsum fastpaths using wrong tempvar * `#20463 `__: BUG, DIST: Print os error message when the executable not exist * `#20464 `__: BLD: Verify the ability to compile C++ sources before initiating... 
-* `#20465 `__: BUG: Force ``npymath` ` to respect ``npy_longdouble`` +* `#20465 `__: BUG: Force ``npymath`` to respect ``npy_longdouble`` * `#20466 `__: BUG: Fix failure to create aligned, empty structured dtype * `#20467 `__: ENH: provide a convenience function to replace npy_load_module * `#20495 `__: MAINT: update wheel to version that supports python3.10 diff --git a/doc/source/release/2.0.0-notes.rst b/doc/source/release/2.0.0-notes.rst index 633336765928..a0763048a59f 100644 --- a/doc/source/release/2.0.0-notes.rst +++ b/doc/source/release/2.0.0-notes.rst @@ -4,18 +4,10 @@ NumPy 2.0.0 Release Notes ========================= -.. note:: - - The release of 2.0 is in progress and the current release overview and - highlights are still in a draft state. However, the highlights should - already list the most significant changes detailed in the full notes below, - and those full notes should be complete (if not copy-edited well enough - yet). - -NumPy 2.0.0 is the first major release since 2006. It is the result of X months -of development since the last feature release by Y contributors, and contains a -large amount of exciting new features as well as a large amount of changes to -both the Python and C APIs. +NumPy 2.0.0 is the first major release since 2006. It is the result of 11 +months of development since the last feature release and is the work of 212 +contributors spread over 1078 pull requests. It contains a large number of +exciting new features as well as changes to both the Python and C APIs. 
This major release includes breaking changes that could not happen in a regular minor (feature) release - including an ABI break, changes to type promotion @@ -50,10 +42,13 @@ Highlights of this release include: that are about 3 times smaller, - `numpy.char` fixed-length string operations have been accelerated by implementing ufuncs that also support `~numpy.dtypes.StringDType` in - addition to the the fixed-length string dtypes, + addition to the fixed-length string dtypes, - A new tracing and introspection API, `~numpy.lib.introspect.opt_func_info`, to determine which hardware-specific kernels are available and will be dispatched to. + - `numpy.save` now uses pickle protocol version 4 for saving arrays with + object dtype, which allows for pickle objects larger than 4GB and improves + saving speed by about 5% for large arrays. - Python API improvements: @@ -76,8 +71,8 @@ Highlights of this release include: - Improved behavior: - - Improvements to type promotion behavior was changed by adopting `NEP - 50 `_. This fixes many user surprises about promotions which + - Improvements to type promotion behavior was changed by adopting :ref:`NEP + 50 `. This fixes many user surprises about promotions which previously often depended on data values of input arrays rather than only their dtypes. Please see the NEP and the :ref:`numpy-2-migration-guide` for details as this change can lead to changes in output dtypes and lower @@ -88,7 +83,7 @@ Highlights of this release include: - Documentation: - - The reference guide navigation was signficantly improved, and there is now + - The reference guide navigation was significantly improved, and there is now documentation on NumPy's :ref:`module structure `, - The :ref:`building from source ` documentation was completely rewritten, @@ -112,7 +107,7 @@ API and behavior improvements and better future extensibility. This price is: 2. Breaking changes to the NumPy ABI. 
As a result, binaries of packages that use the NumPy C API and were built against a NumPy 1.xx release will not work with NumPy 2.0. On import, such packages will see an ``ImportError`` - with a message about binary incompatibiliy. + with a message about binary incompatibility. It is possible to build binaries against NumPy 2.0 that will work at runtime with both NumPy 2.0 and 1.x. See :ref:`numpy-2-abi-handling` for more details. @@ -206,7 +201,8 @@ NumPy 2.0 Python API removals * ``np.tracemalloc_domain`` is now only available from ``np.lib``. -* ``np.recfromcsv`` and ``recfromtxt`` are now only available from ``np.lib.npyio``. +* ``np.recfromcsv`` and ``np.recfromtxt`` were removed from the main namespace. + Use ``np.genfromtxt`` with comma delimiter instead. * ``np.issctype``, ``np.maximum_sctype``, ``np.obj2sctype``, ``np.sctype2char``, ``np.sctypes``, ``np.issubsctype`` were all removed from the @@ -255,9 +251,9 @@ NumPy 2.0 Python API removals (`gh-25911 `__) + ``__array_prepare__`` is removed -------------------------------- - UFuncs called ``__array_prepare__`` before running computations for normal ufunc calls (not generalized ufuncs, reductions, etc.). The function was also called instead of ``__array_wrap__`` on the @@ -276,6 +272,15 @@ Deprecations * ``np.compat`` has been deprecated, as Python 2 is no longer supported. +* ``numpy.int8`` and similar classes will no longer support conversion of + out of bounds python integers to integer arrays. For example, + conversion of 255 to int8 will not return -1. + ``numpy.iinfo(dtype)`` can be used to check the machine limits for data types. + For example, ``np.iinfo(np.uint16)`` returns min = 0 and max = 65535. + + ``np.array(value).astype(dtype)`` will give the desired result. + + * ``np.safe_eval`` has been deprecated. ``ast.literal_eval`` should be used instead. (`gh-23830 `__) @@ -298,7 +303,7 @@ Deprecations support for implementations not accepting all three are deprecated. 
Its signature should be ``__array_wrap__(self, arr, context=None, return_scalar=False)`` - (`gh-25408 `__) + (`gh-25409 `__) * Arrays of 2-dimensional vectors for ``np.cross`` have been deprecated. Use arrays of 3-dimensional vectors instead. @@ -316,9 +321,9 @@ Deprecations (`gh-24978 `__) -`numpy.fft` deprecations for n-D transforms with None values in arguments -------------------------------------------------------------------------- +``numpy.fft`` deprecations for n-D transforms with None values in arguments +--------------------------------------------------------------------------- Using ``fftn``, ``ifftn``, ``rfftn``, ``irfftn``, ``fft2``, ``ifft2``, ``rfft2`` or ``irfft2`` with the ``s`` parameter set to a value that is not ``None`` and the ``axes`` parameter set to ``None`` has been deprecated, in @@ -334,9 +339,9 @@ axis, the ``s`` argument can be omitted. (`gh-25495 `__) + ``np.linalg.lstsq`` now defaults to a new ``rcond`` value --------------------------------------------------------- - `~numpy.linalg.lstsq` now uses the new rcond value of the machine precision times ``max(M, N)``. Previously, the machine precision was used but a FutureWarning was given to notify that this change will happen eventually. @@ -400,7 +405,6 @@ Compatibility notes ``loadtxt`` and ``genfromtxt`` default encoding changed ------------------------------------------------------- - ``loadtxt`` and ``genfromtxt`` now both default to ``encoding=None`` which may mainly modify how ``converters`` work. These will now be passed ``str`` rather than ``bytes``. Pass the @@ -410,48 +414,39 @@ unicode strings rather than bytes. (`gh-25158 `__) + ``f2py`` compatibility notes ---------------------------- +* ``f2py`` will no longer accept ambiguous ``-m`` and ``.pyf`` CLI + combinations. When more than one ``.pyf`` file is passed, an error is + raised. When both ``-m`` and a ``.pyf`` is passed, a warning is emitted and + the ``-m`` provided name is ignored. 
-``f2py`` will no longer accept ambiguous ``-m`` and ``.pyf`` CLI combinations. -When more than one ``.pyf`` file is passed, an error is raised. When both ``-m`` -and a ``.pyf`` is passed, a warning is emitted and the ``-m`` provided name is -ignored. - -(`gh-25181 `__) - -The ``f2py.compile()`` helper has been removed because it leaked memory, has -been marked as experimental for several years now, and was implemented as a thin -``subprocess.run`` wrapper. It is also one of the test bottlenecks. See -`gh-25122 `_ for the full -rationale. It also used several ``np.distutils`` features which are too fragile -to be ported to work with ``meson``. + (`gh-25181 `__) -Users are urged to replace calls to ``f2py.compile`` with calls to -``subprocess.run("python", "-m", "numpy.f2py",...`` instead, and to use -environment variables to interact with ``meson``. `Native files -`_ are also an option. +* The ``f2py.compile()`` helper has been removed because it leaked memory, has + been marked as experimental for several years now, and was implemented as a + thin ``subprocess.run`` wrapper. It was also one of the test bottlenecks. See + `gh-25122 `_ for the full + rationale. It also used several ``np.distutils`` features which are too + fragile to be ported to work with ``meson``. -(`gh-25193 `__) +* Users are urged to replace calls to ``f2py.compile`` with calls to + ``subprocess.run("python", "-m", "numpy.f2py",...`` instead, and to use + environment variables to interact with ``meson``. `Native files + `_ are also an option. -``arange``'s ``start`` argument is positional-only --------------------------------------------------- -The first argument of ``arange`` is now positional only. This way, -specifying a ``start`` argument as a keyword, e.g. ``arange(start=0, stop=4)``, -raises a TypeError. Other behaviors, are unchanged so ``arange(stop=4)``, -``arange(2, stop=4)`` and so on, are still valid and have the same meaning as -before. 
+ (`gh-25193 `__) -(`gh-25336 `__) Minor changes in behavior of sorting functions ---------------------------------------------- - Due to algorithmic changes and use of SIMD code, sorting functions with methods that aren't stable may return slightly different results in 2.0.0 compared to 1.26.x. This includes the default method of `~numpy.argsort` and `~numpy.argpartition`. + Removed ambiguity when broadcasting in ``np.solve`` --------------------------------------------------- The broadcasting rules for ``np.solve(a, b)`` were ambiguous when ``b`` had 1 @@ -461,6 +456,7 @@ reconstructed by using ``np.solve(a, b[..., None])[..., 0]``. (`gh-25914 `__) + Modified representation for ``Polynomial`` ------------------------------------------ The representation method for `~numpy.polynomial.polynomial.Polynomial` was @@ -477,6 +473,7 @@ C API changes * The ``PyArray_CGT``, ``PyArray_CLT``, ``PyArray_CGE``, ``PyArray_CLE``, ``PyArray_CEQ``, ``PyArray_CNE`` macros have been removed. + * ``PyArray_MIN`` and ``PyArray_MAX`` have been moved from ``ndarraytypes.h`` to ``npy_math.h``. @@ -486,6 +483,7 @@ C API changes This includes functions for acquiring and releasing mutexes which lock access to the string data, as well as packing and unpacking UTF-8 bytestreams from array entries. + * ``NPY_NTYPES`` has been renamed to ``NPY_NTYPES_LEGACY`` as it does not include new NumPy built-in DTypes. In particular the new string DType will likely not work correctly with code that handles legacy DTypes. @@ -519,6 +517,7 @@ C API changes after including ``numpy/ndarrayobject.h`` as it requires ``import_array()``. This includes ``PyDataType_FLAGCHK``, ``PyDataType_REFCHK`` and ``NPY_BEGIN_THREADS_DESCR``. + * The dtype flags on ``PyArray_Descr`` must now be accessed through the ``PyDataType_FLAGS`` inline function to be compatible with both 1.x and 2.x. This function is defined in ``npy_2_compat.h`` to allow backporting. 
@@ -529,9 +528,9 @@ C API changes (`gh-25816 `__) + Datetime functionality exposed in the C API and Cython bindings --------------------------------------------------------------- - The functions ``NpyDatetime_ConvertDatetime64ToDatetimeStruct``, ``NpyDatetime_ConvertDatetimeStructToDatetime64``, ``NpyDatetime_ConvertPyDateTimeToDatetimeStruct``, @@ -542,9 +541,9 @@ external libraries. (`gh-21199 `__) + Const correctness for the generalized ufunc C API ------------------------------------------------- - The NumPy C API's functions for constructing generalized ufuncs (``PyUFunc_FromFuncAndData``, ``PyUFunc_FromFuncAndDataAndSignature``, ``PyUFunc_FromFuncAndDataAndSignatureAndIdentity``) take ``types`` and ``data`` @@ -557,9 +556,9 @@ code may be. (`gh-23847 `__) + Larger ``NPY_MAXDIMS`` and ``NPY_MAXARGS``, ``NPY_RAVEL_AXIS`` introduced ------------------------------------------------------------------------- - ``NPY_MAXDIMS`` is now 64, you may want to review its use. This is usually used in a stack allocation, where the increase should be safe. However, we do encourage generally to remove any use of ``NPY_MAXDIMS`` and @@ -570,9 +569,9 @@ replaced with ``NPY_RAVEL_AXIS``. See also :ref:`migration_maxdims`. (`gh-25149 `__) + ``NPY_MAXARGS`` not constant and ``PyArrayMultiIterObject`` size change ----------------------------------------------------------------------- - Since ``NPY_MAXARGS`` was increased, it is now a runtime constant and not compile-time constant anymore. We expect almost no users to notice this. But if used for stack allocations @@ -585,9 +584,9 @@ to avoid issues with Cython. (`gh-25271 `__) + Required changes for custom legacy user dtypes ---------------------------------------------- - In order to improve our DTypes it is unfortunately necessary to break the ABI, which requires some changes for dtypes registered with ``PyArray_RegisterDataType``. @@ -596,9 +595,9 @@ to adapt your code and achieve compatibility with both 1.x and 2.x. 
(`gh-25792 `__) + New Public DType API -------------------- - The C implementation of the NEP 42 DType API is now public. While the DType API has shipped in NumPy for a few versions, it was only usable in sessions with a special environment variable set. It is now possible to write custom DTypes @@ -612,9 +611,9 @@ be updated to work correctly with new DTypes. (`gh-25754 `__) + New C-API import functions -------------------------- - We have now added ``PyArray_ImportNumPyAPI`` and ``PyUFunc_ImportUFuncAPI`` as static inline functions to import the NumPy C-API tables. The new functions have two advantages over ``import_array`` and @@ -646,7 +645,7 @@ The ``metadata`` field is kept, but the macro version should also be preferred. Descriptor ``elsize`` and ``alignment`` access ---------------------------------------------- -Unless compiling only with NumPy 2 support, the ``elsize`` and ``aligment`` +Unless compiling only with NumPy 2 support, the ``elsize`` and ``alignment`` fields must now be accessed via ``PyDataType_ELSIZE``, ``PyDataType_SET_ELSIZE``, and ``PyDataType_ALIGNMENT``. In cases where the descriptor is attached to an array, we advise @@ -663,6 +662,7 @@ NumPy 2.0 C API removals have been removed. We recommend querying ``PyErr_CheckSignals()`` or ``PyOS_InterruptOccurred()`` periodically (these do currently require holding the GIL though). + * The ``noprefix.h`` header has been removed. Replace missing symbols with their prefixed counterparts (usually an added ``NPY_`` or ``npy_``). @@ -716,56 +716,58 @@ NumPy 2.0 C API removals * ``PyArrayFlags_Type`` and ``PyArray_NewFlagsObject`` as well as ``PyArrayFlagsObject`` are private now. There is no known use-case; use the Python API if needed. + * ``PyArray_MoveInto``, ``PyArray_CastTo``, ``PyArray_CastAnyTo`` are removed use ``PyArray_CopyInto`` and if absolutely needed ``PyArray_CopyAnyInto`` (the latter does a flat copy). 
-* ``PyArray_FillObjectArray`` is removed, its only true use is for + +* ``PyArray_FillObjectArray`` is removed, its only true use was for implementing ``np.empty``. Create a new empty array or use ``PyArray_FillWithScalar()`` (decrefs existing objects). + * ``PyArray_CompareUCS4`` and ``PyArray_CompareString`` are removed. Use the standard C string comparison functions. + * ``PyArray_ISPYTHON`` is removed as it is misleading, has no known use-cases, and is easy to replace. + * ``PyArray_FieldNames`` is removed, as it is unclear what it would be useful for. It also has incorrect semantics in some possible use-cases. + * ``PyArray_TypestrConvert`` is removed, since it seems a misnomer and unlikely to be used by anyone. If you know the size or are limited to few types, just use it explicitly, otherwise go via Python strings. (`gh-25292 `__) - -* ``PyDataType_GetDatetimeMetaData`` has been removed, it did not actually +* ``PyDataType_GetDatetimeMetaData`` is removed, it did not actually do anything since at least NumPy 1.7. (`gh-25802 `__) -``PyArray_GetCastFunc`` was removed ------------------------------------ - -Note that custom legacy user dtypes can still provide a castfunc -as their implementation, but any access to them is now removed. -The reason for this is that NumPy never used these internally -for many years. -If you use simple numeric types, please just use C casts directly. -In case you require an alternative, please let us know so we can -create new API such as ``PyArray_CastBuffer()`` which could -use old or new cast functions depending on the NumPy version. +* ``PyArray_GetCastFunc`` is removed. Note that custom legacy user dtypes + can still provide a castfunc as their implementation, but any access to them + is now removed. The reason for this is that NumPy never used these + internally for many years. If you use simple numeric types, please just use + C casts directly. 
In case you require an alternative, please let us know so + we can create new API such as ``PyArray_CastBuffer()`` which could use old or + new cast functions depending on the NumPy version. -(`gh-25161 `__) + (`gh-25161 `__) New Features ============ -* ``np.add`` was extended to work with ``unicode`` and ``bytes`` dtypes. +``np.add`` was extended to work with ``unicode`` and ``bytes`` dtypes. +---------------------------------------------------------------------- (`gh-24858 `__) + A new ``bitwise_count`` function -------------------------------- - This new function counts the number of 1-bits in a number. `~numpy.bitwise_count` works on all the numpy integer types and integer-like objects. @@ -779,9 +781,9 @@ integer-like objects. (`gh-19355 `__) + macOS Accelerate support, including the ILP64 --------------------------------------------- - Support for the updated Accelerate BLAS/LAPACK library, including ILP64 (64-bit integer) support, in macOS 13.3 has been added. This brings arm64 support, and significant performance improvements of up to 10x for commonly used linear @@ -796,18 +798,18 @@ PyPI will get wheels built against Accelerate rather than OpenBLAS. (`gh-25255 `__) + Option to use weights for quantile and percentile functions ----------------------------------------------------------- - A ``weights`` keyword is now available for `~numpy.quantile`, `~numpy.percentile`, `~numpy.nanquantile` and `~numpy.nanpercentile`. Only ``method="inverted_cdf"`` supports weights. (`gh-24254 `__) + Improved CPU optimization tracking ---------------------------------- - A new tracer mechanism is available which enables tracking of the enabled targets for each optimized function (i.e., that uses hardware-specific SIMD instructions) in the NumPy library. With this enhancement, it becomes possible @@ -821,9 +823,9 @@ and data type signatures. (`gh-24420 `__) + A new Meson backend for ``f2py`` -------------------------------- - ``f2py`` in compile mode (i.e. 
``f2py -c``) now accepts the ``--backend meson`` option. This is the default option for Python >=3.12. For older Python versions, ``f2py`` will still default to ``--backend distutils``. @@ -836,9 +838,9 @@ There are no changes for users of ``f2py`` only as a code generator, i.e. withou (`gh-24532 `__) + ``bind(c)`` support for ``f2py`` -------------------------------- - Both functions and subroutines can be annotated with ``bind(c)``. ``f2py`` will handle both the correct type mapping, and preserve the unique label for other C interfaces. @@ -850,9 +852,9 @@ Fortran. (`gh-24555 `__) + A new ``strict`` option for several testing functions ----------------------------------------------------- - The ``strict`` keyword is now available for `~numpy.testing.assert_allclose`, `~numpy.testing.assert_equal`, and `~numpy.testing.assert_array_less`. Setting ``strict=True`` will disable the broadcasting behaviour for scalars @@ -862,6 +864,7 @@ and ensure that input arrays have the same data type. `gh-24770 `__, `gh-24775 `__) + Add ``np.core.umath.find`` and ``np.core.umath.rfind`` UFuncs ------------------------------------------------------------- Add two ``find`` and ``rfind`` UFuncs that operate on unicode or byte strings @@ -870,9 +873,9 @@ and are used in ``np.char``. They operate similar to ``str.find`` and (`gh-24868 `__) -``diagonal`` and ``trace`` for `numpy.linalg` ---------------------------------------------- +``diagonal`` and ``trace`` for ``numpy.linalg`` +----------------------------------------------- `numpy.linalg.diagonal` and `numpy.linalg.trace` have been added, which are array API standard-compatible variants of `numpy.diagonal` and `numpy.trace`. They differ in the default axis selection which define 2-D @@ -880,18 +883,18 @@ sub-arrays. (`gh-24887 `__) + New ``long`` and ``ulong`` dtypes --------------------------------- - `numpy.long` and `numpy.ulong` have been added as NumPy integers mapping to C's ``long`` and ``unsigned long``. 
Prior to NumPy 1.24, ``numpy.long`` was an alias to Python's ``int``. (`gh-24922 `__) -``svdvals`` for `numpy.linalg` ------------------------------- +``svdvals`` for ``numpy.linalg`` +-------------------------------- `numpy.linalg.svdvals` has been added. It computes singular values for (a stack of) matrices. Executing ``np.svdvals(x)`` is the same as calling ``np.svd(x, compute_uv=False, hermitian=False)``. @@ -899,25 +902,25 @@ This function is compatible with the array API standard. (`gh-24940 `__) + A new ``isdtype`` function -------------------------- - `numpy.isdtype` was added to provide a canonical way to classify NumPy's dtypes in compliance with the array API standard. (`gh-25054 `__) + A new ``astype`` function ------------------------- - `numpy.astype` was added to provide an array API standard-compatible alternative to the `numpy.ndarray.astype` method. (`gh-25079 `__) + Array API compatible functions' aliases --------------------------------------- - 13 aliases for existing functions were added to improve compatibility with the array API standard: * Trigonometry: ``acos``, ``acosh``, ``asin``, ``asinh``, ``atan``, ``atanh``, ``atan2``. @@ -930,9 +933,9 @@ Array API compatible functions' aliases (`gh-25086 `__) + New ``unique_*`` functions -------------------------- - The `~numpy.unique_all`, `~numpy.unique_counts`, `~numpy.unique_inverse`, and `~numpy.unique_values` functions have been added. They provide functionality of `~numpy.unique` with different sets of flags. They are array API @@ -942,9 +945,9 @@ compilation. (`gh-25088 `__) + Matrix transpose support for ndarrays ------------------------------------- - NumPy now offers support for calculating the matrix transpose of an array (or stack of arrays). The matrix transpose is equivalent to swapping the last two axes of an array. Both ``np.ndarray`` and ``np.ma.MaskedArray`` now expose a @@ -953,9 +956,9 @@ function. 
(`gh-23762 `__) + Array API compatible functions for ``numpy.linalg`` --------------------------------------------------- - Six new functions and two aliases were added to improve compatibility with the Array API standard for `numpy.linalg`: @@ -984,18 +987,18 @@ the Array API standard for `numpy.linalg`: (`gh-25145 `__) + A ``correction`` argument for ``var`` and ``std`` ------------------------------------------------- - A ``correction`` argument was added to `~numpy.var` and `~numpy.std`, which is an array API standard compatible alternative to ``ddof``. As both arguments serve a similar purpose, only one of them can be provided at the same time. (`gh-25169 `__) + ``ndarray.device`` and ``ndarray.to_device`` -------------------------------------------- - An ``ndarray.device`` attribute and ``ndarray.to_device`` method were added to ``numpy.ndarray`` for array API standard compatibility. @@ -1008,9 +1011,9 @@ For all these new arguments, only ``device="cpu"`` is supported. (`gh-25233 `__) + StringDType has been added to NumPy ----------------------------------- - We have added a new variable-width UTF-8 encoded string data type, implementing a "NumPy array of Python strings", including support for a user-provided missing data sentinel. It is intended as a drop-in replacement for arrays of Python @@ -1020,9 +1023,9 @@ documentation ` for more details. (`gh-25347 `__) + New keywords for ``cholesky`` and ``pinv`` ------------------------------------------ - The ``upper`` and ``rtol`` keywords were added to `numpy.linalg.cholesky` and `numpy.linalg.pinv`, respectively, to improve array API standard compatibility. @@ -1032,9 +1035,9 @@ the future. (`gh-25388 `__) + New keywords for ``sort``, ``argsort`` and ``linalg.matrix_rank`` ----------------------------------------------------------------- - New keyword parameters were added to improve array API standard compatibility: * ``rtol`` was added to `~numpy.linalg.matrix_rank`. 
@@ -1043,9 +1046,9 @@ New keyword parameters were added to improve array API standard compatibility: (`gh-25437 `__) + New ``numpy.strings`` namespace for string ufuncs ------------------------------------------------- - NumPy now implements some string operations as ufuncs. The old ``np.char`` namespace is still available, and where possible the string manipulation functions in that namespace have been updated to use the new ufuncs, @@ -1057,9 +1060,9 @@ instead of ``np.char``. In the future we may deprecate ``np.char`` in favor of (`gh-25463 `__) -`numpy.fft` support for different precisions and in-place calculations ----------------------------------------------------------------------- +``numpy.fft`` support for different precisions and in-place calculations +------------------------------------------------------------------------ The various FFT routines in `numpy.fft` now do their calculations natively in float, double, or long double precision, depending on the input precision, instead of always calculating in double precision. Hence, the calculation will @@ -1071,9 +1074,9 @@ for in-place calculations. (`gh-25536 `__) + configtool and pkg-config support --------------------------------- - A new ``numpy-config`` CLI script is available that can be queried for the NumPy version and for compile flags needed to use the NumPy C API. This will allow build systems to better support the use of NumPy as a dependency. @@ -1083,9 +1086,9 @@ find its location for use with ``PKG_CONFIG_PATH``, use (`gh-25730 `__) + Array API standard support in the main namespace ------------------------------------------------ - The main ``numpy`` namespace now supports the array API standard. See :ref:`array-api-standard-compatibility` for details. @@ -1094,40 +1097,41 @@ The main ``numpy`` namespace now supports the array API standard. See Improvements ============ -* Strings are now supported by ``any``, ``all``, and the logical ufuncs. 
+Strings are now supported by ``any``, ``all``, and the logical ufuncs. +---------------------------------------------------------------------- (`gh-25651 `__) + Integer sequences as the shape argument for ``memmap`` ------------------------------------------------------ - `numpy.memmap` can now be created with any integer sequence as the ``shape`` argument, such as a list or numpy array of integers. Previously, only the types of tuple and int could be used without raising an error. (`gh-23729 `__) + ``errstate`` is now faster and context safe ------------------------------------------- - The `numpy.errstate` context manager/decorator is now faster and safer. Previously, it was not context safe and had (rare) issues with thread-safety. (`gh-23936 `__) + AArch64 quicksort speed improved by using Highway's VQSort ---------------------------------------------------------- - The first introduction of the Google Highway library, using VQSort on AArch64. Execution time is improved by up to 16x in some cases, see the PR for benchmark results. Extensions to other platforms will be done in the future. (`gh-24018 `__) + Complex types - underlying C type changes ----------------------------------------- - * The underlying C types for all of NumPy's complex types have been changed to use C99 complex types. @@ -1153,9 +1157,9 @@ Complex types - underlying C type changes (`gh-24085 `__) + ``iso_c_binding`` support and improved common blocks for ``f2py`` ----------------------------------------------------------------- - Previously, users would have to define their own custom ``f2cmap`` file to use type mappings defined by the Fortran2003 ``iso_c_binding`` intrinsic module. These type maps are now natively supported by ``f2py`` @@ -1168,27 +1172,27 @@ modules. 
This further expands the usability of intrinsics like (`gh-25186 `__) + Call ``str`` automatically on third argument to functions like ``assert_equal`` ------------------------------------------------------------------------------- - The third argument to functions like `~numpy.testing.assert_equal` now has ``str`` called on it automatically. This way it mimics the built-in ``assert`` statement, where ``assert_equal(a, b, obj)`` works like ``assert a == b, obj``. (`gh-24877 `__) + Support for array-like ``atol``/``rtol`` in ``isclose``, ``allclose`` --------------------------------------------------------------------- - The keywords ``atol`` and ``rtol`` in `~numpy.isclose` and `~numpy.allclose` now accept both scalars and arrays. An array, if given, must broadcast to the shapes of the first two array arguments. (`gh-24878 `__) + Consistent failure messages in test functions --------------------------------------------- - Previously, some `numpy.testing` assertions printed messages that referred to the actual and desired results as ``x`` and ``y``. Now, these values are consistently referred to as ``ACTUAL`` and @@ -1196,9 +1200,9 @@ Now, these values are consistently referred to as ``ACTUAL`` and (`gh-24931 `__) + n-D FFT transforms allow ``s[i] == -1`` --------------------------------------- - The `~numpy.fft.fftn`, `~numpy.fft.ifftn`, `~numpy.fft.rfftn`, `~numpy.fft.irfftn`, `~numpy.fft.fft2`, `~numpy.fft.ifft2`, `~numpy.fft.rfft2` and `~numpy.fft.irfft2` functions now use the whole input array along the axis @@ -1206,9 +1210,9 @@ and `~numpy.fft.irfft2` functions now use the whole input array along the axis (`gh-25495 `__) + Guard PyArrayScalar_VAL and PyUnicodeScalarObject for the limited API --------------------------------------------------------------------- - ``PyUnicodeScalarObject`` holds a ``PyUnicodeObject``, which is not available when using ``Py_LIMITED_API``. Add guards to hide it and consequently also make the ``PyArrayScalar_VAL`` macro hidden. 
@@ -1226,6 +1230,7 @@ Changes * Being fully context and thread-safe, ``np.errstate`` can only be entered once now. + * ``np.setbufsize`` is now tied to ``np.errstate()``: leaving an ``np.errstate`` context will also reset the ``bufsize``. @@ -1252,9 +1257,9 @@ Changes (`gh-25816 `__) + Representation of NumPy scalars changed --------------------------------------- - As per :ref:`NEP 51 `, the scalar representation has been updated to include the type information to avoid confusion with Python scalars. @@ -1272,9 +1277,9 @@ to facilitate updates. (`gh-22449 `__) + Truthiness of NumPy strings changed ----------------------------------- - NumPy strings previously were inconsistent about how they defined if the string is ``True`` or ``False`` and the definition did not match the one used by Python. @@ -1302,9 +1307,9 @@ The change does affect ``np.fromregex`` as it uses direct assignments. (`gh-23871 `__) + A ``mean`` keyword was added to var and std function ---------------------------------------------------- - Often when the standard deviation is needed the mean is also needed. The same holds for the variance and the mean. Until now the mean is then calculated twice, the change introduced here for the `~numpy.var` and `~numpy.std` functions @@ -1313,18 +1318,18 @@ docstrings for details and an example illustrating the speed-up. (`gh-24126 `__) + Remove datetime64 deprecation warning when constructing with timezone --------------------------------------------------------------------- - The `numpy.datetime64` method now issues a UserWarning rather than a DeprecationWarning whenever a timezone is included in the datetime string that is provided. (`gh-24193 `__) + Default integer dtype is now 64-bit on 64-bit Windows ----------------------------------------------------- - The default NumPy integer is now 64-bit on all 64-bit systems as the historic 32-bit default on Windows was a common source of issues. Most users should not notice this. 
The main issues may occur with code interfacing with libraries @@ -1333,6 +1338,7 @@ written in a compiled language like C. For more information see (`gh-24224 `__) + Renamed ``numpy.core`` to ``numpy._core`` ----------------------------------------- Accessing ``numpy.core`` now emits a DeprecationWarning. In practice @@ -1353,9 +1359,9 @@ the ``NPY_RELAXED_STRIDES_DEBUG`` environment variable or the (`gh-24717 `__) + Redefinition of ``np.intp``/``np.uintp`` (almost never a change) ---------------------------------------------------------------- - Due to the actual use of these types almost always matching the use of ``size_t``/``Py_ssize_t`` this is now the definition in C. Previously, it matched ``intptr_t`` and ``uintptr_t`` which would often @@ -1375,24 +1381,25 @@ However, it means that: (`gh-24888 `__) + ``numpy.fft.helper`` made private --------------------------------- - ``numpy.fft.helper`` was renamed to ``numpy.fft._helper`` to indicate that it is a private submodule. All public functions exported by it should be accessed from `numpy.fft`. (`gh-24945 `__) + ``numpy.linalg.linalg`` made private ------------------------------------ - ``numpy.linalg.linalg`` was renamed to ``numpy.linalg._linalg`` to indicate that it is a private submodule. All public functions exported by it should be accessed from `numpy.linalg`. (`gh-24946 `__) + Out-of-bound axis not the same as ``axis=None`` ----------------------------------------------- In some cases ``axis=32`` or for concatenate any large value @@ -1405,9 +1412,9 @@ Any out of bound axis value will now error, make sure to use .. _copy-keyword-changes-2.0: + New ``copy`` keyword meaning for ``array`` and ``asarray`` constructors ----------------------------------------------------------------------- - Now `numpy.array` and `numpy.asarray` support three values for ``copy`` parameter: * ``None`` - A copy will only be made if it is necessary. 
@@ -1418,9 +1425,9 @@ The meaning of ``False`` changed as it now raises an exception if a copy is need (`gh-25168 `__) + The ``__array__`` special method now takes a ``copy`` keyword argument. ----------------------------------------------------------------------- - NumPy will pass ``copy`` to the ``__array__`` special method in situations where it would be set to a non-default value (e.g. in a call to ``np.asarray(some_object, copy=False)``). Currently, if an @@ -1432,9 +1439,9 @@ argument with the same meaning as when passed to `numpy.array` or (`gh-25168 `__) + Cleanup of initialization of ``numpy.dtype`` with strings with commas --------------------------------------------------------------------- - The interpretation of strings with commas is changed slightly, in that a trailing comma will now always create a structured dtype. E.g., where previously ``np.dtype("i")`` and ``np.dtype("i,")`` were treated as identical, @@ -1451,9 +1458,9 @@ case for initializations without a comma, like ``np.dtype("(2)i")``. (`gh-25434 `__) + Change in how complex sign is calculated ---------------------------------------- - Following the array API standard, the complex sign is now calculated as ``z / |z|`` (instead of the rather less logical case where the sign of the real part was taken, unless the real part was zero, in which case @@ -1462,9 +1469,9 @@ zero is returned if ``z==0``. (`gh-25441 `__) + Return types of functions that returned a list of arrays -------------------------------------------------------- - Functions that returned a list of ndarrays have been changed to return a tuple of ndarrays instead. Returning tuples consistently whenever a sequence of arrays is returned makes it easier for JIT compilers like Numba, as well as for @@ -1473,20 +1480,26 @@ functions are: `~numpy.atleast_1d`, `~numpy.atleast_2d`, `~numpy.atleast_3d`, `~numpy.broadcast_arrays`, `~numpy.meshgrid`, `~numpy.ogrid`, `~numpy.histogramdd`. 
+ ``np.unique`` ``return_inverse`` shape for multi-dimensional inputs ------------------------------------------------------------------- - When multi-dimensional inputs are passed to ``np.unique`` with ``return_inverse=True``, the ``unique_inverse`` output is now shaped such that the input can be reconstructed directly using ``np.take(unique, unique_inverse)`` when ``axis=None``, and ``np.take_along_axis(unique, unique_inverse, axis=axis)`` otherwise. +.. note:: + This change was reverted in 2.0.1 except for ``axis=None``. The correct + reconstruction is always ``np.take(unique, unique_inverse, axis=axis)``. + When 2.0.0 needs to be supported, add ``unique_inverse.reshape(-1)`` + to code. + (`gh-25553 `__, `gh-25570 `__) + ``any`` and ``all`` return booleans for object arrays ----------------------------------------------------- - The ``any`` and ``all`` functions and methods now return booleans also for object arrays. Previously, they did a reduction which behaved like the Python ``or`` and @@ -1507,7 +1520,5 @@ this is currently not implemented. In some cases, this means you may have to add a specific path for: ``if type(obj) in (int, float, complex): ...``. +(`gh-26393 `__) -**Content from release note snippets in doc/release/upcoming_changes:** - -.. include:: notes-towncrier.rst diff --git a/doc/source/release/2.0.1-notes.rst b/doc/source/release/2.0.1-notes.rst new file mode 100644 index 000000000000..a49f2ee36abd --- /dev/null +++ b/doc/source/release/2.0.1-notes.rst @@ -0,0 +1,74 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.0.1 Release Notes +========================== + +NumPy 2.0.1 is a maintenance release that fixes bugs and regressions +discovered after the 2.0.0 release. NumPy 2.0.1 is the last planned +release in the 2.0.x series, 2.1.0rc1 should be out shortly. + +The Python versions supported by this release are 3.9-3.12. 
+ +Improvements +============ + +``np.quantile`` with method ``closest_observation`` chooses nearest even order statistic +---------------------------------------------------------------------------------------- +This changes the definition of nearest for border cases from the nearest odd +order statistic to nearest even order statistic. The numpy implementation now +matches other reference implementations. + +(`gh-26656 `__) + +Contributors +============ + +A total of 15 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* @vahidmech + +* Alex Herbert + +* Charles Harris +* Giovanni Del Monte + +* Leo Singer +* Lysandros Nikolaou +* Matti Picus +* Nathan Goldbaum +* Patrick J. Roddy + +* Raghuveer Devulapalli +* Ralf Gommers +* Rostan Tabet + +* Sebastian Berg +* Tyler Reddy +* Yannik Wicke + + +Pull requests merged +==================== + +A total of 24 pull requests were merged for this release. + +* `#26711 `__: MAINT: prepare 2.0.x for further development +* `#26792 `__: TYP: fix incorrect import in ``ma/extras.pyi`` stub +* `#26793 `__: DOC: Mention '1.25' legacy printing mode in ``set_printoptions`` +* `#26794 `__: DOC: Remove mention of NaN and NAN aliases from constants +* `#26821 `__: BLD: Fix x86-simd-sort build failure on openBSD +* `#26822 `__: BUG: Ensure output order follows input in numpy.fft +* `#26823 `__: TYP: fix missing sys import in numeric.pyi +* `#26832 `__: DOC: remove hack to override _add_newdocs_scalars (#26826) +* `#26835 `__: BUG: avoid side-effect of 'include complex.h' +* `#26836 `__: BUG: fix max_rows and chunked string/datetime reading in ``loadtxt`` +* `#26837 `__: BUG: fix PyArray_ImportNumPyAPI under -Werror=strict-prototypes +* `#26856 `__: DOC: Update some documentation +* `#26868 `__: BUG: fancy indexing copy +* `#26869 `__: BUG: Mismatched allocation domains in ``PyArray_FillWithScalar`` +* `#26870 `__: BUG: Handle --f77flags and --f90flags for meson [wheel build] 
+* `#26887 `__: BUG: Fix new DTypes and new string promotion when signature is... +* `#26888 `__: BUG: remove numpy.f2py from excludedimports +* `#26959 `__: BUG: Quantile closest_observation to round to nearest even order +* `#26960 `__: BUG: Fix off-by-one error in amount of characters in strip +* `#26961 `__: API: Partially revert unique with return_inverse +* `#26962 `__: BUG,MAINT: Fix utf-8 character stripping memory access +* `#26963 `__: BUG: Fix out-of-bound minimum offset for in1d table method +* `#26971 `__: BUG: fix f2py tests to work with v2 API +* `#26995 `__: BUG: Add object cast to avoid warning with limited API diff --git a/doc/source/release/2.0.2-notes.rst b/doc/source/release/2.0.2-notes.rst new file mode 100644 index 000000000000..ae5c26250ba7 --- /dev/null +++ b/doc/source/release/2.0.2-notes.rst @@ -0,0 +1,58 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.0.2 Release Notes +========================== + +NumPy 2.0.2 is a maintenance release that fixes bugs and regressions +discovered after the 2.0.1 release. + +The Python versions supported by this release are 3.9-3.12. + + +Contributors +============ + +A total of 13 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Bruno Oliveira + +* Charles Harris +* Chris Sidebottom +* Christian Heimes + +* Christopher Sidebottom +* Mateusz Sokół +* Matti Picus +* Nathan Goldbaum +* Pieter Eendebak +* Raghuveer Devulapalli +* Ralf Gommers +* Sebastian Berg +* Yair Chuchem + + + +Pull requests merged +==================== + +A total of 19 pull requests were merged for this release.
+ +* `#27000 `__: REL: Prepare for the NumPy 2.0.1 release [wheel build] +* `#27001 `__: MAINT: prepare 2.0.x for further development +* `#27021 `__: BUG: cfuncs.py: fix crash when sys.stderr is not available +* `#27022 `__: DOC: Fix migration note for ``alltrue`` and ``sometrue`` +* `#27061 `__: BUG: use proper input and output descriptor in array_assign_subscript... +* `#27073 `__: BUG: Mirror VQSORT_ENABLED logic in Quicksort +* `#27074 `__: BUG: Bump Highway to latest master +* `#27077 `__: BUG: Off by one in memory overlap check +* `#27122 `__: BUG: Use the new ``npyv_loadable_stride_`` functions for ldexp and... +* `#27126 `__: BUG: Bump Highway to latest +* `#27128 `__: BUG: add missing error handling in public_dtype_api.c +* `#27129 `__: BUG: fix another cast setup in array_assign_subscript +* `#27130 `__: BUG: Fix building NumPy in FIPS mode +* `#27131 `__: BLD: update vendored Meson for cross-compilation patches +* `#27146 `__: MAINT: Scipy openblas 0.3.27.44.4 +* `#27151 `__: BUG: Do not accidentally store dtype metadata in ``np.save`` +* `#27195 `__: REV: Revert undef I and document it +* `#27213 `__: BUG: Fix NPY_RAVEL_AXIS on backwards compatible NumPy 2 builds +* `#27279 `__: BUG: Fix array_equal for numeric and non-numeric scalar types + diff --git a/doc/source/release/2.1.0-notes.rst b/doc/source/release/2.1.0-notes.rst index d0b0b6f1b785..bb9c71079062 100644 --- a/doc/source/release/2.1.0-notes.rst +++ b/doc/source/release/2.1.0-notes.rst @@ -1,19 +1,362 @@ .. currentmodule:: numpy -========================== +========================= NumPy 2.1.0 Release Notes -========================== +========================= +NumPy 2.1.0 provides support for the upcoming Python 3.13 release and drops +support for Python 3.9. In addition to the usual bug fixes and updated Python +support, it helps get us back into our usual release cycle after the extended +development of 2.0. 
The highlights for this release are: -Highlights -========== +- Support for the array-api 2023.12 standard. +- Support for Python 3.13. +- Preliminary support for free threaded Python 3.13. -*We'll choose highlights for this release near the end of the release cycle.* +Python versions 3.10-3.13 are supported in this release. -.. if release snippets have been incorporated already, uncomment the follow - line (leave the `.. include:: directive) +New functions +============= -.. **Content from release note snippets in doc/release/upcoming_changes:** +New function ``numpy.unstack`` +------------------------------ + +A new function ``np.unstack(array, axis=...)`` was added, which splits +an array into a tuple of arrays along an axis. It serves as the inverse +of `numpy.stack`. + +(`gh-26579 `__) + + +Deprecations +============ + +* The ``fix_imports`` keyword argument in ``numpy.save`` is deprecated. Since + NumPy 1.17, ``numpy.save`` uses a pickle protocol that no longer supports + Python 2, and ignored ``fix_imports`` keyword. This keyword is kept only + for backward compatibility. It is now deprecated. + + (`gh-26452 `__) + +* Passing non-integer inputs as the first argument of `bincount` is now + deprecated, because such inputs are silently cast to integers with no + warning about loss of precision. + + (`gh-27076 `__) + + +Expired deprecations +==================== + +* Scalars and 0D arrays are disallowed for ``numpy.nonzero`` and ``numpy.ndarray.nonzero``. + + (`gh-26268 `__) + +* ``set_string_function`` internal function was removed and ``PyArray_SetStringFunction`` + was stubbed out. + + (`gh-26611 `__) + + +C API changes +============= + +API symbols now hidden but customizable +--------------------------------------- +NumPy now defaults to hide the API symbols it adds to allow all NumPy API +usage. This means that by default you cannot dynamically fetch the NumPy API +from another library (this was never possible on windows). 
+ +If you are experiencing linking errors related to ``PyArray_API`` or +``PyArray_RUNTIME_VERSION``, you can define the +``NPY_API_SYMBOL_ATTRIBUTE`` to opt-out of this change. + +If you are experiencing problems due to an upstream header including NumPy, +the solution is to make sure you ``#include "numpy/ndarrayobject.h"`` before +their header and import NumPy yourself based on ``including-the-c-api``. + +(`gh-26103 `__) + +Many shims removed from npy_3kcompat.h +-------------------------------------- +Many of the old shims and helper functions were removed from +``npy_3kcompat.h``. If you find yourself in need of these, vendor the previous +version of the file into your codebase. + +(`gh-26842 `__) + +New ``PyUFuncObject`` field ``process_core_dims_func`` +------------------------------------------------------ +The field ``process_core_dims_func`` was added to the structure +``PyUFuncObject``. For generalized ufuncs, this field can be set to a function +of type ``PyUFunc_ProcessCoreDimsFunc`` that will be called when the ufunc is +called. It allows the ufunc author to check that core dimensions satisfy +additional constraints, and to set output core dimension sizes if they have not +been provided. + +(`gh-26908 `__) + + +New Features +============ + +Preliminary Support for Free-Threaded CPython 3.13 +-------------------------------------------------- + +CPython 3.13 will be available as an experimental free-threaded build. See +https://py-free-threading.github.io, `PEP 703 +`_ and the `CPython 3.13 release notes +`_ for +more detail about free-threaded Python. + +NumPy 2.1 has preliminary support for the free-threaded build of CPython +3.13. This support was enabled by fixing a number of C thread-safety issues in +NumPy. Before NumPy 2.1, NumPy used a large number of C global static variables +to store runtime caches and other state. 
We have either refactored to avoid the +need for global state, converted the global state to thread-local state, or +added locking. + +Support for free-threaded Python does not mean that NumPy is thread +safe. Read-only shared access to ndarray should be safe. NumPy exposes shared +mutable state and we have not added any locking to the array object itself to +serialize access to shared state. Care must be taken in user code to avoid +races if you would like to mutate the same array in multiple threads. It is +certainly possible to crash NumPy by mutating an array simultaneously in +multiple threads, for example by calling a ufunc and the ``resize`` method +simultaneously. For now our guidance is: "don't do that". In the future we would +like to provide stronger guarantees. + +Object arrays in particular need special care, since the GIL +previously provided locking for object array access and no longer does. See +`Issue #27199 `_ for more +information about object arrays in the free-threaded build. + +If you are interested in free-threaded Python, for example because you have a +multiprocessing-based workflow that you are interested in running with Python +threads, we encourage testing and experimentation. + +If you run into problems that you suspect are because of NumPy, please `open an +issue `_, checking first if +the bug also occurs in the "regular" non-free-threaded CPython 3.13 build. Many +threading bugs can also occur in code that releases the GIL; disabling the GIL +only makes it easier to hit threading bugs. + +(`gh-26157 `__) + +* ``numpy.reshape`` and ``numpy.ndarray.reshape`` now support ``shape`` and + ``copy`` arguments. + + (`gh-26292 `__) + +* NumPy now supports DLPack v1, support for older versions will + be deprecated in the future. + + (`gh-26501 `__) + +* ``numpy.asanyarray`` now supports ``copy`` and ``device`` arguments, matching + ``numpy.asarray``. 
+ + (`gh-26580 `__) + +* ``numpy.printoptions``, ``numpy.get_printoptions``, and + ``numpy.set_printoptions`` now support a new option, ``override_repr``, for + defining custom ``repr(array)`` behavior. + + (`gh-26611 `__) + +* ``numpy.cumulative_sum`` and ``numpy.cumulative_prod`` were added as Array + API compatible alternatives for ``numpy.cumsum`` and ``numpy.cumprod``. The + new functions can include a fixed initial (zeros for ``sum`` and ones for + ``prod``) in the result. + + (`gh-26724 `__) + +* ``numpy.clip`` now supports ``max`` and ``min`` keyword arguments which are + meant to replace ``a_min`` and ``a_max``. Also, for ``np.clip(a)`` or + ``np.clip(a, None, None)`` a copy of the input array will be returned instead + of raising an error. + + (`gh-26724 `__) + +* ``numpy.astype`` now supports ``device`` argument. + + (`gh-26724 `__) + +``f2py`` can generate freethreading-compatible C extensions +----------------------------------------------------------- +Pass ``--freethreading-compatible`` to the f2py CLI tool to produce a C +extension marked as compatible with the free threading CPython +interpreter. Doing so prevents the interpreter from re-enabling the GIL at +runtime when it imports the C extension. Note that ``f2py`` does not analyze +fortran code for thread safety, so you must verify that the wrapped fortran +code is thread safe before marking the extension as compatible. + +(`gh-26981 `__) + + +Improvements +============ + +``histogram`` auto-binning now returns bin sizes >=1 for integer input data +--------------------------------------------------------------------------- +For integer input data, bin sizes smaller than 1 result in spurious empty +bins. This is now avoided when the number of bins is computed using one of the +algorithms provided by ``histogram_bin_edges``. 
+ +(`gh-12150 `__) + +``ndarray`` shape-type parameter is now covariant and bound to ``tuple[int, ...]`` +---------------------------------------------------------------------------------- +Static typing for ``ndarray`` is a long-term effort that continues +with this change. It is a generic type with type parameters for +the shape and the data type. Previously, the shape type parameter could be +any value. This change restricts it to a tuple of ints, as one would expect +from using ``ndarray.shape``. Further, the shape-type parameter has been +changed from invariant to covariant. This change also applies to the subtypes +of ``ndarray``, e.g. ``numpy.ma.MaskedArray``. See the +`typing docs `_ +for more information. + +(`gh-26081 `__) + +``np.quantile`` with method ``closest_observation`` chooses nearest even order statistic +---------------------------------------------------------------------------------------- +This changes the definition of nearest for border cases from the nearest odd +order statistic to nearest even order statistic. The numpy implementation now +matches other reference implementations. + +(`gh-26656 `__) + +``lapack_lite`` is now thread safe +---------------------------------- +NumPy provides a minimal low-performance version of LAPACK named ``lapack_lite`` +that can be used if no BLAS/LAPACK system is detected at build time. + +Until now, ``lapack_lite`` was not thread safe. Single-threaded use cases did +not hit any issues, but running linear algebra operations in multiple threads +could lead to errors, incorrect results, or segfaults due to data races. + +We have added a global lock, serializing access to ``lapack_lite`` in multiple +threads. + +(`gh-26750 `__) + +The ``numpy.printoptions`` context manager is now thread and async-safe +----------------------------------------------------------------------- +In prior versions of NumPy, the printoptions were defined using a combination +of Python and C global variables. 
We have refactored so the state is stored in +a python ``ContextVar``, making the context manager thread and async-safe. + +(`gh-26846 `__) + +Type hinting ``numpy.polynomial`` +--------------------------------- +Starting from the 2.1 release, PEP 484 type annotations have been included for +the functions and convenience classes in ``numpy.polynomial`` and its +sub-packages. + +(`gh-26897 `__) + +Improved ``numpy.dtypes`` type hints +------------------------------------ +The type annotations for ``numpy.dtypes`` are now a better reflection of the +runtime: The ``numpy.dtype`` type-aliases have been replaced with specialized +``dtype`` *subtypes*, and the previously missing annotations for +``numpy.dtypes.StringDType`` have been added. + +(`gh-27008 `__) + + +Performance improvements and changes +==================================== + +* ``numpy.save`` now uses pickle protocol version 4 for saving arrays with + object dtype, which allows for pickle objects larger than 4GB and improves + saving speed by about 5% for large arrays. + + (`gh-26388 `__) + +* OpenBLAS on x86_64 and i686 is built with fewer kernels. Based on + benchmarking, there are 5 clusters of performance around these kernels: + ``PRESCOTT NEHALEM SANDYBRIDGE HASWELL SKYLAKEX``. + + (`gh-27147 `__) + +* OpenBLAS on windows is linked without quadmath, simplifying licensing + + (`gh-27147 `__) + +* Due to a regression in OpenBLAS on windows, the performance improvements when + using multiple threads for OpenBLAS 0.3.26 were reverted. + + (`gh-27147 `__) + +``ma.cov`` and ``ma.corrcoef`` are now significantly faster +----------------------------------------------------------- +The private function has been refactored along with ``ma.cov`` and +``ma.corrcoef``. They are now significantly faster, particularly on large, +masked arrays. + +(`gh-26285 `__) + + +Changes +======= + +* As ``numpy.vecdot`` is now a ufunc it has a less precise signature. + This is due to the limitations of ufunc's typing stub. 
+ + (`gh-26313 `__) + +* ``numpy.floor``, ``numpy.ceil``, and ``numpy.trunc`` now won't perform + casting to a floating dtype for integer and boolean dtype input arrays. + + (`gh-26766 `__) + +``ma.corrcoef`` may return a slightly different result +------------------------------------------------------ +A pairwise observation approach is currently used in ``ma.corrcoef`` to +calculate the standard deviations for each pair of variables. This has been +changed as it is being used to normalise the covariance, estimated using +``ma.cov``, which does not consider the observations for each variable in a +pairwise manner, rendering it unnecessary. The normalisation has been replaced +by the more appropriate standard deviation for each variable, which +significantly reduces the wall time, but will return slightly different +estimates of the correlation coefficients in cases where the observations +between a pair of variables are not aligned. However, it will return the same +estimates in all other cases, including returning the same correlation matrix +as ``corrcoef`` when using a masked array with no masked values. + +(`gh-26285 `__) + +Cast-safety fixes in ``copyto`` and ``full`` +-------------------------------------------- +``copyto`` now uses NEP 50 correctly and applies this to its cast safety. +Python integer to NumPy integer casts and Python float to NumPy float casts +are now considered "safe" even if assignment may fail or precision may be lost. +This means the following examples change slightly: + +* ``np.copyto(int8_arr, 1000)`` previously performed an unsafe/same-kind cast + of the Python integer. It will now always raise, to achieve an unsafe cast + you must pass an array or NumPy scalar. + +* ``np.copyto(uint8_arr, 1000, casting="safe")`` will raise an OverflowError + rather than a TypeError due to same-kind casting. + +* ``np.copyto(float32_arr, 1e300, casting="safe")`` will overflow to ``inf`` + (float32 cannot hold ``1e300``) rather than raising a TypeError.
+ +Further, only the dtype is used when assigning NumPy scalars (or 0-d arrays), +meaning that the following behaves differently: + +* ``np.copyto(float32_arr, np.float64(3.0), casting="safe")`` raises. + +* ``np.coptyo(int8_arr, np.int64(100), casting="safe")`` raises. + Previously, NumPy checked whether the 100 fits the ``int8_arr``. + +This aligns ``copyto``, ``full``, and ``full_like`` with the correct NumPy 2 +behavior. + +(`gh-27091 `__) -.. include:: notes-towncrier.rst diff --git a/doc/source/release/2.1.1-notes.rst b/doc/source/release/2.1.1-notes.rst new file mode 100644 index 000000000000..79c63514695c --- /dev/null +++ b/doc/source/release/2.1.1-notes.rst @@ -0,0 +1,41 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.1.1 Release Notes +========================== + +NumPy 2.1.1 is a maintenance release that fixes bugs and regressions +discovered after the 2.1.0 release. + +The Python versions supported by this release are 3.10-3.13. + +Contributors +============ + +A total of 7 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Andrew Nelson +* Charles Harris +* Mateusz SokÃŗÅ‚ +* Maximilian Weigand + +* Nathan Goldbaum +* Pieter Eendebak +* Sebastian Berg + +Pull requests merged +==================== + +A total of 10 pull requests were merged for this release. 
+ +* `#27236 `__: REL: Prepare for the NumPy 2.1.0 release [wheel build] +* `#27252 `__: MAINT: prepare 2.1.x for further development +* `#27259 `__: BUG: revert unintended change in the return value of set_printoptions +* `#27266 `__: BUG: fix reference counting bug in __array_interface__ implementationâ€Ļ +* `#27267 `__: TST: Add regression test for missing descr in array-interface +* `#27276 `__: BUG: Fix #27256 and #27257 +* `#27278 `__: BUG: Fix array_equal for numeric and non-numeric scalar types +* `#27287 `__: MAINT: Update maintenance/2.1.x after the 2.0.2 release +* `#27303 `__: BLD: cp311- macosx_arm64 wheels [wheel build] +* `#27304 `__: BUG: f2py: better handle filtering of public/private subroutines + diff --git a/doc/source/release/2.1.2-notes.rst b/doc/source/release/2.1.2-notes.rst new file mode 100644 index 000000000000..1a187dbd3365 --- /dev/null +++ b/doc/source/release/2.1.2-notes.rst @@ -0,0 +1,48 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.1.2 Release Notes +========================== + +NumPy 2.1.2 is a maintenance release that fixes bugs and regressions +discovered after the 2.1.1 release. + +The Python versions supported by this release are 3.10-3.13. + +Contributors +============ + +A total of 11 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Chris Sidebottom +* Ishan Koradia + +* JoÃŖo Eiras + +* Katie Rust + +* Marten van Kerkwijk +* Matti Picus +* Nathan Goldbaum +* Peter Hawkins +* Pieter Eendebak +* Slava Gorloff + + +Pull requests merged +==================== + +A total of 14 pull requests were merged for this release. + +* `#27333 `__: MAINT: prepare 2.1.x for further development +* `#27400 `__: BUG: apply critical sections around populating the dispatch cache +* `#27406 `__: BUG: Stub out get_build_msvc_version if distutils.msvccompiler... +* `#27416 `__: BUILD: fix missing include for std::ptrdiff_t for C++23 language... 
+* `#27433 `__: BLD: pin setuptools to avoid breaking numpy.distutils +* `#27437 `__: BUG: Allow unsigned shift argument for np.roll +* `#27439 `__: BUG: Disable SVE VQSort +* `#27471 `__: BUG: rfftn axis bug +* `#27479 `__: BUG: Fix extra decref of PyArray_UInt8DType. +* `#27480 `__: CI: use PyPI not scientific-python-nightly-wheels for CI doc... +* `#27481 `__: MAINT: Check for SVE support on demand +* `#27484 `__: BUG: initialize the promotion state to be weak +* `#27501 `__: MAINT: Bump pypa/cibuildwheel from 2.20.0 to 2.21.2 +* `#27506 `__: BUG: avoid segfault on bad arguments in ndarray.__array_function__ diff --git a/doc/source/release/2.1.3-notes.rst b/doc/source/release/2.1.3-notes.rst new file mode 100644 index 000000000000..cd797e0062a0 --- /dev/null +++ b/doc/source/release/2.1.3-notes.rst @@ -0,0 +1,81 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.1.3 Release Notes +========================== + +NumPy 2.1.3 is a maintenance release that fixes bugs and regressions +discovered after the 2.1.2 release. + +The Python versions supported by this release are 3.10-3.13. + + +Improvements +============ + +* Fixed a number of issues around promotion for string ufuncs with StringDType + arguments. Mixing StringDType and the fixed-width DTypes using the string + ufuncs should now generate much more uniform results. + + (`gh-27636 `__) + + +Changes +======= + +* `numpy.fix` now won't perform casting to a floating data-type for integer + and boolean data-type input arrays. + + (`gh-26766 `__) + + +Contributors +============ + +A total of 15 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Abhishek Kumar + +* Austin + +* Benjamin A. 
Beasley + +* Charles Harris +* Christian Lorentzen +* Marcel Telka + +* Matti Picus +* Michael Davidsaver + +* Nathan Goldbaum +* Peter Hawkins +* Raghuveer Devulapalli +* Ralf Gommers +* Sebastian Berg +* dependabot[bot] +* kp2pml30 + + + +Pull requests merged +==================== + +A total of 21 pull requests were merged for this release. + +* `#27512 `__: MAINT: prepare 2.1.x for further development +* `#27537 `__: MAINT: Bump actions/cache from 4.0.2 to 4.1.1 +* `#27538 `__: MAINT: Bump pypa/cibuildwheel from 2.21.2 to 2.21.3 +* `#27539 `__: MAINT: MSVC does not support #warning directive +* `#27543 `__: BUG: Fix user dtype can-cast with python scalar during promotion +* `#27561 `__: DEV: bump ``python`` to 3.12 in environment.yml +* `#27562 `__: BLD: update vendored Meson to 1.5.2 +* `#27563 `__: BUG: weighted quantile for some zero weights (#27549) +* `#27565 `__: MAINT: Use miniforge for macos conda test. +* `#27566 `__: BUILD: satisfy gcc-13 pendantic errors +* `#27569 `__: BUG: handle possible error for PyTraceMallocTrack +* `#27570 `__: BLD: start building Windows free-threaded wheels [wheel build] +* `#27571 `__: BUILD: vendor tempita from Cython +* `#27574 `__: BUG: Fix warning "differs in levels of indirection" in npy_atomic.h... +* `#27592 `__: MAINT: Update Highway to latest +* `#27593 `__: BUG: Adjust numpy.i for SWIG 4.3 compatibility +* `#27616 `__: BUG: Fix Linux QEMU CI workflow +* `#27668 `__: BLD: Do not set __STDC_VERSION__ to zero during build +* `#27669 `__: ENH: fix wasm32 runtime type error in numpy._core +* `#27672 `__: BUG: Fix a reference count leak in npy_find_descr_for_scalar. +* `#27673 `__: BUG: fixes for StringDType/unicode promoters + diff --git a/doc/source/release/2.2.0-notes.rst b/doc/source/release/2.2.0-notes.rst new file mode 100644 index 000000000000..41b3d2b58004 --- /dev/null +++ b/doc/source/release/2.2.0-notes.rst @@ -0,0 +1,210 @@ +.. 
currentmodule:: numpy + +========================== +NumPy 2.2.0 Release Notes +========================== + +The NumPy 2.2.0 release is quick release that brings us back into sync with the +usual twice yearly release cycle. There have been an number of small cleanups, +as well as work bringing the new StringDType to completion and improving support +for free threaded Python. Highlights are: + +* New functions ``matvec`` and ``vecmat``, see below. +* Many improved annotations. +* Improved support for the new StringDType. +* Improved support for free threaded Python +* Fixes for f2py + +This release supports Python versions 3.10-3.13. + + +Deprecations +============ + +* ``_add_newdoc_ufunc`` is now deprecated. ``ufunc.__doc__ = newdoc`` should + be used instead. + + (`gh-27735 `__) + + +Expired deprecations +==================== + +* ``bool(np.array([]))`` and other empty arrays will now raise an error. + Use ``arr.size > 0`` instead to check whether an array has no elements. + + (`gh-27160 `__) + + +Compatibility notes +=================== + +* `numpy.cov` now properly transposes single-row (2d array) design matrices + when ``rowvar=False``. Previously, single-row design matrices would return a + scalar in this scenario, which is not correct, so this is a behavior change + and an array of the appropriate shape will now be returned. + + (`gh-27661 `__) + + +New Features +============ + +* New functions for matrix-vector and vector-matrix products + + Two new generalized ufuncs were defined: + + * `numpy.matvec` - matrix-vector product, treating the arguments as + stacks of matrices and column vectors, respectively. + + * `numpy.vecmat` - vector-matrix product, treating the arguments as + stacks of column vectors and matrices, respectively. For complex + vectors, the conjugate is taken. + + These add to the existing `numpy.matmul` as well as to `numpy.vecdot`, + which was added in numpy 2.0. 
+ + Note that `numpy.matmul` never takes a complex conjugate, also not + when its left input is a vector, while both `numpy.vecdot` and + `numpy.vecmat` do take the conjugate for complex vectors on the + left-hand side (which are taken to be the ones that are transposed, + following the physics convention). + + (`gh-25675 `__) + +* ``np.complexfloating[T, T]`` can now also be written as + ``np.complexfloating[T]`` + + (`gh-27420 `__) + +* UFuncs now support ``__dict__`` attribute and allow overriding ``__doc__`` + (either directly or via ``ufunc.__dict__["__doc__"]``). ``__dict__`` can be + used to also override other properties, such as ``__module__`` or + ``__qualname__``. + + (`gh-27735 `__) + +* The "nbit" type parameter of ``np.number`` and its subtypes now defaults + to ``typing.Any``. This way, type-checkers will infer annotations such as + ``x: np.floating`` as ``x: np.floating[Any]``, even in strict mode. + + (`gh-27736 `__) + + +Improvements +============ + +* The ``datetime64`` and ``timedelta64`` hashes now correctly match the Pythons + builtin ``datetime`` and ``timedelta`` ones. The hashes now evaluated equal + even for equal values with different time units. + + (`gh-14622 `__) + +* Fixed a number of issues around promotion for string ufuncs with StringDType + arguments. Mixing StringDType and the fixed-width DTypes using the string + ufuncs should now generate much more uniform results. + + (`gh-27636 `__) + +* Improved support for empty `memmap`. Previously an empty `memmap` would fail + unless a non-zero ``offset`` was set. Now a zero-size `memmap` is supported + even if ``offset=0``. To achieve this, if a `memmap` is mapped to an empty + file that file is padded with a single byte. 
+ + (`gh-27723 `__) + +``f2py`` handles multiple modules and exposes variables again +------------------------------------------------------------- +A regression has been fixed which allows F2PY users to expose variables to +Python in modules with only assignments, and also fixes situations where +multiple modules are present within a single source file. + +(`gh-27695 `__) + + +Performance improvements and changes +==================================== + +* Improved multithreaded scaling on the free-threaded build when many threads + simultaneously call the same ufunc operations. + + (`gh-27896 `__) + +* NumPy now uses fast-on-failure attribute lookups for protocols. This can + greatly reduce overheads of function calls or array creation especially with + custom Python objects. The largest improvements will be seen on Python 3.12 + or newer. + + (`gh-27119 `__) + +* OpenBLAS on x86_64 and i686 is built with fewer kernels. Based on + benchmarking, there are 5 clusters of performance around these kernels: + ``PRESCOTT NEHALEM SANDYBRIDGE HASWELL SKYLAKEX``. + +* OpenBLAS on windows is linked without quadmath, simplifying licensing + +* Due to a regression in OpenBLAS on windows, the performance improvements + when using multiple threads for OpenBLAS 0.3.26 were reverted. + + (`gh-27147 `__) + +* NumPy now indicates hugepages also for large ``np.zeros`` allocations + on linux. Thus should generally improve performance. + + (`gh-27808 `__) + + +Changes +======= + +* `numpy.fix` now won't perform casting to a floating data-type for integer + and boolean data-type input arrays. + + (`gh-26766 `__) + +* The type annotations of ``numpy.float64`` and ``numpy.complex128`` now + reflect that they are also subtypes of the built-in ``float`` and ``complex`` + types, respectively. This update prevents static type-checkers from reporting + errors in cases such as: + + .. 
code-block:: python + + x: float = numpy.float64(6.28) # valid + z: complex = numpy.complex128(-1j) # valid + + (`gh-27334 `__) + +* The ``repr`` of arrays large enough to be summarized (i.e., where elements + are replaced with ``...``) now includes the ``shape`` of the array, similar + to what already was the case for arrays with zero size and non-obvious + shape. With this change, the shape is always given when it cannot be + inferred from the values. Note that while written as ``shape=...``, this + argument cannot actually be passed in to the ``np.array`` constructor. If + you encounter problems, e.g., due to failing doctests, you can use the print + option ``legacy=2.1`` to get the old behaviour. + + (`gh-27482 `__) + +* Calling ``__array_wrap__`` directly on NumPy arrays or scalars now does the + right thing when ``return_scalar`` is passed (Added in NumPy 2). It is + further safe now to call the scalar ``__array_wrap__`` on a non-scalar + result. + + (`gh-27807 `__) + +Bump the musllinux CI image and wheels to 1_2 from 1_1. This is because 1_1 is +`end of life `_. + +(`gh-27088 `__) + +NEP 50 promotion state option removed +------------------------------------- +The NEP 50 promotion state settings are now removed. They were always meant as +temporary means for testing. A warning will be given if the environment +variable is set to anything but ``NPY_PROMOTION_STATE=weak`` while +``_set_promotion_state`` and ``_get_promotion_state`` are removed. In case +code used ``_no_nep50_warning``, a ``contextlib.nullcontext`` could be used to +replace it when not available. + +(`gh-27156 `__) + diff --git a/doc/source/release/2.2.1-notes.rst b/doc/source/release/2.2.1-notes.rst new file mode 100644 index 000000000000..fe60fa0268f3 --- /dev/null +++ b/doc/source/release/2.2.1-notes.rst @@ -0,0 +1,54 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.2.1 Release Notes +========================== + +NumPy 2.2.1 is a patch release following 2.2.0. 
It fixes bugs found after the +2.2.0 release and has several maintenance pins to work around upstream changes. + +There was some breakage in downstream projects following the 2.2.0 release due +to updates to NumPy typing. Because of problems due to MyPy defects, we +recommend using basedpyright for type checking, it can be installed from +PyPI. The Pylance extension for Visual Studio Code is also based on Pyright. +Problems that persist when using basedpyright should be reported as issues +on the NumPy github site. + +This release supports Python 3.10-3.13. + + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Joren Hammudoglu +* Matti Picus +* Nathan Goldbaum +* Peter Hawkins +* Simon Altrogge +* Thomas A Caswell +* Warren Weckesser +* Yang Wang + + + +Pull requests merged +==================== + +A total of 12 pull requests were merged for this release. + +* `#27935 `__: MAINT: Prepare 2.2.x for further development +* `#27950 `__: TEST: cleanups [skip cirrus][skip azp] +* `#27958 `__: BUG: fix use-after-free error in npy_hashtable.cpp (#27955) +* `#27959 `__: BLD: add missing include +* `#27982 `__: BUG:fix compile error libatomic link test to meson.build +* `#27990 `__: TYP: Fix falsely rejected value types in ``ndarray.__setitem__`` +* `#27991 `__: MAINT: Don't wrap ``#include `` with ``extern "C"`` +* `#27993 `__: BUG: Fix segfault in stringdtype lexsort +* `#28006 `__: MAINT: random: Tweak module code in mtrand.pyx to fix a Cython... +* `#28007 `__: BUG: Cython API was missing NPY_UINTP. +* `#28021 `__: CI: pin scipy-doctest to 1.5.1 +* `#28044 `__: TYP: allow ``None`` in operand sequence of nditer + diff --git a/doc/source/release/2.2.2-notes.rst b/doc/source/release/2.2.2-notes.rst new file mode 100644 index 000000000000..8a3de547ec81 --- /dev/null +++ b/doc/source/release/2.2.2-notes.rst @@ -0,0 +1,49 @@ +.. 
currentmodule:: numpy + +========================== +NumPy 2.2.2 Release Notes +========================== + +NumPy 2.2.2 is a patch release that fixes bugs found after the 2.2.1 release. +The number of typing fixes/updates is notable. This release supports Python +versions 3.10-3.13. + + +Contributors +============ + +A total of 8 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Alicia Boya García + +* Charles Harris +* Joren Hammudoglu +* Kai Germaschewski + +* Nathan Goldbaum +* PTUsumit + +* Rohit Goswami +* Sebastian Berg + + +Pull requests merged +==================== + +A total of 16 pull requests were merged for this release. + +* `#28050 `__: MAINT: Prepare 2.2.x for further development +* `#28055 `__: TYP: fix ``void`` arrays not accepting ``str`` keys in ``__setitem__`` +* `#28066 `__: TYP: fix unnecessarily broad ``integer`` binop return types (#28065) +* `#28112 `__: TYP: Better ``ndarray`` binop return types for ``float64`` &... 
+* `#28113 `__: TYP: Return the correct ``bool`` from ``issubdtype`` +* `#28114 `__: TYP: Always accept ``date[time]`` in the ``datetime64`` constructor +* `#28120 `__: BUG: Fix auxdata initialization in ufunc slow path +* `#28131 `__: BUG: move reduction initialization to ufunc initialization +* `#28132 `__: TYP: Fix ``interp`` to accept and return scalars +* `#28137 `__: BUG: call PyType_Ready in f2py to avoid data races +* `#28145 `__: BUG: remove unnecessary call to PyArray_UpdateFlags +* `#28160 `__: BUG: Avoid data race in PyArray_CheckFromAny_int +* `#28175 `__: BUG: Fix f2py directives and --lower casing +* `#28176 `__: TYP: Fix overlapping overloads issue in 2->1 ufuncs +* `#28177 `__: TYP: preserve shape-type in ndarray.astype() +* `#28178 `__: TYP: Fix missing and spurious top-level exports + diff --git a/doc/source/release/2.2.3-notes.rst b/doc/source/release/2.2.3-notes.rst new file mode 100644 index 000000000000..cf21d751ec00 --- /dev/null +++ b/doc/source/release/2.2.3-notes.rst @@ -0,0 +1,56 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.2.3 Release Notes +========================== + +NumPy 2.2.3 is a patch release that fixes bugs found after the 2.2.2 release. +The majority of the changes are typing improvements and fixes for free +threaded Python. Both of those areas are still under development, so if you +discover new problems, please report them. + +This release supports Python versions 3.10-3.13. + + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* !amotzop +* Charles Harris +* Chris Sidebottom +* Joren Hammudoglu +* Matthew Brett +* Nathan Goldbaum +* Raghuveer Devulapalli +* Sebastian Berg +* Yakov Danishevsky + + +Pull requests merged +==================== + +A total of 21 pull requests were merged for this release. 
+ +* `#28185 `__: MAINT: Prepare 2.2.x for further development +* `#28201 `__: BUG: fix data race in a more minimal way on stable branch +* `#28208 `__: BUG: Fix ``from_float_positional`` errors for huge pads +* `#28209 `__: BUG: fix data race in np.repeat +* `#28212 `__: MAINT: Use VQSORT_COMPILER_COMPATIBLE to determine if we should... +* `#28224 `__: MAINT: update highway to latest +* `#28236 `__: BUG: Add cpp atomic support (#28234) +* `#28237 `__: BLD: Compile fix for clang-cl on WoA +* `#28243 `__: TYP: Avoid upcasting ``float64`` in the set-ops +* `#28249 `__: BLD: better fix for clang / ARM compiles +* `#28266 `__: TYP: Fix ``timedelta64.__divmod__`` and ``timedelta64.__mod__``... +* `#28274 `__: TYP: Fixed missing typing information of set_printoptions +* `#28278 `__: BUG: backport resource cleanup bugfix from gh-28273 +* `#28282 `__: BUG: fix incorrect bytes to stringdtype coercion +* `#28283 `__: TYP: Fix scalar constructors +* `#28284 `__: TYP: stub ``numpy.matlib`` +* `#28285 `__: TYP: stub the missing ``numpy.testing`` modules +* `#28286 `__: CI: Fix the github label for ``TYP:`` PR's and issues +* `#28305 `__: TYP: Backport typing updates from main +* `#28321 `__: BUG: fix race initializing legacy dtype casts +* `#28324 `__: CI: update test_moderately_small_alpha diff --git a/doc/source/release/2.2.4-notes.rst b/doc/source/release/2.2.4-notes.rst new file mode 100644 index 000000000000..82f7a3916167 --- /dev/null +++ b/doc/source/release/2.2.4-notes.rst @@ -0,0 +1,58 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.2.4 Release Notes +========================== + +NumPy 2.2.4 is a patch release that fixes bugs found after the 2.2.3 release. +There are a large number of typing improvements, the rest of the changes are +the usual mix of bug fixes and platform maintenance. + +This release supports Python versions 3.10-3.13. + + +Contributors +============ + +A total of 15 people contributed to this release. 
People with a "+" by their +names contributed a patch for the first time. + +* Abhishek Kumar +* Andrej Zhilenkov +* Andrew Nelson +* Charles Harris +* Giovanni Del Monte +* Guan Ming(Wesley) Chiu + +* Jonathan Albrecht + +* Joren Hammudoglu +* Mark Harfouche +* Matthieu Darbois +* Nathan Goldbaum +* Pieter Eendebak +* Sebastian Berg +* Tyler Reddy +* lvllvl + + + +Pull requests merged +==================== + +A total of 17 pull requests were merged for this release. + +* `#28333 `__: MAINT: Prepare 2.2.x for further development. +* `#28348 `__: TYP: fix positional- and keyword-only params in astype, cross... +* `#28377 `__: MAINT: Update FreeBSD version and fix test failure +* `#28379 `__: BUG: numpy.loadtxt reads only 50000 lines when skip_rows >= max_rows +* `#28385 `__: BUG: Make np.nonzero threading safe +* `#28420 `__: BUG: safer bincount casting (backport to 2.2.x) +* `#28422 `__: BUG: Fix building on s390x with clang +* `#28423 `__: CI: use QEMU 9.2.2 for Linux Qemu tests +* `#28424 `__: BUG: skip legacy dtype multithreaded test on 32 bit runners +* `#28435 `__: BUG: Fix searchsorted and CheckFromAny byte-swapping logic +* `#28449 `__: BUG: sanity check ``__array_interface__`` number of dimensions +* `#28510 `__: MAINT: Hide decorator from pytest traceback +* `#28512 `__: TYP: Typing fixes backported from #28452, #28491, #28494 +* `#28521 `__: TYP: Backport fixes from #28505, #28506, #28508, and #28511 +* `#28533 `__: TYP: Backport typing fixes from main (2) +* `#28534 `__: TYP: Backport typing fixes from main (3) +* `#28542 `__: TYP: Backport typing fixes from main (4) diff --git a/doc/source/release/2.2.5-notes.rst b/doc/source/release/2.2.5-notes.rst new file mode 100644 index 000000000000..e1c3205b006d --- /dev/null +++ b/doc/source/release/2.2.5-notes.rst @@ -0,0 +1,53 @@ +.. 
currentmodule:: numpy + +========================== +NumPy 2.2.5 Release Notes +========================== + +NumPy 2.2.5 is a patch release that fixes bugs found after the 2.2.4 release. +It has a large number of typing fixes/improvements as well as the normal bug +fixes and some CI maintenance. + +This release supports Python versions 3.10-3.13. + + +Contributors +============ + +A total of 7 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Joren Hammudoglu +* Baskar Gopinath + +* Nathan Goldbaum +* Nicholas Christensen + +* Sayed Adel +* karl + + + +Pull requests merged +==================== + +A total of 19 pull requests were merged for this release. + +* `#28545 `__: MAINT: Prepare 2.2.x for further development +* `#28582 `__: BUG: Fix return type of NpyIter_GetIterNext in Cython declarations +* `#28583 `__: BUG: avoid deadlocks with C++ shared mutex in dispatch cache +* `#28585 `__: TYP: fix typing errors in ``_core.strings`` +* `#28631 `__: MAINT, CI: Update Ubuntu to 22.04 in azure-pipelines +* `#28632 `__: BUG: Set writeable flag for writeable dlpacks. +* `#28633 `__: BUG: Fix crackfortran parsing error when a division occurs within... +* `#28650 `__: TYP: fix ``ndarray.tolist()`` and ``.item()`` for unknown dtype +* `#28654 `__: BUG: fix deepcopying StringDType arrays (#28643) +* `#28661 `__: TYP: Accept objects that ``write()`` to ``str`` in ``savetxt`` +* `#28663 `__: CI: Replace QEMU armhf with native (32-bit compatibility mode) +* `#28682 `__: SIMD: Resolve Highway QSort symbol linking error on aarch32/ASIMD +* `#28683 `__: TYP: add missing ``"b1"`` literals for ``dtype[bool]`` +* `#28705 `__: TYP: Fix false rejection of ``NDArray[object_].__abs__()`` +* `#28706 `__: TYP: Fix inconsistent ``NDArray[float64].__[r]truediv__`` return... 
+* `#28723 `__: TYP: fix string-like ``ndarray`` rich comparison operators +* `#28758 `__: TYP: some ``[arg]partition`` fixes +* `#28772 `__: TYP: fix incorrect ``random.Generator.integers`` return type +* `#28774 `__: TYP: fix ``count_nonzero`` signature + diff --git a/doc/source/release/2.2.6-notes.rst b/doc/source/release/2.2.6-notes.rst new file mode 100644 index 000000000000..974f59d640db --- /dev/null +++ b/doc/source/release/2.2.6-notes.rst @@ -0,0 +1,43 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.2.6 Release Notes +========================== + +NumPy 2.2.6 is a patch release that fixes bugs found after the 2.2.5 release. +It is a mix of typing fixes/improvements as well as the normal bug +fixes and some CI maintenance. + +This release supports Python versions 3.10-3.13. + +Contributors +============ + +A total of 8 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Ilhan Polat +* Joren Hammudoglu +* Marco Gorelli + +* Matti Picus +* Nathan Goldbaum +* Peter Hawkins +* Sayed Adel + +Pull requests merged +==================== + +A total of 11 pull requests were merged for this release. + +* `#28778 `__: MAINT: Prepare 2.2.x for further development +* `#28851 `__: BLD: Update vendor-meson to fix module_feature conflicts arguments... +* `#28852 `__: BUG: fix heap buffer overflow in np.strings.find +* `#28853 `__: TYP: fix ``NDArray[floating] + float`` return type +* `#28864 `__: BUG: fix stringdtype singleton thread safety +* `#28865 `__: MAINT: use OpenBLAS 0.3.29 +* `#28889 `__: MAINT: from_dlpack thread safety fixes +* `#28913 `__: TYP: Fix non-existent ``CanIndex`` annotation in ``ndarray.setfield`` +* `#28915 `__: MAINT: Avoid dereferencing/strict aliasing warnings +* `#28916 `__: BUG: Fix missing check for PyErr_Occurred() in _pyarray_correlate. 
+* `#28966 `__: TYP: reject complex scalar types in ndarray.__ifloordiv__ diff --git a/doc/source/release/2.3.0-notes.rst b/doc/source/release/2.3.0-notes.rst new file mode 100644 index 000000000000..4c3c923b3b5e --- /dev/null +++ b/doc/source/release/2.3.0-notes.rst @@ -0,0 +1,532 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.3.0 Release Notes +========================== + +The NumPy 2.3.0 release continues the work to improve free threaded Python +support and annotations together with the usual set of bug fixes. It is unusual +in the number of expired deprecations, code modernizations, and style cleanups. +The latter may not be visible to users, but is important for code maintenance +over the long term. Note that we have also upgraded from manylinux2014 to +manylinux_2_28. + +Users running on a Mac having an M4 cpu might see various warnings about +invalid values and such. The warnings are a known problem with Accelerate. +They are annoying, but otherwise harmless. Apple promises to fix them. + +This release supports Python versions 3.11-3.13, Python 3.14 will be supported +when it is released. + + +Highlights +========== + +* Interactive examples in the NumPy documentation. +* Building NumPy with OpenMP Parallelization. +* Preliminary support for Windows on ARM. +* Improved support for free threaded Python. +* Improved annotations. + + +New functions +============= + +New function ``numpy.strings.slice`` +------------------------------------ +The new function ``numpy.strings.slice`` was added, which implements fast +native slicing of string arrays. It supports the full slicing API including +negative slice offsets and steps. + +(`gh-27789 `__) + + +Deprecations +============ + +* The ``numpy.typing.mypy_plugin`` has been deprecated in favor of platform-agnostic + static type inference. Please remove ``numpy.typing.mypy_plugin`` from the ``plugins`` + section of your mypy configuration. 
If this change results in new errors being + reported, kindly open an issue. + + (`gh-28129 `__) + +* The ``numpy.typing.NBitBase`` type has been deprecated and will be removed in + a future version. + + This type was previously intended to be used as a generic upper bound for + type-parameters, for example: + + .. code-block:: python + + import numpy as np + import numpy.typing as npt + + def f[NT: npt.NBitBase](x: np.complexfloating[NT]) -> np.floating[NT]: ... + + But in NumPy 2.2.0, ``float64`` and ``complex128`` were changed to concrete + subtypes, causing static type-checkers to reject ``x: np.float64 = + f(np.complex128(42j))``. + + So instead, the better approach is to use ``typing.overload``: + + .. code-block:: python + + import numpy as np + from typing import overload + + @overload + def f(x: np.complex64) -> np.float32: ... + @overload + def f(x: np.complex128) -> np.float64: ... + @overload + def f(x: np.clongdouble) -> np.longdouble: ... + + (`gh-28884 `__) + + +Expired deprecations +==================== + +* Remove deprecated macros like ``NPY_OWNDATA`` from Cython interfaces in favor + of ``NPY_ARRAY_OWNDATA`` (deprecated since 1.7) + + (`gh-28254 `__) + +* Remove ``numpy/npy_1_7_deprecated_api.h`` and C macros like ``NPY_OWNDATA`` + in favor of ``NPY_ARRAY_OWNDATA`` (deprecated since 1.7) + + (`gh-28254 `__) + +* Remove alias ``generate_divbyzero_error`` to + ``npy_set_floatstatus_divbyzero`` and ``generate_overflow_error`` to + ``npy_set_floatstatus_overflow`` (deprecated since 1.10) + + (`gh-28254 `__) + +* Remove ``np.tostring`` (deprecated since 1.19) + + (`gh-28254 `__) + +* Raise on ``np.conjugate`` of non-numeric types (deprecated since 1.13) + + (`gh-28254 `__) + +* Raise when using ``np.bincount(...minlength=None)``, use 0 instead + (deprecated since 1.14) + + (`gh-28254 `__) + +* Passing ``shape=None`` to functions with a non-optional shape argument + errors, use ``()`` instead (deprecated since 1.20) + + (`gh-28254 `__) + +* Inexact 
matches for ``mode`` and ``searchside`` raise (deprecated since 1.20) + + (`gh-28254 `__) + +* Setting ``__array_finalize__ = None`` errors (deprecated since 1.23) + + (`gh-28254 `__) + +* ``np.fromfile`` and ``np.fromstring`` error on bad data, previously they + would guess (deprecated since 1.18) + + (`gh-28254 `__) + +* ``datetime64`` and ``timedelta64`` construction with a tuple no longer + accepts an ``event`` value, either use a two-tuple of (unit, num) or a + 4-tuple of (unit, num, den, 1) (deprecated since 1.14) + + (`gh-28254 `__) + +* When constructing a ``dtype`` from a class with a ``dtype`` attribute, that + attribute must be a dtype-instance rather than a thing that can be parsed as + a dtype instance (deprecated in 1.19). At some point the whole construct of + using a dtype attribute will be deprecated (see #25306) + + (`gh-28254 `__) + +* Passing booleans as partition index errors (deprecated since 1.23) + + (`gh-28254 `__) + +* Out-of-bounds indexes error even on empty arrays (deprecated since 1.20) + + (`gh-28254 `__) + +* ``np.tostring`` has been removed, use ``tobytes`` instead (deprecated since 1.19) + + (`gh-28254 `__) + +* Disallow make a non-writeable array writeable for arrays with a base that do + not own their data (deprecated since 1.17) + + (`gh-28254 `__) + +* ``concatenate()`` with ``axis=None`` uses ``same-kind`` casting by default, + not ``unsafe`` (deprecated since 1.20) + + (`gh-28254 `__) + +* Unpickling a scalar with object dtype errors (deprecated since 1.20) + + (`gh-28254 `__) + +* The binary mode of ``fromstring`` now errors, use ``frombuffer`` instead + (deprecated since 1.14) + + (`gh-28254 `__) + +* Converting ``np.inexact`` or ``np.floating`` to a dtype errors (deprecated + since 1.19) + + (`gh-28254 `__) + +* Converting ``np.complex``, ``np.integer``, ``np.signedinteger``, + ``np.unsignedinteger``, ``np.generic`` to a dtype errors (deprecated since + 1.19) + + (`gh-28254 `__) + +* The Python built-in ``round`` errors 
for complex scalars. Use ``np.round`` or + ``scalar.round`` instead (deprecated since 1.19) + + (`gh-28254 `__) + +* 'np.bool' scalars can no longer be interpreted as an index (deprecated since 1.19) + + (`gh-28254 `__) + +* Parsing an integer via a float string is no longer supported. (deprecated + since 1.23) To avoid this error you can + * make sure the original data is stored as integers. + * use the ``converters=float`` keyword argument. + * Use ``np.loadtxt(...).astype(np.int64)`` + + (`gh-28254 `__) + +* The use of a length 1 tuple for the ufunc ``signature`` errors. Use ``dtype`` + or fill the tuple with ``None`` (deprecated since 1.19) + + (`gh-28254 `__) + +* Special handling of matrix in np.outer is removed. Convert to an ndarray + via ``matrix.A`` (deprecated since 1.20) + + (`gh-28254 `__) + +* Removed the ``np.compat`` package source code (removed in 2.0) + + (`gh-28961 `__) + + +C API changes +============= + +* ``NpyIter_GetTransferFlags`` is now available to check if + the iterator needs the Python API or if casts may cause floating point + errors (FPE). FPEs can for example be set when casting ``float64(1e300)`` + to ``float32`` (overflow to infinity) or a NaN to an integer (invalid value). + + (`gh-27883 `__) + +* ``NpyIter`` now has no limit on the number of operands it supports. + + (`gh-28080 `__) + +New ``NpyIter_GetTransferFlags`` and ``NpyIter_IterationNeedsAPI`` change +------------------------------------------------------------------------- +NumPy now has the new ``NpyIter_GetTransferFlags`` function as a more precise +way of checking iterator/buffering needs. I.e. whether the Python API/GIL is +required or floating point errors may occur. +This function is also faster if you already know your needs without buffering. + +The ``NpyIter_IterationNeedsAPI`` function now performs all the checks that were +previously performed at setup time. While it was never necessary to call it +multiple times, doing so will now have a larger cost. 
+ +(`gh-27998 `__) + + +New Features +============ + +* The type parameter of ``np.dtype`` now defaults to ``typing.Any``. + This way, static type-checkers will infer ``dtype: np.dtype`` as + ``dtype: np.dtype[Any]``, without reporting an error. + + (`gh-28669 `__) + +* Static type-checkers now interpret: + + - ``_: np.ndarray`` as ``_: npt.NDArray[typing.Any]``. + - ``_: np.flatiter`` as ``_: np.flatiter[np.ndarray]``. + + This is because their type parameters now have default values. + + (`gh-28940 `__) + +NumPy now registers its pkg-config paths with the pkgconf_ PyPI package +----------------------------------------------------------------------- +The pkgconf_ PyPI package provides an interface for projects like NumPy to +register their own paths to be added to the pkg-config search path. This means +that when using pkgconf_ from PyPI, NumPy will be discoverable without needing +for any custom environment configuration. + +.. attention:: Attention + + This only applies when using the pkgconf_ package from PyPI_, or put another + way, this only applies when installing pkgconf_ via a Python package + manager. + + If you are using ``pkg-config`` or ``pkgconf`` provided by your system, or + any other source that does not use the pkgconf-pypi_ project, the NumPy + pkg-config directory will not be automatically added to the search path. In + these situations, you might want to use ``numpy-config``. + + +.. _pkgconf: https://github.com/pypackaging-native/pkgconf-pypi +.. _PyPI: https://pypi.org/ +.. _pkgconf-pypi: https://github.com/pypackaging-native/pkgconf-pypi + +(`gh-28214 `__) + +Allow ``out=...`` in ufuncs to ensure array result +-------------------------------------------------- +NumPy has the sometimes difficult behavior that it currently usually +returns scalars rather than 0-D arrays (even if the inputs were 0-D arrays). +This is especially problematic for non-numerical dtypes (e.g. ``object``). + +For ufuncs (i.e. 
most simple math functions) it is now possible to use +``out=...`` (literally \`...\`, e.g. ``out=Ellipsis``) which is identical in +behavior to ``out`` not being passed, but will ensure a non-scalar return. +This spelling is borrowed from ``arr1d[0, ...]`` where the ``...`` also ensures +a non-scalar return. + +Other functions with an ``out=`` kwarg should gain support eventually. +Downstream libraries that interoperate via ``__array_ufunc__`` or +``__array_function__`` may need to adapt to support this. + +(`gh-28576 `__) + +Building NumPy with OpenMP Parallelization +------------------------------------------ +NumPy now supports OpenMP parallel processing capabilities when built with the +``-Denable_openmp=true`` Meson build flag. This feature is disabled by default. +When enabled, ``np.sort`` and ``np.argsort`` functions can utilize OpenMP for +parallel thread execution, improving performance for these operations. + +(`gh-28619 `__) + +Interactive examples in the NumPy documentation +----------------------------------------------- +The NumPy documentation includes a number of examples that +can now be run interactively in your browser using WebAssembly +and Pyodide. + +Please note that the examples are currently experimental in +nature and may not work as expected for all methods in the +public API. + +(`gh-26745 `__) + + +Improvements +============ + +* Scalar comparisons between non-comparable dtypes such as + ``np.array(1) == np.array('s')`` now return a NumPy bool instead of + a Python bool. + + (`gh-27288 `__) + +* ``np.nditer`` now has no limit on the number of supported operands + (C-integer). + + (`gh-28080 `__) + +* No-copy pickling is now supported for any + array that can be transposed to a C-contiguous array. + + (`gh-28105 `__) + +* The ``__repr__`` for user-defined dtypes now prefers the ``__name__`` of the + custom dtype over a more generic name constructed from its ``kind`` and + ``itemsize``. 
+ + (`gh-28250 `__) + +* ``np.dot`` now reports floating point exceptions. + + (`gh-28442 `__) + +* ``np.dtypes.StringDType`` is now a + `generic type `_ which + accepts a type argument for ``na_object`` that defaults to ``typing.Never``. + For example, ``StringDType(na_object=None)`` returns a ``StringDType[None]``, + and ``StringDType()`` returns a ``StringDType[typing.Never]``. + + (`gh-28856 `__) + +Added warnings to ``np.isclose`` +-------------------------------- +Added warning messages if at least one of atol or rtol are either ``np.nan`` or +``np.inf`` within ``np.isclose``. + +* Warnings follow the user's ``np.seterr`` settings + +(`gh-28205 `__) + + +Performance improvements and changes +==================================== + +Performance improvements to ``np.unique`` +----------------------------------------- +``np.unique`` now tries to use a hash table to find unique values instead of +sorting values before finding unique values. This is limited to certain dtypes +for now, and the function is now faster for those dtypes. The function now also +exposes a ``sorted`` parameter to allow returning unique values as they were +found, instead of sorting them afterwards. + +(`gh-26018 `__) + +Performance improvements to ``np.sort`` and ``np.argsort`` +---------------------------------------------------------- +``np.sort`` and ``np.argsort`` functions now can leverage OpenMP for parallel +thread execution, resulting in up to 3.5x speedups on x86 architectures with +AVX2 or AVX-512 instructions. This opt-in feature requires NumPy to be built +with the -Denable_openmp Meson flag. Users can control the number of threads +used by setting the OMP_NUM_THREADS environment variable. + +(`gh-28619 `__) + +Performance improvements for ``np.float16`` casts +------------------------------------------------- +Earlier, floating point casts to and from ``np.float16`` types +were emulated in software on all platforms. 
+ +Now, on ARM devices that support Neon float16 intrinsics (such as +recent Apple Silicon), the native float16 path is used to achieve +the best performance. + +(`gh-28769 `__) + +Performance improvements for ``np.matmul`` +------------------------------------------ +Enable using BLAS for ``matmul`` even when operands are non-contiguous by copying +if needed. + +(`gh-23752 `__) + +Changes +======= + +* The vector norm ``ord=inf`` and the matrix norms ``ord={1, 2, inf, 'nuc'}`` + now always return zero for empty arrays. Empty arrays have at least one axis + of size zero. This affects ``np.linalg.norm``, ``np.linalg.vector_norm``, and + ``np.linalg.matrix_norm``. Previously, NumPy would raise errors or return + zero depending on the shape of the array. + + (`gh-28343 `__) + +* A spelling error in the error message returned when converting a string to a + float with the method ``np.format_float_positional`` has been fixed. + + (`gh-28569 `__) + +* NumPy's ``__array_api_version__`` was upgraded from ``2023.12`` to ``2024.12``. + +* ``numpy.count_nonzero`` for ``axis=None`` (default) now returns a NumPy scalar + instead of a Python integer. + +* The parameter ``axis`` in ``numpy.take_along_axis`` function now has a default + value of ``-1``. + + (`gh-28615 `__) + +* Printing of ``np.float16`` and ``np.float32`` scalars and arrays has been improved by + adjusting the transition to scientific notation based on the floating point precision. + A new legacy ``np.printoptions`` mode ``'2.2'`` has been added for backwards compatibility. + + (`gh-28703 `__) + +* Multiplication between a string and integer now raises OverflowError instead + of MemoryError if the result of the multiplication would create a string that + is too large to be represented. This follows Python's behavior. 
+ + (`gh-29060 `__) + +``unique_values`` may return unsorted data +------------------------------------------ +The relatively new function (added in NumPy 2.0) ``unique_values`` may now +return unsorted results. Just as ``unique_counts`` and ``unique_all``, these +never guaranteed a sorted result, however, the result was sorted until now. In +cases where these do return a sorted result, this may change in future releases +to improve performance. + +(`gh-26018 `__) + +Changes to the main iterator and potential numerical changes +------------------------------------------------------------ +The main iterator, used in math functions and via ``np.nditer`` from Python and +``NpyIter`` in C, now behaves differently for some buffered iterations. This +means that: + +* The buffer size used will often be smaller than the maximum buffer size + allowed by the ``buffersize`` parameter. + +* The "growinner" flag is now honored with buffered reductions when no operand + requires buffering. + +For ``np.sum()`` such changes in buffersize may slightly change numerical +results of floating point operations. Users who use "growinner" for custom +reductions could notice changes in precision (for example, in NumPy we removed +it from ``einsum`` to avoid most precision changes and improve precision for +some 64bit floating point inputs). + +(`gh-27883 `__) + +The minimum supported GCC version is now 9.3.0 +---------------------------------------------- +The minimum supported version was updated from 8.4.0 to 9.3.0, primarily in +order to reduce the chance of platform-specific bugs in old GCC versions from +causing issues. + +(`gh-28102 `__) + +Changes to automatic bin selection in numpy.histogram +----------------------------------------------------- +The automatic bin selection algorithm in ``numpy.histogram`` has been modified +to avoid out-of-memory errors for samples with low variation. 
For full control +over the selected bins the user can use set the ``bin`` or ``range`` parameters +of ``numpy.histogram``. + +(`gh-28426 `__) + +Build manylinux_2_28 wheels +--------------------------- +Wheels for linux systems will use the ``manylinux_2_28`` tag (instead of the +``manylinux2014`` tag), which means dropping support for redhat7/centos7, +amazonlinux2, debian9, ubuntu18.04, and other pre-glibc2.28 operating system +versions, as per the `PEP 600 support table`_. + +.. _`PEP 600 support table`: https://github.com/mayeut/pep600_compliance?tab=readme-ov-file#pep600-compliance-check + +(`gh-28436 `__) + +Remove use of -Wl,-ld_classic on macOS +-------------------------------------- +Remove use of -Wl,-ld_classic on macOS. This hack is no longer needed by Spack, +and results in libraries that cannot link to other libraries built with ld +(new). + +(`gh-28713 `__) + +Re-enable overriding functions in the ``numpy.strings`` +------------------------------------------------------- +Re-enable overriding functions in the ``numpy.strings`` module. + +(`gh-28741 `__) diff --git a/doc/source/release/2.3.1-notes.rst b/doc/source/release/2.3.1-notes.rst new file mode 100644 index 000000000000..d8193f07671c --- /dev/null +++ b/doc/source/release/2.3.1-notes.rst @@ -0,0 +1,53 @@ +.. currentmodule:: numpy + +========================= +NumPy 2.3.1 Release Notes +========================= + +The NumPy 2.3.1 release is a patch release with several bug fixes, annotation +improvements, and better support for OpenBSD. Highlights are: + +- Fix bug in ``matmul`` for non-contiguous out kwarg parameter +- Fix for Accelerate runtime warnings on M4 hardware +- Fix new in NumPy 2.3.0 ``np.vectorize`` casting errors +- Improved support of cpu features for FreeBSD and OpenBSD + +This release supports Python versions 3.11-3.13, Python 3.14 will be supported +when it is released. + + +Contributors +============ + +A total of 9 people contributed to this release. 
People with a "+" by their +names contributed a patch for the first time. + +* Brad Smith + +* Charles Harris +* Developer-Ecosystem-Engineering +* François Rozet +* Joren Hammudoglu +* Matti Picus +* Mugundan Selvanayagam +* Nathan Goldbaum +* Sebastian Berg + + +Pull requests merged +==================== + +A total of 12 pull requests were merged for this release. + +* `#29140 `__: MAINT: Prepare 2.3.x for further development +* `#29191 `__: BUG: fix matmul with transposed out arg (#29179) +* `#29192 `__: TYP: Backport typing fixes and improvements. +* `#29205 `__: BUG: Revert ``np.vectorize`` casting to legacy behavior (#29196) +* `#29222 `__: TYP: Backport typing fixes +* `#29233 `__: BUG: avoid negating unsigned integers in resize implementation... +* `#29234 `__: TST: Fix test that uses uninitialized memory (#29232) +* `#29235 `__: BUG: Address interaction between SME and FPSR (#29223) +* `#29237 `__: BUG: Enforce integer limitation in concatenate (#29231) +* `#29238 `__: CI: Add support for building NumPy with LLVM for Win-ARM64 +* `#29241 `__: ENH: Detect CPU features on OpenBSD ARM and PowerPC64 +* `#29242 `__: ENH: Detect CPU features on FreeBSD / OpenBSD RISC-V64. + diff --git a/doc/source/release/2.3.2-notes.rst b/doc/source/release/2.3.2-notes.rst new file mode 100644 index 000000000000..2acc400c89fe --- /dev/null +++ b/doc/source/release/2.3.2-notes.rst @@ -0,0 +1,56 @@ +.. currentmodule:: numpy + +========================= +NumPy 2.3.2 Release Notes +========================= + +The NumPy 2.3.2 release is a patch release with a number of bug fixes and +maintenance updates. The highlights are: + +- Wheels for Python 3.14.0rc1 +- PyPy updated to the latest stable release +- OpenBLAS updated to 0.3.30 + +This release supports Python versions 3.11-3.14 + + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. 
+ +* !DWesl +* Charles Harris +* Joren Hammudoglu +* Maanas Arora +* Marco Edward Gorelli +* Matti Picus +* Nathan Goldbaum +* Sebastian Berg +* kostayScr + + + +Pull requests merged +==================== + +A total of 16 pull requests were merged for this release. + +* `#29256 `__: MAINT: Prepare 2.3.x for further development +* `#29283 `__: TYP: Work around a mypy issue with bool arrays (#29248) +* `#29284 `__: BUG: fix fencepost error in StringDType internals +* `#29287 `__: BUG: handle case in mapiter where descriptors might get replaced... +* `#29350 `__: BUG: Fix shape error path in array-interface +* `#29412 `__: BUG: Allow reading non-npy files in npz and add test +* `#29413 `__: TST: Avoid uninitialized values in test (#29341) +* `#29414 `__: BUG: Fix reference leakage for output arrays in reduction functions +* `#29415 `__: BUG: fix casting issue in center, ljust, rjust, and zfill (#29369) +* `#29416 `__: TYP: Fix overloads in ``np.char.array`` and ``np.char.asarray``... +* `#29417 `__: BUG: Any dtype should call ``square`` on ``arr \*\* 2`` (#29392) +* `#29424 `__: MAINT: use a stable pypy release in CI +* `#29425 `__: MAINT: Support python 314rc1 +* `#29429 `__: MAINT: Update highway to match main. +* `#29430 `__: BLD: use github to build macos-arm64 wheels with OpenBLAS and... +* `#29437 `__: BUG: fix datetime/timedelta hash memory leak (#29411) + + diff --git a/doc/source/release/2.3.3-notes.rst b/doc/source/release/2.3.3-notes.rst new file mode 100644 index 000000000000..3c293c3db322 --- /dev/null +++ b/doc/source/release/2.3.3-notes.rst @@ -0,0 +1,59 @@ +.. currentmodule:: numpy + +========================= +NumPy 2.3.3 Release Notes +========================= + +The NumPy 2.3.3 release is a patch release split between a number of maintenance +updates and bug fixes. This release supports Python versions 3.11-3.14. Note +that the 3.14.0 final is currently expected in Oct, 2025. This release is based +on 3.14.0rc2. 
+ +Contributors +============ + +A total of 13 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Aleksandr A. Voyt + +* Bernard Roesler + +* Charles Harris +* Hunter Hogan + +* Joren Hammudoglu +* Maanas Arora +* Matti Picus +* Nathan Goldbaum +* Raghuveer Devulapalli +* Sanjay Kumar Sakamuri Kamalakar + +* Tobias Markus + +* Warren Weckesser +* Zebreus + + +Pull requests merged +==================== + +A total of 23 pull requests were merged for this release. + +* `#29440 `__: MAINT: Prepare 2.3.x for further development. +* `#29446 `__: BUG: Fix test_configtool_pkgconfigdir to resolve PKG_CONFIG_DIR... +* `#29447 `__: BLD: allow targeting webassembly without emscripten +* `#29460 `__: MAINT: Backport write_release.py +* `#29473 `__: MAINT: Bump pypa/cibuildwheel from 3.1.0 to 3.1.2 +* `#29500 `__: BUG: Always return a real dtype from linalg.cond (gh-18304) (#29333) +* `#29501 `__: MAINT: Add .file entry to all .s SVML files +* `#29556 `__: BUG: Casting from one timedelta64 to another didn't handle NAT. +* `#29562 `__: BLD: update vendored Meson to 1.8.3 [wheel build] +* `#29563 `__: BUG: Fix metadata not roundtripping when pickling datetime (#29555) +* `#29587 `__: TST: update link and version for Intel SDE download +* `#29593 `__: TYP: add ``sorted`` kwarg to ``unique`` +* `#29672 `__: MAINT: Update pythoncapi-compat from main. +* `#29673 `__: MAINT: Update cibuildwheel. 
+* `#29674 `__: MAINT: Fix typo in wheels.yml +* `#29683 `__: BUG, BLD: Correct regex for ppc64 VSX3/VSX4 feature detection +* `#29684 `__: TYP: ndarray.fill() takes no keyword arguments +* `#29685 `__: BUG: avoid thread-unsafe refcount check in temp elision +* `#29687 `__: CI: replace comment-hider action in mypy_primer workflow +* `#29689 `__: BLD: Add missing include +* `#29691 `__: BUG: use correct input dtype in flatiter assignment +* `#29700 `__: TYP: fix np.bool method declarations +* `#29701 `__: BUG: Correct ambiguous logic for s390x CPU feature detection diff --git a/doc/source/release/2.3.4-notes.rst b/doc/source/release/2.3.4-notes.rst new file mode 100644 index 000000000000..6ba7c06b7514 --- /dev/null +++ b/doc/source/release/2.3.4-notes.rst @@ -0,0 +1,83 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.3.4 Release Notes +========================== + +The NumPy 2.3.4 release is a patch release split between a number of maintenance +updates and bug fixes. This release supports Python versions 3.11-3.14. This +release is based on the Python 3.14.0 final. + + +Changes +======= + +The ``npymath`` and ``npyrandom`` libraries now have a ``.lib`` rather than a +``.a`` file extension on win-arm64, for compatibility for building with MSVC and +``setuptools``. Please note that using these static libraries is discouraged +and for existing projects using it, it's best to use it with a matching +compiler toolchain, which is ``clang-cl`` on Windows on Arm. + +(`gh-29750 `__) + + +Contributors +============ + +A total of 17 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. 
+ +* !DWesl +* Charles Harris +* Christian Barbia + +* Evgeni Burovski +* Joren Hammudoglu +* Maaz + +* Mateusz SokÃŗÅ‚ +* Matti Picus +* Nathan Goldbaum +* Ralf Gommers +* Riku Sakamoto + +* Sandeep Gupta + +* Sayed Awad +* Sebastian Berg +* Sergey Fedorov + +* Warren Weckesser +* dependabot[bot] + +Pull requests merged +==================== + +A total of 30 pull requests were merged for this release. + +* `#29725 `__: MAINT: Prepare 2.3.x for further development +* `#29781 `__: MAINT: Pin some upstream dependences +* `#29782 `__: BLD: enable x86-simd-sort to build on KNL with -mavx512f +* `#29783 `__: BUG: Include python-including headers first (#29281) +* `#29784 `__: TYP: fix np.number and np.\*integer method declaration +* `#29785 `__: TYP: mypy 1.18.1 +* `#29788 `__: TYP: replace scalar type __init__ with __new__ +* `#29790 `__: BUG: Fix ``dtype`` refcount in ``__array__`` (#29715) +* `#29791 `__: TYP: fix method declarations in floating, timedelta64, and datetime64Backport +* `#29792 `__: MAINT: delete unused variables in unary logical dispatch +* `#29797 `__: BUG: Fix pocketfft umath strides for AIX compatibility (#29768) +* `#29798 `__: BUG: np.setbufsize should raise ValueError for negative input +* `#29799 `__: BUG: Fix assert in nditer buffer setup +* `#29800 `__: BUG: Stable ScalarType ordering +* `#29838 `__: TST: Pin pyparsing to avoid matplotlib errors. +* `#29839 `__: BUG: linalg: emit a MemoryError on a malloc failure (#29811) +* `#29840 `__: BLD: change file extension for libnpymath on win-arm64 from .a... 
+* `#29864 `__: CI: Fix loongarch64 CI (#29856) +* `#29865 `__: TYP: Various typing fixes +* `#29910 `__: BUG: Fix float16-sort failures on 32-bit x86 MSVC (#29908) +* `#29911 `__: TYP: add missing ``__slots__`` (#29901) +* `#29913 `__: TYP: wrong argument defaults in ``testing._private`` (#29902) +* `#29920 `__: BUG: avoid segmentation fault in string_expandtabs_length_promoter +* `#29921 `__: BUG: Fix INT_MIN % -1 to return 0 for all signed integer types... +* `#29922 `__: TYP: minor fixes related to ``errstate`` (#29914) +* `#29923 `__: TST: use requirements/test_requirements across CI (#29919) +* `#29926 `__: BUG: fix negative samples generated by Wald distribution (#29609) +* `#29940 `__: MAINT: Bump pypa/cibuildwheel from 3.1.4 to 3.2.1 +* `#29949 `__: STY: rename @classmethod arg to cls +* `#29950 `__: MAINT: Simplify string arena growth strategy (#29885) + diff --git a/doc/source/release/2.3.5-notes.rst b/doc/source/release/2.3.5-notes.rst new file mode 100644 index 000000000000..8013ef468055 --- /dev/null +++ b/doc/source/release/2.3.5-notes.rst @@ -0,0 +1,50 @@ +.. currentmodule:: numpy + +========================= +NumPy 2.3.5 Release Notes +========================= + +The NumPy 2.3.5 release is a patch release split between a number of maintenance +updates and bug fixes. This release supports Python versions 3.11-3.14. + + +Contributors +============ + +A total of 10 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Aaron Kollasch + +* Charles Harris +* Joren Hammudoglu +* Matti Picus +* Nathan Goldbaum +* Rafael Laboissière + +* Sayed Awad +* Sebastian Berg +* Warren Weckesser +* Yasir Ashfaq + + + +Pull requests merged +==================== + +A total of 16 pull requests were merged for this release. + +* `#29979 `__: MAINT: Prepare 2.3.x for further development +* `#30026 `__: SIMD, BLD: Backport FPMATH mode on x86-32 and filter successor... 
+* `#30029 `__: MAINT: Backport write_release.py +* `#30041 `__: TYP: Various typing updates +* `#30059 `__: BUG: Fix np.strings.slice if stop=None or start and stop >= len... +* `#30063 `__: BUG: Fix np.strings.slice if start > stop +* `#30076 `__: BUG: avoid negating INT_MIN in PyArray_Round implementation (#30071) +* `#30090 `__: BUG: Fix resize when it contains references (#29970) +* `#30129 `__: BLD: update scipy-openblas, use -Dpkg_config_path (#30049) +* `#30130 `__: BUG: Avoid compilation error of wrapper file generated with SWIG... +* `#30157 `__: BLD: use scipy-openblas 0.3.30.7 (#30132) +* `#30158 `__: DOC: Remove nonexistent ``order`` parameter docs of ``ma.asanyarray``... +* `#30185 `__: BUG: Fix check of PyMem_Calloc return value. (#30176) +* `#30217 `__: DOC: fix links for newly rebuilt numpy-tutorials site +* `#30218 `__: BUG: Fix build on s390x with clang (#30214) +* `#30237 `__: ENH: Make FPE blas check a runtime check for all apple arm systems + diff --git a/doc/source/release/2.4.0-notes.rst b/doc/source/release/2.4.0-notes.rst new file mode 100644 index 000000000000..b6afff63f5f1 --- /dev/null +++ b/doc/source/release/2.4.0-notes.rst @@ -0,0 +1,728 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.4.0 Release Notes +========================== + +The NumPy 2.4.0 release continues the work to improve free threaded Python +support, user dtypes implementation, and annotations. There are many expired +deprecations and bug fixes as well. + +This release supports Python versions 3.11-3.14 + + +Highlights +========== + +Apart from annotations and ``same_value`` kwarg, the 2.4 highlights are mostly +of interest to downstream developers. They should help in implementing new user +dtypes. + +* Many annotation improvements. In particular, runtime signature introspection. + +* New ``casting`` kwarg ``'same_value'`` for casting by value. 
+ +* New ``PyUFunc_AddLoopsFromSpec`` function that can be used to add user sort + loops using the ``ArrayMethod`` API. + +* New ``__numpy_dtype__`` protocol. + +Deprecations +============ + +Setting the ``strides`` attribute is deprecated +----------------------------------------------- +Setting the strides attribute is now deprecated since mutating +an array is unsafe if an array is shared, especially by multiple +threads. As an alternative, you can create a new view (no copy) via: + +* ``np.lib.stride_tricks.sliding_window_view`` if applicable, +* ``np.lib.stride_tricks.as_strided`` for the general case, +* or the ``np.ndarray`` constructor (``buffer`` is the original array) for a + light-weight version. + +(`gh-28925 `__) + +Positional ``out`` argument to ``np.maximum``, ``np.minimum`` is deprecated +--------------------------------------------------------------------------- +Passing the output array ``out`` positionally to ``numpy.maximum`` and +``numpy.minimum`` is deprecated. For example, ``np.maximum(a, b, c)`` will emit +a deprecation warning, since ``c`` is treated as the output buffer rather than +a third input. + +Always pass the output with the keyword form, e.g. ``np.maximum(a, b, +out=c)``. This makes intent clear and simplifies type annotations. + +(`gh-29052 `__) + +``align=`` must be passed as boolean to ``np.dtype()`` +------------------------------------------------------ +When creating a new ``dtype`` a ``VisibleDeprecationWarning`` will be given if +``align=`` is not a boolean. This is mainly to prevent accidentally passing a +subarray align flag where it has no effect, such as ``np.dtype("f8", 3)`` +instead of ``np.dtype(("f8", 3))``. We strongly suggest always passing +``align=`` as a keyword argument. + +(`gh-29301 `__) + +Assertion and warning control utilities are deprecated +------------------------------------------------------ +``np.testing.assert_warns`` and ``np.testing.suppress_warnings`` are +deprecated. 
Use ``warnings.catch_warnings``, ``warnings.filterwarnings``, +``pytest.warns``, or ``pytest.filterwarnings`` instead. + +(`gh-29550 `__) + +``np.fix`` is pending deprecation +--------------------------------- +The ``numpy.fix`` function will be deprecated in a future release. It is +recommended to use ``numpy.trunc`` instead, as it provides the same +functionality of truncating decimal values to their integer parts. Static type +checkers might already report a warning for the use of ``numpy.fix``. + +(`gh-30168 `__) + +in-place modification of ``ndarray.shape`` is pending deprecation +----------------------------------------------------------------- +Setting the ``ndarray.shape`` attribute directly will be deprecated in a future +release. Instead of modifying the shape in place, it is recommended to use the +``numpy.reshape`` function. Static type checkers might already report a +warning for assignments to ``ndarray.shape``. + +(`gh-30282 `__) + +Deprecation of ``numpy.lib.user_array.container`` +------------------------------------------------- +The ``numpy.lib.user_array.container`` class is deprecated and will be removed +in a future version. + +(`gh-30284 `__) + + +Expired deprecations +==================== + +Removed deprecated ``MachAr`` runtime discovery mechanism. +---------------------------------------------------------- + +(`gh-29836 `__) + +Raise ``TypeError`` on attempt to convert array with ``ndim > 0`` to scalar +--------------------------------------------------------------------------- +Conversion of an array with ``ndim > 0`` to a scalar was deprecated in NumPy +1.25. Now, attempting to do so raises ``TypeError``. Ensure you extract a +single element from your array before performing this operation. 
+ +(`gh-29841 `__) + +Removed numpy.linalg.linalg and numpy.fft.helper +------------------------------------------------ +The following were deprecated in NumPy 2.0 and have been moved to private +modules: + +* ``numpy.linalg.linalg`` + Use ``numpy.linalg`` instead. + +* ``numpy.fft.helper`` + Use ``numpy.fft`` instead. + +(`gh-29909 `__) + +Removed ``interpolation`` parameter from quantile and percentile functions +-------------------------------------------------------------------------- +The ``interpolation`` parameter was deprecated in NumPy 1.22.0 and has been +removed from the following functions: + +* ``numpy.percentile`` +* ``numpy.nanpercentile`` +* ``numpy.quantile`` +* ``numpy.nanquantile`` + +Use the ``method`` parameter instead. + +(`gh-29973 `__) + +Removed ``numpy.in1d`` +---------------------- +``numpy.in1d`` has been deprecated since NumPy 2.0 and is now removed in favor of ``numpy.isin``. + +(`gh-29978 `__) + +Removed ``numpy.ndindex.ndincr()`` +---------------------------------- +The ``ndindex.ndincr()`` method has been deprecated since NumPy 1.20 and is now +removed; use ``next(ndindex)`` instead. + +(`gh-29980 `__) + +Removed ``fix_imports`` parameter from ``numpy.save`` +----------------------------------------------------- +The ``fix_imports`` parameter was deprecated in NumPy 2.1.0 and is now removed. +This flag has been ignored since NumPy 1.17 and was only needed to support +loading files in Python 2 that were written in Python 3. 
+ +(`gh-29984 `__) + +Removal of four undocumented ``ndarray.ctypes`` methods +------------------------------------------------------- +Four undocumented methods of the ``ndarray.ctypes`` object have been removed: + +* ``_ctypes.get_data()`` (use ``_ctypes.data`` instead) +* ``_ctypes.get_shape()`` (use ``_ctypes.shape`` instead) +* ``_ctypes.get_strides()`` (use ``_ctypes.strides`` instead) +* ``_ctypes.get_as_parameter()`` (use ``_ctypes._as_parameter_`` instead) + +These methods have been deprecated since NumPy 1.21. + +(`gh-29986 `__) + +Removed ``newshape`` parameter from ``numpy.reshape`` +----------------------------------------------------- +The ``newshape`` parameter was deprecated in NumPy 2.1.0 and has been +removed from ``numpy.reshape``. Pass it positionally or use ``shape=`` +on newer NumPy versions. + +(`gh-29994 `__) + +Removal of deprecated functions and arguments +--------------------------------------------- +The following long-deprecated APIs have been removed: + +* ``numpy.trapz`` — deprecated since NumPy 2.0 (2023-08-18). Use ``numpy.trapezoid`` or + ``scipy.integrate`` functions instead. + +* ``disp`` function — deprecated from 2.0 release and no longer functional. Use + your own printing function instead. + +* ``bias`` and ``ddof`` arguments in ``numpy.corrcoef`` — these had no effect + since NumPy 1.10. + +(`gh-29997 `__) + +Removed ``delimitor`` parameter from ``numpy.ma.mrecords.fromtextfile()`` +------------------------------------------------------------------------- +The ``delimitor`` parameter was deprecated in NumPy 1.22.0 and has been +removed from ``numpy.ma.mrecords.fromtextfile()``. Use ``delimiter`` instead. + +(`gh-30021 `__) + +``numpy.array2string`` and ``numpy.sum`` deprecations finalized +--------------------------------------------------------------- +The following long-deprecated APIs have been removed or converted to errors: + +* The ``style`` parameter has been removed from ``numpy.array2string``. 
+ This argument had no effect since NumPy 1.14.0. Any arguments following + it, such as ``formatter``, have now been made keyword-only. + +* Calling ``np.sum(generator)`` directly on a generator object now raises a + ``TypeError``. This behavior was deprecated in NumPy 1.15.0. Use + ``np.sum(np.fromiter(generator))`` or the Python ``sum`` builtin instead. + +(`gh-30068 `__) + + +Compatibility notes +=================== + +* NumPy's C extension modules have begun to use multi-phase initialisation, as + defined by PEP 489. As part of this, a new explicit check has been added that + each such module is only imported once per Python process. This comes with + the side-effect that deleting ``numpy`` from ``sys.modules`` and re-importing + it will now fail with an ``ImportError``. This has always been unsafe, with + unexpected side-effects, though did not previously raise an error. + + (`gh-29030 `__) + +* ``numpy.round`` now always returns a copy. Previously, it returned a view + for integer inputs for ``decimals >= 0`` and a copy in all other cases. + This change brings ``round`` in line with ``ceil``, ``floor`` and ``trunc``. + + (`gh-29137 `__) + +* Type-checkers will no longer accept calls to ``numpy.arange`` with + ``start`` as a keyword argument. This was done for compatibility with + the Array API standard. At runtime it is still possible to use + ``numpy.arange`` with ``start`` as a keyword argument. + + (`gh-30147 `__) + +* The macro ``NPY_ALIGNMENT_REQUIRED`` has been removed. The macro was defined in + the ``npy_cpu.h`` file, so might be regarded as semi public. As it turns out, + with modern compilers and hardware it is almost always the case that + alignment is required, so numpy no longer uses the macro. It is unlikely + anyone uses it, but you might want to compile with the ``-Wundef`` flag or + equivalent to be sure. 
+ + (`gh-29094 `__) + + +C API changes +============= + +The NPY_SORTKIND enum has been enhanced with new variables +---------------------------------------------------------- +This is of interest if you are using ``PyArray_Sort`` or ``PyArray_ArgSort``. +We have changed the semantics of the old names in the ``NPY_SORTKIND`` enum and +added new ones. The changes are backward compatible, and no recompilation is +needed. The new names of interest are: + +* ``NPY_SORT_DEFAULT`` -- default sort (same value as ``NPY_QUICKSORT``) +* ``NPY_SORT_STABLE`` -- the sort must be stable (same value as ``NPY_MERGESORT``) +* ``NPY_SORT_DESCENDING`` -- the sort must be descending + +The semantic change is that ``NPY_HEAPSORT`` is mapped to ``NPY_QUICKSORT`` when used. +Note that ``NPY_SORT_DESCENDING`` is not yet implemented. + +(`gh-29642 `__) + +New ``NPY_DT_get_constant`` slot for DType constant retrieval +------------------------------------------------------------- +A new slot ``NPY_DT_get_constant`` has been added to the DType API, allowing +dtype implementations to provide constant values such as machine limits and +special values. The slot function has the signature:: + + int get_constant(PyArray_Descr *descr, int constant_id, void *ptr) + +It returns 1 on success, 0 if the constant is not available, or -1 on error. +The function is always called with the GIL held and may write to unaligned memory. + +Integer constants (marked with the ``1 << 16`` bit) return ``npy_intp`` values, +while floating-point constants return values of the dtype's native type. + +Implementing this can be used by user DTypes to provide ``numpy.finfo`` values. + +(`gh-29836 `__) + +A new ``PyUFunc_AddLoopsFromSpecs`` convenience function has been added to the C API. +------------------------------------------------------------------------------------- +This function allows adding multiple ufunc loops from their specs in one call +using a NULL-terminated array of ``PyUFunc_LoopSlot`` structs. 
It allows +registering sorting and argsorting loops using the new ArrayMethod API. + +(`gh-29900 `__) + + +New Features +============ + +* Let ``np.size`` accept multiple axes. + + (`gh-29240 `__) + +* Extend ``numpy.pad`` to accept a dictionary for the ``pad_width`` argument. + + (`gh-29273 `__) + +``'same_value'`` for casting by value +------------------------------------- +The ``casting`` kwarg now has a ``'same_value'`` option that checks the actual +values can be round-trip cast without changing value. Currently it is only +implemented in ``ndarray.astype``. This will raise a ``ValueError`` if any of the +values in the array would change as a result of the cast, including rounding of +floats or overflowing of ints. + +(`gh-29129 `__) + +``StringDType`` fill_value support in ``numpy.ma.MaskedArray`` +-------------------------------------------------------------- +Masked arrays now accept and preserve a Python ``str`` as their ``fill_value`` +when using the variable‑width ``StringDType`` (kind ``'T'``), including through +slicing and views. The default is ``'N/A'`` and may be overridden by any valid +string. This fixes issue `gh‑29421 `__ +and was implemented in pull request `gh‑29423 `__. + +(`gh-29423 `__) + +``ndmax`` option for ``numpy.array`` +------------------------------------ +The ``ndmax`` option is now available for ``numpy.array``. +It explicitly limits the maximum number of dimensions created from nested sequences. + +This is particularly useful when creating arrays of list-like objects with ``dtype=object``. +By default, NumPy recurses through all nesting levels to create the highest possible +dimensional array, but this behavior may not be desired when the intent is to preserve +nested structures as objects. The ``ndmax`` parameter provides explicit control over +this recursion depth. + +.. 
code-block:: python + + # Default behavior: Creates a 2D array + >>> a = np.array([[1, 2], [3, 4]], dtype=object) + >>> a + array([[1, 2], + [3, 4]], dtype=object) + >>> a.shape + (2, 2) + + # With ndmax=1: Creates a 1D array + >>> b = np.array([[1, 2], [3, 4]], dtype=object, ndmax=1) + >>> b + array([list([1, 2]), list([3, 4])], dtype=object) + >>> b.shape + (2,) + +(`gh-29569 `__) + +Warning emitted when using ``where`` without ``out`` +---------------------------------------------------- +Ufuncs called with a ``where`` mask and without an ``out`` positional or kwarg will +now emit a warning. This usage tends to trip up users who expect some value in +output locations where the mask is ``False`` (the ufunc will not touch those +locations). The warning can be suppressed by using ``out=None``. + +(`gh-29813 `__) + +DType sorting and argsorting supports the ArrayMethod API +--------------------------------------------------------- +User-defined dtypes can now implement custom sorting and argsorting using the +``ArrayMethod`` API. This mechanism can be used in place of the +``PyArray_ArrFuncs`` slots which may be deprecated in the future. + +The sorting and argsorting methods are registered by passing the arraymethod +specs that implement the operations to the new ``PyUFunc_AddLoopsFromSpecs`` +function. See the ``ArrayMethod`` API documentation for details. + +(`gh-29900 `__) + +New ``__numpy_dtype__`` protocol +-------------------------------- +NumPy now has a new ``__numpy_dtype__`` protocol. NumPy will check +for this attribute when converting to a NumPy dtype via ``np.dtype(obj)`` +or any ``dtype=`` argument. + +Downstream projects are encouraged to implement this for all dtype like +objects which may previously have used a ``.dtype`` attribute that returned +a NumPy dtype. +We expect to deprecate ``.dtype`` in the future to prevent interpreting +array-like objects with a ``.dtype`` attribute as a dtype. 
+If you wish you can implement ``__numpy_dtype__`` to ensure an earlier +warning or error (``.dtype`` is ignored if this is found). + +(`gh-30179 `__) + + +Improvements +============ + +Fix ``flatiter`` indexing edge cases +------------------------------------ + +The ``flatiter`` object now shares the same index preparation logic as +``ndarray``, ensuring consistent behavior and fixing several issues where +invalid indices were previously accepted or misinterpreted. + +Key fixes and improvements: + +* Stricter index validation + + - Boolean non-array indices like ``arr.flat[[True, True]]`` were + incorrectly treated as ``arr.flat[np.array([1, 1], dtype=int)]``. + They now raise an index error. Note that indices that match the + iterator's shape are expected to not raise in the future and be + handled as regular boolean indices. Use ``np.asarray()`` if + you want to match that behavior. + - Float non-array indices were also cast to integer and incorrectly + treated as ``arr.flat[np.array([1.0, 1.0], dtype=int)]``. This is now + deprecated and will be removed in a future version. + - 0-dimensional boolean indices like ``arr.flat[True]`` are also + deprecated and will be removed in a future version. + +* Consistent error types: + + Certain invalid ``flatiter`` indices that previously raised ``ValueError`` + now correctly raise ``IndexError``, aligning with ``ndarray`` behavior. + +* Improved error messages: + + The error message for unsupported index operations now provides more + specific details, including explicitly listing the valid index types, + instead of the generic ``IndexError: unsupported index operation``. 
+ +(`gh-28590 `__) + +Improved error handling in ``np.quantile`` +------------------------------------------ +`np.quantile` now raises errors if: + +* All weights are zero +* At least one weight is ``np.nan`` +* At least one weight is ``np.inf`` + +(`gh-28595 `__) + +Improved error message for ``assert_array_compare`` +--------------------------------------------------- +The error message generated by ``assert_array_compare`` which is used by functions +like ``assert_allclose``, ``assert_array_less`` etc. now also includes information +about the indices at which the assertion fails. + +(`gh-29112 `__) + +Show unit information in ``__repr__`` for ``datetime64("NaT")`` +------------------------------------------------------------------ +When a ``datetime64`` object is "Not a Time" (NaT), its ``__repr__`` method now +includes the time unit of the datetime64 type. This makes it consistent with +the behavior of a ``timedelta64`` object. + +(`gh-29396 `__) + +Performance increase for scalar calculations +-------------------------------------------- +The speed of calculations on scalars has been improved by about a factor 6 for +ufuncs that take only one input (like ``np.sin(scalar)``), reducing the speed +difference from their ``math`` equivalents from a factor 19 to 3 (the speed +for arrays is left unchanged). + +(`gh-29819 `__) + +``numpy.finfo`` Refactor +------------------------ +The ``numpy.finfo`` class has been completely refactored to obtain floating-point +constants directly from C compiler macros rather than deriving them at runtime. +This provides better accuracy, platform compatibility and corrected +several attribute calculations: + +* Constants like ``eps``, ``min``, ``max``, ``smallest_normal``, and + ``smallest_subnormal`` now come directly from standard C macros (``FLT_EPSILON``, + ``DBL_MIN``, etc.), ensuring platform-correct values. + +* The deprecated ``MachAr`` runtime discovery mechanism has been removed. 
+ +* Derived attributes have been corrected to match standard definitions: + ``machep`` and ``negep`` now use ``int(log2(eps))``; ``nexp`` accounts for + all exponent patterns; ``nmant`` excludes the implicit bit; and ``minexp`` + follows the C standard definition. + +* ``longdouble`` constants, specifically ``smallest_normal``, now follow the + C standard definitions on the respective platform. + +* Special handling added for PowerPC's IBM double-double format. + +* New test suite added in ``test_finfo.py`` to validate all + ``finfo`` properties against expected machine arithmetic values for + float16, float32, and float64 types. + +(`gh-29836 `__) + +Multiple axes are now supported in ``numpy.trim_zeros`` +------------------------------------------------------- +The ``axis`` argument of ``numpy.trim_zeros`` now accepts a sequence; for example +``np.trim_zeros(x, axis=(0, 1))`` will trim the zeros from a multi-dimensional +array ``x`` along axes 0 and 1. This fixes issue +`gh‑29945 `__ and was implemented +in pull request `gh‑29947 `__. + +(`gh-29947 `__) + +Runtime signature introspection support has been significantly improved +----------------------------------------------------------------------- +Many NumPy functions, classes, and methods that previously raised +``ValueError`` when passed to ``inspect.signature()`` now return meaningful +signatures. This improves support for runtime type checking, IDE autocomplete, +documentation generation, and runtime introspection capabilities across the +NumPy API. 
+ +Over three hundred classes and functions have been updated in total, including, +but not limited to, core classes such as ``ndarray``, ``generic``, ``dtype``, +``ufunc``, ``broadcast``, ``nditer``, etc., most methods of ``ndarray`` and +scalar types, array constructor functions (``array``, ``empty``, ``arange``, +``fromiter``, etc.), all ``ufuncs``, and many other commonly used functions, +including ``dot``, ``concat``, ``where``, ``bincount``, ``can_cast``, and +numerous others. + +(`gh-30208 `__) + + +Performance improvements and changes +==================================== + +Performance improvements to ``np.unique`` for string dtypes +----------------------------------------------------------- +The hash-based algorithm for unique extraction provides an order-of-magnitude +speedup on large string arrays. In an internal benchmark with about 1 billion +string elements, the hash-based np.unique completed in roughly 33.5 seconds, +compared to 498 seconds with the sort-based method – about 15× faster for +unsorted unique operations on strings. This improvement greatly reduces the +time to find unique values in very large string datasets. + +(`gh-28767 `__) + +Rewrite of ``np.ndindex`` using ``itertools.product`` +----------------------------------------------------- +The ``numpy.ndindex`` function now uses ``itertools.product`` internally, +providing significant improvements in performance for large iteration spaces, +while maintaining the original behavior and interface. For example, for an +array of shape (50, 60, 90) the NumPy ``ndindex`` benchmark improves +performance by a factor 5.2. + +(`gh-29165 `__) + +Performance improvements to ``np.unique`` for complex dtypes +------------------------------------------------------------ +The hash-based algorithm for unique extraction now also supports +complex dtypes, offering noticeable performance gains. 
+ +In our benchmarks on complex128 arrays with 200,000 elements, +the hash-based approach was about 1.4–1.5× faster +than the sort-based baseline when there were 20% of unique values, +and about 5× faster when there were 0.2% of unique values. + +(`gh-29537 `__) + + +Changes +======= + +* Multiplication between a string and integer now raises OverflowError instead + of MemoryError if the result of the multiplication would create a string that + is too large to be represented. This follows Python's behavior. + + (`gh-29060 `__) + +* The accuracy of ``np.quantile`` and ``np.percentile`` for 16- and 32-bit + floating point input data has been improved. + + (`gh-29105 `__) + +``unique_values`` for string dtypes may return unsorted data +------------------------------------------------------------ +np.unique now supports hash‐based duplicate removal for string dtypes. +This enhancement extends the hash-table algorithm to byte strings ('S'), +Unicode strings ('U'), and the experimental string dtype ('T', StringDType). +As a result, calling np.unique() on an array of strings will use +the faster hash-based method to obtain unique values. +Note that this hash-based method does not guarantee that the returned unique values will be sorted. +This also works for StringDType arrays containing None (missing values) +when using equal_nan=True (treating missing values as equal). + +(`gh-28767 `__) + +Modulate dispatched x86 CPU features +------------------------------------ +**IMPORTANT**: The default setting for ``cpu-baseline`` on x86 has been raised +to ``x86-64-v2`` microarchitecture. This can be changed to none during build +time to support older CPUs, though SIMD optimizations for pre-2009 processors +are no longer maintained. + +NumPy has reorganized x86 CPU features into microarchitecture-based groups +instead of individual features, aligning with Linux distribution standards and +Google Highway requirements. 
+ +Key changes: + +* Replaced individual x86 features with microarchitecture levels: ``X86_V2``, + ``X86_V3``, and ``X86_V4`` +* Raised the baseline to ``X86_V2`` +* Improved ``-`` operator behavior to properly exclude successor features that + imply the excluded feature +* Added meson redirections for removed feature names to maintain backward + compatibility +* Removed compiler compatibility workarounds for partial feature support (e.g., + AVX512 without mask operations) +* Removed legacy AMD features (XOP, FMA4) and discontinued Intel Xeon Phi + support + +New Feature Group Hierarchy: + +.. list-table:: + :header-rows: 1 + :align: left + + * - Name + - Implies + - Includes + * - ``X86_V2`` + - + - ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE4_1`` ``SSE4_2`` ``POPCNT`` ``CX16`` ``LAHF`` + * - ``X86_V3`` + - ``X86_V2`` + - ``AVX`` ``AVX2`` ``FMA3`` ``BMI`` ``BMI2`` ``LZCNT`` ``F16C`` ``MOVBE`` + * - ``X86_V4`` + - ``X86_V3`` + - ``AVX512F`` ``AVX512CD`` ``AVX512VL`` ``AVX512BW`` ``AVX512DQ`` + * - ``AVX512_ICL`` + - ``X86_V4`` + - ``AVX512VBMI`` ``AVX512VBMI2`` ``AVX512VNNI`` ``AVX512BITALG`` ``AVX512VPOPCNTDQ`` ``AVX512IFMA`` ``VAES`` ``GFNI`` ``VPCLMULQDQ`` + * - ``AVX512_SPR`` + - ``AVX512_ICL`` + - ``AVX512FP16`` + + +These groups correspond to CPU generations: + +- ``X86_V2``: x86-64-v2 microarchitectures (CPUs since 2009) +- ``X86_V3``: x86-64-v3 microarchitectures (CPUs since 2015) +- ``X86_V4``: x86-64-v4 microarchitectures (AVX-512 capable CPUs) +- ``AVX512_ICL``: Intel Ice Lake and similar CPUs +- ``AVX512_SPR``: Intel Sapphire Rapids and newer CPUs + +.. note:: + On 32-bit x86, ``cx16`` is excluded from ``X86_V2``. + +Documentation has been updated with details on using these new feature groups +with the current meson build system. 
+ +(`gh-28896 `__) + +Fix bug in ``matmul`` for non-contiguous out kwarg parameter +------------------------------------------------------------ +In some cases, if ``out`` was non-contiguous, ``np.matmul`` would cause memory +corruption or a c-level assert. This was new to v2.3.0 and fixed in v2.3.1. + +(`gh-29179 `__) + +``__array_interface__`` with NULL pointer changed +------------------------------------------------- +The array interface now accepts NULL pointers (NumPy will do its own dummy +allocation, though). Previously, these incorrectly triggered an undocumented +scalar path. In the unlikely event that the scalar path was actually desired, +you can (for now) achieve the previous behavior via the correct scalar path by +not providing a ``data`` field at all. + +(`gh-29338 `__) + +``unique_values`` for complex dtypes may return unsorted data +------------------------------------------------------------- +np.unique now supports hash‐based duplicate removal for complex dtypes. This +enhancement extends the hash‐table algorithm to all complex types ('c'), and +their extended precision variants. The hash‐based method provides faster +extraction of unique values but does not guarantee that the result will be +sorted. + +(`gh-29537 `__) + +Sorting ``kind='heapsort'`` now maps to ``kind='quicksort'`` +------------------------------------------------------------ +It is unlikely that this change will be noticed, but if you do see a change in +execution time or unstable argsort order, that is likely the cause. Please let +us know if there is a performance regression. Congratulate us if it is improved +:) + +(`gh-29642 `__) + +``numpy.typing.DTypeLike`` no longer accepts ``None`` +----------------------------------------------------- +The type alias ``numpy.typing.DTypeLike`` no longer accepts ``None``. Instead of + +.. code-block:: python + + dtype: DTypeLike = None + +it should now be + +.. code-block:: python + + dtype: DTypeLike | None = None + +instead. 
+ +(`gh-29739 `__) + +The ``npymath`` and ``npyrandom`` libraries now have a ``.lib`` rather than a +``.a`` file extension on win-arm64, for compatibility for building with MSVC +and ``setuptools``. Please note that using these static libraries is +discouraged and for existing projects using it, it's best to use it with a +matching compiler toolchain, which is ``clang-cl`` on Windows on Arm. + +(`gh-29750 `__) + diff --git a/doc/source/release/2.4.1-notes.rst b/doc/source/release/2.4.1-notes.rst new file mode 100644 index 000000000000..c033a070bd73 --- /dev/null +++ b/doc/source/release/2.4.1-notes.rst @@ -0,0 +1,52 @@ +.. currentmodule:: numpy + +========================= +NumPy 2.4.1 Release Notes +========================= + +The NumPy 2.4.1 is a patch release that fixes bugs discovered after the +2.4.0 release. In particular, the typo `SeedlessSequence` is preserved to +enable wheels using the random Cython API and built against NumPy < 2.4.0 +to run without errors. + +This release supports Python versions 3.11-3.14 + + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Alexander Shadchin +* Bill Tompkins + +* Charles Harris +* Joren Hammudoglu +* Marten van Kerkwijk +* Nathan Goldbaum +* Raghuveer Devulapalli +* Ralf Gommers +* Sebastian Berg + + +Pull requests merged +==================== + +A total of 15 pull requests were merged for this release. + +* `#30490 `__: MAINT: Prepare 2.4.x for further development +* `#30503 `__: DOC: ``numpy.select``\ : fix ``default`` parameter docstring... +* `#30504 `__: REV: Revert part of #30164 (#30500) +* `#30506 `__: TYP: ``numpy.select``\ : allow passing array-like ``default``... 
+* `#30507 `__: MNT: use if constexpr for compile-time branch selection +* `#30513 `__: BUG: Fix leak in flat assignment iterator +* `#30516 `__: BUG: fix heap overflow in fixed-width string multiply (#30511) +* `#30523 `__: BUG: Ensure summed weights returned by np.average always are... +* `#30527 `__: TYP: Fix return type of histogram2d +* `#30594 `__: MAINT: avoid passing ints to random functions that take double... +* `#30595 `__: BLD: Avoiding conflict with pygit2 for static build +* `#30596 `__: MAINT: Fix msvccompiler missing error on FreeBSD +* `#30608 `__: BLD: update vendored Meson to 1.9.2 +* `#30620 `__: ENH: use more fine-grained critical sections in array coercion... +* `#30623 `__: BUG: Undo result type change of quantile/percentile but keep... + diff --git a/doc/source/release/2.4.2-notes.rst b/doc/source/release/2.4.2-notes.rst new file mode 100644 index 000000000000..3cb4fc3b1955 --- /dev/null +++ b/doc/source/release/2.4.2-notes.rst @@ -0,0 +1,50 @@ +.. currentmodule:: numpy + +========================= +NumPy 2.4.2 Release Notes +========================= + +The NumPy 2.4.2 is a patch release that fixes bugs discovered after the +2.4.1 release. Highlights are: + +- Fixes memory leaks +- Updates OpenBLAS to fix hangs + +This release supports Python versions 3.11-3.14 + + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Daniel Tang + +* Joren Hammudoglu +* Kumar Aditya +* Matti Picus +* Nathan Goldbaum +* Ralf Gommers +* Sebastian Berg +* Vikram Kumar + + + +Pull requests merged +==================== + +A total of 12 pull requests were merged for this release. + +* `#30629 `__: MAINT: Prepare 2.4.x for further development +* `#30636 `__: TYP: ``arange``\ : accept datetime strings +* `#30657 `__: MAINT: avoid possible race condition by not touching ``os.environ``... 
+* `#30700 `__: BUG: validate contraction axes in tensordot (#30521) +* `#30701 `__: DOC: __array_namespace__info__: set_module not __module__ (#30679) +* `#30702 `__: BUG: fix free-threaded PyObject layout in replace_scalar_type_names... +* `#30703 `__: TST: fix limited API example in tests for latest Cython +* `#30709 `__: BUG: Fix some bugs found via valgrind (#30680) +* `#30712 `__: MAINT: replace ob_type access with Py_TYPE in PyArray_CheckExact +* `#30713 `__: BUG: Fixup the quantile promotion fixup +* `#30736 `__: BUG: fix thread safety of ``array_getbuffer`` (#30667) +* `#30737 `__: backport scipy-openblas version change + diff --git a/doc/source/release/2.5.0-notes.rst b/doc/source/release/2.5.0-notes.rst new file mode 100644 index 000000000000..1c07e859a7b9 --- /dev/null +++ b/doc/source/release/2.5.0-notes.rst @@ -0,0 +1,19 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.5.0 Release Notes +========================== + + +Highlights +========== + +*We'll choose highlights for this release near the end of the release cycle.* + + +.. if release snippets have been incorporated already, uncomment the follow + line (leave the `.. include:: directive) + +.. **Content from release note snippets in doc/release/upcoming_changes:** + +.. include:: notes-towncrier.rst diff --git a/doc/source/try_examples.json b/doc/source/try_examples.json new file mode 100644 index 000000000000..823d4a5d1e82 --- /dev/null +++ b/doc/source/try_examples.json @@ -0,0 +1,8 @@ +{ + "global_min_height": "400px", + "ignore_patterns": [ + "reference\/typing.html*", + "numpy.__array_namespace_info__.html*" + ] +} + diff --git a/doc/source/user/absolute_beginners.rst b/doc/source/user/absolute_beginners.rst index 61468132879f..f1007db45acc 100644 --- a/doc/source/user/absolute_beginners.rst +++ b/doc/source/user/absolute_beginners.rst @@ -42,6 +42,10 @@ enter in a script or at a Python prompt. Everything else is **output**, the results of running your code. 
Note that ``>>>`` and ``...`` are not part of the code and may cause an error if entered at a Python prompt. +To run the code in the examples, you can copy and paste it into a Python script or +REPL, or use the experimental interactive examples in the browser provided in various +locations in the documentation. + Why use NumPy? -------------- @@ -430,7 +434,7 @@ With ``np.reshape``, you can specify a few optional parameters:: ``a`` is the array to be reshaped. -``newshape`` is the new shape you want. You can specify an integer or a tuple of +``shape`` is the new shape you want. You can specify an integer or a tuple of integers. If you specify an integer, the result will be an array of that length. The shape should be compatible with the original shape. @@ -552,7 +556,7 @@ it's straightforward with NumPy. For example, if you start with this array:: - >>> a = np.array([[1 , 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) + >>> a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) You can easily print all of the values in the array that are less than 5. :: @@ -664,7 +668,10 @@ where you want to slice your array. :: array([4, 5, 6, 7, 8]) Here, you grabbed a section of your array from index position 3 through index -position 8. +position 8 but not including position 8 itself. + +*Reminder: Array indexes begin at 0. This means the first element of the array is at index 0, +the second element is at index 1, and so on.* You can also stack two existing arrays, both vertically and horizontally. Let's say you have two arrays, ``a1`` and ``a2``:: @@ -772,7 +779,7 @@ You can add the arrays together with the plus sign. :: >>> data = np.array([1, 2]) - >>> ones = np.ones(2, dtype=int) + >>> ones = np.ones(2, dtype=np.int_) >>> data + ones array([2, 3]) @@ -856,12 +863,13 @@ NumPy also performs aggregation functions. In addition to ``min``, ``max``, and result of multiplying the elements together, ``std`` to get the standard deviation, and more. 
:: + >>> data = np.array([1, 2, 3]) >>> data.max() - 2.0 + 3 >>> data.min() - 1.0 + 1 >>> data.sum() - 3.0 + 6 .. image:: images/np_aggregation.png @@ -1274,7 +1282,7 @@ Since ``ravel`` does not create a copy, it's memory efficient. If you start with this array:: - >>> x = np.array([[1 , 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) + >>> x = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) You can use ``flatten`` to flatten your array into a 1D array. :: @@ -1340,7 +1348,7 @@ For example:: With a single iterable argument, return its biggest item. The default keyword-only argument specifies an object to return if the provided iterable is empty. - With two or more arguments, return the largest argument. + With two or more ...arguments, return the largest argument. @@ -1733,4 +1741,5 @@ For directions regarding installing Matplotlib, see the official ------------------------------------------------------- -*Image credits: Jay Alammar https://jalammar.github.io/* +*Image credits: Jay Alammar* +`https://jalammar.github.io/ `_ diff --git a/doc/source/user/basics.broadcasting.rst b/doc/source/user/basics.broadcasting.rst index a753767655c7..2b03817bba91 100644 --- a/doc/source/user/basics.broadcasting.rst +++ b/doc/source/user/basics.broadcasting.rst @@ -23,6 +23,7 @@ NumPy operations are usually done on pairs of arrays on an element-by-element basis. In the simplest case, the two arrays must have exactly the same shape, as in the following example: + >>> import numpy as np >>> a = np.array([1.0, 2.0, 3.0]) >>> b = np.array([2.0, 2.0, 2.0]) >>> a * b @@ -32,6 +33,7 @@ NumPy's broadcasting rule relaxes this constraint when the arrays' shapes meet certain constraints. 
The simplest broadcasting example occurs when an array and a scalar value are combined in an operation: +>>> import numpy as np >>> a = np.array([1.0, 2.0, 3.0]) >>> b = 2.0 >>> a * b @@ -162,6 +164,7 @@ Here are examples of shapes that do not broadcast:: An example of broadcasting when a 1-d array is added to a 2-d array:: + >>> import numpy as np >>> a = np.array([[ 0.0, 0.0, 0.0], ... [10.0, 10.0, 10.0], ... [20.0, 20.0, 20.0], @@ -209,6 +212,7 @@ Broadcasting provides a convenient way of taking the outer product (or any other outer operation) of two arrays. The following example shows an outer addition operation of two 1-d arrays:: + >>> import numpy as np >>> a = np.array([0.0, 10.0, 20.0, 30.0]) >>> b = np.array([1.0, 2.0, 3.0]) >>> a[:, np.newaxis] + b diff --git a/doc/source/user/basics.copies.rst b/doc/source/user/basics.copies.rst index 482cbc189ec8..c0dbc8e8fb51 100644 --- a/doc/source/user/basics.copies.rst +++ b/doc/source/user/basics.copies.rst @@ -50,6 +50,7 @@ Views are created when elements can be addressed with offsets and strides in the original array. Hence, basic indexing always creates views. For example:: + >>> import numpy as np >>> x = np.arange(10) >>> x array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) @@ -60,13 +61,14 @@ For example:: >>> x array([ 0, 10, 11, 3, 4, 5, 6, 7, 8, 9]) >>> y - array([10, 11]) + array([10, 11]) Here, ``y`` gets changed when ``x`` is changed because it is a view. :ref:`advanced-indexing`, on the other hand, always creates copies. For example:: + >>> import numpy as np >>> x = np.arange(9).reshape(3, 3) >>> x array([[0, 1, 2], @@ -93,7 +95,7 @@ which in turn will not affect ``y`` at all:: [6, 7, 8]]) It must be noted here that during the assignment of ``x[[1, 2]]`` no view -or copy is created as the assignment happens in-place. +or copy is created as the assignment happens in-place. Other operations @@ -104,24 +106,9 @@ otherwise. In most cases, the strides can be modified to reshape the array with a view. 
However, in some cases where the array becomes non-contiguous (perhaps after a :meth:`.ndarray.transpose` operation), the reshaping cannot be done by modifying strides and requires a copy. -In these cases, we can raise an error by assigning the new shape to the -shape attribute of the array. For example:: - >>> x = np.ones((2, 3)) - >>> y = x.T # makes the array non-contiguous - >>> y - array([[1., 1.], - [1., 1.], - [1., 1.]]) - >>> z = y.view() - >>> z.shape = 6 - Traceback (most recent call last): - ... - AttributeError: Incompatible shape for in-place modification. Use - `.reshape()` to make a copy with the desired shape. - -Taking the example of another operation, :func:`.ravel` returns a contiguous -flattened view of the array wherever possible. On the other hand, +Taking the example of another operation, :func:`numpy.ravel` returns a +contiguous flattened view of the array wherever possible. On the other hand, :meth:`.ndarray.flatten` always returns a flattened copy of the array. However, to guarantee a view in most cases, ``x.reshape(-1)`` may be preferable. @@ -132,6 +119,7 @@ The :attr:`base <.ndarray.base>` attribute of the ndarray makes it easy to tell if an array is a view or a copy. The base attribute of a view returns the original array while it returns ``None`` for a copy. + >>> import numpy as np >>> x = np.arange(9) >>> x array([0, 1, 2, 3, 4, 5, 6, 7, 8]) diff --git a/doc/source/user/basics.creation.rst b/doc/source/user/basics.creation.rst index c9773dc0fcd0..19fa737d5f8d 100644 --- a/doc/source/user/basics.creation.rst +++ b/doc/source/user/basics.creation.rst @@ -20,7 +20,7 @@ There are 6 general mechanisms for creating arrays: 6) Use of special library functions (e.g., random) You can use these methods to create ndarrays or :ref:`structured_arrays`. -This document will cover general methods for ndarray creation. +This document will cover general methods for ndarray creation. 
1) Converting Python sequences to NumPy arrays ============================================== @@ -29,12 +29,13 @@ NumPy arrays can be defined using Python sequences such as lists and tuples. Lists and tuples are defined using ``[...]`` and ``(...)``, respectively. Lists and tuples can define ndarray creation: -* a list of numbers will create a 1D array, -* a list of lists will create a 2D array, +* a list of numbers will create a 1D array, +* a list of lists will create a 2D array, * further nested lists will create higher-dimensional arrays. In general, any array object is called an **ndarray** in NumPy. :: + >>> import numpy as np >>> a1D = np.array([1, 2, 3, 4]) >>> a2D = np.array([[1, 2], [3, 4]]) >>> a3D = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) @@ -47,6 +48,7 @@ are handled in C/C++ functions. When values do not fit and you are using a ``dtype``, NumPy may raise an error:: + >>> import numpy as np >>> np.array([127, 128, 129], dtype=np.int8) Traceback (most recent call last): ... @@ -56,8 +58,9 @@ An 8-bit signed integer represents integers from -128 to 127. Assigning the ``int8`` array to integers outside of this range results in overflow. This feature can often be misunderstood. If you perform calculations with mismatching ``dtypes``, you can get unwanted -results, for example:: +results, for example:: + >>> import numpy as np >>> a = np.array([2, 3, 4], dtype=np.uint32) >>> b = np.array([5, 6, 7], dtype=np.uint32) >>> c_unsigned32 = a - b @@ -69,10 +72,10 @@ results, for example:: Notice when you perform operations with two arrays of the same ``dtype``: ``uint32``, the resulting array is the same type. When you -perform operations with different ``dtype``, NumPy will +perform operations with different ``dtype``, NumPy will assign a new type that satisfies all of the array elements involved in the computation, here ``uint32`` and ``int32`` can both be represented in -as ``int64``. +as ``int64``. 
The default NumPy behavior is to create arrays in either 32 or 64-bit signed integers (platform dependent and matches C ``long`` size) or double precision @@ -84,8 +87,8 @@ you create the array. =========================================== .. - 40 functions seems like a small number, but the routies.array-creation - has ~47. I'm sure there are more. + 40 functions seems like a small number, but the routines.array-creation + has ~47. I'm sure there are more. NumPy has over 40 built-in functions for creating arrays as laid out in the :ref:`Array creation routines `. @@ -101,15 +104,16 @@ dimension of the array they create: The 1D array creation functions e.g. :func:`numpy.linspace` and :func:`numpy.arange` generally need at least two inputs, ``start`` and -``stop``. +``stop``. :func:`numpy.arange` creates arrays with regularly incrementing values. Check the documentation for complete information and examples. A few examples are shown:: + >>> import numpy as np >>> np.arange(10) array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - >>> np.arange(2, 10, dtype=float) + >>> np.arange(2, 10, dtype=np.float64) array([2., 3., 4., 5., 6., 7., 8., 9.]) >>> np.arange(2, 3, 0.1) array([2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9]) @@ -117,13 +121,14 @@ examples are shown:: Note: best practice for :func:`numpy.arange` is to use integer start, end, and step values. There are some subtleties regarding ``dtype``. In the second example, the ``dtype`` is defined. In the third example, the array is -``dtype=float`` to accommodate the step size of ``0.1``. Due to roundoff error, -the ``stop`` value is sometimes included. +``dtype=np.float64`` to accommodate the step size of ``0.1``. Due to roundoff error, +the ``stop`` value is sometimes included. :func:`numpy.linspace` will create arrays with a specified number of elements, and spaced equally between the specified beginning and end values. For example: :: + >>> import numpy as np >>> np.linspace(1., 4., 6) array([1. , 1.6, 2.2, 2.8, 3.4, 4. 
]) @@ -135,11 +140,12 @@ number of elements and the starting and end point. The previous ------------------------------- The 2D array creation functions e.g. :func:`numpy.eye`, :func:`numpy.diag`, and :func:`numpy.vander` -define properties of special matrices represented as 2D arrays. +define properties of special matrices represented as 2D arrays. ``np.eye(n, m)`` defines a 2D identity matrix. The elements where i=j (row index and column index are equal) are 1 and the rest are 0, as such:: + >>> import numpy as np >>> np.eye(3) array([[1., 0., 0.], [0., 1., 0.], @@ -153,7 +159,8 @@ and the rest are 0, as such:: the diagonal *or* if given a 2D array returns a 1D array that is only the diagonal elements. The two array creation functions can be helpful while doing linear algebra, as such:: - + + >>> import numpy as np >>> np.diag([1, 2, 3]) array([[1, 0, 0], [0, 2, 0], @@ -172,7 +179,8 @@ of the Vandermonde matrix is a decreasing power of the input 1D array or list or tuple, ``x`` where the highest polynomial order is ``n-1``. This array creation routine is helpful in generating linear least squares models, as such:: - + + >>> import numpy as np >>> np.vander(np.linspace(0, 2, 5), 2) array([[0. , 1. ], [0.5, 1. ], @@ -189,7 +197,7 @@ routine is helpful in generating linear least squares models, as such:: [ 8, 4, 2, 1], [27, 9, 3, 1], [64, 16, 4, 1]]) - + 3 - general ndarray creation functions -------------------------------------- @@ -197,19 +205,20 @@ The ndarray creation functions e.g. :func:`numpy.ones`, :func:`numpy.zeros`, and :meth:`~numpy.random.Generator.random` define arrays based upon the desired shape. The ndarray creation functions can create arrays with any dimension by specifying how many dimensions -and length along that dimension in a tuple or list. +and length along that dimension in a tuple or list. :func:`numpy.zeros` will create an array filled with 0 values with the specified shape. 
The default dtype is ``float64``:: + >>> import numpy as np >>> np.zeros((2, 3)) - array([[0., 0., 0.], + array([[0., 0., 0.], [0., 0., 0.]]) >>> np.zeros((2, 3, 2)) array([[[0., 0.], [0., 0.], [0., 0.]], - + [[0., 0.], [0., 0.], [0., 0.]]]) @@ -217,8 +226,9 @@ specified shape. The default dtype is ``float64``:: :func:`numpy.ones` will create an array filled with 1 values. It is identical to ``zeros`` in all other respects as such:: + >>> import numpy as np >>> np.ones((2, 3)) - array([[1., 1., 1.], + array([[1., 1., 1.], [1., 1., 1.]]) >>> np.ones((2, 3, 2)) array([[[1., 1.], @@ -236,6 +246,7 @@ library. Below, two arrays are created with shapes (2,3) and (2,3,2), respectively. The seed is set to 42 so you can reproduce these pseudorandom numbers:: + >>> import numpy as np >>> from numpy.random import default_rng >>> default_rng(42).random((2,3)) array([[0.77395605, 0.43887844, 0.85859792], @@ -250,14 +261,15 @@ pseudorandom numbers:: :func:`numpy.indices` will create a set of arrays (stacked as a one-higher dimensioned array), one per dimension with each representing variation in that -dimension: :: +dimension:: + >>> import numpy as np >>> np.indices((3,3)) - array([[[0, 0, 0], - [1, 1, 1], - [2, 2, 2]], - [[0, 1, 2], - [0, 1, 2], + array([[[0, 0, 0], + [1, 1, 1], + [2, 2, 2]], + [[0, 1, 2], + [0, 1, 2], [0, 1, 2]]]) This is particularly useful for evaluating functions of multiple dimensions on @@ -272,6 +284,7 @@ elements to a new variable, you have to explicitly :func:`numpy.copy` the array, otherwise the variable is a view into the original array. Consider the following example:: + >>> import numpy as np >>> a = np.array([1, 2, 3, 4, 5, 6]) >>> b = a[:2] >>> b += 1 @@ -283,6 +296,7 @@ In this example, you did not create a new array. You created a variable, would get the same result by adding 1 to ``a[:2]``. 
If you want to create a *new* array, use the :func:`numpy.copy` array creation routine as such:: + >>> import numpy as np >>> a = np.array([1, 2, 3, 4]) >>> b = a[:2].copy() >>> b += 1 @@ -296,6 +310,7 @@ There are a number of routines to join existing arrays e.g. :func:`numpy.vstack` :func:`numpy.hstack`, and :func:`numpy.block`. Here is an example of joining four 2-by-2 arrays into a 4-by-4 array using ``block``:: + >>> import numpy as np >>> A = np.ones((2, 2)) >>> B = np.eye(2, 2) >>> C = np.zeros((2, 2)) @@ -307,7 +322,7 @@ arrays into a 4-by-4 array using ``block``:: [ 0., 0., 0., -4.]]) Other routines use similar syntax to join ndarrays. Check the -routine's documentation for further examples and syntax. +routine's documentation for further examples and syntax. 4) Reading arrays from disk, either from standard or custom formats =================================================================== @@ -315,7 +330,7 @@ routine's documentation for further examples and syntax. This is the most common case of large array creation. The details depend greatly on the format of data on disk. This section gives general pointers on how to handle various formats. For more detailed examples of IO look at -:ref:`How to Read and Write files `. +:ref:`How to Read and Write files `. Standard binary formats ----------------------- @@ -354,6 +369,7 @@ and :func:`numpy.genfromtxt`. These functions have more involved use cases in Importing ``simple.csv`` is accomplished using :func:`numpy.loadtxt`:: + >>> import numpy as np >>> np.loadtxt('simple.csv', delimiter = ',', skiprows = 1) # doctest: +SKIP array([[0., 0.], [1., 1.], @@ -381,4 +397,4 @@ knowledge to interface with C or C++. NumPy is the fundamental library for array containers in the Python Scientific Computing stack. Many Python libraries, including SciPy, Pandas, and OpenCV, use NumPy ndarrays as the common format for data exchange, These libraries can create, -operate on, and work with NumPy arrays. 
+operate on, and work with NumPy arrays. diff --git a/doc/source/user/basics.dispatch.rst b/doc/source/user/basics.dispatch.rst index daea7474aa1a..8140517903c3 100644 --- a/doc/source/user/basics.dispatch.rst +++ b/doc/source/user/basics.dispatch.rst @@ -7,291 +7,18 @@ Writing custom array containers Numpy's dispatch mechanism, introduced in numpy version v1.16 is the recommended approach for writing custom N-dimensional array containers that are compatible with the numpy API and provide custom implementations of numpy -functionality. Applications include `dask `_ arrays, an -N-dimensional array distributed across multiple nodes, and `cupy +functionality. Applications include `dask `_ +arrays, an N-dimensional array distributed across multiple nodes, and `cupy `_ arrays, an N-dimensional array on a GPU. -To get a feel for writing custom array containers, we'll begin with a simple -example that has rather narrow utility but illustrates the concepts involved. +For comprehensive documentation on writing custom array containers, please see: ->>> import numpy as np ->>> class DiagonalArray: -... def __init__(self, N, value): -... self._N = N -... self._i = value -... def __repr__(self): -... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" -... def __array__(self, dtype=None, copy=None): -... if copy is False: -... raise ValueError( -... "`copy=False` isn't supported. A copy is always created." -... ) -... return self._i * np.eye(self._N, dtype=dtype) - -Our custom array can be instantiated like: - ->>> arr = DiagonalArray(5, 1) ->>> arr -DiagonalArray(N=5, value=1) - -We can convert to a numpy array using :func:`numpy.array` or -:func:`numpy.asarray`, which will call its ``__array__`` method to obtain a -standard ``numpy.ndarray``. 
- ->>> np.asarray(arr) -array([[1., 0., 0., 0., 0.], - [0., 1., 0., 0., 0.], - [0., 0., 1., 0., 0.], - [0., 0., 0., 1., 0.], - [0., 0., 0., 0., 1.]]) - -If we operate on ``arr`` with a numpy function, numpy will again use the -``__array__`` interface to convert it to an array and then apply the function -in the usual way. - ->>> np.multiply(arr, 2) -array([[2., 0., 0., 0., 0.], - [0., 2., 0., 0., 0.], - [0., 0., 2., 0., 0.], - [0., 0., 0., 2., 0.], - [0., 0., 0., 0., 2.]]) - - -Notice that the return type is a standard ``numpy.ndarray``. - ->>> type(np.multiply(arr, 2)) - - -How can we pass our custom array type through this function? Numpy allows a -class to indicate that it would like to handle computations in a custom-defined -way through the interfaces ``__array_ufunc__`` and ``__array_function__``. Let's -take one at a time, starting with ``__array_ufunc__``. This method covers -:ref:`ufuncs`, a class of functions that includes, for example, -:func:`numpy.multiply` and :func:`numpy.sin`. - -The ``__array_ufunc__`` receives: - -- ``ufunc``, a function like ``numpy.multiply`` -- ``method``, a string, differentiating between ``numpy.multiply(...)`` and - variants like ``numpy.multiply.outer``, ``numpy.multiply.accumulate``, and so - on. For the common case, ``numpy.multiply(...)``, ``method == '__call__'``. -- ``inputs``, which could be a mixture of different types -- ``kwargs``, keyword arguments passed to the function - -For this example we will only handle the method ``__call__`` - ->>> from numbers import Number ->>> class DiagonalArray: -... def __init__(self, N, value): -... self._N = N -... self._i = value -... def __repr__(self): -... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" -... def __array__(self, dtype=None, copy=None): -... if copy is False: -... raise ValueError( -... "`copy=False` isn't supported. A copy is always created." -... ) -... return self._i * np.eye(self._N, dtype=dtype) -... 
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): -... if method == '__call__': -... N = None -... scalars = [] -... for input in inputs: -... if isinstance(input, Number): -... scalars.append(input) -... elif isinstance(input, self.__class__): -... scalars.append(input._i) -... if N is not None: -... if N != input._N: -... raise TypeError("inconsistent sizes") -... else: -... N = input._N -... else: -... return NotImplemented -... return self.__class__(N, ufunc(*scalars, **kwargs)) -... else: -... return NotImplemented - -Now our custom array type passes through numpy functions. - ->>> arr = DiagonalArray(5, 1) ->>> np.multiply(arr, 3) -DiagonalArray(N=5, value=3) ->>> np.add(arr, 3) -DiagonalArray(N=5, value=4) ->>> np.sin(arr) -DiagonalArray(N=5, value=0.8414709848078965) - -At this point ``arr + 3`` does not work. - ->>> arr + 3 -Traceback (most recent call last): -... -TypeError: unsupported operand type(s) for +: 'DiagonalArray' and 'int' - -To support it, we need to define the Python interfaces ``__add__``, ``__lt__``, -and so on to dispatch to the corresponding ufunc. We can achieve this -conveniently by inheriting from the mixin -:class:`~numpy.lib.mixins.NDArrayOperatorsMixin`. - ->>> import numpy.lib.mixins ->>> class DiagonalArray(numpy.lib.mixins.NDArrayOperatorsMixin): -... def __init__(self, N, value): -... self._N = N -... self._i = value -... def __repr__(self): -... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" -... def __array__(self, dtype=None, copy=None): -... if copy is False: -... raise ValueError( -... "`copy=False` isn't supported. A copy is always created." -... ) -... return self._i * np.eye(self._N, dtype=dtype) -... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): -... if method == '__call__': -... N = None -... scalars = [] -... for input in inputs: -... if isinstance(input, Number): -... scalars.append(input) -... elif isinstance(input, self.__class__): -... scalars.append(input._i) -... 
if N is not None: -... if N != input._N: -... raise TypeError("inconsistent sizes") -... else: -... N = input._N -... else: -... return NotImplemented -... return self.__class__(N, ufunc(*scalars, **kwargs)) -... else: -... return NotImplemented - ->>> arr = DiagonalArray(5, 1) ->>> arr + 3 -DiagonalArray(N=5, value=4) ->>> arr > 0 -DiagonalArray(N=5, value=True) - -Now let's tackle ``__array_function__``. We'll create dict that maps numpy -functions to our custom variants. - ->>> HANDLED_FUNCTIONS = {} ->>> class DiagonalArray(numpy.lib.mixins.NDArrayOperatorsMixin): -... def __init__(self, N, value): -... self._N = N -... self._i = value -... def __repr__(self): -... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" -... def __array__(self, dtype=None, copy=None): -... if copy is False: -... raise ValueError( -... "`copy=False` isn't supported. A copy is always created." -... ) -... return self._i * np.eye(self._N, dtype=dtype) -... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): -... if method == '__call__': -... N = None -... scalars = [] -... for input in inputs: -... # In this case we accept only scalar numbers or DiagonalArrays. -... if isinstance(input, Number): -... scalars.append(input) -... elif isinstance(input, self.__class__): -... scalars.append(input._i) -... if N is not None: -... if N != input._N: -... raise TypeError("inconsistent sizes") -... else: -... N = input._N -... else: -... return NotImplemented -... return self.__class__(N, ufunc(*scalars, **kwargs)) -... else: -... return NotImplemented -... def __array_function__(self, func, types, args, kwargs): -... if func not in HANDLED_FUNCTIONS: -... return NotImplemented -... # Note: this allows subclasses that don't override -... # __array_function__ to handle DiagonalArray objects. -... if not all(issubclass(t, self.__class__) for t in types): -... return NotImplemented -... return HANDLED_FUNCTIONS[func](*args, **kwargs) -... 
- -A convenient pattern is to define a decorator ``implements`` that can be used -to add functions to ``HANDLED_FUNCTIONS``. - ->>> def implements(np_function): -... "Register an __array_function__ implementation for DiagonalArray objects." -... def decorator(func): -... HANDLED_FUNCTIONS[np_function] = func -... return func -... return decorator -... - -Now we write implementations of numpy functions for ``DiagonalArray``. -For completeness, to support the usage ``arr.sum()`` add a method ``sum`` that -calls ``numpy.sum(self)``, and the same for ``mean``. - ->>> @implements(np.sum) -... def sum(arr): -... "Implementation of np.sum for DiagonalArray objects" -... return arr._i * arr._N -... ->>> @implements(np.mean) -... def mean(arr): -... "Implementation of np.mean for DiagonalArray objects" -... return arr._i / arr._N -... ->>> arr = DiagonalArray(5, 1) ->>> np.sum(arr) -5 ->>> np.mean(arr) -0.2 - -If the user tries to use any numpy functions not included in -``HANDLED_FUNCTIONS``, a ``TypeError`` will be raised by numpy, indicating that -this operation is not supported. For example, concatenating two -``DiagonalArrays`` does not produce another diagonal array, so it is not -supported. - ->>> np.concatenate([arr, arr]) -Traceback (most recent call last): -... -TypeError: no implementation found for 'numpy.concatenate' on types that implement __array_function__: [] - -Additionally, our implementations of ``sum`` and ``mean`` do not accept the -optional arguments that numpy's implementation does. - ->>> np.sum(arr, axis=0) -Traceback (most recent call last): -... -TypeError: sum() got an unexpected keyword argument 'axis' - - -The user always has the option of converting to a normal ``numpy.ndarray`` with -:func:`numpy.asarray` and using standard numpy from there. 
- ->>> np.concatenate([np.asarray(arr), np.asarray(arr)]) -array([[1., 0., 0., 0., 0.], - [0., 1., 0., 0., 0.], - [0., 0., 1., 0., 0.], - [0., 0., 0., 1., 0.], - [0., 0., 0., 0., 1.], - [1., 0., 0., 0., 0.], - [0., 1., 0., 0., 0.], - [0., 0., 1., 0., 0.], - [0., 0., 0., 1., 0.], - [0., 0., 0., 0., 1.]]) - - -The implementation of ``DiagonalArray`` in this example only handles the -``np.sum`` and ``np.mean`` functions for brevity. Many other functions in the -Numpy API are also available to wrap and a full-fledged custom array container -can explicitly support all functions that Numpy makes available to wrap. +- :ref:`Interoperability with NumPy ` - the main guide + covering ``__array_ufunc__`` and ``__array_function__`` protocols +- :ref:`Special attributes and methods ` - see + ``class.__array__()`` for documentation and example implementing the + ``__array__()`` method Numpy provides some utilities to aid testing of custom array containers that implement the ``__array_ufunc__`` and ``__array_function__`` protocols in the @@ -300,7 +27,7 @@ implement the ``__array_ufunc__`` and ``__array_function__`` protocols in the To check if a Numpy function can be overridden via ``__array_ufunc__``, you can use :func:`~numpy.testing.overrides.allows_array_ufunc_override`: ->>> from np.testing.overrides import allows_array_ufunc_override +>>> from numpy.testing.overrides import allows_array_ufunc_override >>> allows_array_ufunc_override(np.add) True diff --git a/doc/source/user/basics.indexing.rst b/doc/source/user/basics.indexing.rst index 7481468fe6db..51d126f8183b 100644 --- a/doc/source/user/basics.indexing.rst +++ b/doc/source/user/basics.indexing.rst @@ -54,7 +54,7 @@ and accepts negative indices for indexing from the end of the array. :: It is not necessary to separate each dimension's index into its own set of square brackets. 
:: - >>> x.shape = (2, 5) # now x is 2-dimensional + >>> x = x.reshape((2, 5)) # now x is 2-dimensional >>> x[1, 3] 8 >>> x[1, -1] diff --git a/doc/source/user/basics.interoperability.rst b/doc/source/user/basics.interoperability.rst index ca0c39d7081f..ba72c2cb6a52 100644 --- a/doc/source/user/basics.interoperability.rst +++ b/doc/source/user/basics.interoperability.rst @@ -65,6 +65,18 @@ and outputs a NumPy ndarray (which is generally a view of the input object's dat buffer). The :ref:`dlpack:python-spec` page explains the ``__dlpack__`` protocol in detail. +``dtype`` interoperability +~~~~~~~~~~~~~~~~~~~~~~~~~~ +Similar to ``__array__()`` for array objects, defining ``__numpy_dtype__`` +allows a custom dtype object to be interoperable with NumPy. +The ``__numpy_dtype__`` must return a NumPy dtype instance (note that +``np.float64`` is not a dtype instance, ``np.dtype(np.float64)`` is). + +.. versionadded:: 2.4 + Before NumPy 2.4 a ``.dtype`` attribute was treated similarly. As of NumPy 2.4 + both are accepted and implementing ``__numpy_dtype__`` prevents ``.dtype`` + from being checked. + The array interface protocol ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -118,7 +130,7 @@ We can check that ``arr`` and ``new_arr`` share the same data buffer: The ``__array__()`` method ~~~~~~~~~~~~~~~~~~~~~~~~~~ -The ``__array__()`` method ensures that any NumPy-like object (an array, any +The `__array__() <../reference/arrays.classes.html#numpy.class.\_\_array\_\_>`__ method ensures that any NumPy-like object (an array, any object exposing the array interface, an object whose ``__array__()`` method returns an array or any nested sequence) that implements it can be used as a NumPy array. If possible, this will mean using ``__array__()`` to create a NumPy @@ -137,9 +149,6 @@ is needed. If a class implements the old signature ``__array__(self)``, for ``np.array(a)`` a warning will be raised saying that ``dtype`` and ``copy`` arguments are missing.
-To see an example of a custom array implementation including the use of -``__array__()``, see :ref:`basics.dispatch`. - The DLPack Protocol ~~~~~~~~~~~~~~~~~~~ @@ -204,7 +213,7 @@ The ``__array_ufunc__`` protocol A :ref:`universal function (or ufunc for short) ` is a “vectorized” wrapper for a function that takes a fixed number of specific inputs and produces a fixed number of specific outputs. The output of the ufunc (and -its methods) is not necessarily a ndarray, if not all input arguments are +its methods) is not necessarily an ndarray, if not all input arguments are ndarrays. Indeed, if any input defines an ``__array_ufunc__`` method, control will be passed completely to that function, i.e., the ufunc is overridden. The ``__array_ufunc__`` method defined on that (non-ndarray) object has access to @@ -274,10 +283,10 @@ Consider the following: >>> type(ser) pandas.core.series.Series -Now, ``ser`` is **not** a ndarray, but because it +Now, ``ser`` is **not** an ndarray, but because it `implements the __array_ufunc__ protocol `__, -we can apply ufuncs to it as if it were a ndarray: +we can apply ufuncs to it as if it were an ndarray: >>> np.exp(ser) 0 2.718282 diff --git a/doc/source/user/basics.io.genfromtxt.rst b/doc/source/user/basics.io.genfromtxt.rst index 64dd46153091..2a1523ba209b 100644 --- a/doc/source/user/basics.io.genfromtxt.rst +++ b/doc/source/user/basics.io.genfromtxt.rst @@ -131,10 +131,6 @@ marker(s) is simply ignored:: [7., 8.], [9., 0.]]) -.. versionadded:: 1.7.0 - - When ``comments`` is set to ``None``, no lines are treated as comments. - .. note:: There is one notable exception to this behavior: if the optional argument @@ -205,16 +201,16 @@ The main way to control how the sequences of strings we have read from the file are converted to other types is to set the ``dtype`` argument. Acceptable values for this argument are: -* a single type, such as ``dtype=float``. +* a single type, such as ``dtype=np.float64``. 
The output will be 2D with the given dtype, unless a name has been associated with each column with the use of the ``names`` argument - (see below). Note that ``dtype=float`` is the default for + (see below). Note that ``dtype=np.float64`` is the default for :func:`~numpy.genfromtxt`. -* a sequence of types, such as ``dtype=(int, float, float)``. +* a sequence of types, such as ``dtype=(np.int_, np.float64, np.float64)``. * a comma-separated string, such as ``dtype="i4,f8,|U3"``. * a dictionary with two keys ``'names'`` and ``'formats'``. * a sequence of tuples ``(name, type)``, such as - ``dtype=[('A', int), ('B', float)]``. + ``dtype=[('A', np.int_), ('B', np.float64)]``. * an existing :class:`numpy.dtype` object. * the special value ``None``. In that case, the type of the columns will be determined from the data @@ -247,7 +243,7 @@ each column. A first possibility is to use an explicit structured dtype, as mentioned previously:: >>> data = StringIO("1 2 3\n 4 5 6") - >>> np.genfromtxt(data, dtype=[(_, int) for _ in "abc"]) + >>> np.genfromtxt(data, dtype=[(_, np.int_) for _ in "abc"]) array([(1, 2, 3), (4, 5, 6)], dtype=[('a', '>> data = StringIO("1 2 3\n 4 5 6") - >>> ndtype=[('a',int), ('b', float), ('c', int)] + >>> ndtype=[('a', np.int_), ('b', np.float64), ('c', np.int_)] >>> names = ["A", "B", "C"] >>> np.genfromtxt(data, names=names, dtype=ndtype) array([(1, 2., 3), (4, 5., 6)], @@ -293,7 +289,7 @@ with the standard NumPy default of ``"f%i"``, yielding names like ``f0``, ``f1`` and so forth:: >>> data = StringIO("1 2 3\n 4 5 6") - >>> np.genfromtxt(data, dtype=(int, float, int)) + >>> np.genfromtxt(data, dtype=(np.int_, np.float64, np.int_)) array([(1, 2., 3), (4, 5., 6)], dtype=[('f0', '>> data = StringIO("1 2 3\n 4 5 6") - >>> np.genfromtxt(data, dtype=(int, float, int), names="a") + >>> np.genfromtxt(data, dtype=(np.int_, np.float64, np.int_), names="a") array([(1, 2., 3), (4, 5., 6)], dtype=[('a', '>> data = StringIO("1 2 3\n 4 5 6") - >>> 
np.genfromtxt(data, dtype=(int, float, int), defaultfmt="var_%02i") + >>> np.genfromtxt(data, dtype=(np.int_, np.float64, np.int_), defaultfmt="var_%02i") array([(1, 2., 3), (4, 5., 6)], dtype=[('var_00', '>> data = "N/A, 2, 3\n4, ,???" >>> kwargs = dict(delimiter=",", - ... dtype=int, + ... dtype=np.int_, ... names="a,b,c", ... missing_values={0:"N/A", 'b':" ", 2:"???"}, ... filling_values={0:0, 'b':0, 2:-999}) diff --git a/doc/source/user/basics.rec.rst b/doc/source/user/basics.rec.rst index 8402ee7f8e17..af14bcd10201 100644 --- a/doc/source/user/basics.rec.rst +++ b/doc/source/user/basics.rec.rst @@ -535,7 +535,7 @@ Similarly to tuples, structured scalars can also be indexed with an integer:: >>> scalar = np.array([(1, 2., 3.)], dtype='i, f, f')[0] >>> scalar[0] - 1 + np.int32(1) >>> scalar[1] = 4 Thus, tuples might be thought of as the native Python equivalent to numpy's @@ -595,7 +595,7 @@ removed:: >>> dt = np.dtype("i1,V3,i4,V1")[["f0", "f2"]] >>> dt - dtype({'names':['f0','f2'], 'formats':['i1','>> np.result_type(dt) dtype([('f0', 'i1'), ('f2', '>> dt = np.dtype("i1,V3,i4,V1", align=True)[["f0", "f2"]] >>> dt - dtype({'names':['f0','f2'], 'formats':['i1','>> np.result_type(dt) dtype([('f0', 'i1'), ('f2', '>> np.result_type(dt).isalignedstruct diff --git a/doc/source/user/basics.strings.rst b/doc/source/user/basics.strings.rst index 460bc1fe589f..cbbaa8f6e3b3 100644 --- a/doc/source/user/basics.strings.rst +++ b/doc/source/user/basics.strings.rst @@ -109,7 +109,7 @@ that empty strings are used to populate empty arrays: >>> np.empty(3, dtype=StringDType()) array(['', '', ''], dtype=StringDType()) -Optionally, you can pass create an instance of ``StringDType`` with +Optionally, you can create an instance of ``StringDType`` with support for missing values by passing ``na_object`` as a keyword argument for the initializer: diff --git a/doc/source/user/basics.subclassing.rst b/doc/source/user/basics.subclassing.rst index 83be116b7e7f..202561a958a8 100644 --- 
a/doc/source/user/basics.subclassing.rst +++ b/doc/source/user/basics.subclassing.rst @@ -42,7 +42,7 @@ This can result in surprising behavior if you use NumPy methods or functions you have not explicitly tested. On the other hand, compared to other interoperability approaches, -subclassing can be a useful because many thing will "just work". +subclassing can be useful because many things will "just work". This means that subclassing can be a convenient approach and for a long time it was also often the only available approach. @@ -158,21 +158,21 @@ __new__ documentation For example, consider the following Python code: >>> class C: ->>> def __new__(cls, *args): ->>> print('Cls in __new__:', cls) ->>> print('Args in __new__:', args) ->>> # The `object` type __new__ method takes a single argument. ->>> return object.__new__(cls) ->>> def __init__(self, *args): ->>> print('type(self) in __init__:', type(self)) ->>> print('Args in __init__:', args) +... def __new__(cls, *args): +... print('Cls in __new__:', cls) +... print('Args in __new__:', args) +... # The `object` type __new__ method takes a single argument. +... return object.__new__(cls) +... def __init__(self, *args): +... print('type(self) in __init__:', type(self)) +... print('Args in __init__:', args) meaning that we get: >>> c = C('hello') -Cls in __new__: +Cls in __new__: Args in __new__: ('hello',) -type(self) in __init__: +type(self) in __init__: Args in __init__: ('hello',) When we call ``C('hello')``, the ``__new__`` method gets its own class @@ -227,7 +227,7 @@ like:: obj = ndarray.__new__(subtype, shape, ... -where ``subdtype`` is the subclass. Thus the returned view is of the +where ``subtype`` is the subclass. Thus the returned view is of the same class as the subclass, rather than being of class ``ndarray``. 
That solves the problem of returning views of the same type, but now @@ -346,7 +346,7 @@ Simple example - adding an extra attribute to ndarray class InfoArray(np.ndarray): - def __new__(subtype, shape, dtype=float, buffer=None, offset=0, + def __new__(subtype, shape, dtype=np.float64, buffer=None, offset=0, strides=None, order=None, info=None): # Create the ndarray instance of our type, given the usual # ndarray input arguments. This will call the standard @@ -461,8 +461,6 @@ So: ``__array_ufunc__`` for ufuncs ============================== -.. versionadded:: 1.13 - A subclass can override what happens when executing numpy ufuncs on it by overriding the default ``ndarray.__array_ufunc__`` method. This method is executed *instead* of the ufunc and should return either the result of the @@ -471,7 +469,7 @@ implemented. The signature of ``__array_ufunc__`` is:: - def __array_ufunc__(ufunc, method, *inputs, **kwargs): + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - *ufunc* is the ufunc object that was called. - *method* is a string indicating how the Ufunc was called, either @@ -569,7 +567,7 @@ which inputs and outputs it converted. Hence, e.g., Note that another approach would be to use ``getattr(ufunc, methods)(*inputs, **kwargs)`` instead of the ``super`` call. For this example, the result would be identical, but there is a difference if another operand -also defines ``__array_ufunc__``. E.g., lets assume that we evaluate +also defines ``__array_ufunc__``. E.g., let's assume that we evaluate ``np.add(a, b)``, where ``b`` is an instance of another class ``B`` that has an override. If you use ``super`` as in the example, ``ndarray.__array_ufunc__`` will notice that ``b`` has an override, which @@ -781,5 +779,3 @@ your function's signature should accept ``**kwargs``. For example: This object is now compatible with ``np.sum`` again because any extraneous arguments (i.e. 
keywords that are not ``axis`` or ``dtype``) will be hidden away in the ``**unused_kwargs`` parameter. - - diff --git a/doc/source/user/basics.types.rst b/doc/source/user/basics.types.rst index afecdf0d77f1..d6914f437faa 100644 --- a/doc/source/user/basics.types.rst +++ b/doc/source/user/basics.types.rst @@ -35,18 +35,6 @@ See :ref:`arrays.dtypes.constructing` for more information about specifying and constructing data type objects, including how to specify parameters like the byte order. -To convert the type of an array, use the .astype() method. For example: :: - - >>> z.astype(np.float64) #doctest: +NORMALIZE_WHITESPACE - array([0., 1., 2.]) - -Note that, above, we could have used the *Python* float object as a dtype -instead of `numpy.float64`. NumPy knows that -:class:`int` refers to `numpy.int_`, :class:`bool` means -`numpy.bool`, that :class:`float` is `numpy.float64` and -:class:`complex` is `numpy.complex128`. The other data-types do not have -Python equivalents. - To determine the type of an array, look at the dtype attribute:: >>> z.dtype @@ -56,7 +44,7 @@ dtype objects also contain information about the type, such as its bit-width and its byte-order. The data type can also be used indirectly to query properties of the type, such as whether it is an integer:: - >>> d = np.dtype(int64) + >>> d = np.dtype(np.int64) >>> d dtype('int64') @@ -66,6 +54,28 @@ properties of the type, such as whether it is an integer:: >>> np.issubdtype(d, np.floating) False +To convert the type of an array, use the .astype() method. For example:: + + >>> z.astype(np.float64) #doctest: +NORMALIZE_WHITESPACE + array([0., 1., 2.]) + +Note that, above, we could have used the *Python* float object as a dtype +instead of `numpy.float64`. NumPy knows that +:class:`int` refers to `numpy.int_`, :class:`bool` means +`numpy.bool`, that :class:`float` is `numpy.float64` and +:class:`complex` is `numpy.complex128`. The other data-types do not have +Python equivalents. 
+ +Sometimes the conversion can overflow, for instance when converting a `numpy.int64` value +300 to `numpy.int8`. NumPy follows C casting rules, so that value would overflow and +become 44 ``(300 - 256)``. If you wish to avoid such overflows, you can specify that the +overflow action fail by using ``same_value`` for the ``casting`` argument (see also +:ref:`overflow-errors`):: + + >>> z.astype(np.float64, casting="same_value") #doctest: +NORMALIZE_WHITESPACE + array([0., 1., 2.]) + + Numerical Data Types -------------------- @@ -217,7 +227,7 @@ confusion with builtin python type names, such as `numpy.bool_`. * - N/A - ``'P'`` - ``uintptr_t`` - - Guaranteed to hold pointers. Character code only (Python and C). + - Guaranteed to hold pointers without sign. Character code only (Python and C). * - `numpy.int32` or `numpy.int64` - `numpy.long` @@ -314,7 +324,7 @@ but gives -1486618624 (incorrect) for a 32-bit integer. >>> np.power(100, 9, dtype=np.int64) 1000000000000000000 >>> np.power(100, 9, dtype=np.int32) - -1486618624 + np.int32(-1486618624) The behaviour of NumPy and Python integer types differs significantly for integer overflows and may confuse users expecting NumPy integers to behave @@ -342,6 +352,30 @@ range of possible values. >>> np.power(100, 100, dtype=np.float64) 1e+200 +Floating point precision +======================== + +Many functions in NumPy, especially those in `numpy.linalg`, involve floating-point +arithmetic, which can introduce small inaccuracies due to the way computers +represent decimal numbers. 
For instance, when performing basic arithmetic operations +involving floating-point numbers: + + >>> 0.3 - 0.2 - 0.1 # This does not equal 0 due to floating-point precision + -2.7755575615628914e-17 + +To handle such cases, it's advisable to use functions like `np.isclose` to compare +values, rather than checking for exact equality: + + >>> np.isclose(0.3 - 0.2 - 0.1, 0, rtol=1e-05) # Check for closeness to 0 + True + +In this example, `np.isclose` accounts for the minor inaccuracies that occur in +floating-point calculations by applying a relative tolerance, ensuring that results +within a small threshold are considered close. + +For information about precision in calculations, see `Floating-Point Arithmetic `_. + + Extended precision ================== diff --git a/doc/source/user/basics.ufuncs.rst b/doc/source/user/basics.ufuncs.rst index 8146ee9096f0..5c91ab6c0168 100644 --- a/doc/source/user/basics.ufuncs.rst +++ b/doc/source/user/basics.ufuncs.rst @@ -18,6 +18,25 @@ is, a ufunc is a ":term:`vectorized `" wrapper for a function that takes a fixed number of specific inputs and produces a fixed number of specific outputs. +There are also :ref:`generalized ufuncs ` which +are functions over vectors (or arrays) instead of single-element scalars. +For example, :func:`numpy.add` is a ufunc that operates element-by-element, +while :func:`numpy.matmul` is a gufunc that operates on vectors/matrices:: + + >>> a = np.arange(6).reshape(3, 2) + >>> a + array([[0, 1], + [2, 3], + [4, 5]]) + >>> np.add(a, a) # element-wise addition + array([[ 0, 2], + [ 4, 6], + [ 8, 10]]) + >>> np.matmul(a, a.T) # matrix multiplication (3x2) @ (2x3) -> (3x3) + array([[ 1, 3, 5], + [ 3, 13, 23], + [ 5, 23, 41]]) + In NumPy, universal functions are instances of the :class:`numpy.ufunc` class. Many of the built-in functions are implemented in compiled C code. 
The basic ufuncs operate on scalars, but @@ -35,12 +54,30 @@ One can also produce custom :class:`numpy.ufunc` instances using the Ufunc methods ============= -All ufuncs have four methods. They can be found at -:ref:`ufuncs.methods`. However, these methods only make sense on scalar -ufuncs that take two input arguments and return one output argument. +All ufuncs have 5 methods. 4 reduce-like methods +(:meth:`~numpy.ufunc.reduce`, :meth:`~numpy.ufunc.accumulate`, +:meth:`~numpy.ufunc.reduceat`, :meth:`~numpy.ufunc.outer`) and one +for inplace operations (:meth:`~numpy.ufunc.at`). +See :ref:`ufuncs.methods` for more. However, these methods only make sense on +ufuncs that take two input arguments and return one output argument (so-called +"scalar" ufuncs since the inner loop operates on a single scalar value). Attempting to call these methods on other ufuncs will cause a :exc:`ValueError`. +For example, :func:`numpy.add` takes two inputs and returns one output, +so its methods work:: + + >>> np.add.reduce([1, 2, 3]) + 6 + +But :func:`numpy.divmod` returns two outputs (quotient and remainder), +so calling its methods raises an error:: + + >>> np.divmod.reduce([1, 2, 3]) + Traceback (most recent call last): + ... + ValueError: reduce only supported for functions returning a single value + The reduce-like methods all take an *axis* keyword, a *dtype* keyword, and an *out* keyword, and the arrays must all have dimension >= 1. The *axis* keyword specifies the axis of the array over which the reduction @@ -74,9 +111,9 @@ an integer (or Boolean) data-type and smaller than the size of the :class:`numpy.int_` data type, it will be internally upcast to the :class:`.int_` (or :class:`numpy.uint`) data-type. 
In the previous example:: - >>> x.dtype + >>> x.dtype dtype('int64') - >>> np.multiply.reduce(x, dtype=float) + >>> np.multiply.reduce(x, dtype=np.float64) array([ 0., 28., 80.]) Finally, the *out* keyword allows you to @@ -84,10 +121,10 @@ provide an output array (or a tuple of output arrays for multi-output ufuncs). If *out* is given, the *dtype* argument is only used for the internal computations. Considering ``x`` from the previous example:: - >>> y = np.zeros(3, dtype=int) + >>> y = np.zeros(3, dtype=np.int_) >>> y array([0, 0, 0]) - >>> np.multiply.reduce(x, dtype=float, out=y) + >>> np.multiply.reduce(x, dtype=np.float64, out=y) array([ 0, 28, 80]) Ufuncs also have a fifth method, :func:`numpy.ufunc.at`, that allows in place @@ -103,10 +140,16 @@ of the previous operation for that item. Output type determination ========================= -The output of the ufunc (and its methods) is not necessarily an -:class:`ndarray `, if all input arguments are not -:class:`ndarrays `. Indeed, if any input defines an -:obj:`~.class.__array_ufunc__` method, +If the input arguments of the ufunc (or its methods) are +:class:`ndarrays `, then the output will be as well. +The exception is when the result is zero-dimensional, in which case the +output will be converted to an `array scalar `. This can +be avoided by passing in ``out=...`` or ``out=Ellipsis``. + +If some or all of the input arguments are not +:class:`ndarrays `, then the output may not be an +:class:`ndarray ` either. +Indeed, if any input defines an :obj:`~.class.__array_ufunc__` method, control will be passed completely to that function, i.e., the ufunc is :ref:`overridden `. @@ -140,14 +183,14 @@ element is generally a scalar, but can be a vector or higher-order sub-array for generalized ufuncs). Standard :ref:`broadcasting rules ` are applied so that inputs not sharing exactly the -same shapes can still be usefully operated on. +same shapes can still be usefully operated on. 
By these rules, if an input has a dimension size of 1 in its shape, the first data entry in that dimension will be used for all calculations along that dimension. In other words, the stepping machinery of the :term:`ufunc` will simply not step along that dimension (the :ref:`stride <memory-layout>` will be 0 for that dimension). - + .. _ufuncs.casting: @@ -293,7 +336,7 @@ platform, these registers will be regularly checked during calculation. Error handling is controlled on a per-thread basis, and can be configured using the functions :func:`numpy.seterr` and :func:`numpy.seterrcall`. - + .. _ufuncs.overrides: diff --git a/doc/source/user/byteswapping.rst b/doc/source/user/byteswapping.rst index 01247500347f..8f08d2a01a3d 100644 --- a/doc/source/user/byteswapping.rst +++ b/doc/source/user/byteswapping.rst @@ -40,9 +40,9 @@ there are two integers, and that they are 16 bit and big-endian: >>> import numpy as np >>> big_end_arr = np.ndarray(shape=(2,),dtype='>i2', buffer=big_end_buffer) >>> big_end_arr[0] -1 +np.int16(1) >>> big_end_arr[1] -770 +np.int16(770) Note the array ``dtype`` above of ``>i2``. The ``>`` means 'big-endian' (``<`` is little-endian) and ``i2`` means 'signed 2-byte integer'. For @@ -99,14 +99,14 @@ We make something where they don't match: >>> wrong_end_dtype_arr = np.ndarray(shape=(2,),dtype='<i2', buffer=big_end_buffer) >>> wrong_end_dtype_arr[0] -256 +np.int16(256) The obvious fix for this situation is to change the dtype so it gives the correct endianness: >>> fixed_end_dtype_arr = wrong_end_dtype_arr.view(np.dtype('<i2')) >>> fixed_end_dtype_arr[0] -1 +np.int16(1) Note the array has not changed in memory: @@ -122,7 +122,7 @@ that needs a certain byte ordering.
>>> fixed_end_mem_arr = wrong_end_dtype_arr.byteswap() >>> fixed_end_mem_arr[0] -1 +np.int16(1) Now the array *has* changed in memory: @@ -140,7 +140,7 @@ the previous operations: >>> swapped_end_arr = big_end_arr.byteswap() >>> swapped_end_arr = swapped_end_arr.view(swapped_end_arr.dtype.newbyteorder()) >>> swapped_end_arr[0] -1 +np.int16(1) >>> swapped_end_arr.tobytes() == big_end_buffer False @@ -149,7 +149,7 @@ can be achieved with the ndarray astype method: >>> swapped_end_arr = big_end_arr.astype('<i2') >>> swapped_end_arr[0] -1 +np.int16(1) >>> swapped_end_arr.tobytes() == big_end_buffer False diff --git a/doc/source/user/c-info.beyond-basics.rst b/doc/source/user/c-info.beyond-basics.rst index 697c0c045e4f..eadeafe51e8e 100644 --- a/doc/source/user/c-info.beyond-basics.rst +++ b/doc/source/user/c-info.beyond-basics.rst @@ -268,6 +268,9 @@ specifies your data-type. This type number should be stored and made available by your module so that other modules can use it to recognize your data-type. +Note that this API is inherently thread-unsafe. See :ref:`thread_safety` for more +details about thread safety in NumPy. + Registering a casting function ------------------------------ diff --git a/doc/source/user/c-info.python-as-glue.rst b/doc/source/user/c-info.python-as-glue.rst index 753a44a0174f..19763f7c2a51 100644 --- a/doc/source/user/c-info.python-as-glue.rst +++ b/doc/source/user/c-info.python-as-glue.rst @@ -144,7 +144,7 @@ written C-code. Cython ====== -`Cython <http://cython.org>`_ is a compiler for a Python dialect that adds +`Cython <https://cython.org>`_ is a compiler for a Python dialect that adds (optional) static typing for speed, and allows mixing C or C++ code into your modules. It produces C or C++ extensions that can be compiled and imported in Python code.
Unlike f2py, it includes no automatic facility for compiling and linking -the extension module (which must be done in the usual fashion). It -does provide a modified distutils class called ``build_ext`` which lets -you build an extension module from a ``.pyx`` source. Thus, you could -write in a ``setup.py`` file: +the extension module. However, many Python build tools have support for Cython. + +Here is an example of how to set up a Python project that contains a Cython +extension. The example uses the `meson-python Python build backend +`_ and `the meson build system +`_. This is the same build system NumPy itself uses. +. + +First, create a file named ``my_extension.pyx``. + +.. code-block:: cython + + cimport numpy as np + + def say_hello(): + print("Hello!") + +This file lives next to a ``__init__.py`` file with the following content: .. code-block:: python - from Cython.Distutils import build_ext - from distutils.extension import Extension - from distutils.core import setup - import numpy - - setup(name='mine', description='Nothing', - ext_modules=[Extension('filter', ['filter.pyx'], - include_dirs=[numpy.get_include()])], - cmdclass = {'build_ext':build_ext}) - -Adding the NumPy include directory is, of course, only necessary if -you are using NumPy arrays in the extension module (which is what we -assume you are using Cython for). The distutils extensions in NumPy -also include support for automatically producing the extension-module -and linking it from a ``.pyx`` file. It works so that if the user does -not have Cython installed, then it looks for a file with the same -file-name but a ``.c`` extension which it then uses instead of trying -to produce the ``.c`` file again. - -If you just use Cython to compile a standard Python module, then you -will get a C extension module that typically runs a bit faster than the -equivalent Python module. Further speed increases can be gained by using -the ``cdef`` keyword to statically define C variables. 
+ from .my_extension import say_hello + +Now you need to create two more files to set up the build system. First, a +``meson.build`` file: + +.. code-block:: meson + + project( + 'module_with_extension', + 'c', 'cython', + version: '0.0.1', + license: 'MIT', + ) + + cython = find_program('cython') + py = import('python').find_installation(pure: false) + + numpy_nodepr_api = ['-DNPY_NO_DEPRECATED_API=NPY_2_0_API_VERSION'] + + np_dep = declare_dependency(dependencies: dependency('numpy'), + compile_args: numpy_nodepr_api) + + py.extension_module( + 'my_extension', + 'my_extension.pyx', + dependencies: [np_dep], + install: true, + subdir: 'my_module_with_extension', + ) + + py.install_sources( + '__init__.py', + subdir: 'my_module_with_extension', + ) + +And a ``pyproject.toml`` file with the following content: + +.. code-block:: toml + + [build-system] + build-backend = "mesonpy" + requires = [ + "meson-python", + "Cython>=3.0.0", + "numpy", + ] + + [project] + name = "my_module_with_extension" + version = "0.0.1" + license = "MIT" + dependencies = ["numpy"] + +You should then be able to do the following command to build, install, and call +the function defined in the extension from Python: + +.. code-block:: bash + + $ pip install . + $ python -c "from my_module_with_extension import say_hello; say_hello()" + "Hello!" + +Adding a NumPy dependency to your Meson configuration is only necessary +if you are using the NumPy C API in the extension module via ``cimport +numpy`` (which is what we assume you are using Cython for). If you just +use Cython to compile a standard Python module, then you will get a C +extension module that typically runs a bit faster than the equivalent +Python module. Further speed increases can be gained by using the +``cdef`` keyword to statically define C variables. + +See the meson and meson-python documentation for more details on how to +build more complicated extensions. 
Let's look at two examples we've seen before to see how they might be -implemented using Cython. These examples were compiled into extension -modules using Cython 0.21.1. +implemented using Cython. Complex addition in Cython @@ -763,19 +825,13 @@ Conclusion Using ctypes is a powerful way to connect Python with arbitrary C-code. Its advantages for extending Python include -- clean separation of C code from Python code - - - no need to learn a new syntax except Python and C - - - allows reuse of C code - - - functionality in shared libraries written for other purposes can be - obtained with a simple Python wrapper and search for the library. - - -- easy integration with NumPy through the ctypes attribute - -- full argument checking with the ndpointer class factory +* clean separation of C code from Python code +* no need to learn a new syntax except Python and C +* allows reuse of C code +* functionality in shared libraries written for other purposes can be + obtained with a simple Python wrapper and search for the library. +* easy integration with NumPy through the ctypes attribute +* full argument checking with the ndpointer class factory Its disadvantages include @@ -831,7 +887,7 @@ file that defines the interface. Often, however, this ``.i`` file can be parts of the header itself. The interface usually needs a bit of tweaking to be very useful. This ability to parse C/C++ headers and auto-generate the interface still makes SWIG a useful approach to -adding functionalilty from C/C++ into Python, despite the other +adding functionality from C/C++ into Python, despite the other methods that have emerged that are more targeted to Python. SWIG can actually target extensions for several languages, but the typemaps usually have to be language-specific. 
Nonetheless, with modifications diff --git a/doc/source/user/c-info.ufunc-tutorial.rst b/doc/source/user/c-info.ufunc-tutorial.rst index 6b1aca65ed00..e5773f8232b8 100644 --- a/doc/source/user/c-info.ufunc-tutorial.rst +++ b/doc/source/user/c-info.ufunc-tutorial.rst @@ -74,9 +74,9 @@ For comparison and general edification of the reader we provide a simple implementation of a C extension of ``logit`` that uses no numpy. -To do this we need two files. The first is the C file which contains -the actual code, and the second is the ``setup.py`` file used to create -the module. +To do this we need three files. The first is the C file which contains +the actual code, and the others are two project files that describe +how to create the module. .. code-block:: c @@ -157,65 +157,91 @@ the module. return m; } -To use the ``setup.py file``, place ``setup.py`` and ``spammodule.c`` -in the same folder. Then ``python setup.py build`` will build the module to -import, or ``python setup.py install`` will install the module to your -site-packages directory. +To create the module, one proceeds as one would for a Python package, creating +a ``pyproject.toml`` file, which defines a build back-end, and then another +file for that backend which describes how to compile the code. For the backend, +we recommend ``meson-python``, as we use it for numpy itself, but below we +also show how to use the older ``setuptools``. - .. code-block:: python +.. tab-set:: - ''' - setup.py file for spammodule.c + .. tab-item:: meson - Calling - $python setup.py build_ext --inplace - will build the extension library in the current file. + Sample ``pyproject.toml`` and ``meson.build``. - Calling - $python setup.py build - will build a file that looks like ./build/lib*, where - lib* is a file that begins with lib. The library will - be in this file and end with a C library extension, - such as .so + .. 
code-block:: toml - Calling - $python setup.py install - will install the module in your site-packages file. + [project] + name = "spam" + version = "0.1" - See the setuptools section 'Building Extension Modules' - at setuptools.pypa.io for more information. - ''' + [build-system] + requires = ["meson-python"] + build-backend = "mesonpy" - from setuptools import setup, Extension - import numpy as np + .. code-block:: meson - module1 = Extension('spam', sources=['spammodule.c']) + project('spam', 'c') - setup(name='spam', version='1.0', ext_modules=[module1]) + py = import('python').find_installation() + sources = files('spammodule.c') -Once the spam module is imported into python, you can call logit + extension_module = py.extension_module( + 'spam', + sources, + install: true, + ) + + .. tab-item:: setuptools + + Sample ``pyproject.toml`` and ``setup.py``. + + .. code-block:: toml + + [project] + name = "spam" + version = "0.1" + + [build-system] + requires = ["setuptools"] + build-backend = "setuptools.build_meta" + + .. code-block:: python + + from setuptools import setup, Extension + + spammodule = Extension('spam', sources=['spammodule.c']) + + setup(name='spam', version='1.0', + ext_modules=[spammodule]) + +With either of the above, one can build and install the ``spam`` package with, + +.. code-block:: bash + + pip install . + +Once the ``spam`` module is imported into python, you can call logit via ``spam.logit``. Note that the function used above cannot be applied as-is to numpy arrays. To do so we must call :py:func:`numpy.vectorize` -on it. 
For example, if a python interpreter is opened in the file containing -the spam library or spam has been installed, one can perform the -following commands: - ->>> import numpy as np ->>> import spam ->>> spam.logit(0) --inf ->>> spam.logit(1) -inf ->>> spam.logit(0.5) -0.0 ->>> x = np.linspace(0,1,10) ->>> spam.logit(x) -TypeError: only length-1 arrays can be converted to Python scalars ->>> f = np.vectorize(spam.logit) ->>> f(x) -array([ -inf, -2.07944154, -1.25276297, -0.69314718, -0.22314355, - 0.22314355, 0.69314718, 1.25276297, 2.07944154, inf]) +on it. For example:: + + >>> import numpy as np + >>> import spam + >>> spam.logit(0) + -inf + >>> spam.logit(1) + inf + >>> spam.logit(0.5) + 0.0 + >>> x = np.linspace(0,1,10) + >>> spam.logit(x) + TypeError: only length-1 arrays can be converted to Python scalars + >>> f = np.vectorize(spam.logit) + >>> f(x) + array([ -inf, -2.07944154, -1.25276297, -0.69314718, -0.22314355, + 0.22314355, 0.69314718, 1.25276297, 2.07944154, inf]) THE RESULTING LOGIT FUNCTION IS NOT FAST! ``numpy.vectorize`` simply loops over ``spam.logit``. The loop is done at the C level, but the numpy @@ -236,12 +262,11 @@ Example NumPy ufunc for one dtype For simplicity we give a ufunc for a single dtype, the ``'f8'`` ``double``. As in the previous section, we first give the ``.c`` file -and then the ``setup.py`` file used to create the module containing the -ufunc. +and then the files used to create a ``npufunc`` module containing the ufunc. The place in the code corresponding to the actual computations for -the ufunc are marked with ``/\* BEGIN main ufunc computation \*/`` and -``/\* END main ufunc computation \*/``. The code in between those lines is +the ufunc are marked with ``/* BEGIN main ufunc computation */`` and +``/* END main ufunc computation */``. The code in between those lines is the primary thing that must be changed to create your own ufunc. .. 
code-block:: c @@ -339,59 +364,77 @@ the primary thing that must be changed to create your own ufunc. return m; } -This is a ``setup.py file`` for the above code. As before, the module -can be build via calling ``python setup.py build`` at the command prompt, -or installed to site-packages via ``python setup.py install``. The module -can also be placed into a local folder e.g. ``npufunc_directory`` below -using ``python setup.py build_ext --inplace``. +For the files needed to create the module, the main difference from our +previous example is that we now need to declare dependencies on numpy. - .. code-block:: python +.. tab-set:: - ''' - setup.py file for single_type_logit.c - Note that since this is a numpy extension - we add an include_dirs=[get_include()] so that the - extension is built with numpy's C/C++ header files. + .. tab-item:: meson - Calling - $python setup.py build_ext --inplace - will build the extension library in the npufunc_directory. + Sample ``pyproject.toml`` and ``meson.build``. - Calling - $python setup.py build - will build a file that looks like ./build/lib*, where - lib* is a file that begins with lib. The library will - be in this file and end with a C library extension, - such as .so + .. code-block:: toml - Calling - $python setup.py install - will install the module in your site-packages file. + [project] + name = "npufunc" + dependencies = ["numpy"] + version = "0.1" - See the setuptools section 'Building Extension Modules' - at setuptools.pypa.io for more information. - ''' + [build-system] + requires = ["meson-python", "numpy"] + build-backend = "mesonpy" - from setuptools import setup, Extension - from numpy import get_include + .. 
code-block:: meson - npufunc = Extension('npufunc', - sources=['single_type_logit.c'], - include_dirs=[get_include()]) + project('npufunc', 'c') - setup(name='npufunc', version='1.0', ext_modules=[npufunc]) + py = import('python').find_installation() + np_dep = dependency('numpy') + sources = files('single_type_logit.c') -After the above has been installed, it can be imported and used as follows. + extension_module = py.extension_module( + 'npufunc', + sources, + dependencies: [np_dep], + install: true, + ) ->>> import numpy as np ->>> import npufunc ->>> npufunc.logit(0.5) -np.float64(0.0) ->>> a = np.linspace(0,1,5) ->>> npufunc.logit(a) -array([ -inf, -1.09861229, 0. , 1.09861229, inf]) + .. tab-item:: setuptools + + Sample ``pyproject.toml`` and ``setup.py``. + + .. code-block:: toml + + [project] + name = "npufunc" + dependencies = ["numpy"] + version = "0.1" + + [build-system] + requires = ["setuptools", "numpy"] + build-backend = "setuptools.build_meta" + + .. code-block:: python + + from setuptools import setup, Extension + from numpy import get_include + + npufunc = Extension('npufunc', + sources=['single_type_logit.c'], + include_dirs=[get_include()]) + + setup(name='npufunc', version='1.0', ext_modules=[npufunc]) +After the above has been installed, it can be imported and used as follows:: + + >>> import numpy as np + >>> import npufunc + >>> npufunc.logit(0.5) + np.float64(0.0) + >>> a = np.linspace(0, 1, 5) + >>> npufunc.logit(a) + array([ -inf, -1.09861229, 0. , 1.09861229, inf]) .. _`sec:NumPy-many-loop`: @@ -402,14 +445,14 @@ Example NumPy ufunc with multiple dtypes .. index:: pair: ufunc; adding new -We finally give an example of a full ufunc, with inner loops for -half-floats, floats, doubles, and long doubles. As in the previous -sections we first give the ``.c`` file and then the corresponding -``setup.py`` file. +We now extend the above to a full ``logit`` ufunc, with inner loops for +floats, doubles, and long doubles. 
Here, we can use the same build files +as above, except we need to change the source file from ``single_type_logit.c`` +to ``multi_type_logit.c``. The places in the code corresponding to the actual computations for -the ufunc are marked with ``/\* BEGIN main ufunc computation \*/`` and -``/\* END main ufunc computation \*/``. The code in between those lines +the ufunc are marked with ``/* BEGIN main ufunc computation */`` and +``/* END main ufunc computation */``. The code in between those lines is the primary thing that must be changed to create your own ufunc. @@ -419,7 +462,6 @@ is the primary thing that must be changed to create your own ufunc. #include #include "numpy/ndarraytypes.h" #include "numpy/ufuncobject.h" - #include "numpy/halffloat.h" #include /* @@ -514,39 +556,13 @@ is the primary thing that must be changed to create your own ufunc. } - static void half_float_logit(char **args, const npy_intp *dimensions, - const npy_intp *steps, void *data) - { - npy_intp i; - npy_intp n = dimensions[0]; - char *in = args[0], *out = args[1]; - npy_intp in_step = steps[0], out_step = steps[1]; - - float tmp; - - for (i = 0; i < n; i++) { - - /* BEGIN main ufunc computation */ - tmp = npy_half_to_float(*(npy_half *)in); - tmp /= 1 - tmp; - tmp = logf(tmp); - *((npy_half *)out) = npy_float_to_half(tmp); - /* END main ufunc computation */ - - in += in_step; - out += out_step; - } - } - /*This gives pointers to the above functions*/ - PyUFuncGenericFunction funcs[4] = {&half_float_logit, - &float_logit, + PyUFuncGenericFunction funcs[3] = {&float_logit, &double_logit, &long_double_logit}; - static const char types[8] = {NPY_HALF, NPY_HALF, - NPY_FLOAT, NPY_FLOAT, + static const char types[6] = {NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_LONGDOUBLE, NPY_LONGDOUBLE}; @@ -586,92 +602,40 @@ is the primary thing that must be changed to create your own ufunc. return m; } -This is a ``setup.py`` file for the above code. 
As before, the module -can be build via calling ``python setup.py build`` at the command prompt, -or installed to site-packages via ``python setup.py install``. - - .. code-block:: python - - ''' - setup.py file for multi_type_logit.c - Note that since this is a numpy extension - we add an include_dirs=[get_include()] so that the - extension is built with numpy's C/C++ header files. - Furthermore, we also have to include the npymath - lib for half-float d-type. - - Calling - $python setup.py build_ext --inplace - will build the extension library in the current file. - - Calling - $python setup.py build - will build a file that looks like ./build/lib*, where - lib* is a file that begins with lib. The library will - be in this file and end with a C library extension, - such as .so - - Calling - $python setup.py install - will install the module in your site-packages file. - - See the setuptools section 'Building Extension Modules' - at setuptools.pypa.io for more information. - ''' - - from setuptools import setup, Extension - from numpy import get_include - from os import path - - path_to_npymath = path.join(get_include(), '..', 'lib') - npufunc = Extension('npufunc', - sources=['multi_type_logit.c'], - include_dirs=[get_include()], - # Necessary for the half-float d-type. - library_dirs=[path_to_npymath], - libraries=["npymath"]) - - setup(name='npufunc', version='1.0', ext_modules=[npufunc]) - - After the above has been installed, it can be imported and used as follows. >>> import numpy as np >>> import npufunc >>> npufunc.logit(0.5) np.float64(0.0) ->>> a = np.linspace(0,1,5) +>>> a = np.linspace(0, 1, 5, dtype=np.float32) >>> npufunc.logit(a) -array([ -inf, -1.09861229, 0. , 1.09861229, inf]) +:1: RuntimeWarning: divide by zero encountered in logit +array([ -inf, -1.0986123, 0. , 1.0986123, inf], + dtype=float32) +.. 
note:: + Supporting ``float16`` (half-precision) in custom ufuncs is more complex + due to its non-standard C representation and conversion requirements. The + above code can process ``float16`` input, but will do so by converting it + to ``float32``. The result will then be ``float32`` too, but one can + convert it back to ``float16`` by passing in a suitable output, as in + ``npufunc.logit(a, out=np.empty_like(a))``. For examples of actual + ``float16`` loops, see the numpy source code. .. _`sec:NumPy-many-arg`: Example NumPy ufunc with multiple arguments/return values ========================================================= -Our final example is a ufunc with multiple arguments. It is a modification -of the code for a logit ufunc for data with a single dtype. We -compute ``(A * B, logit(A * B))``. +Creating a ufunc with multiple arguments is not difficult. Here, we make a +modification of the code for a logit ufunc, where we compute ``(A * B, +logit(A * B))``. For simplicity, we only create a loop for doubles. -We only give the C code as the setup.py file is exactly the same as -the ``setup.py`` file in `Example NumPy ufunc for one dtype`_, except that -the line - - .. code-block:: python - - npufunc = Extension('npufunc', - sources=['single_type_logit.c'], - include_dirs=[get_include()]) - -is replaced with - - .. code-block:: python - - npufunc = Extension('npufunc', - sources=['multi_arg_logit.c'], - include_dirs=[get_include()]) +We again only give the C code as the files needed to create the module are the +same as before, but with the source file name replaced by +``multi_arg_logit.c``. The C file is given below. The ufunc generated takes two arguments ``A`` and ``B``. It returns a tuple whose first element is ``A * B`` and whose second @@ -684,7 +648,6 @@ as well as all other properties of a ufunc. 
#include #include "numpy/ndarraytypes.h" #include "numpy/ufuncobject.h" - #include "numpy/halffloat.h" #include /* @@ -786,29 +749,12 @@ Example NumPy ufunc with structured array dtype arguments This example shows how to create a ufunc for a structured array dtype. For the example we show a trivial ufunc for adding two arrays with dtype ``'u8,u8,u8'``. The process is a bit different from the other examples since -a call to :c:func:`PyUFunc_FromFuncAndData` doesn't fully register ufuncs for +a call to :c:func:`PyUFunc_FromFuncAndData` cannot register ufuncs for custom dtypes and structured array dtypes. We need to also call :c:func:`PyUFunc_RegisterLoopForDescr` to finish setting up the ufunc. -We only give the C code as the ``setup.py`` file is exactly the same as -the ``setup.py`` file in `Example NumPy ufunc for one dtype`_, except that -the line - - .. code-block:: python - - npufunc = Extension('npufunc', - sources=['single_type_logit.c'], - include_dirs=[get_include()]) - -is replaced with - - .. code-block:: python - - npufunc = Extension('npufunc', - sources=['add_triplet.c'], - include_dirs=[get_include()]) - -The C file is given below. +We only give the C code as the files needed to construct the module are again +exactly the same as before, except that the source file is now ``add_triplet.c``. .. code-block:: c @@ -865,15 +811,9 @@ The C file is given below. } } - /* This a pointer to the above function */ - PyUFuncGenericFunction funcs[1] = {&add_uint64_triplet}; - - /* These are the input and return dtypes of add_uint64_triplet. */ - static const char types[3] = {NPY_UINT64, NPY_UINT64, NPY_UINT64}; - static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, - "struct_ufunc_test", + "npufunc", NULL, -1, StructUfuncTestMethods, @@ -913,7 +853,7 @@ The C file is given below. 
dtypes[2] = dtype; /* Register ufunc for structured dtype */ - PyUFunc_RegisterLoopForDescr(add_triplet, + PyUFunc_RegisterLoopForDescr((PyUFuncObject *)add_triplet, dtype, &add_uint64_triplet, dtypes, @@ -926,37 +866,11 @@ The C file is given below. return m; } -.. index:: - pair: ufunc; adding new - -The returned ufunc object is a callable Python object. It should be -placed in a (module) dictionary under the same name as was used in the -name argument to the ufunc-creation routine. The following example is -adapted from the umath module - - .. code-block:: c +Sample usage:: - static PyUFuncGenericFunction atan2_functions[] = { - PyUFunc_ff_f, PyUFunc_dd_d, - PyUFunc_gg_g, PyUFunc_OO_O_method}; - static void *atan2_data[] = { - (void *)atan2f, (void *)atan2, - (void *)atan2l, (void *)"arctan2"}; - static const char atan2_signatures[] = { - NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, - NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, - NPY_LONGDOUBLE, NPY_LONGDOUBLE, NPY_LONGDOUBLE - NPY_OBJECT, NPY_OBJECT, NPY_OBJECT}; - ... - /* in the module initialization code */ - PyObject *f, *dict, *module; - ... - dict = PyModule_GetDict(module); - ... - f = PyUFunc_FromFuncAndData(atan2_functions, - atan2_data, atan2_signatures, 4, 2, 1, - PyUFunc_None, "arctan2", - "a safe and correct arctan(x1/x2)", 0); - PyDict_SetItemString(dict, "arctan2", f); - Py_DECREF(f); - ... + >>> import npufunc + >>> import numpy as np + >>> a = np.array([(1, 2, 3), (4, 5, 6)], "u8,u8,u8") + >>> npufunc.add_triplet(a, a) + array([(2, 4, 6), (8, 10, 12)], + dtype=[('f0', '>> import os >>> # list all files created in testsetup. If needed there are - >>> # convenienes in e.g. astroquery to do this more automatically + >>> # conveniences in e.g. astroquery to do this more automatically >>> for filename in ['csv.txt', 'fixedwidth.txt', 'nan.txt', 'skip.txt', 'tabs.txt']: ... 
os.remove(filename) diff --git a/doc/source/user/how-to-partition.rst b/doc/source/user/how-to-partition.rst index e90b39e9440c..bd418594e231 100644 --- a/doc/source/user/how-to-partition.rst +++ b/doc/source/user/how-to-partition.rst @@ -237,17 +237,17 @@ meshgrid. This means that when it is indexed, only one dimension of each returned array is greater than 1. This avoids repeating the data and thus saves memory, which is often desirable. -These sparse coordinate grids are intended to be use with :ref:`broadcasting`. +These sparse coordinate grids are intended to be used with :ref:`broadcasting`. When all coordinates are used in an expression, broadcasting still leads to a fully-dimensional result array. :: >>> np.ogrid[0:4, 0:6] - [array([[0], + (array([[0], [1], [2], - [3]]), array([[0, 1, 2, 3, 4, 5]])] + [3]]), array([[0, 1, 2, 3, 4, 5]])) All three methods described here can be used to evaluate function values on a grid. diff --git a/doc/source/user/how-to-print.rst b/doc/source/user/how-to-print.rst new file mode 100644 index 000000000000..6195b6ed4c70 --- /dev/null +++ b/doc/source/user/how-to-print.rst @@ -0,0 +1,112 @@ +.. _how-to-print: + +======================= + Printing NumPy Arrays +======================= + + +This page explains how to control the formatting of printed NumPy arrays. +Note that these printing options apply only to arrays, not to scalars. + +Defining printing options +========================= + +Applying settings globally +-------------------------- + +Use :func:`numpy.set_printoptions` to change printing options for the entire runtime session. 
To inspect current print settings, use :func:`numpy.get_printoptions`: + + >>> np.set_printoptions(precision=2) + >>> np.get_printoptions() + {'edgeitems': 3, 'threshold': 1000, 'floatmode': 'maxprec', 'precision': 2, 'suppress': False, 'linewidth': 75, 'nanstr': 'nan', 'infstr': 'inf', 'sign': '-', 'formatter': None, 'legacy': False, 'override_repr': None} + +To restore the default settings, use: + + >>> np.set_printoptions(edgeitems=3, infstr='inf', + ... linewidth=75, nanstr='nan', precision=8, + ... suppress=False, threshold=1000, formatter=None) + + +Applying settings temporarily +----------------------------- + +Use :func:`numpy.printoptions` as a context manager to temporarily override print settings within a specific scope: + + + >>> arr = np.array([0.155, 0.184, 0.173]) + >>> with np.printoptions(precision=2): + ... print(arr) + [0.15 0.18 0.17] + + +All keywords that apply to :func:`numpy.set_printoptions` also apply to :func:`numpy.printoptions`. + + +Changing the number of digits of precision +========================================== + +The default number of fractional digits displayed is 8. You can change this number using ``precision`` keyword. + + >>> arr = np.array([0.1, 0.184, 0.17322]) + >>> with np.printoptions(precision=2): + ... print(arr) + [0.1 0.18 0.17] + + +The ``floatmode`` option determines how the ``precision`` setting is interpreted. +By default, ``floatmode=maxprec_equal`` displays values with the minimal number of digits needed to uniquely represent them, +using the same number of digits across all elements. +If you want to show exactly the same number of digits specified by ``precision``, use ``floatmode=fixed``: + + >>> arr = np.array([0.1, 0.184, 0.173], dtype=np.float32) + >>> with np.printoptions(precision=2, floatmode="fixed"): + ... 
print(arr) + [0.10 0.18 0.17] + + +Changing how `nan` and `inf` are displayed +========================================== + +By default, `numpy.nan` is displayed as `nan` and `numpy.inf` is displayed as `inf`. +You can override these representations using the ``nanstr`` and ``infstr`` options: + + >>> arr = np.array([np.inf, np.nan, 0]) + >>> with np.printoptions(nanstr="NAN", infstr="INF"): + ... print(arr) + [INF NAN 0.] + + +Controlling scientific notations +================================ + +By default, NumPy uses scientific notation when: + +- The absolute value of the smallest number is less than ``1e-4``, or +- The ratio of the largest to the smallest absolute value is greater than ``1e3`` + + >>> arr = np.array([0.00002, 210000.0, 3.14]) + >>> print(arr) + [2.00e-05 2.10e+05 3.14e+00] + +To suppress scientific notation and always use fixed-point notation, set ``suppress=True``: + + >>> arr = np.array([0.00002, 210000.0, 3.14]) + >>> with np.printoptions(suppress=True): + ... print(arr) + [ 0.00002 210000. 3.14 ] + + + +Applying custom formatting functions +==================================== + +You can apply custom formatting functions to specific or all data types using ``formatter`` keyword. +See :func:`numpy.set_printoptions` for more details on supported format keys. + +For example, to format `datetime64` values with a custom function: + + >>> arr = np.array([np.datetime64("2025-01-01"), np.datetime64("2024-01-01")]) + >>> with np.printoptions(formatter={"datetime":lambda x: f"(Year: {x.item().year}, Month: {x.item().month})"}): + ... print(arr) + [(Year: 2025, Month: 1) (Year: 2024, Month: 1)] + diff --git a/doc/source/user/howtos_index.rst b/doc/source/user/howtos_index.rst index ca30f7e9115d..a8a8229dd7dd 100644 --- a/doc/source/user/howtos_index.rst +++ b/doc/source/user/howtos_index.rst @@ -16,3 +16,4 @@ the package, see the :ref:`API reference `. 
how-to-index how-to-verify-bug how-to-partition + how-to-print diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst index 8d4c500fd021..5a002ba8375e 100644 --- a/doc/source/user/index.rst +++ b/doc/source/user/index.rst @@ -26,7 +26,7 @@ details are found in :ref:`reference`. :maxdepth: 1 numpy-for-matlab-users - NumPy tutorials + NumPy tutorials howtos_index .. toctree:: diff --git a/doc/source/user/misc.rst b/doc/source/user/misc.rst index 6d652e3ca67f..a882afa37afd 100644 --- a/doc/source/user/misc.rst +++ b/doc/source/user/misc.rst @@ -7,7 +7,7 @@ Miscellaneous IEEE 754 floating point special values -------------------------------------- -Special values defined in numpy: nan, inf, +Special values defined in numpy: :data:`~numpy.nan`, :data:`~numpy.inf` NaNs can be used as a poor-man's mask (if you don't care what the original value was) @@ -17,29 +17,39 @@ Note: cannot use equality to test NaNs. E.g.: :: >>> myarr = np.array([1., 0., np.nan, 3.]) >>> np.nonzero(myarr == np.nan) (array([], dtype=int64),) + +:: + >>> np.nan == np.nan # is always False! Use special numpy functions instead. False + +:: + >>> myarr[myarr == np.nan] = 0. # doesn't work >>> myarr array([ 1., 0., nan, 3.]) + +:: + >>> myarr[np.isnan(myarr)] = 0. 
# use this instead find >>> myarr array([1., 0., 0., 3.]) -Other related special value functions: :: +Other related special value functions: - isinf(): True if value is inf - isfinite(): True if not nan or inf - nan_to_num(): Map nan to 0, inf to max float, -inf to min float +- :func:`~numpy.isnan` - True if value is nan +- :func:`~numpy.isinf` - True if value is inf +- :func:`~numpy.isfinite` - True if not nan or inf +- :func:`~numpy.nan_to_num` - Map nan to 0, inf to max float, -inf to min float The following corresponds to the usual functions except that nans are excluded -from the results: :: +from the results: - nansum() - nanmax() - nanmin() - nanargmax() - nanargmin() +- :func:`~numpy.nansum` +- :func:`~numpy.nanmax` +- :func:`~numpy.nanmin` +- :func:`~numpy.nanargmax` +- :func:`~numpy.nanargmin` >>> x = np.arange(10.) >>> x[3] = np.nan @@ -47,168 +57,3 @@ from the results: :: nan >>> np.nansum(x) 42.0 - -How numpy handles numerical exceptions --------------------------------------- - -The default is to ``'warn'`` for ``invalid``, ``divide``, and ``overflow`` -and ``'ignore'`` for ``underflow``. But this can be changed, and it can be -set individually for different kinds of exceptions. The different behaviors -are: - - - 'ignore' : Take no action when the exception occurs. - - 'warn' : Print a `RuntimeWarning` (via the Python `warnings` module). - - 'raise' : Raise a `FloatingPointError`. - - 'call' : Call a function specified using the `seterrcall` function. - - 'print' : Print a warning directly to ``stdout``. - - 'log' : Record error in a Log object specified by `seterrcall`. - -These behaviors can be set for all kinds of errors or specific ones: - - - all : apply to all numeric exceptions - - invalid : when NaNs are generated - - divide : divide by zero (for integers as well!) - - overflow : floating point overflows - - underflow : floating point underflows - -Note that integer divide-by-zero is handled by the same machinery. 
-These behaviors are set on a per-thread basis. - -Examples --------- - -:: - - >>> oldsettings = np.seterr(all='warn') - >>> np.zeros(5,dtype=np.float32)/0. - Traceback (most recent call last): - ... - RuntimeWarning: invalid value encountered in divide - >>> j = np.seterr(under='ignore') - >>> np.array([1.e-100])**10 - array([0.]) - >>> j = np.seterr(invalid='raise') - >>> np.sqrt(np.array([-1.])) - Traceback (most recent call last): - ... - FloatingPointError: invalid value encountered in sqrt - >>> def errorhandler(errstr, errflag): - ... print("saw stupid error!") - >>> np.seterrcall(errorhandler) - >>> j = np.seterr(all='call') - >>> np.zeros(5, dtype=np.int32)/0 - saw stupid error! - array([nan, nan, nan, nan, nan]) - >>> j = np.seterr(**oldsettings) # restore previous - ... # error-handling settings - -Interfacing to C ----------------- -Only a survey of the choices. Little detail on how each works. - -1) Bare metal, wrap your own C-code manually. - - - Plusses: - - - Efficient - - No dependencies on other tools - - - Minuses: - - - Lots of learning overhead: - - - need to learn basics of Python C API - - need to learn basics of numpy C API - - need to learn how to handle reference counting and love it. - - - Reference counting often difficult to get right. 
- - - getting it wrong leads to memory leaks, and worse, segfaults - -2) Cython - - - Plusses: - - - avoid learning C API's - - no dealing with reference counting - - can code in pseudo python and generate C code - - can also interface to existing C code - - should shield you from changes to Python C api - - has become the de-facto standard within the scientific Python community - - fast indexing support for arrays - - - Minuses: - - - Can write code in non-standard form which may become obsolete - - Not as flexible as manual wrapping - -3) ctypes - - - Plusses: - - - part of Python standard library - - good for interfacing to existing shareable libraries, particularly - Windows DLLs - - avoids API/reference counting issues - - good numpy support: arrays have all these in their ctypes - attribute: :: - - a.ctypes.data - a.ctypes.data_as - a.ctypes.shape - a.ctypes.shape_as - a.ctypes.strides - a.ctypes.strides_as - - - Minuses: - - - can't use for writing code to be turned into C extensions, only a wrapper - tool. - -4) SWIG (automatic wrapper generator) - - - Plusses: - - - around a long time - - multiple scripting language support - - C++ support - - Good for wrapping large (many functions) existing C libraries - - - Minuses: - - - generates lots of code between Python and the C code - - can cause performance problems that are nearly impossible to optimize - out - - interface files can be hard to write - - doesn't necessarily avoid reference counting issues or needing to know - API's - -5) Psyco - - - Plusses: - - - Turns pure python into efficient machine code through jit-like - optimizations - - very fast when it optimizes well - - - Minuses: - - - Only on intel (windows?) - - Doesn't do much for numpy? - -Interfacing to Fortran: ------------------------ -The clear choice to wrap Fortran code is -`f2py `_. - -Pyfort is an older alternative, but not supported any longer. -Fwrap is a newer project that looked promising but isn't being developed any -longer. 
- -Interfacing to C++: -------------------- - 1) Cython - 2) CXX - 3) Boost.python - 4) SWIG - 5) SIP (used mainly in PyQT) diff --git a/doc/source/user/numpy-for-matlab-users.rst b/doc/source/user/numpy-for-matlab-users.rst index d9b5c460944c..e05e123e224c 100644 --- a/doc/source/user/numpy-for-matlab-users.rst +++ b/doc/source/user/numpy-for-matlab-users.rst @@ -574,12 +574,12 @@ Notes \ **Submatrix**: Assignment to a submatrix can be done with lists of indices using the ``ix_`` command. E.g., for 2D array ``a``, one might -do: ``ind=[1, 3]; a[np.ix_(ind, ind)] += 100``. +do: ``ind=[1, 3]; a[np.ix_(ind, ind)] += 100``. \ **HELP**: There is no direct equivalent of MATLAB's ``which`` command, but the commands :func:`help` will usually list the filename where the function is located. Python also has an ``inspect`` module (do -``import inspect``) which provides a ``getfile`` that often works. +``import inspect``) which provides a ``getfile`` that often works. \ **INDEXING**: MATLAB uses one based indexing, so the initial element of a sequence has index 1. Python uses zero based indexing, so the @@ -674,11 +674,10 @@ are only a handful of key differences between the two. - Operators ``*`` and ``@``, functions ``dot()``, and ``multiply()``: - - For ``array``, **``*`` means element-wise multiplication**, while - **``@`` means matrix multiplication**; they have associated functions - ``multiply()`` and ``dot()``. (Before Python 3.5, ``@`` did not exist - and one had to use ``dot()`` for matrix multiplication). - - For ``matrix``, **``*`` means matrix multiplication**, and for + - For ``array``, ``*`` **means element-wise multiplication**, while + ``@`` **means matrix multiplication**; they have associated functions + ``multiply()`` and ``dot()``. + - For ``matrix``, ``*`` **means matrix multiplication**, and for element-wise multiplication one has to use the ``multiply()`` function. 
- Handling of vectors (one-dimensional arrays) @@ -709,7 +708,7 @@ are only a handful of key differences between the two. - The ``array`` constructor **takes (nested) Python sequences as initializers**. As in, ``array([[1,2,3],[4,5,6]])``. - The ``matrix`` constructor additionally **takes a convenient - string initializer**. As in ``matrix("[1 2 3; 4 5 6]")``. + string initializer**. As in ``matrix("[1 2 3; 4 5 6]")``. There are pros and cons to using both: @@ -810,10 +809,10 @@ Links ===== Another somewhat outdated MATLAB/NumPy cross-reference can be found at -http://mathesaurus.sf.net/ +https://mathesaurus.sf.net/ An extensive list of tools for scientific work with Python can be -found in the `topical software page `__. +found in the `topical software page `__. See `List of Python software: scripting diff --git a/doc/source/user/plots/matplotlib1.py b/doc/source/user/plots/matplotlib1.py index 2cbf87ffa2fa..8c1b516752e1 100644 --- a/doc/source/user/plots/matplotlib1.py +++ b/doc/source/user/plots/matplotlib1.py @@ -1,7 +1,8 @@ import matplotlib.pyplot as plt + import numpy as np a = np.array([2, 1, 5, 7, 4, 6, 8, 14, 10, 9, 18, 20, 22]) -plt.plot(a) -plt.show() \ No newline at end of file +plt.plot(a) +plt.show() diff --git a/doc/source/user/plots/matplotlib2.py b/doc/source/user/plots/matplotlib2.py index e15986c2512d..85690b24d54a 100644 --- a/doc/source/user/plots/matplotlib2.py +++ b/doc/source/user/plots/matplotlib2.py @@ -1,8 +1,9 @@ import matplotlib.pyplot as plt + import numpy as np x = np.linspace(0, 5, 20) y = np.linspace(0, 10, 20) -plt.plot(x, y, 'purple') # line +plt.plot(x, y, 'purple') # line plt.plot(x, y, 'o') # dots -plt.show() \ No newline at end of file +plt.show() diff --git a/doc/source/user/plots/matplotlib3.py b/doc/source/user/plots/matplotlib3.py index 7b56067ef463..212088b78464 100644 --- a/doc/source/user/plots/matplotlib3.py +++ b/doc/source/user/plots/matplotlib3.py @@ -1,6 +1,7 @@ -import numpy as np import matplotlib.pyplot as plt 
+import numpy as np + fig = plt.figure() ax = fig.add_subplot(projection='3d') X = np.arange(-5, 5, 0.15) @@ -11,4 +12,4 @@ ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap='viridis') -plt.show() \ No newline at end of file +plt.show() diff --git a/doc/source/user/plots/meshgrid_plot.py b/doc/source/user/plots/meshgrid_plot.py index 91032145af68..d91a9aa42e21 100644 --- a/doc/source/user/plots/meshgrid_plot.py +++ b/doc/source/user/plots/meshgrid_plot.py @@ -1,6 +1,7 @@ -import numpy as np import matplotlib.pyplot as plt +import numpy as np + x = np.array([0, 1, 2, 3]) y = np.array([0, 1, 2, 3, 4, 5]) xx, yy = np.meshgrid(x, y) diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst index 4d418af44ddb..1208bd1a6347 100644 --- a/doc/source/user/quickstart.rst +++ b/doc/source/user/quickstart.rst @@ -163,7 +163,7 @@ The type of the array can also be explicitly specified at creation time: :: - >>> c = np.array([[1, 2], [3, 4]], dtype=complex) + >>> c = np.array([[1, 2], [3, 4]], dtype=np.complex128) >>> c array([[1.+0.j, 2.+0.j], [3.+0.j, 4.+0.j]]) @@ -346,7 +346,7 @@ existing array rather than create a new one. :: >>> rg = np.random.default_rng(1) # create instance of default random number generator - >>> a = np.ones((2, 3), dtype=int) + >>> a = np.ones((2, 3), dtype=np.int_) >>> b = rg.random((2, 3)) >>> a *= 3 >>> a @@ -535,7 +535,7 @@ are given in a tuple separated by commas:: >>> def f(x, y): ... return 10 * x + y ... - >>> b = np.fromfunction(f, (5, 4), dtype=int) + >>> b = np.fromfunction(f, (5, 4), dtype=np.int_) >>> b array([[ 0, 1, 2, 3], [10, 11, 12, 13], @@ -881,7 +881,7 @@ creates a new array object that looks at the same data. 
>>> c.flags.owndata False >>> - >>> c = c.reshape((2, 6)) # a's shape doesn't change + >>> c = c.reshape((2, 6)) # a's shape doesn't change, reassigned c is still a view of a >>> a.shape (3, 4) >>> c[0, 4] = 1234 # a's data changes @@ -929,6 +929,8 @@ a small fraction of ``a``, a deep copy should be made when constructing ``b`` wi If ``b = a[:100]`` is used instead, ``a`` is referenced by ``b`` and will persist in memory even if ``del a`` is executed. +See also :ref:`basics.copies-and-views`. + Functions and methods overview ------------------------------ @@ -1254,7 +1256,7 @@ set `__: ... A, B = np.meshgrid(x, y) ... C = A + B*1j ... z = np.zeros_like(C) - ... divtime = maxit + np.zeros(z.shape, dtype=int) + ... divtime = maxit + np.zeros(z.shape, dtype=np.int_) ... ... for i in range(maxit): ... z = z**2 + C @@ -1477,4 +1479,4 @@ Further reading - `SciPy Tutorial `__ - `SciPy Lecture Notes `__ - A `matlab, R, IDL, NumPy/SciPy dictionary `__ -- :doc:`tutorial-svd ` +- :doc:`tutorial-svd ` diff --git a/doc/source/user/troubleshooting-importerror.rst b/doc/source/user/troubleshooting-importerror.rst index adbc9d898846..68ac4f939525 100644 --- a/doc/source/user/troubleshooting-importerror.rst +++ b/doc/source/user/troubleshooting-importerror.rst @@ -78,33 +78,11 @@ Using Eclipse/PyDev with Anaconda Python (or environments) ---------------------------------------------------------- Please see the -`Anaconda Documentation `_ +`Anaconda Documentation `_ on how to properly configure Eclipse/PyDev to use Anaconda Python with specific conda environments. -Raspberry Pi ------------- - -There are sometimes issues reported on Raspberry Pi setups when installing -using ``pip3 install`` (or ``pip`` install). 
These will typically mention:: - - libf77blas.so.3: cannot open shared object file: No such file or directory - - -The solution will be to either:: - - sudo apt-get install libatlas-base-dev - -to install the missing libraries expected by the self-compiled NumPy -(ATLAS is a possible provider of linear algebra). - -*Alternatively* use the NumPy provided by Raspbian. In which case run:: - - pip3 uninstall numpy # remove previously installed version - apt install python3-numpy - - Debug build on Windows ---------------------- @@ -148,67 +126,74 @@ This may mainly help you if you are not running the python and/or NumPy version you are expecting to run. -C-API incompatibility ---------------------------- +Downstream ImportError, AttributeError or C-API/ABI incompatibility +=================================================================== -If you see an error like: +If you see a message such as:: + A module that was compiled using NumPy 1.x cannot be run in + NumPy 2.0.0 as it may crash. To support both 1.x and 2.x + versions of NumPy, modules must be compiled with NumPy 2.0. + Some module may need to rebuild instead e.g. with 'pybind11>=2.12'. - RuntimeError: module compiled against API version v1 but this version of numpy is v2 +either as an ``ImportError`` or with:: + AttributeError: _ARRAY_API not found -You may have: +or other errors such as:: -* A bad extension "wheel" (binary install) that should use - `oldest-support-numpy `_ ( - with manual constraints if necessary) to build their binary packages. + RuntimeError: module compiled against API version v1 but this version of numpy is v2 -* An environment issue messing with package versions. +or when a package implemented with Cython:: -* Incompatible package versions somehow enforced manually. + ValueError: numpy.dtype size changed, may indicate binary incompatibility. 
Expected 96 from C header, got 88 from PyObject -* An extension module compiled locally against a very recent version - followed by a NumPy downgrade. +This means that a package depending on NumPy was built in a way that is not +compatible with the NumPy version found. +If this error is due to a recent upgrade to NumPy 2, the easiest solution may +be to simply downgrade NumPy to ``'numpy<2'``. -* A compiled extension copied to a different computer with an - older NumPy version. +To understand the cause, search the traceback (from the back) to find the first +line that isn't inside NumPy to see which package has the incompatibility. +Note your NumPy version and the version of the incompatible package to +help you find the best solution. -The best thing to do if you see this error is to contact -the maintainers of the package that is causing problem -so that they can solve the problem properly. +There can be various reasons for the incompatibility: -However, while you wait for a solution, a work around -that usually works is to upgrade the NumPy version:: +* You have recently upgraded NumPy, most likely to NumPy 2, and the other + module now also needs to be upgraded. (NumPy 2 was released in June 2024.) +* You have version constraints and ``pip`` may + have installed a combination of incompatible packages. - pip install numpy --upgrade +* You have compiled locally or have copied a compiled extension from + elsewhere (which is, in general, a bad idea). +The best solution will usually be to upgrade the failing package: -Downstream ImportError or AttributeError -======================================== +* If you installed it for example through ``pip``, try upgrading it with + ``pip install package_name --upgrade``. -If you see a message such as:: +* If it is your own package or it is built locally, it needs to be recompiled + for the new NumPy version (for details see :ref:`depending_on_numpy`). + It may be that a reinstall of the package is sufficient to fix it.
- A module that was compiled using NumPy 1.x cannot be run in - NumPy 2.0.0 as it may crash. To support both 1.x and 2.x - versions of NumPy, modules must be compiled with NumPy 2.0. - Some module may need to rebuild instead e.g. with 'pybind11>=2.12'. +When these steps fail, you should inform the package maintainers since they +probably need to make a new, compatible, release. -Either as an ``ImportError`` or with:: +However, upgrading may not always be possible because a compatible version does +not yet exist or cannot be installed for other reasons. In that case: - AttributeError: _ARRAY_API not found +* Install a compatible NumPy version: -Then you are using NumPy 2 together with a module that was build with NumPy 1. -NumPy 2 made some changes that require rebuilding such modules to avoid -possibly incorrect results or crashes. + * Try downgrading NumPy with ``pip install 'numpy<2'`` + (NumPy 2 was released in June 2024). + * If your NumPy version is old, you can try upgrading it for + example with ``pip install numpy --upgrade``. -As the error message suggests, the easiest solution is likely to downgrade -NumPy to `numpy<2`. -Alternatively, you can search the traceback (from the back) to find the first -line that isn't inside NumPy to see which module needs to be updated. +* Add additional version pins to the failing package to help ``pip`` + resolve compatible versions of NumPy and the package. -NumPy 2 was released in the first half of 2024 and especially smaller -modules downstream are expected need time to adapt and publish a new version. Segfaults or crashes diff --git a/doc/ufuncs.rst b/doc/ufuncs.rst index 077195fa59b7..4344ff9ab121 100644 --- a/doc/ufuncs.rst +++ b/doc/ufuncs.rst @@ -78,10 +78,11 @@ If there are object arrays involved then loop->obj gets set to 1. Then there ar loop, then "remainder" DECREF's are needed). Outputs: - - castbuf contains a new reference as the result of the function call. This - gets converted to the type of interest and. 
This new reference in castbuf - will be DECREF'd by later calls to the function. Thus, only after the - inner most loop do we need to DECREF the remaining references in castbuf. + - castbuf contains a new reference as the result of the function call. + This is converted to the type of interest, and this new reference + in castbuf will be DECREF'd (its reference count decreased) by + later calls to the function. Thus, only after the innermost loop + finishes do we need to DECREF the remaining references in castbuf. 2) The loop function is of a different type: diff --git a/environment.yml b/environment.yml index 7e347bccb6c9..e74ba4aba356 100644 --- a/environment.yml +++ b/environment.yml @@ -7,17 +7,16 @@ name: numpy-dev channels: - conda-forge dependencies: - - python=3.11 #need to pin to avoid issues with builds + - python=3.12 # need to pin to avoid issues with builds - cython>=3.0 - compilers - openblas - nomkl - - setuptools + - setuptools==65.5.1 - ninja - pkg-config - meson-python - - pip - - spin=0.8 # Unpin when spin 0.9.1 is released + - spin==0.15 - ccache # For testing - pytest @@ -25,10 +24,11 @@ dependencies: - pytest-xdist - hypothesis # For type annotations - - typing_extensions>=4.2.0 # needed for python < 3.10 - - mypy=1.10.0 + - mypy=1.19.1 + - orjson # makes mypy faster # For building docs - sphinx>=4.5.0 + - sphinx-copybutton - sphinx-design - numpydoc=1.4.0 - ipython @@ -38,11 +38,15 @@ dependencies: - pydata-sphinx-theme>=0.15.2 - doxygen - towncrier + - jupyterlite-sphinx>=0.18.0 + # see https://github.com/jupyterlite/pyodide-kernel#compatibility + - jupyterlite-pyodide-kernel==0.5.2 # supports Pyodide 0.27.1 # NOTE: breathe 4.33.0 collides with sphinx.ext.graphviz - breathe>4.33.0 # For linting - - pycodestyle=2.8.0 + - cython-lint + - ruff=0.15.2 - gitpython # Used in some tests - cffi - - pytz + - tzdata diff --git a/meson.build b/meson.build index d816cca456a8..a72c5bb02734 100644 --- a/meson.build +++ b/meson.build @@ -1,12 +1,9 @@ 
project( 'NumPy', 'c', 'cpp', 'cython', - version: run_command( - # This should become `numpy/_version.py` in NumPy 2.0 - ['numpy/_build_utils/gitversion.py'], - check: true).stdout().strip(), - license: 'BSD-3', - meson_version: '>=1.2.99', # version in vendored-meson is 1.2.99 + version: run_command(['numpy/_build_utils/gitversion.py'], check: true).stdout().strip(), + license: 'BSD-3-Clause AND 0BSD AND MIT AND Zlib AND CC0-1.0', + meson_version: '>=1.8.3', # version in vendored-meson default_options: [ 'buildtype=debugoptimized', 'b_ndebug=if-release', @@ -24,14 +21,15 @@ cy = meson.get_compiler('cython') # Check compiler is recent enough (see the SciPy Toolchain Roadmap for details) if cc.get_id() == 'gcc' - if not cc.version().version_compare('>=8.4') - error('NumPy requires GCC >= 8.4') + if not cc.version().version_compare('>=9.3') + error('NumPy requires GCC >= 9.3') endif elif cc.get_id() == 'msvc' - if not cc.version().version_compare('>=19.20') + if not cc.version().version_compare('>=19.35') error('NumPy requires at least vc142 (default with Visual Studio 2019) ' + \ 'when building with MSVC') endif + add_project_arguments('/experimental:c11atomics', language: 'c') endif if not cy.version().version_compare('>=3.0.6') error('NumPy requires Cython >= 3.0.6') @@ -82,11 +80,5 @@ if cc_id.startswith('clang') endif endif -if host_machine.system() == 'darwin' and cc.has_link_argument('-Wl,-ld_classic') - # New linker introduced in macOS 14 not working yet with at least OpenBLAS in Spack, - # see gh-24964 (and linked scipy issue from there). 
- add_project_link_arguments('-Wl,-ld_classic', language : ['c', 'cpp']) -endif - subdir('meson_cpu') subdir('numpy') diff --git a/meson.options b/meson.options new file mode 100644 index 000000000000..8ba7c4b79e03 --- /dev/null +++ b/meson.options @@ -0,0 +1,53 @@ +# BLAS / LAPACK selection +option('blas', type: 'string', value: 'auto', + description: 'BLAS library to use (default: autodetect based on `blas-order`)') +option('lapack', type: 'string', value: 'auto', + description: 'LAPACK library to use (default: autodetect based on `lapack-order`)') +option('allow-noblas', type: 'boolean', value: true, + description: 'Allow building with (slow!) internal fallback routines if no BLAS library is found') +option('blas-order', type: 'array', value: ['auto'], + description: 'Preferred search order for BLAS libraries (e.g., mkl, openblas, blis, blas)') +option('lapack-order', type: 'array', value: ['auto'], + description: 'Preferred search order for LAPACK libraries (e.g., mkl, openblas, lapack)') +option('use-ilp64', type: 'boolean', value: false, + description: 'Use ILP64 (64-bit integer) BLAS/LAPACK interfaces') +option('blas-symbol-suffix', type: 'string', value: 'auto', + description: 'Symbol suffix for BLAS/LAPACK symbols (if any)') +option('mkl-threading', type: 'combo', value: 'auto', + choices: ['auto', 'seq', 'iomp', 'gomp', 'tbb'], + description: 'Threading backend for MKL') + +# Threading & parallelism +option('disable-threading', type: 'boolean', value: false, + description: 'Disable threading support (see `NPY_ALLOW_THREADS` docs)') +option('enable-openmp', type: 'boolean', value: false, + description: 'Enable compilation with OpenMP support') + +# CPU optimization / SIMD +option('disable-optimization', type: 'boolean', value: false, + description: 'Disable all CPU optimizations (dispatch, SIMD, loop unrolling)') +option('disable-svml', type: 'boolean', value: false, + description: 'Disable use of Intel SVML') +option('disable-highway', type: 'boolean', 
value: false, + description: 'Disable SIMD-optimized operations related to Google Highway') +option('disable-intel-sort', type: 'boolean', value: false, + description: 'Disable SIMD-optimized operations related to Intel x86-simd-sort') +option('cpu-baseline', type: 'string', value: 'min', + description: 'Minimal set of required CPU features') +option('cpu-baseline-detect', type: 'feature', value: 'auto', + description: 'Detect CPU baseline from the compiler flags') +option('cpu-dispatch', type: 'string', value: 'max', + description: 'Additional CPU features to dispatch to (beyond baseline)') + +# SIMD test options +option('test-simd', type: 'array', + value: [ + 'BASELINE', 'X86_V2', 'X86_V3', 'X86_V4', + 'VSX', 'VSX2', 'VSX3', 'VSX4', + 'NEON', 'ASIMD', + 'VX', 'VXE', 'VXE2', + 'LSX', + ], + description: 'CPU SIMD feature sets to be tested by the NumPy SIMD test module') +option('test-simd-args', type: 'string', value: '', + description: 'Extra arguments passed to the internal `_simd` test module') diff --git a/meson_cpu/arm/meson.build b/meson_cpu/arm/meson.build index 7ffa3ef58ed0..92d241883795 100644 --- a/meson_cpu/arm/meson.build +++ b/meson_cpu/arm/meson.build @@ -2,21 +2,21 @@ source_root = meson.project_source_root() mod_features = import('features') NEON = mod_features.new( 'NEON', 1, - test_code: files(source_root + '/numpy/distutils/checks/cpu_neon.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_neon.c')[0] ) NEON_FP16 = mod_features.new( 'NEON_FP16', 2, implies: NEON, - test_code: files(source_root + '/numpy/distutils/checks/cpu_neon_fp16.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_neon_fp16.c')[0] ) # FMA NEON_VFPV4 = mod_features.new( 'NEON_VFPV4', 3, implies: NEON_FP16, - test_code: files(source_root + '/numpy/distutils/checks/cpu_neon_vfpv4.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_neon_vfpv4.c')[0] ) # Advanced SIMD ASIMD = mod_features.new( 'ASIMD', 4, 
implies: NEON_VFPV4, detect: {'val': 'ASIMD', 'match': 'NEON.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_asimd.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_asimd.c')[0] ) cpu_family = host_machine.cpu_family() if cpu_family == 'aarch64' @@ -37,29 +37,29 @@ endif ASIMDHP = mod_features.new( 'ASIMDHP', 5, implies: ASIMD, args: {'val': '-march=armv8.2-a+fp16', 'match': '-march=.*', 'mfilter': '\+.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_asimdhp.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_asimdhp.c')[0] ) ## ARMv8.2 dot product ASIMDDP = mod_features.new( 'ASIMDDP', 6, implies: ASIMD, args: {'val': '-march=armv8.2-a+dotprod', 'match': '-march=.*', 'mfilter': '\+.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_asimddp.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_asimddp.c')[0] ) ## ARMv8.2 Single & half-precision Multiply ASIMDFHM = mod_features.new( 'ASIMDFHM', 7, implies: ASIMDHP, args: {'val': '-march=armv8.2-a+fp16fml', 'match': '-march=.*', 'mfilter': '\+.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_asimdfhm.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_asimdfhm.c')[0] ) ## Scalable Vector Extensions (SVE) SVE = mod_features.new( 'SVE', 8, implies: ASIMDHP, args: {'val': '-march=armv8.2-a+sve', 'match': '-march=.*', 'mfilter': '\+.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_sve.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_sve.c')[0] ) # TODO: Add support for MSVC ARM_FEATURES = { 'NEON': NEON, 'NEON_FP16': NEON_FP16, 'NEON_VFPV4': NEON_VFPV4, - 'ASIMD': ASIMD, 'ASIMDHP': ASIMDHP, 'ASIMDFHM': ASIMDFHM, + 'ASIMD': ASIMD, 'ASIMDHP': ASIMDHP, 'ASIMDDP': ASIMDDP, 'ASIMDFHM': ASIMDFHM, 'SVE': SVE } diff --git a/meson_cpu/loongarch64/meson.build b/meson_cpu/loongarch64/meson.build new file mode 100644 index 
000000000000..d59b5682d646 --- /dev/null +++ b/meson_cpu/loongarch64/meson.build @@ -0,0 +1,8 @@ +source_root = meson.project_source_root() +mod_features = import('features') + +LSX = mod_features.new( + 'LSX', 1, args: ['-mlsx'], + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_lsx.c')[0] +) +LOONGARCH64_FEATURES = {'LSX': LSX} diff --git a/meson_cpu/main_config.h.in b/meson_cpu/main_config.h.in index 0952adf67353..e1d6a870c075 100644 --- a/meson_cpu/main_config.h.in +++ b/meson_cpu/main_config.h.in @@ -11,8 +11,6 @@ */ #ifndef @P@_CPU_DISPATCHER_CONF_H_ #define @P@_CPU_DISPATCHER_CONF_H_ -/// This definition is required to provides comptablity with NumPy distutils -#define @P@_CPU_MESON_BUILD /** * @def @P@WITH_CPU_BASELINE * Enabled baseline features names as a single string where each is separated by a single space. @@ -46,7 +44,7 @@ /** * @def @P@WITH_CPU_BASELINE_CALL(EXEC_CB, ...) * Call each enabled baseline feature sorted by lowest interest - * using preprocessor callback without testing whiher the + * using preprocessor callback without testing whether the * feature is supported by CPU or not. * * Required for logging purposes only, for example, generating @@ -79,20 +77,12 @@ * Defines the default behavior for the configurable macros derived from the configuration header * that is generated by the meson function `mod_features.multi_targets()`. * - * Note: Providing fallback in case of optimization disabled is no longer needed for meson - * since we always guarantee having configuration headers. - * - * However, it is still needed for compatibility with Numpy distutils. + * These macros are replaced by dispatch config headers once it is included. */ -#ifndef @P@DISABLE_OPTIMIZATION - #define @P@MTARGETS_CONF_BASELINE(CB, ...) \ - &&"Expected config header that generated by mod_features.multi_targets()"; - #define @P@MTARGETS_CONF_DISPATCH(TEST_FEATURE_CB, CB, ...)
\ - &&"Expected config header that generated by mod_features.multi_targets()"; -#else - #define @P@MTARGETS_CONF_BASELINE(CB, ...) @P@_CPU_EXPAND(CB(__VA_ARGS__)) - #define @P@MTARGETS_CONF_DISPATCH(CHK, CB, ...) -#endif +#define @P@MTARGETS_CONF_BASELINE(CB, ...) \ + &&"Expected config header that generated by mod_features.multi_targets()"; +#define @P@MTARGETS_CONF_DISPATCH(TEST_FEATURE_CB, CB, ...) \ + &&"Expected config header that generated by mod_features.multi_targets()"; /** * @def @P@CPU_DISPATCH_CURFX(NAME) * * @@ -374,13 +364,33 @@ #include #endif +#if (defined(@P@HAVE_VSX) || defined(@P@HAVE_VX)) && !defined(__cplusplus) && defined(bool) + /* + * "altivec.h" header contains the definitions(bool, vector, pixel), + * usually in c++ we undefine them after including the header. + * It's better anyway to take them off and use built-in types(__vector, __pixel, __bool) instead, + * since c99 supports bool variables which may lead to ambiguous errors. + */ + // backup 'bool' before including 'npy_cpu_dispatch_config.h', since it may not be defined as a compiler token.
+ #define NPY__CPU_DISPATCH_GUARD_BOOL + typedef bool npy__cpu_dispatch_guard_bool; +#endif #ifdef @P@HAVE_VSX #include #endif - #ifdef @P@HAVE_VX #include #endif +#if (defined(@P@HAVE_VSX) || defined(@P@HAVE_VX)) + #undef bool + #undef vector + #undef pixel + #ifdef NPY__CPU_DISPATCH_GUARD_BOOL + #define bool npy__cpu_dispatch_guard_bool + #undef NPY__CPU_DISPATCH_GUARD_BOOL + #endif +#endif + #ifdef @P@HAVE_NEON #include @@ -389,4 +399,8 @@ #ifdef @P@HAVE_RVV #include #endif + +#ifdef @P@HAVE_LSX + #include +#endif #endif // @P@_CPU_DISPATCHER_CONF_H_ diff --git a/meson_cpu/meson.build b/meson_cpu/meson.build index 3afc54cae415..02bbe5f7618e 100644 --- a/meson_cpu/meson.build +++ b/meson_cpu/meson.build @@ -46,20 +46,22 @@ if get_option('disable-optimization') CPU_CONF_BASELINE = 'none' CPU_CONF_DISPATCH = 'none' else - baseline_detect = false + baseline_detect = get_option('cpu-baseline-detect').enabled() c_args = get_option('c_args') - foreach arg : c_args - foreach carch : ['-march', '-mcpu', '-xhost', '/QxHost'] - if arg.contains(carch) - message('Appending option "detect" to "cpu-baseline" due to detecting global architecture c_arg "' + arg + '"') - baseline_detect = true + if get_option('cpu-baseline-detect').auto() + foreach arg : c_args + foreach carch : ['-march', '-mcpu', '-xhost', '/QxHost'] + if arg.contains(carch) + message('Appending option "detect" to "cpu-baseline" due to detecting global architecture c_arg "' + arg + '"') + baseline_detect = true + break + endif + endforeach + if baseline_detect break endif endforeach - if baseline_detect - break - endif - endforeach + endif # The required minimal set of required CPU features. 
CPU_CONF_BASELINE = get_option('cpu-baseline') if baseline_detect @@ -76,6 +78,7 @@ subdir('ppc64') subdir('s390x') subdir('arm') subdir('riscv64') +subdir('loongarch64') CPU_FEATURES = {} CPU_FEATURES += ARM_FEATURES @@ -83,20 +86,25 @@ CPU_FEATURES += X86_FEATURES CPU_FEATURES += PPC64_FEATURES CPU_FEATURES += S390X_FEATURES CPU_FEATURES += RV64_FEATURES +CPU_FEATURES += LOONGARCH64_FEATURES + +CPU_FEATURES_REDIRECT = {} +CPU_FEATURES_REDIRECT += X86_REDIRECT # Parse the requested baseline (CPU_CONF_BASELINE) and dispatch features # (CPU_CONF_DISPATCH). cpu_family = host_machine.cpu_family() # Used by build option 'min' min_features = { - 'x86': [SSE2], - 'x86_64': [SSE3], + 'x86': [X86_V2], + 'x86_64': [X86_V2], 'ppc64': [], 's390x': [], 'arm': [], 'aarch64': [ASIMD], 'riscv64': [], 'wasm32': [], + 'loongarch64': [LSX], }.get(cpu_family, []) if host_machine.endian() == 'little' and cpu_family == 'ppc64' min_features = [VSX2] @@ -112,6 +120,7 @@ max_features_dict = { 'aarch64': ARM_FEATURES, 'riscv64': RV64_FEATURES, 'wasm32': {}, + 'loongarch64': LOONGARCH64_FEATURES, }.get(cpu_family, {}) max_features = [] foreach fet_name, fet_obj : max_features_dict @@ -185,15 +194,31 @@ foreach opt_name, conf : parse_options accumulate = min_features elif tok == 'MAX' accumulate = max_features - elif tok in CPU_FEATURES - tokobj = CPU_FEATURES[tok] - if tokobj not in max_features - ignored += tok - continue - endif - accumulate = [tokobj] else - error('Invalid token "'+tok+'" within option --'+opt_name) + if tok in CPU_FEATURES_REDIRECT + ntok = CPU_FEATURES_REDIRECT[tok] + if ntok == '' + warning('Ignoring CPU feature "@0@" in --@1@ option - feature is no longer supported.'.format(tok, opt_name)) + else + warning('CPU Feature "@0@" is no longer explicitly supported, redirecting to "@1@".'.format(tok, ntok)) + endif + warning('Please check the latest documentation for build options.') + if ntok == '' or not append # redirected features not safe to be excluded + continue +
endif + tok = ntok + endif + if tok not in CPU_FEATURES + error('Invalid token "'+tok+'" within option --'+opt_name) + endif + if tok in CPU_FEATURES + tokobj = CPU_FEATURES[tok] + if tokobj not in max_features + ignored += tok + continue + endif + accumulate = [tokobj] + endif endif if append foreach fet : accumulate @@ -203,8 +228,17 @@ foreach opt_name, conf : parse_options endforeach else filterd = [] + # filter out the features that are in the accumulate list + # including any successor features foreach fet : result - if fet not in accumulate + escape = false + foreach fet2 : accumulate + if fet2 in mod_features.implicit_c(fet) + escape = true + break + endif + endforeach + if not escape filterd += fet endif endforeach diff --git a/meson_cpu/ppc64/meson.build b/meson_cpu/ppc64/meson.build index bad95257ca95..58690d1fa80a 100644 --- a/meson_cpu/ppc64/meson.build +++ b/meson_cpu/ppc64/meson.build @@ -3,19 +3,17 @@ mod_features = import('features') compiler_id = meson.get_compiler('c').get_id() VSX = mod_features.new( - 'VSX', 1, args: '-mvsx', - test_code: files(source_root + '/numpy/distutils/checks/cpu_vsx.c')[0], + 'VSX', 1, args: ['-mvsx', '-DHWY_COMPILE_ONLY_STATIC', '-DHWY_DISABLE_ATTR'] + + (compiler_id == 'clang' ? 
['-maltivec'] : []), + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vsx.c')[0], extra_tests: { - 'VSX_ASM': files(source_root + '/numpy/distutils/checks/extra_vsx_asm.c')[0] + 'VSX_ASM': files(source_root + '/numpy/_core/src/_simd/checks/extra_vsx_asm.c')[0] } ) -if compiler_id == 'clang' - VSX.update(args: ['-mvsx', '-maltivec']) -endif VSX2 = mod_features.new( 'VSX2', 2, implies: VSX, args: {'val': '-mcpu=power8', 'match': '.*vsx'}, detect: {'val': 'VSX2', 'match': 'VSX'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_vsx2.c')[0], + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vsx2.c')[0], ) # VSX2 is hardware baseline feature on ppc64le since the first little-endian # support was part of Power8 @@ -23,19 +21,19 @@ if host_machine.endian() == 'little' VSX.update(implies: VSX2) endif VSX3 = mod_features.new( - 'VSX3', 3, implies: VSX2, args: {'val': '-mcpu=power9', 'match': '.*[mcpu=|vsx].*'}, + 'VSX3', 3, implies: VSX2, args: {'val': '-mcpu=power9', 'match': '.*(?:mcpu=|vsx).*'}, detect: {'val': 'VSX3', 'match': 'VSX.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_vsx3.c')[0], + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vsx3.c')[0], extra_tests: { - 'VSX3_HALF_DOUBLE': files(source_root + '/numpy/distutils/checks/extra_vsx3_half_double.c')[0] + 'VSX3_HALF_DOUBLE': files(source_root + '/numpy/_core/src/_simd/checks/extra_vsx3_half_double.c')[0] } ) VSX4 = mod_features.new( - 'VSX4', 4, implies: VSX3, args: {'val': '-mcpu=power10', 'match': '.*[mcpu=|vsx].*'}, + 'VSX4', 4, implies: VSX3, args: {'val': '-mcpu=power10', 'match': '.*(?:mcpu=|vsx).*'}, detect: {'val': 'VSX4', 'match': 'VSX.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_vsx4.c')[0], + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vsx4.c')[0], extra_tests: { - 'VSX4_MMA': files(source_root + '/numpy/distutils/checks/extra_vsx4_mma.c')[0] + 'VSX4_MMA': 
files(source_root + '/numpy/_core/src/_simd/checks/extra_vsx4_mma.c')[0] } ) PPC64_FEATURES = {'VSX': VSX, 'VSX2': VSX2, 'VSX3': VSX3, 'VSX4': VSX4} diff --git a/meson_cpu/riscv64/meson.build b/meson_cpu/riscv64/meson.build index 3f930f39e27e..fdab67d246d6 100644 --- a/meson_cpu/riscv64/meson.build +++ b/meson_cpu/riscv64/meson.build @@ -3,6 +3,6 @@ mod_features = import('features') RVV = mod_features.new( 'RVV', 1, args: ['-march=rv64gcv'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_rvv.c')[0], + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_rvv.c')[0], ) RV64_FEATURES = {'RVV': RVV} diff --git a/meson_cpu/s390x/meson.build b/meson_cpu/s390x/meson.build index a69252d1607c..282ec056e78e 100644 --- a/meson_cpu/s390x/meson.build +++ b/meson_cpu/s390x/meson.build @@ -3,16 +3,16 @@ mod_features = import('features') VX = mod_features.new( 'VX', 1, args: ['-mzvector', '-march=arch11'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_vx.c')[0], + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vx.c')[0], ) VXE = mod_features.new( 'VXE', 2, implies: VX, args: {'val': '-march=arch12', 'match': '-march=.*'}, - detect: {'val': 'VXE', 'match': 'VX'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_vxe.c')[0], + detect: {'val': 'VXE', 'match': '\\bvxe\\b'}, + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vxe.c')[0], ) VXE2 = mod_features.new( 'VXE2', 3, implies: VXE, args: {'val': '-march=arch13', 'match': '-march=.*'}, - detect: {'val': 'VXE2', 'match': 'VX.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_vxe2.c')[0], + detect: {'val': 'VXE2', 'match': '\\bvxe2\\b'}, + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vxe2.c')[0], ) S390X_FEATURES = {'VX': VX, 'VXE': VXE, 'VXE2': VXE2} diff --git a/meson_cpu/x86/meson.build b/meson_cpu/x86/meson.build index 598f80ff0c89..412803e5ddbb 100644 --- a/meson_cpu/x86/meson.build +++ 
b/meson_cpu/x86/meson.build @@ -1,227 +1,109 @@ source_root = meson.project_source_root() +current_dir = meson.current_source_dir() +cpu_family = host_machine.cpu_family() mod_features = import('features') -SSE = mod_features.new( - 'SSE', 1, args: '-msse', - test_code: files(source_root + '/numpy/distutils/checks/cpu_sse.c')[0] -) -SSE2 = mod_features.new( - 'SSE2', 2, implies: SSE, - args: '-msse2', - test_code: files(source_root + '/numpy/distutils/checks/cpu_sse2.c')[0] -) -# enabling SSE without SSE2 is useless also it's non-optional for x86_64 -SSE.update(implies: SSE2) -SSE3 = mod_features.new( - 'SSE3', 3, implies: SSE2, - args: '-msse3', - test_code: files(source_root + '/numpy/distutils/checks/cpu_sse3.c')[0] -) -SSSE3 = mod_features.new( - 'SSSE3', 4, implies: SSE3, - args: '-mssse3', - test_code: files(source_root + '/numpy/distutils/checks/cpu_ssse3.c')[0] -) -SSE41 = mod_features.new( - 'SSE41', 5, implies: SSSE3, - args: '-msse4.1', - test_code: files(source_root + '/numpy/distutils/checks/cpu_sse41.c')[0] -) -POPCNT = mod_features.new( - 'POPCNT', 6, implies: SSE41, - args: '-mpopcnt', - test_code: files(source_root + '/numpy/distutils/checks/cpu_popcnt.c')[0] -) -SSE42 = mod_features.new( - 'SSE42', 7, implies: POPCNT, args: '-msse4.2', - test_code: files(source_root + '/numpy/distutils/checks/cpu_sse42.c')[0] -) -# 7-20 left as margin for any extra features -AVX = mod_features.new( - 'AVX', 20, implies: SSE42, args: '-mavx', - detect: {'val': 'AVX', 'match': '.*SSE.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx.c')[0] -) -XOP = mod_features.new( - 'XOP', 21, implies: AVX, args: '-mxop', - test_code: files(source_root + '/numpy/distutils/checks/cpu_xop.c')[0] -) -FMA4 = mod_features.new( - 'FMA4', 22, implies: AVX, args: '-mfma4', - test_code: files(source_root + '/numpy/distutils/checks/cpu_fma4.c')[0] -) -# x86 half-precision -F16C = mod_features.new( - 'F16C', 23, implies: AVX, args: '-mf16c', - test_code: 
files(source_root + '/numpy/distutils/checks/cpu_f16c.c')[0] -) -FMA3 = mod_features.new( - 'FMA3', 24, implies: F16C, args: '-mfma', - test_code: files(source_root + '/numpy/distutils/checks/cpu_fma3.c')[0] -) -AVX2 = mod_features.new( - 'AVX2', 25, implies: F16C, args: '-mavx2', - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx2.c')[0] -) -# 25-40 left as margin for any extra features -AVX512F = mod_features.new( - 'AVX512F', 40, implies: [FMA3, AVX2], - # Disables mmx because of stack corruption that may happen during mask - # conversions. - # TODO (seiko2plus): provide more clarification - args: ['-mno-mmx', '-mavx512f'], - detect: {'val': 'AVX512F', 'match': '.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512f.c')[0], - extra_tests: { - 'AVX512F_REDUCE': files(source_root + '/numpy/distutils/checks/extra_avx512f_reduce.c')[0] - } -) -AVX512CD = mod_features.new( - 'AVX512CD', 41, implies: AVX512F, args: '-mavx512cd', - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512cd.c')[0] -) -AVX512_KNL = mod_features.new( - 'AVX512_KNL', 42, implies: AVX512CD, args: ['-mavx512er', '-mavx512pf'], - group: ['AVX512ER', 'AVX512PF'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_knl.c')[0] -) -AVX512_KNM = mod_features.new( - 'AVX512_KNM', 43, implies: AVX512_KNL, - args: ['-mavx5124fmaps', '-mavx5124vnniw', '-mavx512vpopcntdq'], - group: ['AVX5124FMAPS', 'AVX5124VNNIW', 'AVX512VPOPCNTDQ'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_knm.c')[0] -) -AVX512_SKX = mod_features.new( - 'AVX512_SKX', 50, implies: AVX512CD, - args: ['-mavx512vl', '-mavx512bw', '-mavx512dq'], - group: ['AVX512VL', 'AVX512BW', 'AVX512DQ'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_skx.c')[0], - extra_tests: { - 'AVX512BW_MASK': files(source_root + '/numpy/distutils/checks/extra_avx512bw_mask.c')[0], - 'AVX512DQ_MASK': files(source_root + 
'/numpy/distutils/checks/extra_avx512dq_mask.c')[0] - } -) -AVX512_CLX = mod_features.new( - 'AVX512_CLX', 51, implies: AVX512_SKX, args: '-mavx512vnni', - group: ['AVX512VNNI'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_clx.c')[0] -) -AVX512_CNL = mod_features.new( - 'AVX512_CNL', 52, implies: AVX512_SKX, - args: ['-mavx512ifma', '-mavx512vbmi'], - group: ['AVX512IFMA', 'AVX512VBMI'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_cnl.c')[0] -) +HWY_SSE4_FLAGS = ['-DHWY_WANT_SSE4', '-DHWY_DISABLE_PCLMUL_AES'] +# Use SSE for floating-point on x86-32 to ensure numeric consistency. +# The x87 FPU's 80-bit internal precision causes unpredictable rounding +# and overflow behavior when converting to smaller types. SSE maintains +# strict 32/64-bit precision throughout all calculations. +X86_64_V2_FLAGS = cpu_family == 'x86'? ['-mfpmath=sse'] : ['-mcx16'] +X86_64_V2_NAMES = cpu_family == 'x86'? [] : ['CX16'] +X86_V2 = mod_features.new( + 'X86_V2', 1, args: ['-msse', '-msse2', '-msse3', '-mssse3', '-msse4.1', '-msse4.2', + '-mpopcnt', '-msahf'] + X86_64_V2_FLAGS + HWY_SSE4_FLAGS, + # Adds compiler definitions `NPY_HAVE_SSE*` + group: ['SSE', 'SSE2', 'SSE3', 'SSSE3', 'SSE41', 'SSE42', 'POPCNT', 'LAHF'] + X86_64_V2_NAMES, + detect: 'X86_V2', + test_code: files(current_dir + '/test_x86_v2.c')[0], +) +X86_V3 = mod_features.new( + 'X86_V3', 10, implies: X86_V2, + args: ['-mavx', '-mavx2', '-mfma', '-mbmi', '-mbmi2', '-mlzcnt', '-mf16c', '-mmovbe'], + group: ['AVX', 'AVX2', 'FMA3', 'BMI', 'BMI2', 'LZCNT', 'F16C', 'MOVBE'], + detect: 'X86_V3', + test_code: files(current_dir + '/test_x86_v3.c')[0], +) +X86_V4 = mod_features.new( + 'X86_V4', 20, implies: X86_V3, + args: ['-mavx512f', '-mavx512cd', '-mavx512vl', '-mavx512bw', '-mavx512dq'], + group: ['AVX512F', 'AVX512CD', 'AVX512VL', 'AVX512BW', 'AVX512DQ', 'AVX512_SKX', + 'AVX512F_REDUCE', 'AVX512BW_MASK', 'AVX512DQ_MASK'], + detect: 'X86_V4', + test_code: files(current_dir + 
'/test_x86_v4.c')[0], +) +if cpu_family == 'x86' + X86_V4.update(disable: 'not supported on x86-32') +endif AVX512_ICL = mod_features.new( - 'AVX512_ICL', 53, implies: [AVX512_CLX, AVX512_CNL], - args: ['-mavx512vbmi2', '-mavx512bitalg', '-mavx512vpopcntdq'], - group: ['AVX512VBMI2', 'AVX512BITALG', 'AVX512VPOPCNTDQ'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_icl.c')[0] + 'AVX512_ICL', 30, implies: X86_V4, + args: ['-mavx512vbmi', '-mavx512vbmi2', '-mavx512vnni', '-mavx512bitalg', + '-mavx512vpopcntdq', '-mavx512ifma', '-mvaes', '-mgfni', '-mvpclmulqdq'], + group: ['AVX512VBMI', 'AVX512VBMI2', 'AVX512VNNI', 'AVX512BITALG', 'AVX512VPOPCNTDQ', + 'AVX512IFMA', 'VAES', 'GFNI', 'VPCLMULQDQ'], + detect: 'AVX512_ICL', + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_avx512_icl.c')[0] ) -# TODO add support for zen4 AVX512_SPR = mod_features.new( - 'AVX512_SPR', 55, implies: AVX512_ICL, - args: ['-mavx512fp16'], - group: ['AVX512FP16'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_spr.c')[0] + 'AVX512_SPR', 35, implies: AVX512_ICL, + args: ['-mavx512fp16', '-mavx512bf16'], + group: ['AVX512FP16', 'AVX512BF16'], + detect: 'AVX512_SPR', + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_avx512_spr.c')[0] ) # Specializations for non unix-like compilers # ------------------------------------------- -cpu_family = host_machine.cpu_family() -compiler_id = meson.get_compiler('c').get_id() +cc = meson.get_compiler('c') +compiler_id = cc.get_id() if compiler_id not in ['gcc', 'clang'] AVX512_SPR.update(disable: compiler_id + ' compiler does not support it') endif -# Common specializations between both Intel compilers (unix-like and msvc-like) -if compiler_id in ['intel', 'intel-cl'] - # POPCNT, and F16C don't own private FLAGS however the compiler still - # provides ISA capability for them. 
- POPCNT.update(args: '') - F16C.update(args: '') - # Intel compilers don't support the following features independently - FMA3.update(implies: [F16C, AVX2]) - AVX2.update(implies: [F16C, FMA3]) - AVX512F.update(implies: [AVX2, AVX512CD]) - AVX512CD.update(implies: [AVX512F]) - XOP.update(disable: 'Intel Compiler does not support it') - FMA4.update(disable: 'Intel Compiler does not support it') -endif - if compiler_id == 'intel-cl' - foreach fet : [SSE, SSE2, SSE3, SSSE3, AVX] - fet.update(args: {'val': '/arch:' + fet.get('name'), 'match': '/arch:.*'}) - endforeach - SSE41.update(args: {'val': '/arch:SSE4.1', 'match': '/arch:.*'}) - SSE42.update(args: {'val': '/arch:SSE4.2', 'match': '/arch:.*'}) - FMA3.update(args: {'val': '/arch:CORE-AVX2', 'match': '/arch:.*'}) - AVX2.update(args: {'val': '/arch:CORE-AVX2', 'match': '/arch:.*'}) - AVX512F.update(args: {'val': '/Qx:COMMON-AVX512', 'match': '/arch:.*'}) - AVX512CD.update(args: {'val': '/Qx:COMMON-AVX512', 'match': '/arch:.*'}) - AVX512_KNL.update(args: {'val': '/Qx:KNL', 'match': '/[arch|Qx]:.*'}) - AVX512_KNM.update(args: {'val': '/Qx:KNM', 'match': '/[arch|Qx]:.*'}) - AVX512_SKX.update(args: {'val': '/Qx:SKYLAKE-AVX512', 'match': '/[arch|Qx]:.*'}) - AVX512_CLX.update(args: {'val': '/Qx:CASCADELAKE', 'match': '/[arch|Qx]:.*'}) - AVX512_CNL.update(args: {'val': '/Qx:CANNONLAKE', 'match': '/[arch|Qx]:.*'}) + X86_V2.update(args: [{'val': '/arch:SSE4.2', 'match': '/arch:.*'}] + HWY_SSE4_FLAGS) + X86_V3.update(args: {'val': '/arch:CORE-AVX2', 'match': '/arch:.*'}) + X86_V4.update(args: {'val': '/Qx:SKYLAKE-AVX512', 'match': '/[arch|Qx]:.*'}) AVX512_ICL.update(args: {'val': '/Qx:ICELAKE-CLIENT', 'match': '/[arch|Qx]:.*'}) endif if compiler_id == 'intel' - clear_m = '^(-mcpu=|-march=)' clear_any = '^(-mcpu=|-march=|-x[A-Z0-9\-])' - FMA3.update(args: {'val': '-xCORE-AVX2', 'match': clear_m}) - AVX2.update(args: {'val': '-xCORE-AVX2', 'match': clear_m}) - AVX512F.update(args: {'val': '-xCOMMON-AVX512', 'match': clear_m}) 
- AVX512CD.update(args: {'val': '-xCOMMON-AVX512', 'match': clear_m}) - AVX512_KNL.update(args: {'val': '-xKNL', 'match': clear_any}) - AVX512_KNM.update(args: {'val': '-xKNM', 'match': clear_any}) - AVX512_SKX.update(args: {'val': '-xSKYLAKE-AVX512', 'match': clear_any}) - AVX512_CLX.update(args: {'val': '-xCASCADELAKE', 'match': clear_any}) - AVX512_CNL.update(args: {'val': '-xCANNONLAKE', 'match': clear_any}) + X86_V2.update(args: [{'val': '-xSSE4.2', 'match': clear_any}] + HWY_SSE4_FLAGS) + X86_V3.update(args: {'val': '-xCORE-AVX2', 'match': clear_any}) + X86_V4.update(args: {'val': '-xSKYLAKE-AVX512', 'match': clear_any}) AVX512_ICL.update(args: {'val': '-xICELAKE-CLIENT', 'match': clear_any}) endif if compiler_id == 'msvc' - # MSVC compiler doesn't support the following features - foreach fet : [AVX512_KNL, AVX512_KNM] - fet.update(disable: compiler_id + ' compiler does not support it') - endforeach - # The following features don't own private FLAGS, however the compiler still - # provides ISA capability for them. - foreach fet : [ - SSE3, SSSE3, SSE41, POPCNT, SSE42, AVX, F16C, XOP, FMA4, - AVX512F, AVX512CD, AVX512_CLX, AVX512_CNL, - AVX512_ICL - ] - fet.update(args: '') - endforeach - # MSVC compiler doesn't support the following features independently - FMA3.update(implies: [F16C, AVX2]) - AVX2.update(implies: [F16C, FMA3]) - AVX512F.update(implies: [AVX2, AVX512CD, AVX512_SKX]) - AVX512CD.update(implies: [AVX512F, AVX512_SKX]) + cc_ver = cc.version() + MSVC_SSE4 = cc_ver.version_compare('>=19.40') ? ['/arch:SSE4.2'] : [] + # 32-bit MSVC does not support /arch:SSE4.2 + MSVC_SSE4 = cpu_family == 'x86' ? ['/arch:SSE2'] : MSVC_SSE4 + MSVC_SSE4 = cc_ver.version_compare('>=19.30') ? MSVC_SSE4 + ['/fp:contract'] : MSVC_SSE4 + X86_V2.update(args: MSVC_SSE4 + HWY_SSE4_FLAGS) clear_arch = '/arch:.*' - # only available on 32-bit. 
Its enabled by default on 64-bit mode - foreach fet : [SSE, SSE2] - if cpu_family == 'x86' - fet.update(args: {'val': '/arch:' + fet.get('name'), 'match': clear_arch}) - else - fet.update(args: '') - endif - endforeach - FMA3.update(args: {'val': '/arch:AVX2', 'match': clear_arch}) - AVX2.update(args: {'val': '/arch:AVX2', 'match': clear_arch}) - AVX512_SKX.update(args: {'val': '/arch:AVX512', 'match': clear_arch}) + X86_V3.update(args: {'val': '/arch:AVX2', 'match': clear_arch}) + # FIXME: After completing transition from universal intrinsics to Highway, + # investigate which MSVC versions are incompatible with Highway's AVX-512 implementation. + X86_V4.update(disable: 'Considered broken by Highway on MSVC') + # To force enable AVX-512, use: + # X86_V4.update(args: [{'val': '/arch:AVX512', 'match': clear_arch}, '-DHWY_BROKEN_MSVC=0']) + AVX512_ICL.update(disable: 'unsupported by Highway on MSVC') endif +# legacy CPU features +X86_REDIRECT = { + 'SSE': 'X86_V2', 'SSE2': 'X86_V2', 'SSE3': 'X86_V2', 'SSSE3': 'X86_V2', + 'SSE41': 'X86_V2', 'SSE42': 'X86_V2', 'XOP': 'X86_V2', 'FMA4': 'X86_V2', + 'FMA3': 'X86_V3', 'AVX': 'X86_V3', 'F16C': 'X86_V3', + 'AVX512F': 'X86_V3', 'AVX512CD': 'X86_V3', + 'AVX512_KNL': 'X86_V3', 'AVX512_KNM': 'X86_V3', + 'AVX512_SKX': 'X86_V4', 'AVX512_CLX': 'X86_V4', 'AVX512_CNL': 'X86_V4', +} + X86_FEATURES = { - 'SSE': SSE, 'SSE2': SSE2, 'SSE3': SSE3, 'SSSE3': SSSE3, - 'SSE41': SSE41, 'POPCNT': POPCNT, 'SSE42': SSE42, 'AVX': AVX, - 'XOP': XOP, 'FMA4': FMA4, 'F16C': F16C, 'FMA3': FMA3, - 'AVX2': AVX2, 'AVX512F': AVX512F, 'AVX512CD': AVX512CD, - 'AVX512_KNL': AVX512_KNL, 'AVX512_KNM': AVX512_KNM, - 'AVX512_SKX': AVX512_SKX, 'AVX512_CLX': AVX512_CLX, - 'AVX512_CNL': AVX512_CNL, 'AVX512_ICL': AVX512_ICL, - 'AVX512_SPR': AVX512_SPR + 'X86_V2': X86_V2, 'X86_V3': X86_V3, 'X86_V4': X86_V4, + 'AVX512_ICL': AVX512_ICL, 'AVX512_SPR': AVX512_SPR } diff --git a/meson_cpu/x86/test_x86_v2.c b/meson_cpu/x86/test_x86_v2.c new file mode 100644 index 
000000000000..f897957224d5 --- /dev/null +++ b/meson_cpu/x86/test_x86_v2.c @@ -0,0 +1,69 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #if !defined(__SSE__) || !defined(__SSE2__) || !defined(__SSE3__) || \ + !defined(__SSSE3__) || !defined(__SSE4_1__) || !defined(__SSE4_2__) || !defined(__POPCNT__) + #error HOST/ARCH does not support x86_v2 + #endif +#endif + +#include // SSE +#include // SSE2 +#include // SSE3 +#include // SSSE3 +#include // SSE4.1 +#ifdef _MSC_VER + #include // SSE4.2 and POPCNT for MSVC +#else + #include // SSE4.2 + #include // POPCNT +#endif + +int main(int argc, char **argv) +{ + // to prevent optimization + int seed = (int)argv[argc-1][0]; + volatile int result = 0; + + // SSE test + __m128 a = _mm_set1_ps((float)seed); + __m128 b = _mm_set1_ps(2.0f); + __m128 c = _mm_add_ps(a, b); + result += (int)_mm_cvtss_f32(c); + + // SSE2 test + __m128i ai = _mm_set1_epi32(seed); + __m128i bi = _mm_set1_epi32(2); + __m128i ci = _mm_add_epi32(ai, bi); + result += _mm_cvtsi128_si32(ci); + + // SSE3 test + __m128 d = _mm_movehdup_ps(a); + result += (int)_mm_cvtss_f32(d); + + // SSSE3 test + __m128i di = _mm_abs_epi16(_mm_set1_epi16((short)seed)); + result += _mm_cvtsi128_si32(di); + + // SSE4.1 test + __m128i ei = _mm_max_epi32(ai, bi); + result += _mm_cvtsi128_si32(ei); + + // SSE4.2 test + __m128i str1 = _mm_set1_epi8((char)seed); + __m128i str2 = _mm_set1_epi8((char)(seed + 1)); + int res4_2 = _mm_cmpestra(str1, 4, str2, 4, 0); + result += res4_2; + + // POPCNT test + unsigned int test_val = (unsigned int)seed | 0x01234567; + int pcnt = 
_mm_popcnt_u32(test_val); + result += pcnt; + + return result; +} diff --git a/meson_cpu/x86/test_x86_v3.c b/meson_cpu/x86/test_x86_v3.c new file mode 100644 index 000000000000..0bc496a93ad0 --- /dev/null +++ b/meson_cpu/x86/test_x86_v3.c @@ -0,0 +1,66 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #if !defined(__AVX__) || !defined(__AVX2__) || !defined(__FMA__) || \ + !defined(__BMI__) || !defined(__BMI2__) || !defined(__LZCNT__) || !defined(__F16C__) + #error HOST/ARCH does not support x86_v3 + #endif +#endif + +#include + +int main(int argc, char **argv) +{ + // to prevent optimization + int seed = (int)argv[argc-1][0]; + volatile int result = 0; + + // AVX test + __m256 avx_a = _mm256_set1_ps((float)seed); + __m256 avx_b = _mm256_set1_ps(2.0f); + __m256 avx_c = _mm256_add_ps(avx_a, avx_b); + float avx_result = _mm256_cvtss_f32(avx_c); + result += (int)avx_result; + + // AVX2 test + __m256i avx2_a = _mm256_set1_epi32(seed); + __m256i avx2_b = _mm256_set1_epi32(2); + __m256i avx2_c = _mm256_add_epi32(avx2_a, avx2_b); + result += _mm256_extract_epi32(avx2_c, 0); + + // FMA test + __m256 fma_a = _mm256_set1_ps((float)seed); + __m256 fma_b = _mm256_set1_ps(2.0f); + __m256 fma_c = _mm256_set1_ps(3.0f); + __m256 fma_result = _mm256_fmadd_ps(fma_a, fma_b, fma_c); + result += (int)_mm256_cvtss_f32(fma_result); + + // BMI1 tests + unsigned int bmi1_src = (unsigned int)seed; + unsigned int tzcnt_result = _tzcnt_u32(bmi1_src); + result += tzcnt_result; + + // BMI2 tests + unsigned int bzhi_result = _bzhi_u32(bmi1_src, 17); + result += (int)bzhi_result; + + unsigned int pdep_result 
= _pdep_u32(bmi1_src, 0x10101010); + result += pdep_result; + + // LZCNT test + unsigned int lzcnt_result = _lzcnt_u32(bmi1_src); + result += lzcnt_result; + + // F16C tests + __m128 f16c_src = _mm_set1_ps((float)seed); + __m128i f16c_half = _mm_cvtps_ph(f16c_src, 0); + __m128 f16c_restored = _mm_cvtph_ps(f16c_half); + result += (int)_mm_cvtss_f32(f16c_restored); + + return result; +} diff --git a/meson_cpu/x86/test_x86_v4.c b/meson_cpu/x86/test_x86_v4.c new file mode 100644 index 000000000000..d49c3a78e3b3 --- /dev/null +++ b/meson_cpu/x86/test_x86_v4.c @@ -0,0 +1,88 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. 
+ */ + #if !defined(__AVX512F__) || !defined(__AVX512CD__) || !defined(__AVX512VL__) || \ + !defined(__AVX512BW__) || !defined(__AVX512DQ__) + #error HOST/ARCH does not support x86_v4 + #endif +#endif + +#include + +int main(int argc, char **argv) +{ + // to prevent optimization + int seed = (int)argv[argc-1][0]; + volatile int result = 0; + + // AVX512F tests (Foundation) + __m512 avx512f_a = _mm512_set1_ps((float)seed); + __m512 avx512f_b = _mm512_set1_ps(2.0f); + __m512 avx512f_c = _mm512_add_ps(avx512f_a, avx512f_b); + float avx512f_result = _mm512_cvtss_f32(avx512f_c); + result += (int)avx512f_result; + + // Test AVX512F mask operations + __mmask16 k1 = _mm512_cmpeq_ps_mask(avx512f_a, avx512f_b); + __m512 masked_result = _mm512_mask_add_ps(avx512f_a, k1, avx512f_b, avx512f_c); + result += _mm512_mask2int(k1); + + // AVX512CD tests (Conflict Detection) + __m512i avx512cd_a = _mm512_set1_epi32(seed); + __m512i avx512cd_b = _mm512_conflict_epi32(avx512cd_a); + result += _mm_cvtsi128_si32(_mm512_extracti32x4_epi32(avx512cd_b, 0)); + + __m512i avx512cd_lzcnt = _mm512_lzcnt_epi32(avx512cd_a); + result += _mm_cvtsi128_si32(_mm512_extracti32x4_epi32(avx512cd_lzcnt, 0)); + + // AVX512VL tests (Vector Length Extensions - 128/256-bit vectors with AVX512 features) + __m256 avx512vl_a = _mm256_set1_ps((float)seed); + __m256 avx512vl_b = _mm256_set1_ps(2.0f); + __mmask8 k2 = _mm256_cmp_ps_mask(avx512vl_a, avx512vl_b, _CMP_EQ_OQ); + __m256 avx512vl_c = _mm256_mask_add_ps(avx512vl_a, k2, avx512vl_a, avx512vl_b); + result += (int)_mm256_cvtss_f32(avx512vl_c); + + __m128 avx512vl_sm_a = _mm_set1_ps((float)seed); + __m128 avx512vl_sm_b = _mm_set1_ps(2.0f); + __mmask8 k3 = _mm_cmp_ps_mask(avx512vl_sm_a, avx512vl_sm_b, _CMP_EQ_OQ); + __m128 avx512vl_sm_c = _mm_mask_add_ps(avx512vl_sm_a, k3, avx512vl_sm_a, avx512vl_sm_b); + result += (int)_mm_cvtss_f32(avx512vl_sm_c); + + // AVX512BW tests (Byte and Word) + __m512i avx512bw_a = _mm512_set1_epi16((short)seed); + __m512i avx512bw_b = 
_mm512_set1_epi16(2); + __mmask32 k4 = _mm512_cmpeq_epi16_mask(avx512bw_a, avx512bw_b); + __m512i avx512bw_c = _mm512_mask_add_epi16(avx512bw_a, k4, avx512bw_a, avx512bw_b); + result += _mm_cvtsi128_si32(_mm512_extracti32x4_epi32(avx512bw_c, 0)); + + // Test byte operations + __m512i avx512bw_bytes_a = _mm512_set1_epi8((char)seed); + __m512i avx512bw_bytes_b = _mm512_set1_epi8(2); + __mmask64 k5 = _mm512_cmpeq_epi8_mask(avx512bw_bytes_a, avx512bw_bytes_b); + result += (k5 & 1); + + // AVX512DQ tests (Doubleword and Quadword) + __m512d avx512dq_a = _mm512_set1_pd((double)seed); + __m512d avx512dq_b = _mm512_set1_pd(2.0); + __mmask8 k6 = _mm512_cmpeq_pd_mask(avx512dq_a, avx512dq_b); + __m512d avx512dq_c = _mm512_mask_add_pd(avx512dq_a, k6, avx512dq_a, avx512dq_b); + double avx512dq_result = _mm512_cvtsd_f64(avx512dq_c); + result += (int)avx512dq_result; + + // Test integer to/from floating point conversion + __m512i avx512dq_back = _mm512_cvtps_epi32(masked_result); + result += _mm_cvtsi128_si32(_mm512_extracti32x4_epi32(avx512dq_back, 0)); + + // Test 64-bit integer operations + __m512i avx512dq_i64_a = _mm512_set1_epi64(seed); + __m512i avx512dq_i64_b = _mm512_set1_epi64(2); + __m512i avx512dq_i64_c = _mm512_add_epi64(avx512dq_i64_a, avx512dq_i64_b); + result += _mm_cvtsi128_si32(_mm512_extracti32x4_epi32(avx512dq_i64_c, 0)); + + return result; +} diff --git a/meson_options.txt b/meson_options.txt deleted file mode 100644 index 844fa4f5a2e7..000000000000 --- a/meson_options.txt +++ /dev/null @@ -1,41 +0,0 @@ -option('blas', type: 'string', value: 'auto', - description: 'Option for BLAS library selection. By default, try to find any in the order given by `blas-order`') -option('lapack', type: 'string', value: 'auto', - description: 'Option for LAPACK library selection. By default, try to find any in the order given by `lapack-order`') -option('allow-noblas', type: 'boolean', value: true, - description: 'If set to true, allow building with (slow!) 
internal fallback routines') -option('blas-order', type: 'array', value: ['auto'], - description: 'Order of BLAS libraries to search for. E.g.: mkl,openblas,blis,blas') -option('lapack-order', type: 'array', value: ['auto'], - description: 'Order of LAPACK libraries to search for. E.g.: mkl,openblas,lapack') -option('use-ilp64', type: 'boolean', value: false, - description: 'Use ILP64 (64-bit integer) BLAS and LAPACK interfaces') -option('blas-symbol-suffix', type: 'string', value: 'auto', - description: 'BLAS and LAPACK symbol suffix to use, if any') -option('mkl-threading', type: 'string', value: 'auto', - description: 'MKL threading method, one of: `seq`, `iomp`, `gomp`, `tbb`') -option('disable-svml', type: 'boolean', value: false, - description: 'Disable building against SVML') -option('disable-highway', type: 'boolean', value: false, - description: 'Disables SIMD-optimized operations related to Google Highway') -option('disable-intel-sort', type: 'boolean', value: false, - description: 'Disables SIMD-optimized operations related to Intel x86-simd-sort') -option('disable-threading', type: 'boolean', value: false, - description: 'Disable threading support (see `NPY_ALLOW_THREADS` docs)') -option('disable-optimization', type: 'boolean', value: false, - description: 'Disable CPU optimized code (dispatch,simd,unroll...)') -option('cpu-baseline', type: 'string', value: 'min', - description: 'Minimal set of required CPU features') -option('cpu-dispatch', type: 'string', value: 'max -xop -fma4', - description: 'Dispatched set of additional CPU features') -option('test-simd', type: 'array', - value: [ - 'BASELINE', 'SSE2', 'SSE42', 'XOP', 'FMA4', - 'AVX2', 'FMA3', 'AVX2,FMA3', 'AVX512F', 'AVX512_SKX', - 'VSX', 'VSX2', 'VSX3', 'VSX4', - 'NEON', 'ASIMD', - 'VX', 'VXE', 'VXE2', - ], - description: 'Specify a list of CPU features to be tested against NumPy SIMD interface') -option('test-simd-args', type: 'string', value: '', - description: 'Extra args to be passed to the 
`_simd` module that is used for testing the NumPy SIMD interface') diff --git a/numpy/__config__.py.in b/numpy/__config__.py.in index ce224e49a15d..a62f531c3769 100644 --- a/numpy/__config__.py.in +++ b/numpy/__config__.py.in @@ -7,7 +7,7 @@ from numpy._core._multiarray_umath import ( __cpu_dispatch__, ) -__all__ = ["show"] +__all__ = ["show_config"] _built_with_meson = True @@ -160,3 +160,11 @@ def show(mode=DisplayModes.stdout.value): raise AttributeError( f"Invalid `mode`, use one of: {', '.join([e.value for e in DisplayModes])}" ) + + +def show_config(mode=DisplayModes.stdout.value): + return show(mode) + + +show_config.__doc__ = show.__doc__ +show_config.__module__ = "numpy" diff --git a/numpy/__config__.pyi b/numpy/__config__.pyi new file mode 100644 index 000000000000..21e8b01fdd96 --- /dev/null +++ b/numpy/__config__.pyi @@ -0,0 +1,108 @@ +from enum import Enum +from types import ModuleType +from typing import ( + Final, + Literal as L, + NotRequired, + TypedDict, + overload, + type_check_only, +) + +_CompilerConfigDictValue = TypedDict( + "_CompilerConfigDictValue", + { + "name": str, + "linker": str, + "version": str, + "commands": str, + "args": str, + "linker args": str, + }, +) +_CompilerConfigDict = TypedDict( + "_CompilerConfigDict", + { + "c": _CompilerConfigDictValue, + "cython": _CompilerConfigDictValue, + "c++": _CompilerConfigDictValue, + }, +) +_MachineInformationDict = TypedDict( + "_MachineInformationDict", + { + "host": _MachineInformationDictValue, + "build": _MachineInformationDictValue, + "cross-compiled": NotRequired[L[True]], + }, +) + +@type_check_only +class _MachineInformationDictValue(TypedDict): + cpu: str + family: str + endian: L["little", "big"] + system: str + +_BuildDependenciesDictValue = TypedDict( + "_BuildDependenciesDictValue", + { + "name": str, + "found": NotRequired[L[True]], + "version": str, + "include directory": str, + "lib directory": str, + "openblas configuration": str, + "pc file directory": str, + }, +) + 
+class _BuildDependenciesDict(TypedDict): + blas: _BuildDependenciesDictValue + lapack: _BuildDependenciesDictValue + +class _PythonInformationDict(TypedDict): + path: str + version: str + +_SIMDExtensionsDict = TypedDict( + "_SIMDExtensionsDict", + { + "baseline": list[str], + "found": list[str], + "not found": list[str], + }, +) + +_ConfigDict = TypedDict( + "_ConfigDict", + { + "Compilers": _CompilerConfigDict, + "Machine Information": _MachineInformationDict, + "Build Dependencies": _BuildDependenciesDict, + "Python Information": _PythonInformationDict, + "SIMD Extensions": _SIMDExtensionsDict, + }, +) + +### + +__all__ = ["show_config"] + +CONFIG: Final[_ConfigDict] = ... + +class DisplayModes(Enum): + stdout = "stdout" + dicts = "dicts" + +def _check_pyyaml() -> ModuleType: ... + +@overload +def show(mode: L["stdout"] = "stdout") -> None: ... +@overload +def show(mode: L["dicts"]) -> _ConfigDict: ... + +@overload +def show_config(mode: L["stdout"] = "stdout") -> None: ... +@overload +def show_config(mode: L["dicts"]) -> _ConfigDict: ... 
diff --git a/numpy/__init__.cython-30.pxd b/numpy/__init__.cython-30.pxd index 0270f0ee988f..c71898626070 100644 --- a/numpy/__init__.cython-30.pxd +++ b/numpy/__init__.cython-30.pxd @@ -51,15 +51,11 @@ cdef extern from "numpy/arrayobject.h": ctypedef signed short npy_int16 ctypedef signed int npy_int32 ctypedef signed long long npy_int64 - ctypedef signed long long npy_int96 - ctypedef signed long long npy_int128 ctypedef unsigned char npy_uint8 ctypedef unsigned short npy_uint16 ctypedef unsigned int npy_uint32 ctypedef unsigned long long npy_uint64 - ctypedef unsigned long long npy_uint96 - ctypedef unsigned long long npy_uint128 ctypedef float npy_float32 ctypedef double npy_float64 @@ -117,6 +113,7 @@ cdef extern from "numpy/arrayobject.h": NPY_OBJECT NPY_STRING NPY_UNICODE + NPY_VSTRING NPY_VOID NPY_DATETIME NPY_TIMEDELTA @@ -127,30 +124,24 @@ cdef extern from "numpy/arrayobject.h": NPY_INT16 NPY_INT32 NPY_INT64 - NPY_INT128 - NPY_INT256 NPY_UINT8 NPY_UINT16 NPY_UINT32 NPY_UINT64 - NPY_UINT128 - NPY_UINT256 NPY_FLOAT16 NPY_FLOAT32 NPY_FLOAT64 NPY_FLOAT80 NPY_FLOAT96 NPY_FLOAT128 - NPY_FLOAT256 - NPY_COMPLEX32 NPY_COMPLEX64 NPY_COMPLEX128 NPY_COMPLEX160 NPY_COMPLEX192 NPY_COMPLEX256 - NPY_COMPLEX512 NPY_INTP + NPY_UINTP NPY_DEFAULT_INT # Not a compile time constant (normally)! ctypedef enum NPY_ORDER: @@ -165,6 +156,7 @@ cdef extern from "numpy/arrayobject.h": NPY_SAFE_CASTING NPY_SAME_KIND_CASTING NPY_UNSAFE_CASTING + NPY_SAME_VALUE_CASTING ctypedef enum NPY_CLIPMODE: NPY_CLIP @@ -190,40 +182,6 @@ cdef extern from "numpy/arrayobject.h": NPY_SEARCHRIGHT enum: - # DEPRECATED since NumPy 1.7 ! Do not use in new code! 
- NPY_C_CONTIGUOUS - NPY_F_CONTIGUOUS - NPY_CONTIGUOUS - NPY_FORTRAN - NPY_OWNDATA - NPY_FORCECAST - NPY_ENSURECOPY - NPY_ENSUREARRAY - NPY_ELEMENTSTRIDES - NPY_ALIGNED - NPY_NOTSWAPPED - NPY_WRITEABLE - NPY_ARR_HAS_DESCR - - NPY_BEHAVED - NPY_BEHAVED_NS - NPY_CARRAY - NPY_CARRAY_RO - NPY_FARRAY - NPY_FARRAY_RO - NPY_DEFAULT - - NPY_IN_ARRAY - NPY_OUT_ARRAY - NPY_INOUT_ARRAY - NPY_IN_FARRAY - NPY_OUT_FARRAY - NPY_INOUT_FARRAY - - NPY_UPDATE_ALL - - enum: - # Added in NumPy 1.7 to replace the deprecated enums above. NPY_ARRAY_C_CONTIGUOUS NPY_ARRAY_F_CONTIGUOUS NPY_ARRAY_OWNDATA @@ -608,7 +566,6 @@ cdef extern from "numpy/arrayobject.h": # more than is probably needed until it can be checked further. int PyArray_INCREF (ndarray) except * # uses PyArray_Item_INCREF... int PyArray_XDECREF (ndarray) except * # uses PyArray_Item_DECREF... - void PyArray_SetStringFunction (object, int) dtype PyArray_DescrFromType (int) object PyArray_TypeObjectFromType (int) char * PyArray_Zero (ndarray) @@ -758,6 +715,23 @@ cdef extern from "numpy/arrayobject.h": npy_intp PyArray_OverflowMultiplyList (npy_intp *, int) int PyArray_SetBaseObject(ndarray, base) except -1 # NOTE: steals a reference to base! Use "set_array_base()" instead. 
+ # The memory handler functions require the NumPy 1.22 API + # and may require defining NPY_TARGET_VERSION + ctypedef struct PyDataMemAllocator: + void *ctx + void* (*malloc) (void *ctx, size_t size) + void* (*calloc) (void *ctx, size_t nelem, size_t elsize) + void* (*realloc) (void *ctx, void *ptr, size_t new_size) + void (*free) (void *ctx, void *ptr, size_t size) + + ctypedef struct PyDataMem_Handler: + char* name + npy_uint8 version + PyDataMemAllocator allocator + + object PyDataMem_SetHandler(object handler) + object PyDataMem_GetHandler() + # additional datetime related functions are defined below @@ -771,15 +745,11 @@ ctypedef npy_int8 int8_t ctypedef npy_int16 int16_t ctypedef npy_int32 int32_t ctypedef npy_int64 int64_t -#ctypedef npy_int96 int96_t -#ctypedef npy_int128 int128_t ctypedef npy_uint8 uint8_t ctypedef npy_uint16 uint16_t ctypedef npy_uint32 uint32_t ctypedef npy_uint64 uint64_t -#ctypedef npy_uint96 uint96_t -#ctypedef npy_uint128 uint128_t ctypedef npy_float32 float32_t ctypedef npy_float64 float64_t @@ -841,6 +811,14 @@ cdef extern from "numpy/ndarraytypes.h": int64_t year int32_t month, day, hour, min, sec, us, ps, as + # Iterator API added in v1.6 + # + # These don't match the definition in the C API because Cython can't wrap + # function pointers that return functions. 
+ # https://github.com/cython/cython/issues/6720 + ctypedef int (*NpyIter_IterNextFunc "NpyIter_IterNextFunc *")(NpyIter* it) noexcept nogil + ctypedef void (*NpyIter_GetMultiIndexFunc "NpyIter_GetMultiIndexFunc *")(NpyIter* it, npy_intp* outcoords) noexcept nogil + cdef extern from "numpy/arrayscalars.h": @@ -946,10 +924,17 @@ cdef extern from "numpy/ufuncobject.h": PyUFunc_Zero PyUFunc_One PyUFunc_None + # deprecated UFUNC_FPE_DIVIDEBYZERO UFUNC_FPE_OVERFLOW UFUNC_FPE_UNDERFLOW UFUNC_FPE_INVALID + # use these instead + NPY_FPE_DIVIDEBYZERO + NPY_FPE_OVERFLOW + NPY_FPE_UNDERFLOW + NPY_FPE_INVALID + object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *, void **, char *, int, int, int, int, char *, char *, int) @@ -1092,10 +1077,6 @@ cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) noexcept nogil: return (obj).obmeta.base -# Iterator API added in v1.6 -ctypedef int (*NpyIter_IterNextFunc)(NpyIter* it) noexcept nogil -ctypedef void (*NpyIter_GetMultiIndexFunc)(NpyIter* it, npy_intp* outcoords) noexcept nogil - cdef extern from "numpy/arrayobject.h": ctypedef struct NpyIter: @@ -1213,9 +1194,12 @@ cdef extern from "numpy/arrayobject.h": npy_intp* outstrides) except NPY_FAIL npy_bool NpyIter_IsFirstVisit(NpyIter* it, int iop) nogil # functions for iterating an NpyIter object - NpyIter_IterNextFunc* NpyIter_GetIterNext(NpyIter* it, char** errmsg) except NULL - NpyIter_GetMultiIndexFunc* NpyIter_GetGetMultiIndex(NpyIter* it, - char** errmsg) except NULL + # + # These don't match the definition in the C API because Cython can't wrap + # function pointers that return functions. 
+ NpyIter_IterNextFunc NpyIter_GetIterNext(NpyIter* it, char** errmsg) except NULL + NpyIter_GetMultiIndexFunc NpyIter_GetGetMultiIndex(NpyIter* it, + char** errmsg) except NULL char** NpyIter_GetDataPtrArray(NpyIter* it) nogil char** NpyIter_GetInitialDataPtrArray(NpyIter* it) nogil npy_intp* NpyIter_GetIndexPtr(NpyIter* it) @@ -1224,3 +1208,35 @@ cdef extern from "numpy/arrayobject.h": void NpyIter_GetInnerFixedStrideArray(NpyIter* it, npy_intp* outstrides) nogil npy_bool NpyIter_IterationNeedsAPI(NpyIter* it) nogil void NpyIter_DebugPrint(NpyIter* it) + +# NpyString API +cdef extern from "numpy/ndarraytypes.h": + ctypedef struct npy_string_allocator: + pass + + ctypedef struct npy_packed_static_string: + pass + + ctypedef struct npy_static_string: + size_t size + const char *buf + + ctypedef struct PyArray_StringDTypeObject: + PyArray_Descr base + PyObject *na_object + char coerce + char has_nan_na + char has_string_na + char array_owned + npy_static_string default_string + npy_static_string na_name + npy_string_allocator *allocator + +cdef extern from "numpy/arrayobject.h": + npy_string_allocator *NpyString_acquire_allocator(const PyArray_StringDTypeObject *descr) + void NpyString_acquire_allocators(size_t n_descriptors, PyArray_Descr *const descrs[], npy_string_allocator *allocators[]) + void NpyString_release_allocator(npy_string_allocator *allocator) + void NpyString_release_allocators(size_t length, npy_string_allocator *allocators[]) + int NpyString_load(npy_string_allocator *allocator, const npy_packed_static_string *packed_string, npy_static_string *unpacked_string) + int NpyString_pack_null(npy_string_allocator *allocator, npy_packed_static_string *packed_string) + int NpyString_pack(npy_string_allocator *allocator, npy_packed_static_string *packed_string, const char *buf, size_t size) diff --git a/numpy/__init__.pxd b/numpy/__init__.pxd index aebb71fffa9c..ddb904c1fd68 100644 --- a/numpy/__init__.pxd +++ b/numpy/__init__.pxd @@ -1,1141 +1,12 @@ # NumPy 
static imports for Cython < 3.0 # -# If any of the PyArray_* functions are called, import_array must be -# called first. +# DO NOT USE OR REFER TO THIS HEADER # -# Author: Dag Sverre Seljebotn +# This is provided only to generate an error message on older Cython +# versions. # - -DEF _buffer_format_string_len = 255 - -cimport cpython.buffer as pybuf -from cpython.ref cimport Py_INCREF -from cpython.mem cimport PyObject_Malloc, PyObject_Free -from cpython.object cimport PyObject, PyTypeObject -from cpython.buffer cimport PyObject_GetBuffer -from cpython.type cimport type -cimport libc.stdio as stdio - - -cdef extern from *: - # Leave a marker that the NumPy declarations came from NumPy itself and not from Cython. - # See https://github.com/cython/cython/issues/3573 - """ - /* Using NumPy API declarations from "numpy/__init__.pxd" */ - """ - - -cdef extern from "Python.h": - ctypedef int Py_intptr_t - bint PyObject_TypeCheck(object obj, PyTypeObject* type) - -cdef extern from "numpy/arrayobject.h": - # It would be nice to use size_t and ssize_t, but ssize_t has special - # implicit conversion rules, so just use "long". - # Note: The actual type only matters for Cython promotion, so long - # is closer than int, but could lead to incorrect promotion. - # (Not to worrying, and always the status-quo.) 
- ctypedef signed long npy_intp - ctypedef unsigned long npy_uintp - - ctypedef unsigned char npy_bool - - ctypedef signed char npy_byte - ctypedef signed short npy_short - ctypedef signed int npy_int - ctypedef signed long npy_long - ctypedef signed long long npy_longlong - - ctypedef unsigned char npy_ubyte - ctypedef unsigned short npy_ushort - ctypedef unsigned int npy_uint - ctypedef unsigned long npy_ulong - ctypedef unsigned long long npy_ulonglong - - ctypedef float npy_float - ctypedef double npy_double - ctypedef long double npy_longdouble - - ctypedef signed char npy_int8 - ctypedef signed short npy_int16 - ctypedef signed int npy_int32 - ctypedef signed long long npy_int64 - ctypedef signed long long npy_int96 - ctypedef signed long long npy_int128 - - ctypedef unsigned char npy_uint8 - ctypedef unsigned short npy_uint16 - ctypedef unsigned int npy_uint32 - ctypedef unsigned long long npy_uint64 - ctypedef unsigned long long npy_uint96 - ctypedef unsigned long long npy_uint128 - - ctypedef float npy_float32 - ctypedef double npy_float64 - ctypedef long double npy_float80 - ctypedef long double npy_float96 - ctypedef long double npy_float128 - - ctypedef struct npy_cfloat: - pass - - ctypedef struct npy_cdouble: - pass - - ctypedef struct npy_clongdouble: - pass - - ctypedef struct npy_complex64: - pass - - ctypedef struct npy_complex128: - pass - - ctypedef struct npy_complex160: - pass - - ctypedef struct npy_complex192: - pass - - ctypedef struct npy_complex256: - pass - - ctypedef struct PyArray_Dims: - npy_intp *ptr - int len - - - cdef enum NPY_TYPES: - NPY_BOOL - NPY_BYTE - NPY_UBYTE - NPY_SHORT - NPY_USHORT - NPY_INT - NPY_UINT - NPY_LONG - NPY_ULONG - NPY_LONGLONG - NPY_ULONGLONG - NPY_FLOAT - NPY_DOUBLE - NPY_LONGDOUBLE - NPY_CFLOAT - NPY_CDOUBLE - NPY_CLONGDOUBLE - NPY_OBJECT - NPY_STRING - NPY_UNICODE - NPY_VOID - NPY_DATETIME - NPY_TIMEDELTA - NPY_NTYPES_LEGACY - NPY_NOTYPE - - NPY_INT8 - NPY_INT16 - NPY_INT32 - NPY_INT64 - NPY_INT128 - 
NPY_INT256 - NPY_UINT8 - NPY_UINT16 - NPY_UINT32 - NPY_UINT64 - NPY_UINT128 - NPY_UINT256 - NPY_FLOAT16 - NPY_FLOAT32 - NPY_FLOAT64 - NPY_FLOAT80 - NPY_FLOAT96 - NPY_FLOAT128 - NPY_FLOAT256 - NPY_COMPLEX32 - NPY_COMPLEX64 - NPY_COMPLEX128 - NPY_COMPLEX160 - NPY_COMPLEX192 - NPY_COMPLEX256 - NPY_COMPLEX512 - - NPY_INTP - NPY_DEFAULT_INT # Not a compile time constant (normally)! - - ctypedef enum NPY_ORDER: - NPY_ANYORDER - NPY_CORDER - NPY_FORTRANORDER - NPY_KEEPORDER - - ctypedef enum NPY_CASTING: - NPY_NO_CASTING - NPY_EQUIV_CASTING - NPY_SAFE_CASTING - NPY_SAME_KIND_CASTING - NPY_UNSAFE_CASTING - - ctypedef enum NPY_CLIPMODE: - NPY_CLIP - NPY_WRAP - NPY_RAISE - - ctypedef enum NPY_SCALARKIND: - NPY_NOSCALAR, - NPY_BOOL_SCALAR, - NPY_INTPOS_SCALAR, - NPY_INTNEG_SCALAR, - NPY_FLOAT_SCALAR, - NPY_COMPLEX_SCALAR, - NPY_OBJECT_SCALAR - - ctypedef enum NPY_SORTKIND: - NPY_QUICKSORT - NPY_HEAPSORT - NPY_MERGESORT - - ctypedef enum NPY_SEARCHSIDE: - NPY_SEARCHLEFT - NPY_SEARCHRIGHT - - enum: - # DEPRECATED since NumPy 1.7 ! Do not use in new code! - NPY_C_CONTIGUOUS - NPY_F_CONTIGUOUS - NPY_CONTIGUOUS - NPY_FORTRAN - NPY_OWNDATA - NPY_FORCECAST - NPY_ENSURECOPY - NPY_ENSUREARRAY - NPY_ELEMENTSTRIDES - NPY_ALIGNED - NPY_NOTSWAPPED - NPY_WRITEABLE - NPY_ARR_HAS_DESCR - - NPY_BEHAVED - NPY_BEHAVED_NS - NPY_CARRAY - NPY_CARRAY_RO - NPY_FARRAY - NPY_FARRAY_RO - NPY_DEFAULT - - NPY_IN_ARRAY - NPY_OUT_ARRAY - NPY_INOUT_ARRAY - NPY_IN_FARRAY - NPY_OUT_FARRAY - NPY_INOUT_FARRAY - - NPY_UPDATE_ALL - - enum: - # Added in NumPy 1.7 to replace the deprecated enums above. 
- NPY_ARRAY_C_CONTIGUOUS - NPY_ARRAY_F_CONTIGUOUS - NPY_ARRAY_OWNDATA - NPY_ARRAY_FORCECAST - NPY_ARRAY_ENSURECOPY - NPY_ARRAY_ENSUREARRAY - NPY_ARRAY_ELEMENTSTRIDES - NPY_ARRAY_ALIGNED - NPY_ARRAY_NOTSWAPPED - NPY_ARRAY_WRITEABLE - NPY_ARRAY_WRITEBACKIFCOPY - - NPY_ARRAY_BEHAVED - NPY_ARRAY_BEHAVED_NS - NPY_ARRAY_CARRAY - NPY_ARRAY_CARRAY_RO - NPY_ARRAY_FARRAY - NPY_ARRAY_FARRAY_RO - NPY_ARRAY_DEFAULT - - NPY_ARRAY_IN_ARRAY - NPY_ARRAY_OUT_ARRAY - NPY_ARRAY_INOUT_ARRAY - NPY_ARRAY_IN_FARRAY - NPY_ARRAY_OUT_FARRAY - NPY_ARRAY_INOUT_FARRAY - - NPY_ARRAY_UPDATE_ALL - - cdef enum: - NPY_MAXDIMS # 64 on NumPy 2.x and 32 on NumPy 1.x - NPY_RAVEL_AXIS # Used for functions like PyArray_Mean - - ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *) - - ctypedef struct PyArray_ArrayDescr: - # shape is a tuple, but Cython doesn't support "tuple shape" - # inside a non-PyObject declaration, so we have to declare it - # as just a PyObject*. - PyObject* shape - - ctypedef struct PyArray_Descr: - pass - - ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]: - # Use PyDataType_* macros when possible, however there are no macros - # for accessing some of the fields, so some are defined. - cdef PyTypeObject* typeobj - cdef char kind - cdef char type - # Numpy sometimes mutates this without warning (e.g. it'll - # sometimes change "|" to "<" in shared dtype objects on - # little-endian machines). If this matters to you, use - # PyArray_IsNativeByteOrder(dtype.byteorder) instead of - # directly accessing this field. - cdef char byteorder - # Flags are not directly accessible on Cython <3. Use PyDataType_FLAGS. - # cdef char flags - cdef int type_num - # itemsize/elsize, alignment, fields, names, and subarray must - # use the `PyDataType_*` accessor macros. 
With Cython 3 you can - # still use getter attributes `dtype.itemsize` - - ctypedef class numpy.flatiter [object PyArrayIterObject, check_size ignore]: - # Use through macros - pass - - ctypedef class numpy.broadcast [object PyArrayMultiIterObject, check_size ignore]: - cdef int numiter - cdef npy_intp size, index - cdef int nd - cdef npy_intp *dimensions - cdef void **iters - - ctypedef struct PyArrayObject: - # For use in situations where ndarray can't replace PyArrayObject*, - # like PyArrayObject**. - pass - - ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]: - cdef __cythonbufferdefaults__ = {"mode": "strided"} - - cdef: - # Only taking a few of the most commonly used and stable fields. - # One should use PyArray_* macros instead to access the C fields. - char *data - int ndim "nd" - npy_intp *shape "dimensions" - npy_intp *strides - dtype descr # deprecated since NumPy 1.7 ! - PyObject* base # NOT PUBLIC, DO NOT USE ! - - - int _import_array() except -1 - # A second definition so _import_array isn't marked as used when we use it here. - # Do not use - subject to change any time. - int __pyx_import_array "_import_array"() except -1 - - # - # Macros from ndarrayobject.h - # - bint PyArray_CHKFLAGS(ndarray m, int flags) nogil - bint PyArray_IS_C_CONTIGUOUS(ndarray arr) nogil - bint PyArray_IS_F_CONTIGUOUS(ndarray arr) nogil - bint PyArray_ISCONTIGUOUS(ndarray m) nogil - bint PyArray_ISWRITEABLE(ndarray m) nogil - bint PyArray_ISALIGNED(ndarray m) nogil - - int PyArray_NDIM(ndarray) nogil - bint PyArray_ISONESEGMENT(ndarray) nogil - bint PyArray_ISFORTRAN(ndarray) nogil - int PyArray_FORTRANIF(ndarray) nogil - - void* PyArray_DATA(ndarray) nogil - char* PyArray_BYTES(ndarray) nogil - - npy_intp* PyArray_DIMS(ndarray) nogil - npy_intp* PyArray_STRIDES(ndarray) nogil - npy_intp PyArray_DIM(ndarray, size_t) nogil - npy_intp PyArray_STRIDE(ndarray, size_t) nogil - - PyObject *PyArray_BASE(ndarray) nogil # returns borrowed reference! 
- PyArray_Descr *PyArray_DESCR(ndarray) nogil # returns borrowed reference to dtype! - PyArray_Descr *PyArray_DTYPE(ndarray) nogil # returns borrowed reference to dtype! NP 1.7+ alias for descr. - int PyArray_FLAGS(ndarray) nogil - void PyArray_CLEARFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 - void PyArray_ENABLEFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 - npy_intp PyArray_ITEMSIZE(ndarray) nogil - int PyArray_TYPE(ndarray arr) nogil - - object PyArray_GETITEM(ndarray arr, void *itemptr) - int PyArray_SETITEM(ndarray arr, void *itemptr, object obj) except -1 - - bint PyTypeNum_ISBOOL(int) nogil - bint PyTypeNum_ISUNSIGNED(int) nogil - bint PyTypeNum_ISSIGNED(int) nogil - bint PyTypeNum_ISINTEGER(int) nogil - bint PyTypeNum_ISFLOAT(int) nogil - bint PyTypeNum_ISNUMBER(int) nogil - bint PyTypeNum_ISSTRING(int) nogil - bint PyTypeNum_ISCOMPLEX(int) nogil - bint PyTypeNum_ISFLEXIBLE(int) nogil - bint PyTypeNum_ISUSERDEF(int) nogil - bint PyTypeNum_ISEXTENDED(int) nogil - bint PyTypeNum_ISOBJECT(int) nogil - - npy_intp PyDataType_ELSIZE(dtype) nogil - npy_intp PyDataType_ALIGNMENT(dtype) nogil - PyObject* PyDataType_METADATA(dtype) nogil - PyArray_ArrayDescr* PyDataType_SUBARRAY(dtype) nogil - PyObject* PyDataType_NAMES(dtype) nogil - PyObject* PyDataType_FIELDS(dtype) nogil - - bint PyDataType_ISBOOL(dtype) nogil - bint PyDataType_ISUNSIGNED(dtype) nogil - bint PyDataType_ISSIGNED(dtype) nogil - bint PyDataType_ISINTEGER(dtype) nogil - bint PyDataType_ISFLOAT(dtype) nogil - bint PyDataType_ISNUMBER(dtype) nogil - bint PyDataType_ISSTRING(dtype) nogil - bint PyDataType_ISCOMPLEX(dtype) nogil - bint PyDataType_ISFLEXIBLE(dtype) nogil - bint PyDataType_ISUSERDEF(dtype) nogil - bint PyDataType_ISEXTENDED(dtype) nogil - bint PyDataType_ISOBJECT(dtype) nogil - bint PyDataType_HASFIELDS(dtype) nogil - bint PyDataType_HASSUBARRAY(dtype) nogil - npy_uint64 PyDataType_FLAGS(dtype) nogil - - bint PyArray_ISBOOL(ndarray) nogil - bint PyArray_ISUNSIGNED(ndarray) 
nogil - bint PyArray_ISSIGNED(ndarray) nogil - bint PyArray_ISINTEGER(ndarray) nogil - bint PyArray_ISFLOAT(ndarray) nogil - bint PyArray_ISNUMBER(ndarray) nogil - bint PyArray_ISSTRING(ndarray) nogil - bint PyArray_ISCOMPLEX(ndarray) nogil - bint PyArray_ISFLEXIBLE(ndarray) nogil - bint PyArray_ISUSERDEF(ndarray) nogil - bint PyArray_ISEXTENDED(ndarray) nogil - bint PyArray_ISOBJECT(ndarray) nogil - bint PyArray_HASFIELDS(ndarray) nogil - - bint PyArray_ISVARIABLE(ndarray) nogil - - bint PyArray_SAFEALIGNEDCOPY(ndarray) nogil - bint PyArray_ISNBO(char) nogil # works on ndarray.byteorder - bint PyArray_IsNativeByteOrder(char) nogil # works on ndarray.byteorder - bint PyArray_ISNOTSWAPPED(ndarray) nogil - bint PyArray_ISBYTESWAPPED(ndarray) nogil - - bint PyArray_FLAGSWAP(ndarray, int) nogil - - bint PyArray_ISCARRAY(ndarray) nogil - bint PyArray_ISCARRAY_RO(ndarray) nogil - bint PyArray_ISFARRAY(ndarray) nogil - bint PyArray_ISFARRAY_RO(ndarray) nogil - bint PyArray_ISBEHAVED(ndarray) nogil - bint PyArray_ISBEHAVED_RO(ndarray) nogil - - - bint PyDataType_ISNOTSWAPPED(dtype) nogil - bint PyDataType_ISBYTESWAPPED(dtype) nogil - - bint PyArray_DescrCheck(object) - - bint PyArray_Check(object) - bint PyArray_CheckExact(object) - - # Cannot be supported due to out arg: - # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&) - # bint PyArray_HasArrayInterface(op, out) - - - bint PyArray_IsZeroDim(object) - # Cannot be supported due to ## ## in macro: - # bint PyArray_IsScalar(object, verbatim work) - bint PyArray_CheckScalar(object) - bint PyArray_IsPythonNumber(object) - bint PyArray_IsPythonScalar(object) - bint PyArray_IsAnyScalar(object) - bint PyArray_CheckAnyScalar(object) - - ndarray PyArray_GETCONTIGUOUS(ndarray) - bint PyArray_SAMESHAPE(ndarray, ndarray) nogil - npy_intp PyArray_SIZE(ndarray) nogil - npy_intp PyArray_NBYTES(ndarray) nogil - - object PyArray_FROM_O(object) - object PyArray_FROM_OF(object m, int flags) - object 
PyArray_FROM_OT(object m, int type) - object PyArray_FROM_OTF(object m, int type, int flags) - object PyArray_FROMANY(object m, int type, int min, int max, int flags) - object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran) - object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran) - void PyArray_FILLWBYTE(ndarray, int val) - object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth) - unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2) - bint PyArray_EquivByteorders(int b1, int b2) nogil - object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum) - object PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data) - #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr) - object PyArray_ToScalar(void* data, ndarray arr) - - void* PyArray_GETPTR1(ndarray m, npy_intp i) nogil - void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j) nogil - void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) nogil - void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l) nogil - - # Cannot be supported due to out arg - # void PyArray_DESCR_REPLACE(descr) - - - object PyArray_Copy(ndarray) - object PyArray_FromObject(object op, int type, int min_depth, int max_depth) - object PyArray_ContiguousFromObject(object op, int type, int min_depth, int max_depth) - object PyArray_CopyFromObject(object op, int type, int min_depth, int max_depth) - - object PyArray_Cast(ndarray mp, int type_num) - object PyArray_Take(ndarray ap, object items, int axis) - object PyArray_Put(ndarray ap, object items, object values) - - void PyArray_ITER_RESET(flatiter it) nogil - void PyArray_ITER_NEXT(flatiter it) nogil - void PyArray_ITER_GOTO(flatiter it, npy_intp* destination) nogil - void PyArray_ITER_GOTO1D(flatiter it, npy_intp ind) nogil - void* PyArray_ITER_DATA(flatiter it) nogil - bint PyArray_ITER_NOTDONE(flatiter it) nogil - - void PyArray_MultiIter_RESET(broadcast multi) nogil 
- void PyArray_MultiIter_NEXT(broadcast multi) nogil - void PyArray_MultiIter_GOTO(broadcast multi, npy_intp dest) nogil - void PyArray_MultiIter_GOTO1D(broadcast multi, npy_intp ind) nogil - void* PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil - void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil - bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil - npy_intp PyArray_MultiIter_SIZE(broadcast multi) nogil - int PyArray_MultiIter_NDIM(broadcast multi) nogil - npy_intp PyArray_MultiIter_INDEX(broadcast multi) nogil - int PyArray_MultiIter_NUMITER(broadcast multi) nogil - npy_intp* PyArray_MultiIter_DIMS(broadcast multi) nogil - void** PyArray_MultiIter_ITERS(broadcast multi) nogil - - # Functions from __multiarray_api.h - - # Functions taking dtype and returning object/ndarray are disabled - # for now as they steal dtype references. I'm conservative and disable - # more than is probably needed until it can be checked further. - int PyArray_INCREF (ndarray) except * # uses PyArray_Item_INCREF... - int PyArray_XDECREF (ndarray) except * # uses PyArray_Item_DECREF... 
- void PyArray_SetStringFunction (object, int) - dtype PyArray_DescrFromType (int) - object PyArray_TypeObjectFromType (int) - char * PyArray_Zero (ndarray) - char * PyArray_One (ndarray) - #object PyArray_CastToType (ndarray, dtype, int) - int PyArray_CanCastSafely (int, int) # writes errors - npy_bool PyArray_CanCastTo (dtype, dtype) # writes errors - int PyArray_ObjectType (object, int) except 0 - dtype PyArray_DescrFromObject (object, dtype) - #ndarray* PyArray_ConvertToCommonType (object, int *) - dtype PyArray_DescrFromScalar (object) - dtype PyArray_DescrFromTypeObject (object) - npy_intp PyArray_Size (object) - #object PyArray_Scalar (void *, dtype, object) - #object PyArray_FromScalar (object, dtype) - void PyArray_ScalarAsCtype (object, void *) - #int PyArray_CastScalarToCtype (object, void *, dtype) - #int PyArray_CastScalarDirect (object, dtype, void *, int) - #PyArray_VectorUnaryFunc * PyArray_GetCastFunc (dtype, int) - #object PyArray_FromAny (object, dtype, int, int, int, object) - object PyArray_EnsureArray (object) - object PyArray_EnsureAnyArray (object) - #object PyArray_FromFile (stdio.FILE *, dtype, npy_intp, char *) - #object PyArray_FromString (char *, npy_intp, dtype, npy_intp, char *) - #object PyArray_FromBuffer (object, dtype, npy_intp, npy_intp) - #object PyArray_FromIter (object, dtype, npy_intp) - object PyArray_Return (ndarray) - #object PyArray_GetField (ndarray, dtype, int) - #int PyArray_SetField (ndarray, dtype, int, object) except -1 - object PyArray_Byteswap (ndarray, npy_bool) - object PyArray_Resize (ndarray, PyArray_Dims *, int, NPY_ORDER) - int PyArray_CopyInto (ndarray, ndarray) except -1 - int PyArray_CopyAnyInto (ndarray, ndarray) except -1 - int PyArray_CopyObject (ndarray, object) except -1 - object PyArray_NewCopy (ndarray, NPY_ORDER) - object PyArray_ToList (ndarray) - object PyArray_ToString (ndarray, NPY_ORDER) - int PyArray_ToFile (ndarray, stdio.FILE *, char *, char *) except -1 - int PyArray_Dump (object, object, 
int) except -1 - object PyArray_Dumps (object, int) - int PyArray_ValidType (int) # Cannot error - void PyArray_UpdateFlags (ndarray, int) - object PyArray_New (type, int, npy_intp *, int, npy_intp *, void *, int, int, object) - #object PyArray_NewFromDescr (type, dtype, int, npy_intp *, npy_intp *, void *, int, object) - #dtype PyArray_DescrNew (dtype) - dtype PyArray_DescrNewFromType (int) - double PyArray_GetPriority (object, double) # clears errors as of 1.25 - object PyArray_IterNew (object) - object PyArray_MultiIterNew (int, ...) - - int PyArray_PyIntAsInt (object) except? -1 - npy_intp PyArray_PyIntAsIntp (object) - int PyArray_Broadcast (broadcast) except -1 - int PyArray_FillWithScalar (ndarray, object) except -1 - npy_bool PyArray_CheckStrides (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *) - dtype PyArray_DescrNewByteorder (dtype, char) - object PyArray_IterAllButAxis (object, int *) - #object PyArray_CheckFromAny (object, dtype, int, int, int, object) - #object PyArray_FromArray (ndarray, dtype, int) - object PyArray_FromInterface (object) - object PyArray_FromStructInterface (object) - #object PyArray_FromArrayAttr (object, dtype, object) - #NPY_SCALARKIND PyArray_ScalarKind (int, ndarray*) - int PyArray_CanCoerceScalar (int, int, NPY_SCALARKIND) - npy_bool PyArray_CanCastScalar (type, type) - int PyArray_RemoveSmallest (broadcast) except -1 - int PyArray_ElementStrides (object) - void PyArray_Item_INCREF (char *, dtype) except * - void PyArray_Item_XDECREF (char *, dtype) except * - object PyArray_Transpose (ndarray, PyArray_Dims *) - object PyArray_TakeFrom (ndarray, object, int, ndarray, NPY_CLIPMODE) - object PyArray_PutTo (ndarray, object, object, NPY_CLIPMODE) - object PyArray_PutMask (ndarray, object, object) - object PyArray_Repeat (ndarray, object, int) - object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE) - int PyArray_Sort (ndarray, int, NPY_SORTKIND) except -1 - object PyArray_ArgSort (ndarray, int, NPY_SORTKIND) - object 
PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE, PyObject *) - object PyArray_ArgMax (ndarray, int, ndarray) - object PyArray_ArgMin (ndarray, int, ndarray) - object PyArray_Reshape (ndarray, object) - object PyArray_Newshape (ndarray, PyArray_Dims *, NPY_ORDER) - object PyArray_Squeeze (ndarray) - #object PyArray_View (ndarray, dtype, type) - object PyArray_SwapAxes (ndarray, int, int) - object PyArray_Max (ndarray, int, ndarray) - object PyArray_Min (ndarray, int, ndarray) - object PyArray_Ptp (ndarray, int, ndarray) - object PyArray_Mean (ndarray, int, int, ndarray) - object PyArray_Trace (ndarray, int, int, int, int, ndarray) - object PyArray_Diagonal (ndarray, int, int, int) - object PyArray_Clip (ndarray, object, object, ndarray) - object PyArray_Conjugate (ndarray, ndarray) - object PyArray_Nonzero (ndarray) - object PyArray_Std (ndarray, int, int, ndarray, int) - object PyArray_Sum (ndarray, int, int, ndarray) - object PyArray_CumSum (ndarray, int, int, ndarray) - object PyArray_Prod (ndarray, int, int, ndarray) - object PyArray_CumProd (ndarray, int, int, ndarray) - object PyArray_All (ndarray, int, ndarray) - object PyArray_Any (ndarray, int, ndarray) - object PyArray_Compress (ndarray, object, int, ndarray) - object PyArray_Flatten (ndarray, NPY_ORDER) - object PyArray_Ravel (ndarray, NPY_ORDER) - npy_intp PyArray_MultiplyList (npy_intp *, int) - int PyArray_MultiplyIntList (int *, int) - void * PyArray_GetPtr (ndarray, npy_intp*) - int PyArray_CompareLists (npy_intp *, npy_intp *, int) - #int PyArray_AsCArray (object*, void *, npy_intp *, int, dtype) - int PyArray_Free (object, void *) - #int PyArray_Converter (object, object*) - int PyArray_IntpFromSequence (object, npy_intp *, int) except -1 - object PyArray_Concatenate (object, int) - object PyArray_InnerProduct (object, object) - object PyArray_MatrixProduct (object, object) - object PyArray_Correlate (object, object, int) - #int PyArray_DescrConverter (object, dtype*) except 0 - #int 
PyArray_DescrConverter2 (object, dtype*) except 0 - int PyArray_IntpConverter (object, PyArray_Dims *) except 0 - #int PyArray_BufferConverter (object, chunk) except 0 - int PyArray_AxisConverter (object, int *) except 0 - int PyArray_BoolConverter (object, npy_bool *) except 0 - int PyArray_ByteorderConverter (object, char *) except 0 - int PyArray_OrderConverter (object, NPY_ORDER *) except 0 - unsigned char PyArray_EquivTypes (dtype, dtype) # clears errors - #object PyArray_Zeros (int, npy_intp *, dtype, int) - #object PyArray_Empty (int, npy_intp *, dtype, int) - object PyArray_Where (object, object, object) - object PyArray_Arange (double, double, double, int) - #object PyArray_ArangeObj (object, object, object, dtype) - int PyArray_SortkindConverter (object, NPY_SORTKIND *) except 0 - object PyArray_LexSort (object, int) - object PyArray_Round (ndarray, int, ndarray) - unsigned char PyArray_EquivTypenums (int, int) - int PyArray_RegisterDataType (dtype) except -1 - int PyArray_RegisterCastFunc (dtype, int, PyArray_VectorUnaryFunc *) except -1 - int PyArray_RegisterCanCast (dtype, int, NPY_SCALARKIND) except -1 - #void PyArray_InitArrFuncs (PyArray_ArrFuncs *) - object PyArray_IntTupleFromIntp (int, npy_intp *) - int PyArray_ClipmodeConverter (object, NPY_CLIPMODE *) except 0 - #int PyArray_OutputConverter (object, ndarray*) except 0 - object PyArray_BroadcastToShape (object, npy_intp *, int) - #int PyArray_DescrAlignConverter (object, dtype*) except 0 - #int PyArray_DescrAlignConverter2 (object, dtype*) except 0 - int PyArray_SearchsideConverter (object, void *) except 0 - object PyArray_CheckAxis (ndarray, int *, int) - npy_intp PyArray_OverflowMultiplyList (npy_intp *, int) - int PyArray_SetBaseObject(ndarray, base) except -1 # NOTE: steals a reference to base! Use "set_array_base()" instead. - - # additional datetime related functions are defined below - - -# Typedefs that matches the runtime dtype objects in -# the numpy module. 
- -# The ones that are commented out needs an IFDEF function -# in Cython to enable them only on the right systems. - -ctypedef npy_int8 int8_t -ctypedef npy_int16 int16_t -ctypedef npy_int32 int32_t -ctypedef npy_int64 int64_t -#ctypedef npy_int96 int96_t -#ctypedef npy_int128 int128_t - -ctypedef npy_uint8 uint8_t -ctypedef npy_uint16 uint16_t -ctypedef npy_uint32 uint32_t -ctypedef npy_uint64 uint64_t -#ctypedef npy_uint96 uint96_t -#ctypedef npy_uint128 uint128_t - -ctypedef npy_float32 float32_t -ctypedef npy_float64 float64_t -#ctypedef npy_float80 float80_t -#ctypedef npy_float128 float128_t - -ctypedef float complex complex64_t -ctypedef double complex complex128_t - -ctypedef npy_longlong longlong_t -ctypedef npy_ulonglong ulonglong_t - -ctypedef npy_intp intp_t -ctypedef npy_uintp uintp_t - -ctypedef npy_double float_t -ctypedef npy_double double_t -ctypedef npy_longdouble longdouble_t - -ctypedef float complex cfloat_t -ctypedef double complex cdouble_t -ctypedef double complex complex_t -ctypedef long double complex clongdouble_t - -cdef inline object PyArray_MultiIterNew1(a): - return PyArray_MultiIterNew(1, a) - -cdef inline object PyArray_MultiIterNew2(a, b): - return PyArray_MultiIterNew(2, a, b) - -cdef inline object PyArray_MultiIterNew3(a, b, c): - return PyArray_MultiIterNew(3, a, b, c) - -cdef inline object PyArray_MultiIterNew4(a, b, c, d): - return PyArray_MultiIterNew(4, a, b, c, d) - -cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): - return PyArray_MultiIterNew(5, a, b, c, d, e) - -cdef inline tuple PyDataType_SHAPE(dtype d): - if PyDataType_HASSUBARRAY(d): - return d.subarray.shape - else: - return () - - -cdef extern from "numpy/ndarrayobject.h": - PyTypeObject PyTimedeltaArrType_Type - PyTypeObject PyDatetimeArrType_Type - ctypedef int64_t npy_timedelta - ctypedef int64_t npy_datetime - -cdef extern from "numpy/ndarraytypes.h": - ctypedef struct PyArray_DatetimeMetaData: - NPY_DATETIMEUNIT base - int64_t num - - ctypedef struct 
npy_datetimestruct: - int64_t year - int32_t month, day, hour, min, sec, us, ps, as - - -cdef extern from "numpy/arrayscalars.h": - - # abstract types - ctypedef class numpy.generic [object PyObject]: - pass - ctypedef class numpy.number [object PyObject]: - pass - ctypedef class numpy.integer [object PyObject]: - pass - ctypedef class numpy.signedinteger [object PyObject]: - pass - ctypedef class numpy.unsignedinteger [object PyObject]: - pass - ctypedef class numpy.inexact [object PyObject]: - pass - ctypedef class numpy.floating [object PyObject]: - pass - ctypedef class numpy.complexfloating [object PyObject]: - pass - ctypedef class numpy.flexible [object PyObject]: - pass - ctypedef class numpy.character [object PyObject]: - pass - - ctypedef struct PyDatetimeScalarObject: - # PyObject_HEAD - npy_datetime obval - PyArray_DatetimeMetaData obmeta - - ctypedef struct PyTimedeltaScalarObject: - # PyObject_HEAD - npy_timedelta obval - PyArray_DatetimeMetaData obmeta - - ctypedef enum NPY_DATETIMEUNIT: - NPY_FR_Y - NPY_FR_M - NPY_FR_W - NPY_FR_D - NPY_FR_B - NPY_FR_h - NPY_FR_m - NPY_FR_s - NPY_FR_ms - NPY_FR_us - NPY_FR_ns - NPY_FR_ps - NPY_FR_fs - NPY_FR_as - NPY_FR_GENERIC - - -cdef extern from "numpy/arrayobject.h": - # These are part of the C-API defined in `__multiarray_api.h` - - # NumPy internal definitions in datetime_strings.c: - int get_datetime_iso_8601_strlen "NpyDatetime_GetDatetimeISO8601StrLen" ( - int local, NPY_DATETIMEUNIT base) - int make_iso_8601_datetime "NpyDatetime_MakeISO8601Datetime" ( - npy_datetimestruct *dts, char *outstr, npy_intp outlen, - int local, int utc, NPY_DATETIMEUNIT base, int tzoffset, - NPY_CASTING casting) except -1 - - # NumPy internal definition in datetime.c: - # May return 1 to indicate that object does not appear to be a datetime - # (returns 0 on success). 
- int convert_pydatetime_to_datetimestruct "NpyDatetime_ConvertPyDateTimeToDatetimeStruct" ( - PyObject *obj, npy_datetimestruct *out, - NPY_DATETIMEUNIT *out_bestunit, int apply_tzinfo) except -1 - int convert_datetime64_to_datetimestruct "NpyDatetime_ConvertDatetime64ToDatetimeStruct" ( - PyArray_DatetimeMetaData *meta, npy_datetime dt, - npy_datetimestruct *out) except -1 - int convert_datetimestruct_to_datetime64 "NpyDatetime_ConvertDatetimeStructToDatetime64"( - PyArray_DatetimeMetaData *meta, const npy_datetimestruct *dts, - npy_datetime *out) except -1 - - +# See __init__.cython-30.pxd for the real Cython header # -# ufunc API -# - -cdef extern from "numpy/ufuncobject.h": - - ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *) - - ctypedef class numpy.ufunc [object PyUFuncObject, check_size ignore]: - cdef: - int nin, nout, nargs - int identity - PyUFuncGenericFunction *functions - void **data - int ntypes - int check_return - char *name - char *types - char *doc - void *ptr - PyObject *obj - PyObject *userloops - - cdef enum: - PyUFunc_Zero - PyUFunc_One - PyUFunc_None - UFUNC_FPE_DIVIDEBYZERO - UFUNC_FPE_OVERFLOW - UFUNC_FPE_UNDERFLOW - UFUNC_FPE_INVALID - - object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *, - void **, char *, int, int, int, int, char *, char *, int) - int PyUFunc_RegisterLoopForType(ufunc, int, - PyUFuncGenericFunction, int *, void *) except -1 - void PyUFunc_f_f_As_d_d \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_d_d \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_f_f \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_g_g \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_F_F_As_D_D \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_F_F \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_D_D \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_G_G \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_O_O \ - (char **, 
npy_intp *, npy_intp *, void *) - void PyUFunc_ff_f_As_dd_d \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_ff_f \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_dd_d \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_gg_g \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_FF_F_As_DD_D \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_DD_D \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_FF_F \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_GG_G \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_OO_O \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_O_O_method \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_OO_O_method \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_On_Om \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_clearfperr() - int PyUFunc_getfperr() - int PyUFunc_ReplaceLoopBySignature \ - (ufunc, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *) - object PyUFunc_FromFuncAndDataAndSignature \ - (PyUFuncGenericFunction *, void **, char *, int, int, int, - int, char *, char *, int, char *) - - int _import_umath() except -1 - -cdef inline void set_array_base(ndarray arr, object base): - Py_INCREF(base) # important to do this before stealing the reference below! - PyArray_SetBaseObject(arr, base) - -cdef inline object get_array_base(ndarray arr): - base = PyArray_BASE(arr) - if base is NULL: - return None - return base - -# Versions of the import_* functions which are more suitable for -# Cython code. 
-cdef inline int import_array() except -1: - try: - __pyx_import_array() - except Exception: - raise ImportError("numpy._core.multiarray failed to import") - -cdef inline int import_umath() except -1: - try: - _import_umath() - except Exception: - raise ImportError("numpy._core.umath failed to import") - -cdef inline int import_ufunc() except -1: - try: - _import_umath() - except Exception: - raise ImportError("numpy._core.umath failed to import") - - -cdef inline bint is_timedelta64_object(object obj): - """ - Cython equivalent of `isinstance(obj, np.timedelta64)` - - Parameters - ---------- - obj : object - - Returns - ------- - bool - """ - return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type) - - -cdef inline bint is_datetime64_object(object obj): - """ - Cython equivalent of `isinstance(obj, np.datetime64)` - - Parameters - ---------- - obj : object - - Returns - ------- - bool - """ - return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type) - - -cdef inline npy_datetime get_datetime64_value(object obj) nogil: - """ - returns the int64 value underlying scalar numpy datetime64 object - - Note that to interpret this as a datetime, the corresponding unit is - also needed. That can be found using `get_datetime64_unit`. - """ - return (obj).obval - - -cdef inline npy_timedelta get_timedelta64_value(object obj) nogil: - """ - returns the int64 value underlying scalar numpy timedelta64 object - """ - return (obj).obval - - -cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil: - """ - returns the unit part of the dtype for a numpy datetime64 object. 
- """ - return (obj).obmeta.base - - -# Iterator API added in v1.6 -ctypedef int (*NpyIter_IterNextFunc)(NpyIter* it) noexcept nogil -ctypedef void (*NpyIter_GetMultiIndexFunc)(NpyIter* it, npy_intp* outcoords) noexcept nogil - -cdef extern from "numpy/arrayobject.h": - - ctypedef struct NpyIter: - pass - - cdef enum: - NPY_FAIL - NPY_SUCCEED - - cdef enum: - # Track an index representing C order - NPY_ITER_C_INDEX - # Track an index representing Fortran order - NPY_ITER_F_INDEX - # Track a multi-index - NPY_ITER_MULTI_INDEX - # User code external to the iterator does the 1-dimensional innermost loop - NPY_ITER_EXTERNAL_LOOP - # Convert all the operands to a common data type - NPY_ITER_COMMON_DTYPE - # Operands may hold references, requiring API access during iteration - NPY_ITER_REFS_OK - # Zero-sized operands should be permitted, iteration checks IterSize for 0 - NPY_ITER_ZEROSIZE_OK - # Permits reductions (size-0 stride with dimension size > 1) - NPY_ITER_REDUCE_OK - # Enables sub-range iteration - NPY_ITER_RANGED - # Enables buffering - NPY_ITER_BUFFERED - # When buffering is enabled, grows the inner loop if possible - NPY_ITER_GROWINNER - # Delay allocation of buffers until first Reset* call - NPY_ITER_DELAY_BUFALLOC - # When NPY_KEEPORDER is specified, disable reversing negative-stride axes - NPY_ITER_DONT_NEGATE_STRIDES - NPY_ITER_COPY_IF_OVERLAP - # The operand will be read from and written to - NPY_ITER_READWRITE - # The operand will only be read from - NPY_ITER_READONLY - # The operand will only be written to - NPY_ITER_WRITEONLY - # The operand's data must be in native byte order - NPY_ITER_NBO - # The operand's data must be aligned - NPY_ITER_ALIGNED - # The operand's data must be contiguous (within the inner loop) - NPY_ITER_CONTIG - # The operand may be copied to satisfy requirements - NPY_ITER_COPY - # The operand may be copied with WRITEBACKIFCOPY to satisfy requirements - NPY_ITER_UPDATEIFCOPY - # Allocate the operand if it is NULL - 
NPY_ITER_ALLOCATE - # If an operand is allocated, don't use any subtype - NPY_ITER_NO_SUBTYPE - # This is a virtual array slot, operand is NULL but temporary data is there - NPY_ITER_VIRTUAL - # Require that the dimension match the iterator dimensions exactly - NPY_ITER_NO_BROADCAST - # A mask is being used on this array, affects buffer -> array copy - NPY_ITER_WRITEMASKED - # This array is the mask for all WRITEMASKED operands - NPY_ITER_ARRAYMASK - # Assume iterator order data access for COPY_IF_OVERLAP - NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE - # construction and destruction functions - NpyIter* NpyIter_New(ndarray arr, npy_uint32 flags, NPY_ORDER order, - NPY_CASTING casting, dtype datatype) except NULL - NpyIter* NpyIter_MultiNew(npy_intp nop, PyArrayObject** op, npy_uint32 flags, - NPY_ORDER order, NPY_CASTING casting, npy_uint32* - op_flags, PyArray_Descr** op_dtypes) except NULL - NpyIter* NpyIter_AdvancedNew(npy_intp nop, PyArrayObject** op, - npy_uint32 flags, NPY_ORDER order, - NPY_CASTING casting, npy_uint32* op_flags, - PyArray_Descr** op_dtypes, int oa_ndim, - int** op_axes, const npy_intp* itershape, - npy_intp buffersize) except NULL - NpyIter* NpyIter_Copy(NpyIter* it) except NULL - int NpyIter_RemoveAxis(NpyIter* it, int axis) except NPY_FAIL - int NpyIter_RemoveMultiIndex(NpyIter* it) except NPY_FAIL - int NpyIter_EnableExternalLoop(NpyIter* it) except NPY_FAIL - int NpyIter_Deallocate(NpyIter* it) except NPY_FAIL - int NpyIter_Reset(NpyIter* it, char** errmsg) except NPY_FAIL - int NpyIter_ResetToIterIndexRange(NpyIter* it, npy_intp istart, - npy_intp iend, char** errmsg) except NPY_FAIL - int NpyIter_ResetBasePointers(NpyIter* it, char** baseptrs, char** errmsg) except NPY_FAIL - int NpyIter_GotoMultiIndex(NpyIter* it, const npy_intp* multi_index) except NPY_FAIL - int NpyIter_GotoIndex(NpyIter* it, npy_intp index) except NPY_FAIL - npy_intp NpyIter_GetIterSize(NpyIter* it) nogil - npy_intp NpyIter_GetIterIndex(NpyIter* it) nogil - void 
NpyIter_GetIterIndexRange(NpyIter* it, npy_intp* istart, - npy_intp* iend) nogil - int NpyIter_GotoIterIndex(NpyIter* it, npy_intp iterindex) except NPY_FAIL - npy_bool NpyIter_HasDelayedBufAlloc(NpyIter* it) nogil - npy_bool NpyIter_HasExternalLoop(NpyIter* it) nogil - npy_bool NpyIter_HasMultiIndex(NpyIter* it) nogil - npy_bool NpyIter_HasIndex(NpyIter* it) nogil - npy_bool NpyIter_RequiresBuffering(NpyIter* it) nogil - npy_bool NpyIter_IsBuffered(NpyIter* it) nogil - npy_bool NpyIter_IsGrowInner(NpyIter* it) nogil - npy_intp NpyIter_GetBufferSize(NpyIter* it) nogil - int NpyIter_GetNDim(NpyIter* it) nogil - int NpyIter_GetNOp(NpyIter* it) nogil - npy_intp* NpyIter_GetAxisStrideArray(NpyIter* it, int axis) except NULL - int NpyIter_GetShape(NpyIter* it, npy_intp* outshape) nogil - PyArray_Descr** NpyIter_GetDescrArray(NpyIter* it) - PyArrayObject** NpyIter_GetOperandArray(NpyIter* it) - ndarray NpyIter_GetIterView(NpyIter* it, npy_intp i) - void NpyIter_GetReadFlags(NpyIter* it, char* outreadflags) - void NpyIter_GetWriteFlags(NpyIter* it, char* outwriteflags) - int NpyIter_CreateCompatibleStrides(NpyIter* it, npy_intp itemsize, - npy_intp* outstrides) except NPY_FAIL - npy_bool NpyIter_IsFirstVisit(NpyIter* it, int iop) nogil - # functions for iterating an NpyIter object - NpyIter_IterNextFunc* NpyIter_GetIterNext(NpyIter* it, char** errmsg) except NULL - NpyIter_GetMultiIndexFunc* NpyIter_GetGetMultiIndex(NpyIter* it, - char** errmsg) except NULL - char** NpyIter_GetDataPtrArray(NpyIter* it) nogil - char** NpyIter_GetInitialDataPtrArray(NpyIter* it) nogil - npy_intp* NpyIter_GetIndexPtr(NpyIter* it) - npy_intp* NpyIter_GetInnerStrideArray(NpyIter* it) nogil - npy_intp* NpyIter_GetInnerLoopSizePtr(NpyIter* it) nogil - void NpyIter_GetInnerFixedStrideArray(NpyIter* it, npy_intp* outstrides) nogil - npy_bool NpyIter_IterationNeedsAPI(NpyIter* it) nogil - void NpyIter_DebugPrint(NpyIter* it) +# intentionally created compiler error that only triggers on Cython < 
3.0.0 +DEF err = int('Build aborted: the NumPy Cython headers require Cython 3.0.0 or newer.') diff --git a/numpy/__init__.py b/numpy/__init__.py index 0d0e09ceb716..3f2652a306fd 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -52,9 +52,6 @@ Polynomial tools testing NumPy testing tools -distutils - Enhancements to distutils with support for - Fortran compilers support and more (for Python <= 3.11) Utilities --------- @@ -89,18 +86,16 @@ import sys import warnings -from ._globals import _NoValue, _CopyMode -from ._expired_attrs_2_0 import __expired_attributes__ - - # If a version with git hash was stored, use that instead from . import version +from ._expired_attrs_2_0 import __expired_attributes__ +from ._globals import _CopyMode, _NoValue from .version import __version__ # We first need to detect if we're being called as part of the numpy setup # procedure itself in a reliable manner. try: - __NUMPY_SETUP__ + __NUMPY_SETUP__ # noqa: B018 except NameError: __NUMPY_SETUP__ = False @@ -111,65 +106,343 @@ from . import _distributor_init try: - from numpy.__config__ import show as show_config + from numpy.__config__ import show_config except ImportError as e: - msg = """Error importing numpy: you should not try to import numpy from - its source directory; please exit the numpy source tree, and relaunch - your python interpreter from there.""" - raise ImportError(msg) from e + if isinstance(e, ModuleNotFoundError) and e.name == "numpy.__config__": + # The __config__ module itself was not found, so add this info: + msg = """Error importing numpy: you should not try to import numpy from + its source directory; please exit the numpy source tree, and relaunch + your python interpreter from there.""" + raise ImportError(msg) from e + raise from . 
import _core from ._core import ( - False_, ScalarType, True_, _get_promotion_state, _no_nep50_warning, - _set_promotion_state, abs, absolute, acos, acosh, add, all, allclose, - amax, amin, any, arange, arccos, arccosh, arcsin, arcsinh, - arctan, arctan2, arctanh, argmax, argmin, argpartition, argsort, - argwhere, around, array, array2string, array_equal, array_equiv, - array_repr, array_str, asanyarray, asarray, ascontiguousarray, - asfortranarray, asin, asinh, atan, atanh, atan2, astype, atleast_1d, - atleast_2d, atleast_3d, base_repr, binary_repr, bitwise_and, - bitwise_count, bitwise_invert, bitwise_left_shift, bitwise_not, - bitwise_or, bitwise_right_shift, bitwise_xor, block, bool, bool_, - broadcast, busday_count, busday_offset, busdaycalendar, byte, bytes_, - can_cast, cbrt, cdouble, ceil, character, choose, clip, clongdouble, - complex128, complex64, complexfloating, compress, concat, concatenate, - conj, conjugate, convolve, copysign, copyto, correlate, cos, cosh, - count_nonzero, cross, csingle, cumprod, cumsum, - datetime64, datetime_as_string, datetime_data, deg2rad, degrees, - diagonal, divide, divmod, dot, double, dtype, e, einsum, einsum_path, - empty, empty_like, equal, errstate, euler_gamma, exp, exp2, expm1, - fabs, finfo, flatiter, flatnonzero, flexible, float16, float32, - float64, float_power, floating, floor, floor_divide, fmax, fmin, fmod, - format_float_positional, format_float_scientific, frexp, from_dlpack, - frombuffer, fromfile, fromfunction, fromiter, frompyfunc, fromstring, - full, full_like, gcd, generic, geomspace, get_printoptions, - getbufsize, geterr, geterrcall, greater, greater_equal, half, - heaviside, hstack, hypot, identity, iinfo, iinfo, indices, inexact, - inf, inner, int16, int32, int64, int8, int_, intc, integer, intp, - invert, is_busday, isclose, isdtype, isfinite, isfortran, isinf, - isnan, isnat, isscalar, issubdtype, lcm, ldexp, left_shift, less, - less_equal, lexsort, linspace, little_endian, log, log10, log1p, 
log2, - logaddexp, logaddexp2, logical_and, logical_not, logical_or, - logical_xor, logspace, long, longdouble, longlong, matmul, - matrix_transpose, max, maximum, may_share_memory, mean, memmap, min, - min_scalar_type, minimum, mod, modf, moveaxis, multiply, nan, ndarray, - ndim, nditer, negative, nested_iters, newaxis, nextafter, nonzero, - not_equal, number, object_, ones, ones_like, outer, partition, - permute_dims, pi, positive, pow, power, printoptions, prod, - promote_types, ptp, put, putmask, rad2deg, radians, ravel, recarray, - reciprocal, record, remainder, repeat, require, reshape, resize, - result_type, right_shift, rint, roll, rollaxis, round, sctypeDict, - searchsorted, set_printoptions, setbufsize, seterr, seterrcall, shape, - shares_memory, short, sign, signbit, signedinteger, sin, single, sinh, - size, sort, spacing, sqrt, square, squeeze, stack, std, - str_, subtract, sum, swapaxes, take, tan, tanh, tensordot, - timedelta64, trace, transpose, true_divide, trunc, typecodes, ubyte, - ufunc, uint, uint16, uint32, uint64, uint8, uintc, uintp, ulong, - ulonglong, unsignedinteger, ushort, var, vdot, vecdot, void, vstack, - where, zeros, zeros_like + False_, + ScalarType, + True_, + abs, + absolute, + acos, + acosh, + add, + all, + allclose, + amax, + amin, + any, + arange, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + argmax, + argmin, + argpartition, + argsort, + argwhere, + around, + array, + array2string, + array_equal, + array_equiv, + array_repr, + array_str, + asanyarray, + asarray, + ascontiguousarray, + asfortranarray, + asin, + asinh, + astype, + atan, + atan2, + atanh, + atleast_1d, + atleast_2d, + atleast_3d, + base_repr, + binary_repr, + bitwise_and, + bitwise_count, + bitwise_invert, + bitwise_left_shift, + bitwise_not, + bitwise_or, + bitwise_right_shift, + bitwise_xor, + block, + bool, + bool_, + broadcast, + busday_count, + busday_offset, + busdaycalendar, + byte, + bytes_, + can_cast, + cbrt, + cdouble, + 
ceil, + character, + choose, + clip, + clongdouble, + complex64, + complex128, + complexfloating, + compress, + concat, + concatenate, + conj, + conjugate, + convolve, + copysign, + copyto, + correlate, + cos, + cosh, + count_nonzero, + cross, + csingle, + cumprod, + cumsum, + cumulative_prod, + cumulative_sum, + datetime64, + datetime_as_string, + datetime_data, + deg2rad, + degrees, + diagonal, + divide, + divmod, + dot, + double, + dtype, + e, + einsum, + einsum_path, + empty, + empty_like, + equal, + errstate, + euler_gamma, + exp, + exp2, + expm1, + fabs, + finfo, + flatiter, + flatnonzero, + flexible, + float16, + float32, + float64, + float_power, + floating, + floor, + floor_divide, + fmax, + fmin, + fmod, + format_float_positional, + format_float_scientific, + frexp, + from_dlpack, + frombuffer, + fromfile, + fromfunction, + fromiter, + frompyfunc, + fromstring, + full, + full_like, + gcd, + generic, + geomspace, + get_printoptions, + getbufsize, + geterr, + geterrcall, + greater, + greater_equal, + half, + heaviside, + hstack, + hypot, + identity, + iinfo, + indices, + inexact, + inf, + inner, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + intp, + invert, + is_busday, + isclose, + isdtype, + isfinite, + isfortran, + isinf, + isnan, + isnat, + isscalar, + issubdtype, + lcm, + ldexp, + left_shift, + less, + less_equal, + lexsort, + linspace, + little_endian, + log, + log1p, + log2, + log10, + logaddexp, + logaddexp2, + logical_and, + logical_not, + logical_or, + logical_xor, + logspace, + long, + longdouble, + longlong, + matmul, + matrix_transpose, + matvec, + max, + maximum, + may_share_memory, + mean, + memmap, + min, + min_scalar_type, + minimum, + mod, + modf, + moveaxis, + multiply, + nan, + ndarray, + ndim, + nditer, + negative, + nested_iters, + newaxis, + nextafter, + nonzero, + not_equal, + number, + object_, + ones, + ones_like, + outer, + partition, + permute_dims, + pi, + positive, + pow, + power, + printoptions, + prod, + 
promote_types, + ptp, + put, + putmask, + rad2deg, + radians, + ravel, + recarray, + reciprocal, + record, + remainder, + repeat, + require, + reshape, + resize, + result_type, + right_shift, + rint, + roll, + rollaxis, + round, + sctypeDict, + searchsorted, + set_printoptions, + setbufsize, + seterr, + seterrcall, + shape, + shares_memory, + short, + sign, + signbit, + signedinteger, + sin, + single, + sinh, + size, + sort, + spacing, + sqrt, + square, + squeeze, + stack, + std, + str_, + subtract, + sum, + swapaxes, + take, + tan, + tanh, + tensordot, + timedelta64, + trace, + transpose, + true_divide, + trunc, + typecodes, + ubyte, + ufunc, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + unsignedinteger, + unstack, + ushort, + var, + vdot, + vecdot, + vecmat, + void, + vstack, + where, + zeros, + zeros_like, ) - # NOTE: It's still under discussion whether these aliases + # NOTE: It's still under discussion whether these aliases # should be removed. for ta in ["float96", "float128", "complex192", "complex256"]: try: @@ -178,75 +451,180 @@ pass del ta - from . import lib + from . 
import lib, matrixlib as _mat from .lib import scimath as emath - from .lib._histograms_impl import ( - histogram, histogram_bin_edges, histogramdd - ) - from .lib._nanfunctions_impl import ( - nanargmax, nanargmin, nancumprod, nancumsum, nanmax, nanmean, - nanmedian, nanmin, nanpercentile, nanprod, nanquantile, nanstd, - nansum, nanvar + from .lib._arraypad_impl import pad + from .lib._arraysetops_impl import ( + ediff1d, + intersect1d, + isin, + setdiff1d, + setxor1d, + union1d, + unique, + unique_all, + unique_counts, + unique_inverse, + unique_values, ) from .lib._function_base_impl import ( - select, piecewise, trim_zeros, copy, iterable, percentile, diff, - gradient, angle, unwrap, sort_complex, flip, rot90, extract, place, - vectorize, asarray_chkfinite, average, bincount, digitize, cov, - corrcoef, median, sinc, hamming, hanning, bartlett, blackman, - kaiser, trapezoid, trapz, i0, meshgrid, delete, insert, append, - interp, quantile + angle, + append, + asarray_chkfinite, + average, + bartlett, + bincount, + blackman, + copy, + corrcoef, + cov, + delete, + diff, + digitize, + extract, + flip, + gradient, + hamming, + hanning, + i0, + insert, + interp, + iterable, + kaiser, + median, + meshgrid, + percentile, + piecewise, + place, + quantile, + rot90, + select, + sinc, + sort_complex, + trapezoid, + trim_zeros, + unwrap, + vectorize, ) - from .lib._twodim_base_impl import ( - diag, diagflat, eye, fliplr, flipud, tri, triu, tril, vander, - histogram2d, mask_indices, tril_indices, tril_indices_from, - triu_indices, triu_indices_from + from .lib._histograms_impl import histogram, histogram_bin_edges, histogramdd + from .lib._index_tricks_impl import ( + c_, + diag_indices, + diag_indices_from, + fill_diagonal, + index_exp, + ix_, + mgrid, + ndenumerate, + ndindex, + ogrid, + r_, + ravel_multi_index, + s_, + unravel_index, ) - from .lib._shape_base_impl import ( - apply_over_axes, apply_along_axis, array_split, column_stack, dsplit, - dstack, expand_dims, 
hsplit, kron, put_along_axis, row_stack, split, - take_along_axis, tile, vsplit + from .lib._nanfunctions_impl import ( + nanargmax, + nanargmin, + nancumprod, + nancumsum, + nanmax, + nanmean, + nanmedian, + nanmin, + nanpercentile, + nanprod, + nanquantile, + nanstd, + nansum, + nanvar, ) - from .lib._type_check_impl import ( - iscomplexobj, isrealobj, imag, iscomplex, isreal, nan_to_num, real, - real_if_close, typename, mintypecode, common_type + from .lib._npyio_impl import ( + fromregex, + genfromtxt, + load, + loadtxt, + packbits, + save, + savetxt, + savez, + savez_compressed, + unpackbits, ) - from .lib._arraysetops_impl import ( - ediff1d, in1d, intersect1d, isin, setdiff1d, setxor1d, union1d, - unique, unique_all, unique_counts, unique_inverse, unique_values + from .lib._polynomial_impl import ( + poly, + poly1d, + polyadd, + polyder, + polydiv, + polyfit, + polyint, + polymul, + polysub, + polyval, + roots, ) - from .lib._ufunclike_impl import fix, isneginf, isposinf - from .lib._arraypad_impl import pad - from .lib._utils_impl import ( - show_runtime, get_include, info + from .lib._shape_base_impl import ( + apply_along_axis, + apply_over_axes, + array_split, + column_stack, + dsplit, + dstack, + expand_dims, + hsplit, + kron, + put_along_axis, + split, + take_along_axis, + tile, + vsplit, ) from .lib._stride_tricks_impl import ( - broadcast_arrays, broadcast_shapes, broadcast_to + broadcast_arrays, + broadcast_shapes, + broadcast_to, ) - from .lib._polynomial_impl import ( - poly, polyint, polyder, polyadd, polysub, polymul, polydiv, polyval, - polyfit, poly1d, roots - ) - from .lib._npyio_impl import ( - savetxt, loadtxt, genfromtxt, load, save, savez, packbits, - savez_compressed, unpackbits, fromregex - ) - from .lib._index_tricks_impl import ( - diag_indices_from, diag_indices, fill_diagonal, ndindex, ndenumerate, - ix_, c_, r_, s_, ogrid, mgrid, unravel_index, ravel_multi_index, - index_exp + from .lib._twodim_base_impl import ( + diag, + 
diagflat, + eye, + fliplr, + flipud, + histogram2d, + mask_indices, + tri, + tril, + tril_indices, + tril_indices_from, + triu, + triu_indices, + triu_indices_from, + vander, ) - - from . import matrixlib as _mat - from .matrixlib import ( - asmatrix, bmat, matrix + from .lib._type_check_impl import ( + common_type, + imag, + iscomplex, + iscomplexobj, + isreal, + isrealobj, + mintypecode, + nan_to_num, + real, + real_if_close, + typename, ) + from .lib._ufunclike_impl import fix, isneginf, isposinf + from .lib._utils_impl import get_include, info, show_runtime + from .matrixlib import asmatrix, bmat, matrix # public submodules are imported lazily, therefore are accessible from - # __getattr__. Note that `distutils` (deprecated) and `array_api` - # (experimental label) are not added here, because `from numpy import *` + # __getattr__. Note that `array_api` + # (experimental label) is not added here, because `from numpy import *` # must not raise any warnings - that's too disruptive. __numpy_submodules__ = { - "linalg", "fft", "dtypes", "random", "polynomial", "ma", + "linalg", "fft", "dtypes", "random", "polynomial", "ma", "exceptions", "lib", "ctypeslib", "testing", "typing", "f2py", "test", "rec", "char", "core", "strings", } @@ -282,7 +660,6 @@ for n, extended_msg in _type_info } - # Some of these could be defined right away, but most were aliases to # the Python objects and only removed in NumPy 1.24. Defining them should # probably wait for NumPy 1.26 or 2.0. @@ -290,13 +667,10 @@ # import with `from numpy import *`. 
__future_scalars__ = {"str", "bytes", "object"} - __array_api_version__ = "2022.12" + __array_api_version__ = "2024.12" from ._array_api_info import __array_namespace_info__ - # now that numpy core module is imported, can initialize limits - _core.getlimits._register_known_types() - __all__ = list( __numpy_submodules__ | set(_core.__all__) | @@ -369,23 +743,12 @@ def __getattr__(attr): elif attr == "char": import numpy.char as char return char - elif attr == "array_api": - raise AttributeError("`numpy.array_api` is not available from " - "numpy 2.0 onwards") elif attr == "core": import numpy.core as core return core elif attr == "strings": import numpy.strings as strings return strings - elif attr == "distutils": - if 'distutils' in __numpy_submodules__: - import numpy.distutils as distutils - return distutils - else: - raise AttributeError("`numpy.distutils` is not available from " - "Python 3.12 onwards") - if attr in __future_scalars__: # And future warnings for those that will change, but also give # the AttributeError @@ -394,32 +757,24 @@ def __getattr__(attr): "corresponding NumPy scalar.", FutureWarning, stacklevel=2) if attr in __former_attrs__: - raise AttributeError(__former_attrs__[attr]) - + raise AttributeError(__former_attrs__[attr], name=None) + if attr in __expired_attributes__: raise AttributeError( f"`np.{attr}` was removed in the NumPy 2.0 release. " - f"{__expired_attributes__[attr]}" + f"{__expired_attributes__[attr]}", + name=None ) - if attr == "chararray": - warnings.warn( - "`np.chararray` is deprecated and will be removed from " - "the main namespace in the future. 
Use an array with a string " - "or bytes dtype instead.", DeprecationWarning, stacklevel=2) - import numpy.char as char - return char.chararray - - raise AttributeError("module {!r} has no attribute " - "{!r}".format(__name__, attr)) + raise AttributeError(f"module {__name__!r} has no attribute {attr!r}") def __dir__(): public_symbols = ( globals().keys() | __numpy_submodules__ ) public_symbols -= { - "matrixlib", "matlib", "tests", "conftest", "version", - "compat", "distutils", "array_api" + "matrixlib", "matlib", "tests", "conftest", "version", + "array_api" } return list(public_symbols) @@ -442,7 +797,7 @@ def _sanity_check(): try: x = ones(2, dtype=float32) if not abs(x.dot(x) - float32(2.0)) < 1e-5: - raise AssertionError() + raise AssertionError except AssertionError: msg = ("The current Numpy installation ({!r}) fails to " "pass simple sanity checks. This can be caused for example " @@ -471,28 +826,48 @@ def _mac_os_check(): from . import exceptions with warnings.catch_warnings(record=True) as w: _mac_os_check() - # Throw runtime error, if the test failed Check for warning and error_message + # Throw runtime error, if the test failed + # Check for warning and report the error_message if len(w) > 0: for _wn in w: if _wn.category is exceptions.RankWarning: - # Ignore other warnings, they may not be relevant (see gh-25433). - error_message = f"{_wn.category.__name__}: {str(_wn.message)}" + # Ignore other warnings, they may not be relevant (see gh-25433) + error_message = ( + f"{_wn.category.__name__}: {_wn.message}" + ) msg = ( "Polyfit sanity test emitted a warning, most likely due " "to using a buggy Accelerate backend." 
- "\nIf you compiled yourself, more information is available at:" + "\nIf you compiled yourself, more information is available at:" # noqa: E501 "\nhttps://numpy.org/devdocs/building/index.html" "\nOtherwise report this to the vendor " - "that provided NumPy.\n\n{}\n".format(error_message)) + f"that provided NumPy.\n\n{error_message}\n") raise RuntimeError(msg) del _wn del w del _mac_os_check + def blas_fpe_check(): + # Check if BLAS adds spurious FPEs, mostly seen on M4 arms with Accelerate. + with errstate(all='raise'): + x = ones((20, 20)) + try: + x @ x + except FloatingPointError: + res = _core._multiarray_umath._blas_supports_fpe(False) + if res: # res was not modified (hardcoded to True for now) + warnings.warn( + "Spurious warnings given by blas but suppression not " + "set up on this platform. Please open a NumPy issue.", + UserWarning, stacklevel=2) + + blas_fpe_check() + del blas_fpe_check + def hugepage_setup(): """ We usually use madvise hugepages support, but on some old kernels it - is slow and thus better avoided. Specifically kernel version 4.6 + is slow and thus better avoided. Specifically kernel version 4.6 had a bug fix which probably fixed this: https://github.com/torvalds/linux/commit/7cf91a98e607c2f935dbcc177d70011e95b8faff """ @@ -501,7 +876,7 @@ def hugepage_setup(): # If there is an issue with parsing the kernel version, # set use_hugepage to 0. Usage of LooseVersion will handle # the kernel version parsing better, but avoided since it - # will increase the import time. + # will increase the import time. # See: #16679 for related discussion. 
try: use_hugepage = 1 @@ -528,8 +903,11 @@ def hugepage_setup(): _core.multiarray._multiarray_umath._reload_guard() # TODO: Remove the environment variable entirely now that it is "weak" - _core._set_promotion_state( - os.environ.get("NPY_PROMOTION_STATE", "weak")) + if (os.environ.get("NPY_PROMOTION_STATE", "weak") != "weak"): + warnings.warn( + "NPY_PROMOTION_STATE was a temporary feature for NumPy 2.0 " + "transition and is ignored after NumPy 2.2.", + UserWarning, stacklevel=2) # Tell PyInstaller where to find hook-numpy.py def _pyinstaller_hooks_dir(): diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 6a6d133e335d..113ea5e010fa 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1,52 +1,50 @@ -import builtins -import sys -import os -import mmap +# ruff: noqa: I001 import ctypes as ct -import array as _array import datetime as dt -import enum +import inspect +import sys from abc import abstractmethod -from types import TracebackType, MappingProxyType, GenericAlias -from contextlib import contextmanager - -import numpy as np +# Aliases for builtins shadowed by classes to avoid annotations resolving to class members by ty +from builtins import bool as py_bool, str as py_str, type as py_type +from decimal import Decimal +from fractions import Fraction +from types import EllipsisType, ModuleType, MappingProxyType, GenericAlias +from uuid import UUID + +from numpy.__config__ import show as show_config from numpy._pytesttester import PytestTester from numpy._core._internal import _ctypes -from numpy._typing import ( +from numpy._typing import ( # type: ignore[deprecated] # Arrays ArrayLike, NDArray, - _SupportsArray, _NestedSequence, - _FiniteNestedSequence, - _SupportsArray, + _ArrayLike, _ArrayLikeBool_co, _ArrayLikeUInt_co, + _ArrayLikeInt, _ArrayLikeInt_co, + _ArrayLikeFloat64_co, _ArrayLikeFloat_co, + _ArrayLikeComplex128_co, _ArrayLikeComplex_co, _ArrayLikeNumber_co, - _ArrayLikeTD64_co, - _ArrayLikeDT64_co, _ArrayLikeObject_co, - 
_ArrayLikeStr_co, _ArrayLikeBytes_co, - _ArrayLikeUnknown, - _UnknownType, - + _ArrayLikeStr_co, + _ArrayLikeString_co, + _ArrayLikeTD64_co, + _ArrayLikeDT64_co, # DTypes DTypeLike, _DTypeLike, _DTypeLikeVoid, - _SupportsDType, _VoidDTypeLike, - # Shapes + _AnyShape, _Shape, _ShapeLike, - # Scalars _CharLike_co, _IntLike_co, @@ -54,15 +52,12 @@ from numpy._typing import ( _TD64Like_co, _NumberLike_co, _ScalarLike_co, - # `number` precision NBitBase, # NOTE: Do not remove the extended precision bit-types even if seemingly unused; # they're used by the mypy plugin - _256Bit, _128Bit, _96Bit, - _80Bit, _64Bit, _32Bit, _16Bit, @@ -71,14 +66,12 @@ from numpy._typing import ( _NBitShort, _NBitIntC, _NBitIntP, - _NBitInt, _NBitLong, _NBitLongLong, _NBitHalf, _NBitSingle, _NBitDouble, _NBitLongDouble, - # Character codes _BoolCodes, _UInt8Codes, @@ -94,32 +87,29 @@ from numpy._typing import ( _Float64Codes, _Complex64Codes, _Complex128Codes, - _ByteCodes, - _ShortCodes, _IntCCodes, _IntPCodes, _LongCodes, _LongLongCodes, - _UByteCodes, - _UShortCodes, _UIntCCodes, _UIntPCodes, _ULongCodes, _ULongLongCodes, - _HalfCodes, - _SingleCodes, - _DoubleCodes, _LongDoubleCodes, - _CSingleCodes, - _CDoubleCodes, _CLongDoubleCodes, - _DT64Codes, _TD64Codes, _StrCodes, _BytesCodes, _VoidCodes, _ObjectCodes, - + _StringCodes, + _UnsignedIntegerCodes, + _SignedIntegerCodes, + _IntegerCodes, + _FloatingCodes, + _ComplexFloatingCodes, + _InexactCodes, + _CharacterCodes, # Ufuncs _UFunc_Nin1_Nout1, _UFunc_Nin2_Nout1, @@ -127,833 +117,1549 @@ from numpy._typing import ( _UFunc_Nin2_Nout2, _GUFunc_Nin2_Nout1, ) - -from numpy._typing._callable import ( - _BoolOp, - _BoolBitOp, - _BoolSub, - _BoolTrueDiv, - _BoolMod, - _BoolDivMod, - _TD64Div, - _IntTrueDiv, - _UnsignedIntOp, - _UnsignedIntBitOp, - _UnsignedIntMod, - _UnsignedIntDivMod, - _SignedIntOp, - _SignedIntBitOp, - _SignedIntMod, - _SignedIntDivMod, - _FloatOp, - _FloatMod, - _FloatDivMod, - _ComplexOp, - _NumberOp, - _ComparisonOp, 
+from numpy._typing._char_codes import ( + _DT64Codes_any, + _DT64Codes_date, + _DT64Codes_datetime, + _DT64Codes_int, + _TD64Codes_any, + _TD64Codes_int, + _TD64Codes_timedelta, ) -# NOTE: Numpy's mypy plugin is used for removing the types unavailable -# to the specific platform +# NOTE: Numpy's mypy plugin is used for removing the types unavailable to the specific platform from numpy._typing._extended_precision import ( - uint128 as uint128, - uint256 as uint256, - int128 as int128, - int256 as int256, - float80 as float80, - float96 as float96, - float128 as float128, - float256 as float256, - complex160 as complex160, - complex192 as complex192, - complex256 as complex256, - complex512 as complex512, + float96, + float128, + complex192, + complex256, ) -from numpy._array_api_info import __array_namespace_info__ as __array_namespace_info__ +from numpy._array_api_info import __array_namespace_info__ from collections.abc import ( + Buffer, Callable, Iterable, Iterator, Mapping, Sequence, ) + from typing import ( - Literal as L, Any, - Generator, + ClassVar, + Final, Generic, + Literal as L, + LiteralString, + Never, NoReturn, - overload, + Protocol, + Self, SupportsComplex, SupportsFloat, SupportsInt, - TypeVar, - Protocol, SupportsIndex, - Final, + TypedDict, final, - ClassVar, + overload, + override, + type_check_only, ) -# Ensures that the stubs are picked up +# NOTE: `typing_extensions` and `_typeshed` are always available in `.pyi` stubs, even +# if not available at runtime. 
This is because the `typeshed` stubs for the standard +# library include `typing_extensions` stubs: +# https://github.com/python/typeshed/blob/main/stdlib/typing_extensions.pyi +from _typeshed import Incomplete, StrOrBytesPath, SupportsFlush, SupportsLenAndGetItem, SupportsWrite +from typing_extensions import CapsuleType, TypeVar, deprecated + from numpy import ( - ctypeslib as ctypeslib, - exceptions as exceptions, - fft as fft, - lib as lib, - linalg as linalg, - ma as ma, - polynomial as polynomial, - random as random, - testing as testing, - version as version, - exceptions as exceptions, - dtypes as dtypes, - rec as rec, - char as char, - strings as strings, + char, + core, + ctypeslib, + dtypes, + exceptions, + f2py, + fft, + lib, + linalg, + ma, + polynomial, + random, + rec, + strings, + testing, + typing, ) -from numpy._core.records import ( - record as record, - recarray as recarray, +# available through `__getattr__`, but not in `__all__` or `__dir__` +from numpy import ( + __config__ as __config__, + matlib as matlib, + matrixlib as matrixlib, + version as version, ) -from numpy._core.defchararray import ( - chararray as chararray, +from numpy._core.records import ( + record, + recarray, ) from numpy._core.function_base import ( - linspace as linspace, - logspace as logspace, - geomspace as geomspace, + linspace, + logspace, + geomspace, ) from numpy._core.fromnumeric import ( - take as take, - reshape as reshape, - choose as choose, - repeat as repeat, - put as put, - swapaxes as swapaxes, - transpose as transpose, - matrix_transpose as matrix_transpose, - partition as partition, - argpartition as argpartition, - sort as sort, - argsort as argsort, - argmax as argmax, - argmin as argmin, - searchsorted as searchsorted, - resize as resize, - squeeze as squeeze, - diagonal as diagonal, - trace as trace, - ravel as ravel, - nonzero as nonzero, - shape as shape, - compress as compress, - clip as clip, - sum as sum, - all as all, - any as any, - cumsum as 
cumsum, - ptp as ptp, - max as max, - min as min, - amax as amax, - amin as amin, - prod as prod, - cumprod as cumprod, - ndim as ndim, - size as size, - around as around, - round as round, - mean as mean, - std as std, - var as var, + take, + reshape, + choose, + repeat, + put, + swapaxes, + transpose, + matrix_transpose, + partition, + argpartition, + sort, + argsort, + argmax, + argmin, + searchsorted, + resize, + squeeze, + diagonal, + trace, + ravel, + nonzero, + shape, + compress, + clip, + sum, + all, + any, + cumsum, + cumulative_sum, + ptp, + max, + min, + amax, + amin, + prod, + cumprod, + cumulative_prod, + ndim, + size, + around, + round, + mean, + std, + var, ) from numpy._core._asarray import ( - require as require, + require, ) from numpy._core._type_aliases import ( - sctypeDict as sctypeDict, + sctypeDict, ) from numpy._core._ufunc_config import ( - seterr as seterr, - geterr as geterr, - setbufsize as setbufsize, - getbufsize as getbufsize, - seterrcall as seterrcall, - geterrcall as geterrcall, - _ErrKind, - _ErrFunc, + seterr, + geterr, + setbufsize, + getbufsize, + seterrcall, + geterrcall, + errstate, ) from numpy._core.arrayprint import ( - set_printoptions as set_printoptions, - get_printoptions as get_printoptions, - array2string as array2string, - format_float_scientific as format_float_scientific, - format_float_positional as format_float_positional, - array_repr as array_repr, - array_str as array_str, - printoptions as printoptions, + set_printoptions, + get_printoptions, + array2string, + format_float_scientific, + format_float_positional, + array_repr, + array_str, + printoptions, ) from numpy._core.einsumfunc import ( - einsum as einsum, - einsum_path as einsum_path, + einsum, + einsum_path, ) - +from numpy._core.getlimits import ( + finfo, + iinfo, +) +from numpy._core.memmap import memmap from numpy._core.multiarray import ( - array as array, - empty_like as empty_like, - empty as empty, - zeros as zeros, - concatenate as 
concatenate, - inner as inner, - where as where, - lexsort as lexsort, - can_cast as can_cast, - min_scalar_type as min_scalar_type, - result_type as result_type, - dot as dot, - vdot as vdot, - bincount as bincount, - copyto as copyto, - putmask as putmask, - packbits as packbits, - unpackbits as unpackbits, - shares_memory as shares_memory, - may_share_memory as may_share_memory, - asarray as asarray, - asanyarray as asanyarray, - ascontiguousarray as ascontiguousarray, - asfortranarray as asfortranarray, - arange as arange, - busday_count as busday_count, - busday_offset as busday_offset, - datetime_as_string as datetime_as_string, - datetime_data as datetime_data, - frombuffer as frombuffer, - fromfile as fromfile, - fromiter as fromiter, - is_busday as is_busday, - promote_types as promote_types, - fromstring as fromstring, - frompyfunc as frompyfunc, - nested_iters as nested_iters, + array, + empty_like, + empty, + zeros, + concatenate, + inner, + where, + lexsort, + can_cast, + min_scalar_type, + result_type, + dot, + vdot, + bincount, + copyto, + putmask, + packbits, + unpackbits, + shares_memory, + may_share_memory, + asarray, + asanyarray, + ascontiguousarray, + asfortranarray, + arange, + busdaycalendar, + busday_count, + busday_offset, + datetime_as_string, + datetime_data, + frombuffer, + fromfile, + fromiter, + is_busday, + promote_types, + fromstring, + frompyfunc, + flatiter, + nditer, + nested_iters, flagsobj, ) from numpy._core.numeric import ( - zeros_like as zeros_like, - ones as ones, - ones_like as ones_like, - full as full, - full_like as full_like, - count_nonzero as count_nonzero, - isfortran as isfortran, - argwhere as argwhere, - flatnonzero as flatnonzero, - correlate as correlate, - convolve as convolve, - outer as outer, - tensordot as tensordot, - roll as roll, - rollaxis as rollaxis, - moveaxis as moveaxis, - cross as cross, - indices as indices, - fromfunction as fromfunction, - isscalar as isscalar, - binary_repr as binary_repr, - 
base_repr as base_repr, - identity as identity, - allclose as allclose, - isclose as isclose, - array_equal as array_equal, - array_equiv as array_equiv, - astype as astype, + zeros_like, + ones, + ones_like, + full, + full_like, + count_nonzero, + isfortran, + argwhere, + flatnonzero, + correlate, + convolve, + outer, + tensordot, + roll, + rollaxis, + moveaxis, + cross, + indices, + fromfunction, + isscalar, + binary_repr, + base_repr, + identity, + allclose, + isclose, + array_equal, + array_equiv, + astype, ) from numpy._core.numerictypes import ( - isdtype as isdtype, - issubdtype as issubdtype, - cast as cast, - ScalarType as ScalarType, - typecodes as typecodes, + isdtype, + issubdtype, + ScalarType, + typecodes, ) from numpy._core.shape_base import ( - atleast_1d as atleast_1d, - atleast_2d as atleast_2d, - atleast_3d as atleast_3d, - block as block, - hstack as hstack, - stack as stack, - vstack as vstack, + atleast_1d, + atleast_2d, + atleast_3d, + block, + hstack, + stack, + vstack, + unstack, ) +from ._expired_attrs_2_0 import __expired_attributes__ as __expired_attributes__ +from ._globals import _CopyMode as _CopyMode +from ._globals import _NoValue as _NoValue, _NoValueType + from numpy.lib import ( scimath as emath, ) from numpy.lib._arraypad_impl import ( - pad as pad, + pad, ) from numpy.lib._arraysetops_impl import ( - ediff1d as ediff1d, - intersect1d as intersect1d, - isin as isin, - setdiff1d as setdiff1d, - setxor1d as setxor1d, - union1d as union1d, - unique as unique, - unique_all as unique_all, - unique_counts as unique_counts, - unique_inverse as unique_inverse, - unique_values as unique_values, + ediff1d, + intersect1d, + isin, + setdiff1d, + setxor1d, + union1d, + unique, + unique_all, + unique_counts, + unique_inverse, + unique_values, ) from numpy.lib._function_base_impl import ( - select as select, - piecewise as piecewise, - trim_zeros as trim_zeros, - copy as copy, - iterable as iterable, - percentile as percentile, - diff as diff, 
- gradient as gradient, - angle as angle, - unwrap as unwrap, - sort_complex as sort_complex, - disp as disp, - flip as flip, - rot90 as rot90, - extract as extract, - place as place, - asarray_chkfinite as asarray_chkfinite, - average as average, - bincount as bincount, - digitize as digitize, - cov as cov, - corrcoef as corrcoef, - median as median, - sinc as sinc, - hamming as hamming, - hanning as hanning, - bartlett as bartlett, - blackman as blackman, - kaiser as kaiser, - i0 as i0, - meshgrid as meshgrid, - delete as delete, - insert as insert, - append as append, - interp as interp, - quantile as quantile, + select, + piecewise, + trim_zeros, + copy, + iterable, + percentile, + diff, + gradient, + angle, + unwrap, + sort_complex, + flip, + rot90, + extract, + place, + asarray_chkfinite, + average, + digitize, + cov, + corrcoef, + median, + sinc, + hamming, + hanning, + bartlett, + blackman, + kaiser, + trapezoid, + i0, + meshgrid, + delete, + insert, + append, + interp, + quantile, + vectorize, ) from numpy.lib._histograms_impl import ( - histogram_bin_edges as histogram_bin_edges, - histogram as histogram, - histogramdd as histogramdd, + histogram_bin_edges, + histogram, + histogramdd, ) from numpy.lib._index_tricks_impl import ( - ravel_multi_index as ravel_multi_index, - unravel_index as unravel_index, - mgrid as mgrid, - ogrid as ogrid, - r_ as r_, - c_ as c_, - s_ as s_, - index_exp as index_exp, - ix_ as ix_, - fill_diagonal as fill_diagonal, - diag_indices as diag_indices, - diag_indices_from as diag_indices_from, + ndenumerate, + ndindex, + ravel_multi_index, + unravel_index, + mgrid, + ogrid, + r_, + c_, + s_, + index_exp, + ix_, + fill_diagonal, + diag_indices, + diag_indices_from, ) from numpy.lib._nanfunctions_impl import ( - nansum as nansum, - nanmax as nanmax, - nanmin as nanmin, - nanargmax as nanargmax, - nanargmin as nanargmin, - nanmean as nanmean, - nanmedian as nanmedian, - nanpercentile as nanpercentile, - nanvar as nanvar, - nanstd as 
nanstd, - nanprod as nanprod, - nancumsum as nancumsum, - nancumprod as nancumprod, - nanquantile as nanquantile, + nansum, + nanmax, + nanmin, + nanargmax, + nanargmin, + nanmean, + nanmedian, + nanpercentile, + nanvar, + nanstd, + nanprod, + nancumsum, + nancumprod, + nanquantile, ) from numpy.lib._npyio_impl import ( - savetxt as savetxt, - loadtxt as loadtxt, - genfromtxt as genfromtxt, - load as load, - save as save, - savez as savez, - savez_compressed as savez_compressed, - packbits as packbits, - unpackbits as unpackbits, - fromregex as fromregex, + savetxt, + loadtxt, + genfromtxt, + load, + save, + savez, + savez_compressed, + fromregex, ) from numpy.lib._polynomial_impl import ( - poly as poly, - roots as roots, - polyint as polyint, - polyder as polyder, - polyadd as polyadd, - polysub as polysub, - polymul as polymul, - polydiv as polydiv, - polyval as polyval, - polyfit as polyfit, + poly, + roots, + polyint, + polyder, + polyadd, + polysub, + polymul, + polydiv, + polyval, + poly1d, + polyfit, ) from numpy.lib._shape_base_impl import ( - column_stack as column_stack, - dstack as dstack, - array_split as array_split, - split as split, - hsplit as hsplit, - vsplit as vsplit, - dsplit as dsplit, - apply_over_axes as apply_over_axes, - expand_dims as expand_dims, - apply_along_axis as apply_along_axis, - kron as kron, - tile as tile, - take_along_axis as take_along_axis, - put_along_axis as put_along_axis, + column_stack, + dstack, + array_split, + split, + hsplit, + vsplit, + dsplit, + apply_over_axes, + expand_dims, + apply_along_axis, + kron, + tile, + take_along_axis, + put_along_axis, ) from numpy.lib._stride_tricks_impl import ( - broadcast_to as broadcast_to, - broadcast_arrays as broadcast_arrays, - broadcast_shapes as broadcast_shapes, + broadcast_to, + broadcast_arrays, + broadcast_shapes, ) from numpy.lib._twodim_base_impl import ( - diag as diag, - diagflat as diagflat, - eye as eye, - fliplr as fliplr, - flipud as flipud, - tri as tri, - 
triu as triu, - tril as tril, - vander as vander, - histogram2d as histogram2d, - mask_indices as mask_indices, - tril_indices as tril_indices, - tril_indices_from as tril_indices_from, - triu_indices as triu_indices, - triu_indices_from as triu_indices_from, + diag, + diagflat, + eye, + fliplr, + flipud, + tri, + triu, + tril, + vander, + histogram2d, + mask_indices, + tril_indices, + tril_indices_from, + triu_indices, + triu_indices_from, ) from numpy.lib._type_check_impl import ( - mintypecode as mintypecode, - real as real, - imag as imag, - iscomplex as iscomplex, - isreal as isreal, - iscomplexobj as iscomplexobj, - isrealobj as isrealobj, - nan_to_num as nan_to_num, - real_if_close as real_if_close, - typename as typename, - common_type as common_type, + mintypecode, + real, + imag, + iscomplex, + isreal, + iscomplexobj, + isrealobj, + nan_to_num, + real_if_close, + typename, + common_type, ) from numpy.lib._ufunclike_impl import ( - fix as fix, - isposinf as isposinf, - isneginf as isneginf, + fix, + isposinf, + isneginf, ) from numpy.lib._utils_impl import ( - get_include as get_include, - info as info, - show_runtime as show_runtime, + get_include, + info, + show_runtime, ) from numpy.matrixlib import ( - asmatrix as asmatrix, - bmat as bmat, + asmatrix, + bmat, + matrix, ) -_AnyStr_contra = TypeVar("_AnyStr_contra", str, bytes, contravariant=True) +__all__ = [ + # __numpy_submodules__ + "char", "core", "ctypeslib", "dtypes", "exceptions", "f2py", "fft", "lib", "linalg", + "ma", "polynomial", "random", "rec", "strings", "test", "testing", "typing", + + # _core.__all__ + "abs", "acos", "acosh", "asin", "asinh", "atan", "atanh", "atan2", "bitwise_invert", + "bitwise_left_shift", "bitwise_right_shift", "concat", "pow", "permute_dims", + "memmap", "sctypeDict", "record", "recarray", + + # _core.numeric.__all__ + "newaxis", "ndarray", "flatiter", "nditer", "nested_iters", "ufunc", "arange", + "array", "asarray", "asanyarray", "ascontiguousarray", 
"asfortranarray", "zeros", + "count_nonzero", "empty", "broadcast", "dtype", "fromstring", "fromfile", + "frombuffer", "from_dlpack", "where", "argwhere", "copyto", "concatenate", + "lexsort", "astype", "can_cast", "promote_types", "min_scalar_type", "result_type", + "isfortran", "empty_like", "zeros_like", "ones_like", "correlate", "convolve", + "inner", "dot", "outer", "vdot", "roll", "rollaxis", "moveaxis", "cross", + "tensordot", "little_endian", "fromiter", "array_equal", "array_equiv", "indices", + "fromfunction", "isclose", "isscalar", "binary_repr", "base_repr", "ones", + "identity", "allclose", "putmask", "flatnonzero", "inf", "nan", "False_", "True_", + "bitwise_not", "full", "full_like", "matmul", "vecdot", "vecmat", + "shares_memory", "may_share_memory", + "all", "amax", "amin", "any", "argmax", "argmin", "argpartition", "argsort", + "around", "choose", "clip", "compress", "cumprod", "cumsum", "cumulative_prod", + "cumulative_sum", "diagonal", "mean", "max", "min", "matrix_transpose", "ndim", + "nonzero", "partition", "prod", "ptp", "put", "ravel", "repeat", "reshape", + "resize", "round", "searchsorted", "shape", "size", "sort", "squeeze", "std", "sum", + "swapaxes", "take", "trace", "transpose", "var", + "absolute", "add", "arccos", "arccosh", "arcsin", "arcsinh", "arctan", "arctan2", + "arctanh", "bitwise_and", "bitwise_or", "bitwise_xor", "cbrt", "ceil", "conj", + "conjugate", "copysign", "cos", "cosh", "bitwise_count", "deg2rad", "degrees", + "divide", "divmod", "e", "equal", "euler_gamma", "exp", "exp2", "expm1", "fabs", + "floor", "floor_divide", "float_power", "fmax", "fmin", "fmod", "frexp", + "frompyfunc", "gcd", "greater", "greater_equal", "heaviside", "hypot", "invert", + "isfinite", "isinf", "isnan", "isnat", "lcm", "ldexp", "left_shift", "less", + "less_equal", "log", "log10", "log1p", "log2", "logaddexp", "logaddexp2", + "logical_and", "logical_not", "logical_or", "logical_xor", "matvec", "maximum", "minimum", + "mod", "modf", "multiply", 
"negative", "nextafter", "not_equal", "pi", "positive", + "power", "rad2deg", "radians", "reciprocal", "remainder", "right_shift", "rint", + "sign", "signbit", "sin", "sinh", "spacing", "sqrt", "square", "subtract", "tan", + "tanh", "true_divide", "trunc", "ScalarType", "typecodes", "issubdtype", + "datetime_data", "datetime_as_string", "busday_offset", "busday_count", "is_busday", + "busdaycalendar", "isdtype", + "complexfloating", "character", "unsignedinteger", "inexact", "generic", "floating", + "integer", "signedinteger", "number", "flexible", "bool", "float16", "float32", + "float64", "longdouble", "complex64", "complex128", "clongdouble", + "bytes_", "str_", "void", "object_", "datetime64", "timedelta64", "int8", "byte", + "uint8", "ubyte", "int16", "short", "uint16", "ushort", "int32", "intc", "uint32", + "uintc", "int64", "long", "uint64", "ulong", "longlong", "ulonglong", "intp", + "uintp", "double", "cdouble", "single", "csingle", "half", "bool_", "int_", "uint", + "float96", "float128", "complex192", "complex256", + "array2string", "array_str", "array_repr", "set_printoptions", "get_printoptions", + "printoptions", "format_float_positional", "format_float_scientific", "require", + "seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall", + "errstate", + # _core.function_base.__all__ + "logspace", "linspace", "geomspace", + # _core.getlimits.__all__ + "finfo", "iinfo", + # _core.shape_base.__all__ + "atleast_1d", "atleast_2d", "atleast_3d", "block", "hstack", "stack", "unstack", + "vstack", + # _core.einsumfunc.__all__ + "einsum", "einsum_path", + # matrixlib.__all__ + "matrix", "bmat", "asmatrix", + # lib._histograms_impl.__all__ + "histogram", "histogramdd", "histogram_bin_edges", + # lib._nanfunctions_impl.__all__ + "nansum", "nanmax", "nanmin", "nanargmax", "nanargmin", "nanmean", "nanmedian", + "nanpercentile", "nanvar", "nanstd", "nanprod", "nancumsum", "nancumprod", + "nanquantile", + # lib._function_base_impl.__all__ + "select", 
"piecewise", "trim_zeros", "copy", "iterable", "percentile", "diff", + "gradient", "angle", "unwrap", "sort_complex", "flip", "rot90", "extract", "place", + "vectorize", "asarray_chkfinite", "average", "bincount", "digitize", "cov", + "corrcoef", "median", "sinc", "hamming", "hanning", "bartlett", "blackman", + "kaiser", "trapezoid", "i0", "meshgrid", "delete", "insert", "append", + "interp", "quantile", + # lib._twodim_base_impl.__all__ + "diag", "diagflat", "eye", "fliplr", "flipud", "tri", "triu", "tril", "vander", + "histogram2d", "mask_indices", "tril_indices", "tril_indices_from", "triu_indices", + "triu_indices_from", + # lib._shape_base_impl.__all__ + "column_stack", "dstack", "array_split", "split", "hsplit", "vsplit", "dsplit", + "apply_over_axes", "expand_dims", "apply_along_axis", "kron", "tile", + "take_along_axis", "put_along_axis", + # lib._type_check_impl.__all__ + "iscomplexobj", "isrealobj", "imag", "iscomplex", "isreal", "nan_to_num", "real", + "real_if_close", "typename", "mintypecode", "common_type", + # lib._arraysetops_impl.__all__ + "ediff1d", "intersect1d", "isin", "setdiff1d", "setxor1d", "union1d", + "unique", "unique_all", "unique_counts", "unique_inverse", "unique_values", + # lib._ufunclike_impl.__all__ + "fix", "isneginf", "isposinf", + # lib._arraypad_impl.__all__ + "pad", + # lib._utils_impl.__all__ + "get_include", "info", "show_runtime", + # lib._stride_tricks_impl.__all__ + "broadcast_to", "broadcast_arrays", "broadcast_shapes", + # lib._polynomial_impl.__all__ + "poly", "roots", "polyint", "polyder", "polyadd", "polysub", "polymul", "polydiv", + "polyval", "poly1d", "polyfit", + # lib._npyio_impl.__all__ + "savetxt", "loadtxt", "genfromtxt", "load", "save", "savez", "savez_compressed", + "packbits", "unpackbits", "fromregex", + # lib._index_tricks_impl.__all__ + "ravel_multi_index", "unravel_index", "mgrid", "ogrid", "r_", "c_", "s_", + "index_exp", "ix_", "ndenumerate", "ndindex", "fill_diagonal", "diag_indices", + 
"diag_indices_from", + + # __init__.__all__ + "emath", "show_config", "__version__", "__array_namespace_info__", +] # fmt: skip + +### Type parameters (with defaults); for internal use only + +_ArrayT_co = TypeVar("_ArrayT_co", bound=ndarray, default=ndarray, covariant=True) +_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) +_DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True) +_ScalarT_co = TypeVar("_ScalarT_co", bound=generic, default=Any, covariant=True) + +# intentionally invariant +_NBitT = TypeVar("_NBitT", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] +_NBitT1 = TypeVar("_NBitT1", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] +_NBitT2 = TypeVar("_NBitT2", bound=NBitBase, default=_NBitT1) # pyright: ignore[reportDeprecated] + +_ItemT_co = TypeVar("_ItemT_co", default=Any, covariant=True) +_BoolItemT_co = TypeVar("_BoolItemT_co", bound=py_bool, default=py_bool, covariant=True) +_NumberItemT_co = TypeVar("_NumberItemT_co", bound=complex, default=Any, covariant=True) # either int, float, or complex +_InexactItemT_co = TypeVar("_InexactItemT_co", bound=complex, default=Any, covariant=True) # either float or complex +_FlexibleItemT_co = TypeVar("_FlexibleItemT_co", bound=bytes | str | tuple[Any, ...], default=Any, covariant=True) +_CharacterItemT_co = TypeVar("_CharacterItemT_co", bound=bytes | str, default=Any, covariant=True) +_TD64ItemT_co = TypeVar("_TD64ItemT_co", bound=_TD64Item, default=Any, covariant=True) +_DT64ItemT_co = TypeVar("_DT64ItemT_co", bound=_DT64Item, default=Any, covariant=True) + +### Type Aliases (for internal use only) + +type _Falsy = L[False, 0] | bool_[L[False]] +type _Truthy = L[True, 1] | bool_[L[True]] + +type _1D = tuple[int] +type _2D = tuple[int, int] +type _2Tuple[T] = tuple[T, T] + +type _ArrayUInt_co = NDArray[unsignedinteger | bool_] +type _ArrayInt_co = NDArray[integer | bool_] +type _ArrayFloat64_co = NDArray[floating[_64Bit] | 
float32 | float16 | integer | bool_] +type _ArrayFloat_co = NDArray[floating | integer | bool_] +type _ArrayComplex128_co = NDArray[number[_64Bit] | number[_32Bit] | float16 | integer | bool_] +type _ArrayComplex_co = NDArray[inexact | integer | bool_] +type _ArrayNumber_co = NDArray[number | bool_] +type _ArrayTD64_co = NDArray[timedelta64 | integer | bool_] + +type _ArrayString = ndarray[_AnyShape, dtype[str_] | dtypes.StringDType] +type _ArrayNumeric = NDArray[number | timedelta64 | object_] + +type _ScalarNotObject = bool_ | number | flexible | datetime64 | timedelta64 + +type _Float64_co = float | floating[_64Bit] | float32 | float16 | integer | bool_ +type _Complex64_co = number[_32Bit] | number[_16Bit] | number[_8Bit] | py_bool | bool_ +type _Complex128_co = complex | number[_64Bit] | _Complex64_co + +type _ToIndex = SupportsIndex | slice | EllipsisType | _ArrayLikeInt_co | None +type _ToIndices = _ToIndex | tuple[_ToIndex, ...] + +type _UnsignedIntegerCType = type[ + ct.c_uint8 | ct.c_uint16 | ct.c_uint32 | ct.c_uint64 + | ct.c_ushort | ct.c_uint | ct.c_ulong | ct.c_ulonglong + | ct.c_size_t | ct.c_void_p +] # fmt: skip +type _SignedIntegerCType = type[ + ct.c_int8 | ct.c_int16 | ct.c_int32 | ct.c_int64 + | ct.c_short | ct.c_int | ct.c_long | ct.c_longlong + | ct.c_ssize_t +] # fmt: skip +type _FloatingCType = type[ct.c_float | ct.c_double | ct.c_longdouble] +type _IntegerCType = _UnsignedIntegerCType | _SignedIntegerCType + +# some commonly used builtin types that are known to result in a +# `dtype[object_]`, when their *type* is passed to the `dtype` constructor +# NOTE: `builtins.object` should not be included here +type _BuiltinObjectLike = ( + slice | Decimal | Fraction | UUID + | dt.date | dt.time | dt.timedelta | dt.tzinfo + | tuple[Any, ...] 
| list[Any] | set[Any] | frozenset[Any] | dict[Any, Any] +) # fmt: skip -# Protocol for representing file-like-objects accepted -# by `ndarray.tofile` and `fromfile` -class _IOProtocol(Protocol): - def flush(self) -> object: ... - def fileno(self) -> int: ... - def tell(self) -> SupportsIndex: ... - def seek(self, offset: int, whence: int, /) -> object: ... +# Introduce an alias for `dtype` to avoid naming conflicts. +# NOTE: This should _not_ be `Final[_]`, `_: TypeAlias`, or `type _` +_dtype = dtype + +type _ByteOrderChar = L["<", ">", "=", "|"] +# can be anything, is case-insensitive, and only the first character matters +type _ByteOrder = L[ + "S", # swap the current order (default) + "<", "L", "little", # little-endian + ">", "B", "big", # big endian + "=", "N", "native", # native order + "|", "I", # ignore +] # fmt: skip +type _DTypeKind = L[ + "b", # boolean + "i", # signed integer + "u", # unsigned integer + "f", # floating-point + "c", # complex floating-point + "m", # timedelta64 + "M", # datetime64 + "O", # python object + "S", # byte-string (fixed-width) + "U", # unicode-string (fixed-width) + "V", # void + "T", # unicode-string (variable-width) +] +type _DTypeChar = L[ + "?", # bool + "b", # byte + "B", # ubyte + "h", # short + "H", # ushort + "i", # intc + "I", # uintc + "l", # long + "L", # ulong + "q", # longlong + "Q", # ulonglong + "e", # half + "f", # single + "d", # double + "g", # longdouble + "F", # csingle + "D", # cdouble + "G", # clongdouble + "O", # object + "S", # bytes_ (S0) + "U", # str_ + "V", # void + "M", # datetime64 + "m", # timedelta64 + "c", # bytes_ (S1) + "T", # StringDType +] +type _DTypeNum = L[ + 0, # bool + 1, # byte + 2, # ubyte + 3, # short + 4, # ushort + 5, # intc + 6, # uintc + 7, # long + 8, # ulong + 9, # longlong + 10, # ulonglong + 23, # half + 11, # single + 12, # double + 13, # longdouble + 14, # csingle + 15, # cdouble + 16, # clongdouble + 17, # object + 18, # bytes_ + 19, # str_ + 20, # void + 21, # datetime64 
+ 22, # timedelta64 + 25, # no type + 256, # user-defined + 2056, # StringDType +] +type _DTypeBuiltinKind = L[0, 1, 2] + +type _ArrayAPIVersion = L["2021.12", "2022.12", "2023.12", "2024.12"] + +type _CastingKind = L["no", "equiv", "safe", "same_kind", "same_value", "unsafe"] + +type _OrderKACF = L["K", "A", "C", "F"] | None +type _OrderACF = L["A", "C", "F"] | None +type _OrderCF = L["C", "F"] | None + +type _ModeKind = L["raise", "wrap", "clip"] +type _PartitionKind = L["introselect"] +# in practice, only the first case-insensitive character is considered (so e.g. +# "QuantumSort3000" will be interpreted as quicksort). +type _SortKind = L[ + "Q", "quick", "quicksort", + "M", "merge", "mergesort", + "H", "heap", "heapsort", + "S", "stable", "stablesort", +] # fmt: skip +type _SortSide = L["left", "right"] + +type _ConvertibleToInt = SupportsInt | SupportsIndex | _CharLike_co +type _ConvertibleToFloat = SupportsFloat | SupportsIndex | _CharLike_co +type _ConvertibleToComplex = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co +type _ConvertibleToTD64 = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | bool_ | None +type _ConvertibleToDT64 = dt.date | int | _CharLike_co | character | number | datetime64 | bool_ | None + +type _NDIterFlagsKind = L[ + "buffered", + "c_index", + "copy_if_overlap", + "common_dtype", + "delay_bufalloc", + "external_loop", + "f_index", + "grow_inner", "growinner", + "multi_index", + "ranged", + "refs_ok", + "reduce_ok", + "zerosize_ok", +] +type _NDIterFlagsOp = L[ + "aligned", + "allocate", + "arraymask", + "copy", + "config", + "nbo", + "no_subtype", + "no_broadcast", + "overlap_assume_elementwise", + "readonly", + "readwrite", + "updateifcopy", + "virtual", + "writeonly", + "writemasked", +] + +type _DT64Item = dt.date | int | None +type _TD64Item = dt.timedelta | int | None + +type _DT64Date = _HasDateAttributes | L["TODAY", "today", b"TODAY", b"today"] +type _DT64Now = L["NOW", "now", b"NOW", 
b"now"] +type _NaTValue = L["NAT", "NaT", "nat", b"NAT", b"NaT", b"nat"] + +type _MonthUnit = L["Y", "M", b"Y", b"M"] +type _DayUnit = L["W", "D", b"W", b"D"] +type _DateUnit = L[_MonthUnit, _DayUnit] +type _NativeTimeUnit = L["h", "m", "s", "ms", "us", "Îŧs", b"h", b"m", b"s", b"ms", b"us"] +type _IntTimeUnit = L["ns", "ps", "fs", "as", b"ns", b"ps", b"fs", b"as"] +type _TimeUnit = L[_NativeTimeUnit, _IntTimeUnit] +type _NativeTD64Unit = L[_DayUnit, _NativeTimeUnit] +type _IntTD64Unit = L[_MonthUnit, _IntTimeUnit] +type _TD64Unit = L[_DateUnit, _TimeUnit] +type _TimeUnitSpec[UnitT: _TD64Unit] = _TD64Unit | tuple[_TD64Unit, SupportsIndex] + +### TypedDict's (for internal use only) + +@type_check_only +class _FormerAttrsDict(TypedDict): + object: LiteralString + float: LiteralString + complex: LiteralString + str: LiteralString + int: LiteralString + +### Protocols (for internal use only) + +@final +@type_check_only +class _SupportsLT(Protocol): + def __lt__(self, other: Any, /) -> Any: ... + +@final +@type_check_only +class _SupportsLE(Protocol): + def __le__(self, other: Any, /) -> Any: ... + +@final +@type_check_only +class _SupportsGT(Protocol): + def __gt__(self, other: Any, /) -> Any: ... + +@final +@type_check_only +class _SupportsGE(Protocol): + def __ge__(self, other: Any, /) -> Any: ... -# NOTE: `seek`, `write` and `flush` are technically only required -# for `readwrite`/`write` modes -class _MemMapIOProtocol(Protocol): - def flush(self) -> object: ... +@type_check_only +class _SupportsFileMethods(SupportsFlush, Protocol): + # Protocol for representing file-like-objects accepted by `ndarray.tofile` and `fromfile` def fileno(self) -> SupportsIndex: ... - def tell(self) -> int: ... + def tell(self) -> SupportsIndex: ... def seek(self, offset: int, whence: int, /) -> object: ... - def write(self, s: bytes, /) -> object: ... 
+ +@type_check_only +class _SupportsDLPack[StreamT](Protocol): + def __dlpack__(self, /, *, stream: StreamT | None = None) -> CapsuleType: ... + +@type_check_only +class _HasDType[DTypeT](Protocol): # DTypeT bound was intentionally left out + @property + def dtype(self, /) -> DTypeT: ... + +@type_check_only +class _HasRealAndImag[RealT, ImagT](Protocol): + @property + def real(self, /) -> RealT: ... + @property + def imag(self, /) -> ImagT: ... + +@type_check_only +class _HasTypeWithRealAndImag[RealT, ImagT](Protocol): + @property + def type(self, /) -> py_type[_HasRealAndImag[RealT, ImagT]]: ... + +@type_check_only +class _HasDTypeWithRealAndImag[RealT, ImagT](Protocol): + @property + def dtype(self, /) -> _HasTypeWithRealAndImag[RealT, ImagT]: ... + +@type_check_only +class _HasDateAttributes(Protocol): + # The `datetime64` constructors requires an object with the three attributes below, + # and thus supports datetime duck typing + @property + def day(self) -> int: ... + @property + def month(self) -> int: ... + @property + def year(self) -> int: ... + +### Mixins (for internal use only) + +@type_check_only +class _RealMixin: + @property + def real(self) -> Self: ... + @property + def imag(self) -> Self: ... + +@type_check_only +class _RoundMixin: + @overload + def __round__(self, /, ndigits: None = None) -> int: ... + @overload + def __round__(self, /, ndigits: SupportsIndex) -> Self: ... + +@type_check_only +class _IntegralMixin(_RealMixin): + @property + def numerator(self) -> Self: ... @property - def read(self) -> object: ... + def denominator(self) -> L[1]: ... + + def is_integer(self, /) -> L[True]: ... -class _SupportsWrite(Protocol[_AnyStr_contra]): - def write(self, s: _AnyStr_contra, /) -> object: ... +### Public API -__all__: list[str] -__dir__: list[str] -__version__: str -__git_version__: str -__array_api_version__: str -test: PytestTester +__version__: Final[LiteralString] = ... 
-# TODO: Move placeholders to their respective module once -# their annotations are properly implemented -# -# Placeholders for classes +e: Final[float] = ... +euler_gamma: Final[float] = ... +pi: Final[float] = ... +inf: Final[float] = ... +nan: Final[float] = ... +little_endian: Final[py_bool] = ... +False_: Final[bool_[L[False]]] = ... +True_: Final[bool_[L[True]]] = ... +newaxis: Final[None] = None -def show_config() -> None: ... +# not in __all__ +__NUMPY_SETUP__: Final[L[False]] = False +__numpy_submodules__: Final[set[LiteralString]] = ... +__former_attrs__: Final[_FormerAttrsDict] = ... +__future_scalars__: Final[set[L["bytes", "str", "object"]]] = ... +__array_api_version__: Final[L["2024.12"]] = "2024.12" +test: Final[PytestTester] = ... -_NdArraySubClass = TypeVar("_NdArraySubClass", bound=NDArray[Any]) -_DTypeScalar_co = TypeVar("_DTypeScalar_co", covariant=True, bound=generic) -_ByteOrder = L["S", "<", ">", "=", "|", "L", "B", "N", "I", "little", "big", "native"] +@type_check_only +class _DTypeMeta(type): + @property + def type(cls, /) -> py_type[generic] | None: ... + @property + def _abstract(cls, /) -> bool: ... + @property + def _is_numeric(cls, /) -> bool: ... + @property + def _parametric(cls, /) -> bool: ... + @property + def _legacy(cls, /) -> bool: ... @final -class dtype(Generic[_DTypeScalar_co]): - names: None | tuple[builtins.str, ...] +class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 + names: tuple[py_str, ...] | None def __hash__(self) -> int: ... - # Overload for subclass of generic + + # `None` results in the default dtype @overload def __new__( cls, - dtype: type[_DTypeScalar_co], - align: builtins.bool = ..., - copy: builtins.bool = ..., - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[_DTypeScalar_co]: ... - # Overloads for string aliases, Python types, and some assorted - # other special cases. 
Order is sometimes important because of the - # subtype relationships + dtype: py_type[float64 | ct.c_double] | _Float64Codes | None, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ... + ) -> dtype[float64]: ... + + # Overload for `dtype` instances, scalar types, and instances that have a + # `dtype: dtype[ScalarT]` attribute + @overload + def __new__[ScalarT: generic]( + cls, + dtype: _DTypeLike[ScalarT], + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[ScalarT]: ... + + # Builtin types # - # builtins.bool < int < float < complex < object + # NOTE: Typecheckers act as if `bool <: int <: float <: complex <: object`, + # even though at runtime `int`, `float`, and `complex` aren't subtypes.. + # This makes it impossible to express e.g. "a float that isn't an int", + # since type checkers treat `_: float` like `_: float | int`. # - # so we have to make sure the overloads for the narrowest type is - # first. - # Builtin types - @overload - def __new__(cls, dtype: type[builtins.bool], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[np.bool]: ... - @overload - def __new__(cls, dtype: type[int], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int_]: ... + # For more details, see: + # - https://github.com/numpy/numpy/issues/27032#issuecomment-2278958251 + # - https://typing.readthedocs.io/en/latest/spec/special-types.html#special-cases-for-float-and-complex @overload - def __new__(cls, dtype: None | type[float], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float64]: ... + def __new__( + cls, + dtype: py_type[py_bool | bool_ | ct.c_bool] | _BoolCodes, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[bool_]: ... 
@overload - def __new__(cls, dtype: type[complex], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[complex128]: ... + def __new__( + cls, + dtype: py_type[int], # also accepts `type[py_bool]` + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[int_ | Any]: ... @overload - def __new__(cls, dtype: type[builtins.str], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[str_]: ... + def __new__( + cls, + dtype: py_type[float], # also accepts `type[int | bool]` + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[float64 | Any]: ... @overload - def __new__(cls, dtype: type[bytes], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[bytes_]: ... - - # `unsignedinteger` string-based representations and ctypes + def __new__( + cls, + dtype: py_type[complex], # also accepts `type[float | int | bool]` + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[complex128 | Any]: ... @overload - def __new__(cls, dtype: _UInt8Codes | type[ct.c_uint8], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint8]: ... + def __new__( + cls, + dtype: py_type[bytes | ct.c_char] | _BytesCodes, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[bytes_]: ... @overload - def __new__(cls, dtype: _UInt16Codes | type[ct.c_uint16], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint16]: ... + def __new__( + cls, + dtype: py_type[py_str] | _StrCodes, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[str_]: ... 
+ # NOTE: These `memoryview` overloads assume PEP 688, which requires mypy to + # be run with the (undocumented) `--disable-memoryview-promotion` flag, + # This will be the default in a future mypy release, see: + # https://github.com/python/mypy/issues/15313 + # Pyright / Pylance requires setting `disableBytesTypePromotions=true`, + # which is the default in strict mode @overload - def __new__(cls, dtype: _UInt32Codes | type[ct.c_uint32], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint32]: ... + def __new__( + cls, + dtype: py_type[void | memoryview] | _VoidDTypeLike | _VoidCodes, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[void]: ... + # NOTE: `_: type[object]` would also accept e.g. `type[object | complex]`, + # and is therefore not included here @overload - def __new__(cls, dtype: _UInt64Codes | type[ct.c_uint64], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint64]: ... + def __new__( + cls, + dtype: py_type[object_ | _BuiltinObjectLike | ct.py_object[Any]] | _ObjectCodes, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[object_]: ... + + # `unsignedinteger` string-based representations and ctypes @overload - def __new__(cls, dtype: _UByteCodes | type[ct.c_ubyte], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ubyte]: ... + def __new__( + cls, + dtype: _UInt8Codes | py_type[ct.c_uint8], + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[uint8]: ... @overload - def __new__(cls, dtype: _UShortCodes | type[ct.c_ushort], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ushort]: ... 
+ def __new__( + cls, + dtype: _UInt16Codes | py_type[ct.c_uint16 | ct.c_ushort], + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[uint16]: ... @overload - def __new__(cls, dtype: _UIntCCodes | type[ct.c_uint], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uintc]: ... - - # NOTE: We're assuming here that `uint_ptr_t == size_t`, - # an assumption that does not hold in rare cases (same for `ssize_t`) + def __new__( + cls, + dtype: _UInt32Codes | _UIntCCodes | py_type[ct.c_uint32 | ct.c_uint], + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[uint32]: ... @overload - def __new__(cls, dtype: _UIntPCodes | type[ct.c_void_p] | type[ct.c_size_t], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uintp]: ... + def __new__( + cls, + dtype: _UInt64Codes | _ULongLongCodes | py_type[ct.c_uint64 | ct.c_ulonglong], + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[uint64]: ... @overload - def __new__(cls, dtype: _ULongCodes | type[ct.c_ulong], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ulong]: ... + def __new__( + cls, + dtype: _UIntPCodes | py_type[ct.c_void_p | ct.c_size_t], + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[uintp]: ... @overload - def __new__(cls, dtype: _ULongLongCodes | type[ct.c_ulonglong], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ulonglong]: ... + def __new__( + cls, + dtype: _ULongCodes | py_type[ct.c_ulong], + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[uint32 | uint64]: ... 
# `signedinteger` string-based representations and ctypes @overload - def __new__(cls, dtype: _Int8Codes | type[ct.c_int8], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int8]: ... - @overload - def __new__(cls, dtype: _Int16Codes | type[ct.c_int16], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int16]: ... - @overload - def __new__(cls, dtype: _Int32Codes | type[ct.c_int32], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int32]: ... - @overload - def __new__(cls, dtype: _Int64Codes | type[ct.c_int64], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int64]: ... - @overload - def __new__(cls, dtype: _ByteCodes | type[ct.c_byte], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[byte]: ... + def __new__( + cls, + dtype: _Int8Codes | py_type[ct.c_int8], + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[int8]: ... @overload - def __new__(cls, dtype: _ShortCodes | type[ct.c_short], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[short]: ... + def __new__( + cls, + dtype: _Int16Codes | py_type[ct.c_int16 | ct.c_short], + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[int16]: ... @overload - def __new__(cls, dtype: _IntCCodes | type[ct.c_int], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[intc]: ... + def __new__( + cls, + dtype: _Int32Codes | _IntCCodes | py_type[ct.c_int32 | ct.c_int], + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[int32]: ... 
@overload - def __new__(cls, dtype: _IntPCodes | type[ct.c_ssize_t], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[intp]: ... + def __new__( + cls, + dtype: _Int64Codes | _LongLongCodes | py_type[ct.c_int64 | ct.c_longlong], + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[int64]: ... @overload - def __new__(cls, dtype: _LongCodes | type[ct.c_long], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[long]: ... + def __new__( + cls, + dtype: _IntPCodes | py_type[intp | ct.c_ssize_t], + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[intp]: ... @overload - def __new__(cls, dtype: _LongLongCodes | type[ct.c_longlong], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[longlong]: ... + def __new__( + cls, + dtype: _LongCodes | py_type[ct.c_long], + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[int32 | int64]: ... # `floating` string-based representations and ctypes @overload - def __new__(cls, dtype: _Float16Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float16]: ... - @overload - def __new__(cls, dtype: _Float32Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float32]: ... - @overload - def __new__(cls, dtype: _Float64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float64]: ... - @overload - def __new__(cls, dtype: _HalfCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[half]: ... 
- @overload - def __new__(cls, dtype: _SingleCodes | type[ct.c_float], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[single]: ... + def __new__( + cls, + dtype: _Float16Codes, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[float16]: ... @overload - def __new__(cls, dtype: _DoubleCodes | type[ct.c_double], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[double]: ... + def __new__( + cls, + dtype: _Float32Codes | py_type[ct.c_float], + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[float32]: ... + # float64 codes are covered by overload 1 @overload - def __new__(cls, dtype: _LongDoubleCodes | type[ct.c_longdouble], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[longdouble]: ... + def __new__( + cls, + dtype: _LongDoubleCodes | py_type[ct.c_longdouble], + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[longdouble]: ... + + # `complexfloating` string-based representations and ctypes + if sys.version_info < (3, 14) or sys.platform == "win32": + @overload + def __new__( + cls, + dtype: _Complex64Codes, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[complex64]: ... + @overload + def __new__( + cls, + dtype: _Complex128Codes, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[complex128]: ... + @overload + def __new__( + cls, + dtype: _CLongDoubleCodes, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[clongdouble]: ... 
+ else: + @overload + def __new__( + cls, + dtype: _Complex64Codes | py_type[ct.c_float_complex], + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[complex64]: ... + @overload + def __new__( + cls, + dtype: _Complex128Codes | py_type[ct.c_double_complex], + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[complex128]: ... + @overload + def __new__( + cls, + dtype: _CLongDoubleCodes | py_type[ct.c_longdouble_complex], + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[clongdouble]: ... + + # datetime64 + @overload # datetime64[{Y,M,W,D}] + def __new__( + cls, + dtype: _DT64Codes_date, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[datetime64[dt.date]]: ... + @overload # datetime64[{h,m,s,ms,us}] + def __new__( + cls, + dtype: _DT64Codes_datetime, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[datetime64[dt.datetime]]: ... + @overload # datetime64[{ns,ps,fs,as}] + def __new__( + cls, + dtype: _DT64Codes_int, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[datetime64[int]]: ... + @overload # datetime64[?] + def __new__( + cls, + dtype: _DT64Codes_any, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[datetime64]: ... - # `complexfloating` string-based representations - @overload - def __new__(cls, dtype: _Complex64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[complex64]: ... - @overload - def __new__(cls, dtype: _Complex128Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[complex128]: ... 
- @overload - def __new__(cls, dtype: _CSingleCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[csingle]: ... - @overload - def __new__(cls, dtype: _CDoubleCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[cdouble]: ... - @overload - def __new__(cls, dtype: _CLongDoubleCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[clongdouble]: ... + # timedelta64 + @overload # timedelta64[{W,D,h,m,s,ms,us}] + def __new__( + cls, + dtype: _TD64Codes_timedelta, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[timedelta64[dt.timedelta]]: ... + @overload # timedelta64[{Y,M,ns,ps,fs,as}] + def __new__( + cls, + dtype: _TD64Codes_int, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[timedelta64[int]]: ... + @overload # timedelta64[?] + def __new__( + cls, + dtype: _TD64Codes_any, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[timedelta64]: ... - # Miscellaneous string-based representations and ctypes - @overload - def __new__(cls, dtype: _BoolCodes | type[ct.c_bool], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[np.bool]: ... - @overload - def __new__(cls, dtype: _TD64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[timedelta64]: ... - @overload - def __new__(cls, dtype: _DT64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[datetime64]: ... - @overload - def __new__(cls, dtype: _StrCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[str_]: ... 
- @overload - def __new__(cls, dtype: _BytesCodes | type[ct.c_char], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[bytes_]: ... - @overload - def __new__(cls, dtype: _VoidCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[void]: ... + # `StringDType` requires special treatment because it has no scalar type @overload - def __new__(cls, dtype: _ObjectCodes | type[ct.py_object[Any]], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[object_]: ... + def __new__( + cls, + dtype: dtypes.StringDType | _StringCodes, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtypes.StringDType: ... - # dtype of a dtype is the same dtype + # Combined char-codes and ctypes, analogous to the scalar-type hierarchy @overload def __new__( cls, - dtype: dtype[_DTypeScalar_co], - align: builtins.bool = ..., - copy: builtins.bool = ..., - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[_DTypeScalar_co]: ... + dtype: _UnsignedIntegerCodes | _UnsignedIntegerCType, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[unsignedinteger]: ... @overload def __new__( cls, - dtype: _SupportsDType[dtype[_DTypeScalar_co]], - align: builtins.bool = ..., - copy: builtins.bool = ..., - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[_DTypeScalar_co]: ... - # Handle strings that can't be expressed as literals; i.e. s1, s2, ... + dtype: _SignedIntegerCodes | _SignedIntegerCType, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[signedinteger]: ... @overload def __new__( cls, - dtype: builtins.str, - align: builtins.bool = ..., - copy: builtins.bool = ..., - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[Any]: ... 
- # Catchall overload for void-likes + dtype: _IntegerCodes | _IntegerCType, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[integer]: ... @overload def __new__( cls, - dtype: _VoidDTypeLike, - align: builtins.bool = ..., - copy: builtins.bool = ..., - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[void]: ... - # Catchall overload for object-likes + dtype: _FloatingCodes | _FloatingCType, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[floating]: ... @overload def __new__( cls, - dtype: type[object], - align: builtins.bool = ..., - copy: builtins.bool = ..., - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[object_]: ... + dtype: _ComplexFloatingCodes, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[complexfloating]: ... + @overload + def __new__( + cls, + dtype: _InexactCodes | _FloatingCType, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[inexact]: ... + @overload + def __new__( + cls, + dtype: _CharacterCodes | py_type[bytes | py_str | ct.c_char], + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[character]: ... + + # Handle strings that can't be expressed as literals; i.e. "S1", "S2", ... + @overload + def __new__( + cls, + dtype: py_str, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype: ... + + # Catch-all overload for object-likes + # NOTE: `object_ | Any` is NOT equivalent to `Any`. It is specified to behave + # like a "sum type" (a.k.a. variant type, discriminated union, or tagged union). + # So the union of a type and `Any` is not the same "union type" that all other + # unions are (by definition). 
+ # https://typing.python.org/en/latest/spec/concepts.html#union-types + @overload + def __new__( + cls, + dtype: py_type[object], + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[py_str, Any] = ..., + ) -> dtype[object_ | Any]: ... - def __class_getitem__(self, item: Any) -> GenericAlias: ... + def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... @overload - def __getitem__(self: dtype[void], key: list[builtins.str]) -> dtype[void]: ... + def __getitem__(self: dtype[void], key: list[py_str], /) -> dtype[void]: ... @overload - def __getitem__(self: dtype[void], key: builtins.str | SupportsIndex) -> dtype[Any]: ... + def __getitem__(self: dtype[void], key: py_str | SupportsIndex, /) -> dtype: ... # NOTE: In the future 1-based multiplications will also yield `flexible` dtypes @overload - def __mul__(self: _DType, value: L[1]) -> _DType: ... + def __mul__[DTypeT: dtype](self: DTypeT, value: L[1], /) -> DTypeT: ... @overload - def __mul__(self: _FlexDType, value: SupportsIndex) -> _FlexDType: ... + def __mul__[FlexibleDTypeT: dtype[flexible]](self: FlexibleDTypeT, value: SupportsIndex, /) -> FlexibleDTypeT: ... @overload - def __mul__(self, value: SupportsIndex) -> dtype[void]: ... + def __mul__(self, value: SupportsIndex, /) -> dtype[void]: ... # NOTE: `__rmul__` seems to be broken when used in combination with - # literals as of mypy 0.902. Set the return-type to `dtype[Any]` for + # literals as of mypy 0.902. Set the return-type to `dtype` for # now for non-flexible dtypes. @overload - def __rmul__(self: _FlexDType, value: SupportsIndex) -> _FlexDType: ... + def __rmul__[FlexibleDTypeT: dtype[flexible]](self: FlexibleDTypeT, value: SupportsIndex, /) -> FlexibleDTypeT: ... @overload - def __rmul__(self, value: SupportsIndex) -> dtype[Any]: ... + def __rmul__(self, value: SupportsIndex, /) -> dtype: ... - def __gt__(self, other: DTypeLike) -> builtins.bool: ... - def __ge__(self, other: DTypeLike) -> builtins.bool: ... 
- def __lt__(self, other: DTypeLike) -> builtins.bool: ... - def __le__(self, other: DTypeLike) -> builtins.bool: ... + def __gt__(self, other: DTypeLike | None, /) -> py_bool: ... + def __ge__(self, other: DTypeLike | None, /) -> py_bool: ... + def __lt__(self, other: DTypeLike | None, /) -> py_bool: ... + def __le__(self, other: DTypeLike | None, /) -> py_bool: ... # Explicitly defined `__eq__` and `__ne__` to get around mypy's # `strict_equality` option; even though their signatures are # identical to their `object`-based counterpart - def __eq__(self, other: Any) -> builtins.bool: ... - def __ne__(self, other: Any) -> builtins.bool: ... + def __eq__(self, other: Any, /) -> py_bool: ... + def __ne__(self, other: Any, /) -> py_bool: ... @property def alignment(self) -> int: ... @property - def base(self) -> dtype[Any]: ... + def base(self) -> dtype: ... @property - def byteorder(self) -> builtins.str: ... + def byteorder(self) -> _ByteOrderChar: ... @property - def char(self) -> builtins.str: ... + def char(self) -> _DTypeChar: ... @property - def descr(self) -> list[tuple[builtins.str, builtins.str] | tuple[builtins.str, builtins.str, _Shape]]: ... + def descr(self) -> list[tuple[LiteralString, LiteralString] | tuple[LiteralString, LiteralString, _Shape]]: ... @property - def fields( - self, - ) -> None | MappingProxyType[builtins.str, tuple[dtype[Any], int] | tuple[dtype[Any], int, Any]]: ... + def fields(self,) -> MappingProxyType[LiteralString, tuple[dtype, int] | tuple[dtype, int, Any]] | None: ... @property def flags(self) -> int: ... @property - def hasobject(self) -> builtins.bool: ... + def hasobject(self) -> py_bool: ... @property - def isbuiltin(self) -> int: ... + def isbuiltin(self) -> _DTypeBuiltinKind: ... @property - def isnative(self) -> builtins.bool: ... + def isnative(self) -> py_bool: ... @property - def isalignedstruct(self) -> builtins.bool: ... + def isalignedstruct(self) -> py_bool: ... @property def itemsize(self) -> int: ... 
@property - def kind(self) -> builtins.str: ... + def kind(self) -> _DTypeKind: ... @property - def metadata(self) -> None | MappingProxyType[builtins.str, Any]: ... + def metadata(self) -> MappingProxyType[py_str, Any] | None: ... @property - def name(self) -> builtins.str: ... + def name(self) -> LiteralString: ... @property - def num(self) -> int: ... + def num(self) -> _DTypeNum: ... @property - def shape(self) -> _Shape: ... + def shape(self) -> _AnyShape: ... @property def ndim(self) -> int: ... @property - def subdtype(self) -> None | tuple[dtype[Any], _Shape]: ... - def newbyteorder(self: _DType, __new_order: _ByteOrder = ...) -> _DType: ... + def subdtype(self) -> tuple[dtype, _AnyShape] | None: ... + def newbyteorder(self, new_order: _ByteOrder = ..., /) -> Self: ... @property - def str(self) -> builtins.str: ... + def str(self) -> LiteralString: ... @property - def type(self) -> type[_DTypeScalar_co]: ... - -_ArrayLikeInt = ( - int - | integer[Any] - | Sequence[int | integer[Any]] - | Sequence[Sequence[Any]] # TODO: wait for support for recursive types - | NDArray[Any] -) - -_FlatIterSelf = TypeVar("_FlatIterSelf", bound=flatiter[Any]) + def type(self) -> py_type[_ScalarT_co]: ... -@final -class flatiter(Generic[_NdArraySubClass]): - __hash__: ClassVar[None] - @property - def base(self) -> _NdArraySubClass: ... +@type_check_only +class _ArrayOrScalarCommon: @property - def coords(self) -> _Shape: ... + def real(self, /) -> Any: ... @property - def index(self) -> int: ... - def copy(self) -> _NdArraySubClass: ... - def __iter__(self: _FlatIterSelf) -> _FlatIterSelf: ... - def __next__(self: flatiter[NDArray[_ScalarType]]) -> _ScalarType: ... - def __len__(self) -> int: ... - @overload - def __getitem__( - self: flatiter[NDArray[_ScalarType]], - key: int | integer[Any] | tuple[int | integer[Any]], - ) -> _ScalarType: ... 
- @overload - def __getitem__( - self, - key: _ArrayLikeInt | slice | ellipsis | tuple[_ArrayLikeInt | slice | ellipsis], - ) -> _NdArraySubClass: ... - # TODO: `__setitem__` operates via `unsafe` casting rules, and can - # thus accept any type accepted by the relevant underlying `np.generic` - # constructor. - # This means that `value` must in reality be a supertype of `npt.ArrayLike`. - def __setitem__( - self, - key: _ArrayLikeInt | slice | ellipsis | tuple[_ArrayLikeInt | slice | ellipsis], - value: Any, - ) -> None: ... - @overload - def __array__(self: flatiter[ndarray[Any, _DType]], dtype: None = ..., /) -> ndarray[Any, _DType]: ... - @overload - def __array__(self, dtype: _DType, /) -> ndarray[Any, _DType]: ... - -_OrderKACF = L[None, "K", "A", "C", "F"] -_OrderACF = L[None, "A", "C", "F"] -_OrderCF = L[None, "C", "F"] - -_ModeKind = L["raise", "wrap", "clip"] -_PartitionKind = L["introselect"] -_SortKind = L["quicksort", "mergesort", "heapsort", "stable"] -_SortSide = L["left", "right"] - -_ArraySelf = TypeVar("_ArraySelf", bound=_ArrayOrScalarCommon) - -class _ArrayOrScalarCommon: + def imag(self, /) -> Any: ... @property - def T(self: _ArraySelf) -> _ArraySelf: ... + def T(self) -> Self: ... @property - def mT(self: _ArraySelf) -> _ArraySelf: ... + def mT(self) -> Self: ... @property def data(self) -> memoryview: ... @property @@ -962,516 +1668,446 @@ class _ArrayOrScalarCommon: def itemsize(self) -> int: ... @property def nbytes(self) -> int: ... - def __bool__(self) -> builtins.bool: ... - def __bytes__(self) -> bytes: ... - def __str__(self) -> str: ... - def __repr__(self) -> str: ... - def __copy__(self: _ArraySelf) -> _ArraySelf: ... - def __deepcopy__(self: _ArraySelf, memo: None | dict[int, Any], /) -> _ArraySelf: ... + @property + def device(self) -> L["cpu"]: ... + + def __bool__(self, /) -> py_bool: ... + def __int__(self, /) -> int: ... + def __float__(self, /) -> float: ... + def __copy__(self) -> Self: ... 
+ def __deepcopy__(self, memo: dict[int, Any] | None, /) -> Self: ... # TODO: How to deal with the non-commutative nature of `==` and `!=`? # xref numpy/numpy#17368 - def __eq__(self, other: Any) -> Any: ... - def __ne__(self, other: Any) -> Any: ... - def copy(self: _ArraySelf, order: _OrderKACF = ...) -> _ArraySelf: ... - def dump(self, file: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _SupportsWrite[bytes]) -> None: ... + def __eq__(self, other: Any, /) -> Any: ... + def __ne__(self, other: Any, /) -> Any: ... + + def copy(self, order: _OrderKACF = ...) -> Self: ... + def dump(self, file: StrOrBytesPath | SupportsWrite[bytes]) -> None: ... def dumps(self) -> bytes: ... def tobytes(self, order: _OrderKACF = ...) -> bytes: ... - # NOTE: `tostring()` is deprecated and therefore excluded - # def tostring(self, order=...): ... - def tofile( - self, - fid: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _IOProtocol, - sep: str = ..., - format: str = ..., - ) -> None: ... + def tofile(self, fid: StrOrBytesPath | _SupportsFileMethods, /, sep: str = "", format: str = "%s") -> None: ... # generics and 0d arrays return builtin scalars def tolist(self) -> Any: ... + def to_device(self, device: L["cpu"], /, *, stream: int | Any | None = ...) -> Self: ... + + # NOTE: for `generic`, these two methods don't do anything + def fill(self, /, value: Incomplete) -> None: ... + def put(self, indices: _ArrayLikeInt_co, values: ArrayLike, /, mode: _ModeKind = "raise") -> None: ... + + # NOTE: even on `generic` this seems to work + def setflags( + self, + /, + *, + write: py_bool | None = None, + align: py_bool | None = None, + uic: py_bool | None = None, + ) -> None: ... @property def __array_interface__(self) -> dict[str, Any]: ... @property def __array_priority__(self) -> float: ... @property - def __array_struct__(self) -> Any: ... # builtins.PyCapsule + def __array_struct__(self) -> CapsuleType: ... 
+ def __array_namespace__(self, /, *, api_version: _ArrayAPIVersion | None = None) -> ModuleType: ... def __setstate__(self, state: tuple[ SupportsIndex, # version _ShapeLike, # Shape - _DType_co, # DType - np.bool, # F-continuous + _DTypeT_co, # DType + bool_, # F-continuous bytes | list[Any], # Data ], /) -> None: ... - # an `np.bool` is returned when `keepdims=True` and `self` is a 0d array - @overload - def all( + def conj(self) -> Self: ... + def conjugate(self) -> Self: ... + + def argsort( self, - axis: None = ..., - out: None = ..., - keepdims: L[False] = ..., + axis: SupportsIndex | None = ..., + kind: _SortKind | None = ..., + order: str | Sequence[str] | None = ..., *, - where: _ArrayLikeBool_co = ..., - ) -> np.bool: ... + stable: py_bool | None = ..., + ) -> NDArray[intp]: ... + + @overload # axis=None (default), out=None (default), keepdims=False (default) + def argmax(self, /, axis: None = None, out: None = None, *, keepdims: L[False] = False) -> intp: ... + @overload # axis=index, out=None (default) + def argmax(self, /, axis: SupportsIndex, out: None = None, *, keepdims: py_bool = False) -> Any: ... + @overload # axis=index, out=ndarray + def argmax[OutT: _ArrayInt_co]( + self, /, axis: SupportsIndex | None, out: OutT, *, keepdims: py_bool = False + ) -> OutT: ... @overload - def all( - self, - axis: None | _ShapeLike = ..., - out: None = ..., - keepdims: builtins.bool = ..., - *, - where: _ArrayLikeBool_co = ..., - ) -> Any: ... + def argmax[OutT: _ArrayInt_co]( + self, /, axis: SupportsIndex | None = None, *, out: OutT, keepdims: py_bool = False + ) -> OutT: ... + + @overload # axis=None (default), out=None (default), keepdims=False (default) + def argmin(self, /, axis: None = None, out: None = None, *, keepdims: L[False] = False) -> intp: ... + @overload # axis=index, out=None (default) + def argmin(self, /, axis: SupportsIndex, out: None = None, *, keepdims: py_bool = False) -> Any: ... 
+ @overload # axis=index, out=ndarray + def argmin[OutT: _ArrayInt_co]( + self, /, axis: SupportsIndex | None, out: OutT, *, keepdims: py_bool = False + ) -> OutT: ... @overload - def all( - self, - axis: None | _ShapeLike = ..., - out: _NdArraySubClass = ..., - keepdims: builtins.bool = ..., - *, - where: _ArrayLikeBool_co = ..., - ) -> _NdArraySubClass: ... + def argmin[OutT: _ArrayInt_co]( + self, /, axis: SupportsIndex | None = None, *, out: OutT, keepdims: py_bool = False + ) -> OutT: ... + # Keep in sync with `MaskedArray.round` + @overload # out=None (default) + def round(self, /, decimals: SupportsIndex = 0, out: None = None) -> Self: ... + @overload # out=ndarray + def round[ArrayT: ndarray](self, /, decimals: SupportsIndex, out: ArrayT) -> ArrayT: ... @overload - def any( - self, - axis: None = ..., - out: None = ..., - keepdims: L[False] = ..., - *, - where: _ArrayLikeBool_co = ..., - ) -> np.bool: ... + def round[ArrayT: ndarray](self, /, decimals: SupportsIndex = 0, *, out: ArrayT) -> ArrayT: ... + + @overload # out=None (default) + def choose(self, /, choices: ArrayLike, out: None = None, mode: _ModeKind = "raise") -> NDArray[Any]: ... + @overload # out=ndarray + def choose[ArrayT: ndarray](self, /, choices: ArrayLike, out: ArrayT, mode: _ModeKind = "raise") -> ArrayT: ... + + # TODO: Annotate kwargs with an unpacked `TypedDict` + @overload # out: None (default) + def clip(self, /, min: ArrayLike, max: ArrayLike | None = None, out: None = None, **kwargs: Any) -> NDArray[Any]: ... @overload - def any( - self, - axis: None | _ShapeLike = ..., - out: None = ..., - keepdims: builtins.bool = ..., - *, - where: _ArrayLikeBool_co = ..., - ) -> Any: ... + def clip(self, /, min: None, max: ArrayLike, out: None = None, **kwargs: Any) -> NDArray[Any]: ... @overload - def any( - self, - axis: None | _ShapeLike = ..., - out: _NdArraySubClass = ..., - keepdims: builtins.bool = ..., - *, - where: _ArrayLikeBool_co = ..., - ) -> _NdArraySubClass: ... 
- + def clip(self, /, min: None = None, *, max: ArrayLike, out: None = None, **kwargs: Any) -> NDArray[Any]: ... + @overload # out: ndarray + def clip[ArrayT: ndarray](self, /, min: ArrayLike, max: ArrayLike | None, out: ArrayT, **kwargs: Any) -> ArrayT: ... @overload - def argmax( - self, - axis: None = ..., - out: None = ..., - *, - keepdims: L[False] = ..., - ) -> intp: ... + def clip[ArrayT: ndarray](self, /, min: ArrayLike, max: ArrayLike | None = None, *, out: ArrayT, **kwargs: Any) -> ArrayT: ... @overload - def argmax( - self, - axis: SupportsIndex = ..., - out: None = ..., - *, - keepdims: builtins.bool = ..., - ) -> Any: ... + def clip[ArrayT: ndarray](self, /, min: None, max: ArrayLike, out: ArrayT, **kwargs: Any) -> ArrayT: ... @overload - def argmax( - self, - axis: None | SupportsIndex = ..., - out: _NdArraySubClass = ..., - *, - keepdims: builtins.bool = ..., - ) -> _NdArraySubClass: ... + def clip[ArrayT: ndarray](self, /, min: None = None, *, max: ArrayLike, out: ArrayT, **kwargs: Any) -> ArrayT: ... @overload - def argmin( - self, - axis: None = ..., - out: None = ..., - *, - keepdims: L[False] = ..., - ) -> intp: ... + def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None = None, out: None = None) -> NDArray[Any]: ... @overload - def argmin( - self, - axis: SupportsIndex = ..., - out: None = ..., - *, - keepdims: builtins.bool = ..., - ) -> Any: ... + def compress[ArrayT: ndarray](self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None, out: ArrayT) -> ArrayT: ... @overload - def argmin( - self, - axis: None | SupportsIndex = ..., - out: _NdArraySubClass = ..., - *, - keepdims: builtins.bool = ..., - ) -> _NdArraySubClass: ... - - def argsort( - self, - axis: None | SupportsIndex = ..., - kind: None | _SortKind = ..., - order: None | str | Sequence[str] = ..., - *, - stable: None | bool = ..., - ) -> NDArray[Any]: ... 
+ def compress[ArrayT: ndarray]( + self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None = None, *, out: ArrayT + ) -> ArrayT: ... + # Keep in sync with `MaskedArray.cumprod` + @overload # out: None (default) + def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> NDArray[Any]: ... + @overload # out: ndarray + def cumprod[ArrayT: ndarray](self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: ArrayT) -> ArrayT: ... @overload - def choose( - self, - choices: ArrayLike, - out: None = ..., - mode: _ModeKind = ..., - ) -> NDArray[Any]: ... - @overload - def choose( - self, - choices: ArrayLike, - out: _NdArraySubClass = ..., - mode: _ModeKind = ..., - ) -> _NdArraySubClass: ... + def cumprod[ArrayT: ndarray]( + self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: ArrayT + ) -> ArrayT: ... + # Keep in sync with `MaskedArray.cumsum` + @overload # out: None (default) + def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> NDArray[Any]: ... + @overload # out: ndarray + def cumsum[ArrayT: ndarray](self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: ArrayT) -> ArrayT: ... @overload - def clip( - self, - min: ArrayLike = ..., - max: None | ArrayLike = ..., - out: None = ..., - **kwargs: Any, - ) -> NDArray[Any]: ... + def cumsum[ArrayT: ndarray]( + self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: ArrayT + ) -> ArrayT: ... + @overload - def clip( + def max( self, - min: None = ..., - max: ArrayLike = ..., - out: None = ..., - **kwargs: Any, - ) -> NDArray[Any]: ... + /, + axis: _ShapeLike | None = None, + out: None = None, + *, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... 
@overload - def clip( + def max[ArrayT: ndarray]( self, - min: ArrayLike = ..., - max: None | ArrayLike = ..., - out: _NdArraySubClass = ..., - **kwargs: Any, - ) -> _NdArraySubClass: ... + /, + axis: _ShapeLike | None, + out: ArrayT, + *, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... @overload - def clip( + def max[ArrayT: ndarray]( self, - min: None = ..., - max: ArrayLike = ..., - out: _NdArraySubClass = ..., - **kwargs: Any, - ) -> _NdArraySubClass: ... + /, + axis: _ShapeLike | None = None, + *, + out: ArrayT, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... @overload - def compress( - self, - a: ArrayLike, - axis: None | SupportsIndex = ..., - out: None = ..., - ) -> NDArray[Any]: ... - @overload - def compress( + def min( self, - a: ArrayLike, - axis: None | SupportsIndex = ..., - out: _NdArraySubClass = ..., - ) -> _NdArraySubClass: ... - - def conj(self: _ArraySelf) -> _ArraySelf: ... - - def conjugate(self: _ArraySelf) -> _ArraySelf: ... - + /, + axis: _ShapeLike | None = None, + out: None = None, + *, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... @overload - def cumprod( + def min[ArrayT: ndarray]( self, - axis: None | SupportsIndex = ..., - dtype: DTypeLike = ..., - out: None = ..., - ) -> NDArray[Any]: ... + /, + axis: _ShapeLike | None, + out: ArrayT, + *, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... @overload - def cumprod( + def min[ArrayT: ndarray]( self, - axis: None | SupportsIndex = ..., - dtype: DTypeLike = ..., - out: _NdArraySubClass = ..., - ) -> _NdArraySubClass: ... 
+ /, + axis: _ShapeLike | None = None, + *, + out: ArrayT, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... @overload - def cumsum( - self, - axis: None | SupportsIndex = ..., - dtype: DTypeLike = ..., - out: None = ..., - ) -> NDArray[Any]: ... - @overload - def cumsum( + def sum( self, - axis: None | SupportsIndex = ..., - dtype: DTypeLike = ..., - out: _NdArraySubClass = ..., - ) -> _NdArraySubClass: ... - + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... @overload - def max( + def sum[ArrayT: ndarray]( self, - axis: None | _ShapeLike = ..., - out: None = ..., - keepdims: builtins.bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., - ) -> Any: ... + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: ArrayT, + *, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... @overload - def max( + def sum[ArrayT: ndarray]( self, - axis: None | _ShapeLike = ..., - out: _NdArraySubClass = ..., - keepdims: builtins.bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., - ) -> _NdArraySubClass: ... + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... 
@overload - def mean( + def prod( self, - axis: None | _ShapeLike = ..., - dtype: DTypeLike = ..., - out: None = ..., - keepdims: builtins.bool = ..., + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, *, - where: _ArrayLikeBool_co = ..., + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload - def mean( + def prod[ArrayT: ndarray]( self, - axis: None | _ShapeLike = ..., - dtype: DTypeLike = ..., - out: _NdArraySubClass = ..., - keepdims: builtins.bool = ..., + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: ArrayT, *, - where: _ArrayLikeBool_co = ..., - ) -> _NdArraySubClass: ... - - @overload - def min( - self, - axis: None | _ShapeLike = ..., - out: None = ..., - keepdims: builtins.bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., - ) -> Any: ... + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... @overload - def min( + def prod[ArrayT: ndarray]( self, - axis: None | _ShapeLike = ..., - out: _NdArraySubClass = ..., - keepdims: builtins.bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., - ) -> _NdArraySubClass: ... + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... 
@overload - def prod( + def mean( self, - axis: None | _ShapeLike = ..., - dtype: DTypeLike = ..., - out: None = ..., - keepdims: builtins.bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: py_bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload - def prod( + def mean[ArrayT: ndarray]( self, - axis: None | _ShapeLike = ..., - dtype: DTypeLike = ..., - out: _NdArraySubClass = ..., - keepdims: builtins.bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., - ) -> _NdArraySubClass: ... - - @overload - def round( - self: _ArraySelf, - decimals: SupportsIndex = ..., - out: None = ..., - ) -> _ArraySelf: ... + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: ArrayT, + *, + keepdims: py_bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... @overload - def round( + def mean[ArrayT: ndarray]( self, - decimals: SupportsIndex = ..., - out: _NdArraySubClass = ..., - ) -> _NdArraySubClass: ... + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: py_bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... @overload def std( self, - axis: None | _ShapeLike = ..., - dtype: DTypeLike = ..., - out: None = ..., - ddof: float = ..., - keepdims: builtins.bool = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, *, - where: _ArrayLikeBool_co = ..., + keepdims: py_bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> Any: ... 
@overload - def std( + def std[ArrayT: ndarray]( self, - axis: None | _ShapeLike = ..., - dtype: DTypeLike = ..., - out: _NdArraySubClass = ..., - ddof: float = ..., - keepdims: builtins.bool = ..., + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: ArrayT, + ddof: float = 0, *, - where: _ArrayLikeBool_co = ..., - ) -> _NdArraySubClass: ... - - @overload - def sum( - self, - axis: None | _ShapeLike = ..., - dtype: DTypeLike = ..., - out: None = ..., - keepdims: builtins.bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., - ) -> Any: ... + keepdims: py_bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ArrayT: ... @overload - def sum( + def std[ArrayT: ndarray]( self, - axis: None | _ShapeLike = ..., - dtype: DTypeLike = ..., - out: _NdArraySubClass = ..., - keepdims: builtins.bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., - ) -> _NdArraySubClass: ... + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ddof: float = 0, + keepdims: py_bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ArrayT: ... @overload def var( self, - axis: None | _ShapeLike = ..., - dtype: DTypeLike = ..., - out: None = ..., - ddof: float = ..., - keepdims: builtins.bool = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, *, - where: _ArrayLikeBool_co = ..., + keepdims: py_bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> Any: ... 
@overload - def var( + def var[ArrayT: ndarray]( self, - axis: None | _ShapeLike = ..., - dtype: DTypeLike = ..., - out: _NdArraySubClass = ..., - ddof: float = ..., - keepdims: builtins.bool = ..., - *, - where: _ArrayLikeBool_co = ..., - ) -> _NdArraySubClass: ... - -_DType = TypeVar("_DType", bound=dtype[Any]) -_DType_co = TypeVar("_DType_co", covariant=True, bound=dtype[Any]) -_FlexDType = TypeVar("_FlexDType", bound=dtype[flexible]) - -# TODO: Set the `bound` to something more suitable once we -# have proper shape support -_ShapeType = TypeVar("_ShapeType", bound=Any) -_ShapeType2 = TypeVar("_ShapeType2", bound=Any) -_NumberType = TypeVar("_NumberType", bound=number[Any]) - -if sys.version_info >= (3, 12): - from collections.abc import Buffer as _SupportsBuffer -else: - _SupportsBuffer = ( - bytes - | bytearray - | memoryview - | _array.array[Any] - | mmap.mmap - | NDArray[Any] - | generic - ) - -_T = TypeVar("_T") -_T_co = TypeVar("_T_co", covariant=True) -_T_contra = TypeVar("_T_contra", contravariant=True) -_2Tuple = tuple[_T, _T] -_CastingKind = L["no", "equiv", "safe", "same_kind", "unsafe"] - -_ArrayUInt_co = NDArray[np.bool | unsignedinteger[Any]] -_ArrayInt_co = NDArray[np.bool | integer[Any]] -_ArrayFloat_co = NDArray[np.bool | integer[Any] | floating[Any]] -_ArrayComplex_co = NDArray[np.bool | integer[Any] | floating[Any] | complexfloating[Any, Any]] -_ArrayNumber_co = NDArray[np.bool | number[Any]] -_ArrayTD64_co = NDArray[np.bool | integer[Any] | timedelta64] - -# Introduce an alias for `dtype` to avoid naming conflicts. -_dtype = dtype - -# `builtins.PyCapsule` unfortunately lacks annotations as of the moment; -# use `Any` as a stopgap measure -_PyCapsule = Any - -class _SupportsItem(Protocol[_T_co]): - def item(self, args: Any, /) -> _T_co: ... - -class _SupportsReal(Protocol[_T_co]): - @property - def real(self) -> _T_co: ... - -class _SupportsImag(Protocol[_T_co]): - @property - def imag(self) -> _T_co: ... 
- -class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): - __hash__: ClassVar[None] + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: ArrayT, + ddof: float = 0, + *, + keepdims: py_bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ArrayT: ... + @overload + def var[ArrayT: ndarray]( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ddof: float = 0, + keepdims: py_bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ArrayT: ... + +class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): + __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] @property - def base(self) -> None | NDArray[Any]: ... + def base(self) -> NDArray[Any] | None: ... @property def ndim(self) -> int: ... @property def size(self) -> int: ... + @property - def real( - self: ndarray[_ShapeType, dtype[_SupportsReal[_ScalarType]]], # type: ignore[type-var] - ) -> ndarray[_ShapeType, _dtype[_ScalarType]]: ... + def real[ScalarT: generic](self: _HasDTypeWithRealAndImag[ScalarT, object], /) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @real.setter - def real(self, value: ArrayLike) -> None: ... + def real(self, value: ArrayLike, /) -> None: ... + @property - def imag( - self: ndarray[_ShapeType, dtype[_SupportsImag[_ScalarType]]], # type: ignore[type-var] - ) -> ndarray[_ShapeType, _dtype[_ScalarType]]: ... + def imag[ScalarT: generic](self: _HasDTypeWithRealAndImag[object, ScalarT], /) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @imag.setter - def imag(self, value: ArrayLike) -> None: ... + def imag(self, value: ArrayLike, /) -> None: ... 
+ def __new__( - cls: type[_ArraySelf], + cls, shape: _ShapeLike, - dtype: DTypeLike = ..., - buffer: None | _SupportsBuffer = ..., + dtype: DTypeLike | None = ..., + buffer: Buffer | None = ..., offset: SupportsIndex = ..., - strides: None | _ShapeLike = ..., + strides: _ShapeLike | None = ..., order: _OrderKACF = ..., - ) -> _ArraySelf: ... + ) -> Self: ... - if sys.version_info >= (3, 12): - def __buffer__(self, flags: int, /) -> memoryview: ... + def __buffer__(self, flags: int, /) -> memoryview: ... - def __class_getitem__(self, item: Any) -> GenericAlias: ... + def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... @overload - def __array__( - self, dtype: None = ..., /, *, copy: None | bool = ... - ) -> ndarray[Any, _DType_co]: ... + def __array__(self, dtype: None = None, /, *, copy: py_bool | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __array__( - self, dtype: _DType, /, *, copy: None | bool = ... - ) -> ndarray[Any, _DType]: ... + def __array__[DTypeT: _dtype](self, dtype: DTypeT, /, *, copy: py_bool | None = None) -> ndarray[_ShapeT_co, DTypeT]: ... def __array_ufunc__( self, @@ -1492,2433 +2128,3941 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): # NOTE: In practice any object is accepted by `obj`, but as `__array_finalize__` # is a pseudo-abstract method the type has been narrowed down in order to # grant subclasses a bit more flexibility - def __array_finalize__(self, obj: None | NDArray[Any], /) -> None: ... + def __array_finalize__(self, obj: NDArray[Any] | None, /) -> None: ... - def __array_wrap__( + def __array_wrap__[ShapeT: _Shape, DTypeT: _dtype]( self, - array: ndarray[_ShapeType2, _DType], - context: None | tuple[ufunc, tuple[Any, ...], int] = ..., - return_scalar: builtins.bool = ..., + array: ndarray[ShapeT, DTypeT], + context: tuple[ufunc, tuple[Any, ...], int] | None = ..., + return_scalar: py_bool = ..., /, - ) -> ndarray[_ShapeType2, _DType]: ... 
+ ) -> ndarray[ShapeT, DTypeT]: ... + # Keep in sync with `MaskedArray.__getitem__` @overload - def __getitem__(self, key: ( - NDArray[integer[Any]] - | NDArray[np.bool] - | tuple[NDArray[integer[Any]] | NDArray[np.bool], ...] - )) -> ndarray[Any, _DType_co]: ... + def __getitem__(self, key: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> ndarray[_AnyShape, _DTypeT_co]: ... @overload - def __getitem__(self, key: SupportsIndex | tuple[SupportsIndex, ...]) -> Any: ... + def __getitem__(self, key: SupportsIndex | tuple[SupportsIndex, ...], /) -> Any: ... @overload - def __getitem__(self, key: ( - None - | slice - | ellipsis - | SupportsIndex - | _ArrayLikeInt_co - | tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...] - )) -> ndarray[Any, _DType_co]: ... + def __getitem__(self, key: _ToIndices, /) -> ndarray[_AnyShape, _DTypeT_co]: ... + @overload # can be of any shape + def __getitem__(self: NDArray[void], key: str, /) -> ndarray[_ShapeT_co | _AnyShape]: ... @overload - def __getitem__(self: NDArray[void], key: str) -> NDArray[Any]: ... - @overload - def __getitem__(self: NDArray[void], key: list[str]) -> ndarray[_ShapeType, _dtype[void]]: ... + def __getitem__(self: NDArray[void], key: list[str], /) -> ndarray[_ShapeT_co | _AnyShape, _dtype[void]]: ... + + @overload # flexible | object_ | bool + def __setitem__( + self: ndarray[Any, _dtype[flexible | object_ | bool_] | dtypes.StringDType], + key: _ToIndices, + value: object, + /, + ) -> None: ... + @overload # integer + def __setitem__( + self: NDArray[integer], + key: _ToIndices, + value: _ConvertibleToInt | _NestedSequence[_ConvertibleToInt] | _ArrayLikeInt_co, + /, + ) -> None: ... + @overload # floating + def __setitem__( + self: NDArray[floating], + key: _ToIndices, + value: _ConvertibleToFloat | _NestedSequence[_ConvertibleToFloat | None] | _ArrayLikeFloat_co | None, + /, + ) -> None: ... 
+ @overload # complexfloating + def __setitem__( + self: NDArray[complexfloating], + key: _ToIndices, + value: _ConvertibleToComplex | _NestedSequence[_ConvertibleToComplex | None] | _ArrayLikeNumber_co | None, + /, + ) -> None: ... + @overload # timedelta64 + def __setitem__( + self: NDArray[timedelta64], + key: _ToIndices, + value: _ConvertibleToTD64 | _NestedSequence[_ConvertibleToTD64], + /, + ) -> None: ... + @overload # datetime64 + def __setitem__( + self: NDArray[datetime64], + key: _ToIndices, + value: _ConvertibleToDT64 | _NestedSequence[_ConvertibleToDT64], + /, + ) -> None: ... + @overload # void + def __setitem__(self: NDArray[void], key: str | list[str], value: object, /) -> None: ... + @overload # catch-all + def __setitem__(self, key: _ToIndices, value: ArrayLike, /) -> None: ... @property def ctypes(self) -> _ctypes[int]: ... + + # @property - def shape(self) -> _Shape: ... + def shape(self) -> _ShapeT_co: ... @shape.setter + @deprecated("In-place shape modification has been deprecated in NumPy 2.5.") def shape(self, value: _ShapeLike) -> None: ... + + # @property def strides(self) -> _Shape: ... @strides.setter + @deprecated("Setting the strides on a NumPy array has been deprecated in NumPy 2.4") def strides(self, value: _ShapeLike) -> None: ... - def byteswap(self: _ArraySelf, inplace: builtins.bool = ...) -> _ArraySelf: ... - def fill(self, value: Any) -> None: ... + + # + def byteswap(self, inplace: py_bool = ...) -> Self: ... @property - def flat(self: _NdArraySubClass) -> flatiter[_NdArraySubClass]: ... + def flat(self) -> flatiter[Self]: ... - # Use the same output type as that of the underlying `generic` - @overload - def item( - self: ndarray[Any, _dtype[_SupportsItem[_T]]], # type: ignore[type-var] - *args: SupportsIndex, - ) -> _T: ... - @overload + @overload # use the same output type as that of the underlying `generic` + def item[T](self: NDArray[generic[T]], i0: SupportsIndex | tuple[SupportsIndex, ...] 
= ..., /, *args: SupportsIndex) -> T: ... + @overload # special casing for `StringDType`, which has no scalar type def item( - self: ndarray[Any, _dtype[_SupportsItem[_T]]], # type: ignore[type-var] - args: tuple[SupportsIndex, ...], + self: ndarray[Any, dtypes.StringDType], + arg0: SupportsIndex | tuple[SupportsIndex, ...] = ..., /, - ) -> _T: ... + *args: SupportsIndex, + ) -> str: ... + # keep in sync with `ma.MaskedArray.tolist` + @overload # this first overload prevents mypy from over-eagerly selecting `tuple[()]` in case of `_AnyShape` + def tolist[T](self: ndarray[tuple[Never], _dtype[generic[T]]], /) -> Any: ... @overload - def resize(self, new_shape: _ShapeLike, /, *, refcheck: builtins.bool = ...) -> None: ... + def tolist[T](self: ndarray[tuple[()], _dtype[generic[T]]], /) -> T: ... @overload - def resize(self, *new_shape: SupportsIndex, refcheck: builtins.bool = ...) -> None: ... + def tolist[T](self: ndarray[tuple[int], _dtype[generic[T]]], /) -> list[T]: ... + @overload + def tolist[T](self: ndarray[tuple[int, int], _dtype[generic[T]]], /) -> list[list[T]]: ... + @overload + def tolist[T](self: ndarray[tuple[int, int, int], _dtype[generic[T]]], /) -> list[list[list[T]]]: ... + @overload + def tolist(self, /) -> Any: ... - def setflags( - self, write: builtins.bool = ..., align: builtins.bool = ..., uic: builtins.bool = ... - ) -> None: ... + @overload + @deprecated("Resizing a NumPy array inplace has been deprecated in NumPy 2.5") + def resize(self, new_shape: _ShapeLike, /, *, refcheck: py_bool = True) -> None: ... + @overload + @deprecated("Resizing a NumPy array inplace has been deprecated in NumPy 2.5") + def resize(self, /, *new_shape: SupportsIndex, refcheck: py_bool = True) -> None: ... + # keep in sync with `ma.MaskedArray.squeeze` def squeeze( self, - axis: None | SupportsIndex | tuple[SupportsIndex, ...] = ..., - ) -> ndarray[Any, _DType_co]: ... + /, + axis: SupportsIndex | tuple[SupportsIndex, ...] 
| None = ..., + ) -> ndarray[_AnyShape, _DTypeT_co]: ... - def swapaxes( - self, - axis1: SupportsIndex, - axis2: SupportsIndex, - ) -> ndarray[Any, _DType_co]: ... + def swapaxes(self, axis1: SupportsIndex, axis2: SupportsIndex, /) -> Self: ... @overload - def transpose(self: _ArraySelf, axes: None | _ShapeLike, /) -> _ArraySelf: ... + def transpose(self, axes: _ShapeLike | None, /) -> Self: ... @overload - def transpose(self: _ArraySelf, *axes: SupportsIndex) -> _ArraySelf: ... - - def argpartition( - self, - kth: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., - kind: _PartitionKind = ..., - order: None | str | Sequence[str] = ..., - ) -> NDArray[intp]: ... - - def diagonal( - self, - offset: SupportsIndex = ..., - axis1: SupportsIndex = ..., - axis2: SupportsIndex = ..., - ) -> ndarray[Any, _DType_co]: ... + def transpose(self, /, *axes: SupportsIndex) -> Self: ... - # 1D + 1D returns a scalar; - # all other with at least 1 non-0D array return an ndarray. @overload - def dot(self, b: _ScalarLike_co, out: None = ...) -> NDArray[Any]: ... + def all( + self, + axis: None = None, + out: None = None, + keepdims: L[False, 0] = False, + *, + where: _ArrayLikeBool_co = True + ) -> bool_: ... @overload - def dot(self, b: ArrayLike, out: None = ...) -> Any: ... # type: ignore[misc] + def all( + self, + axis: int | tuple[int, ...] | None = None, + out: None = None, + keepdims: SupportsIndex = False, + *, + where: _ArrayLikeBool_co = True, + ) -> bool_ | NDArray[bool_]: ... + @overload + def all[ArrayT: ndarray]( + self, + axis: int | tuple[int, ...] | None, + out: ArrayT, + keepdims: SupportsIndex = False, + *, + where: _ArrayLikeBool_co = True, + ) -> ArrayT: ... @overload - def dot(self, b: ArrayLike, out: _NdArraySubClass) -> _NdArraySubClass: ... + def all[ArrayT: ndarray]( + self, + axis: int | tuple[int, ...] | None = None, + *, + out: ArrayT, + keepdims: SupportsIndex = False, + where: _ArrayLikeBool_co = True, + ) -> ArrayT: ... 
- # `nonzero()` is deprecated for 0d arrays/generics - def nonzero(self) -> tuple[NDArray[intp], ...]: ... + @overload + def any( + self, + axis: None = None, + out: None = None, + keepdims: L[False, 0] = False, + *, + where: _ArrayLikeBool_co = True + ) -> bool_: ... + @overload + def any( + self, + axis: int | tuple[int, ...] | None = None, + out: None = None, + keepdims: SupportsIndex = False, + *, + where: _ArrayLikeBool_co = True, + ) -> bool_ | NDArray[bool_]: ... + @overload + def any[ArrayT: ndarray]( + self, + axis: int | tuple[int, ...] | None, + out: ArrayT, + keepdims: SupportsIndex = False, + *, + where: _ArrayLikeBool_co = True, + ) -> ArrayT: ... + @overload + def any[ArrayT: ndarray]( + self, + axis: int | tuple[int, ...] | None = None, + *, + out: ArrayT, + keepdims: SupportsIndex = False, + where: _ArrayLikeBool_co = True, + ) -> ArrayT: ... + # + @overload def partition( self, - kth: _ArrayLikeInt_co, - axis: SupportsIndex = ..., - kind: _PartitionKind = ..., - order: None | str | Sequence[str] = ..., + kth: _ArrayLikeInt, + /, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: None = None, + ) -> None: ... + @overload + def partition( + self: NDArray[void], + kth: _ArrayLikeInt, + /, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, ) -> None: ... - # `put` is technically available to `generic`, - # but is pointless as `generic`s are immutable - def put( + # keep in sync with `ma.core.MaskedArray.argpartition` + # keep roughly in sync with `_core.fromnumeric.argpartition` + @overload # axis: None + def argpartition( self, - ind: _ArrayLikeInt_co, - v: ArrayLike, - mode: _ModeKind = ..., - ) -> None: ... + kth: _ArrayLikeInt, + /, + axis: None, + kind: _PartitionKind = "introselect", + order: None = None, + ) -> ndarray[tuple[int], _dtype[intp]]: ... 
+ @overload # axis: index (default) + def argpartition( + self, + kth: _ArrayLikeInt, + /, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: None = None, + ) -> ndarray[_ShapeT_co, _dtype[intp]]: ... + @overload # void, axis: None + def argpartition( + self: NDArray[void], + kth: _ArrayLikeInt, + /, + axis: None, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, + ) -> ndarray[tuple[int], _dtype[intp]]: ... + @overload # void, axis: index (default) + def argpartition( + self: NDArray[void], + kth: _ArrayLikeInt, + /, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, + ) -> ndarray[_ShapeT_co, _dtype[intp]]: ... + + # keep in sync with `ma.MaskedArray.diagonal` + def diagonal( + self, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + ) -> ndarray[_AnyShape, _DTypeT_co]: ... + + # 1D + 1D returns a scalar; + # all other with at least 1 non-0D array return an ndarray. + @overload + def dot(self, b: _ScalarLike_co, /, out: None = None) -> NDArray[Any]: ... + @overload + def dot(self, b: ArrayLike, /, out: None = None) -> Any: ... + @overload + def dot[ArrayT: ndarray](self, b: ArrayLike, /, out: ArrayT) -> ArrayT: ... + + # `nonzero()` raises for 0d arrays/generics + def nonzero(self) -> tuple[ndarray[tuple[int], _dtype[intp]], ...]: ... @overload - def searchsorted( # type: ignore[misc] + def searchsorted( self, # >= 1D array v: _ScalarLike_co, # 0D array-like - side: _SortSide = ..., - sorter: None | _ArrayLikeInt_co = ..., + /, + side: _SortSide = "left", + sorter: _ArrayLikeInt_co | None = None, ) -> intp: ... @overload def searchsorted( self, # >= 1D array v: ArrayLike, - side: _SortSide = ..., - sorter: None | _ArrayLikeInt_co = ..., + /, + side: _SortSide = "left", + sorter: _ArrayLikeInt_co | None = None, ) -> NDArray[intp]: ... 
- def setfield( - self, - val: ArrayLike, - dtype: DTypeLike, - offset: SupportsIndex = ..., - ) -> None: ... - def sort( self, - axis: SupportsIndex = ..., - kind: None | _SortKind = ..., - order: None | str | Sequence[str] = ..., + /, + axis: SupportsIndex = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, *, - stable: None | bool = ..., + stable: py_bool | None = None, ) -> None: ... + # Keep in sync with `MaskedArray.trace` @overload def trace( self, # >= 2D array - offset: SupportsIndex = ..., - axis1: SupportsIndex = ..., - axis2: SupportsIndex = ..., - dtype: DTypeLike = ..., - out: None = ..., + /, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + out: None = None, ) -> Any: ... @overload - def trace( + def trace[ArrayT: ndarray]( self, # >= 2D array - offset: SupportsIndex = ..., - axis1: SupportsIndex = ..., - axis2: SupportsIndex = ..., - dtype: DTypeLike = ..., - out: _NdArraySubClass = ..., - ) -> _NdArraySubClass: ... + /, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ) -> ArrayT: ... + @overload + def trace[ArrayT: ndarray]( + self, # >= 2D array + /, + offset: SupportsIndex, + axis1: SupportsIndex, + axis2: SupportsIndex, + dtype: DTypeLike | None, + out: ArrayT, + ) -> ArrayT: ... @overload - def take( # type: ignore[misc] - self: NDArray[_ScalarType], + def take[ScalarT: generic]( + self: NDArray[ScalarT], indices: _IntLike_co, - axis: None | SupportsIndex = ..., - out: None = ..., + /, + axis: SupportsIndex | None = ..., + out: None = None, mode: _ModeKind = ..., - ) -> _ScalarType: ... + ) -> ScalarT: ... 
@overload - def take( # type: ignore[misc] + def take( self, indices: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., - out: None = ..., + /, + axis: SupportsIndex | None = ..., + out: None = None, mode: _ModeKind = ..., - ) -> ndarray[Any, _DType_co]: ... + ) -> ndarray[_AnyShape, _DTypeT_co]: ... @overload - def take( + def take[ArrayT: ndarray]( self, indices: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., - out: _NdArraySubClass = ..., + /, + axis: SupportsIndex | None = ..., + *, + out: ArrayT, mode: _ModeKind = ..., - ) -> _NdArraySubClass: ... - - def repeat( + ) -> ArrayT: ... + @overload + def take[ArrayT: ndarray]( self, - repeats: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., - ) -> ndarray[Any, _DType_co]: ... + indices: _ArrayLikeInt_co, + /, + axis: SupportsIndex | None, + out: ArrayT, + mode: _ModeKind = ..., + ) -> ArrayT: ... - def flatten( - self, - order: _OrderKACF = ..., - ) -> ndarray[Any, _DType_co]: ... + # keep in sync with `ma.MaskedArray.repeat` + @overload + def repeat(self, repeats: _ArrayLikeInt_co, /, axis: None = None) -> ndarray[tuple[int], _DTypeT_co]: ... + @overload + def repeat(self, repeats: _ArrayLikeInt_co, /, axis: SupportsIndex) -> ndarray[_AnyShape, _DTypeT_co]: ... - def ravel( - self, - order: _OrderKACF = ..., - ) -> ndarray[Any, _DType_co]: ... + # keep in sync with `ma.MaskedArray.flatten` and `ma.MaskedArray.ravel` + def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ... + def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ... - @overload + # Keep in sync with `MaskedArray.reshape` + # NOTE: reshape also accepts negative integers, so we can't use integer literals + @overload # (None) + def reshape(self, shape: None, /, *, order: _OrderACF = "C", copy: py_bool | None = None) -> Self: ... 
+ @overload # (empty_sequence) + def reshape( # mypy false positive + self, + shape: Sequence[Never], + /, + *, + order: _OrderACF = "C", + copy: py_bool | None = None, + ) -> ndarray[tuple[()], _DTypeT_co]: ... + @overload # (() | (int) | (int, int) | ....) # up to 8-d + def reshape[ + AnyShapeT: ( + tuple[()], # 0d + tuple[int], # 1d + tuple[int, int], # 2d + tuple[int, int, int], # 3d + tuple[int, int, int, int], # 4d + tuple[int, int, int, int, int], # 5d + tuple[int, int, int, int, int, int], # 6d + tuple[int, int, int, int, int, int, int], # 7d + tuple[int, int, int, int, int, int, int, int], # 8d + ) + ]( + self, + shape: AnyShapeT, + /, + *, + order: _OrderACF = "C", + copy: py_bool | None = None, + ) -> ndarray[AnyShapeT, _DTypeT_co]: ... + @overload # (index) def reshape( self, - shape: _ShapeLike, + size1: SupportsIndex, /, *, - order: _OrderACF = ..., - copy: None | bool = ..., - ) -> ndarray[Any, _DType_co]: ... - @overload + order: _OrderACF = "C", + copy: py_bool | None = None, + ) -> ndarray[tuple[int], _DTypeT_co]: ... + @overload # (index, index) def reshape( self, + size1: SupportsIndex, + size2: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: py_bool | None = None, + ) -> ndarray[tuple[int, int], _DTypeT_co]: ... + @overload # (index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: py_bool | None = None, + ) -> ndarray[tuple[int, int, int], _DTypeT_co]: ... + @overload # (index, index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + size4: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: py_bool | None = None, + ) -> ndarray[tuple[int, int, int, int], _DTypeT_co]: ... 
+ @overload # (int, *(index, ...)) + def reshape( + self, + size0: SupportsIndex, + /, *shape: SupportsIndex, - order: _OrderACF = ..., - copy: None | bool = ..., - ) -> ndarray[Any, _DType_co]: ... + order: _OrderACF = "C", + copy: py_bool | None = None, + ) -> ndarray[_AnyShape, _DTypeT_co]: ... + @overload # (sequence[index]) + def reshape( + self, + shape: Sequence[SupportsIndex], + /, + *, + order: _OrderACF = "C", + copy: py_bool | None = None, + ) -> ndarray[_AnyShape, _DTypeT_co]: ... @overload - def astype( + def astype[ScalarT: generic]( self, - dtype: _DTypeLike[_ScalarType], + dtype: _DTypeLike[ScalarT], order: _OrderKACF = ..., casting: _CastingKind = ..., - subok: builtins.bool = ..., - copy: builtins.bool | _CopyMode = ..., - ) -> NDArray[_ScalarType]: ... + subok: py_bool = ..., + copy: py_bool | _CopyMode = ..., + ) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload def astype( self, - dtype: DTypeLike, + dtype: DTypeLike | None, order: _OrderKACF = ..., casting: _CastingKind = ..., - subok: builtins.bool = ..., - copy: builtins.bool | _CopyMode = ..., - ) -> NDArray[Any]: ... + subok: py_bool = ..., + copy: py_bool | _CopyMode = ..., + ) -> ndarray[_ShapeT_co, _dtype]: ... + + # + @overload # () + def view(self, /) -> Self: ... + @overload # (dtype: T) + def view[DTypeT: _dtype](self, /, dtype: DTypeT | _HasDType[DTypeT]) -> ndarray[_ShapeT_co, DTypeT]: ... + @overload # (dtype: dtype[T]) + def view[ScalarT: generic](self, /, dtype: _DTypeLike[ScalarT]) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... + @overload # (type: T) + def view[ArrayT: ndarray](self, /, *, type: type[ArrayT]) -> ArrayT: ... + @overload # (_: T) + def view[ArrayT: ndarray](self, /, dtype: type[ArrayT]) -> ArrayT: ... + @overload # (dtype: ?) + def view(self, /, dtype: DTypeLike) -> ndarray[_ShapeT_co, _dtype]: ... + @overload # (dtype: ?, type: T) + def view[ArrayT: ndarray](self, /, dtype: DTypeLike, type: type[ArrayT]) -> ArrayT: ... 
+ + def setfield(self, val: ArrayLike, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> None: ... + @overload + def getfield[ScalarT: generic](self, /, dtype: _DTypeLike[ScalarT], offset: SupportsIndex = 0) -> NDArray[ScalarT]: ... + @overload + def getfield(self, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> NDArray[Any]: ... + + def __index__(self: NDArray[integer], /) -> int: ... + def __complex__(self: NDArray[number | bool_ | object_], /) -> complex: ... + def __len__(self) -> int: ... + def __contains__(self, value: object, /) -> py_bool: ... + + # NOTE: This weird `Never` tuple works around a strange mypy issue where it assigns + # `tuple[int]` to `tuple[Never]` or `tuple[int, int]` to `tuple[Never, Never]`. + # This way the bug only occurs for 9-D arrays, which are probably not very common. + @overload + def __iter__( + self: ndarray[tuple[Never, Never, Never, Never, Never, Never, Never, Never, Never], Any], / + ) -> Iterator[Any]: ... + @overload # == 1-d & dtype[T \ object_] + def __iter__[ScalarT: _ScalarNotObject](self: ndarray[tuple[int], _dtype[ScalarT]], /) -> Iterator[ScalarT]: ... + @overload # == 1-d & StringDType + def __iter__(self: ndarray[tuple[int], dtypes.StringDType], /) -> Iterator[str]: ... + @overload # >= 2-d + def __iter__[DTypeT: _dtype]( + self: ndarray[tuple[int, int, *tuple[int, ...]], DTypeT], / + ) -> Iterator[ndarray[_AnyShape, DTypeT]]: ... + @overload # ?-d + def __iter__(self, /) -> Iterator[Any]: ... + + # @overload - def view(self: _ArraySelf) -> _ArraySelf: ... + def __lt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[bool_]: ... @overload - def view(self, type: type[_NdArraySubClass]) -> _NdArraySubClass: ... + def __lt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[bool_]: ... @overload - def view(self, dtype: _DTypeLike[_ScalarType]) -> NDArray[_ScalarType]: ... + def __lt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[bool_]: ... 
@overload - def view(self, dtype: DTypeLike) -> NDArray[Any]: ... + def __lt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bool_]: ... @overload - def view( - self, - dtype: DTypeLike, - type: type[_NdArraySubClass], - ) -> _NdArraySubClass: ... - + def __lt__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[bool_]: ... @overload - def getfield( - self, - dtype: _DTypeLike[_ScalarType], - offset: SupportsIndex = ... - ) -> NDArray[_ScalarType]: ... + def __lt__(self: NDArray[object_], other: object, /) -> NDArray[bool_]: ... @overload - def getfield( - self, - dtype: DTypeLike, - offset: SupportsIndex = ... - ) -> NDArray[Any]: ... - - # Dispatch to the underlying `generic` via protocols - def __int__( - self: NDArray[SupportsInt], # type: ignore[type-var] - ) -> int: ... - - def __float__( - self: NDArray[SupportsFloat], # type: ignore[type-var] - ) -> float: ... - - def __complex__( - self: NDArray[SupportsComplex], # type: ignore[type-var] - ) -> complex: ... - - def __index__( - self: NDArray[SupportsIndex], # type: ignore[type-var] - ) -> int: ... - - def __len__(self) -> int: ... - def __setitem__(self, key, value): ... - def __iter__(self) -> Any: ... - def __contains__(self, key) -> builtins.bool: ... - - # The last overload is for catching recursive objects whose - # nesting is too deep. - # The first overload is for catching `bytes` (as they are a subtype of - # `Sequence[int]`) and `str`. As `str` is a recursive sequence of - # strings, it will pass through the final overload otherwise + def __lt__(self, other: _ArrayLikeObject_co, /) -> NDArray[bool_]: ... + # @overload - def __lt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[np.bool]: ... - @overload - def __lt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[np.bool]: ... + def __le__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[bool_]: ... 
@overload - def __lt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[np.bool]: ... + def __le__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[bool_]: ... @overload - def __lt__(self: NDArray[object_], other: Any) -> NDArray[np.bool]: ... + def __le__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[bool_]: ... @overload - def __lt__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[np.bool]: ... - + def __le__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bool_]: ... @overload - def __le__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[np.bool]: ... + def __le__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[bool_]: ... @overload - def __le__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[np.bool]: ... + def __le__(self: NDArray[object_], other: object, /) -> NDArray[bool_]: ... @overload - def __le__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[np.bool]: ... + def __le__(self, other: _ArrayLikeObject_co, /) -> NDArray[bool_]: ... + + # @overload - def __le__(self: NDArray[object_], other: Any) -> NDArray[np.bool]: ... + def __gt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[bool_]: ... @overload - def __le__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[np.bool]: ... - + def __gt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[bool_]: ... @overload - def __gt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[np.bool]: ... + def __gt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[bool_]: ... @overload - def __gt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[np.bool]: ... + def __gt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bool_]: ... @overload - def __gt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[np.bool]: ... 
+ def __gt__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[bool_]: ... @overload - def __gt__(self: NDArray[object_], other: Any) -> NDArray[np.bool]: ... + def __gt__(self: NDArray[object_], other: object, /) -> NDArray[bool_]: ... @overload - def __gt__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[np.bool]: ... + def __gt__(self, other: _ArrayLikeObject_co, /) -> NDArray[bool_]: ... + # + @overload + def __ge__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[bool_]: ... @overload - def __ge__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[np.bool]: ... + def __ge__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[bool_]: ... @overload - def __ge__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[np.bool]: ... + def __ge__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[bool_]: ... @overload - def __ge__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[np.bool]: ... + def __ge__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bool_]: ... @overload - def __ge__(self: NDArray[object_], other: Any) -> NDArray[np.bool]: ... + def __ge__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[bool_]: ... @overload - def __ge__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[np.bool]: ... + def __ge__(self: NDArray[object_], other: object, /) -> NDArray[bool_]: ... + @overload + def __ge__(self, other: _ArrayLikeObject_co, /) -> NDArray[bool_]: ... # Unary ops + + # TODO: Uncomment once https://github.com/python/mypy/issues/14070 is fixed + # @overload + # def __abs__[ShapeT: _Shape](self: ndarray[ShapeT, dtypes.Complex64DType], /) -> ndarray[ShapeT, dtypes.Float32DType]: ... + # @overload + # def __abs__[ShapeT: _Shape](self: ndarray[ShapeT, dtypes.Complex128DType], /) -> ndarray[ShapeT, dtypes.Float64DType]: ... 
+ # @overload + # def __abs__[ShapeT: _Shape](self: ndarray[ShapeT, dtypes.CLongDoubleDType], /) -> ndarray[ShapeT, dtypes.LongDoubleDType]: ... + # @overload + # def __abs__[ShapeT: _Shape](self: ndarray[ShapeT, dtype[complex128]], /) -> ndarray[ShapeT, dtype[float64]]: ... @overload - def __abs__(self: NDArray[_UnknownType]) -> NDArray[Any]: ... + def __abs__[ShapeT: _Shape, NBitT: NBitBase]( + self: ndarray[ShapeT, _dtype[complexfloating[NBitT]]], / + ) -> ndarray[ShapeT, _dtype[floating[NBitT]]]: ... @overload - def __abs__(self: NDArray[np.bool]) -> NDArray[np.bool]: ... + def __abs__[ArrayT: NDArray[bool_ | integer | floating | timedelta64 | object_]](self: ArrayT, /) -> ArrayT: ... + + def __invert__[ArrayT: NDArray[bool_ | integer | object_]](self: ArrayT, /) -> ArrayT: ... + def __neg__[ArrayT: _ArrayNumeric](self: ArrayT, /) -> ArrayT: ... + def __pos__[ArrayT: _ArrayNumeric](self: ArrayT, /) -> ArrayT: ... + + # Binary ops + + # TODO: Support the "1d @ 1d -> scalar" case @overload - def __abs__(self: NDArray[complexfloating[_NBit1, _NBit1]]) -> NDArray[floating[_NBit1]]: ... + def __matmul__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __abs__(self: NDArray[_NumberType]) -> NDArray[_NumberType]: ... + def __matmul__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __abs__(self: NDArray[timedelta64]) -> NDArray[timedelta64]: ... + def __matmul__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload - def __abs__(self: NDArray[object_]) -> Any: ... - + def __matmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __invert__(self: NDArray[_UnknownType]) -> NDArray[Any]: ... + def __matmul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... 
@overload - def __invert__(self: NDArray[np.bool]) -> NDArray[np.bool]: ... + def __matmul__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __invert__(self: NDArray[_IntType]) -> NDArray[_IntType]: ... + def __matmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __invert__(self: NDArray[object_]) -> Any: ... - + def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __pos__(self: NDArray[_NumberType]) -> NDArray[_NumberType]: ... + def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __pos__(self: NDArray[timedelta64]) -> NDArray[timedelta64]: ... + def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __pos__(self: NDArray[object_]) -> Any: ... - + def __matmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload - def __neg__(self: NDArray[_NumberType]) -> NDArray[_NumberType]: ... + def __matmul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload - def __neg__(self: NDArray[timedelta64]) -> NDArray[timedelta64]: ... + def __matmul__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __neg__(self: NDArray[object_]) -> Any: ... + def __matmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... - # Binary ops + @overload # signature equivalent to __matmul__ + def __rmatmul__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __matmul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __rmatmul__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... 
@overload - def __matmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + def __rmatmul__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload - def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rmatmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __rmatmul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rmatmul__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __matmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + def __rmatmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __matmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __matmul__(self: NDArray[object_], other: Any) -> Any: ... + def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __matmul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - - @overload - def __rmatmul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __rmatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... 
# type: ignore[misc] + def __rmatmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload - def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rmatmul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload - def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __rmatmul__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rmatmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + @overload - def __rmatmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + def __mod__[ScalarT: floating | integer]( + self: NDArray[ScalarT], other: int | bool_, / + ) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload - def __rmatmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + def __mod__[ScalarT: floating | integer](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __rmatmul__(self: NDArray[object_], other: Any) -> Any: ... + def __mod__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __rmatmul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - + def __mod__[ScalarT: floating | integer](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload - def __mod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __mod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __mod__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ... 
# type: ignore[misc] + def __mod__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __mod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __mod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __mod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[timedelta64]: ... + def __mod__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... @overload - def __mod__(self: NDArray[object_], other: Any) -> Any: ... + def __mod__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __mod__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __mod__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + @overload # signature equivalent to __mod__ + def __rmod__[ScalarT: floating | integer]( + self: NDArray[ScalarT], other: int | bool_, / + ) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload - def __rmod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __rmod__[ScalarT: floating | integer](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __rmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + def __rmod__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... 
@overload - def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rmod__[ScalarT: floating | integer](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload - def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __rmod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __rmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rmod__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __rmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[timedelta64]: ... + def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __rmod__(self: NDArray[object_], other: Any) -> Any: ... + def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __rmod__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - + def __rmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... + @overload + def __rmod__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... @overload - def __divmod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> _2Tuple[NDArray[Any]]: ... + def __rmod__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __divmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> _2Tuple[NDArray[int8]]: ... # type: ignore[misc] + def __rmod__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + @overload - def __divmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _2Tuple[NDArray[unsignedinteger[Any]]]: ... 
# type: ignore[misc] + def __divmod__[ScalarT: floating | integer]( + self: NDArray[ScalarT], rhs: int | bool_, / + ) -> _2Tuple[ndarray[_ShapeT_co, _dtype[ScalarT]]]: ... @overload - def __divmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _2Tuple[NDArray[signedinteger[Any]]]: ... # type: ignore[misc] + def __divmod__[ScalarT: floating | integer]( + self: NDArray[ScalarT], rhs: _ArrayLikeBool_co, / + ) -> _2Tuple[NDArray[ScalarT]]: ... @overload - def __divmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _2Tuple[NDArray[floating[Any]]]: ... # type: ignore[misc] + def __divmod__(self: NDArray[bool_], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... @overload - def __divmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... - + def __divmod__[ScalarT: floating | integer]( + self: NDArray[bool_], rhs: _ArrayLike[ScalarT], / + ) -> _2Tuple[NDArray[ScalarT]]: ... @overload - def __rdivmod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> _2Tuple[NDArray[Any]]: ... + def __divmod__(self: NDArray[float64], rhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... @overload - def __rdivmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> _2Tuple[NDArray[int8]]: ... # type: ignore[misc] + def __divmod__(self: _ArrayFloat64_co, rhs: _ArrayLike[floating[_64Bit]], /) -> _2Tuple[NDArray[float64]]: ... @overload - def __rdivmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _2Tuple[NDArray[unsignedinteger[Any]]]: ... # type: ignore[misc] + def __divmod__(self: _ArrayUInt_co, rhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ... @overload - def __rdivmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _2Tuple[NDArray[signedinteger[Any]]]: ... # type: ignore[misc] + def __divmod__(self: _ArrayInt_co, rhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ... 
@overload - def __rdivmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _2Tuple[NDArray[floating[Any]]]: ... # type: ignore[misc] + def __divmod__(self: _ArrayFloat_co, rhs: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating]]: ... @overload - def __rdivmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... + def __divmod__(self: NDArray[timedelta64], rhs: _ArrayLike[timedelta64], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... + @overload # signature equivalent to __divmod__ + def __rdivmod__[ScalarT: floating | integer]( + self: NDArray[ScalarT], lhs: int | bool_, / + ) -> _2Tuple[ndarray[_ShapeT_co, _dtype[ScalarT]]]: ... @overload - def __add__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __rdivmod__[ScalarT: floating | integer]( + self: NDArray[ScalarT], lhs: _ArrayLikeBool_co, / + ) -> _2Tuple[NDArray[ScalarT]]: ... @overload - def __add__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + def __rdivmod__(self: NDArray[bool_], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... @overload - def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rdivmod__[ScalarT: floating | integer]( + self: NDArray[bool_], lhs: _ArrayLike[ScalarT], / + ) -> _2Tuple[NDArray[ScalarT]]: ... @overload - def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __rdivmod__(self: NDArray[float64], lhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... @overload - def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rdivmod__(self: _ArrayFloat64_co, lhs: _ArrayLike[floating[_64Bit]], /) -> _2Tuple[NDArray[float64]]: ... 
@overload - def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __rdivmod__(self: _ArrayUInt_co, lhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ... @overload - def __add__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + def __rdivmod__(self: _ArrayInt_co, lhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ... @overload - def __add__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc] + def __rdivmod__(self: _ArrayFloat_co, lhs: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating]]: ... @overload - def __add__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> NDArray[datetime64]: ... + def __rdivmod__(self: NDArray[timedelta64], lhs: _ArrayLike[timedelta64], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... + + # Keep in sync with `MaskedArray.__add__` @overload - def __add__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ... + def __add__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload - def __add__(self: NDArray[object_], other: Any) -> Any: ... + def __add__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __add__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - + def __add__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __radd__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __add__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload - def __radd__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + def __add__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... 
@overload - def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __add__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __add__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __add__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __radd__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __radd__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc] + def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __radd__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> NDArray[datetime64]: ... + def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload - def __radd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ... + def __add__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload - def __radd__(self: NDArray[object_], other: Any) -> Any: ... + def __add__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... 
@overload - def __radd__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - + def __add__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ... + @overload + def __add__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ... @overload - def __sub__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __add__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bytes_]: ... @overload - def __sub__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NoReturn: ... + def __add__(self: NDArray[str_], other: _ArrayLikeStr_co, /) -> NDArray[str_]: ... @overload - def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __add__( + self: ndarray[Any, dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, + /, + ) -> ndarray[tuple[Any, ...], dtypes.StringDType]: ... + @overload + def __add__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __add__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `MaskedArray.__radd__` + @overload # signature equivalent to __add__ + def __radd__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload - def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __radd__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __radd__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... 
@overload - def __sub__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + def __radd__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload - def __sub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc] + def __radd__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __sub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ... + def __radd__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __sub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[timedelta64]: ... + def __radd__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __sub__(self: NDArray[object_], other: Any) -> Any: ... + def __radd__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __sub__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - + def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __rsub__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __rsub__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NoReturn: ... + def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload - def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... 
# type: ignore[misc] + def __radd__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload - def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __radd__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... @overload - def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __radd__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ... @overload - def __rsub__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + def __radd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ... @overload - def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc] + def __radd__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bytes_]: ... @overload - def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> NDArray[datetime64]: ... # type: ignore[misc] + def __radd__(self: NDArray[str_], other: _ArrayLikeStr_co, /) -> NDArray[str_]: ... @overload - def __rsub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[timedelta64]: ... + def __radd__( + self: ndarray[Any, dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, + /, + ) -> ndarray[tuple[Any, ...], dtypes.StringDType]: ... @overload - def __rsub__(self: NDArray[object_], other: Any) -> Any: ... + def __radd__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __rsub__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __radd__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__sub__` @overload - def __mul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... 
+ def __sub__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload - def __mul__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + def __sub__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __sub__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __sub__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload - def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __sub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __sub__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __mul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + def __sub__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __mul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ... + def __sub__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __mul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... + def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __mul__(self: NDArray[object_], other: Any) -> Any: ... 
+ def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __mul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - + def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __rmul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload - def __rmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + def __sub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload - def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __sub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... @overload - def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __sub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ... @overload - def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __sub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[timedelta64]: ... @overload - def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __sub__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __rmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + def __sub__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `MaskedArray.__rsub__` @overload - def __rmul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ... 
+ def __rsub__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload - def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... + def __rsub__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __rmul__(self: NDArray[object_], other: Any) -> Any: ... + def __rsub__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __rmul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - + def __rsub__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload - def __floordiv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __rsub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __floordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + def __rsub__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rsub__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __rsub__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __floordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... 
@overload - def __floordiv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[int64]: ... + def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... + def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ... + def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload - def __floordiv__(self: NDArray[object_], other: Any) -> Any: ... + def __rsub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload - def __floordiv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - + def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... @overload - def __rfloordiv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ... @overload - def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + def __rsub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[timedelta64]: ... @overload - def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rsub__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __rsub__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... 
+ + # Keep in sync with `MaskedArray.__mul__` @overload - def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __mul__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload - def __rfloordiv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[int64]: ... + def __mul__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeTD64_co) -> NoReturn: ... + def __mul__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... + def __mul__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload - def __rfloordiv__(self: NDArray[object_], other: Any) -> Any: ... + def __mul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __rfloordiv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - + def __mul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __pow__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __mul__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __pow__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + def __mul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... 
# type: ignore[misc] + def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload - def __pow__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + def __mul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload - def __pow__(self: NDArray[object_], other: Any) -> Any: ... + def __mul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... @overload - def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - + def __mul__(self: _ArrayFloat_co, other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... @overload - def __rpow__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __mul__( + self: ndarray[Any, _dtype[character] | dtypes.StringDType], + other: _ArrayLikeInt, + /, + ) -> ndarray[tuple[Any, ...], _DTypeT_co]: ... + @overload + def __mul__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __rpow__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + def __mul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... 
+ + # Keep in sync with `MaskedArray.__rmul__` + @overload # signature equivalent to __mul__ + def __rmul__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload - def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rmul__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __rmul__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rmul__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload - def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + def __rmul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __rpow__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + def __rmul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __rpow__(self: NDArray[object_], other: Any) -> Any: ... + def __rmul__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - + def __rmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __truediv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... 
@overload - def __truediv__(self: _ArrayInt_co, other: _ArrayInt_co) -> NDArray[float64]: ... # type: ignore[misc] + def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __truediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __truediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload - def __truediv__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + def __rmul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload - def __truediv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[float64]: ... + def __rmul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... @overload - def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... + def __rmul__(self: _ArrayFloat_co, other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... @overload - def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ... + def __rmul__( + self: ndarray[Any, _dtype[character] | dtypes.StringDType], + other: _ArrayLikeInt, + /, + ) -> ndarray[tuple[Any, ...], _DTypeT_co]: ... @overload - def __truediv__(self: NDArray[object_], other: Any) -> Any: ... + def __rmul__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __truediv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __rmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... 
+ # Keep in sync with `MaskedArray.__truediv__` @overload - def __rtruediv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... - @overload - def __rtruediv__(self: _ArrayInt_co, other: _ArrayInt_co) -> NDArray[float64]: ... # type: ignore[misc] + def __truediv__(self: _ArrayInt_co | NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __truediv__(self: _ArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __rtruediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __truediv__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __rtruediv__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + def __truediv__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __rtruediv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[float64]: ... + def __truediv__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __rtruediv__(self: NDArray[np.bool], other: _ArrayLikeTD64_co) -> NoReturn: ... + def __truediv__(self: _ArrayFloat_co, other: _ArrayLike[floating], /) -> NDArray[floating]: ... @overload - def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... + def __truediv__(self: NDArray[complexfloating], other: _ArrayLikeNumber_co, /) -> NDArray[complexfloating]: ... @overload - def __rtruediv__(self: NDArray[object_], other: Any) -> Any: ... 
+ def __truediv__(self: _ArrayNumber_co, other: _ArrayLike[complexfloating], /) -> NDArray[complexfloating]: ... @overload - def __rtruediv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - + def __truediv__(self: NDArray[inexact], other: _ArrayLikeNumber_co, /) -> NDArray[inexact]: ... @overload - def __lshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __truediv__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload - def __lshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + def __truediv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[float64]: ... @overload - def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __lshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... @overload - def __lshift__(self: NDArray[object_], other: Any) -> Any: ... + def __truediv__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __lshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __truediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__rtruediv__` @overload - def __rlshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __rtruediv__(self: _ArrayInt_co | NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __rlshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + def __rtruediv__(self: _ArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... 
@overload - def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rtruediv__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __rlshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + def __rtruediv__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __rlshift__(self: NDArray[object_], other: Any) -> Any: ... + def __rtruediv__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __rlshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - + def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLike[floating], /) -> NDArray[floating]: ... @overload - def __rshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __rtruediv__(self: NDArray[complexfloating], other: _ArrayLikeNumber_co, /) -> NDArray[complexfloating]: ... @overload - def __rshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + def __rtruediv__(self: _ArrayNumber_co, other: _ArrayLike[complexfloating], /) -> NDArray[complexfloating]: ... @overload - def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rtruediv__(self: NDArray[inexact], other: _ArrayLikeNumber_co, /) -> NDArray[inexact]: ... @overload - def __rshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + def __rtruediv__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload - def __rshift__(self: NDArray[object_], other: Any) -> Any: ... + def __rtruediv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[float64]: ... @overload - def __rshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... 
+ def __rtruediv__(self: NDArray[integer | floating], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... + @overload + def __rtruediv__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rtruediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__floordiv__` @overload - def __rrshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __floordiv__[ScalarT: integer | floating]( + self: NDArray[ScalarT], other: int | bool_, / + ) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload - def __rrshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + def __floordiv__[ScalarT: integer | floating](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __floordiv__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __rrshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + def __floordiv__[ScalarT: integer | floating](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload - def __rrshift__(self: NDArray[object_], other: Any) -> Any: ... + def __floordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __rrshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - + def __floordiv__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __and__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... 
@overload - def __and__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + def __floordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __floordiv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[int64]: ... @overload - def __and__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __and__(self: NDArray[object_], other: Any) -> Any: ... + def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... @overload - def __and__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __floordiv__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __floordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__rfloordiv__` @overload - def __rand__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __rfloordiv__[ScalarT: integer | floating]( + self: NDArray[ScalarT], other: int | bool_, / + ) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload - def __rand__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + def __rfloordiv__[ScalarT: integer | floating](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rfloordiv__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __rand__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... 
+ def __rfloordiv__[ScalarT: integer | floating](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload - def __rand__(self: NDArray[object_], other: Any) -> Any: ... + def __rfloordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __rand__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - + def __rfloordiv__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __xor__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __xor__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __xor__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + def __rfloordiv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[int64]: ... @overload - def __xor__(self: NDArray[object_], other: Any) -> Any: ... + def __rfloordiv__(self: NDArray[floating | integer], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... @overload - def __xor__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __rfloordiv__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rfloordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__pow__` @overload - def __rxor__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... 
+ def __pow__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, mod: None = None, /) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload - def __rxor__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + def __pow__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[ScalarT]: ... @overload - def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __pow__(self: NDArray[bool_], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... @overload - def __rxor__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + def __pow__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], mod: None = None, /) -> NDArray[ScalarT]: ... @overload - def __rxor__(self: NDArray[object_], other: Any) -> Any: ... + def __pow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ... @overload - def __rxor__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - + def __pow__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], mod: None = None, /) -> NDArray[float64]: ... @overload - def __or__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __pow__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, mod: None = None, /) -> NDArray[complex128]: ... @overload - def __or__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + def __pow__( + self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, / + ) -> NDArray[complex128]: ... @overload - def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ... 
@overload - def __or__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ... @overload - def __or__(self: NDArray[object_], other: Any) -> Any: ... + def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ... @overload - def __or__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> NDArray[complexfloating]: ... + @overload + def __pow__(self: NDArray[number], other: _ArrayLikeNumber_co, mod: None = None, /) -> NDArray[number]: ... + @overload + def __pow__(self: NDArray[object_], other: Any, mod: None = None, /) -> Any: ... + @overload + def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... + # Keep in sync with `MaskedArray.__rpow__` @overload - def __ror__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __rpow__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, mod: None = None, /) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload - def __ror__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + def __rpow__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[ScalarT]: ... @overload - def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rpow__(self: NDArray[bool_], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... @overload - def __ror__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + def __rpow__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], mod: None = None, /) -> NDArray[ScalarT]: ... @overload - def __ror__(self: NDArray[object_], other: Any) -> Any: ... 
+ def __rpow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ... @overload - def __ror__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... - - # `np.generic` does not support inplace operations - - # NOTE: Inplace ops generally use "same_kind" casting w.r.t. to the left - # operand. An exception to this rule are unsigned integers though, which - # also accepts a signed integer for the right operand as long it is a 0D - # object and its value is >= 0 + def __rpow__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], mod: None = None, /) -> NDArray[float64]: ... @overload - def __iadd__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __rpow__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, mod: None = None, /) -> NDArray[complex128]: ... @overload - def __iadd__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... + def __rpow__( + self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, / + ) -> NDArray[complex128]: ... @overload - def __iadd__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ... @overload - def __iadd__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ... @overload - def __iadd__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ... @overload - def __iadd__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... 
+ def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> NDArray[complexfloating]: ... @overload - def __iadd__(self: NDArray[timedelta64], other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... + def __rpow__(self: NDArray[number], other: _ArrayLikeNumber_co, mod: None = None, /) -> NDArray[number]: ... @overload - def __iadd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ... + def __rpow__(self: NDArray[object_], other: Any, mod: None = None, /) -> Any: ... @overload - def __iadd__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... @overload - def __isub__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __lshift__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... + @overload + def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... + @overload + def __lshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __isub__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + def __lshift__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __isub__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + def __lshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + @overload - def __isub__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + def __rlshift__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __isub__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... 
+ def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __isub__(self: NDArray[timedelta64], other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... + def __rlshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __isub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ... + def __rlshift__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __isub__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + def __rlshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __imul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __rshift__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... + @overload + def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __imul__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... + def __rshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __imul__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + def __rshift__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __imul__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + def __rshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + @overload - def __imul__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + def __rrshift__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __imul__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... 
+ def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __imul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ... + def __rrshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __imul__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... - + def __rrshift__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __itruediv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __rrshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + @overload - def __itruediv__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + def __and__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __itruediv__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... + def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __itruediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... + def __and__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __itruediv__(self: NDArray[timedelta64], other: _ArrayLikeInt_co) -> NDArray[timedelta64]: ... + def __and__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __itruediv__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + def __and__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __ifloordiv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __rand__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... + @overload + def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... 
@overload - def __ifloordiv__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + def __rand__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __ifloordiv__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + def __rand__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __ifloordiv__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + def __rand__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + @overload - def __ifloordiv__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... + def __xor__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __ifloordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... + def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __ifloordiv__(self: NDArray[timedelta64], other: _ArrayLikeInt_co) -> NDArray[timedelta64]: ... + def __xor__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __ifloordiv__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... - + def __xor__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __ipow__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __xor__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + @overload - def __ipow__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + def __rxor__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __ipow__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... 
+ def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __ipow__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + def __rxor__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __ipow__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... + def __rxor__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __ipow__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + def __rxor__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __imod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... - @overload - def __imod__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + def __or__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __imod__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __imod__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + def __or__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __imod__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[timedelta64]: ... + def __or__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __imod__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + def __or__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __ilshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... 
+ def __ror__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __ilshift__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __ilshift__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + def __ror__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __ilshift__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + def __ror__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __ror__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # `np.generic` does not support inplace operations + + # NOTE: Inplace ops generally use "same_kind" casting w.r.t. to the left + # operand. An exception to this rule are unsigned integers though, which + # also accepts a signed integer for the right operand as long it is a 0D + # object and its value is >= 0 + # NOTE: Due to a mypy bug, overloading on e.g. `self: NDArray[SCT_floating]` won't + # work, as this will lead to `false negatives` when using these inplace ops. + # += + @overload # type: ignore[misc] + def __iadd__[ArrayT: NDArray[bool_]](self: ArrayT, other: _ArrayLikeBool_co, /) -> ArrayT: ... @overload - def __irshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __iadd__[ArrayT: NDArray[complexfloating]](self: ArrayT, other: _ArrayLikeComplex_co, /) -> ArrayT: ... @overload - def __irshift__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + def __iadd__[ArrayT: NDArray[inexact]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... 
@overload - def __irshift__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + def __iadd__[ArrayT: NDArray[number]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __irshift__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... - + def __iadd__[ArrayT: NDArray[datetime64 | timedelta64]](self: ArrayT, other: _ArrayLikeTD64_co, /) -> ArrayT: ... + @overload + def __iadd__[ArrayT: NDArray[bytes_]](self: ArrayT, other: _ArrayLikeBytes_co, /) -> ArrayT: ... @overload - def __iand__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __iadd__[ArrayT: _ArrayString](self: ArrayT, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> ArrayT: ... @overload - def __iand__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... + def __iadd__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... + + # -= + @overload # type: ignore[misc] + def __isub__[ArrayT: NDArray[complexfloating]](self: ArrayT, other: _ArrayLikeComplex_co, /) -> ArrayT: ... + @overload + def __isub__[ArrayT: NDArray[inexact]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __iand__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + def __isub__[ArrayT: NDArray[number]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __iand__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + def __isub__[ArrayT: NDArray[datetime64 | timedelta64]](self: ArrayT, other: _ArrayLikeTD64_co, /) -> ArrayT: ... @overload - def __iand__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + def __isub__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... 
+ # *= + @overload # type: ignore[misc] + def __imul__[ArrayT: NDArray[bool_]](self: ArrayT, other: _ArrayLikeBool_co, /) -> ArrayT: ... @overload - def __ixor__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __imul__[ArrayT: NDArray[complexfloating]](self: ArrayT, other: _ArrayLikeComplex_co, /) -> ArrayT: ... @overload - def __ixor__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... + def __imul__[ArrayT: NDArray[inexact | timedelta64]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __ixor__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + def __imul__[ArrayT: NDArray[number | character]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __ixor__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + def __imul__[ArrayT: _ArrayString](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __ixor__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + def __imul__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... + # @= + @overload # type: ignore[misc] + def __imatmul__[ArrayT: NDArray[bool_]](self: ArrayT, other: _ArrayLikeBool_co, /) -> ArrayT: ... + @overload + def __imatmul__[ArrayT: NDArray[complexfloating]](self: ArrayT, other: _ArrayLikeComplex_co, /) -> ArrayT: ... + @overload + def __imatmul__[ArrayT: NDArray[inexact]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __ior__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __imatmul__[ArrayT: NDArray[number]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __ior__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... + def __imatmul__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... 
+ + # **= + @overload # type: ignore[misc] + def __ipow__[ArrayT: NDArray[complexfloating]](self: ArrayT, other: _ArrayLikeComplex_co, /) -> ArrayT: ... @overload - def __ior__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + def __ipow__[ArrayT: NDArray[inexact]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __ior__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + def __ipow__[ArrayT: NDArray[number]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __ior__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + def __ipow__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... + # /= + @overload # type: ignore[misc] + def __itruediv__[ArrayT: NDArray[complexfloating]](self: ArrayT, other: _ArrayLikeComplex_co, /) -> ArrayT: ... @overload - def __imatmul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __itruediv__[ArrayT: NDArray[inexact | timedelta64]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __imatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... + def __itruediv__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... + + # //= + # keep in sync with `__imod__` + @overload # type: ignore[misc] + def __ifloordiv__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __imatmul__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ... + def __ifloordiv__[ArrayT: NDArray[floating | timedelta64]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __imatmul__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... 
+ def __ifloordiv__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... + + # %= + # keep in sync with `__ifloordiv__` + @overload # type: ignore[misc] + def __imod__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __imatmul__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + def __imod__[ArrayT: NDArray[floating]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __imatmul__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... + def __imod__[ArrayT: NDArray[timedelta64]](self: ArrayT, other: _ArrayLike[timedelta64], /) -> ArrayT: ... @overload - def __imatmul__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + def __imod__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... - def __dlpack__(self: NDArray[number[Any]], *, stream: None = ...) -> _PyCapsule: ... - def __dlpack_device__(self) -> tuple[int, L[0]]: ... + # <<= + # keep in sync with `__irshift__` + @overload # type: ignore[misc] + def __ilshift__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... + @overload + def __ilshift__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... - def __array_namespace__(self, *, api_version: str | None = ...) -> Any: ... + # >>= + # keep in sync with `__ilshift__` + @overload # type: ignore[misc] + def __irshift__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... + @overload + def __irshift__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... - def to_device(self, device: L["cpu"], /, *, stream: None | int | Any = ...) -> NDArray[Any]: ... + # &= + # keep in sync with `__ixor__` and `__ior__` + @overload # type: ignore[misc] + def __iand__[ArrayT: NDArray[bool_]](self: ArrayT, other: _ArrayLikeBool_co, /) -> ArrayT: ... 
+ @overload + def __iand__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... + @overload + def __iand__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... - @property - def device(self) -> L["cpu"]: ... + # ^= + # keep in sync with `__iand__` and `__ior__` + @overload # type: ignore[misc] + def __ixor__[ArrayT: NDArray[bool_]](self: ArrayT, other: _ArrayLikeBool_co, /) -> ArrayT: ... + @overload + def __ixor__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... + @overload + def __ixor__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... - def bitwise_count( - self, - out: None | NDArray[Any] = ..., + # |= + # keep in sync with `__iand__` and `__ixor__` + @overload # type: ignore[misc] + def __ior__[ArrayT: NDArray[bool_]](self: ArrayT, other: _ArrayLikeBool_co, /) -> ArrayT: ... + @overload + def __ior__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... + @overload + def __ior__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... + + # + def __dlpack__( + self: NDArray[number], + /, *, - where: _ArrayLikeBool_co = ..., - casting: _CastingKind = ..., - order: _OrderKACF = ..., - dtype: DTypeLike = ..., - subok: builtins.bool = ..., - ) -> NDArray[Any]: ... + stream: int | Any | None = None, + max_version: tuple[int, int] | None = None, + dl_device: tuple[int, int] | None = None, + copy: py_bool | None = None, + ) -> CapsuleType: ... + def __dlpack_device__(self, /) -> tuple[L[1], L[0]]: ... # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` @property - def dtype(self) -> _DType_co: ... + def dtype(self) -> _DTypeT_co: ... # NOTE: while `np.generic` is not technically an instance of `ABCMeta`, # the `@abstractmethod` decorator is herein used to (forcefully) deny # the creation of `np.generic` instances. 
# The `# type: ignore` comments are necessary to silence mypy errors regarding # the missing `ABCMeta` metaclass. - # See https://github.com/numpy/numpy-stubs/pull/80 for more details. +class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): + @abstractmethod + def __new__(cls, /, *args: Any, **kwargs: Any) -> Self: ... -_ScalarType = TypeVar("_ScalarType", bound=generic) -_NBit1 = TypeVar("_NBit1", bound=NBitBase) -_NBit2 = TypeVar("_NBit2", bound=NBitBase) + # NOTE: Technically this doesn't exist at runtime, but it is unlikely to lead to + # type-unsafe situations (the abstract scalar types cannot be instantiated + # themselves) and is convenient to have, so we include it regardless. See + # https://github.com/numpy/numpy/issues/30445 for use-cases and discussion. + def __hash__(self, /) -> int: ... + + def __buffer__(self, flags: int, /) -> memoryview: ... -class generic(_ArrayOrScalarCommon): - @abstractmethod - def __init__(self, *args: Any, **kwargs: Any) -> None: ... @overload - def __array__(self: _ScalarType, dtype: None = ..., /) -> NDArray[_ScalarType]: ... + def __array__(self, dtype: None = None, /) -> ndarray[tuple[()], _dtype[Self]]: ... @overload - def __array__(self, dtype: _DType, /) -> ndarray[Any, _DType]: ... - def __hash__(self) -> int: ... - @property - def base(self) -> None: ... - @property - def ndim(self) -> L[0]: ... - @property - def size(self) -> L[1]: ... - @property - def shape(self) -> tuple[()]: ... + def __array__[DTypeT: _dtype](self, dtype: DTypeT, /) -> ndarray[tuple[()], DTypeT]: ... + + # + @overload + def __getitem__(self, key: tuple[()], /) -> Self: ... + @overload + def __getitem__( + self, key: EllipsisType | tuple[EllipsisType], / + ) -> ndarray[tuple[()], _dtype[Self]]: ... + @overload + def __getitem__( + self, key: None | tuple[None], / + ) -> ndarray[tuple[int], _dtype[Self]]: ... + @overload + def __getitem__( + self, key: tuple[None, None], / + ) -> ndarray[tuple[int, int], _dtype[Self]]: ... 
+ @overload + def __getitem__( + self, key: tuple[None, None, None], / + ) -> ndarray[tuple[int, int, int], _dtype[Self]]: ... + @overload # Limited support for (None,) * N > 3 + def __getitem__(self, key: tuple[None, ...], /) -> NDArray[Self]: ... + + # + @overload + def __array_wrap__[ShapeT: _Shape, DTypeT: _dtype]( + self, + array: ndarray[ShapeT, DTypeT], + context: tuple[ufunc, tuple[object, ...], int] | None, + return_scalar: L[False], + /, + ) -> ndarray[ShapeT, DTypeT]: ... + @overload + def __array_wrap__[ScalarT: generic]( + self, + array: ndarray[tuple[()], _dtype[ScalarT]], + context: tuple[ufunc, tuple[object, ...], int] | None = None, + return_scalar: L[True] = True, + /, + ) -> ScalarT: ... + @overload + def __array_wrap__[ShapeT: tuple[int, *tuple[int, ...]], DTypeT: _dtype]( + self, + array: ndarray[ShapeT, DTypeT], + context: tuple[ufunc, tuple[object, ...], int] | None = None, + return_scalar: L[True] = True, + /, + ) -> ndarray[ShapeT, DTypeT]: ... + @overload + def __array_wrap__[ShapeT: _Shape, ScalarT: generic]( + self, + array: ndarray[ShapeT, _dtype[ScalarT]], + context: tuple[ufunc, tuple[object, ...], int] | None = None, + return_scalar: L[True] = True, + /, + ) -> ScalarT | ndarray[ShapeT, _dtype[ScalarT]]: ... + + @property + def base(self) -> None: ... + @property + def ndim(self) -> L[0]: ... + @property + def size(self) -> L[1]: ... + @property + def shape(self) -> tuple[()]: ... @property def strides(self) -> tuple[()]: ... - def byteswap(self: _ScalarType, inplace: L[False] = ...) -> _ScalarType: ... @property - def flat(self: _ScalarType) -> flatiter[NDArray[_ScalarType]]: ... + def flat(self) -> flatiter[ndarray[tuple[int], _dtype[Self]]]: ... + + @overload + def item(self, /) -> _ItemT_co: ... + @overload + def item(self, arg0: L[0, -1] | tuple[L[0, -1]] | tuple[()] = ..., /) -> _ItemT_co: ... + @override + def tolist(self, /) -> _ItemT_co: ... 
+ + # NOTE: these technically exist, but will always raise when called + def trace( # type: ignore[misc] + self: Never, + /, + offset: L[0] = 0, + axis1: L[0] = 0, + axis2: L[1] = 1, + dtype: None = None, + out: None = None, + ) -> Never: ... + def diagonal(self: Never, /, offset: L[0] = 0, axis1: L[0] = 0, axis2: L[1] = 1) -> Never: ... # type: ignore[misc] + def swapaxes(self: Never, axis1: Never, axis2: Never, /) -> Never: ... # type: ignore[misc] + def sort(self: Never, /, axis: L[-1] = -1, kind: None = None, order: None = None, *, stable: None = None) -> Never: ... # type: ignore[misc] + def nonzero(self: Never, /) -> Never: ... # type: ignore[misc] + def setfield(self: Never, val: Never, /, dtype: Never, offset: L[0] = 0) -> None: ... # type: ignore[misc] + def searchsorted(self: Never, v: Never, /, side: L["left"] = "left", sorter: None = None) -> Never: ... # type: ignore[misc] + + # NOTE: this wont't raise, but won't do anything either + @overload + @deprecated("Resizing a NumPy generic inplace has been deprecated in NumPy 2.5") + def resize(self, /, *, refcheck: py_bool = True) -> None: ... + @overload + @deprecated("Resizing a NumPy generic inplace has been deprecated in NumPy 2.5") + def resize(self, new_shape: L[0, -1] | tuple[L[0, -1]] | tuple[()], /, *, refcheck: py_bool = True) -> None: ... - if sys.version_info >= (3, 12): - def __buffer__(self, flags: int, /) -> memoryview: ... + # + def byteswap(self, /, inplace: L[False] = False) -> Self: ... + # @overload - def astype( + def astype[ScalarT: generic]( self, - dtype: _DTypeLike[_ScalarType], - order: _OrderKACF = ..., - casting: _CastingKind = ..., - subok: builtins.bool = ..., - copy: builtins.bool | _CopyMode = ..., - ) -> _ScalarType: ... + /, + dtype: _DTypeLike[ScalarT], + order: _OrderKACF = "K", + casting: _CastingKind = "unsafe", + subok: py_bool = True, + copy: py_bool | _CopyMode = True, + ) -> ScalarT: ... 
@overload def astype( self, - dtype: DTypeLike, - order: _OrderKACF = ..., - casting: _CastingKind = ..., - subok: builtins.bool = ..., - copy: builtins.bool | _CopyMode = ..., - ) -> Any: ... + /, + dtype: DTypeLike | None, + order: _OrderKACF = "K", + casting: _CastingKind = "unsafe", + subok: py_bool = True, + copy: py_bool | _CopyMode = True, + ) -> Incomplete: ... # NOTE: `view` will perform a 0D->scalar cast, # thus the array `type` is irrelevant to the output type @overload - def view( - self: _ScalarType, - type: type[NDArray[Any]] = ..., - ) -> _ScalarType: ... + def view(self, type: type[ndarray] = ...) -> Self: ... @overload - def view( - self, - dtype: _DTypeLike[_ScalarType], - type: type[NDArray[Any]] = ..., - ) -> _ScalarType: ... + def view[ScalarT: generic](self, /, dtype: _DTypeLike[ScalarT], type: type[ndarray] = ...) -> ScalarT: ... @overload - def view( - self, - dtype: DTypeLike, - type: type[NDArray[Any]] = ..., - ) -> Any: ... + def view(self, /, dtype: DTypeLike, type: type[ndarray] = ...) -> Incomplete: ... @overload - def getfield( - self, - dtype: _DTypeLike[_ScalarType], - offset: SupportsIndex = ... - ) -> _ScalarType: ... + def getfield[ScalarT: generic](self, /, dtype: _DTypeLike[ScalarT], offset: SupportsIndex = 0) -> ScalarT: ... @overload - def getfield( - self, - dtype: DTypeLike, - offset: SupportsIndex = ... - ) -> Any: ... - - def item( - self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /, - ) -> Any: ... + def getfield(self, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> Incomplete: ... @overload - def take( # type: ignore[misc] - self: _ScalarType, + def take( + self, indices: _IntLike_co, - axis: None | SupportsIndex = ..., - out: None = ..., - mode: _ModeKind = ..., - ) -> _ScalarType: ... + /, + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", + ) -> Self: ... 
+ @overload + def take( + self, + indices: _ArrayLikeInt_co, + /, + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", + ) -> NDArray[Self]: ... @overload - def take( # type: ignore[misc] - self: _ScalarType, + def take[ArrayT: ndarray]( + self, indices: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., - out: None = ..., - mode: _ModeKind = ..., - ) -> NDArray[_ScalarType]: ... + /, + axis: SupportsIndex | None = None, + *, + out: ArrayT, + mode: _ModeKind = "raise", + ) -> ArrayT: ... @overload - def take( + def take[ArrayT: ndarray]( self, indices: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., - out: _NdArraySubClass = ..., - mode: _ModeKind = ..., - ) -> _NdArraySubClass: ... + /, + axis: SupportsIndex | None, + out: ArrayT, + mode: _ModeKind = "raise", + ) -> ArrayT: ... - def repeat( - self: _ScalarType, - repeats: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., - ) -> NDArray[_ScalarType]: ... + def repeat(self, repeats: _ArrayLikeInt_co, /, axis: SupportsIndex | None = None) -> ndarray[tuple[int], _dtype[Self]]: ... + def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _dtype[Self]]: ... + def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _dtype[Self]]: ... - def flatten( - self: _ScalarType, - order: _OrderKACF = ..., - ) -> NDArray[_ScalarType]: ... + @overload # (()) + def reshape( + self, + shape: tuple[()] | list[Never], + /, + *, + order: _OrderACF = "C", + copy: py_bool | None = None, + ) -> Self: ... + @overload # (ShapeT: (index, ...)) + def reshape[ShapeT: tuple[int, *tuple[int, ...]]]( + self, + shape: ShapeT, + /, + *, + order: _OrderACF = "C", + copy: py_bool | None = None, + ) -> ndarray[ShapeT, _dtype[Self]]: ... + @overload # (Sequence[index, ...]) # not recommended + def reshape( + self, + shape: Sequence[SupportsIndex], + /, + *, + order: _OrderACF = "C", + copy: py_bool | None = None, + ) -> NDArray[Self] | Any: ... 
+ @overload # _(index) + def reshape( + self, + size1: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: py_bool | None = None, + ) -> ndarray[tuple[int], _dtype[Self]]: ... + @overload # _(index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: py_bool | None = None, + ) -> ndarray[tuple[int, int], _dtype[Self]]: ... + @overload # _(index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: py_bool | None = None, + ) -> ndarray[tuple[int, int, int], _dtype[Self]]: ... + @overload # _(index, index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + size4: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: py_bool | None = None, + ) -> ndarray[tuple[int, int, int, int], _dtype[Self]]: ... + @overload # _(index, index, index, index, index, *index) # ndim >= 5 + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + size4: SupportsIndex, + size5: SupportsIndex, + /, + *sizes6_: SupportsIndex, + order: _OrderACF = "C", + copy: py_bool | None = None, + ) -> ndarray[tuple[int, int, int, int, int, *tuple[int, ...]], _dtype[Self]]: ... - def ravel( - self: _ScalarType, - order: _OrderKACF = ..., - ) -> NDArray[_ScalarType]: ... + def squeeze(self, axis: L[0] | tuple[()] | None = ...) -> Self: ... + def transpose(self, axes: tuple[()] | None = ..., /) -> Self: ... @overload - def reshape( - self: _ScalarType, shape: _ShapeLike, /, *, order: _OrderACF = ... - ) -> NDArray[_ScalarType]: ... + def all( + self, + /, + axis: L[0, -1] | tuple[()] | None = None, + out: None = None, + keepdims: SupportsIndex = False, + *, + where: py_bool | bool_ | ndarray[tuple[()], _dtype[bool_]] = True + ) -> bool_: ... @overload - def reshape( - self: _ScalarType, *shape: SupportsIndex, order: _OrderACF = ... 
- ) -> NDArray[_ScalarType]: ... + def all[ScalarT: generic]( + self, + /, + axis: L[0, -1] | tuple[()] | None, + out: ndarray[tuple[()], _dtype[ScalarT]], + keepdims: SupportsIndex = False, + *, + where: py_bool | bool_ | ndarray[tuple[()], _dtype[bool_]] = True, + ) -> ScalarT: ... + @overload + def all[ScalarT: generic]( + self, + /, + axis: L[0, -1] | tuple[()] | None = None, + *, + out: ndarray[tuple[()], _dtype[ScalarT]], + keepdims: SupportsIndex = False, + where: py_bool | bool_ | ndarray[tuple[()], _dtype[bool_]] = True, + ) -> ScalarT: ... - def bitwise_count( + @overload + def any( self, - out: None | NDArray[Any] = ..., + /, + axis: L[0, -1] | tuple[()] | None = None, + out: None = None, + keepdims: SupportsIndex = False, *, - where: _ArrayLikeBool_co = ..., - casting: _CastingKind = ..., - order: _OrderKACF = ..., - dtype: DTypeLike = ..., - subok: builtins.bool = ..., - ) -> Any: ... + where: py_bool | bool_ | ndarray[tuple[()], _dtype[bool_]] = True + ) -> bool_: ... + @overload + def any[ScalarT: generic]( + self, + /, + axis: L[0, -1] | tuple[()] | None, + out: ndarray[tuple[()], _dtype[ScalarT]], + keepdims: SupportsIndex = False, + *, + where: py_bool | bool_ | ndarray[tuple[()], _dtype[bool_]] = True, + ) -> ScalarT: ... + @overload + def any[ScalarT: generic]( + self, + /, + axis: L[0, -1] | tuple[()] | None = None, + *, + out: ndarray[tuple[()], _dtype[ScalarT]], + keepdims: SupportsIndex = False, + where: py_bool | bool_ | ndarray[tuple[()], _dtype[bool_]] = True, + ) -> ScalarT: ... - def squeeze( - self: _ScalarType, axis: None | L[0] | tuple[()] = ... - ) -> _ScalarType: ... - def transpose(self: _ScalarType, axes: None | tuple[()] = ..., /) -> _ScalarType: ... # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` @property - def dtype(self: _ScalarType) -> _dtype[_ScalarType]: ... - -class number(generic, Generic[_NBit1]): # type: ignore - @property - def real(self: _ArraySelf) -> _ArraySelf: ... 
- @property - def imag(self: _ArraySelf) -> _ArraySelf: ... - def __class_getitem__(self, item: Any) -> GenericAlias: ... - def __int__(self) -> int: ... - def __float__(self) -> float: ... - def __complex__(self) -> complex: ... - def __neg__(self: _ArraySelf) -> _ArraySelf: ... - def __pos__(self: _ArraySelf) -> _ArraySelf: ... - def __abs__(self: _ArraySelf) -> _ArraySelf: ... - # Ensure that objects annotated as `number` support arithmetic operations - __add__: _NumberOp - __radd__: _NumberOp - __sub__: _NumberOp - __rsub__: _NumberOp - __mul__: _NumberOp - __rmul__: _NumberOp - __floordiv__: _NumberOp - __rfloordiv__: _NumberOp - __pow__: _NumberOp - __rpow__: _NumberOp - __truediv__: _NumberOp - __rtruediv__: _NumberOp - __lt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] - __le__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] - __gt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] - __ge__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] - -class bool(generic): - def __init__(self, value: object = ..., /) -> None: ... - def item( - self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /, - ) -> builtins.bool: ... - def tolist(self) -> builtins.bool: ... - @property - def real(self: _ArraySelf) -> _ArraySelf: ... - @property - def imag(self: _ArraySelf) -> _ArraySelf: ... - def __int__(self) -> int: ... - def __float__(self) -> float: ... - def __complex__(self) -> complex: ... - def __abs__(self: _ArraySelf) -> _ArraySelf: ... - __add__: _BoolOp[np.bool] - __radd__: _BoolOp[np.bool] - __sub__: _BoolSub - __rsub__: _BoolSub - __mul__: _BoolOp[np.bool] - __rmul__: _BoolOp[np.bool] - __floordiv__: _BoolOp[int8] - __rfloordiv__: _BoolOp[int8] - __pow__: _BoolOp[int8] - __rpow__: _BoolOp[int8] - __truediv__: _BoolTrueDiv - __rtruediv__: _BoolTrueDiv - def __invert__(self) -> np.bool: ... 
- __lshift__: _BoolBitOp[int8] - __rlshift__: _BoolBitOp[int8] - __rshift__: _BoolBitOp[int8] - __rrshift__: _BoolBitOp[int8] - __and__: _BoolBitOp[np.bool] - __rand__: _BoolBitOp[np.bool] - __xor__: _BoolBitOp[np.bool] - __rxor__: _BoolBitOp[np.bool] - __or__: _BoolBitOp[np.bool] - __ror__: _BoolBitOp[np.bool] - __mod__: _BoolMod - __rmod__: _BoolMod - __divmod__: _BoolDivMod - __rdivmod__: _BoolDivMod - __lt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] - __le__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] - __gt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] - __ge__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] - -bool_ = bool + def dtype(self) -> _dtype[Self]: ... -class object_(generic): - def __init__(self, value: object = ..., /) -> None: ... - @property - def real(self: _ArraySelf) -> _ArraySelf: ... - @property - def imag(self: _ArraySelf) -> _ArraySelf: ... - # The 3 protocols below may or may not raise, - # depending on the underlying object - def __int__(self) -> int: ... - def __float__(self) -> float: ... - def __complex__(self) -> complex: ... +class number(generic[_NumberItemT_co], Generic[_NBitT, _NumberItemT_co]): + @abstractmethod # `SupportsIndex | str | bytes` equivs `_ConvertibleToInt & _ConvertibleToFloat` + def __new__(cls, value: SupportsIndex | str | bytes = 0, /) -> Self: ... + def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... - if sys.version_info >= (3, 12): - def __release_buffer__(self, buffer: memoryview, /) -> None: ... + def __neg__(self) -> Self: ... + def __pos__(self) -> Self: ... + def __abs__(self) -> Self: ... -# The `datetime64` constructors requires an object with the three attributes below, -# and thus supports datetime duck typing -class _DatetimeScalar(Protocol): - @property - def day(self) -> int: ... - @property - def month(self) -> int: ... - @property - def year(self) -> int: ... + def __add__(self, other: _NumberLike_co, /) -> Incomplete: ... 
+ def __radd__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __sub__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __rsub__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __mul__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __rmul__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __pow__(self, other: _NumberLike_co, mod: None = None, /) -> Incomplete: ... + def __rpow__(self, other: _NumberLike_co, mod: None = None, /) -> Incomplete: ... + def __truediv__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __rtruediv__(self, other: _NumberLike_co, /) -> Incomplete: ... -# TODO: `item`/`tolist` returns either `dt.date`, `dt.datetime` or `int` -# depending on the unit -class datetime64(generic): @overload - def __init__( - self, - value: None | datetime64 | _CharLike_co | _DatetimeScalar = ..., - format: _CharLike_co | tuple[_CharLike_co, _IntLike_co] = ..., - /, - ) -> None: ... + def __lt__(self, other: _NumberLike_co, /) -> bool_: ... @overload - def __init__( - self, - value: int, - format: _CharLike_co | tuple[_CharLike_co, _IntLike_co], - /, - ) -> None: ... - def __add__(self, other: _TD64Like_co) -> datetime64: ... - def __radd__(self, other: _TD64Like_co) -> datetime64: ... - @overload - def __sub__(self, other: datetime64) -> timedelta64: ... - @overload - def __sub__(self, other: _TD64Like_co) -> datetime64: ... - def __rsub__(self, other: datetime64) -> timedelta64: ... 
- __lt__: _ComparisonOp[datetime64, _ArrayLikeDT64_co] - __le__: _ComparisonOp[datetime64, _ArrayLikeDT64_co] - __gt__: _ComparisonOp[datetime64, _ArrayLikeDT64_co] - __ge__: _ComparisonOp[datetime64, _ArrayLikeDT64_co] - -_IntValue = SupportsInt | _CharLike_co | SupportsIndex -_FloatValue = None | _CharLike_co | SupportsFloat | SupportsIndex -_ComplexValue = ( - None - | _CharLike_co - | SupportsFloat - | SupportsComplex - | SupportsIndex - | complex # `complex` is not a subtype of `SupportsComplex` -) + def __lt__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ... + @overload + def __lt__(self, other: _SupportsGT, /) -> bool_: ... -class integer(number[_NBit1]): # type: ignore - @property - def numerator(self: _ScalarType) -> _ScalarType: ... - @property - def denominator(self) -> L[1]: ... @overload - def __round__(self, ndigits: None = ...) -> int: ... + def __le__(self, other: _NumberLike_co, /) -> bool_: ... @overload - def __round__(self: _ScalarType, ndigits: SupportsIndex) -> _ScalarType: ... - - # NOTE: `__index__` is technically defined in the bottom-most - # sub-classes (`int64`, `uint32`, etc) - def item( - self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /, - ) -> int: ... - def tolist(self) -> int: ... - def is_integer(self) -> L[True]: ... - def bit_count(self: _ScalarType) -> int: ... - def __index__(self) -> int: ... - __truediv__: _IntTrueDiv[_NBit1] - __rtruediv__: _IntTrueDiv[_NBit1] - def __mod__(self, value: _IntLike_co) -> integer[Any]: ... - def __rmod__(self, value: _IntLike_co) -> integer[Any]: ... - def __invert__(self: _IntType) -> _IntType: ... - # Ensure that objects annotated as `integer` support bit-wise operations - def __lshift__(self, other: _IntLike_co) -> integer[Any]: ... - def __rlshift__(self, other: _IntLike_co) -> integer[Any]: ... - def __rshift__(self, other: _IntLike_co) -> integer[Any]: ... - def __rrshift__(self, other: _IntLike_co) -> integer[Any]: ... 
- def __and__(self, other: _IntLike_co) -> integer[Any]: ... - def __rand__(self, other: _IntLike_co) -> integer[Any]: ... - def __or__(self, other: _IntLike_co) -> integer[Any]: ... - def __ror__(self, other: _IntLike_co) -> integer[Any]: ... - def __xor__(self, other: _IntLike_co) -> integer[Any]: ... - def __rxor__(self, other: _IntLike_co) -> integer[Any]: ... - -class signedinteger(integer[_NBit1]): - def __init__(self, value: _IntValue = ..., /) -> None: ... - __add__: _SignedIntOp[_NBit1] - __radd__: _SignedIntOp[_NBit1] - __sub__: _SignedIntOp[_NBit1] - __rsub__: _SignedIntOp[_NBit1] - __mul__: _SignedIntOp[_NBit1] - __rmul__: _SignedIntOp[_NBit1] - __floordiv__: _SignedIntOp[_NBit1] - __rfloordiv__: _SignedIntOp[_NBit1] - __pow__: _SignedIntOp[_NBit1] - __rpow__: _SignedIntOp[_NBit1] - __lshift__: _SignedIntBitOp[_NBit1] - __rlshift__: _SignedIntBitOp[_NBit1] - __rshift__: _SignedIntBitOp[_NBit1] - __rrshift__: _SignedIntBitOp[_NBit1] - __and__: _SignedIntBitOp[_NBit1] - __rand__: _SignedIntBitOp[_NBit1] - __xor__: _SignedIntBitOp[_NBit1] - __rxor__: _SignedIntBitOp[_NBit1] - __or__: _SignedIntBitOp[_NBit1] - __ror__: _SignedIntBitOp[_NBit1] - __mod__: _SignedIntMod[_NBit1] - __rmod__: _SignedIntMod[_NBit1] - __divmod__: _SignedIntDivMod[_NBit1] - __rdivmod__: _SignedIntDivMod[_NBit1] + def __le__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ... + @overload + def __le__(self, other: _SupportsGE, /) -> bool_: ... -int8 = signedinteger[_8Bit] -int16 = signedinteger[_16Bit] -int32 = signedinteger[_32Bit] -int64 = signedinteger[_64Bit] + @overload + def __gt__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __gt__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ... + @overload + def __gt__(self, other: _SupportsLT, /) -> bool_: ... 
-byte = signedinteger[_NBitByte] -short = signedinteger[_NBitShort] -intc = signedinteger[_NBitIntC] -intp = signedinteger[_NBitIntP] -int_ = intp -long = signedinteger[_NBitLong] -longlong = signedinteger[_NBitLongLong] + @overload + def __ge__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __ge__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ... + @overload + def __ge__(self, other: _SupportsLE, /) -> bool_: ... -# TODO: `item`/`tolist` returns either `dt.timedelta` or `int` -# depending on the unit -class timedelta64(generic): - def __init__( - self, - value: None | int | _CharLike_co | dt.timedelta | timedelta64 = ..., - format: _CharLike_co | tuple[_CharLike_co, _IntLike_co] = ..., - /, - ) -> None: ... +class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @property - def numerator(self: _ScalarType) -> _ScalarType: ... + def itemsize(self) -> L[1]: ... @property - def denominator(self) -> L[1]: ... + def nbytes(self) -> L[1]: ... + @property + def real(self) -> Self: ... + @property + def imag(self) -> bool_[L[False]]: ... - # NOTE: Only a limited number of units support conversion - # to builtin scalar types: `Y`, `M`, `ns`, `ps`, `fs`, `as` - def __int__(self) -> int: ... - def __float__(self) -> float: ... - def __complex__(self) -> complex: ... - def __neg__(self: _ArraySelf) -> _ArraySelf: ... - def __pos__(self: _ArraySelf) -> _ArraySelf: ... - def __abs__(self: _ArraySelf) -> _ArraySelf: ... - def __add__(self, other: _TD64Like_co) -> timedelta64: ... - def __radd__(self, other: _TD64Like_co) -> timedelta64: ... - def __sub__(self, other: _TD64Like_co) -> timedelta64: ... - def __rsub__(self, other: _TD64Like_co) -> timedelta64: ... - def __mul__(self, other: _FloatLike_co) -> timedelta64: ... - def __rmul__(self, other: _FloatLike_co) -> timedelta64: ... - __truediv__: _TD64Div[float64] - __floordiv__: _TD64Div[int64] - def __rtruediv__(self, other: timedelta64) -> float64: ... 
- def __rfloordiv__(self, other: timedelta64) -> int64: ... - def __mod__(self, other: timedelta64) -> timedelta64: ... - def __rmod__(self, other: timedelta64) -> timedelta64: ... - def __divmod__(self, other: timedelta64) -> tuple[int64, timedelta64]: ... - def __rdivmod__(self, other: timedelta64) -> tuple[int64, timedelta64]: ... - __lt__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] - __le__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] - __gt__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] - __ge__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] - -class unsignedinteger(integer[_NBit1]): - # NOTE: `uint64 + signedinteger -> float64` - def __init__(self, value: _IntValue = ..., /) -> None: ... - __add__: _UnsignedIntOp[_NBit1] - __radd__: _UnsignedIntOp[_NBit1] - __sub__: _UnsignedIntOp[_NBit1] - __rsub__: _UnsignedIntOp[_NBit1] - __mul__: _UnsignedIntOp[_NBit1] - __rmul__: _UnsignedIntOp[_NBit1] - __floordiv__: _UnsignedIntOp[_NBit1] - __rfloordiv__: _UnsignedIntOp[_NBit1] - __pow__: _UnsignedIntOp[_NBit1] - __rpow__: _UnsignedIntOp[_NBit1] - __lshift__: _UnsignedIntBitOp[_NBit1] - __rlshift__: _UnsignedIntBitOp[_NBit1] - __rshift__: _UnsignedIntBitOp[_NBit1] - __rrshift__: _UnsignedIntBitOp[_NBit1] - __and__: _UnsignedIntBitOp[_NBit1] - __rand__: _UnsignedIntBitOp[_NBit1] - __xor__: _UnsignedIntBitOp[_NBit1] - __rxor__: _UnsignedIntBitOp[_NBit1] - __or__: _UnsignedIntBitOp[_NBit1] - __ror__: _UnsignedIntBitOp[_NBit1] - __mod__: _UnsignedIntMod[_NBit1] - __rmod__: _UnsignedIntMod[_NBit1] - __divmod__: _UnsignedIntDivMod[_NBit1] - __rdivmod__: _UnsignedIntDivMod[_NBit1] + @overload # mypy bug workaround: https://github.com/numpy/numpy/issues/29245 + def __new__(cls, value: Never, /) -> bool_[py_bool]: ... + @overload + def __new__(cls, value: _Falsy = ..., /) -> bool_[L[False]]: ... + @overload + def __new__(cls, value: _Truthy, /) -> bool_[L[True]]: ... + @overload + def __new__(cls, value: object, /) -> bool_[py_bool]: ... 
-uint8 = unsignedinteger[_8Bit] -uint16 = unsignedinteger[_16Bit] -uint32 = unsignedinteger[_32Bit] -uint64 = unsignedinteger[_64Bit] + def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... -ubyte = unsignedinteger[_NBitByte] -ushort = unsignedinteger[_NBitShort] -uintc = unsignedinteger[_NBitIntC] -uintp = unsignedinteger[_NBitIntP] -uint = uintp -ulong = unsignedinteger[_NBitLong] -ulonglong = unsignedinteger[_NBitLongLong] + def __bool__(self, /) -> _BoolItemT_co: ... -class inexact(number[_NBit1]): # type: ignore - def __getnewargs__(self: inexact[_64Bit]) -> tuple[float, ...]: ... + @overload + def __int__(self: bool_[L[False]], /) -> L[0]: ... + @overload + def __int__(self: bool_[L[True]], /) -> L[1]: ... + @overload + def __int__(self, /) -> L[0, 1]: ... -_IntType = TypeVar("_IntType", bound=integer[Any]) -_FloatType = TypeVar('_FloatType', bound=floating[Any]) + def __abs__(self) -> Self: ... -class floating(inexact[_NBit1]): - def __init__(self, value: _FloatValue = ..., /) -> None: ... - def item( - self, args: L[0] | tuple[()] | tuple[L[0]] = ..., - /, - ) -> float: ... - def tolist(self) -> float: ... - def is_integer(self) -> builtins.bool: ... - def hex(self: float64) -> str: ... - @classmethod - def fromhex(cls: type[float64], string: str, /) -> float64: ... - def as_integer_ratio(self) -> tuple[int, int]: ... - def __ceil__(self: float64) -> int: ... - def __floor__(self: float64) -> int: ... - def __trunc__(self: float64) -> int: ... - def __getnewargs__(self: float64) -> tuple[float]: ... - def __getformat__(self: float64, typestr: L["double", "float"], /) -> str: ... - @overload - def __round__(self, ndigits: None = ...) -> int: ... - @overload - def __round__(self: _ScalarType, ndigits: SupportsIndex) -> _ScalarType: ... 
- __add__: _FloatOp[_NBit1] - __radd__: _FloatOp[_NBit1] - __sub__: _FloatOp[_NBit1] - __rsub__: _FloatOp[_NBit1] - __mul__: _FloatOp[_NBit1] - __rmul__: _FloatOp[_NBit1] - __truediv__: _FloatOp[_NBit1] - __rtruediv__: _FloatOp[_NBit1] - __floordiv__: _FloatOp[_NBit1] - __rfloordiv__: _FloatOp[_NBit1] - __pow__: _FloatOp[_NBit1] - __rpow__: _FloatOp[_NBit1] - __mod__: _FloatMod[_NBit1] - __rmod__: _FloatMod[_NBit1] - __divmod__: _FloatDivMod[_NBit1] - __rdivmod__: _FloatDivMod[_NBit1] + @overload + def __invert__(self: bool_[L[False]], /) -> bool_[L[True]]: ... + @overload + def __invert__(self: bool_[L[True]], /) -> bool_[L[False]]: ... + @overload + def __invert__(self, /) -> bool_: ... -float16 = floating[_16Bit] -float32 = floating[_32Bit] -float64 = floating[_64Bit] + @overload + def __add__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... + @overload + def __add__(self, other: py_bool | bool_, /) -> bool_: ... + @overload + def __add__(self, other: int, /) -> int_: ... + @overload + def __add__(self, other: float, /) -> float64: ... + @overload + def __add__(self, other: complex, /) -> complex128: ... -half = floating[_NBitHalf] -single = floating[_NBitSingle] -double = floating[_NBitDouble] -longdouble = floating[_NBitLongDouble] + @overload + def __radd__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... + @overload + def __radd__(self, other: py_bool, /) -> bool_: ... + @overload + def __radd__(self, other: int, /) -> int_: ... + @overload + def __radd__(self, other: float, /) -> float64: ... + @overload + def __radd__(self, other: complex, /) -> complex128: ... -# The main reason for `complexfloating` having two typevars is cosmetic. -# It is used to clarify why `complex128`s precision is `_64Bit`, the latter -# describing the two 64 bit floats representing its real and imaginary component + @overload + def __sub__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... + @overload + def __sub__(self, other: int, /) -> int_: ... 
+ @overload + def __sub__(self, other: float, /) -> float64: ... + @overload + def __sub__(self, other: complex, /) -> complex128: ... -class complexfloating(inexact[_NBit1], Generic[_NBit1, _NBit2]): - def __init__(self, value: _ComplexValue = ..., /) -> None: ... - def item( - self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /, - ) -> complex: ... - def tolist(self) -> complex: ... - @property - def real(self) -> floating[_NBit1]: ... # type: ignore[override] - @property - def imag(self) -> floating[_NBit2]: ... # type: ignore[override] - def __abs__(self) -> floating[_NBit1]: ... # type: ignore[override] - def __getnewargs__(self: complex128) -> tuple[float, float]: ... - # NOTE: Deprecated - # def __round__(self, ndigits=...): ... - __add__: _ComplexOp[_NBit1] - __radd__: _ComplexOp[_NBit1] - __sub__: _ComplexOp[_NBit1] - __rsub__: _ComplexOp[_NBit1] - __mul__: _ComplexOp[_NBit1] - __rmul__: _ComplexOp[_NBit1] - __truediv__: _ComplexOp[_NBit1] - __rtruediv__: _ComplexOp[_NBit1] - __pow__: _ComplexOp[_NBit1] - __rpow__: _ComplexOp[_NBit1] - -complex64 = complexfloating[_32Bit, _32Bit] -complex128 = complexfloating[_64Bit, _64Bit] - -csingle = complexfloating[_NBitSingle, _NBitSingle] -cdouble = complexfloating[_NBitDouble, _NBitDouble] -clongdouble = complexfloating[_NBitLongDouble, _NBitLongDouble] - -class flexible(generic): ... # type: ignore - -# TODO: `item`/`tolist` returns either `bytes` or `tuple` -# depending on whether or not it's used as an opaque bytes sequence -# or a structure -class void(flexible): - @overload - def __init__(self, value: _IntLike_co | bytes, /, dtype : None = ...) -> None: ... - @overload - def __init__(self, value: Any, /, dtype: _DTypeLikeVoid) -> None: ... - @property - def real(self: _ArraySelf) -> _ArraySelf: ... - @property - def imag(self: _ArraySelf) -> _ArraySelf: ... - def setfield( - self, val: ArrayLike, dtype: DTypeLike, offset: int = ... - ) -> None: ... 
@overload - def __getitem__(self, key: str | SupportsIndex) -> Any: ... + def __rsub__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... @overload - def __getitem__(self, key: list[str]) -> void: ... - def __setitem__( - self, - key: str | list[str] | SupportsIndex, - value: ArrayLike, - ) -> None: ... + def __rsub__(self, other: int, /) -> int_: ... + @overload + def __rsub__(self, other: float, /) -> float64: ... + @overload + def __rsub__(self, other: complex, /) -> complex128: ... -class character(flexible): # type: ignore - def __int__(self) -> int: ... - def __float__(self) -> float: ... + @overload + def __mul__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... + @overload + def __mul__(self, other: py_bool | bool_, /) -> bool_: ... + @overload + def __mul__(self, other: int, /) -> int_: ... + @overload + def __mul__(self, other: float, /) -> float64: ... + @overload + def __mul__(self, other: complex, /) -> complex128: ... -# NOTE: Most `np.bytes_` / `np.str_` methods return their -# builtin `bytes` / `str` counterpart + @overload + def __rmul__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... + @overload + def __rmul__(self, other: py_bool, /) -> bool_: ... + @overload + def __rmul__(self, other: int, /) -> int_: ... + @overload + def __rmul__(self, other: float, /) -> float64: ... + @overload + def __rmul__(self, other: complex, /) -> complex128: ... -class bytes_(character, bytes): @overload - def __init__(self, value: object = ..., /) -> None: ... + def __pow__[ScalarT: number](self, other: ScalarT, mod: None = None, /) -> ScalarT: ... @overload - def __init__( - self, value: str, /, encoding: str = ..., errors: str = ... - ) -> None: ... - def item( - self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /, - ) -> bytes: ... - def tolist(self) -> bytes: ... + def __pow__(self, other: py_bool | bool_, mod: None = None, /) -> int8: ... + @overload + def __pow__(self, other: int, mod: None = None, /) -> int_: ... 
+ @overload + def __pow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> complex128: ... -class str_(character, str): @overload - def __init__(self, value: object = ..., /) -> None: ... + def __rpow__[ScalarT: number](self, other: ScalarT, mod: None = None, /) -> ScalarT: ... @overload - def __init__( - self, value: bytes, /, encoding: str = ..., errors: str = ... - ) -> None: ... - def item( - self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /, - ) -> str: ... - def tolist(self) -> str: ... + def __rpow__(self, other: py_bool, mod: None = None, /) -> int8: ... + @overload + def __rpow__(self, other: int, mod: None = None, /) -> int_: ... + @overload + def __rpow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complex128: ... -# -# Constants -# + @overload + def __truediv__[ScalarT: inexact](self, other: ScalarT, /) -> ScalarT: ... + @overload + def __truediv__(self, other: float | integer | bool_, /) -> float64: ... + @overload + def __truediv__(self, other: complex, /) -> complex128: ... + + @overload + def __rtruediv__[ScalarT: inexact](self, other: ScalarT, /) -> ScalarT: ... + @overload + def __rtruediv__(self, other: float | integer, /) -> float64: ... + @overload + def __rtruediv__(self, other: complex, /) -> complex128: ... + + @overload + def __floordiv__[ScalarT: integer | floating](self, other: ScalarT, /) -> ScalarT: ... + @overload + def __floordiv__(self, other: py_bool | bool_, /) -> int8: ... + @overload + def __floordiv__(self, other: int, /) -> int_: ... + @overload + def __floordiv__(self, other: float, /) -> float64: ... + + @overload + def __rfloordiv__[ScalarT: integer | floating](self, other: ScalarT, /) -> ScalarT: ... + @overload + def __rfloordiv__(self, other: py_bool, /) -> int8: ... + @overload + def __rfloordiv__(self, other: int, /) -> int_: ... 
+ @overload + def __rfloordiv__(self, other: float, /) -> float64: ... + + # keep in sync with __floordiv__ + @overload + def __mod__[ScalarT: integer | floating](self, other: ScalarT, /) -> ScalarT: ... + @overload + def __mod__(self, other: py_bool | bool_, /) -> int8: ... + @overload + def __mod__(self, other: int, /) -> int_: ... + @overload + def __mod__(self, other: float, /) -> float64: ... + + # keep in sync with __rfloordiv__ + @overload + def __rmod__[ScalarT: integer | floating](self, other: ScalarT, /) -> ScalarT: ... + @overload + def __rmod__(self, other: py_bool, /) -> int8: ... + @overload + def __rmod__(self, other: int, /) -> int_: ... + @overload + def __rmod__(self, other: float, /) -> float64: ... + + # keep in sync with __mod__ + @overload + def __divmod__[ScalarT: integer | floating](self, other: ScalarT, /) -> _2Tuple[ScalarT]: ... + @overload + def __divmod__(self, other: py_bool | bool_, /) -> _2Tuple[int8]: ... + @overload + def __divmod__(self, other: int, /) -> _2Tuple[int_]: ... + @overload + def __divmod__(self, other: float, /) -> _2Tuple[float64]: ... + + # keep in sync with __rmod__ + @overload + def __rdivmod__[ScalarT: integer | floating](self, other: ScalarT, /) -> _2Tuple[ScalarT]: ... + @overload + def __rdivmod__(self, other: py_bool, /) -> _2Tuple[int8]: ... + @overload + def __rdivmod__(self, other: int, /) -> _2Tuple[int_]: ... + @overload + def __rdivmod__(self, other: float, /) -> _2Tuple[float64]: ... + + @overload + def __lshift__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... + @overload + def __lshift__(self, other: py_bool | bool_, /) -> int8: ... + @overload + def __lshift__(self, other: int, /) -> int_: ... + + @overload + def __rlshift__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... + @overload + def __rlshift__(self, other: py_bool, /) -> int8: ... + @overload + def __rlshift__(self, other: int, /) -> int_: ... 
+ + # keep in sync with __lshift__ + @overload + def __rshift__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... + @overload + def __rshift__(self, other: py_bool | bool_, /) -> int8: ... + @overload + def __rshift__(self, other: int, /) -> int_: ... + + # keep in sync with __rlshift__ + @overload + def __rrshift__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... + @overload + def __rrshift__(self, other: py_bool, /) -> int8: ... + @overload + def __rrshift__(self, other: int, /) -> int_: ... + + @overload + def __and__(self: bool_[L[False]], other: py_bool | bool_, /) -> bool_[L[False]]: ... + @overload + def __and__(self, other: L[False] | bool_[L[False]], /) -> bool_[L[False]]: ... + @overload + def __and__(self, other: L[True] | bool_[L[True]], /) -> Self: ... + @overload + def __and__(self, other: py_bool | bool_, /) -> bool_: ... + @overload + def __and__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... + @overload + def __and__(self, other: int, /) -> bool_ | intp: ... + __rand__ = __and__ + + @overload + def __xor__[ItemT: py_bool](self: bool_[L[False]], other: ItemT | bool_[ItemT], /) -> bool_[ItemT]: ... + @overload + def __xor__(self: bool_[L[True]], other: L[True] | bool_[L[True]], /) -> bool_[L[False]]: ... + @overload + def __xor__(self, other: L[False] | bool_[L[False]], /) -> Self: ... + @overload + def __xor__(self, other: py_bool | bool_, /) -> bool_: ... + @overload + def __xor__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... + @overload + def __xor__(self, other: int, /) -> bool_ | intp: ... + __rxor__ = __xor__ + + @overload + def __or__(self: bool_[L[True]], other: py_bool | bool_, /) -> bool_[L[True]]: ... + @overload + def __or__(self, other: L[False] | bool_[L[False]], /) -> Self: ... + @overload + def __or__(self, other: L[True] | bool_[L[True]], /) -> bool_[L[True]]: ... + @overload + def __or__(self, other: py_bool | bool_, /) -> bool_: ... 
+ @overload + def __or__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... + @overload + def __or__(self, other: int, /) -> bool_ | intp: ... + __ror__ = __or__ + + @overload + def __lt__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __lt__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ... + @overload + def __lt__(self, other: _SupportsGT, /) -> bool_: ... + + @overload + def __le__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __le__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ... + @overload + def __le__(self, other: _SupportsGE, /) -> bool_: ... + + @overload + def __gt__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __gt__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ... + @overload + def __gt__(self, other: _SupportsLT, /) -> bool_: ... + + @overload + def __ge__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __ge__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ... + @overload + def __ge__(self, other: _SupportsLE, /) -> bool_: ... + +# NOTE: This should _not_ be `Final[_]`, `_: TypeAlias`, or `type _` +bool_ = bool + +# NOTE: The `object_` constructor returns the passed object, so instances with type +# `object_` cannot exists (at runtime). +# NOTE: Because mypy has some long-standing bugs related to `__new__`, `object_` can't +# be made generic. +@final +class object_(_RealMixin, generic): + @overload + def __new__(cls, value: None = None, /) -> None: ... # type: ignore[misc] + @overload + def __new__[AnyStrT: (LiteralString, str, bytes)](cls, value: AnyStrT, /) -> AnyStrT: ... # type: ignore[misc] + @overload + def __new__[ShapeT: _Shape](cls, value: ndarray[ShapeT, Any], /) -> ndarray[ShapeT, dtype[Self]]: ... 
# type: ignore[misc] + @overload + def __new__(cls, value: SupportsLenAndGetItem[object], /) -> NDArray[Self]: ... # type: ignore[misc] + @overload + def __new__[T](cls, value: T, /) -> T: ... # type: ignore[misc] + @overload # catch-all + def __new__(cls, value: Any = ..., /) -> object | NDArray[Self]: ... # type: ignore[misc] + + def __hash__(self, /) -> int: ... + def __abs__(self, /) -> object_: ... # this affects NDArray[object_].__abs__ + def __call__(self, /, *args: object, **kwargs: object) -> Any: ... + + def __release_buffer__(self, buffer: memoryview, /) -> None: ... + +class integer(_IntegralMixin, _RoundMixin, number[_NBitT, int]): + @abstractmethod + def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... + + # NOTE: `bit_count` and `__index__` are technically defined in the concrete subtypes + def bit_count(self, /) -> int: ... + def __index__(self, /) -> int: ... + def __invert__(self, /) -> Self: ... + + @override # type: ignore[override] + @overload + def __truediv__(self, other: float | integer, /) -> float64: ... + @overload + def __truediv__(self, other: complex, /) -> complex128: ... + + @override # type: ignore[override] + @overload + def __rtruediv__(self, other: float | integer, /) -> float64: ... + @overload + def __rtruediv__(self, other: complex, /) -> complex128: ... + + def __floordiv__(self, value: _IntLike_co, /) -> integer: ... + def __rfloordiv__(self, value: _IntLike_co, /) -> integer: ... + def __mod__(self, value: _IntLike_co, /) -> integer: ... + def __rmod__(self, value: _IntLike_co, /) -> integer: ... + def __divmod__(self, value: _IntLike_co, /) -> _2Tuple[integer]: ... + def __rdivmod__(self, value: _IntLike_co, /) -> _2Tuple[integer]: ... + + # Ensure that objects annotated as `integer` support bit-wise operations + def __lshift__(self, other: _IntLike_co, /) -> integer: ... + def __rlshift__(self, other: _IntLike_co, /) -> integer: ... + def __rshift__(self, other: _IntLike_co, /) -> integer: ... 
+ def __rrshift__(self, other: _IntLike_co, /) -> integer: ... + def __and__(self, other: _IntLike_co, /) -> integer: ... + def __rand__(self, other: _IntLike_co, /) -> integer: ... + def __or__(self, other: _IntLike_co, /) -> integer: ... + def __ror__(self, other: _IntLike_co, /) -> integer: ... + def __xor__(self, other: _IntLike_co, /) -> integer: ... + def __rxor__(self, other: _IntLike_co, /) -> integer: ... + +class signedinteger(integer[_NBitT]): + def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... + + # arithmetic ops + + @override # type: ignore[override] + @overload + def __add__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __add__(self, other: float, /) -> float64: ... + @overload + def __add__(self, other: complex, /) -> complex128: ... + @overload + def __add__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __add__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __radd__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __radd__(self, other: float, /) -> float64: ... + @overload + def __radd__(self, other: complex, /) -> complex128: ... + @overload + def __radd__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __radd__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __sub__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __sub__(self, other: float, /) -> float64: ... + @overload + def __sub__(self, other: complex, /) -> complex128: ... + @overload + def __sub__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __sub__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rsub__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rsub__(self, other: float, /) -> float64: ... 
+ @overload + def __rsub__(self, other: complex, /) -> complex128: ... + @overload + def __rsub__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __rsub__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __mul__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __mul__(self, other: float, /) -> float64: ... + @overload + def __mul__(self, other: complex, /) -> complex128: ... + @overload + def __mul__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __mul__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rmul__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rmul__(self, other: float, /) -> float64: ... + @overload + def __rmul__(self, other: complex, /) -> complex128: ... + @overload + def __rmul__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __rmul__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __pow__(self, other: int | int8 | bool_ | Self, mod: None = None, /) -> Self: ... + @overload + def __pow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> complex128: ... + @overload + def __pow__(self, other: signedinteger, mod: None = None, /) -> signedinteger: ... + @overload + def __pow__(self, other: integer, mod: None = None, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rpow__(self, other: int | int8 | bool_, mod: None = None, /) -> Self: ... + @overload + def __rpow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complex128: ... + @overload + def __rpow__(self, other: signedinteger, mod: None = None, /) -> signedinteger: ... 
+ @overload + def __rpow__(self, other: integer, mod: None = None, /) -> Incomplete: ... + + # modular division ops + + @override # type: ignore[override] + @overload + def __floordiv__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __floordiv__(self, other: float, /) -> float64: ... + @overload + def __floordiv__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __floordiv__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rfloordiv__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rfloordiv__(self, other: float, /) -> float64: ... + @overload + def __rfloordiv__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __rfloordiv__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __mod__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __mod__(self, other: float, /) -> float64: ... + @overload + def __mod__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __mod__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rmod__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rmod__(self, other: float, /) -> float64: ... + @overload + def __rmod__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __rmod__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __divmod__(self, other: int | int8 | bool_ | Self, /) -> _2Tuple[Self]: ... + @overload + def __divmod__(self, other: float, /) -> _2Tuple[float64]: ... + @overload + def __divmod__(self, other: signedinteger, /) -> _2Tuple[signedinteger]: ... + @overload + def __divmod__(self, other: integer, /) -> _2Tuple[Incomplete]: ... + + @override # type: ignore[override] + @overload + def __rdivmod__(self, other: int | int8 | bool_, /) -> _2Tuple[Self]: ... 
+ @overload + def __rdivmod__(self, other: float, /) -> _2Tuple[float64]: ... + @overload + def __rdivmod__(self, other: signedinteger, /) -> _2Tuple[signedinteger]: ... + @overload + def __rdivmod__(self, other: integer, /) -> _2Tuple[Incomplete]: ... + + # bitwise ops + + @override # type: ignore[override] + @overload + def __lshift__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __lshift__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rlshift__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rlshift__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rshift__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __rshift__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rrshift__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rrshift__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __and__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __and__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rand__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rand__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __xor__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __xor__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rxor__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rxor__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __or__(self, other: int | int8 | bool_ | Self, /) -> Self: ... 
+ @overload + def __or__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __ror__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __ror__(self, other: integer, /) -> signedinteger: ... + +int8 = signedinteger[_8Bit] +int16 = signedinteger[_16Bit] +int32 = signedinteger[_32Bit] +int64 = signedinteger[_64Bit] + +byte = signedinteger[_NBitByte] +short = signedinteger[_NBitShort] +intc = signedinteger[_NBitIntC] +intp = signedinteger[_NBitIntP] +int_ = intp +long = signedinteger[_NBitLong] +longlong = signedinteger[_NBitLongLong] + +class unsignedinteger(integer[_NBitT]): + def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... + + # arithmetic ops + + @override # type: ignore[override] + @overload + def __add__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __add__(self, other: float, /) -> float64: ... + @overload + def __add__(self, other: complex, /) -> complex128: ... + @overload + def __add__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __add__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __radd__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __radd__(self, other: float, /) -> float64: ... + @overload + def __radd__(self, other: complex, /) -> complex128: ... + @overload + def __radd__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __radd__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __sub__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __sub__(self, other: float, /) -> float64: ... + @overload + def __sub__(self, other: complex, /) -> complex128: ... + @overload + def __sub__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __sub__(self, other: integer, /) -> Incomplete: ... 
+ + @override # type: ignore[override] + @overload + def __rsub__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __rsub__(self, other: float, /) -> float64: ... + @overload + def __rsub__(self, other: complex, /) -> complex128: ... + @overload + def __rsub__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rsub__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __mul__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __mul__(self, other: float, /) -> float64: ... + @overload + def __mul__(self, other: complex, /) -> complex128: ... + @overload + def __mul__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __mul__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rmul__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __rmul__(self, other: float, /) -> float64: ... + @overload + def __rmul__(self, other: complex, /) -> complex128: ... + @overload + def __rmul__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rmul__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __pow__(self, other: int | uint8 | bool_ | Self, mod: None = None, /) -> Self: ... + @overload + def __pow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> complex128: ... + @overload + def __pow__(self, other: unsignedinteger, mod: None = None, /) -> unsignedinteger: ... + @overload + def __pow__(self, other: integer, mod: None = None, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rpow__(self, other: int | uint8 | bool_, mod: None = None, /) -> Self: ... + @overload + def __rpow__(self, other: float, mod: None = None, /) -> float64: ... 
+ @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complex128: ... + @overload + def __rpow__(self, other: unsignedinteger, mod: None = None, /) -> unsignedinteger: ... + @overload + def __rpow__(self, other: integer, mod: None = None, /) -> Incomplete: ... + + # modular division ops + + @override # type: ignore[override] + @overload + def __floordiv__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __floordiv__(self, other: float, /) -> float64: ... + @overload + def __floordiv__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __floordiv__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rfloordiv__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __rfloordiv__(self, other: float, /) -> float64: ... + @overload + def __rfloordiv__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rfloordiv__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __mod__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __mod__(self, other: float, /) -> float64: ... + @overload + def __mod__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __mod__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rmod__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __rmod__(self, other: float, /) -> float64: ... + @overload + def __rmod__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rmod__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __divmod__(self, other: int | uint8 | bool_ | Self, /) -> _2Tuple[Self]: ... + @overload + def __divmod__(self, other: float, /) -> _2Tuple[float64]: ... 
+ @overload + def __divmod__(self, other: unsignedinteger, /) -> _2Tuple[unsignedinteger]: ... + @overload + def __divmod__(self, other: integer, /) -> _2Tuple[Incomplete]: ... + + @override # type: ignore[override] + @overload + def __rdivmod__(self, other: int | uint8 | bool_, /) -> _2Tuple[Self]: ... + @overload + def __rdivmod__(self, other: float, /) -> _2Tuple[float64]: ... + @overload + def __rdivmod__(self, other: unsignedinteger, /) -> _2Tuple[unsignedinteger]: ... + @overload + def __rdivmod__(self, other: integer, /) -> _2Tuple[Incomplete]: ... + + # bitwise ops + + @override # type: ignore[override] + @overload + def __lshift__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __lshift__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __lshift__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rlshift__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rlshift__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rlshift__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rshift__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __rshift__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rshift__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rrshift__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rrshift__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rrshift__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __and__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __and__(self, other: unsignedinteger, /) -> unsignedinteger: ... 
+ @overload + def __and__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rand__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rand__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rand__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __xor__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __xor__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __xor__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rxor__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rxor__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rxor__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __or__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __or__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __or__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __ror__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __ror__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __ror__(self, other: signedinteger, /) -> signedinteger: ... 
+ +uint8 = unsignedinteger[_8Bit] +uint16 = unsignedinteger[_16Bit] +uint32 = unsignedinteger[_32Bit] +uint64 = unsignedinteger[_64Bit] + +ubyte = unsignedinteger[_NBitByte] +ushort = unsignedinteger[_NBitShort] +uintc = unsignedinteger[_NBitIntC] +uintp = unsignedinteger[_NBitIntP] +uint = uintp +ulong = unsignedinteger[_NBitLong] +ulonglong = unsignedinteger[_NBitLongLong] + +class inexact(number[_NBitT, _InexactItemT_co], Generic[_NBitT, _InexactItemT_co]): + @abstractmethod + def __new__(cls, value: _ConvertibleToFloat | None = 0, /) -> Self: ... + +class floating(_RealMixin, _RoundMixin, inexact[_NBitT, float]): + def __new__(cls, value: _ConvertibleToFloat | None = 0, /) -> Self: ... + + # arithmetic ops + + @override # type: ignore[override] + @overload + def __add__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __add__(self, other: integer | floating, /) -> floating: ... + @overload + def __add__(self, other: float, /) -> Self: ... + @overload + def __add__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __radd__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __radd__(self, other: integer | floating, /) -> floating: ... + @overload + def __radd__(self, other: float, /) -> Self: ... + @overload + def __radd__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __sub__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __sub__(self, other: integer | floating, /) -> floating: ... + @overload + def __sub__(self, other: float, /) -> Self: ... + @overload + def __sub__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __rsub__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rsub__(self, other: integer | floating, /) -> floating: ... 
+ @overload + def __rsub__(self, other: float, /) -> Self: ... + @overload + def __rsub__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __mul__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __mul__(self, other: integer | floating, /) -> floating: ... + @overload + def __mul__(self, other: float, /) -> Self: ... + @overload + def __mul__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __rmul__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rmul__(self, other: integer | floating, /) -> floating: ... + @overload + def __rmul__(self, other: float, /) -> Self: ... + @overload + def __rmul__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __pow__(self, other: int | float16 | uint8 | int8 | bool_ | Self, mod: None = None, /) -> Self: ... + @overload + def __pow__(self, other: integer | floating, mod: None = None, /) -> floating: ... + @overload + def __pow__(self, other: float, mod: None = None, /) -> Self: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __rpow__(self, other: int | float16 | uint8 | int8 | bool_, mod: None = None, /) -> Self: ... + @overload + def __rpow__(self, other: integer | floating, mod: None = None, /) -> floating: ... + @overload + def __rpow__(self, other: float, mod: None = None, /) -> Self: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __truediv__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __truediv__(self, other: integer | floating, /) -> floating: ... + @overload + def __truediv__(self, other: float, /) -> Self: ... 
+ @overload + def __truediv__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __rtruediv__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rtruediv__(self, other: integer | floating, /) -> floating: ... + @overload + def __rtruediv__(self, other: float, /) -> Self: ... + @overload + def __rtruediv__(self, other: complex, /) -> complexfloating: ... + + # modular division ops + + @overload + def __floordiv__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __floordiv__(self, other: integer | floating, /) -> floating: ... + @overload + def __floordiv__(self, other: float, /) -> Self: ... + + @overload + def __rfloordiv__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rfloordiv__(self, other: integer | floating, /) -> floating: ... + @overload + def __rfloordiv__(self, other: float, /) -> Self: ... + + @overload + def __mod__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __mod__(self, other: integer | floating, /) -> floating: ... + @overload + def __mod__(self, other: float, /) -> Self: ... + + @overload + def __rmod__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rmod__(self, other: integer | floating, /) -> floating: ... + @overload + def __rmod__(self, other: float, /) -> Self: ... -e: Final[float] -euler_gamma: Final[float] -inf: Final[float] -nan: Final[float] -pi: Final[float] + @overload + def __divmod__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> _2Tuple[Self]: ... + @overload + def __divmod__(self, other: integer | floating, /) -> _2Tuple[floating]: ... + @overload + def __divmod__(self, other: float, /) -> _2Tuple[Self]: ... 
-little_endian: Final[builtins.bool] -True_: Final[np.bool] -False_: Final[np.bool] + @overload + def __rdivmod__(self, other: int | float16 | uint8 | int8 | bool_, /) -> _2Tuple[Self]: ... + @overload + def __rdivmod__(self, other: integer | floating, /) -> _2Tuple[floating]: ... + @overload + def __rdivmod__(self, other: float, /) -> _2Tuple[Self]: ... -newaxis: None + # NOTE: `is_integer` and `as_integer_ratio` are technically defined in the concrete subtypes + def is_integer(self, /) -> py_bool: ... + def as_integer_ratio(self, /) -> tuple[int, int]: ... -# See `numpy._typing._ufunc` for more concrete nin-/nout-specific stubs -@final -class ufunc: +float16 = floating[_16Bit] +float32 = floating[_32Bit] + +# either a C `double`, `float`, or `longdouble` +class float64(floating[_64Bit], float): # type: ignore[misc] @property - def __name__(self) -> str: ... + def itemsize(self) -> L[8]: ... @property - def __doc__(self) -> str: ... - __call__: Callable[..., Any] + def nbytes(self) -> L[8]: ... + + # overrides for `floating` and `builtins.float` compatibility (`_RealMixin` doesn't work) @property - def nin(self) -> int: ... + def real(self) -> Self: ... @property - def nout(self) -> int: ... + def imag(self) -> Self: ... + def conjugate(self) -> Self: ... + def __getnewargs__(self, /) -> tuple[float]: ... + + @classmethod + def __getformat__(cls, typestr: L["double", "float"], /) -> str: ... # undocumented + + # float64-specific operator overrides + # NOTE: Mypy reports [misc] errors about "unsafely overlapping signatures" for the + # reflected methods. But since they are identical to the non-reflected versions, + # these errors appear to be false positives. + + @overload # type: ignore[override] + def __add__(self, other: _Float64_co, /) -> float64: ... + @overload + def __add__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... 
+ @overload + def __add__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... + @overload + def __add__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __radd__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] + @overload + def __radd__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] + @overload + def __radd__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... + @overload + def __radd__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __sub__(self, other: _Float64_co, /) -> float64: ... + @overload + def __sub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __sub__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... + @overload + def __sub__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __rsub__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] + @overload + def __rsub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] + @overload + def __rsub__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... + @overload + def __rsub__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __mul__(self, other: _Float64_co, /) -> float64: ... + @overload + def __mul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __mul__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... + @overload + def __mul__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __rmul__(self, other: _Float64_co, /) -> float64: ... 
# type: ignore[misc] + @overload + def __rmul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] + @overload + def __rmul__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... + @overload + def __rmul__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __truediv__(self, other: _Float64_co, /) -> float64: ... + @overload + def __truediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __truediv__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... + @overload + def __truediv__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __rtruediv__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] + @overload + def __rtruediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] + @overload + def __rtruediv__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... + @overload + def __rtruediv__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __floordiv__(self, other: _Float64_co, /) -> float64: ... + @overload + def __floordiv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __floordiv__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... + @overload + def __floordiv__(self, other: complex, /) -> float64 | complex128: ... + + @overload + def __rfloordiv__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] + @overload + def __rfloordiv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __rfloordiv__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... 
+ @overload + def __rfloordiv__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __pow__(self, other: _Float64_co, mod: None = None, /) -> float64: ... + @overload + def __pow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... + @overload + def __pow__[NBitT: NBitBase](self, other: complexfloating[NBitT], mod: None = None, /) -> complexfloating[NBitT | _64Bit]: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __rpow__(self, other: _Float64_co, mod: None = None, /) -> float64: ... # type: ignore[misc] + @overload + def __rpow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... # type: ignore[misc] + @overload + def __rpow__[NBitT: NBitBase]( + self, other: complexfloating[NBitT], mod: None = None, / + ) -> complexfloating[NBitT | _64Bit]: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ... + + def __mod__(self, other: _Float64_co, /) -> float64: ... + def __rmod__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] + + def __divmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... + def __rdivmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... # type: ignore[misc] + +half = float16 +single = float32 +double = float64 +longdouble = floating[_NBitLongDouble] + +# The main reason for `complexfloating` having two typevars is cosmetic. +# It is used to clarify why `complex128`s precision is `_64Bit`, the latter +# describing the two 64 bit floats representing its real and imaginary component + +class complexfloating(inexact[_NBitT1, complex], Generic[_NBitT1, _NBitT2]): + @overload + def __new__( + cls, + real: complex | SupportsComplex | SupportsFloat | SupportsIndex = 0, + imag: complex | SupportsFloat | SupportsIndex = 0, + /, + ) -> Self: ... 
+ @overload + def __new__(cls, real: _ConvertibleToComplex | None = 0, /) -> Self: ... + @property - def nargs(self) -> int: ... + def real(self) -> floating[_NBitT1]: ... @property - def ntypes(self) -> int: ... + def imag(self) -> floating[_NBitT2]: ... + + # NOTE: `__complex__` is technically defined in the concrete subtypes + def __complex__(self, /) -> complex: ... + def __abs__(self, /) -> floating[_NBitT1 | _NBitT2]: ... # type: ignore[override] + + @overload # type: ignore[override] + def __add__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... + @overload + def __add__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... + @overload + def __add__[NBitT: NBitBase](self, other: number[NBitT], /) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... + + @overload # type: ignore[override] + def __radd__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... + @overload + def __radd__(self, other: complex, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... + @overload + def __radd__[NBitT: NBitBase]( + self, other: number[NBitT], / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... + + @overload # type: ignore[override] + def __sub__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... + @overload + def __sub__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... + @overload + def __sub__[NBitT: NBitBase](self, other: number[NBitT], /) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... + + @overload # type: ignore[override] + def __rsub__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... + @overload + def __rsub__(self, other: complex, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... 
+ @overload + def __rsub__[NBitT: NBitBase]( + self, other: number[NBitT], / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... + + @overload # type: ignore[override] + def __mul__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... + @overload + def __mul__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... + @overload + def __mul__[NBitT: NBitBase](self, other: number[NBitT], /) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... + + @overload # type: ignore[override] + def __rmul__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... + @overload + def __rmul__(self, other: complex, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... + @overload + def __rmul__[NBitT: NBitBase]( + self, other: number[NBitT], / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... + + @overload # type: ignore[override] + def __truediv__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... + @overload + def __truediv__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... + @overload + def __truediv__[NBitT: NBitBase]( + self, other: number[NBitT], / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... + + @overload # type: ignore[override] + def __rtruediv__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... + @overload + def __rtruediv__(self, other: complex, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... + @overload + def __rtruediv__[NBitT: NBitBase]( + self, other: number[NBitT], / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... + + @overload # type: ignore[override] + def __pow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBitT1, _NBitT2]: ... 
+ @overload + def __pow__( + self, other: complex | float64 | complex128, mod: None = None, / + ) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... + @overload + def __pow__[NBitT: NBitBase]( + self, other: number[NBitT], mod: None = None, / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... + + @overload # type: ignore[override] + def __rpow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBitT1, _NBitT2]: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... + @overload + def __rpow__[NBitT: NBitBase]( + self, other: number[NBitT], mod: None = None, / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... + +complex64 = complexfloating[_32Bit] + +class complex128(complexfloating[_64Bit, _64Bit], complex): @property - def types(self) -> list[str]: ... - # Broad return type because it has to encompass things like - # - # >>> np.logical_and.identity is True - # True - # >>> np.add.identity is 0 - # True - # >>> np.sin.identity is None - # True - # - # and any user-defined ufuncs. + def itemsize(self) -> L[16]: ... @property - def identity(self) -> Any: ... - # This is None for ufuncs and a string for gufuncs. + def nbytes(self) -> L[16]: ... + + # overrides for `floating` and `builtins.float` compatibility @property - def signature(self) -> None | str: ... - # The next four methods will always exist, but they will just - # raise a ValueError ufuncs with that don't accept two input - # arguments and return one output argument. Because of that we - # can't type them very precisely. - reduce: Any - accumulate: Any - reduceat: Any - outer: Any - # Similarly at won't be defined for ufuncs that return multiple - # outputs, so we can't type it very precisely. - at: Any + def real(self) -> float64: ... + @property + def imag(self) -> float64: ... + def conjugate(self) -> Self: ... + def __abs__(self) -> float64: ... 
# type: ignore[override] + def __getnewargs__(self, /) -> tuple[float, float]: ... -# Parameters: `__name__`, `ntypes` and `identity` -absolute: _UFunc_Nin1_Nout1[L['absolute'], L[20], None] -add: _UFunc_Nin2_Nout1[L['add'], L[22], L[0]] -arccos: _UFunc_Nin1_Nout1[L['arccos'], L[8], None] -arccosh: _UFunc_Nin1_Nout1[L['arccosh'], L[8], None] -arcsin: _UFunc_Nin1_Nout1[L['arcsin'], L[8], None] -arcsinh: _UFunc_Nin1_Nout1[L['arcsinh'], L[8], None] -arctan2: _UFunc_Nin2_Nout1[L['arctan2'], L[5], None] -arctan: _UFunc_Nin1_Nout1[L['arctan'], L[8], None] -arctanh: _UFunc_Nin1_Nout1[L['arctanh'], L[8], None] -bitwise_and: _UFunc_Nin2_Nout1[L['bitwise_and'], L[12], L[-1]] -bitwise_count: _UFunc_Nin1_Nout1[L['bitwise_count'], L[11], None] -bitwise_not: _UFunc_Nin1_Nout1[L['invert'], L[12], None] -bitwise_or: _UFunc_Nin2_Nout1[L['bitwise_or'], L[12], L[0]] -bitwise_xor: _UFunc_Nin2_Nout1[L['bitwise_xor'], L[12], L[0]] -cbrt: _UFunc_Nin1_Nout1[L['cbrt'], L[5], None] -ceil: _UFunc_Nin1_Nout1[L['ceil'], L[7], None] -conj: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None] -conjugate: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None] -copysign: _UFunc_Nin2_Nout1[L['copysign'], L[4], None] -cos: _UFunc_Nin1_Nout1[L['cos'], L[9], None] -cosh: _UFunc_Nin1_Nout1[L['cosh'], L[8], None] -deg2rad: _UFunc_Nin1_Nout1[L['deg2rad'], L[5], None] -degrees: _UFunc_Nin1_Nout1[L['degrees'], L[5], None] -divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None] -divmod: _UFunc_Nin2_Nout2[L['divmod'], L[15], None] -equal: _UFunc_Nin2_Nout1[L['equal'], L[23], None] -exp2: _UFunc_Nin1_Nout1[L['exp2'], L[8], None] -exp: _UFunc_Nin1_Nout1[L['exp'], L[10], None] -expm1: _UFunc_Nin1_Nout1[L['expm1'], L[8], None] -fabs: _UFunc_Nin1_Nout1[L['fabs'], L[5], None] -float_power: _UFunc_Nin2_Nout1[L['float_power'], L[4], None] -floor: _UFunc_Nin1_Nout1[L['floor'], L[7], None] -floor_divide: _UFunc_Nin2_Nout1[L['floor_divide'], L[21], None] -fmax: _UFunc_Nin2_Nout1[L['fmax'], L[21], None] -fmin: 
_UFunc_Nin2_Nout1[L['fmin'], L[21], None] -fmod: _UFunc_Nin2_Nout1[L['fmod'], L[15], None] -frexp: _UFunc_Nin1_Nout2[L['frexp'], L[4], None] -gcd: _UFunc_Nin2_Nout1[L['gcd'], L[11], L[0]] -greater: _UFunc_Nin2_Nout1[L['greater'], L[23], None] -greater_equal: _UFunc_Nin2_Nout1[L['greater_equal'], L[23], None] -heaviside: _UFunc_Nin2_Nout1[L['heaviside'], L[4], None] -hypot: _UFunc_Nin2_Nout1[L['hypot'], L[5], L[0]] -invert: _UFunc_Nin1_Nout1[L['invert'], L[12], None] -isfinite: _UFunc_Nin1_Nout1[L['isfinite'], L[20], None] -isinf: _UFunc_Nin1_Nout1[L['isinf'], L[20], None] -isnan: _UFunc_Nin1_Nout1[L['isnan'], L[20], None] -isnat: _UFunc_Nin1_Nout1[L['isnat'], L[2], None] -lcm: _UFunc_Nin2_Nout1[L['lcm'], L[11], None] -ldexp: _UFunc_Nin2_Nout1[L['ldexp'], L[8], None] -left_shift: _UFunc_Nin2_Nout1[L['left_shift'], L[11], None] -less: _UFunc_Nin2_Nout1[L['less'], L[23], None] -less_equal: _UFunc_Nin2_Nout1[L['less_equal'], L[23], None] -log10: _UFunc_Nin1_Nout1[L['log10'], L[8], None] -log1p: _UFunc_Nin1_Nout1[L['log1p'], L[8], None] -log2: _UFunc_Nin1_Nout1[L['log2'], L[8], None] -log: _UFunc_Nin1_Nout1[L['log'], L[10], None] -logaddexp2: _UFunc_Nin2_Nout1[L['logaddexp2'], L[4], float] -logaddexp: _UFunc_Nin2_Nout1[L['logaddexp'], L[4], float] -logical_and: _UFunc_Nin2_Nout1[L['logical_and'], L[20], L[True]] -logical_not: _UFunc_Nin1_Nout1[L['logical_not'], L[20], None] -logical_or: _UFunc_Nin2_Nout1[L['logical_or'], L[20], L[False]] -logical_xor: _UFunc_Nin2_Nout1[L['logical_xor'], L[19], L[False]] -matmul: _GUFunc_Nin2_Nout1[L['matmul'], L[19], None, L["(n?,k),(k,m?)->(n?,m?)"]] -maximum: _UFunc_Nin2_Nout1[L['maximum'], L[21], None] -minimum: _UFunc_Nin2_Nout1[L['minimum'], L[21], None] -mod: _UFunc_Nin2_Nout1[L['remainder'], L[16], None] -modf: _UFunc_Nin1_Nout2[L['modf'], L[4], None] -multiply: _UFunc_Nin2_Nout1[L['multiply'], L[23], L[1]] -negative: _UFunc_Nin1_Nout1[L['negative'], L[19], None] -nextafter: _UFunc_Nin2_Nout1[L['nextafter'], L[4], None] 
-not_equal: _UFunc_Nin2_Nout1[L['not_equal'], L[23], None] -positive: _UFunc_Nin1_Nout1[L['positive'], L[19], None] -power: _UFunc_Nin2_Nout1[L['power'], L[18], None] -rad2deg: _UFunc_Nin1_Nout1[L['rad2deg'], L[5], None] -radians: _UFunc_Nin1_Nout1[L['radians'], L[5], None] -reciprocal: _UFunc_Nin1_Nout1[L['reciprocal'], L[18], None] -remainder: _UFunc_Nin2_Nout1[L['remainder'], L[16], None] -right_shift: _UFunc_Nin2_Nout1[L['right_shift'], L[11], None] -rint: _UFunc_Nin1_Nout1[L['rint'], L[10], None] -sign: _UFunc_Nin1_Nout1[L['sign'], L[19], None] -signbit: _UFunc_Nin1_Nout1[L['signbit'], L[4], None] -sin: _UFunc_Nin1_Nout1[L['sin'], L[9], None] -sinh: _UFunc_Nin1_Nout1[L['sinh'], L[8], None] -spacing: _UFunc_Nin1_Nout1[L['spacing'], L[4], None] -sqrt: _UFunc_Nin1_Nout1[L['sqrt'], L[10], None] -square: _UFunc_Nin1_Nout1[L['square'], L[18], None] -subtract: _UFunc_Nin2_Nout1[L['subtract'], L[21], None] -tan: _UFunc_Nin1_Nout1[L['tan'], L[8], None] -tanh: _UFunc_Nin1_Nout1[L['tanh'], L[8], None] -true_divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None] -trunc: _UFunc_Nin1_Nout1[L['trunc'], L[7], None] -vecdot: _GUFunc_Nin2_Nout1[L['vecdot'], L[19], None, L["(n),(n)->()"]] + # complex128-specific operator overrides + @overload # type: ignore[override] + def __add__(self, other: _Complex128_co, /) -> complex128: ... + @overload + def __add__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... + def __radd__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] -abs = absolute -acos = arccos -acosh = arccosh -asin = arcsin -asinh = arcsinh -atan = arctan -atanh = arctanh -atan2 = arctan2 -concat = concatenate -bitwise_left_shift = left_shift -bitwise_invert = invert -bitwise_right_shift = right_shift -permute_dims = transpose -pow = power + @overload # type: ignore[override] + def __sub__(self, other: _Complex128_co, /) -> complex128: ... 
+ @overload + def __sub__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... + def __rsub__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] -class _CopyMode(enum.Enum): - ALWAYS: L[True] - IF_NEEDED: L[False] - NEVER: L[2] + @overload # type: ignore[override] + def __mul__(self, other: _Complex128_co, /) -> complex128: ... + @overload + def __mul__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... + def __rmul__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] -_CallType = TypeVar("_CallType", bound=Callable[..., Any]) + @overload # type: ignore[override] + def __truediv__(self, other: _Complex128_co, /) -> complex128: ... + @overload + def __truediv__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... + def __rtruediv__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] -class errstate: - def __init__( - self, - *, - call: _ErrFunc | _SupportsWrite[str] = ..., - all: None | _ErrKind = ..., - divide: None | _ErrKind = ..., - over: None | _ErrKind = ..., - under: None | _ErrKind = ..., - invalid: None | _ErrKind = ..., - ) -> None: ... - def __enter__(self) -> None: ... - def __exit__( - self, - exc_type: None | type[BaseException], - exc_value: None | BaseException, - traceback: None | TracebackType, + @overload # type: ignore[override] + def __pow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... + @overload + def __pow__[NBitT: NBitBase](self, other: complexfloating[NBitT], mod: None = None, /) -> complexfloating[NBitT | _64Bit]: ... + def __rpow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... 
# type: ignore[override] + +csingle = complex64 +cdouble = complex128 +clongdouble = complexfloating[_NBitLongDouble] + +class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co]): + @property + def itemsize(self) -> L[8]: ... + @property + def nbytes(self) -> L[8]: ... + + @overload + def __new__(cls, value: _TD64ItemT_co | timedelta64[_TD64ItemT_co], /) -> Self: ... + @overload + def __new__(cls, /) -> timedelta64[L[0]]: ... + @overload + def __new__(cls, value: _NaTValue | None, format: _TimeUnitSpec[_TD64Unit], /) -> timedelta64[None]: ... + @overload + def __new__(cls, value: L[0], format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> timedelta64[L[0]]: ... + @overload + def __new__(cls, value: _IntLike_co, format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> timedelta64[int]: ... + @overload + def __new__(cls, value: dt.timedelta, format: _TimeUnitSpec[_IntTimeUnit], /) -> timedelta64[int]: ... + @overload + def __new__( + cls, + value: dt.timedelta | _IntLike_co, + format: _TimeUnitSpec[_NativeTD64Unit] = ..., /, - ) -> None: ... - def __call__(self, func: _CallType) -> _CallType: ... + ) -> timedelta64[dt.timedelta]: ... + @overload + def __new__(cls, value: _ConvertibleToTD64, format: _TimeUnitSpec[_TD64Unit] = ..., /) -> timedelta64: ... + + # inherited at runtime from `signedinteger` + def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... + + # NOTE: Only a limited number of units support conversion + # to builtin scalar types: `Y`, `M`, `ns`, `ps`, `fs`, `as` + def __int__(self: timedelta64[int], /) -> int: ... + def __float__(self: timedelta64[int], /) -> float: ... + + def __neg__(self, /) -> Self: ... + def __pos__(self, /) -> Self: ... + def __abs__(self, /) -> Self: ... + + # + @overload + def __add__(self: timedelta64[Never], x: timedelta64[int | dt.timedelta] | _IntLike_co, /) -> timedelta64: ... + @overload + def __add__(self: timedelta64[None], x: _TD64Like_co, /) -> timedelta64[None]: ... 
+ @overload + def __add__(self: timedelta64[int | dt.timedelta], x: timedelta64[Never], /) -> timedelta64: ... + @overload + def __add__(self, x: timedelta64[None], /) -> timedelta64[None]: ... + @overload + def __add__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int]: ... + @overload + def __add__(self: timedelta64[int], x: timedelta64, /) -> timedelta64[int | None]: ... + @overload + def __add__[AnyDateOrTimeT: (dt.datetime, dt.date, dt.timedelta)]( + self: timedelta64[dt.timedelta], x: AnyDateOrTimeT, / + ) -> AnyDateOrTimeT: ... + @overload + def __add__[AnyItemT: (dt.timedelta, int, None, _TD64Item)]( + self: timedelta64[AnyItemT], x: timedelta64[AnyItemT] | _IntLike_co, / + ) -> timedelta64[AnyItemT]: ... + __radd__ = __add__ + + # + @overload + def __sub__(self: timedelta64[Never], b: timedelta64[int | dt.timedelta] | _IntLike_co, /) -> timedelta64: ... + @overload + def __sub__(self: timedelta64[None], b: _TD64Like_co, /) -> timedelta64[None]: ... + @overload + def __sub__(self: timedelta64[int | dt.timedelta], b: timedelta64[Never], /) -> timedelta64: ... + @overload + def __sub__(self, b: timedelta64[None], /) -> timedelta64[None]: ... + @overload + def __sub__(self: timedelta64[int], b: timedelta64[int | dt.timedelta], /) -> timedelta64[int]: ... + @overload + def __sub__(self: timedelta64[int], b: timedelta64, /) -> timedelta64[int | None]: ... + @overload + def __sub__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> dt.timedelta: ... + @overload + def __sub__[AnyItemT: (dt.timedelta, int, None, _TD64Item)]( + self: timedelta64[AnyItemT], b: timedelta64[AnyItemT] | _IntLike_co, / + ) -> timedelta64[AnyItemT]: ... + + # NOTE: subtraction is not commutative, so __rsub__ differs from __sub__. + # This confuses mypy, so we ignore the [misc] errors it reports. + @overload + def __rsub__(self: timedelta64[Never], a: timedelta64[int | dt.timedelta] | _IntLike_co, /) -> timedelta64: ... 
+ @overload + def __rsub__(self: timedelta64[None], a: _TD64Like_co, /) -> timedelta64[None]: ... + @overload + def __rsub__[AnyDateT: (dt.datetime, dt.date)](self: timedelta64[dt.timedelta], a: AnyDateT, /) -> AnyDateT: ... + @overload + def __rsub__[AnyItemT: (dt.timedelta, int, None)]( + self: timedelta64[dt.timedelta], a: timedelta64[AnyItemT], / + ) -> timedelta64[AnyItemT]: ... + @overload + def __rsub__[AnyItemT: (dt.timedelta, int, None, _TD64Item)]( # type: ignore[misc] + self: timedelta64[AnyItemT], a: timedelta64[AnyItemT] | _IntLike_co, / + ) -> timedelta64[AnyItemT]: ... + @overload + def __rsub__(self, a: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[overload-cannot-match] + @overload + def __rsub__(self, a: datetime64[None], /) -> datetime64[None]: ... + + # + @overload + def __mul__(self: timedelta64[Never], x: _FloatLike_co, /) -> timedelta64: ... + @overload + def __mul__(self: timedelta64[None], x: _FloatLike_co, /) -> timedelta64[None]: ... + @overload + def __mul__(self, x: _IntLike_co, /) -> Self: ... + @overload + def __mul__(self, x: float | floating, /) -> timedelta64[_TD64ItemT_co | None]: ... + @overload + def __mul__(self, x: _FloatLike_co, /) -> timedelta64: ... + __rmul__ = __mul__ + + # keep in sync with __divmod__ + @overload + def __mod__(self: timedelta64[Never], x: timedelta64[dt.timedelta], /) -> timedelta64: ... + @overload + def __mod__(self: timedelta64[int | dt.timedelta], x: timedelta64[Never], /) -> timedelta64: ... + @overload + def __mod__(self, x: timedelta64[L[0] | None], /) -> timedelta64[None]: ... + @overload + def __mod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... + @overload + def __mod__(self: timedelta64[None], x: timedelta64, /) -> timedelta64[None]: ... + @overload + def __mod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ... 
+ @overload + def __mod__(self: timedelta64[dt.timedelta], x: timedelta64[int], /) -> timedelta64[int | None]: ... + @overload + def __mod__(self: timedelta64[dt.timedelta], x: timedelta64[dt.timedelta], /) -> timedelta64[dt.timedelta | None]: ... + @overload + def __mod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... + @overload + def __mod__(self, x: timedelta64, /) -> timedelta64: ... + + # keep in sync with __rdivmod__ + def __rmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... + + # keep in sync with __mod__ + @overload + def __divmod__( + self: timedelta64[Never], x: timedelta64[Never] | timedelta64[dt.timedelta], / + ) -> tuple[int64, timedelta64]: ... + @overload + def __divmod__(self: timedelta64[int | dt.timedelta], x: timedelta64[Never], /) -> tuple[int64, timedelta64]: ... + @overload + def __divmod__(self, x: timedelta64[L[0] | None], /) -> tuple[int64, timedelta64[None]]: ... + @overload + def __divmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... + @overload + def __divmod__(self: timedelta64[None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... + @overload + def __divmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... + @overload + def __divmod__(self: timedelta64[dt.timedelta], x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... + @overload + def __divmod__( + self: timedelta64[dt.timedelta], x: timedelta64[dt.timedelta], / + ) -> tuple[int64, timedelta64[dt.timedelta | None]]: ... + @overload + def __divmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... + @overload + def __divmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ... + + # keep in sync with __rmod__ + def __rdivmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... 
+ + @overload + def __truediv__(self, b: timedelta64, /) -> float64: ... + @overload + def __truediv__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> float: ... + @overload + def __truediv__(self: timedelta64[Never], b: float | floating | integer, /) -> timedelta64: ... + @overload + def __truediv__[AnyItemT: (dt.timedelta, int, None)]( + self: timedelta64[AnyItemT], b: int | integer, / + ) -> timedelta64[AnyItemT]: ... + @overload + def __truediv__[AnyItemT: (dt.timedelta, int, None)]( + self: timedelta64[AnyItemT], b: float | floating, / + ) -> timedelta64[AnyItemT | None]: ... + @overload + def __truediv__(self, b: float | floating | integer, /) -> timedelta64: ... -@contextmanager -def _no_nep50_warning() -> Generator[None, None, None]: ... -def _get_promotion_state() -> str: ... -def _set_promotion_state(state: str, /) -> None: ... + @overload + def __rtruediv__(self, a: timedelta64, /) -> float64: ... + @overload + def __rtruediv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> float: ... -class ndenumerate(Generic[_ScalarType]): - iter: flatiter[NDArray[_ScalarType]] @overload - def __new__( - cls, arr: _FiniteNestedSequence[_SupportsArray[dtype[_ScalarType]]], - ) -> ndenumerate[_ScalarType]: ... + def __floordiv__(self, b: timedelta64, /) -> int64: ... @overload - def __new__(cls, arr: str | _NestedSequence[str]) -> ndenumerate[str_]: ... + def __floordiv__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> int: ... @overload - def __new__(cls, arr: bytes | _NestedSequence[bytes]) -> ndenumerate[bytes_]: ... + def __floordiv__(self: timedelta64[Never], b: float | floating | integer, /) -> timedelta64: ... @overload - def __new__(cls, arr: builtins.bool | _NestedSequence[builtins.bool]) -> ndenumerate[np.bool]: ... + def __floordiv__[AnyItemT: (dt.timedelta, int, None)]( + self: timedelta64[AnyItemT], b: int | integer, / + ) -> timedelta64[AnyItemT]: ... 
@overload - def __new__(cls, arr: int | _NestedSequence[int]) -> ndenumerate[int_]: ... + def __floordiv__[AnyItemT: (dt.timedelta, int, None)]( + self: timedelta64[AnyItemT], b: float | floating, / + ) -> timedelta64[AnyItemT | None]: ... + @overload - def __new__(cls, arr: float | _NestedSequence[float]) -> ndenumerate[float64]: ... + def __rfloordiv__(self, a: timedelta64, /) -> int64: ... @overload - def __new__(cls, arr: complex | _NestedSequence[complex]) -> ndenumerate[complex128]: ... - def __next__(self: ndenumerate[_ScalarType]) -> tuple[_Shape, _ScalarType]: ... - def __iter__(self: _T) -> _T: ... + def __rfloordiv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> int: ... -class ndindex: @overload - def __init__(self, shape: tuple[SupportsIndex, ...], /) -> None: ... + def __lt__(self, other: _TD64Like_co, /) -> bool_: ... @overload - def __init__(self, *shape: SupportsIndex) -> None: ... - def __iter__(self: _T) -> _T: ... - def __next__(self) -> _Shape: ... - -# TODO: The type of each `__next__` and `iters` return-type depends -# on the length and dtype of `args`; we can't describe this behavior yet -# as we lack variadics (PEP 646). -@final -class broadcast: - def __new__(cls, *args: ArrayLike) -> broadcast: ... - @property - def index(self) -> int: ... - @property - def iters(self) -> tuple[flatiter[Any], ...]: ... - @property - def nd(self) -> int: ... - @property - def ndim(self) -> int: ... - @property - def numiter(self) -> int: ... - @property - def shape(self) -> _Shape: ... - @property - def size(self) -> int: ... - def __next__(self) -> tuple[Any, ...]: ... - def __iter__(self: _T) -> _T: ... - def reset(self) -> None: ... + def __lt__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ... + @overload + def __lt__(self, other: _SupportsGT, /) -> bool_: ... 
-@final -class busdaycalendar: - def __new__( - cls, - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] = ..., - ) -> busdaycalendar: ... - @property - def weekmask(self) -> NDArray[np.bool]: ... - @property - def holidays(self) -> NDArray[datetime64]: ... - -class finfo(Generic[_FloatType]): - dtype: dtype[_FloatType] - bits: int - eps: _FloatType - epsneg: _FloatType - iexp: int - machep: int - max: _FloatType - maxexp: int - min: _FloatType - minexp: int - negep: int - nexp: int - nmant: int - precision: int - resolution: _FloatType - smallest_subnormal: _FloatType - @property - def smallest_normal(self) -> _FloatType: ... - @property - def tiny(self) -> _FloatType: ... @overload - def __new__( - cls, dtype: inexact[_NBit1] | _DTypeLike[inexact[_NBit1]] - ) -> finfo[floating[_NBit1]]: ... + def __le__(self, other: _TD64Like_co, /) -> bool_: ... @overload - def __new__( - cls, dtype: complex | float | type[complex] | type[float] - ) -> finfo[float64]: ... + def __le__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ... @overload - def __new__( - cls, dtype: str - ) -> finfo[floating[Any]]: ... - -class iinfo(Generic[_IntType]): - dtype: dtype[_IntType] - kind: str - bits: int - key: str - @property - def min(self) -> int: ... - @property - def max(self) -> int: ... + def __le__(self, other: _SupportsGT, /) -> bool_: ... @overload - def __new__(cls, dtype: _IntType | _DTypeLike[_IntType]) -> iinfo[_IntType]: ... + def __gt__(self, other: _TD64Like_co, /) -> bool_: ... @overload - def __new__(cls, dtype: int | type[int]) -> iinfo[int_]: ... + def __gt__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ... @overload - def __new__(cls, dtype: str) -> iinfo[Any]: ... 
- -_NDIterFlagsKind = L[ - "buffered", - "c_index", - "copy_if_overlap", - "common_dtype", - "delay_bufalloc", - "external_loop", - "f_index", - "grow_inner", "growinner", - "multi_index", - "ranged", - "refs_ok", - "reduce_ok", - "zerosize_ok", -] - -_NDIterOpFlagsKind = L[ - "aligned", - "allocate", - "arraymask", - "copy", - "config", - "nbo", - "no_subtype", - "no_broadcast", - "overlap_assume_elementwise", - "readonly", - "readwrite", - "updateifcopy", - "virtual", - "writeonly", - "writemasked" -] - -@final -class nditer: - def __new__( - cls, - op: ArrayLike | Sequence[ArrayLike], - flags: None | Sequence[_NDIterFlagsKind] = ..., - op_flags: None | Sequence[Sequence[_NDIterOpFlagsKind]] = ..., - op_dtypes: DTypeLike | Sequence[DTypeLike] = ..., - order: _OrderKACF = ..., - casting: _CastingKind = ..., - op_axes: None | Sequence[Sequence[SupportsIndex]] = ..., - itershape: None | _ShapeLike = ..., - buffersize: SupportsIndex = ..., - ) -> nditer: ... - def __enter__(self) -> nditer: ... - def __exit__( - self, - exc_type: None | type[BaseException], - exc_value: None | BaseException, - traceback: None | TracebackType, - ) -> None: ... - def __iter__(self) -> nditer: ... - def __next__(self) -> tuple[NDArray[Any], ...]: ... - def __len__(self) -> int: ... - def __copy__(self) -> nditer: ... - @overload - def __getitem__(self, index: SupportsIndex) -> NDArray[Any]: ... - @overload - def __getitem__(self, index: slice) -> tuple[NDArray[Any], ...]: ... - def __setitem__(self, index: slice | SupportsIndex, value: ArrayLike) -> None: ... - def close(self) -> None: ... - def copy(self) -> nditer: ... - def debug_print(self) -> None: ... - def enable_external_loop(self) -> None: ... - def iternext(self) -> builtins.bool: ... - def remove_axis(self, i: SupportsIndex, /) -> None: ... - def remove_multi_index(self) -> None: ... - def reset(self) -> None: ... - @property - def dtypes(self) -> tuple[dtype[Any], ...]: ... 
- @property - def finished(self) -> builtins.bool: ... - @property - def has_delayed_bufalloc(self) -> builtins.bool: ... - @property - def has_index(self) -> builtins.bool: ... - @property - def has_multi_index(self) -> builtins.bool: ... - @property - def index(self) -> int: ... - @property - def iterationneedsapi(self) -> builtins.bool: ... - @property - def iterindex(self) -> int: ... - @property - def iterrange(self) -> tuple[int, ...]: ... - @property - def itersize(self) -> int: ... - @property - def itviews(self) -> tuple[NDArray[Any], ...]: ... - @property - def multi_index(self) -> tuple[int, ...]: ... - @property - def ndim(self) -> int: ... - @property - def nop(self) -> int: ... - @property - def operands(self) -> tuple[NDArray[Any], ...]: ... - @property - def shape(self) -> tuple[int, ...]: ... - @property - def value(self) -> tuple[NDArray[Any], ...]: ... - -_MemMapModeKind = L[ - "readonly", "r", - "copyonwrite", "c", - "readwrite", "r+", - "write", "w+", -] + def __gt__(self, other: _SupportsGT, /) -> bool_: ... -class memmap(ndarray[_ShapeType, _DType_co]): - __array_priority__: ClassVar[float] - filename: str | None - offset: int - mode: str @overload - def __new__( - subtype, - filename: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _MemMapIOProtocol, - dtype: type[uint8] = ..., - mode: _MemMapModeKind = ..., - offset: int = ..., - shape: None | int | tuple[int, ...] = ..., - order: _OrderKACF = ..., - ) -> memmap[Any, dtype[uint8]]: ... + def __ge__(self, other: _TD64Like_co, /) -> bool_: ... @overload - def __new__( - subtype, - filename: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _MemMapIOProtocol, - dtype: _DTypeLike[_ScalarType], - mode: _MemMapModeKind = ..., - offset: int = ..., - shape: None | int | tuple[int, ...] = ..., - order: _OrderKACF = ..., - ) -> memmap[Any, dtype[_ScalarType]]: ... + def __ge__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ... 
@overload - def __new__( - subtype, - filename: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _MemMapIOProtocol, - dtype: DTypeLike, - mode: _MemMapModeKind = ..., - offset: int = ..., - shape: None | int | tuple[int, ...] = ..., - order: _OrderKACF = ..., - ) -> memmap[Any, dtype[Any]]: ... - def __array_finalize__(self, obj: object) -> None: ... - def __array_wrap__( - self, - array: memmap[_ShapeType, _DType_co], - context: None | tuple[ufunc, tuple[Any, ...], int] = ..., - return_scalar: builtins.bool = ..., - ) -> Any: ... - def flush(self) -> None: ... - -# TODO: Add a mypy plugin for managing functions whose output type is dependent -# on the literal value of some sort of signature (e.g. `einsum` and `vectorize`) -class vectorize: - pyfunc: Callable[..., Any] - cache: builtins.bool - signature: None | str - otypes: None | str - excluded: set[int | str] - __doc__: None | str - def __init__( - self, - pyfunc: Callable[..., Any], - otypes: None | str | Iterable[DTypeLike] = ..., - doc: None | str = ..., - excluded: None | Iterable[int | str] = ..., - cache: builtins.bool = ..., - signature: None | str = ..., - ) -> None: ... - def __call__(self, *args: Any, **kwargs: Any) -> Any: ... - -class poly1d: - @property - def variable(self) -> str: ... - @property - def order(self) -> int: ... - @property - def o(self) -> int: ... - @property - def roots(self) -> NDArray[Any]: ... - @property - def r(self) -> NDArray[Any]: ... - - @property - def coeffs(self) -> NDArray[Any]: ... - @coeffs.setter - def coeffs(self, value: NDArray[Any]) -> None: ... + def __ge__(self, other: _SupportsGT, /) -> bool_: ... +class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @property - def c(self) -> NDArray[Any]: ... - @c.setter - def c(self, value: NDArray[Any]) -> None: ... - - @property - def coef(self) -> NDArray[Any]: ... - @coef.setter - def coef(self, value: NDArray[Any]) -> None: ... - + def itemsize(self) -> L[8]: ... 
@property - def coefficients(self) -> NDArray[Any]: ... - @coefficients.setter - def coefficients(self, value: NDArray[Any]) -> None: ... - - __hash__: ClassVar[None] # type: ignore + def nbytes(self) -> L[8]: ... @overload - def __array__(self, t: None = ..., copy: None | bool = ...) -> NDArray[Any]: ... + def __new__(cls, value: datetime64[_DT64ItemT_co], /) -> Self: ... @overload - def __array__(self, t: _DType, copy: None | bool = ...) -> ndarray[Any, _DType]: ... - + def __new__[AnyItemT: (dt.datetime, dt.date, None)](cls, value: AnyItemT, /) -> datetime64[AnyItemT]: ... @overload - def __call__(self, val: _ScalarLike_co) -> Any: ... + def __new__(cls, value: _NaTValue | None = ..., format: _TimeUnitSpec[_TD64Unit] = ..., /) -> datetime64[None]: ... @overload - def __call__(self, val: poly1d) -> poly1d: ... + def __new__(cls, value: _DT64Now, format: _TimeUnitSpec[_NativeTimeUnit] = ..., /) -> datetime64[dt.datetime]: ... @overload - def __call__(self, val: ArrayLike) -> NDArray[Any]: ... - - def __init__( - self, - c_or_r: ArrayLike, - r: builtins.bool = ..., - variable: None | str = ..., - ) -> None: ... - def __len__(self) -> int: ... - def __neg__(self) -> poly1d: ... - def __pos__(self) -> poly1d: ... - def __mul__(self, other: ArrayLike) -> poly1d: ... - def __rmul__(self, other: ArrayLike) -> poly1d: ... - def __add__(self, other: ArrayLike) -> poly1d: ... - def __radd__(self, other: ArrayLike) -> poly1d: ... - def __pow__(self, val: _FloatLike_co) -> poly1d: ... # Integral floats are accepted - def __sub__(self, other: ArrayLike) -> poly1d: ... - def __rsub__(self, other: ArrayLike) -> poly1d: ... - def __div__(self, other: ArrayLike) -> poly1d: ... - def __truediv__(self, other: ArrayLike) -> poly1d: ... - def __rdiv__(self, other: ArrayLike) -> poly1d: ... - def __rtruediv__(self, other: ArrayLike) -> poly1d: ... - def __getitem__(self, val: int) -> Any: ... - def __setitem__(self, key: int, val: Any) -> None: ... 
- def __iter__(self) -> Iterator[Any]: ... - def deriv(self, m: SupportsInt | SupportsIndex = ...) -> poly1d: ... - def integ( - self, - m: SupportsInt | SupportsIndex = ..., - k: None | _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., - ) -> poly1d: ... - -class matrix(ndarray[_ShapeType, _DType_co]): - __array_priority__: ClassVar[float] - def __new__( - subtype, - data: ArrayLike, - dtype: DTypeLike = ..., - copy: builtins.bool = ..., - ) -> matrix[Any, Any]: ... - def __array_finalize__(self, obj: object) -> None: ... - + def __new__(cls, value: _DT64Date, format: _TimeUnitSpec[_DateUnit] = ..., /) -> datetime64[dt.date]: ... @overload - def __getitem__(self, key: ( - SupportsIndex - | _ArrayLikeInt_co - | tuple[SupportsIndex | _ArrayLikeInt_co, ...] - )) -> Any: ... + def __new__(cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_IntTimeUnit], /) -> datetime64[int]: ... @overload - def __getitem__(self, key: ( - None - | slice - | ellipsis - | SupportsIndex - | _ArrayLikeInt_co - | tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...] - )) -> matrix[Any, _DType_co]: ... + def __new__( # type: ignore[overload-cannot-match] + cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_NativeTimeUnit], / + ) -> datetime64[dt.datetime]: ... @overload - def __getitem__(self: NDArray[void], key: str) -> matrix[Any, dtype[Any]]: ... + def __new__(cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_DateUnit], /) -> datetime64[dt.date]: ... # type: ignore[overload-cannot-match] @overload - def __getitem__(self: NDArray[void], key: list[str]) -> matrix[_ShapeType, dtype[void]]: ... + def __new__(cls, value: bytes | str | dt.date | None, format: _TimeUnitSpec[_TD64Unit] = ..., /) -> Self: ... - def __mul__(self, other: ArrayLike) -> matrix[Any, Any]: ... - def __rmul__(self, other: ArrayLike) -> matrix[Any, Any]: ... - def __imul__(self, other: ArrayLike) -> matrix[_ShapeType, _DType_co]: ... 
- def __pow__(self, other: ArrayLike) -> matrix[Any, Any]: ... - def __ipow__(self, other: ArrayLike) -> matrix[_ShapeType, _DType_co]: ... + # + def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... + # @overload - def sum(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ... + def __add__(self: datetime64[Never], x: _TD64Like_co, /) -> datetime64: ... @overload - def sum(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ...) -> matrix[Any, Any]: ... + def __add__(self, x: _IntLike_co, /) -> Self: ... @overload - def sum(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... - + def __add__(self: datetime64[None], x: timedelta64, /) -> datetime64[None]: ... @overload - def mean(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ... + def __add__(self: datetime64[int | dt.datetime], x: timedelta64[Never], /) -> datetime64: ... @overload - def mean(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ...) -> matrix[Any, Any]: ... + def __add__(self: datetime64[int], x: timedelta64[int | dt.timedelta], /) -> datetime64[int]: ... @overload - def mean(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... - + def __add__(self: datetime64[dt.datetime], x: timedelta64[dt.timedelta], /) -> datetime64[dt.datetime]: ... + @overload + def __add__(self: datetime64[dt.date], x: timedelta64[dt.timedelta], /) -> datetime64[dt.date]: ... @overload - def std(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> Any: ... + def __add__(self: datetime64[dt.date], x: timedelta64[int], /) -> datetime64[int]: ... @overload - def std(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> matrix[Any, Any]: ... + def __add__(self, x: timedelta64[None], /) -> datetime64[None]: ... 
@overload - def std(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ..., ddof: float = ...) -> _NdArraySubClass: ... + def __add__(self, x: _TD64Like_co, /) -> datetime64: ... + __radd__ = __add__ + # @overload - def var(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> Any: ... + def __sub__(self: datetime64[Never], x: _TD64Like_co, /) -> datetime64: ... @overload - def var(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> matrix[Any, Any]: ... + def __sub__(self: datetime64[Never], x: datetime64, /) -> timedelta64: ... @overload - def var(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ..., ddof: float = ...) -> _NdArraySubClass: ... - + def __sub__(self, x: _IntLike_co, /) -> Self: ... + @overload + def __sub__(self: datetime64[dt.date], x: dt.date, /) -> dt.timedelta: ... + @overload + def __sub__(self: datetime64[None], x: timedelta64, /) -> datetime64[None]: ... + @overload + def __sub__(self: datetime64[None], x: datetime64, /) -> timedelta64[None]: ... + @overload + def __sub__(self: datetime64[int], x: timedelta64, /) -> datetime64[int]: ... + @overload + def __sub__(self: datetime64[int], x: datetime64, /) -> timedelta64[int]: ... + @overload + def __sub__(self: datetime64[dt.datetime], x: timedelta64[int], /) -> datetime64[int]: ... @overload - def prod(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ... + def __sub__(self: datetime64[dt.datetime], x: timedelta64[dt.timedelta], /) -> datetime64[dt.datetime]: ... @overload - def prod(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ...) -> matrix[Any, Any]: ... + def __sub__(self: datetime64[dt.datetime], x: datetime64[int], /) -> timedelta64[int]: ... @overload - def prod(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... 
+ def __sub__(self: datetime64[dt.date], x: timedelta64[int], /) -> datetime64[dt.date | int]: ... + @overload + def __sub__(self: datetime64[dt.date], x: timedelta64[dt.timedelta], /) -> datetime64[dt.date]: ... + @overload + def __sub__(self: datetime64[dt.date], x: datetime64[dt.date], /) -> timedelta64[dt.timedelta]: ... + @overload + def __sub__(self, x: timedelta64[None], /) -> datetime64[None]: ... + @overload + def __sub__(self, x: datetime64[None], /) -> timedelta64[None]: ... + @overload + def __sub__(self, x: _TD64Like_co, /) -> datetime64: ... + @overload + def __sub__(self, x: datetime64, /) -> timedelta64: ... + # NOTE: mypy gets confused by the non-commutativity of subtraction here + @overload + def __rsub__(self: datetime64[Never], x: datetime64, /) -> timedelta64: ... + @overload + def __rsub__(self, x: _IntLike_co, /) -> Self: ... @overload - def any(self, axis: None = ..., out: None = ...) -> np.bool: ... + def __rsub__(self: datetime64[dt.date], x: dt.date, /) -> dt.timedelta: ... @overload - def any(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, dtype[np.bool]]: ... + def __rsub__(self: datetime64[None], x: datetime64, /) -> timedelta64[None]: ... @overload - def any(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + def __rsub__(self: datetime64[int], x: datetime64, /) -> timedelta64[int]: ... + @overload + def __rsub__(self: datetime64[dt.datetime], x: datetime64[int], /) -> timedelta64[int]: ... + @overload + def __rsub__(self: datetime64[dt.datetime], x: datetime64[dt.date], /) -> timedelta64[dt.timedelta]: ... + @overload + def __rsub__(self, x: datetime64[None], /) -> timedelta64[None]: ... + @overload + def __rsub__(self, x: datetime64, /) -> timedelta64: ... + # @overload - def all(self, axis: None = ..., out: None = ...) -> np.bool: ... + def __lt__(self, other: datetime64, /) -> bool_: ... @overload - def all(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, dtype[np.bool]]: ... 
+ def __lt__(self, other: _ArrayLikeDT64_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ... @overload - def all(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + def __lt__(self, other: _SupportsGT, /) -> bool_: ... @overload - def max(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> _ScalarType: ... + def __le__(self, other: datetime64, /) -> bool_: ... @overload - def max(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, _DType_co]: ... + def __le__(self, other: _ArrayLikeDT64_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ... @overload - def max(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + def __le__(self, other: _SupportsGT, /) -> bool_: ... @overload - def min(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> _ScalarType: ... + def __gt__(self, other: datetime64, /) -> bool_: ... @overload - def min(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, _DType_co]: ... + def __gt__(self, other: _ArrayLikeDT64_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ... @overload - def min(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + def __gt__(self, other: _SupportsGT, /) -> bool_: ... @overload - def argmax(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> intp: ... + def __ge__(self, other: datetime64, /) -> bool_: ... + @overload + def __ge__(self, other: _ArrayLikeDT64_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ... + @overload + def __ge__(self, other: _SupportsGT, /) -> bool_: ... + +@final # cannot be subclassed at runtime +class flexible(_RealMixin, generic[_FlexibleItemT_co], Generic[_FlexibleItemT_co]): ... # type: ignore[misc] + +class void(flexible[bytes | tuple[Any, ...]]): # type: ignore[misc] @overload - def argmax(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, dtype[intp]]: ... 
+ def __new__(cls, length_or_data: _IntLike_co | bytes, /, dtype: None = None) -> Self: ... @overload - def argmax(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + def __new__(cls, length_or_data: object, /, dtype: _DTypeLikeVoid) -> Self: ... + # + @overload + def __getitem__(self, key: tuple[()], /) -> Self: ... + @overload + def __getitem__( + self, key: EllipsisType | tuple[EllipsisType], / + ) -> ndarray[tuple[()], dtype[Self]]: ... + @overload + def __getitem__( + self, key: None | tuple[None], / + ) -> ndarray[tuple[int], dtype[Self]]: ... + @overload + def __getitem__( + self, key: tuple[None, None], / + ) -> ndarray[tuple[int, int], dtype[Self]]: ... @overload - def argmin(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> intp: ... + def __getitem__( + self, key: tuple[None, None, None], / + ) -> ndarray[tuple[int, int, int], dtype[Self]]: ... + @overload # Limited support for (None,) * N > 3 + def __getitem__(self, key: tuple[None, ...], /) -> NDArray[Self]: ... @overload - def argmin(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, dtype[intp]]: ... + def __getitem__(self, key: str | SupportsIndex, /) -> Any: ... @overload - def argmin(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + def __getitem__(self, key: list[str], /) -> void: ... + + # + def __setitem__(self, key: str | list[str] | SupportsIndex, value: ArrayLike, /) -> None: ... + + def setfield(self, val: ArrayLike, dtype: DTypeLike, offset: int = ...) -> None: ... + +class character(flexible[_CharacterItemT_co], Generic[_CharacterItemT_co]): # type: ignore[misc] + @abstractmethod + def __new__(cls, value: object = ..., /) -> Self: ... + +# NOTE: Most `np.bytes_` / `np.str_` methods return their builtin `bytes` / `str` counterpart +class bytes_(character[bytes], bytes): # type: ignore[misc] @overload - def ptp(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) 
-> _ScalarType: ... + def __new__(cls, value: object = b"", /) -> Self: ... @overload - def ptp(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, _DType_co]: ... + def __new__(cls, value: str, /, encoding: str, errors: str = "strict") -> Self: ... + + # + @override + def __hash__(self, /) -> int: ... + + # + def __bytes__(self, /) -> bytes: ... + +class str_(character[str], str): # type: ignore[misc] + @overload + def __new__(cls, value: object = "", /) -> Self: ... @overload - def ptp(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + def __new__(cls, value: bytes, /, encoding: str, errors: str = "strict") -> Self: ... + + # + @override + def __hash__(self, /) -> int: ... - def squeeze(self, axis: None | _ShapeLike = ...) -> matrix[Any, _DType_co]: ... - def tolist(self: matrix[Any, dtype[_SupportsItem[_T]]]) -> list[list[_T]]: ... # type: ignore[typevar] - def ravel(self, order: _OrderKACF = ...) -> matrix[Any, _DType_co]: ... - def flatten(self, order: _OrderKACF = ...) -> matrix[Any, _DType_co]: ... +# See `numpy._typing._ufunc` for more concrete nin-/nout-specific stubs +@final +class ufunc: + __signature__: Final[inspect.Signature] @property - def T(self) -> matrix[Any, _DType_co]: ... + def __name__(self) -> LiteralString: ... + @property + def __qualname__(self) -> LiteralString: ... # pyright: ignore[reportIncompatibleVariableOverride] + @property + def __doc__(self) -> str: ... # type: ignore[override] + @property + def nin(self) -> int: ... + @property + def nout(self) -> int: ... + @property + def nargs(self) -> int: ... @property - def I(self) -> matrix[Any, Any]: ... + def ntypes(self) -> int: ... @property - def A(self) -> ndarray[_ShapeType, _DType_co]: ... + def types(self) -> list[LiteralString]: ... 
+ # Broad return type because it has to encompass things like + # + # >>> np.logical_and.identity is True + # True + # >>> np.add.identity is 0 + # True + # >>> np.sin.identity is None + # True + # + # and any user-defined ufuncs. @property - def A1(self) -> ndarray[Any, _DType_co]: ... + def identity(self) -> Any: ... + # This is None for ufuncs and a string for gufuncs. @property - def H(self) -> matrix[Any, _DType_co]: ... - def getT(self) -> matrix[Any, _DType_co]: ... - def getI(self) -> matrix[Any, Any]: ... - def getA(self) -> ndarray[_ShapeType, _DType_co]: ... - def getA1(self) -> ndarray[Any, _DType_co]: ... - def getH(self) -> matrix[Any, _DType_co]: ... + def signature(self) -> LiteralString | None: ... + + def __call__(self, /, *args: Any, **kwargs: Any) -> Any: ... + + # The next four methods will always exist, but they will just + # raise a ValueError ufuncs with that don't accept two input + # arguments and return one output argument. Because of that we + # can't type them very precisely. + def accumulate( + self, + array: ArrayLike, + /, + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: ndarray | EllipsisType | None = None, + ) -> NDArray[Incomplete]: ... + def reduce( + self, + array: ArrayLike, + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + out: ndarray | EllipsisType | None = None, + **kwargs: Incomplete, + ) -> Incomplete: ... + def reduceat( + self, + array: ArrayLike, + /, + indices: _ArrayLikeInt_co, + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: ndarray | EllipsisType | None = None, + ) -> NDArray[Incomplete]: ... + def outer(self, A: ArrayLike, B: ArrayLike, /, **kwargs: Incomplete) -> NDArray[Incomplete]: ... + + # Similarly `at` won't be defined for ufuncs that return multiple + # outputs, so we can't type it very precisely. + def at(self, a: ndarray, indices: _ArrayLikeInt_co, b: ArrayLike | None = None, /) -> None: ... 
+ + # + def resolve_dtypes( + self, + /, + dtypes: tuple[dtype | type | None, ...], + *, + signature: tuple[dtype | None, ...] | None = None, + casting: _CastingKind | None = None, + reduction: py_bool = False, + ) -> tuple[dtype, ...]: ... -_CharType = TypeVar("_CharType", str_, bytes_) -_CharDType = TypeVar("_CharDType", dtype[str_], dtype[bytes_]) +# Parameters: `__name__`, `ntypes` and `identity` +absolute: _UFunc_Nin1_Nout1[L["absolute"], L[20], None] +add: _UFunc_Nin2_Nout1[L["add"], L[22], L[0]] +arccos: _UFunc_Nin1_Nout1[L["arccos"], L[8], None] +arccosh: _UFunc_Nin1_Nout1[L["arccosh"], L[8], None] +arcsin: _UFunc_Nin1_Nout1[L["arcsin"], L[8], None] +arcsinh: _UFunc_Nin1_Nout1[L["arcsinh"], L[8], None] +arctan2: _UFunc_Nin2_Nout1[L["arctan2"], L[5], None] +arctan: _UFunc_Nin1_Nout1[L["arctan"], L[8], None] +arctanh: _UFunc_Nin1_Nout1[L["arctanh"], L[8], None] +bitwise_and: _UFunc_Nin2_Nout1[L["bitwise_and"], L[12], L[-1]] +bitwise_count: _UFunc_Nin1_Nout1[L["bitwise_count"], L[11], None] +bitwise_or: _UFunc_Nin2_Nout1[L["bitwise_or"], L[12], L[0]] +bitwise_xor: _UFunc_Nin2_Nout1[L["bitwise_xor"], L[12], L[0]] +cbrt: _UFunc_Nin1_Nout1[L["cbrt"], L[5], None] +ceil: _UFunc_Nin1_Nout1[L["ceil"], L[7], None] +conjugate: _UFunc_Nin1_Nout1[L["conjugate"], L[18], None] +copysign: _UFunc_Nin2_Nout1[L["copysign"], L[4], None] +cos: _UFunc_Nin1_Nout1[L["cos"], L[9], None] +cosh: _UFunc_Nin1_Nout1[L["cosh"], L[8], None] +deg2rad: _UFunc_Nin1_Nout1[L["deg2rad"], L[5], None] +degrees: _UFunc_Nin1_Nout1[L["degrees"], L[5], None] +divide: _UFunc_Nin2_Nout1[L["divide"], L[11], None] +divmod: _UFunc_Nin2_Nout2[L["divmod"], L[15], None] +equal: _UFunc_Nin2_Nout1[L["equal"], L[23], None] +exp2: _UFunc_Nin1_Nout1[L["exp2"], L[8], None] +exp: _UFunc_Nin1_Nout1[L["exp"], L[10], None] +expm1: _UFunc_Nin1_Nout1[L["expm1"], L[8], None] +fabs: _UFunc_Nin1_Nout1[L["fabs"], L[5], None] +float_power: _UFunc_Nin2_Nout1[L["float_power"], L[4], None] +floor: _UFunc_Nin1_Nout1[L["floor"], 
L[7], None] +floor_divide: _UFunc_Nin2_Nout1[L["floor_divide"], L[21], None] +fmax: _UFunc_Nin2_Nout1[L["fmax"], L[21], None] +fmin: _UFunc_Nin2_Nout1[L["fmin"], L[21], None] +fmod: _UFunc_Nin2_Nout1[L["fmod"], L[15], None] +frexp: _UFunc_Nin1_Nout2[L["frexp"], L[4], None] +gcd: _UFunc_Nin2_Nout1[L["gcd"], L[11], L[0]] +greater: _UFunc_Nin2_Nout1[L["greater"], L[23], None] +greater_equal: _UFunc_Nin2_Nout1[L["greater_equal"], L[23], None] +heaviside: _UFunc_Nin2_Nout1[L["heaviside"], L[4], None] +hypot: _UFunc_Nin2_Nout1[L["hypot"], L[5], L[0]] +invert: _UFunc_Nin1_Nout1[L["invert"], L[12], None] +isfinite: _UFunc_Nin1_Nout1[L["isfinite"], L[20], None] +isinf: _UFunc_Nin1_Nout1[L["isinf"], L[20], None] +isnan: _UFunc_Nin1_Nout1[L["isnan"], L[20], None] +isnat: _UFunc_Nin1_Nout1[L["isnat"], L[2], None] +lcm: _UFunc_Nin2_Nout1[L["lcm"], L[11], None] +ldexp: _UFunc_Nin2_Nout1[L["ldexp"], L[8], None] +left_shift: _UFunc_Nin2_Nout1[L["left_shift"], L[11], None] +less: _UFunc_Nin2_Nout1[L["less"], L[23], None] +less_equal: _UFunc_Nin2_Nout1[L["less_equal"], L[23], None] +log10: _UFunc_Nin1_Nout1[L["log10"], L[8], None] +log1p: _UFunc_Nin1_Nout1[L["log1p"], L[8], None] +log2: _UFunc_Nin1_Nout1[L["log2"], L[8], None] +log: _UFunc_Nin1_Nout1[L["log"], L[10], None] +logaddexp2: _UFunc_Nin2_Nout1[L["logaddexp2"], L[4], float] +logaddexp: _UFunc_Nin2_Nout1[L["logaddexp"], L[4], float] +logical_and: _UFunc_Nin2_Nout1[L["logical_and"], L[20], L[True]] +logical_not: _UFunc_Nin1_Nout1[L["logical_not"], L[20], None] +logical_or: _UFunc_Nin2_Nout1[L["logical_or"], L[20], L[False]] +logical_xor: _UFunc_Nin2_Nout1[L["logical_xor"], L[19], L[False]] +matmul: _GUFunc_Nin2_Nout1[L["matmul"], L[19], None, L["(n?,k),(k,m?)->(n?,m?)"]] +matvec: _GUFunc_Nin2_Nout1[L["matvec"], L[19], None, L["(m,n),(n)->(m)"]] +maximum: _UFunc_Nin2_Nout1[L["maximum"], L[21], None] +minimum: _UFunc_Nin2_Nout1[L["minimum"], L[21], None] +modf: _UFunc_Nin1_Nout2[L["modf"], L[4], None] +multiply: 
_UFunc_Nin2_Nout1[L["multiply"], L[23], L[1]] +negative: _UFunc_Nin1_Nout1[L["negative"], L[19], None] +nextafter: _UFunc_Nin2_Nout1[L["nextafter"], L[4], None] +not_equal: _UFunc_Nin2_Nout1[L["not_equal"], L[23], None] +positive: _UFunc_Nin1_Nout1[L["positive"], L[19], None] +power: _UFunc_Nin2_Nout1[L["power"], L[18], None] +rad2deg: _UFunc_Nin1_Nout1[L["rad2deg"], L[5], None] +radians: _UFunc_Nin1_Nout1[L["radians"], L[5], None] +reciprocal: _UFunc_Nin1_Nout1[L["reciprocal"], L[18], None] +remainder: _UFunc_Nin2_Nout1[L["remainder"], L[16], None] +right_shift: _UFunc_Nin2_Nout1[L["right_shift"], L[11], None] +rint: _UFunc_Nin1_Nout1[L["rint"], L[10], None] +sign: _UFunc_Nin1_Nout1[L["sign"], L[19], None] +signbit: _UFunc_Nin1_Nout1[L["signbit"], L[4], None] +sin: _UFunc_Nin1_Nout1[L["sin"], L[9], None] +sinh: _UFunc_Nin1_Nout1[L["sinh"], L[8], None] +spacing: _UFunc_Nin1_Nout1[L["spacing"], L[4], None] +sqrt: _UFunc_Nin1_Nout1[L["sqrt"], L[10], None] +square: _UFunc_Nin1_Nout1[L["square"], L[18], None] +subtract: _UFunc_Nin2_Nout1[L["subtract"], L[21], None] +tan: _UFunc_Nin1_Nout1[L["tan"], L[8], None] +tanh: _UFunc_Nin1_Nout1[L["tanh"], L[8], None] +trunc: _UFunc_Nin1_Nout1[L["trunc"], L[7], None] +vecdot: _GUFunc_Nin2_Nout1[L["vecdot"], L[19], None, L["(n),(n)->()"]] +vecmat: _GUFunc_Nin2_Nout1[L["vecmat"], L[19], None, L["(n),(n,m)->(m)"]] -# NOTE: Deprecated -# class MachAr: ... +abs = absolute +acos = arccos +acosh = arccosh +asin = arcsin +asinh = arcsinh +atan = arctan +atanh = arctanh +atan2 = arctan2 +concat = concatenate +bitwise_left_shift = left_shift +bitwise_not = invert +bitwise_invert = invert +bitwise_right_shift = right_shift +conj = conjugate +mod = remainder +permute_dims = transpose +pow = power +true_divide = divide -class _SupportsDLPack(Protocol[_T_contra]): - def __dlpack__(self, *, stream: None | _T_contra = ...) -> _PyCapsule: ... 
+# TODO: The type of each `__next__` and `iters` return-type depends +# on the length and dtype of `args`; we can't describe this behavior yet +# as we lack variadics (PEP 646). +@final +class broadcast: + def __new__(cls, *args: ArrayLike) -> broadcast: ... + @property + def index(self) -> int: ... + @property + def iters(self) -> tuple[flatiter[Any], ...]: ... + @property + def nd(self) -> int: ... + @property + def ndim(self) -> int: ... + @property + def numiter(self) -> int: ... + @property + def shape(self) -> _AnyShape: ... + @property + def size(self) -> int: ... + def __next__(self) -> tuple[Any, ...]: ... + def __iter__(self) -> Self: ... + def reset(self) -> None: ... -def from_dlpack(obj: _SupportsDLPack[None], /) -> NDArray[Any]: ... +def from_dlpack( + x: _SupportsDLPack[None], + /, + *, + device: L["cpu"] | None = None, + copy: py_bool | None = None, +) -> NDArray[number | bool_]: ... diff --git a/numpy/_array_api_info.py b/numpy/_array_api_info.py index 0167a2fe7985..41adb835433d 100644 --- a/numpy/_array_api_info.py +++ b/numpy/_array_api_info.py @@ -8,24 +8,26 @@ """ from numpy._core import ( - dtype, bool, - intp, + complex64, + complex128, + dtype, + float32, + float64, int8, int16, int32, int64, + intp, uint8, uint16, uint32, uint64, - float32, - float64, - complex64, - complex128, ) +from numpy._utils import set_module +@set_module('numpy') class __array_namespace_info__: """ Get the array API inspection namespace for NumPy. @@ -58,8 +60,6 @@ class __array_namespace_info__: """ - __module__ = 'numpy' - def capabilities(self): """ Return a dictionary of array API library capabilities. 
@@ -94,14 +94,14 @@ def capabilities(self): >>> info = np.__array_namespace_info__() >>> info.capabilities() {'boolean indexing': True, - 'data-dependent shapes': True} + 'data-dependent shapes': True, + 'max dimensions': 64} """ return { "boolean indexing": True, "data-dependent shapes": True, - # 'max rank' will be part of the 2024.12 standard - # "max rank": 64, + "max dimensions": 64, } def default_device(self): diff --git a/numpy/_array_api_info.pyi b/numpy/_array_api_info.pyi index f86aeb63fd2b..f6fc86d38dbb 100644 --- a/numpy/_array_api_info.pyi +++ b/numpy/_array_api_info.pyi @@ -1,62 +1,176 @@ -from typing import TypedDict, Optional, Union, Tuple, List -from numpy._typing import DtypeLike +from typing import Literal, Never, TypedDict, final, overload, type_check_only -Capabilities = TypedDict( - "Capabilities", - { - "boolean indexing": bool, - "data-dependent shapes": bool, - }, -) +import numpy as np -DefaultDataTypes = TypedDict( - "DefaultDataTypes", +type _Device = Literal["cpu"] +type _DeviceLike = _Device | None + +_Capabilities = TypedDict( + "_Capabilities", { - "real floating": DtypeLike, - "complex floating": DtypeLike, - "integral": DtypeLike, - "indexing": DtypeLike, + "boolean indexing": Literal[True], + "data-dependent shapes": Literal[True], }, ) -DataTypes = TypedDict( - "DataTypes", +_DefaultDTypes = TypedDict( + "_DefaultDTypes", { - "bool": DtypeLike, - "float32": DtypeLike, - "float64": DtypeLike, - "complex64": DtypeLike, - "complex128": DtypeLike, - "int8": DtypeLike, - "int16": DtypeLike, - "int32": DtypeLike, - "int64": DtypeLike, - "uint8": DtypeLike, - "uint16": DtypeLike, - "uint32": DtypeLike, - "uint64": DtypeLike, + "real floating": np.dtype[np.float64], + "complex floating": np.dtype[np.complex128], + "integral": np.dtype[np.intp], + "indexing": np.dtype[np.intp], }, - total=False, ) -class __array_namespace_info__: - __module__: str +type _KindBool = Literal["bool"] +type _KindInt = Literal["signed integer"] +type _KindUInt 
= Literal["unsigned integer"] +type _KindInteger = Literal["integral"] +type _KindFloat = Literal["real floating"] +type _KindComplex = Literal["complex floating"] +type _KindNumber = Literal["numeric"] +type _Kind = _KindBool | _KindInt | _KindUInt | _KindInteger | _KindFloat | _KindComplex | _KindNumber + +type _Permute1[T1] = T1 | tuple[T1] +type _Permute2[T1, T2] = tuple[T1, T2] | tuple[T2, T1] +type _Permute3[T1, T2, T3] = ( + tuple[T1, T2, T3] | tuple[T1, T3, T2] + | tuple[T2, T1, T3] | tuple[T2, T3, T1] + | tuple[T3, T1, T2] | tuple[T3, T2, T1] +) # fmt: skip + +@type_check_only +class _DTypesBool(TypedDict): + bool: np.dtype[np.bool] + +@type_check_only +class _DTypesInt(TypedDict): + int8: np.dtype[np.int8] + int16: np.dtype[np.int16] + int32: np.dtype[np.int32] + int64: np.dtype[np.int64] + +@type_check_only +class _DTypesUInt(TypedDict): + uint8: np.dtype[np.uint8] + uint16: np.dtype[np.uint16] + uint32: np.dtype[np.uint32] + uint64: np.dtype[np.uint64] + +@type_check_only +class _DTypesInteger(_DTypesInt, _DTypesUInt): ... + +@type_check_only +class _DTypesFloat(TypedDict): + float32: np.dtype[np.float32] + float64: np.dtype[np.float64] - def capabilities(self) -> Capabilities: ... +@type_check_only +class _DTypesComplex(TypedDict): + complex64: np.dtype[np.complex64] + complex128: np.dtype[np.complex128] - def default_device(self) -> str: ... +@type_check_only +class _DTypesNumber(_DTypesInteger, _DTypesFloat, _DTypesComplex): ... - def default_dtypes( +@type_check_only +class _DTypes(_DTypesBool, _DTypesNumber): ... 
+ +@type_check_only +class _DTypesUnion(TypedDict, total=False): + bool: np.dtype[np.bool] + int8: np.dtype[np.int8] + int16: np.dtype[np.int16] + int32: np.dtype[np.int32] + int64: np.dtype[np.int64] + uint8: np.dtype[np.uint8] + uint16: np.dtype[np.uint16] + uint32: np.dtype[np.uint32] + uint64: np.dtype[np.uint64] + float32: np.dtype[np.float32] + float64: np.dtype[np.float64] + complex64: np.dtype[np.complex64] + complex128: np.dtype[np.complex128] + +type _EmptyDict = dict[Never, Never] + +@final +class __array_namespace_info__: + __module__: Literal["numpy"] = "numpy" # pyrefly: ignore[bad-override] + + def capabilities(self) -> _Capabilities: ... + def default_device(self) -> _Device: ... + def default_dtypes(self, *, device: _DeviceLike = None) -> _DefaultDTypes: ... + def devices(self) -> list[_Device]: ... + + @overload + def dtypes( self, *, - device: Optional[str] = None, - ) -> DefaultDataTypes: ... - + device: _DeviceLike = None, + kind: None = None, + ) -> _DTypes: ... + @overload def dtypes( self, *, - device: Optional[str] = None, - kind: Optional[Union[str, Tuple[str, ...]]] = None, - ) -> DataTypes: ... - - def devices(self) -> List[str]: ... + device: _DeviceLike = None, + kind: _Permute1[_KindBool], + ) -> _DTypesBool: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = None, + kind: _Permute1[_KindInt], + ) -> _DTypesInt: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = None, + kind: _Permute1[_KindUInt], + ) -> _DTypesUInt: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = None, + kind: _Permute1[_KindFloat], + ) -> _DTypesFloat: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = None, + kind: _Permute1[_KindComplex], + ) -> _DTypesComplex: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = None, + kind: _Permute1[_KindInteger] | _Permute2[_KindInt, _KindUInt], + ) -> _DTypesInteger: ... 
+ @overload + def dtypes( + self, + *, + device: _DeviceLike = None, + kind: _Permute1[_KindNumber] | _Permute3[_KindInteger, _KindFloat, _KindComplex], + ) -> _DTypesNumber: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = None, + kind: tuple[()], + ) -> _EmptyDict: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = None, + kind: tuple[_Kind, ...], + ) -> _DTypesUnion: ... diff --git a/numpy/_build_utils/__init__.py b/numpy/_build_utils/__init__.py deleted file mode 100644 index ac4908957ad1..000000000000 --- a/numpy/_build_utils/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Don't use the deprecated NumPy C API. Define this to a fixed version -# instead of NPY_API_VERSION in order not to break compilation for -# released SciPy versions when NumPy introduces a new deprecation. Use -# in setup.py:: -# -# config.add_extension('_name', sources=['source_fname'], **numpy_nodepr_api) -# -numpy_nodepr_api = dict( - define_macros=[("NPY_NO_DEPRECATED_API", "NPY_1_9_API_VERSION")] -) - - -def import_file(folder, module_name): - """Import a file directly, avoiding importing scipy""" - import importlib - import pathlib - - fname = pathlib.Path(folder) / f'{module_name}.py' - spec = importlib.util.spec_from_file_location(module_name, str(fname)) - module = importlib.util.module_from_spec(spec) - spec.loader.exec_module(module) - return module diff --git a/numpy/distutils/conv_template.py b/numpy/_build_utils/conv_template.py similarity index 90% rename from numpy/distutils/conv_template.py rename to numpy/_build_utils/conv_template.py index c8933d1d4286..3f6347371ae0 100644 --- a/numpy/distutils/conv_template.py +++ b/numpy/_build_utils/conv_template.py @@ -82,8 +82,8 @@ __all__ = ['process_str', 'process_file'] import os -import sys import re +import sys # names for replacement that are already global. global_names = {} @@ -106,12 +106,12 @@ def parse_structure(astr, level): at zero. Returns an empty list if no loops found. 
""" - if level == 0 : + if level == 0: loopbeg = "/**begin repeat" loopend = "/**end repeat**/" - else : - loopbeg = "/**begin repeat%d" % level - loopend = "/**end repeat%d**/" % level + else: + loopbeg = f"/**begin repeat{level}" + loopend = f"/**end repeat{level}**/" ind = 0 line = 0 @@ -124,9 +124,9 @@ def parse_structure(astr, level): start2 = astr.find("\n", start2) fini1 = astr.find(loopend, start2) fini2 = astr.find("\n", fini1) - line += astr.count("\n", ind, start2+1) - spanlist.append((start, start2+1, fini1, fini2+1, line)) - line += astr.count("\n", start2+1, fini2) + line += astr.count("\n", ind, start2 + 1) + spanlist.append((start, start2 + 1, fini1, fini2 + 1, line)) + line += astr.count("\n", start2 + 1, fini2) ind = fini2 spanlist.sort() return spanlist @@ -135,10 +135,13 @@ def parse_structure(astr, level): def paren_repl(obj): torep = obj.group(1) numrep = obj.group(2) - return ','.join([torep]*int(numrep)) + return ','.join([torep] * int(numrep)) + parenrep = re.compile(r"\(([^)]*)\)\*(\d+)") plainrep = re.compile(r"([^*]+)\*(\d+)") + + def parse_values(astr): # replaces all occurrences of '(a,b,c)*4' in astr # with 'a,b,c,a,b,c,a,b,c,a,b,c'. 
Empty braces generate @@ -155,7 +158,7 @@ def parse_values(astr): named_re = re.compile(r"#\s*(\w*)\s*=([^#]*)#") exclude_vars_re = re.compile(r"(\w*)=(\w*)") exclude_re = re.compile(":exclude:") -def parse_loop_header(loophead) : +def parse_loop_header(loophead): """Find all named replacements in the header Returns a list of dictionaries, one for each loop iteration, @@ -179,14 +182,13 @@ def parse_loop_header(loophead) : name = rep[0] vals = parse_values(rep[1]) size = len(vals) - if nsub is None : + if nsub is None: nsub = size - elif nsub != size : + elif nsub != size: msg = "Mismatch in number of values, %d != %d\n%s = %s" raise ValueError(msg % (nsub, size, name, vals)) names.append((name, vals)) - # Find any exclude variables excludes = [] @@ -200,30 +202,33 @@ def parse_loop_header(loophead) : # generate list of dictionaries, one for each template iteration dlist = [] - if nsub is None : + if nsub is None: raise ValueError("No substitution variables found") for i in range(nsub): tmp = {name: vals[i] for name, vals in names} dlist.append(tmp) return dlist + replace_re = re.compile(r"@(\w+)@") -def parse_string(astr, env, level, line) : - lineno = "#line %d\n" % line + + +def parse_string(astr, env, level, line): + lineno = f"#line {line}\n" # local function for string replacement, uses env def replace(match): name = match.group(1) - try : + try: val = env[name] except KeyError: - msg = 'line %d: no definition of key "%s"'%(line, name) + msg = f'line {line}: no definition of key "{name}"' raise ValueError(msg) from None return val code = [lineno] struct = parse_structure(astr, level) - if struct : + if struct: # recurse over inner loops oldend = 0 newlevel = level + 1 @@ -234,18 +239,18 @@ def replace(match): oldend = sub[3] newline = line + sub[4] code.append(replace_re.sub(replace, pref)) - try : + try: envlist = parse_loop_header(head) except ValueError as e: - msg = "line %d: %s" % (newline, e) + msg = f"line {newline}: {e}" raise ValueError(msg) - for 
newenv in envlist : + for newenv in envlist: newenv.update(env) newcode = parse_string(text, newenv, newlevel, newline) code.extend(newcode) suff = astr[oldend:] code.append(replace_re.sub(replace, suff)) - else : + else: # replace keys code.append(replace_re.sub(replace, astr)) code.append('\n') @@ -284,8 +289,8 @@ def process_file(source): try: code = process_str(''.join(lines)) except ValueError as e: - raise ValueError('In "%s" loop at %s' % (sourcefile, e)) from None - return '#line 1 "%s"\n%s' % (sourcefile, code) + raise ValueError(f'In "{sourcefile}" loop at {e}') from None + return f'#line 1 "{sourcefile}"\n{code}' def unique_key(adict): @@ -321,9 +326,10 @@ def main(): try: writestr = process_str(allstr) except ValueError as e: - raise ValueError("In %s loop at %s" % (file, e)) from None + raise ValueError(f"In {file} loop at {e}") from None outfile.write(writestr) + if __name__ == "__main__": main() diff --git a/numpy/_build_utils/gcc_build_bitness.py b/numpy/_build_utils/gcc_build_bitness.py index fcad237e98bc..08d02d4d813f 100644 --- a/numpy/_build_utils/gcc_build_bitness.py +++ b/numpy/_build_utils/gcc_build_bitness.py @@ -3,7 +3,7 @@ """ import re -from subprocess import run, PIPE +from subprocess import run def main(): diff --git a/numpy/_build_utils/gitversion.py b/numpy/_build_utils/gitversion.py index defc704c41eb..47dd71d1567b 100644 --- a/numpy/_build_utils/gitversion.py +++ b/numpy/_build_utils/gitversion.py @@ -22,13 +22,13 @@ def git_version(version): # Append last commit date and hash to dev version information, # if available - import subprocess import os.path + import subprocess git_hash = '' try: p = subprocess.Popen( - ['git', 'log', '-1', '--format="%H %aI"'], + ['git', '-c', 'log.showSignature=false', 'log', '-1', '--format="%H %aI"'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=os.path.dirname(__file__), diff --git a/numpy/_build_utils/process_src_template.py b/numpy/_build_utils/process_src_template.py index 
4a0915e25254..f934c222e838 100644 --- a/numpy/_build_utils/process_src_template.py +++ b/numpy/_build_utils/process_src_template.py @@ -1,16 +1,15 @@ #!/usr/bin/env python3 -import sys -import os import argparse import importlib.util +import os def get_processor(): - # Convoluted because we can't import from numpy.distutils + # Convoluted because we can't import from numpy # (numpy is not yet built) conv_template_path = os.path.join( os.path.dirname(__file__), - '..', 'distutils', 'conv_template.py' + 'conv_template.py' ) spec = importlib.util.spec_from_file_location( 'conv_template', conv_template_path diff --git a/numpy/_build_utils/tempita.py b/numpy/_build_utils/tempita.py index 0743b892436b..e3571ef8747d 100644 --- a/numpy/_build_utils/tempita.py +++ b/numpy/_build_utils/tempita.py @@ -1,11 +1,9 @@ #!/usr/bin/env python3 -import sys -import os import argparse +import os +import sys -from Cython import Tempita as tempita - -# XXX: If this import ever fails (does it really?), vendor cython.tempita +import tempita def process_tempita(fromfile, outfile=None): diff --git a/numpy/_build_utils/tempita/LICENSE.txt b/numpy/_build_utils/tempita/LICENSE.txt new file mode 100644 index 000000000000..0ba6f23c440f --- /dev/null +++ b/numpy/_build_utils/tempita/LICENSE.txt @@ -0,0 +1,20 @@ +Copyright (c) 2008 Ian Bicking and Contributors + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/numpy/_build_utils/tempita/__init__.py b/numpy/_build_utils/tempita/__init__.py new file mode 100644 index 000000000000..41a0ce3d0efa --- /dev/null +++ b/numpy/_build_utils/tempita/__init__.py @@ -0,0 +1,4 @@ +# The original Tempita implements all of its templating code here. +# Moved it to _tempita.py to make the compilation portable. + +from ._tempita import * diff --git a/numpy/_build_utils/tempita/_looper.py b/numpy/_build_utils/tempita/_looper.py new file mode 100644 index 000000000000..0d3de22ac80c --- /dev/null +++ b/numpy/_build_utils/tempita/_looper.py @@ -0,0 +1,154 @@ +""" +Helper for looping over sequences, particular in templates. + +Often in a loop in a template it's handy to know what's next up, +previously up, if this is the first or last item in the sequence, etc. +These can be awkward to manage in a normal Python loop, but using the +looper you can get a better sense of the context. Use like:: + + >>> for loop, item in looper(['a', 'b', 'c']): + ... print loop.number, item + ... if not loop.last: + ... print '---' + 1 a + --- + 2 b + --- + 3 c + +""" + +basestring_ = (bytes, str) + +__all__ = ['looper'] + + +class looper: + """ + Helper for looping (particularly in templates) + + Use this like:: + + for loop, item in looper(seq): + if loop.first: + ... 
+ """ + + def __init__(self, seq): + self.seq = seq + + def __iter__(self): + return looper_iter(self.seq) + + def __repr__(self): + return f'<{self.__class__.__name__} for {self.seq!r}>' + + +class looper_iter: + + def __init__(self, seq): + self.seq = list(seq) + self.pos = 0 + + def __iter__(self): + return self + + def __next__(self): + if self.pos >= len(self.seq): + raise StopIteration + result = loop_pos(self.seq, self.pos), self.seq[self.pos] + self.pos += 1 + return result + + +class loop_pos: + + def __init__(self, seq, pos): + self.seq = seq + self.pos = pos + + def __repr__(self): + return f'' + + def index(self): + return self.pos + index = property(index) + + def number(self): + return self.pos + 1 + number = property(number) + + def item(self): + return self.seq[self.pos] + item = property(item) + + def __next__(self): + try: + return self.seq[self.pos + 1] + except IndexError: + return None + __next__ = property(__next__) + + def previous(self): + if self.pos == 0: + return None + return self.seq[self.pos - 1] + previous = property(previous) + + def odd(self): + return not self.pos % 2 + odd = property(odd) + + def even(self): + return self.pos % 2 + even = property(even) + + def first(self): + return self.pos == 0 + first = property(first) + + def last(self): + return self.pos == len(self.seq) - 1 + last = property(last) + + def length(self): + return len(self.seq) + length = property(length) + + def first_group(self, getter=None): + """ + Returns true if this item is the start of a new group, + where groups mean that some attribute has changed. The getter + can be None (the item itself changes), an attribute name like + ``'.attr'``, a function, or a dict key or list index. + """ + if self.first: + return True + return self._compare_group(self.item, self.previous, getter) + + def last_group(self, getter=None): + """ + Returns true if this item is the end of a new group, + where groups mean that some attribute has changed. 
The getter + can be None (the item itself changes), an attribute name like + ``'.attr'``, a function, or a dict key or list index. + """ + if self.last: + return True + return self._compare_group(self.item, self.__next__, getter) + + def _compare_group(self, item, other, getter): + if getter is None: + return item != other + elif (isinstance(getter, basestring_) + and getter.startswith('.')): + getter = getter[1:] + if getter.endswith('()'): + getter = getter[:-2] + return getattr(item, getter)() != getattr(other, getter)() + else: + return getattr(item, getter) != getattr(other, getter) + elif callable(getter): + return getter(item) != getter(other) + else: + return item[getter] != other[getter] diff --git a/numpy/_build_utils/tempita/_tempita.py b/numpy/_build_utils/tempita/_tempita.py new file mode 100644 index 000000000000..3d5113085183 --- /dev/null +++ b/numpy/_build_utils/tempita/_tempita.py @@ -0,0 +1,1116 @@ +""" +A small templating language + +This implements a small templating language. This language implements +if/elif/else, for/continue/break, expressions, and blocks of Python +code. The syntax is:: + + {{any expression (function calls etc)}} + {{any expression | filter}} + {{for x in y}}...{{endfor}} + {{if x}}x{{elif y}}y{{else}}z{{endif}} + {{py:x=1}} + {{py: + def foo(bar): + return 'baz' + }} + {{default var = default_value}} + {{# comment}} + +You use this with the ``Template`` class or the ``sub`` shortcut. +The ``Template`` class takes the template string and the name of +the template (for errors) and a default namespace. Then (like +``string.Template``) you can call the ``tmpl.substitute(**kw)`` +method to make a substitution (or ``tmpl.substitute(a_dict)``). + +``sub(content, **kw)`` substitutes the template immediately. You +can use ``__name='tmpl.html'`` to set the name of the template. + +If there are syntax errors ``TemplateError`` will be raised. 
+""" + +import os +import re +import sys +import tokenize +from io import StringIO + +from ._looper import looper + +__all__ = ["TemplateError", "Template", "sub", "bunch"] + +in_re = re.compile(r"\s+in\s+") +var_re = re.compile(r"^[a-z_][a-z0-9_]*$", re.I) +basestring_ = (bytes, str) + + +def coerce_text(v): + if not isinstance(v, basestring_): + if hasattr(v, "__str__"): + return str(v) + else: + return bytes(v) + return v + + +class TemplateError(Exception): + """Exception raised while parsing a template""" + + def __init__(self, message, position, name=None): + Exception.__init__(self, message) + self.position = position + self.name = name + + def __str__(self): + msg = " ".join(self.args) + if self.position: + msg = f"{msg} at line {self.position[0]} column {self.position[1]}" + if self.name: + msg += f" in {self.name}" + return msg + + +class _TemplateContinue(Exception): + pass + + +class _TemplateBreak(Exception): + pass + + +def get_file_template(name, from_template): + path = os.path.join(os.path.dirname(from_template.name), name) + return from_template.__class__.from_filename( + path, namespace=from_template.namespace, get_template=from_template.get_template + ) + + +class Template: + default_namespace = { + "start_braces": "{{", + "end_braces": "}}", + "looper": looper, + } + + default_encoding = "utf8" + default_inherit = None + + def __init__( + self, + content, + name=None, + namespace=None, + stacklevel=None, + get_template=None, + default_inherit=None, + line_offset=0, + delimiters=None, + ): + self.content = content + + # set delimiters + if delimiters is None: + delimiters = ( + self.default_namespace["start_braces"], + self.default_namespace["end_braces"], + ) + else: + # assert len(delimiters) == 2 and all([isinstance(delimiter, basestring) + # for delimiter in delimiters]) + self.default_namespace = self.__class__.default_namespace.copy() + self.default_namespace["start_braces"] = delimiters[0] + self.default_namespace["end_braces"] = 
delimiters[1] + self.delimiters = delimiters + + self._unicode = isinstance(content, str) + if name is None and stacklevel is not None: + try: + caller = sys._getframe(stacklevel) + except ValueError: + pass + else: + globals = caller.f_globals + lineno = caller.f_lineno + if "__file__" in globals: + name = globals["__file__"] + if name.endswith((".pyc", ".pyo")): + name = name[:-1] + elif "__name__" in globals: + name = globals["__name__"] + else: + name = "" + if lineno: + name += f":{lineno}" + self.name = name + self._parsed = parse( + content, name=name, line_offset=line_offset, delimiters=self.delimiters + ) + if namespace is None: + namespace = {} + self.namespace = namespace + self.get_template = get_template + if default_inherit is not None: + self.default_inherit = default_inherit + + @classmethod + def from_filename( + cls, + filename, + namespace=None, + encoding=None, + default_inherit=None, + get_template=get_file_template, + ): + with open(filename, "rb") as f: + c = f.read() + if encoding: + c = c.decode(encoding) + return cls( + content=c, + name=filename, + namespace=namespace, + default_inherit=default_inherit, + get_template=get_template, + ) + + def __repr__(self): + return f"<{self.__class__.__name__} {id(self):x} name={self.name!r}>" + + def substitute(self, *args, **kw): + if args: + if kw: + raise TypeError("You can only give positional *or* keyword arguments") + if len(args) > 1: + raise TypeError("You can only give one positional argument") + if not hasattr(args[0], "items"): + raise TypeError( + "If you pass in a single argument, you must pass in a " + "dictionary-like object (with a .items() method); " + f"you gave {args[0]!r}" + ) + kw = args[0] + ns = kw + ns["__template_name__"] = self.name + if self.namespace: + ns.update(self.namespace) + result, defs, inherit = self._interpret(ns) + if not inherit: + inherit = self.default_inherit + if inherit: + result = self._interpret_inherit(result, defs, inherit, ns) + return result + + def 
_interpret(self, ns): + __traceback_hide__ = True + parts = [] + defs = {} + self._interpret_codes(self._parsed, ns, out=parts, defs=defs) + if "__inherit__" in defs: + inherit = defs.pop("__inherit__") + else: + inherit = None + return "".join(parts), defs, inherit + + def _interpret_inherit(self, body, defs, inherit_template, ns): + __traceback_hide__ = True + if not self.get_template: + raise TemplateError( + "You cannot use inheritance without passing in get_template", + position=None, + name=self.name, + ) + templ = self.get_template(inherit_template, self) + self_ = TemplateObject(self.name) + for name, value in defs.items(): + setattr(self_, name, value) + self_.body = body + ns = ns.copy() + ns["self"] = self_ + return templ.substitute(ns) + + def _interpret_codes(self, codes, ns, out, defs): + __traceback_hide__ = True + for item in codes: + if isinstance(item, basestring_): + out.append(item) + else: + self._interpret_code(item, ns, out, defs) + + def _interpret_code(self, code, ns, out, defs): + __traceback_hide__ = True + name, pos = code[0], code[1] + if name == "py": + self._exec(code[2], ns, pos) + elif name == "continue": + raise _TemplateContinue() + elif name == "break": + raise _TemplateBreak() + elif name == "for": + vars, expr, content = code[2], code[3], code[4] + expr = self._eval(expr, ns, pos) + self._interpret_for(vars, expr, content, ns, out, defs) + elif name == "cond": + parts = code[2:] + self._interpret_if(parts, ns, out, defs) + elif name == "expr": + parts = code[2].split("|") + base = self._eval(parts[0], ns, pos) + for part in parts[1:]: + func = self._eval(part, ns, pos) + base = func(base) + out.append(self._repr(base, pos)) + elif name == "default": + var, expr = code[2], code[3] + if var not in ns: + result = self._eval(expr, ns, pos) + ns[var] = result + elif name == "inherit": + expr = code[2] + value = self._eval(expr, ns, pos) + defs["__inherit__"] = value + elif name == "def": + name = code[2] + signature = code[3] + 
parts = code[4] + ns[name] = defs[name] = TemplateDef( + self, name, signature, body=parts, ns=ns, pos=pos + ) + elif name == "comment": + return + else: + assert 0, f"Unknown code: {name!r}" + + def _interpret_for(self, vars, expr, content, ns, out, defs): + __traceback_hide__ = True + for item in expr: + if len(vars) == 1: + ns[vars[0]] = item + else: + if len(vars) != len(item): + raise ValueError( + f"Need {len(vars)} items to unpack (got {len(item)} items)" + ) + for name, value in zip(vars, item): + ns[name] = value + try: + self._interpret_codes(content, ns, out, defs) + except _TemplateContinue: + continue + except _TemplateBreak: + break + + def _interpret_if(self, parts, ns, out, defs): + __traceback_hide__ = True + # @@: if/else/else gets through + for part in parts: + assert not isinstance(part, basestring_) + name, pos = part[0], part[1] + if name == "else": + result = True + else: + result = self._eval(part[2], ns, pos) + if result: + self._interpret_codes(part[3], ns, out, defs) + break + + def _eval(self, code, ns, pos): + __traceback_hide__ = True + try: + try: + value = eval(code, self.default_namespace, ns) + except SyntaxError as e: + raise SyntaxError(f"invalid syntax in expression: {code}") + return value + except Exception as e: + if getattr(e, "args", None): + arg0 = e.args[0] + else: + arg0 = coerce_text(e) + e.args = (self._add_line_info(arg0, pos),) + raise + + def _exec(self, code, ns, pos): + __traceback_hide__ = True + try: + exec(code, self.default_namespace, ns) + except Exception as e: + if e.args: + e.args = (self._add_line_info(e.args[0], pos),) + else: + e.args = (self._add_line_info(None, pos),) + raise + + def _repr(self, value, pos): + __traceback_hide__ = True + try: + if value is None: + return "" + if self._unicode: + try: + value = str(value) + except UnicodeDecodeError: + value = bytes(value) + else: + if not isinstance(value, basestring_): + value = coerce_text(value) + if isinstance(value, str) and 
self.default_encoding: + value = value.encode(self.default_encoding) + except Exception as e: + e.args = (self._add_line_info(e.args[0], pos),) + raise + else: + if self._unicode and isinstance(value, bytes): + if not self.default_encoding: + raise UnicodeDecodeError( + f"Cannot decode bytes value {value!r} into unicode " + "(no default_encoding provided)" + ) + try: + value = value.decode(self.default_encoding) + except UnicodeDecodeError as e: + raise UnicodeDecodeError( + e.encoding, + e.object, + e.start, + e.end, + e.reason + f" in string {value!r}", + ) + elif not self._unicode and isinstance(value, str): + if not self.default_encoding: + raise UnicodeEncodeError( + f"Cannot encode unicode value {value!r} into bytes " + "(no default_encoding provided)" + ) + value = value.encode(self.default_encoding) + return value + + def _add_line_info(self, msg, pos): + msg = f"{msg} at line {pos[0]} column {pos[1]}" + if self.name: + msg += f" in file {self.name}" + return msg + + +def sub(content, delimiters=None, **kw): + name = kw.get("__name") + tmpl = Template(content, name=name, delimiters=delimiters) + return tmpl.substitute(kw) + + +def paste_script_template_renderer(content, vars, filename=None): + tmpl = Template(content, name=filename) + return tmpl.substitute(vars) + + +class bunch(dict): + def __init__(self, **kw): + for name, value in kw.items(): + setattr(self, name, value) + + def __setattr__(self, name, value): + self[name] = value + + def __getattr__(self, name): + try: + return self[name] + except KeyError: + raise AttributeError(name) + + def __getitem__(self, key): + if "default" in self: + try: + return dict.__getitem__(self, key) + except KeyError: + return dict.__getitem__(self, "default") + else: + return dict.__getitem__(self, key) + + def __repr__(self): + items_str = " ".join([f"{k}={v!r}" for k, v in sorted(self.items())]) + return f"<{self.__class__.__name__} {items_str}>" + + +class TemplateDef: + def __init__( + self, template, func_name, 
func_signature, body, ns, pos, bound_self=None + ): + self._template = template + self._func_name = func_name + self._func_signature = func_signature + self._body = body + self._ns = ns + self._pos = pos + self._bound_self = bound_self + + def __repr__(self): + return (f"") + + def __str__(self): + return self() + + def __call__(self, *args, **kw): + values = self._parse_signature(args, kw) + ns = self._ns.copy() + ns.update(values) + if self._bound_self is not None: + ns["self"] = self._bound_self + out = [] + subdefs = {} + self._template._interpret_codes(self._body, ns, out, subdefs) + return "".join(out) + + def __get__(self, obj, type=None): + if obj is None: + return self + return self.__class__( + self._template, + self._func_name, + self._func_signature, + self._body, + self._ns, + self._pos, + bound_self=obj, + ) + + def _parse_signature(self, args, kw): + values = {} + sig_args, var_args, var_kw, defaults = self._func_signature + extra_kw = {} + for name, value in kw.items(): + if not var_kw and name not in sig_args: + raise TypeError(f"Unexpected argument {name}") + if name in sig_args: + values[sig_args] = value + else: + extra_kw[name] = value + args = list(args) + sig_args = list(sig_args) + while args: + while sig_args and sig_args[0] in values: + sig_args.pop(0) + if sig_args: + name = sig_args.pop(0) + values[name] = args.pop(0) + elif var_args: + values[var_args] = tuple(args) + break + else: + args_str = ", ".join([repr(v) for v in args]) + raise TypeError(f"Extra position arguments: {args_str}") + for name, value_expr in defaults.items(): + if name not in values: + values[name] = self._template._eval(value_expr, self._ns, self._pos) + for name in sig_args: + if name not in values: + raise TypeError(f"Missing argument: {name}") + if var_kw: + values[var_kw] = extra_kw + return values + + +class TemplateObject: + def __init__(self, name): + self.__name = name + self.get = TemplateObjectGetter(self) + + def __repr__(self): + return 
f"<{self.__class__.__name__} {self.__name}>" + + +class TemplateObjectGetter: + def __init__(self, template_obj): + self.__template_obj = template_obj + + def __getattr__(self, attr): + return getattr(self.__template_obj, attr, Empty) + + def __repr__(self): + return f"<{self.__class__.__name__} around {self.__template_obj!r}>" + + +class _Empty: + def __call__(self, *args, **kw): + return self + + def __str__(self): + return "" + + def __repr__(self): + return "Empty" + + def __unicode__(self): + return "" + + def __iter__(self): + return iter(()) + + def __bool__(self): + return False + + +Empty = _Empty() +del _Empty + +############################################################ +## Lexing and Parsing +############################################################ + + +def lex(s, name=None, trim_whitespace=True, line_offset=0, delimiters=None): + """ + Lex a string into chunks: + + >>> lex('hey') + ['hey'] + >>> lex('hey {{you}}') + ['hey ', ('you', (1, 7))] + >>> lex('hey {{') + Traceback (most recent call last): + ... + TemplateError: No }} to finish last expression at line 1 column 7 + >>> lex('hey }}') + Traceback (most recent call last): + ... + TemplateError: }} outside expression at line 1 column 7 + >>> lex('hey {{ {{') + Traceback (most recent call last): + ... 
+ TemplateError: {{ inside expression at line 1 column 10 + + """ + if delimiters is None: + delimiters = ( + Template.default_namespace["start_braces"], + Template.default_namespace["end_braces"], + ) + in_expr = False + chunks = [] + last = 0 + last_pos = (line_offset + 1, 1) + + token_re = re.compile( + rf"{re.escape(delimiters[0])}|{re.escape(delimiters[1])}" + ) + for match in token_re.finditer(s): + expr = match.group(0) + pos = find_position(s, match.end(), last, last_pos) + if expr == delimiters[0] and in_expr: + raise TemplateError( + f"{delimiters[0]} inside expression", position=pos, name=name + ) + elif expr == delimiters[1] and not in_expr: + raise TemplateError( + f"{delimiters[1]} outside expression", position=pos, name=name + ) + if expr == delimiters[0]: + part = s[last:match.start()] + if part: + chunks.append(part) + in_expr = True + else: + chunks.append((s[last: match.start()], last_pos)) + in_expr = False + last = match.end() + last_pos = pos + if in_expr: + raise TemplateError( + f"No {delimiters[1]} to finish last expression", + name=name, + position=last_pos, + ) + part = s[last:] + if part: + chunks.append(part) + if trim_whitespace: + chunks = trim_lex(chunks) + return chunks + + +statement_re = re.compile(r"^(?:if |elif |for |def |inherit |default |py:)") +single_statements = ["else", "endif", "endfor", "enddef", "continue", "break"] +trail_whitespace_re = re.compile(r"\n\r?[\t ]*$") +lead_whitespace_re = re.compile(r"^[\t ]*\n") + + +def trim_lex(tokens): + r""" + Takes a lexed set of tokens, and removes whitespace when there is + a directive on a line by itself: + + >>> tokens = lex('{{if x}}\nx\n{{endif}}\ny', trim_whitespace=False) + >>> tokens + [('if x', (1, 3)), '\nx\n', ('endif', (3, 3)), '\ny'] + >>> trim_lex(tokens) + [('if x', (1, 3)), 'x\n', ('endif', (3, 3)), 'y'] + """ + last_trim = None + for i, current in enumerate(tokens): + if isinstance(current, basestring_): + # we don't trim this + continue + item = current[0] + if 
not statement_re.search(item) and item not in single_statements: + continue + if not i: + prev = "" + else: + prev = tokens[i - 1] + if i + 1 >= len(tokens): + next_chunk = "" + else: + next_chunk = tokens[i + 1] + if not isinstance(next_chunk, basestring_) or not isinstance(prev, basestring_): + continue + prev_ok = not prev or trail_whitespace_re.search(prev) + if i == 1 and not prev.strip(): + prev_ok = True + if last_trim is not None and last_trim + 2 == i and not prev.strip(): + prev_ok = "last" + if prev_ok and ( + not next_chunk + or lead_whitespace_re.search(next_chunk) + or (i == len(tokens) - 2 and not next_chunk.strip()) + ): + if prev: + if (i == 1 and not prev.strip()) or prev_ok == "last": + tokens[i - 1] = "" + else: + m = trail_whitespace_re.search(prev) + # +1 to leave the leading \n on: + prev = prev[: m.start() + 1] + tokens[i - 1] = prev + if next_chunk: + last_trim = i + if i == len(tokens) - 2 and not next_chunk.strip(): + tokens[i + 1] = "" + else: + m = lead_whitespace_re.search(next_chunk) + next_chunk = next_chunk[m.end():] + tokens[i + 1] = next_chunk + return tokens + + +def find_position(string, index, last_index, last_pos): + """Given a string and index, return (line, column)""" + lines = string.count("\n", last_index, index) + if lines > 0: + column = index - string.rfind("\n", last_index, index) + else: + column = last_pos[1] + (index - last_index) + return (last_pos[0] + lines, column) + + +def parse(s, name=None, line_offset=0, delimiters=None): + r""" + Parses a string into a kind of AST + + >>> parse('{{x}}') + [('expr', (1, 3), 'x')] + >>> parse('foo') + ['foo'] + >>> parse('{{if x}}test{{endif}}') + [('cond', (1, 3), ('if', (1, 3), 'x', ['test']))] + >>> parse('series->{{for x in y}}x={{x}}{{endfor}}') + ['series->', ('for', (1, 11), ('x',), 'y', ['x=', ('expr', (1, 27), 'x')])] + >>> parse('{{for x, y in z:}}{{continue}}{{endfor}}') + [('for', (1, 3), ('x', 'y'), 'z', [('continue', (1, 21))])] + >>> parse('{{py:x=1}}') + 
[('py', (1, 3), 'x=1')] + >>> parse('{{if x}}a{{elif y}}b{{else}}c{{endif}}') + [('cond', (1, 3), ('if', (1, 3), 'x', ['a']), ('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))] + + Some exceptions:: + + >>> parse('{{continue}}') + Traceback (most recent call last): + ... + TemplateError: continue outside of for loop at line 1 column 3 + >>> parse('{{if x}}foo') + Traceback (most recent call last): + ... + TemplateError: No {{endif}} at line 1 column 3 + >>> parse('{{else}}') + Traceback (most recent call last): + ... + TemplateError: else outside of an if block at line 1 column 3 + >>> parse('{{if x}}{{for x in y}}{{endif}}{{endfor}}') + Traceback (most recent call last): + ... + TemplateError: Unexpected endif at line 1 column 25 + >>> parse('{{if}}{{endif}}') + Traceback (most recent call last): + ... + TemplateError: if with no expression at line 1 column 3 + >>> parse('{{for x y}}{{endfor}}') + Traceback (most recent call last): + ... + TemplateError: Bad for (no "in") in 'x y' at line 1 column 3 + >>> parse('{{py:x=1\ny=2}}') + Traceback (most recent call last): + ... 
+ TemplateError: Multi-line py blocks must start with a newline at line 1 column 3 + """ # noqa: E501 + if delimiters is None: + delimiters = ( + Template.default_namespace["start_braces"], + Template.default_namespace["end_braces"], + ) + tokens = lex(s, name=name, line_offset=line_offset, delimiters=delimiters) + result = [] + while tokens: + next_chunk, tokens = parse_expr(tokens, name) + result.append(next_chunk) + return result + + +def parse_expr(tokens, name, context=()): + if isinstance(tokens[0], basestring_): + return tokens[0], tokens[1:] + expr, pos = tokens[0] + expr = expr.strip() + if expr.startswith("py:"): + expr = expr[3:].lstrip(" \t") + if expr.startswith(("\n", "\r")): + expr = expr.lstrip("\r\n") + if "\r" in expr: + expr = expr.replace("\r\n", "\n") + expr = expr.replace("\r", "") + expr += "\n" + elif "\n" in expr: + raise TemplateError( + "Multi-line py blocks must start with a newline", + position=pos, + name=name, + ) + return ("py", pos, expr), tokens[1:] + elif expr in ("continue", "break"): + if "for" not in context: + raise TemplateError("continue outside of for loop", position=pos, name=name) + return (expr, pos), tokens[1:] + elif expr.startswith("if "): + return parse_cond(tokens, name, context) + elif expr.startswith("elif ") or expr == "else": + raise TemplateError( + f"{expr.split()[0]} outside of an if block", position=pos, name=name + ) + elif expr in ("if", "elif", "for"): + raise TemplateError(f"{expr} with no expression", position=pos, name=name) + elif expr in ("endif", "endfor", "enddef"): + raise TemplateError(f"Unexpected {expr}", position=pos, name=name) + elif expr.startswith("for "): + return parse_for(tokens, name, context) + elif expr.startswith("default "): + return parse_default(tokens, name, context) + elif expr.startswith("inherit "): + return parse_inherit(tokens, name, context) + elif expr.startswith("def "): + return parse_def(tokens, name, context) + elif expr.startswith("#"): + return ("comment", pos, 
tokens[0][0]), tokens[1:] + return ("expr", pos, tokens[0][0]), tokens[1:] + + +def parse_cond(tokens, name, context): + start = tokens[0][1] + pieces = [] + context = context + ("if",) + while 1: + if not tokens: + raise TemplateError("Missing {{endif}}", position=start, name=name) + if isinstance(tokens[0], tuple) and tokens[0][0] == "endif": + return ("cond", start) + tuple(pieces), tokens[1:] + next_chunk, tokens = parse_one_cond(tokens, name, context) + pieces.append(next_chunk) + + +def parse_one_cond(tokens, name, context): + (first, pos), tokens = tokens[0], tokens[1:] + content = [] + first = first.removesuffix(":") + if first.startswith("if "): + part = ("if", pos, first[3:].lstrip(), content) + elif first.startswith("elif "): + part = ("elif", pos, first[5:].lstrip(), content) + elif first == "else": + part = ("else", pos, None, content) + else: + assert 0, f"Unexpected token {first!r} at {pos}" + while 1: + if not tokens: + raise TemplateError("No {{endif}}", position=pos, name=name) + if isinstance(tokens[0], tuple) and ( + tokens[0][0] == "endif" + or tokens[0][0].startswith("elif ") + or tokens[0][0] == "else" + ): + return part, tokens + next_chunk, tokens = parse_expr(tokens, name, context) + content.append(next_chunk) + + +def parse_for(tokens, name, context): + first, pos = tokens[0] + tokens = tokens[1:] + context = ("for",) + context + content = [] + assert first.startswith("for "), first + first = first.removesuffix(":") + first = first[3:].strip() + match = in_re.search(first) + if not match: + raise TemplateError(f'Bad for (no "in") in {first!r}', position=pos, name=name) + vars = first[: match.start()] + if "(" in vars: + raise TemplateError( + f"You cannot have () in the variable section of a for loop ({vars!r})", + position=pos, + name=name, + ) + vars = tuple(v.strip() for v in first[: match.start()].split(",") if v.strip()) + expr = first[match.end():] + while 1: + if not tokens: + raise TemplateError("No {{endfor}}", position=pos, 
name=name) + if isinstance(tokens[0], tuple) and tokens[0][0] == "endfor": + return ("for", pos, vars, expr, content), tokens[1:] + next_chunk, tokens = parse_expr(tokens, name, context) + content.append(next_chunk) + + +def parse_default(tokens, name, context): + first, pos = tokens[0] + assert first.startswith("default ") + first = first.split(None, 1)[1] + parts = first.split("=", 1) + if len(parts) == 1: + raise TemplateError( + f"Expression must be {{{{default var=value}}}}; no = found in {first!r}", + position=pos, + name=name, + ) + var = parts[0].strip() + if "," in var: + raise TemplateError( + "{{default x, y = ...}} is not supported", position=pos, name=name + ) + if not var_re.search(var): + raise TemplateError( + f"Not a valid variable name for {{{{default}}}}: {var!r}", + position=pos, + name=name, + ) + expr = parts[1].strip() + return ("default", pos, var, expr), tokens[1:] + + +def parse_inherit(tokens, name, context): + first, pos = tokens[0] + assert first.startswith("inherit ") + expr = first.split(None, 1)[1] + return ("inherit", pos, expr), tokens[1:] + + +def parse_def(tokens, name, context): + first, start = tokens[0] + tokens = tokens[1:] + assert first.startswith("def ") + first = first.split(None, 1)[1] + first = first.removesuffix(":") + if "(" not in first: + func_name = first + sig = ((), None, None, {}) + elif not first.endswith(")"): + raise TemplateError( + f"Function definition doesn't end with ): {first}", + position=start, + name=name, + ) + else: + first = first[:-1] + func_name, sig_text = first.split("(", 1) + sig = parse_signature(sig_text, name, start) + context = context + ("def",) + content = [] + while 1: + if not tokens: + raise TemplateError("Missing {{enddef}}", position=start, name=name) + if isinstance(tokens[0], tuple) and tokens[0][0] == "enddef": + return ("def", start, func_name, sig, content), tokens[1:] + next_chunk, tokens = parse_expr(tokens, name, context) + content.append(next_chunk) + + +def 
parse_signature(sig_text, name, pos): + tokens = tokenize.generate_tokens(StringIO(sig_text).readline) + sig_args = [] + var_arg = None + var_kw = None + defaults = {} + + def get_token(pos=False): + try: + tok_type, tok_string, (srow, scol), (erow, ecol), line = next(tokens) + except StopIteration: + return tokenize.ENDMARKER, "" + if pos: + return tok_type, tok_string, (srow, scol), (erow, ecol) + else: + return tok_type, tok_string + + while 1: + var_arg_type = None + tok_type, tok_string = get_token() + if tok_type == tokenize.ENDMARKER: + break + if tok_type == tokenize.OP and tok_string in {"*", "**"}: + var_arg_type = tok_string + tok_type, tok_string = get_token() + if tok_type != tokenize.NAME: + raise TemplateError( + f"Invalid signature: ({sig_text})", position=pos, name=name + ) + var_name = tok_string + tok_type, tok_string = get_token() + if tok_type == tokenize.ENDMARKER or ( + tok_type == tokenize.OP and tok_string == "," + ): + if var_arg_type == "*": + var_arg = var_name + elif var_arg_type == "**": + var_kw = var_name + else: + sig_args.append(var_name) + if tok_type == tokenize.ENDMARKER: + break + continue + if var_arg_type is not None: + raise TemplateError( + f"Invalid signature: ({sig_text})", position=pos, name=name + ) + if tok_type == tokenize.OP and tok_string == "=": + nest_type = None + unnest_type = None + nest_count = 0 + start_pos = end_pos = None + parts = [] + while 1: + tok_type, tok_string, s, e = get_token(True) + if start_pos is None: + start_pos = s + end_pos = e + if tok_type == tokenize.ENDMARKER and nest_count: + raise TemplateError( + f"Invalid signature: ({sig_text})", position=pos, name=name + ) + if not nest_count and ( + tok_type == tokenize.ENDMARKER + or (tok_type == tokenize.OP and tok_string == ",") + ): + default_expr = isolate_expression(sig_text, start_pos, end_pos) + defaults[var_name] = default_expr + sig_args.append(var_name) + break + parts.append((tok_type, tok_string)) + if nest_count and tok_type == 
tokenize.OP and tok_string == nest_type: + nest_count += 1 + elif ( + nest_count and tok_type == tokenize.OP and tok_string == unnest_type + ): + nest_count -= 1 + if not nest_count: + nest_type = unnest_type = None + elif ( + not nest_count + and tok_type == tokenize.OP + and tok_string in ("(", "[", "{") + ): + nest_type = tok_string + nest_count = 1 + unnest_type = {"(": ")", "[": "]", "{": "}"}[nest_type] + return sig_args, var_arg, var_kw, defaults + + +def isolate_expression(string, start_pos, end_pos): + srow, scol = start_pos + srow -= 1 + erow, ecol = end_pos + erow -= 1 + lines = string.splitlines(True) + if srow == erow: + return lines[srow][scol:ecol] + parts = [lines[srow][scol:]] + parts.extend(lines[srow + 1:erow]) + if erow < len(lines): + # It'll sometimes give (end_row_past_finish, 0) + parts.append(lines[erow][:ecol]) + return "".join(parts) + + +_fill_command_usage = """\ +%prog [OPTIONS] TEMPLATE arg=value + +Use py:arg=value to set a Python value; otherwise all values are +strings. 
+""" + + +def fill_command(args=None): + import optparse + import os + import sys + + import pkg_resources + + if args is None: + args = sys.argv[1:] + dist = pkg_resources.get_distribution("Paste") + parser = optparse.OptionParser(version=coerce_text(dist), usage=_fill_command_usage) + parser.add_option( + "-o", + "--output", + dest="output", + metavar="FILENAME", + help="File to write output to (default stdout)", + ) + parser.add_option( + "--env", + dest="use_env", + action="store_true", + help="Put the environment in as top-level variables", + ) + options, args = parser.parse_args(args) + if len(args) < 1: + print("You must give a template filename") + sys.exit(2) + template_name = args[0] + args = args[1:] + vars = {} + if options.use_env: + vars.update(os.environ) + for value in args: + if "=" not in value: + print(f"Bad argument: {value!r}") + sys.exit(2) + name, value = value.split("=", 1) + if name.startswith("py:"): + name = name[:3] + value = eval(value) + vars[name] = value + if template_name == "-": + template_content = sys.stdin.read() + template_name = "" + else: + with open(template_name, "rb") as f: + template_content = f.read() + template = Template(template_content, name=template_name) + result = template.substitute(vars) + if options.output: + with open(options.output, "wb") as f: + f.write(result) + else: + sys.stdout.write(result) + + +if __name__ == "__main__": + fill_command() diff --git a/numpy/_configtool.py b/numpy/_configtool.py index 70a14b876bcc..db7831c33951 100644 --- a/numpy/_configtool.py +++ b/numpy/_configtool.py @@ -1,9 +1,9 @@ import argparse -from pathlib import Path import sys +from pathlib import Path -from .version import __version__ from .lib._utils_impl import get_include +from .version import __version__ def main() -> None: diff --git a/numpy/_configtool.pyi b/numpy/_configtool.pyi new file mode 100644 index 000000000000..7e7363e797f3 --- /dev/null +++ b/numpy/_configtool.pyi @@ -0,0 +1 @@ +def main() -> None: ... 
diff --git a/numpy/_core/__init__.py b/numpy/_core/__init__.py index 4b90877138a3..ede50aaeefc3 100644 --- a/numpy/_core/__init__.py +++ b/numpy/_core/__init__.py @@ -10,46 +10,82 @@ from numpy.version import version as __version__ - # disables OpenBLAS affinity setting of the main thread that limits # python threads or processes to one core env_added = [] -for envkey in ['OPENBLAS_MAIN_FREE', 'GOTOBLAS_MAIN_FREE']: +for envkey in ['OPENBLAS_MAIN_FREE']: if envkey not in os.environ: - os.environ[envkey] = '1' + # Note: using `putenv` (and `unsetenv` further down) instead of updating + # `os.environ` on purpose to avoid a race condition, see gh-30627. + os.putenv(envkey, '1') env_added.append(envkey) try: from . import multiarray except ImportError as exc: import sys - msg = """ + + # Bypass for the module re-initialization opt-out + if exc.msg == "cannot load module more than once per process": + raise + + # Basically always, the problem should be that the C module is wrong/missing... + if ( + isinstance(exc, ModuleNotFoundError) + and exc.name == "numpy._core._multiarray_umath" + ): + import sys + candidates = [] + for path in __path__: + candidates.extend( + f for f in os.listdir(path) if f.startswith("_multiarray_umath")) + if len(candidates) == 0: + bad_c_module_info = ( + "We found no compiled module, did NumPy build successfully?\n") + else: + candidate_str = '\n * '.join(candidates) + # cache_tag is documented to be possibly None, so just use name if it is + # this guesses at cache_tag being the same as the extension module scheme + tag = sys.implementation.cache_tag or sys.implementation.name + bad_c_module_info = ( + f"The following compiled module files exist, but seem incompatible\n" + f"with with either python '{tag}' or the " + f"platform '{sys.platform}':\n\n * {candidate_str}\n" + ) + else: + bad_c_module_info = "" + + major, minor, *_ = sys.version_info + msg = f""" IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE! 
Importing the numpy C-extensions failed. This error can happen for many reasons, often due to issues with your setup or how NumPy was installed. - +{bad_c_module_info} We have compiled some common reasons and troubleshooting tips at: https://numpy.org/devdocs/user/troubleshooting-importerror.html Please note and check the following: - * The Python version is: Python%d.%d from "%s" - * The NumPy version is: "%s" + * The Python version is: Python {major}.{minor} from "{sys.executable}" + * The NumPy version is: "{__version__}" and make sure that they are the versions you expect. -Please carefully study the documentation linked above for further help. -Original error was: %s -""" % (sys.version_info[0], sys.version_info[1], sys.executable, - __version__, exc) - raise ImportError(msg) +Please carefully study the information and documentation linked above. +This is unlikely to be a NumPy issue but will be caused by a bad install +or environment on your machine. + +Original error was: {exc} +""" + + raise ImportError(msg) from exc finally: for envkey in env_added: - del os.environ[envkey] + os.unsetenv(envkey) del envkey del env_added del os @@ -69,37 +105,35 @@ raise ImportError(msg.format(path)) from . import numerictypes as nt -from .numerictypes import sctypes, sctypeDict +from .numerictypes import sctypeDict, sctypes + multiarray.set_typeDict(nt.sctypeDict) -from . import numeric -from .numeric import * -from . import fromnumeric +from . import einsumfunc, fromnumeric, function_base, getlimits, numeric, shape_base +from .einsumfunc import * from .fromnumeric import * -from .records import record, recarray -# Note: module name memmap is overwritten by a class with same name -from .memmap import * -from . import function_base from .function_base import * -from . import _machar -from . import getlimits from .getlimits import * -from . 
import shape_base + +# Note: module name memmap is overwritten by a class with same name +from .memmap import * +from .numeric import * +from .records import recarray, record from .shape_base import * -from . import einsumfunc -from .einsumfunc import * -del nt -from .numeric import absolute as abs +del nt # do this after everything else, to minimize the chance of this misleadingly # appearing in an import-time traceback -from . import _add_newdocs -from . import _add_newdocs_scalars # add these for module-freeze analysis (like PyInstaller) -from . import _dtype_ctypes -from . import _internal -from . import _dtype -from . import _methods +from . import ( + _add_newdocs, + _add_newdocs_scalars, + _dtype, + _dtype_ctypes, + _internal, + _methods, +) +from .numeric import absolute as abs acos = numeric.arccos acosh = numeric.arccosh @@ -155,18 +189,6 @@ def _DType_reduce(DType): return _DType_reconstruct, (scalar_type,) -def __getattr__(name): - # Deprecated 2022-11-22, NumPy 1.25. - if name == "MachAr": - import warnings - warnings.warn( - "The `np._core.MachAr` is considered private API (NumPy 1.24)", - DeprecationWarning, stacklevel=2, - ) - return _machar.MachAr - raise AttributeError(f"Module {__name__!r} has no attribute {name!r}") - - import copyreg copyreg.pickle(ufunc, _ufunc_reduce) @@ -176,5 +198,6 @@ def __getattr__(name): del copyreg, _ufunc_reduce, _DType_reduce from numpy._pytesttester import PytestTester + test = PytestTester(__name__) del PytestTester diff --git a/numpy/_core/__init__.pyi b/numpy/_core/__init__.pyi index 40d9c411b97c..ce5427bbfcd9 100644 --- a/numpy/_core/__init__.pyi +++ b/numpy/_core/__init__.pyi @@ -1,2 +1,666 @@ -# NOTE: The `np._core` namespace is deliberately kept empty due to it -# being private +# keep in sync with https://github.com/numpy/numtype/blob/main/src/numpy-stubs/_core/__init__.pyi + +from ._asarray import require +from ._ufunc_config import ( + errstate, + getbufsize, + geterr, + geterrcall, + setbufsize, + seterr, 
+ seterrcall, +) +from .arrayprint import ( + array2string, + array_repr, + array_str, + format_float_positional, + format_float_scientific, + get_printoptions, + printoptions, + set_printoptions, +) +from .einsumfunc import einsum, einsum_path +from .fromnumeric import ( + all, + amax, + amin, + any, + argmax, + argmin, + argpartition, + argsort, + around, + choose, + clip, + compress, + cumprod, + cumsum, + cumulative_prod, + cumulative_sum, + diagonal, + matrix_transpose, + max, + mean, + min, + ndim, + nonzero, + partition, + prod, + ptp, + put, + ravel, + repeat, + reshape, + resize, + round, + searchsorted, + shape, + size, + sort, + squeeze, + std, + sum, + swapaxes, + take, + trace, + transpose, + transpose as permute_dims, + var, +) +from .function_base import geomspace, linspace, logspace +from .getlimits import finfo, iinfo +from .memmap import memmap +from .numeric import ( + False_, + True_, + allclose, + arange, + argwhere, + array, + array_equal, + array_equiv, + asanyarray, + asarray, + ascontiguousarray, + asfortranarray, + astype, + base_repr, + binary_repr, + bitwise_not, + broadcast, + can_cast, + concatenate, + concatenate as concat, + convolve, + copyto, + correlate, + count_nonzero, + cross, + dot, + dtype, + empty, + empty_like, + flatiter, + flatnonzero, + from_dlpack, + frombuffer, + fromfile, + fromfunction, + fromiter, + fromstring, + full, + full_like, + identity, + indices, + inf, + inner, + isclose, + isfortran, + isscalar, + lexsort, + little_endian, + matmul, + may_share_memory, + min_scalar_type, + moveaxis, + nan, + ndarray, + nditer, + nested_iters, + newaxis, + ones, + ones_like, + outer, + promote_types, + putmask, + result_type, + roll, + rollaxis, + shares_memory, + tensordot, + ufunc, + vdot, + vecdot, + where, + zeros, + zeros_like, +) +from .numerictypes import ( + ScalarType, + bool, + bool_, + busday_count, + busday_offset, + busdaycalendar, + byte, + bytes_, + cdouble, + character, + clongdouble, + complex64, + 
complex128, + complex192, + complex256, + complexfloating, + csingle, + datetime64, + datetime_as_string, + datetime_data, + double, + flexible, + float16, + float32, + float64, + float96, + float128, + floating, + generic, + half, + inexact, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + intp, + is_busday, + isdtype, + issubdtype, + long, + longdouble, + longlong, + number, + object_, + sctypeDict, + short, + signedinteger, + single, + str_, + timedelta64, + typecodes, + ubyte, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + unsignedinteger, + ushort, + void, +) +from .records import recarray, record +from .shape_base import ( + atleast_1d, + atleast_2d, + atleast_3d, + block, + hstack, + stack, + unstack, + vstack, +) +from .umath import ( + absolute, + absolute as abs, + add, + arccos, + arccos as acos, + arccosh, + arccosh as acosh, + arcsin, + arcsin as asin, + arcsinh, + arcsinh as asinh, + arctan, + arctan as atan, + arctan2, + arctan2 as atan2, + arctanh, + arctanh as atanh, + bitwise_and, + bitwise_count, + bitwise_or, + bitwise_xor, + cbrt, + ceil, + conj, + conjugate, + copysign, + cos, + cosh, + deg2rad, + degrees, + divide, + divmod, + e, + equal, + euler_gamma, + exp, + exp2, + expm1, + fabs, + float_power, + floor, + floor_divide, + fmax, + fmin, + fmod, + frexp, + frompyfunc, + gcd, + greater, + greater_equal, + heaviside, + hypot, + invert, + invert as bitwise_invert, + isfinite, + isinf, + isnan, + isnat, + lcm, + ldexp, + left_shift, + left_shift as bitwise_left_shift, + less, + less_equal, + log, + log1p, + log2, + log10, + logaddexp, + logaddexp2, + logical_and, + logical_not, + logical_or, + logical_xor, + matvec, + maximum, + minimum, + mod, + modf, + multiply, + negative, + nextafter, + not_equal, + pi, + positive, + power, + power as pow, + rad2deg, + radians, + reciprocal, + remainder, + right_shift, + right_shift as bitwise_right_shift, + rint, + sign, + signbit, + sin, + sinh, + 
spacing, + sqrt, + square, + subtract, + tan, + tanh, + true_divide, + trunc, + vecmat, +) + +__all__ = [ + "False_", + "ScalarType", + "True_", + "abs", + "absolute", + "acos", + "acosh", + "add", + "all", + "allclose", + "amax", + "amin", + "any", + "arange", + "arccos", + "arccosh", + "arcsin", + "arcsinh", + "arctan", + "arctan2", + "arctanh", + "argmax", + "argmin", + "argpartition", + "argsort", + "argwhere", + "around", + "array", + "array2string", + "array_equal", + "array_equiv", + "array_repr", + "array_str", + "asanyarray", + "asarray", + "ascontiguousarray", + "asfortranarray", + "asin", + "asinh", + "astype", + "atan", + "atan2", + "atanh", + "atleast_1d", + "atleast_2d", + "atleast_3d", + "base_repr", + "binary_repr", + "bitwise_and", + "bitwise_count", + "bitwise_invert", + "bitwise_left_shift", + "bitwise_not", + "bitwise_or", + "bitwise_right_shift", + "bitwise_xor", + "block", + "bool", + "bool_", + "broadcast", + "busday_count", + "busday_offset", + "busdaycalendar", + "byte", + "bytes_", + "can_cast", + "cbrt", + "cdouble", + "ceil", + "character", + "choose", + "clip", + "clongdouble", + "complex64", + "complex128", + "complex192", + "complex256", + "complexfloating", + "compress", + "concat", + "concatenate", + "conj", + "conjugate", + "convolve", + "copysign", + "copyto", + "correlate", + "cos", + "cosh", + "count_nonzero", + "cross", + "csingle", + "cumprod", + "cumsum", + "cumulative_prod", + "cumulative_sum", + "datetime64", + "datetime_as_string", + "datetime_data", + "deg2rad", + "degrees", + "diagonal", + "divide", + "divmod", + "dot", + "double", + "dtype", + "e", + "einsum", + "einsum_path", + "empty", + "empty_like", + "equal", + "errstate", + "euler_gamma", + "exp", + "exp2", + "expm1", + "fabs", + "finfo", + "flatiter", + "flatnonzero", + "flexible", + "float16", + "float32", + "float64", + "float96", + "float128", + "float_power", + "floating", + "floor", + "floor_divide", + "fmax", + "fmin", + "fmod", + "format_float_positional", 
+ "format_float_scientific", + "frexp", + "from_dlpack", + "frombuffer", + "fromfile", + "fromfunction", + "fromiter", + "frompyfunc", + "fromstring", + "full", + "full_like", + "gcd", + "generic", + "geomspace", + "get_printoptions", + "getbufsize", + "geterr", + "geterrcall", + "greater", + "greater_equal", + "half", + "heaviside", + "hstack", + "hypot", + "identity", + "iinfo", + "indices", + "inexact", + "inf", + "inner", + "int8", + "int16", + "int32", + "int64", + "int_", + "intc", + "integer", + "intp", + "invert", + "is_busday", + "isclose", + "isdtype", + "isfinite", + "isfortran", + "isinf", + "isnan", + "isnat", + "isscalar", + "issubdtype", + "lcm", + "ldexp", + "left_shift", + "less", + "less_equal", + "lexsort", + "linspace", + "little_endian", + "log", + "log1p", + "log2", + "log10", + "logaddexp", + "logaddexp2", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "logspace", + "long", + "longdouble", + "longlong", + "matmul", + "matrix_transpose", + "matvec", + "max", + "maximum", + "may_share_memory", + "mean", + "memmap", + "min", + "min_scalar_type", + "minimum", + "mod", + "modf", + "moveaxis", + "multiply", + "nan", + "ndarray", + "ndim", + "nditer", + "negative", + "nested_iters", + "newaxis", + "nextafter", + "nonzero", + "not_equal", + "number", + "object_", + "ones", + "ones_like", + "outer", + "partition", + "permute_dims", + "pi", + "positive", + "pow", + "power", + "printoptions", + "prod", + "promote_types", + "ptp", + "put", + "putmask", + "rad2deg", + "radians", + "ravel", + "recarray", + "reciprocal", + "record", + "remainder", + "repeat", + "require", + "reshape", + "resize", + "result_type", + "right_shift", + "rint", + "roll", + "rollaxis", + "round", + "sctypeDict", + "searchsorted", + "set_printoptions", + "setbufsize", + "seterr", + "seterrcall", + "shape", + "shares_memory", + "short", + "sign", + "signbit", + "signedinteger", + "sin", + "single", + "sinh", + "size", + "sort", + "spacing", + "sqrt", + 
"square", + "squeeze", + "stack", + "std", + "str_", + "subtract", + "sum", + "swapaxes", + "take", + "tan", + "tanh", + "tensordot", + "timedelta64", + "trace", + "transpose", + "true_divide", + "trunc", + "typecodes", + "ubyte", + "ufunc", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintc", + "uintp", + "ulong", + "ulonglong", + "unsignedinteger", + "unstack", + "ushort", + "var", + "vdot", + "vecdot", + "vecmat", + "void", + "vstack", + "where", + "zeros", + "zeros_like", +] diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 177462384e81..30ed3c11ac73 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -9,9 +9,10 @@ """ -from numpy._core.function_base import add_newdoc -from numpy._core.overrides import array_function_like_doc +import textwrap +from numpy._core.function_base import add_newdoc +from numpy._core.overrides import get_array_function_like_doc # noqa: F401 ############################################################################### # @@ -45,6 +46,7 @@ Examples -------- + >>> import numpy as np >>> x = np.arange(6).reshape(2, 3) >>> fl = x.flat >>> type(fl) @@ -72,6 +74,7 @@ Examples -------- + >>> import numpy as np >>> x = np.arange(5) >>> fl = x.flat >>> fl.base is x @@ -79,13 +82,13 @@ """)) - add_newdoc('numpy._core', 'flatiter', ('coords', """ An N-dimensional tuple of current coordinates. Examples -------- + >>> import numpy as np >>> x = np.arange(6).reshape(2, 3) >>> fl = x.flat >>> fl.coords @@ -97,13 +100,13 @@ """)) - add_newdoc('numpy._core', 'flatiter', ('index', """ Current flat index into the array. 
Examples -------- + >>> import numpy as np >>> x = np.arange(6).reshape(2, 3) >>> fl = x.flat >>> fl.index @@ -115,22 +118,31 @@ """)) -# flatiter functions +# flatiter methods add_newdoc('numpy._core', 'flatiter', ('__array__', - """__array__(type=None) Get array from iterator + """ + __array__($self, dtype=None, /, *, copy=None) + -- - """)) + flat.__array__([dtype], *, copy=None) + Get array from iterator + + """)) add_newdoc('numpy._core', 'flatiter', ('copy', """ - copy() + copy($self, /) + -- + + flat.copy() Get a copy of the iterator as a 1-D array. Examples -------- + >>> import numpy as np >>> x = np.arange(6).reshape(2, 3) >>> x array([[0, 1, 2], @@ -150,6 +162,19 @@ add_newdoc('numpy._core', 'nditer', """ + nditer( + op, + flags=None, + op_flags=None, + op_dtypes=None, + order='K', + casting='safe', + op_axes=None, + itershape=None, + buffersize=0, + ) + -- + nditer(op, flags=None, op_flags=None, op_dtypes=None, order='K', casting='safe', op_axes=None, itershape=None, buffersize=0) @@ -161,63 +186,62 @@ ---------- op : ndarray or sequence of array_like The array(s) to iterate over. - flags : sequence of str, optional - Flags to control the behavior of the iterator. - - * ``buffered`` enables buffering when required. - * ``c_index`` causes a C-order index to be tracked. - * ``f_index`` causes a Fortran-order index to be tracked. - * ``multi_index`` causes a multi-index, or a tuple of indices - with one per iteration dimension, to be tracked. - * ``common_dtype`` causes all the operands to be converted to - a common data type, with copying or buffering as necessary. - * ``copy_if_overlap`` causes the iterator to determine if read - operands have overlap with write operands, and make temporary - copies as necessary to avoid overlap. False positives (needless - copying) are possible in some cases. - * ``delay_bufalloc`` delays allocation of the buffers until - a reset() call is made. 
Allows ``allocate`` operands to - be initialized before their values are copied into the buffers. - * ``external_loop`` causes the ``values`` given to be - one-dimensional arrays with multiple values instead of - zero-dimensional arrays. - * ``grow_inner`` allows the ``value`` array sizes to be made - larger than the buffer size when both ``buffered`` and - ``external_loop`` is used. - * ``ranged`` allows the iterator to be restricted to a sub-range - of the iterindex values. - * ``refs_ok`` enables iteration of reference types, such as - object arrays. - * ``reduce_ok`` enables iteration of ``readwrite`` operands - which are broadcasted, also known as reduction operands. - * ``zerosize_ok`` allows `itersize` to be zero. + Flags to control the behavior of the iterator. + + * ``buffered`` enables buffering when required. + * ``c_index`` causes a C-order index to be tracked. + * ``f_index`` causes a Fortran-order index to be tracked. + * ``multi_index`` causes a multi-index, or a tuple of indices + with one per iteration dimension, to be tracked. + * ``common_dtype`` causes all the operands to be converted to + a common data type, with copying or buffering as necessary. + * ``copy_if_overlap`` causes the iterator to determine if read + operands have overlap with write operands, and make temporary + copies as necessary to avoid overlap. False positives (needless + copying) are possible in some cases. + * ``delay_bufalloc`` delays allocation of the buffers until + a reset() call is made. Allows ``allocate`` operands to + be initialized before their values are copied into the buffers. + * ``external_loop`` causes the ``values`` given to be + one-dimensional arrays with multiple values instead of + zero-dimensional arrays. + * ``grow_inner`` allows the ``value`` array sizes to be made + larger than the buffer size when both ``buffered`` and + ``external_loop`` is used. + * ``ranged`` allows the iterator to be restricted to a sub-range + of the iterindex values. 
+ * ``refs_ok`` enables iteration of reference types, such as + object arrays. + * ``reduce_ok`` enables iteration of ``readwrite`` operands + which are broadcasted, also known as reduction operands. + * ``zerosize_ok`` allows `itersize` to be zero. op_flags : list of list of str, optional - This is a list of flags for each operand. At minimum, one of - ``readonly``, ``readwrite``, or ``writeonly`` must be specified. - - * ``readonly`` indicates the operand will only be read from. - * ``readwrite`` indicates the operand will be read from and written to. - * ``writeonly`` indicates the operand will only be written to. - * ``no_broadcast`` prevents the operand from being broadcasted. - * ``contig`` forces the operand data to be contiguous. - * ``aligned`` forces the operand data to be aligned. - * ``nbo`` forces the operand data to be in native byte order. - * ``copy`` allows a temporary read-only copy if required. - * ``updateifcopy`` allows a temporary read-write copy if required. - * ``allocate`` causes the array to be allocated if it is None - in the ``op`` parameter. - * ``no_subtype`` prevents an ``allocate`` operand from using a subtype. - * ``arraymask`` indicates that this operand is the mask to use - for selecting elements when writing to operands with the - 'writemasked' flag set. The iterator does not enforce this, - but when writing from a buffer back to the array, it only - copies those elements indicated by this mask. - * ``writemasked`` indicates that only elements where the chosen - ``arraymask`` operand is True will be written to. - * ``overlap_assume_elementwise`` can be used to mark operands that are - accessed only in the iterator order, to allow less conservative - copying when ``copy_if_overlap`` is present. + This is a list of flags for each operand. At minimum, one of + ``readonly``, ``readwrite``, or ``writeonly`` must be specified. + + * ``readonly`` indicates the operand will only be read from. 
+ * ``readwrite`` indicates the operand will be read from and written to. + * ``writeonly`` indicates the operand will only be written to. + * ``no_broadcast`` prevents the operand from being broadcasted. + * ``contig`` forces the operand data to be contiguous. + * ``aligned`` forces the operand data to be aligned. + * ``nbo`` forces the operand data to be in native byte order. + * ``copy`` allows a temporary read-only copy if required. + * ``updateifcopy`` allows a temporary read-write copy if required. + * ``allocate`` causes the array to be allocated if it is None + in the ``op`` parameter. + * ``no_subtype`` prevents an ``allocate`` operand from using a subtype. + * ``arraymask`` indicates that this operand is the mask to use + for selecting elements when writing to operands with the + 'writemasked' flag set. The iterator does not enforce this, + but when writing from a buffer back to the array, it only + copies those elements indicated by this mask. + * ``writemasked`` indicates that only elements where the chosen + ``arraymask`` operand is True will be written to. + * ``overlap_assume_elementwise`` can be used to mark operands that are + accessed only in the iterator order, to allow less conservative + copying when ``copy_if_overlap`` is present. op_dtypes : dtype or tuple of dtype(s), optional The required data type(s) of the operands. If copying or buffering is enabled, the data will be converted to/from their original types. @@ -321,6 +345,8 @@ Here is how we might write an ``iter_add`` function, using the Python iterator protocol: + >>> import numpy as np + >>> def iter_add_py(x, y, out=None): ... addop = np.add ... it = np.nditer([x, y, out], [], @@ -392,11 +418,11 @@ original data when the :meth:`~object.__exit__` function is called but not before: - >>> a = np.arange(6, dtype='i4')[::-2] + >>> a = np.arange(6, dtype=np.int32)[::-2] >>> with np.nditer(a, [], ... [['writeonly', 'updateifcopy']], ... casting='unsafe', - ... 
op_dtypes=[np.dtype('f4')]) as i: + ... op_dtypes=[np.dtype(np.float32)]) as i: ... x = i.operands[0] ... x[:] = [-1, -2, -3] ... # a still unchanged here @@ -416,16 +442,29 @@ """) +# nditer attributes + +add_newdoc('numpy._core', 'nditer', ('operands', + """ + operands[`Slice`] + + The array(s) to be iterated over. Valid only before the iterator is closed. + """)) + # nditer methods add_newdoc('numpy._core', 'nditer', ('copy', """ + copy($self, /) + -- + copy() Get a copy of the iterator in its current state. Examples -------- + >>> import numpy as np >>> x = np.arange(10) >>> y = x + 1 >>> it = np.nditer([x, y]) @@ -437,15 +476,11 @@ """)) -add_newdoc('numpy._core', 'nditer', ('operands', - """ - operands[`Slice`] - - The array(s) to be iterated over. Valid only before the iterator is closed. - """)) - add_newdoc('numpy._core', 'nditer', ('debug_print', """ + debug_print($self, /) + -- + debug_print() Print the current state of the `nditer` instance and debug info to stdout. @@ -454,6 +489,9 @@ add_newdoc('numpy._core', 'nditer', ('enable_external_loop', """ + enable_external_loop($self, /) + -- + enable_external_loop() When the "external_loop" was not used during construction, but @@ -464,6 +502,9 @@ add_newdoc('numpy._core', 'nditer', ('iternext', """ + iternext($self, /) + -- + iternext() Check whether iterations are left, and perform a single internal iteration @@ -479,6 +520,9 @@ add_newdoc('numpy._core', 'nditer', ('remove_axis', """ + remove_axis($self, i, /) + -- + remove_axis(i, /) Removes axis `i` from the iterator. Requires that the flag "multi_index" @@ -488,6 +532,9 @@ add_newdoc('numpy._core', 'nditer', ('remove_multi_index', """ + remove_multi_index($self, /) + -- + remove_multi_index() When the "multi_index" flag was specified, this removes it, allowing @@ -497,32 +544,62 @@ add_newdoc('numpy._core', 'nditer', ('reset', """ + reset($self, /) + -- + reset() Reset the iterator to its initial state. 
""")) +add_newdoc('numpy._core', 'nditer', ('close', + """ + close($self, /) + -- + + close() + + Resolve all writeback semantics in writeable operands. + + See Also + -------- + :ref:`nditer-context-manager` + + """)) + +# nested_iters + add_newdoc('numpy._core', 'nested_iters', """ - nested_iters(op, axes, flags=None, op_flags=None, op_dtypes=None, \ - order="K", casting="safe", buffersize=0) + nested_iters( + op, + axes, + flags=None, + op_flags=None, + op_dtypes=None, + order='K', + casting='safe', + buffersize=0, + ) + -- + + nested_iters(op, axes, flags=None, op_flags=None, op_dtypes=None, + order='K', casting='safe', buffersize=0) Create nditers for use in nested loops Create a tuple of `nditer` objects which iterate in nested loops over different axes of the op argument. The first iterator is used in the - outermost loop, the last in the innermost loop. Advancing one will change - the subsequent iterators to point at its new element. + outermost loop, the last in the innermost loop. Advancing one will + change the subsequent iterators to point at its new element. Parameters ---------- op : ndarray or sequence of array_like The array(s) to iterate over. - axes : list of list of int Each item is used as an "op_axes" argument to an nditer - flags, op_flags, op_dtypes, order, casting, buffersize (optional) See `nditer` parameters of the same name @@ -542,6 +619,7 @@ [a[:, 0, :], a[:, 1, 0], a[:, 2, :]] since we specified the first iter's axes as [1] + >>> import numpy as np >>> a = np.arange(12).reshape(2, 3, 2) >>> i, j = np.nested_iters(a, [[1], [0, 2]], flags=["multi_index"]) >>> for x in i: @@ -566,22 +644,6 @@ """) -add_newdoc('numpy._core', 'nditer', ('close', - """ - close() - - Resolve all writeback semantics in writeable operands. - - .. 
versionadded:: 1.15.0 - - See Also - -------- - - :ref:`nditer-context-manager` - - """)) - - ############################################################################### # # broadcast @@ -590,6 +652,9 @@ add_newdoc('numpy._core', 'broadcast', """ + broadcast(*arrays) + -- + Produce an object that mimics broadcasting. Parameters @@ -616,6 +681,7 @@ Manually adding two vectors, using broadcasting: + >>> import numpy as np >>> x = np.array([[1], [2], [3]]) >>> y = np.array([4, 5, 6]) >>> b = np.broadcast(x, y) @@ -644,6 +710,8 @@ Examples -------- + + >>> import numpy as np >>> x = np.array([[1], [2], [3]]) >>> y = np.array([4, 5, 6]) >>> b = np.broadcast(x, y) @@ -669,6 +737,8 @@ Examples -------- + + >>> import numpy as np >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) @@ -682,10 +752,9 @@ """ Number of dimensions of broadcasted result. Alias for `nd`. - .. versionadded:: 1.12.0 - Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) @@ -701,6 +770,7 @@ Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) @@ -715,6 +785,7 @@ Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) @@ -729,6 +800,7 @@ Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) @@ -743,6 +815,7 @@ Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) @@ -751,8 +824,13 @@ """)) +# methods + add_newdoc('numpy._core', 'broadcast', ('reset', """ + reset($self, /) + -- + reset() Reset the broadcasted result's iterator(s). 
@@ -767,6 +845,7 @@ Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) @@ -790,8 +869,21 @@ add_newdoc('numpy._core.multiarray', 'array', """ + array( + object, + dtype=None, + *, + copy=True, + order='K', + subok=False, + ndmin=0, + ndmax=0, + like=None, + ) + -- + array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0, - like=None) + ndmax=0, like=None) Create an array. @@ -840,6 +932,16 @@ Specifies the minimum number of dimensions that the resulting array should have. Ones will be prepended to the shape as needed to meet this requirement. + ndmax : int, optional + Specifies the maximum number of dimensions to create when inferring + shape from nested sequences. By default (ndmax=0), NumPy recurses + through all nesting levels (up to the compile-time constant + ``NPY_MAXDIMS``). + Setting ``ndmax`` stops recursion at the specified depth, preserving + deeper nested structures as objects instead of promoting them to + higher-dimensional arrays. In this case, ``dtype=np.object_`` is required. + + .. versionadded:: 2.4.0 ${ARRAY_FUNCTION_LIKE} .. versionadded:: 1.20.0 @@ -859,7 +961,7 @@ ones : Return a new array setting values to one. zeros : Return a new array setting values to zero. full : Return a new array of given shape filled with value. - copy: Return an array copy of the given object. + copy : Return an array copy of the given object. 
Notes @@ -870,6 +972,7 @@ Examples -------- + >>> import numpy as np >>> np.array([1, 2, 3]) array([1, 2, 3]) @@ -891,14 +994,14 @@ Type provided: - >>> np.array([1, 2, 3], dtype=complex) + >>> np.array([1, 2, 3], dtype=np.complex128) array([ 1.+0.j, 2.+0.j, 3.+0.j]) Data-type consisting of more than one element: >>> x = np.array([(1,2),(3,4)],dtype=[('a','>> x['a'] - array([1, 3]) + array([1, 3], dtype=int32) Creating an array from sub-classes: @@ -910,13 +1013,28 @@ matrix([[1, 2], [3, 4]]) - """.replace( - "${ARRAY_FUNCTION_LIKE}", - array_function_like_doc, - )) + Limiting the maximum dimensions with ``ndmax``: + + >>> a = np.array([[1, 2], [3, 4]], dtype=np.object_, ndmax=2) + >>> a + array([[1, 2], + [3, 4]], dtype=object) + >>> a.shape + (2, 2) + + >>> b = np.array([[1, 2], [3, 4]], dtype=np.object_, ndmax=1) + >>> b + array([list([1, 2]), list([3, 4])], dtype=object) + >>> b.shape + (2,) + + """) add_newdoc('numpy._core.multiarray', 'asarray', """ + asarray(a, dtype=None, order=None, *, device=None, copy=None, like=None) + -- + asarray(a, dtype=None, order=None, *, device=None, copy=None, like=None) Convert the input to an array. @@ -930,14 +1048,15 @@ dtype : data-type, optional By default, the data-type is inferred from the input data. order : {'C', 'F', 'A', 'K'}, optional - Memory layout. 'A' and 'K' depend on the order of input array a. - 'C' row-major (C-style), - 'F' column-major (Fortran-style) memory representation. - 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise - 'K' (keep) preserve input order - Defaults to 'K'. + The memory layout of the output. + 'C' gives a row-major layout (C-style), + 'F' gives a column-major layout (Fortran-style). + 'C' and 'F' will copy if needed to ensure the output format. + 'A' (any) is equivalent to 'F' if input a is non-contiguous or Fortran-contiguous, otherwise, it is equivalent to 'C'. + Unlike 'C' or 'F', 'A' does not ensure that the result is contiguous. 
+ 'K' (keep) is the default and preserves the input order for the output. device : str, optional - The device on which to place the created array. Default: None. + The device on which to place the created array. Default: ``None``. For Array-API interoperability only, so must be ``"cpu"`` if passed. .. versionadded:: 2.0.0 @@ -965,18 +1084,17 @@ -------- asanyarray : Similar function which passes through subclasses. ascontiguousarray : Convert input to a contiguous array. - asfortranarray : Convert input to an ndarray with column-major - memory order. + asfortranarray : Convert input to an ndarray with column-major memory order. asarray_chkfinite : Similar function which checks input for NaNs and Infs. fromiter : Create an array from an iterator. - fromfunction : Construct an array by executing a function on grid - positions. + fromfunction : Construct an array by executing a function on grid positions. Examples -------- Convert a list into an array: >>> a = [1, 2] + >>> import numpy as np >>> np.asarray(a) array([1, 2]) @@ -1004,14 +1122,14 @@ >>> np.asanyarray(a) is a True - """.replace( - "${ARRAY_FUNCTION_LIKE}", - array_function_like_doc, - )) + """) add_newdoc('numpy._core.multiarray', 'asanyarray', """ - asanyarray(a, dtype=None, order=None, *, like=None) + asanyarray(a, dtype=None, order=None, *, device=None, copy=None, like=None) + -- + + asanyarray(a, dtype=None, order=None, *, device=None, copy=None, like=None) Convert the input to an ndarray, but pass ndarray subclasses through. @@ -1024,14 +1142,16 @@ dtype : data-type, optional By default, the data-type is inferred from the input data. order : {'C', 'F', 'A', 'K'}, optional - Memory layout. 'A' and 'K' depend on the order of input array a. - 'C' row-major (C-style), - 'F' column-major (Fortran-style) memory representation. - 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise - 'K' (keep) preserve input order - Defaults to 'C'. + The memory layout of the output. 
+ 'C' gives a row-major layout (C-style), + 'F' gives a column-major layout (Fortran-style). + 'C' and 'F' will copy if needed to ensure the output format. + 'A' (any) is equivalent to 'F' if input a is non-contiguous or Fortran-contiguous, otherwise, it is equivalent to 'C'. + Unlike 'C' or 'F', 'A' does not ensure that the result is contiguous. + 'K' (keep) preserves the input order for the output. + 'C' is the default. device : str, optional - The device on which to place the created array. Default: None. + The device on which to place the created array. Default: ``None``. For Array-API interoperability only, so must be ``"cpu"`` if passed. .. versionadded:: 2.1.0 @@ -1060,19 +1180,17 @@ -------- asarray : Similar function which always returns ndarrays. ascontiguousarray : Convert input to a contiguous array. - asfortranarray : Convert input to an ndarray with column-major - memory order. - asarray_chkfinite : Similar function which checks input for NaNs and - Infs. + asfortranarray : Convert input to an ndarray with column-major memory order. + asarray_chkfinite : Similar function which checks input for NaNs and Infs. fromiter : Create an array from an iterator. - fromfunction : Construct an array by executing a function on grid - positions. + fromfunction : Construct an array by executing a function on grid positions. Examples -------- Convert a list into an array: >>> a = [1, 2] + >>> import numpy as np >>> np.asanyarray(a) array([1, 2]) @@ -1082,13 +1200,13 @@ >>> np.asanyarray(a) is a True - """.replace( - "${ARRAY_FUNCTION_LIKE}", - array_function_like_doc, - )) + """) add_newdoc('numpy._core.multiarray', 'ascontiguousarray', """ + ascontiguousarray(a, dtype=None, *, like=None) + -- + ascontiguousarray(a, dtype=None, *, like=None) Return a contiguous array (ndim >= 1) in memory (C order). @@ -1111,8 +1229,7 @@ See Also -------- - asfortranarray : Convert input to an ndarray with column-major - memory order. 
+ asfortranarray : Convert input to an ndarray with column-major memory order. require : Return an ndarray that satisfies requirements. ndarray.flags : Information about the memory layout of the array. @@ -1120,6 +1237,7 @@ -------- Starting with a Fortran-contiguous array: + >>> import numpy as np >>> x = np.ones((2, 3), order='F') >>> x.flags['F_CONTIGUOUS'] True @@ -1147,13 +1265,13 @@ Note: This function returns an array with at least one-dimension (1-d) so it will not preserve 0-d arrays. - """.replace( - "${ARRAY_FUNCTION_LIKE}", - array_function_like_doc, - )) + """) add_newdoc('numpy._core.multiarray', 'asfortranarray', """ + asfortranarray(a, dtype=None, *, like=None) + -- + asfortranarray(a, dtype=None, *, like=None) Return an array (ndim >= 1) laid out in Fortran order in memory. @@ -1185,6 +1303,7 @@ -------- Starting with a C-contiguous array: + >>> import numpy as np >>> x = np.ones((2, 3), order='C') >>> x.flags['C_CONTIGUOUS'] True @@ -1212,14 +1331,14 @@ Note: This function returns an array with at least one-dimension (1-d) so it will not preserve 0-d arrays. - """.replace( - "${ARRAY_FUNCTION_LIKE}", - array_function_like_doc, - )) + """) add_newdoc('numpy._core.multiarray', 'empty', """ - empty(shape, dtype=float, order='C', *, device=None, like=None) + empty(shape, dtype=None, order='C', *, device=None, like=None) + -- + + empty(shape, dtype=None, order='C', *, device=None, like=None) Return a new array of given shape and type, without initializing entries. @@ -1232,10 +1351,9 @@ `numpy.float64`. order : {'C', 'F'}, optional, default: 'C' Whether to store multi-dimensional data in row-major - (C-style) or column-major (Fortran-style) order in - memory. + (C-style) or column-major (Fortran-style) order in memory. device : str, optional - The device on which to place the created array. Default: None. + The device on which to place the created array. Default: ``None``. For Array-API interoperability only, so must be ``"cpu"`` if passed. .. 
versionadded:: 2.0.0 @@ -1266,18 +1384,16 @@ Examples -------- + >>> import numpy as np >>> np.empty([2, 2]) array([[ -9.74499359e+001, 6.69583040e-309], [ 2.13182611e-314, 3.06959433e-309]]) #uninitialized - >>> np.empty([2, 2], dtype=int) + >>> np.empty([2, 2], dtype=np.int_) array([[-1073741821, -1067949133], [ 496041986, 19249760]]) #uninitialized - """.replace( - "${ARRAY_FUNCTION_LIKE}", - array_function_like_doc, - )) + """) add_newdoc('numpy._core.multiarray', 'scalar', """ @@ -1291,11 +1407,14 @@ string. If `obj` is not given, it will be interpreted as None for object type and as zeros for all other types. - """) + """) # sufficient null bytes for all number dtypes add_newdoc('numpy._core.multiarray', 'zeros', """ - zeros(shape, dtype=float, order='C', *, like=None) + zeros(shape, dtype=None, order='C', *, device=None, like=None) + -- + + zeros(shape, dtype=None, order='C', *, device=None, like=None) Return a new array of given shape and type, filled with zeros. @@ -1308,8 +1427,12 @@ `numpy.float64`. order : {'C', 'F'}, optional, default: 'C' Whether to store multi-dimensional data in row-major - (C-style) or column-major (Fortran-style) order in - memory. + (C-style) or column-major (Fortran-style) order in memory. + device : str, optional + The device on which to place the created array. Default: ``None``. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 ${ARRAY_FUNCTION_LIKE} .. 
versionadded:: 1.20.0 @@ -1328,10 +1451,11 @@ Examples -------- + >>> import numpy as np >>> np.zeros(5) array([ 0., 0., 0., 0., 0.]) - >>> np.zeros((5,), dtype=int) + >>> np.zeros((5,), dtype=np.int_) array([0, 0, 0, 0, 0]) >>> np.zeros((2, 1)) @@ -1347,13 +1471,11 @@ array([(0, 0), (0, 0)], dtype=[('x', '>> np.fromstring('1 2', dtype=int, sep=' ') + >>> import numpy as np + >>> np.fromstring('1 2', dtype=np.int_, sep=' ') array([1, 2]) - >>> np.fromstring('1, 2', dtype=int, sep=',') + >>> np.fromstring('1, 2', dtype=np.int_, sep=',') array([1, 2]) - """.replace( - "${ARRAY_FUNCTION_LIKE}", - array_function_like_doc, - )) + """) add_newdoc('numpy._core.multiarray', 'compare_chararrays', """ + compare_chararrays(a1, a2, cmp, rstrip) + -- + compare_chararrays(a1, a2, cmp, rstrip) Performs element-wise comparison of two string arrays using the @@ -1439,23 +1561,24 @@ Arrays to be compared. cmp : {"<", "<=", "==", ">=", ">", "!="} Type of comparison. - rstrip : Boolean - If True, the spaces at the end of Strings are removed before the comparison. + rstrip : bool + If True, the spaces at the end of strings are removed before the comparison. Returns ------- out : ndarray - The output array of type Boolean with the same shape as a and b. + The output array of type `numpy.bool` with the same shape as `a1` and `a2`. Raises ------ ValueError If `cmp` is not valid. TypeError - If at least one of `a` or `b` is a non-string array + If at least one of `a1` or `a2` is a non-string array Examples -------- + >>> import numpy as np >>> a = np.array(["a", "b", "cde"]) >>> b = np.array(["a", "a", "dec"]) >>> np.char.compare_chararrays(a, b, ">", True) @@ -1465,6 +1588,9 @@ add_newdoc('numpy._core.multiarray', 'fromiter', """ + fromiter(iter, dtype, count=-1, *, like=None) + -- + fromiter(iter, dtype, count=-1, *, like=None) Create a new 1-dimensional array from an iterable object. 
@@ -1499,6 +1625,7 @@ Examples -------- + >>> import numpy as np >>> iterable = (x*x for x in range(5)) >>> np.fromiter(iterable, float) array([ 0., 1., 4., 9., 16.]) @@ -1515,14 +1642,14 @@ [5, 6]]) - """.replace( - "${ARRAY_FUNCTION_LIKE}", - array_function_like_doc, - )) + """) add_newdoc('numpy._core.multiarray', 'fromfile', """ - fromfile(file, dtype=float, count=-1, sep='', offset=0, *, like=None) + fromfile(file, dtype=None, count=-1, sep='', offset=0, *, like=None) + -- + + fromfile(file, dtype=np.float64, count=-1, sep='', offset=0, *, like=None) Construct an array from data in a text or binary file. @@ -1533,20 +1660,14 @@ Parameters ---------- file : file or str or Path - Open file object or filename. - - .. versionchanged:: 1.17.0 - `pathlib.Path` objects are now accepted. - + An open file object, a string containing the filename, or a Path object. + When reading from a file object it must support random access + (i.e. it must have tell and seek methods). dtype : data-type Data type of the returned array. For binary files, it is used to determine the size and byte-order of the items in the file. Most builtin numeric types are supported and extension types may be supported. - - .. versionadded:: 1.18.0 - Complex dtypes. - count : int Number of items to read. ``-1`` means all items (i.e., the complete file). @@ -1559,8 +1680,6 @@ offset : int The offset (in bytes) from the file's current position. Defaults to 0. Only permitted for binary files. - - .. versionadded:: 1.17.0 ${ARRAY_FUNCTION_LIKE} .. versionadded:: 1.20.0 @@ -1583,6 +1702,7 @@ -------- Construct an ndarray: + >>> import numpy as np >>> dt = np.dtype([('time', [('min', np.int64), ('sec', np.int64)]), ... 
('temp', float)]) >>> x = np.zeros((1,), dtype=dt) @@ -1610,14 +1730,14 @@ array([((10, 0), 98.25)], dtype=[('time', [('min', '>> dt = np.dtype(int) + >>> dt = np.dtype(np.int_) >>> dt = dt.newbyteorder('>') >>> np.frombuffer(buf, dtype=dt) # doctest: +SKIP @@ -1663,6 +1783,7 @@ Examples -------- + >>> import numpy as np >>> s = b'hello world' >>> np.frombuffer(s, dtype='S1', count=5, offset=6) array([b'w', b'o', b'r', b'l', b'd'], dtype='|S1') @@ -1672,24 +1793,37 @@ >>> np.frombuffer(b'\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3) array([1, 2, 3], dtype=uint8) - """.replace( - "${ARRAY_FUNCTION_LIKE}", - array_function_like_doc, - )) + """) add_newdoc('numpy._core.multiarray', 'from_dlpack', """ - from_dlpack(x, /) + from_dlpack(x, /, *, device=None, copy=None) + -- + + from_dlpack(x, /, *, device=None, copy=None) Create a NumPy array from an object implementing the ``__dlpack__`` - protocol. Generally, the returned NumPy array is a read-only view - of the input object. See [1]_ and [2]_ for more details. + protocol. Generally, the returned NumPy array is a view of the input + object. See [1]_ and [2]_ for more details. Parameters ---------- x : object A Python object that implements the ``__dlpack__`` and ``__dlpack_device__`` methods. + device : device, optional + Device on which to place the created array. Default: ``None``. + Must be ``"cpu"`` if passed which may allow importing an array + that is not already CPU available. + copy : bool, optional + Boolean indicating whether or not to copy the input. If ``True``, + the copy will be made. If ``False``, the function will never copy, + and will raise ``BufferError`` in case a copy is deemed necessary. + Passing it requests a copy from the exporter who may or may not + implement the capability. + If ``None``, the function will reuse the existing memory buffer if + possible and copy otherwise. Default: ``None``. 
+ Returns ------- @@ -1716,6 +1850,9 @@ add_newdoc('numpy._core.multiarray', 'arange', """ + arange(start_or_stop, /, stop=None, step=1, *, dtype=None, device=None, like=None) + -- + arange([start,] stop[, step,], dtype=None, *, device=None, like=None) Return evenly spaced values within a given interval. @@ -1758,7 +1895,7 @@ The type of the output array. If `dtype` is not given, infer the data type from the other input arguments. device : str, optional - The device on which to place the created array. Default: None. + The device on which to place the created array. Default: ``None``. For Array-API interoperability only, so must be ``"cpu"`` if passed. .. versionadded:: 2.0.0 @@ -1788,9 +1925,9 @@ `start` is much larger than `step`. This can lead to unexpected behaviour. For example:: - >>> np.arange(0, 5, 0.5, dtype=int) + >>> np.arange(0, 5, 0.5, dtype=np.int_) array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) - >>> np.arange(-3, 3, 0.5, dtype=int) + >>> np.arange(-3, 3, 0.5, dtype=np.int_) array([-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8]) In such cases, the use of `numpy.linspace` should be preferred. @@ -1818,6 +1955,7 @@ Examples -------- + >>> import numpy as np >>> np.arange(3) array([0, 1, 2]) >>> np.arange(3.0) @@ -1827,10 +1965,7 @@ >>> np.arange(3,7,2) array([3, 5]) - """.replace( - "${ARRAY_FUNCTION_LIKE}", - array_function_like_doc, - )) + """) add_newdoc('numpy._core.multiarray', '_get_ndarray_c_version', """_get_ndarray_c_version() @@ -1842,22 +1977,16 @@ add_newdoc('numpy._core.multiarray', '_reconstruct', """_reconstruct(subtype, shape, dtype) - Construct an empty array. Used by Pickles. - - """) - - -add_newdoc('numpy._core.multiarray', 'set_string_function', - """ - set_string_function(f, repr=1) - - Internal method to set a function to be used when pretty printing arrays. + Construct an empty array. Used by Pickle. 
""") add_newdoc('numpy._core.multiarray', 'promote_types', """ - promote_types(type1, type2) + promote_types(type1, type2, /) + -- + + promote_types(type1, type2, /) Returns the data type with the smallest size and smallest scalar kind to which both ``type1`` and ``type2`` may be safely cast. @@ -1882,8 +2011,6 @@ ----- Please see `numpy.result_type` for additional information about promotion. - .. versionadded:: 1.6.0 - Starting in NumPy 1.9, promote_types function now returns a valid string length when given an integer or float dtype as one argument and a string dtype as another argument. Previously it always returned the input string @@ -1902,16 +2029,17 @@ Examples -------- - >>> np.promote_types('f4', 'f8') + >>> import numpy as np + >>> np.promote_types(np.float32, np.float64) dtype('float64') - >>> np.promote_types('i8', 'f4') + >>> np.promote_types(np.int64, np.float32) dtype('float64') >>> np.promote_types('>i8', '>> np.promote_types('i4', 'S8') + >>> np.promote_types(np.int32, 'S8') dtype('S11') An example of a non-associative case: @@ -1996,8 +2124,6 @@ Notes ----- - .. versionadded:: 1.6.0 - The Einstein summation convention can be used to compute many multi-dimensional, linear algebraic array operations. `einsum` provides a succinct way of representing these. @@ -2069,8 +2195,6 @@ The examples below have corresponding `einsum` calls with the two parameter methods. - .. versionadded:: 1.10.0 - Views returned from einsum are now writeable whenever the input array is writeable. 
For example, ``np.einsum('ijk...->kji...', a)`` will now have the same effect as :py:func:`np.swapaxes(a, 0, 2) ` @@ -2079,6 +2203,7 @@ Examples -------- + >>> import numpy as np >>> a = np.arange(25).reshape(5,5) >>> b = np.arange(5) >>> c = np.arange(6).reshape(2,3) @@ -2248,8 +2373,10 @@ add_newdoc('numpy._core.multiarray', 'ndarray', """ - ndarray(shape, dtype=float, buffer=None, offset=0, - strides=None, order=None) + ndarray(shape, dtype=None, buffer=None, offset=0, strides=None, order=None) + -- + + ndarray(shape, dtype=np.float64, buffer=None, offset=0, strides=None, order=None) An array object represents a multidimensional, homogeneous array of fixed-size items. An associated data-type object describes the @@ -2272,6 +2399,7 @@ Shape of created array. dtype : data-type, optional Any object that can be interpreted as a numpy data type. + Default is `numpy.float64`. buffer : object exposing buffer interface, optional Used to fill the array with data. offset : int, optional @@ -2356,7 +2484,8 @@ First mode, `buffer` is None: - >>> np.ndarray(shape=(2,2), dtype=float, order='F') + >>> import numpy as np + >>> np.ndarray(shape=(2,2), dtype=np.float64, order='F') array([[0.0e+000, 0.0e+000], # random [ nan, 2.5e-323]]) @@ -2364,7 +2493,7 @@ >>> np.ndarray((2,), buffer=np.array([1,2,3]), ... offset=np.int_().itemsize, - ... dtype=int) # offset = 1*itemsize, i.e. skip first element + ... dtype=np.int_) # offset = 1*itemsize, i.e. 
skip first element array([2, 3]) """) @@ -2388,15 +2517,6 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_struct__', """Array protocol: C-struct side.""")) -add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack__', - """a.__dlpack__(*, stream=None) - - DLPack Protocol: Part of the Array API.""")) - -add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack_device__', - """a.__dlpack_device__() - - DLPack Protocol: Part of the Array API.""")) add_newdoc('numpy._core.multiarray', 'ndarray', ('base', """ @@ -2406,6 +2526,7 @@ -------- The base of an array that owns its memory is None: + >>> import numpy as np >>> x = np.array([1,2,3,4]) >>> x.base is None True @@ -2475,6 +2596,7 @@ Examples -------- + >>> import numpy as np >>> import ctypes >>> x = np.array([[0, 1], [2, 3]], dtype=np.int32) >>> x @@ -2526,13 +2648,15 @@ Examples -------- + >>> import numpy as np + >>> x = np.arange(4).reshape((2, 2)) >>> x array([[0, 1], [2, 3]]) >>> x.dtype - dtype('int32') - >>> type(x.dtype) - + dtype('int64') # may vary (OS, bitness) + >>> isinstance(x.dtype, np.dtype) + True """)) @@ -2543,6 +2667,7 @@ Examples -------- + >>> import numpy as np >>> x = np.sqrt([1+0j, 0+1j]) >>> x.imag array([ 0. , 0.70710678]) @@ -2558,6 +2683,7 @@ Examples -------- + >>> import numpy as np >>> x = np.array([1,2,3], dtype=np.float64) >>> x.itemsize 8 @@ -2654,6 +2780,7 @@ Examples -------- + >>> import numpy as np >>> x = np.arange(1, 7).reshape(2, 3) >>> x array([[1, 2, 3], @@ -2698,6 +2825,7 @@ Examples -------- + >>> import numpy as np >>> x = np.zeros((3,5,2), dtype=np.complex128) >>> x.nbytes 480 @@ -2713,6 +2841,7 @@ Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 3]) >>> x.ndim 1 @@ -2729,6 +2858,7 @@ Examples -------- + >>> import numpy as np >>> x = np.sqrt([1+0j, 0+1j]) >>> x.real array([ 1. , 0.70710678]) @@ -2755,31 +2885,18 @@ .. 
warning:: - Setting ``arr.shape`` is discouraged and may be deprecated in the + Setting ``arr.shape`` is deprecated and may be removed in the future. Using `ndarray.reshape` is the preferred approach. Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 3, 4]) >>> x.shape (4,) >>> y = np.zeros((2, 3, 4)) >>> y.shape (2, 3, 4) - >>> y.shape = (3, 8) - >>> y - array([[ 0., 0., 0., 0., 0., 0., 0., 0.], - [ 0., 0., 0., 0., 0., 0., 0., 0.], - [ 0., 0., 0., 0., 0., 0., 0., 0.]]) - >>> y.shape = (3, 6) - Traceback (most recent call last): - File "", line 1, in - ValueError: total size of new array must be unchanged - >>> np.zeros((4,2))[::2].shape = (-1,) - Traceback (most recent call last): - File "", line 1, in - AttributeError: Incompatible shape for in-place modification. Use - `.reshape()` to make a copy with the desired shape. See Also -------- @@ -2807,6 +2924,7 @@ Examples -------- + >>> import numpy as np >>> x = np.zeros((3, 5, 2), dtype=np.complex128) >>> x.size 30 @@ -2855,31 +2973,33 @@ Examples -------- - >>> y = np.reshape(np.arange(2*3*4), (2,3,4)) + >>> import numpy as np + >>> y = np.reshape(np.arange(2 * 3 * 4, dtype=np.int32), (2, 3, 4)) >>> y array([[[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]], [[12, 13, 14, 15], [16, 17, 18, 19], - [20, 21, 22, 23]]]) + [20, 21, 22, 23]]], dtype=np.int32) >>> y.strides (48, 16, 4) - >>> y[1,1,1] - 17 - >>> offset=sum(y.strides * np.array((1,1,1))) - >>> offset/y.itemsize - 17 - - >>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0) + >>> y[1, 1, 1] + np.int32(17) + >>> offset = sum(y.strides * np.array((1, 1, 1))) + >>> offset // y.itemsize + np.int64(17) + + >>> x = np.reshape(np.arange(5*6*7*8, dtype=np.int32), (5, 6, 7, 8)) + >>> x = x.transpose(2, 3, 1, 0) >>> x.strides (32, 4, 224, 1344) - >>> i = np.array([3,5,2,2]) + >>> i = np.array([3, 5, 2, 2], dtype=np.int32) >>> offset = sum(i * x.strides) - >>> x[3,5,2,2] - 813 - >>> offset / x.itemsize - 813 + >>> x[3, 5, 2, 2] + 
np.int32(813) + >>> offset // x.itemsize + np.int64(813) """)) @@ -2892,6 +3012,7 @@ Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> a array([[1, 2], @@ -2929,6 +3050,7 @@ Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> a array([[1, 2], @@ -2952,6 +3074,7 @@ [5, 7]]]) """)) + ############################################################################## # # ndarray methods @@ -2961,6 +3084,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('__array__', """ + __array__($self, dtype=None, /, *, copy=None) + -- + a.__array__([dtype], *, copy=None) For ``dtype`` parameter it returns a new reference to self if @@ -2980,6 +3106,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_finalize__', """ + __array_finalize__($self, obj, /) + -- + a.__array_finalize__(obj, /) Present so subclasses can call super. Does nothing. @@ -2987,29 +3116,48 @@ """)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_wrap__', +add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_function__', """ - a.__array_wrap__(array[, context], /) + __array_function__($self, /, func, types, args, kwargs) + -- - Returns a view of `array` with the same type as self. + a.__array_function__(func, types, args, kwargs) + + See :ref:`NEP 18 ` and :ref:`NEP 35 ` for details. """)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('__copy__', +add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_ufunc__', """ - a.__copy__() + __array_ufunc__($self, ufunc, method, /, *inputs, **kwargs) + -- - Used if :func:`copy.copy` is called on an array. Returns a copy of the array. + a.__array_ufunc__(ufunc, method, /, *inputs, **kwargs) - Equivalent to ``a.copy(order='K')``. + See :ref:`NEP 13 ` for details. 
+ + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_wrap__', + """ + __array_wrap__($self, array, context=None, return_scalar=True, /) + -- + + a.__array_wrap__(array[, context[, return_scalar]], /) + + Returns a view of `array` with the same type as self. """)) add_newdoc('numpy._core.multiarray', 'ndarray', ('__class_getitem__', """ - a.__class_getitem__(item, /) + __class_getitem__($cls, item, /) + -- + + ndarray[shape, dtype] Return a parametrized wrapper around the `~numpy.ndarray` type. @@ -3022,11 +3170,10 @@ Examples -------- - >>> from typing import Any >>> import numpy as np - >>> np.ndarray[Any, np.dtype[Any]] - numpy.ndarray[typing.Any, numpy.dtype[typing.Any]] + >>> np.ndarray[tuple[int], np.dtype[np.uint8]] + numpy.ndarray[tuple[int], numpy.dtype[numpy.uint8]] See Also -------- @@ -3037,17 +3184,36 @@ """)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('__deepcopy__', +add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack__', """ - a.__deepcopy__(memo, /) + __dlpack__($self, /, *, stream=None, max_version=None, dl_device=None, copy=None) + -- + + a.__dlpack__(*, stream=None, max_version=None, dl_device=None, copy=None) + + Exports the array for consumption by ``from_dlpack()`` as a DLPack capsule. + + """)) - Used if :func:`copy.deepcopy` is called on an array. + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack_device__', + """ + __dlpack_device__($self, /) + -- + + a.__dlpack_device__() + + Returns device type (``1``) and device ID (``0``) in DLPack format. + Meant for use within ``from_dlpack()``. """)) add_newdoc('numpy._core.multiarray', 'ndarray', ('__reduce__', """ + __reduce__($self, /) + -- + a.__reduce__() For pickling. @@ -3055,8 +3221,23 @@ """)) +add_newdoc('numpy._core.multiarray', 'ndarray', ('__reduce_ex__', + """ + __reduce_ex__($self, protocol, /) + -- + + a.__reduce_ex__(protocol, /) + + For pickling. 
+ + """)) + + add_newdoc('numpy._core.multiarray', 'ndarray', ('__setstate__', """ + __setstate__($self, state, /) + -- + a.__setstate__(state, /) For unpickling. @@ -3077,99 +3258,242 @@ """)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('all', +add_newdoc('numpy._core.multiarray', 'ndarray', ('dot', """ - a.all(axis=None, out=None, keepdims=False, *, where=True) + dot($self, other, /, out=None) + -- - Returns True if all elements evaluate to True. + a.dot(other, /, out=None) - Refer to `numpy.all` for full documentation. + Refer to :func:`numpy.dot` for full documentation. See Also -------- - numpy.all : equivalent function + numpy.dot : equivalent function """)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('any', +add_newdoc('numpy._core.multiarray', 'ndarray', ('argpartition', """ - a.any(axis=None, out=None, keepdims=False, *, where=True) + argpartition($self, kth, /, axis=-1, kind='introselect', order=None) + -- - Returns True if any of the elements of `a` evaluate to True. + a.argpartition(kth, axis=-1, kind='introselect', order=None) - Refer to `numpy.any` for full documentation. + Returns the indices that would partition this array. + + Refer to `numpy.argpartition` for full documentation. See Also -------- - numpy.any : equivalent function + numpy.argpartition : equivalent function """)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('argmax', +add_newdoc('numpy._core.multiarray', 'ndarray', ('partition', """ - a.argmax(axis=None, out=None, *, keepdims=False) + partition($self, kth, /, axis=-1, kind='introselect', order=None) + -- - Return indices of the maximum values along the given axis. + a.partition(kth, axis=-1, kind='introselect', order=None) - Refer to `numpy.argmax` for full documentation. + Partially sorts the elements in the array in such a way that the value of + the element in k-th position is in the position it would be in a sorted + array. 
In the output array, all elements smaller than the k-th element + are located to the left of this element and all equal or greater are + located to its right. The ordering of the elements in the two partitions + on the either side of the k-th element in the output array is undefined. + + Parameters + ---------- + kth : int or sequence of ints + Element index to partition by. The kth element value will be in its + final sorted position and all smaller elements will be moved before it + and all equal or greater elements behind it. + The order of all elements in the partitions is undefined. + If provided with a sequence of kth it will partition all elements + indexed by kth of them into their sorted position at once. + + .. deprecated:: 1.22.0 + Passing booleans as index is deprecated. + axis : int, optional + Axis along which to sort. Default is -1, which means sort along the + last axis. + kind : {'introselect'}, optional + Selection algorithm. Default is 'introselect'. + order : str or list of str, optional + When `a` is an array with fields defined, this argument specifies + which fields to compare first, second, etc. A single field can + be specified as a string, and not all fields need to be specified, + but unspecified fields will still be used, in the order in which + they come up in the dtype, to break ties. See Also -------- - numpy.argmax : equivalent function + numpy.partition : Return a partitioned copy of an array. + argpartition : Indirect partition. + sort : Full sort. + + Notes + ----- + See ``np.partition`` for notes on the different algorithms. 
+ + Examples + -------- + >>> import numpy as np + >>> a = np.array([3, 4, 2, 1]) + >>> a.partition(3) + >>> a + array([2, 1, 3, 4]) # may vary + + >>> a.partition((1, 3)) + >>> a + array([1, 2, 3, 4]) """)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('argmin', +############################################################################## +# +# methods from both `ndarray` and `generic` +# +############################################################################## + +_METHOD_DOC_TEMPLATE = """{name}({params}) +-- + +{doc}""" + +def _array_method_doc(name: str, params: str, doc: str) -> None: """ - a.argmin(axis=None, out=None, *, keepdims=False) + Interenal helper function for adding docstrings to a common method of + `numpy.ndarray` and `numpy.generic`. - Return indices of the minimum values along the given axis. + The provided docstring will be added to the given `numpy.ndarray` method. + For the `numpy.generic` method, a shorter docstring indicating that it is + identical to the `ndarray` method will be created. + Both methods will have a proper and identical `__text_signature__`. - Refer to `numpy.argmin` for detailed documentation. + Parameters + ---------- + name : str + Name of the method. + params : str + Parameter signature for the method without parentheses, for example, + ``"a, /, dtype=None, *, copy=False"``. + Parameter defaults must be understood by `ast.literal_eval`, i.e. strings, + bytes, numbers, tuples, lists, dicts, sets, booleans, or None. + doc : str + The full docstring for the `ndarray` method. 
+ """ - See Also - -------- - numpy.argmin : equivalent function + # prepend the pos-only `$self` parameter to the method signature + if "/" not in params: + params = f"/, {params}" if params else "/" + params = f"$self, {params}" - """)) + # add docstring to `np.ndarray.{name}` + doc = textwrap.dedent(doc).strip() + doc_array = _METHOD_DOC_TEMPLATE.format(name=name, params=params, doc=doc) + add_newdoc("numpy._core.multiarray", "ndarray", (name, doc_array)) + # add docstring to `np.generic.{name}` + doc_scalar = f"Scalar method identical to `ndarray.{name}`." + doc_scalar = _METHOD_DOC_TEMPLATE.format(name=name, params=params, doc=doc_scalar) + add_newdoc("numpy._core.numerictypes", "generic", (name, doc_scalar)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('argsort', + +_array_method_doc('__array_namespace__', "*, api_version=None", """ - a.argsort(axis=-1, kind=None, order=None) + a.__array_namespace__(*, api_version=None) - Returns the indices that would sort this array. + For Array API compatibility. + """) - Refer to `numpy.argsort` for full documentation. +_array_method_doc('__copy__', "", + """ + a.__copy__() + + Used if :func:`copy.copy` is called on an array. Returns a copy of the array. + + Equivalent to ``a.copy(order='K')``. + """) + +_array_method_doc('__deepcopy__', "memo, /", + """ + a.__deepcopy__(memo, /) + + Used if :func:`copy.deepcopy` is called on an array. + """) + +_array_method_doc('all', "axis=None, out=None, keepdims=False, *, where=True", + """ + a.all(axis=None, out=None, *, keepdims=, where=) + + Returns True if all elements evaluate to True. + + Refer to `numpy.all` for full documentation. See Also -------- - numpy.argsort : equivalent function + numpy.all : equivalent function + """) - """)) +_array_method_doc('any', "axis=None, out=None, keepdims=False, *, where=True", + """ + a.any(axis=None, out=None, *, keepdims=, where=) + Returns True if any of the elements of `a` evaluate to True. 
-add_newdoc('numpy._core.multiarray', 'ndarray', ('argpartition', + Refer to `numpy.any` for full documentation. + + See Also + -------- + numpy.any : equivalent function + """) + +_array_method_doc('argmax', "axis=None, out=None, *, keepdims=False", """ - a.argpartition(kth, axis=-1, kind='introselect', order=None) + a.argmax(axis=None, out=None, *, keepdims=False) - Returns the indices that would partition this array. + Return indices of the maximum values along the given axis. - Refer to `numpy.argpartition` for full documentation. + Refer to `numpy.argmax` for full documentation. + + See Also + -------- + numpy.argmax : equivalent function + """) + +_array_method_doc('argmin', "axis=None, out=None, *, keepdims=False", + """ + a.argmin(axis=None, out=None, *, keepdims=False) + + Return indices of the minimum values along the given axis. - .. versionadded:: 1.8.0 + Refer to `numpy.argmin` for detailed documentation. See Also -------- - numpy.argpartition : equivalent function + numpy.argmin : equivalent function + """) - """)) +_array_method_doc('argsort', "axis=-1, kind=None, order=None, *, stable=None", + """ + a.argsort(axis=-1, kind=None, order=None, *, stable=None) + + Returns the indices that would sort this array. + + Refer to `numpy.argsort` for full documentation. + See Also + -------- + numpy.argsort : equivalent function + """) -add_newdoc('numpy._core.multiarray', 'ndarray', ('astype', +_array_method_doc('astype', "dtype, order='K', casting='unsafe', subok=True, copy=True", """ a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True) @@ -3186,7 +3510,7 @@ 'C' order otherwise, and 'K' means as close to the order the array elements appear in memory as possible. Default is 'K'. - casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + casting : {'no', 'equiv', 'safe', 'same_kind', 'same_value', 'unsafe'}, optional Controls what kind of data casting may occur. Defaults to 'unsafe' for backwards compatibility. 
@@ -3196,6 +3520,12 @@ * 'same_kind' means only safe casts or casts within a kind, like float64 to float32, are allowed. * 'unsafe' means any data conversions may be done. + * 'same_value' means any data conversions may be done, but the values + must not change, including rounding of floats or overflow of ints + + .. versionadded:: 2.4 + Support for ``'same_value'`` was added. + subok : bool, optional If True, then sub-classes will be passed-through (default), otherwise the returned array will be forced to be a base-class array. @@ -3213,37 +3543,35 @@ is a new array of the same shape as the input array, with dtype, order given by `dtype`, `order`. - Notes - ----- - .. versionchanged:: 1.17.0 - Casting between a simple data type and a structured one is possible only - for "unsafe" casting. Casting to multiple fields is allowed, but - casting from multiple fields is not. - - .. versionchanged:: 1.9.0 - Casting from numeric to string types in 'safe' casting mode requires - that the string dtype length is long enough to store the max - integer/float value converted. - Raises ------ ComplexWarning When casting from complex to float or int. To avoid this, one should use ``a.real.astype(t)``. + ValueError + When casting using ``'same_value'`` and the values change or would + overflow Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 2.5]) >>> x array([1. , 2. , 2.5]) - >>> x.astype(int) + >>> x.astype(np.int_) array([1, 2, 2]) - """)) + >>> x.astype(np.int_, casting="same_value") + Traceback (most recent call last): + ... 
+ ValueError: could not cast 'same_value' double to long + >>> x[:2].astype(np.int_, casting="same_value") + array([1, 2]) + """) -add_newdoc('numpy._core.multiarray', 'ndarray', ('byteswap', +_array_method_doc('byteswap', "inplace=False", """ a.byteswap(inplace=False) @@ -3267,6 +3595,7 @@ Examples -------- + >>> import numpy as np >>> A = np.array([1, 256, 8755], dtype=np.int16) >>> list(map(hex, A)) ['0x1', '0x100', '0x2233'] @@ -3284,20 +3613,18 @@ ``A.view(A.dtype.newbyteorder()).byteswap()`` produces an array with the same values but different representation in memory - >>> A = np.array([1, 2, 3]) + >>> A = np.array([1, 2, 3],dtype=np.int64) >>> A.view(np.uint8) array([1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0], dtype=uint8) >>> A.view(A.dtype.newbyteorder()).byteswap(inplace=True) - array([1, 2, 3]) + array([1, 2, 3], dtype='>i8') >>> A.view(np.uint8) array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3], dtype=uint8) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('choose', +_array_method_doc('choose', "choices, out=None, mode='raise'", """ a.choose(choices, out=None, mode='raise') @@ -3308,13 +3635,11 @@ See Also -------- numpy.choose : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('clip', +_array_method_doc('clip', "min=None, max=None, out=None, **kwargs", """ - a.clip(min=None, max=None, out=None, **kwargs) + a.clip(min=, max=, out=None, **kwargs) Return an array whose values are limited to ``[min, max]``. One of max or min must be given. 
@@ -3324,11 +3649,9 @@ See Also -------- numpy.clip : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('compress', +_array_method_doc('compress', "condition, axis=None, out=None", """ a.compress(condition, axis=None, out=None) @@ -3339,11 +3662,9 @@ See Also -------- numpy.compress : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('conj', +_array_method_doc('conj', "", """ a.conj() @@ -3354,11 +3675,9 @@ See Also -------- numpy.conjugate : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('conjugate', +_array_method_doc('conjugate', "", """ a.conjugate() @@ -3369,11 +3688,9 @@ See Also -------- numpy.conjugate : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('copy', +_array_method_doc('copy', "order='C'", """ a.copy(order='C') @@ -3402,6 +3719,7 @@ Examples -------- + >>> import numpy as np >>> x = np.array([[1,2,3],[4,5,6]], order='F') >>> y = x.copy() @@ -3419,12 +3737,12 @@ >>> y.flags['C_CONTIGUOUS'] True - For arrays containing Python objects (e.g. dtype=object), + For arrays containing Python objects (e.g. dtype=np.object_), the copy is a shallow one. 
The new array will contain the same object which may lead to surprises if that object can be modified (is mutable): - >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object) + >>> a = np.array([1, 'm', [2, 3, 4]], dtype=np.object_) >>> b = a.copy() >>> b[2][0] = 10 >>> a @@ -3434,18 +3752,16 @@ use `copy.deepcopy`: >>> import copy - >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object) + >>> a = np.array([1, 'm', [2, 3, 4]], dtype=np.object_) >>> c = copy.deepcopy(a) >>> c[2][0] = 10 >>> c array([1, 'm', list([10, 3, 4])], dtype=object) >>> a array([1, 'm', list([2, 3, 4])], dtype=object) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('cumprod', +_array_method_doc('cumprod', "axis=None, dtype=None, out=None", """ a.cumprod(axis=None, dtype=None, out=None) @@ -3456,11 +3772,9 @@ See Also -------- numpy.cumprod : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('cumsum', +_array_method_doc('cumsum', "axis=None, dtype=None, out=None", """ a.cumsum(axis=None, dtype=None, out=None) @@ -3471,11 +3785,9 @@ See Also -------- numpy.cumsum : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('diagonal', +_array_method_doc('diagonal', "offset=0, axis1=0, axis2=1", """ a.diagonal(offset=0, axis1=0, axis2=1) @@ -3488,14 +3800,9 @@ See Also -------- numpy.diagonal : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('dot')) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('dump', +_array_method_doc('dump', "file", """ a.dump(file) @@ -3506,28 +3813,21 @@ ---------- file : str or Path A string naming the dump file. + """) - .. versionchanged:: 1.17.0 - `pathlib.Path` objects are now accepted. - - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('dumps', +_array_method_doc('dumps', "", """ a.dumps() Returns the pickle of the array as a string. - pickle.loads will convert the string back to an array. 
+ ``pickle.loads`` will convert the string back to an array. Parameters ---------- None + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('fill', +_array_method_doc('fill', "value", """ a.fill(value) @@ -3540,6 +3840,7 @@ Examples -------- + >>> import numpy as np >>> a = np.array([1, 2]) >>> a.fill(0) >>> a @@ -3553,7 +3854,7 @@ to a single array element. The following is a rare example where this distinction is important: - >>> a = np.array([None, None], dtype=object) + >>> a = np.array([None, None], dtype=np.object_) >>> a[0] = np.array(3) >>> a array([array(3), None], dtype=object) @@ -3566,11 +3867,9 @@ >>> a[...] = np.array(3) >>> a array([3, 3], dtype=object) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('flatten', +_array_method_doc('flatten', "order='C'", """ a.flatten(order='C') @@ -3599,16 +3898,15 @@ Examples -------- + >>> import numpy as np >>> a = np.array([[1,2], [3,4]]) >>> a.flatten() array([1, 2, 3, 4]) >>> a.flatten('F') array([1, 3, 2, 4]) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('getfield', +_array_method_doc('getfield', "dtype, offset=0", """ a.getfield(dtype, offset=0) @@ -3631,6 +3929,7 @@ Examples -------- + >>> import numpy as np >>> x = np.diag([1.+1.j]*2) >>> x[1, 1] = 2 + 4.j >>> x @@ -3646,11 +3945,9 @@ >>> x.getfield(np.float64, offset=8) array([[1., 0.], [0., 4.]]) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('item', +_array_method_doc('item', "*args", """ a.item(*args) @@ -3691,6 +3988,7 @@ Examples -------- + >>> import numpy as np >>> np.random.seed(123) >>> x = np.random.randint(9, size=(3, 3)) >>> x @@ -3708,16 +4006,16 @@ For an array with object dtype, elements are returned as-is. 
- >>> a = np.array([np.int64(1)], dtype=object) + >>> a = np.array([np.int64(1)], dtype=np.object_) >>> a.item() #return np.int64 np.int64(1) + """) - """)) - +_KWARGS_REDUCE = "keepdims=, initial=, where=" -add_newdoc('numpy._core.multiarray', 'ndarray', ('max', - """ - a.max(axis=None, out=None, keepdims=False, initial=, where=True) +_array_method_doc('max', "axis=None, out=None, **kwargs", + f""" + a.max(axis=None, out=None, *, {_KWARGS_REDUCE}) Return the maximum along a given axis. @@ -3726,89 +4024,89 @@ See Also -------- numpy.amax : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('mean', - """ - a.mean(axis=None, dtype=None, out=None, keepdims=False, *, where=True) +_array_method_doc('min', "axis=None, out=None, **kwargs", + f""" + a.min(axis=None, out=None, *, {_KWARGS_REDUCE}) - Returns the average of the array elements along given axis. + Return the minimum along a given axis. - Refer to `numpy.mean` for full documentation. + Refer to `numpy.amin` for full documentation. See Also -------- - numpy.mean : equivalent function - - """)) + numpy.amin : equivalent function + """) +_array_method_doc('prod', "axis=None, dtype=None, out=None, **kwargs", + f""" + a.prod(axis=None, dtype=None, out=None, *, {_KWARGS_REDUCE}) -add_newdoc('numpy._core.multiarray', 'ndarray', ('min', - """ - a.min(axis=None, out=None, keepdims=False, initial=, where=True) - - Return the minimum along a given axis. + Return the product of the array elements over the given axis - Refer to `numpy.amin` for full documentation. + Refer to `numpy.prod` for full documentation. See Also -------- - numpy.amin : equivalent function + numpy.prod : equivalent function + """) - """)) +_array_method_doc('sum', "axis=None, dtype=None, out=None, **kwargs", + f""" + a.sum(axis=None, dtype=None, out=None, *, {_KWARGS_REDUCE}) + Return the sum of the array elements over the given axis. + + Refer to `numpy.sum` for full documentation. 
-add_newdoc('numpy._core.multiarray', 'ndarray', ('nonzero', + See Also + -------- + numpy.sum : equivalent function + """) + +_array_method_doc('mean', "axis=None, dtype=None, out=None, **kwargs", """ - a.nonzero() + a.mean(axis=None, dtype=None, out=None, *, keepdims=, where=) - Return the indices of the elements that are non-zero. + Returns the average of the array elements along given axis. - Refer to `numpy.nonzero` for full documentation. + Refer to `numpy.mean` for full documentation. See Also -------- - numpy.nonzero : equivalent function - - """)) - + numpy.mean : equivalent function + """) -add_newdoc('numpy._core.multiarray', 'ndarray', ('prod', +_array_method_doc('nonzero', "", """ - a.prod(axis=None, dtype=None, out=None, keepdims=False, - initial=1, where=True) + a.nonzero() - Return the product of the array elements over the given axis + Return the indices of the elements that are non-zero. - Refer to `numpy.prod` for full documentation. + Refer to `numpy.nonzero` for full documentation. See Also -------- - numpy.prod : equivalent function - - """)) - + numpy.nonzero : equivalent function + """) -add_newdoc('numpy._core.multiarray', 'ndarray', ('put', +_array_method_doc('put', "indices, values, /, mode='raise'", """ a.put(indices, values, mode='raise') - Set ``a.flat[n] = values[n]`` for all `n` in indices. + Set ``a.flat[n] = values[n]`` for all ``n`` in indices. Refer to `numpy.put` for full documentation. See Also -------- numpy.put : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('ravel', +_array_method_doc('ravel', "order='C'", """ - a.ravel([order]) + a.ravel(order='C') Return a flattened array. @@ -3817,13 +4115,10 @@ See Also -------- numpy.ravel : equivalent function - ndarray.flat : a flat iterator on the array. 
+ """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('repeat', +_array_method_doc('repeat', "repeats, /, axis=None", """ a.repeat(repeats, axis=None) @@ -3834,13 +4129,12 @@ See Also -------- numpy.repeat : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('reshape', +_array_method_doc('reshape', "*shape, order='C', copy=None", """ a.reshape(shape, /, *, order='C', copy=None) + a.reshape(*shape, order='C', copy=None) Returns an array containing the same data with a new shape. @@ -3854,15 +4148,13 @@ ----- Unlike the free function `numpy.reshape`, this method on `ndarray` allows the elements of the shape parameter to be passed in as separate arguments. - For example, ``a.reshape(10, 11)`` is equivalent to - ``a.reshape((10, 11))``. - - """)) - + For example, ``a.reshape(4, 2)`` is equivalent to ``a.reshape((4, 2))``. + """) -add_newdoc('numpy._core.multiarray', 'ndarray', ('resize', +_array_method_doc('resize', "*new_shape, refcheck=True", """ - a.resize(new_shape, refcheck=True) + a.resize(new_shape, /, *, refcheck=True) + a.resize(*new_shape, refcheck=True) Change shape and size of array in-place. @@ -3882,9 +4174,6 @@ ValueError If `a` does not own its own data or references or views to it exist, and the data memory must be changed. - PyPy only: will always raise if the data memory must be changed, since - there is no reliable way to determine if references or views to it - exist. SystemError If the `order` keyword argument is specified. 
This behaviour is a @@ -3913,6 +4202,8 @@ Shrinking an array: array is flattened (in the order that the data are stored in memory), resized, and reshaped: + >>> import numpy as np + >>> a = np.array([[0, 1], [2, 3]], order='C') >>> a.resize((2, 1)) >>> a @@ -3948,11 +4239,9 @@ array([[0]]) >>> c array([[0]]) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('round', +_array_method_doc('round', "decimals=0, out=None", """ a.round(decimals=0, out=None) @@ -3963,26 +4252,22 @@ See Also -------- numpy.around : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('searchsorted', +_array_method_doc('searchsorted', "v, /, side='left', sorter=None", """ a.searchsorted(v, side='left', sorter=None) - Find indices where elements of v should be inserted in a to maintain order. + Find indices where elements of `v` should be inserted in `a` to maintain order. - For full documentation, see `numpy.searchsorted` + For full documentation, see `numpy.searchsorted`. See Also -------- numpy.searchsorted : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('setfield', +_array_method_doc('setfield', "val, /, dtype, offset=0", """ a.setfield(val, dtype, offset=0) @@ -4010,6 +4295,7 @@ Examples -------- + >>> import numpy as np >>> x = np.eye(3) >>> x.getfield(np.float64) array([[1., 0., 0.], @@ -4029,11 +4315,9 @@ array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('setflags', +_array_method_doc('setflags', "*, write=None, align=None, uic=None", """ a.setflags(write=None, align=None, uic=None) @@ -4079,6 +4363,7 @@ Examples -------- + >>> import numpy as np >>> y = np.array([[3, 1, 7], ... [2, 0, 0], ... 
[8, 5, 9]]) @@ -4105,13 +4390,11 @@ Traceback (most recent call last): File "", line 1, in ValueError: cannot set WRITEBACKIFCOPY flag to True + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('sort', +_array_method_doc('sort', "axis=-1, kind=None, order=None, *, stable=None", """ - a.sort(axis=-1, kind=None, order=None) + a.sort(axis=-1, kind=None, order=None, *, stable=None) Sort an array in-place. Refer to `numpy.sort` for full documentation. @@ -4125,16 +4408,19 @@ and 'mergesort' use timsort under the covers and, in general, the actual implementation will vary with datatype. The 'mergesort' option is retained for backwards compatibility. - - .. versionchanged:: 1.15.0 - The 'stable' option was added. - order : str or list of str, optional When `a` is an array with fields defined, this argument specifies which fields to compare first, second, etc. A single field can be specified as a string, and not all fields need be specified, but unspecified fields will still be used, in the order in which they come up in the dtype, to break ties. + stable : bool, optional + Sort stability. If ``True``, the returned array will maintain + the relative order of ``a`` values which compare as equal. + If ``False`` or ``None``, this is not guaranteed. Internally, + this option selects ``kind='stable'``. Default: ``None``. + + .. 
versionadded:: 2.0.0 See Also -------- @@ -4150,6 +4436,7 @@ Examples -------- + >>> import numpy as np >>> a = np.array([[1,4], [3,1]]) >>> a.sort(axis=1) >>> a @@ -4168,71 +4455,9 @@ >>> a array([(b'c', 1), (b'a', 2)], dtype=[('x', 'S1'), ('y', '>> a = np.array([3, 4, 2, 1]) - >>> a.partition(3) - >>> a - array([2, 1, 3, 4]) # may vary - - >>> a.partition((1, 3)) - >>> a - array([1, 2, 3, 4]) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('squeeze', +_array_method_doc('squeeze', "axis=None", """ a.squeeze(axis=None) @@ -4243,13 +4468,13 @@ See Also -------- numpy.squeeze : equivalent function + """) - """)) - +_KWARGS_STD = "*, keepdims=, where=, mean=" -add_newdoc('numpy._core.multiarray', 'ndarray', ('std', - """ - a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True) +_array_method_doc('std', "axis=None, dtype=None, out=None, ddof=0, **kwargs", + f""" + a.std(axis=None, dtype=None, out=None, ddof=0, {_KWARGS_STD}) Returns the standard deviation of the array elements along given axis. @@ -4258,28 +4483,24 @@ See Also -------- numpy.std : equivalent function + """) - """)) - +_array_method_doc('var', "axis=None, dtype=None, out=None, ddof=0, **kwargs", + f""" + a.var(axis=None, dtype=None, out=None, ddof=0, {_KWARGS_STD}) -add_newdoc('numpy._core.multiarray', 'ndarray', ('sum', - """ - a.sum(axis=None, dtype=None, out=None, keepdims=False, initial=0, where=True) - - Return the sum of the array elements over the given axis. + Returns the variance of the array elements, along given axis. - Refer to `numpy.sum` for full documentation. + Refer to `numpy.var` for full documentation. See Also -------- - numpy.sum : equivalent function - - """)) - + numpy.var : equivalent function + """) -add_newdoc('numpy._core.multiarray', 'ndarray', ('swapaxes', +_array_method_doc('swapaxes', "axis1, axis2, /", """ - a.swapaxes(axis1, axis2) + a.swapaxes(axis1, axis2, /) Return a view of the array with `axis1` and `axis2` interchanged. 
@@ -4288,11 +4509,9 @@ See Also -------- numpy.swapaxes : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('take', +_array_method_doc('take', "indices, /, axis=None, out=None, mode='raise'", """ a.take(indices, axis=None, out=None, mode='raise') @@ -4303,13 +4522,31 @@ See Also -------- numpy.take : equivalent function + """) - """)) +_array_method_doc('to_device', "device, /, *, stream=None", + """ + a.to_device(device, /, *, stream=None) + + For Array API compatibility. Since NumPy only supports CPU arrays, this + method is a no-op that returns the same array. + + Parameters + ---------- + device : "cpu" + Must be ``"cpu"``. + stream : None, optional + Currently unsupported. + Returns + ------- + out : Self + Returns the same array. + """) -add_newdoc('numpy._core.multiarray', 'ndarray', ('tofile', +_array_method_doc('tofile', "fid, /, sep='', format='%s'", """ - a.tofile(fid, sep="", format="%s") + a.tofile(fid, /, sep='', format='%s') Write array to a file as text or binary (default). @@ -4321,10 +4558,6 @@ ---------- fid : file or str or Path An open file object, or a string containing a filename. - - .. versionchanged:: 1.17.0 - `pathlib.Path` objects are now accepted. - sep : str Separator between array items for text output. If "" (empty), a binary file is written, equivalent to @@ -4347,11 +4580,9 @@ file, bypassing the file object's ``write`` method. As a result, tofile cannot be used with files objects supporting compression (e.g., GzipFile) or file-like objects that do not support ``fileno()`` (e.g., BytesIO). + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('tolist', +_array_method_doc('tolist', "", """ a.tolist() @@ -4359,7 +4590,7 @@ Return a copy of the array data as a (nested) Python list. Data items are converted to the nearest compatible builtin Python type, via - the `~numpy.ndarray.item` function. + the `~numpy.ndarray.item` method. 
If ``a.ndim`` is 0, then since the depth of the nested list is 0, it will not be a list at all, but a simple Python scalar. @@ -4383,10 +4614,11 @@ For a 1D array, ``a.tolist()`` is almost the same as ``list(a)``, except that ``tolist`` changes numpy scalars to Python scalars: + >>> import numpy as np >>> a = np.uint32([1, 2]) >>> a_list = list(a) >>> a_list - [1, 2] + [np.uint32(1), np.uint32(2)] >>> type(a_list[0]) >>> a_tolist = a.tolist() @@ -4412,10 +4644,10 @@ TypeError: iteration over a 0-d array >>> a.tolist() 1 - """)) - + """) -add_newdoc('numpy._core.multiarray', 'ndarray', ('tobytes', """ +_array_method_doc('tobytes', "order='C'", + """ a.tobytes(order='C') Construct Python bytes containing the raw data bytes in the array. @@ -4424,8 +4656,6 @@ data memory. The bytes object is produced in C-order by default. This behavior is controlled by the ``order`` parameter. - .. versionadded:: 1.9.0 - Parameters ---------- order : {'C', 'F', 'A'}, optional @@ -4446,6 +4676,7 @@ Examples -------- + >>> import numpy as np >>> x = np.array([[0, 1], [2, 3]], dtype='>> x.tobytes() b'\\x00\\x00\\x01\\x00\\x02\\x00\\x03\\x00' @@ -4453,23 +4684,9 @@ True >>> x.tobytes('F') b'\\x00\\x00\\x02\\x00\\x01\\x00\\x03\\x00' + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('tostring', r""" - a.tostring(order='C') - - A compatibility alias for `~ndarray.tobytes`, with exactly the same - behavior. - - Despite its name, it returns :class:`bytes` not :class:`str`\ s. - - .. 
deprecated:: 1.19.0 - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('trace', +_array_method_doc('trace', "offset=0, axis1=0, axis2=1, dtype=None, out=None", """ a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None) @@ -4480,11 +4697,9 @@ See Also -------- numpy.trace : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('transpose', +_array_method_doc('transpose', "*axes", """ a.transpose(*axes) @@ -4517,6 +4732,7 @@ Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> a array([[1, 2], @@ -4536,26 +4752,9 @@ array([1, 2, 3, 4]) >>> a.transpose() array([1, 2, 3, 4]) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('var', - """ - a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True) - - Returns the variance of the array elements, along given axis. - - Refer to `numpy.var` for full documentation. - - See Also - -------- - numpy.var : equivalent function - - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('view', +_array_method_doc('view', "*args, **kwargs", """ a.view([dtype][, type]) @@ -4564,7 +4763,7 @@ .. note:: Passing None for ``dtype`` is different from omitting the parameter, since the former invokes ``dtype(None)`` which is an alias for - ``dtype('float64')``. + ``dtype(np.float64)``. 
Parameters ---------- @@ -4602,15 +4801,17 @@ Examples -------- - >>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)]) + >>> import numpy as np + >>> x = np.array([(-1, 2)], dtype=[('a', np.int8), ('b', np.int8)]) Viewing array data using a different type and dtype: - >>> y = x.view(dtype=np.int16, type=np.matrix) - >>> y - matrix([[513]], dtype=int16) - >>> print(type(y)) - + >>> nonneg = np.dtype([("a", np.uint8), ("b", np.uint8)]) + >>> y = x.view(dtype=nonneg, type=np.recarray) + >>> x["a"] + array([-1], dtype=int8) + >>> y.a + array([255], dtype=uint8) Creating a view on a structured array so it can be used in calculations @@ -4670,8 +4871,7 @@ [[2312, 2826], [5396, 5910]]], dtype=int16) - - """)) + """) ############################################################################## @@ -4682,6 +4882,9 @@ add_newdoc('numpy._core.umath', 'frompyfunc', """ + frompyfunc(func, /, nin, nout, **kwargs) + -- + frompyfunc(func, /, nin, nout, *[, identity]) Takes an arbitrary Python function and returns a NumPy ufunc. @@ -4722,6 +4925,7 @@ -------- Use frompyfunc to add broadcasting to the Python function ``oct``: + >>> import numpy as np >>> oct_array = np.frompyfunc(oct, 1, 1) >>> oct_array(np.array((10, 30, 100))) array(['0o12', '0o36', '0o144'], dtype=object) @@ -4747,35 +4951,9 @@ raise a TypeError """) -add_newdoc('numpy._core.umath', '_add_newdoc_ufunc', - """ - add_ufunc_docstring(ufunc, new_docstring) - - Replace the docstring for a ufunc with new_docstring. - This method will only work if the current docstring for - the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.) - - Parameters - ---------- - ufunc : numpy.ufunc - A ufunc whose current doc is NULL. - new_docstring : string - The new docstring for the ufunc. - - Notes - ----- - This method allocates memory for new_docstring on - the heap. 
Technically this creates a memory leak, since this - memory will not be reclaimed until the end of the program - even if the ufunc itself is removed. However this will only - be a problem if the user is repeatedly creating ufuncs with - no documentation, adding documentation via add_newdoc_ufunc, - and then throwing away the ufunc. - """) - add_newdoc('numpy._core.multiarray', 'get_handler_name', """ - get_handler_name(a: ndarray) -> str,None + get_handler_name(a: ndarray) -> str | None Return the name of the memory handler used by `a`. If not provided, return the name of the memory handler that will be used to allocate data for the @@ -4926,12 +5104,17 @@ ---------- *x : array_like Input arrays. - out : ndarray, None, or tuple of ndarray and None, optional - Alternate array object(s) in which to put the result; if provided, it - must have a shape that the inputs broadcast to. A tuple of arrays - (possible only as a keyword argument) must have length equal to the - number of outputs; use None for uninitialized outputs to be - allocated by the ufunc. + out : ndarray, None, ..., or tuple of ndarray and None, optional + Location(s) into which the result(s) are stored. + If not provided or None, new array(s) are created by the ufunc. + If passed as a keyword argument, can be Ellipses (``out=...``) to + ensure an array is returned even if the result is 0-dimensional, + or a tuple with length equal to the number of outputs (where None + can be used for allocation by the ufunc). + + .. versionadded:: 2.3 + Support for ``out=...`` was added. + where : array_like, optional This condition is broadcast over the input. At locations where the condition is True, the `out` array will be set to the ufunc result. 
@@ -4968,12 +5151,13 @@ Examples -------- + >>> import numpy as np >>> np.add.identity 0 >>> np.multiply.identity 1 - >>> np.power.identity - 1 + >>> print(np.power.identity) + None >>> print(np.exp.identity) None """)) @@ -4992,6 +5176,7 @@ Examples -------- + >>> import numpy as np >>> np.add.nargs 3 >>> np.multiply.nargs @@ -5010,6 +5195,7 @@ Examples -------- + >>> import numpy as np >>> np.add.nin 2 >>> np.multiply.nin @@ -5032,6 +5218,7 @@ Examples -------- + >>> import numpy as np >>> np.add.nout 1 >>> np.multiply.nout @@ -5056,16 +5243,17 @@ Examples -------- + >>> import numpy as np >>> np.add.ntypes - 18 + 22 >>> np.multiply.ntypes - 18 + 23 >>> np.power.ntypes - 17 + 21 >>> np.exp.ntypes - 7 + 10 >>> np.remainder.ntypes - 14 + 16 """)) @@ -5082,27 +5270,18 @@ Examples -------- + >>> import numpy as np >>> np.add.types - ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', - 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', - 'GG->G', 'OO->O'] - - >>> np.multiply.types - ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', - 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', - 'GG->G', 'OO->O'] + ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', ... >>> np.power.types - ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', - 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G', - 'OO->O'] + ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', ... >>> np.exp.types - ['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O'] + ['e->e', 'f->f', 'd->d', 'f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O'] >>> np.remainder.types - ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', - 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O'] + ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', ... 
""")) @@ -5129,6 +5308,7 @@ Examples -------- + >>> import numpy as np >>> np.linalg._umath_linalg.det.signature '(m,m)->()' >>> np.matmul.signature @@ -5145,6 +5325,9 @@ add_newdoc('numpy._core', 'ufunc', ('reduce', """ + reduce($self, array, /, axis=0, dtype=None, out=None, **kwargs) + -- + reduce(array, axis=0, dtype=None, out=None, keepdims=False, initial=, where=True) Reduces `array`'s dimension by one, by applying ufunc along one axis. @@ -5173,8 +5356,6 @@ dimension of the input array. `axis` may be negative, in which case it counts from the last to the first axis. - .. versionadded:: 1.7.0 - If this is None, a reduction is performed over all the axes. If this is a tuple of ints, a reduction is performed on multiple axes, instead of a single axis or all the axes as before. @@ -5188,37 +5369,33 @@ ``out`` if given, and the data type of ``array`` otherwise (though upcast to conserve precision for some cases, such as ``numpy.add.reduce`` for integer or boolean input). - out : ndarray, None, or tuple of ndarray and None, optional - A location into which the result is stored. If not provided or None, - a freshly-allocated array is returned. For consistency with - ``ufunc.__call__``, if given as a keyword, this may be wrapped in a - 1-element tuple. + out : ndarray, None, ..., or tuple of ndarray and None, optional + Location into which the result is stored. + If not provided or None, a freshly-allocated array is returned. + If passed as a keyword argument, can be Ellipses (``out=...``) to + ensure an array is returned even if the result is 0-dimensional + (which is useful especially for object dtype), or a 1-element tuple + (latter for consistency with ``ufunc.__call__``). + + .. versionadded:: 2.3 + Support for ``out=...`` was added. - .. versionchanged:: 1.13.0 - Tuples are allowed for keyword argument. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. 
With this option, the result will broadcast correctly against the original `array`. - - .. versionadded:: 1.7.0 initial : scalar, optional The value with which to start the reduction. If the ufunc has no identity or the dtype is object, this defaults to None - otherwise it defaults to ufunc.identity. If ``None`` is given, the first element of the reduction is used, and an error is thrown if the reduction is empty. - - .. versionadded:: 1.15.0 - where : array_like of bool, optional A boolean array which is broadcasted to match the dimensions of `array`, and selects elements to include in the reduction. Note that for ufuncs like ``minimum`` that do not have an identity defined, one has to pass in also ``initial``. - .. versionadded:: 1.17.0 - Returns ------- r : ndarray @@ -5226,6 +5403,7 @@ Examples -------- + >>> import numpy as np >>> np.multiply.reduce([2,3,5]) 30 @@ -5276,6 +5454,9 @@ add_newdoc('numpy._core', 'ufunc', ('accumulate', """ + accumulate($self, array, /, axis=0, dtype=None, out=None) + -- + accumulate(array, axis=0, dtype=None, out=None) Accumulate the result of applying the operator to all elements. @@ -5306,13 +5487,11 @@ to the data-type of the output array if such is provided, or the data-type of the input array if no output array is provided. out : ndarray, None, or tuple of ndarray and None, optional - A location into which the result is stored. If not provided or None, - a freshly-allocated array is returned. For consistency with - ``ufunc.__call__``, if given as a keyword, this may be wrapped in a - 1-element tuple. - - .. versionchanged:: 1.13.0 - Tuples are allowed for keyword argument. + Location into which the result is stored. + If not provided or None, a freshly-allocated array is returned. + For consistency with ``ufunc.__call__``, if passed as a keyword + argument, can be Ellipses (``out=...``, which has the same effect + as None as an array is always returned), or a 1-element tuple. 
Returns ------- @@ -5324,6 +5503,7 @@ -------- 1-D array examples: + >>> import numpy as np >>> np.add.accumulate([2, 3, 5]) array([ 2, 5, 10]) >>> np.multiply.accumulate([2, 3, 5]) @@ -5355,6 +5535,9 @@ add_newdoc('numpy._core', 'ufunc', ('reduceat', """ + reduceat($self, array, /, indices, axis=0, dtype=None, out=None) + -- + reduceat(array, indices, axis=0, dtype=None, out=None) Performs a (local) reduce with specified slices over a single axis. @@ -5389,13 +5572,11 @@ upcast to conserve precision for some cases, such as ``numpy.add.reduce`` for integer or boolean input). out : ndarray, None, or tuple of ndarray and None, optional - A location into which the result is stored. If not provided or None, - a freshly-allocated array is returned. For consistency with - ``ufunc.__call__``, if given as a keyword, this may be wrapped in a - 1-element tuple. - - .. versionchanged:: 1.13.0 - Tuples are allowed for keyword argument. + Location into which the result is stored. + If not provided or None, a freshly-allocated array is returned. + For consistency with ``ufunc.__call__``, if passed as a keyword + argument, can be Ellipses (``out=...``, which has the same effect + as None as an array is always returned), or a 1-element tuple. Returns ------- @@ -5421,6 +5602,7 @@ -------- To take the running sum of four successive values: + >>> import numpy as np >>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2] array([ 6, 10, 14, 18]) @@ -5464,6 +5646,9 @@ add_newdoc('numpy._core', 'ufunc', ('outer', r""" + outer($self, A, B, /, **kwargs) + -- + outer(A, B, /, **kwargs) Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`. 
@@ -5535,6 +5720,9 @@ add_newdoc('numpy._core', 'ufunc', ('at', """ + at($self, a, indices, b=None, /) + -- + at(a, indices, b=None, /) Performs unbuffered in place operation on operand 'a' for elements @@ -5544,8 +5732,6 @@ increment the first element once because of buffering, whereas ``add.at(a, [0,0], 1)`` will increment the first element twice. - .. versionadded:: 1.8.0 - Parameters ---------- a : array_like @@ -5562,6 +5748,7 @@ -------- Set items 0 and 1 to their negative values: + >>> import numpy as np >>> a = np.array([1, 2, 3, 4]) >>> np.negative.at(a, [0, 1]) >>> a @@ -5587,6 +5774,9 @@ add_newdoc('numpy._core', 'ufunc', ('resolve_dtypes', """ + resolve_dtypes($self, dtypes, *, signature=None, casting=None, reduction=False) + -- + resolve_dtypes(dtypes, *, signature=None, casting=None, reduction=False) Find the dtypes NumPy will use for the operation. Both input and @@ -5633,8 +5823,9 @@ -------- This API requires passing dtypes, define them for convenience: - >>> int32 = np.dtype("int32") - >>> float32 = np.dtype("float32") + >>> import numpy as np + >>> int32 = np.dtype(np.int32) + >>> float32 = np.dtype(np.float32) The typical ufunc call does not pass an output dtype. `numpy.add` has two inputs and one output, so leave the output as ``None`` (not provided): @@ -5651,13 +5842,16 @@ >>> np.add.resolve_dtypes((float32, float, None)) (dtype('float32'), dtype('float32'), dtype('float32')) - Where the Python ``float`` behaves samilar to a Python value ``0.0`` + Where the Python ``float`` behaves similar to a Python value ``0.0`` in a ufunc call. (See :ref:`NEP 50 ` for details.) """)) add_newdoc('numpy._core', 'ufunc', ('_resolve_dtypes_and_context', """ + _resolve_dtypes_and_context($self, dtypes, *, signature=None, casting=None, reduction=False) + -- + _resolve_dtypes_and_context(dtypes, *, signature=None, casting=None, reduction=False) See `numpy.ufunc.resolve_dtypes` for parameter information. 
This @@ -5681,6 +5875,9 @@ add_newdoc('numpy._core', 'ufunc', ('_get_strided_loop', """ + _get_strided_loop($self, call_info, /, *, fixed_strides=None) + -- + _get_strided_loop(call_info, /, *, fixed_strides=None) This function fills in the ``call_info`` capsule to include all @@ -5736,7 +5933,6 @@ """)) - ############################################################################## # # Documentation for dtype attributes and methods @@ -5751,6 +5947,9 @@ add_newdoc('numpy._core.multiarray', 'dtype', """ + dtype(dtype, align=False, copy=False, **kwargs) + -- + dtype(dtype, align=False, copy=False, [metadata]) Create a data type object. @@ -5782,6 +5981,7 @@ -------- Using array-scalar type: + >>> import numpy as np >>> np.dtype(np.int16) dtype('int16') @@ -5851,11 +6051,12 @@ Examples -------- - >>> x = np.dtype('i4') + >>> import numpy as np + >>> x = np.dtype(np.int32) >>> x.alignment 4 - >>> x = np.dtype(float) + >>> x = np.dtype(np.float64) >>> x.alignment 8 @@ -5879,11 +6080,12 @@ Examples -------- - >>> dt = np.dtype('i2') + >>> import numpy as np + >>> dt = np.dtype(np.int16) >>> dt.byteorder '=' >>> # endian is not relevant for 8 bit numbers - >>> np.dtype('i1').byteorder + >>> np.dtype(np.int8).byteorder '|' >>> # or ASCII strings >>> np.dtype('S2').byteorder @@ -5910,6 +6112,7 @@ Examples -------- + >>> import numpy as np >>> x = np.dtype(float) >>> x.char 'd' @@ -5930,6 +6133,7 @@ Examples -------- + >>> import numpy as np >>> x = np.dtype(float) >>> x.descr [('', '>> import numpy as np >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) >>> print(dt.fields) - {'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)} + {'name': (dtype('>> import numpy as np >>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)]) >>> x.flags 16 @@ -6022,13 +6229,15 @@ Examples -------- - >>> dt = np.dtype('i2') + + >>> import numpy as np + >>> dt = np.dtype(np.int16) >>> dt.isbuiltin 1 - >>> dt = np.dtype('f8') + >>> dt = 
np.dtype(np.float64) >>> dt.isbuiltin 1 - >>> dt = np.dtype([('field1', 'f8')]) + >>> dt = np.dtype([('field1', np.float64)]) >>> dt.isbuiltin 0 @@ -6060,6 +6269,7 @@ Examples -------- + >>> import numpy as np >>> arr = np.array([[1, 2], [3, 4]]) >>> arr.dtype dtype('int64') @@ -6074,7 +6284,7 @@ add_newdoc('numpy._core.multiarray', 'dtype', ('kind', """ - A character code (one of 'biufcmMOSUV') identifying the general kind of data. + A character code (one of 'biufcmMOSTUV') identifying the general kind of data. = ====================== b boolean @@ -6086,6 +6296,7 @@ M datetime O object S (byte-)string + T string (StringDType) U Unicode V void = ====================== @@ -6093,13 +6304,14 @@ Examples -------- - >>> dt = np.dtype('i4') + >>> import numpy as np + >>> dt = np.dtype(np.int32) >>> dt.kind 'i' - >>> dt = np.dtype('f8') + >>> dt = np.dtype(np.float64) >>> dt.kind 'f' - >>> dt = np.dtype([('field1', 'f8')]) + >>> dt = np.dtype([('field1', np.float64)]) >>> dt.kind 'V' @@ -6123,6 +6335,7 @@ Examples -------- + >>> import numpy as np >>> dt = np.dtype(float, metadata={"key": "value"}) >>> dt.metadata["key"] 'value' @@ -6135,13 +6348,12 @@ >>> (arr + arr).dtype.metadata mappingproxy({'key': 'value'}) - But if the arrays have different dtype metadata, the metadata may be - dropped: + If the arrays have different dtype metadata, the first one wins: >>> dt2 = np.dtype(float, metadata={"key2": "value2"}) >>> arr2 = np.array([3, 2, 1], dtype=dt2) - >>> (arr + arr2).dtype.metadata is None - True # The metadata field is cleared so None is returned + >>> print((arr + arr2).dtype.metadata) + {'key': 'value'} """)) add_newdoc('numpy._core.multiarray', 'dtype', ('name', @@ -6153,6 +6365,7 @@ Examples -------- + >>> import numpy as np >>> x = np.dtype(float) >>> x.name 'float64' @@ -6186,6 +6399,7 @@ Examples -------- + >>> import numpy as np >>> dt = np.dtype(str) >>> dt.num 19 @@ -6204,6 +6418,7 @@ Examples -------- + >>> import numpy as np >>> dt = np.dtype(('i4', 
4)) >>> dt.shape (4,) @@ -6219,10 +6434,9 @@ Number of dimensions of the sub-array if this data type describes a sub-array, and ``0`` otherwise. - .. versionadded:: 1.13.0 - Examples -------- + >>> import numpy as np >>> x = np.dtype(float) >>> x.ndim 0 @@ -6258,11 +6472,12 @@ Examples -------- - >>> x = numpy.dtype('8f') + >>> import numpy as np + >>> x = np.dtype('8f') >>> x.subdtype (dtype('float32'), (8,)) - >>> x = numpy.dtype('i2') + >>> x = np.dtype(np.int16) >>> x.subdtype >>> @@ -6279,11 +6494,12 @@ Examples -------- - >>> x = numpy.dtype('8f') + >>> import numpy as np + >>> x = np.dtype('8f') >>> x.base dtype('float32') - >>> x = numpy.dtype('i2') + >>> x = np.dtype(np.int16) >>> x.base dtype('int16') @@ -6300,6 +6516,9 @@ add_newdoc('numpy._core.multiarray', 'dtype', ('newbyteorder', """ + newbyteorder($self, new_order='S', /) + -- + newbyteorder(new_order='S', /) Return a new dtype with a different byte order. @@ -6334,6 +6553,7 @@ >>> sys_is_le = sys.byteorder == 'little' >>> native_code = '<' if sys_is_le else '>' >>> swapped_code = '>' if sys_is_le else '<' + >>> import numpy as np >>> native_dt = np.dtype(native_code+'i2') >>> swapped_dt = np.dtype(swapped_code+'i2') >>> native_dt.newbyteorder('S') == swapped_dt @@ -6417,7 +6637,7 @@ add_newdoc('numpy._core.multiarray', 'dtype', ('__gt__', """ - __ge__(value, /) + __gt__(value, /) Return ``self > value``. @@ -6455,6 +6675,9 @@ add_newdoc('numpy._core.multiarray', 'busdaycalendar', """ + busdaycalendar(weekmask='1111100', holidays=None) + -- + busdaycalendar(weekmask='1111100', holidays=None) A business day calendar object that efficiently stores information @@ -6467,8 +6690,6 @@ Once a busdaycalendar object is created, the weekmask and holidays cannot be modified. - .. versionadded:: 1.7.0 - Parameters ---------- weekmask : str or array_like of bool, optional @@ -6509,6 +6730,7 @@ Examples -------- + >>> import numpy as np >>> # Some important days in July ... bdd = np.busdaycalendar( ... 
holidays=['2011-07-01', '2011-07-04', '2011-07-17']) @@ -6536,8 +6758,6 @@ Used internally by all axis-checking logic. - .. versionadded:: 1.13.0 - Parameters ---------- axis : int @@ -6560,6 +6780,7 @@ Examples -------- + >>> import numpy as np >>> from numpy.lib.array_utils import normalize_axis_index >>> normalize_axis_index(0, ndim=3) 0 @@ -6580,6 +6801,9 @@ add_newdoc('numpy._core.multiarray', 'datetime_data', """ + datetime_data(dtype, /) + -- + datetime_data(dtype, /) Get information about the step size of a date or time type. @@ -6602,6 +6826,7 @@ Examples -------- + >>> import numpy as np >>> dt_25s = np.dtype('timedelta64[25s]') >>> np.datetime_data(dt_25s) ('s', 25) @@ -6612,7 +6837,7 @@ as a timedelta >>> np.datetime64('2010', np.datetime_data(dt_25s)) - numpy.datetime64('2010-01-01T00:00:00','25s') + np.datetime64('2010-01-01T00:00:00','25s') """) @@ -6636,21 +6861,11 @@ # Attributes -def refer_to_array_attribute(attr, method=True): - docstring = """ - Scalar {} identical to the corresponding array attribute. - - Please see `ndarray.{}`. 
- """ - - return attr, docstring.format("method" if method else "attribute", attr) - - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('T', method=False)) +add_newdoc('numpy._core.numerictypes', 'generic', ('T', + """Scalar attribute identical to `ndarray.T`.""")) -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('base', method=False)) +add_newdoc('numpy._core.numerictypes', 'generic', ('base', + """Scalar attribute identical to `ndarray.base`.""")) add_newdoc('numpy._core.numerictypes', 'generic', ('data', """Pointer to start of data.""")) @@ -6687,153 +6902,12 @@ def refer_to_array_attribute(attr, method=True): # Methods -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('all')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('any')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('argmax')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('argmin')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('argsort')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('astype')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('byteswap')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('choose')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('clip')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('compress')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('conjugate')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('copy')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('cumprod')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('cumsum')) - 
-add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('diagonal')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('dump')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('dumps')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('fill')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('flatten')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('getfield')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('item')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('max')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('mean')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('min')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('nonzero')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('prod')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('put')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('ravel')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('repeat')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('reshape')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('resize')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('round')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('searchsorted')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('setfield')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('setflags')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('sort')) - 
-add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('squeeze')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('std')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('sum')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('swapaxes')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('take')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('tofile')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('tolist')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('tostring')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('trace')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('transpose')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('var')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('view')) - add_newdoc('numpy._core.numerictypes', 'number', ('__class_getitem__', """ - __class_getitem__(item, /) + __class_getitem__($cls, item, /) + -- + + number.__class_getitem__(item, /) Return a parametrized wrapper around the `~numpy.number` type. 
@@ -6924,8 +6998,67 @@ def refer_to_array_attribute(attr, method=True): """) +############################################################################## +# +# Documentation for `dtypes.*` classes +# +############################################################################## + +for _dtype_name, _signature, _sctype_name in ( + ("BoolDType", "()", "bool"), + ("Int8DType", "()", "int8"), + ("UInt8DType", "()", "uint8"), + ("Int16DType", "()", "int16"), + ("UInt16DType", "()", "uint16"), + ("Int32DType", "()", "int32"), + ("IntDType", "()", "intc"), + ("UInt32DType", "()", "uint32"), + ("UIntDType", "()", "uintc"), + ("Int64DType", "()", "int64"), + ("UInt64DType", "()", "uint64"), + ("LongLongDType", "()", "longlong"), + ("ULongLongDType", "()", "ulonglong"), + ("Float16DType", "()", "float16"), + ("Float32DType", "()", "float32"), + ("Float64DType", "()", "float64"), + ("LongDoubleDType", "()", "longdouble"), + ("Complex64DType", "()", "complex64"), + ("Complex128DType", "()", "complex128"), + ("CLongDoubleDType", "()", "clongdouble"), + ("ObjectDType", "()", "object"), + ("BytesDType", "(size, /)", "bytes_"), + ("StrDType", "(size, /)", "str_"), + ("VoidDType", "(length, /)", "void"), + ("DateTime64DType", "(unit, /)", "datetime64"), + ("TimeDelta64DType", "(unit, /)", "timedelta64"), +): + _extra_docs = "" + if _dtype_name in {"VoidDType", "DateTime64DType", "TimeDelta64DType"}: + _extra_docs = f""" + .. warning:: + ``np.dtypes.{_dtype_name}`` cannot be instantiated directly. + Use ``np.dtype("{_sctype_name}[{{unit}}]")`` instead. + """ + + add_newdoc('numpy.dtypes', _dtype_name, + f""" + {_dtype_name}{_signature} + -- + + DType class corresponding to the `numpy.{_sctype_name}` scalar type. + {_extra_docs} + See `numpy.dtype` for the typical way to create dtype instances + and :ref:`arrays.dtypes` for additional information. 
+ """) + + del _dtype_name, _signature, _sctype_name, _extra_docs # avoid namespace pollution + + add_newdoc('numpy._core.multiarray', 'StringDType', """ + StringDType(*, coerce=True, **kwargs) + -- + StringDType(*, na_object=np._NoValue, coerce=True) Create a StringDType instance. @@ -6948,26 +7081,29 @@ def refer_to_array_attribute(attr, method=True): Examples -------- + >>> import numpy as np + >>> from numpy.dtypes import StringDType >>> np.array(["hello", "world"], dtype=StringDType()) array(["hello", "world"], dtype=StringDType()) >>> arr = np.array(["hello", None, "world"], - dtype=StringDType(na_object=None)) + ... dtype=StringDType(na_object=None)) >>> arr - array(["hello", None, "world", dtype=StringDType(na_object=None)) + array(["hello", None, "world"], dtype=StringDType(na_object=None)) >>> arr[1] is None True >>> arr = np.array(["hello", np.nan, "world"], - dtype=StringDType(na_object=np.nan)) + ... dtype=StringDType(na_object=np.nan)) >>> np.isnan(arr) array([False, True, False]) >>> np.array([1.2, object(), "hello world"], - dtype=StringDType(coerce=True)) - ValueError: StringDType only allows string data when string coercion - is disabled. + ... dtype=StringDType(coerce=False)) + Traceback (most recent call last): + ... + ValueError: StringDType only allows string data when string coercion is disabled. 
>>> np.array(["hello", "world"], dtype=StringDType(coerce=True)) array(["hello", "world"], dtype=StringDType(coerce=True)) diff --git a/numpy/_core/_add_newdocs.pyi b/numpy/_core/_add_newdocs.pyi new file mode 100644 index 000000000000..2d004814fdcf --- /dev/null +++ b/numpy/_core/_add_newdocs.pyi @@ -0,0 +1,2 @@ +from .function_base import add_newdoc as add_newdoc +from .overrides import get_array_function_like_doc as get_array_function_like_doc diff --git a/numpy/_core/_add_newdocs_scalars.py b/numpy/_core/_add_newdocs_scalars.py index 2ad1d22ee8f1..3f9eca5e47f3 100644 --- a/numpy/_core/_add_newdocs_scalars.py +++ b/numpy/_core/_add_newdocs_scalars.py @@ -3,10 +3,10 @@ our sphinx ``conf.py`` during doc builds, where we want to avoid showing platform-dependent information. """ -import sys import os -from numpy._core import dtype -from numpy._core import numerictypes as _numerictypes +import sys + +from numpy._core import dtype, numerictypes as _numerictypes from numpy._core.function_base import add_newdoc ############################################################################## @@ -48,7 +48,7 @@ def type_aliases_gen(): ('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'), ('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'), ('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'), - ]) +]) def _get_platform_and_machine(): @@ -67,258 +67,240 @@ def _get_platform_and_machine(): _system, _machine = _get_platform_and_machine() _doc_alias_string = f":Alias on this platform ({_system} {_machine}):" +# docstring prefix that cpython uses to populate `__text_signature__` +_ARGUMENT_CLINIC_TEMPLATE = """{name}{signature} +-- -def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): - # note: `:field: value` is rST syntax which renders as field lists. 
- o = getattr(_numerictypes, obj) - - character_code = dtype(o).char - canonical_name_doc = "" if obj == o.__name__ else \ - f":Canonical name: `numpy.{obj}`\n " - if fixed_aliases: - alias_doc = ''.join(f":Alias: `numpy.{alias}`\n " - for alias in fixed_aliases) - else: - alias_doc = '' - alias_doc += ''.join(f"{_doc_alias_string} `numpy.{alias}`: {doc}.\n " - for (alias_type, alias, doc) in possible_aliases if alias_type is o) - - docstring = f""" - {doc.strip()} - - :Character code: ``'{character_code}'`` - {canonical_name_doc}{alias_doc} - """ - - add_newdoc('numpy._core.numerictypes', obj, docstring) - - -_bool_docstring = ( - """ - Boolean type (True or False), stored as a byte. - - .. warning:: - - The :class:`bool` type is not a subclass of the :class:`int_` type - (the :class:`bool` is not even a number type). This is different - than Python's default implementation of :class:`bool` as a - sub-class of :class:`int`. - """ -) - -add_newdoc_for_scalar_type('bool', [], _bool_docstring) - -add_newdoc_for_scalar_type('bool_', [], _bool_docstring) +{docstring}""" -add_newdoc_for_scalar_type('byte', [], - """ - Signed integer type, compatible with C ``char``. - """) - -add_newdoc_for_scalar_type('short', [], - """ - Signed integer type, compatible with C ``short``. - """) - -add_newdoc_for_scalar_type('intc', [], - """ - Signed integer type, compatible with C ``int``. - """) - -# TODO: These docs probably need an if to highlight the default rather than -# the C-types (and be correct). -add_newdoc_for_scalar_type('int_', [], - """ - Default signed integer type, 64bit on 64bit systems and 32bit on 32bit - systems. - """) - -add_newdoc_for_scalar_type('longlong', [], - """ - Signed integer type, compatible with C ``long long``. - """) - -add_newdoc_for_scalar_type('ubyte', [], - """ - Unsigned integer type, compatible with C ``unsigned char``. 
- """) +def add_newdoc_for_scalar_type(name: str, text_signature: str, doc: str) -> None: + # note: `:field: value` is rST syntax which renders as field lists. + cls = getattr(_numerictypes, name) + module = cls.__module__ -add_newdoc_for_scalar_type('ushort', [], - """ - Unsigned integer type, compatible with C ``unsigned short``. - """) + lines_extra = [ + "", # blank line after main doc + f":Character code: ``{dtype(cls).char!r}``", + ] -add_newdoc_for_scalar_type('uintc', [], - """ - Unsigned integer type, compatible with C ``unsigned int``. - """) + if name != cls.__name__: + lines_extra.append(f":Canonical name: `{module}.{name}`") -add_newdoc_for_scalar_type('uint', [], - """ - Unsigned signed integer type, 64bit on 64bit systems and 32bit on 32bit - systems. - """) + lines_extra.extend( + f"{_doc_alias_string} `{module}.{alias}`: {doc}." + for alias_type, alias, doc in possible_aliases + if alias_type is cls + ) -add_newdoc_for_scalar_type('ulonglong', [], - """ - Signed integer type, compatible with C ``unsigned long long``. - """) + docstring = _ARGUMENT_CLINIC_TEMPLATE.format( + name=cls.__name__, # must match the class name + signature=text_signature, + docstring="\n".join([doc.strip(), *lines_extra]), + ) + add_newdoc('numpy._core.numerictypes', name, docstring) -add_newdoc_for_scalar_type('half', [], - """ - Half-precision floating-point number type. - """) -add_newdoc_for_scalar_type('single', [], - """ - Single-precision floating-point number type, compatible with C ``float``. - """) +for bool_name in ('bool', 'bool_'): + add_newdoc_for_scalar_type(bool_name, '(value=False, /)', """ +Boolean type (True or False), stored as a byte. -add_newdoc_for_scalar_type('double', [], - """ - Double-precision floating-point number type, compatible with Python - :class:`float` and C ``double``. 
- """) - -add_newdoc_for_scalar_type('longdouble', [], - """ - Extended-precision floating-point number type, compatible with C - ``long double`` but not necessarily with IEEE 754 quadruple-precision. - """) +.. warning:: -add_newdoc_for_scalar_type('csingle', [], - """ - Complex number type composed of two single-precision floating-point - numbers. - """) + The :class:`bool` type is not a subclass of the :class:`int_` type + (the :class:`bool` is not even a number type). This is different + than Python's default implementation of :class:`bool` as a + sub-class of :class:`int`. +""") -add_newdoc_for_scalar_type('cdouble', [], - """ - Complex number type composed of two double-precision floating-point - numbers, compatible with Python :class:`complex`. - """) +add_newdoc_for_scalar_type('byte', '(value=0, /)', """ +Signed integer type, compatible with C ``char``. +""") -add_newdoc_for_scalar_type('clongdouble', [], - """ - Complex number type composed of two extended-precision floating-point - numbers. - """) +add_newdoc_for_scalar_type('short', '(value=0, /)', """ +Signed integer type, compatible with C ``short``. +""") -add_newdoc_for_scalar_type('object_', [], - """ - Any Python object. - """) - -add_newdoc_for_scalar_type('str_', [], - r""" - A unicode string. - - This type strips trailing null codepoints. - - >>> s = np.str_("abc\x00") - >>> s - 'abc' - - Unlike the builtin :class:`str`, this supports the - :ref:`python:bufferobjects`, exposing its contents as UCS4: - - >>> m = memoryview(np.str_("abc")) - >>> m.format - '3w' - >>> m.tobytes() - b'a\x00\x00\x00b\x00\x00\x00c\x00\x00\x00' - """) - -add_newdoc_for_scalar_type('bytes_', [], - r""" - A byte string. - - When used in arrays, this type strips trailing null bytes. - """) - -add_newdoc_for_scalar_type('void', [], - r""" - np.void(length_or_data, /, dtype=None) - - Create a new structured or unstructured void scalar. 
- - Parameters - ---------- - length_or_data : int, array-like, bytes-like, object - One of multiple meanings (see notes). The length or - bytes data of an unstructured void. Or alternatively, - the data to be stored in the new scalar when `dtype` - is provided. - This can be an array-like, in which case an array may - be returned. - dtype : dtype, optional - If provided the dtype of the new scalar. This dtype must - be "void" dtype (i.e. a structured or unstructured void, - see also :ref:`defining-structured-types`). - - .. versionadded:: 1.24 - - Notes - ----- - For historical reasons and because void scalars can represent both - arbitrary byte data and structured dtypes, the void constructor - has three calling conventions: - - 1. ``np.void(5)`` creates a ``dtype="V5"`` scalar filled with five - ``\0`` bytes. The 5 can be a Python or NumPy integer. - 2. ``np.void(b"bytes-like")`` creates a void scalar from the byte string. - The dtype itemsize will match the byte string length, here ``"V10"``. - 3. When a ``dtype=`` is passed the call is roughly the same as an - array creation. However, a void scalar rather than array is returned. - - Please see the examples which show all three different conventions. +add_newdoc_for_scalar_type('intc', '(value=0, /)', """ +Signed integer type, compatible with C ``int``. +""") - Examples - -------- - >>> np.void(5) - np.void(b'\x00\x00\x00\x00\x00') - >>> np.void(b'abcd') - np.void(b'\x61\x62\x63\x64') - >>> np.void((3.2, b'eggs'), dtype="d,S5") - np.void((3.2, b'eggs'), dtype=[('f0', '>> np.void(3, dtype=[('x', np.int8), ('y', np.int8)]) - np.void((3, 3), dtype=[('x', 'i1'), ('y', 'i1')]) - - """) - -add_newdoc_for_scalar_type('datetime64', [], - """ - If created from a 64-bit integer, it represents an offset from - ``1970-01-01T00:00:00``. - If created from string, the string can be in ISO 8601 date - or datetime format. 
- - When parsing a string to create a datetime object, if the string contains - a trailing timezone (A 'Z' or a timezone offset), the timezone will be - dropped and a User Warning is given. - - Datetime64 objects should be considered to be UTC and therefore have an - offset of +0000. - - >>> np.datetime64(10, 'Y') - numpy.datetime64('1980') - >>> np.datetime64('1980', 'Y') - numpy.datetime64('1980') - >>> np.datetime64(10, 'D') - numpy.datetime64('1970-01-11') - - See :ref:`arrays.datetime` for more information. - """) - -add_newdoc_for_scalar_type('timedelta64', [], - """ - A timedelta stored as a 64-bit integer. +add_newdoc_for_scalar_type('long', '(value=0, /)', """ +Signed integer type, compatible with C ``long``. +""") - See :ref:`arrays.datetime` for more information. - """) +# TODO: These docs probably need an if to highlight the default rather than +# the C-types (and be correct). +add_newdoc_for_scalar_type('int_', '(value=0, /)', """ +Default signed integer type, 64bit on 64bit systems and 32bit on 32bit systems. +""") + +add_newdoc_for_scalar_type('longlong', '(value=0, /)', """ +Signed integer type, compatible with C ``long long``. +""") + +add_newdoc_for_scalar_type('ubyte', '(value=0, /)', """ +Unsigned integer type, compatible with C ``unsigned char``. +""") + +add_newdoc_for_scalar_type('ushort', '(value=0, /)', """ +Unsigned integer type, compatible with C ``unsigned short``. +""") + +add_newdoc_for_scalar_type('uintc', '(value=0, /)', """ +Unsigned integer type, compatible with C ``unsigned int``. +""") + +add_newdoc_for_scalar_type('uint', '(value=0, /)', """ +Unsigned integer type, 64bit on 64bit systems and 32bit on 32bit systems. +""") + +add_newdoc_for_scalar_type('ulong', '(value=0, /)', """ +Unsigned integer type, compatible with C ``unsigned long``. +""") + +add_newdoc_for_scalar_type('ulonglong', '(value=0, /)', """ +Unsigned integer type, compatible with C ``unsigned long long``. 
+""") + +add_newdoc_for_scalar_type('half', '(value=0, /)', """ +Half-precision floating-point number type. +""") + +add_newdoc_for_scalar_type('single', '(value=0, /)', """ +Single-precision floating-point number type, compatible with C ``float``. +""") + +add_newdoc_for_scalar_type('double', '(value=0, /)', """ +Double-precision floating-point number type, compatible with Python :class:`float` and C ``double``. +""") + +add_newdoc_for_scalar_type('longdouble', '(value=0, /)', """ +Extended-precision floating-point number type, compatible with C ``long double`` +but not necessarily with IEEE 754 quadruple-precision. +""") + +add_newdoc_for_scalar_type('csingle', '(real=0, imag=0, /)', """ +Complex number type composed of two single-precision floating-point numbers. +""") + +add_newdoc_for_scalar_type('cdouble', '(real=0, imag=0, /)', """ +Complex number type composed of two double-precision floating-point numbers, +compatible with Python :class:`complex`. +""") + +add_newdoc_for_scalar_type('clongdouble', '(real=0, imag=0, /)', """ +Complex number type composed of two extended-precision floating-point numbers. +""") + +add_newdoc_for_scalar_type('object_', '(value=None, /)', """ +Any Python object. +""") + +add_newdoc_for_scalar_type('str_', '(value="", /, *args, **kwargs)', r""" +A unicode string. + +This type strips trailing null codepoints. + +>>> s = np.str_("abc\x00") +>>> s +'abc' + +Unlike the builtin :class:`str`, this supports the +:ref:`python:bufferobjects`, exposing its contents as UCS4: + +>>> m = memoryview(np.str_("abc")) +>>> m.format +'3w' +>>> m.tobytes() +b'a\x00\x00\x00b\x00\x00\x00c\x00\x00\x00' +""") + +add_newdoc_for_scalar_type('bytes_', '(value="", /, *args, **kwargs)', r""" +A byte string. + +When used in arrays, this type strips trailing null bytes. +""") + +add_newdoc_for_scalar_type('void', '(length_or_data, /, dtype=None)', r""" +np.void(length_or_data, /, dtype=None) + +Create a new structured or unstructured void scalar. 
+ +Parameters +---------- +length_or_data : int, array-like, bytes-like, object + One of multiple meanings (see notes). The length or + bytes data of an unstructured void. Or alternatively, + the data to be stored in the new scalar when `dtype` + is provided. + This can be an array-like, in which case an array may + be returned. +dtype : dtype, optional + If provided the dtype of the new scalar. This dtype must + be "void" dtype (i.e. a structured or unstructured void, + see also :ref:`defining-structured-types`). + + .. versionadded:: 1.24 + +Notes +----- +For historical reasons and because void scalars can represent both +arbitrary byte data and structured dtypes, the void constructor +has three calling conventions: + +1. ``np.void(5)`` creates a ``dtype="V5"`` scalar filled with five + ``\0`` bytes. The 5 can be a Python or NumPy integer. +2. ``np.void(b"bytes-like")`` creates a void scalar from the byte string. + The dtype itemsize will match the byte string length, here ``"V10"``. +3. When a ``dtype=`` is passed the call is roughly the same as an + array creation. However, a void scalar rather than array is returned. + +Please see the examples which show all three different conventions. + +Examples +-------- +>>> np.void(5) +np.void(b'\x00\x00\x00\x00\x00') +>>> np.void(b'abcd') +np.void(b'\x61\x62\x63\x64') +>>> np.void((3.2, b'eggs'), dtype="d,S5") +np.void((3.2, b'eggs'), dtype=[('f0', '>> np.void(3, dtype=[('x', np.int8), ('y', np.int8)]) +np.void((3, 3), dtype=[('x', 'i1'), ('y', 'i1')]) +""") + +add_newdoc_for_scalar_type('datetime64', '(value=None, /, *args)', """ +If created from a 64-bit integer, it represents an offset from ``1970-01-01T00:00:00``. +If created from string, the string can be in ISO 8601 date or datetime format. + +When parsing a string to create a datetime object, if the string contains +a trailing timezone (A 'Z' or a timezone offset), the timezone will be +dropped and a User Warning is given. 
+ +Datetime64 objects should be considered to be UTC and therefore have an +offset of +0000. + +>>> np.datetime64(10, 'Y') +np.datetime64('1980') +>>> np.datetime64('1980', 'Y') +np.datetime64('1980') +>>> np.datetime64(10, 'D') +np.datetime64('1970-01-11') + +See :ref:`arrays.datetime` for more information. +""") + +add_newdoc_for_scalar_type('timedelta64', '(value=0, /, *args)', """ +A timedelta stored as a 64-bit integer. + +See :ref:`arrays.datetime` for more information. +""") add_newdoc('numpy._core.numerictypes', "integer", ('is_integer', """ + is_integer($self, /) + -- + integer.is_integer() -> bool Return ``True`` if the number is finite with integral value. @@ -327,6 +309,7 @@ def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): Examples -------- + >>> import numpy as np >>> np.int64(-2).is_integer() True >>> np.uint32(5).is_integer() @@ -336,23 +319,29 @@ def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): # TODO: work out how to put this on the base class, np.floating for float_name in ('half', 'single', 'double', 'longdouble'): add_newdoc('numpy._core.numerictypes', float_name, ('as_integer_ratio', - """ - {ftype}.as_integer_ratio() -> (int, int) + f""" + as_integer_ratio($self, /) + -- + + {float_name}.as_integer_ratio() -> (int, int) Return a pair of integers, whose ratio is exactly equal to the original floating point number, and with a positive denominator. Raise `OverflowError` on infinities and a `ValueError` on NaNs. 
- >>> np.{ftype}(10.0).as_integer_ratio() + >>> np.{float_name}(10.0).as_integer_ratio() (10, 1) - >>> np.{ftype}(0.0).as_integer_ratio() + >>> np.{float_name}(0.0).as_integer_ratio() (0, 1) - >>> np.{ftype}(-.25).as_integer_ratio() + >>> np.{float_name}(-.25).as_integer_ratio() (-1, 4) - """.format(ftype=float_name))) + """)) add_newdoc('numpy._core.numerictypes', float_name, ('is_integer', f""" + is_integer($self, /) + -- + {float_name}.is_integer() -> bool Return ``True`` if the floating point number is finite with integral @@ -369,10 +358,14 @@ def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): """)) for int_name in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', - 'int64', 'uint64', 'int64', 'uint64', 'int64', 'uint64'): + 'int64', 'uint64', 'int64', 'uint64', 'int64', 'uint64', + 'longlong', 'ulonglong'): # Add negative examples for signed cases by checking typecode add_newdoc('numpy._core.numerictypes', int_name, ('bit_count', f""" + bit_count($self, /) + -- + {int_name}.bit_count() -> int Computes the number of 1-bits in the absolute value of the input. diff --git a/numpy/_core/_add_newdocs_scalars.pyi b/numpy/_core/_add_newdocs_scalars.pyi new file mode 100644 index 000000000000..241f4a00bd45 --- /dev/null +++ b/numpy/_core/_add_newdocs_scalars.pyi @@ -0,0 +1,16 @@ +from typing import Final + +import numpy as np + +possible_aliases: Final[list[tuple[type[np.number], str, str]]] = ... +_system: Final[str] = ... +_machine: Final[str] = ... +_doc_alias_string: Final[str] = ... +_bool_docstring: Final[str] = ... +bool_name: str = ... +int_name: str = ... +float_name: str = ... + +def numeric_type_aliases(aliases: list[tuple[str, str]]) -> list[tuple[type[np.number], str, str]]: ... +def add_newdoc_for_scalar_type(name: str, text_signature: str, doc: str) -> None: ... +def _get_platform_and_machine() -> tuple[str, str]: ... 
diff --git a/numpy/_core/_asarray.py b/numpy/_core/_asarray.py index 75eabb21f996..edaff5222f69 100644 --- a/numpy/_core/_asarray.py +++ b/numpy/_core/_asarray.py @@ -3,13 +3,8 @@ `require` fits this category despite its name not matching this pattern. """ -from .overrides import ( - array_function_dispatch, - set_array_function_like_doc, - set_module, -) from .multiarray import array, asanyarray - +from .overrides import array_function_dispatch, finalize_array_function_like, set_module __all__ = ["require"] @@ -24,7 +19,7 @@ } -@set_array_function_like_doc +@finalize_array_function_like @set_module('numpy') def require(a, dtype=None, requirements=None, *, like=None): """ @@ -75,6 +70,7 @@ def require(a, dtype=None, requirements=None, *, like=None): Examples -------- + >>> import numpy as np >>> x = np.arange(6).reshape(2,3) >>> x.flags C_CONTIGUOUS : True diff --git a/numpy/_core/_asarray.pyi b/numpy/_core/_asarray.pyi index 5cd49659480e..07adc83fbcff 100644 --- a/numpy/_core/_asarray.pyi +++ b/numpy/_core/_asarray.pyi @@ -1,41 +1,41 @@ from collections.abc import Iterable -from typing import Any, TypeVar, overload, Literal +from typing import Any, Literal, overload -from numpy._typing import NDArray, DTypeLike, _SupportsArrayFunc +from numpy._typing import DTypeLike, NDArray, _SupportsArrayFunc -_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) +__all__ = ["require"] -_Requirements = Literal[ +type _Requirements = Literal[ "C", "C_CONTIGUOUS", "CONTIGUOUS", "F", "F_CONTIGUOUS", "FORTRAN", "A", "ALIGNED", "W", "WRITEABLE", "O", "OWNDATA" ] -_E = Literal["E", "ENSUREARRAY"] -_RequirementsWithE = _Requirements | _E +type _E = Literal["E", "ENSUREARRAY"] +type _RequirementsWithE = _Requirements | _E @overload -def require( - a: _ArrayType, - dtype: None = ..., - requirements: None | _Requirements | Iterable[_Requirements] = ..., +def require[ArrayT: NDArray[Any]]( + a: ArrayT, + dtype: None = None, + requirements: _Requirements | Iterable[_Requirements] | None = 
None, *, - like: _SupportsArrayFunc = ... -) -> _ArrayType: ... + like: _SupportsArrayFunc | None = None +) -> ArrayT: ... @overload def require( a: object, - dtype: DTypeLike = ..., - requirements: _E | Iterable[_RequirementsWithE] = ..., + dtype: DTypeLike | None = None, + requirements: _E | Iterable[_RequirementsWithE] | None = None, *, - like: _SupportsArrayFunc = ... + like: _SupportsArrayFunc | None = None ) -> NDArray[Any]: ... @overload def require( a: object, - dtype: DTypeLike = ..., - requirements: None | _Requirements | Iterable[_Requirements] = ..., + dtype: DTypeLike | None = None, + requirements: _Requirements | Iterable[_Requirements] | None = None, *, - like: _SupportsArrayFunc = ... + like: _SupportsArrayFunc | None = None ) -> NDArray[Any]: ... diff --git a/numpy/_core/_dtype.py b/numpy/_core/_dtype.py index 328a0e3959f3..58cfbbf8724a 100644 --- a/numpy/_core/_dtype.py +++ b/numpy/_core/_dtype.py @@ -5,7 +5,6 @@ """ import numpy as np - _kind_to_stem = { 'u': 'uint', 'i': 'int', @@ -26,8 +25,7 @@ def _kind_name(dtype): return _kind_to_stem[dtype.kind] except KeyError as e: raise RuntimeError( - "internal dtype error, unknown kind {!r}" - .format(dtype.kind) + f"internal dtype error, unknown kind {dtype.kind!r}" ) from None @@ -46,7 +44,7 @@ def __repr__(dtype): arg_str = _construction_repr(dtype, include_align=False) if dtype.isalignedstruct: arg_str = arg_str + ", align=True" - return "dtype({})".format(arg_str) + return f"dtype({arg_str})" def _unpack_field(dtype, offset, title=None): @@ -122,11 +120,11 @@ def _scalar_str(dtype, short): elif dtype.type == np.str_: if _isunsized(dtype): - return "'%sU'" % byteorder + return f"'{byteorder}U'" else: return "'%sU%d'" % (byteorder, dtype.itemsize / 4) - elif dtype.type == str: + elif dtype.type is str: return "'T'" elif not type(dtype)._legacy: @@ -141,10 +139,13 @@ def _scalar_str(dtype, short): return "'V%d'" % dtype.itemsize elif dtype.type == np.datetime64: - return "'%sM8%s'" % (byteorder, 
_datetime_metadata_str(dtype)) + return f"'{byteorder}M8{_datetime_metadata_str(dtype)}'" elif dtype.type == np.timedelta64: - return "'%sm8%s'" % (byteorder, _datetime_metadata_str(dtype)) + return f"'{byteorder}m8{_datetime_metadata_str(dtype)}'" + + elif dtype.isbuiltin == 2: + return dtype.type.__name__ elif np.issubdtype(dtype, np.number): # Short repr with endianness, like ' _Name: ... +def __str__(dtype: np.dtype) -> str: ... +def __repr__(dtype: np.dtype) -> str: ... + +# +def _isunsized(dtype: np.dtype) -> bool: ... +def _is_packed(dtype: np.dtype) -> bool: ... +def _name_includes_bit_suffix(dtype: np.dtype) -> bool: ... + +# +def _construction_repr(dtype: np.dtype, include_align: bool = False, short: bool = False) -> str: ... +def _scalar_str(dtype: np.dtype, short: bool) -> str: ... +def _byte_order_str(dtype: np.dtype) -> str: ... +def _datetime_metadata_str(dtype: np.dtype) -> str: ... +def _struct_dict_str(dtype: np.dtype, includealignedflag: bool) -> str: ... +def _struct_list_str(dtype: np.dtype) -> str: ... +def _struct_str(dtype: np.dtype, include_align: bool) -> str: ... +def _subarray_str(dtype: np.dtype) -> str: ... +def _name_get(dtype: np.dtype) -> str: ... + +# +@overload +def _unpack_field[T](dtype: np.dtype, offset: int, title: T) -> tuple[np.dtype, int, T]: ... +@overload +def _unpack_field(dtype: np.dtype, offset: int, title: None = None) -> tuple[np.dtype, int, None]: ... +def _aligned_offset(offset: int, alignment: int) -> int: ... 
diff --git a/numpy/_core/_dtype_ctypes.py b/numpy/_core/_dtype_ctypes.py index fef1e0db35f2..4de6df6dbd37 100644 --- a/numpy/_core/_dtype_ctypes.py +++ b/numpy/_core/_dtype_ctypes.py @@ -57,11 +57,11 @@ def _from_ctypes_structure(t): offsets.append(current_offset) current_offset += ctypes.sizeof(ftyp) - return np.dtype(dict( - formats=formats, - offsets=offsets, - names=names, - itemsize=ctypes.sizeof(t))) + return np.dtype({ + "formats": formats, + "offsets": offsets, + "names": names, + "itemsize": ctypes.sizeof(t)}) else: fields = [] for fname, ftyp in t._fields_: @@ -93,11 +93,11 @@ def _from_ctypes_union(t): formats.append(dtype_from_ctypes_type(ftyp)) offsets.append(0) # Union fields are offset to 0 - return np.dtype(dict( - formats=formats, - offsets=offsets, - names=names, - itemsize=ctypes.sizeof(t))) + return np.dtype({ + "formats": formats, + "offsets": offsets, + "names": names, + "itemsize": ctypes.sizeof(t)}) def dtype_from_ctypes_type(t): @@ -117,4 +117,4 @@ def dtype_from_ctypes_type(t): return _from_ctypes_scalar(t) else: raise NotImplementedError( - "Unknown ctypes type {}".format(t.__name__)) + f"Unknown ctypes type {t.__name__}") diff --git a/numpy/_core/_dtype_ctypes.pyi b/numpy/_core/_dtype_ctypes.pyi new file mode 100644 index 000000000000..69438a2c1b4c --- /dev/null +++ b/numpy/_core/_dtype_ctypes.pyi @@ -0,0 +1,83 @@ +import _ctypes +import ctypes as ct +from typing import Any, overload + +import numpy as np + +# +@overload +def dtype_from_ctypes_type(t: type[_ctypes.Array[Any] | _ctypes.Structure]) -> np.dtype[np.void]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_bool]) -> np.dtype[np.bool]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_int8 | ct.c_byte]) -> np.dtype[np.int8]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_uint8 | ct.c_ubyte]) -> np.dtype[np.uint8]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_int16 | ct.c_short]) -> np.dtype[np.int16]: ... 
+@overload +def dtype_from_ctypes_type(t: type[ct.c_uint16 | ct.c_ushort]) -> np.dtype[np.uint16]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_int32 | ct.c_int]) -> np.dtype[np.int32]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_uint32 | ct.c_uint]) -> np.dtype[np.uint32]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_ssize_t | ct.c_long]) -> np.dtype[np.int32 | np.int64]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_size_t | ct.c_ulong]) -> np.dtype[np.uint32 | np.uint64]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_int64 | ct.c_longlong]) -> np.dtype[np.int64]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_uint64 | ct.c_ulonglong]) -> np.dtype[np.uint64]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_float]) -> np.dtype[np.float32]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_double]) -> np.dtype[np.float64]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_longdouble]) -> np.dtype[np.longdouble]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_char]) -> np.dtype[np.bytes_]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.py_object[Any]]) -> np.dtype[np.object_]: ... + +# NOTE: the complex ctypes on python>=3.14 are not yet supported at runtime, see +# https://github.com/numpy/numpy/issues/28360 + +# +def _from_ctypes_array(t: type[_ctypes.Array[Any]]) -> np.dtype[np.void]: ... +def _from_ctypes_structure(t: type[_ctypes.Structure]) -> np.dtype[np.void]: ... +def _from_ctypes_union(t: type[_ctypes.Union]) -> np.dtype[np.void]: ... + +# keep in sync with `dtype_from_ctypes_type` (minus the first overload) +@overload +def _from_ctypes_scalar(t: type[ct.c_bool]) -> np.dtype[np.bool]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_int8 | ct.c_byte]) -> np.dtype[np.int8]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_uint8 | ct.c_ubyte]) -> np.dtype[np.uint8]: ... 
+@overload +def _from_ctypes_scalar(t: type[ct.c_int16 | ct.c_short]) -> np.dtype[np.int16]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_uint16 | ct.c_ushort]) -> np.dtype[np.uint16]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_int32 | ct.c_int]) -> np.dtype[np.int32]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_uint32 | ct.c_uint]) -> np.dtype[np.uint32]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_ssize_t | ct.c_long]) -> np.dtype[np.int32 | np.int64]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_size_t | ct.c_ulong]) -> np.dtype[np.uint32 | np.uint64]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_int64 | ct.c_longlong]) -> np.dtype[np.int64]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_uint64 | ct.c_ulonglong]) -> np.dtype[np.uint64]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_float]) -> np.dtype[np.float32]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_double]) -> np.dtype[np.float64]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_longdouble]) -> np.dtype[np.longdouble]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_char]) -> np.dtype[np.bytes_]: ... +@overload +def _from_ctypes_scalar(t: type[ct.py_object[Any]]) -> np.dtype[np.object_]: ... diff --git a/numpy/_core/_exceptions.py b/numpy/_core/_exceptions.py index 87d4213a6d42..73b07d25ef1f 100644 --- a/numpy/_core/_exceptions.py +++ b/numpy/_core/_exceptions.py @@ -5,7 +5,6 @@ By putting the formatting in `__str__`, we also avoid paying the cost for users who silence the exceptions. 
""" -from .._utils import set_module def _unpack_tuple(tup): if len(tup) == 1: @@ -44,12 +43,9 @@ def __init__(self, ufunc, dtypes): def __str__(self): return ( - "ufunc {!r} did not contain a loop with signature matching types " - "{!r} -> {!r}" - ).format( - self.ufunc.__name__, - _unpack_tuple(self.dtypes[:self.ufunc.nin]), - _unpack_tuple(self.dtypes[self.ufunc.nin:]) + f"ufunc {self.ufunc.__name__!r} did not contain a loop with signature " + f"matching types {_unpack_tuple(self.dtypes[:self.ufunc.nin])!r} " + f"-> {_unpack_tuple(self.dtypes[self.ufunc.nin:])!r}" ) @@ -86,12 +82,10 @@ def __init__(self, ufunc, casting, from_, to, i): def __str__(self): # only show the number if more than one input exists - i_str = "{} ".format(self.in_i) if self.ufunc.nin != 1 else "" + i_str = f"{self.in_i} " if self.ufunc.nin != 1 else "" return ( - "Cannot cast ufunc {!r} input {}from {!r} to {!r} with casting " - "rule {!r}" - ).format( - self.ufunc.__name__, i_str, self.from_, self.to, self.casting + f"Cannot cast ufunc {self.ufunc.__name__!r} input {i_str}from " + f"{self.from_!r} to {self.to!r} with casting rule {self.casting!r}" ) @@ -104,12 +98,10 @@ def __init__(self, ufunc, casting, from_, to, i): def __str__(self): # only show the number if more than one output exists - i_str = "{} ".format(self.out_i) if self.ufunc.nout != 1 else "" + i_str = f"{self.out_i} " if self.ufunc.nout != 1 else "" return ( - "Cannot cast ufunc {!r} output {}from {!r} to {!r} with casting " - "rule {!r}" - ).format( - self.ufunc.__name__, i_str, self.from_, self.to, self.casting + f"Cannot cast ufunc {self.ufunc.__name__!r} output {i_str}from " + f"{self.from_!r} to {self.to!r} with casting rule {self.casting!r}" ) @@ -156,17 +148,15 @@ def _size_to_string(num_bytes): # format with a sensible number of digits if unit_i == 0: # no decimal point on bytes - return '{:.0f} {}'.format(n_units, unit_name) + return f'{n_units:.0f} {unit_name}' elif round(n_units) < 1000: # 3 significant figures, 
if none are dropped to the left of the . - return '{:#.3g} {}'.format(n_units, unit_name) + return f'{n_units:#.3g} {unit_name}' else: # just give all the digits otherwise - return '{:#.0f} {}'.format(n_units, unit_name) + return f'{n_units:#.0f} {unit_name}' def __str__(self): size_str = self._size_to_string(self._total_size) - return ( - "Unable to allocate {} for an array with shape {} and data type {}" - .format(size_str, self.shape, self.dtype) - ) + return (f"Unable to allocate {size_str} for an array with shape " + f"{self.shape} and data type {self.dtype}") diff --git a/numpy/_core/_exceptions.pyi b/numpy/_core/_exceptions.pyi new file mode 100644 index 000000000000..dd559be44fee --- /dev/null +++ b/numpy/_core/_exceptions.pyi @@ -0,0 +1,48 @@ +from collections.abc import Iterable +from typing import Any, Final, overload + +import numpy as np +from numpy import _CastingKind + +### + +class UFuncTypeError(TypeError): + ufunc: Final[np.ufunc] + def __init__(self, /, ufunc: np.ufunc) -> None: ... + +class _UFuncNoLoopError(UFuncTypeError): + dtypes: tuple[np.dtype, ...] + def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ... + +class _UFuncBinaryResolutionError(_UFuncNoLoopError): + dtypes: tuple[np.dtype, np.dtype] # pyrefly: ignore[bad-override] + def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ... + +class _UFuncCastingError(UFuncTypeError): + casting: Final[_CastingKind] + from_: Final[np.dtype] + to: Final[np.dtype] + def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype) -> None: ... + +class _UFuncInputCastingError(_UFuncCastingError): + in_i: Final[int] + def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ... + +class _UFuncOutputCastingError(_UFuncCastingError): + out_i: Final[int] + def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ... 
+ +class _ArrayMemoryError(MemoryError): + shape: tuple[int, ...] + dtype: np.dtype + def __init__(self, /, shape: tuple[int, ...], dtype: np.dtype) -> None: ... + @property + def _total_size(self) -> int: ... + @staticmethod + def _size_to_string(num_bytes: int) -> str: ... + +@overload +def _unpack_tuple[T](tup: tuple[T]) -> T: ... +@overload +def _unpack_tuple[TupleT: tuple[()] | tuple[Any, Any, *tuple[Any, ...]]](tup: TupleT) -> TupleT: ... +def _display_as_base[ExceptionT: Exception](cls: type[ExceptionT]) -> type[ExceptionT]: ... diff --git a/numpy/_core/_internal.py b/numpy/_core/_internal.py index 058e93644dec..ce796d5ee6a7 100644 --- a/numpy/_core/_internal.py +++ b/numpy/_core/_internal.py @@ -5,20 +5,21 @@ """ import ast +import math import re import sys import warnings -from ..exceptions import DTypePromotionError -from .multiarray import dtype, array, ndarray, promote_types, StringDType from numpy import _NoValue +from numpy.exceptions import DTypePromotionError + +from .multiarray import StringDType, array, dtype, promote_types + try: import ctypes except ImportError: ctypes = None -IS_PYPY = sys.implementation.name == 'pypy' - if sys.byteorder == 'little': _nbo = '<' else: @@ -157,7 +158,7 @@ def _commastring(astr): (order1, repeats, order2, dtype) = mo.groups() except (TypeError, AttributeError): raise ValueError( - f'format number {len(result)+1} of "{astr}" is not recognized' + f'format number {len(result) + 1} of "{astr}" is not recognized' ) from None startindex = mo.end() # Separator or ending padding @@ -169,7 +170,7 @@ def _commastring(astr): if not mo: raise ValueError( 'format number %d of "%s" is not recognized' % - (len(result)+1, astr)) + (len(result) + 1, astr)) startindex = mo.end() islist = True @@ -182,8 +183,7 @@ def _commastring(astr): order2 = _convorder.get(order2, order2) if (order1 != order2): raise ValueError( - 'inconsistent byte-order specification %s and %s' % - (order1, order2)) + f'inconsistent byte-order specification 
{order1} and {order2}') order = order1 if order in ('|', '=', _nbo): @@ -301,7 +301,7 @@ def shape_as(self, obj): """ if self._zerod: return None - return (obj*self._arr.ndim)(*self._arr.shape) + return (obj * self._arr.ndim)(*self._arr.shape) def strides_as(self, obj): """ @@ -310,7 +310,7 @@ def strides_as(self, obj): """ if self._zerod: return None - return (obj*self._arr.ndim)(*self._arr.strides) + return (obj * self._arr.ndim)(*self._arr.strides) @property def data(self): @@ -363,46 +363,6 @@ def _as_parameter_(self): """ return self.data_as(ctypes.c_void_p) - # Numpy 1.21.0, 2021-05-18 - - def get_data(self): - """Deprecated getter for the `_ctypes.data` property. - - .. deprecated:: 1.21 - """ - warnings.warn('"get_data" is deprecated. Use "data" instead', - DeprecationWarning, stacklevel=2) - return self.data - - def get_shape(self): - """Deprecated getter for the `_ctypes.shape` property. - - .. deprecated:: 1.21 - """ - warnings.warn('"get_shape" is deprecated. Use "shape" instead', - DeprecationWarning, stacklevel=2) - return self.shape - - def get_strides(self): - """Deprecated getter for the `_ctypes.strides` property. - - .. deprecated:: 1.21 - """ - warnings.warn('"get_strides" is deprecated. Use "strides" instead', - DeprecationWarning, stacklevel=2) - return self.strides - - def get_as_parameter(self): - """Deprecated getter for the `_ctypes._as_parameter_` property. - - .. deprecated:: 1.21 - """ - warnings.warn( - '"get_as_parameter" is deprecated. 
Use "_as_parameter_" instead', - DeprecationWarning, stacklevel=2, - ) - return self._as_parameter_ - def _newnames(datatype, order): """ @@ -668,12 +628,12 @@ def _dtype_from_pep3118(spec): return dtype def __dtype_from_pep3118(stream, is_subdtype): - field_spec = dict( - names=[], - formats=[], - offsets=[], - itemsize=0 - ) + field_spec = { + 'names': [], + 'formats': [], + 'offsets': [], + 'itemsize': 0 + } offset = 0 common_alignment = 1 is_padding = False @@ -738,11 +698,10 @@ def __dtype_from_pep3118(stream, is_subdtype): elif stream.next in _pep3118_unsupported_map: desc = _pep3118_unsupported_map[stream.next] raise NotImplementedError( - "Unrepresentable PEP 3118 data type {!r} ({})" - .format(stream.next, desc)) + f"Unrepresentable PEP 3118 data type {stream.next!r} ({desc})") else: raise ValueError( - "Unknown PEP 3118 data type specifier %r" % stream.s + f"Unknown PEP 3118 data type specifier {stream.s!r}" ) # @@ -833,21 +792,21 @@ def _fix_names(field_spec): def _add_trailing_padding(value, padding): """Inject the specified number of padding bytes at the end of a dtype""" if value.fields is None: - field_spec = dict( - names=['f0'], - formats=[value], - offsets=[0], - itemsize=value.itemsize - ) + field_spec = { + 'names': ['f0'], + 'formats': [value], + 'offsets': [0], + 'itemsize': value.itemsize + } else: fields = value.fields names = value.names - field_spec = dict( - names=names, - formats=[fields[name][0] for name in names], - offsets=[fields[name][1] for name in names], - itemsize=value.itemsize - ) + field_spec = { + 'names': names, + 'formats': [fields[name][0] for name in names], + 'offsets': [fields[name][1] for name in names], + 'itemsize': value.itemsize + } field_spec['itemsize'] += padding return dtype(field_spec) @@ -860,6 +819,9 @@ def _prod(a): def _gcd(a, b): """Calculate the greatest common divisor of a and b""" + if not (math.isfinite(a) and math.isfinite(b)): + raise ValueError('Can only find greatest common divisor of ' + 
f'finite arguments, found "{a}" and "{b}"') while b: a, b = b, a % b return a @@ -869,21 +831,21 @@ def _lcm(a, b): def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs): """ Format the error message for when __array_ufunc__ gives up. """ - args_string = ', '.join(['{!r}'.format(arg) for arg in inputs] + - ['{}={!r}'.format(k, v) + args_string = ', '.join([f'{arg!r}' for arg in inputs] + + [f'{k}={v!r}' for k, v in kwargs.items()]) args = inputs + kwargs.get('out', ()) types_string = ', '.join(repr(type(arg).__name__) for arg in args) return ('operand type(s) all returned NotImplemented from ' - '__array_ufunc__({!r}, {!r}, {}): {}' - .format(ufunc, method, args_string, types_string)) + f'__array_ufunc__({ufunc!r}, {method!r}, {args_string}): {types_string}' + ) def array_function_errmsg_formatter(public_api, types): """ Format the error message for when __array_ufunc__ gives up. """ - func_name = '{}.{}'.format(public_api.__module__, public_api.__name__) - return ("no implementation found for '{}' on types that implement " - '__array_function__: {}'.format(func_name, list(types))) + func_name = f'{public_api.__module__}.{public_api.__name__}' + return (f"no implementation found for '{func_name}' on types that implement " + f'__array_function__: {list(types)}') def _ufunc_doc_signature_formatter(ufunc): @@ -891,13 +853,15 @@ def _ufunc_doc_signature_formatter(ufunc): Builds a signature string which resembles PEP 457 This is used to construct the first line of the docstring + + Keep in sync with `_ufunc_inspect_signature_builder`. 
""" # input arguments are simple if ufunc.nin == 1: in_args = 'x' else: - in_args = ', '.join(f'x{i+1}' for i in range(ufunc.nin)) + in_args = ', '.join(f'x{i + 1}' for i in range(ufunc.nin)) # output arguments are both keyword or positional if ufunc.nout == 0: @@ -907,8 +871,8 @@ def _ufunc_doc_signature_formatter(ufunc): else: out_args = '[, {positional}], / [, out={default}]'.format( positional=', '.join( - 'out{}'.format(i+1) for i in range(ufunc.nout)), - default=repr((None,)*ufunc.nout) + f'out{i + 1}' for i in range(ufunc.nout)), + default=repr((None,) * ufunc.nout) ) # keyword only args depend on whether this is a gufunc @@ -926,13 +890,56 @@ def _ufunc_doc_signature_formatter(ufunc): kwargs += "[, signature, axes, axis]" # join all the parts together - return '{name}({in_args}{out_args}, *{kwargs})'.format( - name=ufunc.__name__, - in_args=in_args, - out_args=out_args, - kwargs=kwargs + return f'{ufunc.__name__}({in_args}{out_args}, *{kwargs})' + + +def _ufunc_inspect_signature_builder(ufunc): + """ + Builds a ``__signature__`` string. + + Should be kept in sync with `_ufunc_doc_signature_formatter`. 
+ """ + + from inspect import Parameter, Signature + + params = [] + + # positional-only input parameters + if ufunc.nin == 1: + params.append(Parameter("x", Parameter.POSITIONAL_ONLY)) + else: + params.extend( + Parameter(f"x{i}", Parameter.POSITIONAL_ONLY) + for i in range(1, ufunc.nin + 1) + ) + + # for the sake of simplicity, we only consider a single output parameter + if ufunc.nout == 1: + out_default = None + else: + out_default = (None,) * ufunc.nout + params.append( + Parameter("out", Parameter.POSITIONAL_OR_KEYWORD, default=out_default), ) + if ufunc.signature is None: + params.append(Parameter("where", Parameter.KEYWORD_ONLY, default=True)) + else: + # NOTE: not all gufuncs support the `axis` parameters + params.append(Parameter("axes", Parameter.KEYWORD_ONLY, default=_NoValue)) + params.append(Parameter("axis", Parameter.KEYWORD_ONLY, default=_NoValue)) + params.append(Parameter("keepdims", Parameter.KEYWORD_ONLY, default=False)) + + params.extend(( + Parameter("casting", Parameter.KEYWORD_ONLY, default='same_kind'), + Parameter("order", Parameter.KEYWORD_ONLY, default='K'), + Parameter("dtype", Parameter.KEYWORD_ONLY, default=None), + Parameter("subok", Parameter.KEYWORD_ONLY, default=True), + Parameter("signature", Parameter.KEYWORD_ONLY, default=None), + )) + + return Signature(params) + def npy_ctypes_check(cls): # determine if a class comes from ctypes, in order to work around @@ -940,12 +947,8 @@ def npy_ctypes_check(cls): try: # ctypes class are new-style, so have an __mro__. This probably fails # for ctypes classes with multiple inheritance. 
- if IS_PYPY: - # (..., _ctypes.basics._CData, Bufferable, object) - ctype_base = cls.__mro__[-3] - else: - # # (..., _ctypes._CData, object) - ctype_base = cls.__mro__[-2] + # # (..., _ctypes._CData, object) + ctype_base = cls.__mro__[-2] # right now, they're part of the _ctypes module return '_ctypes' in ctype_base.__module__ except Exception: diff --git a/numpy/_core/_internal.pyi b/numpy/_core/_internal.pyi index 690554f66f94..777bcd5561b2 100644 --- a/numpy/_core/_internal.pyi +++ b/numpy/_core/_internal.pyi @@ -1,23 +1,35 @@ -from typing import Any, TypeVar, overload, Generic import ctypes as ct +import re +from collections.abc import Callable, Iterable +from typing import Any, Final, Generic, Self, overload +from typing_extensions import TypeVar -from numpy.typing import NDArray +import numpy as np +import numpy.typing as npt from numpy.ctypeslib import c_intp -_CastT = TypeVar("_CastT", bound=ct._CanCastTo) # Copied from `ctypes.cast` -_CT = TypeVar("_CT", bound=ct._CData) -_PT = TypeVar("_PT", bound=int) +_PT_co = TypeVar("_PT_co", bound=int | None, default=None, covariant=True) + +### + +format_re: Final[re.Pattern[str]] = ... +sep_re: Final[re.Pattern[str]] = ... +space_re: Final[re.Pattern[str]] = ... + +### # TODO: Let the likes of `shape_as` and `strides_as` return `None` # for 0D arrays once we've got shape-support -class _ctypes(Generic[_PT]): +class _ctypes(Generic[_PT_co]): @overload - def __new__(cls, array: NDArray[Any], ptr: None = ...) -> _ctypes[None]: ... + def __init__(self: _ctypes[None], /, array: npt.NDArray[Any], ptr: None = None) -> None: ... @overload - def __new__(cls, array: NDArray[Any], ptr: _PT) -> _ctypes[_PT]: ... + def __init__(self, /, array: npt.NDArray[Any], ptr: _PT_co) -> None: ... + + # @property - def data(self) -> _PT: ... + def data(self) -> _PT_co: ... @property def shape(self) -> ct.Array[c_intp]: ... @property @@ -25,6 +37,20 @@ class _ctypes(Generic[_PT]): @property def _as_parameter_(self) -> ct.c_void_p: ... 
- def data_as(self, obj: type[_CastT]) -> _CastT: ... - def shape_as(self, obj: type[_CT]) -> ct.Array[_CT]: ... - def strides_as(self, obj: type[_CT]) -> ct.Array[_CT]: ... + # + def data_as[CastT: ct._CanCastTo](self, /, obj: type[CastT]) -> CastT: ... + def shape_as[CT: ct._CData](self, /, obj: type[CT]) -> ct.Array[CT]: ... + def strides_as[CT: ct._CData](self, /, obj: type[CT]) -> ct.Array[CT]: ... + +class dummy_ctype[T_co]: + _cls: type[T_co] + + def __init__(self, /, cls: type[T_co]) -> None: ... + def __eq__(self, other: Self, /) -> bool: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __ne__(self, other: Self, /) -> bool: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __mul__(self, other: object, /) -> Self: ... + def __call__(self, /, *other: object) -> T_co: ... + +def array_ufunc_errmsg_formatter(dummy: object, ufunc: np.ufunc, method: str, *inputs: object, **kwargs: object) -> str: ... +def array_function_errmsg_formatter(public_api: Callable[..., object], types: Iterable[str]) -> str: ... +def npy_ctypes_check(cls: type) -> bool: ... diff --git a/numpy/_core/_machar.py b/numpy/_core/_machar.py deleted file mode 100644 index 2b1812f48f98..000000000000 --- a/numpy/_core/_machar.py +++ /dev/null @@ -1,356 +0,0 @@ -""" -Machine arithmetic - determine the parameters of the -floating-point arithmetic system - -Author: Pearu Peterson, September 2003 - -""" -__all__ = ['MachAr'] - -from .fromnumeric import any -from ._ufunc_config import errstate -from .._utils import set_module - -# Need to speed this up...especially for longdouble - -# Deprecated 2021-10-20, NumPy 1.22 -class MachAr: - """ - Diagnosing machine parameters. - - Attributes - ---------- - ibeta : int - Radix in which numbers are represented. - it : int - Number of base-`ibeta` digits in the floating point mantissa M. 
- machep : int - Exponent of the smallest (most negative) power of `ibeta` that, - added to 1.0, gives something different from 1.0 - eps : float - Floating-point number ``beta**machep`` (floating point precision) - negep : int - Exponent of the smallest power of `ibeta` that, subtracted - from 1.0, gives something different from 1.0. - epsneg : float - Floating-point number ``beta**negep``. - iexp : int - Number of bits in the exponent (including its sign and bias). - minexp : int - Smallest (most negative) power of `ibeta` consistent with there - being no leading zeros in the mantissa. - xmin : float - Floating-point number ``beta**minexp`` (the smallest [in - magnitude] positive floating point number with full precision). - maxexp : int - Smallest (positive) power of `ibeta` that causes overflow. - xmax : float - ``(1-epsneg) * beta**maxexp`` (the largest [in magnitude] - usable floating value). - irnd : int - In ``range(6)``, information on what kind of rounding is done - in addition, and on how underflow is handled. - ngrd : int - Number of 'guard digits' used when truncating the product - of two mantissas to fit the representation. - epsilon : float - Same as `eps`. - tiny : float - An alias for `smallest_normal`, kept for backwards compatibility. - huge : float - Same as `xmax`. - precision : float - ``- int(-log10(eps))`` - resolution : float - ``- 10**(-precision)`` - smallest_normal : float - The smallest positive floating point number with 1 as leading bit in - the mantissa following IEEE-754. Same as `xmin`. - smallest_subnormal : float - The smallest positive floating point number with 0 as leading bit in - the mantissa following IEEE-754. - - Parameters - ---------- - float_conv : function, optional - Function that converts an integer or integer array to a float - or float array. Default is `float`. - int_conv : function, optional - Function that converts a float or float array to an integer or - integer array. Default is `int`. 
- float_to_float : function, optional - Function that converts a float array to float. Default is `float`. - Note that this does not seem to do anything useful in the current - implementation. - float_to_str : function, optional - Function that converts a single float to a string. Default is - ``lambda v:'%24.16e' %v``. - title : str, optional - Title that is printed in the string representation of `MachAr`. - - See Also - -------- - finfo : Machine limits for floating point types. - iinfo : Machine limits for integer types. - - References - ---------- - .. [1] Press, Teukolsky, Vetterling and Flannery, - "Numerical Recipes in C++," 2nd ed, - Cambridge University Press, 2002, p. 31. - - """ - - def __init__(self, float_conv=float,int_conv=int, - float_to_float=float, - float_to_str=lambda v:'%24.16e' % v, - title='Python floating point number'): - """ - - float_conv - convert integer to float (array) - int_conv - convert float (array) to integer - float_to_float - convert float array to float - float_to_str - convert array float to str - title - description of used floating point numbers - - """ - # We ignore all errors here because we are purposely triggering - # underflow to detect the properties of the runninng arch. - with errstate(under='ignore'): - self._do_init(float_conv, int_conv, float_to_float, float_to_str, title) - - def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title): - max_iterN = 10000 - msg = "Did not converge after %d tries with %s" - one = float_conv(1) - two = one + one - zero = one - one - - # Do we really need to do this? Aren't they 2 and 2.0? 
- # Determine ibeta and beta - a = one - for _ in range(max_iterN): - a = a + a - temp = a + one - temp1 = temp - a - if any(temp1 - one != zero): - break - else: - raise RuntimeError(msg % (_, one.dtype)) - b = one - for _ in range(max_iterN): - b = b + b - temp = a + b - itemp = int_conv(temp-a) - if any(itemp != 0): - break - else: - raise RuntimeError(msg % (_, one.dtype)) - ibeta = itemp - beta = float_conv(ibeta) - - # Determine it and irnd - it = -1 - b = one - for _ in range(max_iterN): - it = it + 1 - b = b * beta - temp = b + one - temp1 = temp - b - if any(temp1 - one != zero): - break - else: - raise RuntimeError(msg % (_, one.dtype)) - - betah = beta / two - a = one - for _ in range(max_iterN): - a = a + a - temp = a + one - temp1 = temp - a - if any(temp1 - one != zero): - break - else: - raise RuntimeError(msg % (_, one.dtype)) - temp = a + betah - irnd = 0 - if any(temp-a != zero): - irnd = 1 - tempa = a + beta - temp = tempa + betah - if irnd == 0 and any(temp-tempa != zero): - irnd = 2 - - # Determine negep and epsneg - negep = it + 3 - betain = one / beta - a = one - for i in range(negep): - a = a * betain - b = a - for _ in range(max_iterN): - temp = one - a - if any(temp-one != zero): - break - a = a * beta - negep = negep - 1 - # Prevent infinite loop on PPC with gcc 4.0: - if negep < 0: - raise RuntimeError("could not determine machine tolerance " - "for 'negep', locals() -> %s" % (locals())) - else: - raise RuntimeError(msg % (_, one.dtype)) - negep = -negep - epsneg = a - - # Determine machep and eps - machep = - it - 3 - a = b - - for _ in range(max_iterN): - temp = one + a - if any(temp-one != zero): - break - a = a * beta - machep = machep + 1 - else: - raise RuntimeError(msg % (_, one.dtype)) - eps = a - - # Determine ngrd - ngrd = 0 - temp = one + eps - if irnd == 0 and any(temp*one - one != zero): - ngrd = 1 - - # Determine iexp - i = 0 - k = 1 - z = betain - t = one + eps - nxres = 0 - for _ in range(max_iterN): - y = z - z = y*y - a 
= z*one # Check here for underflow - temp = z*t - if any(a+a == zero) or any(abs(z) >= y): - break - temp1 = temp * betain - if any(temp1*beta == z): - break - i = i + 1 - k = k + k - else: - raise RuntimeError(msg % (_, one.dtype)) - if ibeta != 10: - iexp = i + 1 - mx = k + k - else: - iexp = 2 - iz = ibeta - while k >= iz: - iz = iz * ibeta - iexp = iexp + 1 - mx = iz + iz - 1 - - # Determine minexp and xmin - for _ in range(max_iterN): - xmin = y - y = y * betain - a = y * one - temp = y * t - if any((a + a) != zero) and any(abs(y) < xmin): - k = k + 1 - temp1 = temp * betain - if any(temp1*beta == y) and any(temp != y): - nxres = 3 - xmin = y - break - else: - break - else: - raise RuntimeError(msg % (_, one.dtype)) - minexp = -k - - # Determine maxexp, xmax - if mx <= k + k - 3 and ibeta != 10: - mx = mx + mx - iexp = iexp + 1 - maxexp = mx + minexp - irnd = irnd + nxres - if irnd >= 2: - maxexp = maxexp - 2 - i = maxexp + minexp - if ibeta == 2 and not i: - maxexp = maxexp - 1 - if i > 20: - maxexp = maxexp - 1 - if any(a != y): - maxexp = maxexp - 2 - xmax = one - epsneg - if any(xmax*one != xmax): - xmax = one - beta*epsneg - xmax = xmax / (xmin*beta*beta*beta) - i = maxexp + minexp + 3 - for j in range(i): - if ibeta == 2: - xmax = xmax + xmax - else: - xmax = xmax * beta - - smallest_subnormal = abs(xmin / beta ** (it)) - - self.ibeta = ibeta - self.it = it - self.negep = negep - self.epsneg = float_to_float(epsneg) - self._str_epsneg = float_to_str(epsneg) - self.machep = machep - self.eps = float_to_float(eps) - self._str_eps = float_to_str(eps) - self.ngrd = ngrd - self.iexp = iexp - self.minexp = minexp - self.xmin = float_to_float(xmin) - self._str_xmin = float_to_str(xmin) - self.maxexp = maxexp - self.xmax = float_to_float(xmax) - self._str_xmax = float_to_str(xmax) - self.irnd = irnd - - self.title = title - # Commonly used parameters - self.epsilon = self.eps - self.tiny = self.xmin - self.huge = self.xmax - self.smallest_normal = self.xmin - 
self._str_smallest_normal = float_to_str(self.xmin) - self.smallest_subnormal = float_to_float(smallest_subnormal) - self._str_smallest_subnormal = float_to_str(smallest_subnormal) - - import math - self.precision = int(-math.log10(float_to_float(self.eps))) - ten = two + two + two + two + two - resolution = ten ** (-self.precision) - self.resolution = float_to_float(resolution) - self._str_resolution = float_to_str(resolution) - - def __str__(self): - fmt = ( - 'Machine parameters for %(title)s\n' - '---------------------------------------------------------------------\n' - 'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n' - 'machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)\n' - 'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n' - 'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n' - 'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n' - 'smallest_normal=%(smallest_normal)s ' - 'smallest_subnormal=%(smallest_subnormal)s\n' - '---------------------------------------------------------------------\n' - ) - return fmt % self.__dict__ - - -if __name__ == '__main__': - print(MachAr()) diff --git a/numpy/_core/_methods.py b/numpy/_core/_methods.py index f214ff957370..1c29831bca20 100644 --- a/numpy/_core/_methods.py +++ b/numpy/_core/_methods.py @@ -8,12 +8,9 @@ import warnings from contextlib import nullcontext -from numpy._core import multiarray as mu -from numpy._core import umath as um +import numpy as np +from numpy._core import multiarray as mu, numerictypes as nt, umath as um from numpy._core.multiarray import asanyarray -from numpy._core import numerictypes as nt -from numpy._core import _exceptions -from numpy._core._ufunc_config import _no_nep50_warning from numpy._globals import _NoValue # save those O(100) nanoseconds! 
@@ -28,13 +25,13 @@ # Complex types to -> (2,)float view for fast-path computation in _var() _complex_to_float = { - nt.dtype(nt.csingle) : nt.dtype(nt.single), - nt.dtype(nt.cdouble) : nt.dtype(nt.double), + nt.dtype(nt.csingle): nt.dtype(nt.single), + nt.dtype(nt.cdouble): nt.dtype(nt.double), } # Special case for windows: ensure double takes precedence if nt.dtype(nt.longdouble) != nt.dtype(nt.double): _complex_to_float.update({ - nt.dtype(nt.clongdouble) : nt.dtype(nt.longdouble), + nt.dtype(nt.clongdouble): nt.dtype(nt.longdouble), }) # avoid keyword arguments to speed up parsing, saves about 15%-20% for very @@ -97,10 +94,18 @@ def _count_reduce_items(arr, axis, keepdims=False, where=True): return items def _clip(a, min=None, max=None, out=None, **kwargs): - if min is None and max is None: - raise ValueError("One of max or min must be given") + if a.dtype.kind in "iu": + # If min/max is a Python integer, deal with out-of-bound values here. + # (This enforces NEP 50 rules as no value based promotion is done.) 
+ if type(min) is int and min <= np.iinfo(a.dtype).min: + min = None + if type(max) is int and max >= np.iinfo(a.dtype).max: + max = None - if min is None: + if min is None and max is None: + # return identity + return um.positive(a, out=out, **kwargs) + elif min is None: return um.minimum(a, max, out=out, **kwargs) elif max is None: return um.maximum(a, min, out=out, **kwargs) @@ -114,7 +119,7 @@ def _mean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True): rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where) if rcount == 0 if where is True else umr_any(rcount == 0, axis=None): - warnings.warn("Mean of empty slice.", RuntimeWarning, stacklevel=2) + warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=2) # Cast bool, unsigned int, and int to float64 by default if dtype is None: @@ -126,9 +131,8 @@ def _mean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True): ret = umr_sum(arr, axis, dtype, out, keepdims, where=where) if isinstance(ret, mu.ndarray): - with _no_nep50_warning(): - ret = um.true_divide( - ret, rcount, out=ret, casting='unsafe', subok=False) + ret = um.true_divide( + ret, rcount, out=ret, casting='unsafe', subok=False) if is_float16_result and out is None: ret = arr.dtype.type(ret) elif hasattr(ret, 'dtype'): @@ -171,9 +175,8 @@ def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, # matching rcount to arrmean when where is specified as array div = rcount.reshape(arrmean.shape) if isinstance(arrmean, mu.ndarray): - with _no_nep50_warning(): - arrmean = um.true_divide(arrmean, div, out=arrmean, - casting='unsafe', subok=False) + arrmean = um.true_divide(arrmean, div, out=arrmean, + casting='unsafe', subok=False) elif hasattr(arrmean, "dtype"): arrmean = arrmean.dtype.type(arrmean / rcount) else: @@ -182,15 +185,14 @@ def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, # Compute sum of squared deviations from mean # Note that x may not be inexact and that 
we need it to be an array, # not a scalar. - x = asanyarray(arr - arrmean) - + x = um.subtract(arr, arrmean, out=...) if issubclass(arr.dtype.type, (nt.floating, nt.integer)): - x = um.multiply(x, x, out=x) + x = um.square(x, out=x) # Fast-paths for built-in complex types - elif x.dtype in _complex_to_float: - xv = x.view(dtype=(_complex_to_float[x.dtype], (2,))) - um.multiply(xv, xv, out=xv) - x = um.add(xv[..., 0], xv[..., 1], out=x.real).real + elif (_float_dtype := _complex_to_float.get(x.dtype)) is not None: + xv = x.view(dtype=(_float_dtype, (2,))) + um.square(xv, out=xv) + x = um.add(xv[..., 0], xv[..., 1], out=x.real) # Most general case; includes handling object arrays containing imaginary # numbers and complex types with non-native byteorder else: @@ -203,9 +205,8 @@ def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, # divide by degrees of freedom if isinstance(ret, mu.ndarray): - with _no_nep50_warning(): - ret = um.true_divide( - ret, rcount, out=ret, casting='unsafe', subok=False) + ret = um.true_divide( + ret, rcount, out=ret, casting='unsafe', subok=False) elif hasattr(ret, 'dtype'): ret = ret.dtype.type(ret / rcount) else: diff --git a/numpy/_core/_methods.pyi b/numpy/_core/_methods.pyi new file mode 100644 index 000000000000..651c78d3530b --- /dev/null +++ b/numpy/_core/_methods.pyi @@ -0,0 +1,22 @@ +from collections.abc import Callable +from typing import Any, Concatenate + +import numpy as np + +from . import _exceptions as _exceptions + +### + +type _Reduce2 = Callable[Concatenate[object, ...], Any] + +### + +bool_dt: np.dtype[np.bool] = ... +umr_maximum: _Reduce2 = ... +umr_minimum: _Reduce2 = ... +umr_sum: _Reduce2 = ... +umr_prod: _Reduce2 = ... +umr_bitwise_count = np.bitwise_count +umr_any: _Reduce2 = ... +umr_all: _Reduce2 = ... +_complex_to_float: dict[np.dtype[np.complexfloating], np.dtype[np.floating]] = ... 
diff --git a/numpy/_core/_simd.pyi b/numpy/_core/_simd.pyi new file mode 100644 index 000000000000..0ba7d78eeb87 --- /dev/null +++ b/numpy/_core/_simd.pyi @@ -0,0 +1,35 @@ +from types import ModuleType +from typing import TypedDict, type_check_only + +# NOTE: these 5 are only defined on systems with an intel processor +SSE42: ModuleType | None = ... +FMA3: ModuleType | None = ... +AVX2: ModuleType | None = ... +AVX512F: ModuleType | None = ... +AVX512_SKX: ModuleType | None = ... + +# NOTE: these 2 are only defined on systems with an arm processor +ASIMD: ModuleType | None = ... +NEON: ModuleType | None = ... + +# NOTE: This is only defined on systems with an riscv64 processor. +RVV: ModuleType | None = ... + +baseline: ModuleType | None = ... + +@type_check_only +class SimdTargets(TypedDict): + SSE42: ModuleType | None + AVX2: ModuleType | None + FMA3: ModuleType | None + AVX512F: ModuleType | None + AVX512_SKX: ModuleType | None + ASIMD: ModuleType | None + NEON: ModuleType | None + RVV: ModuleType | None + baseline: ModuleType | None + +targets: SimdTargets = ... + +def clear_floatstatus() -> None: ... +def get_floatstatus() -> int: ... 
diff --git a/numpy/_core/_string_helpers.py b/numpy/_core/_string_helpers.py index 8a64ab5a05e4..87085d4119dd 100644 --- a/numpy/_core/_string_helpers.py +++ b/numpy/_core/_string_helpers.py @@ -7,10 +7,10 @@ # Construct the translation tables directly # "A" = chr(65), "a" = chr(97) _all_chars = tuple(map(chr, range(256))) -_ascii_upper = _all_chars[65:65+26] -_ascii_lower = _all_chars[97:97+26] -LOWER_TABLE = _all_chars[:65] + _ascii_lower + _all_chars[65+26:] -UPPER_TABLE = _all_chars[:97] + _ascii_upper + _all_chars[97+26:] +_ascii_upper = _all_chars[65:65 + 26] +_ascii_lower = _all_chars[97:97 + 26] +LOWER_TABLE = _all_chars[:65] + _ascii_lower + _all_chars[65 + 26:] +UPPER_TABLE = _all_chars[:97] + _ascii_upper + _all_chars[97 + 26:] def english_lower(s): diff --git a/numpy/_core/_string_helpers.pyi b/numpy/_core/_string_helpers.pyi new file mode 100644 index 000000000000..6a85832b7a93 --- /dev/null +++ b/numpy/_core/_string_helpers.pyi @@ -0,0 +1,12 @@ +from typing import Final + +_all_chars: Final[tuple[str, ...]] = ... +_ascii_upper: Final[tuple[str, ...]] = ... +_ascii_lower: Final[tuple[str, ...]] = ... + +LOWER_TABLE: Final[tuple[str, ...]] = ... +UPPER_TABLE: Final[tuple[str, ...]] = ... + +def english_lower(s: str) -> str: ... +def english_upper(s: str) -> str: ... +def english_capitalize(s: str) -> str: ... 
diff --git a/numpy/_core/_type_aliases.py b/numpy/_core/_type_aliases.py index 80a59e7b3f52..943955705083 100644 --- a/numpy/_core/_type_aliases.py +++ b/numpy/_core/_type_aliases.py @@ -18,7 +18,7 @@ """ import numpy._core.multiarray as ma -from numpy._core.multiarray import typeinfo, dtype +from numpy._core.multiarray import dtype, typeinfo ###################################### # Building `sctypeDict` and `allTypes` @@ -36,6 +36,7 @@ for _abstract_type_name in _abstract_type_names: allTypes[_abstract_type_name] = getattr(ma, _abstract_type_name) + del _abstract_type_name for k, v in typeinfo.items(): if k.startswith("NPY_") and v not in c_names_dict: @@ -44,6 +45,8 @@ concrete_type = v.type allTypes[k] = concrete_type sctypeDict[k] = concrete_type + del concrete_type + del k, v _aliases = { "double": "float64", @@ -60,15 +63,15 @@ for k, v in _aliases.items(): sctypeDict[k] = allTypes[v] allTypes[k] = allTypes[v] + del k, v # extra aliases are added only to `sctypeDict` # to support dtype name access, such as`np.dtype("float")` -_extra_aliases = { +_extra_aliases = { "float": "float64", "complex": "complex128", "object": "object_", "bytes": "bytes_", - "a": "bytes_", "int": "int_", "str": "str_", "unicode": "str_", @@ -76,18 +79,21 @@ for k, v in _extra_aliases.items(): sctypeDict[k] = allTypes[v] + del k, v # include extended precision sized aliases for is_complex, full_name in [(False, "longdouble"), (True, "clongdouble")]: - longdouble_type: type = allTypes[full_name] + longdouble_type = allTypes[full_name] - bits: int = dtype(longdouble_type).itemsize * 8 - base_name: str = "complex" if is_complex else "float" - extended_prec_name: str = f"{base_name}{bits}" + bits = dtype(longdouble_type).itemsize * 8 + base_name = "complex" if is_complex else "float" + extended_prec_name = f"{base_name}{bits}" if extended_prec_name not in allTypes: sctypeDict[extended_prec_name] = longdouble_type allTypes[extended_prec_name] = longdouble_type + del is_complex, full_name, 
longdouble_type, bits, base_name, extended_prec_name + #################### # Building `sctypes` @@ -104,16 +110,21 @@ # find proper group for each concrete type for type_group, abstract_type in [ - ("int", ma.signedinteger), ("uint", ma.unsignedinteger), - ("float", ma.floating), ("complex", ma.complexfloating), + ("int", ma.signedinteger), ("uint", ma.unsignedinteger), + ("float", ma.floating), ("complex", ma.complexfloating), ("others", ma.generic) ]: if issubclass(concrete_type, abstract_type): sctypes[type_group].add(concrete_type) + del type_group, abstract_type break + del type_info, concrete_type + # sort sctype groups by bitsize for sctype_key in sctypes.keys(): sctype_list = list(sctypes[sctype_key]) sctype_list.sort(key=lambda x: dtype(x).itemsize) sctypes[sctype_key] = sctype_list + + del sctype_key, sctype_list diff --git a/numpy/_core/_type_aliases.pyi b/numpy/_core/_type_aliases.pyi index 1adaa933239e..c7efe989caa5 100644 --- a/numpy/_core/_type_aliases.pyi +++ b/numpy/_core/_type_aliases.pyi @@ -1,3 +1,86 @@ -from numpy import generic +from collections.abc import Collection +from typing import Final, Literal as L, TypedDict, type_check_only -sctypeDict: dict[int | str, type[generic]] +import numpy as np + +sctypeDict: Final[dict[str, type[np.generic]]] +allTypes: Final[dict[str, type[np.generic]]] + +@type_check_only +class _CNamesDict(TypedDict): + BOOL: np.dtype[np.bool] + HALF: np.dtype[np.half] + FLOAT: np.dtype[np.single] + DOUBLE: np.dtype[np.double] + LONGDOUBLE: np.dtype[np.longdouble] + CFLOAT: np.dtype[np.csingle] + CDOUBLE: np.dtype[np.cdouble] + CLONGDOUBLE: np.dtype[np.clongdouble] + STRING: np.dtype[np.bytes_] + UNICODE: np.dtype[np.str_] + VOID: np.dtype[np.void] + OBJECT: np.dtype[np.object_] + DATETIME: np.dtype[np.datetime64] + TIMEDELTA: np.dtype[np.timedelta64] + BYTE: np.dtype[np.byte] + UBYTE: np.dtype[np.ubyte] + SHORT: np.dtype[np.short] + USHORT: np.dtype[np.ushort] + INT: np.dtype[np.intc] + UINT: np.dtype[np.uintc] + LONG: 
np.dtype[np.long] + ULONG: np.dtype[np.ulong] + LONGLONG: np.dtype[np.longlong] + ULONGLONG: np.dtype[np.ulonglong] + +c_names_dict: Final[_CNamesDict] + +type _AbstractTypeName = L[ + "generic", + "flexible", + "character", + "number", + "integer", + "inexact", + "unsignedinteger", + "signedinteger", + "floating", + "complexfloating", +] +_abstract_type_names: Final[set[_AbstractTypeName]] + +@type_check_only +class _AliasesType(TypedDict): + double: L["float64"] + cdouble: L["complex128"] + single: L["float32"] + csingle: L["complex64"] + half: L["float16"] + bool_: L["bool"] + int_: L["intp"] + uint: L["intp"] + +_aliases: Final[_AliasesType] + +@type_check_only +class _ExtraAliasesType(TypedDict): + float: L["float64"] + complex: L["complex128"] + object: L["object_"] + bytes: L["bytes_"] + a: L["bytes_"] + int: L["int_"] + str: L["str_"] + unicode: L["str_"] + +_extra_aliases: Final[_ExtraAliasesType] + +@type_check_only +class _SCTypes(TypedDict): + int: Collection[type[np.signedinteger]] + uint: Collection[type[np.unsignedinteger]] + float: Collection[type[np.floating]] + complex: Collection[type[np.complexfloating]] + others: Collection[type[np.flexible | np.bool | np.object_]] + +sctypes: Final[_SCTypes] diff --git a/numpy/_core/_ufunc_config.py b/numpy/_core/_ufunc_config.py index fe09ff873c86..6a7476670d95 100644 --- a/numpy/_core/_ufunc_config.py +++ b/numpy/_core/_ufunc_config.py @@ -4,17 +4,15 @@ This provides helpers which wrap `_get_extobj_dict` and `_make_extobj`, and `_extobj_contextvar` from umath. 
""" -import collections.abc -import contextlib -import contextvars import functools -from .._utils import set_module -from .umath import _make_extobj, _get_extobj_dict, _extobj_contextvar +from numpy._utils import set_module + +from .umath import _extobj_contextvar, _get_extobj_dict, _make_extobj __all__ = [ "seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall", - "errstate", '_no_nep50_warning' + "errstate" ] @@ -59,6 +57,7 @@ def seterr(all=None, divide=None, over=None, under=None, invalid=None): seterrcall : Set a callback function for the 'call' mode. geterr, geterrcall, errstate + Notes ----- The floating-point exceptions are defined in the IEEE 754 standard [1]_: @@ -70,13 +69,16 @@ def seterr(all=None, divide=None, over=None, under=None, invalid=None): - Invalid operation: result is not an expressible number, typically indicates that a NaN was produced. + **Concurrency note:** see :ref:`fp_error_handling` + .. [1] https://en.wikipedia.org/wiki/IEEE_754 Examples -------- + >>> import numpy as np >>> orig_settings = np.seterr(all='ignore') # seterr to known value >>> np.int16(32000) * np.int16(3) - 30464 + np.int16(30464) >>> np.seterr(over='raise') {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'} >>> old_settings = np.seterr(all='warn', over='raise') @@ -89,7 +91,7 @@ def seterr(all=None, divide=None, over=None, under=None, invalid=None): >>> np.geterr() {'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'} >>> np.int16(32000) * np.int16(3) - 30464 + np.int16(30464) >>> np.seterr(**orig_settings) # restore original {'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'} @@ -128,8 +130,11 @@ def geterr(): For complete documentation of the types of floating-point exceptions and treatment options, see `seterr`. 
+ **Concurrency note:** see :doc:`/reference/routines.err` + Examples -------- + >>> import numpy as np >>> np.geterr() {'divide': 'warn', 'over': 'warn', 'under': 'ignore', 'invalid': 'warn'} >>> np.arange(3.) / np.arange(3.) # doctest: +SKIP @@ -172,10 +177,15 @@ def setbufsize(size): bufsize : int Previous size of ufunc buffer in bytes. + Notes + ----- + **Concurrency note:** see :doc:`/reference/routines.err` + Examples -------- When exiting a `numpy.errstate` context manager the bufsize is restored: + >>> import numpy as np >>> with np.errstate(): ... np.setbufsize(4096) ... print(np.getbufsize()) @@ -186,6 +196,8 @@ def setbufsize(size): 8192 """ + if size < 0: + raise ValueError("buffer size must be non-negative") old = _get_extobj_dict()["bufsize"] extobj = _make_extobj(bufsize=size) _extobj_contextvar.set(extobj) @@ -202,8 +214,15 @@ def getbufsize(): getbufsize : int Size of ufunc buffer in bytes. + Notes + ----- + + **Concurrency note:** see :doc:`/reference/routines.err` + + Examples -------- + >>> import numpy as np >>> np.getbufsize() 8192 @@ -252,6 +271,11 @@ def seterrcall(func): -------- seterr, geterr, geterrcall + Notes + ----- + + **Concurrency note:** see :doc:`/reference/routines.err` + Examples -------- Callback upon error: @@ -260,6 +284,8 @@ def seterrcall(func): ... print("Floating point error (%s), with flag %s" % (type, flag)) ... + >>> import numpy as np + >>> orig_handler = np.seterrcall(err_handler) >>> orig_err = np.seterr(all='call') @@ -325,8 +351,11 @@ def geterrcall(): For complete documentation of the types of floating-point exceptions and treatment options, see `seterr`. + **Concurrency note:** see :ref:`fp_error_handling` + Examples -------- + >>> import numpy as np >>> np.geterrcall() # we did not yet set a handler, returns None >>> orig_settings = np.seterr(all='call') @@ -392,8 +421,11 @@ class errstate: For complete documentation of the types of floating-point exceptions and treatment options, see `seterr`. 
+ **Concurrency note:** see :ref:`fp_error_handling` + Examples -------- + >>> import numpy as np >>> olderr = np.seterr(all='ignore') # Set error handling to known state. >>> np.arange(3) / 0. @@ -418,7 +450,14 @@ class errstate: """ __slots__ = ( - "_call", "_all", "_divide", "_over", "_under", "_invalid", "_token") + "_all", + "_call", + "_divide", + "_invalid", + "_over", + "_token", + "_under", + ) def __init__(self, *, call=_Unspecified, all=None, divide=None, over=None, under=None, invalid=None): @@ -474,22 +513,3 @@ def inner(*args, **kwargs): _extobj_contextvar.reset(_token) return inner - - -NO_NEP50_WARNING = contextvars.ContextVar("_no_nep50_warning", default=False) - -@set_module('numpy') -@contextlib.contextmanager -def _no_nep50_warning(): - """ - Context manager to disable NEP 50 warnings. This context manager is - only relevant if the NEP 50 warnings are enabled globally (which is not - thread/context safe). - - This warning context manager itself is fully safe, however. 
- """ - token = NO_NEP50_WARNING.set(True) - try: - yield - finally: - NO_NEP50_WARNING.reset(token) diff --git a/numpy/_core/_ufunc_config.pyi b/numpy/_core/_ufunc_config.pyi index f56504507ac0..039aa1d51223 100644 --- a/numpy/_core/_ufunc_config.pyi +++ b/numpy/_core/_ufunc_config.pyi @@ -1,37 +1,67 @@ +from _typeshed import SupportsWrite from collections.abc import Callable -from typing import Any, Literal, TypedDict +from types import TracebackType +from typing import Any, Final, Literal, TypedDict, type_check_only -from numpy import _SupportsWrite +__all__ = [ + "seterr", + "geterr", + "setbufsize", + "getbufsize", + "seterrcall", + "geterrcall", + "errstate", +] -_ErrKind = Literal["ignore", "warn", "raise", "call", "print", "log"] -_ErrFunc = Callable[[str, int], Any] +type _ErrKind = Literal["ignore", "warn", "raise", "call", "print", "log"] +type _ErrCall = Callable[[str, int], Any] | SupportsWrite[str] +@type_check_only class _ErrDict(TypedDict): divide: _ErrKind over: _ErrKind under: _ErrKind invalid: _ErrKind -class _ErrDictOptional(TypedDict, total=False): - all: None | _ErrKind - divide: None | _ErrKind - over: None | _ErrKind - under: None | _ErrKind - invalid: None | _ErrKind +### + +class _unspecified: ... + +_Unspecified: Final[_unspecified] + +class errstate: + __slots__ = "_all", "_call", "_divide", "_invalid", "_over", "_token", "_under" + + def __init__( + self, + /, + *, + call: _ErrCall | _unspecified = ..., # = _Unspecified + all: _ErrKind | None = None, + divide: _ErrKind | None = None, + over: _ErrKind | None = None, + under: _ErrKind | None = None, + invalid: _ErrKind | None = None, + ) -> None: ... + def __call__[FuncT: Callable[..., object]](self, /, func: FuncT) -> FuncT: ... + def __enter__(self) -> None: ... + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + /, + ) -> None: ... 
def seterr( - all: None | _ErrKind = ..., - divide: None | _ErrKind = ..., - over: None | _ErrKind = ..., - under: None | _ErrKind = ..., - invalid: None | _ErrKind = ..., + all: _ErrKind | None = None, + divide: _ErrKind | None = None, + over: _ErrKind | None = None, + under: _ErrKind | None = None, + invalid: _ErrKind | None = None, ) -> _ErrDict: ... def geterr() -> _ErrDict: ... def setbufsize(size: int) -> int: ... def getbufsize() -> int: ... -def seterrcall( - func: None | _ErrFunc | _SupportsWrite[str] -) -> None | _ErrFunc | _SupportsWrite[str]: ... -def geterrcall() -> None | _ErrFunc | _SupportsWrite[str]: ... - -# See `numpy/__init__.pyi` for the `errstate` class and `no_nep5_warnings` +def seterrcall(func: _ErrCall | None) -> _ErrCall | None: ... +def geterrcall() -> _ErrCall | None: ... diff --git a/numpy/_core/_umath_tests.pyi b/numpy/_core/_umath_tests.pyi new file mode 100644 index 000000000000..696cec3b755e --- /dev/null +++ b/numpy/_core/_umath_tests.pyi @@ -0,0 +1,47 @@ +# undocumented internal testing module for ufunc features, defined in +# numpy/_core/src/umath/_umath_tests.c.src + +from typing import Final, Literal as L, TypedDict, type_check_only + +import numpy as np +from numpy._typing import _GUFunc_Nin2_Nout1, _UFunc_Nin1_Nout1, _UFunc_Nin2_Nout1 + +@type_check_only +class _TestDispatchResult(TypedDict): + func: str # e.g. 'func_AVX2' + var: str # e.g. 'var_AVX2' + func_xb: str # e.g. 'func_AVX2' + var_xb: str # e.g. 'var_AVX2' + all: list[str] # e.g. ['func_AVX2', 'func_SSE41', 'func'] + +### + +# undocumented +def test_signature( + nin: int, nout: int, signature: str, / +) -> tuple[ + L[0, 1], # core_enabled (0 for scalar ufunc; 1 for generalized ufunc) + tuple[int, ...] | None, # core_num_dims + tuple[int, ...] | None, # core_dim_ixs + tuple[int, ...] | None, # core_dim_flags + tuple[int, ...] | None, # core_dim_sizes +]: ... + +# undocumented +def test_dispatch() -> _TestDispatchResult: ... 
+ +# undocumented ufuncs and gufuncs +always_error: Final[_UFunc_Nin2_Nout1[L["always_error"], L[1], None]] = ... +always_error_unary: Final[_UFunc_Nin1_Nout1[L["always_error_unary"], L[1], None]] = ... +always_error_gufunc: Final[_GUFunc_Nin2_Nout1[L["always_error_gufunc"], L[1], None, L["(i),()->()"]]] = ... +inner1d: Final[_GUFunc_Nin2_Nout1[L["inner1d"], L[2], None, L["(i),(i)->()"]]] = ... +innerwt: Final[np.ufunc] = ... # we have no specialized type for 3->1 gufuncs +matrix_multiply: Final[_GUFunc_Nin2_Nout1[L["matrix_multiply"], L[3], None, L["(m,n),(n,p)->(m,p)"]]] = ... +matmul: Final[_GUFunc_Nin2_Nout1[L["matmul"], L[3], None, L["(m?,n),(n,p?)->(m?,p?)"]]] = ... +euclidean_pdist: Final[_GUFunc_Nin2_Nout1[L["euclidean_pdist"], L[2], None, L["(n,d)->(p)"]]] = ... +cumsum: Final[np.ufunc] = ... # we have no specialized type for 1->1 gufuncs +inner1d_no_doc: Final[_GUFunc_Nin2_Nout1[L["inner1d_no_doc"], L[2], None, L["(i),(i)->()"]]] = ... +cross1d: Final[_GUFunc_Nin2_Nout1[L["cross1d"], L[2], None, L["(3),(3)->(3)"]]] = ... +_pickleable_module_global_ufunc: Final[np.ufunc] = ... # 0->0 ufunc; segfaults if called +indexed_negative: Final[_UFunc_Nin1_Nout1[L["indexed_negative"], L[0], L[0]]] = ... # ntypes=0; can't be called +conv1d_full: Final[_GUFunc_Nin2_Nout1[L["conv1d_full"], L[1], None, L["(m),(n)->(p)"]]] = ... diff --git a/numpy/_core/arrayprint.py b/numpy/_core/arrayprint.py index d12746c7ce52..da4c6a95c47c 100644 --- a/numpy/_core/arrayprint.py +++ b/numpy/_core/arrayprint.py @@ -25,45 +25,39 @@ import functools import numbers import sys + try: from _thread import get_ident except ImportError: from _dummy_thread import get_ident +import contextlib +import operator +import warnings + import numpy as np + from . import numerictypes as _nt -from .umath import absolute, isinf, isfinite, isnat -from . 
import multiarray -from .multiarray import (array, dragon4_positional, dragon4_scientific, - datetime_as_string, datetime_data, ndarray, - set_legacy_print_mode) from .fromnumeric import any -from .numeric import concatenate, asarray, errstate -from .numerictypes import (longlong, intc, int_, float64, complex128, - flexible) +from .multiarray import ( + array, + datetime_as_string, + datetime_data, + dragon4_positional, + dragon4_scientific, + ndarray, +) +from .numeric import asarray, concatenate, errstate +from .numerictypes import complex128, flexible, float64, int_ from .overrides import array_function_dispatch, set_module -import operator -import warnings -import contextlib +from .printoptions import format_options +from .umath import absolute, isfinite, isinf, isnat -_format_options = { - 'edgeitems': 3, # repr N leading and trailing items of each dimension - 'threshold': 1000, # total items > triggers array summarization - 'floatmode': 'maxprec', - 'precision': 8, # precision of floating point representations - 'suppress': False, # suppress printing small floating values in exp format - 'linewidth': 75, - 'nanstr': 'nan', - 'infstr': 'inf', - 'sign': '-', - 'formatter': None, - # Internally stored as an int to simplify comparisons; converted from/to - # str/False on the way in/out. - 'legacy': sys.maxsize} def _make_options_dict(precision=None, threshold=None, edgeitems=None, linewidth=None, suppress=None, nanstr=None, infstr=None, - sign=None, formatter=None, floatmode=None, legacy=None): + sign=None, formatter=None, floatmode=None, legacy=None, + override_repr=None): """ Make a dictionary out of the non-None arguments, plus conversion of *legacy* and sanity checks. 
@@ -77,12 +71,18 @@ def _make_options_dict(precision=None, threshold=None, edgeitems=None, modes = ['fixed', 'unique', 'maxprec', 'maxprec_equal'] if floatmode not in modes + [None]: raise ValueError("floatmode option must be one of " + - ", ".join('"{}"'.format(m) for m in modes)) + ", ".join(f'"{m}"' for m in modes)) if sign not in [None, '-', '+', ' ']: raise ValueError("sign option must be one of ' ', '+', or '-'") - if legacy == False: + if legacy is False: + options['legacy'] = sys.maxsize + elif legacy == False: + warnings.warn( + f"Passing `legacy={legacy!r}` is deprecated.", + FutureWarning, stacklevel=3 + ) options['legacy'] = sys.maxsize elif legacy == '1.13': options['legacy'] = 113 @@ -90,12 +90,16 @@ def _make_options_dict(precision=None, threshold=None, edgeitems=None, options['legacy'] = 121 elif legacy == '1.25': options['legacy'] = 125 + elif legacy == '2.1': + options['legacy'] = 201 + elif legacy == '2.2': + options['legacy'] = 202 elif legacy is None: pass # OK, do nothing. else: warnings.warn( "legacy printing option can currently only be '1.13', '1.21', " - "'1.25', or `False`", stacklevel=3) + "'1.25', '2.1', '2.2' or `False`", stacklevel=3) if threshold is not None: # forbid the bad threshold arg suggested by stack overflow, gh-12351 @@ -119,7 +123,7 @@ def _make_options_dict(precision=None, threshold=None, edgeitems=None, def set_printoptions(precision=None, threshold=None, edgeitems=None, linewidth=None, suppress=None, nanstr=None, infstr=None, formatter=None, sign=None, floatmode=None, - *, legacy=None): + *, legacy=None, override_repr=None): """ Set printing options. @@ -217,28 +221,48 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, by not inserting spaces after commas that separate fields and after colons. + If set to ``'1.25'`` approximates printing of 1.25 which mainly means + that numeric scalars are printed without their type information, e.g. + as ``3.0`` rather than ``np.float64(3.0)``. 
+ + If set to ``'2.1'``, shape information is not given when arrays are + summarized (i.e., multiple elements replaced with ``...``). + + If set to ``'2.2'``, the transition to use scientific notation for + printing ``np.float16`` and ``np.float32`` types may happen later or + not at all for larger values. + If set to `False`, disables legacy mode. Unrecognized strings will be ignored with a warning for forward compatibility. - .. versionadded:: 1.14.0 .. versionchanged:: 1.22.0 + .. versionchanged:: 2.2 + + override_repr: callable, optional + If set a passed function will be used for generating arrays' repr. + Other options will be ignored. See Also -------- get_printoptions, printoptions, array2string + Notes ----- - `formatter` is always reset with a call to `set_printoptions`. - Use `printoptions` as a context manager to set the values temporarily. + * ``formatter`` is always reset with a call to `set_printoptions`. + * Use `printoptions` as a context manager to set the values temporarily. + * These print options apply only to NumPy ndarrays, not to scalars. + + **Concurrency note:** see :ref:`text_formatting_options` Examples -------- Floating point precision can be set: + >>> import numpy as np >>> np.set_printoptions(precision=4) >>> np.array([1.123456789]) [1.1235] @@ -247,7 +271,7 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, >>> np.set_printoptions(threshold=5) >>> np.arange(10) - array([0, 1, 2, ..., 7, 8, 9]) + array([0, 1, 2, ..., 7, 8, 9], shape=(10,)) Small results can be suppressed: @@ -280,27 +304,32 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, >>> with np.printoptions(precision=2, suppress=True, threshold=5): ... np.linspace(0, 10, 10) - array([ 0. , 1.11, 2.22, ..., 7.78, 8.89, 10. ]) + array([ 0. , 1.11, 2.22, ..., 7.78, 8.89, 10. 
], shape=(10,)) """ - opt = _make_options_dict(precision, threshold, edgeitems, linewidth, - suppress, nanstr, infstr, sign, formatter, - floatmode, legacy) - # formatter is always reset - opt['formatter'] = formatter - _format_options.update(opt) - - # set the C variable for legacy mode - if _format_options['legacy'] == 113: - set_legacy_print_mode(113) - # reset the sign option in legacy mode to avoid confusion - _format_options['sign'] = '-' - elif _format_options['legacy'] == 121: - set_legacy_print_mode(121) - elif _format_options['legacy'] == 125: - set_legacy_print_mode(125) - elif _format_options['legacy'] == sys.maxsize: - set_legacy_print_mode(0) + _set_printoptions(precision, threshold, edgeitems, linewidth, suppress, + nanstr, infstr, formatter, sign, floatmode, + legacy=legacy, override_repr=override_repr) + + +def _set_printoptions(precision=None, threshold=None, edgeitems=None, + linewidth=None, suppress=None, nanstr=None, + infstr=None, formatter=None, sign=None, floatmode=None, + *, legacy=None, override_repr=None): + new_opt = _make_options_dict(precision, threshold, edgeitems, linewidth, + suppress, nanstr, infstr, sign, formatter, + floatmode, legacy) + # formatter and override_repr are always reset + new_opt['formatter'] = formatter + new_opt['override_repr'] = override_repr + + updated_opt = format_options.get() | new_opt + updated_opt.update(new_opt) + + if updated_opt['legacy'] == 113: + updated_opt['sign'] = '-' + + return format_options.set(updated_opt) @set_module('numpy') @@ -320,20 +349,29 @@ def get_printoptions(): - suppress : bool - nanstr : str - infstr : str - - formatter : dict of callables - sign : str + - formatter : dict of callables + - floatmode : str + - legacy : str or False For a full description of these options, see `set_printoptions`. + Notes + ----- + These print options apply only to NumPy ndarrays, not to scalars. 
+ + **Concurrency note:** see :ref:`text_formatting_options` + See Also -------- set_printoptions, printoptions Examples -------- + >>> import numpy as np >>> np.get_printoptions() - {'edgeitems': 3, 'threshold': 1000, ..., 'legacy': False} + {'edgeitems': 3, 'threshold': 1000, ..., 'override_repr': None} >>> np.get_printoptions()['linewidth'] 75 @@ -342,16 +380,17 @@ def get_printoptions(): 100 """ - opts = _format_options.copy() + opts = format_options.get().copy() opts['legacy'] = { - 113: '1.13', 121: '1.21', 125: '1.25', sys.maxsize: False, + 113: '1.13', 121: '1.21', 125: '1.25', 201: '2.1', + 202: '2.2', sys.maxsize: False, }[opts['legacy']] return opts def _get_legacy_print_mode(): """Return the legacy print mode as an int.""" - return _format_options['legacy'] + return format_options.get()['legacy'] @set_module('numpy') @@ -365,6 +404,7 @@ def printoptions(*args, **kwargs): Examples -------- + >>> import numpy as np >>> from numpy.testing import assert_equal >>> with np.printoptions(precision=2): @@ -380,13 +420,19 @@ def printoptions(*args, **kwargs): -------- set_printoptions, get_printoptions + Notes + ----- + These print options apply only to NumPy ndarrays, not to scalars. 
+ + **Concurrency note:** see :ref:`text_formatting_options` + """ - opts = np.get_printoptions() + token = _set_printoptions(*args, **kwargs) + try: - np.set_printoptions(*args, **kwargs) - yield np.get_printoptions() + yield get_printoptions() finally: - np.set_printoptions(**opts) + format_options.reset(token) def _leading_trailing(a, edgeitems, index=()): @@ -400,7 +446,7 @@ def _leading_trailing(a, edgeitems, index=()): if axis == a.ndim: return a[index] - if a.shape[axis] > 2*edgeitems: + if a.shape[axis] > 2 * edgeitems: return concatenate(( _leading_trailing(a, edgeitems, index + np.index_exp[:edgeitems]), _leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:]) @@ -569,7 +615,7 @@ def _array2string(a, options, separator=' ', prefix=""): # skip over "[" next_line_prefix = " " # skip over array( - next_line_prefix += " "*len(prefix) + next_line_prefix += " " * len(prefix) lst = _formatArray(a, format_function, options['linewidth'], next_line_prefix, separator, options['edgeitems'], @@ -580,18 +626,18 @@ def _array2string(a, options, separator=' ', prefix=""): def _array2string_dispatcher( a, max_line_width=None, precision=None, suppress_small=None, separator=None, prefix=None, - style=None, formatter=None, threshold=None, + *, formatter=None, threshold=None, edgeitems=None, sign=None, floatmode=None, suffix=None, - *, legacy=None): + legacy=None): return (a,) @array_function_dispatch(_array2string_dispatcher, module='numpy') def array2string(a, max_line_width=None, precision=None, suppress_small=None, separator=' ', prefix="", - style=np._NoValue, formatter=None, threshold=None, + *, formatter=None, threshold=None, edgeitems=None, sign=None, floatmode=None, suffix="", - *, legacy=None): + legacy=None): """ Return a string representation of an array. @@ -624,10 +670,6 @@ def array2string(a, max_line_width=None, precision=None, wrapping is forced at the column ``max_line_width - len(suffix)``. 
It should be noted that the content of prefix and suffix strings are not included in the output. - style : _NoValue, optional - Has no effect, do not use. - - .. deprecated:: 1.14.0 formatter : dict of callables, optional If not None, the keys should indicate the type(s) that the respective formatting function applies to. Callables should return a string. @@ -699,8 +741,6 @@ def array2string(a, max_line_width=None, precision=None, `False`, disables legacy mode. Unrecognized strings will be ignored with a warning for forward compatibility. - .. versionadded:: 1.14.0 - Returns ------- array_str : str @@ -726,6 +766,7 @@ def array2string(a, max_line_width=None, precision=None, Examples -------- + >>> import numpy as np >>> x = np.array([1e-16,1,2,3]) >>> np.array2string(x, precision=2, separator=',', ... suppress_small=True) @@ -744,20 +785,12 @@ def array2string(a, max_line_width=None, precision=None, overrides = _make_options_dict(precision, threshold, edgeitems, max_line_width, suppress_small, None, None, sign, formatter, floatmode, legacy) - options = _format_options.copy() + options = format_options.get().copy() options.update(overrides) if options['legacy'] <= 113: - if style is np._NoValue: - style = repr - if a.shape == () and a.dtype.names is None: - return style(a.item()) - elif style is not np._NoValue: - # Deprecation 11-9-2017 v1.14 - warnings.warn("'style' argument is deprecated and no longer functional" - " except in 1.13 'legacy' mode", - DeprecationWarning, stacklevel=2) + return repr(a.item()) if options['legacy'] > 113: options['linewidth'] -= len(suffix) @@ -798,7 +831,7 @@ def _extendLine_pretty(s, line, word, line_width, next_line_prefix, legacy): line = next_line_prefix + words[0] indent = next_line_prefix else: - indent = len(line)*' ' + indent = len(line) * ' ' line += words[0] for word in words[1::]: @@ -806,7 +839,7 @@ def _extendLine_pretty(s, line, word, line_width, next_line_prefix, legacy): line = indent + word suffix_length = 
max_word_length - len(words[-1]) - line += suffix_length*' ' + line += suffix_length * ' ' return s, line @@ -840,7 +873,7 @@ def recurser(index, hanging_indent, curr_width): next_width = curr_width - len(']') a_len = a.shape[axis] - show_summary = summary_insert and 2*edge_items < a_len + show_summary = summary_insert and 2 * edge_items < a_len if show_summary: leading_items = edge_items trailing_items = edge_items @@ -895,7 +928,7 @@ def recurser(index, hanging_indent, curr_width): # other axes - insert newlines between rows else: s = '' - line_sep = separator.rstrip() + '\n'*(axes_left - 1) + line_sep = separator.rstrip() + '\n' * (axes_left - 1) for i in range(leading_items): nested = recurser( @@ -931,14 +964,14 @@ def recurser(index, hanging_indent, curr_width): finally: # recursive closures have a cyclic reference to themselves, which # requires gc to collect (gh-10620). To avoid this problem, for - # performance and PyPy friendliness, we break the cycle: + # performance, we break the cycle: recurser = None def _none_or_positive_arg(x, name): if x is None: return -1 if x < 0: - raise ValueError("{} must be >= 0".format(name)) + raise ValueError(f"{name} must be >= 0") return x class FloatingFormat: @@ -967,7 +1000,6 @@ def __init__(self, data, precision, floatmode, suppress_small, sign=False, self.sign = sign self.exp_format = False self.large_exponent = False - self.fillFormat(data) def fillFormat(self, data): @@ -979,9 +1011,14 @@ def fillFormat(self, data): if len(abs_non_zero) != 0: max_val = np.max(abs_non_zero) min_val = np.min(abs_non_zero) + if self._legacy <= 202: + exp_cutoff_max = 1.e8 + else: + # consider data type while deciding the max cutoff for exp format + exp_cutoff_max = 10.**min(8, np.finfo(data.dtype).precision) with errstate(over='ignore'): # division can overflow - if max_val >= 1.e8 or (not self.suppress_small and - (min_val < 0.0001 or max_val/min_val > 1000.)): + if max_val >= exp_cutoff_max or (not self.suppress_small and + 
(min_val < 0.0001 or max_val / min_val > 1000.)): self.exp_format = True # do a first pass of printing all the numbers, to determine sizes @@ -1049,23 +1086,24 @@ def fillFormat(self, data): # if there are non-finite values, may need to increase pad_left if data.size != finite_vals.size: neginf = self.sign != '-' or any(data[isinf(data)] < 0) - nanlen = len(_format_options['nanstr']) - inflen = len(_format_options['infstr']) + neginf offset = self.pad_right + 1 # +1 for decimal pt + current_options = format_options.get() self.pad_left = max( - self.pad_left, nanlen - offset, inflen - offset + self.pad_left, len(current_options['nanstr']) - offset, + len(current_options['infstr']) + neginf - offset ) def __call__(self, x): if not np.isfinite(x): with errstate(invalid='ignore'): + current_options = format_options.get() if np.isnan(x): sign = '+' if self.sign == '+' else '' - ret = sign + _format_options['nanstr'] + ret = sign + current_options['nanstr'] else: # isinf sign = '-' if x < 0 else '+' if self.sign == '+' else '' - ret = sign + _format_options['infstr'] - return ' '*( + ret = sign + current_options['infstr'] + return ' ' * ( self.pad_left + self.pad_right + 1 - len(ret) ) + ret @@ -1151,6 +1189,7 @@ def format_float_scientific(x, precision=None, unique=True, trim='k', Examples -------- + >>> import numpy as np >>> np.format_float_scientific(np.float32(np.pi)) '3.1415927e+00' >>> s = np.float32(1.23e24) @@ -1238,6 +1277,7 @@ def format_float_positional(x, precision=None, unique=True, Examples -------- + >>> import numpy as np >>> np.format_float_positional(np.float32(np.pi)) '3.1415927' >>> np.format_float_positional(np.float16(np.pi)) @@ -1335,7 +1375,7 @@ def __init__(self, data): if len(non_nat) < data.size: # data contains a NaT max_str_len = max(max_str_len, 5) - self._format = '%{}s'.format(max_str_len) + self._format = f'%{max_str_len}s' self._nat = "'NaT'".rjust(max_str_len) def _format_non_nat(self, x): @@ -1400,7 +1440,7 @@ def format_array(self, 
a): if np.ndim(a) == 0: return self.format_function(a) - if self.summary_insert and a.shape[0] > 2*self.edge_items: + if self.summary_insert and a.shape[0] > 2 * self.edge_items: formatted = ( [self.format_array(a_) for a_ in a[:self.edge_items]] + [self.summary_insert] @@ -1444,9 +1484,9 @@ def __call__(self, x): for field, format_function in zip(x, self.format_functions) ] if len(str_fields) == 1: - return "({},)".format(str_fields[0]) + return f"({str_fields[0]},)" else: - return "({})".format(", ".join(str_fields)) + return f"({', '.join(str_fields)})" def _void_scalar_to_string(x, is_repr=True): @@ -1455,10 +1495,10 @@ def _void_scalar_to_string(x, is_repr=True): scalartypes.c.src code, and is placed here because it uses the elementwise formatters defined above. """ - options = _format_options.copy() + options = format_options.get().copy() if options["legacy"] <= 125: - return StructuredVoidFormat.from_data(array(x), **_format_options)(x) + return StructuredVoidFormat.from_data(array(x), **options)(x) if options.get('formatter') is None: options['formatter'] = {} @@ -1492,6 +1532,7 @@ def dtype_is_implied(dtype): Examples -------- + >>> import numpy as np >>> np._core.arrayprint.dtype_is_implied(int) True >>> np.array([1, 2, 3], int) @@ -1502,7 +1543,7 @@ def dtype_is_implied(dtype): array([1, 2, 3], dtype=int8) """ dtype = np.dtype(dtype) - if _format_options['legacy'] <= 113 and dtype.type == np.bool: + if format_options.get()['legacy'] <= 113 and dtype.type == np.bool: return False # not just void types can be structured, and names are not part of the repr @@ -1534,14 +1575,14 @@ def dtype_short_repr(dtype): return str(dtype) elif issubclass(dtype.type, flexible): # handle these separately so they don't give garbage like str256 - return "'%s'" % str(dtype) + return f"'{str(dtype)}'" typename = dtype.name if not dtype.isnative: # deal with cases like dtype(' 0 - prefix = class_name + "(" - suffix = ")" if skipdtype else "," - - if (_format_options['legacy'] 
<= 113 and + if (current_options['legacy'] <= 113 and arr.shape == () and not arr.dtype.names): lst = repr(arr.item()) - elif arr.size > 0 or arr.shape == (0,): + else: lst = array2string(arr, max_line_width, precision, suppress_small, - ', ', prefix, suffix=suffix) - else: # show zero-length shape unless it is (0,) - lst = "[], shape=%s" % (repr(arr.shape),) - - arr_str = prefix + lst + suffix - - if skipdtype: - return arr_str - - dtype_str = "dtype={})".format(dtype_short_repr(arr.dtype)) - - # compute whether we should put dtype on a new line: Do so if adding the - # dtype would extend the last line past max_line_width. + ', ', prefix, suffix=")") + + # Add dtype and shape information if these cannot be inferred from + # the array string. + extras = [] + if ((arr.size == 0 and arr.shape != (0,)) + or (current_options['legacy'] > 210 + and arr.size > current_options['threshold'])): + extras.append(f"shape={arr.shape}") + if not dtype_is_implied(arr.dtype) or arr.size == 0: + extras.append(f"dtype={dtype_short_repr(arr.dtype)}") + + if not extras: + return prefix + lst + ")" + + arr_str = prefix + lst + "," + extra_str = ", ".join(extras) + ")" + # compute whether we should put extras on a new line: Do so if adding the + # extras would extend the last line past max_line_width. # Note: This line gives the correct result even when rfind returns -1. 
last_line_len = len(arr_str) - (arr_str.rfind('\n') + 1) spacer = " " - if _format_options['legacy'] <= 113: + if current_options['legacy'] <= 113: if issubclass(arr.dtype.type, flexible): - spacer = '\n' + ' '*len(class_name + "(") - elif last_line_len + len(dtype_str) + 1 > max_line_width: - spacer = '\n' + ' '*len(class_name + "(") + spacer = '\n' + ' ' * len(prefix) + elif last_line_len + len(extra_str) + 1 > max_line_width: + spacer = '\n' + ' ' * len(prefix) - return arr_str + spacer + dtype_str + return arr_str + spacer + extra_str def _array_repr_dispatcher( @@ -1633,6 +1681,7 @@ def array_repr(arr, max_line_width=None, precision=None, suppress_small=None): Examples -------- + >>> import numpy as np >>> np.array_repr(np.array([1,2])) 'array([1, 2])' >>> np.array_repr(np.ma.array([0.])) @@ -1660,7 +1709,7 @@ def _array_str_implementation( a, max_line_width=None, precision=None, suppress_small=None, array2string=array2string): """Internal version of array_str() that allows overriding array2string.""" - if (_format_options['legacy'] <= 113 and + if (format_options.get()['legacy'] <= 113 and a.shape == () and not a.dtype.names): return str(a.item()) @@ -1713,6 +1762,7 @@ def array_str(a, max_line_width=None, precision=None, suppress_small=None): Examples -------- + >>> import numpy as np >>> np.array_str(np.arange(3)) '[0 1 2]' @@ -1727,78 +1777,3 @@ def array_str(a, max_line_width=None, precision=None, suppress_small=None): array2string=_array2string_impl) _default_array_repr = functools.partial(_array_repr_implementation, array2string=_array2string_impl) - - -def set_string_function(f, repr=True): - """ - Set a Python function to be used when pretty printing arrays. - - .. deprecated:: 2.0 - Use `np.set_printoptions` instead with a formatter for custom - printing of NumPy objects. - - Parameters - ---------- - f : function or None - Function to be used to pretty print arrays. 
The function should expect - a single array argument and return a string of the representation of - the array. If None, the function is reset to the default NumPy function - to print arrays. - repr : bool, optional - If True (default), the function for pretty printing (``__repr__``) - is set, if False the function that returns the default string - representation (``__str__``) is set. - - See Also - -------- - set_printoptions, get_printoptions - - Examples - -------- - >>> from numpy._core.arrayprint import set_string_function - >>> def pprint(arr): - ... return 'HA! - What are you going to do now?' - ... - >>> set_string_function(pprint) - >>> a = np.arange(10) - >>> a - HA! - What are you going to do now? - >>> _ = a - >>> # [0 1 2 3 4 5 6 7 8 9] - - We can reset the function to the default: - - >>> set_string_function(None) - >>> a - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - - `repr` affects either pretty printing or normal string representation. - Note that ``__repr__`` is still affected by setting ``__str__`` - because the width of each array element in the returned string becomes - equal to the length of the result of ``__str__()``. - - >>> x = np.arange(4) - >>> set_string_function(lambda x:'random', repr=False) - >>> x.__str__() - 'random' - >>> x.__repr__() - 'array([0, 1, 2, 3])' - - """ - - # Deprecated in NumPy 2.0, 2023-07-11 - warnings.warn( - "`set_string_function` is deprecated. Use `np.set_printoptions` " - "with a formatter for custom printing NumPy objects. 
" - "(deprecated in NumPy 2.0)", - DeprecationWarning, - stacklevel=2 - ) - - if f is None: - if repr: - return multiarray.set_string_function(_default_array_repr, 1) - else: - return multiarray.set_string_function(_default_array_str, 0) - else: - return multiarray.set_string_function(f, repr) diff --git a/numpy/_core/arrayprint.pyi b/numpy/_core/arrayprint.pyi index 50f10ec694f0..d06c38539306 100644 --- a/numpy/_core/arrayprint.pyi +++ b/numpy/_core/arrayprint.pyi @@ -1,43 +1,55 @@ +# Aliases for builtins shadowed by classes to avoid annotations resolving to class members by ty +from builtins import object as py_object from collections.abc import Callable -from typing import Any, Literal, TypedDict, SupportsIndex # Using a private class is by no means ideal, but it is simply a consequence # of a `contextlib.context` returning an instance of aforementioned class from contextlib import _GeneratorContextManager +from typing import Any, Final, Literal, SupportsIndex, TypedDict, type_check_only import numpy as np -from numpy import ( - integer, - timedelta64, - datetime64, - floating, - complexfloating, - void, - longdouble, - clongdouble, -) from numpy._typing import NDArray, _CharLike_co, _FloatLike_co -_FloatMode = Literal["fixed", "unique", "maxprec", "maxprec_equal"] +__all__ = [ + "array2string", + "array_repr", + "array_str", + "format_float_positional", + "format_float_scientific", + "get_printoptions", + "printoptions", + "set_printoptions", +] +### + +type _FloatMode = Literal["fixed", "unique", "maxprec", "maxprec_equal"] +type _LegacyNoStyle = Literal["1.21", "1.25", "2.1", False] +type _Legacy = Literal["1.13", _LegacyNoStyle] +type _Sign = Literal["-", "+", " "] +type _Trim = Literal["k", ".", "0", "-"] +type _ReprFunc = Callable[[NDArray[Any]], str] + +@type_check_only class _FormatDict(TypedDict, total=False): bool: Callable[[np.bool], str] - int: Callable[[integer[Any]], str] - timedelta: Callable[[timedelta64], str] - datetime: Callable[[datetime64], 
str] - float: Callable[[floating[Any]], str] - longfloat: Callable[[longdouble], str] - complexfloat: Callable[[complexfloating[Any, Any]], str] - longcomplexfloat: Callable[[clongdouble], str] - void: Callable[[void], str] + int: Callable[[np.integer], str] + timedelta: Callable[[np.timedelta64], str] + datetime: Callable[[np.datetime64], str] + float: Callable[[np.floating], str] + longfloat: Callable[[np.longdouble], str] + complexfloat: Callable[[np.complexfloating], str] + longcomplexfloat: Callable[[np.clongdouble], str] + void: Callable[[np.void], str] numpystr: Callable[[_CharLike_co], str] - object: Callable[[object], str] - all: Callable[[object], str] - int_kind: Callable[[integer[Any]], str] - float_kind: Callable[[floating[Any]], str] - complex_kind: Callable[[complexfloating[Any, Any]], str] + object: Callable[[py_object], str] + all: Callable[[py_object], str] + int_kind: Callable[[np.integer], str] + float_kind: Callable[[np.floating], str] + complex_kind: Callable[[np.complexfloating], str] str_kind: Callable[[_CharLike_co], str] +@type_check_only class _FormatOptions(TypedDict): precision: int threshold: int @@ -46,89 +58,95 @@ class _FormatOptions(TypedDict): suppress: bool nanstr: str infstr: str - formatter: None | _FormatDict - sign: Literal["-", "+", " "] + formatter: _FormatDict | None + sign: _Sign floatmode: _FloatMode - legacy: Literal[False, "1.13", "1.21"] + legacy: _Legacy + +### + +__docformat__: Final = "restructuredtext" # undocumented def set_printoptions( - precision: None | SupportsIndex = ..., - threshold: None | int = ..., - edgeitems: None | int = ..., - linewidth: None | int = ..., - suppress: None | bool = ..., - nanstr: None | str = ..., - infstr: None | str = ..., - formatter: None | _FormatDict = ..., - sign: Literal[None, "-", "+", " "] = ..., - floatmode: None | _FloatMode = ..., + precision: SupportsIndex | None = None, + threshold: int | None = None, + edgeitems: int | None = None, + linewidth: int | None = None, + 
suppress: bool | None = None, + nanstr: str | None = None, + infstr: str | None = None, + formatter: _FormatDict | None = None, + sign: _Sign | None = None, + floatmode: _FloatMode | None = None, *, - legacy: Literal[None, False, "1.13", "1.21"] = ... + legacy: _Legacy | None = None, + override_repr: _ReprFunc | None = None, ) -> None: ... def get_printoptions() -> _FormatOptions: ... + +# public numpy export def array2string( a: NDArray[Any], - max_line_width: None | int = ..., - precision: None | SupportsIndex = ..., - suppress_small: None | bool = ..., - separator: str = ..., - prefix: str = ..., - # NOTE: With the `style` argument being deprecated, - # all arguments between `formatter` and `suffix` are de facto - # keyworld-only arguments + max_line_width: int | None = None, + precision: SupportsIndex | None = None, + suppress_small: bool | None = None, + separator: str = " ", + prefix: str = "", *, - formatter: None | _FormatDict = ..., - threshold: None | int = ..., - edgeitems: None | int = ..., - sign: Literal[None, "-", "+", " "] = ..., - floatmode: None | _FloatMode = ..., - suffix: str = ..., - legacy: Literal[None, False, "1.13", "1.21"] = ..., + formatter: _FormatDict | None = None, + threshold: int | None = None, + edgeitems: int | None = None, + sign: _Sign | None = None, + floatmode: _FloatMode | None = None, + suffix: str = "", + legacy: _Legacy | None = None, ) -> str: ... + def format_float_scientific( x: _FloatLike_co, - precision: None | int = ..., - unique: bool = ..., - trim: Literal["k", ".", "0", "-"] = ..., - sign: bool = ..., - pad_left: None | int = ..., - exp_digits: None | int = ..., - min_digits: None | int = ..., + precision: int | None = None, + unique: bool = True, + trim: _Trim = "k", + sign: bool = False, + pad_left: int | None = None, + exp_digits: int | None = None, + min_digits: int | None = None, ) -> str: ... 
def format_float_positional( x: _FloatLike_co, - precision: None | int = ..., - unique: bool = ..., - fractional: bool = ..., - trim: Literal["k", ".", "0", "-"] = ..., - sign: bool = ..., - pad_left: None | int = ..., - pad_right: None | int = ..., - min_digits: None | int = ..., + precision: int | None = None, + unique: bool = True, + fractional: bool = True, + trim: _Trim = "k", + sign: bool = False, + pad_left: int | None = None, + pad_right: int | None = None, + min_digits: int | None = None, ) -> str: ... def array_repr( arr: NDArray[Any], - max_line_width: None | int = ..., - precision: None | SupportsIndex = ..., - suppress_small: None | bool = ..., + max_line_width: int | None = None, + precision: SupportsIndex | None = None, + suppress_small: bool | None = None, ) -> str: ... def array_str( a: NDArray[Any], - max_line_width: None | int = ..., - precision: None | SupportsIndex = ..., - suppress_small: None | bool = ..., + max_line_width: int | None = None, + precision: SupportsIndex | None = None, + suppress_small: bool | None = None, ) -> str: ... def printoptions( - precision: None | SupportsIndex = ..., - threshold: None | int = ..., - edgeitems: None | int = ..., - linewidth: None | int = ..., - suppress: None | bool = ..., - nanstr: None | str = ..., - infstr: None | str = ..., - formatter: None | _FormatDict = ..., - sign: Literal[None, "-", "+", " "] = ..., - floatmode: None | _FloatMode = ..., + precision: SupportsIndex | None = ..., + threshold: int | None = ..., + edgeitems: int | None = ..., + linewidth: int | None = ..., + suppress: bool | None = ..., + nanstr: str | None = ..., + infstr: str | None = ..., + formatter: _FormatDict | None = ..., + sign: _Sign | None = None, + floatmode: _FloatMode | None = None, *, - legacy: Literal[None, False, "1.13", "1.21"] = ... + legacy: _Legacy | None = None, + override_repr: _ReprFunc | None = None, ) -> _GeneratorContextManager[_FormatOptions]: ... 
diff --git a/numpy/_core/code_generators/cversions.txt b/numpy/_core/code_generators/cversions.txt index ccba8a1c25b3..b058875d0455 100644 --- a/numpy/_core/code_generators/cversions.txt +++ b/numpy/_core/code_generators/cversions.txt @@ -74,5 +74,14 @@ 0x00000011 = ca1aebdad799358149567d9d93cbca09 # Version 18 (NumPy 2.0.0) -# Version 18 (NumPy 2.1.0) No change 0x00000012 = 2b8f1f4da822491ff030b2b37dff07e3 +# Version 19 (NumPy 2.1.0) Only header additions +# Version 19 (NumPy 2.2.0) No change +0x00000013 = 2b8f1f4da822491ff030b2b37dff07e3 +# Version 20 (NumPy 2.3.0) +0x00000014 = e56b74d32a934d085e7c3414cb9999b8, +# Version 21 (NumPy 2.4.0) +# Add 'same_value' casting, header additions. +# General loop registration for ufuncs, sort, and argsort +# Version 21 (NumPy 2.5.0) No change +0x00000015 = fbd24fc5b2ba4f7cd3606ec6128de7a5 diff --git a/numpy/_core/code_generators/genapi.py b/numpy/_core/code_generators/genapi.py index 27e42bcb4c14..bcd86dadfe88 100644 --- a/numpy/_core/code_generators/genapi.py +++ b/numpy/_core/code_generators/genapi.py @@ -7,22 +7,21 @@ """ import hashlib +import importlib.util import io import os import re import sys -import importlib.util import textwrap - from os.path import join def get_processor(): - # Convoluted because we can't import from numpy.distutils + # Convoluted because we can't import from numpy # (numpy is not yet built) conv_template_path = os.path.join( os.path.dirname(__file__), - '..', '..', 'distutils', 'conv_template.py' + '..', '..', '_build_utils', 'conv_template.py' ) spec = importlib.util.spec_from_file_location( 'conv_template', conv_template_path @@ -38,9 +37,10 @@ def get_processor(): __docformat__ = 'restructuredtext' # The files under src/ that are scanned for API functions -API_FILES = [join('multiarray', 'alloc.c'), +API_FILES = [join('multiarray', 'alloc.cpp'), join('multiarray', 'abstractdtypes.c'), join('multiarray', 'arrayfunction_override.c'), + join('multiarray', 'array_api_standard.c'), 
join('multiarray', 'array_assign_array.c'), join('multiarray', 'array_assign_scalar.c'), join('multiarray', 'array_coercion.c'), @@ -62,7 +62,7 @@ def get_processor(): join('multiarray', 'descriptor.c'), join('multiarray', 'dlpack.c'), join('multiarray', 'dtypemeta.c'), - join('multiarray', 'einsum.c.src'), + join('multiarray', 'einsum.cpp'), join('multiarray', 'public_dtype_api.c'), join('multiarray', 'flagsobject.c'), join('multiarray', 'getset.c'), @@ -84,7 +84,7 @@ def get_processor(): join('multiarray', 'stringdtype', 'static_string.c'), join('multiarray', 'strfuncs.c'), join('multiarray', 'usertypes.c'), - join('umath', 'dispatching.c'), + join('umath', 'dispatching.cpp'), join('umath', 'extobj.c'), join('umath', 'loops.c.src'), join('umath', 'reduction.c'), @@ -128,7 +128,7 @@ def add_guard(self, name, normal_define): class StealRef: def __init__(self, arg): - self.arg = arg # counting from 1 + self.arg = arg # counting from 1 def __str__(self): try: @@ -153,13 +153,13 @@ def _format_arg(self, typename, name): def __str__(self): argstr = ', '.join([self._format_arg(*a) for a in self.args]) if self.doc: - doccomment = '/* %s */\n' % self.doc + doccomment = f'/* {self.doc} */\n' else: doccomment = '' - return '%s%s %s(%s)' % (doccomment, self.return_type, self.name, argstr) + return f'{doccomment}{self.return_type} {self.name}({argstr})' def api_hash(self): - m = hashlib.md5() + m = hashlib.md5(usedforsecurity=False) m.update(remove_whitespace(self.return_type)) m.update('\000') m.update(self.name) @@ -176,7 +176,7 @@ def __init__(self, filename, lineno, msg): self.msg = msg def __str__(self): - return '%s:%s:%s' % (self.filename, self.lineno, self.msg) + return f'{self.filename}:{self.lineno}:{self.msg}' def skip_brackets(s, lbrac, rbrac): count = 0 @@ -187,12 +187,13 @@ def skip_brackets(s, lbrac, rbrac): count -= 1 if count == 0: return i - raise ValueError("no match '%s' for '%s' (%r)" % (lbrac, rbrac, s)) + raise ValueError(f"no match '{lbrac}' for 
'{rbrac}' ({s!r})") def split_arguments(argstr): arguments = [] current_argument = [] i = 0 + def finish_arg(): if current_argument: argstr = ''.join(current_argument).strip() @@ -211,8 +212,8 @@ def finish_arg(): finish_arg() elif c == '(': p = skip_brackets(argstr[i:], '(', ')') - current_argument += argstr[i:i+p] - i += p-1 + current_argument += argstr[i:i + p] + i += p - 1 else: current_argument += c i += 1 @@ -282,7 +283,7 @@ def find_functions(filename, tag='API'): if m: function_name = m.group(1) else: - raise ParseError(filename, lineno+1, + raise ParseError(filename, lineno + 1, 'could not find function name') function_args.append(line[m.end():]) state = STATE_ARGS @@ -342,7 +343,7 @@ def define_from_array_api_string(self): self.index) def array_api_define(self): - return " (void *) &%s" % self.name + return f" (void *) &{self.name}" def internal_define(self): if self.internal_type is None: @@ -374,12 +375,11 @@ def define_from_array_api_string(self): self.index) def array_api_define(self): - return " (%s *) &%s" % (self.type, self.name) + return f" ({self.type} *) &{self.name}" def internal_define(self): - astr = """\ -extern NPY_NO_EXPORT %(type)s %(name)s; -""" % {'type': self.type, 'name': self.name} + astr = f"""extern NPY_NO_EXPORT {self.type} {self.name}; +""" return astr # Dummy to be able to consistently use *Api instances for all items in the @@ -398,7 +398,7 @@ def define_from_array_api_string(self): self.index) def array_api_define(self): - return " (void *) &%s" % self.name + return f" (void *) &{self.name}" def internal_define(self): astr = """\ @@ -446,20 +446,19 @@ def define_from_array_api_string(self): return define def array_api_define(self): - return " (void *) %s" % self.name + return f" (void *) {self.name}" def internal_define(self): annstr = [str(a) for a in self.annotations] annstr = ' '.join(annstr) - astr = """\ -NPY_NO_EXPORT %s %s %s \\\n (%s);""" % (annstr, self.return_type, - self.name, - self._argtypes_string()) + astr = 
f"""NPY_NO_EXPORT {annstr} {self.return_type} {self.name} \\ + ({self._argtypes_string()});""" return astr def order_dict(d): """Order dict by its values.""" o = list(d.items()) + def _key(x): return x[1] + (x[0],) return sorted(o, key=_key) @@ -467,8 +466,7 @@ def _key(x): def merge_api_dicts(dicts): ret = {} for d in dicts: - for k, v in d.items(): - ret[k] = v + ret.update(d) return ret @@ -495,7 +493,7 @@ def check_api_dict(d): doubled[index] = [name] fmt = "Same index has been used twice in api definition: {}" val = ''.join( - '\n\tindex {} -> {}'.format(index, names) + f'\n\tindex {index} -> {names}' for index, names in doubled.items() if len(names) != 1 ) raise ValueError(fmt.format(val)) @@ -508,8 +506,7 @@ def check_api_dict(d): f"{indexes.intersection(removed)}") if indexes.union(removed) != expected: diff = expected.symmetric_difference(indexes.union(removed)) - msg = "There are some holes in the API indexing: " \ - "(symmetric diff is %s)" % diff + msg = f"There are some holes in the API indexing: (symmetric diff is {diff})" raise ValueError(msg) def get_api_functions(tagname, api_dict): @@ -532,7 +529,10 @@ def fullapi_hash(api_dicts): a.extend(name) a.extend(','.join(map(str, data))) - return hashlib.md5(''.join(a).encode('ascii')).hexdigest() + return hashlib.md5( + ''.join(a).encode('ascii'), usedforsecurity=False + ).hexdigest() + # To parse strings like 'hex = checksum' where hex is e.g. 
0x1234567F and # checksum a 128 bits md5 checksum (hex format as well) @@ -554,7 +554,7 @@ def main(): tagname = sys.argv[1] order_file = sys.argv[2] functions = get_api_functions(tagname, order_file) - m = hashlib.md5(tagname) + m = hashlib.md5(tagname, usedforsecurity=False) for func in functions: print(func) ah = func.api_hash() @@ -562,5 +562,6 @@ def main(): print(hex(int(ah, 16))) print(hex(int(m.hexdigest()[:8], 16))) + if __name__ == '__main__': main() diff --git a/numpy/_core/code_generators/generate_numpy_api.py b/numpy/_core/code_generators/generate_numpy_api.py index 7fc6ad1aaf89..23d678872ca4 100644 --- a/numpy/_core/code_generators/generate_numpy_api.py +++ b/numpy/_core/code_generators/generate_numpy_api.py @@ -1,12 +1,10 @@ #!/usr/bin/env python3 -import os import argparse +import os import genapi -from genapi import \ - TypeApi, GlobalVarApi, FunctionApi, BoolValuesApi - import numpy_api +from genapi import BoolValuesApi, FunctionApi, GlobalVarApi, TypeApi # use annotated api when running under cpychecker h_template = r""" @@ -65,6 +63,7 @@ { int st; PyObject *numpy = PyImport_ImportModule("numpy._core._multiarray_umath"); + PyObject *c_api; if (numpy == NULL && PyErr_ExceptionMatches(PyExc_ModuleNotFoundError)) { PyErr_Clear(); numpy = PyImport_ImportModule("numpy.core._multiarray_umath"); @@ -74,7 +73,7 @@ return -1; } - PyObject *c_api = PyObject_GetAttrString(numpy, "_ARRAY_API"); + c_api = PyObject_GetAttrString(numpy, "_ARRAY_API"); Py_DECREF(numpy); if (c_api == NULL) { return -1; @@ -158,6 +157,12 @@ return 0; } +#if (SWIG_VERSION < 0x040400) +#define _RETURN_VALUE NULL +#else +#define _RETURN_VALUE 0 +#endif + #define import_array() { \ if (_import_array() < 0) { \ PyErr_Print(); \ @@ -165,7 +170,7 @@ PyExc_ImportError, \ "numpy._core.multiarray failed to import" \ ); \ - return NULL; \ + return _RETURN_VALUE; \ } \ } @@ -191,7 +196,7 @@ #endif #endif -""" +""" # noqa: E501 c_template = r""" @@ -207,8 +212,8 @@ def generate_api(output_dir, 
force=False): basename = 'multiarray_api' - h_file = os.path.join(output_dir, '__%s.h' % basename) - c_file = os.path.join(output_dir, '__%s.c' % basename) + h_file = os.path.join(output_dir, f'__{basename}.h') + c_file = os.path.join(output_dir, f'__{basename}.c') targets = (h_file, c_file) sources = numpy_api.multiarray_api @@ -259,17 +264,18 @@ def do_generate_api(targets, sources): for name, val in types_api.items(): index = val[0] - internal_type = None if len(val) == 1 else val[1] + internal_type = None if len(val) == 1 else val[1] multiarray_api_dict[name] = TypeApi( name, index, 'PyTypeObject', api_name, internal_type) if len(multiarray_api_dict) != len(multiarray_api_index): keys_dict = set(multiarray_api_dict.keys()) keys_index = set(multiarray_api_index.keys()) + keys_index_dict = keys_index - keys_dict + keys_dict_index = keys_dict - keys_index raise AssertionError( - "Multiarray API size mismatch - " - "index has extra keys {}, dict has extra keys {}" - .format(keys_index - keys_dict, keys_dict - keys_index) + f"Multiarray API size mismatch - index has extra keys {keys_index_dict}, " + f"dict has extra keys {keys_dict_index}" ) extension_list = [] diff --git a/numpy/_core/code_generators/generate_ufunc_api.py b/numpy/_core/code_generators/generate_ufunc_api.py index 4bdbbdb9abac..265fe840f810 100644 --- a/numpy/_core/code_generators/generate_ufunc_api.py +++ b/numpy/_core/code_generators/generate_ufunc_api.py @@ -1,9 +1,9 @@ -import os import argparse +import os import genapi -from genapi import TypeApi, FunctionApi import numpy_api +from genapi import FunctionApi, TypeApi h_template = r""" #ifdef _UMATHMODULE @@ -38,14 +38,11 @@ static inline int _import_umath(void) { + PyObject *c_api; PyObject *numpy = PyImport_ImportModule("numpy._core._multiarray_umath"); if (numpy == NULL && PyErr_ExceptionMatches(PyExc_ModuleNotFoundError)) { PyErr_Clear(); - numpy = PyImport_ImportModule("numpy._core._multiarray_umath"); - if (numpy == NULL && 
PyErr_ExceptionMatches(PyExc_ModuleNotFoundError)) { - PyErr_Clear(); - numpy = PyImport_ImportModule("numpy.core._multiarray_umath"); - } + numpy = PyImport_ImportModule("numpy.core._multiarray_umath"); } if (numpy == NULL) { @@ -54,7 +51,7 @@ return -1; } - PyObject *c_api = PyObject_GetAttrString(numpy, "_UFUNC_API"); + c_api = PyObject_GetAttrString(numpy, "_UFUNC_API"); Py_DECREF(numpy); if (c_api == NULL) { PyErr_SetString(PyExc_AttributeError, "_UFUNC_API not found"); @@ -143,8 +140,8 @@ def generate_api(output_dir, force=False): basename = 'ufunc_api' - h_file = os.path.join(output_dir, '__%s.h' % basename) - c_file = os.path.join(output_dir, '__%s.c' % basename) + h_file = os.path.join(output_dir, f'__{basename}.h') + c_file = os.path.join(output_dir, f'__{basename}.c') targets = (h_file, c_file) sources = ['ufunc_api_order.txt'] diff --git a/numpy/_core/code_generators/generate_umath.py b/numpy/_core/code_generators/generate_umath.py index 06871a44b37f..43cfe6f9d5e3 100644 --- a/numpy/_core/code_generators/generate_umath.py +++ b/numpy/_core/code_generators/generate_umath.py @@ -3,12 +3,10 @@ a dictionary ofUfunc classes. This is fed to make_code to generate __umath_generated.c """ +import argparse import os import re -import struct -import sys import textwrap -import argparse # identity objects Zero = "PyLong_FromLong(0)" @@ -59,7 +57,8 @@ class TypeDescription: If astype['x'] is 'y', uses PyUFunc_x_x_As_y_y/PyUFunc_xx_x_As_yy_y instead of PyUFunc_x_x/PyUFunc_xx_x. cfunc_alias : str or none, optional - Appended to inner loop C function name, e.g., FLOAT_{cfunc_alias}. See make_arrays. + Appended to inner loop C function name, e.g., FLOAT_{cfunc_alias}. + See make_arrays. NOTE: it doesn't support 'astype' dispatch : str or None, optional Dispatch-able source name without its extension '.dispatch.c' that @@ -129,24 +128,24 @@ def check_td_order(tds): # often that SIMD additions added loops that do not even make some sense. 
# TODO: This should likely be a test and it would be nice if it rejected # duplicate entries as well (but we have many as of writing this). - signatures = [t.in_+t.out for t in tds] + signatures = [t.in_ + t.out for t in tds] for prev_i, sign in enumerate(signatures[1:]): - if sign in signatures[:prev_i+1]: + if sign in signatures[:prev_i + 1]: continue # allow duplicates... _check_order(signatures[prev_i], sign) -_floatformat_map = dict( - e='npy_%sf', - f='npy_%sf', - d='npy_%s', - g='npy_%sl', - F='nc_%sf', - D='nc_%s', - G='nc_%sl' -) +_floatformat_map = { + "e": 'npy_%sf', + "f": 'npy_%sf', + "d": 'npy_%s', + "g": 'npy_%sl', + "F": 'nc_%sf', + "D": 'nc_%s', + "G": 'nc_%sl' +} def build_func_data(types, f): func_data = [_floatformat_map.get(t, '%s') % (f,) for t in types] @@ -182,7 +181,7 @@ def TD(types, f=None, astype=None, in_=None, out=None, cfunc_alias=None, for t, fd, i, o in zip(types, func_data, in_, out): # [(dispatch file name without extension '.dispatch.c*', list of types)] if dispatch: - dispt = ([k for k, v in dispatch if t in v]+[None])[0] + dispt = ([k for k, v in dispatch if t in v] + [None])[0] else: dispt = None tds.append(TypeDescription( @@ -228,6 +227,7 @@ def __init__(self, nin, nout, identity, docstring, typereso, # String-handling utilities to avoid locale-dependence. import string + UPPER_TABLE = bytes.maketrans(bytes(string.ascii_lowercase, "ascii"), bytes(string.ascii_uppercase, "ascii")) @@ -249,6 +249,7 @@ def english_upper(s): Examples -------- + >>> import numpy as np >>> from numpy.lib.utils import english_upper >>> s = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_' >>> english_upper(s) @@ -260,16 +261,16 @@ def english_upper(s): return uppered -#each entry in defdict is a Ufunc object. +# each entry in defdict is a Ufunc object. 
-#name: [string of chars for which it is defined, -# string of characters using func interface, -# tuple of strings giving funcs for data, -# (in, out), or (instr, outstr) giving the signature as character codes, -# identity, -# docstring, -# output specification (optional) -# ] +# name: [string of chars for which it is defined, +# string of characters using func interface, +# tuple of strings giving funcs for data, +# (in, out), or (instr, outstr) giving the signature as character codes, +# identity, +# docstring, +# output specification (optional) +# ] chartoname = { '?': 'bool', @@ -325,16 +326,16 @@ def english_upper(s): cmplxP = cmplx + P inexact = flts + cmplx inexactvec = 'fd' -noint = inexact+O -nointP = inexact+P -allP = bints+times+flts+cmplxP +noint = inexact + O +nointP = inexact + P +allP = bints + times + flts + cmplxP nobool_or_obj = noobj[1:] -nobool_or_datetime = noobj[1:-1] + O # includes m - timedelta64 -intflt = ints+flts -intfltcmplx = ints+flts+cmplx -nocmplx = bints+times+flts -nocmplxO = nocmplx+O -nocmplxP = nocmplx+P +nobool_or_datetime = noobj[1:-1] + O # includes m - timedelta64 +intflt = ints + flts +intfltcmplx = ints + flts + cmplx +nocmplx = bints + times + flts +nocmplxO = nocmplx + O +nocmplxP = nocmplx + P notimes_or_obj = bints + inexact nodatetime_or_obj = bints + inexact no_bool_times_obj = ints + inexact @@ -365,7 +366,7 @@ def english_upper(s): indexed=intfltcmplx ), 'subtract': - Ufunc(2, 1, None, # Zero is only a unit to the right, not the left + Ufunc(2, 1, None, # Zero is only a unit to the right, not the left docstrings.get('numpy._core.umath.subtract'), 'PyUFunc_SubtractionTypeResolver', TD(no_bool_times_obj, dispatch=[ @@ -397,9 +398,9 @@ def english_upper(s): TD(O, f='PyNumber_Multiply'), indexed=intfltcmplx ), -#'true_divide' : aliased to divide in umathmodule.c:initumath +# 'true_divide' : aliased to divide in umathmodule.c:initumath 'floor_divide': - Ufunc(2, 1, None, # One is only a unit to the right, not the left 
+ Ufunc(2, 1, None, # One is only a unit to the right, not the left docstrings.get('numpy._core.umath.floor_divide'), 'PyUFunc_DivisionTypeResolver', TD(ints, cfunc_alias='divide', @@ -413,10 +414,10 @@ def english_upper(s): indexed=flts + ints ), 'divide': - Ufunc(2, 1, None, # One is only a unit to the right, not the left + Ufunc(2, 1, None, # One is only a unit to the right, not the left docstrings.get('numpy._core.umath.divide'), 'PyUFunc_TrueDivisionTypeResolver', - TD(flts+cmplx, cfunc_alias='divide', dispatch=[('loops_arithm_fp', 'fd')]), + TD(flts + cmplx, cfunc_alias='divide', dispatch=[('loops_arithm_fp', 'fd')]), [TypeDescription('m', FullTypeDescr, 'mq', 'm', cfunc_alias='divide'), TypeDescription('m', FullTypeDescr, 'md', 'm', cfunc_alias='divide'), TypeDescription('m', FullTypeDescr, 'mm', 'd', cfunc_alias='divide'), @@ -428,7 +429,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.conjugate'), None, - TD(ints+flts+cmplx, dispatch=[ + TD(ints + flts + cmplx, dispatch=[ ('loops_arithm_fp', 'FD'), ('loops_autovec', ints), ]), @@ -446,7 +447,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.square'), None, - TD(ints+inexact, dispatch=[ + TD(ints + inexact, dispatch=[ ('loops_unary_fp', 'fd'), ('loops_arithm_fp', 'FD'), ('loops_autovec', ints), @@ -457,7 +458,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.reciprocal'), None, - TD(ints+inexact, dispatch=[ + TD(ints + inexact, dispatch=[ ('loops_unary_fp', 'fd'), ('loops_autovec', ints), ]), @@ -492,7 +493,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.absolute'), 'PyUFunc_AbsoluteTypeResolver', - TD(bints+flts+timedeltaonly, dispatch=[ + TD(bints + flts + timedeltaonly, dispatch=[ ('loops_unary_fp', 'fd'), ('loops_logical', '?'), ('loops_autovec', ints + 'e'), @@ -511,7 +512,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.negative'), 
'PyUFunc_NegativeTypeResolver', - TD(ints+flts+timedeltaonly, dispatch=[('loops_unary', ints+'fdg')]), + TD(ints + flts + timedeltaonly, dispatch=[('loops_unary', ints + 'fdg')]), TD(cmplx, f='neg'), TD(O, f='PyNumber_Negative'), ), @@ -519,15 +520,17 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.positive'), 'PyUFunc_SimpleUniformOperationTypeResolver', - TD(ints+flts+timedeltaonly), + TD(ints + flts + timedeltaonly), TD(cmplx, f='pos'), TD(O, f='PyNumber_Positive'), ), 'sign': Ufunc(1, 1, None, docstrings.get('numpy._core.umath.sign'), - 'PyUFunc_SimpleUniformOperationTypeResolver', - TD(nobool_or_datetime, dispatch=[('loops_autovec', ints)]), + 'PyUFunc_SignTypeResolver', + TD(ints + flts, dispatch=[('loops_autovec', ints)]), + TD(timedeltaonly, out='d'), + TD(cmplx + O), ), 'greater': Ufunc(2, 1, None, @@ -536,7 +539,7 @@ def english_upper(s): TD(bints, out='?'), [TypeDescription('q', FullTypeDescr, 'qQ', '?'), TypeDescription('q', FullTypeDescr, 'Qq', '?')], - TD(inexact+times, out='?', dispatch=[('loops_comparison', bints+'fd')]), + TD(inexact + times, out='?', dispatch=[('loops_comparison', bints + 'fd')]), TD('O', out='?'), [TypeDescription('O', FullTypeDescr, 'OO', 'O')], ), @@ -547,7 +550,7 @@ def english_upper(s): TD(bints, out='?'), [TypeDescription('q', FullTypeDescr, 'qQ', '?'), TypeDescription('q', FullTypeDescr, 'Qq', '?')], - TD(inexact+times, out='?', dispatch=[('loops_comparison', bints+'fd')]), + TD(inexact + times, out='?', dispatch=[('loops_comparison', bints + 'fd')]), TD('O', out='?'), [TypeDescription('O', FullTypeDescr, 'OO', 'O')], ), @@ -558,7 +561,7 @@ def english_upper(s): TD(bints, out='?'), [TypeDescription('q', FullTypeDescr, 'qQ', '?'), TypeDescription('q', FullTypeDescr, 'Qq', '?')], - TD(inexact+times, out='?', dispatch=[('loops_comparison', bints+'fd')]), + TD(inexact + times, out='?', dispatch=[('loops_comparison', bints + 'fd')]), TD('O', out='?'), [TypeDescription('O', FullTypeDescr, 'OO', 'O')], 
), @@ -569,7 +572,7 @@ def english_upper(s): TD(bints, out='?'), [TypeDescription('q', FullTypeDescr, 'qQ', '?'), TypeDescription('q', FullTypeDescr, 'Qq', '?')], - TD(inexact+times, out='?', dispatch=[('loops_comparison', bints+'fd')]), + TD(inexact + times, out='?', dispatch=[('loops_comparison', bints + 'fd')]), TD('O', out='?'), [TypeDescription('O', FullTypeDescr, 'OO', 'O')], ), @@ -580,7 +583,7 @@ def english_upper(s): TD(bints, out='?'), [TypeDescription('q', FullTypeDescr, 'qQ', '?'), TypeDescription('q', FullTypeDescr, 'Qq', '?')], - TD(inexact+times, out='?', dispatch=[('loops_comparison', bints+'fd')]), + TD(inexact + times, out='?', dispatch=[('loops_comparison', bints + 'fd')]), TD('O', out='?'), [TypeDescription('O', FullTypeDescr, 'OO', 'O')], ), @@ -591,7 +594,7 @@ def english_upper(s): TD(bints, out='?'), [TypeDescription('q', FullTypeDescr, 'qQ', '?'), TypeDescription('q', FullTypeDescr, 'Qq', '?')], - TD(inexact+times, out='?', dispatch=[('loops_comparison', bints+'fd')]), + TD(inexact + times, out='?', dispatch=[('loops_comparison', bints + 'fd')]), TD('O', out='?'), [TypeDescription('O', FullTypeDescr, 'OO', 'O')], ), @@ -642,7 +645,7 @@ def english_upper(s): docstrings.get('numpy._core.umath.maximum'), 'PyUFunc_SimpleUniformOperationTypeResolver', TD('?', cfunc_alias='logical_or', dispatch=[('loops_logical', '?')]), - TD(no_obj_bool, dispatch=[('loops_minmax', ints+'fdg')]), + TD(no_obj_bool, dispatch=[('loops_minmax', ints + 'fdg')]), TD(O, f='npy_ObjectMax'), indexed=flts + ints, ), @@ -652,7 +655,7 @@ def english_upper(s): 'PyUFunc_SimpleUniformOperationTypeResolver', TD('?', cfunc_alias='logical_and', dispatch=[('loops_logical', '?')]), - TD(no_obj_bool, dispatch=[('loops_minmax', ints+'fdg')]), + TD(no_obj_bool, dispatch=[('loops_minmax', ints + 'fdg')]), TD(O, f='npy_ObjectMin'), indexed=flts + ints, ), @@ -777,7 +780,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.arccos'), None, - TD('efd', 
dispatch=[('loops_umath_fp', 'efd')]), + TD('efd', dispatch=[('loops_umath_fp', 'fd'), ('loops_half', 'e')]), TD(inexact, f='acos', astype={'e': 'f'}), TD(P, f='arccos'), ), @@ -785,7 +788,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.arccosh'), None, - TD('efd', dispatch=[('loops_umath_fp', 'efd')]), + TD('efd', dispatch=[('loops_umath_fp', 'fd'), ('loops_half', 'e')]), TD(inexact, f='acosh', astype={'e': 'f'}), TD(P, f='arccosh'), ), @@ -793,7 +796,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.arcsin'), None, - TD('efd', dispatch=[('loops_umath_fp', 'efd')]), + TD('efd', dispatch=[('loops_umath_fp', 'fd'), ('loops_half', 'e')]), TD(inexact, f='asin', astype={'e': 'f'}), TD(P, f='arcsin'), ), @@ -801,7 +804,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.arcsinh'), None, - TD('efd', dispatch=[('loops_umath_fp', 'efd')]), + TD('efd', dispatch=[('loops_umath_fp', 'fd'), ('loops_half', 'e')]), TD(inexact, f='asinh', astype={'e': 'f'}), TD(P, f='arcsinh'), ), @@ -809,7 +812,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.arctan'), None, - TD('efd', dispatch=[('loops_umath_fp', 'efd')]), + TD('efd', dispatch=[('loops_umath_fp', 'fd'), ('loops_half', 'e')]), TD(inexact, f='atan', astype={'e': 'f'}), TD(P, f='arctan'), ), @@ -817,7 +820,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.arctanh'), None, - TD('efd', dispatch=[('loops_umath_fp', 'efd')]), + TD('efd', dispatch=[('loops_umath_fp', 'fd'), ('loops_half', 'e')]), TD(inexact, f='atanh', astype={'e': 'f'}), TD(P, f='arctanh'), ), @@ -825,7 +828,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.cos'), None, - TD('e', dispatch=[('loops_umath_fp', 'e')]), + TD('e', dispatch=[('loops_half', 'e')]), TD('f', dispatch=[('loops_trigonometric', 'f')]), TD('d', dispatch=[('loops_trigonometric', 'd')]), TD('g' + cmplx, f='cos'), @@ -835,7 
+838,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.sin'), None, - TD('e', dispatch=[('loops_umath_fp', 'e')]), + TD('e', dispatch=[('loops_half', 'e')]), TD('f', dispatch=[('loops_trigonometric', 'f')]), TD('d', dispatch=[('loops_trigonometric', 'd')]), TD('g' + cmplx, f='sin'), @@ -845,7 +848,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.tan'), None, - TD('efd', dispatch=[('loops_umath_fp', 'efd')]), + TD('efd', dispatch=[('loops_umath_fp', 'fd'), ('loops_half', 'e')]), TD(inexact, f='tan', astype={'e': 'f'}), TD(P, f='tan'), ), @@ -853,7 +856,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.cosh'), None, - TD('efd', dispatch=[('loops_umath_fp', 'efd')]), + TD('efd', dispatch=[('loops_umath_fp', 'fd'), ('loops_half', 'e')]), TD(inexact, f='cosh', astype={'e': 'f'}), TD(P, f='cosh'), ), @@ -861,7 +864,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.sinh'), None, - TD('efd', dispatch=[('loops_umath_fp', 'efd')]), + TD('efd', dispatch=[('loops_umath_fp', 'fd'), ('loops_half', 'e')]), TD(inexact, f='sinh', astype={'e': 'f'}), TD(P, f='sinh'), ), @@ -869,7 +872,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.tanh'), None, - TD('e', dispatch=[('loops_umath_fp', 'e')]), + TD('e', dispatch=[('loops_half', 'e')]), TD('fd', dispatch=[('loops_hyperbolic', 'fd')]), TD(inexact, f='tanh', astype={'e': 'f'}), TD(P, f='tanh'), @@ -878,7 +881,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.exp'), None, - TD('e', dispatch=[('loops_umath_fp', 'e')]), + TD('e', dispatch=[('loops_half', 'e')]), TD('fd', dispatch=[('loops_exponent_log', 'fd')]), TD('fdg' + cmplx, f='exp'), TD(P, f='exp'), @@ -887,7 +890,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.exp2'), None, - TD('efd', dispatch=[('loops_umath_fp', 'efd')]), + TD('efd', dispatch=[('loops_umath_fp', 'fd'), 
('loops_half', 'e')]), TD(inexact, f='exp2', astype={'e': 'f'}), TD(P, f='exp2'), ), @@ -895,7 +898,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.expm1'), None, - TD('efd', dispatch=[('loops_umath_fp', 'efd')]), + TD('efd', dispatch=[('loops_umath_fp', 'fd'), ('loops_half', 'e')]), TD(inexact, f='expm1', astype={'e': 'f'}), TD(P, f='expm1'), ), @@ -903,7 +906,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.log'), None, - TD('e', dispatch=[('loops_umath_fp', 'e')]), + TD('e', dispatch=[('loops_half', 'e')]), TD('fd', dispatch=[('loops_exponent_log', 'fd')]), TD('fdg' + cmplx, f='log'), TD(P, f='log'), @@ -912,7 +915,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.log2'), None, - TD('efd', dispatch=[('loops_umath_fp', 'efd')]), + TD('efd', dispatch=[('loops_umath_fp', 'fd'), ('loops_half', 'e')]), TD(inexact, f='log2', astype={'e': 'f'}), TD(P, f='log2'), ), @@ -920,7 +923,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.log10'), None, - TD('efd', dispatch=[('loops_umath_fp', 'efd')]), + TD('efd', dispatch=[('loops_umath_fp', 'fd'), ('loops_half', 'e')]), TD(inexact, f='log10', astype={'e': 'f'}), TD(P, f='log10'), ), @@ -928,7 +931,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.log1p'), None, - TD('efd', dispatch=[('loops_umath_fp', 'efd')]), + TD('efd', dispatch=[('loops_umath_fp', 'fd'), ('loops_half', 'e')]), TD(inexact, f='log1p', astype={'e': 'f'}), TD(P, f='log1p'), ), @@ -945,7 +948,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.cbrt'), None, - TD('efd', dispatch=[('loops_umath_fp', 'efd')]), + TD('efd', dispatch=[('loops_umath_fp', 'fd'), ('loops_half', 'e')]), TD(flts, f='cbrt', astype={'e': 'f'}), TD(P, f='cbrt'), ), @@ -953,6 +956,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.ceil'), None, + TD(bints), TD('e', f='ceil', astype={'e': 
'f'}), TD(inexactvec, dispatch=[('loops_unary_fp', 'fd')]), TD('fdg', f='ceil'), @@ -962,6 +966,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.trunc'), None, + TD(bints), TD('e', f='trunc', astype={'e': 'f'}), TD(inexactvec, dispatch=[('loops_unary_fp', 'fd')]), TD('fdg', f='trunc'), @@ -978,6 +983,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.floor'), None, + TD(bints), TD('e', f='floor', astype={'e': 'f'}), TD(inexactvec, dispatch=[('loops_unary_fp', 'fd')]), TD('fdg', f='floor'), @@ -1089,21 +1095,21 @@ def english_upper(s): None, TD(flts), ), -'ldexp' : +'ldexp': Ufunc(2, 1, None, docstrings.get('numpy._core.umath.ldexp'), None, [TypeDescription('e', None, 'ei', 'e'), TypeDescription('f', None, 'fi', 'f', dispatch='loops_exponent_log'), - TypeDescription('e', FuncNameSuffix('int64'), 'e'+int64, 'e'), - TypeDescription('f', FuncNameSuffix('int64'), 'f'+int64, 'f'), + TypeDescription('e', FuncNameSuffix('int64'), 'e' + int64, 'e'), + TypeDescription('f', FuncNameSuffix('int64'), 'f' + int64, 'f'), TypeDescription('d', None, 'di', 'd', dispatch='loops_exponent_log'), - TypeDescription('d', FuncNameSuffix('int64'), 'd'+int64, 'd'), + TypeDescription('d', FuncNameSuffix('int64'), 'd' + int64, 'd'), TypeDescription('g', None, 'gi', 'g'), - TypeDescription('g', FuncNameSuffix('int64'), 'g'+int64, 'g'), + TypeDescription('g', FuncNameSuffix('int64'), 'g' + int64, 'g'), ], ), -'frexp' : +'frexp': Ufunc(1, 2, None, docstrings.get('numpy._core.umath.frexp'), None, @@ -1113,14 +1119,14 @@ def english_upper(s): TypeDescription('g', None, 'g', 'gi'), ], ), -'gcd' : +'gcd': Ufunc(2, 1, Zero, docstrings.get('numpy._core.umath.gcd'), "PyUFunc_SimpleUniformOperationTypeResolver", TD(ints), TD('O', f='npy_ObjectGCD'), ), -'lcm' : +'lcm': Ufunc(2, 1, None, docstrings.get('numpy._core.umath.lcm'), "PyUFunc_SimpleUniformOperationTypeResolver", @@ -1134,7 +1140,7 @@ def english_upper(s): TD(ints, 
dispatch=[('loops_autovec', ints)], out='B'), TD(P, f='bit_count'), ), -'matmul' : +'matmul': Ufunc(2, 1, None, docstrings.get('numpy._core.umath.matmul'), "PyUFunc_SimpleUniformOperationTypeResolver", @@ -1150,6 +1156,22 @@ def english_upper(s): TD(O), signature='(n),(n)->()', ), +'matvec': + Ufunc(2, 1, None, + docstrings.get('numpy._core.umath.matvec'), + "PyUFunc_SimpleUniformOperationTypeResolver", + TD(notimes_or_obj), + TD(O), + signature='(m,n),(n)->(m)', + ), +'vecmat': + Ufunc(2, 1, None, + docstrings.get('numpy._core.umath.vecmat'), + "PyUFunc_SimpleUniformOperationTypeResolver", + TD(notimes_or_obj), + TD(O), + signature='(n),(n,m)->(m)', + ), 'str_len': Ufunc(1, 1, Zero, docstrings.get('numpy._core.umath.str_len'), @@ -1320,15 +1342,21 @@ def english_upper(s): docstrings.get('numpy._core.umath._rpartition'), None, ), +'_slice': + Ufunc(4, 1, None, + docstrings.get('numpy._core.umath._slice'), + None, + ), } def indent(st, spaces): - indentation = ' '*spaces - indented = indentation + st.replace('\n', '\n'+indentation) + indentation = ' ' * spaces + indented = indentation + st.replace('\n', '\n' + indentation) # trim off any trailing spaces indented = re.sub(r' +$', r'', indented) return indented + # maps [nin, nout][type] to a suffix arity_lookup = { (1, 1): { @@ -1358,7 +1386,7 @@ def indent(st, spaces): } } -#for each name +# for each name # 1) create functions, data, and signature # 2) fill in functions and data in InitOperators # 3) add function. 
@@ -1369,18 +1397,17 @@ def make_arrays(funcdict): # later code1list = [] code2list = [] - dispdict = {} + dispdict = {} names = sorted(funcdict.keys()) for name in names: uf = funcdict[name] funclist = [] datalist = [] siglist = [] - k = 0 sub = 0 - for t in uf.type_descriptions: - cfunc_alias = t.cfunc_alias if t.cfunc_alias else name + for k, t in enumerate(uf.type_descriptions): + cfunc_alias = t.cfunc_alias or name cfunc_fname = None if t.func_data is FullTypeDescr: tname = english_upper(chartoname[t.type]) @@ -1407,8 +1434,8 @@ def make_arrays(funcdict): ) from None astype = '' - if not t.astype is None: - astype = '_As_%s' % thedict[t.astype] + if t.astype is not None: + astype = f'_As_{thedict[t.astype]}' astr = ('%s_functions[%d] = PyUFunc_%s%s;' % (name, k, thedict[t.type], astype)) code2list.append(astr) @@ -1418,7 +1445,7 @@ def make_arrays(funcdict): code2list.append(astr) datalist.append('(void *)NULL') elif t.type == 'P': - datalist.append('(void *)"%s"' % t.func_data) + datalist.append(f'(void *)"{t.func_data}"') else: astr = ('%s_data[%d] = (void *) %s;' % (name, k, t.func_data)) @@ -1437,9 +1464,7 @@ def make_arrays(funcdict): funclist.append('NULL') for x in t.in_ + t.out: - siglist.append('NPY_%s' % (english_upper(chartoname[x]),)) - - k += 1 + siglist.append(f'NPY_{english_upper(chartoname[x])}') if funclist or siglist or datalist: funcnames = ', '.join(funclist) @@ -1458,15 +1483,18 @@ def make_arrays(funcdict): for dname, funcs in dispdict.items(): code2list.append(textwrap.dedent(f""" - #ifndef NPY_DISABLE_OPTIMIZATION #include "{dname}.dispatch.h" - #endif """)) for (ufunc_name, func_idx, cfunc_name, inout) in funcs: - code2list.append(textwrap.dedent(f"""\ + call_text = ( + f"NPY_CPU_DISPATCH_CALL_XB(" + f"{ufunc_name}_functions[{func_idx}] = {cfunc_name});" + ) + text = f"""\ NPY_CPU_DISPATCH_TRACE("{ufunc_name}", "{''.join(inout)}"); - NPY_CPU_DISPATCH_CALL_XB({ufunc_name}_functions[{func_idx}] = {cfunc_name}); - """)) + {call_text} + 
""" + code2list.append(textwrap.dedent(text)) return "\n".join(code1list), "\n".join(code2list) def make_ufuncs(funcdict): @@ -1478,7 +1506,7 @@ def make_ufuncs(funcdict): if uf.signature is None: sig = "NULL" else: - sig = '"{}"'.format(uf.signature) + sig = f'"{uf.signature}"' fmt = textwrap.dedent("""\ identity = {identity_expr}; if ({has_identity} && identity == NULL) {{ @@ -1496,19 +1524,19 @@ def make_ufuncs(funcdict): return -1; }} """) - args = dict( - name=name, - funcs=f"{name}_functions" if not uf.empty else "NULL", - data=f"{name}_data" if not uf.empty else "NULL", - signatures=f"{name}_signatures" if not uf.empty else "NULL", - nloops=len(uf.type_descriptions), - nin=uf.nin, nout=uf.nout, - has_identity='0' if uf.identity is None_ else '1', - identity='PyUFunc_IdentityValue', - identity_expr=uf.identity, - doc=uf.docstring, - sig=sig, - ) + args = { + "name": name, + "funcs": f"{name}_functions" if not uf.empty else "NULL", + "data": f"{name}_data" if not uf.empty else "NULL", + "signatures": f"{name}_signatures" if not uf.empty else "NULL", + "nloops": len(uf.type_descriptions), + "nin": uf.nin, "nout": uf.nout, + "has_identity": '0' if uf.identity is None_ else '1', + "identity": 'PyUFunc_IdentityValue', + "identity_expr": uf.identity, + "doc": uf.docstring, + "sig": sig, + } # Only PyUFunc_None means don't reorder - we pass this using the old # argument @@ -1549,9 +1577,9 @@ def make_ufuncs(funcdict): """) mlist.append(fmt.format( typenum=f"NPY_{english_upper(chartoname[c])}", - count=uf.nin+uf.nout, + count=uf.nin + uf.nout, name=name, - funcname = f"{english_upper(chartoname[c])}_{name}_indexed", + funcname=f"{english_upper(chartoname[c])}_{name}_indexed", )) mlist.append(r"""PyDict_SetItemString(dictionary, "%s", f);""" % name) @@ -1577,13 +1605,10 @@ def make_code(funcdict, filename): #include "matmul.h" #include "clip.h" #include "dtypemeta.h" + #include "dispatching.h" #include "_umath_doc_generated.h" %s - /* Returns a borrowed ref of the 
second value in the matching info tuple */ - PyObject * - get_info_no_cast(PyUFuncObject *ufunc, PyArray_DTypeMeta *op_dtype, - int ndtypes); static int InitOperators(PyObject *dictionary) { diff --git a/numpy/_core/code_generators/generate_umath_doc.py b/numpy/_core/code_generators/generate_umath_doc.py index fc0c2a1381cc..4b6f8985a98f 100644 --- a/numpy/_core/code_generators/generate_umath_doc.py +++ b/numpy/_core/code_generators/generate_umath_doc.py @@ -1,10 +1,11 @@ -import sys +import argparse import os +import sys import textwrap -import argparse sys.path.insert(0, os.path.dirname(__file__)) import ufunc_docstrings as docstrings + sys.path.pop(0) def normalize_doc(docstring): diff --git a/numpy/_core/code_generators/numpy_api.py b/numpy/_core/code_generators/numpy_api.py index 30e2222e557e..c2b471c71757 100644 --- a/numpy/_core/code_generators/numpy_api.py +++ b/numpy/_core/code_generators/numpy_api.py @@ -14,12 +14,12 @@ """ -import os import importlib.util +import os def get_annotations(): - # Convoluted because we can't import from numpy.distutils + # Convoluted because we can't import numpy # (numpy is not yet built) genapi_py = os.path.join(os.path.dirname(__file__), 'genapi.py') spec = importlib.util.spec_from_file_location('conv_template', genapi_py) @@ -106,7 +106,7 @@ def get_annotations(): '__unused_indices__': ( [1, 4, 40, 41, 66, 67, 68, 81, 82, 83, 103, 115, 117, 122, 163, 164, 171, 173, 197, - 201, 202, 208, 219, 220, 221, 222, 223, 278, + 201, 202, 208, 219, 220, 221, 222, 278, 291, 293, 294, 295, 301] # range/slots reserved DType classes (see _public_dtype_api_table.h): + list(range(320, 361)) + [366, 367, 368] @@ -116,6 +116,8 @@ def get_annotations(): # Unused slot 41, was `PyArray_GetNumericOps`, 'PyArray_INCREF': (42,), 'PyArray_XDECREF': (43,), + # `PyArray_SetStringFunction` was stubbed out + # and should be removed in the future. 
'PyArray_SetStringFunction': (44,), 'PyArray_DescrFromType': (45,), 'PyArray_TypeObjectFromType': (46,), @@ -291,8 +293,8 @@ def get_annotations(): # Unused slot 220, was `PyArray_DatetimeToDatetimeStruct` # Unused slot 221, was `PyArray_TimedeltaToTimedeltaStruct` # Unused slot 222, was `PyArray_DatetimeStructToDatetime` - # Unused slot 223, was `PyArray_TimedeltaStructToTimedelta` # NDIter API + 'NpyIter_GetTransferFlags': (223, MinVersion("2.3")), 'NpyIter_New': (224,), 'NpyIter_MultiNew': (225,), 'NpyIter_AdvancedNew': (226,), @@ -405,10 +407,12 @@ def get_annotations(): # `PyDataType_GetArrFuncs` checks for the NumPy runtime version. '_PyDataType_GetArrFuncs': (365,), # End 2.0 API + # NpyIterGetTransferFlags (slot 223) added. + # End 2.3 API } ufunc_types_api = { - 'PyUFunc_Type': (0,) + 'PyUFunc_Type': (0,), } ufunc_funcs_api = { @@ -464,6 +468,8 @@ def get_annotations(): 'PyUFunc_AddPromoter': (44, MinVersion("2.0")), 'PyUFunc_AddWrappingLoop': (45, MinVersion("2.0")), 'PyUFunc_GiveFloatingpointErrors': (46, MinVersion("2.0")), + # End 2.0 API + 'PyUFunc_AddLoopsFromSpecs': (47, MinVersion("2.4")), } # List of all the dicts which define the C API diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index a3e1965151f1..40842b1cea43 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -44,22 +44,22 @@ def add_newdoc(place, name, doc): skip = ( # gufuncs do not use the OUT_SCALAR replacement strings - 'matmul', 'vecdot', + 'matmul', 'vecdot', 'matvec', 'vecmat', # clip has 3 inputs, which is not handled by this 'clip', ) if name[0] != '_' and name not in skip: if '\nx :' in doc: - assert '$OUT_SCALAR_1' in doc, "in {}".format(name) + assert '$OUT_SCALAR_1' in doc, f"in {name}" elif '\nx2 :' in doc or '\nx1, x2 :' in doc: - assert '$OUT_SCALAR_2' in doc, "in {}".format(name) + assert '$OUT_SCALAR_2' in doc, f"in {name}" else: - assert 
False, "Could not detect number of inputs in {}".format(name) + assert False, f"Could not detect number of inputs in {name}" for k, v in subst.items(): doc = doc.replace('$' + k, v) - docdict['.'.join((place, name))] = doc + docdict[f'{place}.{name}'] = doc add_newdoc('numpy._core.umath', 'absolute', @@ -84,6 +84,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> x = np.array([-1.2, 1.2]) >>> np.absolute(x) array([ 1.2, 1.2]) @@ -136,6 +137,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.add(1.0, 4.0) 5.0 >>> x1 = np.arange(9.0).reshape((3, 3)) @@ -203,6 +205,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + We expect the arccos of 1 to be 0, and of -1 to be pi: >>> np.arccos([1, -1]) @@ -263,6 +267,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.arccosh([np.e, 10.0]) array([ 1.65745445, 2.99322285]) >>> np.arccosh(1) @@ -315,6 +320,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.arcsin(1) # pi/2 1.5707963267948966 >>> np.arcsin(-1) # -pi/2 @@ -326,7 +332,7 @@ def add_newdoc(place, name, doc): add_newdoc('numpy._core.umath', 'arcsinh', """ - Inverse hyperbolic sine element-wise. + Inverse hyperbolic sine, element-wise. Parameters ---------- @@ -366,6 +372,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.arcsinh(np.array([np.e, 10.0])) array([ 1.72538256, 2.99822295]) @@ -419,8 +426,10 @@ def add_newdoc(place, name, doc): Examples -------- + We expect the arctan of 0 to be 0, and of 1 to be pi/4: + >>> import numpy as np >>> np.arctan([0, 1]) array([ 0. 
, 0.78539816]) @@ -498,8 +507,10 @@ def add_newdoc(place, name, doc): Examples -------- + Consider four points in different quadrants: + >>> import numpy as np >>> x = np.array([-1, +1, +1, -1]) >>> y = np.array([-1, -1, +1, +1]) >>> np.arctan2(y, x) * 180 / np.pi @@ -523,7 +534,7 @@ def add_newdoc(place, name, doc): add_newdoc('numpy._core.umath', 'arctanh', """ - Inverse hyperbolic tangent element-wise. + Inverse hyperbolic tangent, element-wise. Parameters ---------- @@ -567,6 +578,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.arctanh([0, -0.5]) array([ 0. , -0.54930614]) @@ -603,6 +615,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + The number 13 is represented by ``00001101``. Likewise, 17 is represented by ``00010001``. The bit-wise AND of 13 and 17 is therefore ``000000001``, or 1: @@ -665,6 +679,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + The number 13 has the binary representation ``00001101``. Likewise, 16 is represented by ``00010000``. The bit-wise OR of 13 and 16 is then ``00011101``, or 29: @@ -687,7 +703,7 @@ def add_newdoc(place, name, doc): array([ 6, 5, 255]) >>> np.bitwise_or(np.array([2, 5, 255, 2147483647], dtype=np.int32), ... np.array([4, 4, 4, 2147483647], dtype=np.int32)) - array([ 6, 5, 255, 2147483647]) + array([ 6, 5, 255, 2147483647], dtype=int32) >>> np.bitwise_or([True, True], [False, True]) array([ True, True]) @@ -732,6 +748,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + The number 13 is represented by ``00001101``. Likewise, 17 is represented by ``00010001``. The bit-wise XOR of 13 and 17 is therefore ``00011100``, or 28: @@ -777,7 +795,7 @@ def add_newdoc(place, name, doc): Returns ------- y : ndarray or scalar - The ceiling of each element in `x`, with `float` dtype. + The ceiling of each element in `x`. 
$OUT_SCALAR_1 See Also @@ -786,6 +804,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) >>> np.ceil(a) array([-1., -1., -0., 1., 2., 2., 2.]) @@ -816,12 +836,9 @@ def add_newdoc(place, name, doc): -------- ceil, floor, rint, fix - Notes - ----- - .. versionadded:: 1.3.0 - Examples -------- + >>> import numpy as np >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) >>> np.trunc(a) array([-1., -1., -0., 0., 1., 1., 2.]) @@ -856,6 +873,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.conjugate(1+2j) (1-2j) @@ -894,11 +912,12 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.cos(np.array([0, np.pi/2, np.pi])) array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00]) >>> >>> # Example of providing the optional output parameter - >>> out1 = np.array([0], dtype='d') + >>> out1 = np.array([0], dtype=np.float64) >>> out2 = np.cos([0.1], out1) >>> out2 is out1 True @@ -931,6 +950,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.cosh(0) 1.0 @@ -968,6 +988,7 @@ def add_newdoc(place, name, doc): -------- Convert a radian array to degrees + >>> import numpy as np >>> rad = np.arange(12.)*np.pi/6 >>> np.degrees(rad) array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., @@ -1003,12 +1024,11 @@ def add_newdoc(place, name, doc): Notes ----- - .. versionadded:: 1.3.0 - rad2deg(x) is ``180 * x / pi``. Examples -------- + >>> import numpy as np >>> np.rad2deg(np.pi/2) 90.0 @@ -1041,10 +1061,6 @@ def add_newdoc(place, name, doc): The output array, element-wise Heaviside step function of `x1`. $OUT_SCALAR_2 - Notes - ----- - .. versionadded:: 1.13.0 - References ---------- .. [1] Wikipedia, "Heaviside step function", @@ -1052,6 +1068,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.heaviside([-1.5, 0, 2.0], 0.5) array([ 0. , 0.5, 1. 
]) >>> np.heaviside([-1.5, 0, 2.0], 1) @@ -1091,6 +1108,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.divide(2.0, 4.0) 0.5 >>> x1 = np.arange(9.0).reshape((3, 3)) @@ -1127,7 +1145,7 @@ def add_newdoc(place, name, doc): ------- out : ndarray or scalar Output array, element-wise comparison of `x1` and `x2`. - Typically of type bool, unless ``dtype=object`` is passed. + Typically of type bool, unless ``dtype=np.object_`` is passed. $OUT_SCALAR_2 See Also @@ -1136,6 +1154,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.equal([0, 1, 3], np.arange(3)) array([ True, True, False]) @@ -1201,7 +1220,10 @@ def add_newdoc(place, name, doc): -------- Plot the magnitude and phase of ``exp(x)`` in the complex plane: + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> import numpy as np >>> x = np.linspace(-2*np.pi, 2*np.pi, 100) >>> xx = x + 1j * x[:, np.newaxis] # a + ib over complex plane @@ -1240,14 +1262,9 @@ def add_newdoc(place, name, doc): -------- power - Notes - ----- - .. versionadded:: 1.3.0 - - - Examples -------- + >>> import numpy as np >>> np.exp2([2, 3]) array([ 4., 8.]) @@ -1281,10 +1298,12 @@ def add_newdoc(place, name, doc): Examples -------- + The true value of ``exp(1e-10) - 1`` is ``1.00000000005e-10`` to about 32 significant digits. This example shows the superiority of expm1 in this case. 
+ >>> import numpy as np >>> np.expm1(1e-10) 1.00000000005e-10 >>> np.exp(1e-10) - 1 @@ -1319,6 +1338,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.fabs(-1) 1.0 >>> np.fabs([-1.2, 1.2]) @@ -1358,6 +1378,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) >>> np.floor(a) array([-2., -2., -1., 0., 1., 1., 2.]) @@ -1396,6 +1417,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.floor_divide(7,3) 2 >>> np.floor_divide([1., 2., 3., 4.], 2.5) @@ -1449,6 +1471,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.fmod([-3, -2, -1, 1, 2, 3], 2) array([-1, 0, -1, 1, 0, 1]) >>> np.remainder([-3, -2, -1, 1, 2, 3], 2) @@ -1483,7 +1506,7 @@ def add_newdoc(place, name, doc): ------- out : ndarray or scalar Output array, element-wise comparison of `x1` and `x2`. - Typically of type bool, unless ``dtype=object`` is passed. + Typically of type bool, unless ``dtype=np.object_`` is passed. $OUT_SCALAR_2 @@ -1493,6 +1516,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.greater([4,2],[2,2]) array([ True, False]) @@ -1521,7 +1545,7 @@ def add_newdoc(place, name, doc): ------- out : bool or ndarray of bool Output array, element-wise comparison of `x1` and `x2`. - Typically of type bool, unless ``dtype=object`` is passed. + Typically of type bool, unless ``dtype=np.object_`` is passed. $OUT_SCALAR_2 See Also @@ -1530,6 +1554,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.greater_equal([4, 2, 1], [2, 2, 2]) array([ True, True, False]) @@ -1567,6 +1592,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3))) array([[ 5., 5., 5.], [ 5., 5., 5.], @@ -1589,12 +1615,13 @@ def add_newdoc(place, name, doc): the integers in the input arrays. 
This ufunc implements the C/Python operator ``~``. - For signed integer inputs, the two's complement is returned. In a - two's-complement system negative numbers are represented by the two's - complement of the absolute value. This is the most common method of - representing signed integers on computers [1]_. A N-bit - two's-complement system can represent every integer in the range - :math:`-2^{N-1}` to :math:`+2^{N-1}-1`. + For signed integer inputs, the bit-wise NOT of the absolute value is + returned. In a two's-complement system, this operation effectively flips + all the bits, resulting in a representation that corresponds to the + negative of the input plus one. This is the most common method of + representing signed integers on computers [1]_. An N-bit two's-complement + system can represent every integer in the range :math:`-2^{N-1}` to + :math:`+2^{N-1}-1`. Parameters ---------- @@ -1629,12 +1656,14 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + We've seen that 13 is represented by ``00001101``. 
The invert or bit-wise NOT of 13 is then: >>> x = np.invert(np.array(13, dtype=np.uint8)) >>> x - 242 + np.uint8(242) >>> np.binary_repr(x, width=8) '11110010' @@ -1642,12 +1671,12 @@ def add_newdoc(place, name, doc): >>> x = np.invert(np.array(13, dtype=np.uint16)) >>> x - 65522 + np.uint16(65522) >>> np.binary_repr(x, width=16) '1111111111110010' - When using signed integer types the result is the two's complement of - the result for the unsigned type: + When using signed integer types, the result is the bit-wise NOT of + the unsigned type, interpreted as a signed integer: >>> np.invert(np.array([13], dtype=np.int8)) array([-14], dtype=int8) @@ -1705,6 +1734,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.isfinite(1) True >>> np.isfinite(0) @@ -1761,6 +1791,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.isinf(np.inf) True >>> np.isinf(np.nan) @@ -1806,6 +1837,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.isnan(np.nan) True >>> np.isnan(np.inf) @@ -1819,8 +1851,6 @@ def add_newdoc(place, name, doc): """ Test element-wise for NaT (not a time) and return result as a boolean array. - .. versionadded:: 1.13.0 - Parameters ---------- x : array_like @@ -1839,6 +1869,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.isnat(np.datetime64("NaT")) True >>> np.isnat(np.datetime64("2016-01-01")) @@ -1879,6 +1910,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.binary_repr(5) '101' >>> np.left_shift(5, 2) @@ -1925,7 +1957,7 @@ def add_newdoc(place, name, doc): ------- out : ndarray or scalar Output array, element-wise comparison of `x1` and `x2`. - Typically of type bool, unless ``dtype=object`` is passed. + Typically of type bool, unless ``dtype=np.object_`` is passed. 
$OUT_SCALAR_2 See Also @@ -1934,6 +1966,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.less([1, 2], [2, 2]) array([ True, False]) @@ -1961,7 +1994,7 @@ def add_newdoc(place, name, doc): ------- out : ndarray or scalar Output array, element-wise comparison of `x1` and `x2`. - Typically of type bool, unless ``dtype=object`` is passed. + Typically of type bool, unless ``dtype=np.object_`` is passed. $OUT_SCALAR_2 See Also @@ -1970,6 +2003,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.less_equal([4, 2, 1], [2, 2, 2]) array([False, True, True]) @@ -2035,6 +2069,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.log([1, np.e, np.e**2, 0]) array([ 0., 1., 2., -inf]) @@ -2089,6 +2124,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.log10([1e-15, -3.]) array([-15., nan]) @@ -2116,8 +2152,6 @@ def add_newdoc(place, name, doc): Notes ----- - .. versionadded:: 1.3.0 - Logarithm is a multivalued function: for each `x` there is an infinite number of `z` such that `2**z = x`. The convention is to return the `z` whose imaginary part lies in `(-pi, pi]`. @@ -2137,6 +2171,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> x = np.array([0, 1, 2, 2**4]) >>> np.log2(x) array([-inf, 0., 1., 4.]) @@ -2174,12 +2209,9 @@ def add_newdoc(place, name, doc): -------- logaddexp2: Logarithm of the sum of exponentiations of inputs in base 2. - Notes - ----- - .. versionadded:: 1.3.0 - Examples -------- + >>> import numpy as np >>> prob1 = np.log(1e-50) >>> prob2 = np.log(2.5e-50) >>> prob12 = np.logaddexp(prob1, prob2) @@ -2217,12 +2249,9 @@ def add_newdoc(place, name, doc): -------- logaddexp: Logarithm of the sum of exponentiations of the inputs. - Notes - ----- - .. 
versionadded:: 1.3.0 - Examples -------- + >>> import numpy as np >>> prob1 = np.log2(1e-50) >>> prob2 = np.log2(2.5e-50) >>> prob12 = np.logaddexp2(prob1, prob2) @@ -2282,6 +2311,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.log1p(1e-99) 1e-99 >>> np.log(1 + 1e-99) @@ -2314,6 +2344,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.logical_and(True, False) False >>> np.logical_and([True, False], [False, False]) @@ -2357,6 +2388,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.logical_not(3) False >>> np.logical_not([True, False, 0, 1]) @@ -2393,6 +2425,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.logical_or(True, False) True >>> np.logical_or([True, False], [False, False]) @@ -2436,6 +2469,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.logical_xor(True, False) True >>> np.logical_xor([True, True, False, False], [True, False, True, False]) @@ -2498,6 +2532,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.maximum([2, 3, 4], [1, 5, 2]) array([2, 5, 4]) @@ -2557,6 +2592,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.minimum([2, 3, 4], [1, 5, 2]) array([1, 3, 2]) @@ -2610,15 +2646,14 @@ def add_newdoc(place, name, doc): Notes ----- - .. versionadded:: 1.3.0 - The fmax is equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither x1 nor x2 are NaNs, but it is faster and does proper broadcasting. Examples -------- + >>> import numpy as np >>> np.fmax([2, 3, 4], [1, 5, 2]) - array([ 2., 5., 4.]) + array([ 2, 5, 4]) >>> np.fmax(np.eye(2), [0.5, 2]) array([[ 1. , 2. ], @@ -2668,13 +2703,12 @@ def add_newdoc(place, name, doc): Notes ----- - .. 
versionadded:: 1.3.0 - The fmin is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither x1 nor x2 are NaNs, but it is faster and does proper broadcasting. Examples -------- + >>> import numpy as np >>> np.fmin([2, 3, 4], [1, 5, 2]) array([1, 3, 2]) @@ -2743,9 +2777,6 @@ def add_newdoc(place, name, doc): For other keyword-only arguments, see the :ref:`ufunc docs `. - .. versionadded:: 1.16 - Now handles ufunc kwargs - Returns ------- y : ndarray @@ -2762,14 +2793,15 @@ def add_newdoc(place, name, doc): See Also -------- - vdot : Complex-conjugating dot product. + vecdot : Complex-conjugating dot product for stacks of vectors. + matvec : Matrix-vector product for stacks of matrices and vectors. + vecmat : Vector-matrix product for stacks of vectors and matrices. tensordot : Sum products over arbitrary axes. einsum : Einstein summation convention. dot : alternative matrix product with different broadcasting rules. Notes ----- - The behavior depends on the arguments in the following way. - If both arguments are 2-D they are multiplied like conventional @@ -2778,10 +2810,10 @@ def add_newdoc(place, name, doc): matrices residing in the last two indexes and broadcast accordingly. - If the first argument is 1-D, it is promoted to a matrix by prepending a 1 to its dimensions. After matrix multiplication - the prepended 1 is removed. + the prepended 1 is removed. (For stacks of vectors, use ``vecmat``.) - If the second argument is 1-D, it is promoted to a matrix by appending a 1 to its dimensions. After matrix multiplication - the appended 1 is removed. + the appended 1 is removed. (For stacks of vectors, use ``matvec``.) ``matmul`` differs from ``dot`` in two important ways: @@ -2798,7 +2830,7 @@ def add_newdoc(place, name, doc): >>> # n is 7, k is 4, m is 3 The matmul function implements the semantics of the ``@`` operator - introduced in Python 3.5 following :pep:`465`. + defined in :pep:`465`. It uses an optimized BLAS library when possible (see `numpy.linalg`). 
@@ -2806,6 +2838,7 @@ def add_newdoc(place, name, doc): -------- For 2-D arrays it is the matrix product: + >>> import numpy as np >>> a = np.array([[1, 0], ... [0, 1]]) >>> b = np.array([[4, 1], @@ -2856,8 +2889,6 @@ def add_newdoc(place, name, doc): >>> x2 = np.array([2j, 3j]) >>> x1 @ x2 (-13+0j) - - .. versionadded:: 1.10.0 """) add_newdoc('numpy._core.umath', 'vecdot', @@ -2874,14 +2905,16 @@ def add_newdoc(place, name, doc): where :math:`\\overline{a_i}` denotes the complex conjugate if :math:`a_i` is complex and the identity otherwise. + .. versionadded:: 2.0.0 + Parameters ---------- x1, x2 : array_like Input arrays, scalars not allowed. out : ndarray, optional A location into which the result is stored. If provided, it must have - a shape that the broadcasted shape of `x1` and `x2` with the last axis - removed. If not provided or None, a freshly-allocated array is used. + the broadcasted shape of `x1` and `x2` with the last axis removed. + If not provided or None, a freshly-allocated array is used. **kwargs For other keyword-only arguments, see the :ref:`ufunc docs `. @@ -2903,10 +2936,15 @@ def add_newdoc(place, name, doc): See Also -------- vdot : same but flattens arguments first + matmul : Matrix-matrix product. + vecmat : Vector-matrix product. + matvec : Matrix-vector product. einsum : Einstein summation convention. Examples -------- + >>> import numpy as np + Get the projected size along a given normal for an array of vectors. >>> v = np.array([[0., 5., 0.], [0., 0., 10.], [0., 6., 8.]]) @@ -2914,7 +2952,137 @@ def add_newdoc(place, name, doc): >>> np.vecdot(v, n) array([ 3., 8., 10.]) - .. versionadded:: 2.0.0 + """) + +add_newdoc('numpy._core.umath', 'matvec', + """ + Matrix-vector dot product of two arrays. + + Given a matrix (or stack of matrices) :math:`\\mathbf{A}` in ``x1`` and + a vector (or stack of vectors) :math:`\\mathbf{v}` in ``x2``, the + matrix-vector product is defined as: + + .. 
math:: + \\mathbf{A} \\cdot \\mathbf{v} = \\sum_{j=0}^{n-1} A_{ij} v_j + + where the sum is over the last dimensions in ``x1`` and ``x2`` + (unless ``axes`` is specified). (For a matrix-vector product with the + vector conjugated, use ``np.vecmat(x2, x1.mT)``.) + + .. versionadded:: 2.2.0 + + Parameters + ---------- + x1, x2 : array_like + Input arrays, scalars not allowed. + out : ndarray, optional + A location into which the result is stored. If provided, it must have + the broadcasted shape of ``x1`` and ``x2`` with the summation axis + removed. If not provided or None, a freshly-allocated array is used. + **kwargs + For other keyword-only arguments, see the + :ref:`ufunc docs `. + + Returns + ------- + y : ndarray + The matrix-vector product of the inputs. + + Raises + ------ + ValueError + If the last dimensions of ``x1`` and ``x2`` are not the same size. + + If a scalar value is passed in. + + See Also + -------- + vecdot : Vector-vector product. + vecmat : Vector-matrix product. + matmul : Matrix-matrix product. + einsum : Einstein summation convention. + + Examples + -------- + Rotate a set of vectors from Y to X along Z. + + >>> a = np.array([[0., 1., 0.], + ... [-1., 0., 0.], + ... [0., 0., 1.]]) + >>> v = np.array([[1., 0., 0.], + ... [0., 1., 0.], + ... [0., 0., 1.], + ... [0., 6., 8.]]) + >>> np.matvec(a, v) + array([[ 0., -1., 0.], + [ 1., 0., 0.], + [ 0., 0., 1.], + [ 6., 0., 8.]]) + + """) + +add_newdoc('numpy._core.umath', 'vecmat', + """ + Vector-matrix dot product of two arrays. + + Given a vector (or stack of vector) :math:`\\mathbf{v}` in ``x1`` and + a matrix (or stack of matrices) :math:`\\mathbf{A}` in ``x2``, the + vector-matrix product is defined as: + + .. 
math:: + \\mathbf{v} \\cdot \\mathbf{A} = \\sum_{i=0}^{n-1} \\overline{v_i}A_{ij} + + where the sum is over the last dimension of ``x1`` and the one-but-last + dimensions in ``x2`` (unless `axes` is specified) and where + :math:`\\overline{v_i}` denotes the complex conjugate if :math:`v` + is complex and the identity otherwise. (For a non-conjugated vector-matrix + product, use ``np.matvec(x2.mT, x1)``.) + + .. versionadded:: 2.2.0 + + Parameters + ---------- + x1, x2 : array_like + Input arrays, scalars not allowed. + out : ndarray, optional + A location into which the result is stored. If provided, it must have + the broadcasted shape of ``x1`` and ``x2`` with the summation axis + removed. If not provided or None, a freshly-allocated array is used. + **kwargs + For other keyword-only arguments, see the + :ref:`ufunc docs `. + + Returns + ------- + y : ndarray + The vector-matrix product of the inputs. + + Raises + ------ + ValueError + If the last dimensions of ``x1`` and the one-but-last dimension of + ``x2`` are not the same size. + + If a scalar value is passed in. + + See Also + -------- + vecdot : Vector-vector product. + matvec : Matrix-vector product. + matmul : Matrix-matrix product. + einsum : Einstein summation convention. + + Examples + -------- + Project a vector along X and Y. + + >>> v = np.array([0., 4., 2.]) + >>> a = np.array([[1., 0., 0.], + ... [0., 1., 0.], + ... [0., 0., 0.]]) + >>> np.vecmat(v, a) + array([ 0., 4., 0.]) + """) add_newdoc('numpy._core.umath', 'modf', @@ -2950,6 +3118,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.modf([0, 3.5]) (array([ 0. , 0.5]), array([ 0., 3.])) >>> np.modf(-0.5) @@ -2980,6 +3149,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.multiply(2.0, 4.0) 8.0 @@ -3004,7 +3174,7 @@ def add_newdoc(place, name, doc): add_newdoc('numpy._core.umath', 'negative', """ - Numerical negative, element-wise. + Numerical negation, element-wise. 
Parameters ---------- @@ -3020,6 +3190,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.negative([1.,-1.]) array([-1., 1.]) @@ -3036,8 +3207,6 @@ def add_newdoc(place, name, doc): """ Numerical positive, element-wise. - .. versionadded:: 1.13.0 - Parameters ---------- x : array_like or scalar @@ -3056,6 +3225,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> x1 = np.array(([1., -1.])) >>> np.positive(x1) @@ -3085,7 +3255,7 @@ def add_newdoc(place, name, doc): ------- out : ndarray or scalar Output array, element-wise comparison of `x1` and `x2`. - Typically of type bool, unless ``dtype=object`` is passed. + Typically of type bool, unless ``dtype=np.object_`` is passed. $OUT_SCALAR_2 See Also @@ -3094,6 +3264,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.not_equal([1.,2.], [1., 3.]) array([False, True]) >>> np.not_equal([1, 2], [[1, 3],[1, 4]]) @@ -3158,6 +3329,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + Cube each element in an array. >>> x1 = np.arange(6) @@ -3200,9 +3373,9 @@ def add_newdoc(place, name, doc): >>> p array([nan, nan]) - To get complex results, give the argument ``dtype=complex``. + To get complex results, give the argument ``dtype=np.complex128``. - >>> np.power(x3, 1.5, dtype=complex) + >>> np.power(x3, 1.5, dtype=np.complex128) array([-1.83697020e-16-1.j, -1.46957616e-15-8.j]) """) @@ -3222,8 +3395,6 @@ def add_newdoc(place, name, doc): To get complex results, cast the input to complex, or specify the ``dtype`` to be ``complex`` (see the example below). - .. versionadded:: 1.12.0 - Parameters ---------- x1 : array_like @@ -3245,6 +3416,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + Cube each element in a list. 
>>> x1 = range(6) @@ -3279,9 +3452,9 @@ def add_newdoc(place, name, doc): >>> p array([nan, nan]) - To get complex results, give the argument ``dtype=complex``. + To get complex results, give the argument ``dtype=np.complex128``. - >>> np.float_power(x3, 1.5, dtype=complex) + >>> np.float_power(x3, 1.5, dtype=np.complex128) array([-1.83697020e-16-1.j, -1.46957616e-15-8.j]) """) @@ -3308,6 +3481,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + Convert a degree array to radians >>> deg = np.arange(12.) * 30. @@ -3346,12 +3521,11 @@ def add_newdoc(place, name, doc): Notes ----- - .. versionadded:: 1.3.0 - ``deg2rad(x)`` is ``x * pi / 180``. Examples -------- + >>> import numpy as np >>> np.deg2rad(180) 3.1415926535897931 @@ -3386,6 +3560,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.reciprocal(2.) 0.5 >>> np.reciprocal([1, 2., 3.33]) @@ -3406,8 +3581,8 @@ def add_newdoc(place, name, doc): This should not be confused with: - * Python 3.7's `math.remainder` and C's ``remainder``, which - computes the IEEE remainder, which are the complement to + * Python's `math.remainder` and C's ``remainder``, which + compute the IEEE remainder, which are the complement to ``round(x1 / x2)``. * The MATLAB ``rem`` function and or the C ``%`` operator which is the complement to ``int(x1 / x2)``. @@ -3442,6 +3617,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.remainder([4, 7], [2, 3]) array([0, 1]) >>> np.remainder(np.arange(7), 5) @@ -3460,8 +3636,6 @@ def add_newdoc(place, name, doc): """ Return element-wise quotient and remainder simultaneously. - .. versionadded:: 1.13.0 - ``np.divmod(x, y)`` is equivalent to ``(x // y, x % y)``, but faster because it avoids redundant work. It is used to implement the Python built-in function ``divmod`` on NumPy arrays. 
@@ -3493,6 +3667,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.divmod(np.arange(5), 3) (array([0, 0, 0, 1, 1]), array([0, 1, 2, 0, 1])) @@ -3536,6 +3711,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.binary_repr(10) '1010' >>> np.right_shift(10, 1) @@ -3584,6 +3760,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) >>> np.rint(a) array([-2., -2., -0., 0., 2., 2., 2.]) @@ -3615,14 +3792,22 @@ def add_newdoc(place, name, doc): The sign of `x`. $OUT_SCALAR_1 + See Also + -------- + signbit + copysign + Notes ----- There is more than one definition of sign in common use for complex - numbers. The definition used here is equivalent to :math:`x/\\sqrt{x*x}` - which is different from a common alternative, :math:`x/|x|`. + numbers. The definition used here, :math:`x/|x|`, is the more common + and useful one, but is different from the one used in numpy prior to + version 2.0, :math:`x/\\sqrt{x*x}`, which is equivalent to + ``sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j``. Examples -------- + >>> import numpy as np >>> np.sign([-5., 4.5]) array([-1., 1.]) >>> np.sign(0) @@ -3648,8 +3833,14 @@ def add_newdoc(place, name, doc): Output array, or reference to `out` if that was supplied. $OUT_SCALAR_1 + See Also + -------- + sign + copysign + Examples -------- + >>> import numpy as np >>> np.signbit(-1.2) True >>> np.signbit(np.array([1, -2.3, 2.1])) @@ -3678,8 +3869,14 @@ def add_newdoc(place, name, doc): The values of `x1` with the sign of `x2`. 
$OUT_SCALAR_2 + See Also + -------- + sign + signbit + Examples -------- + >>> import numpy as np >>> np.copysign(1.3, -1) -1.3 >>> 1/np.copysign(0, 1) @@ -3715,6 +3912,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> eps = np.finfo(np.float64).eps >>> np.nextafter(1, 2) == eps + 1 True @@ -3750,6 +3948,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.spacing(1) == np.finfo(np.float64).eps True @@ -3791,6 +3990,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + Print sine of one angle: >>> np.sin(np.pi/2.) @@ -3844,6 +4045,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.sinh(0) 0.0 >>> np.sinh(np.pi*1j/2) @@ -3853,7 +4055,7 @@ def add_newdoc(place, name, doc): >>> # Discrepancy due to vagaries of floating point arithmetic. >>> # Example of providing the optional output parameter - >>> out1 = np.array([0], dtype='d') + >>> out1 = np.array([0], dtype=np.float64) >>> out2 = np.sinh([0.1], out1) >>> out2 is out1 True @@ -3902,6 +4104,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.sqrt([1,4,9]) array([ 1., 2., 3.]) @@ -3917,8 +4120,6 @@ def add_newdoc(place, name, doc): """ Return the cube-root of an array, element-wise. - .. 
versionadded:: 1.10.0 - Parameters ---------- x : array_like @@ -3936,6 +4137,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.cbrt([1,8,27]) array([ 1., 2., 3.]) @@ -3965,6 +4167,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.square([-1j, 1]) array([-1.-0.j, 1.+0.j]) @@ -3993,6 +4196,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.subtract(1.0, 4.0) -3.0 @@ -4045,13 +4249,14 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> from math import pi >>> np.tan(np.array([-pi,pi/2,pi])) array([ 1.22460635e-16, 1.63317787e+16, -1.22460635e-16]) >>> >>> # Example of providing the optional output parameter illustrating >>> # that what is returned is a reference to said parameter - >>> out1 = np.array([0], dtype='d') + >>> out1 = np.array([0], dtype=np.float64) >>> out2 = np.cos([0.1], out1) >>> out2 is out1 True @@ -4066,7 +4271,7 @@ def add_newdoc(place, name, doc): add_newdoc('numpy._core.umath', 'tanh', """ - Compute hyperbolic tangent element-wise. + Hyperbolic tangent, element-wise. Equivalent to ``np.sinh(x)/np.cosh(x)`` or ``-1j * np.tan(1j*x)``. @@ -4098,12 +4303,13 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.tanh((0, np.pi*1j, np.pi*1j/2)) array([ 0. +0.00000000e+00j, 0. -1.22460635e-16j, 0. +1.63317787e+16j]) >>> # Example of providing the optional output parameter illustrating >>> # that what is returned is a reference to said parameter - >>> out1 = np.array([0], dtype='d') + >>> out1 = np.array([0], dtype=np.float64) >>> out2 = np.tanh([0.1], out1) >>> out2 is out1 True @@ -4153,13 +4359,14 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> x = np.arange(9) >>> y1, y2 = np.frexp(x) >>> y1 array([ 0. 
, 0.5 , 0.5 , 0.75 , 0.5 , 0.625, 0.75 , 0.875, 0.5 ]) >>> y2 - array([0, 1, 2, 2, 3, 3, 3, 3, 4]) + array([0, 1, 2, 2, 3, 3, 3, 3, 4], dtype=int32) >>> y1 * 2**y2 array([ 0., 1., 2., 3., 4., 5., 6., 7., 8.]) @@ -4200,6 +4407,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.ldexp(5, np.arange(4)) array([ 5., 10., 20., 40.], dtype=float16) @@ -4231,6 +4439,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.gcd(12, 20) 4 >>> np.gcd.reduce([15, 25, 35]) @@ -4262,6 +4471,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.lcm(12, 20) 60 >>> np.lcm.reduce([3, 12, 20]) @@ -4302,8 +4512,9 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.bitwise_count(1023) - 10 + np.uint8(10) >>> a = np.array([2**i - 1 for i in range(16)]) >>> np.bitwise_count(a) array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], @@ -4334,6 +4545,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> a = np.array(['Grace Hopper Conference', 'Open Source Day']) >>> np.strings.str_len(a) array([23, 15]) @@ -4373,6 +4585,17 @@ def add_newdoc(place, name, doc): -------- str.isalpha + Examples + -------- + >>> import numpy as np + >>> a = np.array(['a', 'b', '0']) + >>> np.strings.isalpha(a) + array([ True, True, False]) + + >>> a = np.array([['a', 'b', '0'], ['c', '1', '2']]) + >>> np.strings.isalpha(a) + array([[ True, True, False], [ True, False, False]]) + """) add_newdoc('numpy._core.umath', 'isdigit', @@ -4403,6 +4626,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> a = np.array(['a', 'b', '0']) >>> np.strings.isdigit(a) array([False, False, True]) @@ -4439,6 +4663,15 @@ def add_newdoc(place, name, doc): -------- str.isspace + Examples + -------- + >>> np.char.isspace(list("a b c")) + array([False, True, False, True, False]) + >>> np.char.isspace(b'\x0a \x0b \x0c') + np.True_ 
+ >>> np.char.isspace(b'\x0a \x0b \x0c N') + np.False_ + """) add_newdoc('numpy._core.umath', 'isalnum', @@ -4463,10 +4696,11 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> a = np.array(['a', '1', 'a1', '(', '']) >>> np.strings.isalnum(a) array([ True, True, True, False, False]) - + """) add_newdoc('numpy._core.umath', 'islower', @@ -4492,6 +4726,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.strings.islower("GHC") array(False) >>> np.strings.islower("ghc") @@ -4522,11 +4757,12 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.strings.isupper("GHC") - array(True) + array(True) >>> a = np.array(["hello", "HELLO", "Hello"]) >>> np.strings.isupper(a) - array([False, True, False]) + array([False, True, False]) """) @@ -4552,12 +4788,13 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.strings.istitle("Numpy Is Great") array(True) >>> np.strings.istitle("Numpy is great") array(False) - + """) add_newdoc('numpy._core.umath', 'isdecimal', @@ -4586,6 +4823,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.strings.isdecimal(['12345', '4.99', '123ABC', '']) array([ True, False, False, False]) @@ -4617,6 +4855,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.strings.isnumeric(['123', '123abc', '9.0', '1/4', 'VIII']) array([ True, False, False, False, False]) @@ -4653,6 +4892,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> a = np.array(["NumPy is a Python library"]) >>> np.strings.find(a, "Python", 0, None) array([11]) @@ -4721,6 +4961,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> c array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> import numpy as np >>> a = np.array(["Computer Science"]) >>> np.strings.index(a, "Science") 
array([9]) @@ -4793,6 +5035,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> a = np.array(["Computer Science"]) >>> np.strings.rindex(a, "Science") array([9]) @@ -4866,6 +5109,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> s = np.array(['foo', 'bar']) >>> s array(['foo', 'bar'], dtype='>> import numpy as np >>> c = np.array(['a1b2','1b2a','b2a1','2a1b']); c array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype='>> np.strings.center(c, width=9) @@ -4953,6 +5198,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> np.strings.ljust(c, width=3) array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> import numpy as np >>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> np.strings.rjust(a, width=3) array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> import numpy as np >>> np.strings.zfill(['1', '-1', '+1'], 3) array(['001', '-01', '+01'], dtype='>> import numpy as np + The ufunc is used most easily via ``np.strings.partition``, which calls it after calculating the indices:: @@ -5114,6 +5364,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + The ufunc is used most easily via ``np.strings.rpartition``, which calls it after calculating the indices:: @@ -5156,6 +5408,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + The ufunc is used most easily via ``np.strings.partition``, which calls it under the hood:: @@ -5199,6 +5453,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + The ufunc is used most easily via ``np.strings.rpartition``, which calls it after calculating the indices:: @@ -5209,3 +5465,42 @@ def add_newdoc(place, name, doc): array(['', ' ', 'Bba'], dtype=StringDType())) """) + +add_newdoc('numpy._core.umath', '_slice', + """ + Slice the strings in `a` by slices specified by `start`, `stop`, `step`. 
+ Like in the regular Python `slice` object, if only `start` is + specified then it is interpreted as the `stop`. + + Parameters + ---------- + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + Input array + + start : array-like, with integer dtype + The start of the slice, broadcasted to `a`'s shape + + stop : array-like, with integer dtype + The end of the slice, broadcasted to `a`'s shape + + step : array-like, with integer dtype + The step for the slice, broadcasted to `a`'s shape + + Returns + ------- + out : ndarray + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input type + + Examples + -------- + >>> import numpy as np + + The ufunc is used most easily via ``np.strings.slice``, + which calls it under the hood:: + + >>> a = np.array(['hello', 'world']) + >>> np.strings.slice(a, 2) + array(['he', 'wo'], dtype='>> import numpy as np >>> y = "aa " >>> x = "aa" >>> np.char.equal(x, y) - array(True) + array(True) See Also -------- @@ -115,10 +117,11 @@ def not_equal(x1, x2): Examples -------- + >>> import numpy as np >>> x1 = np.array(['a', 'b', 'c']) >>> np.char.not_equal(x1, 'b') array([ True, False, True]) - + """ return compare_chararrays(x1, x2, '!=', True) @@ -149,10 +152,11 @@ def greater_equal(x1, x2): Examples -------- + >>> import numpy as np >>> x1 = np.array(['a', 'b', 'c']) >>> np.char.greater_equal(x1, 'b') array([False, True, True]) - + """ return compare_chararrays(x1, x2, '>=', True) @@ -182,10 +186,11 @@ def less_equal(x1, x2): Examples -------- + >>> import numpy as np >>> x1 = np.array(['a', 'b', 'c']) >>> np.char.less_equal(x1, 'b') array([ True, True, False]) - + """ return compare_chararrays(x1, x2, '<=', True) @@ -212,13 +217,14 @@ def greater(x1, x2): See Also -------- equal, not_equal, greater_equal, less_equal, less - + Examples -------- + >>> import numpy as np >>> x1 = np.array(['a', 'b', 'c']) >>> np.char.greater(x1, 'b') array([False, False, True]) - + """ return 
compare_chararrays(x1, x2, '>', True) @@ -248,14 +254,16 @@ def less(x1, x2): Examples -------- + >>> import numpy as np >>> x1 = np.array(['a', 'b', 'c']) >>> np.char.less(x1, 'b') array([True, False, False]) - + """ return compare_chararrays(x1, x2, '<', True) +@set_module("numpy.char") def multiply(a, i): """ Return (a * i), that is string multiple concatenation, @@ -266,7 +274,7 @@ def multiply(a, i): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array_like, with ``bytes_`` or ``str_`` dtype i : array_like, with any integer dtype @@ -283,6 +291,7 @@ def multiply(a, i): Examples -------- + >>> import numpy as np >>> a = np.array(["a", "b", "c"]) >>> np.strings.multiply(a, 3) array(['aaa', 'bbb', 'ccc'], dtype='>> import numpy as np >>> x = np.array(["Numpy is nice!"]) >>> np.char.partition(x, " ") array([['Numpy', ' ', 'is nice!']], dtype='>> import numpy as np >>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> np.char.rpartition(a, 'A') array([['aAaAa', 'A', ''], @@ -396,6 +409,10 @@ class chararray(ndarray): Provides a convenient view on arrays of string and unicode values. + .. deprecated:: 2.5 + ``chararray`` is deprecated. Use an ``ndarray`` with a string or + bytes dtype instead. + .. note:: The `chararray` class exists for backwards compatibility with Numarray, it is not recommended for new development. 
Starting from numpy @@ -483,7 +500,6 @@ class adds the following functionality: title tofile tolist - tostring translate transpose upper @@ -515,6 +531,7 @@ class adds the following functionality: Examples -------- + >>> import numpy as np >>> charar = np.char.chararray((3, 3)) >>> charar[:] = 'a' >>> charar @@ -530,7 +547,7 @@ class adds the following functionality: [b'abc', b'abc', b'abc']], dtype='|S5') """ - def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None, + def __new__(cls, shape, itemsize=1, unicode=False, buffer=None, offset=0, strides=None, order='C'): if unicode: dtype = str_ @@ -550,10 +567,10 @@ def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None, filler = None if buffer is None: - self = ndarray.__new__(subtype, shape, (dtype, itemsize), + self = ndarray.__new__(cls, shape, (dtype, itemsize), order=order) else: - self = ndarray.__new__(subtype, shape, (dtype, itemsize), + self = ndarray.__new__(cls, shape, (dtype, itemsize), buffer=buffer, offset=offset, strides=strides, order=order) @@ -705,7 +722,7 @@ def __mod__(self, i): def __rmod__(self, other): return NotImplemented - def argsort(self, axis=-1, kind=None, order=None): + def argsort(self, axis=-1, kind=None, order=None, *, stable=None): """ Return the indices that sort the array lexicographically. @@ -723,7 +740,7 @@ def argsort(self, axis=-1, kind=None, order=None): dtype='|S5') """ - return self.__array__().argsort(axis, kind, order) + return self.__array__().argsort(axis, kind, order, stable=stable) argsort.__doc__ = ndarray.argsort.__doc__ def capitalize(self): @@ -1202,6 +1219,10 @@ def array(obj, itemsize=None, copy=True, unicode=None, order=None): """ Create a `~numpy.char.chararray`. + .. deprecated:: 2.5 + ``chararray`` is deprecated. Use an ``ndarray`` with a string or + bytes dtype instead. + .. note:: This class is provided for numarray backward-compatibility. 
New code (not concerned with numarray compatibility) should use @@ -1259,6 +1280,15 @@ class adds the following functionality: fastest). If order is 'A', then the returned array may be in any order (either C-, Fortran-contiguous, or even discontiguous). + + Examples + -------- + + >>> import numpy as np + >>> char_array = np.char.array(['hello', 'world', 'numpy','array']) + >>> char_array + chararray(['hello', 'world', 'numpy', 'array'], dtype='>> import numpy as np >>> np.char.asarray(['hello', 'world']) chararray(['hello', 'world'], dtype=' _CharArray[bytes_]: ... @overload def __new__( - subtype, + cls, shape: _ShapeLike, - itemsize: SupportsIndex | SupportsInt = ..., - unicode: L[False] = ..., - buffer: _SupportsBuffer = ..., - offset: SupportsIndex = ..., - strides: _ShapeLike = ..., - order: _OrderKACF = ..., - ) -> chararray[Any, dtype[bytes_]]: ... + itemsize: SupportsIndex | SupportsInt, + unicode: L[True], + buffer: Buffer | np.ndarray | None = None, + offset: SupportsIndex = 0, + strides: _ShapeLike | None = None, + order: _OrderKACF = "C", + ) -> _CharArray[str_]: ... @overload def __new__( - subtype, + cls, shape: _ShapeLike, - itemsize: SupportsIndex | SupportsInt = ..., - unicode: L[True] = ..., - buffer: _SupportsBuffer = ..., - offset: SupportsIndex = ..., - strides: _ShapeLike = ..., - order: _OrderKACF = ..., - ) -> chararray[Any, dtype[str_]]: ... + itemsize: SupportsIndex | SupportsInt = 1, + *, + unicode: L[True], + buffer: Buffer | np.ndarray | None = None, + offset: SupportsIndex = 0, + strides: _ShapeLike | None = None, + order: _OrderKACF = "C", + ) -> _CharArray[str_]: ... + # def __array_finalize__(self, obj: object) -> None: ... - def __mul__(self, other: i_co) -> chararray[Any, _CharDType]: ... - def __rmul__(self, other: i_co) -> chararray[Any, _CharDType]: ... - def __mod__(self, i: Any) -> chararray[Any, _CharDType]: ... 
+ # + @overload # type: ignore[override] + def __eq__(self: _CharArray[str_], other: U_co, /) -> NDArray[np.bool]: ... @overload - def __eq__( - self: _CharArray[str_], - other: U_co, - ) -> NDArray[np.bool]: ... + def __eq__(self: _CharArray[bytes_], other: S_co, /) -> NDArray[np.bool]: ... + + # + @overload # type: ignore[override] + def __ne__(self: _CharArray[str_], other: U_co, /) -> NDArray[np.bool]: ... @overload - def __eq__( - self: _CharArray[bytes_], - other: S_co, - ) -> NDArray[np.bool]: ... + def __ne__(self: _CharArray[bytes_], other: S_co, /) -> NDArray[np.bool]: ... + # + @overload # type: ignore[override] + def __ge__(self: _CharArray[str_], other: U_co, /) -> NDArray[np.bool]: ... # pyrefly: ignore[bad-override] @overload - def __ne__( - self: _CharArray[str_], - other: U_co, - ) -> NDArray[np.bool]: ... + def __ge__(self: _CharArray[bytes_], other: S_co, /) -> NDArray[np.bool]: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # + @overload # type: ignore[override] + def __le__(self: _CharArray[str_], other: U_co, /) -> NDArray[np.bool]: ... # pyrefly: ignore[bad-override] @overload - def __ne__( - self: _CharArray[bytes_], - other: S_co, - ) -> NDArray[np.bool]: ... + def __le__(self: _CharArray[bytes_], other: S_co, /) -> NDArray[np.bool]: ... # pyright: ignore[reportIncompatibleMethodOverride] + # + @overload # type: ignore[override] + def __gt__(self: _CharArray[str_], other: U_co, /) -> NDArray[np.bool]: ... # pyrefly: ignore[bad-override] @overload - def __ge__( - self: _CharArray[str_], - other: U_co, - ) -> NDArray[np.bool]: ... + def __gt__(self: _CharArray[bytes_], other: S_co, /) -> NDArray[np.bool]: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # + @overload # type: ignore[override] + def __lt__(self: _CharArray[str_], other: U_co, /) -> NDArray[np.bool]: ... # pyrefly: ignore[bad-override] @overload - def __ge__( - self: _CharArray[bytes_], - other: S_co, - ) -> NDArray[np.bool]: ... 
+ def __lt__(self: _CharArray[bytes_], other: S_co, /) -> NDArray[np.bool]: ... # pyright: ignore[reportIncompatibleMethodOverride] + # + @overload # type: ignore[override] + def __add__(self: _CharArray[str_], other: U_co, /) -> _CharArray[str_]: ... # pyrefly: ignore[bad-override] @overload - def __le__( - self: _CharArray[str_], - other: U_co, - ) -> NDArray[np.bool]: ... + def __add__(self: _CharArray[bytes_], other: S_co, /) -> _CharArray[bytes_]: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # + @overload # type: ignore[override] + def __radd__(self: _CharArray[str_], other: U_co, /) -> _CharArray[str_]: ... # pyrefly: ignore[bad-override] @overload - def __le__( - self: _CharArray[bytes_], - other: S_co, - ) -> NDArray[np.bool]: ... + def __radd__(self: _CharArray[bytes_], other: S_co, /) -> _CharArray[bytes_]: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # + def __mul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ... # type: ignore[override] + def __rmul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ... # type: ignore[override] + + # + def __mod__(self, i: Any) -> chararray[_AnyShape, _CharDTypeT_co]: ... # type: ignore[override] + # + def decode(self: _CharArray[bytes_], encoding: str | None = None, errors: str | None = None) -> _CharArray[str_]: ... + def encode(self: _CharArray[str_], encoding: str | None = None, errors: str | None = None) -> _CharArray[bytes_]: ... + + # @overload - def __gt__( - self: _CharArray[str_], - other: U_co, - ) -> NDArray[np.bool]: ... + def center(self: _CharArray[str_], width: i_co, fillchar: U_co = " ") -> _CharArray[str_]: ... @overload - def __gt__( - self: _CharArray[bytes_], - other: S_co, - ) -> NDArray[np.bool]: ... + def center(self: _CharArray[bytes_], width: i_co, fillchar: str | S_co = " ") -> _CharArray[bytes_]: ... + # @overload - def __lt__( - self: _CharArray[str_], - other: U_co, - ) -> NDArray[np.bool]: ... 
+ def count(self: _CharArray[str_], sub: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... @overload - def __lt__( - self: _CharArray[bytes_], - other: S_co, - ) -> NDArray[np.bool]: ... + def count(self: _CharArray[bytes_], sub: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... + # @overload - def __add__( - self: _CharArray[str_], - other: U_co, - ) -> _CharArray[str_]: ... + def endswith(self: _CharArray[str_], suffix: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.bool]: ... @overload - def __add__( - self: _CharArray[bytes_], - other: S_co, - ) -> _CharArray[bytes_]: ... + def endswith(self: _CharArray[bytes_], suffix: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.bool]: ... + # + def expandtabs(self, tabsize: i_co = 8) -> Self: ... + + # @overload - def __radd__( - self: _CharArray[str_], - other: U_co, - ) -> _CharArray[str_]: ... + def find(self: _CharArray[str_], sub: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... @overload - def __radd__( - self: _CharArray[bytes_], - other: S_co, - ) -> _CharArray[bytes_]: ... + def find(self: _CharArray[bytes_], sub: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... + # @overload - def center( - self: _CharArray[str_], - width: i_co, - fillchar: U_co = ..., - ) -> _CharArray[str_]: ... + def index(self: _CharArray[str_], sub: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... @overload - def center( - self: _CharArray[bytes_], - width: i_co, - fillchar: S_co = ..., - ) -> _CharArray[bytes_]: ... + def index(self: _CharArray[bytes_], sub: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... + # @overload - def count( - self: _CharArray[str_], - sub: U_co, - start: i_co = ..., - end: None | i_co = ..., - ) -> NDArray[int_]: ... - @overload - def count( - self: _CharArray[bytes_], - sub: S_co, - start: i_co = ..., - end: None | i_co = ..., - ) -> NDArray[int_]: ... 
- - def decode( - self: _CharArray[bytes_], - encoding: None | str = ..., - errors: None | str = ..., - ) -> _CharArray[str_]: ... - - def encode( - self: _CharArray[str_], - encoding: None | str = ..., - errors: None | str = ..., - ) -> _CharArray[bytes_]: ... + def join(self: _CharArray[str_], seq: U_co) -> _CharArray[str_]: ... + @overload + def join(self: _CharArray[bytes_], seq: S_co) -> _CharArray[bytes_]: ... + # @overload - def endswith( - self: _CharArray[str_], - suffix: U_co, - start: i_co = ..., - end: None | i_co = ..., - ) -> NDArray[np.bool]: ... - @overload - def endswith( - self: _CharArray[bytes_], - suffix: S_co, - start: i_co = ..., - end: None | i_co = ..., - ) -> NDArray[np.bool]: ... - - def expandtabs( - self, - tabsize: i_co = ..., - ) -> chararray[Any, _CharDType]: ... - - @overload - def find( - self: _CharArray[str_], - sub: U_co, - start: i_co = ..., - end: None | i_co = ..., - ) -> NDArray[int_]: ... - @overload - def find( - self: _CharArray[bytes_], - sub: S_co, - start: i_co = ..., - end: None | i_co = ..., - ) -> NDArray[int_]: ... - - @overload - def index( - self: _CharArray[str_], - sub: U_co, - start: i_co = ..., - end: None | i_co = ..., - ) -> NDArray[int_]: ... - @overload - def index( - self: _CharArray[bytes_], - sub: S_co, - start: i_co = ..., - end: None | i_co = ..., - ) -> NDArray[int_]: ... - - @overload - def join( - self: _CharArray[str_], - seq: U_co, - ) -> _CharArray[str_]: ... + def ljust(self: _CharArray[str_], width: i_co, fillchar: U_co = " ") -> _CharArray[str_]: ... @overload - def join( - self: _CharArray[bytes_], - seq: S_co, - ) -> _CharArray[bytes_]: ... + def ljust(self: _CharArray[bytes_], width: i_co, fillchar: str | S_co = " ") -> _CharArray[bytes_]: ... + # @overload - def ljust( - self: _CharArray[str_], - width: i_co, - fillchar: U_co = ..., - ) -> _CharArray[str_]: ... + def lstrip(self: _CharArray[str_], chars: U_co | None = None) -> _CharArray[str_]: ... 
@overload - def ljust( - self: _CharArray[bytes_], - width: i_co, - fillchar: S_co = ..., - ) -> _CharArray[bytes_]: ... + def lstrip(self: _CharArray[bytes_], chars: S_co | None = None) -> _CharArray[bytes_]: ... + # + @overload # type: ignore[override] + def partition(self: _CharArray[str_], sep: U_co) -> _CharArray[str_]: ... # pyrefly: ignore[bad-override] @overload - def lstrip( - self: _CharArray[str_], - chars: None | U_co = ..., - ) -> _CharArray[str_]: ... + def partition(self: _CharArray[bytes_], sep: S_co) -> _CharArray[bytes_]: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # @overload - def lstrip( - self: _CharArray[bytes_], - chars: None | S_co = ..., - ) -> _CharArray[bytes_]: ... + def replace(self: _CharArray[str_], old: U_co, new: U_co, count: i_co | None = None) -> _CharArray[str_]: ... + @overload + def replace(self: _CharArray[bytes_], old: S_co, new: S_co, count: i_co | None = None) -> _CharArray[bytes_]: ... + # @overload - def partition( - self: _CharArray[str_], - sep: U_co, - ) -> _CharArray[str_]: ... + def rfind(self: _CharArray[str_], sub: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... @overload - def partition( - self: _CharArray[bytes_], - sep: S_co, - ) -> _CharArray[bytes_]: ... + def rfind(self: _CharArray[bytes_], sub: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... + # @overload - def replace( - self: _CharArray[str_], - old: U_co, - new: U_co, - count: None | i_co = ..., - ) -> _CharArray[str_]: ... + def rindex(self: _CharArray[str_], sub: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... @overload - def replace( - self: _CharArray[bytes_], - old: S_co, - new: S_co, - count: None | i_co = ..., - ) -> _CharArray[bytes_]: ... + def rindex(self: _CharArray[bytes_], sub: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... 
+ # @overload - def rfind( - self: _CharArray[str_], - sub: U_co, - start: i_co = ..., - end: None | i_co = ..., - ) -> NDArray[int_]: ... - @overload - def rfind( - self: _CharArray[bytes_], - sub: S_co, - start: i_co = ..., - end: None | i_co = ..., - ) -> NDArray[int_]: ... - - @overload - def rindex( - self: _CharArray[str_], - sub: U_co, - start: i_co = ..., - end: None | i_co = ..., - ) -> NDArray[int_]: ... - @overload - def rindex( - self: _CharArray[bytes_], - sub: S_co, - start: i_co = ..., - end: None | i_co = ..., - ) -> NDArray[int_]: ... - - @overload - def rjust( - self: _CharArray[str_], - width: i_co, - fillchar: U_co = ..., - ) -> _CharArray[str_]: ... + def rjust(self: _CharArray[str_], width: i_co, fillchar: U_co = " ") -> _CharArray[str_]: ... @overload - def rjust( - self: _CharArray[bytes_], - width: i_co, - fillchar: S_co = ..., - ) -> _CharArray[bytes_]: ... + def rjust(self: _CharArray[bytes_], width: i_co, fillchar: str | S_co = " ") -> _CharArray[bytes_]: ... + # @overload - def rpartition( - self: _CharArray[str_], - sep: U_co, - ) -> _CharArray[str_]: ... + def rpartition(self: _CharArray[str_], sep: U_co) -> _CharArray[str_]: ... @overload - def rpartition( - self: _CharArray[bytes_], - sep: S_co, - ) -> _CharArray[bytes_]: ... + def rpartition(self: _CharArray[bytes_], sep: S_co) -> _CharArray[bytes_]: ... + # @overload - def rsplit( - self: _CharArray[str_], - sep: None | U_co = ..., - maxsplit: None | i_co = ..., - ) -> NDArray[object_]: ... + def rsplit(self: _CharArray[str_], sep: U_co | None = None, maxsplit: i_co | None = None) -> NDArray[object_]: ... @overload - def rsplit( - self: _CharArray[bytes_], - sep: None | S_co = ..., - maxsplit: None | i_co = ..., - ) -> NDArray[object_]: ... + def rsplit(self: _CharArray[bytes_], sep: S_co | None = None, maxsplit: i_co | None = None) -> NDArray[object_]: ... + # @overload - def rstrip( - self: _CharArray[str_], - chars: None | U_co = ..., - ) -> _CharArray[str_]: ... 
+ def rstrip(self: _CharArray[str_], chars: U_co | None = None) -> _CharArray[str_]: ... @overload - def rstrip( - self: _CharArray[bytes_], - chars: None | S_co = ..., - ) -> _CharArray[bytes_]: ... + def rstrip(self: _CharArray[bytes_], chars: S_co | None = None) -> _CharArray[bytes_]: ... + # @overload - def split( - self: _CharArray[str_], - sep: None | U_co = ..., - maxsplit: None | i_co = ..., - ) -> NDArray[object_]: ... + def split(self: _CharArray[str_], sep: U_co | None = None, maxsplit: i_co | None = None) -> NDArray[object_]: ... @overload - def split( - self: _CharArray[bytes_], - sep: None | S_co = ..., - maxsplit: None | i_co = ..., - ) -> NDArray[object_]: ... + def split(self: _CharArray[bytes_], sep: S_co | None = None, maxsplit: i_co | None = None) -> NDArray[object_]: ... - def splitlines(self, keepends: None | b_co = ...) -> NDArray[object_]: ... + # + def splitlines(self, keepends: b_co | None = None) -> NDArray[object_]: ... + # @overload - def startswith( - self: _CharArray[str_], - prefix: U_co, - start: i_co = ..., - end: None | i_co = ..., - ) -> NDArray[np.bool]: ... + def startswith(self: _CharArray[str_], prefix: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.bool]: ... @overload - def startswith( - self: _CharArray[bytes_], - prefix: S_co, - start: i_co = ..., - end: None | i_co = ..., - ) -> NDArray[np.bool]: ... + def startswith(self: _CharArray[bytes_], prefix: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.bool]: ... + # @overload - def strip( - self: _CharArray[str_], - chars: None | U_co = ..., - ) -> _CharArray[str_]: ... + def strip(self: _CharArray[str_], chars: U_co | None = None) -> _CharArray[str_]: ... @overload - def strip( - self: _CharArray[bytes_], - chars: None | S_co = ..., - ) -> _CharArray[bytes_]: ... + def strip(self: _CharArray[bytes_], chars: S_co | None = None) -> _CharArray[bytes_]: ... 
+ # @overload - def translate( - self: _CharArray[str_], - table: U_co, - deletechars: None | U_co = ..., - ) -> _CharArray[str_]: ... + def translate(self: _CharArray[str_], table: U_co, deletechars: U_co | None = None) -> _CharArray[str_]: ... @overload - def translate( - self: _CharArray[bytes_], - table: S_co, - deletechars: None | S_co = ..., - ) -> _CharArray[bytes_]: ... + def translate(self: _CharArray[bytes_], table: S_co, deletechars: S_co | None = None) -> _CharArray[bytes_]: ... - def zfill(self, width: _ArrayLikeInt_co) -> chararray[Any, _CharDType]: ... - def capitalize(self) -> chararray[_ShapeType, _CharDType]: ... - def title(self) -> chararray[_ShapeType, _CharDType]: ... - def swapcase(self) -> chararray[_ShapeType, _CharDType]: ... - def lower(self) -> chararray[_ShapeType, _CharDType]: ... - def upper(self) -> chararray[_ShapeType, _CharDType]: ... - def isalnum(self) -> ndarray[_ShapeType, dtype[np.bool]]: ... - def isalpha(self) -> ndarray[_ShapeType, dtype[np.bool]]: ... - def isdigit(self) -> ndarray[_ShapeType, dtype[np.bool]]: ... - def islower(self) -> ndarray[_ShapeType, dtype[np.bool]]: ... - def isspace(self) -> ndarray[_ShapeType, dtype[np.bool]]: ... - def istitle(self) -> ndarray[_ShapeType, dtype[np.bool]]: ... - def isupper(self) -> ndarray[_ShapeType, dtype[np.bool]]: ... - def isnumeric(self) -> ndarray[_ShapeType, dtype[np.bool]]: ... - def isdecimal(self) -> ndarray[_ShapeType, dtype[np.bool]]: ... - -__all__: list[str] + # + def zfill(self, width: i_co) -> Self: ... + def capitalize(self) -> Self: ... + def title(self) -> Self: ... + def swapcase(self) -> Self: ... + def lower(self) -> Self: ... + def upper(self) -> Self: ... + + # + def isalnum(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def isalpha(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def isdigit(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def islower(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... 
+ def isspace(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def istitle(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def isupper(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def isnumeric(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def isdecimal(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... # Comparison @overload def equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... @overload def equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... @overload def not_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... @overload def not_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def not_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... @overload def greater_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... @overload def greater_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def greater_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... @overload def less_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... @overload def less_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def less_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... @overload def greater(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... @overload def greater(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def greater(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... @overload def less(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... @overload def less(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def less(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... -# String operations @overload -def add(x1: U_co, x2: U_co) -> NDArray[str_]: ... +def add(x1: U_co, x2: U_co) -> NDArray[np.str_]: ... +@overload +def add(x1: S_co, x2: S_co) -> NDArray[np.bytes_]: ... @overload -def add(x1: S_co, x2: S_co) -> NDArray[bytes_]: ... +def add(x1: _StringDTypeSupportsArray, x2: _StringDTypeSupportsArray) -> _StringDTypeArray: ... 
+@overload +def add(x1: T_co, x2: T_co) -> _StringDTypeOrUnicodeArray: ... @overload -def multiply(a: U_co, i: i_co) -> NDArray[str_]: ... +def multiply(a: U_co, i: i_co) -> NDArray[np.str_]: ... +@overload +def multiply(a: S_co, i: i_co) -> NDArray[np.bytes_]: ... +@overload +def multiply(a: _StringDTypeSupportsArray, i: i_co) -> _StringDTypeArray: ... @overload -def multiply(a: S_co, i: i_co) -> NDArray[bytes_]: ... +def multiply(a: T_co, i: i_co) -> _StringDTypeOrUnicodeArray: ... @overload -def mod(a: U_co, value: Any) -> NDArray[str_]: ... +def mod(a: U_co, value: Any) -> NDArray[np.str_]: ... @overload -def mod(a: S_co, value: Any) -> NDArray[bytes_]: ... +def mod(a: S_co, value: Any) -> NDArray[np.bytes_]: ... +@overload +def mod(a: _StringDTypeSupportsArray, value: Any) -> _StringDTypeArray: ... +@overload +def mod(a: T_co, value: Any) -> _StringDTypeOrUnicodeArray: ... @overload def capitalize(a: U_co) -> NDArray[str_]: ... @overload def capitalize(a: S_co) -> NDArray[bytes_]: ... - @overload -def center(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ... +def capitalize(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... @overload -def center(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]: ... +def capitalize(a: T_co) -> _StringDTypeOrUnicodeArray: ... -def decode( - a: S_co, - encoding: None | str = ..., - errors: None | str = ..., -) -> NDArray[str_]: ... +@overload +def center(a: U_co, width: i_co, fillchar: U_co = " ") -> NDArray[str_]: ... +@overload +def center(a: S_co, width: i_co, fillchar: str | S_co = " ") -> NDArray[bytes_]: ... +@overload +def center(a: _StringDTypeSupportsArray, width: i_co, fillchar: str | _StringDTypeSupportsArray = " ") -> _StringDTypeArray: ... +@overload +def center(a: T_co, width: i_co, fillchar: T_co = " ") -> _StringDTypeOrUnicodeArray: ... -def encode( - a: U_co, - encoding: None | str = ..., - errors: None | str = ..., -) -> NDArray[bytes_]: ... 
+def decode(a: S_co, encoding: str | None = None, errors: str | None = None) -> NDArray[str_]: ... +def encode(a: U_co | T_co, encoding: str | None = None, errors: str | None = None) -> NDArray[bytes_]: ... @overload -def expandtabs(a: U_co, tabsize: i_co = ...) -> NDArray[str_]: ... +def expandtabs(a: U_co, tabsize: i_co = 8) -> NDArray[str_]: ... +@overload +def expandtabs(a: S_co, tabsize: i_co = 8) -> NDArray[bytes_]: ... @overload -def expandtabs(a: S_co, tabsize: i_co = ...) -> NDArray[bytes_]: ... +def expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = 8) -> _StringDTypeArray: ... +@overload +def expandtabs(a: T_co, tabsize: i_co = 8) -> _StringDTypeOrUnicodeArray: ... @overload def join(sep: U_co, seq: U_co) -> NDArray[str_]: ... @overload def join(sep: S_co, seq: S_co) -> NDArray[bytes_]: ... +@overload +def join(sep: _StringDTypeSupportsArray, seq: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def join(sep: T_co, seq: T_co) -> _StringDTypeOrUnicodeArray: ... @overload -def ljust(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ... +def ljust(a: U_co, width: i_co, fillchar: U_co = " ") -> NDArray[str_]: ... +@overload +def ljust(a: S_co, width: i_co, fillchar: str | S_co = " ") -> NDArray[bytes_]: ... +@overload +def ljust(a: _StringDTypeSupportsArray, width: i_co, fillchar: str | _StringDTypeSupportsArray = " ") -> _StringDTypeArray: ... @overload -def ljust(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]: ... +def ljust(a: T_co, width: i_co, fillchar: T_co = " ") -> _StringDTypeOrUnicodeArray: ... @overload def lower(a: U_co) -> NDArray[str_]: ... @overload def lower(a: S_co) -> NDArray[bytes_]: ... +@overload +def lower(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def lower(a: T_co) -> _StringDTypeOrUnicodeArray: ... @overload -def lstrip(a: U_co, chars: None | U_co = ...) -> NDArray[str_]: ... +def lstrip(a: U_co, chars: U_co | None = None) -> NDArray[str_]: ... 
@overload -def lstrip(a: S_co, chars: None | S_co = ...) -> NDArray[bytes_]: ... +def lstrip(a: S_co, chars: S_co | None = None) -> NDArray[bytes_]: ... +@overload +def lstrip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = None) -> _StringDTypeArray: ... +@overload +def lstrip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... @overload def partition(a: U_co, sep: U_co) -> NDArray[str_]: ... @overload def partition(a: S_co, sep: S_co) -> NDArray[bytes_]: ... +@overload +def partition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def partition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ... @overload -def replace( - a: U_co, - old: U_co, - new: U_co, - count: None | i_co = ..., -) -> NDArray[str_]: ... +def replace(a: U_co, old: U_co, new: U_co, count: i_co | None = -1) -> NDArray[str_]: ... +@overload +def replace(a: S_co, old: S_co, new: S_co, count: i_co | None = -1) -> NDArray[bytes_]: ... @overload def replace( - a: S_co, - old: S_co, - new: S_co, - count: None | i_co = ..., -) -> NDArray[bytes_]: ... + a: _StringDTypeSupportsArray, old: _StringDTypeSupportsArray, new: _StringDTypeSupportsArray, count: i_co = -1 +) -> _StringDTypeArray: ... +@overload +def replace(a: T_co, old: T_co, new: T_co, count: i_co = -1) -> _StringDTypeOrUnicodeArray: ... @overload -def rjust( - a: U_co, - width: i_co, - fillchar: U_co = ..., -) -> NDArray[str_]: ... +def rjust(a: U_co, width: i_co, fillchar: U_co = " ") -> NDArray[str_]: ... +@overload +def rjust(a: S_co, width: i_co, fillchar: str | S_co = " ") -> NDArray[bytes_]: ... +@overload +def rjust(a: _StringDTypeSupportsArray, width: i_co, fillchar: str | _StringDTypeSupportsArray = " ") -> _StringDTypeArray: ... @overload -def rjust( - a: S_co, - width: i_co, - fillchar: S_co = ..., -) -> NDArray[bytes_]: ... +def rjust(a: T_co, width: i_co, fillchar: T_co = " ") -> _StringDTypeOrUnicodeArray: ... 
@overload def rpartition(a: U_co, sep: U_co) -> NDArray[str_]: ... @overload def rpartition(a: S_co, sep: S_co) -> NDArray[bytes_]: ... +@overload +def rpartition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def rpartition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ... @overload -def rsplit( - a: U_co, - sep: None | U_co = ..., - maxsplit: None | i_co = ..., -) -> NDArray[object_]: ... +def rsplit(a: U_co, sep: U_co | None = None, maxsplit: i_co | None = None) -> NDArray[object_]: ... +@overload +def rsplit(a: S_co, sep: S_co | None = None, maxsplit: i_co | None = None) -> NDArray[object_]: ... @overload def rsplit( - a: S_co, - sep: None | S_co = ..., - maxsplit: None | i_co = ..., + a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray | None = None, maxsplit: i_co | None = None ) -> NDArray[object_]: ... +@overload +def rsplit(a: T_co, sep: T_co | None = None, maxsplit: i_co | None = None) -> NDArray[object_]: ... @overload -def rstrip(a: U_co, chars: None | U_co = ...) -> NDArray[str_]: ... +def rstrip(a: U_co, chars: U_co | None = None) -> NDArray[str_]: ... +@overload +def rstrip(a: S_co, chars: S_co | None = None) -> NDArray[bytes_]: ... @overload -def rstrip(a: S_co, chars: None | S_co = ...) -> NDArray[bytes_]: ... +def rstrip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = None) -> _StringDTypeArray: ... +@overload +def rstrip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... @overload -def split( - a: U_co, - sep: None | U_co = ..., - maxsplit: None | i_co = ..., -) -> NDArray[object_]: ... +def split(a: U_co, sep: U_co | None = None, maxsplit: i_co | None = None) -> NDArray[object_]: ... +@overload +def split(a: S_co, sep: S_co | None = None, maxsplit: i_co | None = None) -> NDArray[object_]: ... 
@overload def split( - a: S_co, - sep: None | S_co = ..., - maxsplit: None | i_co = ..., + a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray | None = None, maxsplit: i_co | None = None ) -> NDArray[object_]: ... +@overload +def split(a: T_co, sep: T_co | None = None, maxsplit: i_co | None = None) -> NDArray[object_]: ... + +def splitlines(a: UST_co, keepends: b_co | None = None) -> NDArray[np.object_]: ... @overload -def splitlines(a: U_co, keepends: None | b_co = ...) -> NDArray[object_]: ... +def strip(a: U_co, chars: U_co | None = None) -> NDArray[str_]: ... @overload -def splitlines(a: S_co, keepends: None | b_co = ...) -> NDArray[object_]: ... - +def strip(a: S_co, chars: S_co | None = None) -> NDArray[bytes_]: ... @overload -def strip(a: U_co, chars: None | U_co = ...) -> NDArray[str_]: ... +def strip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = None) -> _StringDTypeArray: ... @overload -def strip(a: S_co, chars: None | S_co = ...) -> NDArray[bytes_]: ... +def strip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... @overload def swapcase(a: U_co) -> NDArray[str_]: ... @overload def swapcase(a: S_co) -> NDArray[bytes_]: ... +@overload +def swapcase(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def swapcase(a: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def title(a: U_co) -> NDArray[str_]: ... @overload def title(a: S_co) -> NDArray[bytes_]: ... +@overload +def title(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def title(a: T_co) -> _StringDTypeOrUnicodeArray: ... @overload -def translate( - a: U_co, - table: U_co, - deletechars: None | U_co = ..., -) -> NDArray[str_]: ... +def translate(a: U_co, table: str, deletechars: str | None = None) -> NDArray[str_]: ... @overload -def translate( - a: S_co, - table: S_co, - deletechars: None | S_co = ..., -) -> NDArray[bytes_]: ... 
+def translate(a: S_co, table: str, deletechars: str | None = None) -> NDArray[bytes_]: ... +@overload +def translate(a: _StringDTypeSupportsArray, table: str, deletechars: str | None = None) -> _StringDTypeArray: ... +@overload +def translate(a: T_co, table: str, deletechars: str | None = None) -> _StringDTypeOrUnicodeArray: ... @overload def upper(a: U_co) -> NDArray[str_]: ... @overload def upper(a: S_co) -> NDArray[bytes_]: ... +@overload +def upper(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def upper(a: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def zfill(a: U_co, width: i_co) -> NDArray[str_]: ... @overload def zfill(a: S_co, width: i_co) -> NDArray[bytes_]: ... +@overload +def zfill(a: _StringDTypeSupportsArray, width: i_co) -> _StringDTypeArray: ... +@overload +def zfill(a: T_co, width: i_co) -> _StringDTypeOrUnicodeArray: ... # String information @overload -def count( - a: U_co, - sub: U_co, - start: i_co = ..., - end: None | i_co = ..., -) -> NDArray[int_]: ... -@overload -def count( - a: S_co, - sub: S_co, - start: i_co = ..., - end: None | i_co = ..., -) -> NDArray[int_]: ... - -@overload -def endswith( - a: U_co, - suffix: U_co, - start: i_co = ..., - end: None | i_co = ..., -) -> NDArray[np.bool]: ... -@overload -def endswith( - a: S_co, - suffix: S_co, - start: i_co = ..., - end: None | i_co = ..., -) -> NDArray[np.bool]: ... - -@overload -def find( - a: U_co, - sub: U_co, - start: i_co = ..., - end: None | i_co = ..., -) -> NDArray[int_]: ... -@overload -def find( - a: S_co, - sub: S_co, - start: i_co = ..., - end: None | i_co = ..., -) -> NDArray[int_]: ... - -@overload -def index( - a: U_co, - sub: U_co, - start: i_co = ..., - end: None | i_co = ..., -) -> NDArray[int_]: ... -@overload -def index( - a: S_co, - sub: S_co, - start: i_co = ..., - end: None | i_co = ..., -) -> NDArray[int_]: ... - -def isalpha(a: U_co | S_co) -> NDArray[np.bool]: ... -def isalnum(a: U_co | S_co) -> NDArray[np.bool]: ... 
-def isdecimal(a: U_co) -> NDArray[np.bool]: ... -def isdigit(a: U_co | S_co) -> NDArray[np.bool]: ... -def islower(a: U_co | S_co) -> NDArray[np.bool]: ... -def isnumeric(a: U_co) -> NDArray[np.bool]: ... -def isspace(a: U_co | S_co) -> NDArray[np.bool]: ... -def istitle(a: U_co | S_co) -> NDArray[np.bool]: ... -def isupper(a: U_co | S_co) -> NDArray[np.bool]: ... - -@overload -def rfind( - a: U_co, - sub: U_co, - start: i_co = ..., - end: None | i_co = ..., -) -> NDArray[int_]: ... -@overload -def rfind( - a: S_co, - sub: S_co, - start: i_co = ..., - end: None | i_co = ..., -) -> NDArray[int_]: ... - -@overload -def rindex( - a: U_co, - sub: U_co, - start: i_co = ..., - end: None | i_co = ..., -) -> NDArray[int_]: ... -@overload -def rindex( - a: S_co, - sub: S_co, - start: i_co = ..., - end: None | i_co = ..., -) -> NDArray[int_]: ... - -@overload -def startswith( - a: U_co, - prefix: U_co, - start: i_co = ..., - end: None | i_co = ..., -) -> NDArray[np.bool]: ... -@overload -def startswith( - a: S_co, - prefix: S_co, - start: i_co = ..., - end: None | i_co = ..., -) -> NDArray[np.bool]: ... - -def str_len(A: U_co | S_co) -> NDArray[int_]: ... +def count(a: U_co, sub: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... +@overload +def count(a: S_co, sub: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... +@overload +def count(a: T_co, sub: T_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.int_]: ... + +@overload +def endswith(a: U_co, suffix: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.bool]: ... +@overload +def endswith(a: S_co, suffix: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.bool]: ... +@overload +def endswith(a: T_co, suffix: T_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.bool]: ... + +@overload +def find(a: U_co, sub: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... 
+@overload +def find(a: S_co, sub: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... +@overload +def find(a: T_co, sub: T_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.int_]: ... + +@overload +def index(a: U_co, sub: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... +@overload +def index(a: S_co, sub: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... +@overload +def index(a: T_co, sub: T_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.int_]: ... + +def isalpha(a: UST_co) -> NDArray[np.bool]: ... +def isalnum(a: UST_co) -> NDArray[np.bool]: ... +def isdecimal(a: U_co | T_co) -> NDArray[np.bool]: ... +def isdigit(a: UST_co) -> NDArray[np.bool]: ... +def islower(a: UST_co) -> NDArray[np.bool]: ... +def isnumeric(a: U_co | T_co) -> NDArray[np.bool]: ... +def isspace(a: UST_co) -> NDArray[np.bool]: ... +def istitle(a: UST_co) -> NDArray[np.bool]: ... +def isupper(a: UST_co) -> NDArray[np.bool]: ... + +@overload +def rfind(a: U_co, sub: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... +@overload +def rfind(a: S_co, sub: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... +@overload +def rfind(a: T_co, sub: T_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.int_]: ... + +@overload +def rindex(a: U_co, sub: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... +@overload +def rindex(a: S_co, sub: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... +@overload +def rindex(a: T_co, sub: T_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.int_]: ... + +@overload +def startswith(a: U_co, prefix: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.bool]: ... +@overload +def startswith(a: S_co, prefix: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.bool]: ... +@overload +def startswith(a: T_co, prefix: T_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.bool]: ... 
+ +def str_len(A: UST_co) -> NDArray[int_]: ... # Overload 1 and 2: str- or bytes-based array-likes -# overload 3: arbitrary object with unicode=False (-> bytes_) -# overload 4: arbitrary object with unicode=True (-> str_) +# overload 3 and 4: arbitrary object with unicode=False (-> bytes_) +# overload 5 and 6: arbitrary object with unicode=True (-> str_) +# overload 7: arbitrary object with unicode=None (default) (-> str_ | bytes_) @overload +@deprecated("numpy.char.array is deprecated and will be removed in a future release.") def array( obj: U_co, - itemsize: None | int = ..., - copy: bool = ..., - unicode: L[False] = ..., - order: _OrderKACF = ..., + itemsize: int | None = None, + copy: bool = True, + unicode: L[True] | None = None, + order: _OrderKACF = None, ) -> _CharArray[str_]: ... @overload +@deprecated("numpy.char.array is deprecated and will be removed in a future release.") def array( obj: S_co, - itemsize: None | int = ..., - copy: bool = ..., - unicode: L[False] = ..., - order: _OrderKACF = ..., + itemsize: int | None = None, + copy: bool = True, + unicode: L[False] | None = None, + order: _OrderKACF = None, ) -> _CharArray[bytes_]: ... @overload +@deprecated("numpy.char.array is deprecated and will be removed in a future release.") def array( obj: object, - itemsize: None | int = ..., - copy: bool = ..., - unicode: L[False] = ..., - order: _OrderKACF = ..., + itemsize: int | None, + copy: bool, + unicode: L[False], + order: _OrderKACF = None, ) -> _CharArray[bytes_]: ... @overload +@deprecated("numpy.char.array is deprecated and will be removed in a future release.") def array( obj: object, - itemsize: None | int = ..., - copy: bool = ..., - unicode: L[True] = ..., - order: _OrderKACF = ..., + itemsize: int | None = None, + copy: bool = True, + *, + unicode: L[False], + order: _OrderKACF = None, +) -> _CharArray[bytes_]: ... 
+@overload +@deprecated("numpy.char.array is deprecated and will be removed in a future release.") +def array( + obj: object, + itemsize: int | None, + copy: bool, + unicode: L[True], + order: _OrderKACF = None, +) -> _CharArray[str_]: ... +@overload +@deprecated("numpy.char.array is deprecated and will be removed in a future release.") +def array( + obj: object, + itemsize: int | None = None, + copy: bool = True, + *, + unicode: L[True], + order: _OrderKACF = None, ) -> _CharArray[str_]: ... +@overload +@deprecated("numpy.char.array is deprecated and will be removed in a future release.") +def array( + obj: object, + itemsize: int | None = None, + copy: bool = True, + unicode: bool | None = None, + order: _OrderKACF = None, +) -> _CharArray[str_] | _CharArray[bytes_]: ... @overload +@deprecated("numpy.char.asarray is deprecated and will be removed in a future release.") def asarray( obj: U_co, - itemsize: None | int = ..., - unicode: L[False] = ..., - order: _OrderKACF = ..., + itemsize: int | None = None, + unicode: L[True] | None = None, + order: _OrderKACF = None, ) -> _CharArray[str_]: ... @overload +@deprecated("numpy.char.asarray is deprecated and will be removed in a future release.") def asarray( obj: S_co, - itemsize: None | int = ..., - unicode: L[False] = ..., - order: _OrderKACF = ..., + itemsize: int | None = None, + unicode: L[False] | None = None, + order: _OrderKACF = None, +) -> _CharArray[bytes_]: ... +@overload +@deprecated("numpy.char.asarray is deprecated and will be removed in a future release.") +def asarray( + obj: object, + itemsize: int | None, + unicode: L[False], + order: _OrderKACF = None, ) -> _CharArray[bytes_]: ... @overload +@deprecated("numpy.char.asarray is deprecated and will be removed in a future release.") def asarray( obj: object, - itemsize: None | int = ..., - unicode: L[False] = ..., - order: _OrderKACF = ..., + itemsize: int | None = None, + *, + unicode: L[False], + order: _OrderKACF = None, ) -> _CharArray[bytes_]: ... 
@overload +@deprecated("numpy.char.asarray is deprecated and will be removed in a future release.") +def asarray( + obj: object, + itemsize: int | None, + unicode: L[True], + order: _OrderKACF = None, +) -> _CharArray[str_]: ... +@overload +@deprecated("numpy.char.asarray is deprecated and will be removed in a future release.") def asarray( obj: object, - itemsize: None | int = ..., - unicode: L[True] = ..., - order: _OrderKACF = ..., + itemsize: int | None = None, + *, + unicode: L[True], + order: _OrderKACF = None, ) -> _CharArray[str_]: ... +@overload +@deprecated("numpy.char.asarray is deprecated and will be removed in a future release.") +def asarray( + obj: object, + itemsize: int | None = None, + unicode: bool | None = None, + order: _OrderKACF = None, +) -> _CharArray[str_] | _CharArray[bytes_]: ... diff --git a/numpy/_core/einsumfunc.py b/numpy/_core/einsumfunc.py index 7aa5f22fe939..3a04b02b9c93 100644 --- a/numpy/_core/einsumfunc.py +++ b/numpy/_core/einsumfunc.py @@ -2,18 +2,21 @@ Implementation of optimized einsum. 
""" +import functools import itertools import operator -from numpy._core.multiarray import c_einsum -from numpy._core.numeric import asanyarray, tensordot +from numpy._core.multiarray import c_einsum, matmul +from numpy._core.numeric import asanyarray, reshape from numpy._core.overrides import array_function_dispatch +from numpy._core.umath import multiply __all__ = ['einsum', 'einsum_path'] # importing string for string.ascii_letters would be too slow # the first import before caching has been measured to take 800 Âĩs (#23777) -einsum_symbols = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' +# imports begin with uppercase to mimic ASCII values to avoid sorting issues +einsum_symbols = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' einsum_symbols_set = set(einsum_symbols) @@ -219,7 +222,7 @@ def _optimal_path(input_sets, output_set, idx_dict, memory_limit): return path def _parse_possible_contraction( - positions, input_sets, output_set, idx_dict, + positions, input_sets, output_set, idx_dict, memory_limit, path_cost, naive_cost ): """Compute the cost (removed size + flops) and resultant indices for @@ -290,7 +293,7 @@ def _update_other_results(results, best): Parameters ---------- results : list - List of contraction results produced by + List of contraction results produced by ``_parse_possible_contraction``. best : list The best contraction of ``results`` i.e. the one that @@ -398,7 +401,7 @@ def _greedy_path(input_sets, output_set, idx_dict, memory_limit): if result is not None: known_contractions.append(result) - # If we do not have a inner contraction, rescan pairs + # If we do not have an inner contraction, rescan pairs # including outer products if len(known_contractions) == 0: @@ -439,116 +442,6 @@ def _greedy_path(input_sets, output_set, idx_dict, memory_limit): return path -def _can_dot(inputs, result, idx_removed): - """ - Checks if we can use BLAS (np.tensordot) call and its beneficial to do so. 
- - Parameters - ---------- - inputs : list of str - Specifies the subscripts for summation. - result : str - Resulting summation. - idx_removed : set - Indices that are removed in the summation - - - Returns - ------- - type : bool - Returns true if BLAS should and can be used, else False - - Notes - ----- - If the operations is BLAS level 1 or 2 and is not already aligned - we default back to einsum as the memory movement to copy is more - costly than the operation itself. - - - Examples - -------- - - # Standard GEMM operation - >>> _can_dot(['ij', 'jk'], 'ik', set('j')) - True - - # Can use the standard BLAS, but requires odd data movement - >>> _can_dot(['ijj', 'jk'], 'ik', set('j')) - False - - # DDOT where the memory is not aligned - >>> _can_dot(['ijk', 'ikj'], '', set('ijk')) - False - - """ - - # All `dot` calls remove indices - if len(idx_removed) == 0: - return False - - # BLAS can only handle two operands - if len(inputs) != 2: - return False - - input_left, input_right = inputs - - for c in set(input_left + input_right): - # can't deal with repeated indices on same input or more than 2 total - nl, nr = input_left.count(c), input_right.count(c) - if (nl > 1) or (nr > 1) or (nl + nr > 2): - return False - - # can't do implicit summation or dimension collapse e.g. 
- # "ab,bc->c" (implicitly sum over 'a') - # "ab,ca->ca" (take diagonal of 'a') - if nl + nr - 1 == int(c in result): - return False - - # Build a few temporaries - set_left = set(input_left) - set_right = set(input_right) - keep_left = set_left - idx_removed - keep_right = set_right - idx_removed - rs = len(idx_removed) - - # At this point we are a DOT, GEMV, or GEMM operation - - # Handle inner products - - # DDOT with aligned data - if input_left == input_right: - return True - - # DDOT without aligned data (better to use einsum) - if set_left == set_right: - return False - - # Handle the 4 possible (aligned) GEMV or GEMM cases - - # GEMM or GEMV no transpose - if input_left[-rs:] == input_right[:rs]: - return True - - # GEMM or GEMV transpose both - if input_left[:rs] == input_right[-rs:]: - return True - - # GEMM or GEMV transpose right - if input_left[-rs:] == input_right[-rs:]: - return True - - # GEMM or GEMV transpose left - if input_left[:rs] == input_right[:rs]: - return True - - # Einsum is faster than GEMV if we have to copy data - if not keep_left or not keep_right: - return False - - # We are a matrix-matrix product, but we need to copy data - return True - - def _parse_einsum_input(operands): """ A reproduction of einsum c side einsum parsing in python. @@ -588,7 +481,7 @@ def _parse_einsum_input(operands): if s in '.,->': continue if s not in einsum_symbols: - raise ValueError("Character %s is not a valid symbol." % s) + raise ValueError(f"Character {s} is not a valid symbol.") else: tmp_operands = list(operands) @@ -690,7 +583,7 @@ def _parse_einsum_input(operands): tmp_subscripts = subscripts.replace(",", "") for s in sorted(set(tmp_subscripts)): if s not in (einsum_symbols): - raise ValueError("Character %s is not a valid symbol." 
% s) + raise ValueError(f"Character {s} is not a valid symbol.") if tmp_subscripts.count(s) == 1: output_subscript += s normal_inds = ''.join(sorted(set(output_subscript) - @@ -708,7 +601,7 @@ def _parse_einsum_input(operands): output_subscript = "" for s in sorted(set(tmp_subscripts)): if s not in einsum_symbols: - raise ValueError("Character %s is not a valid symbol." % s) + raise ValueError(f"Character {s} is not a valid symbol.") if tmp_subscripts.count(s) == 1: output_subscript += s @@ -718,8 +611,7 @@ def _parse_einsum_input(operands): raise ValueError("Output character %s appeared more than once in " "the output." % char) if char not in input_subscripts: - raise ValueError("Output character %s did not appear in the input" - % char) + raise ValueError(f"Output character {char} did not appear in the input") # Make sure number operands is equivalent to the number of terms if len(input_subscripts.split(',')) != len(operands): @@ -833,7 +725,7 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): >>> print(path_info[0]) ['einsum_path', (0, 2), (0, 3), (0, 2), (0, 1)] - >>> print(path_info[1]) + >>> print(path_info[1]) Complete contraction: ea,fb,abcd,gc,hd->efgh # may vary Naive scaling: 8 Optimized scaling: 5 @@ -875,7 +767,7 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): path_type = path_type[0] else: - raise TypeError("Did not understand the path: %s" % str(path_type)) + raise TypeError(f"Did not understand the path: {str(path_type)}") # Hidden option, only einsum should call this einsum_call_arg = einsum_call @@ -887,13 +779,14 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): # Build a few useful list and sets input_list = input_subscripts.split(',') + num_inputs = len(input_list) input_sets = [set(x) for x in input_list] output_set = set(output_subscript) indices = set(input_subscripts.replace(',', '')) + num_indices = len(indices) # Get length of each unique dimension and ensure all dimensions are 
correct dimension_dict = {} - broadcast_indices = [[] for x in range(len(input_list))] for tnum, term in enumerate(input_list): sh = operands[tnum].shape if len(sh) != len(term): @@ -903,10 +796,6 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): for cnum, char in enumerate(term): dim = sh[cnum] - # Build out broadcast indices - if dim == 1: - broadcast_indices[tnum].append(char) - if char in dimension_dict.keys(): # For broadcasting cases we always want the largest dim size if dimension_dict[char] == 1: @@ -918,9 +807,6 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): else: dimension_dict[char] = dim - # Convert broadcast inds to sets - broadcast_indices = [set(x) for x in broadcast_indices] - # Compute size of each input array plus the output array size_list = [_compute_size_by_dict(term, dimension_dict) for term in input_list + [output_subscript]] @@ -931,23 +817,16 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): else: memory_arg = memory_limit - # Compute naive cost - # This isn't quite right, need to look into exactly how einsum does this - inner_product = (sum(len(x) for x in input_sets) - len(indices)) > 0 - naive_cost = _flop_count( - indices, inner_product, len(input_list), dimension_dict - ) - # Compute the path if explicit_einsum_path: path = path_type[1:] elif ( (path_type is False) - or (len(input_list) in [1, 2]) + or (num_inputs in [1, 2]) or (indices == output_set) ): # Nothing to be optimized, leave it to einsum - path = [tuple(range(len(input_list)))] + path = [tuple(range(num_inputs))] elif path_type == "greedy": path = _greedy_path( input_sets, output_set, dimension_dict, memory_arg @@ -964,31 +843,23 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): # Build contraction tuple (positions, gemm, einsum_str, remaining) for cnum, contract_inds in enumerate(path): # Make sure we remove inds from right to left - contract_inds = tuple(sorted(list(contract_inds), 
reverse=True)) + contract_inds = tuple(sorted(contract_inds, reverse=True)) contract = _find_contraction(contract_inds, input_sets, output_set) out_inds, input_sets, idx_removed, idx_contract = contract - cost = _flop_count( - idx_contract, idx_removed, len(contract_inds), dimension_dict - ) - cost_list.append(cost) - scale_list.append(len(idx_contract)) - size_list.append(_compute_size_by_dict(out_inds, dimension_dict)) + if not einsum_call_arg: + # these are only needed for printing info + cost = _flop_count( + idx_contract, idx_removed, len(contract_inds), dimension_dict + ) + cost_list.append(cost) + scale_list.append(len(idx_contract)) + size_list.append(_compute_size_by_dict(out_inds, dimension_dict)) - bcast = set() tmp_inputs = [] for x in contract_inds: tmp_inputs.append(input_list.pop(x)) - bcast |= broadcast_indices.pop(x) - - new_bcast_inds = bcast - idx_removed - - # If we're broadcasting, nix blas - if not len(idx_removed & bcast): - do_blas = _can_dot(tmp_inputs, out_inds, idx_removed) - else: - do_blas = False # Last contraction if (cnum - len(path)) == -1: @@ -998,22 +869,17 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): idx_result = "".join([x[1] for x in sorted(sort_result)]) input_list.append(idx_result) - broadcast_indices.append(new_bcast_inds) einsum_str = ",".join(tmp_inputs) + "->" + idx_result - contraction = ( - contract_inds, idx_removed, einsum_str, input_list[:], do_blas - ) + contraction = (contract_inds, einsum_str, input_list[:]) contraction_list.append(contraction) - opt_cost = sum(cost_list) + 1 - if len(input_list) != 1: # Explicit "einsum_path" is usually trusted, but we detect this kind of # mistake in order to prevent from returning an intermediate value. 
raise RuntimeError( - "Invalid einsum_path is specified: {} more operands has to be " - "contracted.".format(len(input_list) - 1)) + f"Invalid einsum_path is specified: {len(input_list) - 1} more " + "operands has to be contracted.") if einsum_call_arg: return (operands, contraction_list) @@ -1022,22 +888,32 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): overall_contraction = input_subscripts + "->" + output_subscript header = ("scaling", "current", "remaining") + # Compute naive cost + # This isn't quite right, need to look into exactly how einsum does this + inner_product = ( + sum(len(set(x)) for x in input_subscripts.split(',')) - num_indices + ) > 0 + naive_cost = _flop_count( + indices, inner_product, num_inputs, dimension_dict + ) + + opt_cost = sum(cost_list) + 1 speedup = naive_cost / opt_cost max_i = max(size_list) - path_print = " Complete contraction: %s\n" % overall_contraction - path_print += " Naive scaling: %d\n" % len(indices) + path_print = f" Complete contraction: {overall_contraction}\n" + path_print += f" Naive scaling: {num_indices}\n" path_print += " Optimized scaling: %d\n" % max(scale_list) - path_print += " Naive FLOP count: %.3e\n" % naive_cost - path_print += " Optimized FLOP count: %.3e\n" % opt_cost - path_print += " Theoretical speedup: %3.3f\n" % speedup - path_print += " Largest intermediate: %.3e elements\n" % max_i + path_print += f" Naive FLOP count: {naive_cost:.3e}\n" + path_print += f" Optimized FLOP count: {opt_cost:.3e}\n" + path_print += f" Theoretical speedup: {speedup:3.3f}\n" + path_print += f" Largest intermediate: {max_i:.3e} elements\n" path_print += "-" * 74 + "\n" path_print += "%6s %24s %40s\n" % header path_print += "-" * 74 for n, contraction in enumerate(contraction_list): - inds, idx_rm, einsum_str, remaining, blas = contraction + _, einsum_str, remaining = contraction remaining_str = ",".join(remaining) + "->" + output_subscript path_run = (scale_list[n], einsum_str, remaining_str) 
path_print += "\n%4d %24s %40s" % path_run @@ -1046,6 +922,317 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): return (path, path_print) +def _parse_eq_to_pure_multiplication(a_term, shape_a, b_term, shape_b, out): + """If there are no contracted indices, then we can directly transpose and + insert singleton dimensions into ``a`` and ``b`` such that (broadcast) + elementwise multiplication performs the einsum. + + No need to cache this as it is within the cached + ``_parse_eq_to_batch_matmul``. + + """ + desired_a = "" + desired_b = "" + new_shape_a = [] + new_shape_b = [] + for ix in out: + if ix in a_term: + desired_a += ix + new_shape_a.append(shape_a[a_term.index(ix)]) + else: + new_shape_a.append(1) + if ix in b_term: + desired_b += ix + new_shape_b.append(shape_b[b_term.index(ix)]) + else: + new_shape_b.append(1) + + if desired_a != a_term: + eq_a = f"{a_term}->{desired_a}" + else: + eq_a = None + if desired_b != b_term: + eq_b = f"{b_term}->{desired_b}" + else: + eq_b = None + + return ( + eq_a, + eq_b, + new_shape_a, + new_shape_b, + None, # new_shape_ab, not needed since not fusing + None, # perm_ab, not needed as we transpose a and b first + True, # pure_multiplication=True + ) + + +@functools.lru_cache(2**12) +def _parse_eq_to_batch_matmul(eq, shape_a, shape_b): + """Cached parsing of a two term einsum equation into the necessary + sequence of arguments for contracttion via batched matrix multiplication. + The steps we need to specify are: + + 1. Remove repeated and trivial indices from the left and right terms, + and transpose them, done as a single einsum. + 2. Fuse the remaining indices so we have two 3D tensors. + 3. Perform the batched matrix multiplication. + 4. Unfuse the output to get the desired final index order. 
+ + """ + lhs, out = eq.split("->") + a_term, b_term = lhs.split(",") + + if len(a_term) != len(shape_a): + raise ValueError(f"Term '{a_term}' does not match shape {shape_a}.") + if len(b_term) != len(shape_b): + raise ValueError(f"Term '{b_term}' does not match shape {shape_b}.") + + sizes = {} + singletons = set() + + # parse left term to unique indices with size > 1 + left = {} + for ix, d in zip(a_term, shape_a): + if d == 1: + # everything (including broadcasting) works nicely if simply ignore + # such dimensions, but we do need to track if they appear in output + # and thus should be reintroduced later + singletons.add(ix) + continue + if sizes.setdefault(ix, d) != d: + # set and check size + raise ValueError( + f"Index {ix} has mismatched sizes {sizes[ix]} and {d}." + ) + left[ix] = True + + # parse right term to unique indices with size > 1 + right = {} + for ix, d in zip(b_term, shape_b): + # broadcast indices (size 1 on one input and size != 1 + # on the other) should not be treated as singletons + if d == 1: + if ix not in left: + singletons.add(ix) + continue + singletons.discard(ix) + + if sizes.setdefault(ix, d) != d: + # set and check size + raise ValueError( + f"Index {ix} has mismatched sizes {sizes[ix]} and {d}." + ) + right[ix] = True + + # now we classify the unique size > 1 indices only + bat_inds = [] # appears on A, B, O + con_inds = [] # appears on A, B, . 
+ a_keep = [] # appears on A, ., O + b_keep = [] # appears on ., B, O + # other indices (appearing on A or B only) will + # be summed or traced out prior to the matmul + for ix in left: + if right.pop(ix, False): + if ix in out: + bat_inds.append(ix) + else: + con_inds.append(ix) + elif ix in out: + a_keep.append(ix) + # now only indices unique to right remain + for ix in right: + if ix in out: + b_keep.append(ix) + + if not con_inds: + # contraction is pure multiplication, prepare inputs differently + return _parse_eq_to_pure_multiplication( + a_term, shape_a, b_term, shape_b, out + ) + + # only need the size one indices that appear in the output + singletons = [ix for ix in out if ix in singletons] + + # take diagonal, remove any trivial axes and transpose left + desired_a = "".join((*bat_inds, *a_keep, *con_inds)) + if a_term != desired_a: + eq_a = f"{a_term}->{desired_a}" + else: + eq_a = None + + # take diagonal, remove any trivial axes and transpose right + desired_b = "".join((*bat_inds, *con_inds, *b_keep)) + if b_term != desired_b: + eq_b = f"{b_term}->{desired_b}" + else: + eq_b = None + + # then we want to reshape + if bat_inds: + lgroups = (bat_inds, a_keep, con_inds) + rgroups = (bat_inds, con_inds, b_keep) + ogroups = (bat_inds, a_keep, b_keep) + else: + # avoid size 1 batch dimension if no batch indices + lgroups = (a_keep, con_inds) + rgroups = (con_inds, b_keep) + ogroups = (a_keep, b_keep) + + if any(len(group) != 1 for group in lgroups): + # need to fuse 'kept' and contracted indices + # (though could allow batch indices to be broadcast) + new_shape_a = tuple( + functools.reduce(operator.mul, (sizes[ix] for ix in ix_group), 1) + for ix_group in lgroups + ) + else: + new_shape_a = None + + if any(len(group) != 1 for group in rgroups): + # need to fuse 'kept' and contracted indices + # (though could allow batch indices to be broadcast) + new_shape_b = tuple( + functools.reduce(operator.mul, (sizes[ix] for ix in ix_group), 1) + for ix_group in 
rgroups + ) + else: + new_shape_b = None + + if any(len(group) != 1 for group in ogroups) or singletons: + new_shape_ab = (1,) * len(singletons) + tuple( + sizes[ix] for ix_group in ogroups for ix in ix_group + ) + else: + new_shape_ab = None + + # then we might need to permute the matmul produced output: + out_produced = "".join((*singletons, *bat_inds, *a_keep, *b_keep)) + if out_produced != out: + perm_ab = tuple(out_produced.index(ix) for ix in out) + else: + perm_ab = None + + return ( + eq_a, + eq_b, + new_shape_a, + new_shape_b, + new_shape_ab, + perm_ab, + False, # pure_multiplication=False + ) + + +@functools.lru_cache(maxsize=64) +def _parse_output_order(order, a_is_fcontig, b_is_fcontig): + order = order.upper() + if order == "K": + return None + elif order in "CF": + return order + elif order == "A": + if a_is_fcontig and b_is_fcontig: + return "F" + else: + return "C" + else: + raise ValueError( + "ValueError: order must be one of " + f"'C', 'F', 'A', or 'K' (got '{order}')" + ) + + +def bmm_einsum(eq, a, b, out=None, **kwargs): + """Perform arbitrary pairwise einsums using only ``matmul``, or + ``multiply`` if no contracted indices are involved (plus maybe single term + ``einsum`` to prepare the terms individually). The logic for each is cached + based on the equation and array shape, and each step is only performed if + necessary. + + Parameters + ---------- + eq : str + The einsum equation. + a : array_like + The first array to contract. + b : array_like + The second array to contract. + + Returns + ------- + array_like + + Notes + ----- + A fuller description of this algorithm, and original source for this + implementation, can be found at https://github.com/jcmgray/einsum_bmm. + """ + ( + eq_a, + eq_b, + new_shape_a, + new_shape_b, + new_shape_ab, + perm_ab, + pure_multiplication, + ) = _parse_eq_to_batch_matmul(eq, a.shape, b.shape) + + # n.b. 
one could special case various cases to call c_einsum directly here + + # need to handle `order` a little manually, since we do transpose + # operations before and potentially after the ufunc calls + output_order = _parse_output_order( + kwargs.pop("order", "K"), a.flags.f_contiguous, b.flags.f_contiguous + ) + + # prepare left + if eq_a is not None: + # diagonals, sums, and tranpose + a = c_einsum(eq_a, a) + if new_shape_a is not None: + a = reshape(a, new_shape_a) + + # prepare right + if eq_b is not None: + # diagonals, sums, and tranpose + b = c_einsum(eq_b, b) + if new_shape_b is not None: + b = reshape(b, new_shape_b) + + if pure_multiplication: + # no contracted indices + if output_order is not None: + kwargs["order"] = output_order + + # do the 'contraction' via multiplication! + return multiply(a, b, out=out, **kwargs) + + # can only supply out here if no other reshaping / transposing + matmul_out_compatible = (new_shape_ab is None) and (perm_ab is None) + if matmul_out_compatible: + kwargs["out"] = out + + # do the contraction! + ab = matmul(a, b, **kwargs) + + # prepare the output + if new_shape_ab is not None: + ab = reshape(ab, new_shape_ab) + if perm_ab is not None: + ab = ab.transpose(perm_ab) + + if (out is not None) and (not matmul_out_compatible): + # handle case where out is specified, but we also needed + # to reshape / transpose ``ab`` after the matmul + out[:] = ab + ab = out + elif output_order is not None: + ab = asanyarray(ab, order=output_order) + + return ab + + def _einsum_dispatcher(*operands, out=None, optimize=None, **kwargs): # Arguably we dispatch on more arguments than we really should; see note in # _einsum_path_dispatcher for why. @@ -1132,8 +1319,6 @@ def einsum(*operands, out=None, optimize=False, **kwargs): Notes ----- - .. versionadded:: 1.6.0 - The Einstein summation convention can be used to compute many multi-dimensional, linear algebraic array operations. `einsum` provides a succinct way of representing these. 
@@ -1183,7 +1368,7 @@ def einsum(*operands, out=None, optimize=False, **kwargs): ``np.einsum('i->', a)`` is like :py:func:`np.sum(a) ` if ``a`` is a 1-D array, and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) ` if ``a`` is a square 2-D array. - The difference is that `einsum` does not allow broadcasting by default. + The difference is that `einsum` does not allow broadcasting by default. Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the order of the output subscript labels and therefore returns matrix multiplication, unlike the example above in implicit mode. @@ -1191,7 +1376,7 @@ def einsum(*operands, out=None, optimize=False, **kwargs): To enable and control broadcasting, use an ellipsis. Default NumPy-style broadcasting is done by adding an ellipsis to the left of each term, like ``np.einsum('...ii->...i', a)``. - ``np.einsum('...i->...', a)`` is like + ``np.einsum('...i->...', a)`` is like :py:func:`np.sum(a, axis=-1) ` for array ``a`` of any shape. To take the trace along the first and last axes, you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix @@ -1210,16 +1395,12 @@ def einsum(*operands, out=None, optimize=False, **kwargs): The examples below have corresponding `einsum` calls with the two parameter methods. - .. versionadded:: 1.10.0 - Views returned from einsum are now writeable whenever the input array is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now have the same effect as :py:func:`np.swapaxes(a, 0, 2) ` and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal of a 2D array. - .. versionadded:: 1.12.0 - Added the ``optimize`` argument which will optimize the contraction order of an einsum expression. 
For a contraction with three or more operands this can greatly increase the computational efficiency at the cost of @@ -1413,7 +1594,7 @@ def einsum(*operands, out=None, optimize=False, **kwargs): Optimal `einsum` (best usage pattern in some use cases): ~110ms - >>> path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, + >>> path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, ... optimize='optimal')[0] >>> for iteration in range(500): ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path) @@ -1434,65 +1615,29 @@ def einsum(*operands, out=None, optimize=False, **kwargs): unknown_kwargs = [k for (k, v) in kwargs.items() if k not in valid_einsum_kwargs] if len(unknown_kwargs): - raise TypeError("Did not understand the following kwargs: %s" - % unknown_kwargs) + raise TypeError(f"Did not understand the following kwargs: {unknown_kwargs}") # Build the contraction list and operand operands, contraction_list = einsum_path(*operands, optimize=optimize, einsum_call=True) - # Handle order kwarg for output array, c_einsum allows mixed case - output_order = kwargs.pop('order', 'K') - if output_order.upper() == 'A': - if all(arr.flags.f_contiguous for arr in operands): - output_order = 'F' - else: - output_order = 'C' - # Start contraction loop for num, contraction in enumerate(contraction_list): - inds, idx_rm, einsum_str, remaining, blas = contraction + inds, einsum_str, _ = contraction tmp_operands = [operands.pop(x) for x in inds] # Do we need to deal with the output? 
handle_out = specified_out and ((num + 1) == len(contraction_list)) - # Call tensordot if still possible - if blas: - # Checks have already been handled - input_str, results_index = einsum_str.split('->') - input_left, input_right = input_str.split(',') - - tensor_result = input_left + input_right - for s in idx_rm: - tensor_result = tensor_result.replace(s, "") - - # Find indices to contract over - left_pos, right_pos = [], [] - for s in sorted(idx_rm): - left_pos.append(input_left.find(s)) - right_pos.append(input_right.find(s)) - - # Contract! - new_view = tensordot( - *tmp_operands, axes=(tuple(left_pos), tuple(right_pos)) - ) - - # Build a new view if needed - if (tensor_result != results_index) or handle_out: - if handle_out: - kwargs["out"] = out - new_view = c_einsum( - tensor_result + '->' + results_index, new_view, **kwargs - ) + # If out was specified + if handle_out: + kwargs["out"] = out - # Call einsum + if len(tmp_operands) == 2: + # Call (batched) matrix multiplication if possible + new_view = bmm_einsum(einsum_str, *tmp_operands, **kwargs) else: - # If out was specified - if handle_out: - kwargs["out"] = out - - # Do the contraction + # Call einsum new_view = c_einsum(einsum_str, *tmp_operands, **kwargs) # Append new items and dereference what we can @@ -1502,4 +1647,4 @@ def einsum(*operands, out=None, optimize=False, **kwargs): if specified_out: return out else: - return asanyarray(operands[0], order=output_order) + return operands[0] diff --git a/numpy/_core/einsumfunc.pyi b/numpy/_core/einsumfunc.pyi index 513f0635e35e..3e42ef6dc238 100644 --- a/numpy/_core/einsumfunc.pyi +++ b/numpy/_core/einsumfunc.pyi @@ -1,35 +1,30 @@ from collections.abc import Sequence -from typing import TypeVar, Any, overload, Literal +from typing import Any, Literal, overload import numpy as np -from numpy import number, _OrderKACF +from numpy import _OrderKACF from numpy._typing import ( NDArray, _ArrayLikeBool_co, - _ArrayLikeUInt_co, - _ArrayLikeInt_co, - 
_ArrayLikeFloat_co, _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, _ArrayLikeObject_co, + _ArrayLikeUInt_co, _DTypeLikeBool, - _DTypeLikeUInt, - _DTypeLikeInt, - _DTypeLikeFloat, _DTypeLikeComplex, _DTypeLikeComplex_co, + _DTypeLikeFloat, + _DTypeLikeInt, _DTypeLikeObject, + _DTypeLikeUInt, ) -_ArrayType = TypeVar( - "_ArrayType", - bound=NDArray[np.bool | number[Any]], -) - -_OptimizeKind = None | bool | Literal["greedy", "optimal"] | Sequence[Any] -_CastingSafe = Literal["no", "equiv", "safe", "same_kind"] -_CastingUnsafe = Literal["unsafe"] +__all__ = ["einsum", "einsum_path"] -__all__: list[str] +type _OptimizeKind = bool | Literal["greedy", "optimal"] | Sequence[Any] | None +type _CastingSafe = Literal["no", "equiv", "safe", "same_kind"] +type _CastingUnsafe = Literal["unsafe"] # TODO: Properly handle the `casting`-based combinatorics # TODO: We need to evaluate the content `__subscripts` in order @@ -42,55 +37,55 @@ def einsum( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeBool_co, - out: None = ..., - dtype: None | _DTypeLikeBool = ..., + out: None = None, + dtype: _DTypeLikeBool | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> Any: ... @overload def einsum( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeUInt_co, - out: None = ..., - dtype: None | _DTypeLikeUInt = ..., + out: None = None, + dtype: _DTypeLikeUInt | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> Any: ... @overload def einsum( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeInt_co, - out: None = ..., - dtype: None | _DTypeLikeInt = ..., + out: None = None, + dtype: _DTypeLikeInt | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> Any: ... 
@overload def einsum( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeFloat_co, - out: None = ..., - dtype: None | _DTypeLikeFloat = ..., + out: None = None, + dtype: _DTypeLikeFloat | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> Any: ... @overload def einsum( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeComplex_co, - out: None = ..., - dtype: None | _DTypeLikeComplex = ..., + out: None = None, + dtype: _DTypeLikeComplex | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> Any: ... @overload def einsum( @@ -98,44 +93,44 @@ def einsum( /, *operands: Any, casting: _CastingUnsafe, - dtype: None | _DTypeLikeComplex_co = ..., - out: None = ..., + dtype: _DTypeLikeComplex_co | None = ..., + out: None = None, order: _OrderKACF = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> Any: ... @overload -def einsum( +def einsum[OutT: NDArray[np.bool | np.number]]( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeComplex_co, - out: _ArrayType, - dtype: None | _DTypeLikeComplex_co = ..., + out: OutT, + dtype: _DTypeLikeComplex_co | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., - optimize: _OptimizeKind = ..., -) -> _ArrayType: ... + optimize: _OptimizeKind = False, +) -> OutT: ... @overload -def einsum( +def einsum[OutT: NDArray[np.bool | np.number]]( subscripts: str | _ArrayLikeInt_co, /, *operands: Any, - out: _ArrayType, + out: OutT, casting: _CastingUnsafe, - dtype: None | _DTypeLikeComplex_co = ..., + dtype: _DTypeLikeComplex_co | None = ..., order: _OrderKACF = ..., - optimize: _OptimizeKind = ..., -) -> _ArrayType: ... + optimize: _OptimizeKind = False, +) -> OutT: ... 
@overload def einsum( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeObject_co, - out: None = ..., - dtype: None | _DTypeLikeObject = ..., + out: None = None, + dtype: _DTypeLikeObject | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> Any: ... @overload def einsum( @@ -143,33 +138,33 @@ def einsum( /, *operands: Any, casting: _CastingUnsafe, - dtype: None | _DTypeLikeObject = ..., - out: None = ..., + dtype: _DTypeLikeObject | None = ..., + out: None = None, order: _OrderKACF = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> Any: ... @overload -def einsum( +def einsum[OutT: NDArray[np.bool | np.number]]( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeObject_co, - out: _ArrayType, - dtype: None | _DTypeLikeObject = ..., + out: OutT, + dtype: _DTypeLikeObject | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., - optimize: _OptimizeKind = ..., -) -> _ArrayType: ... + optimize: _OptimizeKind = False, +) -> OutT: ... @overload -def einsum( +def einsum[OutT: NDArray[np.bool | np.number]]( subscripts: str | _ArrayLikeInt_co, /, *operands: Any, - out: _ArrayType, + out: OutT, casting: _CastingUnsafe, - dtype: None | _DTypeLikeObject = ..., + dtype: _DTypeLikeObject | None = ..., order: _OrderKACF = ..., - optimize: _OptimizeKind = ..., -) -> _ArrayType: ... + optimize: _OptimizeKind = False, +) -> OutT: ... # NOTE: `einsum_call` is a hidden kwarg unavailable for public use. # It is therefore excluded from the signatures below. @@ -179,5 +174,6 @@ def einsum_path( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeComplex_co | _DTypeLikeObject, - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = "greedy", + einsum_call: Literal[False] = False, ) -> tuple[list[Any], str]: ... 
diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 02bc025b5e40..e5f4ec0e77f5 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -2,18 +2,15 @@ """ import functools +import math import types -import warnings import numpy as np -from .._utils import set_module -from . import multiarray as mu -from . import overrides -from . import umath as um -from . import numerictypes as nt -from .multiarray import asarray, array, asanyarray, concatenate +from numpy._utils import set_module + +from . import _methods, multiarray as mu, numerictypes as nt, overrides, umath as um from ._multiarray_umath import _array_converter -from . import _methods +from .multiarray import asanyarray, asarray, concatenate _dt_ = nt.sctype2char @@ -21,8 +18,8 @@ __all__ = [ 'all', 'amax', 'amin', 'any', 'argmax', 'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip', - 'compress', 'cumprod', 'cumsum', 'diagonal', 'mean', - 'max', 'min', 'matrix_transpose', + 'compress', 'cumprod', 'cumsum', 'cumulative_prod', 'cumulative_sum', + 'diagonal', 'mean', 'max', 'min', 'matrix_transpose', 'ndim', 'nonzero', 'partition', 'prod', 'ptp', 'put', 'ravel', 'repeat', 'reshape', 'resize', 'round', 'searchsorted', 'shape', 'size', 'sort', 'squeeze', @@ -134,9 +131,6 @@ def take(a, indices, axis=None, out=None, mode='raise'): The source array. indices : array_like (Nj...) The indices of the values to extract. - - .. versionadded:: 1.8.0 - Also allow scalars for indices. axis : int, optional The axis over which to select values. 
By default, the flattened @@ -169,14 +163,13 @@ def take(a, indices, axis=None, out=None, mode='raise'): Notes ----- - By eliminating the inner loop in the description above, and using `s_` to build simple slice objects, `take` can be expressed in terms of applying fancy indexing to each 1-d slice:: Ni, Nk = a.shape[:axis], a.shape[axis+1:] for ii in ndindex(Ni): - for kk in ndindex(Nj): + for kk in ndindex(Nk): out[ii + s_[...,] + kk] = a[ii + s_[:,] + kk][indices] For this reason, it is equivalent to (but faster than) the following use @@ -186,6 +179,7 @@ def take(a, indices, axis=None, out=None, mode='raise'): Examples -------- + >>> import numpy as np >>> a = [4, 3, 5, 7, 6, 8] >>> indices = [0, 1, 4] >>> np.take(a, indices) @@ -206,13 +200,12 @@ def take(a, indices, axis=None, out=None, mode='raise'): return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode) -def _reshape_dispatcher(a, /, shape=None, *, newshape=None, order=None, - copy=None): +def _reshape_dispatcher(a, /, shape, order=None, *, copy=None): return (a,) @array_function_dispatch(_reshape_dispatcher) -def reshape(a, /, shape=None, *, newshape=None, order='C', copy=None): +def reshape(a, /, shape, order='C', *, copy=None): """ Gives a new shape to an array without changing its data. @@ -225,10 +218,6 @@ def reshape(a, /, shape=None, *, newshape=None, order='C', copy=None): an integer, then the result will be a 1-D array of that length. One shape dimension can be -1. In this case, the value is inferred from the length of the array and remaining dimensions. - newshape : int or tuple of ints - .. deprecated:: 2.1 - Replaced by ``shape`` argument. Retained for backward - compatibility. order : {'C', 'F', 'A'}, optional Read the elements of ``a`` using this index order, and place the elements into the reshaped array using this index order. 
'C' @@ -293,6 +282,7 @@ def reshape(a, /, shape=None, *, newshape=None, order='C', copy=None): Examples -------- + >>> import numpy as np >>> a = np.array([[1,2,3], [4,5,6]]) >>> np.reshape(a, 6) array([1, 2, 3, 4, 5, 6]) @@ -304,23 +294,6 @@ def reshape(a, /, shape=None, *, newshape=None, order='C', copy=None): [3, 4], [5, 6]]) """ - if newshape is None and shape is None: - raise TypeError( - "reshape() missing 1 required positional argument: 'shape'") - if newshape is not None: - if shape is not None: - raise TypeError( - "You cannot specify 'newshape' and 'shape' arguments " - "at the same time.") - # Deprecated in NumPy 2.1, 2024-04-18 - warnings.warn( - "`newshape` keyword argument is deprecated, " - "use `shape=...` or pass shape positionally instead. " - "(deprecated in NumPy 2.1)", - DeprecationWarning, - stacklevel=2, - ) - shape = newshape if copy is not None: return _wrapfunc(a, 'reshape', shape, order=order, copy=copy) return _wrapfunc(a, 'reshape', shape, order=order) @@ -339,10 +312,9 @@ def choose(a, choices, out=None, mode='raise'): First of all, if confused or uncertain, definitely look at the Examples - in its full generality, this function is less simple than it might - seem from the following code description (below ndi = - `numpy.lib.index_tricks`): + seem from the following code description:: - ``np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])``. + np.choose(a,c) == np.array([c[a[I]][I] for I in np.ndindex(a.shape)]) But this omits some subtleties. Here is a fully general summary: @@ -416,6 +388,7 @@ def choose(a, choices, out=None, mode='raise'): Examples -------- + >>> import numpy as np >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13], ... 
[20, 21, 22, 23], [30, 31, 32, 33]] >>> np.choose([2, 3, 1, 0], choices @@ -490,6 +463,7 @@ def repeat(a, repeats, axis=None): Examples -------- + >>> import numpy as np >>> np.repeat(3, 4) array([3, 3, 3, 3]) >>> x = np.array([[1,2],[3,4]]) @@ -551,6 +525,7 @@ def put(a, ind, v, mode='raise'): Examples -------- + >>> import numpy as np >>> a = np.arange(5) >>> np.put(a, [0, 2], [-44, -55]) >>> a @@ -565,8 +540,7 @@ def put(a, ind, v, mode='raise'): try: put = a.put except AttributeError as e: - raise TypeError("argument 1 must be numpy.ndarray, " - "not {name}".format(name=type(a).__name__)) from e + raise TypeError(f"argument 1 must be numpy.ndarray, not {type(a)}") from e return put(ind, v, mode=mode) @@ -599,6 +573,7 @@ def swapaxes(a, axis1, axis2): Examples -------- + >>> import numpy as np >>> x = np.array([[1,2,3]]) >>> np.swapaxes(x,0,1) array([[1], @@ -647,10 +622,11 @@ def transpose(a, axes=None): Input array. axes : tuple or list of ints, optional If specified, it must be a tuple or list which contains a permutation - of [0,1,...,N-1] where N is the number of axes of `a`. The `i`'th axis - of the returned array will correspond to the axis numbered ``axes[i]`` - of the input. If not specified, defaults to ``range(a.ndim)[::-1]``, - which reverses the order of the axes. + of [0, 1, ..., N-1] where N is the number of axes of `a`. Negative + indices can also be used to specify axes. The i-th axis of the returned + array will correspond to the axis numbered ``axes[i]`` of the input. + If not specified, defaults to ``range(a.ndim)[::-1]``, which reverses + the order of the axes. 
Returns ------- @@ -670,6 +646,7 @@ def transpose(a, axes=None): Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> a array([[1, 2], @@ -692,6 +669,10 @@ def transpose(a, axes=None): >>> np.transpose(a).shape (5, 4, 3, 2) + >>> a = np.arange(3*4*5).reshape((3, 4, 5)) + >>> np.transpose(a, (-1, 0, -2)).shape + (5, 3, 4) + """ return _wrapfunc(a, 'transpose', axes) @@ -724,6 +705,7 @@ def matrix_transpose(x, /): Examples -------- + >>> import numpy as np >>> np.matrix_transpose([[1, 2], [3, 4]]) array([[1, 3], [2, 4]]) @@ -760,8 +742,6 @@ def partition(a, kth, axis=-1, kind='introselect', order=None): partitions on the either side of the k-th element in the output array is undefined. - .. versionadded:: 1.8.0 - Parameters ---------- a : array_like @@ -774,8 +754,6 @@ def partition(a, kth, axis=-1, kind='introselect', order=None): provided with a sequence of k-th it will partition all elements indexed by k-th of them into their sorted position at once. - .. deprecated:: 1.22.0 - Passing booleans as index is deprecated. axis : int or None, optional Axis along which to sort. If None, the array is flattened before sorting. The default is -1, which sorts along the last axis. @@ -823,8 +801,11 @@ def partition(a, kth, axis=-1, kind='introselect', order=None): the real parts except when they are equal, in which case the order is determined by the imaginary parts. + The sort order of ``np.nan`` is bigger than ``np.inf``. + Examples -------- + >>> import numpy as np >>> a = np.array([7, 1, 7, 7, 1, 5, 7, 2, 3, 2, 6, 2, 3, 0]) >>> p = np.partition(a, 4) >>> p @@ -872,8 +853,6 @@ def argpartition(a, kth, axis=-1, kind='introselect', order=None): indices of the same shape as `a` that index data along the given axis in partitioned order. - .. 
versionadded:: 1.8.0 - Parameters ---------- a : array_like @@ -886,8 +865,6 @@ def argpartition(a, kth, axis=-1, kind='introselect', order=None): sequence of k-th it will partition all of them into their sorted position at once. - .. deprecated:: 1.22.0 - Passing booleans as index is deprecated. axis : int or None, optional Axis along which to sort. The default is -1 (the last axis). If None, the flattened array is used. @@ -918,12 +895,20 @@ def argpartition(a, kth, axis=-1, kind='introselect', order=None): Notes ----- - See `partition` for notes on the different selection algorithms. + The returned indices are not guaranteed to be sorted according to + the values. Furthermore, the default selection algorithm ``introselect`` + is unstable, and hence the returned indices are not guaranteed + to be the earliest/latest occurrence of the element. + + `argpartition` works for real/complex inputs with nan values, + see `partition` for notes on the enhanced sort order and + different selection algorithms. Examples -------- One dimensional array: + >>> import numpy as np >>> x = np.array([3, 4, 2, 1]) >>> x[np.argpartition(x, 3)] array([2, 1, 3, 4]) # may vary @@ -968,10 +953,6 @@ def sort(a, axis=-1, kind=None, order=None, *, stable=None): and 'mergesort' use timsort or radix sort under the covers and, in general, the actual implementation will vary with data type. The 'mergesort' option is retained for backwards compatibility. - - .. versionchanged:: 1.15.0. - The 'stable' option was added. - order : str or list of str, optional When `a` is an array with fields defined, this argument specifies which fields to compare first, second, etc. A single field can @@ -1042,8 +1023,6 @@ def sort(a, axis=-1, kind=None, order=None, *, stable=None): placements are sorted according to the non-nan part if it exists. Non-nan values are sorted as before. - .. versionadded:: 1.12.0 - quicksort has been changed to: `introsort `_. 
When sorting does not make enough progress it switches to @@ -1060,8 +1039,6 @@ def sort(a, axis=-1, kind=None, order=None, *, stable=None): ability to select the implementation and it is hardwired for the different data types. - .. versionadded:: 1.17.0 - Timsort is added for better performance on already or nearly sorted data. On random data timsort is almost identical to mergesort. It is now used for stable sort while quicksort is still the @@ -1071,12 +1048,11 @@ def sort(a, axis=-1, kind=None, order=None, *, stable=None): 'mergesort' and 'stable' are mapped to radix sort for integer data types. Radix sort is an O(n) sort instead of O(n log n). - .. versionchanged:: 1.18.0 - NaT now sorts to the end of arrays for consistency with NaN. Examples -------- + >>> import numpy as np >>> a = np.array([[1,4],[3,1]]) >>> np.sort(a) # sort along the last axis array([[1, 4], @@ -1142,9 +1118,6 @@ def argsort(a, axis=-1, kind=None, order=None, *, stable=None): and 'mergesort' use timsort under the covers and, in general, the actual implementation will vary with data type. The 'mergesort' option is retained for backwards compatibility. - - .. versionchanged:: 1.15.0. - The 'stable' option was added. order : str or list of str, optional When `a` is an array with fields defined, this argument specifies which fields to compare first, second, etc. 
A single field can @@ -1187,6 +1160,7 @@ def argsort(a, axis=-1, kind=None, order=None, *, stable=None): -------- One dimensional array: + >>> import numpy as np >>> x = np.array([3, 1, 2]) >>> np.argsort(x) array([1, 2, 0]) @@ -1214,7 +1188,7 @@ def argsort(a, axis=-1, kind=None, order=None, *, stable=None): array([[0, 3], [2, 2]]) - Indices of the sorted elements of a N-dimensional array: + Indices of the sorted elements of an N-dimensional array: >>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape) >>> ind @@ -1289,6 +1263,7 @@ def argmax(a, axis=None, out=None, *, keepdims=np._NoValue): Examples -------- + >>> import numpy as np >>> a = np.arange(6).reshape(2,3) + 10 >>> a array([[10, 11, 12], @@ -1300,8 +1275,10 @@ def argmax(a, axis=None, out=None, *, keepdims=np._NoValue): >>> np.argmax(a, axis=1) array([2, 2]) - Indexes of the maximal elements of a N-dimensional array: + Indexes of the maximal elements of an N-dimensional array: + >>> a.flat[np.argmax(a)] + 15 >>> ind = np.unravel_index(np.argmax(a, axis=None), a.shape) >>> ind (1, 2) @@ -1386,6 +1363,7 @@ def argmin(a, axis=None, out=None, *, keepdims=np._NoValue): Examples -------- + >>> import numpy as np >>> a = np.arange(6).reshape(2,3) + 10 >>> a array([[10, 11, 12], @@ -1397,8 +1375,10 @@ def argmin(a, axis=None, out=None, *, keepdims=np._NoValue): >>> np.argmin(a, axis=1) array([0, 0]) - Indices of the minimum elements of a N-dimensional array: + Indices of the minimum elements of an N-dimensional array: + >>> a.flat[np.argmin(a)] + 10 >>> ind = np.unravel_index(np.argmin(a, axis=None), a.shape) >>> ind (0, 0) @@ -1472,8 +1452,6 @@ def searchsorted(a, v, side='left', sorter=None): Optional array of integer indices that sort array a into ascending order. They are typically the result of argsort. - .. 
versionadded:: 1.7.0 - Returns ------- indices : int or array of ints @@ -1499,6 +1477,7 @@ def searchsorted(a, v, side='left', sorter=None): Examples -------- + >>> import numpy as np >>> np.searchsorted([11,12,13,14,15], 13) 2 >>> np.searchsorted([11,12,13,14,15], 13, side='right') @@ -1506,6 +1485,18 @@ def searchsorted(a, v, side='left', sorter=None): >>> np.searchsorted([11,12,13,14,15], [-10, 20, 12, 13]) array([0, 5, 1, 2]) + When `sorter` is used, the returned indices refer to the sorted + array of `a` and not `a` itself: + + >>> a = np.array([40, 10, 20, 30]) + >>> sorter = np.argsort(a) + >>> sorter + array([1, 2, 3, 0]) # Indices that would sort the array 'a' + >>> result = np.searchsorted(a, 25, sorter=sorter) + >>> result + 2 + >>> a[sorter[result]] + 30 # The element at index 2 of the sorted array is 30. """ return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter) @@ -1562,7 +1553,8 @@ def resize(a, new_shape): Examples -------- - >>> a=np.array([[0,1],[2,3]]) + >>> import numpy as np + >>> a = np.array([[0,1],[2,3]]) >>> np.resize(a,(2,3)) array([[0, 1, 2], [3, 0, 1]]) @@ -1590,7 +1582,8 @@ def resize(a, new_shape): # First case must zero fill. The second would have repeats == 0. return np.zeros_like(a, shape=new_shape) - repeats = -(-new_size // a.size) # ceil division + # ceiling division without negating new_size + repeats = (new_size + a.size - 1) // a.size a = concatenate((a,) * repeats)[:new_size] return reshape(a, new_shape) @@ -1610,8 +1603,6 @@ def squeeze(a, axis=None): a : array_like Input data. axis : None or int or tuple of ints, optional - .. versionadded:: 1.7.0 - Selects a subset of the entries of length one in the shape. If an axis is selected with shape entry greater than one, an error is raised. 
@@ -1636,6 +1627,7 @@ def squeeze(a, axis=None): Examples -------- + >>> import numpy as np >>> x = np.array([[[0], [1], [2]]]) >>> x.shape (1, 3, 1) @@ -1749,6 +1741,7 @@ def diagonal(a, offset=0, axis1=0, axis2=1): Examples -------- + >>> import numpy as np >>> a = np.arange(4).reshape(2,2) >>> a array([[0, 1], @@ -1856,6 +1849,7 @@ def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None): Examples -------- + >>> import numpy as np >>> np.trace(np.eye(3)) 3.0 >>> a = np.arange(8).reshape((2,2,2)) @@ -1947,6 +1941,7 @@ def ravel(a, order='C'): -------- It is equivalent to ``reshape(-1, order=order)``. + >>> import numpy as np >>> x = np.array([[1, 2, 3], [4, 5, 6]]) >>> np.ravel(x) array([1, 2, 3, 4, 5, 6]) @@ -2008,15 +2003,6 @@ def nonzero(a): To group the indices by element, rather than dimension, use `argwhere`, which returns a row for each non-zero element. - .. note:: - - When called on a zero-d array or scalar, ``nonzero(a)`` is treated - as ``nonzero(atleast_1d(a))``. - - .. deprecated:: 1.17.0 - - Use `atleast_1d` explicitly if this behavior is deliberate. - Parameters ---------- a : array_like @@ -2040,11 +2026,12 @@ def nonzero(a): Notes ----- While the nonzero values can be obtained with ``a[nonzero(a)]``, it is - recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead, which + recommended to use ``x[x.astype(np.bool)]`` or ``x[x != 0]`` instead, which will correctly handle 0-d arrays. 
Examples -------- + >>> import numpy as np >>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]]) >>> x array([[3, 0, 0], @@ -2118,6 +2105,7 @@ def shape(a): Examples -------- + >>> import numpy as np >>> np.shape(np.eye(3)) (3, 3) >>> np.shape([[1, 3]]) @@ -2185,6 +2173,7 @@ def compress(condition, a, axis=None, out=None): Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4], [5, 6]]) >>> a array([[1, 2], @@ -2210,12 +2199,14 @@ def compress(condition, a, axis=None, out=None): return _wrapfunc(a, 'compress', condition, axis=axis, out=out) -def _clip_dispatcher(a, a_min, a_max, out=None, **kwargs): - return (a, a_min, a_max) +def _clip_dispatcher(a, a_min=None, a_max=None, out=None, *, min=None, + max=None, **kwargs): + return (a, a_min, a_max, out, min, max) @array_function_dispatch(_clip_dispatcher) -def clip(a, a_min, a_max, out=None, **kwargs): +def clip(a, a_min=np._NoValue, a_max=np._NoValue, out=None, *, + min=np._NoValue, max=np._NoValue, **kwargs): """ Clip (limit) the values in an array. @@ -2234,18 +2225,23 @@ def clip(a, a_min, a_max, out=None, **kwargs): Array containing elements to clip. a_min, a_max : array_like or None Minimum and maximum value. If ``None``, clipping is not performed on - the corresponding edge. Only one of `a_min` and `a_max` may be - ``None``. Both are broadcast against `a`. + the corresponding edge. If both ``a_min`` and ``a_max`` are ``None``, + the elements of the returned array stay the same. Both are broadcasted + against ``a``. out : ndarray, optional The results will be placed in this array. It may be the input array for in-place clipping. `out` must be of the right shape to hold the output. Its type is preserved. + min, max : array_like or None + Array API compatible alternatives for ``a_min`` and ``a_max`` + arguments. Either ``a_min`` and ``a_max`` or ``min`` and ``max`` + can be passed at the same time. Default: ``None``. + + .. 
versionadded:: 2.1.0 **kwargs For other keyword-only arguments, see the :ref:`ufunc docs `. - .. versionadded:: 1.17.0 - Returns ------- clipped_array : ndarray @@ -2265,6 +2261,7 @@ def clip(a, a_min, a_max, out=None, **kwargs): Examples -------- + >>> import numpy as np >>> a = np.arange(10) >>> a array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) @@ -2283,6 +2280,19 @@ def clip(a, a_min, a_max, out=None, **kwargs): array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8]) """ + if a_min is np._NoValue and a_max is np._NoValue: + a_min = None if min is np._NoValue else min + a_max = None if max is np._NoValue else max + elif a_min is np._NoValue: + raise TypeError("clip() missing 1 required positional " + "argument: 'a_min'") + elif a_max is np._NoValue: + raise TypeError("clip() missing 1 required positional " + "argument: 'a_max'") + elif min is not np._NoValue or max is not np._NoValue: + raise ValueError("Passing `min` or `max` keyword argument when " + "`a_min` and `a_max` are provided is forbidden.") + return _wrapfunc(a, 'clip', a_min, a_max, out=out, **kwargs) @@ -2304,11 +2314,8 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, axis : None or int or tuple of ints, optional Axis or axes along which a sum is performed. The default, axis=None, will sum all of the elements of the input array. If - axis is negative it counts from the last to the first axis. - - .. versionadded:: 1.7.0 - - If axis is a tuple of ints, a sum is performed on all of the axes + axis is negative it counts from the last to the first axis. If + axis is a tuple of ints, a sum is performed on all of the axes specified in the tuple instead of a single axis or all the axes as before. dtype : dtype, optional @@ -2334,14 +2341,9 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, exceptions will be raised. initial : scalar, optional Starting value for the sum. See `~numpy.ufunc.reduce` for details. - - .. 
versionadded:: 1.15.0 - where : array_like of bool, optional Elements to include in the sum. See `~numpy.ufunc.reduce` for details. - .. versionadded:: 1.17.0 - Returns ------- sum_along_axis : ndarray @@ -2355,6 +2357,7 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, See Also -------- ndarray.sum : Equivalent method. add: ``numpy.add.reduce`` equivalent function. cumsum : Cumulative sum of array elements. + trapezoid : Integration of array values using composite trapezoidal rule. mean, average @@ -2382,15 +2385,16 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, more precise approach to summation. Especially when summing a large number of lower precision floating point numbers, such as ``float32``, numerical errors can become significant. - In such cases it can be advisable to use `dtype="float64"` to use a higher + In such cases it can be advisable to use `dtype=np.float64` to use a higher precision for the output. Examples -------- + >>> import numpy as np >>> np.sum([0.5, 1.5]) 2.0 >>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32) - 1 + np.int32(1) >>> np.sum([[0, 1], [0, 5]]) 6 >>> np.sum([[0, 1], [0, 5]], axis=0) @@ -2403,7 +2407,7 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, If the accumulator is too small, overflow occurs: >>> np.ones(128, dtype=np.int8).sum(dtype=np.int8) - -128 + np.int8(-128) You can also start the sum with a value other than zero: @@ -2412,19 +2416,12 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, """ if isinstance(a, _gentype): # 2018-02-25, 1.15.0 - warnings.warn( - "Calling np.sum(generator) is deprecated, and in the future will " - "give a different result. Use np.sum(np.fromiter(generator)) or " + raise TypeError( + "Calling np.sum(generator) is deprecated. " + "Use np.sum(np.fromiter(generator)) or " "the python sum builtin instead.", - DeprecationWarning, stacklevel=2 ) - res = _sum_(a) - if out is not None: - out[...]
= res - return out - return res - return _wrapreduction( a, np.add, 'sum', axis, dtype, out, keepdims=keepdims, initial=initial, where=where @@ -2451,11 +2448,8 @@ def any(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue): Axis or axes along which a logical OR reduction is performed. The default (``axis=None``) is to perform a logical OR over all the dimensions of the input array. `axis` may be negative, in - which case it counts from the last to the first axis. - - .. versionadded:: 1.7.0 - - If this is a tuple of ints, a reduction is performed on multiple + which case it counts from the last to the first axis. If this + is a tuple of ints, a reduction is performed on multiple axes, instead of a single axis or all the axes as before. out : ndarray, optional Alternate output array in which to place the result. It must have @@ -2505,6 +2499,7 @@ def any(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue): Examples -------- + >>> import numpy as np >>> np.any([[True, False], [True, True]]) True @@ -2563,11 +2558,8 @@ def all(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue): Axis or axes along which a logical AND reduction is performed. The default (``axis=None``) is to perform a logical AND over all the dimensions of the input array. `axis` may be negative, in - which case it counts from the last to the first axis. - - .. versionadded:: 1.7.0 - - If this is a tuple of ints, a reduction is performed on multiple + which case it counts from the last to the first axis. If this + is a tuple of ints, a reduction is performed on multiple axes, instead of a single axis or all the axes as before. out : ndarray, optional Alternate output array in which to place the result. 
@@ -2617,6 +2609,7 @@ def all(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue): Examples -------- + >>> import numpy as np >>> np.all([[True,False],[True,True]]) False @@ -2642,6 +2635,202 @@ def all(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue): keepdims=keepdims, where=where) +def _cumulative_func(x, func, axis, dtype, out, include_initial): + x = np.atleast_1d(x) + x_ndim = x.ndim + if axis is None: + if x_ndim >= 2: + raise ValueError("For arrays which have more than one dimension " + "``axis`` argument is required.") + axis = 0 + + if out is not None and include_initial: + item = [slice(None)] * x_ndim + item[axis] = slice(1, None) + func.accumulate(x, axis=axis, dtype=dtype, out=out[tuple(item)]) + item[axis] = 0 + out[tuple(item)] = func.identity + return out + + res = func.accumulate(x, axis=axis, dtype=dtype, out=out) + if include_initial: + initial_shape = list(x.shape) + initial_shape[axis] = 1 + res = np.concat( + [np.full_like(res, func.identity, shape=initial_shape), res], + axis=axis, + ) + + return res + + +def _cumulative_prod_dispatcher(x, /, *, axis=None, dtype=None, out=None, + include_initial=None): + return (x, out) + + +@array_function_dispatch(_cumulative_prod_dispatcher) +def cumulative_prod(x, /, *, axis=None, dtype=None, out=None, + include_initial=False): + """ + Return the cumulative product of elements along a given axis. + + This function is an Array API compatible alternative to `numpy.cumprod`. + + Parameters + ---------- + x : array_like + Input array. + axis : int, optional + Axis along which the cumulative product is computed. The default + (None) is only allowed for one-dimensional arrays. For arrays + with more than one dimension ``axis`` is required. + dtype : dtype, optional + Type of the returned array, as well as of the accumulator in which + the elements are multiplied. 
If ``dtype`` is not specified, it + defaults to the dtype of ``x``, unless ``x`` has an integer dtype + with a precision less than that of the default platform integer. + In that case, the default platform integer is used instead. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type of the resulting values will be cast if necessary. + See :ref:`ufuncs-output-type` for more details. + include_initial : bool, optional + Boolean indicating whether to include the initial value (ones) as + the first value in the output. With ``include_initial=True`` + the shape of the output is different than the shape of the input. + Default: ``False``. + + Returns + ------- + cumulative_prod_along_axis : ndarray + A new array holding the result is returned unless ``out`` is + specified, in which case a reference to ``out`` is returned. The + result has the same shape as ``x`` if ``include_initial=False``. + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + Examples + -------- + >>> a = np.array([1, 2, 3]) + >>> np.cumulative_prod(a) # intermediate results 1, 1*2 + ... # total product 1*2*3 = 6 + array([1, 2, 6]) + >>> a = np.array([1, 2, 3, 4, 5, 6]) + >>> np.cumulative_prod(a, dtype=np.float64) # specify type of output + array([ 1., 2., 6., 24., 120., 720.]) + + The cumulative product for each column (i.e., over the rows) of ``b``: + + >>> b = np.array([[1, 2, 3], [4, 5, 6]]) + >>> np.cumulative_prod(b, axis=0) + array([[ 1, 2, 3], + [ 4, 10, 18]]) + + The cumulative product for each row (i.e. 
over the columns) of ``b``: + + >>> np.cumulative_prod(b, axis=1) + array([[ 1, 2, 6], + [ 4, 20, 120]]) + + """ + return _cumulative_func(x, um.multiply, axis, dtype, out, include_initial) + + +def _cumulative_sum_dispatcher(x, /, *, axis=None, dtype=None, out=None, + include_initial=None): + return (x, out) + + +@array_function_dispatch(_cumulative_sum_dispatcher) +def cumulative_sum(x, /, *, axis=None, dtype=None, out=None, + include_initial=False): + """ + Return the cumulative sum of the elements along a given axis. + + This function is an Array API compatible alternative to `numpy.cumsum`. + + Parameters + ---------- + x : array_like + Input array. + axis : int, optional + Axis along which the cumulative sum is computed. The default + (None) is only allowed for one-dimensional arrays. For arrays + with more than one dimension ``axis`` is required. + dtype : dtype, optional + Type of the returned array and of the accumulator in which the + elements are summed. If ``dtype`` is not specified, it defaults + to the dtype of ``x``, unless ``x`` has an integer dtype with + a precision less than that of the default platform integer. + In that case, the default platform integer is used. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. See :ref:`ufuncs-output-type` + for more details. + include_initial : bool, optional + Boolean indicating whether to include the initial value (zeros) as + the first value in the output. With ``include_initial=True`` + the shape of the output is different than the shape of the input. + Default: ``False``. + + Returns + ------- + cumulative_sum_along_axis : ndarray + A new array holding the result is returned unless ``out`` is + specified, in which case a reference to ``out`` is returned. The + result has the same shape as ``x`` if ``include_initial=False``. 
+ + See Also + -------- + sum : Sum array elements. + trapezoid : Integration of array values using composite trapezoidal rule. + diff : Calculate the n-th discrete difference along given axis. + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + ``cumulative_sum(a)[-1]`` may not be equal to ``sum(a)`` for + floating-point values since ``sum`` may use a pairwise summation routine, + reducing the roundoff-error. See `sum` for more information. + + Examples + -------- + >>> a = np.array([1, 2, 3, 4, 5, 6]) + >>> a + array([1, 2, 3, 4, 5, 6]) + >>> np.cumulative_sum(a) + array([ 1, 3, 6, 10, 15, 21]) + >>> np.cumulative_sum(a, dtype=np.float64) # specifies type of output value(s) + array([ 1., 3., 6., 10., 15., 21.]) + + >>> b = np.array([[1, 2, 3], [4, 5, 6]]) + >>> np.cumulative_sum(b,axis=0) # sum over rows for each of the 3 columns + array([[1, 2, 3], + [5, 7, 9]]) + >>> np.cumulative_sum(b,axis=1) # sum over columns for each of the 2 rows + array([[ 1, 3, 6], + [ 4, 9, 15]]) + + ``cumulative_sum(c)[-1]`` may not be equal to ``sum(c)`` + + >>> c = np.array([1, 2e-9, 3e-9] * 1000000) + >>> np.cumulative_sum(c)[-1] + 1000000.0050045159 + >>> c.sum() + 1000000.0050000029 + + """ + return _cumulative_func(x, um.add, axis, dtype, out, include_initial) + + def _cumsum_dispatcher(a, axis=None, dtype=None, out=None): return (a, out) @@ -2680,7 +2869,9 @@ def cumsum(a, axis=None, dtype=None, out=None): See Also -------- + cumulative_sum : Array API compatible alternative for ``cumsum``. sum : Sum array elements. + trapezoid : Integration of array values using composite trapezoidal rule. diff : Calculate the n-th discrete difference along given axis. 
Notes @@ -2694,13 +2885,14 @@ def cumsum(a, axis=None, dtype=None, out=None): Examples -------- + >>> import numpy as np >>> a = np.array([[1,2,3], [4,5,6]]) >>> a array([[1, 2, 3], [4, 5, 6]]) >>> np.cumsum(a) array([ 1, 3, 6, 10, 15, 21]) - >>> np.cumsum(a, dtype=float) # specifies type of output value(s) + >>> np.cumsum(a, dtype=np.float64) # specifies type of output value(s) array([ 1., 3., 6., 10., 15., 21.]) >>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns @@ -2749,9 +2941,6 @@ def ptp(a, axis=None, out=None, keepdims=np._NoValue): Axis along which to find the peaks. By default, flatten the array. `axis` may be negative, in which case it counts from the last to the first axis. - - .. versionadded:: 1.15.0 - If this is a tuple of ints, a reduction is performed on multiple axes, instead of a single axis or all the axes as before. out : array_like @@ -2778,6 +2967,7 @@ def ptp(a, axis=None, out=None, keepdims=np._NoValue): Examples -------- + >>> import numpy as np >>> x = np.array([[4, 9, 2, 10], ... [6, 9, 7, 12]]) @@ -2831,12 +3021,9 @@ def max(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, Input data. axis : None or int or tuple of ints, optional Axis or axes along which to operate. By default, flattened input is - used. - - .. versionadded:: 1.7.0 + used. If this is a tuple of ints, the maximum is selected over + multiple axes, instead of a single axis or all the axes as before. - If this is a tuple of ints, the maximum is selected over multiple axes, - instead of a single axis or all the axes as before. out : ndarray, optional Alternative output array in which to place the result. Must be of the same shape and buffer length as the expected output. @@ -2857,14 +3044,10 @@ def max(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, The minimum value of an output element. Must be present to allow computation on empty slice. See `~numpy.ufunc.reduce` for details. - .. 
versionadded:: 1.15.0 - where : array_like of bool, optional Elements to compare for the maximum. See `~numpy.ufunc.reduce` for details. - .. versionadded:: 1.17.0 - Returns ------- max : ndarray or scalar @@ -2900,6 +3083,7 @@ def max(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, Examples -------- + >>> import numpy as np >>> a = np.arange(4).reshape((2,2)) >>> a array([[0, 1], @@ -2912,7 +3096,7 @@ def max(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, array([1, 3]) >>> np.max(a, where=[False, True], initial=-1, axis=0) array([-1, 3]) - >>> b = np.arange(5, dtype=float) + >>> b = np.arange(5, dtype=np.float64) >>> b[2] = np.nan >>> np.max(b) np.float64(nan) @@ -2976,8 +3160,6 @@ def min(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, Axis or axes along which to operate. By default, flattened input is used. - .. versionadded:: 1.7.0 - If this is a tuple of ints, the minimum is selected over multiple axes, instead of a single axis or all the axes as before. out : ndarray, optional @@ -3000,14 +3182,10 @@ def min(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, The maximum value of an output element. Must be present to allow computation on empty slice. See `~numpy.ufunc.reduce` for details. - .. versionadded:: 1.15.0 - where : array_like of bool, optional Elements to compare for the minimum. See `~numpy.ufunc.reduce` for details. - .. 
versionadded:: 1.17.0 - Returns ------- min : ndarray or scalar @@ -3043,6 +3221,7 @@ def min(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, Examples -------- + >>> import numpy as np >>> a = np.arange(4).reshape((2,2)) >>> a array([[0, 1], @@ -3056,7 +3235,7 @@ def min(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, >>> np.min(a, where=[False, True], initial=10, axis=0) array([10, 1]) - >>> b = np.arange(5, dtype=float) + >>> b = np.arange(5, dtype=np.float64) >>> b[2] = np.nan >>> np.min(b) np.float64(nan) @@ -3121,8 +3300,6 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, input array. If axis is negative it counts from the last to the first axis. - .. versionadded:: 1.7.0 - If axis is a tuple of ints, a product is performed on all of the axes specified in the tuple instead of a single axis or all the axes as before. @@ -3150,15 +3327,10 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial : scalar, optional The starting value for this product. See `~numpy.ufunc.reduce` for details. - - .. versionadded:: 1.15.0 - where : array_like of bool, optional Elements to include in the product. See `~numpy.ufunc.reduce` for details. - .. versionadded:: 1.17.0 - Returns ------- product_along_axis : ndarray, see `dtype` parameter above. @@ -3188,6 +3360,7 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, -------- By default, calculate the product of all elements: + >>> import numpy as np >>> np.prod([1.,2.]) 2.0 @@ -3267,6 +3440,7 @@ def cumprod(a, axis=None, dtype=None, out=None): See Also -------- + cumulative_prod : Array API compatible alternative for ``cumprod``. :ref:`ufuncs-output-type` Notes @@ -3276,12 +3450,13 @@ def cumprod(a, axis=None, dtype=None, out=None): Examples -------- + >>> import numpy as np >>> a = np.array([1,2,3]) >>> np.cumprod(a) # intermediate results 1, 1*2 ... 
# total product 1*2*3 = 6 array([1, 2, 6]) >>> a = np.array([[1, 2, 3], [4, 5, 6]]) - >>> np.cumprod(a, dtype=float) # specify type of output + >>> np.cumprod(a, dtype=np.float64) # specify type of output array([ 1., 2., 6., 24., 120., 720.]) The cumulative product for each column (i.e., over the rows) of `a`: @@ -3328,6 +3503,7 @@ def ndim(a): Examples -------- + >>> import numpy as np >>> np.ndim([[1,2,3],[4,5,6]]) 2 >>> np.ndim(np.array([[1,2,3],[4,5,6]])) @@ -3355,10 +3531,13 @@ def size(a, axis=None): ---------- a : array_like Input data. - axis : int, optional - Axis along which the elements are counted. By default, give + axis : None or int or tuple of ints, optional + Axis or axes along which the elements are counted. By default, give the total number of elements. + .. versionchanged:: 2.4 + Extended to accept multiple axes. + Returns ------- element_count : int @@ -3372,13 +3551,16 @@ def size(a, axis=None): Examples -------- + >>> import numpy as np >>> a = np.array([[1,2,3],[4,5,6]]) >>> np.size(a) 6 - >>> np.size(a,1) + >>> np.size(a,axis=1) 3 - >>> np.size(a,0) + >>> np.size(a,axis=0) 2 + >>> np.size(a,axis=(0,1)) + 6 """ if axis is None: @@ -3387,10 +3569,10 @@ def size(a, axis=None): except AttributeError: return asarray(a).size else: - try: - return a.shape[axis] - except AttributeError: - return asarray(a).shape[axis] + _shape = shape(a) + from .numeric import normalize_axis_tuple + axis = normalize_axis_tuple(axis, len(_shape), allow_duplicate=False) + return math.prod(_shape[ax] for ax in axis) def _round_dispatcher(a, decimals=None, out=None): @@ -3476,6 +3658,7 @@ def round(a, decimals=0, out=None): Examples -------- + >>> import numpy as np >>> np.round([0.37, 1.64]) array([0., 2.]) >>> np.round([0.37, 1.64], decimals=1) @@ -3532,8 +3715,6 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, *, Axis or axes along which the means are computed. The default is to compute the mean of the flattened array. - .. 
versionadded:: 1.7.0 - If this is a tuple of ints, a mean is performed over multiple axes, instead of a single axis or all the axes as before. dtype : data-type, optional @@ -3590,6 +3771,7 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, *, Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> np.mean(a) 2.5 @@ -3604,13 +3786,19 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, *, >>> a[0, :] = 1.0 >>> a[1, :] = 0.1 >>> np.mean(a) - 0.54999924 + np.float32(0.54999924) Computing the mean in float64 is more accurate: >>> np.mean(a, dtype=np.float64) 0.55000000074505806 # may vary + Computing the mean in timedelta64 is available: + + >>> b = np.array([1, 3], dtype="timedelta64[D]") + >>> np.mean(b) + np.timedelta64(2,'D') + Specifying a where argument: >>> a = np.array([[5, 9, 13], [14, 10, 12], [11, 15, 19]]) @@ -3659,9 +3847,6 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, axis : None or int or tuple of ints, optional Axis or axes along which the standard deviation is computed. The default is to compute the standard deviation of the flattened array. - - .. versionadded:: 1.7.0 - If this is a tuple of ints, a standard deviation is performed over multiple axes, instead of a single axis or all the axes as before. dtype : dtype, optional @@ -3699,7 +3884,7 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, The axis for the calculation of the mean should be the same as used in the call to this std function. - .. versionadded:: 1.26.0 + .. versionadded:: 2.0.0 correction : {int, float}, optional Array API compatible name for the ``ddof`` parameter. 
Only one of them @@ -3748,7 +3933,7 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, \sqrt{\frac{\sum_i{|a_i - \bar{a}|^2 }}{N - 1}} - In statistics, the resulting quantity is sometimed called the "sample + In statistics, the resulting quantity is sometimes called the "sample standard deviation" because if `a` is a random sample from a larger population, this calculation provides the square root of an unbiased estimate of the variance of the population. The use of :math:`N-1` in the @@ -3770,6 +3955,7 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> np.std(a) 1.1180339887498949 # may vary @@ -3784,7 +3970,7 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, >>> a[0, :] = 1.0 >>> a[1, :] = 0.1 >>> np.std(a) - 0.45000005 + np.float32(0.45000005) Computing the standard deviation in float64 is more accurate: @@ -3866,9 +4052,6 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, axis : None or int or tuple of ints, optional Axis or axes along which the variance is computed. The default is to compute the variance of the flattened array. - - .. versionadded:: 1.7.0 - If this is a tuple of ints, a variance is performed over multiple axes, instead of a single axis or all the axes as before. dtype : data-type, optional @@ -3905,7 +4088,7 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, The axis for the calculation of the mean should be the same as used in the call to this var function. - .. versionadded:: 1.26.0 + .. versionadded:: 2.0.0 correction : {int, float}, optional Array API compatible name for the ``ddof`` parameter. 
Only one of them @@ -3952,7 +4135,7 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, \frac{\sum_i{|a_i - \bar{a}|^2}}{N - 1} - In statistics, the resulting quantity is sometimed called the "sample + In statistics, the resulting quantity is sometimes called the "sample variance" because if `a` is a random sample from a larger population, this calculation provides an unbiased estimate of the variance of the population. The use of :math:`N-1` in the denominator is often called @@ -3972,6 +4155,7 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> np.var(a) 1.25 @@ -3986,7 +4170,7 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, >>> a[0, :] = 1.0 >>> a[1, :] = 0.1 >>> np.var(a) - 0.20250003 + np.float32(0.20250003) Computing the variance in float64 is more accurate: @@ -4047,4 +4231,3 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs) - diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index cde666f6f37d..3e210e757b4e 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -1,1084 +1,1776 @@ +from _typeshed import Incomplete from collections.abc import Sequence -from typing import Any, overload, TypeVar, Literal, SupportsIndex +from typing import ( + Any, + Literal, + Never, + Protocol, + SupportsIndex, + TypedDict, + Unpack, + overload, + type_check_only, +) import numpy as np from numpy import ( - number, - uint64, - int_, - int64, - intp, - float16, - floating, - complexfloating, - object_, - generic, - _OrderKACF, - _OrderACF, + _CastingKind, _ModeKind, + _OrderACF, + _OrderKACF, _PartitionKind, _SortKind, _SortSide, - _CastingKind, + complexfloating, + float16, + floating, + int64, + int_, + intp, + object_, + uint64, ) +from numpy._globals import 
_NoValueType from numpy._typing import ( - DTypeLike, - _DTypeLike, ArrayLike, - _ArrayLike, + DTypeLike, NDArray, - _ShapeLike, - _Shape, + _AnyShape, + _ArrayLike, _ArrayLikeBool_co, - _ArrayLikeUInt_co, - _ArrayLikeInt_co, - _ArrayLikeFloat_co, _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt, + _ArrayLikeInt_co, _ArrayLikeObject_co, - _IntLike_co, + _ArrayLikeUInt_co, _BoolLike_co, _ComplexLike_co, + _DTypeLike, + _IntLike_co, + _NestedSequence, _NumberLike_co, _ScalarLike_co, + _Shape, + _ShapeLike, + _SupportsArray, ) -_SCT = TypeVar("_SCT", bound=generic) -_SCT_uifcO = TypeVar("_SCT_uifcO", bound=number[Any] | object_) -_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) +__all__ = [ + "all", + "amax", + "amin", + "any", + "argmax", + "argmin", + "argpartition", + "argsort", + "around", + "choose", + "clip", + "compress", + "cumprod", + "cumsum", + "cumulative_prod", + "cumulative_sum", + "diagonal", + "mean", + "max", + "min", + "matrix_transpose", + "ndim", + "nonzero", + "partition", + "prod", + "ptp", + "put", + "ravel", + "repeat", + "reshape", + "resize", + "round", + "searchsorted", + "shape", + "size", + "sort", + "squeeze", + "std", + "sum", + "swapaxes", + "take", + "trace", + "transpose", + "var", +] + +@type_check_only +class _SupportsShape[ShapeT_co: _Shape](Protocol): + # NOTE: it matters that `self` is positional only + @property + def shape(self, /) -> ShapeT_co: ... + +@type_check_only +class _UFuncKwargs(TypedDict, total=False): + where: _ArrayLikeBool_co | None + order: _OrderKACF + subok: bool + signature: str | tuple[str | None, ...] + casting: _CastingKind + +# a "sequence" that isn't a string, bytes, bytearray, or memoryview +type _PyArray[_T] = list[_T] | tuple[_T, ...] 
+# `int` also covers `bool` +type _PyScalar = complex | bytes | str -__all__: list[str] +type _0D = tuple[()] +type _1D = tuple[int] +type _2D = tuple[int, int] +type _3D = tuple[int, int, int] +type _4D = tuple[int, int, int, int] +type _Array1D[ScalarT: np.generic] = np.ndarray[_1D, np.dtype[ScalarT]] + +### + +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload -def take( - a: _ArrayLike[_SCT], +def take[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], indices: _IntLike_co, - axis: None = ..., - out: None = ..., - mode: _ModeKind = ..., -) -> _SCT: ... + axis: None = None, + out: None = None, + mode: _ModeKind = "raise", +) -> ScalarT: ... @overload def take( a: ArrayLike, indices: _IntLike_co, - axis: None | SupportsIndex = ..., - out: None = ..., - mode: _ModeKind = ..., + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", ) -> Any: ... @overload -def take( - a: _ArrayLike[_SCT], +def take[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], indices: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., - out: None = ..., - mode: _ModeKind = ..., -) -> NDArray[_SCT]: ... + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", +) -> NDArray[ScalarT]: ... @overload def take( a: ArrayLike, indices: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., - out: None = ..., - mode: _ModeKind = ..., + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", ) -> NDArray[Any]: ... @overload -def take( +def take[ArrayT: np.ndarray]( a: ArrayLike, indices: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., - out: _ArrayType = ..., - mode: _ModeKind = ..., -) -> _ArrayType: ... - + axis: SupportsIndex | None, + out: ArrayT, + mode: _ModeKind = "raise", +) -> ArrayT: ... 
@overload +def take[ArrayT: np.ndarray]( + a: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = None, + *, + out: ArrayT, + mode: _ModeKind = "raise", +) -> ArrayT: ... + +# keep in sync with `ma.core.reshape` +@overload # shape: index +def reshape[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + /, + shape: SupportsIndex, + order: _OrderACF = "C", + *, + copy: bool | None = None, +) -> _Array1D[ScalarT]: ... +@overload # shape: ~ShapeT +def reshape[ScalarT: np.generic, ShapeT: _Shape]( + a: _ArrayLike[ScalarT], + /, + shape: ShapeT, + order: _OrderACF = "C", + *, + copy: bool | None = None, +) -> np.ndarray[ShapeT, np.dtype[ScalarT]]: ... +@overload # shape: Sequence[index] +def reshape[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + /, + shape: Sequence[SupportsIndex], + order: _OrderACF = "C", + *, + copy: bool | None = None, +) -> NDArray[ScalarT]: ... +@overload # shape: index def reshape( - a: _ArrayLike[_SCT], - newshape: _ShapeLike, - order: _OrderACF = ..., - copy: None | bool = ..., -) -> NDArray[_SCT]: ... -@overload + a: ArrayLike, + /, + shape: SupportsIndex, + order: _OrderACF = "C", + *, + copy: bool | None = None, +) -> np.ndarray[_1D]: ... +@overload # shape: ~ShapeT +def reshape[ShapeT: _Shape]( + a: ArrayLike, + /, + shape: ShapeT, + order: _OrderACF = "C", + *, + copy: bool | None = None, +) -> np.ndarray[ShapeT]: ... +@overload # shape: Sequence[index] def reshape( a: ArrayLike, - newshape: _ShapeLike, - order: _OrderACF = ..., - copy: None | bool = ..., + /, + shape: Sequence[SupportsIndex], + order: _OrderACF = "C", + *, + copy: bool | None = None, ) -> NDArray[Any]: ... +# keep in sync with `ma.core.choose` @overload def choose( a: _IntLike_co, choices: ArrayLike, - out: None = ..., - mode: _ModeKind = ..., + out: None = None, + mode: _ModeKind = "raise", ) -> Any: ... 
@overload -def choose( +def choose[ScalarT: np.generic]( a: _ArrayLikeInt_co, - choices: _ArrayLike[_SCT], - out: None = ..., - mode: _ModeKind = ..., -) -> NDArray[_SCT]: ... + choices: _ArrayLike[ScalarT], + out: None = None, + mode: _ModeKind = "raise", +) -> NDArray[ScalarT]: ... @overload def choose( a: _ArrayLikeInt_co, choices: ArrayLike, - out: None = ..., - mode: _ModeKind = ..., + out: None = None, + mode: _ModeKind = "raise", ) -> NDArray[Any]: ... @overload -def choose( +def choose[ArrayT: np.ndarray]( a: _ArrayLikeInt_co, choices: ArrayLike, - out: _ArrayType = ..., - mode: _ModeKind = ..., -) -> _ArrayType: ... + out: ArrayT, + mode: _ModeKind = "raise", +) -> ArrayT: ... +# keep in sync with `ma.core.repeat` +@overload +def repeat[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + repeats: _ArrayLikeInt_co, + axis: None = None, +) -> _Array1D[ScalarT]: ... +@overload +def repeat[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + repeats: _ArrayLikeInt_co, + axis: SupportsIndex, +) -> NDArray[ScalarT]: ... @overload def repeat( - a: _ArrayLike[_SCT], + a: ArrayLike, repeats: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., -) -> NDArray[_SCT]: ... + axis: None = None, +) -> _Array1D[Any]: ... @overload def repeat( a: ArrayLike, repeats: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., + axis: SupportsIndex, ) -> NDArray[Any]: ... +# keep in sync with `ma.core.put` def put( a: NDArray[Any], ind: _ArrayLikeInt_co, v: ArrayLike, - mode: _ModeKind = ..., + mode: _ModeKind = "raise", ) -> None: ... +# keep in sync with `ndarray.swapaxes` and `ma.core.swapaxes` @overload -def swapaxes( - a: _ArrayLike[_SCT], - axis1: SupportsIndex, - axis2: SupportsIndex, -) -> NDArray[_SCT]: ... +def swapaxes[ArrayT: np.ndarray](a: ArrayT, axis1: SupportsIndex, axis2: SupportsIndex) -> ArrayT: ... @overload -def swapaxes( - a: ArrayLike, - axis1: SupportsIndex, - axis2: SupportsIndex, -) -> NDArray[Any]: ... 
+def swapaxes[ScalarT: np.generic](a: _ArrayLike[ScalarT], axis1: SupportsIndex, axis2: SupportsIndex) -> NDArray[ScalarT]: ... +@overload +def swapaxes(a: ArrayLike, axis1: SupportsIndex, axis2: SupportsIndex) -> NDArray[Any]: ... +# keep in sync with `ma.core.transpose` @overload -def transpose( - a: _ArrayLike[_SCT], - axes: None | _ShapeLike = ... -) -> NDArray[_SCT]: ... +def transpose[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axes: _ShapeLike | None = None, +) -> NDArray[ScalarT]: ... @overload def transpose( a: ArrayLike, - axes: None | _ShapeLike = ... + axes: _ShapeLike | None = None, ) -> NDArray[Any]: ... @overload -def matrix_transpose(x: _ArrayLike[_SCT]) -> NDArray[_SCT]: ... +def matrix_transpose[ScalarT: np.generic](x: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload -def matrix_transpose(x: ArrayLike) -> NDArray[Any]: ... +def matrix_transpose(x: ArrayLike, /) -> NDArray[Any]: ... +# +@overload +def partition[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: None = None, +) -> NDArray[ScalarT]: ... @overload def partition( - a: _ArrayLike[_SCT], - kth: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., - kind: _PartitionKind = ..., - order: None | str | Sequence[str] = ..., -) -> NDArray[_SCT]: ... + a: _ArrayLike[np.void], + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, +) -> NDArray[np.void]: ... @overload def partition( a: ArrayLike, - kth: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., - kind: _PartitionKind = ..., - order: None | str | Sequence[str] = ..., + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, ) -> NDArray[Any]: ... 
+# keep roughly in sync with `ndarray.argpartition` +@overload # axis: None def argpartition( a: ArrayLike, - kth: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., - kind: _PartitionKind = ..., - order: None | str | Sequence[str] = ..., -) -> NDArray[intp]: ... + kth: _ArrayLikeInt, + axis: None, + kind: _PartitionKind = "introselect", + order: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.intp]]: ... +@overload # known shape, axis: index (default) +def argpartition[ShapeT: _Shape]( + a: np.ndarray[ShapeT], + kth: _ArrayLikeInt, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.intp]]: ... +@overload # 1d array-like, axis: index (default) +def argpartition( + a: Sequence[np.generic | complex], + kth: _ArrayLikeInt, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.intp]]: ... +@overload # 2d array-like, axis: index (default) +def argpartition( + a: Sequence[Sequence[np.generic | complex]], + kth: _ArrayLikeInt, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: None = None, +) -> np.ndarray[tuple[int, int], np.dtype[np.intp]]: ... +@overload # ?d array-like, axis: index (default) +def argpartition( + a: ArrayLike, + kth: _ArrayLikeInt, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: None = None, +) -> NDArray[np.intp]: ... +@overload # void, axis: None +def argpartition( + a: _SupportsArray[np.dtype[np.void]], + kth: _ArrayLikeInt, + axis: None, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, +) -> np.ndarray[tuple[int], np.dtype[intp]]: ... 
+@overload # void, axis: index (default) +def argpartition[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.void]], + kth: _ArrayLikeInt, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, +) -> np.ndarray[ShapeT, np.dtype[np.intp]]: ... +# @overload -def sort( - a: _ArrayLike[_SCT], - axis: None | SupportsIndex = ..., - kind: None | _SortKind = ..., - order: None | str | Sequence[str] = ..., +def sort[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axis: SupportsIndex | None = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, *, - stable: None | bool = ..., -) -> NDArray[_SCT]: ... + stable: bool | None = None, +) -> NDArray[ScalarT]: ... @overload def sort( a: ArrayLike, - axis: None | SupportsIndex = ..., - kind: None | _SortKind = ..., - order: None | str | Sequence[str] = ..., + axis: SupportsIndex | None = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, *, - stable: None | bool = ..., + stable: bool | None = None, ) -> NDArray[Any]: ... def argsort( a: ArrayLike, - axis: None | SupportsIndex = ..., - kind: None | _SortKind = ..., - order: None | str | Sequence[str] = ..., + axis: SupportsIndex | None = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, *, - stable: None | bool = ..., + stable: bool | None = None, ) -> NDArray[intp]: ... @overload def argmax( a: ArrayLike, - axis: None = ..., - out: None = ..., + axis: None = None, + out: None = None, *, - keepdims: Literal[False] = ..., + keepdims: Literal[False] | _NoValueType = ..., ) -> intp: ... @overload def argmax( a: ArrayLike, - axis: None | SupportsIndex = ..., - out: None = ..., + axis: SupportsIndex | None = None, + out: None = None, *, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., ) -> Any: ... 
@overload -def argmax( +def argmax[BoolOrIntArrayT: NDArray[np.integer | np.bool]]( a: ArrayLike, - axis: None | SupportsIndex = ..., - out: _ArrayType = ..., + axis: SupportsIndex | None, + out: BoolOrIntArrayT, *, - keepdims: bool = ..., -) -> _ArrayType: ... + keepdims: bool | _NoValueType = ..., +) -> BoolOrIntArrayT: ... +@overload +def argmax[BoolOrIntArrayT: NDArray[np.integer | np.bool]]( + a: ArrayLike, + axis: SupportsIndex | None = None, + *, + out: BoolOrIntArrayT, + keepdims: bool | _NoValueType = ..., +) -> BoolOrIntArrayT: ... @overload def argmin( a: ArrayLike, - axis: None = ..., - out: None = ..., + axis: None = None, + out: None = None, *, - keepdims: Literal[False] = ..., + keepdims: Literal[False] | _NoValueType = ..., ) -> intp: ... @overload def argmin( a: ArrayLike, - axis: None | SupportsIndex = ..., - out: None = ..., + axis: SupportsIndex | None = None, + out: None = None, *, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload -def argmin( +def argmin[BoolOrIntArrayT: NDArray[np.integer | np.bool]]( a: ArrayLike, - axis: None | SupportsIndex = ..., - out: _ArrayType = ..., + axis: SupportsIndex | None, + out: BoolOrIntArrayT, *, - keepdims: bool = ..., -) -> _ArrayType: ... + keepdims: bool | _NoValueType = ..., +) -> BoolOrIntArrayT: ... +@overload +def argmin[BoolOrIntArrayT: NDArray[np.integer | np.bool]]( + a: ArrayLike, + axis: SupportsIndex | None = None, + *, + out: BoolOrIntArrayT, + keepdims: bool | _NoValueType = ..., +) -> BoolOrIntArrayT: ... +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def searchsorted( a: ArrayLike, v: _ScalarLike_co, - side: _SortSide = ..., - sorter: None | _ArrayLikeInt_co = ..., # 1D int array + side: _SortSide = "left", + sorter: _ArrayLikeInt_co | None = None, # 1D int array ) -> intp: ... 
@overload def searchsorted( a: ArrayLike, v: ArrayLike, - side: _SortSide = ..., - sorter: None | _ArrayLikeInt_co = ..., # 1D int array + side: _SortSide = "left", + sorter: _ArrayLikeInt_co | None = None, # 1D int array ) -> NDArray[intp]: ... +# keep in sync with `ma.core.resize` @overload -def resize( - a: _ArrayLike[_SCT], - new_shape: _ShapeLike, -) -> NDArray[_SCT]: ... +def resize[ScalarT: np.generic](a: _ArrayLike[ScalarT], new_shape: SupportsIndex | tuple[SupportsIndex]) -> _Array1D[ScalarT]: ... @overload -def resize( - a: ArrayLike, - new_shape: _ShapeLike, -) -> NDArray[Any]: ... +def resize[ScalarT: np.generic, AnyShapeT: (_0D, _1D, _2D, _3D, _4D)]( + a: _ArrayLike[ScalarT], + new_shape: AnyShapeT, +) -> np.ndarray[AnyShapeT, np.dtype[ScalarT]]: ... +@overload +def resize[ScalarT: np.generic](a: _ArrayLike[ScalarT], new_shape: _ShapeLike) -> NDArray[ScalarT]: ... +@overload +def resize(a: ArrayLike, new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[_1D]: ... +@overload +def resize[AnyShapeT: (_0D, _1D, _2D, _3D, _4D)](a: ArrayLike, new_shape: AnyShapeT) -> np.ndarray[AnyShapeT]: ... +@overload +def resize(a: ArrayLike, new_shape: _ShapeLike) -> NDArray[Any]: ... +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload -def squeeze( - a: _SCT, - axis: None | _ShapeLike = ..., -) -> _SCT: ... +def squeeze[ScalarT: np.generic](a: ScalarT, axis: _ShapeLike | None = None) -> ScalarT: ... @overload -def squeeze( - a: _ArrayLike[_SCT], - axis: None | _ShapeLike = ..., -) -> NDArray[_SCT]: ... +def squeeze[ScalarT: np.generic](a: _ArrayLike[ScalarT], axis: _ShapeLike | None = None) -> NDArray[ScalarT]: ... @overload -def squeeze( - a: ArrayLike, - axis: None | _ShapeLike = ..., -) -> NDArray[Any]: ... +def squeeze(a: ArrayLike, axis: _ShapeLike | None = None) -> NDArray[Any]: ... 
+# keep in sync with `ma.core.diagonal` @overload -def diagonal( - a: _ArrayLike[_SCT], - offset: SupportsIndex = ..., - axis1: SupportsIndex = ..., - axis2: SupportsIndex = ..., # >= 2D array -) -> NDArray[_SCT]: ... +def diagonal[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, # >= 2D array +) -> NDArray[ScalarT]: ... @overload def diagonal( a: ArrayLike, - offset: SupportsIndex = ..., - axis1: SupportsIndex = ..., - axis2: SupportsIndex = ..., # >= 2D array + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, # >= 2D array ) -> NDArray[Any]: ... +# keep in sync with `ma.core.trace` @overload def trace( a: ArrayLike, # >= 2D array - offset: SupportsIndex = ..., - axis1: SupportsIndex = ..., - axis2: SupportsIndex = ..., - dtype: DTypeLike = ..., - out: None = ..., + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + out: None = None, ) -> Any: ... @overload -def trace( +def trace[ArrayT: np.ndarray]( + a: ArrayLike, # >= 2D array + offset: SupportsIndex, + axis1: SupportsIndex, + axis2: SupportsIndex, + dtype: DTypeLike | None, + out: ArrayT, +) -> ArrayT: ... +@overload +def trace[ArrayT: np.ndarray]( a: ArrayLike, # >= 2D array - offset: SupportsIndex = ..., - axis1: SupportsIndex = ..., - axis2: SupportsIndex = ..., - dtype: DTypeLike = ..., - out: _ArrayType = ..., -) -> _ArrayType: ... + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + *, + out: ArrayT, +) -> ArrayT: ... @overload -def ravel(a: _ArrayLike[_SCT], order: _OrderKACF = ...) -> NDArray[_SCT]: ... +def ravel[ScalarT: np.generic](a: _ArrayLike[ScalarT], order: _OrderKACF = "C") -> _Array1D[ScalarT]: ... +@overload +def ravel(a: bytes | _NestedSequence[bytes], order: _OrderKACF = "C") -> _Array1D[np.bytes_]: ... 
+@overload
+def ravel(a: str | _NestedSequence[str], order: _OrderKACF = "C") -> _Array1D[np.str_]: ...
+@overload
+def ravel(a: bool | _NestedSequence[bool], order: _OrderKACF = "C") -> _Array1D[np.bool]: ...
+@overload
+def ravel(a: int | _NestedSequence[int], order: _OrderKACF = "C") -> _Array1D[np.int_ | Any]: ...
+@overload
+def ravel(a: float | _NestedSequence[float], order: _OrderKACF = "C") -> _Array1D[np.float64 | Any]: ...
 @overload
-def ravel(a: ArrayLike, order: _OrderKACF = ...) -> NDArray[Any]: ...
+def ravel(a: complex | _NestedSequence[complex], order: _OrderKACF = "C") -> _Array1D[np.complex128 | Any]: ...
+@overload
+def ravel(a: ArrayLike, order: _OrderKACF = "C") -> np.ndarray[_1D]: ...
 
-def nonzero(a: ArrayLike) -> tuple[NDArray[intp], ...]: ...
+def nonzero(a: _ArrayLike[Any]) -> tuple[_Array1D[np.intp], ...]: ...
 
-def shape(a: ArrayLike) -> _Shape: ...
+# this prevents `Any` from being returned with Pyright
+@overload
+def shape(a: _SupportsShape[Never]) -> _AnyShape: ...
+@overload
+def shape[ShapeT: _Shape](a: _SupportsShape[ShapeT]) -> ShapeT: ...
+@overload
+def shape(a: _PyScalar) -> tuple[()]: ...
+# `collections.abc.Sequence` can't be used here, since `bytes` and `str` are
+# subtypes of it, which would make the return types incompatible.
+@overload
+def shape(a: _PyArray[_PyScalar]) -> _1D: ...
+@overload
+def shape(a: _PyArray[_PyArray[_PyScalar]]) -> _2D: ...
+# this overload will be skipped by typecheckers that don't support PEP 688
+@overload
+def shape(a: memoryview | bytearray) -> _1D: ...
+@overload
+def shape(a: ArrayLike) -> _AnyShape: ...
 
 @overload
-def compress(
+def compress[ScalarT: np.generic](
     condition: _ArrayLikeBool_co,  # 1D bool array
-    a: _ArrayLike[_SCT],
-    axis: None | SupportsIndex = ...,
-    out: None = ...,
-) -> NDArray[_SCT]: ...
+    a: _ArrayLike[ScalarT],
+    axis: SupportsIndex | None = None,
+    out: None = None,
+) -> NDArray[ScalarT]: ...
@overload def compress( condition: _ArrayLikeBool_co, # 1D bool array a: ArrayLike, - axis: None | SupportsIndex = ..., - out: None = ..., + axis: SupportsIndex | None = None, + out: None = None, ) -> NDArray[Any]: ... @overload -def compress( +def compress[ArrayT: np.ndarray]( condition: _ArrayLikeBool_co, # 1D bool array a: ArrayLike, - axis: None | SupportsIndex = ..., - out: _ArrayType = ..., -) -> _ArrayType: ... + axis: SupportsIndex | None, + out: ArrayT, +) -> ArrayT: ... +@overload +def compress[ArrayT: np.ndarray]( + condition: _ArrayLikeBool_co, # 1D bool array + a: ArrayLike, + axis: SupportsIndex | None = None, + *, + out: ArrayT, +) -> ArrayT: ... +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload -def clip( - a: _SCT, - a_min: None | ArrayLike, - a_max: None | ArrayLike, - out: None = ..., - *, - dtype: None = ..., - where: None | _ArrayLikeBool_co = ..., - order: _OrderKACF = ..., - subok: bool = ..., - signature: str | tuple[None | str, ...] = ..., - casting: _CastingKind = ..., -) -> _SCT: ... +def clip[ScalarT: np.generic]( + a: ScalarT, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> ScalarT: ... @overload def clip( a: _ScalarLike_co, - a_min: None | ArrayLike, - a_max: None | ArrayLike, - out: None = ..., - *, - dtype: None = ..., - where: None | _ArrayLikeBool_co = ..., - order: _OrderKACF = ..., - subok: bool = ..., - signature: str | tuple[None | str, ...] = ..., - casting: _CastingKind = ..., + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], ) -> Any: ... 
@overload -def clip( - a: _ArrayLike[_SCT], - a_min: None | ArrayLike, - a_max: None | ArrayLike, - out: None = ..., - *, - dtype: None = ..., - where: None | _ArrayLikeBool_co = ..., - order: _OrderKACF = ..., - subok: bool = ..., - signature: str | tuple[None | str, ...] = ..., - casting: _CastingKind = ..., -) -> NDArray[_SCT]: ... +def clip[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> NDArray[ScalarT]: ... @overload def clip( a: ArrayLike, - a_min: None | ArrayLike, - a_max: None | ArrayLike, - out: None = ..., - *, - dtype: None = ..., - where: None | _ArrayLikeBool_co = ..., - order: _OrderKACF = ..., - subok: bool = ..., - signature: str | tuple[None | str, ...] = ..., - casting: _CastingKind = ..., + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], ) -> NDArray[Any]: ... @overload -def clip( +def clip[ArrayT: np.ndarray]( a: ArrayLike, - a_min: None | ArrayLike, - a_max: None | ArrayLike, - out: _ArrayType = ..., - *, - dtype: DTypeLike, - where: None | _ArrayLikeBool_co = ..., - order: _OrderKACF = ..., - subok: bool = ..., - signature: str | tuple[None | str, ...] = ..., - casting: _CastingKind = ..., -) -> Any: ... + a_min: ArrayLike | None, + a_max: ArrayLike | None, + out: ArrayT, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> ArrayT: ... 
+@overload +def clip[ArrayT: np.ndarray]( + a: ArrayLike, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + *, + out: ArrayT, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> ArrayT: ... @overload def clip( a: ArrayLike, - a_min: None | ArrayLike, - a_max: None | ArrayLike, - out: _ArrayType, - *, - dtype: DTypeLike = ..., - where: None | _ArrayLikeBool_co = ..., - order: _OrderKACF = ..., - subok: bool = ..., - signature: str | tuple[None | str, ...] = ..., - casting: _CastingKind = ..., -) -> _ArrayType: ... + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> Any: ... @overload -def sum( - a: _ArrayLike[_SCT], - axis: None = ..., - dtype: None = ..., - out: None = ..., - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _SCT: ... +def sum[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT: ... +@overload +def sum[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT | NDArray[ScalarT]: ... 
+@overload +def sum[ScalarT: np.generic]( + a: ArrayLike, + axis: None, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT: ... +@overload +def sum[ScalarT: np.generic]( + a: ArrayLike, + axis: None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT: ... +@overload +def sum[ScalarT: np.generic]( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT | NDArray[ScalarT]: ... +@overload +def sum[ScalarT: np.generic]( + a: ArrayLike, + axis: _ShapeLike | None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT | NDArray[ScalarT]: ... @overload def sum( a: ArrayLike, - axis: None | _ShapeLike = ..., - dtype: DTypeLike = ..., - out: None = ..., - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload -def sum( +def sum[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... 
+@overload +def sum[ArrayT: np.ndarray]( a: ArrayLike, - axis: None | _ShapeLike = ..., - dtype: DTypeLike = ..., - out: _ArrayType = ..., - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _ArrayType: ... + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... +# keep in sync with `any` @overload def all( - a: ArrayLike, - axis: None = ..., - out: None = ..., - keepdims: Literal[False] = ..., + a: ArrayLike | None, + axis: None = None, + out: None = None, + keepdims: Literal[False, 0] | _NoValueType = ..., *, - where: _ArrayLikeBool_co = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> np.bool: ... @overload def all( - a: ArrayLike, - axis: None | _ShapeLike = ..., - out: None = ..., - keepdims: bool = ..., + a: ArrayLike | None, + axis: int | tuple[int, ...] | None = None, + out: None = None, + keepdims: _BoolLike_co | _NoValueType = ..., *, - where: _ArrayLikeBool_co = ..., -) -> Any: ... + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Incomplete: ... +@overload +def all[ArrayT: np.ndarray]( + a: ArrayLike | None, + axis: int | tuple[int, ...] | None, + out: ArrayT, + keepdims: _BoolLike_co | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... @overload -def all( - a: ArrayLike, - axis: None | _ShapeLike = ..., - out: _ArrayType = ..., - keepdims: bool = ..., +def all[ArrayT: np.ndarray]( + a: ArrayLike | None, + axis: int | tuple[int, ...] | None = None, *, - where: _ArrayLikeBool_co = ..., -) -> _ArrayType: ... + out: ArrayT, + keepdims: _BoolLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... 
+# keep in sync with `all` @overload def any( - a: ArrayLike, - axis: None = ..., - out: None = ..., - keepdims: Literal[False] = ..., + a: ArrayLike | None, + axis: None = None, + out: None = None, + keepdims: Literal[False, 0] | _NoValueType = ..., *, - where: _ArrayLikeBool_co = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> np.bool: ... @overload def any( - a: ArrayLike, - axis: None | _ShapeLike = ..., - out: None = ..., - keepdims: bool = ..., + a: ArrayLike | None, + axis: int | tuple[int, ...] | None = None, + out: None = None, + keepdims: _BoolLike_co | _NoValueType = ..., *, - where: _ArrayLikeBool_co = ..., -) -> Any: ... + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Incomplete: ... +@overload +def any[ArrayT: np.ndarray]( + a: ArrayLike | None, + axis: int | tuple[int, ...] | None, + out: ArrayT, + keepdims: _BoolLike_co | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... @overload -def any( - a: ArrayLike, - axis: None | _ShapeLike = ..., - out: _ArrayType = ..., - keepdims: bool = ..., +def any[ArrayT: np.ndarray]( + a: ArrayLike | None, + axis: int | tuple[int, ...] | None = None, *, - where: _ArrayLikeBool_co = ..., -) -> _ArrayType: ... + out: ArrayT, + keepdims: _BoolLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... +# @overload -def cumsum( - a: _ArrayLike[_SCT], - axis: None | SupportsIndex = ..., - dtype: None = ..., - out: None = ..., -) -> NDArray[_SCT]: ... +def cumsum[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, +) -> NDArray[ScalarT]: ... @overload def cumsum( a: ArrayLike, - axis: None | SupportsIndex = ..., - dtype: None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, ) -> NDArray[Any]: ... 
@overload -def cumsum( +def cumsum[ScalarT: np.generic]( a: ArrayLike, - axis: None | SupportsIndex = ..., - dtype: _DTypeLike[_SCT] = ..., - out: None = ..., -) -> NDArray[_SCT]: ... + axis: SupportsIndex | None, + dtype: _DTypeLike[ScalarT], + out: None = None, +) -> NDArray[ScalarT]: ... +@overload +def cumsum[ScalarT: np.generic]( + a: ArrayLike, + axis: SupportsIndex | None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, +) -> NDArray[ScalarT]: ... @overload def cumsum( a: ArrayLike, - axis: None | SupportsIndex = ..., - dtype: DTypeLike = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + out: None = None, ) -> NDArray[Any]: ... @overload -def cumsum( +def cumsum[ArrayT: np.ndarray]( + a: ArrayLike, + axis: SupportsIndex | None, + dtype: DTypeLike | None, + out: ArrayT, +) -> ArrayT: ... +@overload +def cumsum[ArrayT: np.ndarray]( a: ArrayLike, - axis: None | SupportsIndex = ..., - dtype: DTypeLike = ..., - out: _ArrayType = ..., -) -> _ArrayType: ... + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, +) -> ArrayT: ... @overload -def ptp( - a: _ArrayLike[_SCT], - axis: None = ..., - out: None = ..., - keepdims: Literal[False] = ..., -) -> _SCT: ... +def cumulative_sum[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], + /, + *, + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, +) -> NDArray[ScalarT]: ... +@overload +def cumulative_sum( + x: ArrayLike, + /, + *, + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, +) -> NDArray[Any]: ... +@overload +def cumulative_sum[ScalarT: np.generic]( + x: ArrayLike, + /, + *, + axis: SupportsIndex | None = None, + dtype: _DTypeLike[ScalarT], + out: None = None, + include_initial: bool = False, +) -> NDArray[ScalarT]: ... 
+@overload +def cumulative_sum( + x: ArrayLike, + /, + *, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + out: None = None, + include_initial: bool = False, +) -> NDArray[Any]: ... +@overload +def cumulative_sum[ArrayT: np.ndarray]( + x: ArrayLike, + /, + *, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + out: ArrayT, + include_initial: bool = False, +) -> ArrayT: ... + +@overload +def ptp[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> ScalarT: ... @overload def ptp( a: ArrayLike, - axis: None | _ShapeLike = ..., - out: None = ..., - keepdims: bool = ..., + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload -def ptp( +def ptp[ArrayT: np.ndarray]( a: ArrayLike, - axis: None | _ShapeLike = ..., - out: _ArrayType = ..., - keepdims: bool = ..., -) -> _ArrayType: ... + axis: _ShapeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... +@overload +def ptp[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... @overload -def amax( - a: _ArrayLike[_SCT], - axis: None = ..., - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _SCT: ... +def amax[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT: ... 
@overload def amax( a: ArrayLike, - axis: None | _ShapeLike = ..., - out: None = ..., - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload -def amax( +def amax[ArrayT: np.ndarray]( a: ArrayLike, - axis: None | _ShapeLike = ..., - out: _ArrayType = ..., - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _ArrayType: ... + axis: _ShapeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... +@overload +def amax[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... @overload -def amin( - a: _ArrayLike[_SCT], - axis: None = ..., - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _SCT: ... +def amin[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT: ... @overload def amin( a: ArrayLike, - axis: None | _ShapeLike = ..., - out: None = ..., - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... 
@overload -def amin( +def amin[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... +@overload +def amin[ArrayT: np.ndarray]( a: ArrayLike, - axis: None | _ShapeLike = ..., - out: _ArrayType = ..., - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _ArrayType: ... + axis: _ShapeLike | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... # TODO: `np.prod()``: For object arrays `initial` does not necessarily # have to be a numerical scalar. # The only requirement is that it is compatible # with the `.__mul__()` method(s) of the passed array's elements. - # Note that the same situation holds for all wrappers around # `np.ufunc.reduce`, e.g. `np.sum()` (`.__add__()`). +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def prod( a: _ArrayLikeBool_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> int_: ... @overload def prod( a: _ArrayLikeUInt_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> uint64: ... 
@overload def prod( a: _ArrayLikeInt_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> int64: ... @overload def prod( a: _ArrayLikeFloat_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> floating[Any]: ... + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> floating: ... @overload def prod( a: _ArrayLikeComplex_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> complexfloating[Any, Any]: ... + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> complexfloating: ... @overload def prod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., - dtype: None = ..., - out: None = ..., - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + axis: _ShapeLike | None = None, + dtype: None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... 
@overload -def prod( +def prod[ScalarT: np.generic]( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT: ... +@overload +def prod[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None = ..., - dtype: _DTypeLike[_SCT] = ..., - out: None = ..., - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _SCT: ... + axis: None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT: ... @overload def prod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., - dtype: None | DTypeLike = ..., - out: None = ..., - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload -def prod( +def prod[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., - dtype: None | DTypeLike = ..., - out: _ArrayType = ..., - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., -) -> _ArrayType: ... + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... 
+@overload +def prod[ArrayT: np.ndarray]( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def cumprod( a: _ArrayLikeBool_co, - axis: None | SupportsIndex = ..., - dtype: None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, ) -> NDArray[int_]: ... @overload def cumprod( a: _ArrayLikeUInt_co, - axis: None | SupportsIndex = ..., - dtype: None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, ) -> NDArray[uint64]: ... @overload def cumprod( a: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., - dtype: None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, ) -> NDArray[int64]: ... @overload def cumprod( a: _ArrayLikeFloat_co, - axis: None | SupportsIndex = ..., - dtype: None = ..., - out: None = ..., -) -> NDArray[floating[Any]]: ... + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, +) -> NDArray[floating]: ... @overload def cumprod( a: _ArrayLikeComplex_co, - axis: None | SupportsIndex = ..., - dtype: None = ..., - out: None = ..., -) -> NDArray[complexfloating[Any, Any]]: ... + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, +) -> NDArray[complexfloating]: ... @overload def cumprod( a: _ArrayLikeObject_co, - axis: None | SupportsIndex = ..., - dtype: None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, ) -> NDArray[object_]: ... 
@overload -def cumprod( +def cumprod[ScalarT: np.generic]( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: SupportsIndex | None, + dtype: _DTypeLike[ScalarT], + out: None = None, +) -> NDArray[ScalarT]: ... +@overload +def cumprod[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | SupportsIndex = ..., - dtype: _DTypeLike[_SCT] = ..., - out: None = ..., -) -> NDArray[_SCT]: ... + axis: SupportsIndex | None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, +) -> NDArray[ScalarT]: ... @overload def cumprod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | SupportsIndex = ..., - dtype: DTypeLike = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + out: None = None, ) -> NDArray[Any]: ... @overload -def cumprod( +def cumprod[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | SupportsIndex = ..., - dtype: DTypeLike = ..., - out: _ArrayType = ..., -) -> _ArrayType: ... + axis: SupportsIndex | None, + dtype: DTypeLike | None, + out: ArrayT, +) -> ArrayT: ... +@overload +def cumprod[ArrayT: np.ndarray]( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, +) -> ArrayT: ... + +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 +@overload +def cumulative_prod( + x: _ArrayLikeBool_co, + /, + *, + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, +) -> NDArray[int_]: ... +@overload +def cumulative_prod( + x: _ArrayLikeUInt_co, + /, + *, + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, +) -> NDArray[uint64]: ... 
+@overload +def cumulative_prod( + x: _ArrayLikeInt_co, + /, + *, + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, +) -> NDArray[int64]: ... +@overload +def cumulative_prod( + x: _ArrayLikeFloat_co, + /, + *, + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, +) -> NDArray[floating]: ... +@overload +def cumulative_prod( + x: _ArrayLikeComplex_co, + /, + *, + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, +) -> NDArray[complexfloating]: ... +@overload +def cumulative_prod( + x: _ArrayLikeObject_co, + /, + *, + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, +) -> NDArray[object_]: ... +@overload +def cumulative_prod[ScalarT: np.generic]( + x: _ArrayLikeComplex_co | _ArrayLikeObject_co, + /, + *, + axis: SupportsIndex | None = None, + dtype: _DTypeLike[ScalarT], + out: None = None, + include_initial: bool = False, +) -> NDArray[ScalarT]: ... +@overload +def cumulative_prod( + x: _ArrayLikeComplex_co | _ArrayLikeObject_co, + /, + *, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + out: None = None, + include_initial: bool = False, +) -> NDArray[Any]: ... +@overload +def cumulative_prod[ArrayT: np.ndarray]( + x: _ArrayLikeComplex_co | _ArrayLikeObject_co, + /, + *, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + out: ArrayT, + include_initial: bool = False, +) -> ArrayT: ... def ndim(a: ArrayLike) -> int: ... -def size(a: ArrayLike, axis: None | int = ...) -> int: ... +def size(a: ArrayLike, axis: int | tuple[int, ...] | None = None) -> int: ... 
+# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def around( a: _BoolLike_co, - decimals: SupportsIndex = ..., - out: None = ..., + decimals: SupportsIndex = 0, + out: None = None, ) -> float16: ... @overload -def around( - a: _SCT_uifcO, - decimals: SupportsIndex = ..., - out: None = ..., -) -> _SCT_uifcO: ... +def around[NumberOrObjectT: np.number | np.object_]( + a: NumberOrObjectT, + decimals: SupportsIndex = 0, + out: None = None, +) -> NumberOrObjectT: ... @overload def around( a: _ComplexLike_co | object_, - decimals: SupportsIndex = ..., - out: None = ..., + decimals: SupportsIndex = 0, + out: None = None, ) -> Any: ... @overload def around( a: _ArrayLikeBool_co, - decimals: SupportsIndex = ..., - out: None = ..., + decimals: SupportsIndex = 0, + out: None = None, ) -> NDArray[float16]: ... @overload -def around( - a: _ArrayLike[_SCT_uifcO], - decimals: SupportsIndex = ..., - out: None = ..., -) -> NDArray[_SCT_uifcO]: ... +def around[NumberOrObjectT: np.number | np.object_]( + a: _ArrayLike[NumberOrObjectT], + decimals: SupportsIndex = 0, + out: None = None, +) -> NDArray[NumberOrObjectT]: ... @overload def around( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - decimals: SupportsIndex = ..., - out: None = ..., + decimals: SupportsIndex = 0, + out: None = None, ) -> NDArray[Any]: ... @overload -def around( +def around[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - decimals: SupportsIndex = ..., - out: _ArrayType = ..., -) -> _ArrayType: ... + decimals: SupportsIndex, + out: ArrayT, +) -> ArrayT: ... +@overload +def around[ArrayT: np.ndarray]( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + decimals: SupportsIndex = 0, + *, + out: ArrayT, +) -> ArrayT: ... 
+# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def mean( a: _ArrayLikeFloat_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., - keepdims: Literal[False] = ..., + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., *, - where: _ArrayLikeBool_co = ..., -) -> floating[Any]: ... + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> floating: ... @overload def mean( a: _ArrayLikeComplex_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., - keepdims: Literal[False] = ..., + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., *, - where: _ArrayLikeBool_co = ..., -) -> complexfloating[Any, Any]: ... + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> complexfloating: ... @overload def mean( + a: _ArrayLike[np.timedelta64], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.timedelta64: ... +@overload +def mean[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., - dtype: None = ..., - out: None = ..., - keepdims: bool = ..., + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., *, - where: _ArrayLikeBool_co = ..., -) -> Any: ... + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... @overload -def mean( +def mean[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None = ..., - dtype: _DTypeLike[_SCT] = ..., - out: None = ..., - keepdims: Literal[False] = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, *, - where: _ArrayLikeBool_co = ..., -) -> _SCT: ... + out: ArrayT, + keepdims: bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... 
@overload -def mean( +def mean[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., - dtype: DTypeLike = ..., - out: None = ..., - keepdims: bool = ..., + axis: None, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., *, - where: _ArrayLikeBool_co = ..., -) -> Any: ... + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT: ... +@overload +def mean[ScalarT: np.generic]( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT: ... +@overload +def mean[ScalarT: np.generic]( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None, + dtype: _DTypeLike[ScalarT], + out: None, + keepdims: Literal[True, 1], + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[ScalarT]: ... +@overload +def mean[ScalarT: np.generic]( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None, + dtype: _DTypeLike[ScalarT], + out: None = None, + *, + keepdims: bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT | NDArray[ScalarT]: ... +@overload +def mean[ScalarT: np.generic]( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT | NDArray[ScalarT]: ... @overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., - dtype: DTypeLike = ..., - out: _ArrayType = ..., - keepdims: bool = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., *, - where: _ArrayLikeBool_co = ..., -) -> _ArrayType: ... 
+ where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Incomplete: ... @overload def std( a: _ArrayLikeComplex_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., - ddof: int | float = ..., - keepdims: Literal[False] = ..., + axis: None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., *, - where: _ArrayLikeBool_co = ..., - mean: _ArrayLikeComplex_co = ..., - correction: int | float = ..., -) -> floating[Any]: ... + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> floating: ... @overload def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., - dtype: None = ..., - out: None = ..., - ddof: int | float = ..., - keepdims: bool = ..., - *, - where: _ArrayLikeBool_co = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., - correction: int | float = ..., + axis: _ShapeLike | None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> Any: ... @overload -def std( +def std[ScalarT: np.generic]( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None, + dtype: _DTypeLike[ScalarT], + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> ScalarT: ... 
+@overload +def std[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None = ..., - dtype: _DTypeLike[_SCT] = ..., - out: None = ..., - ddof: int | float = ..., - keepdims: Literal[False] = ..., + axis: None = None, *, - where: _ArrayLikeBool_co = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., - correction: int | float = ..., -) -> _SCT: ... + dtype: _DTypeLike[ScalarT], + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> ScalarT: ... @overload def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., - dtype: DTypeLike = ..., - out: None = ..., - ddof: int | float = ..., - keepdims: bool = ..., - *, - where: _ArrayLikeBool_co = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., - correction: int | float = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> Any: ... @overload -def std( +def std[ArrayT: np.ndarray]( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> ArrayT: ... 
+@overload +def std[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., - dtype: DTypeLike = ..., - out: _ArrayType = ..., - ddof: int | float = ..., - keepdims: bool = ..., - *, - where: _ArrayLikeBool_co = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., - correction: int | float = ..., -) -> _ArrayType: ... + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> ArrayT: ... @overload def var( a: _ArrayLikeComplex_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., - ddof: int | float = ..., - keepdims: Literal[False] = ..., + axis: None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., *, - where: _ArrayLikeBool_co = ..., - mean: _ArrayLikeComplex_co = ..., - correction: int | float = ..., -) -> floating[Any]: ... + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> floating: ... @overload def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., - dtype: None = ..., - out: None = ..., - ddof: int | float = ..., - keepdims: bool = ..., - *, - where: _ArrayLikeBool_co = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., - correction: int | float = ..., + axis: _ShapeLike | None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> Any: ... 
@overload -def var( +def var[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None = ..., - dtype: _DTypeLike[_SCT] = ..., - out: None = ..., - ddof: int | float = ..., - keepdims: Literal[False] = ..., + axis: None, + dtype: _DTypeLike[ScalarT], + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., *, - where: _ArrayLikeBool_co = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., - correction: int | float = ..., -) -> _SCT: ... + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> ScalarT: ... +@overload +def var[ScalarT: np.generic]( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> ScalarT: ... @overload def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., - dtype: DTypeLike = ..., - out: None = ..., - ddof: int | float = ..., - keepdims: bool = ..., - *, - where: _ArrayLikeBool_co = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., - correction: int | float = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> Any: ... 
@overload -def var( +def var[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., - dtype: DTypeLike = ..., - out: _ArrayType = ..., - ddof: int | float = ..., - keepdims: bool = ..., - *, - where: _ArrayLikeBool_co = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., - correction: int | float = ..., -) -> _ArrayType: ... + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> ArrayT: ... +@overload +def var[ArrayT: np.ndarray]( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> ArrayT: ... max = amax min = amin diff --git a/numpy/_core/function_base.py b/numpy/_core/function_base.py index 898bc0e309ce..b01ba108d2c4 100644 --- a/numpy/_core/function_base.py +++ b/numpy/_core/function_base.py @@ -1,14 +1,16 @@ import functools -import warnings +import inspect import operator import types +import warnings import numpy as np -from . import numeric as _nx -from .numeric import result_type, nan, asanyarray, ndim -from numpy._core.multiarray import add_docstring -from numpy._core._multiarray_umath import _array_converter from numpy._core import overrides +from numpy._core._multiarray_umath import _array_converter +from numpy._core.multiarray import add_docstring + +from . 
import numeric as _nx +from .numeric import asanyarray, nan, ndim, result_type __all__ = ['logspace', 'linspace', 'geomspace'] @@ -33,13 +35,10 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, The endpoint of the interval can optionally be excluded. - .. versionchanged:: 1.16.0 - Non-scalar `start` and `stop` are now supported. - .. versionchanged:: 1.20.0 Values are rounded towards ``-inf`` instead of ``0`` when an integer ``dtype`` is specified. The old behavior can - still be obtained with ``np.linspace(start, stop, num).astype(int)`` + still be obtained with ``np.linspace(start, stop, num).astype(np.int_)`` Parameters ---------- @@ -63,14 +62,10 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, is inferred from `start` and `stop`. The inferred dtype will never be an integer; `float` is chosen even if the arguments would produce an array of integers. - - .. versionadded:: 1.9.0 axis : int, optional The axis in the result to store the samples. Relevant only if start or stop are array-like. By default (0), the samples will be along a new axis inserted at the beginning. Use -1 to get an axis at the end. - - .. versionadded:: 1.16.0 device : str, optional The device on which to place the created array. Default: None. For Array-API interoperability only, so must be ``"cpu"`` if passed. @@ -101,6 +96,7 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, Examples -------- + >>> import numpy as np >>> np.linspace(2.0, 3.0, num=5) array([2. , 2.25, 2.5 , 2.75, 3. ]) >>> np.linspace(2.0, 3.0, num=5, endpoint=False) @@ -127,7 +123,7 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, num = operator.index(num) if num < 0: raise ValueError( - "Number of samples, %s, must be non-negative." % num + f"Number of samples, {num}, must be non-negative." 
) div = (num - 1) if endpoint else num @@ -163,11 +159,10 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, y *= delta else: y = y * delta + elif _mult_inplace: + y *= step else: - if _mult_inplace: - y *= step - else: - y = y * step + y = y * step else: # sequences with 0 items or 1 item with endpoint=True (i.e. div <= 0) # have an undefined step @@ -208,9 +203,6 @@ def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, (`base` to the power of `start`) and ends with ``base ** stop`` (see `endpoint` below). - .. versionchanged:: 1.16.0 - Non-scalar `start` and `stop` are now supported. - .. versionchanged:: 1.25.0 Non-scalar 'base` is now supported @@ -243,9 +235,6 @@ def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, along a new axis inserted at the beginning. Use -1 to get an axis at the end. - .. versionadded:: 1.16.0 - - Returns ------- samples : ndarray @@ -272,6 +261,7 @@ def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, Examples -------- + >>> import numpy as np >>> np.logspace(2.0, 3.0, num=4) array([ 100. , 215.443469 , 464.15888336, 1000. ]) >>> np.logspace(2.0, 3.0, num=4, endpoint=False) @@ -326,9 +316,6 @@ def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0): This is similar to `logspace`, but with endpoints specified directly. Each output sample is a constant multiple of the previous. - .. versionchanged:: 1.16.0 - Non-scalar `start` and `stop` are now supported. - Parameters ---------- start : array_like @@ -353,8 +340,6 @@ def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0): or stop are array-like. By default (0), the samples will be along a new axis inserted at the beginning. Use -1 to get an axis at the end. - .. 
versionadded:: 1.16.0 - Returns ------- samples : ndarray @@ -378,6 +363,7 @@ def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0): Examples -------- + >>> import numpy as np >>> np.geomspace(1, 1000, num=4) array([ 1., 10., 100., 1000.]) >>> np.geomspace(1, 1000, num=3, endpoint=False) @@ -389,9 +375,9 @@ def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0): Note that the above may not produce exact integers: - >>> np.geomspace(1, 256, num=9, dtype=int) + >>> np.geomspace(1, 256, num=9, dtype=np.int_) array([ 1, 2, 4, 7, 16, 32, 63, 127, 256]) - >>> np.around(np.geomspace(1, 256, num=9)).astype(int) + >>> np.around(np.geomspace(1, 256, num=9)).astype(np.int_) array([ 1, 2, 4, 8, 16, 32, 64, 128, 256]) Negative, decreasing, and complex inputs are allowed: @@ -488,11 +474,13 @@ def _needs_add_docstring(obj): def _add_docstring(obj, doc, warn_on_python): if warn_on_python and not _needs_add_docstring(obj): warnings.warn( - "add_newdoc was used on a pure-python object {}. " - "Prefer to attach it directly to the source." - .format(obj), + f"add_newdoc was used on a pure-python object {obj}. " + "Prefer to attach it directly to the source.", UserWarning, stacklevel=3) + + doc = inspect.cleandoc(doc) + try: add_docstring(obj, doc) except Exception: @@ -510,10 +498,10 @@ def add_newdoc(place, obj, doc, warn_on_python=True): ---------- place : str The absolute name of the module to import from - obj : str or None + obj : str | None The name of the object to add documentation to, typically a class or function name. 
- doc : {str, Tuple[str, str], List[Tuple[str, str]]} + doc : str | tuple[str, str] | list[tuple[str, str]] If a string, the documentation to apply to `obj` If a tuple, then the first element is interpreted as an attribute @@ -548,12 +536,12 @@ def add_newdoc(place, obj, doc, warn_on_python=True): """ new = getattr(__import__(place, globals(), {}, [obj]), obj) if isinstance(doc, str): - _add_docstring(new, doc.strip(), warn_on_python) + if "${ARRAY_FUNCTION_LIKE}" in doc: + doc = overrides.get_array_function_like_doc(new, doc) + _add_docstring(new, doc, warn_on_python) elif isinstance(doc, tuple): attr, docstring = doc - _add_docstring(getattr(new, attr), docstring.strip(), warn_on_python) + _add_docstring(getattr(new, attr), docstring, warn_on_python) elif isinstance(doc, list): for attr, docstring in doc: - _add_docstring( - getattr(new, attr), docstring.strip(), warn_on_python - ) + _add_docstring(getattr(new, attr), docstring, warn_on_python) diff --git a/numpy/_core/function_base.pyi b/numpy/_core/function_base.pyi index 59c3d6b4ea2c..982a169bcbd5 100644 --- a/numpy/_core/function_base.pyi +++ b/numpy/_core/function_base.pyi @@ -1,202 +1,444 @@ -from typing import ( - Literal as L, - overload, - Any, - SupportsIndex, - TypeVar, -) +from _typeshed import Incomplete +from typing import Any, Literal as L, SupportsIndex, overload -from numpy import floating, complexfloating, generic +import numpy as np from numpy._typing import ( - NDArray, DTypeLike, - _DTypeLike, - _ArrayLikeFloat_co, + NDArray, _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ComplexLike_co, + _DTypeLike, ) +from numpy._typing._array_like import _DualArrayLike -_SCT = TypeVar("_SCT", bound=generic) +__all__ = ["geomspace", "linspace", "logspace"] -__all__: list[str] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _ToFloat64 = float | np.integer | np.bool # `np.float64` is assignable to `float` +type _ToArrayFloat64 = _DualArrayLike[np.dtype[np.float64 | 
np.integer | np.bool], float] +### + +@overload +def linspace( + start: _ToFloat64, + stop: _ToFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + dtype: None = None, + axis: SupportsIndex = 0, + *, + device: L["cpu"] | None = None, +) -> _Array1D[np.float64]: ... +@overload +def linspace( + start: complex, + stop: complex, + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + dtype: None = None, + axis: SupportsIndex = 0, + *, + device: L["cpu"] | None = None, +) -> _Array1D[np.complex128 | Any]: ... +@overload +def linspace[ScalarT: np.generic]( + start: _ComplexLike_co, + stop: _ComplexLike_co, + num: SupportsIndex, + endpoint: bool, + retstep: L[False], + dtype: _DTypeLike[ScalarT], + axis: SupportsIndex = 0, + *, + device: L["cpu"] | None = None, +) -> _Array1D[ScalarT]: ... +@overload +def linspace[ScalarT: np.generic]( + start: _ComplexLike_co, + stop: _ComplexLike_co, + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + *, + dtype: _DTypeLike[ScalarT], + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> _Array1D[ScalarT]: ... +@overload +def linspace( + start: _ToArrayFloat64, + stop: _ToArrayFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + dtype: None = None, + axis: SupportsIndex = 0, + *, + device: L["cpu"] | None = None, +) -> NDArray[np.float64]: ... @overload def linspace( start: _ArrayLikeFloat_co, stop: _ArrayLikeFloat_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - retstep: L[False] = ..., - dtype: None = ..., - axis: SupportsIndex = ..., + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + dtype: None = None, + axis: SupportsIndex = 0, *, - device: None | L["cpu"] = ..., -) -> NDArray[floating[Any]]: ... + device: L["cpu"] | None = None, +) -> NDArray[np.float64 | Any]: ... 
@overload def linspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - retstep: L[False] = ..., - dtype: None = ..., - axis: SupportsIndex = ..., + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + dtype: None = None, + axis: SupportsIndex = 0, *, - device: None | L["cpu"] = ..., -) -> NDArray[complexfloating[Any, Any]]: ... + device: L["cpu"] | None = None, +) -> NDArray[np.complex128 | Any]: ... @overload -def linspace( +def linspace[ScalarT: np.generic]( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex, + endpoint: bool, + retstep: L[False], + dtype: _DTypeLike[ScalarT], + axis: SupportsIndex = 0, + *, + device: L["cpu"] | None = None, +) -> NDArray[ScalarT]: ... +@overload +def linspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - retstep: L[False] = ..., - dtype: _DTypeLike[_SCT] = ..., - axis: SupportsIndex = ..., + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, *, - device: None | L["cpu"] = ..., -) -> NDArray[_SCT]: ... + dtype: _DTypeLike[ScalarT], + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> NDArray[ScalarT]: ... @overload def linspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - retstep: L[False] = ..., - dtype: DTypeLike = ..., - axis: SupportsIndex = ..., + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + dtype: DTypeLike | None = None, + axis: SupportsIndex = 0, + *, + device: L["cpu"] | None = None, +) -> NDArray[Incomplete]: ... 
+@overload +def linspace( + start: _ToFloat64, + stop: _ToFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + *, + retstep: L[True], + dtype: None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[_Array1D[np.float64], np.float64]: ... +@overload +def linspace( + start: complex, + stop: complex, + num: SupportsIndex = 50, + endpoint: bool = True, + *, + retstep: L[True], + dtype: None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[_Array1D[np.complex128 | Any], np.complex128 | Any]: ... +@overload +def linspace[ScalarT: np.generic]( + start: _ComplexLike_co, + stop: _ComplexLike_co, + num: SupportsIndex = 50, + endpoint: bool = True, *, - device: None | L["cpu"] = ..., -) -> NDArray[Any]: ... + retstep: L[True], + dtype: _DTypeLike[ScalarT], + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[_Array1D[ScalarT], ScalarT]: ... +@overload +def linspace( + start: _ToArrayFloat64, + stop: _ToArrayFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + *, + retstep: L[True], + dtype: None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[NDArray[np.float64], np.float64]: ... @overload def linspace( start: _ArrayLikeFloat_co, stop: _ArrayLikeFloat_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - retstep: L[True] = ..., - dtype: None = ..., - axis: SupportsIndex = ..., + num: SupportsIndex = 50, + endpoint: bool = True, *, - device: None | L["cpu"] = ..., -) -> tuple[NDArray[floating[Any]], floating[Any]]: ... + retstep: L[True], + dtype: None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[NDArray[np.float64 | Any], np.float64 | Any]: ... 
@overload def linspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - retstep: L[True] = ..., - dtype: None = ..., - axis: SupportsIndex = ..., + num: SupportsIndex = 50, + endpoint: bool = True, *, - device: None | L["cpu"] = ..., -) -> tuple[NDArray[complexfloating[Any, Any]], complexfloating[Any, Any]]: ... + retstep: L[True], + dtype: None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[NDArray[np.complex128 | Any], np.complex128 | Any]: ... @overload -def linspace( +def linspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - retstep: L[True] = ..., - dtype: _DTypeLike[_SCT] = ..., - axis: SupportsIndex = ..., + num: SupportsIndex = 50, + endpoint: bool = True, *, - device: None | L["cpu"] = ..., -) -> tuple[NDArray[_SCT], _SCT]: ... + retstep: L[True], + dtype: _DTypeLike[ScalarT], + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[NDArray[ScalarT], ScalarT]: ... @overload def linspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - retstep: L[True] = ..., - dtype: DTypeLike = ..., - axis: SupportsIndex = ..., - *, - device: None | L["cpu"] = ..., -) -> tuple[NDArray[Any], Any]: ... + num: SupportsIndex = 50, + endpoint: bool = True, + *, + retstep: L[True], + dtype: DTypeLike | None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[NDArray[Incomplete], Incomplete]: ... +# +@overload +def logspace( + start: _ToFloat64, + stop: _ToFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ToFloat64 = 10.0, + dtype: None = None, + axis: SupportsIndex = 0, +) -> _Array1D[np.float64]: ... 
+@overload +def logspace( + start: complex, + stop: complex, + num: SupportsIndex = 50, + endpoint: bool = True, + base: complex = 10.0, + dtype: None = None, + axis: SupportsIndex = 0, +) -> _Array1D[np.complex128 | Any]: ... +@overload +def logspace[ScalarT: np.generic]( + start: _ComplexLike_co, + stop: _ComplexLike_co, + num: SupportsIndex, + endpoint: bool, + base: _ComplexLike_co, + dtype: _DTypeLike[ScalarT], + axis: SupportsIndex = 0, +) -> _Array1D[ScalarT]: ... +@overload +def logspace[ScalarT: np.generic]( + start: _ComplexLike_co, + stop: _ComplexLike_co, + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ArrayLikeComplex_co = 10.0, + *, + dtype: _DTypeLike[ScalarT], + axis: SupportsIndex = 0, +) -> _Array1D[ScalarT]: ... +@overload +def logspace( + start: _ToArrayFloat64, + stop: _ToArrayFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ToArrayFloat64 = 10.0, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.float64]: ... @overload def logspace( start: _ArrayLikeFloat_co, stop: _ArrayLikeFloat_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - base: _ArrayLikeFloat_co = ..., - dtype: None = ..., - axis: SupportsIndex = ..., -) -> NDArray[floating[Any]]: ... + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ArrayLikeFloat_co = 10.0, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.float64 | Any]: ... @overload def logspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - base: _ArrayLikeComplex_co = ..., - dtype: None = ..., - axis: SupportsIndex = ..., -) -> NDArray[complexfloating[Any, Any]]: ... + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ArrayLikeComplex_co = 10.0, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.complex128 | Any]: ... 
@overload -def logspace( +def logspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - base: _ArrayLikeComplex_co = ..., - dtype: _DTypeLike[_SCT] = ..., - axis: SupportsIndex = ..., -) -> NDArray[_SCT]: ... + num: SupportsIndex, + endpoint: bool, + base: _ArrayLikeComplex_co, + dtype: _DTypeLike[ScalarT], + axis: SupportsIndex = 0, +) -> NDArray[ScalarT]: ... +@overload +def logspace[ScalarT: np.generic]( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ArrayLikeComplex_co = 10.0, + *, + dtype: _DTypeLike[ScalarT], + axis: SupportsIndex = 0, +) -> NDArray[ScalarT]: ... @overload def logspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - base: _ArrayLikeComplex_co = ..., - dtype: DTypeLike = ..., - axis: SupportsIndex = ..., -) -> NDArray[Any]: ... + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ArrayLikeComplex_co = 10.0, + dtype: DTypeLike | None = None, + axis: SupportsIndex = 0, +) -> NDArray[Incomplete]: ... +# +@overload +def geomspace( + start: _ToFloat64, + stop: _ToFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: None = None, + axis: SupportsIndex = 0, +) -> _Array1D[np.float64]: ... +@overload +def geomspace( + start: complex, + stop: complex, + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: None = None, + axis: SupportsIndex = 0, +) -> _Array1D[np.complex128 | Any]: ... +@overload +def geomspace[ScalarT: np.generic]( + start: _ComplexLike_co, + stop: _ComplexLike_co, + num: SupportsIndex, + endpoint: bool, + dtype: _DTypeLike[ScalarT], + axis: SupportsIndex = 0, +) -> _Array1D[ScalarT]: ... 
+@overload +def geomspace[ScalarT: np.generic]( + start: _ComplexLike_co, + stop: _ComplexLike_co, + num: SupportsIndex = 50, + endpoint: bool = True, + *, + dtype: _DTypeLike[ScalarT], + axis: SupportsIndex = 0, +) -> _Array1D[ScalarT]: ... +@overload +def geomspace( + start: _ToArrayFloat64, + stop: _ToArrayFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.float64]: ... @overload def geomspace( start: _ArrayLikeFloat_co, stop: _ArrayLikeFloat_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - dtype: None = ..., - axis: SupportsIndex = ..., -) -> NDArray[floating[Any]]: ... + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.float64 | Any]: ... @overload def geomspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - dtype: None = ..., - axis: SupportsIndex = ..., -) -> NDArray[complexfloating[Any, Any]]: ... + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.complex128 | Any]: ... @overload -def geomspace( +def geomspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - dtype: _DTypeLike[_SCT] = ..., - axis: SupportsIndex = ..., -) -> NDArray[_SCT]: ... + num: SupportsIndex, + endpoint: bool, + dtype: _DTypeLike[ScalarT], + axis: SupportsIndex = 0, +) -> NDArray[ScalarT]: ... +@overload +def geomspace[ScalarT: np.generic]( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + *, + dtype: _DTypeLike[ScalarT], + axis: SupportsIndex = 0, +) -> NDArray[ScalarT]: ... 
@overload def geomspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - dtype: DTypeLike = ..., - axis: SupportsIndex = ..., -) -> NDArray[Any]: ... + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: DTypeLike | None = None, + axis: SupportsIndex = 0, +) -> NDArray[Incomplete]: ... +# def add_newdoc( place: str, obj: str, doc: str | tuple[str, str] | list[tuple[str, str]], - warn_on_python: bool = ..., + warn_on_python: bool = True, ) -> None: ... diff --git a/numpy/_core/getlimits.py b/numpy/_core/getlimits.py index b01e47fade43..f0e2a7c05d86 100644 --- a/numpy/_core/getlimits.py +++ b/numpy/_core/getlimits.py @@ -3,122 +3,30 @@ """ __all__ = ['finfo', 'iinfo'] -import warnings +import math +import types +from functools import cached_property -from .._utils import set_module -from ._machar import MachAr -from . import numeric -from . import numerictypes as ntypes -from .numeric import array, inf, nan -from .umath import log10, exp2, nextafter, isnan +from numpy._utils import set_module + +from . import numeric, numerictypes as ntypes +from ._multiarray_umath import _populate_finfo_constants def _fr0(a): """fix rank-0 --> rank-1""" if a.ndim == 0: - a = a.copy() - a.shape = (1,) + a = a.reshape((1,)) return a def _fr1(a): """fix rank > 0 --> rank-0""" if a.size == 1: - a = a.copy() - a.shape = () + a = a.reshape(()) return a -class MachArLike: - """ Object to simulate MachAr instance """ - def __init__(self, ftype, *, eps, epsneg, huge, tiny, - ibeta, smallest_subnormal=None, **kwargs): - self.params = _MACHAR_PARAMS[ftype] - self.ftype = ftype - self.title = self.params['title'] - # Parameter types same as for discovered MachAr object. 
- if not smallest_subnormal: - self._smallest_subnormal = nextafter( - self.ftype(0), self.ftype(1), dtype=self.ftype) - else: - self._smallest_subnormal = smallest_subnormal - self.epsilon = self.eps = self._float_to_float(eps) - self.epsneg = self._float_to_float(epsneg) - self.xmax = self.huge = self._float_to_float(huge) - self.xmin = self._float_to_float(tiny) - self.smallest_normal = self.tiny = self._float_to_float(tiny) - self.ibeta = self.params['itype'](ibeta) - self.__dict__.update(kwargs) - self.precision = int(-log10(self.eps)) - self.resolution = self._float_to_float( - self._float_conv(10) ** (-self.precision)) - self._str_eps = self._float_to_str(self.eps) - self._str_epsneg = self._float_to_str(self.epsneg) - self._str_xmin = self._float_to_str(self.xmin) - self._str_xmax = self._float_to_str(self.xmax) - self._str_resolution = self._float_to_str(self.resolution) - self._str_smallest_normal = self._float_to_str(self.xmin) - - @property - def smallest_subnormal(self): - """Return the value for the smallest subnormal. - - Returns - ------- - smallest_subnormal : float - value for the smallest subnormal. - - Warns - ----- - UserWarning - If the calculated value for the smallest subnormal is zero. - """ - # Check that the calculated value is not zero, in case it raises a - # warning. - value = self._smallest_subnormal - if self.ftype(0) == value: - warnings.warn( - 'The value of the smallest subnormal for {} type ' - 'is zero.'.format(self.ftype), UserWarning, stacklevel=2) - - return self._float_to_float(value) - - @property - def _str_smallest_subnormal(self): - """Return the string representation of the smallest subnormal.""" - return self._float_to_str(self.smallest_subnormal) - - def _float_to_float(self, value): - """Converts float to float. - - Parameters - ---------- - value : float - value to be converted. - """ - return _fr1(self._float_conv(value)) - - def _float_conv(self, value): - """Converts float to conv. 
- - Parameters - ---------- - value : float - value to be converted. - """ - return array([value], self.ftype) - - def _float_to_str(self, value): - """Converts float to str. - - Parameters - ---------- - value : float - value to be converted. - """ - return self.params['fmt'] % array(_fr0(value)[0], self.ftype) - - _convert_to_float = { ntypes.csingle: ntypes.single, ntypes.complex128: ntypes.float64, @@ -128,256 +36,22 @@ def _float_to_str(self, value): # Parameters for creating MachAr / MachAr-like objects _title_fmt = 'numpy {} precision floating point number' _MACHAR_PARAMS = { - ntypes.double: dict( - itype = ntypes.int64, - fmt = '%24.16e', - title = _title_fmt.format('double')), - ntypes.single: dict( - itype = ntypes.int32, - fmt = '%15.7e', - title = _title_fmt.format('single')), - ntypes.longdouble: dict( - itype = ntypes.longlong, - fmt = '%s', - title = _title_fmt.format('long double')), - ntypes.half: dict( - itype = ntypes.int16, - fmt = '%12.5e', - title = _title_fmt.format('half'))} - -# Key to identify the floating point type. Key is result of -# -# ftype = np.longdouble # or float64, float32, etc. -# v = (ftype(-1.0) / ftype(10.0)) -# v.view(v.dtype.newbyteorder('<')).tobytes() -# -# Uses division to work around deficiencies in strtold on some platforms. -# See: -# https://perl5.git.perl.org/perl.git/blob/3118d7d684b56cbeb702af874f4326683c45f045:/Configure - -_KNOWN_TYPES = {} -def _register_type(machar, bytepat): - _KNOWN_TYPES[bytepat] = machar - - -_float_ma = {} - - -def _register_known_types(): - # Known parameters for float16 - # See docstring of MachAr class for description of parameters. 
- f16 = ntypes.float16 - float16_ma = MachArLike(f16, - machep=-10, - negep=-11, - minexp=-14, - maxexp=16, - it=10, - iexp=5, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(f16(-10)), - epsneg=exp2(f16(-11)), - huge=f16(65504), - tiny=f16(2 ** -14)) - _register_type(float16_ma, b'f\xae') - _float_ma[16] = float16_ma - - # Known parameters for float32 - f32 = ntypes.float32 - float32_ma = MachArLike(f32, - machep=-23, - negep=-24, - minexp=-126, - maxexp=128, - it=23, - iexp=8, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(f32(-23)), - epsneg=exp2(f32(-24)), - huge=f32((1 - 2 ** -24) * 2**128), - tiny=exp2(f32(-126))) - _register_type(float32_ma, b'\xcd\xcc\xcc\xbd') - _float_ma[32] = float32_ma - - # Known parameters for float64 - f64 = ntypes.float64 - epsneg_f64 = 2.0 ** -53.0 - tiny_f64 = 2.0 ** -1022.0 - float64_ma = MachArLike(f64, - machep=-52, - negep=-53, - minexp=-1022, - maxexp=1024, - it=52, - iexp=11, - ibeta=2, - irnd=5, - ngrd=0, - eps=2.0 ** -52.0, - epsneg=epsneg_f64, - huge=(1.0 - epsneg_f64) / tiny_f64 * f64(4), - tiny=tiny_f64) - _register_type(float64_ma, b'\x9a\x99\x99\x99\x99\x99\xb9\xbf') - _float_ma[64] = float64_ma - - # Known parameters for IEEE 754 128-bit binary float - ld = ntypes.longdouble - epsneg_f128 = exp2(ld(-113)) - tiny_f128 = exp2(ld(-16382)) - # Ignore runtime error when this is not f128 - with numeric.errstate(all='ignore'): - huge_f128 = (ld(1) - epsneg_f128) / tiny_f128 * ld(4) - float128_ma = MachArLike(ld, - machep=-112, - negep=-113, - minexp=-16382, - maxexp=16384, - it=112, - iexp=15, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(ld(-112)), - epsneg=epsneg_f128, - huge=huge_f128, - tiny=tiny_f128) - # IEEE 754 128-bit binary float - _register_type(float128_ma, - b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf') - _float_ma[128] = float128_ma - - # Known parameters for float80 (Intel 80-bit extended precision) - epsneg_f80 = exp2(ld(-64)) - tiny_f80 = exp2(ld(-16382)) - # Ignore runtime error when this is not 
f80 - with numeric.errstate(all='ignore'): - huge_f80 = (ld(1) - epsneg_f80) / tiny_f80 * ld(4) - float80_ma = MachArLike(ld, - machep=-63, - negep=-64, - minexp=-16382, - maxexp=16384, - it=63, - iexp=15, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(ld(-63)), - epsneg=epsneg_f80, - huge=huge_f80, - tiny=tiny_f80) - # float80, first 10 bytes containing actual storage - _register_type(float80_ma, b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf') - _float_ma[80] = float80_ma - - # Guessed / known parameters for double double; see: - # https://en.wikipedia.org/wiki/Quadruple-precision_floating-point_format#Double-double_arithmetic - # These numbers have the same exponent range as float64, but extended - # number of digits in the significand. - huge_dd = nextafter(ld(inf), ld(0), dtype=ld) - # As the smallest_normal in double double is so hard to calculate we set - # it to NaN. - smallest_normal_dd = nan - # Leave the same value for the smallest subnormal as double - smallest_subnormal_dd = ld(nextafter(0., 1.)) - float_dd_ma = MachArLike(ld, - machep=-105, - negep=-106, - minexp=-1022, - maxexp=1024, - it=105, - iexp=11, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(ld(-105)), - epsneg=exp2(ld(-106)), - huge=huge_dd, - tiny=smallest_normal_dd, - smallest_subnormal=smallest_subnormal_dd) - # double double; low, high order (e.g. PPC 64) - _register_type(float_dd_ma, - b'\x9a\x99\x99\x99\x99\x99Y<\x9a\x99\x99\x99\x99\x99\xb9\xbf') - # double double; high, low order (e.g. PPC 64 le) - _register_type(float_dd_ma, - b'\x9a\x99\x99\x99\x99\x99\xb9\xbf\x9a\x99\x99\x99\x99\x99Y<') - _float_ma['dd'] = float_dd_ma - - -def _get_machar(ftype): - """ Get MachAr instance or MachAr-like instance - - Get parameters for floating point type, by first trying signatures of - various known floating point types, then, if none match, attempting to - identify parameters by analysis. - - Parameters - ---------- - ftype : class - Numpy floating point type class (e.g. 
``np.float64``) - - Returns - ------- - ma_like : instance of :class:`MachAr` or :class:`MachArLike` - Object giving floating point parameters for `ftype`. - - Warns - ----- - UserWarning - If the binary signature of the float type is not in the dictionary of - known float types. - """ - params = _MACHAR_PARAMS.get(ftype) - if params is None: - raise ValueError(repr(ftype)) - # Detect known / suspected types - # ftype(-1.0) / ftype(10.0) is better than ftype('-0.1') because stold - # may be deficient - key = (ftype(-1.0) / ftype(10.)) - key = key.view(key.dtype.newbyteorder("<")).tobytes() - ma_like = None - if ftype == ntypes.longdouble: - # Could be 80 bit == 10 byte extended precision, where last bytes can - # be random garbage. - # Comparing first 10 bytes to pattern first to avoid branching on the - # random garbage. - ma_like = _KNOWN_TYPES.get(key[:10]) - if ma_like is None: - # see if the full key is known. - ma_like = _KNOWN_TYPES.get(key) - if ma_like is None and len(key) == 16: - # machine limits could be f80 masquerading as np.float128, - # find all keys with length 16 and make new dict, but make the keys - # only 10 bytes long, the last bytes can be random garbage - _kt = {k[:10]: v for k, v in _KNOWN_TYPES.items() if len(k) == 16} - ma_like = _kt.get(key[:10]) - if ma_like is not None: - return ma_like - # Fall back to parameter discovery - warnings.warn( - f'Signature {key} for {ftype} does not match any known type: ' - 'falling back to type probe function.\n' - 'This warnings indicates broken support for the dtype!', - UserWarning, stacklevel=2) - return _discovered_machar(ftype) - - -def _discovered_machar(ftype): - """ Create MachAr instance with found information on float types - - TODO: MachAr should be retired completely ideally. We currently only - ever use it system with broken longdouble (valgrind, WSL). 
- """ - params = _MACHAR_PARAMS[ftype] - return MachAr(lambda v: array([v], ftype), - lambda v: _fr0(v.astype(params['itype']))[0], - lambda v: array(_fr0(v)[0], ftype), - lambda v: params['fmt'] % array(_fr0(v)[0], ftype), - params['title']) + ntypes.double: { + 'itype': ntypes.int64, + 'fmt': '%24.16e', + 'title': _title_fmt.format('double')}, + ntypes.single: { + 'itype': ntypes.int32, + 'fmt': '%15.7e', + 'title': _title_fmt.format('single')}, + ntypes.longdouble: { + 'itype': ntypes.longlong, + 'fmt': '%s', + 'title': _title_fmt.format('long double')}, + ntypes.half: { + 'itype': ntypes.int16, + 'fmt': '%12.5e', + 'title': _title_fmt.format('half')}} @set_module('numpy') @@ -412,17 +86,20 @@ class finfo: The largest representable number. maxexp : int The smallest positive power of the base (2) that causes overflow. + Corresponds to the C standard MAX_EXP. min : floating point number of the appropriate type The smallest representable number, typically ``-max``. minexp : int The most negative power of the base (2) consistent with there - being no leading 0's in the mantissa. + being no leading 0's in the mantissa. Corresponds to the C + standard MIN_EXP - 1. negep : int The exponent that yields `epsneg`. nexp : int The number of bits in the exponent including its sign and bias. nmant : int - The number of bits in the mantissa. + The number of explicit bits in the mantissa (excluding the implicit + leading bit for normalized numbers). precision : int The approximate number of decimal digits to which this kind of float is precise. @@ -463,6 +140,12 @@ class finfo: fill the gap between 0 and ``smallest_normal``. However, subnormal numbers may have significantly reduced precision [2]_. + For ``longdouble``, the representation varies across platforms. On most + platforms it is IEEE 754 binary128 (quad precision) or binary64-extended + (80-bit extended precision). 
On PowerPC systems, it may use the IBM + double-double format (a pair of float64 values), which has special + characteristics for precision and range. + This function can also be used for complex data types as well. If used, the output will be the same as the corresponding real float type (e.g. numpy.finfo(numpy.csingle) is the same as numpy.finfo(numpy.single)). @@ -477,6 +160,7 @@ class finfo: Examples -------- + >>> import numpy as np >>> np.finfo(np.float64).dtype dtype('float64') >>> np.finfo(np.complex64).dtype @@ -484,9 +168,14 @@ class finfo: """ - _finfo_cache = {} + _finfo_cache = {} # noqa: RUF012 + + __class_getitem__ = classmethod(types.GenericAlias) def __new__(cls, dtype): + if dtype is None: + raise TypeError("dtype must not be None") + try: obj = cls._finfo_cache.get(dtype) # most common path if obj is not None: @@ -494,15 +183,6 @@ def __new__(cls, dtype): except TypeError: pass - if dtype is None: - # Deprecated in NumPy 1.25, 2023-01-16 - warnings.warn( - "finfo() dtype cannot be None. This behavior will " - "raise an error in the future. 
(Deprecated in NumPy 1.25)", - DeprecationWarning, - stacklevel=2 - ) - try: dtype = numeric.dtype(dtype) except TypeError: @@ -518,7 +198,7 @@ def __new__(cls, dtype): dtypes.append(newdtype) dtype = newdtype if not issubclass(dtype, numeric.inexact): - raise ValueError("data type %r not inexact" % (dtype)) + raise ValueError(f"data type {dtype!r} not inexact") obj = cls._finfo_cache.get(dtype) if obj is not None: return obj @@ -544,77 +224,107 @@ def __new__(cls, dtype): def _init(self, dtype): self.dtype = numeric.dtype(dtype) - machar = _get_machar(dtype) - - for word in ['precision', 'iexp', - 'maxexp', 'minexp', 'negep', - 'machep']: - setattr(self, word, getattr(machar, word)) - for word in ['resolution', 'epsneg', 'smallest_subnormal']: - setattr(self, word, getattr(machar, word).flat[0]) self.bits = self.dtype.itemsize * 8 - self.max = machar.huge.flat[0] - self.min = -self.max - self.eps = machar.eps.flat[0] - self.nexp = machar.iexp - self.nmant = machar.it - self._machar = machar - self._str_tiny = machar._str_xmin.strip() - self._str_max = machar._str_xmax.strip() - self._str_epsneg = machar._str_epsneg.strip() - self._str_eps = machar._str_eps.strip() - self._str_resolution = machar._str_resolution.strip() - self._str_smallest_normal = machar._str_smallest_normal.strip() - self._str_smallest_subnormal = machar._str_smallest_subnormal.strip() + self._fmt = None + self._repr = None + _populate_finfo_constants(self, self.dtype) return self + @cached_property + def epsneg(self): + # Assume typical floating point logic. Could also use nextafter. 
+ return self.eps / self._radix + + @cached_property + def resolution(self): + return self.dtype.type(10)**-self.precision + + @cached_property + def machep(self): + return int(math.log2(self.eps)) + + @cached_property + def negep(self): + return int(math.log2(self.epsneg)) + + @cached_property + def nexp(self): + # considering all ones (inf/nan) and all zeros (subnormal/zero) + return math.ceil(math.log2(self.maxexp - self.minexp + 2)) + + @cached_property + def iexp(self): + # Calculate exponent bits from it's range: + return math.ceil(math.log2(self.maxexp - self.minexp)) + def __str__(self): + if (fmt := getattr(self, "_fmt", None)) is not None: + return fmt + + def get_str(name, pad=None): + if (val := getattr(self, name, None)) is None: + return "" + if pad is not None: + s = str(val).ljust(pad) + return str(val) + + precision = get_str("precision", 3) + machep = get_str("machep", 6) + negep = get_str("negep", 6) + minexp = get_str("minexp", 6) + maxexp = get_str("maxexp", 6) + resolution = get_str("resolution") + eps = get_str("eps") + epsneg = get_str("epsneg") + tiny = get_str("tiny") + smallest_normal = get_str("smallest_normal") + smallest_subnormal = get_str("smallest_subnormal") + nexp = get_str("nexp", 6) + max_ = get_str("max") + if hasattr(self, "min") and hasattr(self, "max") and -self.min == self.max: + min_ = "-max" + else: + min_ = get_str("min") + fmt = ( - 'Machine parameters for %(dtype)s\n' - '---------------------------------------------------------------\n' - 'precision = %(precision)3s resolution = %(_str_resolution)s\n' - 'machep = %(machep)6s eps = %(_str_eps)s\n' - 'negep = %(negep)6s epsneg = %(_str_epsneg)s\n' - 'minexp = %(minexp)6s tiny = %(_str_tiny)s\n' - 'maxexp = %(maxexp)6s max = %(_str_max)s\n' - 'nexp = %(nexp)6s min = -max\n' - 'smallest_normal = %(_str_smallest_normal)s ' - 'smallest_subnormal = %(_str_smallest_subnormal)s\n' - '---------------------------------------------------------------\n' - ) - return fmt % 
self.__dict__ + f'Machine parameters for {self.dtype}\n' + f'---------------------------------------------------------------\n' + f'precision = {precision} resolution = {resolution}\n' + f'machep = {machep} eps = {eps}\n' + f'negep = {negep} epsneg = {epsneg}\n' + f'minexp = {minexp} tiny = {tiny}\n' + f'maxexp = {maxexp} max = {max_}\n' + f'nexp = {nexp} min = {min_}\n' + f'smallest_normal = {smallest_normal} ' + f'smallest_subnormal = {smallest_subnormal}\n' + f'---------------------------------------------------------------\n' + ) + self._fmt = fmt + return fmt def __repr__(self): + if (repr_str := getattr(self, "_repr", None)) is not None: + return repr_str + c = self.__class__.__name__ - d = self.__dict__.copy() - d['klass'] = c - return (("%(klass)s(resolution=%(resolution)s, min=-%(_str_max)s," - " max=%(_str_max)s, dtype=%(dtype)s)") % d) - @property - def smallest_normal(self): - """Return the value for the smallest normal. + # Use precision+1 digits in exponential notation + fmt_str = _MACHAR_PARAMS.get(self.dtype.type, {}).get('fmt', '%s') + if fmt_str != '%s' and hasattr(self, 'max') and hasattr(self, 'min'): + max_str = (fmt_str % self.max).strip() + min_str = (fmt_str % self.min).strip() + else: + max_str = str(self.max) + min_str = str(self.min) - Returns - ------- - smallest_normal : float - Value for the smallest normal. + resolution_str = str(self.resolution) - Warns - ----- - UserWarning - If the calculated value for the smallest normal is requested for - double-double. - """ - # This check is necessary because the value for smallest_normal is - # platform dependent for longdouble types. 
- if isnan(self._machar.smallest_normal.flat[0]): - warnings.warn( - 'The value of smallest normal is undefined for double double', - UserWarning, stacklevel=2) - return self._machar.smallest_normal.flat[0] + repr_str = (f"{c}(resolution={resolution_str}, min={min_str}," + f" max={max_str}, dtype={self.dtype})") + self._repr = repr_str + return repr_str - @property + @cached_property def tiny(self): """Return the value for tiny, alias of smallest_normal. @@ -663,6 +373,7 @@ class iinfo: -------- With types: + >>> import numpy as np >>> ii16 = np.iinfo(np.int16) >>> ii16.min -32768 @@ -684,8 +395,10 @@ class iinfo: """ - _min_vals = {} - _max_vals = {} + _min_vals = {} # noqa: RUF012 + _max_vals = {} # noqa: RUF012 + + __class_getitem__ = classmethod(types.GenericAlias) def __init__(self, int_type): try: @@ -696,7 +409,7 @@ def __init__(self, int_type): self.bits = self.dtype.itemsize * 8 self.key = "%s%d" % (self.kind, self.bits) if self.kind not in 'iu': - raise ValueError("Invalid integer data type %r." 
% (self.kind,)) + raise ValueError(f"Invalid integer data type {self.kind!r}.") @property def min(self): @@ -707,7 +420,7 @@ def min(self): try: val = iinfo._min_vals[self.key] except KeyError: - val = int(-(1 << (self.bits-1))) + val = int(-(1 << (self.bits - 1))) iinfo._min_vals[self.key] = val return val @@ -720,7 +433,7 @@ def max(self): if self.kind == 'u': val = int((1 << self.bits) - 1) else: - val = int((1 << (self.bits-1)) - 1) + val = int((1 << (self.bits - 1)) - 1) iinfo._max_vals[self.key] = val return val diff --git a/numpy/_core/getlimits.pyi b/numpy/_core/getlimits.pyi index da5e3c23ea72..a22149ceb5c6 100644 --- a/numpy/_core/getlimits.pyi +++ b/numpy/_core/getlimits.pyi @@ -1,6 +1,124 @@ -from numpy import ( - finfo as finfo, - iinfo as iinfo, +from functools import cached_property +from types import GenericAlias +from typing import Final, Generic, Self, overload +from typing_extensions import TypeVar + +import numpy as np +from numpy._typing import ( + _CLongDoubleCodes, + _Complex64Codes, + _Complex128Codes, + _DTypeLike, + _Float16Codes, + _Float32Codes, + _Float64Codes, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _IntPCodes, + _LongDoubleCodes, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, ) -__all__: list[str] +__all__ = ["finfo", "iinfo"] + +### + +_IntegerT_co = TypeVar("_IntegerT_co", bound=np.integer, default=np.integer, covariant=True) +_FloatingT_co = TypeVar("_FloatingT_co", bound=np.floating, default=np.floating, covariant=True) + +### + +class iinfo(Generic[_IntegerT_co]): + dtype: np.dtype[_IntegerT_co] + bits: Final[int] + kind: Final[str] + key: Final[str] + + @property + def min(self, /) -> int: ... + @property + def max(self, /) -> int: ... + + # + @overload + def __init__(self, /, int_type: _IntegerT_co | _DTypeLike[_IntegerT_co]) -> None: ... + @overload + def __init__(self: iinfo[np.int_], /, int_type: _IntPCodes | type[int] | int) -> None: ... 
+ @overload + def __init__(self: iinfo[np.int8], /, int_type: _Int8Codes) -> None: ... + @overload + def __init__(self: iinfo[np.uint8], /, int_type: _UInt8Codes) -> None: ... + @overload + def __init__(self: iinfo[np.int16], /, int_type: _Int16Codes) -> None: ... + @overload + def __init__(self: iinfo[np.uint16], /, int_type: _UInt16Codes) -> None: ... + @overload + def __init__(self: iinfo[np.int32], /, int_type: _Int32Codes) -> None: ... + @overload + def __init__(self: iinfo[np.uint32], /, int_type: _UInt32Codes) -> None: ... + @overload + def __init__(self: iinfo[np.int64], /, int_type: _Int64Codes) -> None: ... + @overload + def __init__(self: iinfo[np.uint64], /, int_type: _UInt64Codes) -> None: ... + @overload + def __init__(self, /, int_type: str) -> None: ... + + # + @classmethod + def __class_getitem__(cls, item: object, /) -> GenericAlias: ... + +class finfo(Generic[_FloatingT_co]): + dtype: np.dtype[_FloatingT_co] # readonly + eps: _FloatingT_co # readonly + _radix: _FloatingT_co # readonly + smallest_normal: _FloatingT_co # readonly + smallest_subnormal: _FloatingT_co # readonly + max: _FloatingT_co # readonly + min: _FloatingT_co # readonly + + _fmt: str | None # `__str__` cache + _repr: str | None # `__repr__` cache + + bits: Final[int] + maxexp: Final[int] + minexp: Final[int] + nmant: Final[int] + precision: Final[int] + + @classmethod + def __class_getitem__(cls, item: object, /) -> GenericAlias: ... + + # + @overload + def __new__(cls, dtype: _FloatingT_co | _DTypeLike[_FloatingT_co]) -> Self: ... + @overload + def __new__(cls, dtype: _Float16Codes) -> finfo[np.float16]: ... + @overload + def __new__(cls, dtype: _Float32Codes | _Complex64Codes | _DTypeLike[np.complex64]) -> finfo[np.float32]: ... + @overload + def __new__(cls, dtype: _Float64Codes | _Complex128Codes | type[complex] | complex) -> finfo[np.float64]: ... 
+ @overload + def __new__(cls, dtype: _LongDoubleCodes | _CLongDoubleCodes | _DTypeLike[np.clongdouble]) -> finfo[np.longdouble]: ... + @overload + def __new__(cls, dtype: str) -> finfo: ... + + # + @cached_property + def epsneg(self, /) -> _FloatingT_co: ... + @cached_property + def resolution(self, /) -> _FloatingT_co: ... + @cached_property + def machep(self, /) -> int: ... + @cached_property + def negep(self, /) -> int: ... + @cached_property + def nexp(self, /) -> int: ... + @cached_property + def iexp(self, /) -> int: ... + @cached_property + def tiny(self, /) -> _FloatingT_co: ... diff --git a/numpy/_core/include/meson.build b/numpy/_core/include/meson.build index fa0e6e83f794..89176c32cc8f 100644 --- a/numpy/_core/include/meson.build +++ b/numpy/_core/include/meson.build @@ -7,7 +7,6 @@ installed_headers = [ 'numpy/halffloat.h', 'numpy/ndarrayobject.h', 'numpy/ndarraytypes.h', - 'numpy/npy_1_7_deprecated_api.h', 'numpy/npy_2_compat.h', 'numpy/npy_2_complexcompat.h', 'numpy/npy_3kcompat.h', diff --git a/numpy/_core/include/numpy/arrayscalars.h b/numpy/_core/include/numpy/arrayscalars.h index ff048061f70a..46bc58cc2a35 100644 --- a/numpy/_core/include/numpy/arrayscalars.h +++ b/numpy/_core/include/numpy/arrayscalars.h @@ -173,9 +173,11 @@ typedef struct { #define PyArrayScalar_True ((PyObject *)(&(_PyArrayScalar_BoolValues[1]))) #define PyArrayScalar_FromLong(i) \ ((PyObject *)(&(_PyArrayScalar_BoolValues[((i)!=0)]))) -#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) \ - return Py_INCREF(PyArrayScalar_FromLong(i)), \ - PyArrayScalar_FromLong(i) +#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) do { \ + PyObject *obj = PyArrayScalar_FromLong(i); \ + Py_INCREF(obj); \ + return obj; \ +} while (0) #define PyArrayScalar_RETURN_FALSE \ return Py_INCREF(PyArrayScalar_False), \ PyArrayScalar_False diff --git a/numpy/_core/include/numpy/dtype_api.h b/numpy/_core/include/numpy/dtype_api.h index c35577fbbcad..5ac964782ec0 100644 --- a/numpy/_core/include/numpy/dtype_api.h 
+++ b/numpy/_core/include/numpy/dtype_api.h @@ -99,6 +99,11 @@ typedef enum { } NPY_ARRAYMETHOD_FLAGS; +typedef enum { + /* Casting via same_value logic */ + NPY_SAME_VALUE_CONTEXT_FLAG=1, +} NPY_ARRAYMETHOD_CONTEXT_FLAGS; + typedef struct PyArrayMethod_Context_tag { /* The caller, which is typically the original ufunc. May be NULL */ PyObject *caller; @@ -107,7 +112,22 @@ typedef struct PyArrayMethod_Context_tag { /* Operand descriptors, filled in by resolve_descriptors */ PyArray_Descr *const *descriptors; + #if NPY_FEATURE_VERSION > NPY_2_3_API_VERSION + void * _reserved; + /* + * Optional flag to pass information into the inner loop + * NPY_ARRAYMETHOD_CONTEXT_FLAGS + */ + uint64_t flags; + + /* + * Optional run-time parameters to pass to the loop (currently used in sorting). + * Fixed parameters are expected to be passed via auxdata. + */ + void *parameters; + /* Structure may grow (this is harmless for DType authors) */ + #endif } PyArrayMethod_Context; @@ -125,6 +145,13 @@ typedef struct { } PyArrayMethod_Spec; +// This is used for the convenience function `PyUFunc_AddLoopsFromSpecs` +typedef struct { + const char *name; + PyArrayMethod_Spec *spec; +} PyUFunc_LoopSlot; + + /* * ArrayMethod slots * ----------------- @@ -144,7 +171,6 @@ typedef struct { #define NPY_METH_contiguous_indexed_loop 9 #define _NPY_METH_static_data 10 - /* * The resolve descriptors function, must be able to handle NULL values for * all output (but not input) `given_descrs` and fill `loop_descrs`. @@ -268,7 +294,8 @@ typedef int (PyArrayMethod_TranslateGivenDescriptors)(int nin, int nout, * * The function must clean up on error. * - * @param nargs Number of arguments + * @param nin Number of input arguments + * @param nout Number of output arguments * @param new_dtypes The DTypes of the output (usually probably not needed) * @param given_descrs Original given_descrs to the resolver, necessary to * fetch any information related to the new dtypes from the original. 
@@ -366,6 +393,7 @@ typedef int (PyArrayMethod_PromoterFunction)(PyObject *ufunc, #define NPY_DT_get_clear_loop 9 #define NPY_DT_get_fill_zero_loop 10 #define NPY_DT_finalize_descr 11 +#define NPY_DT_get_constant 12 // These PyArray_ArrFunc slots will be deprecated and replaced eventually // getitem and setitem can be defined as a performance optimization; @@ -376,7 +404,7 @@ typedef int (PyArrayMethod_PromoterFunction)(PyObject *ufunc, // used to separate dtype slots from arrfuncs slots // intended only for internal use but defined here for clarity -#define _NPY_DT_ARRFUNCS_OFFSET (1 << 10) +#define _NPY_DT_ARRFUNCS_OFFSET (1 << 11) // Cast is disabled // #define NPY_DT_PyArray_ArrFuncs_cast 0 + _NPY_DT_ARRFUNCS_OFFSET @@ -449,7 +477,7 @@ typedef PyArray_DTypeMeta *(PyArrayDTypeMeta_CommonDType)( static inline PyArray_DTypeMeta * NPY_DT_NewRef(PyArray_DTypeMeta *o) { - Py_INCREF(o); + Py_INCREF((PyObject *)o); return o; } @@ -466,6 +494,42 @@ typedef PyArray_Descr *(PyArrayDTypeMeta_EnsureCanonical)(PyArray_Descr *dtype); */ typedef PyArray_Descr *(PyArrayDTypeMeta_FinalizeDescriptor)(PyArray_Descr *dtype); +/* + * Constants that can be queried and used e.g. by reduce identities defaults. + * These are also used to expose .finfo and .iinfo for example. 
+ */ +/* Numerical constants */ +#define NPY_CONSTANT_zero 1 +#define NPY_CONSTANT_one 2 +#define NPY_CONSTANT_all_bits_set 3 +#define NPY_CONSTANT_maximum_finite 4 +#define NPY_CONSTANT_minimum_finite 5 +#define NPY_CONSTANT_inf 6 +#define NPY_CONSTANT_ninf 7 +#define NPY_CONSTANT_nan 8 +#define NPY_CONSTANT_finfo_radix 9 +#define NPY_CONSTANT_finfo_eps 10 +#define NPY_CONSTANT_finfo_smallest_normal 11 +#define NPY_CONSTANT_finfo_smallest_subnormal 12 +/* Constants that are always of integer type, value is `npy_intp/Py_ssize_t` */ +#define NPY_CONSTANT_finfo_nmant (1 << 16) + 0 +#define NPY_CONSTANT_finfo_min_exp (1 << 16) + 1 +#define NPY_CONSTANT_finfo_max_exp (1 << 16) + 2 +#define NPY_CONSTANT_finfo_decimal_digits (1 << 16) + 3 + +/* It may make sense to continue with other constants here, e.g. pi, etc? */ + +/* + * Function to get a constant value for the dtype. Data may be unaligned, the + * function is always called with the GIL held. + * + * @param descr The dtype instance (i.e. self) + * @param ID The ID of the constant to get. + * @param data Pointer to the data to be written to, may be unaligned. + * @returns 1 on success, 0 if the constant is not available, or -1 with an error set. + */ +typedef int (PyArrayDTypeMeta_GetConstant)(PyArray_Descr *descr, int ID, void *data); + +/* + * TODO: These two functions are currently only used for experimental DType + * API support. 
Their relation should be "reversed": NumPy should @@ -476,4 +540,8 @@ typedef PyArray_Descr *(PyArrayDTypeMeta_FinalizeDescriptor)(PyArray_Descr *dtyp typedef int(PyArrayDTypeMeta_SetItem)(PyArray_Descr *, PyObject *, char *); typedef PyObject *(PyArrayDTypeMeta_GetItem)(PyArray_Descr *, char *); +typedef struct { + NPY_SORTKIND flags; +} PyArrayMethod_SortParameters; + #endif /* NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_ */ diff --git a/numpy/_core/include/numpy/ndarrayobject.h b/numpy/_core/include/numpy/ndarrayobject.h index f06bafe5b52a..82a1589ff075 100644 --- a/numpy/_core/include/numpy/ndarrayobject.h +++ b/numpy/_core/include/numpy/ndarrayobject.h @@ -32,7 +32,7 @@ extern "C" { #define PyArray_DescrCheck(op) PyObject_TypeCheck(op, &PyArrayDescr_Type) #define PyArray_Check(op) PyObject_TypeCheck(op, &PyArray_Type) -#define PyArray_CheckExact(op) (((PyObject*)(op))->ob_type == &PyArray_Type) +#define PyArray_CheckExact(op) (Py_TYPE((PyObject*)(op)) == &PyArray_Type) #define PyArray_HasArrayInterfaceType(op, type, context, out) \ ((((out)=PyArray_FromStructInterface(op)) != Py_NotImplemented) || \ @@ -220,15 +220,6 @@ NPY_TITLE_KEY_check(PyObject *key, PyObject *value) if (key == title) { return 1; } -#ifdef PYPY_VERSION - /* - * On PyPy, dictionary keys do not always preserve object identity. - * Fall back to comparison by value. - */ - if (PyUnicode_Check(title) && PyUnicode_Check(key)) { - return PyUnicode_Compare(title, key) == 0 ? 
1 : 0; - } -#endif return 0; } diff --git a/numpy/_core/include/numpy/ndarraytypes.h b/numpy/_core/include/numpy/ndarraytypes.h index 95821b0baff2..6afdcf821a6b 100644 --- a/numpy/_core/include/numpy/ndarraytypes.h +++ b/numpy/_core/include/numpy/ndarraytypes.h @@ -6,6 +6,10 @@ #include "npy_cpu.h" #include "utils.h" +#ifdef __cplusplus +extern "C" { +#endif + #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN /* Always allow threading unless it was explicitly disabled at build time */ @@ -123,7 +127,6 @@ enum NPY_TYPECHAR { NPY_CLONGDOUBLELTR = 'G', NPY_OBJECTLTR = 'O', NPY_STRINGLTR = 'S', - NPY_DEPRECATED_STRINGLTR2 = 'a', NPY_UNICODELTR = 'U', NPY_VOIDLTR = 'V', NPY_DATETIMELTR = 'M', @@ -158,18 +161,37 @@ enum NPY_TYPECHAR { }; /* - * Changing this may break Numpy API compatibility - * due to changing offsets in PyArray_ArrFuncs, so be - * careful. Here we have reused the mergesort slot for - * any kind of stable sort, the actual implementation will - * depend on the data type. + * Changing this may break Numpy API compatibility due to changing offsets in + * PyArray_ArrFuncs, so be careful. Here we have reused the mergesort slot for + * any kind of stable sort, the actual implementation will depend on the data + * type. + * + * Updated in NumPy 2.4 + * + * Updated with new names denoting requirements rather than specifying a + * particular algorithm. All the previous values are reused in a way that + * should be downstream compatible, but the actual algorithms used may be + * different than before. The new approach should be more flexible and easier + * to update. + * + * Names with a leading underscore are private, and should only be used + * internally by NumPy. + * + * NPY_NSORTS remains the same for backwards compatibility, it should not be + * changed. 
*/ + typedef enum { - _NPY_SORT_UNDEFINED=-1, - NPY_QUICKSORT=0, - NPY_HEAPSORT=1, - NPY_MERGESORT=2, - NPY_STABLESORT=2, + _NPY_SORT_UNDEFINED = -1, + NPY_QUICKSORT = 0, + NPY_HEAPSORT = 1, + NPY_MERGESORT = 2, + NPY_STABLESORT = 2, + // new style names + _NPY_SORT_HEAPSORT = 1, + NPY_SORT_DEFAULT = 0, + NPY_SORT_STABLE = 2, + NPY_SORT_DESCENDING = 4, } NPY_SORTKIND; #define NPY_NSORTS (NPY_STABLESORT + 1) @@ -210,6 +232,16 @@ typedef enum { NPY_KEEPORDER=2 } NPY_ORDER; +#if NPY_FEATURE_VERSION >= NPY_2_4_API_VERSION +/* + * check that no values overflow/change during casting + * Used explicitly only in the ArrayMethod creation or resolve_dtypes functions to + * indicate that a same-value cast is supported. In external APIs, use only + * NPY_SAME_VALUE_CASTING + */ +#define NPY_SAME_VALUE_CASTING_FLAG 64 +#endif + /* For specifying allowed casting in operations which support it */ typedef enum { _NPY_ERROR_OCCURRED_IN_CAST = -1, @@ -223,6 +255,9 @@ typedef enum { NPY_SAME_KIND_CASTING=3, /* Allow any casts */ NPY_UNSAFE_CASTING=4, +#if NPY_FEATURE_VERSION >= NPY_2_4_API_VERSION + NPY_SAME_VALUE_CASTING=NPY_UNSAFE_CASTING | NPY_SAME_VALUE_CASTING_FLAG, +#endif } NPY_CASTING; typedef enum { @@ -841,7 +876,7 @@ typedef struct { npy_int32 month, day, hour, min, sec, us, ps, as; } npy_datetimestruct; -/* This is not used internally. */ +/* This structure contains an exploded view of a timedelta value */ typedef struct { npy_int64 day; npy_int32 sec, us, ps, as; @@ -1302,7 +1337,7 @@ typedef struct { PyArrayIterObject *iters[64]; #elif defined(__cplusplus) /* - * C++ doesn't stricly support flexible members and gives compilers + * C++ doesn't strictly support flexible members and gives compilers * warnings (pedantic only), so we lie. We can't make it 64 because * then Cython is unhappy (larger struct at runtime is OK smaller not). 
*/ @@ -1675,7 +1710,7 @@ PyArray_CLEARFLAGS(PyArrayObject *arr, int flags) /* * PyDataType_* FLAGS, FLACHK, REFCHK, HASFIELDS, HASSUBARRAY, UNSIZED, * SUBARRAY, NAMES, FIELDS, C_METADATA, and METADATA require version specific - * lookup and are defined in npy_2_compat.h. + * lookup and are defined in npy_2_compat.h. */ @@ -1904,10 +1939,6 @@ typedef struct { #error "Do not use the reserved keyword NPY_DEPRECATED_INCLUDES." #endif #define NPY_DEPRECATED_INCLUDES -#if !defined(NPY_NO_DEPRECATED_API) || \ - (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) -#include "npy_1_7_deprecated_api.h" -#endif /* * There is no file npy_1_8_deprecated_api.h since there are no additional * deprecated API features in NumPy 1.8. @@ -1919,7 +1950,32 @@ typedef struct { * (NPY_NO_DEPRECATED_API < NPY_1_9_API_VERSION) * #include "npy_1_9_deprecated_api.h" * #endif + * Then in the npy_1_9_deprecated_api.h header add something like this + * -------------------- + * #ifndef NPY_DEPRECATED_INCLUDES + * #error "Should never include npy_*_*_deprecated_api directly." 
+ * #endif + * #ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_ + * #define NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_ + * + * #ifndef NPY_NO_DEPRECATED_API + * #if defined(_WIN32) + * #define _WARN___STR2__(x) #x + * #define _WARN___STR1__(x) _WARN___STR2__(x) + * #define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: " + * #pragma message(_WARN___LOC__"Using deprecated NumPy API, disable it with " \ + * "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION") + * #else + * #warning "Using deprecated NumPy API, disable it with " \ + * "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION" + * #endif + * #endif + * -------------------- */ #undef NPY_DEPRECATED_INCLUDES +#ifdef __cplusplus +} +#endif + #endif /* NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_ */ diff --git a/numpy/_core/include/numpy/npy_1_7_deprecated_api.h b/numpy/_core/include/numpy/npy_1_7_deprecated_api.h deleted file mode 100644 index be53cded488d..000000000000 --- a/numpy/_core/include/numpy/npy_1_7_deprecated_api.h +++ /dev/null @@ -1,112 +0,0 @@ -#ifndef NPY_DEPRECATED_INCLUDES -#error "Should never include npy_*_*_deprecated_api directly." -#endif - -#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_ -#define NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_ - -/* Emit a warning if the user did not specifically request the old API */ -#ifndef NPY_NO_DEPRECATED_API -#if defined(_WIN32) -#define _WARN___STR2__(x) #x -#define _WARN___STR1__(x) _WARN___STR2__(x) -#define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: " -#pragma message(_WARN___LOC__"Using deprecated NumPy API, disable it with " \ - "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION") -#else -#warning "Using deprecated NumPy API, disable it with " \ - "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION" -#endif -#endif - -/* - * This header exists to collect all dangerous/deprecated NumPy API - * as of NumPy 1.7. 
- * - * This is an attempt to remove bad API, the proliferation of macros, - * and namespace pollution currently produced by the NumPy headers. - */ - -/* These array flags are deprecated as of NumPy 1.7 */ -#define NPY_CONTIGUOUS NPY_ARRAY_C_CONTIGUOUS -#define NPY_FORTRAN NPY_ARRAY_F_CONTIGUOUS - -/* - * The consistent NPY_ARRAY_* names which don't pollute the NPY_* - * namespace were added in NumPy 1.7. - * - * These versions of the carray flags are deprecated, but - * probably should only be removed after two releases instead of one. - */ -#define NPY_C_CONTIGUOUS NPY_ARRAY_C_CONTIGUOUS -#define NPY_F_CONTIGUOUS NPY_ARRAY_F_CONTIGUOUS -#define NPY_OWNDATA NPY_ARRAY_OWNDATA -#define NPY_FORCECAST NPY_ARRAY_FORCECAST -#define NPY_ENSURECOPY NPY_ARRAY_ENSURECOPY -#define NPY_ENSUREARRAY NPY_ARRAY_ENSUREARRAY -#define NPY_ELEMENTSTRIDES NPY_ARRAY_ELEMENTSTRIDES -#define NPY_ALIGNED NPY_ARRAY_ALIGNED -#define NPY_NOTSWAPPED NPY_ARRAY_NOTSWAPPED -#define NPY_WRITEABLE NPY_ARRAY_WRITEABLE -#define NPY_BEHAVED NPY_ARRAY_BEHAVED -#define NPY_BEHAVED_NS NPY_ARRAY_BEHAVED_NS -#define NPY_CARRAY NPY_ARRAY_CARRAY -#define NPY_CARRAY_RO NPY_ARRAY_CARRAY_RO -#define NPY_FARRAY NPY_ARRAY_FARRAY -#define NPY_FARRAY_RO NPY_ARRAY_FARRAY_RO -#define NPY_DEFAULT NPY_ARRAY_DEFAULT -#define NPY_IN_ARRAY NPY_ARRAY_IN_ARRAY -#define NPY_OUT_ARRAY NPY_ARRAY_OUT_ARRAY -#define NPY_INOUT_ARRAY NPY_ARRAY_INOUT_ARRAY -#define NPY_IN_FARRAY NPY_ARRAY_IN_FARRAY -#define NPY_OUT_FARRAY NPY_ARRAY_OUT_FARRAY -#define NPY_INOUT_FARRAY NPY_ARRAY_INOUT_FARRAY -#define NPY_UPDATE_ALL NPY_ARRAY_UPDATE_ALL - -/* This way of accessing the default type is deprecated as of NumPy 1.7 */ -#define PyArray_DEFAULT NPY_DEFAULT_TYPE - -/* - * Deprecated as of NumPy 1.7, this kind of shortcut doesn't - * belong in the public API. - */ -#define NPY_AO PyArrayObject - -/* - * Deprecated as of NumPy 1.7, an all-lowercase macro doesn't - * belong in the public API. 
- */ -#define fortran fortran_ - -/* - * Deprecated as of NumPy 1.7, as it is a namespace-polluting - * macro. - */ -#define FORTRAN_IF PyArray_FORTRAN_IF - -/* Deprecated as of NumPy 1.7, datetime64 uses c_metadata instead */ -#define NPY_METADATA_DTSTR "__timeunit__" - -/* - * Deprecated as of NumPy 1.7. - * The reasoning: - * - These are for datetime, but there's no datetime "namespace". - * - They just turn NPY_STR_ into "", which is just - * making something simple be indirected. - */ -#define NPY_STR_Y "Y" -#define NPY_STR_M "M" -#define NPY_STR_W "W" -#define NPY_STR_D "D" -#define NPY_STR_h "h" -#define NPY_STR_m "m" -#define NPY_STR_s "s" -#define NPY_STR_ms "ms" -#define NPY_STR_us "us" -#define NPY_STR_ns "ns" -#define NPY_STR_ps "ps" -#define NPY_STR_fs "fs" -#define NPY_STR_as "as" - - -#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_ */ diff --git a/numpy/_core/include/numpy/npy_2_compat.h b/numpy/_core/include/numpy/npy_2_compat.h index 50e637f79223..e39e65aedea7 100644 --- a/numpy/_core/include/numpy/npy_2_compat.h +++ b/numpy/_core/include/numpy/npy_2_compat.h @@ -53,7 +53,7 @@ #if NPY_ABI_VERSION < 0x02000000 /* * Define 2.0 feature version as it is needed below to decide whether we - * compile for both 1.x and 2.x (defining it gaurantees 1.x only). + * compile for both 1.x and 2.x (defining it guarantees 1.x only). */ #define NPY_2_0_API_VERSION 0x00000012 /* @@ -74,7 +74,7 @@ #ifdef import_array1 static inline int -PyArray_ImportNumPyAPI() +PyArray_ImportNumPyAPI(void) { if (NPY_UNLIKELY(PyArray_API == NULL)) { import_array1(-1); @@ -125,7 +125,7 @@ PyArray_ImportNumPyAPI() #define NPY_DEFAULT_INT \ (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? NPY_INTP : NPY_LONG) #define NPY_RAVEL_AXIS \ - (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? -1 : 32) + (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? NPY_MIN_INT : 32) #define NPY_MAXARGS \ (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? 
64 : 32) #endif diff --git a/numpy/_core/include/numpy/npy_3kcompat.h b/numpy/_core/include/numpy/npy_3kcompat.h index 62fde943aacc..cd91f66268c7 100644 --- a/numpy/_core/include/numpy/npy_3kcompat.h +++ b/numpy/_core/include/numpy/npy_3kcompat.h @@ -5,8 +5,7 @@ * hence the "3k" naming. * * If you want to use this for your own projects, it's recommended to make a - * copy of it. Although the stuff below is unlikely to change, we don't provide - * strong backwards compatibility guarantees at the moment. + * copy of it. We don't provide backwards compatibility guarantees. */ #ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_ @@ -15,27 +14,13 @@ #include #include -#ifndef NPY_PY3K -#define NPY_PY3K 1 -#endif - -#include "numpy/npy_common.h" -#include "numpy/ndarrayobject.h" +#include "npy_common.h" #ifdef __cplusplus extern "C" { #endif -/* - * PyInt -> PyLong - */ - - -/* - * This is a renamed copy of the Python non-limited API function _PyLong_AsInt. It is - * included here because it is missing from the PyPy API. It completes the PyLong_As* - * group of functions and can be useful in replacing PyInt_Check. - */ +/* Python13 removes _PyLong_AsInt */ static inline int Npy__PyLong_AsInt(PyObject *obj) { @@ -53,128 +38,14 @@ Npy__PyLong_AsInt(PyObject *obj) return (int)result; } +#if defined _MSC_VER && _MSC_VER >= 1900 -#if defined(NPY_PY3K) -/* Return True only if the long fits in a C long */ -static inline int PyInt_Check(PyObject *op) { - int overflow = 0; - if (!PyLong_Check(op)) { - return 0; - } - PyLong_AsLongAndOverflow(op, &overflow); - return (overflow == 0); -} - - -#define PyInt_FromLong PyLong_FromLong -#define PyInt_AsLong PyLong_AsLong -#define PyInt_AS_LONG PyLong_AsLong -#define PyInt_AsSsize_t PyLong_AsSsize_t -#define PyNumber_Int PyNumber_Long - -/* NOTE: - * - * Since the PyLong type is very different from the fixed-range PyInt, - * we don't define PyInt_Type -> PyLong_Type. 
- */ -#endif /* NPY_PY3K */ - -/* Py3 changes PySlice_GetIndicesEx' first argument's type to PyObject* */ -#ifdef NPY_PY3K -# define NpySlice_GetIndicesEx PySlice_GetIndicesEx -#else -# define NpySlice_GetIndicesEx(op, nop, start, end, step, slicelength) \ - PySlice_GetIndicesEx((PySliceObject *)op, nop, start, end, step, slicelength) -#endif - -#if PY_VERSION_HEX < 0x030900a4 - /* Introduced in https://github.com/python/cpython/commit/d2ec81a8c99796b51fb8c49b77a7fe369863226f */ - #define Py_SET_TYPE(obj, type) ((Py_TYPE(obj) = (type)), (void)0) - /* Introduced in https://github.com/python/cpython/commit/b10dc3e7a11fcdb97e285882eba6da92594f90f9 */ - #define Py_SET_SIZE(obj, size) ((Py_SIZE(obj) = (size)), (void)0) - /* Introduced in https://github.com/python/cpython/commit/c86a11221df7e37da389f9c6ce6e47ea22dc44ff */ - #define Py_SET_REFCNT(obj, refcnt) ((Py_REFCNT(obj) = (refcnt)), (void)0) -#endif - - -#define Npy_EnterRecursiveCall(x) Py_EnterRecursiveCall(x) - -/* - * PyString -> PyBytes - */ - -#if defined(NPY_PY3K) - -#define PyString_Type PyBytes_Type -#define PyString_Check PyBytes_Check -#define PyStringObject PyBytesObject -#define PyString_FromString PyBytes_FromString -#define PyString_FromStringAndSize PyBytes_FromStringAndSize -#define PyString_AS_STRING PyBytes_AS_STRING -#define PyString_AsStringAndSize PyBytes_AsStringAndSize -#define PyString_FromFormat PyBytes_FromFormat -#define PyString_Concat PyBytes_Concat -#define PyString_ConcatAndDel PyBytes_ConcatAndDel -#define PyString_AsString PyBytes_AsString -#define PyString_GET_SIZE PyBytes_GET_SIZE -#define PyString_Size PyBytes_Size - -#define PyUString_Type PyUnicode_Type -#define PyUString_Check PyUnicode_Check -#define PyUStringObject PyUnicodeObject -#define PyUString_FromString PyUnicode_FromString -#define PyUString_FromStringAndSize PyUnicode_FromStringAndSize -#define PyUString_FromFormat PyUnicode_FromFormat -#define PyUString_Concat PyUnicode_Concat2 -#define PyUString_ConcatAndDel 
PyUnicode_ConcatAndDel -#define PyUString_GET_SIZE PyUnicode_GET_SIZE -#define PyUString_Size PyUnicode_Size -#define PyUString_InternFromString PyUnicode_InternFromString -#define PyUString_Format PyUnicode_Format - -#define PyBaseString_Check(obj) (PyUnicode_Check(obj)) - -#else - -#define PyBytes_Type PyString_Type -#define PyBytes_Check PyString_Check -#define PyBytesObject PyStringObject -#define PyBytes_FromString PyString_FromString -#define PyBytes_FromStringAndSize PyString_FromStringAndSize -#define PyBytes_AS_STRING PyString_AS_STRING -#define PyBytes_AsStringAndSize PyString_AsStringAndSize -#define PyBytes_FromFormat PyString_FromFormat -#define PyBytes_Concat PyString_Concat -#define PyBytes_ConcatAndDel PyString_ConcatAndDel -#define PyBytes_AsString PyString_AsString -#define PyBytes_GET_SIZE PyString_GET_SIZE -#define PyBytes_Size PyString_Size - -#define PyUString_Type PyString_Type -#define PyUString_Check PyString_Check -#define PyUStringObject PyStringObject -#define PyUString_FromString PyString_FromString -#define PyUString_FromStringAndSize PyString_FromStringAndSize -#define PyUString_FromFormat PyString_FromFormat -#define PyUString_Concat PyString_Concat -#define PyUString_ConcatAndDel PyString_ConcatAndDel -#define PyUString_GET_SIZE PyString_GET_SIZE -#define PyUString_Size PyString_Size -#define PyUString_InternFromString PyString_InternFromString -#define PyUString_Format PyString_Format - -#define PyBaseString_Check(obj) (PyBytes_Check(obj) || PyUnicode_Check(obj)) - -#endif /* NPY_PY3K */ +#include /* * Macros to protect CRT calls against instant termination when passed an * invalid parameter (https://bugs.python.org/issue23524). 
*/ -#if defined _MSC_VER && _MSC_VER >= 1900 - -#include - extern _invalid_parameter_handler _Py_silent_invalid_parameter_handler; #define NPY_BEGIN_SUPPRESS_IPH { _invalid_parameter_handler _Py_old_handler = \ _set_thread_local_invalid_parameter_handler(_Py_silent_invalid_parameter_handler); @@ -187,20 +58,6 @@ extern _invalid_parameter_handler _Py_silent_invalid_parameter_handler; #endif /* _MSC_VER >= 1900 */ - -static inline void -PyUnicode_ConcatAndDel(PyObject **left, PyObject *right) -{ - Py_SETREF(*left, PyUnicode_Concat(*left, right)); - Py_DECREF(right); -} - -static inline void -PyUnicode_Concat2(PyObject **left, PyObject *right) -{ - Py_SETREF(*left, PyUnicode_Concat(*left, right)); -} - /* * PyFile_* compatibility */ @@ -217,13 +74,6 @@ npy_PyFile_Dup2(PyObject *file, char *mode, npy_off_t *orig_pos) npy_off_t pos; FILE *handle; - /* For Python 2 PyFileObject, use PyFile_AsFile */ -#if !defined(NPY_PY3K) - if (PyFile_Check(file)) { - return PyFile_AsFile(file); - } -#endif - /* Flush first to ensure things end up in the file in the correct order */ ret = PyObject_CallMethod(file, "flush", ""); if (ret == NULL) { @@ -335,13 +185,6 @@ npy_PyFile_DupClose2(PyObject *file, FILE* handle, npy_off_t orig_pos) PyObject *ret, *io, *io_raw; npy_off_t position; - /* For Python 2 PyFileObject, do nothing */ -#if !defined(NPY_PY3K) - if (PyFile_Check(file)) { - return 0; - } -#endif - position = npy_ftell(handle); /* Close the FILE* handle */ @@ -395,29 +238,11 @@ npy_PyFile_DupClose2(PyObject *file, FILE* handle, npy_off_t orig_pos) return 0; } -static inline int -npy_PyFile_Check(PyObject *file) -{ - int fd; - /* For Python 2, check if it is a PyFileObject */ -#if !defined(NPY_PY3K) - if (PyFile_Check(file)) { - return 1; - } -#endif - fd = PyObject_AsFileDescriptor(file); - if (fd == -1) { - PyErr_Clear(); - return 0; - } - return 1; -} - static inline PyObject* npy_PyFile_OpenFile(PyObject *filename, const char *mode) { PyObject *open; - open = 
PyDict_GetItemString(PyEval_GetBuiltins(), "open"); + open = PyDict_GetItemString(PyEval_GetBuiltins(), "open"); // noqa: borrowed-ref OK if (open == NULL) { return NULL; } @@ -437,8 +262,8 @@ npy_PyFile_CloseFile(PyObject *file) return 0; } - -/* This is a copy of _PyErr_ChainExceptions +/* This is a copy of _PyErr_ChainExceptions, which + * is no longer exported from Python3.12 */ static inline void npy_PyErr_ChainExceptions(PyObject *exc, PyObject *val, PyObject *tb) @@ -447,30 +272,25 @@ npy_PyErr_ChainExceptions(PyObject *exc, PyObject *val, PyObject *tb) return; if (PyErr_Occurred()) { - /* only py3 supports this anyway */ - #ifdef NPY_PY3K - PyObject *exc2, *val2, *tb2; - PyErr_Fetch(&exc2, &val2, &tb2); - PyErr_NormalizeException(&exc, &val, &tb); - if (tb != NULL) { - PyException_SetTraceback(val, tb); - Py_DECREF(tb); - } - Py_DECREF(exc); - PyErr_NormalizeException(&exc2, &val2, &tb2); - PyException_SetContext(val2, val); - PyErr_Restore(exc2, val2, tb2); - #endif + PyObject *exc2, *val2, *tb2; + PyErr_Fetch(&exc2, &val2, &tb2); + PyErr_NormalizeException(&exc, &val, &tb); + if (tb != NULL) { + PyException_SetTraceback(val, tb); + Py_DECREF(tb); + } + Py_DECREF(exc); + PyErr_NormalizeException(&exc2, &val2, &tb2); + PyException_SetContext(val2, val); + PyErr_Restore(exc2, val2, tb2); } else { PyErr_Restore(exc, val, tb); } } - /* This is a copy of _PyErr_ChainExceptions, with: - * - a minimal implementation for python 2 - * - __cause__ used instead of __context__ + * __cause__ used instead of __context__ */ static inline void npy_PyErr_ChainExceptionsCause(PyObject *exc, PyObject *val, PyObject *tb) @@ -479,64 +299,23 @@ npy_PyErr_ChainExceptionsCause(PyObject *exc, PyObject *val, PyObject *tb) return; if (PyErr_Occurred()) { - /* only py3 supports this anyway */ - #ifdef NPY_PY3K - PyObject *exc2, *val2, *tb2; - PyErr_Fetch(&exc2, &val2, &tb2); - PyErr_NormalizeException(&exc, &val, &tb); - if (tb != NULL) { - PyException_SetTraceback(val, tb); - 
Py_DECREF(tb); - } - Py_DECREF(exc); - PyErr_NormalizeException(&exc2, &val2, &tb2); - PyException_SetCause(val2, val); - PyErr_Restore(exc2, val2, tb2); - #endif + PyObject *exc2, *val2, *tb2; + PyErr_Fetch(&exc2, &val2, &tb2); + PyErr_NormalizeException(&exc, &val, &tb); + if (tb != NULL) { + PyException_SetTraceback(val, tb); + Py_DECREF(tb); + } + Py_DECREF(exc); + PyErr_NormalizeException(&exc2, &val2, &tb2); + PyException_SetCause(val2, val); + PyErr_Restore(exc2, val2, tb2); } else { PyErr_Restore(exc, val, tb); } } -/* - * PyObject_Cmp - */ -#if defined(NPY_PY3K) -static inline int -PyObject_Cmp(PyObject *i1, PyObject *i2, int *cmp) -{ - int v; - v = PyObject_RichCompareBool(i1, i2, Py_LT); - if (v == 1) { - *cmp = -1; - return 1; - } - else if (v == -1) { - return -1; - } - - v = PyObject_RichCompareBool(i1, i2, Py_GT); - if (v == 1) { - *cmp = 1; - return 1; - } - else if (v == -1) { - return -1; - } - - v = PyObject_RichCompareBool(i1, i2, Py_EQ); - if (v == 1) { - *cmp = 0; - return 1; - } - else { - *cmp = 0; - return -1; - } -} -#endif - /* * PyCObject functions adapted to PyCapsules. 
* diff --git a/numpy/_core/include/numpy/npy_common.h b/numpy/_core/include/numpy/npy_common.h index c6ef7a6ec669..5eaa29035428 100644 --- a/numpy/_core/include/numpy/npy_common.h +++ b/numpy/_core/include/numpy/npy_common.h @@ -98,11 +98,23 @@ #endif #ifdef _MSC_VER - #define NPY_FINLINE static __forceinline + #ifdef __cplusplus + #define NPY_FINLINE __forceinline + #else + #define NPY_FINLINE static __forceinline + #endif #elif defined(__GNUC__) - #define NPY_FINLINE static inline __attribute__((always_inline)) + #ifdef __cplusplus + #define NPY_FINLINE inline __attribute__((always_inline)) + #else + #define NPY_FINLINE static inline __attribute__((always_inline)) + #endif #else - #define NPY_FINLINE static + #ifdef __cplusplus + #define NPY_FINLINE inline + #else + #define NPY_FINLINE static NPY_INLINE + #endif #endif #if defined(_MSC_VER) @@ -379,6 +391,7 @@ typedef struct #include + #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) typedef _Dcomplex npy_cdouble; typedef _Fcomplex npy_cfloat; @@ -409,9 +422,6 @@ typedef longdouble_t _Complex npy_clongdouble; #define NPY_MAX_INT128 NPY_LONGLONG_SUFFIX(85070591730234615865843651857942052864) #define NPY_MIN_INT128 (-NPY_MAX_INT128 - NPY_LONGLONG_SUFFIX(1)) #define NPY_MAX_UINT128 NPY_ULONGLONG_SUFFIX(170141183460469231731687303715884105728) -#define NPY_MAX_INT256 NPY_LONGLONG_SUFFIX(57896044618658097711785492504343953926634992332820282019728792003956564819967) -#define NPY_MIN_INT256 (-NPY_MAX_INT256 - NPY_LONGLONG_SUFFIX(1)) -#define NPY_MAX_UINT256 NPY_ULONGLONG_SUFFIX(115792089237316195423570985008687907853269984665640564039457584007913129639935) #define NPY_MIN_DATETIME NPY_MIN_INT64 #define NPY_MAX_DATETIME NPY_MAX_INT64 #define NPY_MIN_TIMEDELTA NPY_MIN_INT64 @@ -514,17 +524,6 @@ typedef longdouble_t _Complex npy_clongdouble; #define NPY_UINT64_FMT NPY_ULONG_FMT #define MyPyLong_FromInt64 PyLong_FromLong #define MyPyLong_AsInt64 PyLong_AsLong -#elif NPY_BITSOF_LONG == 128 -#define NPY_INT128 NPY_LONG 
-#define NPY_UINT128 NPY_ULONG - typedef long npy_int128; - typedef unsigned long npy_uint128; -#define PyInt128ScalarObject PyLongScalarObject -#define PyInt128ArrType_Type PyLongArrType_Type -#define PyUInt128ScalarObject PyULongScalarObject -#define PyUInt128ArrType_Type PyULongArrType_Type -#define NPY_INT128_FMT NPY_LONG_FMT -#define NPY_UINT128_FMT NPY_ULONG_FMT #endif #if NPY_BITSOF_LONGLONG == 8 @@ -594,36 +593,6 @@ typedef longdouble_t _Complex npy_clongdouble; # define NPY_MAX_LONGLONG NPY_MAX_INT64 # define NPY_MIN_LONGLONG NPY_MIN_INT64 # define NPY_MAX_ULONGLONG NPY_MAX_UINT64 -#elif NPY_BITSOF_LONGLONG == 128 -# ifndef NPY_INT128 -# define NPY_INT128 NPY_LONGLONG -# define NPY_UINT128 NPY_ULONGLONG - typedef npy_longlong npy_int128; - typedef npy_ulonglong npy_uint128; -# define PyInt128ScalarObject PyLongLongScalarObject -# define PyInt128ArrType_Type PyLongLongArrType_Type -# define PyUInt128ScalarObject PyULongLongScalarObject -# define PyUInt128ArrType_Type PyULongLongArrType_Type -#define NPY_INT128_FMT NPY_LONGLONG_FMT -#define NPY_UINT128_FMT NPY_ULONGLONG_FMT -# endif -# define NPY_MAX_LONGLONG NPY_MAX_INT128 -# define NPY_MIN_LONGLONG NPY_MIN_INT128 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT128 -#elif NPY_BITSOF_LONGLONG == 256 -# define NPY_INT256 NPY_LONGLONG -# define NPY_UINT256 NPY_ULONGLONG - typedef npy_longlong npy_int256; - typedef npy_ulonglong npy_uint256; -# define PyInt256ScalarObject PyLongLongScalarObject -# define PyInt256ArrType_Type PyLongLongArrType_Type -# define PyUInt256ScalarObject PyULongLongScalarObject -# define PyUInt256ArrType_Type PyULongLongArrType_Type -#define NPY_INT256_FMT NPY_LONGLONG_FMT -#define NPY_UINT256_FMT NPY_ULONGLONG_FMT -# define NPY_MAX_LONGLONG NPY_MAX_INT256 -# define NPY_MIN_LONGLONG NPY_MIN_INT256 -# define NPY_MAX_ULONGLONG NPY_MAX_UINT256 #endif #if NPY_BITSOF_INT == 8 @@ -681,19 +650,6 @@ typedef longdouble_t _Complex npy_clongdouble; # define MyPyLong_FromInt64 PyLong_FromLong # define 
MyPyLong_AsInt64 PyLong_AsLong #endif -#elif NPY_BITSOF_INT == 128 -#ifndef NPY_INT128 -#define NPY_INT128 NPY_INT -#define NPY_UINT128 NPY_UINT - typedef int npy_int128; - typedef unsigned int npy_uint128; -# define PyInt128ScalarObject PyIntScalarObject -# define PyInt128ArrType_Type PyIntArrType_Type -# define PyUInt128ScalarObject PyUIntScalarObject -# define PyUInt128ArrType_Type PyUIntArrType_Type -#define NPY_INT128_FMT NPY_INT_FMT -#define NPY_UINT128_FMT NPY_UINT_FMT -#endif #endif #if NPY_BITSOF_SHORT == 8 @@ -751,19 +707,6 @@ typedef longdouble_t _Complex npy_clongdouble; # define MyPyLong_FromInt64 PyLong_FromLong # define MyPyLong_AsInt64 PyLong_AsLong #endif -#elif NPY_BITSOF_SHORT == 128 -#ifndef NPY_INT128 -#define NPY_INT128 NPY_SHORT -#define NPY_UINT128 NPY_USHORT - typedef short npy_int128; - typedef unsigned short npy_uint128; -# define PyInt128ScalarObject PyShortScalarObject -# define PyInt128ArrType_Type PyShortArrType_Type -# define PyUInt128ScalarObject PyUShortScalarObject -# define PyUInt128ArrType_Type PyUShortArrType_Type -#define NPY_INT128_FMT NPY_SHORT_FMT -#define NPY_UINT128_FMT NPY_USHORT_FMT -#endif #endif @@ -823,18 +766,6 @@ typedef longdouble_t _Complex npy_clongdouble; # define MyPyLong_AsInt64 PyLong_AsLong #endif #elif NPY_BITSOF_CHAR == 128 -#ifndef NPY_INT128 -#define NPY_INT128 NPY_BYTE -#define NPY_UINT128 NPY_UBYTE - typedef signed char npy_int128; - typedef unsigned char npy_uint128; -# define PyInt128ScalarObject PyByteScalarObject -# define PyInt128ArrType_Type PyByteArrType_Type -# define PyUInt128ScalarObject PyUByteScalarObject -# define PyUInt128ArrType_Type PyUByteArrType_Type -#define NPY_INT128_FMT NPY_BYTE_FMT -#define NPY_UINT128_FMT NPY_UBYTE_FMT -#endif #endif @@ -1045,17 +976,6 @@ typedef npy_half npy_float16; #define NPY_FLOAT128_FMT NPY_LONGDOUBLE_FMT #define NPY_COMPLEX256_FMT NPY_CLONGDOUBLE_FMT #endif -#elif NPY_BITSOF_LONGDOUBLE == 256 -#define NPY_FLOAT256 NPY_LONGDOUBLE -#define NPY_COMPLEX512 
NPY_CLONGDOUBLE - typedef npy_longdouble npy_float256; - typedef npy_clongdouble npy_complex512; -# define PyFloat256ScalarObject PyLongDoubleScalarObject -# define PyComplex512ScalarObject PyCLongDoubleScalarObject -# define PyFloat256ArrType_Type PyLongDoubleArrType_Type -# define PyComplex512ArrType_Type PyCLongDoubleArrType_Type -#define NPY_FLOAT256_FMT NPY_LONGDOUBLE_FMT -#define NPY_COMPLEX512_FMT NPY_CLONGDOUBLE_FMT #endif /* datetime typedefs */ diff --git a/numpy/_core/include/numpy/npy_cpu.h b/numpy/_core/include/numpy/npy_cpu.h index a19f8e6bbdd9..d3a29da57f36 100644 --- a/numpy/_core/include/numpy/npy_cpu.h +++ b/numpy/_core/include/numpy/npy_cpu.h @@ -18,7 +18,9 @@ * NPY_CPU_ARCEL * NPY_CPU_ARCEB * NPY_CPU_RISCV64 + * NPY_CPU_RISCV32 * NPY_CPU_LOONGARCH + * NPY_CPU_SW_64 * NPY_CPU_WASM */ #ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ @@ -102,28 +104,23 @@ #define NPY_CPU_ARCEL #elif defined(__arc__) && defined(__BIG_ENDIAN__) #define NPY_CPU_ARCEB -#elif defined(__riscv) && defined(__riscv_xlen) && __riscv_xlen == 64 - #define NPY_CPU_RISCV64 -#elif defined(__loongarch__) - #define NPY_CPU_LOONGARCH -#elif defined(__EMSCRIPTEN__) +#elif defined(__riscv) + #if __riscv_xlen == 64 + #define NPY_CPU_RISCV64 + #elif __riscv_xlen == 32 + #define NPY_CPU_RISCV32 + #endif +#elif defined(__loongarch_lp64) + #define NPY_CPU_LOONGARCH64 +#elif defined(__sw_64__) + #define NPY_CPU_SW_64 +#elif defined(__EMSCRIPTEN__) || defined(__wasm__) /* __EMSCRIPTEN__ is defined by emscripten: an LLVM-to-Web compiler */ + /* __wasm__ is defined by clang when targeting wasm */ #define NPY_CPU_WASM #else #error Unknown CPU, please report this to numpy maintainers with \ information about your platform (OS, CPU and compiler) #endif -/* - * Except for the following architectures, memory access is limited to the natural - * alignment of data types otherwise it may lead to bus error or performance regression. 
- * For more details about unaligned access, see https://www.kernel.org/doc/Documentation/unaligned-memory-access.txt. -*/ -#if defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64) || defined(__aarch64__) || defined(__powerpc64__) - #define NPY_ALIGNMENT_REQUIRED 0 -#endif -#ifndef NPY_ALIGNMENT_REQUIRED - #define NPY_ALIGNMENT_REQUIRED 1 -#endif - #endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ */ diff --git a/numpy/_core/include/numpy/npy_endian.h b/numpy/_core/include/numpy/npy_endian.h index 5e58a7f52cee..ecb4b000763d 100644 --- a/numpy/_core/include/numpy/npy_endian.h +++ b/numpy/_core/include/numpy/npy_endian.h @@ -49,7 +49,9 @@ || defined(NPY_CPU_PPC64LE) \ || defined(NPY_CPU_ARCEL) \ || defined(NPY_CPU_RISCV64) \ + || defined(NPY_CPU_RISCV32) \ || defined(NPY_CPU_LOONGARCH) \ + || defined(NPY_CPU_SW_64) \ || defined(NPY_CPU_WASM) #define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN diff --git a/numpy/_core/include/numpy/npy_math.h b/numpy/_core/include/numpy/npy_math.h index 216b173fde58..abc784bc686c 100644 --- a/numpy/_core/include/numpy/npy_math.h +++ b/numpy/_core/include/numpy/npy_math.h @@ -362,7 +362,11 @@ NPY_INPLACE npy_longdouble npy_heavisidel(npy_longdouble x, npy_longdouble h0); static inline double npy_creal(const npy_cdouble z) { - return ((double *) &z)[0]; +#if defined(__cplusplus) + return z._Val[0]; +#else + return creal(z); +#endif } static inline void npy_csetreal(npy_cdouble *z, const double r) @@ -372,7 +376,11 @@ static inline void npy_csetreal(npy_cdouble *z, const double r) static inline double npy_cimag(const npy_cdouble z) { - return ((double *) &z)[1]; +#if defined(__cplusplus) + return z._Val[1]; +#else + return cimag(z); +#endif } static inline void npy_csetimag(npy_cdouble *z, const double i) @@ -382,7 +390,11 @@ static inline void npy_csetimag(npy_cdouble *z, const double i) static inline float npy_crealf(const npy_cfloat z) { - return ((float *) &z)[0]; +#if defined(__cplusplus) + return z._Val[0]; +#else + return crealf(z); +#endif } static 
inline void npy_csetrealf(npy_cfloat *z, const float r) @@ -392,7 +404,11 @@ static inline void npy_csetrealf(npy_cfloat *z, const float r) static inline float npy_cimagf(const npy_cfloat z) { - return ((float *) &z)[1]; +#if defined(__cplusplus) + return z._Val[1]; +#else + return cimagf(z); +#endif } static inline void npy_csetimagf(npy_cfloat *z, const float i) @@ -402,7 +418,11 @@ static inline void npy_csetimagf(npy_cfloat *z, const float i) static inline npy_longdouble npy_creall(const npy_clongdouble z) { - return ((longdouble_t *) &z)[0]; +#if defined(__cplusplus) + return (npy_longdouble)z._Val[0]; +#else + return creall(z); +#endif } static inline void npy_csetreall(npy_clongdouble *z, const longdouble_t r) @@ -412,7 +432,11 @@ static inline void npy_csetreall(npy_clongdouble *z, const longdouble_t r) static inline npy_longdouble npy_cimagl(const npy_clongdouble z) { - return ((longdouble_t *) &z)[1]; +#if defined(__cplusplus) + return (npy_longdouble)z._Val[1]; +#else + return cimagl(z); +#endif } static inline void npy_csetimagl(npy_clongdouble *z, const longdouble_t i) diff --git a/numpy/_core/include/numpy/numpyconfig.h b/numpy/_core/include/numpy/numpyconfig.h index 0b6b2dda4290..4bfe3ab09dea 100644 --- a/numpy/_core/include/numpy/numpyconfig.h +++ b/numpy/_core/include/numpy/numpyconfig.h @@ -7,12 +7,6 @@ * On Mac OS X, because there is only one configuration stage for all the archs * in universal builds, any macro which depends on the arch needs to be * hardcoded. - * - * Note that distutils/pip will attempt a universal2 build when Python itself - * is built as universal2, hence this hardcoding is needed even if we do not - * support universal2 wheels anymore (see gh-22796). - * This code block can be removed after we have dropped the setup.py based - * build completely. 
*/ #ifdef __APPLE__ #undef NPY_SIZEOF_LONG @@ -81,6 +75,11 @@ #define NPY_1_24_API_VERSION 0x00000010 #define NPY_1_25_API_VERSION 0x00000011 #define NPY_2_0_API_VERSION 0x00000012 +#define NPY_2_1_API_VERSION 0x00000013 +#define NPY_2_2_API_VERSION 0x00000013 +#define NPY_2_3_API_VERSION 0x00000014 +#define NPY_2_4_API_VERSION 0x00000015 +#define NPY_2_5_API_VERSION 0x00000015 /* @@ -103,10 +102,11 @@ * default, or narrow it down if they wish to use newer API. If you adjust * this, consider the Python version support (example for 1.25.x): * - * NumPy 1.25.x supports Python: 3.9 3.10 3.11 (3.12) - * NumPy 1.19.x supports Python: 3.6 3.7 3.8 3.9 - * NumPy 1.17.x supports Python: 3.5 3.6 3.7 3.8 - * NumPy 1.15.x supports Python: ... 3.6 3.7 + * NumPy 1.26.x supports Python: 3.9 3.10 3.11 3.12 + * NumPy 1.25.x supports Python: 3.9 3.10 3.11 + * NumPy 1.19.x supports Python: 3.6 3.7 3.8 3.9 + * NumPy 1.17.x supports Python: 3.5 3.6 3.7 3.8 + * NumPy 1.15.x supports Python: ... 3.6 3.7 * * Users of the stable ABI may wish to target the last Python that is not * end of life. This would be 3.8 at NumPy 1.25 release time. @@ -120,8 +120,8 @@ /* user provided a target version, use it */ #define NPY_FEATURE_VERSION NPY_TARGET_VERSION #else - /* Use the default (increase when dropping Python 3.9 support) */ - #define NPY_FEATURE_VERSION NPY_1_19_API_VERSION + /* Use the default (increase when dropping Python 3.12 support) */ + #define NPY_FEATURE_VERSION NPY_1_25_API_VERSION #endif /* Sanity check the (requested) feature version */ @@ -129,7 +129,14 @@ #error "NPY_TARGET_VERSION higher than NumPy headers!" #elif NPY_FEATURE_VERSION < NPY_1_15_API_VERSION /* No support for irrelevant old targets, no need for error, but warn. */ - #warning "Requested NumPy target lower than supported NumPy 1.15." + #ifndef _MSC_VER + #warning "Requested NumPy target lower than supported NumPy 1.15." 
+ #else + #define _WARN___STR2__(x) #x + #define _WARN___STR1__(x) _WARN___STR2__(x) + #define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: " + #pragma message(_WARN___LOC__"Requested NumPy target lower than supported NumPy 1.15.") + #endif #endif /* @@ -160,6 +167,14 @@ #define NPY_FEATURE_VERSION_STRING "1.25" #elif NPY_FEATURE_VERSION == NPY_2_0_API_VERSION #define NPY_FEATURE_VERSION_STRING "2.0" +#elif NPY_FEATURE_VERSION == NPY_2_1_API_VERSION + #define NPY_FEATURE_VERSION_STRING "2.1" +#elif NPY_FEATURE_VERSION == NPY_2_3_API_VERSION + #define NPY_FEATURE_VERSION_STRING "2.3" +#elif NPY_FEATURE_VERSION == NPY_2_4_API_VERSION + #define NPY_FEATURE_VERSION_STRING "2.4" +#elif NPY_FEATURE_VERSION == NPY_2_5_API_VERSION + #define NPY_FEATURE_VERSION_STRING "2.5" #else #error "Missing version string define for new NumPy version." #endif diff --git a/numpy/_core/include/numpy/ufuncobject.h b/numpy/_core/include/numpy/ufuncobject.h index dca375b32673..f5f82b57c91f 100644 --- a/numpy/_core/include/numpy/ufuncobject.h +++ b/numpy/_core/include/numpy/ufuncobject.h @@ -65,6 +65,39 @@ typedef int (PyUFunc_TypeResolutionFunc)( PyObject *type_tup, PyArray_Descr **out_dtypes); +/* + * This is the signature for the functions that may be assigned to the + * `process_core_dims_func` field of the PyUFuncObject structure. + * Implementation of this function is optional. This function is only used + * by generalized ufuncs (i.e. those with the field `core_enabled` set to 1). + * The function is called by the ufunc during the processing of the arguments + * of a call of the ufunc. The function can check the core dimensions of the + * input and output arrays and return -1 with an exception set if any + * requirements are not satisfied. If the caller of the ufunc didn't provide + * output arrays, the core dimensions associated with the output arrays (i.e. + * those that are not also used in input arrays) will have the value -1 in + * `core_dim_sizes`. 
This function can replace any output core dimensions + * that are -1 with a value that is appropriate for the ufunc. + * + * Parameter Description + * --------------- ------------------------------------------------------ + * ufunc The ufunc object + * core_dim_sizes An array with length `ufunc->core_num_dim_ix`. + * The core dimensions of the arrays passed to the ufunc + * will have been set. If the caller of the ufunc didn't + * provide the output array(s), the output-only core + * dimensions will have the value -1. + * + * The function must not change any element in `core_dim_sizes` that is + * not -1 on input. Doing so will result in incorrect output from the + * ufunc, and could result in a crash of the Python interpreter. + * + * The function must return 0 on success, -1 on failure (with an exception + * set). + */ +typedef int (PyUFunc_ProcessCoreDimsFunc)( + struct _tagPyUFuncObject *ufunc, + npy_intp *core_dim_sizes); typedef struct _tagPyUFuncObject { PyObject_HEAD @@ -137,8 +170,10 @@ typedef struct _tagPyUFuncObject { * with the dtypes for the inputs and outputs. */ PyUFunc_TypeResolutionFunc *type_resolver; - /* Was the legacy loop resolver */ - void *reserved2; + + /* A dictionary to monkeypatch ufuncs */ + PyObject *dict; + /* * This was blocked off to be the "new" inner loop selector in 1.7, * but this was never implemented. (This is also why the above @@ -191,6 +226,12 @@ typedef struct _tagPyUFuncObject { /* A PyListObject of `(tuple of DTypes, ArrayMethod/Promoter)` */ PyObject *_loops; #endif + #if NPY_FEATURE_VERSION >= NPY_2_1_API_VERSION + /* + * Optional function to process core dimensions of a gufunc. + */ + PyUFunc_ProcessCoreDimsFunc *process_core_dims_func; + #endif } PyUFuncObject; #include "arrayobject.h" @@ -275,8 +316,7 @@ typedef struct _loop1d_info { #define UFUNC_PYVALS_NAME "UFUNC_PYVALS" -/* - * THESE MACROS ARE DEPRECATED. +/* THESE MACROS ARE DEPRECATED. * Use npy_set_floatstatus_* in the npymath library. 
*/ #define UFUNC_FPE_DIVIDEBYZERO NPY_FPE_DIVIDEBYZERO @@ -284,10 +324,7 @@ typedef struct _loop1d_info { #define UFUNC_FPE_UNDERFLOW NPY_FPE_UNDERFLOW #define UFUNC_FPE_INVALID NPY_FPE_INVALID -#define generate_divbyzero_error() npy_set_floatstatus_divbyzero() -#define generate_overflow_error() npy_set_floatstatus_overflow() - - /* Make sure it gets defined if it isn't already */ +/* Make sure it gets defined if it isn't already */ #ifndef UFUNC_NOFPE /* Clear the floating point exception default of Borland C++ */ #if defined(__BORLANDC__) diff --git a/numpy/_core/memmap.py b/numpy/_core/memmap.py index fb2c95a9d338..89a36808e6f1 100644 --- a/numpy/_core/memmap.py +++ b/numpy/_core/memmap.py @@ -1,8 +1,10 @@ -from contextlib import nullcontext import operator +from contextlib import nullcontext + import numpy as np -from .._utils import set_module -from .numeric import uint8, ndarray, dtype +from numpy._utils import set_module + +from .numeric import dtype, ndarray, uint8 __all__ = ['memmap'] @@ -11,10 +13,10 @@ writeable_filemodes = ["r+", "w+"] mode_equivalents = { - "readonly":"r", - "copyonwrite":"c", - "readwrite":"r+", - "write":"w+" + "readonly": "r", + "copyonwrite": "c", + "readwrite": "r+", + "write": "w+" } @@ -84,7 +86,7 @@ class memmap(ndarray): .. versionchanged:: 2.0 The shape parameter can now be any integer sequence type, previously types were limited to tuple and int. - + order : {'C', 'F'}, optional Specify the order of the ndarray memory layout: :term:`row-major`, C-style or :term:`column-major`, @@ -127,8 +129,8 @@ class memmap(ndarray): Examples -------- - >>> data = np.arange(12, dtype='float32') - >>> data.resize((3,4)) + >>> import numpy as np + >>> data = np.arange(12, dtype=np.float32).reshape((3, 4)) This example uses a temporary file so that doctest doesn't write files to your directory. You would use a 'normal' filename. 
@@ -139,7 +141,7 @@ class memmap(ndarray): Create a memmap with dtype and shape that matches our data: - >>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4)) + >>> fp = np.memmap(filename, dtype=np.float32, mode='w+', shape=(3,4)) >>> fp memmap([[0., 0., 0., 0.], [0., 0., 0., 0.], @@ -162,7 +164,7 @@ class memmap(ndarray): Load the memmap and verify data was stored: - >>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) + >>> newfp = np.memmap(filename, dtype=np.float32, mode='r', shape=(3,4)) >>> newfp memmap([[ 0., 1., 2., 3.], [ 4., 5., 6., 7.], @@ -170,13 +172,13 @@ class memmap(ndarray): Read-only memmap: - >>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) + >>> fpr = np.memmap(filename, dtype=np.float32, mode='r', shape=(3,4)) >>> fpr.flags.writeable False Copy-on-write memmap: - >>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4)) + >>> fpc = np.memmap(filename, dtype=np.float32, mode='c', shape=(3,4)) >>> fpc.flags.writeable True @@ -202,7 +204,7 @@ class memmap(ndarray): Offset into a memmap: - >>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16) + >>> fpo = np.memmap(filename, dtype=np.float32, mode='r', offset=16) >>> fpo memmap([ 4., 5., 6., 7., 8., 9., 10., 11.], dtype=float32) @@ -210,7 +212,7 @@ class memmap(ndarray): __array_priority__ = -100.0 - def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0, + def __new__(cls, filename, dtype=uint8, mode='r+', offset=0, shape=None, order='C'): # Import here to minimize 'import numpy' overhead import mmap @@ -219,9 +221,9 @@ def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0, mode = mode_equivalents[mode] except KeyError as e: if mode not in valid_filemodes: + all_modes = valid_filemodes + list(mode_equivalents.keys()) raise ValueError( - "mode must be one of {!r} (got {!r})" - .format(valid_filemodes + list(mode_equivalents.keys()), mode) + f"mode must be one of {all_modes!r} (got {mode!r})" ) from 
None if mode == 'w+' and shape is None: @@ -232,7 +234,7 @@ def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0, else: f_ctx = open( os.fspath(filename), - ('r' if mode == 'c' else mode)+'b' + ('r' if mode == 'c' else mode) + 'b' ) with f_ctx as fid: @@ -249,22 +251,26 @@ def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0, size = bytes // _dbytes shape = (size,) else: - if type(shape) not in (tuple, list): + if not isinstance(shape, (tuple, list)): try: shape = [operator.index(shape)] except TypeError: pass shape = tuple(shape) - size = np.intp(1) # avoid default choice of np.int_, which might overflow + size = np.intp(1) # avoid overflows for k in shape: size *= k - bytes = int(offset + size*_dbytes) + bytes = int(offset + size * _dbytes) - if mode in ('w+', 'r+') and flen < bytes: - fid.seek(bytes - 1, 0) - fid.write(b'\0') - fid.flush() + if mode in ('w+', 'r+'): + # gh-27723 + # if bytes == 0, we write out 1 byte to allow empty memmap. + bytes = max(bytes, 1) + if flen < bytes: + fid.seek(bytes - 1, 0) + fid.write(b'\0') + fid.flush() if mode == 'c': acc = mmap.ACCESS_COPY @@ -275,10 +281,15 @@ def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0, start = offset - offset % mmap.ALLOCATIONGRANULARITY bytes -= start + # bytes == 0 is problematic as in mmap length=0 maps the full file. + # See PR gh-27723 for a more detailed explanation. 
+ if bytes == 0 and start > 0: + bytes += mmap.ALLOCATIONGRANULARITY + start -= mmap.ALLOCATIONGRANULARITY array_offset = offset - start mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start) - self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm, + self = ndarray.__new__(cls, shape, dtype=descr, buffer=mm, offset=array_offset, order=order) self._mmap = mm self.offset = offset diff --git a/numpy/_core/memmap.pyi b/numpy/_core/memmap.pyi index 03c6b772dcd5..bdb3dc721de2 100644 --- a/numpy/_core/memmap.pyi +++ b/numpy/_core/memmap.pyi @@ -1,3 +1,94 @@ -from numpy import memmap as memmap +from _typeshed import StrOrBytesPath, SupportsWrite +from typing import ( + Any, + ClassVar, + Final, + Literal, + Protocol, + Self, + overload, + override, + type_check_only, +) +from typing_extensions import TypeVar -__all__: list[str] +import numpy as np +from numpy import _OrderKACF, _SupportsFileMethods +from numpy._typing import DTypeLike, _AnyShape, _DTypeLike, _Shape + +__all__ = ["memmap"] + +_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype[Any], default=np.dtype[Any], covariant=True) + +type _Mode = Literal["r", "c", "r+", "w+"] +type _ToMode = Literal[_Mode, "readonly", "copyonwrite", "readwrite", "write"] + +@type_check_only +class _SupportsFileMethodsRW(SupportsWrite[bytes], _SupportsFileMethods, Protocol): ... + +### + +class memmap(np.ndarray[_ShapeT_co, _DTypeT_co]): + __module__: Literal["numpy"] = "numpy" # pyrefly: ignore[bad-override] + __array_priority__: ClassVar[float] = 100.0 # pyright: ignore[reportIncompatibleMethodOverride] # pyrefly: ignore[bad-override] + + filename: Final[str | None] + offset: Final[int] + mode: Final[_Mode] + + @overload + def __new__[ScalarT: np.generic]( + cls, + filename: StrOrBytesPath | _SupportsFileMethodsRW, + dtype: _DTypeT_co, + mode: _ToMode = "r+", + offset: int = 0, + shape: int | tuple[int, ...] 
| None = None, + order: _OrderKACF = "C", + ) -> Self: ... + @overload + def __new__( + cls, + filename: StrOrBytesPath | _SupportsFileMethodsRW, + dtype: type[np.uint8] = ..., + mode: _ToMode = "r+", + offset: int = 0, + shape: int | tuple[int, ...] | None = None, + order: _OrderKACF = "C", + ) -> memmap[_AnyShape, np.dtype[np.uint8]]: ... + @overload + def __new__[ScalarT: np.generic]( + cls, + filename: StrOrBytesPath | _SupportsFileMethodsRW, + dtype: _DTypeLike[ScalarT], + mode: _ToMode = "r+", + offset: int = 0, + shape: int | tuple[int, ...] | None = None, + order: _OrderKACF = "C", + ) -> memmap[_AnyShape, np.dtype[ScalarT]]: ... + @overload + def __new__( + cls, + filename: StrOrBytesPath | _SupportsFileMethodsRW, + dtype: DTypeLike, + mode: _ToMode = "r+", + offset: int = 0, + shape: int | tuple[int, ...] | None = None, + order: _OrderKACF = "C", + ) -> memmap: ... + + # + @override + def __array_finalize__(self, obj: object, /) -> None: ... + @override + def __array_wrap__( # type: ignore[override] + self, + /, + array: memmap[_ShapeT_co, _DTypeT_co], + context: tuple[np.ufunc, tuple[Any, ...], int] | None = None, + return_scalar: bool = False, + ) -> Any: ... + + # + def flush(self) -> None: ... diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index dbe76e0a3dea..5b78cc307be0 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -47,7 +47,12 @@ C_ABI_VERSION = '0x02000000' # 0x00000010 - 1.24.x # 0x00000011 - 1.25.x # 0x00000012 - 2.0.x -C_API_VERSION = '0x00000012' +# 0x00000013 - 2.1.x +# 0x00000013 - 2.2.x +# 0x00000014 - 2.3.x +# 0x00000015 - 2.4.x +# 0x00000015 - 2.5.x +C_API_VERSION = '0x00000015' # Check whether we have a mismatch between the set C API VERSION and the # actual C API VERSION. Will raise a MismatchCAPIError if so. 
@@ -86,7 +91,7 @@ cpu_family = host_machine.cpu_family() use_svml = ( host_machine.system() == 'linux' and cpu_family == 'x86_64' and - ('AVX512_SKX' in CPU_DISPATCH_NAMES or 'AVX512_SKX' in CPU_BASELINE_NAMES) and + ('X86_V4' in CPU_DISPATCH_NAMES or 'X86_V4' in CPU_BASELINE_NAMES) and not get_option('disable-svml') ) if use_svml @@ -96,6 +101,10 @@ if use_svml endif endif +if host_machine.cpu_family() == 'loongarch64' + add_project_arguments(['-DHWY_COMPILE_ONLY_SCALAR'], language: ['cpp']) +endif + use_highway = not get_option('disable-highway') if use_highway and not fs.exists('src/highway/README.md') error('Missing the `highway` git submodule! Run `git submodule update --init` to fix this.') @@ -121,6 +130,21 @@ if use_intel_sort and not fs.exists('src/npysort/x86-simd-sort/README.md') error('Missing the `x86-simd-sort` git submodule! Run `git submodule update --init` to fix this.') endif +# openMP related settings: +if get_option('disable-threading') and get_option('enable-openmp') + error('Build options `disable-threading` and `enable-openmp` are conflicting. Please set at most one to true.') +endif + +use_openmp = get_option('enable-openmp') and not get_option('disable-threading') + +# Setup openmp flags for x86-simd-sort: +omp = [] +omp_dep = [] +if use_intel_sort and use_openmp + omp = dependency('openmp', required : true) + omp_dep = declare_dependency(dependencies: omp, compile_args: ['-DXSS_USE_OPENMP']) +endif + if not fs.exists('src/common/pythoncapi-compat') error('Missing the `pythoncapi-compat` git submodule! 
' + 'Run `git submodule update --init` to fix this.') @@ -244,8 +268,8 @@ endforeach # variable attributes tested via "int %s a" % attribute optional_variable_attributes = [ - ['thread_local', 'HAVE_THREAD_LOCAL'], - ['_Thread_local', 'HAVE__THREAD_LOCAL'], + ['thread_local', 'HAVE_THREAD_LOCAL'], # C23 + ['_Thread_local', 'HAVE__THREAD_LOCAL'], # C11/C17 ['__thread', 'HAVE__THREAD'], ['__declspec(thread)', 'HAVE___DECLSPEC_THREAD_'] ] @@ -336,14 +360,26 @@ endif optional_function_attributes = [ ['optimize("unroll-loops")', 'OPTIMIZE_UNROLL_LOOPS'], ['optimize("O3")', 'OPTIMIZE_OPT_3'], - ['optimize("O2")', 'OPTIMIZE_OPT_2'], - ['optimize("nonnull (1)")', 'NONNULL'], + ['nonnull(1)', 'NONNULL'], ] -#foreach attr: optional_function_attributes -# if cc.has_function_attribute(attr[0]) -# cdata.set10('HAVE_ATTRIBUTE_' + attr[1], true) -# endif -#endforeach +if get_option('disable-optimization') == false + foreach attr: optional_function_attributes + test_code = ''' + __attribute__((@0@)) void test_function(void *ptr) { + (void*)ptr; + return; + } + int main(void) { + int dummy = 0; + test_function(&dummy); + return 0; + } + '''.format(attr[0]) + if cc.compiles(test_code, name: '__attribute__((' + attr[0] + '))', args: ['-Werror', '-Wattributes']) + cdata.set10('HAVE_ATTRIBUTE_' + attr[1], true) + endif + endforeach +endif # Max possible optimization flags. We pass this flags to all our dispatch-able # (multi_targets) sources. 
@@ -488,6 +524,7 @@ endif if longdouble_format == 'UNKNOWN' or longdouble_format == 'UNDEFINED' error('Unknown long double format of size: ' + cc.sizeof('long double').to_string()) endif +message(f'Long double format: @longdouble_format@') cdata.set10('HAVE_LDOUBLE_' + longdouble_format, true) if cc.has_header('endian.h') @@ -506,6 +543,10 @@ if cc.has_function_attribute('visibility:hidden') and host_machine.system() != ' endif cdata.set('NPY_VISIBILITY_HIDDEN', visibility_hidden) +# if not set, we're using lapack_lite +if have_lapack + cdata.set10('HAVE_EXTERNAL_LAPACK', have_lapack) +endif config_h = configure_file( input: 'config.h.in', @@ -564,33 +605,6 @@ npymath_lib = static_library('npymath', gnu_symbol_visibility: 'hidden', ) -dir_separator = '/' -if build_machine.system() == 'windows' - dir_separator = '\\' -endif -configure_file( - input: 'npymath.ini.in', - output: 'npymath.ini', - configuration: configuration_data({ - 'pkgname' : 'numpy._core', - 'sep' : dir_separator, - }), - install: true, - install_dir: np_dir / '_core/lib/npy-pkg-config', - install_tag: 'devel' -) -configure_file( - input: 'mlib.ini.in', - output: 'mlib.ini', - configuration: configuration_data({ - 'posix_mathlib' : mlib_linkflag, - 'msvc_mathlib' : 'm.lib', - }), - install: true, - install_dir: np_dir / '_core/lib/npy-pkg-config', - install_tag: 'devel' -) - if false # This doesn't quite work (yet), it assumes we'll install headers under # include/, and trying to add the correct path with `extra_cflags` runs into @@ -674,9 +688,18 @@ c_args_common = [ cflags_large_file_support, ] +# CPP exceptions are handled in the unique_hash code and therefore the `-fexceptions` +# flag. 
+unique_hash_cpp_args = c_args_common +if cc.get_argument_syntax() != 'msvc' + unique_hash_cpp_args += [ + '-fexceptions', + '-fno-rtti', # no runtime type information + ] +endif + # Same as NPY_CXX_FLAGS (TODO: extend for what ccompiler_opt adds) cpp_args_common = c_args_common + [ - '-D__STDC_VERSION__=0', # for compatibility with C headers ] if cc.get_argument_syntax() != 'msvc' cpp_args_common += [ @@ -710,6 +733,7 @@ py.extension_module('_multiarray_tests', 'src/common/mem_overlap.c', 'src/common/npy_argparse.c', 'src/common/npy_hashtable.c', + 'src/common/npy_import.c', src_file.process('src/common/templ_common.h.src') ], c_args: c_args_common, @@ -726,10 +750,10 @@ _umath_tests_mtargets = mod_features.multi_targets( '_umath_tests.dispatch.h', 'src/umath/_umath_tests.dispatch.c', dispatch: [ - AVX2, SSE41, SSE2, + X86_V3, X86_V2, ASIMDHP, ASIMD, NEON, VSX3, VSX2, VSX, - VXE, VX, + VXE, VX, RVV, ], baseline: CPU_BASELINE, prefix: 'NPY_', @@ -771,7 +795,7 @@ foreach gen_mtargets : [ 'argfunc.dispatch.h', src_file.process('src/multiarray/argfunc.dispatch.c.src'), [ - AVX512_SKX, AVX2, XOP, SSE42, SSE2, + X86_V4, X86_V3, X86_V2, VSX2, ASIMD, NEON, VXE, VX @@ -808,23 +832,25 @@ foreach gen_mtargets : [ [ 'x86_simd_argsort.dispatch.h', 'src/npysort/x86_simd_argsort.dispatch.cpp', - use_intel_sort ? [AVX512_SKX, AVX2] : [] + use_intel_sort ? [X86_V4, X86_V3] : [] ], [ 'x86_simd_qsort.dispatch.h', 'src/npysort/x86_simd_qsort.dispatch.cpp', - use_intel_sort ? [AVX512_SKX, AVX2] : [] + use_intel_sort ? [X86_V4, X86_V3] : [] ], [ 'x86_simd_qsort_16bit.dispatch.h', 'src/npysort/x86_simd_qsort_16bit.dispatch.cpp', - use_intel_sort ? [AVX512_SPR, AVX512_ICL] : [] + # Do not enable AVX-512 on MSVC 32-bit (x86): it’s buggy there; + # Ref: NumPy issue numpy/numpy#29808 + use_intel_sort and not (cc.get_id() == 'msvc' and cpu_family == 'x86') ? [AVX512_SPR, AVX512_ICL] : [] ], [ 'highway_qsort.dispatch.h', 'src/npysort/highway_qsort.dispatch.cpp', use_highway ? 
[ - SVE, ASIMD, VSX2, # FIXME: disable VXE due to runtime segfault + ASIMD, VSX2, # FIXME: disable VXE due to runtime segfault ] : [] ], [ @@ -835,12 +861,15 @@ foreach gen_mtargets : [ ] : [] ], ] + + + mtargets = mod_features.multi_targets( gen_mtargets[0], multiarray_gen_headers + gen_mtargets[1], dispatch: gen_mtargets[2], # baseline: CPU_BASELINE, it doesn't provide baseline fallback prefix: 'NPY_', - dependencies: [py_dep, np_core_dep], + dependencies: [py_dep, np_core_dep, omp_dep], c_args: c_args_common + max_opt, cpp_args: cpp_args_common + max_opt, include_directories: [ @@ -872,57 +901,63 @@ foreach gen_mtargets : [ 'loops_arithm_fp.dispatch.h', src_file.process('src/umath/loops_arithm_fp.dispatch.c.src'), [ - [AVX2, FMA3], SSE2, + X86_V3, X86_V2, ASIMD, NEON, VSX3, VSX2, VXE, VX, + LSX, ] ], [ 'loops_arithmetic.dispatch.h', src_file.process('src/umath/loops_arithmetic.dispatch.c.src'), [ - AVX512_SKX, AVX512F, AVX2, SSE41, SSE2, + X86_V4, X86_V3, X86_V2, NEON, VSX4, VSX2, VX, + LSX, ] ], [ 'loops_comparison.dispatch.h', src_file.process('src/umath/loops_comparison.dispatch.c.src'), [ - AVX512_SKX, AVX512F, AVX2, SSE42, SSE2, + X86_V4, X86_V3, X86_V2, VSX3, VSX2, NEON, VXE, VX, + LSX, ] ], [ 'loops_exponent_log.dispatch.h', src_file.process('src/umath/loops_exponent_log.dispatch.c.src'), [ - AVX512_SKX, AVX512F, [AVX2, FMA3] + X86_V4, X86_V3, ] ], [ 'loops_hyperbolic.dispatch.h', - src_file.process('src/umath/loops_hyperbolic.dispatch.c.src'), + src_file.process('src/umath/loops_hyperbolic.dispatch.cpp.src'), [ - AVX512_SKX, [AVX2, FMA3], + X86_V4, X86_V3, VSX4, VSX2, NEON_VFPV4, - VXE, VX + VXE, + LSX, ] ], [ 'loops_logical.dispatch.h', - src_file.process('src/umath/loops_logical.dispatch.c.src'), + 'src/umath/loops_logical.dispatch.cpp', [ ASIMD, NEON, - AVX512_SKX, AVX2, SSE2, + X86_V4, X86_V3, X86_V2, VSX2, VX, + LSX, + RVV, ] ], [ @@ -930,9 +965,10 @@ foreach gen_mtargets : [ src_file.process('src/umath/loops_minmax.dispatch.c.src'), [ ASIMD, NEON, 
- AVX512_SKX, AVX2, SSE2, + X86_V4, X86_V3, X86_V2, VSX2, VXE, VX, + LSX, ] ], [ @@ -944,68 +980,80 @@ foreach gen_mtargets : [ ], [ 'loops_trigonometric.dispatch.h', - src_file.process('src/umath/loops_trigonometric.dispatch.c.src'), + 'src/umath/loops_trigonometric.dispatch.cpp', [ - AVX512F, [AVX2, FMA3], + X86_V4, X86_V3, VSX4, VSX3, VSX2, NEON_VFPV4, - VXE2, VXE + VXE2, VXE, + LSX, ] ], [ 'loops_umath_fp.dispatch.h', src_file.process('src/umath/loops_umath_fp.dispatch.c.src'), - [AVX512_SKX] + [X86_V4] ], [ 'loops_unary.dispatch.h', src_file.process('src/umath/loops_unary.dispatch.c.src'), [ ASIMD, NEON, - AVX512_SKX, AVX2, SSE2, + X86_V4, X86_V3, X86_V2, VSX2, - VXE, VX + VXE, VX, + LSX, ] ], [ 'loops_unary_fp.dispatch.h', src_file.process('src/umath/loops_unary_fp.dispatch.c.src'), [ - SSE41, SSE2, + X86_V2, VSX2, ASIMD, NEON, - VXE, VX + VXE, VX, + LSX, ] ], [ 'loops_unary_fp_le.dispatch.h', src_file.process('src/umath/loops_unary_fp_le.dispatch.c.src'), [ - SSE41, SSE2, + X86_V2, VSX2, ASIMD, NEON, + LSX, ] ], [ 'loops_unary_complex.dispatch.h', src_file.process('src/umath/loops_unary_complex.dispatch.c.src'), [ - AVX512F, [AVX2, FMA3], SSE2, + X86_V4, X86_V3, X86_V2, ASIMD, NEON, VSX3, VSX2, VXE, VX, + LSX, ] ], [ 'loops_autovec.dispatch.h', src_file.process('src/umath/loops_autovec.dispatch.c.src'), [ - AVX2, SSE2, + X86_V3, X86_V2, NEON, VSX2, VX, + LSX, + RVV ] ], + [ + 'loops_half.dispatch.h', + src_file.process('src/umath/loops_half.dispatch.c.src'), + [AVX512_SPR, X86_V4] + ], ] mtargets = mod_features.multi_targets( gen_mtargets[0], umath_gen_headers + gen_mtargets[1], @@ -1020,7 +1068,8 @@ foreach gen_mtargets : [ 'src/common', 'src/multiarray', 'src/npymath', - 'src/umath' + 'src/umath', + 'src/highway', ] ) if not is_variable('multiarray_umath_mtargets') @@ -1034,16 +1083,18 @@ endforeach # ------------------------------ src_multiarray_umath_common = [ 'src/common/array_assign.c', + 'src/common/blas_utils.c', 'src/common/gil_utils.c', 
'src/common/mem_overlap.c', 'src/common/npy_argparse.c', 'src/common/npy_hashtable.c', + 'src/common/npy_import.c', 'src/common/npy_longdouble.c', - 'src/common/ucsnarrow.c', 'src/common/ufunc_override.c', 'src/common/numpyos.c', 'src/common/npy_cpu_features.c', 'src/common/npy_cpu_dispatch.c', + 'src/common/npy_sort.c', src_file.process('src/common/templ_common.h.src') ] if have_blas @@ -1055,11 +1106,12 @@ endif src_multiarray = multiarray_gen_headers + [ 'src/multiarray/abstractdtypes.c', - 'src/multiarray/alloc.c', + 'src/multiarray/alloc.cpp', 'src/multiarray/arrayobject.c', 'src/multiarray/array_coercion.c', 'src/multiarray/array_converter.c', 'src/multiarray/array_method.c', + 'src/multiarray/array_api_standard.c', 'src/multiarray/array_assign_scalar.c', 'src/multiarray/array_assign_array.c', 'src/multiarray/arrayfunction_override.c', @@ -1084,7 +1136,7 @@ src_multiarray = multiarray_gen_headers + [ 'src/multiarray/dragon4.c', 'src/multiarray/dtype_transfer.c', 'src/multiarray/dtype_traversal.c', - src_file.process('src/multiarray/einsum.c.src'), + 'src/multiarray/einsum.cpp', src_file.process('src/multiarray/einsum_sumprod.c.src'), 'src/multiarray/public_dtype_api.c', 'src/multiarray/flagsobject.c', @@ -1101,6 +1153,7 @@ src_multiarray = multiarray_gen_headers + [ 'src/multiarray/nditer_constr.c', 'src/multiarray/nditer_pywrap.c', src_file.process('src/multiarray/nditer_templ.c.src'), + 'src/multiarray/npy_static_data.c', 'src/multiarray/number.c', 'src/multiarray/refcount.c', src_file.process('src/multiarray/scalartypes.c.src'), @@ -1108,7 +1161,7 @@ src_multiarray = multiarray_gen_headers + [ 'src/multiarray/scalarapi.c', 'src/multiarray/shape.c', 'src/multiarray/strfuncs.c', - 'src/multiarray/stringdtype/casts.c', + 'src/multiarray/stringdtype/casts.cpp', 'src/multiarray/stringdtype/dtype.c', 'src/multiarray/stringdtype/utf8_utils.c', 'src/multiarray/stringdtype/static_string.c', @@ -1135,6 +1188,7 @@ src_multiarray = multiarray_gen_headers + [ # Remove 
this `arm64_exports.c` file once scipy macos arm64 build correctly # links to the arm64 npymath library, see gh-22673 'src/npymath/arm64_exports.c', + 'src/multiarray/fnv.c', ] src_umath = umath_gen_headers + [ @@ -1145,7 +1199,7 @@ src_umath = umath_gen_headers + [ 'src/umath/ufunc_type_resolution.c', 'src/umath/clip.cpp', 'src/umath/clip.h', - 'src/umath/dispatching.c', + 'src/umath/dispatching.cpp', 'src/umath/extobj.c', 'src/umath/legacy_array_method.c', 'src/umath/override.c', @@ -1195,6 +1249,22 @@ if use_svml endforeach endif +unique_hash_so = static_library( + 'unique_hash', + ['src/multiarray/unique.cpp'], + c_args: c_args_common, + cpp_args: unique_hash_cpp_args, + include_directories: [ + 'include', + 'src/common', + 'src/npymath', + ], + dependencies: [ + py_dep, + np_core_dep, + ], +) + py.extension_module('_multiarray_umath', [ config_h, @@ -1206,7 +1276,8 @@ py.extension_module('_multiarray_umath', src_numpy_api[1], # __multiarray_api.h src_umath_doc_h, npy_math_internal_h, - ] + svml_objects, + ], + objects: svml_objects, c_args: c_args_common, cpp_args: cpp_args_common, include_directories: [ @@ -1215,9 +1286,14 @@ py.extension_module('_multiarray_umath', 'src/multiarray', 'src/npymath', 'src/umath', + 'src/highway' ], - dependencies: [blas_dep], - link_with: [npymath_lib, multiarray_umath_mtargets.static_lib('_multiarray_umath_mtargets')] + highway_lib, + dependencies: [blas_dep, omp], + link_with: [ + npymath_lib, + unique_hash_so, + multiarray_umath_mtargets.static_lib('_multiarray_umath_mtargets') + ] + highway_lib, install: true, subdir: 'numpy/_core', ) @@ -1273,27 +1349,36 @@ py.extension_module('_simd', link_with: [npymath_lib, _simd_mtargets.static_lib('_simd_mtargets')], install: true, subdir: 'numpy/_core', + install_tag: 'tests', ) python_sources = [ '__init__.py', '__init__.pyi', '_add_newdocs.py', + '_add_newdocs.pyi', '_add_newdocs_scalars.py', + '_add_newdocs_scalars.pyi', '_asarray.py', '_asarray.pyi', '_dtype.py', + '_dtype.pyi', 
'_dtype_ctypes.py', + '_dtype_ctypes.pyi', '_exceptions.py', + '_exceptions.pyi', '_internal.py', '_internal.pyi', - '_machar.py', '_methods.py', + '_methods.pyi', + '_simd.pyi', '_string_helpers.py', + '_string_helpers.pyi', '_type_aliases.py', '_type_aliases.pyi', '_ufunc_config.py', '_ufunc_config.pyi', + '_umath_tests.pyi', 'arrayprint.py', 'arrayprint.pyi', 'cversions.py', @@ -1316,6 +1401,9 @@ python_sources = [ 'numerictypes.py', 'numerictypes.pyi', 'overrides.py', + 'overrides.pyi', + 'printoptions.py', + 'printoptions.pyi', 'records.py', 'records.pyi', 'shape_base.py', @@ -1323,6 +1411,7 @@ python_sources = [ 'strings.py', 'strings.pyi', 'umath.py', + 'umath.pyi', ] py.install_sources( diff --git a/numpy/_core/mlib.ini.in b/numpy/_core/mlib.ini.in deleted file mode 100644 index badaa2ae9de4..000000000000 --- a/numpy/_core/mlib.ini.in +++ /dev/null @@ -1,12 +0,0 @@ -[meta] -Name = mlib -Description = Math library used with this version of numpy -Version = 1.0 - -[default] -Libs=@posix_mathlib@ -Cflags= - -[msvc] -Libs=@msvc_mathlib@ -Cflags= diff --git a/numpy/_core/multiarray.py b/numpy/_core/multiarray.py index 77e249a85828..1757270deb62 100644 --- a/numpy/_core/multiarray.py +++ b/numpy/_core/multiarray.py @@ -1,24 +1,31 @@ """ -Create the numpy._core.multiarray namespace for backward compatibility. -In v1.16 the multiarray and umath c-extension modules were merged into -a single _multiarray_umath extension module. So we replicate the old +Create the numpy._core.multiarray namespace for backward compatibility. +In v1.16 the multiarray and umath c-extension modules were merged into +a single _multiarray_umath extension module. So we replicate the old namespace by importing from the extension module. """ import functools -from . import overrides -from . import _multiarray_umath -from ._multiarray_umath import * # noqa: F403 + +from . 
import _multiarray_umath, overrides +from ._multiarray_umath import * + # These imports are needed for backward compatibility, # do not change them. issue gh-15518 # _get_ndarray_c_version is semi-public, on purpose not added to __all__ -from ._multiarray_umath import ( - _flagdict, from_dlpack, _place, _reconstruct, - _vec_string, _ARRAY_API, _monotonicity, _get_ndarray_c_version, - _get_madvise_hugepage, _set_madvise_hugepage, - _get_promotion_state, _set_promotion_state - ) +from ._multiarray_umath import ( # noqa: F401 + _ARRAY_API, + _flagdict, + _get_madvise_hugepage, + _get_ndarray_c_version, + _monotonicity, + _place, + _reconstruct, + _set_madvise_hugepage, + _vec_string, + from_dlpack, +) __all__ = [ '_ARRAY_API', 'ALLOW_THREADS', 'BUFSIZE', 'CLIP', 'DATETIMEUNITS', @@ -39,12 +46,10 @@ 'may_share_memory', 'min_scalar_type', 'ndarray', 'nditer', 'nested_iters', 'normalize_axis_index', 'packbits', 'promote_types', 'putmask', 'ravel_multi_index', 'result_type', 'scalar', 'set_datetimeparse_function', - 'set_legacy_print_mode', 'set_typeDict', 'shares_memory', 'typeinfo', - 'unpackbits', 'unravel_index', 'vdot', 'where', 'zeros', - '_get_promotion_state', '_set_promotion_state'] + 'unpackbits', 'unravel_index', 'vdot', 'where', 'zeros'] -# For backward compatibility, make sure pickle imports +# For backward compatibility, make sure pickle imports # these functions from here _reconstruct.__module__ = 'numpy._core.multiarray' scalar.__module__ = 'numpy._core.multiarray' @@ -68,9 +73,36 @@ nested_iters.__module__ = 'numpy' promote_types.__module__ = 'numpy' zeros.__module__ = 'numpy' -_get_promotion_state.__module__ = 'numpy' -_set_promotion_state.__module__ = 'numpy' normalize_axis_index.__module__ = 'numpy.lib.array_utils' +add_docstring.__module__ = 'numpy.lib' +compare_chararrays.__module__ = 'numpy.char' + + +def _override___module__(): + namespace_names = globals() + for ufunc_name in [ + 'absolute', 'arccos', 'arccosh', 'add', 'arcsin', 'arcsinh', 
'arctan', + 'arctan2', 'arctanh', 'bitwise_and', 'bitwise_count', 'invert', + 'left_shift', 'bitwise_or', 'right_shift', 'bitwise_xor', 'cbrt', + 'ceil', 'conjugate', 'copysign', 'cos', 'cosh', 'deg2rad', 'degrees', + 'divide', 'divmod', 'equal', 'exp', 'exp2', 'expm1', 'fabs', + 'float_power', 'floor', 'floor_divide', 'fmax', 'fmin', 'fmod', + 'frexp', 'gcd', 'greater', 'greater_equal', 'heaviside', 'hypot', + 'isfinite', 'isinf', 'isnan', 'isnat', 'lcm', 'ldexp', 'less', + 'less_equal', 'log', 'log10', 'log1p', 'log2', 'logaddexp', + 'logaddexp2', 'logical_and', 'logical_not', 'logical_or', + 'logical_xor', 'matmul', 'matvec', 'maximum', 'minimum', 'remainder', + 'modf', 'multiply', 'negative', 'nextafter', 'not_equal', 'positive', + 'power', 'rad2deg', 'radians', 'reciprocal', 'rint', 'sign', 'signbit', + 'sin', 'sinh', 'spacing', 'sqrt', 'square', 'subtract', 'tan', 'tanh', + 'trunc', 'vecdot', 'vecmat', + ]: + ufunc = namespace_names[ufunc_name] + ufunc.__module__ = "numpy" + ufunc.__qualname__ = ufunc_name + + +_override___module__() # We can't verify dispatcher signatures because NumPy's C functions don't @@ -82,11 +114,20 @@ @array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like) def empty_like( - prototype, dtype=None, order=None, subok=None, shape=None, *, device=None + prototype, dtype=None, order="K", subok=True, shape=None, *, device=None ): """ - empty_like(prototype, dtype=None, order='K', subok=True, shape=None, *, - device=None) + empty_like( + prototype, + /, + dtype=None, + order='K', + subok=True, + shape=None, + *, + device=None, + ) + -- Return a new array with the same shape and type as a given array. @@ -97,15 +138,11 @@ def empty_like( of the returned array. dtype : data-type, optional Overrides the data type of the result. - - .. versionadded:: 1.6.0 order : {'C', 'F', 'A', or 'K'}, optional Overrides the memory layout of the result. 
'C' means C-order, 'F' means F-order, 'A' means 'F' if `prototype` is Fortran contiguous, 'C' otherwise. 'K' means match the layout of `prototype` as closely as possible. - - .. versionadded:: 1.6.0 subok : bool, optional. If True, then the newly created array will use the sub-class type of `prototype`, otherwise it will be a base-class array. Defaults @@ -114,8 +151,6 @@ def empty_like( Overrides the shape of the result. If order='K' and the number of dimensions is unchanged, will try to keep order, otherwise, order='C' is implied. - - .. versionadded:: 1.17.0 device : str, optional The device on which to place the created array. Default: None. For Array-API interoperability only, so must be ``"cpu"`` if passed. @@ -145,13 +180,14 @@ def empty_like( Examples -------- + >>> import numpy as np >>> a = ([1,2,3], [4,5,6]) # a is array-like >>> np.empty_like(a) array([[-1073741821, -1073741821, 3], # uninitialized [ 0, 0, -1073741821]]) >>> a = np.array([[1., 2., 3.],[4.,5.,6.]]) >>> np.empty_like(a) - array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000], # uninit + array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000], # uninitialized [ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]]) """ @@ -159,15 +195,18 @@ def empty_like( @array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate) -def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None): +def concatenate(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"): """ concatenate( - (a1, a2, ...), - axis=0, - out=None, - dtype=None, - casting="same_kind" + arrays, + /, + axis=0, + out=None, + *, + dtype=None, + casting="same_kind", ) + -- Join a sequence of arrays along an existing axis. @@ -192,7 +231,7 @@ def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None): casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional Controls what kind of data casting may occur. Defaults to 'same_kind'. 
For a description of the options, please see :term:`casting`. - + .. versionadded:: 1.20.0 Returns @@ -226,6 +265,7 @@ def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None): Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> b = np.array([[5, 6]]) >>> np.concatenate((a, b), axis=0) @@ -267,7 +307,7 @@ def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.inner) -def inner(a, b): +def inner(a, b, /): """ inner(a, b, /) @@ -299,6 +339,7 @@ def inner(a, b): -------- tensordot : Sum products over arbitrary axes. dot : Generalised matrix product, using second last dimension of `b`. + vecdot : Vector dot product of two arrays. einsum : Einstein summation convention. Notes @@ -324,6 +365,7 @@ def inner(a, b): -------- Ordinary inner product for vectors: + >>> import numpy as np >>> a = np.array([1,2,3]) >>> b = np.array([0,1,0]) >>> np.inner(a, b) @@ -359,7 +401,7 @@ def inner(a, b): @array_function_from_c_func_and_dispatcher(_multiarray_umath.where) -def where(condition, x=None, y=None): +def where(condition, x=None, y=None, /): """ where(condition, [x, y], /) @@ -400,6 +442,7 @@ def where(condition, x=None, y=None): Examples -------- + >>> import numpy as np >>> a = np.arange(10) >>> a array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) @@ -434,7 +477,7 @@ def where(condition, x=None, y=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.lexsort) -def lexsort(keys, axis=None): +def lexsort(keys, axis=-1): """ lexsort(keys, axis=-1) @@ -472,6 +515,7 @@ def lexsort(keys, axis=None): -------- Sort names: first by surname, then by name. 
+ >>> import numpy as np >>> surnames = ('Hertz', 'Galilei', 'Hertz') >>> first_names = ('Heinrich', 'Galileo', 'Gustav') >>> ind = np.lexsort((first_names, surnames)) @@ -554,7 +598,7 @@ def lexsort(keys, axis=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.can_cast) -def can_cast(from_, to, casting=None): +def can_cast(from_, to, casting="safe"): """ can_cast(from_, to, casting='safe') @@ -584,16 +628,6 @@ def can_cast(from_, to, casting=None): Notes ----- - .. versionchanged:: 1.17.0 - Casting between a simple data type and a structured one is possible only - for "unsafe" casting. Casting to multiple fields is allowed, but - casting from multiple fields is not. - - .. versionchanged:: 1.9.0 - Casting from numeric to string types in 'safe' casting mode requires - that the string dtype length is long enough to store the maximum - integer/float value converted. - .. versionchanged:: 2.0 This function does not support Python scalars anymore and does not apply any value-based logic for 0-D arrays and NumPy scalars. @@ -606,6 +640,7 @@ def can_cast(from_, to, casting=None): -------- Basic examples + >>> import numpy as np >>> np.can_cast(np.int32, np.int64) True >>> np.can_cast(np.float64, complex) @@ -625,7 +660,7 @@ def can_cast(from_, to, casting=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type) -def min_scalar_type(a): +def min_scalar_type(a, /): """ min_scalar_type(a, /) @@ -646,16 +681,13 @@ def min_scalar_type(a): out : dtype The minimal data type. - Notes - ----- - .. 
versionadded:: 1.6.0 - See Also -------- result_type, promote_types, dtype, can_cast Examples -------- + >>> import numpy as np >>> np.min_scalar_type(10) dtype('uint8') @@ -668,7 +700,7 @@ def min_scalar_type(a): >>> np.min_scalar_type(1e50) dtype('float64') - >>> np.min_scalar_type(np.arange(4,dtype='f8')) + >>> np.min_scalar_type(np.arange(4, dtype=np.float64)) dtype('float64') """ @@ -681,19 +713,7 @@ def result_type(*arrays_and_dtypes): result_type(*arrays_and_dtypes) Returns the type that results from applying the NumPy - type promotion rules to the arguments. - - Type promotion in NumPy works similarly to the rules in languages - like C++, with some slight differences. When both scalars and - arrays are used, the array's type takes precedence and the actual value - of the scalar is taken into account. - - For example, calculating 3*a, where a is an array of 32-bit floats, - intuitively should result in a 32-bit float output. If the 3 is a - 32-bit integer, the NumPy rules indicate it can't convert losslessly - into a 32-bit float, so a 64-bit float should be the result type. - By examining the value of the constant, '3', we see that it fits in - an 8-bit integer, which can be cast losslessly into the 32-bit float. + :ref:`type promotion ` rules to the arguments. Parameters ---------- @@ -709,35 +729,13 @@ def result_type(*arrays_and_dtypes): -------- dtype, promote_types, min_scalar_type, can_cast - Notes - ----- - .. versionadded:: 1.6.0 - - The specific algorithm used is as follows. - - Categories are determined by first checking which of boolean, - integer (int/uint), or floating point (float/complex) the maximum - kind of all the arrays and the scalars are. - - If there are only scalars or the maximum category of the scalars - is higher than the maximum category of the arrays, - the data types are combined with :func:`promote_types` - to produce the return value. 
- - Otherwise, `min_scalar_type` is called on each scalar, and - the resulting data types are all combined with :func:`promote_types` - to produce the return value. - - The set of int values is not a subset of the uint values for types - with the same number of bits, something not reflected in - :func:`min_scalar_type`, but handled as a special case in `result_type`. - Examples -------- - >>> np.result_type(3, np.arange(7, dtype='i1')) + >>> import numpy as np + >>> np.result_type(3, np.arange(7, dtype=np.int8)) dtype('int8') - >>> np.result_type('i4', 'c8') + >>> np.result_type(np.int32, np.complex64) dtype('complex128') >>> np.result_type(3.0, -2) @@ -806,6 +804,7 @@ def dot(a, b, out=None): See Also -------- vdot : Complex-conjugating dot product. + vecdot : Vector dot product of two arrays. tensordot : Sum products over arbitrary axes. einsum : Einstein summation convention. matmul : '@' operator as method with out parameter. @@ -813,6 +812,7 @@ def dot(a, b, out=None): Examples -------- + >>> import numpy as np >>> np.dot(3, 4) 12 @@ -841,19 +841,23 @@ def dot(a, b, out=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot) -def vdot(a, b): - """ +def vdot(a, b, /): + r""" vdot(a, b, /) Return the dot product of two vectors. - The vdot(`a`, `b`) function handles complex numbers differently than - dot(`a`, `b`). If the first argument is complex the complex conjugate - of the first argument is used for the calculation of the dot product. + The `vdot` function handles complex numbers differently than `dot`: + if the first argument is complex, it is replaced by its complex conjugate + in the dot product calculation. `vdot` also handles multidimensional + arrays differently than `dot`: it does not perform a matrix product, but + flattens the arguments to 1-D arrays before taking a vector dot product. 
- Note that `vdot` handles multidimensional arrays differently than `dot`: - it does *not* perform a matrix product, but flattens input arguments - to 1-D vectors first. Consequently, it should only be used for vectors. + Consequently, when the arguments are 2-D arrays of the same shape, this + function effectively returns their + `Frobenius inner product `_ + (also known as the *trace inner product* or the *standard inner product* + on a vector space of matrices). Parameters ---------- @@ -876,6 +880,7 @@ def vdot(a, b): Examples -------- + >>> import numpy as np >>> a = np.array([1+2j,3+4j]) >>> b = np.array([5+6j,7+8j]) >>> np.vdot(a, b) @@ -899,7 +904,7 @@ def vdot(a, b): @array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount) -def bincount(x, weights=None, minlength=None): +def bincount(x, /, weights=None, minlength=0): """ bincount(x, /, weights=None, minlength=0) @@ -923,8 +928,6 @@ def bincount(x, weights=None, minlength=None): minlength : int, optional A minimum number of bins for the output array. - .. versionadded:: 1.6.0 - Returns ------- out : ndarray of ints @@ -945,6 +948,7 @@ def bincount(x, weights=None, minlength=None): Examples -------- + >>> import numpy as np >>> np.bincount(np.arange(5)) array([1, 1, 1, 1, 1]) >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7])) @@ -957,7 +961,7 @@ def bincount(x, weights=None, minlength=None): The input array needs to be of integer dtype, otherwise a TypeError is raised: - >>> np.bincount(np.arange(5, dtype=float)) + >>> np.bincount(np.arange(5, dtype=np.float64)) Traceback (most recent call last): ... 
TypeError: Cannot cast array data from dtype('float64') to dtype('int64') @@ -976,7 +980,7 @@ def bincount(x, weights=None, minlength=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.ravel_multi_index) -def ravel_multi_index(multi_index, dims, mode=None, order=None): +def ravel_multi_index(multi_index, dims, mode="raise", order="C"): """ ravel_multi_index(multi_index, dims, mode='raise', order='C') @@ -1014,12 +1018,9 @@ def ravel_multi_index(multi_index, dims, mode=None, order=None): -------- unravel_index - Notes - ----- - .. versionadded:: 1.6.0 - Examples -------- + >>> import numpy as np >>> arr = np.array([[3,6,6],[4,5,1]]) >>> np.ravel_multi_index(arr, (7,6)) array([22, 41, 37]) @@ -1037,7 +1038,7 @@ def ravel_multi_index(multi_index, dims, mode=None, order=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index) -def unravel_index(indices, shape=None, order=None): +def unravel_index(indices, shape, order="C"): """ unravel_index(indices, shape, order='C') @@ -1052,16 +1053,10 @@ def unravel_index(indices, shape=None, order=None): this function accepted just one index value. shape : tuple of ints The shape of the array to use for unraveling ``indices``. - - .. versionchanged:: 1.16.0 - Renamed from ``dims`` to ``shape``. - order : {'C', 'F'}, optional Determines whether the indices should be viewed as indexing in row-major (C-style) or column-major (Fortran-style) order. - .. 
versionadded:: 1.6.0 - Returns ------- unraveled_coords : tuple of ndarray @@ -1074,6 +1069,7 @@ def unravel_index(indices, shape=None, order=None): Examples -------- + >>> import numpy as np >>> np.unravel_index([22, 41, 37], (7,6)) (array([3, 6, 6]), array([4, 5, 1])) >>> np.unravel_index([31, 41, 13], (7,6), order='F') @@ -1087,7 +1083,7 @@ def unravel_index(indices, shape=None, order=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto) -def copyto(dst, src, casting=None, where=None): +def copyto(dst, src, casting="same_kind", where=True): """ copyto(dst, src, casting='same_kind', where=True) @@ -1096,8 +1092,6 @@ def copyto(dst, src, casting=None, where=None): Raises a TypeError if the `casting` rule is violated, and if `where` is provided, it selects which elements to copy. - .. versionadded:: 1.7.0 - Parameters ---------- dst : ndarray @@ -1120,6 +1114,7 @@ def copyto(dst, src, casting=None, where=None): Examples -------- + >>> import numpy as np >>> A = np.array([4, 5, 6]) >>> B = [1, 2, 3] >>> np.copyto(A, B) @@ -1140,7 +1135,7 @@ def copyto(dst, src, casting=None, where=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask) def putmask(a, /, mask, values): """ - putmask(a, mask, values) + putmask(a, /, mask, values) Changes elements of an array based on conditional and input values. @@ -1165,6 +1160,7 @@ def putmask(a, /, mask, values): Examples -------- + >>> import numpy as np >>> x = np.arange(6).reshape(2, 3) >>> np.putmask(x, x>2, x**2) >>> x @@ -1183,7 +1179,7 @@ def putmask(a, /, mask, values): @array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits) -def packbits(a, axis=None, bitorder='big'): +def packbits(a, /, axis=None, bitorder="big"): """ packbits(a, /, axis=None, bitorder='big') @@ -1205,8 +1201,6 @@ def packbits(a, axis=None, bitorder='big'): reverse the order so ``[1, 1, 0, 0, 0, 0, 0, 0] => 3``. Defaults to 'big'. - .. 
versionadded:: 1.17.0 - Returns ------- packed : ndarray @@ -1222,6 +1216,7 @@ def packbits(a, axis=None, bitorder='big'): Examples -------- + >>> import numpy as np >>> a = np.array([[[1,0,1], ... [0,1,0]], ... [[1,1,0], @@ -1241,7 +1236,7 @@ def packbits(a, axis=None, bitorder='big'): @array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits) -def unpackbits(a, axis=None, count=None, bitorder='big'): +def unpackbits(a, /, axis=None, count=None, bitorder="big"): """ unpackbits(a, /, axis=None, count=None, bitorder='big') @@ -1268,17 +1263,12 @@ def unpackbits(a, axis=None, count=None, bitorder='big'): default). Counts larger than the available number of bits will add zero padding to the output. Negative counts must not exceed the available number of bits. - - .. versionadded:: 1.17.0 - bitorder : {'big', 'little'}, optional The order of the returned bits. 'big' will mimic bin(val), ``3 = 0b00000011 => [0, 0, 0, 0, 0, 0, 1, 1]``, 'little' will reverse the order to ``[1, 1, 0, 0, 0, 0, 0, 0]``. Defaults to 'big'. - .. versionadded:: 1.17.0 - Returns ------- unpacked : ndarray, uint8 type @@ -1291,6 +1281,7 @@ def unpackbits(a, axis=None, count=None, bitorder='big'): Examples -------- + >>> import numpy as np >>> a = np.array([[2], [7], [23]], dtype=np.uint8) >>> a array([[ 2], @@ -1325,16 +1316,16 @@ def unpackbits(a, axis=None, count=None, bitorder='big'): @array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory) -def shares_memory(a, b, max_work=None): +def shares_memory(a, b, /, max_work=-1): """ - shares_memory(a, b, /, max_work=None) + shares_memory(a, b, /, max_work=-1) Determine if two arrays share memory. .. warning:: This function can be exponentially slow for some inputs, unless - `max_work` is set to a finite number or ``MAY_SHARE_BOUNDS``. + `max_work` is set to zero or a positive integer. If in doubt, use `numpy.may_share_memory` instead. 
Parameters @@ -1346,12 +1337,13 @@ def shares_memory(a, b, max_work=None): of candidate solutions to consider). The following special values are recognized: - max_work=MAY_SHARE_EXACT (default) + max_work=-1 (default) The problem is solved exactly. In this case, the function returns True only if there is an element shared between the arrays. Finding the exact solution may take extremely long in some cases. - max_work=MAY_SHARE_BOUNDS + max_work=0 Only the memory bounds of a and b are checked. + This is equivalent to using ``may_share_memory()``. Raises ------ @@ -1368,6 +1360,7 @@ def shares_memory(a, b, max_work=None): Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 3, 4]) >>> np.shares_memory(x, np.array([5, 6, 7])) False @@ -1402,9 +1395,9 @@ def shares_memory(a, b, max_work=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory) -def may_share_memory(a, b, max_work=None): +def may_share_memory(a, b, /, max_work=0): """ - may_share_memory(a, b, /, max_work=None) + may_share_memory(a, b, /, max_work=0) Determine if two arrays might share memory @@ -1432,6 +1425,7 @@ def may_share_memory(a, b, max_work=None): Examples -------- + >>> import numpy as np >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9])) False >>> x = np.zeros([3, 4]) @@ -1443,20 +1437,18 @@ def may_share_memory(a, b, max_work=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.is_busday) -def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None): +def is_busday(dates, weekmask="1111100", holidays=None, busdaycal=None, out=None): """ is_busday( - dates, - weekmask='1111100', - holidays=None, - busdaycal=None, - out=None + dates, + weekmask='1111100', + holidays=None, + busdaycal=None, + out=None, ) Calculates which of the given dates are valid days, and which are not. - .. 
versionadded:: 1.7.0 - Parameters ---------- dates : array_like of datetime64[D] @@ -1494,6 +1486,7 @@ def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None): Examples -------- + >>> import numpy as np >>> # The weekdays are Friday, Saturday, and Monday ... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'], ... holidays=['2011-07-01', '2011-07-04', '2011-07-17']) @@ -1503,25 +1496,23 @@ def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_offset) -def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None, +def busday_offset(dates, offsets, roll="raise", weekmask="1111100", holidays=None, busdaycal=None, out=None): """ busday_offset( - dates, - offsets, - roll='raise', - weekmask='1111100', - holidays=None, - busdaycal=None, - out=None + dates, + offsets, + roll='raise', + weekmask='1111100', + holidays=None, + busdaycal=None, + out=None, ) First adjusts the date to fall on a valid day according to the ``roll`` rule, then applies offsets to the given dates counted in valid days. - .. versionadded:: 1.7.0 - Parameters ---------- dates : array_like of datetime64[D] @@ -1578,43 +1569,44 @@ def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None, Examples -------- + >>> import numpy as np >>> # First business day in October 2011 (not accounting for holidays) ... np.busday_offset('2011-10', 0, roll='forward') - numpy.datetime64('2011-10-03') + np.datetime64('2011-10-03') >>> # Last business day in February 2012 (not accounting for holidays) ... np.busday_offset('2012-03', -1, roll='forward') - numpy.datetime64('2012-02-29') + np.datetime64('2012-02-29') >>> # Third Wednesday in January 2011 ... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed') - numpy.datetime64('2011-01-19') + np.datetime64('2011-01-19') >>> # 2012 Mother's Day in Canada and the U.S. ... 
np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun') - numpy.datetime64('2012-05-13') + np.datetime64('2012-05-13') >>> # First business day on or after a date ... np.busday_offset('2011-03-20', 0, roll='forward') - numpy.datetime64('2011-03-21') + np.datetime64('2011-03-21') >>> np.busday_offset('2011-03-22', 0, roll='forward') - numpy.datetime64('2011-03-22') + np.datetime64('2011-03-22') >>> # First business day after a date ... np.busday_offset('2011-03-20', 1, roll='backward') - numpy.datetime64('2011-03-21') + np.datetime64('2011-03-21') >>> np.busday_offset('2011-03-22', 1, roll='backward') - numpy.datetime64('2011-03-23') + np.datetime64('2011-03-23') """ return (dates, offsets, weekmask, holidays, out) @array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count) -def busday_count(begindates, enddates, weekmask=None, holidays=None, +def busday_count(begindates, enddates, weekmask="1111100", holidays=(), busdaycal=None, out=None): """ busday_count( - begindates, - enddates, - weekmask='1111100', - holidays=[], - busdaycal=None, + begindates, + enddates, + weekmask='1111100', + holidays=[], + busdaycal=None, out=None ) @@ -1624,8 +1616,6 @@ def busday_count(begindates, enddates, weekmask=None, holidays=None, If ``enddates`` specifies a date value that is earlier than the corresponding ``begindates`` date value, the count will be negative. - .. versionadded:: 1.7.0 - Parameters ---------- begindates : array_like of datetime64[D] @@ -1667,6 +1657,7 @@ def busday_count(begindates, enddates, weekmask=None, holidays=None, Examples -------- + >>> import numpy as np >>> # Number of weekdays in January 2011 ... 
np.busday_count('2011-01', '2011-02') 21 @@ -1680,9 +1671,8 @@ def busday_count(begindates, enddates, weekmask=None, holidays=None, return (begindates, enddates, weekmask, holidays, out) -@array_function_from_c_func_and_dispatcher( - _multiarray_umath.datetime_as_string) -def datetime_as_string(arr, unit=None, timezone=None, casting=None): +@array_function_from_c_func_and_dispatcher(_multiarray_umath.datetime_as_string) +def datetime_as_string(arr, unit=None, timezone="naive", casting="same_kind"): """ datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind') @@ -1693,7 +1683,7 @@ def datetime_as_string(arr, unit=None, timezone=None, casting=None): arr : array_like of datetime64 The array of UTC timestamps to format. unit : str - One of None, 'auto', or + One of None, 'auto', or a :ref:`datetime unit `. timezone : {'naive', 'UTC', 'local'} or tzinfo Timezone information to use when displaying the datetime. If 'UTC', @@ -1710,7 +1700,8 @@ def datetime_as_string(arr, unit=None, timezone=None, casting=None): Examples -------- - >>> import pytz + >>> import numpy as np + >>> from zoneinfo import ZoneInfo >>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]') >>> d array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30', @@ -1723,9 +1714,9 @@ def datetime_as_string(arr, unit=None, timezone=None, casting=None): '2002-10-27T07:30Z'], dtype='>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern')) + >>> np.datetime_as_string(d, timezone=ZoneInfo('US/Eastern')) array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400', '2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='=3.12 _CastingKind, - _ModeKind, - _SupportsBuffer, - _IOProtocol, _CopyMode, + _ModeKind, _NDIterFlagsKind, - _NDIterOpFlagsKind, + _NDIterFlagsOp, + _OrderCF, + _OrderKACF, + _SupportsFileMethods, + broadcast, + complexfloating, + correlate, + count_nonzero, + datetime64, + dtype, + einsum as c_einsum, + float64, + floating, + from_dlpack, + int_, + interp, + 
intp, + matmul, + ndarray, + signedinteger, + str_, + timedelta64, + ufunc, + uint8, + unsignedinteger, + vecdot, ) - from numpy._typing import ( - # Shapes - _ShapeLike, - - # DTypes + ArrayLike, DTypeLike, - _DTypeLike, - - # Arrays NDArray, - ArrayLike, + _AnyShape, _ArrayLike, - _SupportsArrayFunc, - _NestedSequence, _ArrayLikeBool_co, - _ArrayLikeUInt_co, - _ArrayLikeInt_co, - _ArrayLikeFloat_co, + _ArrayLikeBytes_co, _ArrayLikeComplex_co, - _ArrayLikeTD64_co, _ArrayLikeDT64_co, + _ArrayLikeFloat_co, + _ArrayLikeInt, + _ArrayLikeInt_co, _ArrayLikeObject_co, _ArrayLikeStr_co, - _ArrayLikeBytes_co, - _ScalarLike_co, - _IntLike_co, + _ArrayLikeTD64_co, + _ArrayLikeUInt_co, + _DT64Codes, + _DTypeLike, _FloatLike_co, + _IntLike_co, + _NestedSequence, + _ScalarLike_co, + _Shape, + _ShapeLike, + _SupportsArrayFunc, + _SupportsDType, _TD64Like_co, ) +from numpy._typing._ufunc import ( + _2PTuple, + _PyFunc_Nin1_Nout1, + _PyFunc_Nin1P_Nout2P, + _PyFunc_Nin2_Nout1, + _PyFunc_Nin3P_Nout1, +) -_T_co = TypeVar("_T_co", covariant=True) -_T_contra = TypeVar("_T_contra", contravariant=True) -_SCT = TypeVar("_SCT", bound=generic) -_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) +__all__ = [ + "_ARRAY_API", + "ALLOW_THREADS", + "BUFSIZE", + "CLIP", + "DATETIMEUNITS", + "ITEM_HASOBJECT", + "ITEM_IS_POINTER", + "LIST_PICKLE", + "MAXDIMS", + "MAY_SHARE_BOUNDS", + "MAY_SHARE_EXACT", + "NEEDS_INIT", + "NEEDS_PYAPI", + "RAISE", + "USE_GETITEM", + "USE_SETITEM", + "WRAP", + "_flagdict", + "from_dlpack", + "_place", + "_reconstruct", + "_vec_string", + "_monotonicity", + "add_docstring", + "arange", + "array", + "asarray", + "asanyarray", + "ascontiguousarray", + "asfortranarray", + "bincount", + "broadcast", + "busday_count", + "busday_offset", + "busdaycalendar", + "can_cast", + "compare_chararrays", + "concatenate", + "copyto", + "correlate", + "correlate2", + "count_nonzero", + "c_einsum", + "datetime_as_string", + "datetime_data", + "dot", + "dragon4_positional", + 
"dragon4_scientific", + "dtype", + "empty", + "empty_like", + "error", + "flagsobj", + "flatiter", + "format_longfloat", + "frombuffer", + "fromfile", + "fromiter", + "fromstring", + "get_handler_name", + "get_handler_version", + "inner", + "interp", + "interp_complex", + "is_busday", + "lexsort", + "matmul", + "vecdot", + "may_share_memory", + "min_scalar_type", + "ndarray", + "nditer", + "nested_iters", + "normalize_axis_index", + "packbits", + "promote_types", + "putmask", + "ravel_multi_index", + "result_type", + "scalar", + "set_datetimeparse_function", + "set_typeDict", + "shares_memory", + "typeinfo", + "unpackbits", + "unravel_index", + "vdot", + "where", + "zeros", +] + +_ArrayT_co = TypeVar("_ArrayT_co", bound=np.ndarray, default=np.ndarray, covariant=True) + +type _Array[ShapeT: _Shape, ScalarT: np.generic] = ndarray[ShapeT, dtype[ScalarT]] +type _Array1D[ScalarT: np.generic] = ndarray[tuple[int], dtype[ScalarT]] # Valid time units -_UnitKind = L[ +type _UnitKind = L[ "Y", "M", "D", @@ -99,7 +199,7 @@ _UnitKind = L[ "fs", "as", ] -_RollKind = L[ # `raise` is deliberately excluded +type _RollKind = L[ # `raise` is deliberately excluded "nat", "forward", "following", @@ -109,876 +209,1129 @@ _RollKind = L[ # `raise` is deliberately excluded "modifiedpreceding", ] -class _SupportsLenAndGetItem(Protocol[_T_contra, _T_co]): - def __len__(self) -> int: ... - def __getitem__(self, key: _T_contra, /) -> _T_co: ... +type _ArangeScalar = np.integer | np.floating | np.datetime64 | np.timedelta64 + +# The datetime functions perform unsafe casts to `datetime64[D]`, +# so a lot of different argument types are allowed here +type _ToDates = dt.date | _NestedSequence[dt.date] +type _ToDeltas = dt.timedelta | _NestedSequence[dt.timedelta] + +type _BitOrder = L["big", "little"] +type _MaxWork = L[-1, 0] + +@type_check_only +class _SupportsArray[ArrayT_co: np.ndarray](Protocol): + def __array__(self, /) -> ArrayT_co: ... 
+ +# using `Final` or `TypeAlias` will break stubtest +error = Exception + +# from ._multiarray_umath +ITEM_HASOBJECT: Final = 1 +LIST_PICKLE: Final = 2 +ITEM_IS_POINTER: Final = 4 +NEEDS_INIT: Final = 8 +NEEDS_PYAPI: Final = 16 +USE_GETITEM: Final = 32 +USE_SETITEM: Final = 64 +DATETIMEUNITS: Final[CapsuleType] = ... +_ARRAY_API: Final[CapsuleType] = ... + +_flagdict: Final[dict[str, int]] = ... +_monotonicity: Final[Callable[..., object]] = ... +_place: Final[Callable[..., object]] = ... +_reconstruct: Final[Callable[..., object]] = ... +_vec_string: Final[Callable[..., object]] = ... +correlate2: Final[Callable[..., object]] = ... +dragon4_positional: Final[Callable[..., object]] = ... +dragon4_scientific: Final[Callable[..., object]] = ... +interp_complex: Final[Callable[..., object]] = ... +set_datetimeparse_function: Final[Callable[..., object]] = ... -__all__: list[str] +def get_handler_name(a: NDArray[Any] = ..., /) -> str | None: ... +def get_handler_version(a: NDArray[Any] = ..., /) -> int | None: ... +def format_longfloat(x: np.longdouble, precision: int) -> str: ... +def scalar[DTypeT: np.dtype](dtype: DTypeT, object: bytes | object = ...) -> ndarray[tuple[()], DTypeT]: ... +def set_typeDict(dict_: dict[str, np.dtype], /) -> None: ... + +typeinfo: Final[dict[str, np.dtype[np.generic]]] = ... 
ALLOW_THREADS: Final[int] # 0 or 1 (system-specific) -BUFSIZE: L[8192] -CLIP: L[0] -WRAP: L[1] -RAISE: L[2] -MAXDIMS: L[32] -MAY_SHARE_BOUNDS: L[0] -MAY_SHARE_EXACT: L[-1] -tracemalloc_domain: L[389047] +BUFSIZE: Final = 8_192 +CLIP: Final = 0 +WRAP: Final = 1 +RAISE: Final = 2 +MAXDIMS: Final = 64 +MAY_SHARE_BOUNDS: Final = 0 +MAY_SHARE_EXACT: Final = -1 +tracemalloc_domain: Final = 389_047 -@overload -def empty_like( - prototype: _ArrayType, - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike = ..., +# keep in sync with zeros (below) and ones (`_core/numeric.pyi`) +@overload # 1d, float64 default +def empty( + shape: SupportsIndex, + dtype: None = None, + order: _OrderCF = "C", *, - device: None | L["cpu"] = ..., -) -> _ArrayType: ... -@overload -def empty_like( - prototype: _ArrayLike[_SCT], - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike = ..., + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.float64]: ... +@overload # 1d, specific dtype +def empty[DTypeT: np.dtype]( + shape: SupportsIndex, + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", *, - device: None | L["cpu"] = ..., -) -> NDArray[_SCT]: ... -@overload -def empty_like( - prototype: object, - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike = ..., + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> ndarray[tuple[int], DTypeT]: ... +@overload # 1d, specific scalar type +def empty[ScalarT: np.generic]( + shape: SupportsIndex, + dtype: type[ScalarT], + order: _OrderCF = "C", *, - device: None | L["cpu"] = ..., -) -> NDArray[Any]: ... 
-@overload -def empty_like( - prototype: Any, - dtype: _DTypeLike[_SCT], - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike = ..., + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[ScalarT]: ... +@overload # 1d, unknown dtype +def empty( + shape: SupportsIndex, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", *, - device: None | L["cpu"] = ..., -) -> NDArray[_SCT]: ... -@overload -def empty_like( - prototype: Any, - dtype: DTypeLike, - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike = ..., + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[Incomplete]: ... +@overload # known shape, float64 default +def empty[ShapeT: _Shape]( + shape: ShapeT, + dtype: None = None, + order: _OrderCF = "C", *, - device: None | L["cpu"] = ..., -) -> NDArray[Any]: ... - -@overload -def array( - object: _ArrayType, - dtype: None = ..., + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[ShapeT, float64]: ... +@overload # known shape, specific dtype +def empty[ShapeT: _Shape, DTypeT: np.dtype]( + shape: ShapeT, + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", *, - copy: None | bool | _CopyMode = ..., - order: _OrderKACF = ..., - subok: L[True], - ndmin: int = ..., - like: None | _SupportsArrayFunc = ..., -) -> _ArrayType: ... -@overload -def array( - object: _ArrayLike[_SCT], - dtype: None = ..., + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> ndarray[ShapeT, DTypeT]: ... +@overload # known shape, specific scalar type +def empty[ShapeT: _Shape, ScalarT: np.generic]( + shape: ShapeT, + dtype: type[ScalarT], + order: _OrderCF = "C", *, - copy: None | bool | _CopyMode = ..., - order: _OrderKACF = ..., - subok: bool = ..., - ndmin: int = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... 
-@overload -def array( - object: object, - dtype: None = ..., + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[ShapeT, ScalarT]: ... +@overload # known shape, unknown dtype +def empty[ShapeT: _Shape]( + shape: ShapeT, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", *, - copy: None | bool | _CopyMode = ..., - order: _OrderKACF = ..., - subok: bool = ..., - ndmin: int = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[Any]: ... -@overload -def array( - object: Any, - dtype: _DTypeLike[_SCT], + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[ShapeT, Incomplete]: ... +@overload # unknown shape, float64 default +def empty( + shape: _ShapeLike, + dtype: None = None, + order: _OrderCF = "C", *, - copy: None | bool | _CopyMode = ..., - order: _OrderKACF = ..., - subok: bool = ..., - ndmin: int = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... -@overload -def array( - object: Any, - dtype: DTypeLike, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[float64]: ... +@overload # unknown shape, specific dtype +def empty[DTypeT: np.dtype]( + shape: _ShapeLike, + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", *, - copy: None | bool | _CopyMode = ..., - order: _OrderKACF = ..., - subok: bool = ..., - ndmin: int = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[Any]: ... + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> ndarray[_AnyShape, DTypeT]: ... +@overload # unknown shape, specific scalar type +def empty[ScalarT: np.generic]( + shape: _ShapeLike, + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[ScalarT]: ... 
+@overload # unknown shape, unknown dtype +def empty( + shape: _ShapeLike, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[Incomplete]: ... -@overload +# keep in sync with empty (above) and ones (`_core/numeric.pyi`) +@overload # 1d, float64 default +def zeros( + shape: SupportsIndex, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.float64]: ... +@overload # 1d, specific dtype +def zeros[DTypeT: np.dtype]( + shape: SupportsIndex, + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> ndarray[tuple[int], DTypeT]: ... +@overload # 1d, specific scalar type +def zeros[ScalarT: np.generic]( + shape: SupportsIndex, + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[ScalarT]: ... +@overload # 1d, unknown dtype +def zeros( + shape: SupportsIndex, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[Incomplete]: ... +@overload # known shape, float64 default +def zeros[ShapeT: _Shape]( + shape: ShapeT, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[ShapeT, float64]: ... +@overload # known shape, specific dtype +def zeros[ShapeT: _Shape, DTypeT: np.dtype]( + shape: ShapeT, + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> ndarray[ShapeT, DTypeT]: ... 
+@overload # known shape, specific scalar type +def zeros[ShapeT: _Shape, ScalarT: np.generic]( + shape: ShapeT, + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[ShapeT, ScalarT]: ... +@overload # known shape, unknown dtype +def zeros[ShapeT: _Shape]( + shape: ShapeT, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[ShapeT, Incomplete]: ... +@overload # unknown shape, float64 default def zeros( shape: _ShapeLike, - dtype: None = ..., - order: _OrderCF = ..., + dtype: None = None, + order: _OrderCF = "C", *, - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[float64]: ... -@overload -def zeros( +@overload # unknown shape, specific dtype +def zeros[DTypeT: np.dtype]( shape: _ShapeLike, - dtype: _DTypeLike[_SCT], - order: _OrderCF = ..., + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", *, - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... -@overload + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> ndarray[_AnyShape, DTypeT]: ... +@overload # unknown shape, specific scalar type +def zeros[ScalarT: np.generic]( + shape: _ShapeLike, + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[ScalarT]: ... +@overload # unknown shape, unknown dtype def zeros( shape: _ShapeLike, - dtype: DTypeLike, - order: _OrderCF = ..., + dtype: DTypeLike | None = None, + order: _OrderCF = "C", *, - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[Any]: ... + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[Incomplete]: ... 
+# @overload -def empty( - shape: _ShapeLike, - dtype: None = ..., - order: _OrderCF = ..., +def empty_like[ArrayT: np.ndarray]( + prototype: ArrayT, + /, + dtype: None = None, + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, *, - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[float64]: ... + device: L["cpu"] | None = None, +) -> ArrayT: ... @overload -def empty( - shape: _ShapeLike, - dtype: _DTypeLike[_SCT], - order: _OrderCF = ..., +def empty_like[ScalarT: np.generic]( + prototype: _ArrayLike[ScalarT], + /, + dtype: None = None, + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, *, - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... + device: L["cpu"] | None = None, +) -> NDArray[ScalarT]: ... @overload -def empty( - shape: _ShapeLike, - dtype: DTypeLike, - order: _OrderCF = ..., +def empty_like[ScalarT: np.generic]( + prototype: Incomplete, + /, + dtype: _DTypeLike[ScalarT], + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, *, - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[Any]: ... + device: L["cpu"] | None = None, +) -> NDArray[ScalarT]: ... +@overload +def empty_like( + prototype: Incomplete, + /, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, + *, + device: L["cpu"] | None = None, +) -> NDArray[Incomplete]: ... @overload -def unravel_index( # type: ignore[misc] - indices: _IntLike_co, - shape: _ShapeLike, - order: _OrderCF = ..., -) -> tuple[intp, ...]: ... +def array[ArrayT: np.ndarray]( + object: ArrayT, + dtype: None = None, + *, + copy: bool | _CopyMode | None = True, + order: _OrderKACF = "K", + subok: L[True], + ndmin: int = 0, + ndmax: int = 0, + like: _SupportsArrayFunc | None = None, +) -> ArrayT: ... 
+@overload +def array[ArrayT: np.ndarray]( + object: _SupportsArray[ArrayT], + dtype: None = None, + *, + copy: bool | _CopyMode | None = True, + order: _OrderKACF = "K", + subok: L[True], + ndmin: L[0] = 0, + ndmax: int = 0, + like: _SupportsArrayFunc | None = None, +) -> ArrayT: ... +@overload +def array[ScalarT: np.generic]( + object: _ArrayLike[ScalarT], + dtype: None = None, + *, + copy: bool | _CopyMode | None = True, + order: _OrderKACF = "K", + subok: bool = False, + ndmin: int = 0, + ndmax: int = 0, + like: _SupportsArrayFunc | None = None, +) -> NDArray[ScalarT]: ... +@overload +def array[ScalarT: np.generic]( + object: Any, + dtype: _DTypeLike[ScalarT], + *, + copy: bool | _CopyMode | None = True, + order: _OrderKACF = "K", + subok: bool = False, + ndmin: int = 0, + ndmax: int = 0, + like: _SupportsArrayFunc | None = None, +) -> NDArray[ScalarT]: ... @overload -def unravel_index( - indices: _ArrayLikeInt_co, - shape: _ShapeLike, - order: _OrderCF = ..., -) -> tuple[NDArray[intp], ...]: ... +def array( + object: Any, + dtype: DTypeLike | None = None, + *, + copy: bool | _CopyMode | None = True, + order: _OrderKACF = "K", + subok: bool = False, + ndmin: int = 0, + ndmax: int = 0, + like: _SupportsArrayFunc | None = None, +) -> NDArray[Any]: ... +# @overload -def ravel_multi_index( # type: ignore[misc] - multi_index: Sequence[_IntLike_co], - dims: Sequence[SupportsIndex], - mode: _ModeKind | tuple[_ModeKind, ...] = ..., - order: _OrderCF = ..., +def ravel_multi_index( + multi_index: SupportsLenAndGetItem[_IntLike_co], + dims: _ShapeLike, + mode: _ModeKind | tuple[_ModeKind, ...] = "raise", + order: _OrderCF = "C", ) -> intp: ... @overload def ravel_multi_index( - multi_index: Sequence[_ArrayLikeInt_co], - dims: Sequence[SupportsIndex], - mode: _ModeKind | tuple[_ModeKind, ...] = ..., - order: _OrderCF = ..., + multi_index: SupportsLenAndGetItem[_ArrayLikeInt_co], + dims: _ShapeLike, + mode: _ModeKind | tuple[_ModeKind, ...] 
= "raise", + order: _OrderCF = "C", ) -> NDArray[intp]: ... +# +@overload +def unravel_index(indices: _IntLike_co, shape: _ShapeLike, order: _OrderCF = "C") -> tuple[intp, ...]: ... +@overload +def unravel_index(indices: _ArrayLikeInt_co, shape: _ShapeLike, order: _OrderCF = "C") -> tuple[NDArray[intp], ...]: ... + +# +def normalize_axis_index(axis: int, ndim: int, msg_prefix: str | None = None) -> int: ... + # NOTE: Allow any sequence of array-like objects @overload -def concatenate( # type: ignore[misc] - arrays: _ArrayLike[_SCT], +def concatenate[ScalarT: np.generic]( + arrays: _ArrayLike[ScalarT], /, - axis: None | SupportsIndex = ..., - out: None = ..., + axis: SupportsIndex | None = 0, + out: None = None, *, - dtype: None = ..., - casting: None | _CastingKind = ... -) -> NDArray[_SCT]: ... + dtype: None = None, + casting: _CastingKind | None = "same_kind", +) -> NDArray[ScalarT]: ... @overload -def concatenate( # type: ignore[misc] - arrays: _SupportsLenAndGetItem[int, ArrayLike], +def concatenate[ScalarT: np.generic]( + arrays: SupportsLenAndGetItem[ArrayLike], /, - axis: None | SupportsIndex = ..., - out: None = ..., + axis: SupportsIndex | None = 0, + out: None = None, *, - dtype: None = ..., - casting: None | _CastingKind = ... -) -> NDArray[Any]: ... + dtype: _DTypeLike[ScalarT], + casting: _CastingKind | None = "same_kind", +) -> NDArray[ScalarT]: ... @overload -def concatenate( # type: ignore[misc] - arrays: _SupportsLenAndGetItem[int, ArrayLike], +def concatenate( + arrays: SupportsLenAndGetItem[ArrayLike], /, - axis: None | SupportsIndex = ..., - out: None = ..., + axis: SupportsIndex | None = 0, + out: None = None, *, - dtype: _DTypeLike[_SCT], - casting: None | _CastingKind = ... -) -> NDArray[_SCT]: ... + dtype: DTypeLike | None = None, + casting: _CastingKind | None = "same_kind", +) -> NDArray[Incomplete]: ... 
@overload -def concatenate( # type: ignore[misc] - arrays: _SupportsLenAndGetItem[int, ArrayLike], +def concatenate[OutT: np.ndarray]( + arrays: SupportsLenAndGetItem[ArrayLike], /, - axis: None | SupportsIndex = ..., - out: None = ..., + axis: SupportsIndex | None = 0, *, - dtype: DTypeLike, - casting: None | _CastingKind = ... -) -> NDArray[Any]: ... + out: OutT, + dtype: DTypeLike | None = None, + casting: _CastingKind | None = "same_kind", +) -> OutT: ... @overload -def concatenate( - arrays: _SupportsLenAndGetItem[int, ArrayLike], +def concatenate[OutT: np.ndarray]( + arrays: SupportsLenAndGetItem[ArrayLike], /, - axis: None | SupportsIndex = ..., - out: _ArrayType = ..., + axis: SupportsIndex | None, + out: OutT, *, - dtype: DTypeLike = ..., - casting: None | _CastingKind = ... -) -> _ArrayType: ... + dtype: DTypeLike | None = None, + casting: _CastingKind | None = "same_kind", +) -> OutT: ... -def inner( - a: ArrayLike, - b: ArrayLike, - /, -) -> Any: ... +# keep in sync with `ma.core.inner` +def inner(a: ArrayLike, b: ArrayLike, /) -> Incomplete: ... +# keep in sync with `ma.core.where` @overload -def where( - condition: ArrayLike, - /, -) -> tuple[NDArray[intp], ...]: ... +def where(condition: ArrayLike, x: None = None, y: None = None, /) -> tuple[NDArray[intp], ...]: ... @overload -def where( - condition: ArrayLike, - x: ArrayLike, - y: ArrayLike, - /, -) -> NDArray[Any]: ... +def where(condition: ArrayLike, x: ArrayLike, y: ArrayLike, /) -> NDArray[Incomplete]: ... -def lexsort( - keys: ArrayLike, - axis: None | SupportsIndex = ..., -) -> Any: ... +def lexsort(keys: ArrayLike, axis: SupportsIndex = -1) -> NDArray[intp]: ... -def can_cast( - from_: ArrayLike | DTypeLike, - to: DTypeLike, - casting: None | _CastingKind = ..., -) -> bool: ... +def can_cast(from_: ArrayLike | DTypeLike, to: DTypeLike, casting: _CastingKind = "safe") -> bool: ... -def min_scalar_type( - a: ArrayLike, /, -) -> dtype[Any]: ... 
- -def result_type( - *arrays_and_dtypes: ArrayLike | DTypeLike, -) -> dtype[Any]: ... +def min_scalar_type(a: ArrayLike, /) -> dtype: ... +def result_type(*arrays_and_dtypes: ArrayLike | DTypeLike | None) -> dtype: ... +# keep in sync with `ma.core.dot` @overload -def dot(a: ArrayLike, b: ArrayLike, out: None = ...) -> Any: ... +def dot(a: ArrayLike, b: ArrayLike, out: None = None) -> Incomplete: ... @overload -def dot(a: ArrayLike, b: ArrayLike, out: _ArrayType) -> _ArrayType: ... +def dot[OutT: np.ndarray](a: ArrayLike, b: ArrayLike, out: OutT) -> OutT: ... @overload -def vdot(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, /) -> np.bool: ... # type: ignore[misc] +def vdot(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, /) -> np.bool: ... @overload -def vdot(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co, /) -> unsignedinteger[Any]: ... # type: ignore[misc] +def vdot(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co, /) -> unsignedinteger: ... @overload -def vdot(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, /) -> signedinteger[Any]: ... # type: ignore[misc] +def vdot(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, /) -> signedinteger: ... @overload -def vdot(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, /) -> floating[Any]: ... # type: ignore[misc] +def vdot(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, /) -> floating: ... @overload -def vdot(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, /) -> complexfloating[Any, Any]: ... # type: ignore[misc] +def vdot(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, /) -> complexfloating: ... @overload def vdot(a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co, /) -> timedelta64: ... @overload -def vdot(a: _ArrayLikeObject_co, b: Any, /) -> Any: ... +def vdot(a: _ArrayLikeObject_co, b: object, /) -> Any: ... @overload -def vdot(a: Any, b: _ArrayLikeObject_co, /) -> Any: ... +def vdot(a: object, b: _ArrayLikeObject_co, /) -> Any: ... 
-def bincount( - x: ArrayLike, - /, - weights: None | ArrayLike = ..., - minlength: SupportsIndex = ..., -) -> NDArray[intp]: ... +# +def bincount(x: _ArrayLikeInt_co, /, weights: ArrayLike | None = None, minlength: SupportsIndex = 0) -> _Array1D[intp]: ... -def copyto( - dst: NDArray[Any], - src: ArrayLike, - casting: None | _CastingKind = ..., - where: None | _ArrayLikeBool_co = ..., -) -> None: ... +# +def copyto(dst: ndarray, src: ArrayLike, casting: _CastingKind = "same_kind", where: object = True) -> None: ... +def putmask(a: ndarray, /, mask: _ArrayLikeBool_co, values: ArrayLike) -> None: ... -def putmask( - a: NDArray[Any], - /, - mask: _ArrayLikeBool_co, - values: ArrayLike, -) -> None: ... +@overload +def packbits(a: _ArrayLikeInt_co, /, axis: None = None, bitorder: _BitOrder = "big") -> _Array1D[uint8]: ... +@overload +def packbits(a: _ArrayLikeInt_co, /, axis: SupportsIndex, bitorder: _BitOrder = "big") -> NDArray[uint8]: ... -def packbits( - a: _ArrayLikeInt_co, +@overload +def unpackbits( + a: _ArrayLike[uint8], /, - axis: None | SupportsIndex = ..., - bitorder: L["big", "little"] = ..., -) -> NDArray[uint8]: ... - + axis: None = None, + count: SupportsIndex | None = None, + bitorder: _BitOrder = "big", +) -> _Array1D[uint8]: ... +@overload def unpackbits( a: _ArrayLike[uint8], /, - axis: None | SupportsIndex = ..., - count: None | SupportsIndex = ..., - bitorder: L["big", "little"] = ..., + axis: SupportsIndex, + count: SupportsIndex | None = None, + bitorder: _BitOrder = "big", ) -> NDArray[uint8]: ... -def shares_memory( - a: object, - b: object, - /, - max_work: None | int = ..., -) -> bool: ... - -def may_share_memory( - a: object, - b: object, - /, - max_work: None | int = ..., -) -> bool: ... +# any two python objects will be accepted, not just `ndarray`s +def shares_memory(a: object, b: object, /, max_work: _MaxWork = -1) -> bool: ... +def may_share_memory(a: object, b: object, /, max_work: _MaxWork = 0) -> bool: ... 
@overload -def asarray( - a: _ArrayLike[_SCT], - dtype: None = ..., - order: _OrderKACF = ..., - *, - device: None | L["cpu"] = ..., - copy: None | bool = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... -@overload -def asarray( - a: object, - dtype: None = ..., +def asarray[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + dtype: None = None, order: _OrderKACF = ..., *, - device: None | L["cpu"] = ..., - copy: None | bool = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[Any]: ... + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[ScalarT]: ... @overload -def asarray( +def asarray[ScalarT: np.generic]( a: Any, - dtype: _DTypeLike[_SCT], + dtype: _DTypeLike[ScalarT], order: _OrderKACF = ..., *, - device: None | L["cpu"] = ..., - copy: None | bool = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[ScalarT]: ... @overload def asarray( a: Any, - dtype: DTypeLike, + dtype: DTypeLike | None = ..., order: _OrderKACF = ..., *, - device: None | L["cpu"] = ..., - copy: None | bool = ..., - like: None | _SupportsArrayFunc = ..., + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., ) -> NDArray[Any]: ... @overload -def asanyarray( - a: _ArrayType, # Preserve subclass-information - dtype: None = ..., +def asanyarray[ArrayT: np.ndarray]( + a: ArrayT, # Preserve subclass-information + dtype: None = None, order: _OrderKACF = ..., *, - device: None | L["cpu"] = ..., - copy: None | bool = ..., - like: None | _SupportsArrayFunc = ..., -) -> _ArrayType: ... -@overload -def asanyarray( - a: _ArrayLike[_SCT], - dtype: None = ..., + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> ArrayT: ... 
+@overload +def asanyarray[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + dtype: None = None, order: _OrderKACF = ..., *, - device: None | L["cpu"] = ..., - copy: None | bool = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[ScalarT]: ... @overload -def asanyarray( - a: object, - dtype: None = ..., - order: _OrderKACF = ..., - *, - device: None | L["cpu"] = ..., - copy: None | bool = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[Any]: ... -@overload -def asanyarray( +def asanyarray[ScalarT: np.generic]( a: Any, - dtype: _DTypeLike[_SCT], + dtype: _DTypeLike[ScalarT], order: _OrderKACF = ..., *, - device: None | L["cpu"] = ..., - copy: None | bool = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[ScalarT]: ... @overload def asanyarray( a: Any, - dtype: DTypeLike, + dtype: DTypeLike | None = ..., order: _OrderKACF = ..., *, - device: None | L["cpu"] = ..., - copy: None | bool = ..., - like: None | _SupportsArrayFunc = ..., + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., ) -> NDArray[Any]: ... @overload -def ascontiguousarray( - a: _ArrayLike[_SCT], - dtype: None = ..., +def ascontiguousarray[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + dtype: None = None, *, - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... + like: _SupportsArrayFunc | None = ..., +) -> NDArray[ScalarT]: ... @overload -def ascontiguousarray( - a: object, - dtype: None = ..., - *, - like: None | _SupportsArrayFunc = ..., -) -> NDArray[Any]: ... 
-@overload -def ascontiguousarray( +def ascontiguousarray[ScalarT: np.generic]( a: Any, - dtype: _DTypeLike[_SCT], + dtype: _DTypeLike[ScalarT], *, - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... + like: _SupportsArrayFunc | None = ..., +) -> NDArray[ScalarT]: ... @overload def ascontiguousarray( a: Any, - dtype: DTypeLike, + dtype: DTypeLike | None = ..., *, - like: None | _SupportsArrayFunc = ..., + like: _SupportsArrayFunc | None = ..., ) -> NDArray[Any]: ... @overload -def asfortranarray( - a: _ArrayLike[_SCT], - dtype: None = ..., +def asfortranarray[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + dtype: None = None, *, - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... + like: _SupportsArrayFunc | None = ..., +) -> NDArray[ScalarT]: ... @overload -def asfortranarray( - a: object, - dtype: None = ..., - *, - like: None | _SupportsArrayFunc = ..., -) -> NDArray[Any]: ... -@overload -def asfortranarray( +def asfortranarray[ScalarT: np.generic]( a: Any, - dtype: _DTypeLike[_SCT], + dtype: _DTypeLike[ScalarT], *, - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... + like: _SupportsArrayFunc | None = ..., +) -> NDArray[ScalarT]: ... @overload def asfortranarray( a: Any, - dtype: DTypeLike, + dtype: DTypeLike | None = ..., *, - like: None | _SupportsArrayFunc = ..., + like: _SupportsArrayFunc | None = ..., ) -> NDArray[Any]: ... -def promote_types(__type1: DTypeLike, __type2: DTypeLike) -> dtype[Any]: ... +def promote_types(__type1: DTypeLike, __type2: DTypeLike) -> dtype: ... # `sep` is a de facto mandatory argument, as its default value is deprecated @overload def fromstring( string: str | bytes, - dtype: None = ..., - count: SupportsIndex = ..., + dtype: None = None, + count: SupportsIndex = -1, *, sep: str, - like: None | _SupportsArrayFunc = ..., -) -> NDArray[float64]: ... + like: _SupportsArrayFunc | None = None, +) -> _Array1D[float64]: ... 
@overload -def fromstring( +def fromstring[ScalarT: np.generic]( string: str | bytes, - dtype: _DTypeLike[_SCT], - count: SupportsIndex = ..., + dtype: _DTypeLike[ScalarT], + count: SupportsIndex = -1, *, sep: str, - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... + like: _SupportsArrayFunc | None = None, +) -> _Array1D[ScalarT]: ... @overload def fromstring( string: str | bytes, - dtype: DTypeLike, - count: SupportsIndex = ..., + dtype: DTypeLike | None = None, + count: SupportsIndex = -1, *, sep: str, - like: None | _SupportsArrayFunc = ..., -) -> NDArray[Any]: ... + like: _SupportsArrayFunc | None = None, +) -> _Array1D[Any]: ... +# +@overload +def frompyfunc[ReturnT]( + func: Callable[[Any], ReturnT], /, + nin: L[1], + nout: L[1], + *, + identity: None = None, +) -> _PyFunc_Nin1_Nout1[ReturnT, None]: ... +@overload +def frompyfunc[ReturnT, IdentityT]( + func: Callable[[Any], ReturnT], /, + nin: L[1], + nout: L[1], + *, + identity: IdentityT, +) -> _PyFunc_Nin1_Nout1[ReturnT, IdentityT]: ... +@overload +def frompyfunc[ReturnT]( + func: Callable[[Any, Any], ReturnT], /, + nin: L[2], + nout: L[1], + *, + identity: None = None, +) -> _PyFunc_Nin2_Nout1[ReturnT, None]: ... +@overload +def frompyfunc[ReturnT, IdentityT]( + func: Callable[[Any, Any], ReturnT], /, + nin: L[2], + nout: L[1], + *, + identity: IdentityT, +) -> _PyFunc_Nin2_Nout1[ReturnT, IdentityT]: ... +@overload +def frompyfunc[ReturnT, NInT: int]( + func: Callable[..., ReturnT], /, + nin: NInT, + nout: L[1], + *, + identity: None = None, +) -> _PyFunc_Nin3P_Nout1[ReturnT, None, NInT]: ... +@overload +def frompyfunc[ReturnT, NInT: int, IdentityT]( + func: Callable[..., ReturnT], /, + nin: NInT, + nout: L[1], + *, + identity: IdentityT, +) -> _PyFunc_Nin3P_Nout1[ReturnT, IdentityT, NInT]: ... 
+@overload +def frompyfunc[ReturnT, NInT: int, NOutT: int]( + func: Callable[..., _2PTuple[ReturnT]], /, + nin: NInT, + nout: NOutT, + *, + identity: None = None, +) -> _PyFunc_Nin1P_Nout2P[ReturnT, None, NInT, NOutT]: ... +@overload +def frompyfunc[ReturnT, NInT: int, NOutT: int, IdentityT]( + func: Callable[..., _2PTuple[ReturnT]], /, + nin: NInT, + nout: NOutT, + *, + identity: IdentityT, +) -> _PyFunc_Nin1P_Nout2P[ReturnT, IdentityT, NInT, NOutT]: ... +@overload def frompyfunc( func: Callable[..., Any], /, nin: SupportsIndex, nout: SupportsIndex, *, - identity: Any = ..., + identity: object | None = ..., ) -> ufunc: ... @overload def fromfile( - file: str | bytes | os.PathLike[Any] | _IOProtocol, - dtype: None = ..., + file: StrOrBytesPath | _SupportsFileMethods, + dtype: None = None, count: SupportsIndex = ..., sep: str = ..., offset: SupportsIndex = ..., *, - like: None | _SupportsArrayFunc = ..., -) -> NDArray[float64]: ... + like: _SupportsArrayFunc | None = ..., +) -> _Array1D[float64]: ... @overload -def fromfile( - file: str | bytes | os.PathLike[Any] | _IOProtocol, - dtype: _DTypeLike[_SCT], +def fromfile[ScalarT: np.generic]( + file: StrOrBytesPath | _SupportsFileMethods, + dtype: _DTypeLike[ScalarT], count: SupportsIndex = ..., sep: str = ..., offset: SupportsIndex = ..., *, - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... + like: _SupportsArrayFunc | None = ..., +) -> _Array1D[ScalarT]: ... @overload def fromfile( - file: str | bytes | os.PathLike[Any] | _IOProtocol, - dtype: DTypeLike, + file: StrOrBytesPath | _SupportsFileMethods, + dtype: DTypeLike | None = ..., count: SupportsIndex = ..., sep: str = ..., offset: SupportsIndex = ..., *, - like: None | _SupportsArrayFunc = ..., -) -> NDArray[Any]: ... + like: _SupportsArrayFunc | None = ..., +) -> _Array1D[Any]: ... 
@overload -def fromiter( +def fromiter[ScalarT: np.generic]( iter: Iterable[Any], - dtype: _DTypeLike[_SCT], + dtype: _DTypeLike[ScalarT], count: SupportsIndex = ..., *, - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... + like: _SupportsArrayFunc | None = ..., +) -> NDArray[ScalarT]: ... @overload def fromiter( iter: Iterable[Any], - dtype: DTypeLike, + dtype: DTypeLike | None, count: SupportsIndex = ..., *, - like: None | _SupportsArrayFunc = ..., + like: _SupportsArrayFunc | None = ..., ) -> NDArray[Any]: ... @overload def frombuffer( - buffer: _SupportsBuffer, - dtype: None = ..., - count: SupportsIndex = ..., - offset: SupportsIndex = ..., + buffer: Buffer, + dtype: None = None, + count: SupportsIndex = -1, + offset: SupportsIndex = 0, *, - like: None | _SupportsArrayFunc = ..., -) -> NDArray[float64]: ... -@overload -def frombuffer( - buffer: _SupportsBuffer, - dtype: _DTypeLike[_SCT], - count: SupportsIndex = ..., - offset: SupportsIndex = ..., + like: _SupportsArrayFunc | None = None, +) -> _Array1D[float64]: ... +@overload +def frombuffer[ScalarT: np.generic]( + buffer: Buffer, + dtype: _DTypeLike[ScalarT], + count: SupportsIndex = -1, + offset: SupportsIndex = 0, *, - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... + like: _SupportsArrayFunc | None = None, +) -> _Array1D[ScalarT]: ... @overload def frombuffer( - buffer: _SupportsBuffer, - dtype: DTypeLike, - count: SupportsIndex = ..., - offset: SupportsIndex = ..., + buffer: Buffer, + dtype: DTypeLike | None = None, + count: SupportsIndex = -1, + offset: SupportsIndex = 0, *, - like: None | _SupportsArrayFunc = ..., -) -> NDArray[Any]: ... + like: _SupportsArrayFunc | None = None, +) -> _Array1D[Any]: ... -@overload -def arange( # type: ignore[misc] - stop: _IntLike_co, - /, *, - dtype: None = ..., - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[signedinteger[Any]]: ... 
-@overload -def arange( # type: ignore[misc] - start: _IntLike_co, - stop: _IntLike_co, - step: _IntLike_co = ..., - dtype: None = ..., - *, - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[signedinteger[Any]]: ... -@overload -def arange( # type: ignore[misc] - stop: _FloatLike_co, - /, *, - dtype: None = ..., - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[floating[Any]]: ... -@overload -def arange( # type: ignore[misc] - start: _FloatLike_co, - stop: _FloatLike_co, - step: _FloatLike_co = ..., - dtype: None = ..., - *, - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[floating[Any]]: ... -@overload +# keep in sync with ma.core.arange +# NOTE: The `float64 | Any` return types needed to avoid incompatible overlapping overloads +@overload # dtype= +def arange[ScalarT: _ArangeScalar]( + start_or_stop: _ArangeScalar | float, + /, + stop: _ArangeScalar | float | None = None, + step: _ArangeScalar | float | None = 1, + *, + dtype: _DTypeLike[ScalarT], + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[ScalarT]: ... +@overload # (int-like, int-like?, int-like?) def arange( - stop: _TD64Like_co, - /, *, - dtype: None = ..., - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[timedelta64]: ... -@overload + start_or_stop: _IntLike_co, + /, + stop: _IntLike_co | None = None, + step: _IntLike_co | None = 1, + *, + dtype: type[int] | _DTypeLike[np.int_] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.int_]: ... +@overload # (float, float-like?, float-like?) def arange( - start: _TD64Like_co, - stop: _TD64Like_co, - step: _TD64Like_co = ..., - dtype: None = ..., - *, - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[timedelta64]: ... 
-@overload -def arange( # both start and stop must always be specified for datetime64 - start: datetime64, - stop: datetime64, - step: datetime64 = ..., - dtype: None = ..., - *, - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[datetime64]: ... -@overload + start_or_stop: float | floating, + /, + stop: _FloatLike_co | None = None, + step: _FloatLike_co | None = 1, + *, + dtype: type[float] | _DTypeLike[np.float64] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.float64 | Any]: ... +@overload # (float-like, float, float-like?) def arange( - stop: Any, - /, *, - dtype: _DTypeLike[_SCT], - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... -@overload + start_or_stop: _FloatLike_co, + /, + stop: float | floating, + step: _FloatLike_co | None = 1, + *, + dtype: type[float] | _DTypeLike[np.float64] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.float64 | Any]: ... +@overload # (timedelta, timedelta-like?, timedelta-like?) def arange( - start: Any, - stop: Any, - step: Any = ..., - dtype: _DTypeLike[_SCT] = ..., + start_or_stop: np.timedelta64, + /, + stop: _TD64Like_co | None = None, + step: _TD64Like_co | None = 1, *, - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... -@overload + dtype: _DTypeLike[np.timedelta64] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.timedelta64[Incomplete]]: ... +@overload # (timedelta-like, timedelta, timedelta-like?) def arange( - stop: Any, /, + start_or_stop: _TD64Like_co, + /, + stop: np.timedelta64, + step: _TD64Like_co | None = 1, *, - dtype: DTypeLike, - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[Any]: ... 
-@overload + dtype: _DTypeLike[np.timedelta64] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.timedelta64[Incomplete]]: ... +@overload # (datetime, datetime, timedelta-like) (requires both start and stop) def arange( - start: Any, - stop: Any, - step: Any = ..., - dtype: DTypeLike = ..., + start_or_stop: np.datetime64, + /, + stop: np.datetime64, + step: _TD64Like_co | None = 1, *, - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[Any]: ... + dtype: _DTypeLike[np.datetime64] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.datetime64[Incomplete]]: ... +@overload # (str, str, timedelta-like, dtype=dt64-like) (requires both start and stop) +def arange( + start_or_stop: str, + /, + stop: str, + step: _TD64Like_co | None = 1, + *, + dtype: _DTypeLike[np.datetime64] | _DT64Codes, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.datetime64[Incomplete]]: ... +@overload # dtype= +def arange( + start_or_stop: _ArangeScalar | float | str, + /, + stop: _ArangeScalar | float | str | None = None, + step: _ArangeScalar | float | None = 1, + *, + dtype: DTypeLike | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[Incomplete]: ... -def datetime_data( - dtype: str | _DTypeLike[datetime64] | _DTypeLike[timedelta64], /, -) -> tuple[str, int]: ... +# +def datetime_data(dtype: str | _DTypeLike[datetime64 | timedelta64], /) -> tuple[str, int]: ... 
-# The datetime functions perform unsafe casts to `datetime64[D]`, -# so a lot of different argument types are allowed here +# +@final +class busdaycalendar: + __module__: ClassVar[L["numpy"]] = "numpy" # type: ignore[misc] # pyright: ignore[reportIncompatibleVariableOverride] + + def __init__( + self, + /, + weekmask: str | Sequence[_IntLike_co] | _SupportsArray[NDArray[np.bool | np.integer]] = "1111100", + holidays: Sequence[dt.date | np.datetime64[dt.date]] | _SupportsArray[NDArray[np.datetime64[dt.date]]] | None = None, + ) -> None: ... + @property + def weekmask(self) -> _Array1D[np.bool]: ... + @property + def holidays(self) -> _Array1D[np.datetime64[dt.date]]: ... +# @overload -def busday_count( # type: ignore[misc] +def busday_count( begindates: _ScalarLike_co | dt.date, enddates: _ScalarLike_co | dt.date, - weekmask: ArrayLike = ..., - holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., - busdaycal: None | busdaycalendar = ..., - out: None = ..., + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates = (), + busdaycal: busdaycalendar | None = None, + out: None = None, ) -> int_: ... @overload -def busday_count( # type: ignore[misc] - begindates: ArrayLike | dt.date | _NestedSequence[dt.date], - enddates: ArrayLike | dt.date | _NestedSequence[dt.date], - weekmask: ArrayLike = ..., - holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., - busdaycal: None | busdaycalendar = ..., - out: None = ..., +def busday_count( + begindates: ArrayLike | _ToDates, + enddates: ArrayLike | _ToDates, + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates = (), + busdaycal: busdaycalendar | None = None, + out: None = None, ) -> NDArray[int_]: ... 
@overload -def busday_count( - begindates: ArrayLike | dt.date | _NestedSequence[dt.date], - enddates: ArrayLike | dt.date | _NestedSequence[dt.date], - weekmask: ArrayLike = ..., - holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., - busdaycal: None | busdaycalendar = ..., - out: _ArrayType = ..., -) -> _ArrayType: ... +def busday_count[OutT: np.ndarray]( + begindates: ArrayLike | _ToDates, + enddates: ArrayLike | _ToDates, + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates = (), + busdaycal: busdaycalendar | None = None, + *, + out: OutT, +) -> OutT: ... +@overload +def busday_count[OutT: np.ndarray]( + begindates: ArrayLike | _ToDates, + enddates: ArrayLike | _ToDates, + weekmask: ArrayLike, + holidays: ArrayLike | _ToDates, + busdaycal: busdaycalendar | None, + out: OutT, +) -> OutT: ... # `roll="raise"` is (more or less?) equivalent to `casting="safe"` @overload -def busday_offset( # type: ignore[misc] +def busday_offset( dates: datetime64 | dt.date, offsets: _TD64Like_co | dt.timedelta, - roll: L["raise"] = ..., - weekmask: ArrayLike = ..., - holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., - busdaycal: None | busdaycalendar = ..., - out: None = ..., + roll: L["raise"] = "raise", + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + out: None = None, ) -> datetime64: ... 
@overload -def busday_offset( # type: ignore[misc] - dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date], - offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta], - roll: L["raise"] = ..., - weekmask: ArrayLike = ..., - holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., - busdaycal: None | busdaycalendar = ..., - out: None = ..., +def busday_offset( + dates: _ArrayLike[datetime64] | _NestedSequence[dt.date], + offsets: _ArrayLikeTD64_co | _ToDeltas, + roll: L["raise"] = "raise", + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + out: None = None, ) -> NDArray[datetime64]: ... @overload -def busday_offset( # type: ignore[misc] - dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date], - offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta], - roll: L["raise"] = ..., - weekmask: ArrayLike = ..., - holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., - busdaycal: None | busdaycalendar = ..., - out: _ArrayType = ..., -) -> _ArrayType: ... -@overload -def busday_offset( # type: ignore[misc] +def busday_offset[OutT: np.ndarray]( + dates: _ArrayLike[datetime64] | _ToDates, + offsets: _ArrayLikeTD64_co | _ToDeltas, + roll: L["raise"] = "raise", + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + *, + out: OutT, +) -> OutT: ... +@overload +def busday_offset[OutT: np.ndarray]( + dates: _ArrayLike[datetime64] | _ToDates, + offsets: _ArrayLikeTD64_co | _ToDeltas, + roll: L["raise"], + weekmask: ArrayLike, + holidays: ArrayLike | _ToDates | None, + busdaycal: busdaycalendar | None, + out: OutT, +) -> OutT: ... 
+@overload +def busday_offset( dates: _ScalarLike_co | dt.date, offsets: _ScalarLike_co | dt.timedelta, roll: _RollKind, - weekmask: ArrayLike = ..., - holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., - busdaycal: None | busdaycalendar = ..., - out: None = ..., + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + out: None = None, ) -> datetime64: ... @overload -def busday_offset( # type: ignore[misc] - dates: ArrayLike | dt.date | _NestedSequence[dt.date], - offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta], +def busday_offset( + dates: ArrayLike | _NestedSequence[dt.date], + offsets: ArrayLike | _ToDeltas, roll: _RollKind, - weekmask: ArrayLike = ..., - holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., - busdaycal: None | busdaycalendar = ..., - out: None = ..., + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + out: None = None, ) -> NDArray[datetime64]: ... @overload -def busday_offset( - dates: ArrayLike | dt.date | _NestedSequence[dt.date], - offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta], +def busday_offset[OutT: np.ndarray]( + dates: ArrayLike | _ToDates, + offsets: ArrayLike | _ToDeltas, roll: _RollKind, - weekmask: ArrayLike = ..., - holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., - busdaycal: None | busdaycalendar = ..., - out: _ArrayType = ..., -) -> _ArrayType: ... + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + *, + out: OutT, +) -> OutT: ... +@overload +def busday_offset[OutT: np.ndarray]( + dates: ArrayLike | _ToDates, + offsets: ArrayLike | _ToDeltas, + roll: _RollKind, + weekmask: ArrayLike, + holidays: ArrayLike | _ToDates | None, + busdaycal: busdaycalendar | None, + out: OutT, +) -> OutT: ... 
@overload -def is_busday( # type: ignore[misc] +def is_busday( dates: _ScalarLike_co | dt.date, - weekmask: ArrayLike = ..., - holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., - busdaycal: None | busdaycalendar = ..., - out: None = ..., + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + out: None = None, ) -> np.bool: ... @overload -def is_busday( # type: ignore[misc] +def is_busday( dates: ArrayLike | _NestedSequence[dt.date], - weekmask: ArrayLike = ..., - holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., - busdaycal: None | busdaycalendar = ..., - out: None = ..., + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + out: None = None, ) -> NDArray[np.bool]: ... @overload -def is_busday( - dates: ArrayLike | _NestedSequence[dt.date], - weekmask: ArrayLike = ..., - holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., - busdaycal: None | busdaycalendar = ..., - out: _ArrayType = ..., -) -> _ArrayType: ... +def is_busday[OutT: np.ndarray]( + dates: ArrayLike | _ToDates, + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + *, + out: OutT, +) -> OutT: ... +@overload +def is_busday[OutT: np.ndarray]( + dates: ArrayLike | _ToDates, + weekmask: ArrayLike, + holidays: ArrayLike | _ToDates | None, + busdaycal: busdaycalendar | None, + out: OutT, +) -> OutT: ... 
+ +type _TimezoneContext = L["naive", "UTC", "local"] | dt.tzinfo @overload -def datetime_as_string( # type: ignore[misc] +def datetime_as_string( arr: datetime64 | dt.date, - unit: None | L["auto"] | _UnitKind = ..., - timezone: L["naive", "UTC", "local"] | dt.tzinfo = ..., - casting: _CastingKind = ..., + unit: L["auto"] | _UnitKind | None = None, + timezone: _TimezoneContext = "naive", + casting: _CastingKind = "same_kind", ) -> str_: ... @overload def datetime_as_string( arr: _ArrayLikeDT64_co | _NestedSequence[dt.date], - unit: None | L["auto"] | _UnitKind = ..., - timezone: L["naive", "UTC", "local"] | dt.tzinfo = ..., - casting: _CastingKind = ..., + unit: L["auto"] | _UnitKind | None = None, + timezone: _TimezoneContext = "naive", + casting: _CastingKind = "same_kind", ) -> NDArray[str_]: ... @overload @@ -998,7 +1351,7 @@ def compare_chararrays( def add_docstring(obj: Callable[..., Any], docstring: str, /) -> None: ... -_GetItemKeys = L[ +type _GetItemKeys = L[ "C", "CONTIGUOUS", "C_CONTIGUOUS", "F", "FORTRAN", "F_CONTIGUOUS", "W", "WRITEABLE", @@ -1011,7 +1364,7 @@ _GetItemKeys = L[ "FNC", "FORC", ] -_SetItemKeys = L[ +type _SetItemKeys = L[ "A", "ALIGNED", "W", "WRITEABLE", "X", "WRITEBACKIFCOPY", @@ -1050,12 +1403,169 @@ class flagsobj: def __getitem__(self, key: _GetItemKeys) -> bool: ... def __setitem__(self, key: _SetItemKeys, value: bool) -> None: ... +@final +class flatiter(Generic[_ArrayT_co]): + __module__: ClassVar[L["numpy"]] = "numpy" # type: ignore[misc] # pyright: ignore[reportIncompatibleVariableOverride] + __hash__: ClassVar[None] = None # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] + + @property + def base(self, /) -> _ArrayT_co: ... + @property + def coords[ShapeT: _Shape](self: flatiter[np.ndarray[ShapeT]], /) -> ShapeT: ... + @property + def index(self, /) -> int: ... + + # iteration + def __len__(self, /) -> int: ... + def __iter__(self, /) -> Self: ... 
+ def __next__[ScalarT: np.generic](self: flatiter[NDArray[ScalarT]], /) -> ScalarT: ... + + # indexing + @overload # nd: _[()] + def __getitem__(self, key: tuple[()], /) -> _ArrayT_co: ... + @overload # 0d; _[] + def __getitem__[ScalarT: np.generic](self: flatiter[NDArray[ScalarT]], key: int | np.integer, /) -> ScalarT: ... + @overload # 1d; _[[*]], _[:], _[...] + def __getitem__[DTypeT: dtype]( + self: flatiter[np.ndarray[Any, DTypeT]], + key: list[int] | slice | EllipsisType | flatiter[NDArray[np.integer]], + /, + ) -> ndarray[tuple[int], DTypeT]: ... + @overload # 2d; _[[*[*]]] + def __getitem__[DTypeT: dtype]( + self: flatiter[np.ndarray[Any, DTypeT]], + key: list[list[int]], + /, + ) -> ndarray[tuple[int, int], DTypeT]: ... + @overload # ?d + def __getitem__[DTypeT: dtype]( + self: flatiter[np.ndarray[Any, DTypeT]], + key: NDArray[np.integer] | _NestedSequence[int], + /, + ) -> ndarray[_AnyShape, DTypeT]: ... + + # NOTE: `__setitem__` operates via `unsafe` casting rules, and can thus accept any + # type accepted by the relevant underlying `np.generic` constructor, which isn't + # known statically. So we cannot meaningfully annotate the value parameter. + def __setitem__(self, key: slice | EllipsisType | _ArrayLikeInt, val: object, /) -> None: ... + + # NOTE: `dtype` and `copy` are no-ops at runtime, so we don't support them here to + # avoid confusion + def __array__[DTypeT: dtype]( + self: flatiter[np.ndarray[Any, DTypeT]], + dtype: None = None, + /, + *, + copy: None = None, + ) -> ndarray[tuple[int], DTypeT]: ... + + # This returns a flat copy of the underlying array, not of the iterator itself + def copy[DTypeT: dtype](self: flatiter[np.ndarray[Any, DTypeT]], /) -> ndarray[tuple[int], DTypeT]: ... 
+ +@final +class nditer: + __module__: ClassVar[L["numpy"]] = "numpy" # type: ignore[misc] # pyright: ignore[reportIncompatibleVariableOverride] + + @overload + def __init__( + self, + /, + op: ArrayLike, + flags: Sequence[_NDIterFlagsKind] | None = None, + op_flags: Sequence[_NDIterFlagsOp] | None = None, + op_dtypes: DTypeLike | None = None, + order: _OrderKACF = "K", + casting: _CastingKind = "safe", + op_axes: Sequence[SupportsIndex] | None = None, + itershape: _ShapeLike | None = None, + buffersize: SupportsIndex = 0, + ) -> None: ... + @overload + def __init__( + self, + /, + op: Sequence[ArrayLike | None], + flags: Sequence[_NDIterFlagsKind] | None = None, + op_flags: Sequence[Sequence[_NDIterFlagsOp]] | None = None, + op_dtypes: Sequence[DTypeLike | None] | None = None, + order: _OrderKACF = "K", + casting: _CastingKind = "safe", + op_axes: Sequence[Sequence[SupportsIndex]] | None = None, + itershape: _ShapeLike | None = None, + buffersize: SupportsIndex = 0, + ) -> None: ... + + # + def __enter__(self, /) -> nditer: ... + def __exit__(self, cls: type[BaseException] | None, exc: BaseException | None, tb: TracebackType | None, /) -> None: ... + + # + def __iter__(self) -> nditer: ... + def __next__(self) -> tuple[NDArray[Incomplete], ...]: ... + def __len__(self) -> int: ... + + # + @overload + def __getitem__(self, index: SupportsIndex) -> NDArray[Incomplete]: ... + @overload + def __getitem__(self, index: slice) -> tuple[NDArray[Incomplete], ...]: ... + def __setitem__(self, index: slice | SupportsIndex, value: ArrayLike) -> None: ... + + # + def __copy__(self) -> Self: ... + def copy(self) -> Self: ... + + # + def close(self) -> None: ... + def debug_print(self) -> None: ... + def enable_external_loop(self) -> None: ... + def iternext(self) -> bool: ... + def remove_axis(self, i: SupportsIndex, /) -> None: ... + def remove_multi_index(self) -> None: ... + def reset(self) -> None: ... 
+ + # + @property + def dtypes(self) -> tuple[np.dtype[Incomplete], ...]: ... + @property + def finished(self) -> bool: ... + @property + def has_delayed_bufalloc(self) -> bool: ... + @property + def has_index(self) -> bool: ... + @property + def has_multi_index(self) -> bool: ... + @property + def index(self) -> int: ... + @property + def iterationneedsapi(self) -> bool: ... + @property + def iterindex(self) -> int: ... + @property + def iterrange(self) -> tuple[int, ...]: ... + @property + def itersize(self) -> int: ... + @property + def itviews(self) -> tuple[NDArray[Incomplete], ...]: ... + @property + def multi_index(self) -> tuple[int, ...]: ... + @property + def ndim(self) -> int: ... + @property + def nop(self) -> int: ... + @property + def operands(self) -> tuple[NDArray[Incomplete], ...]: ... + @property + def shape(self) -> tuple[int, ...]: ... + @property + def value(self) -> tuple[NDArray[Incomplete], ...]: ... + def nested_iters( op: ArrayLike | Sequence[ArrayLike], axes: Sequence[Sequence[SupportsIndex]], - flags: None | Sequence[_NDIterFlagsKind] = ..., - op_flags: None | Sequence[Sequence[_NDIterOpFlagsKind]] = ..., - op_dtypes: DTypeLike | Sequence[DTypeLike] = ..., + flags: Sequence[_NDIterFlagsKind] | None = ..., + op_flags: Sequence[Sequence[_NDIterFlagsOp]] | None = ..., + op_dtypes: DTypeLike | Sequence[DTypeLike | None] | None = ..., order: _OrderKACF = ..., casting: _CastingKind = ..., buffersize: SupportsIndex = ..., diff --git a/numpy/_core/npymath.ini.in b/numpy/_core/npymath.ini.in deleted file mode 100644 index a233b8f3bfa9..000000000000 --- a/numpy/_core/npymath.ini.in +++ /dev/null @@ -1,20 +0,0 @@ -[meta] -Name=npymath -Description=Portable, core math library implementing C99 standard -Version=0.1 - -[variables] -pkgname=@pkgname@ -prefix=${pkgdir} -libdir=${prefix}@sep@lib -includedir=${prefix}@sep@include - -[default] -Libs=-L${libdir} -lnpymath -Cflags=-I${includedir} -Requires=mlib - -[msvc] -Libs=/LIBPATH:${libdir} npymath.lib 
-Cflags=/INCLUDE:${includedir} -Requires=mlib diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index 82755a0eff46..6bd03ae75c5d 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -1,34 +1,66 @@ +import builtins import functools import itertools +import math +import numbers import operator import sys import warnings -import numbers -import builtins -import math import numpy as np -from . import multiarray -from . import numerictypes as nt -from .multiarray import ( - ALLOW_THREADS, BUFSIZE, CLIP, MAXDIMS, MAY_SHARE_BOUNDS, MAY_SHARE_EXACT, - RAISE, WRAP, arange, array, asarray, asanyarray, ascontiguousarray, - asfortranarray, broadcast, can_cast, concatenate, copyto, dot, dtype, - empty, empty_like, flatiter, frombuffer, from_dlpack, fromfile, fromiter, - fromstring, inner, lexsort, matmul, may_share_memory, min_scalar_type, - ndarray, nditer, nested_iters, promote_types, putmask, result_type, - shares_memory, vdot, where, zeros, normalize_axis_index, - _get_promotion_state, _set_promotion_state, vecdot +from numpy.exceptions import AxisError + +from . import multiarray, numerictypes, numerictypes as nt, overrides, shape_base, umath +from ._ufunc_config import errstate +from .multiarray import ( # noqa: F401 + ALLOW_THREADS, + BUFSIZE, + CLIP, + MAXDIMS, + MAY_SHARE_BOUNDS, + MAY_SHARE_EXACT, + RAISE, + WRAP, + arange, + array, + asanyarray, + asarray, + ascontiguousarray, + asfortranarray, + broadcast, + can_cast, + concatenate, + copyto, + dot, + dtype, + empty, + empty_like, + flatiter, + from_dlpack, + frombuffer, + fromfile, + fromiter, + fromstring, + inner, + lexsort, + matmul, + may_share_memory, + min_scalar_type, + ndarray, + nditer, + nested_iters, + normalize_axis_index, + promote_types, + putmask, + result_type, + shares_memory, + vdot, + vecdot, + where, + zeros, ) - -from . import overrides -from . import umath -from . 
import shape_base -from .overrides import set_array_function_like_doc, set_module -from .umath import (multiply, invert, sin, PINF, NAN) -from . import numerictypes -from ..exceptions import AxisError -from ._ufunc_config import errstate, _no_nep50_warning +from .overrides import finalize_array_function_like, set_module +from .umath import NAN, PINF, invert, multiply, sin bitwise_not = invert ufunc = type(sin) @@ -53,7 +85,7 @@ 'identity', 'allclose', 'putmask', 'flatnonzero', 'inf', 'nan', 'False_', 'True_', 'bitwise_not', 'full', 'full_like', 'matmul', 'vecdot', 'shares_memory', - 'may_share_memory', '_get_promotion_state', '_set_promotion_state'] + 'may_share_memory'] def _zeros_like_dispatcher( @@ -76,15 +108,11 @@ def zeros_like( the returned array. dtype : data-type, optional Overrides the data type of the result. - - .. versionadded:: 1.6.0 order : {'C', 'F', 'A', or 'K'}, optional Overrides the memory layout of the result. 'C' means C-order, 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, 'C' otherwise. 'K' means match the layout of `a` as closely as possible. - - .. versionadded:: 1.6.0 subok : bool, optional. If True, then the newly created array will use the sub-class type of `a`, otherwise it will be a base-class array. Defaults @@ -93,8 +121,6 @@ def zeros_like( Overrides the shape of the result. If order='K' and the number of dimensions is unchanged, will try to keep order, otherwise, order='C' is implied. - - .. versionadded:: 1.17.0 device : str, optional The device on which to place the created array. Default: None. For Array-API interoperability only, so must be ``"cpu"`` if passed. 
@@ -115,6 +141,7 @@ def zeros_like( Examples -------- + >>> import numpy as np >>> x = np.arange(6) >>> x = x.reshape((2, 3)) >>> x @@ -124,7 +151,7 @@ def zeros_like( array([[0, 0, 0], [0, 0, 0]]) - >>> y = np.arange(3, dtype=float) + >>> y = np.arange(3, dtype=np.float64) >>> y array([0., 1., 2.]) >>> np.zeros_like(y) @@ -140,7 +167,7 @@ def zeros_like( return res -@set_array_function_like_doc +@finalize_array_function_like @set_module('numpy') def ones(shape, dtype=None, order='C', *, device=None, like=None): """ @@ -180,10 +207,11 @@ def ones(shape, dtype=None, order='C', *, device=None, like=None): Examples -------- + >>> import numpy as np >>> np.ones(5) array([1., 1., 1., 1., 1.]) - >>> np.ones((5,), dtype=int) + >>> np.ones((5,), dtype=np.int_) array([1, 1, 1, 1, 1]) >>> np.ones((2, 1)) @@ -229,15 +257,11 @@ def ones_like( the returned array. dtype : data-type, optional Overrides the data type of the result. - - .. versionadded:: 1.6.0 order : {'C', 'F', 'A', or 'K'}, optional Overrides the memory layout of the result. 'C' means C-order, 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, 'C' otherwise. 'K' means match the layout of `a` as closely as possible. - - .. versionadded:: 1.6.0 subok : bool, optional. If True, then the newly created array will use the sub-class type of `a`, otherwise it will be a base-class array. Defaults @@ -246,8 +270,6 @@ def ones_like( Overrides the shape of the result. If order='K' and the number of dimensions is unchanged, will try to keep order, otherwise, order='C' is implied. - - .. versionadded:: 1.17.0 device : str, optional The device on which to place the created array. Default: None. For Array-API interoperability only, so must be ``"cpu"`` if passed. 
@@ -268,6 +290,7 @@ def ones_like( Examples -------- + >>> import numpy as np >>> x = np.arange(6) >>> x = x.reshape((2, 3)) >>> x @@ -277,7 +300,7 @@ def ones_like( array([[1, 1, 1], [1, 1, 1]]) - >>> y = np.arange(3, dtype=float) + >>> y = np.arange(3, dtype=np.float64) >>> y array([0., 1., 2.]) >>> np.ones_like(y) @@ -294,10 +317,10 @@ def ones_like( def _full_dispatcher( shape, fill_value, dtype=None, order=None, *, device=None, like=None ): - return(like,) + return (like,) -@set_array_function_like_doc +@finalize_array_function_like @set_module('numpy') def full(shape, fill_value, dtype=None, order='C', *, device=None, like=None): """ @@ -338,6 +361,7 @@ def full(shape, fill_value, dtype=None, order='C', *, device=None, like=None): Examples -------- + >>> import numpy as np >>> np.full((2, 2), np.inf) array([[inf, inf], [inf, inf]]) @@ -403,8 +427,6 @@ def full_like( Overrides the shape of the result. If order='K' and the number of dimensions is unchanged, will try to keep order, otherwise, order='C' is implied. - - .. versionadded:: 1.17.0 device : str, optional The device on which to place the created array. Default: None. For Array-API interoperability only, so must be ``"cpu"`` if passed. 
@@ -425,21 +447,22 @@ def full_like( Examples -------- - >>> x = np.arange(6, dtype=int) + >>> import numpy as np + >>> x = np.arange(6, dtype=np.int_) >>> np.full_like(x, 1) array([1, 1, 1, 1, 1, 1]) >>> np.full_like(x, 0.1) array([0, 0, 0, 0, 0, 0]) - >>> np.full_like(x, 0.1, dtype=np.double) + >>> np.full_like(x, 0.1, dtype=np.float64) array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) - >>> np.full_like(x, np.nan, dtype=np.double) + >>> np.full_like(x, np.nan, dtype=np.float64) array([nan, nan, nan, nan, nan, nan]) - >>> y = np.arange(6, dtype=np.double) + >>> y = np.arange(6, dtype=np.float64) >>> np.full_like(y, 0.1) array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) - >>> y = np.zeros([2, 2, 3], dtype=int) + >>> y = np.zeros([2, 2, 3], dtype=np.int_) >>> np.full_like(y, [0, 0, 255]) array([[[ 0, 0, 255], [ 0, 0, 255]], @@ -462,15 +485,10 @@ def count_nonzero(a, axis=None, *, keepdims=False): """ Counts the number of non-zero values in the array ``a``. - The word "non-zero" is in reference to the Python 2.x - built-in method ``__nonzero__()`` (renamed ``__bool__()`` - in Python 3.x) of Python objects that tests an object's - "truthfulness". For example, any number is considered - truthful if it is nonzero, whereas any string is considered - truthful if it is not the empty string. Thus, this function - (recursively) counts how many elements in ``a`` (and in - sub-arrays thereof) have their ``__nonzero__()`` or ``__bool__()`` - method evaluated to ``True``. + A non-zero value is one that evaluates to truthful in a boolean + context, including any non-zero number and any string that + is not empty. This function recursively counts how many elements + in ``a`` (and its sub-arrays) are non-zero values. Parameters ---------- @@ -480,16 +498,11 @@ def count_nonzero(a, axis=None, *, keepdims=False): Axis or tuple of axes along which to count non-zeros. Default is None, meaning that non-zeros will be counted along a flattened version of ``a``. - - .. 
versionadded:: 1.12.0 - keepdims : bool, optional If this is set to True, the axes that are counted are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input array. - .. versionadded:: 1.19.0 - Returns ------- count : int or array of int @@ -503,12 +516,13 @@ def count_nonzero(a, axis=None, *, keepdims=False): Examples -------- + >>> import numpy as np >>> np.count_nonzero(np.eye(4)) - 4 + np.int64(4) >>> a = np.array([[0, 1, 7, 0], ... [3, 0, 2, 19]]) >>> np.count_nonzero(a) - 5 + np.int64(5) >>> np.count_nonzero(a, axis=0) array([1, 1, 2, 1]) >>> np.count_nonzero(a, axis=1) @@ -557,6 +571,7 @@ def isfortran(a): order (last index varies the fastest), or FORTRAN-contiguous order in memory (first index varies the fastest). + >>> import numpy as np >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C') >>> a array([[1, 2, 3], @@ -632,6 +647,7 @@ def argwhere(a): Examples -------- + >>> import numpy as np >>> x = np.arange(6).reshape(2,3) >>> x array([[0, 1, 2], @@ -680,6 +696,7 @@ def flatnonzero(a): Examples -------- + >>> import numpy as np >>> x = np.arange(-2, 3) >>> x array([-2, -1, 0, 1, 2]) @@ -752,6 +769,7 @@ def correlate(a, v, mode='valid'): Examples -------- + >>> import numpy as np >>> np.correlate([1, 2, 3], [0, 1, 0.5]) array([3.5]) >>> np.correlate([1, 2, 3], [0, 1, 0.5], "same") @@ -851,6 +869,7 @@ def convolve(a, v, mode='full'): Note how the convolution operator flips the second array before "sliding" the two across one another: + >>> import numpy as np >>> np.convolve([1, 2, 3], [0, 1, 0.5]) array([0. , 1. , 2.5, 4. 
, 1.5]) @@ -869,12 +888,12 @@ def convolve(a, v, mode='full'): """ a, v = array(a, copy=None, ndmin=1), array(v, copy=None, ndmin=1) - if (len(v) > len(a)): - a, v = v, a if len(a) == 0: raise ValueError('a cannot be empty') if len(v) == 0: raise ValueError('v cannot be empty') + if len(v) > len(a): + a, v = v, a return multiarray.correlate(a, v[::-1], mode) @@ -906,8 +925,6 @@ def outer(a, b, out=None): out : (M, N) ndarray, optional A location where the result is stored - .. versionadded:: 1.9.0 - Returns ------- out : (M, N) ndarray @@ -935,6 +952,7 @@ def outer(a, b, out=None): -------- Make a (*very* coarse) grid for computing a Mandelbrot set: + >>> import numpy as np >>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5)) >>> rl array([[-2., -1., 0., 1., 2.], @@ -959,7 +977,7 @@ def outer(a, b, out=None): An example using a "vector" of letters: - >>> x = np.array(['a', 'b', 'c'], dtype=object) + >>> x = np.array(['a', 'b', 'c'], dtype=np.object_) >>> np.outer(x, [1, 2, 3]) array([['a', 'aa', 'aaa'], ['b', 'bb', 'bbb'], @@ -999,7 +1017,8 @@ def tensordot(a, b, axes=2): * (2,) array_like Or, a list of axes to be summed over, first sequence applying to `a`, second to `b`. Both elements array_like must be of the same length. - + Each axis may appear at most once; repeated axes are not allowed. + For example, ``axes=([1, 1], [0, 0])`` is invalid. Returns ------- output : ndarray @@ -1012,26 +1031,54 @@ def tensordot(a, b, axes=2): Notes ----- Three common use cases are: - - * ``axes = 0`` : tensor product :math:`a\\otimes b` - * ``axes = 1`` : tensor dot product :math:`a\\cdot b` - * ``axes = 2`` : (default) tensor double contraction :math:`a:b` - - When `axes` is a positive integer ``N``, the operation starts with - axis ``-N`` of `a` and axis ``0`` of `b`, and it continues through - axis ``-1`` of `a` and axis ``N-1`` of `b` (inclusive). 
+ * ``axes = 0`` : tensor product :math:`a\\otimes b` + * ``axes = 1`` : tensor dot product :math:`a\\cdot b` + * ``axes = 2`` : (default) tensor double contraction :math:`a:b` + + When `axes` is integer_like, the sequence of axes for evaluation + will be: from the -Nth axis to the -1th axis in `a`, + and from the 0th axis to (N-1)th axis in `b`. + For example, ``axes = 2`` is the equal to + ``axes = [[-2, -1], [0, 1]]``. + When N-1 is smaller than 0, or when -N is larger than -1, + the element of `a` and `b` are defined as the `axes`. When there is more than one axis to sum over - and they are not the last (first) axes of `a` (`b`) - the argument `axes` should consist of two sequences of the same length, with the first axis to sum over given first in both sequences, the second axis second, and so forth. + The calculation can be referred to ``numpy.einsum``. + + For example, if ``a.shape == (2, 3, 4)`` and ``b.shape == (3, 4, 5)``, + then ``axes=([1, 2], [0, 1])`` sums over the ``(3, 4)`` dimensions of + both arrays and produces an output of shape ``(2, 5)``. + + Each summation axis corresponds to a distinct contraction index; repeating + an axis (for example ``axes=([1, 1], [0, 0])``) is invalid. The shape of the result consists of the non-contracted axes of the first tensor, followed by the non-contracted axes of the second. Examples -------- - A "traditional" example: + An example on integer_like: + + >>> a_0 = np.array([[1, 2], [3, 4]]) + >>> b_0 = np.array([[5, 6], [7, 8]]) + >>> c_0 = np.tensordot(a_0, b_0, axes=0) + >>> c_0.shape + (2, 2, 2, 2) + >>> c_0 + array([[[[ 5, 6], + [ 7, 8]], + [[10, 12], + [14, 16]]], + [[[15, 18], + [21, 24]], + [[20, 24], + [28, 32]]]]) + + An example on array_like: >>> a = np.arange(60.).reshape(3,4,5) >>> b = np.arange(24.).reshape(4,3,2) @@ -1044,7 +1091,9 @@ def tensordot(a, b, axes=2): [4664., 5018.], [4796., 5162.], [4928., 5306.]]) - >>> # A slower but equivalent way of computing the same... 
+ + A slower but equivalent way of computing the same... + >>> d = np.zeros((5,2)) >>> for i in range(5): ... for j in range(2): @@ -1060,10 +1109,9 @@ def tensordot(a, b, axes=2): An extended example taking advantage of the overloading of + and \\*: - >>> a = np.array(range(1, 9)) - >>> a.shape = (2, 2, 2) - >>> A = np.array(('a', 'b', 'c', 'd'), dtype=object) - >>> A.shape = (2, 2) + >>> a = np.array(range(1, 9)).reshape((2, 2, 2)) + >>> A = np.array(('a', 'b', 'c', 'd'), dtype=np.object_) + >>> A = A.reshape((2, 2)) >>> a; A array([[[1, 2], [3, 4]], @@ -1109,7 +1157,7 @@ def tensordot(a, b, axes=2): iter(axes) except Exception: axes_a = list(range(-axes, 0)) - axes_b = list(range(0, axes)) + axes_b = list(range(axes)) else: axes_a, axes_b = axes try: @@ -1125,6 +1173,11 @@ def tensordot(a, b, axes=2): axes_b = [axes_b] nb = 1 + if len(set(axes_a)) != len(axes_a): + raise ValueError("duplicate axes are not allowed in tensordot") + if len(set(axes_b)) != len(axes_b): + raise ValueError("duplicate axes are not allowed in tensordot") + a, b = asarray(a), asarray(b) as_ = a.shape nda = a.ndim @@ -1150,13 +1203,13 @@ def tensordot(a, b, axes=2): notin = [k for k in range(nda) if k not in axes_a] newaxes_a = notin + axes_a N2 = math.prod(as_[axis] for axis in axes_a) - newshape_a = (math.prod([as_[ax] for ax in notin]), N2) + newshape_a = (math.prod(as_[ax] for ax in notin), N2) olda = [as_[axis] for axis in notin] notin = [k for k in range(ndb) if k not in axes_b] newaxes_b = axes_b + notin N2 = math.prod(bs[axis] for axis in axes_b) - newshape_b = (N2, math.prod([bs[ax] for ax in notin])) + newshape_b = (N2, math.prod(bs[ax] for ax in notin)) oldb = [bs[axis] for axis in notin] at = a.transpose(newaxes_a).reshape(newshape_a) @@ -1204,12 +1257,11 @@ def roll(a, shift, axis=None): Notes ----- - .. versionadded:: 1.12.0 - Supports rolling over multiple dimensions simultaneously. 
Examples -------- + >>> import numpy as np >>> x = np.arange(10) >>> np.roll(x, 2) array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]) @@ -1256,9 +1308,9 @@ def roll(a, shift, axis=None): if broadcasted.ndim > 1: raise ValueError( "'shift' and 'axis' should be scalars or 1D sequences") - shifts = {ax: 0 for ax in range(a.ndim)} + shifts = dict.fromkeys(range(a.ndim), 0) for sh, ax in broadcasted: - shifts[ax] += sh + shifts[ax] += int(sh) rolls = [((slice(None), slice(None)),)] * a.ndim for ax, offset in shifts.items(): @@ -1343,6 +1395,7 @@ def rollaxis(a, axis, start=0): Examples -------- + >>> import numpy as np >>> a = np.ones((3,4,5,6)) >>> np.rollaxis(a, 3, 1).shape (3, 6, 4, 5) @@ -1364,7 +1417,7 @@ def rollaxis(a, axis, start=0): start -= 1 if axis == start: return a[...] - axes = list(range(0, n)) + axes = list(range(n)) axes.remove(axis) axes.insert(start, axis) return a.transpose(axes) @@ -1383,8 +1436,6 @@ def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False): Used internally by multi-axis-checking logic. - .. versionadded:: 1.13.0 - Parameters ---------- axis : int, iterable of int @@ -1415,16 +1466,16 @@ def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False): normalize_axis_index : normalizing a single scalar axis """ # Optimization to speed-up the most common cases. - if type(axis) not in (tuple, list): + if not isinstance(axis, (tuple, list)): try: axis = [operator.index(axis)] except TypeError: pass # Going via an iterator directly is slower than via list comprehension. 
- axis = tuple([normalize_axis_index(ax, ndim, argname) for ax in axis]) + axis = tuple(normalize_axis_index(ax, ndim, argname) for ax in axis) if not allow_duplicate and len(set(axis)) != len(axis): if argname: - raise ValueError('repeated axis in `{}` argument'.format(argname)) + raise ValueError(f'repeated axis in `{argname}` argument') else: raise ValueError('repeated axis') return axis @@ -1441,8 +1492,6 @@ def moveaxis(a, source, destination): Other axes remain in their original order. - .. versionadded:: 1.11.0 - Parameters ---------- a : np.ndarray @@ -1465,6 +1514,7 @@ def moveaxis(a, source, destination): Examples -------- + >>> import numpy as np >>> x = np.zeros((3, 4, 5)) >>> np.moveaxis(x, 0, -1).shape (4, 5, 3) @@ -1517,10 +1567,7 @@ def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors are defined by the last axis of `a` and `b` by default, and these axes - can have dimensions 2 or 3. Where the dimension of either `a` or `b` is - 2, the third component of the input vector is assumed to be zero and the - cross product calculated accordingly. In cases where both input vectors - have dimension 2, the z-component of the cross product is returned. + must have 3 dimensions. Parameters ---------- @@ -1533,9 +1580,7 @@ def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): axisb : int, optional Axis of `b` that defines the vector(s). By default, the last axis. axisc : int, optional - Axis of `c` containing the cross product vector(s). Ignored if - both input vectors have dimension 2, as the return is scalar. - By default, the last axis. + Axis of `c` containing the cross product vector(s). By default, the last axis. axis : int, optional If defined, the axis of `a`, `b` and `c` that defines the vector(s) and cross product(s). Overrides `axisa`, `axisb` and `axisc`. 
@@ -1548,27 +1593,24 @@ def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): Raises ------ ValueError - When the dimension of the vector(s) in `a` and/or `b` does not - equal 2 or 3. + When the dimension of the vector(s) in `a` or `b` does not equal 3. See Also -------- inner : Inner product outer : Outer product. - linalg.cross : An Array API compatible variation of ``np.cross``, - which accepts (arrays of) 3-element vectors only. + linalg.cross : An Array API compatible variation of ``np.cross``. ix_ : Construct index arrays. Notes ----- - .. versionadded:: 1.9.0 - Supports full broadcasting of the inputs. Examples -------- Vector cross-product. + >>> import numpy as np >>> x = [1, 2, 3] >>> y = [4, 5, 6] >>> np.cross(x, y) @@ -1576,13 +1618,6 @@ def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): One vector with dimension 2. - >>> x = [1, 2] - >>> y = [4, 5, 6] - >>> np.cross(x, y) - array([12, -6, -3]) - - Equivalently: - >>> x = [1, 2, 0] >>> y = [4, 5, 6] >>> np.cross(x, y) @@ -1590,10 +1625,10 @@ def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): Both vectors with dimension 2. - >>> x = [1,2] - >>> y = [4,5] + >>> x = [1, 2, 0] + >>> y = [4, 5, 0] >>> np.cross(x, y) - array(-3) + array([0, 0, -3]) Multiple vector cross-products. Note that the direction of the cross product vector is defined by the *right-hand rule*. @@ -1640,24 +1675,16 @@ def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): # Move working axis to the end of the shape a = moveaxis(a, axisa, -1) b = moveaxis(b, axisb, -1) - msg = ("incompatible dimensions for cross product\n" - "(dimension must be 2 or 3)") - if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3): - raise ValueError(msg) - if a.shape[-1] == 2 or b.shape[-1] == 2: - # Deprecated in NumPy 2.0, 2023-09-26 - warnings.warn( - "Arrays of 2-dimensional vectors are deprecated. Use arrays of " - "3-dimensional vectors instead. 
(deprecated in NumPy 2.0)", - DeprecationWarning, stacklevel=2 + if a.shape[-1] != 3 or b.shape[-1] != 3: + raise ValueError( + f"Both input arrays must be (arrays of) 3-dimensional vectors, " + f"but they are {a.shape[-1]} and {b.shape[-1]} dimensional instead." ) # Create the output array - shape = broadcast(a[..., 0], b[..., 0]).shape - if a.shape[-1] == 3 or b.shape[-1] == 3: - shape += (3,) - # Check axisc is within bounds - axisc = normalize_axis_index(axisc, len(shape), msg_prefix='axisc') + shape = *broadcast(a[..., 0], b[..., 0]).shape, 3 + # Check axisc is within bounds + axisc = normalize_axis_index(axisc, len(shape), msg_prefix='axisc') dtype = promote_types(a.dtype, b.dtype) cp = empty(shape, dtype) @@ -1668,58 +1695,26 @@ def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): # create local aliases for readability a0 = a[..., 0] a1 = a[..., 1] - if a.shape[-1] == 3: - a2 = a[..., 2] + a2 = a[..., 2] b0 = b[..., 0] b1 = b[..., 1] - if b.shape[-1] == 3: - b2 = b[..., 2] - if cp.ndim != 0 and cp.shape[-1] == 3: - cp0 = cp[..., 0] - cp1 = cp[..., 1] - cp2 = cp[..., 2] - - if a.shape[-1] == 2: - if b.shape[-1] == 2: - # a0 * b1 - a1 * b0 - multiply(a0, b1, out=cp) - cp -= a1 * b0 - return cp - else: - assert b.shape[-1] == 3 - # cp0 = a1 * b2 - 0 (a2 = 0) - # cp1 = 0 - a0 * b2 (a2 = 0) - # cp2 = a0 * b1 - a1 * b0 - multiply(a1, b2, out=cp0) - multiply(a0, b2, out=cp1) - negative(cp1, out=cp1) - multiply(a0, b1, out=cp2) - cp2 -= a1 * b0 - else: - assert a.shape[-1] == 3 - if b.shape[-1] == 3: - # cp0 = a1 * b2 - a2 * b1 - # cp1 = a2 * b0 - a0 * b2 - # cp2 = a0 * b1 - a1 * b0 - multiply(a1, b2, out=cp0) - tmp = array(a2 * b1) - cp0 -= tmp - multiply(a2, b0, out=cp1) - multiply(a0, b2, out=tmp) - cp1 -= tmp - multiply(a0, b1, out=cp2) - multiply(a1, b0, out=tmp) - cp2 -= tmp - else: - assert b.shape[-1] == 2 - # cp0 = 0 - a2 * b1 (b2 = 0) - # cp1 = a2 * b0 - 0 (b2 = 0) - # cp2 = a0 * b1 - a1 * b0 - multiply(a2, b1, out=cp0) - negative(cp0, out=cp0) - 
multiply(a2, b0, out=cp1) - multiply(a0, b1, out=cp2) - cp2 -= a1 * b0 + b2 = b[..., 2] + cp0 = cp[..., 0] + cp1 = cp[..., 1] + cp2 = cp[..., 2] + + # cp0 = a1 * b2 - a2 * b1 + # cp1 = a2 * b0 - a0 * b2 + # cp2 = a0 * b1 - a1 * b0 + multiply(a1, b2, out=cp0) + tmp = np.multiply(a2, b1, out=...) + cp0 -= tmp + multiply(a2, b0, out=cp1) + multiply(a0, b2, out=tmp) + cp1 -= tmp + multiply(a0, b1, out=cp2) + multiply(a1, b0, out=tmp) + cp2 -= tmp return moveaxis(cp, -1, axisc) @@ -1745,8 +1740,6 @@ def indices(dimensions, dtype=int, sparse=False): Return a sparse representation of the grid instead of a dense representation. Default is False. - .. versionadded:: 1.17 - Returns ------- grid : one ndarray or tuple of ndarrays @@ -1776,6 +1769,7 @@ def indices(dimensions, dtype=int, sparse=False): Examples -------- + >>> import numpy as np >>> grid = np.indices((2, 3)) >>> grid.shape (2, 2, 3) @@ -1814,14 +1808,14 @@ def indices(dimensions, dtype=int, sparse=False): """ dimensions = tuple(dimensions) N = len(dimensions) - shape = (1,)*N + shape = (1,) * N if sparse: - res = tuple() + res = () else: - res = empty((N,)+dimensions, dtype=dtype) + res = empty((N,) + dimensions, dtype=dtype) for i, dim in enumerate(dimensions): idx = arange(dim, dtype=dtype).reshape( - shape[:i] + (dim,) + shape[i+1:] + shape[:i] + (dim,) + shape[i + 1:] ) if sparse: res = res + (idx,) @@ -1830,7 +1824,7 @@ def indices(dimensions, dtype=int, sparse=False): return res -@set_array_function_like_doc +@finalize_array_function_like @set_module('numpy') def fromfunction(function, shape, *, dtype=float, like=None, **kwargs): """ @@ -1875,20 +1869,21 @@ def fromfunction(function, shape, *, dtype=float, like=None, **kwargs): Examples -------- - >>> np.fromfunction(lambda i, j: i, (2, 2), dtype=float) + >>> import numpy as np + >>> np.fromfunction(lambda i, j: i, (2, 2), dtype=np.float64) array([[0., 0.], [1., 1.]]) - >>> np.fromfunction(lambda i, j: j, (2, 2), dtype=float) + >>> np.fromfunction(lambda 
i, j: j, (2, 2), dtype=np.float64) array([[0., 1.], [0., 1.]]) - >>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int) + >>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=np.int_) array([[ True, False, False], [False, True, False], [False, False, True]]) - >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int) + >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=np.int_) array([[0, 1, 2], [1, 2, 3], [2, 3, 4]]) @@ -1905,8 +1900,11 @@ def fromfunction(function, shape, *, dtype=float, like=None, **kwargs): _fromfunction_with_like = array_function_dispatch()(fromfunction) -def _frombuffer(buf, dtype, shape, order): - return frombuffer(buf, dtype=dtype).reshape(shape, order=order) +def _frombuffer(buf, dtype, shape, order, axis_order=None): + array = frombuffer(buf, dtype=dtype) + if order == 'K' and axis_order is not None: + return array.reshape(shape, order='C').transpose(axis_order) + return array.reshape(shape, order=order) @set_module('numpy') @@ -1964,14 +1962,20 @@ def isscalar(element): Examples -------- + >>> import numpy as np + >>> np.isscalar(3.1) True + >>> np.isscalar(np.array(3.1)) False + >>> np.isscalar([3.1]) False + >>> np.isscalar(False) True + >>> np.isscalar('numpy') True @@ -2001,7 +2005,7 @@ def binary_repr(num, width=None): In a two's-complement system negative numbers are represented by the two's complement of the absolute value. This is the most common method of - representing signed integers on computers [1]_. A N-bit two's-complement + representing signed integers on computers [1]_. An N-bit two's-complement system can represent every integer in the range :math:`-2^{N-1}` to :math:`+2^{N-1}-1`. 
@@ -2039,6 +2043,7 @@ def binary_repr(num, width=None): Examples -------- + >>> import numpy as np >>> np.binary_repr(3) '11' >>> np.binary_repr(-3) @@ -2069,32 +2074,31 @@ def err_if_insufficient(width, binwidth): return '0' * (width or 1) elif num > 0: - binary = bin(num)[2:] + binary = f'{num:b}' binwidth = len(binary) outwidth = (binwidth if width is None else builtins.max(binwidth, width)) err_if_insufficient(width, binwidth) return binary.zfill(outwidth) - else: - if width is None: - return '-' + bin(-num)[2:] + elif width is None: + return f'-{-num:b}' - else: - poswidth = len(bin(-num)[2:]) + else: + poswidth = len(f'{-num:b}') - # See gh-8679: remove extra digit - # for numbers at boundaries. - if 2**(poswidth - 1) == -num: - poswidth -= 1 + # See gh-8679: remove extra digit + # for numbers at boundaries. + if 2**(poswidth - 1) == -num: + poswidth -= 1 - twocomp = 2**(poswidth + 1) + num - binary = bin(twocomp)[2:] - binwidth = len(binary) + twocomp = 2**(poswidth + 1) + num + binary = f'{twocomp:b}' + binwidth = len(binary) - outwidth = builtins.max(binwidth, width) - err_if_insufficient(width, binwidth) - return '1' * (outwidth - binwidth) + binary + outwidth = builtins.max(binwidth, width) + err_if_insufficient(width, binwidth) + return '1' * (outwidth - binwidth) + binary @set_module('numpy') @@ -2123,6 +2127,7 @@ def base_repr(number, base=2, padding=0): Examples -------- + >>> import numpy as np >>> np.base_repr(5) '101' >>> np.base_repr(6, 5) @@ -2169,7 +2174,7 @@ def _maketup(descr, val): return tuple(res) -@set_array_function_like_doc +@finalize_array_function_like @set_module('numpy') def identity(n, dtype=None, *, like=None): """ @@ -2196,6 +2201,7 @@ def identity(n, dtype=None, *, like=None): Examples -------- + >>> import numpy as np >>> np.identity(3) array([[1., 0., 0.], [0., 1., 0.], @@ -2245,8 +2251,6 @@ def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): Whether to compare NaN's as equal. 
If True, NaN's in `a` will be considered equal to NaN's in `b` in the output array. - .. versionadded:: 1.10.0 - Returns ------- allclose : bool @@ -2286,17 +2290,23 @@ def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): Examples -------- + >>> import numpy as np >>> np.allclose([1e10,1e-7], [1.00001e10,1e-8]) False + >>> np.allclose([1e10,1e-8], [1.00001e10,1e-9]) True + >>> np.allclose([1e10,1e-8], [1.0001e10,1e-9]) False + >>> np.allclose([1.0, np.nan], [1.0, np.nan]) False + >>> np.allclose([1.0, np.nan], [1.0, np.nan], equal_nan=True) True + """ res = all(isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)) return builtins.bool(res) @@ -2346,8 +2356,6 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): Notes ----- - .. versionadded:: 1.7.0 - For finite values, isclose uses the following equation to test whether two floating point values are equivalent.:: @@ -2370,24 +2378,34 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): Examples -------- + >>> import numpy as np >>> np.isclose([1e10,1e-7], [1.00001e10,1e-8]) array([ True, False]) + >>> np.isclose([1e10,1e-8], [1.00001e10,1e-9]) array([ True, True]) + >>> np.isclose([1e10,1e-8], [1.0001e10,1e-9]) array([False, True]) + >>> np.isclose([1.0, np.nan], [1.0, np.nan]) array([ True, False]) + >>> np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True) array([ True, True]) + >>> np.isclose([1e-8, 1e-7], [0.0, 0.0]) array([ True, False]) + >>> np.isclose([1e-100, 1e-7], [0.0, 0.0], atol=0.0) array([False, False]) + >>> np.isclose([1e-10, 1e-10], [1e-20, 0.0]) array([ True, True]) + >>> np.isclose([1e-10, 1e-10], [1e-20, 0.999999e-10], atol=0.0) array([False, True]) + """ # Turn all but python scalars into arrays. 
x, y, atol, rtol = ( @@ -2407,8 +2425,21 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): elif isinstance(y, int): y = float(y) - with errstate(invalid='ignore'), _no_nep50_warning(): - result = (less_equal(abs(x-y), atol + rtol * abs(y)) + # atol and rtol can be arrays + if not (np.all(np.isfinite(atol)) and np.all(np.isfinite(rtol))): + err_s = np.geterr()["invalid"] + err_msg = f"One of rtol or atol is not valid, atol: {atol}, rtol: {rtol}" + + if err_s == "warn": + warnings.warn(err_msg, RuntimeWarning, stacklevel=2) + elif err_s == "raise": + raise FloatingPointError(err_msg) + elif err_s == "print": + print(err_msg) + + with errstate(invalid='ignore'): + + result = (less_equal(abs(x - y), atol + rtol * abs(y)) & isfinite(y) | (x == y)) if equal_nan: @@ -2450,8 +2481,6 @@ def array_equal(a1, a2, equal_nan=False): complex, values will be considered equal if either the real or the imaginary component of a given value is ``nan``. - .. versionadded:: 1.19.0 - Returns ------- b : bool @@ -2466,17 +2495,24 @@ def array_equal(a1, a2, equal_nan=False): Examples -------- + >>> import numpy as np + >>> np.array_equal([1, 2], [1, 2]) True + >>> np.array_equal(np.array([1, 2]), np.array([1, 2])) True + >>> np.array_equal([1, 2], [1, 2, 3]) False + >>> np.array_equal([1, 2], [1, 4]) False + >>> a = np.array([1, np.nan]) >>> np.array_equal(a, a) False + >>> np.array_equal(a, a, equal_nan=True) True @@ -2497,24 +2533,24 @@ def array_equal(a1, a2, equal_nan=False): if a1.shape != a2.shape: return False if not equal_nan: - return builtins.bool((a1 == a2).all()) - cannot_have_nan = (_dtype_cannot_hold_nan(a1.dtype) - and _dtype_cannot_hold_nan(a2.dtype)) - if cannot_have_nan: - if a1 is a2: - return True - return builtins.bool((a1 == a2).all()) + return builtins.bool((asanyarray(a1 == a2)).all()) if a1 is a2: # nan will compare equal so an array will compare equal to itself. 
return True - # Handling NaN values if equal_nan is True - a1nan, a2nan = isnan(a1), isnan(a2) - # NaN's occur at different locations - if not (a1nan == a2nan).all(): - return False - # Shapes of a1, a2 and masks are guaranteed to be consistent by this point - return builtins.bool((a1[~a1nan] == a2[~a1nan]).all()) + + cannot_have_nan = (_dtype_cannot_hold_nan(a1.dtype) + and _dtype_cannot_hold_nan(a2.dtype)) + if cannot_have_nan: + return builtins.bool(asarray(a1 == a2).all()) + + # Fast path for a1 and a2 being all NaN arrays + a1nan = isnan(a1) + if a1nan.all(): + return builtins.bool(isnan(a2).all()) + + equal_or_both_nan = (a1 == a2) | (a1nan & isnan(a2)) + return builtins.bool(equal_or_both_nan.all()) def _array_equiv_dispatcher(a1, a2): @@ -2541,6 +2577,7 @@ def array_equiv(a1, a2): Examples -------- + >>> import numpy as np >>> np.array_equiv([1, 2], [1, 2]) True >>> np.array_equiv([1, 2], [1, 3]) @@ -2566,15 +2603,15 @@ def array_equiv(a1, a2): except Exception: return False - return builtins.bool((a1 == a2).all()) + return builtins.bool(asanyarray(a1 == a2).all()) -def _astype_dispatcher(x, dtype, /, *, copy=None): +def _astype_dispatcher(x, dtype, /, *, copy=None, device=None): return (x, dtype) @array_function_dispatch(_astype_dispatcher) -def astype(x, dtype, /, *, copy = True): +def astype(x, dtype, /, *, copy=True, device=None): """ Copies an array to a specified data type. @@ -2595,6 +2632,11 @@ def astype(x, dtype, /, *, copy = True): matches the data type of the input array, the input array must be returned; otherwise, a newly allocated array must be returned. Defaults to ``True``. + device : str, optional + The device on which to place the returned array. Default: None. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. 
versionadded:: 2.1.0 Returns ------- @@ -2607,6 +2649,7 @@ def astype(x, dtype, /, *, copy = True): Examples -------- + >>> import numpy as np >>> arr = np.array([1, 2, 3]); arr array([1, 2, 3]) >>> np.astype(arr, np.float64) @@ -2620,9 +2663,15 @@ def astype(x, dtype, /, *, copy = True): True """ - if not isinstance(x, np.ndarray): + if not (isinstance(x, np.ndarray) or isscalar(x)): raise TypeError( - f"Input should be a NumPy array. It is a {type(x)} instead." + "Input should be a NumPy array or scalar. " + f"It is a {type(x)} instead." + ) + if device is not None and device != "cpu": + raise ValueError( + 'Device not understood. Only "cpu" is allowed, but received:' + f' {device}' ) return x.astype(dtype, copy=copy) @@ -2635,22 +2684,20 @@ def astype(x, dtype, /, *, copy = True): def extend_all(module): existing = set(__all__) - mall = getattr(module, '__all__') + mall = module.__all__ for a in mall: if a not in existing: __all__.append(a) -from .umath import * -from .numerictypes import * -from . import fromnumeric -from .fromnumeric import * -from . import arrayprint -from .arrayprint import * -from . import _asarray +from . import _asarray, _ufunc_config, arrayprint, fromnumeric from ._asarray import * -from . 
import _ufunc_config from ._ufunc_config import * +from .arrayprint import * +from .fromnumeric import * +from .numerictypes import * +from .umath import * + extend_all(fromnumeric) extend_all(umath) extend_all(numerictypes) diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index 8871cf9d264a..1dd34d6a4fcd 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -1,709 +1,1366 @@ -from collections.abc import Callable, Sequence +from _typeshed import Incomplete +from builtins import bool as py_bool +from collections.abc import Callable, Iterable, Sequence from typing import ( Any, - overload, - TypeVar, Literal as L, SupportsAbs, SupportsIndex, - NoReturn, + TypeGuard, + TypeVar, + overload, ) -if sys.version_info >= (3, 10): - from typing import TypeGuard -else: - from typing_extensions import TypeGuard import numpy as np from numpy import ( - ComplexWarning as ComplexWarning, - generic, - unsignedinteger, - signedinteger, - floating, - complexfloating, - int_, - intp, - float64, - timedelta64, - object_, - _OrderKACF, + False_, + True_, _OrderCF, + _OrderKACF, + bitwise_not, + inf, + little_endian, + nan, + newaxis, + ufunc, ) - from numpy._typing import ( ArrayLike, - NDArray, DTypeLike, - _ShapeLike, - _DTypeLike, + NDArray, + _AnyShape, _ArrayLike, - _SupportsArrayFunc, - _ScalarLike_co, _ArrayLikeBool_co, - _ArrayLikeUInt_co, - _ArrayLikeInt_co, - _ArrayLikeFloat_co, _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _ArrayLikeNumber_co, _ArrayLikeTD64_co, - _ArrayLikeObject_co, - _ArrayLikeUnknown, + _Complex128Codes, + _DTypeLike, + _DTypeLikeBool, + _Float64Codes, + _IntPCodes, + _NestedSequence, + _NumberLike_co, + _ScalarLike_co, + _Shape, + _ShapeLike, + _SupportsArray, + _SupportsArrayFunc, + _SupportsDType, ) -_T = TypeVar("_T") -_SCT = TypeVar("_SCT", bound=generic) -_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) +from ._asarray import require +from ._ufunc_config import ( + errstate, + getbufsize, + 
geterr, + geterrcall, + setbufsize, + seterr, + seterrcall, +) +from .arrayprint import ( + array2string, + array_repr, + array_str, + format_float_positional, + format_float_scientific, + get_printoptions, + printoptions, + set_printoptions, +) +from .fromnumeric import ( + all, + amax, + amin, + any, + argmax, + argmin, + argpartition, + argsort, + around, + choose, + clip, + compress, + cumprod, + cumsum, + cumulative_prod, + cumulative_sum, + diagonal, + matrix_transpose, + max, + mean, + min, + ndim, + nonzero, + partition, + prod, + ptp, + put, + ravel, + repeat, + reshape, + resize, + round, + searchsorted, + shape, + size, + sort, + squeeze, + std, + sum, + swapaxes, + take, + trace, + transpose, + var, +) +from .multiarray import ( + ALLOW_THREADS as ALLOW_THREADS, + BUFSIZE as BUFSIZE, + CLIP as CLIP, + MAXDIMS as MAXDIMS, + MAY_SHARE_BOUNDS as MAY_SHARE_BOUNDS, + MAY_SHARE_EXACT as MAY_SHARE_EXACT, + RAISE as RAISE, + WRAP as WRAP, + _Array, + arange, + array, + asanyarray, + asarray, + ascontiguousarray, + asfortranarray, + broadcast, + can_cast, + concatenate, + copyto, + dot, + dtype, + empty, + empty_like, + flatiter, + from_dlpack, + frombuffer, + fromfile, + fromiter, + fromstring, + inner, + lexsort, + matmul, + may_share_memory, + min_scalar_type, + ndarray, + nditer, + nested_iters, + normalize_axis_index as normalize_axis_index, + promote_types, + putmask, + result_type, + shares_memory, + vdot, + where, + zeros, +) +from .numerictypes import ( + ScalarType, + bool, + bool_, + busday_count, + busday_offset, + busdaycalendar, + byte, + bytes_, + cdouble, + character, + clongdouble, + complex64, + complex128, + complex192, + complex256, + complexfloating, + csingle, + datetime64, + datetime_as_string, + datetime_data, + double, + flexible, + float16, + float32, + float64, + float96, + float128, + floating, + generic, + half, + inexact, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + intp, + is_busday, + isdtype, + issubdtype, + 
long, + longdouble, + longlong, + number, + object_, + short, + signedinteger, + single, + str_, + timedelta64, + typecodes, + ubyte, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + unsignedinteger, + ushort, + void, +) +from .umath import ( + absolute, + add, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + bitwise_and, + bitwise_count, + bitwise_or, + bitwise_xor, + cbrt, + ceil, + conj, + conjugate, + copysign, + cos, + cosh, + deg2rad, + degrees, + divide, + divmod, + e, + equal, + euler_gamma, + exp, + exp2, + expm1, + fabs, + float_power, + floor, + floor_divide, + fmax, + fmin, + fmod, + frexp, + frompyfunc, + gcd, + greater, + greater_equal, + heaviside, + hypot, + invert, + isfinite, + isinf, + isnan, + isnat, + lcm, + ldexp, + left_shift, + less, + less_equal, + log, + log1p, + log2, + log10, + logaddexp, + logaddexp2, + logical_and, + logical_not, + logical_or, + logical_xor, + matvec, + maximum, + minimum, + mod, + modf, + multiply, + negative, + nextafter, + not_equal, + pi, + positive, + power, + rad2deg, + radians, + reciprocal, + remainder, + right_shift, + rint, + sign, + signbit, + sin, + sinh, + spacing, + sqrt, + square, + subtract, + tan, + tanh, + true_divide, + trunc, + vecdot, + vecmat, +) -_CorrelateMode = L["valid", "same", "full"] +__all__ = [ + "False_", + "ScalarType", + "True_", + "absolute", + "add", + "all", + "allclose", + "amax", + "amin", + "any", + "arange", + "arccos", + "arccosh", + "arcsin", + "arcsinh", + "arctan", + "arctan2", + "arctanh", + "argmax", + "argmin", + "argpartition", + "argsort", + "argwhere", + "around", + "array", + "array2string", + "array_equal", + "array_equiv", + "array_repr", + "array_str", + "asanyarray", + "asarray", + "ascontiguousarray", + "asfortranarray", + "astype", + "base_repr", + "binary_repr", + "bitwise_and", + "bitwise_count", + "bitwise_not", + "bitwise_or", + "bitwise_xor", + "bool", + "bool_", + "broadcast", + 
"busday_count", + "busday_offset", + "busdaycalendar", + "byte", + "bytes_", + "can_cast", + "cbrt", + "cdouble", + "ceil", + "character", + "choose", + "clip", + "clongdouble", + "complex64", + "complex128", + "complex192", + "complex256", + "complexfloating", + "compress", + "concatenate", + "conj", + "conjugate", + "convolve", + "copysign", + "copyto", + "correlate", + "cos", + "cosh", + "count_nonzero", + "cross", + "csingle", + "cumprod", + "cumsum", + "cumulative_prod", + "cumulative_sum", + "datetime64", + "datetime_as_string", + "datetime_data", + "deg2rad", + "degrees", + "diagonal", + "divide", + "divmod", + "dot", + "double", + "dtype", + "e", + "empty", + "empty_like", + "equal", + "errstate", + "euler_gamma", + "exp", + "exp2", + "expm1", + "fabs", + "flatiter", + "flatnonzero", + "flexible", + "float16", + "float32", + "float64", + "float96", + "float128", + "float_power", + "floating", + "floor", + "floor_divide", + "fmax", + "fmin", + "fmod", + "format_float_positional", + "format_float_scientific", + "frexp", + "from_dlpack", + "frombuffer", + "fromfile", + "fromfunction", + "fromiter", + "frompyfunc", + "fromstring", + "full", + "full_like", + "gcd", + "generic", + "get_printoptions", + "getbufsize", + "geterr", + "geterrcall", + "greater", + "greater_equal", + "half", + "heaviside", + "hypot", + "identity", + "indices", + "inexact", + "inf", + "inner", + "int8", + "int16", + "int32", + "int64", + "int_", + "intc", + "integer", + "intp", + "invert", + "is_busday", + "isclose", + "isdtype", + "isfinite", + "isfortran", + "isinf", + "isnan", + "isnat", + "isscalar", + "issubdtype", + "lcm", + "ldexp", + "left_shift", + "less", + "less_equal", + "lexsort", + "little_endian", + "log", + "log1p", + "log2", + "log10", + "logaddexp", + "logaddexp2", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "long", + "longdouble", + "longlong", + "matmul", + "matrix_transpose", + "matvec", + "max", + "maximum", + "may_share_memory", + "mean", + 
"min", + "min_scalar_type", + "minimum", + "mod", + "modf", + "moveaxis", + "multiply", + "nan", + "ndarray", + "ndim", + "nditer", + "negative", + "nested_iters", + "newaxis", + "nextafter", + "nonzero", + "not_equal", + "number", + "object_", + "ones", + "ones_like", + "outer", + "partition", + "pi", + "positive", + "power", + "printoptions", + "prod", + "promote_types", + "ptp", + "put", + "putmask", + "rad2deg", + "radians", + "ravel", + "reciprocal", + "remainder", + "repeat", + "require", + "reshape", + "resize", + "result_type", + "right_shift", + "rint", + "roll", + "rollaxis", + "round", + "searchsorted", + "set_printoptions", + "setbufsize", + "seterr", + "seterrcall", + "shape", + "shares_memory", + "short", + "sign", + "signbit", + "signedinteger", + "sin", + "single", + "sinh", + "size", + "sort", + "spacing", + "sqrt", + "square", + "squeeze", + "std", + "str_", + "subtract", + "sum", + "swapaxes", + "take", + "tan", + "tanh", + "tensordot", + "timedelta64", + "trace", + "transpose", + "true_divide", + "trunc", + "typecodes", + "ubyte", + "ufunc", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintc", + "uintp", + "ulong", + "ulonglong", + "unsignedinteger", + "ushort", + "var", + "vdot", + "vecdot", + "vecmat", + "void", + "where", + "zeros", + "zeros_like", +] -__all__: list[str] +_AnyNumericScalarT = TypeVar( + "_AnyNumericScalarT", + np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64, + np.float16, np.float32, np.float64, np.longdouble, + np.complex64, np.complex128, np.clongdouble, + np.timedelta64, + np.object_, +) -@overload -def zeros_like( - a: _ArrayType, - dtype: None = ..., - order: _OrderKACF = ..., - subok: L[True] = ..., - shape: None = ..., +type _CorrelateMode = L["valid", "same", "full"] + +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] +type _Array3D[ScalarT: np.generic] = 
np.ndarray[tuple[int, int, int], np.dtype[ScalarT]] +type _Array4D[ScalarT: np.generic] = np.ndarray[tuple[int, int, int, int], np.dtype[ScalarT]] + +type _Int_co = np.integer | np.bool +type _Float_co = np.floating | _Int_co +type _Number_co = np.number | np.bool +type _TD64_co = np.timedelta64 | _Int_co + +type _ArrayLike1D[ScalarT: np.generic] = _SupportsArray[np.dtype[ScalarT]] | Sequence[ScalarT] +type _ArrayLike1DBool_co = _SupportsArray[np.dtype[np.bool]] | Sequence[py_bool | np.bool] +type _ArrayLike1DInt_co = _SupportsArray[np.dtype[_Int_co]] | Sequence[int | _Int_co] +type _ArrayLike1DFloat_co = _SupportsArray[np.dtype[_Float_co]] | Sequence[float | _Float_co] +type _ArrayLike1DNumber_co = _SupportsArray[np.dtype[_Number_co]] | Sequence[complex | _Number_co] +type _ArrayLike1DTD64_co = _ArrayLike1D[_TD64_co] +type _ArrayLike1DObject_co = _ArrayLike1D[np.object_] + +type _DTypeLikeInt = type[int] | _IntPCodes +type _DTypeLikeFloat64 = type[float] | _Float64Codes +type _DTypeLikeComplex128 = type[complex] | _Complex128Codes + +### + +# keep in sync with `ones_like` +@overload +def zeros_like[ArrayT: np.ndarray]( + a: ArrayT, + dtype: None = None, + order: _OrderKACF = "K", + subok: L[True] = True, + shape: None = None, *, - device: None | L["cpu"] = ..., -) -> _ArrayType: ... -@overload -def zeros_like( - a: _ArrayLike[_SCT], - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike = ..., + device: L["cpu"] | None = None, +) -> ArrayT: ... +@overload +def zeros_like[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + dtype: None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: None | L["cpu"] = ..., -) -> NDArray[_SCT]: ... + device: L["cpu"] | None = None, +) -> NDArray[ScalarT]: ... 
@overload -def zeros_like( +def zeros_like[ScalarT: np.generic]( a: object, - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike= ..., - *, - device: None | L["cpu"] = ..., -) -> NDArray[Any]: ... -@overload -def zeros_like( - a: Any, - dtype: _DTypeLike[_SCT], - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike= ..., + dtype: _DTypeLike[ScalarT], + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: None | L["cpu"] = ..., -) -> NDArray[_SCT]: ... + device: L["cpu"] | None = None, +) -> NDArray[ScalarT]: ... @overload def zeros_like( - a: Any, - dtype: DTypeLike, - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike= ..., + a: object, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: None | L["cpu"] = ..., + device: L["cpu"] | None = None, ) -> NDArray[Any]: ... -@overload +# keep in sync with empty and zeros (`_core/multiarray.pyi`) +@overload # 1d, float64 default +def ones( + shape: SupportsIndex, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.float64]: ... +@overload # 1d, specific dtype +def ones[DTypeT: np.dtype]( + shape: SupportsIndex, + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> ndarray[tuple[int], DTypeT]: ... +@overload # 1d, specific scalar type +def ones[ScalarT: np.generic]( + shape: SupportsIndex, + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[ScalarT]: ... 
+@overload # 1d, unknown dtype +def ones( + shape: SupportsIndex, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[Incomplete]: ... +@overload # known shape, float64 default +def ones[ShapeT: _Shape]( + shape: ShapeT, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[ShapeT, float64]: ... +@overload # known shape, specific dtype +def ones[ShapeT: _Shape, DTypeT: np.dtype]( + shape: ShapeT, + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> ndarray[ShapeT, DTypeT]: ... +@overload # known shape, specific scalar type +def ones[ShapeT: _Shape, ScalarT: np.generic]( + shape: ShapeT, + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[ShapeT, ScalarT]: ... +@overload # known shape, unknown dtype +def ones[ShapeT: _Shape]( + shape: ShapeT, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[ShapeT, Incomplete]: ... +@overload # unknown shape, float64 default def ones( shape: _ShapeLike, - dtype: None = ..., - order: _OrderCF = ..., + dtype: None = None, + order: _OrderCF = "C", *, - device: None | L["cpu"] = ..., - like: _SupportsArrayFunc = ..., + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[float64]: ... -@overload -def ones( +@overload # unknown shape, specific dtype +def ones[DTypeT: np.dtype]( shape: _ShapeLike, - dtype: _DTypeLike[_SCT], - order: _OrderCF = ..., + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", *, - device: None | L["cpu"] = ..., - like: _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... 
-@overload + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> ndarray[_AnyShape, DTypeT]: ... +@overload # unknown shape, specific scalar type +def ones[ScalarT: np.generic]( + shape: _ShapeLike, + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[ScalarT]: ... +@overload # unknown shape, unknown dtype def ones( shape: _ShapeLike, - dtype: DTypeLike, - order: _OrderCF = ..., + dtype: DTypeLike | None = None, + order: _OrderCF = "C", *, - device: None | L["cpu"] = ..., - like: _SupportsArrayFunc = ..., -) -> NDArray[Any]: ... + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[Incomplete]: ... -@overload -def ones_like( - a: _ArrayType, - dtype: None = ..., - order: _OrderKACF = ..., - subok: L[True] = ..., - shape: None = ..., +# keep in sync with `zeros_like` +@overload +def ones_like[ArrayT: np.ndarray]( + a: ArrayT, + dtype: None = None, + order: _OrderKACF = "K", + subok: L[True] = True, + shape: None = None, + *, + device: L["cpu"] | None = None, +) -> ArrayT: ... +@overload +def ones_like[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + dtype: None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: None | L["cpu"] = ..., -) -> _ArrayType: ... + device: L["cpu"] | None = None, +) -> NDArray[ScalarT]: ... @overload -def ones_like( - a: _ArrayLike[_SCT], - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike = ..., +def ones_like[ScalarT: np.generic]( + a: object, + dtype: _DTypeLike[ScalarT], + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: None | L["cpu"] = ..., -) -> NDArray[_SCT]: ... + device: L["cpu"] | None = None, +) -> NDArray[ScalarT]: ... 
@overload def ones_like( a: object, - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike= ..., + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: None | L["cpu"] = ..., + device: L["cpu"] | None = None, ) -> NDArray[Any]: ... -@overload -def ones_like( - a: Any, - dtype: _DTypeLike[_SCT], - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike= ..., + +# TODO: Add overloads for bool, int, float, complex, str, bytes, and memoryview +# 1-D shape +@overload +def full[ScalarT: np.generic]( + shape: SupportsIndex, + fill_value: ScalarT, + dtype: None = None, + order: _OrderCF = "C", *, - device: None | L["cpu"] = ..., -) -> NDArray[_SCT]: ... + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[tuple[int], ScalarT]: ... @overload -def ones_like( - a: Any, - dtype: DTypeLike, - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike= ..., +def full[DTypeT: np.dtype]( + shape: SupportsIndex, + fill_value: Any, + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", *, - device: None | L["cpu"] = ..., -) -> NDArray[Any]: ... - + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> np.ndarray[tuple[int], DTypeT]: ... @overload -def full( - shape: _ShapeLike, +def full[ScalarT: np.generic]( + shape: SupportsIndex, fill_value: Any, - dtype: None = ..., - order: _OrderCF = ..., + dtype: type[ScalarT], + order: _OrderCF = "C", *, - device: None | L["cpu"] = ..., - like: _SupportsArrayFunc = ..., -) -> NDArray[Any]: ... + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[tuple[int], ScalarT]: ... 
@overload def full( - shape: _ShapeLike, + shape: SupportsIndex, fill_value: Any, - dtype: _DTypeLike[_SCT], - order: _OrderCF = ..., + dtype: DTypeLike | None = None, + order: _OrderCF = "C", *, - device: None | L["cpu"] = ..., - like: _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[tuple[int], Any]: ... +# known shape +@overload +def full[ShapeT: _Shape, ScalarT: np.generic]( + shape: ShapeT, + fill_value: ScalarT, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[ShapeT, ScalarT]: ... @overload -def full( - shape: _ShapeLike, +def full[ShapeT: _Shape, DTypeT: np.dtype]( + shape: ShapeT, fill_value: Any, - dtype: DTypeLike, - order: _OrderCF = ..., + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", *, - device: None | L["cpu"] = ..., - like: _SupportsArrayFunc = ..., -) -> NDArray[Any]: ... - + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> np.ndarray[ShapeT, DTypeT]: ... @overload -def full_like( - a: _ArrayType, +def full[ShapeT: _Shape, ScalarT: np.generic]( + shape: ShapeT, fill_value: Any, - dtype: None = ..., - order: _OrderKACF = ..., - subok: L[True] = ..., - shape: None = ..., + dtype: type[ScalarT], + order: _OrderCF = "C", *, - device: None | L["cpu"] = ..., -) -> _ArrayType: ... + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[ShapeT, ScalarT]: ... @overload -def full_like( - a: _ArrayLike[_SCT], +def full[ShapeT: _Shape]( + shape: ShapeT, fill_value: Any, - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike = ..., + dtype: DTypeLike | None = None, + order: _OrderCF = "C", *, - device: None | L["cpu"] = ..., -) -> NDArray[_SCT]: ... + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[ShapeT, Any]: ... 
+# unknown shape @overload -def full_like( - a: object, +def full[ScalarT: np.generic]( + shape: _ShapeLike, + fill_value: ScalarT, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[ScalarT]: ... +@overload +def full[DTypeT: np.dtype]( + shape: _ShapeLike, fill_value: Any, - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike= ..., + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", *, - device: None | L["cpu"] = ..., -) -> NDArray[Any]: ... + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> np.ndarray[Any, DTypeT]: ... @overload -def full_like( - a: Any, +def full[ScalarT: np.generic]( + shape: _ShapeLike, fill_value: Any, - dtype: _DTypeLike[_SCT], - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike= ..., + dtype: type[ScalarT], + order: _OrderCF = "C", *, - device: None | L["cpu"] = ..., -) -> NDArray[_SCT]: ... + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[ScalarT]: ... @overload -def full_like( - a: Any, +def full( + shape: _ShapeLike, fill_value: Any, - dtype: DTypeLike, - order: _OrderKACF = ..., - subok: bool = ..., - shape: None | _ShapeLike= ..., + dtype: DTypeLike | None = None, + order: _OrderCF = "C", *, - device: None | L["cpu"] = ..., + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... @overload -def count_nonzero( - a: ArrayLike, - axis: None = ..., +def full_like[ArrayT: np.ndarray]( + a: ArrayT, + fill_value: object, + dtype: None = None, + order: _OrderKACF = "K", + subok: L[True] = True, + shape: None = None, *, - keepdims: L[False] = ..., -) -> int: ... + device: L["cpu"] | None = None, +) -> ArrayT: ... 
+@overload +def full_like[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + fill_value: object, + dtype: None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, + *, + device: L["cpu"] | None = None, +) -> NDArray[ScalarT]: ... @overload -def count_nonzero( - a: ArrayLike, - axis: _ShapeLike = ..., +def full_like[ScalarT: np.generic]( + a: object, + fill_value: object, + dtype: _DTypeLike[ScalarT], + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - keepdims: bool = ..., -) -> Any: ... # TODO: np.intp or ndarray[np.intp] + device: L["cpu"] | None = None, +) -> NDArray[ScalarT]: ... +@overload +def full_like( + a: object, + fill_value: object, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, + *, + device: L["cpu"] | None = None, +) -> NDArray[Any]: ... -def isfortran(a: NDArray[Any] | generic) -> bool: ... +# +@overload +def count_nonzero(a: ArrayLike, axis: None = None, *, keepdims: L[False] = False) -> np.intp: ... +@overload +def count_nonzero(a: _ScalarLike_co, axis: _ShapeLike | None = None, *, keepdims: L[True]) -> np.intp: ... +@overload +def count_nonzero( + a: NDArray[Any] | _NestedSequence[ArrayLike], axis: _ShapeLike | None = None, *, keepdims: L[True] +) -> NDArray[np.intp]: ... +@overload +def count_nonzero(a: ArrayLike, axis: _ShapeLike | None = None, *, keepdims: py_bool = False) -> Any: ... -def argwhere(a: ArrayLike) -> NDArray[intp]: ... +# +def isfortran(a: ndarray | generic) -> py_bool: ... -def flatnonzero(a: ArrayLike) -> NDArray[intp]: ... +# +def argwhere(a: ArrayLike) -> _Array2D[np.intp]: ... +def flatnonzero(a: ArrayLike) -> _Array1D[np.intp]: ... 
+# NOTE: we ignore UP047 because inlining `_AnyScalarT` would result in a lot of code duplication + +# keep in sync with `convolve` and `ma.core.correlate` @overload -def correlate( - a: _ArrayLikeUnknown, - v: _ArrayLikeUnknown, - mode: _CorrelateMode = ..., -) -> NDArray[Any]: ... -@overload -def correlate( - a: _ArrayLikeBool_co, - v: _ArrayLikeBool_co, - mode: _CorrelateMode = ..., -) -> NDArray[np.bool]: ... -@overload -def correlate( - a: _ArrayLikeUInt_co, - v: _ArrayLikeUInt_co, - mode: _CorrelateMode = ..., -) -> NDArray[unsignedinteger[Any]]: ... +def correlate( # noqa: UP047 + a: _ArrayLike1D[_AnyNumericScalarT], v: _ArrayLike1D[_AnyNumericScalarT], mode: _CorrelateMode = "valid" +) -> _Array1D[_AnyNumericScalarT]: ... @overload -def correlate( - a: _ArrayLikeInt_co, - v: _ArrayLikeInt_co, - mode: _CorrelateMode = ..., -) -> NDArray[signedinteger[Any]]: ... +def correlate(a: _ArrayLike1DBool_co, v: _ArrayLike1DBool_co, mode: _CorrelateMode = "valid") -> _Array1D[np.bool]: ... @overload -def correlate( - a: _ArrayLikeFloat_co, - v: _ArrayLikeFloat_co, - mode: _CorrelateMode = ..., -) -> NDArray[floating[Any]]: ... +def correlate(a: _ArrayLike1DInt_co, v: _ArrayLike1DInt_co, mode: _CorrelateMode = "valid") -> _Array1D[np.int_ | Any]: ... @overload -def correlate( - a: _ArrayLikeComplex_co, - v: _ArrayLikeComplex_co, - mode: _CorrelateMode = ..., -) -> NDArray[complexfloating[Any, Any]]: ... +def correlate(a: _ArrayLike1DFloat_co, v: _ArrayLike1DFloat_co, mode: _CorrelateMode = "valid") -> _Array1D[np.float64 | Any]: ... @overload def correlate( - a: _ArrayLikeTD64_co, - v: _ArrayLikeTD64_co, - mode: _CorrelateMode = ..., -) -> NDArray[timedelta64]: ... + a: _ArrayLike1DNumber_co, v: _ArrayLike1DNumber_co, mode: _CorrelateMode = "valid" +) -> _Array1D[np.complex128 | Any]: ... @overload def correlate( - a: _ArrayLikeObject_co, - v: _ArrayLikeObject_co, - mode: _CorrelateMode = ..., -) -> NDArray[object_]: ... 
+ a: _ArrayLike1DTD64_co, v: _ArrayLike1DTD64_co, mode: _CorrelateMode = "valid" +) -> _Array1D[np.timedelta64 | Any]: ... +# keep in sync with `correlate` @overload -def convolve( - a: _ArrayLikeUnknown, - v: _ArrayLikeUnknown, - mode: _CorrelateMode = ..., -) -> NDArray[Any]: ... +def convolve( # noqa: UP047 + a: _ArrayLike1D[_AnyNumericScalarT], v: _ArrayLike1D[_AnyNumericScalarT], mode: _CorrelateMode = "valid" +) -> _Array1D[_AnyNumericScalarT]: ... @overload -def convolve( - a: _ArrayLikeBool_co, - v: _ArrayLikeBool_co, - mode: _CorrelateMode = ..., -) -> NDArray[np.bool]: ... +def convolve(a: _ArrayLike1DBool_co, v: _ArrayLike1DBool_co, mode: _CorrelateMode = "valid") -> _Array1D[np.bool]: ... @overload -def convolve( - a: _ArrayLikeUInt_co, - v: _ArrayLikeUInt_co, - mode: _CorrelateMode = ..., -) -> NDArray[unsignedinteger[Any]]: ... +def convolve(a: _ArrayLike1DInt_co, v: _ArrayLike1DInt_co, mode: _CorrelateMode = "valid") -> _Array1D[np.int_ | Any]: ... @overload -def convolve( - a: _ArrayLikeInt_co, - v: _ArrayLikeInt_co, - mode: _CorrelateMode = ..., -) -> NDArray[signedinteger[Any]]: ... +def convolve(a: _ArrayLike1DFloat_co, v: _ArrayLike1DFloat_co, mode: _CorrelateMode = "valid") -> _Array1D[np.float64 | Any]: ... @overload def convolve( - a: _ArrayLikeFloat_co, - v: _ArrayLikeFloat_co, - mode: _CorrelateMode = ..., -) -> NDArray[floating[Any]]: ... + a: _ArrayLike1DNumber_co, v: _ArrayLike1DNumber_co, mode: _CorrelateMode = "valid" +) -> _Array1D[np.complex128 | Any]: ... @overload def convolve( - a: _ArrayLikeComplex_co, - v: _ArrayLikeComplex_co, - mode: _CorrelateMode = ..., -) -> NDArray[complexfloating[Any, Any]]: ... -@overload -def convolve( - a: _ArrayLikeTD64_co, - v: _ArrayLikeTD64_co, - mode: _CorrelateMode = ..., -) -> NDArray[timedelta64]: ... -@overload -def convolve( - a: _ArrayLikeObject_co, - v: _ArrayLikeObject_co, - mode: _CorrelateMode = ..., -) -> NDArray[object_]: ... 
+ a: _ArrayLike1DTD64_co, v: _ArrayLike1DTD64_co, mode: _CorrelateMode = "valid" +) -> _Array1D[np.timedelta64 | Any]: ... +# keep roughly in sync with `convolve` and `correlate`, but for 2-D output and an additional `out` overload, +# and also keep in sync with `ma.core.outer` (minus `out`) @overload -def outer( - a: _ArrayLikeUnknown, - b: _ArrayLikeUnknown, - out: None = ..., -) -> NDArray[Any]: ... +def outer( # noqa: UP047 + a: _ArrayLike[_AnyNumericScalarT], b: _ArrayLike[_AnyNumericScalarT], out: None = None +) -> _Array2D[_AnyNumericScalarT]: ... @overload -def outer( - a: _ArrayLikeBool_co, - b: _ArrayLikeBool_co, - out: None = ..., -) -> NDArray[np.bool]: ... +def outer(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, out: None = None) -> _Array2D[np.bool]: ... @overload -def outer( - a: _ArrayLikeUInt_co, - b: _ArrayLikeUInt_co, - out: None = ..., -) -> NDArray[unsignedinteger[Any]]: ... +def outer(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, out: None = None) -> _Array2D[np.int_ | Any]: ... @overload -def outer( - a: _ArrayLikeInt_co, - b: _ArrayLikeInt_co, - out: None = ..., -) -> NDArray[signedinteger[Any]]: ... +def outer(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, out: None = None) -> _Array2D[np.float64 | Any]: ... @overload -def outer( - a: _ArrayLikeFloat_co, - b: _ArrayLikeFloat_co, - out: None = ..., -) -> NDArray[floating[Any]]: ... +def outer(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, out: None = None) -> _Array2D[np.complex128 | Any]: ... @overload -def outer( - a: _ArrayLikeComplex_co, - b: _ArrayLikeComplex_co, - out: None = ..., -) -> NDArray[complexfloating[Any, Any]]: ... +def outer(a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co, out: None = None) -> _Array2D[np.timedelta64 | Any]: ... @overload -def outer( - a: _ArrayLikeTD64_co, - b: _ArrayLikeTD64_co, - out: None = ..., -) -> NDArray[timedelta64]: ... 
+def outer[ArrayT: np.ndarray](a: _ArrayLikeNumber_co | _ArrayLikeTD64_co, b: _ArrayLikeNumber_co | _ArrayLikeTD64_co, out: ArrayT) -> ArrayT: ... + +# keep in sync with numpy.linalg._linalg.tensordot (ignoring `/, *`) @overload -def outer( - a: _ArrayLikeObject_co, - b: _ArrayLikeObject_co, - out: None = ..., -) -> NDArray[object_]: ... +def tensordot( # noqa: UP047 + a: _ArrayLike[_AnyNumericScalarT], b: _ArrayLike[_AnyNumericScalarT], axes: int | tuple[_ShapeLike, _ShapeLike] = 2 +) -> NDArray[_AnyNumericScalarT]: ... @overload -def outer( - a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, - b: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, - out: _ArrayType, -) -> _ArrayType: ... - +def tensordot(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, axes: int | tuple[_ShapeLike, _ShapeLike] = 2) -> NDArray[np.bool]: ... @overload def tensordot( - a: _ArrayLikeUnknown, - b: _ArrayLikeUnknown, - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., -) -> NDArray[Any]: ... + a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 +) -> NDArray[np.int_ | Any]: ... @overload def tensordot( - a: _ArrayLikeBool_co, - b: _ArrayLikeBool_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., -) -> NDArray[np.bool]: ... + a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 +) -> NDArray[np.float64 | Any]: ... @overload def tensordot( - a: _ArrayLikeUInt_co, - b: _ArrayLikeUInt_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., -) -> NDArray[unsignedinteger[Any]]: ... + a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 +) -> NDArray[np.complex128 | Any]: ... + +# @overload -def tensordot( +def cross( # noqa: UP047 + a: _ArrayLike[_AnyNumericScalarT], + b: _ArrayLike[_AnyNumericScalarT], + axisa: int = -1, + axisb: int = -1, + axisc: int = -1, + axis: int | None = None, +) -> NDArray[_AnyNumericScalarT]: ... 
+@overload +def cross( a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., -) -> NDArray[signedinteger[Any]]: ... + axisa: int = -1, + axisb: int = -1, + axisc: int = -1, + axis: int | None = None, +) -> NDArray[np.int_ | Any]: ... @overload -def tensordot( +def cross( a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., -) -> NDArray[floating[Any]]: ... + axisa: int = -1, + axisb: int = -1, + axisc: int = -1, + axis: int | None = None, +) -> NDArray[np.float64 | Any]: ... @overload -def tensordot( +def cross( a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., -) -> NDArray[complexfloating[Any, Any]]: ... -@overload -def tensordot( - a: _ArrayLikeTD64_co, - b: _ArrayLikeTD64_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., -) -> NDArray[timedelta64]: ... -@overload -def tensordot( - a: _ArrayLikeObject_co, - b: _ArrayLikeObject_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., -) -> NDArray[object_]: ... + axisa: int = -1, + axisb: int = -1, + axisc: int = -1, + axis: int | None = None, +) -> NDArray[np.complex128 | Any]: ... +# @overload -def roll( - a: _ArrayLike[_SCT], - shift: _ShapeLike, - axis: None | _ShapeLike = ..., -) -> NDArray[_SCT]: ... +def roll[ArrayT: np.ndarray](a: ArrayT, shift: _ShapeLike, axis: _ShapeLike | None = None) -> ArrayT: ... @overload -def roll( - a: ArrayLike, - shift: _ShapeLike, - axis: None | _ShapeLike = ..., -) -> NDArray[Any]: ... - -def rollaxis( - a: NDArray[_SCT], - axis: int, - start: int = ..., -) -> NDArray[_SCT]: ... - -def moveaxis( - a: NDArray[_SCT], - source: _ShapeLike, - destination: _ShapeLike, -) -> NDArray[_SCT]: ... - -@overload -def cross( - x1: _ArrayLikeUnknown, - x2: _ArrayLikeUnknown, - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: None | int = ..., -) -> NDArray[Any]: ... 
-@overload -def cross( - x1: _ArrayLikeBool_co, - x2: _ArrayLikeBool_co, - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: None | int = ..., -) -> NoReturn: ... +def roll[ScalarT: np.generic](a: _ArrayLike[ScalarT], shift: _ShapeLike, axis: _ShapeLike | None = None) -> NDArray[ScalarT]: ... @overload -def cross( - x1: _ArrayLikeUInt_co, - x2: _ArrayLikeUInt_co, - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: None | int = ..., -) -> NDArray[unsignedinteger[Any]]: ... -@overload -def cross( - x1: _ArrayLikeInt_co, - x2: _ArrayLikeInt_co, - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: None | int = ..., -) -> NDArray[signedinteger[Any]]: ... -@overload -def cross( - x1: _ArrayLikeFloat_co, - x2: _ArrayLikeFloat_co, - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: None | int = ..., -) -> NDArray[floating[Any]]: ... -@overload -def cross( - x1: _ArrayLikeComplex_co, - x2: _ArrayLikeComplex_co, - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: None | int = ..., -) -> NDArray[complexfloating[Any, Any]]: ... -@overload -def cross( - x1: _ArrayLikeObject_co, - x2: _ArrayLikeObject_co, - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: None | int = ..., -) -> NDArray[object_]: ... +def roll(a: ArrayLike, shift: _ShapeLike, axis: _ShapeLike | None = None) -> NDArray[Any]: ... -@overload -def indices( - dimensions: Sequence[int], - dtype: type[int] = ..., - sparse: L[False] = ..., -) -> NDArray[int_]: ... -@overload -def indices( - dimensions: Sequence[int], - dtype: type[int] = ..., - sparse: L[True] = ..., -) -> tuple[NDArray[int_], ...]: ... -@overload -def indices( - dimensions: Sequence[int], - dtype: _DTypeLike[_SCT], - sparse: L[False] = ..., -) -> NDArray[_SCT]: ... -@overload -def indices( - dimensions: Sequence[int], - dtype: _DTypeLike[_SCT], - sparse: L[True], -) -> tuple[NDArray[_SCT], ...]: ... 
-@overload -def indices( - dimensions: Sequence[int], - dtype: DTypeLike, - sparse: L[False] = ..., -) -> NDArray[Any]: ... -@overload +# +def rollaxis[ArrayT: np.ndarray](a: ArrayT, axis: int, start: int = 0) -> ArrayT: ... +def moveaxis[ArrayT: np.ndarray](a: ArrayT, source: _ShapeLike, destination: _ShapeLike) -> ArrayT: ... +def normalize_axis_tuple( + axis: int | Iterable[int], + ndim: int, + argname: str | None = None, + allow_duplicate: py_bool | None = False, +) -> tuple[int, ...]: ... + +# +@overload # 0d, dtype=int (default), sparse=False (default) +def indices(dimensions: tuple[()], dtype: type[int] = int, sparse: L[False] = False) -> _Array1D[np.intp]: ... +@overload # 0d, dtype=, sparse=True +def indices(dimensions: tuple[()], dtype: DTypeLike | None = int, *, sparse: L[True]) -> tuple[()]: ... +@overload # 0d, dtype=, sparse=False (default) +def indices[ScalarT: np.generic](dimensions: tuple[()], dtype: _DTypeLike[ScalarT], sparse: L[False] = False) -> _Array1D[ScalarT]: ... +@overload # 0d, dtype=, sparse=False (default) +def indices(dimensions: tuple[()], dtype: DTypeLike, sparse: L[False] = False) -> _Array1D[Any]: ... +@overload # 1d, dtype=int (default), sparse=False (default) +def indices(dimensions: tuple[int], dtype: type[int] = int, sparse: L[False] = False) -> _Array2D[np.intp]: ... +@overload # 1d, dtype=int (default), sparse=True +def indices(dimensions: tuple[int], dtype: type[int] = int, *, sparse: L[True]) -> tuple[_Array1D[np.intp]]: ... +@overload # 1d, dtype=, sparse=False (default) +def indices[ScalarT: np.generic](dimensions: tuple[int], dtype: _DTypeLike[ScalarT], sparse: L[False] = False) -> _Array2D[ScalarT]: ... +@overload # 1d, dtype=, sparse=True +def indices[ScalarT: np.generic](dimensions: tuple[int], dtype: _DTypeLike[ScalarT], sparse: L[True]) -> tuple[_Array1D[ScalarT]]: ... 
+@overload # 1d, dtype=, sparse=False (default) +def indices(dimensions: tuple[int], dtype: DTypeLike, sparse: L[False] = False) -> _Array2D[Any]: ... +@overload # 1d, dtype=, sparse=True +def indices(dimensions: tuple[int], dtype: DTypeLike, sparse: L[True]) -> tuple[_Array1D[Any]]: ... +@overload # 2d, dtype=int (default), sparse=False (default) +def indices(dimensions: tuple[int, int], dtype: type[int] = int, sparse: L[False] = False) -> _Array3D[np.intp]: ... +@overload # 2d, dtype=int (default), sparse=True def indices( - dimensions: Sequence[int], - dtype: DTypeLike, - sparse: L[True], -) -> tuple[NDArray[Any], ...]: ... + dimensions: tuple[int, int], dtype: type[int] = int, *, sparse: L[True] +) -> tuple[_Array2D[np.intp], _Array2D[np.intp]]: ... +@overload # 2d, dtype=, sparse=False (default) +def indices[ScalarT: np.generic](dimensions: tuple[int, int], dtype: _DTypeLike[ScalarT], sparse: L[False] = False) -> _Array3D[ScalarT]: ... +@overload # 2d, dtype=, sparse=True +def indices[ScalarT: np.generic]( + dimensions: tuple[int, int], dtype: _DTypeLike[ScalarT], sparse: L[True] +) -> tuple[_Array2D[ScalarT], _Array2D[ScalarT]]: ... +@overload # 2d, dtype=, sparse=False (default) +def indices(dimensions: tuple[int, int], dtype: DTypeLike, sparse: L[False] = False) -> _Array3D[Any]: ... +@overload # 2d, dtype=, sparse=True +def indices(dimensions: tuple[int, int], dtype: DTypeLike, sparse: L[True]) -> tuple[_Array2D[Any], _Array2D[Any]]: ... +@overload # ?d, dtype=int (default), sparse=False (default) +def indices(dimensions: Sequence[int], dtype: type[int] = int, sparse: L[False] = False) -> NDArray[np.intp]: ... +@overload # ?d, dtype=int (default), sparse=True +def indices(dimensions: Sequence[int], dtype: type[int] = int, *, sparse: L[True]) -> tuple[NDArray[np.intp], ...]: ... 
+@overload # ?d, dtype=, sparse=False (default) +def indices[ScalarT: np.generic](dimensions: Sequence[int], dtype: _DTypeLike[ScalarT], sparse: L[False] = False) -> NDArray[ScalarT]: ... +@overload # ?d, dtype=, sparse=True +def indices[ScalarT: np.generic](dimensions: Sequence[int], dtype: _DTypeLike[ScalarT], sparse: L[True]) -> tuple[NDArray[ScalarT], ...]: ... +@overload # ?d, dtype=, sparse=False (default) +def indices(dimensions: Sequence[int], dtype: DTypeLike, sparse: L[False] = False) -> ndarray: ... +@overload # ?d, dtype=, sparse=True +def indices(dimensions: Sequence[int], dtype: DTypeLike, sparse: L[True]) -> tuple[ndarray, ...]: ... -def fromfunction( - function: Callable[..., _T], +# +def fromfunction[ReturnT]( + function: Callable[..., ReturnT], shape: Sequence[int], *, - dtype: DTypeLike = ..., - like: _SupportsArrayFunc = ..., - **kwargs: Any, -) -> _T: ... - -def isscalar(element: object) -> TypeGuard[ - generic | bool | int | float | complex | str | bytes | memoryview -]: ... + dtype: DTypeLike | None = float, + like: _SupportsArrayFunc | None = None, + **kwargs: object, +) -> ReturnT: ... -def binary_repr(num: SupportsIndex, width: None | int = ...) -> str: ... +# +def isscalar(element: object) -> TypeGuard[generic | complex | str | bytes | memoryview]: ... -def base_repr( - number: SupportsAbs[float], - base: float = ..., - padding: SupportsIndex = ..., -) -> str: ... +# +def binary_repr(num: SupportsIndex, width: int | None = None) -> str: ... +def base_repr(number: SupportsAbs[float], base: float = 2, padding: SupportsIndex | None = 0) -> str: ... -@overload -def identity( - n: int, - dtype: None = ..., - *, - like: _SupportsArrayFunc = ..., -) -> NDArray[float64]: ... -@overload -def identity( - n: int, - dtype: _DTypeLike[_SCT], - *, - like: _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... -@overload -def identity( - n: int, - dtype: DTypeLike, - *, - like: _SupportsArrayFunc = ..., -) -> NDArray[Any]: ... 
+# +@overload # dtype: None (default) +def identity(n: int, dtype: None = None, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.float64]: ... +@overload # dtype: known scalar type +def identity[ScalarT: np.generic](n: int, dtype: _DTypeLike[ScalarT], *, like: _SupportsArrayFunc | None = None) -> _Array2D[ScalarT]: ... +@overload # dtype: like bool +def identity(n: int, dtype: _DTypeLikeBool, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.bool]: ... +@overload # dtype: like int_ +def identity(n: int, dtype: _DTypeLikeInt, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.int_ | Any]: ... +@overload # dtype: like float64 +def identity(n: int, dtype: _DTypeLikeFloat64, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.float64 | Any]: ... +@overload # dtype: like complex128 +def identity(n: int, dtype: _DTypeLikeComplex128, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.complex128 | Any]: ... +@overload # dtype: unknown +def identity(n: int, dtype: DTypeLike, *, like: _SupportsArrayFunc | None = None) -> _Array2D[Incomplete]: ... +# def allclose( a: ArrayLike, b: ArrayLike, - rtol: ArrayLike = ..., - atol: ArrayLike = ..., - equal_nan: bool = ..., -) -> bool: ... + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> py_bool: ... -@overload +# +@overload # scalar, scalar def isclose( - a: _ScalarLike_co, - b: _ScalarLike_co, - rtol: ArrayLike = ..., - atol: ArrayLike = ..., - equal_nan: bool = ..., + a: _NumberLike_co, + b: _NumberLike_co, + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, ) -> np.bool: ... -@overload +@overload # known shape, same shape or scalar +def isclose[ShapeT: _Shape]( + a: np.ndarray[ShapeT], + b: np.ndarray[ShapeT] | _NumberLike_co, + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> np.ndarray[ShapeT, np.dtype[np.bool]]: ... 
+@overload # same shape or scalar, known shape +def isclose[ShapeT: _Shape]( + a: np.ndarray[ShapeT] | _NumberLike_co, + b: np.ndarray[ShapeT], + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> np.ndarray[ShapeT, np.dtype[np.bool]]: ... +@overload # 1d sequence, <=1d array-like +def isclose( + a: Sequence[_NumberLike_co], + b: Sequence[_NumberLike_co] | _NumberLike_co | np.ndarray[tuple[int]], + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> np.ndarray[tuple[int], np.dtype[np.bool]]: ... +@overload # <=1d array-like, 1d sequence +def isclose( + a: Sequence[_NumberLike_co] | _NumberLike_co | np.ndarray[tuple[int]], + b: Sequence[_NumberLike_co], + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> np.ndarray[tuple[int], np.dtype[np.bool]]: ... +@overload # 2d sequence, <=2d array-like +def isclose( + a: Sequence[Sequence[_NumberLike_co]], + b: Sequence[Sequence[_NumberLike_co]] | Sequence[_NumberLike_co] | _NumberLike_co | np.ndarray[tuple[int] | tuple[int, int]], + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> np.ndarray[tuple[int], np.dtype[np.bool]]: ... +@overload # <=2d array-like, 2d sequence +def isclose( + b: Sequence[Sequence[_NumberLike_co]] | Sequence[_NumberLike_co] | _NumberLike_co | np.ndarray[tuple[int] | tuple[int, int]], + a: Sequence[Sequence[_NumberLike_co]], + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> np.ndarray[tuple[int], np.dtype[np.bool]]: ... +@overload # unknown shape, unknown shape def isclose( a: ArrayLike, b: ArrayLike, - rtol: ArrayLike = ..., - atol: ArrayLike = ..., - equal_nan: bool = ..., -) -> NDArray[np.bool]: ... + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> NDArray[np.bool] | Any: ... -def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: bool = ...) -> bool: ... 
+# +def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: py_bool = False) -> py_bool: ... +def array_equiv(a1: ArrayLike, a2: ArrayLike) -> py_bool: ... -def array_equiv(a1: ArrayLike, a2: ArrayLike) -> bool: ... - -@overload -def astype( - x: NDArray[Any], - dtype: _DTypeLike[_SCT], - copy: bool = ..., -) -> NDArray[_SCT]: ... +# @overload -def astype( - x: NDArray[Any], - dtype: DTypeLike, - copy: bool = ..., -) -> NDArray[Any]: ... +def astype[ShapeT: _Shape, ScalarT: np.generic]( + x: ndarray[ShapeT], + dtype: _DTypeLike[ScalarT], + /, + *, + copy: py_bool = True, + device: L["cpu"] | None = None, +) -> ndarray[ShapeT, dtype[ScalarT]]: ... +@overload +def astype[ShapeT: _Shape]( + x: ndarray[ShapeT], + dtype: DTypeLike | None, + /, + *, + copy: py_bool = True, + device: L["cpu"] | None = None, +) -> ndarray[ShapeT]: ... diff --git a/numpy/_core/numerictypes.py b/numpy/_core/numerictypes.py index ac52cff49db2..bd3764f11b84 100644 --- a/numpy/_core/numerictypes.py +++ b/numpy/_core/numerictypes.py @@ -12,10 +12,10 @@ Bit-width names - int8 int16 int32 int64 int128 - uint8 uint16 uint32 uint64 uint128 - float16 float32 float64 float96 float128 float256 - complex32 complex64 complex128 complex192 complex256 complex512 + int8 int16 int32 int64 + uint8 uint16 uint32 uint64 + float16 float32 float64 float96 float128 + complex64 complex128 complex192 complex256 datetime64 timedelta64 c-based names @@ -77,110 +77,51 @@ """ import numbers -import warnings + +from numpy._utils import set_module from . 
import multiarray as ma from .multiarray import ( - ndarray, array, dtype, datetime_data, datetime_as_string, - busday_offset, busday_count, is_busday, busdaycalendar - ) -from .._utils import set_module + busday_count, + busday_offset, + busdaycalendar, + datetime_as_string, + datetime_data, + dtype, + is_busday, + ndarray, +) # we add more at the bottom __all__ = [ - 'ScalarType', 'typecodes', 'issubdtype', 'datetime_data', - 'datetime_as_string', 'busday_offset', 'busday_count', + 'ScalarType', 'typecodes', 'issubdtype', 'datetime_data', + 'datetime_as_string', 'busday_offset', 'busday_count', 'is_busday', 'busdaycalendar', 'isdtype' ] # we don't need all these imports, but we need to keep them for compatibility # for users using np._core.numerictypes.UPPER_TABLE -from ._string_helpers import ( - english_lower, english_upper, english_capitalize, LOWER_TABLE, UPPER_TABLE -) - -from ._type_aliases import ( - sctypeDict, allTypes, sctypes -) -from ._dtype import _kind_name - # we don't export these for import *, but we do want them accessible # as numerictypes.bool, etc. -from builtins import bool, int, float, complex, object, str, bytes - +from builtins import bool, bytes, complex, float, int, object, str # noqa: F401, UP029 + +from ._string_helpers import ( # noqa: F401 + LOWER_TABLE, + UPPER_TABLE, + english_capitalize, + english_lower, + english_upper, +) +from ._type_aliases import allTypes, sctypeDict, sctypes # We use this later generic = allTypes['generic'] genericTypeRank = ['bool', 'int8', 'uint8', 'int16', 'uint16', - 'int32', 'uint32', 'int64', 'uint64', 'int128', - 'uint128', 'float16', - 'float32', 'float64', 'float80', 'float96', 'float128', - 'float256', - 'complex32', 'complex64', 'complex128', 'complex160', - 'complex192', 'complex256', 'complex512', 'object'] - -@set_module('numpy') -def maximum_sctype(t): - """ - Return the scalar type of highest precision of the same kind as the input. - - .. 
deprecated:: 2.0 - Use an explicit dtype like int64 or float64 instead. - - Parameters - ---------- - t : dtype or dtype specifier - The input data type. This can be a `dtype` object or an object that - is convertible to a `dtype`. - - Returns - ------- - out : dtype - The highest precision data type of the same kind (`dtype.kind`) as `t`. - - See Also - -------- - obj2sctype, mintypecode, sctype2char - dtype - - Examples - -------- - >>> from numpy._core.numerictypes import maximum_sctype - >>> maximum_sctype(int) - - >>> maximum_sctype(np.uint8) - - >>> maximum_sctype(complex) - # may vary - - >>> maximum_sctype(str) - - - >>> maximum_sctype('i2') - - >>> maximum_sctype('f4') - # may vary - - """ - - # Deprecated in NumPy 2.0, 2023-07-11 - warnings.warn( - "`maximum_sctype` is deprecated. Use an explicit dtype like int64 " - "or float64 instead. (deprecated in NumPy 2.0)", - DeprecationWarning, - stacklevel=2 - ) - - g = obj2sctype(t) - if g is None: - return t - t = g - base = _kind_name(dtype(t)) - if base in sctypes: - return sctypes[base][-1] - else: - return t + 'int32', 'uint32', 'int64', 'uint64', + 'float16', 'float32', 'float64', 'float96', 'float128', + 'complex64', 'complex128', 'complex192', 'complex256', + 'object'] @set_module('numpy') @@ -215,7 +156,7 @@ def issctype(rep): Strings are also a scalar type: - >>> issctype(np.dtype('str')) + >>> issctype(np.dtype(np.str_)) True """ @@ -225,12 +166,12 @@ def issctype(rep): res = obj2sctype(rep) if res and res != object_: return True - return False + else: + return False except Exception: return False -@set_module('numpy') def obj2sctype(rep, default=None): """ Return the scalar dtype or NumPy equivalent of Python type of an object. 
@@ -373,7 +314,7 @@ def _preprocess_dtype(dtype): if isinstance(dtype, ma.dtype): dtype = dtype.type if isinstance(dtype, ndarray) or dtype not in allTypes.values(): - raise _PreprocessDTypeError() + raise _PreprocessDTypeError return dtype @@ -451,7 +392,7 @@ def isdtype(dtype, kind): elif isinstance(kind, str): raise ValueError( "kind argument is a string, but" - f" {repr(kind)} is not a known kind name." + f" {kind!r} is not a known kind name." ) else: try: @@ -593,7 +534,7 @@ def _scalar_type_key(typ): ScalarType = [int, float, complex, bool, bytes, str, memoryview] -ScalarType += sorted(set(sctypeDict.values()), key=_scalar_type_key) +ScalarType += sorted(dict.fromkeys(sctypeDict.values()), key=_scalar_type_key) ScalarType = tuple(ScalarType) diff --git a/numpy/_core/numerictypes.pyi b/numpy/_core/numerictypes.pyi index 52ab73012604..46bb6a379861 100644 --- a/numpy/_core/numerictypes.pyi +++ b/numpy/_core/numerictypes.pyi @@ -1,99 +1,196 @@ -from typing import ( - Literal as L, - Any, - TypeVar, - TypedDict, -) +from builtins import bool as py_bool +from typing import Any, Final, Literal as L, TypedDict, type_check_only import numpy as np from numpy import ( + bool, + bool_, + byte, + bytes_, + cdouble, + character, + clongdouble, + complex64, + complex128, + complex192, + complex256, + complexfloating, + csingle, + datetime64, + double, dtype, + flexible, + float16, + float32, + float64, + float96, + float128, + floating, generic, - ubyte, - ushort, - uintc, - ulong, - ulonglong, - byte, - short, + half, + inexact, + int8, + int16, + int32, + int64, + int_, intc, + integer, + intp, long, - longlong, - half, - single, - double, longdouble, - csingle, - cdouble, - clongdouble, - datetime64, - timedelta64, + longlong, + number, object_, + short, + signedinteger, + single, str_, - bytes_, + timedelta64, + ubyte, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + unsignedinteger, + ushort, void, ) +from numpy._typing import 
DTypeLike -from numpy._core._type_aliases import ( - sctypeDict as sctypeDict, +from ._type_aliases import sctypeDict as sctypeDict +from .multiarray import ( + busday_count, + busday_offset, + busdaycalendar, + datetime_as_string, + datetime_data, + is_busday, ) -from numpy._typing import DTypeLike - -_T = TypeVar("_T") -_SCT = TypeVar("_SCT", bound=generic) +__all__ = [ + "ScalarType", + "typecodes", + "issubdtype", + "datetime_data", + "datetime_as_string", + "busday_offset", + "busday_count", + "is_busday", + "busdaycalendar", + "isdtype", + "generic", + "unsignedinteger", + "character", + "inexact", + "number", + "integer", + "flexible", + "complexfloating", + "signedinteger", + "floating", + "bool", + "float16", + "float32", + "float64", + "longdouble", + "complex64", + "complex128", + "clongdouble", + "bytes_", + "str_", + "void", + "object_", + "datetime64", + "timedelta64", + "int8", + "byte", + "uint8", + "ubyte", + "int16", + "short", + "uint16", + "ushort", + "int32", + "intc", + "uint32", + "uintc", + "int64", + "long", + "uint64", + "ulong", + "longlong", + "ulonglong", + "intp", + "uintp", + "double", + "cdouble", + "single", + "csingle", + "half", + "bool_", + "int_", + "uint", + "float96", + "float128", + "complex192", + "complex256", +] +@type_check_only class _TypeCodes(TypedDict): - Character: L['c'] - Integer: L['bhilqp'] - UnsignedInteger: L['BHILQP'] - Float: L['efdg'] - Complex: L['FDG'] - AllInteger: L['bBhHiIlLqQpP'] - AllFloat: L['efdgFDG'] - Datetime: L['Mm'] - All: L['?bhilqpBHILQPefdgFDGSUVOMm'] - -__all__: list[str] + Character: L["c"] + Integer: L["bhilqnp"] + UnsignedInteger: L["BHILQNP"] + Float: L["efdg"] + Complex: L["FDG"] + AllInteger: L["bBhHiIlLqQnNpP"] + AllFloat: L["efdgFDG"] + Datetime: L["Mm"] + All: L["?bhilqnpBHILQNPefdgFDGSUVOMm"] -def isdtype( - dtype: dtype[Any] | type[Any], - kind: DTypeLike | tuple[DTypeLike, ...] -) -> bool: ... 
+def isdtype(dtype: dtype | type, kind: DTypeLike | tuple[DTypeLike, ...]) -> py_bool: ... +def issubdtype(arg1: DTypeLike | None, arg2: DTypeLike | None) -> py_bool: ... -def issubdtype(arg1: DTypeLike, arg2: DTypeLike) -> bool: ... - -typecodes: _TypeCodes -ScalarType: tuple[ - type[int], - type[float], - type[complex], - type[bool], - type[bytes], - type[str], - type[memoryview], - type[np.bool], - type[csingle], - type[cdouble], - type[clongdouble], - type[half], - type[single], - type[double], - type[longdouble], - type[byte], - type[short], - type[intc], - type[long], - type[longlong], - type[timedelta64], - type[datetime64], - type[object_], - type[bytes_], - type[str_], - type[ubyte], - type[ushort], - type[uintc], - type[ulong], - type[ulonglong], - type[void], -] +typecodes: Final[_TypeCodes] = ... +ScalarType: Final[ + tuple[ + type[int], + type[float], + type[complex], + type[py_bool], + type[bytes], + type[str], + type[memoryview[Any]], + type[np.bool], + type[complex64], + type[complex128], + type[complex128 | complex192 | complex256], + type[float16], + type[float32], + type[float64], + type[float64 | float96 | float128], + type[int8], + type[int16], + type[int32], + type[int32 | int64], + type[int64], + type[datetime64], + type[timedelta64], + type[object_], + type[bytes_], + type[str_], + type[uint8], + type[uint16], + type[uint32], + type[uint32 | uint64], + type[uint64], + type[void], + ] +] = ... 
+typeDict: Final = sctypeDict diff --git a/numpy/_core/overrides.py b/numpy/_core/overrides.py index 6bb57c3dbf9a..6d5e7750b09b 100644 --- a/numpy/_core/overrides.py +++ b/numpy/_core/overrides.py @@ -1,13 +1,15 @@ """Implementation of __array_function__ overrides from NEP-18.""" import collections import functools -import os +import inspect -from .._utils import set_module -from .._utils._inspect import getargspec from numpy._core._multiarray_umath import ( - add_docstring, _get_implementing_args, _ArrayFunctionDispatcher) - + _ArrayFunctionDispatcher, + _get_implementing_args, + add_docstring, +) +from numpy._utils import set_module # noqa: F401 +from numpy._utils._inspect import getargspec ARRAY_FUNCTIONS = set() @@ -20,12 +22,13 @@ compatible with that passed in via this argument.""" ) -def set_array_function_like_doc(public_api): - if public_api.__doc__ is not None: - public_api.__doc__ = public_api.__doc__.replace( - "${ARRAY_FUNCTION_LIKE}", - array_function_like_doc, - ) +def get_array_function_like_doc(public_api, docstring_template=""): + ARRAY_FUNCTIONS.add(public_api) + docstring = public_api.__doc__ or docstring_template + return docstring.replace("${ARRAY_FUNCTION_LIKE}", array_function_like_doc) + +def finalize_array_function_like(public_api): + public_api.__doc__ = get_array_function_like_doc(public_api) return public_api @@ -154,11 +157,15 @@ def decorator(implementation): "argument and a keyword-only argument. 
" f"{implementation} does not seem to comply.") - if docs_from_dispatcher: - add_docstring(implementation, dispatcher.__doc__) + if docs_from_dispatcher and dispatcher.__doc__ is not None: + doc = inspect.cleandoc(dispatcher.__doc__) + add_docstring(implementation, doc) public_api = _ArrayFunctionDispatcher(dispatcher, implementation) - public_api = functools.wraps(implementation)(public_api) + functools.update_wrapper(public_api, implementation) + + if not verify and not getattr(implementation, "__text_signature__", None): + public_api.__signature__ = inspect.signature(dispatcher) if module is not None: public_api.__module__ = module diff --git a/numpy/_core/overrides.pyi b/numpy/_core/overrides.pyi new file mode 100644 index 000000000000..627165e98d3d --- /dev/null +++ b/numpy/_core/overrides.pyi @@ -0,0 +1,44 @@ +from collections.abc import Callable, Iterable +from typing import Any, Final, NamedTuple + +from numpy._utils import set_module as set_module + +type _FuncLike = type | Callable[..., object] +type _Dispatcher[**_Tss] = Callable[_Tss, Iterable[object]] + +### + +ARRAY_FUNCTIONS: set[Callable[..., Any]] = ... +array_function_like_doc: Final[str] = ... + +class ArgSpec(NamedTuple): + args: list[str] + varargs: str | None + keywords: str | None + defaults: tuple[Any, ...] + +def get_array_function_like_doc(public_api: Callable[..., object], docstring_template: str = "") -> str: ... +def finalize_array_function_like[FuncLikeT: _FuncLike](public_api: FuncLikeT) -> FuncLikeT: ... + +# +def verify_matching_signatures[**Tss](implementation: Callable[Tss, object], dispatcher: _Dispatcher[Tss]) -> None: ... + +# NOTE: This actually returns a `_ArrayFunctionDispatcher` callable wrapper object, with +# the original wrapped callable stored in the `._implementation` attribute. It checks +# for any `__array_function__` of the values of specific arguments that the dispatcher +# specifies. 
Since the dispatcher only returns an iterable of passed array-like args, +# this overridable behaviour is impossible to annotate. +def array_function_dispatch[**Tss, FuncLikeT: _FuncLike]( + dispatcher: _Dispatcher[Tss] | None = None, + module: str | None = None, + verify: bool = True, + docs_from_dispatcher: bool = False, +) -> Callable[[FuncLikeT], FuncLikeT]: ... + +# +def array_function_from_dispatcher[**Tss, T]( + implementation: Callable[Tss, T], + module: str | None = None, + verify: bool = True, + docs_from_dispatcher: bool = True, +) -> Callable[[_Dispatcher[Tss]], Callable[Tss, T]]: ... diff --git a/numpy/_core/printoptions.py b/numpy/_core/printoptions.py new file mode 100644 index 000000000000..5d6f9635cd3c --- /dev/null +++ b/numpy/_core/printoptions.py @@ -0,0 +1,32 @@ +""" +Stores and defines the low-level format_options context variable. + +This is defined in its own file outside of the arrayprint module +so we can import it from C while initializing the multiarray +C module during import without introducing circular dependencies. +""" + +import sys +from contextvars import ContextVar + +__all__ = ["format_options"] + +default_format_options_dict = { + "edgeitems": 3, # repr N leading and trailing items of each dimension + "threshold": 1000, # total items > triggers array summarization + "floatmode": "maxprec", + "precision": 8, # precision of floating point representations + "suppress": False, # suppress printing small floating values in exp format + "linewidth": 75, + "nanstr": "nan", + "infstr": "inf", + "sign": "-", + "formatter": None, + # Internally stored as an int to simplify comparisons; converted from/to + # str/False on the way in/out. 
+ 'legacy': sys.maxsize, + 'override_repr': None, +} + +format_options = ContextVar( + "format_options", default=default_format_options_dict) diff --git a/numpy/_core/printoptions.pyi b/numpy/_core/printoptions.pyi new file mode 100644 index 000000000000..bd7c7b40692d --- /dev/null +++ b/numpy/_core/printoptions.pyi @@ -0,0 +1,28 @@ +from collections.abc import Callable +from contextvars import ContextVar +from typing import Any, Final, TypedDict + +from .arrayprint import _FormatDict + +__all__ = ["format_options"] + +### + +class _FormatOptionsDict(TypedDict): + edgeitems: int + threshold: int + floatmode: str + precision: int + suppress: bool + linewidth: int + nanstr: str + infstr: str + sign: str + formatter: _FormatDict | None + legacy: int + override_repr: Callable[[Any], str] | None + +### + +default_format_options_dict: Final[_FormatOptionsDict] = ... +format_options: ContextVar[_FormatOptionsDict] diff --git a/numpy/_core/records.py b/numpy/_core/records.py index 8bdeec15c6d2..8ad0f1fb07a3 100644 --- a/numpy/_core/records.py +++ b/numpy/_core/records.py @@ -6,9 +6,9 @@ from collections import Counter from contextlib import nullcontext -from .._utils import set_module -from . import numeric as sb -from . import numerictypes as nt +from numpy._utils import set_module + +from . import numeric as sb, numerictypes as nt from .arrayprint import _get_legacy_print_mode # All of the functions allow formats to be a dtype @@ -97,6 +97,7 @@ class format_parser: Examples -------- + >>> import numpy as np >>> np.rec.format_parser(['>> np.rec.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'], + >>> np.rec.format_parser(['f8', 'i4', 'S5'], ['col1', 'col2', 'col3'], ... 
[]).dtype dtype([('col1', '>> np.rec.format_parser(['>> np.rec.format_parser(['>> import numpy as np >>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', '>> x array([(1., 2), (3., 4)], dtype=[('x', '>> from tempfile import TemporaryFile - >>> a = np.empty(10,dtype='f8,i4,a5') + >>> a = np.empty(10,dtype='f8,i4,S5') >>> a[5] = (0.5,10,'abcde') >>> >>> fd=TemporaryFile() @@ -872,7 +869,7 @@ def fromfile(fd, dtype=None, shape=None, offset=0, formats=None, >>> a.tofile(fd) >>> >>> _ = fd.seek(0) - >>> r=np.rec.fromfile(fd, formats='f8,i4,a5', shape=10, + >>> r=np.rec.fromfile(fd, formats='f8,i4,S5', shape=10, ... byteorder='<') >>> print(r[5]) (0.5, 10, b'abcde') diff --git a/numpy/_core/records.pyi b/numpy/_core/records.pyi index e7de3d10c521..7b9c36057c35 100644 --- a/numpy/_core/records.pyi +++ b/numpy/_core/records.pyi @@ -1,329 +1,338 @@ -import os -from collections.abc import Sequence, Iterable +# pyright: reportSelfClsParameterName=false +from _typeshed import Incomplete, StrOrBytesPath +from collections.abc import Buffer, Iterable, Sequence from typing import ( Any, - TypeVar, - overload, + ClassVar, + Literal, Protocol, SupportsIndex, - Literal -) - -from numpy import ( - ndarray, - dtype, - generic, - void, - _ByteOrder, - _SupportsBuffer, - _ShapeType, - _DType_co, - _OrderKACF, + overload, + type_check_only, ) +from typing_extensions import TypeVar +import numpy as np +from numpy import _ByteOrder, _OrderKACF from numpy._typing import ( ArrayLike, DTypeLike, NDArray, - _ShapeLike, - _ArrayLikeInt_co, + _AnyShape, _ArrayLikeVoid_co, _NestedSequence, + _Shape, + _ShapeLike, ) -_SCT = TypeVar("_SCT", bound=generic) +__all__ = [ + "array", + "find_duplicate", + "format_parser", + "fromarrays", + "fromfile", + "fromrecords", + "fromstring", + "recarray", + "record", +] -_RecArray = recarray[Any, dtype[_SCT]] +# Explicit covariant type variables are needed because mypy isn't very good at variance inference right now. 
+_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) +_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) +type _RecArray[_ScalarT: np.generic] = recarray[_AnyShape, np.dtype[_ScalarT]] + +@type_check_only class _SupportsReadInto(Protocol): def seek(self, offset: int, whence: int, /) -> object: ... def tell(self, /) -> int: ... def readinto(self, buffer: memoryview, /) -> int: ... -class record(void): - def __getattribute__(self, attr: str) -> Any: ... - def __setattr__(self, attr: str, val: ArrayLike) -> None: ... +### + +# exported in `numpy.rec` +class record(np.void): # type: ignore[misc] + __name__: ClassVar[Literal["record"]] = "record" + __module__: Literal["numpy"] = "numpy" # pyrefly: ignore[bad-override] + def pprint(self) -> str: ... + + def __getattribute__(self, attr: str, /) -> Any: ... + def __setattr__(self, attr: str, val: ArrayLike, /) -> None: ... + + # + @overload # type: ignore[override] + def __getitem__(self, key: str | SupportsIndex, /) -> Incomplete: ... # pyrefly: ignore[bad-override] @overload - def __getitem__(self, key: str | SupportsIndex) -> Any: ... - @overload - def __getitem__(self, key: list[str]) -> record: ... + def __getitem__(self, key: list[str], /) -> record: ... 
+ +# exported in `numpy.rec` +class recarray(np.ndarray[_ShapeT_co, _DTypeT_co]): + __name__: ClassVar[Literal["recarray"]] = "recarray" + __module__: Literal["numpy.rec"] = "numpy.rec" # pyrefly: ignore[bad-override] -class recarray(ndarray[_ShapeType, _DType_co]): - # NOTE: While not strictly mandatory, we're demanding here that arguments - # for the `format_parser`- and `dtype`-based dtype constructors are - # mutually exclusive @overload def __new__( - subtype, + cls, shape: _ShapeLike, - dtype: None = ..., - buf: None | _SupportsBuffer = ..., - offset: SupportsIndex = ..., - strides: None | _ShapeLike = ..., + dtype: None = None, + buf: Buffer | None = None, + offset: SupportsIndex = 0, + strides: _ShapeLike | None = None, *, - formats: DTypeLike, - names: None | str | Sequence[str] = ..., - titles: None | str | Sequence[str] = ..., - byteorder: None | _ByteOrder = ..., - aligned: bool = ..., - order: _OrderKACF = ..., - ) -> recarray[Any, dtype[record]]: ... + formats: DTypeLike | None, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + byteorder: _ByteOrder | None = None, + aligned: bool = False, + order: _OrderKACF = "C", + ) -> _RecArray[record]: ... @overload def __new__( - subtype, + cls, shape: _ShapeLike, - dtype: DTypeLike, - buf: None | _SupportsBuffer = ..., - offset: SupportsIndex = ..., - strides: None | _ShapeLike = ..., - formats: None = ..., - names: None = ..., - titles: None = ..., - byteorder: None = ..., - aligned: Literal[False] = ..., - order: _OrderKACF = ..., - ) -> recarray[Any, dtype[Any]]: ... - def __array_finalize__(self, obj: object) -> None: ... - def __getattribute__(self, attr: str) -> Any: ... - def __setattr__(self, attr: str, val: ArrayLike) -> None: ... - @overload - def __getitem__(self, indx: ( - SupportsIndex - | _ArrayLikeInt_co - | tuple[SupportsIndex | _ArrayLikeInt_co, ...] - )) -> Any: ... 
- @overload - def __getitem__(self: recarray[Any, dtype[void]], indx: ( - None - | slice - | ellipsis - | SupportsIndex - | _ArrayLikeInt_co - | tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...] - )) -> recarray[Any, _DType_co]: ... - @overload - def __getitem__(self, indx: ( - None - | slice - | ellipsis - | SupportsIndex - | _ArrayLikeInt_co - | tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...] - )) -> ndarray[Any, _DType_co]: ... - @overload - def __getitem__(self, indx: str) -> NDArray[Any]: ... - @overload - def __getitem__(self, indx: list[str]) -> recarray[_ShapeType, dtype[record]]: ... + dtype: DTypeLike | None, + buf: Buffer | None = None, + offset: SupportsIndex = 0, + strides: _ShapeLike | None = None, + formats: None = None, + names: None = None, + titles: None = None, + byteorder: None = None, + aligned: Literal[False] = False, + order: _OrderKACF = "C", + ) -> _RecArray[Incomplete]: ... + + def __getattribute__(self, attr: str, /) -> Any: ... + def __setattr__(self, attr: str, val: ArrayLike, /) -> None: ... + + def __array_finalize__(self, /, obj: object) -> None: ... + + # @overload - def field(self, attr: int | str, val: None = ...) -> Any: ... + def field(self, /, attr: int | str, val: ArrayLike) -> None: ... @overload - def field(self, attr: int | str, val: ArrayLike) -> None: ... + def field(self, /, attr: int | str, val: None = None) -> Incomplete: ... +# exported in `numpy.rec` class format_parser: - dtype: dtype[void] + dtype: np.dtype[np.void] def __init__( self, - formats: DTypeLike, - names: None | str | Sequence[str], - titles: None | str | Sequence[str], - aligned: bool = ..., - byteorder: None | _ByteOrder = ..., + /, + formats: DTypeLike | None, + names: str | Sequence[str] | None, + titles: str | Sequence[str] | None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, ) -> None: ... 
-__all__: list[str] - +# exported in `numpy.rec` @overload def fromarrays( arrayList: Iterable[ArrayLike], - dtype: DTypeLike = ..., - shape: None | _ShapeLike = ..., - formats: None = ..., - names: None = ..., - titles: None = ..., - aligned: bool = ..., - byteorder: None = ..., + dtype: DTypeLike | None = None, + shape: _ShapeLike | None = None, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, ) -> _RecArray[Any]: ... @overload def fromarrays( arrayList: Iterable[ArrayLike], - dtype: None = ..., - shape: None | _ShapeLike = ..., + dtype: None = None, + shape: _ShapeLike | None = None, *, - formats: DTypeLike, - names: None | str | Sequence[str] = ..., - titles: None | str | Sequence[str] = ..., - aligned: bool = ..., - byteorder: None | _ByteOrder = ..., + formats: DTypeLike | None, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, ) -> _RecArray[record]: ... @overload def fromrecords( - recList: _ArrayLikeVoid_co | tuple[Any, ...] | _NestedSequence[tuple[Any, ...]], - dtype: DTypeLike = ..., - shape: None | _ShapeLike = ..., - formats: None = ..., - names: None = ..., - titles: None = ..., - aligned: bool = ..., - byteorder: None = ..., + recList: _ArrayLikeVoid_co | tuple[object, ...] | _NestedSequence[tuple[object, ...]], + dtype: DTypeLike | None = None, + shape: _ShapeLike | None = None, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, ) -> _RecArray[record]: ... @overload def fromrecords( - recList: _ArrayLikeVoid_co | tuple[Any, ...] | _NestedSequence[tuple[Any, ...]], - dtype: None = ..., - shape: None | _ShapeLike = ..., + recList: _ArrayLikeVoid_co | tuple[object, ...] 
| _NestedSequence[tuple[object, ...]], + dtype: None = None, + shape: _ShapeLike | None = None, *, - formats: DTypeLike = ..., - names: None | str | Sequence[str] = ..., - titles: None | str | Sequence[str] = ..., - aligned: bool = ..., - byteorder: None | _ByteOrder = ..., + formats: DTypeLike | None, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, ) -> _RecArray[record]: ... +# exported in `numpy.rec` @overload def fromstring( - datastring: _SupportsBuffer, - dtype: DTypeLike, - shape: None | _ShapeLike = ..., - offset: int = ..., - formats: None = ..., - names: None = ..., - titles: None = ..., - aligned: bool = ..., - byteorder: None = ..., + datastring: Buffer, + dtype: DTypeLike | None, + shape: _ShapeLike | None = None, + offset: int = 0, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, ) -> _RecArray[record]: ... @overload def fromstring( - datastring: _SupportsBuffer, - dtype: None = ..., - shape: None | _ShapeLike = ..., - offset: int = ..., + datastring: Buffer, + dtype: None = None, + shape: _ShapeLike | None = None, + offset: int = 0, *, - formats: DTypeLike, - names: None | str | Sequence[str] = ..., - titles: None | str | Sequence[str] = ..., - aligned: bool = ..., - byteorder: None | _ByteOrder = ..., + formats: DTypeLike | None, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, ) -> _RecArray[record]: ... 
+# exported in `numpy.rec` @overload def fromfile( - fd: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _SupportsReadInto, - dtype: DTypeLike, - shape: None | _ShapeLike = ..., - offset: int = ..., - formats: None = ..., - names: None = ..., - titles: None = ..., - aligned: bool = ..., - byteorder: None = ..., + fd: StrOrBytesPath | _SupportsReadInto, + dtype: DTypeLike | None, + shape: _ShapeLike | None = None, + offset: int = 0, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, ) -> _RecArray[Any]: ... @overload def fromfile( - fd: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _SupportsReadInto, - dtype: None = ..., - shape: None | _ShapeLike = ..., - offset: int = ..., + fd: StrOrBytesPath | _SupportsReadInto, + dtype: None = None, + shape: _ShapeLike | None = None, + offset: int = 0, *, - formats: DTypeLike, - names: None | str | Sequence[str] = ..., - titles: None | str | Sequence[str] = ..., - aligned: bool = ..., - byteorder: None | _ByteOrder = ..., + formats: DTypeLike | None, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, ) -> _RecArray[record]: ... +# exported in `numpy.rec` @overload -def array( - obj: _SCT | NDArray[_SCT], - dtype: None = ..., - shape: None | _ShapeLike = ..., - offset: int = ..., - formats: None = ..., - names: None = ..., - titles: None = ..., - aligned: bool = ..., - byteorder: None = ..., - copy: bool = ..., -) -> _RecArray[_SCT]: ... +def array[ScalarT: np.generic]( + obj: ScalarT | NDArray[ScalarT], + dtype: None = None, + shape: _ShapeLike | None = None, + offset: int = 0, + strides: tuple[int, ...] | None = None, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, + copy: bool = True, +) -> _RecArray[ScalarT]: ... 
@overload def array( obj: ArrayLike, - dtype: DTypeLike, - shape: None | _ShapeLike = ..., - offset: int = ..., - formats: None = ..., - names: None = ..., - titles: None = ..., - aligned: bool = ..., - byteorder: None = ..., - copy: bool = ..., + dtype: DTypeLike | None, + shape: _ShapeLike | None = None, + offset: int = 0, + strides: tuple[int, ...] | None = None, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, + copy: bool = True, ) -> _RecArray[Any]: ... @overload def array( obj: ArrayLike, - dtype: None = ..., - shape: None | _ShapeLike = ..., - offset: int = ..., + dtype: None = None, + shape: _ShapeLike | None = None, + offset: int = 0, + strides: tuple[int, ...] | None = None, *, - formats: DTypeLike, - names: None | str | Sequence[str] = ..., - titles: None | str | Sequence[str] = ..., - aligned: bool = ..., - byteorder: None | _ByteOrder = ..., - copy: bool = ..., + formats: DTypeLike | None, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + copy: bool = True, ) -> _RecArray[record]: ... @overload def array( obj: None, - dtype: DTypeLike, + dtype: DTypeLike | None, shape: _ShapeLike, - offset: int = ..., - formats: None = ..., - names: None = ..., - titles: None = ..., - aligned: bool = ..., - byteorder: None = ..., - copy: bool = ..., + offset: int = 0, + strides: tuple[int, ...] | None = None, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, + copy: bool = True, ) -> _RecArray[Any]: ... 
@overload def array( obj: None, - dtype: None = ..., + dtype: None = None, *, shape: _ShapeLike, - offset: int = ..., - formats: DTypeLike, - names: None | str | Sequence[str] = ..., - titles: None | str | Sequence[str] = ..., - aligned: bool = ..., - byteorder: None | _ByteOrder = ..., - copy: bool = ..., + offset: int = 0, + strides: tuple[int, ...] | None = None, + formats: DTypeLike | None, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + copy: bool = True, ) -> _RecArray[record]: ... @overload def array( obj: _SupportsReadInto, - dtype: DTypeLike, - shape: None | _ShapeLike = ..., - offset: int = ..., - formats: None = ..., - names: None = ..., - titles: None = ..., - aligned: bool = ..., - byteorder: None = ..., - copy: bool = ..., + dtype: DTypeLike | None, + shape: _ShapeLike | None = None, + offset: int = 0, + strides: tuple[int, ...] | None = None, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, + copy: bool = True, ) -> _RecArray[Any]: ... @overload def array( obj: _SupportsReadInto, - dtype: None = ..., - shape: None | _ShapeLike = ..., - offset: int = ..., + dtype: None = None, + shape: _ShapeLike | None = None, + offset: int = 0, + strides: tuple[int, ...] | None = None, *, - formats: DTypeLike, - names: None | str | Sequence[str] = ..., - titles: None | str | Sequence[str] = ..., - aligned: bool = ..., - byteorder: None | _ByteOrder = ..., - copy: bool = ..., + formats: DTypeLike | None, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + copy: bool = True, ) -> _RecArray[record]: ... + +# exported in `numpy.rec` +def find_duplicate[T](list: Iterable[T]) -> list[T]: ... 
diff --git a/numpy/_core/shape_base.py b/numpy/_core/shape_base.py index 8ecd5f61903c..39de8739db0e 100644 --- a/numpy/_core/shape_base.py +++ b/numpy/_core/shape_base.py @@ -1,16 +1,12 @@ __all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack', - 'stack', 'vstack'] + 'stack', 'unstack', 'vstack'] import functools import itertools import operator -import warnings -from . import numeric as _nx -from . import overrides +from . import fromnumeric as _from_nx, numeric as _nx, overrides from .multiarray import array, asanyarray, normalize_axis_index -from . import fromnumeric as _from_nx - array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy') @@ -45,6 +41,7 @@ def atleast_1d(*arys): Examples -------- + >>> import numpy as np >>> np.atleast_1d(1.0) array([1.]) @@ -103,6 +100,7 @@ def atleast_2d(*arys): Examples -------- + >>> import numpy as np >>> np.atleast_2d(3.0) array([[3.]]) @@ -163,6 +161,7 @@ def atleast_3d(*arys): Examples -------- + >>> import numpy as np >>> np.atleast_3d(3.0) array([[[3.]]]) @@ -234,7 +233,9 @@ def vstack(tup, *, dtype=None, casting="same_kind"): ---------- tup : sequence of ndarrays The arrays must have the same shape along all but the first axis. - 1-D arrays must have the same length. + 1-D arrays must have the same length. In the case of a single + array_like input, it will be treated as a sequence of arrays; i.e., + each element along the zeroth axis is treated as a separate array. dtype : str or dtype If provided, the destination array will have this dtype. Cannot be @@ -261,9 +262,11 @@ def vstack(tup, *, dtype=None, casting="same_kind"): dstack : Stack arrays in sequence depth wise (along third axis). column_stack : Stack 1-D arrays as columns into a 2-D array. vsplit : Split an array into multiple sub-arrays vertically (row-wise). + unstack : Split an array into a tuple of sub-arrays along an axis. 
Examples -------- + >>> import numpy as np >>> a = np.array([1, 2, 3]) >>> b = np.array([4, 5, 6]) >>> np.vstack((a,b)) @@ -305,7 +308,9 @@ def hstack(tup, *, dtype=None, casting="same_kind"): ---------- tup : sequence of ndarrays The arrays must have the same shape along all but the second axis, - except 1-D arrays which can be any length. + except 1-D arrays which can be any length. In the case of a single + array_like input, it will be treated as a sequence of arrays; i.e., + each element along the zeroth axis is treated as a separate array. dtype : str or dtype If provided, the destination array will have this dtype. Cannot be @@ -331,11 +336,13 @@ def hstack(tup, *, dtype=None, casting="same_kind"): vstack : Stack arrays in sequence vertically (row wise). dstack : Stack arrays in sequence depth wise (along third axis). column_stack : Stack 1-D arrays as columns into a 2-D array. - hsplit : Split an array into multiple sub-arrays + hsplit : Split an array into multiple sub-arrays horizontally (column-wise). + unstack : Split an array into a tuple of sub-arrays along an axis. Examples -------- + >>> import numpy as np >>> a = np.array((1,2,3)) >>> b = np.array((4,5,6)) >>> np.hstack((a,b)) @@ -377,12 +384,12 @@ def stack(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"): dimensions of the result. For example, if ``axis=0`` it will be the first dimension and if ``axis=-1`` it will be the last dimension. - .. versionadded:: 1.10.0 - Parameters ---------- - arrays : sequence of array_like - Each array must have the same shape. + arrays : sequence of ndarrays + Each array must have the same shape. In the case of a single ndarray + array_like input, it will be treated as a sequence of arrays; i.e., + each element along the zeroth axis is treated as a separate array. axis : int, optional The axis in the result array along which the input arrays are stacked. 
@@ -414,9 +421,11 @@ def stack(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"): concatenate : Join a sequence of arrays along an existing axis. block : Assemble an nd-array from nested lists of blocks. split : Split array into a list of multiple sub-arrays of equal size. + unstack : Split an array into a tuple of sub-arrays along an axis. Examples -------- + >>> import numpy as np >>> rng = np.random.default_rng() >>> arrays = [rng.normal(size=(3,4)) for _ in range(10)] >>> np.stack(arrays, axis=0).shape @@ -456,6 +465,77 @@ def stack(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"): return _nx.concatenate(expanded_arrays, axis=axis, out=out, dtype=dtype, casting=casting) +def _unstack_dispatcher(x, /, *, axis=None): + return (x,) + +@array_function_dispatch(_unstack_dispatcher) +def unstack(x, /, *, axis=0): + """ + Split an array into a sequence of arrays along the given axis. + + The ``axis`` parameter specifies the dimension along which the array will + be split. For example, if ``axis=0`` (the default) it will be the first + dimension and if ``axis=-1`` it will be the last dimension. + + The result is a tuple of arrays split along ``axis``. + + .. versionadded:: 2.1.0 + + Parameters + ---------- + x : ndarray + The array to be unstacked. + axis : int, optional + Axis along which the array will be split. Default: ``0``. + + Returns + ------- + unstacked : tuple of ndarrays + The unstacked arrays. + + See Also + -------- + stack : Join a sequence of arrays along a new axis. + concatenate : Join a sequence of arrays along an existing axis. + block : Assemble an nd-array from nested lists of blocks. + split : Split array into a list of multiple sub-arrays of equal size. + + Notes + ----- + ``unstack`` serves as the reverse operation of :py:func:`stack`, i.e., + ``stack(unstack(x, axis=axis), axis=axis) == x``. 
+ + This function is equivalent to ``tuple(np.moveaxis(x, axis, 0))``, since + iterating on an array iterates along the first axis. + + Examples + -------- + >>> arr = np.arange(24).reshape((2, 3, 4)) + >>> np.unstack(arr) + (array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]), + array([[12, 13, 14, 15], + [16, 17, 18, 19], + [20, 21, 22, 23]])) + >>> np.unstack(arr, axis=1) + (array([[ 0, 1, 2, 3], + [12, 13, 14, 15]]), + array([[ 4, 5, 6, 7], + [16, 17, 18, 19]]), + array([[ 8, 9, 10, 11], + [20, 21, 22, 23]])) + >>> arr2 = np.stack(np.unstack(arr, axis=1), axis=1) + >>> arr2.shape + (2, 3, 4) + >>> np.all(arr == arr2) + np.True_ + + """ + if x.ndim == 0: + raise ValueError("Input array must be at least 1-d.") + return tuple(_nx.moveaxis(x, axis, 0)) + # Internal functions to eliminate the overhead of repeated dispatch in one of # the two possible paths inside np.block. @@ -470,7 +550,7 @@ def _block_format_index(index): """ Convert a list of indices ``[0, 1, 2]`` into ``"arrays[0][1][2]"``. """ - idx_str = ''.join('[{}]'.format(i) for i in index if i is not None) + idx_str = ''.join(f'[{i}]' for i in index if i is not None) return 'arrays' + idx_str @@ -505,20 +585,18 @@ def _block_check_depths_match(arrays, parent_index=[]): the choice of algorithm used using benchmarking wisdom. """ - if type(arrays) is tuple: + if isinstance(arrays, tuple): # not strictly necessary, but saves us from: # - more than one way to do things - no point treating tuples like # lists # - horribly confusing behaviour that results when tuples are # treated like ndarray raise TypeError( - '{} is a tuple. ' + f'{_block_format_index(parent_index)} is a tuple. ' 'Only lists can be used to arrange blocks, and np.block does ' - 'not allow implicit conversion from tuple to ndarray.'.format( - _block_format_index(parent_index) - ) + 'not allow implicit conversion from tuple to ndarray.' 
) - elif type(arrays) is list and len(arrays) > 0: + elif isinstance(arrays, list) and len(arrays) > 0: idxs_ndims = (_block_check_depths_match(arr, parent_index + [i]) for i, arr in enumerate(arrays)) @@ -529,19 +607,16 @@ def _block_check_depths_match(arrays, parent_index=[]): max_arr_ndim = ndim if len(index) != len(first_index): raise ValueError( - "List depths are mismatched. First element was at depth " - "{}, but there is an element at depth {} ({})".format( - len(first_index), - len(index), - _block_format_index(index) - ) + "List depths are mismatched. First element was at " + f"depth {len(first_index)}, but there is an element at " + f"depth {len(index)} ({_block_format_index(index)})" ) # propagate our flag that indicates an empty list at the bottom if index[-1] is None: first_index = index return first_index, max_arr_ndim, final_size - elif type(arrays) is list and len(arrays) == 0: + elif isinstance(arrays, list) and len(arrays) == 0: # We've 'bottomed out' on an empty list return parent_index + [None], 0, 0 else: @@ -601,14 +676,14 @@ def _concatenate_shapes(shapes, axis): # Take a shape, any shape first_shape = shapes[0] first_shape_pre = first_shape[:axis] - first_shape_post = first_shape[axis+1:] + first_shape_post = first_shape[axis + 1:] if any(shape[:axis] != first_shape_pre or - shape[axis+1:] != first_shape_post for shape in shapes): + shape[axis + 1:] != first_shape_post for shape in shapes): raise ValueError( - 'Mismatched array shapes in block along axis {}.'.format(axis)) + f'Mismatched array shapes in block along axis {axis}.') - shape = (first_shape_pre + (sum(shape_at_axis),) + first_shape[axis+1:]) + shape = (first_shape_pre + (sum(shape_at_axis),) + first_shape[axis + 1:]) offsets_at_axis = _accumulate(shape_at_axis) slice_prefixes = [(slice(start, end),) @@ -646,7 +721,7 @@ def _block_info_recursion(arrays, max_depth, result_ndim, depth=0): """ if depth < max_depth: shapes, slices, arrays = zip( - *[_block_info_recursion(arr, 
max_depth, result_ndim, depth+1) + *[_block_info_recursion(arr, max_depth, result_ndim, depth + 1) for arr in arrays]) axis = result_ndim - max_depth + depth @@ -680,9 +755,9 @@ def _block(arrays, max_depth, result_ndim, depth=0): for details). """ if depth < max_depth: - arrs = [_block(arr, max_depth, result_ndim, depth+1) + arrs = [_block(arr, max_depth, result_ndim, depth + 1) for arr in arrays] - return _concatenate(arrs, axis=-(max_depth-depth)) + return _concatenate(arrs, axis=-(max_depth - depth)) else: # We've 'bottomed out' - arrays is either a scalar or an array # type(arrays) is not list @@ -693,7 +768,7 @@ def _block_dispatcher(arrays): # Use type(...) is list to match the behavior of np.block(), which special # cases list specifically rather than allowing for generic iterables or # tuple. Also, we know that list.__array_function__ will never exist. - if type(arrays) is list: + if isinstance(arrays, list): for subarrays in arrays: yield from _block_dispatcher(subarrays) else: @@ -710,7 +785,7 @@ def block(arrays): second-last dimension (-2), and so on until the outermost list is reached. Blocks can be of any dimension, but will not be broadcasted using - the normal rules. Instead, leading axes of size 1 are inserted, + the normal rules. Instead, leading axes of size 1 are inserted, to make ``block.ndim`` the same for all blocks. This is primarily useful for working with scalars, and means that code like ``np.block([v, 1])`` is valid, where ``v.ndim == 1``. @@ -718,8 +793,6 @@ def block(arrays): When the nested list is two levels deep, this allows block matrices to be constructed from their components. - .. versionadded:: 1.13.0 - Parameters ---------- arrays : nested list of array_like or scalars (but not tuples) @@ -756,10 +829,10 @@ def block(arrays): dstack : Stack arrays in sequence depth wise (along third axis). column_stack : Stack 1-D arrays as columns into a 2-D array. vsplit : Split an array into multiple sub-arrays vertically (row-wise). 
+ unstack : Split an array into a tuple of sub-arrays along an axis. Notes ----- - When called with only scalars, ``np.block`` is equivalent to an ndarray call. So ``np.block([[1, 2], [3, 4]])`` is equivalent to ``np.array([[1, 2], [3, 4]])``. @@ -789,8 +862,9 @@ def block(arrays): Examples -------- - The most common use of this function is to build a block matrix + The most common use of this function is to build a block matrix: + >>> import numpy as np >>> A = np.eye(2) * 2 >>> B = np.eye(3) * 3 >>> np.block([ @@ -803,7 +877,7 @@ def block(arrays): [1., 1., 0., 3., 0.], [1., 1., 0., 0., 3.]]) - With a list of depth 1, `block` can be used as `hstack` + With a list of depth 1, `block` can be used as `hstack`: >>> np.block([1, 2, 3]) # hstack([1, 2, 3]) array([1, 2, 3]) @@ -835,7 +909,7 @@ def block(arrays): [2, 2], [2, 2]]) - It can also be used in places of `atleast_1d` and `atleast_2d` + It can also be used in place of `atleast_1d` and `atleast_2d`: >>> a = np.array(0) >>> b = np.array([1]) @@ -886,9 +960,7 @@ def _block_setup(arrays): list_ndim = len(bottom_index) if bottom_index and bottom_index[-1] is None: raise ValueError( - 'List at {} cannot be empty'.format( - _block_format_index(bottom_index) - ) + f'List at {_block_format_index(bottom_index)} cannot be empty' ) result_ndim = max(arr_ndim, list_ndim) return arrays, list_ndim, result_ndim, final_size diff --git a/numpy/_core/shape_base.pyi b/numpy/_core/shape_base.pyi index 8cf604b7358d..b41602ae8d47 100644 --- a/numpy/_core/shape_base.pyi +++ b/numpy/_core/shape_base.pyi @@ -1,123 +1,187 @@ from collections.abc import Sequence -from typing import TypeVar, overload, Any, SupportsIndex +from typing import Any, SupportsIndex, overload -from numpy import generic, _CastingKind -from numpy._typing import ( - NDArray, - ArrayLike, - DTypeLike, - _ArrayLike, - _DTypeLike, -) +import numpy as np +from numpy import _CastingKind +from numpy._typing import ArrayLike, DTypeLike, NDArray, _ArrayLike, _DTypeLike -_SCT 
= TypeVar("_SCT", bound=generic) -_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) - -__all__: list[str] +__all__ = [ + "atleast_1d", + "atleast_2d", + "atleast_3d", + "block", + "hstack", + "stack", + "unstack", + "vstack", +] +# keep in sync with `numpy.ma.extras.atleast_1d` +@overload +def atleast_1d[ScalarT: np.generic](a0: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... +@overload +def atleast_1d[ScalarT1: np.generic, ScalarT2: np.generic]( + a0: _ArrayLike[ScalarT1], a1: _ArrayLike[ScalarT2], / +) -> tuple[NDArray[ScalarT1], NDArray[ScalarT2]]: ... +@overload +def atleast_1d[ScalarT: np.generic]( + a0: _ArrayLike[ScalarT], a1: _ArrayLike[ScalarT], /, *arys: _ArrayLike[ScalarT] +) -> tuple[NDArray[ScalarT], ...]: ... @overload -def atleast_1d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ... +def atleast_1d(a0: ArrayLike, /) -> NDArray[Any]: ... @overload -def atleast_1d(arys: ArrayLike, /) -> NDArray[Any]: ... +def atleast_1d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[Any]]: ... @overload -def atleast_1d(*arys: ArrayLike) -> tuple[NDArray[Any], ...]: ... +def atleast_1d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... +# keep in sync with `numpy.ma.extras.atleast_2d` +@overload +def atleast_2d[ScalarT: np.generic](a0: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... +@overload +def atleast_2d[ScalarT1: np.generic, ScalarT2: np.generic]( + a0: _ArrayLike[ScalarT1], a1: _ArrayLike[ScalarT2], / +) -> tuple[NDArray[ScalarT1], NDArray[ScalarT2]]: ... +@overload +def atleast_2d[ScalarT: np.generic]( + a0: _ArrayLike[ScalarT], a1: _ArrayLike[ScalarT], /, *arys: _ArrayLike[ScalarT] +) -> tuple[NDArray[ScalarT], ...]: ... @overload -def atleast_2d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ... +def atleast_2d(a0: ArrayLike, /) -> NDArray[Any]: ... @overload -def atleast_2d(arys: ArrayLike, /) -> NDArray[Any]: ... +def atleast_2d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[Any]]: ... 
@overload -def atleast_2d(*arys: ArrayLike) -> tuple[NDArray[Any], ...]: ... +def atleast_2d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... +# keep in sync with `numpy.ma.extras.atleast_3d` @overload -def atleast_3d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ... +def atleast_3d[ScalarT: np.generic](a0: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload -def atleast_3d(arys: ArrayLike, /) -> NDArray[Any]: ... +def atleast_3d[ScalarT1: np.generic, ScalarT2: np.generic]( + a0: _ArrayLike[ScalarT1], a1: _ArrayLike[ScalarT2], / +) -> tuple[NDArray[ScalarT1], NDArray[ScalarT2]]: ... @overload -def atleast_3d(*arys: ArrayLike) -> tuple[NDArray[Any], ...]: ... +def atleast_3d[ScalarT: np.generic]( + a0: _ArrayLike[ScalarT], a1: _ArrayLike[ScalarT], /, *arys: _ArrayLike[ScalarT] +) -> tuple[NDArray[ScalarT], ...]: ... +@overload +def atleast_3d(a0: ArrayLike, /) -> NDArray[Any]: ... +@overload +def atleast_3d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[Any]]: ... +@overload +def atleast_3d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... +# used by numpy.lib._shape_base_impl +def _arrays_for_stack_dispatcher[T](arrays: Sequence[T]) -> tuple[T, ...]: ... + +# keep in sync with `numpy.ma.extras.vstack` @overload -def vstack( - tup: Sequence[_ArrayLike[_SCT]], +def vstack[ScalarT: np.generic]( + tup: Sequence[_ArrayLike[ScalarT]], *, - dtype: None = ..., - casting: _CastingKind = ... -) -> NDArray[_SCT]: ... + dtype: None = None, + casting: _CastingKind = "same_kind" +) -> NDArray[ScalarT]: ... @overload -def vstack( +def vstack[ScalarT: np.generic]( tup: Sequence[ArrayLike], *, - dtype: _DTypeLike[_SCT], - casting: _CastingKind = ... -) -> NDArray[_SCT]: ... + dtype: _DTypeLike[ScalarT], + casting: _CastingKind = "same_kind" +) -> NDArray[ScalarT]: ... @overload def vstack( tup: Sequence[ArrayLike], *, - dtype: DTypeLike = ..., - casting: _CastingKind = ... 
+ dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind" ) -> NDArray[Any]: ... +# keep in sync with `numpy.ma.extras.hstack` @overload -def hstack( - tup: Sequence[_ArrayLike[_SCT]], +def hstack[ScalarT: np.generic]( + tup: Sequence[_ArrayLike[ScalarT]], *, - dtype: None = ..., - casting: _CastingKind = ... -) -> NDArray[_SCT]: ... + dtype: None = None, + casting: _CastingKind = "same_kind" +) -> NDArray[ScalarT]: ... @overload -def hstack( +def hstack[ScalarT: np.generic]( tup: Sequence[ArrayLike], *, - dtype: _DTypeLike[_SCT], - casting: _CastingKind = ... -) -> NDArray[_SCT]: ... + dtype: _DTypeLike[ScalarT], + casting: _CastingKind = "same_kind" +) -> NDArray[ScalarT]: ... @overload def hstack( tup: Sequence[ArrayLike], *, - dtype: DTypeLike = ..., - casting: _CastingKind = ... + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind" ) -> NDArray[Any]: ... +# keep in sync with `numpy.ma.extras.stack` @overload -def stack( - arrays: Sequence[_ArrayLike[_SCT]], - axis: SupportsIndex = ..., - out: None = ..., +def stack[ScalarT: np.generic]( + arrays: Sequence[_ArrayLike[ScalarT]], + axis: SupportsIndex = 0, + out: None = None, *, - dtype: None = ..., - casting: _CastingKind = ... -) -> NDArray[_SCT]: ... + dtype: None = None, + casting: _CastingKind = "same_kind" +) -> NDArray[ScalarT]: ... @overload -def stack( +def stack[ScalarT: np.generic]( arrays: Sequence[ArrayLike], - axis: SupportsIndex = ..., - out: None = ..., + axis: SupportsIndex = 0, + out: None = None, *, - dtype: _DTypeLike[_SCT], - casting: _CastingKind = ... -) -> NDArray[_SCT]: ... + dtype: _DTypeLike[ScalarT], + casting: _CastingKind = "same_kind" +) -> NDArray[ScalarT]: ... @overload def stack( arrays: Sequence[ArrayLike], - axis: SupportsIndex = ..., - out: None = ..., + axis: SupportsIndex = 0, + out: None = None, *, - dtype: DTypeLike = ..., - casting: _CastingKind = ... 
+ dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind" ) -> NDArray[Any]: ... @overload -def stack( +def stack[OutT: np.ndarray]( arrays: Sequence[ArrayLike], - axis: SupportsIndex = ..., - out: _ArrayType = ..., + axis: SupportsIndex, + out: OutT, + *, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind", +) -> OutT: ... +@overload +def stack[OutT: np.ndarray]( + arrays: Sequence[ArrayLike], + axis: SupportsIndex = 0, + *, + out: OutT, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind", +) -> OutT: ... + +@overload +def unstack[ScalarT: np.generic]( + array: _ArrayLike[ScalarT], + /, + *, + axis: int = 0, +) -> tuple[NDArray[ScalarT], ...]: ... +@overload +def unstack( + array: ArrayLike, + /, *, - dtype: DTypeLike = ..., - casting: _CastingKind = ... -) -> _ArrayType: ... + axis: int = 0, +) -> tuple[NDArray[Any], ...]: ... @overload -def block(arrays: _ArrayLike[_SCT]) -> NDArray[_SCT]: ... +def block[ScalarT: np.generic](arrays: _ArrayLike[ScalarT]) -> NDArray[ScalarT]: ... @overload def block(arrays: ArrayLike) -> NDArray[Any]: ... 
diff --git a/numpy/_core/src/_simd/_simd.c b/numpy/_core/src/_simd/_simd.c index 87ecc3e9f479..d25d7bbf1c38 100644 --- a/numpy/_core/src/_simd/_simd.c +++ b/numpy/_core/src/_simd/_simd.c @@ -85,13 +85,14 @@ PyMODINIT_FUNC PyInit__simd(void) goto err; \ } \ } - #ifdef NPY__CPU_MESON_BUILD - NPY_MTARGETS_CONF_DISPATCH(NPY_CPU_HAVE, ATTACH_MODULE, MAKE_MSVC_HAPPY) - NPY_MTARGETS_CONF_BASELINE(ATTACH_BASELINE_MODULE, MAKE_MSVC_HAPPY) - #else - NPY__CPU_DISPATCH_CALL(NPY_CPU_HAVE, ATTACH_MODULE, MAKE_MSVC_HAPPY) - NPY__CPU_DISPATCH_BASELINE_CALL(ATTACH_BASELINE_MODULE, MAKE_MSVC_HAPPY) - #endif + NPY_MTARGETS_CONF_DISPATCH(NPY_CPU_HAVE, ATTACH_MODULE, MAKE_MSVC_HAPPY) + NPY_MTARGETS_CONF_BASELINE(ATTACH_BASELINE_MODULE, MAKE_MSVC_HAPPY) + +#ifdef Py_GIL_DISABLED + // signal this module supports running with the GIL disabled + PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); +#endif + return m; err: Py_DECREF(m); diff --git a/numpy/_core/src/_simd/_simd.dispatch.c.src b/numpy/_core/src/_simd/_simd.dispatch.c.src index 02f84fa5592c..120fbfee3270 100644 --- a/numpy/_core/src/_simd/_simd.dispatch.c.src +++ b/numpy/_core/src/_simd/_simd.dispatch.c.src @@ -1,4 +1,3 @@ -/*@targets #simd_test*/ #include "_simd.h" #include "_simd_inc.h" @@ -30,7 +29,7 @@ * #ncont_sup = 0, 0, 0, 0, 1, 1, 1, 1, 1, 1# * #intdiv_sup= 1, 1, 1, 1, 1, 1, 1, 1, 0, 0# * #shl_imm = 0, 0, 15, 15, 31, 31, 63, 63, 0, 0# - * #shr_imm = 0, 0, 16, 16, 32, 32, 64, 64, 0, 0# + * #shr_imm = 0, 0, 15, 15, 31, 31, 63, 63, 0, 0# * #bitw8b_sup= 1, 0, 0, 0, 0, 0, 0, 0, 0, 0# */ #if @simd_sup@ diff --git a/numpy/_core/src/_simd/_simd.h b/numpy/_core/src/_simd/_simd.h index f3b0a8ccdda9..82a4451cc3a2 100644 --- a/numpy/_core/src/_simd/_simd.h +++ b/numpy/_core/src/_simd/_simd.h @@ -18,10 +18,8 @@ #include "npy_cpu_dispatch.h" #include "numpy/npy_cpu.h" -#ifndef NPY_DISABLE_OPTIMIZATION // autogenerated, required for CPU dispatch macros #include "_simd.dispatch.h" -#endif /** * Create a new module for each required 
optimization which contains all NPYV intrinsics, * diff --git a/numpy/_core/src/_simd/_simd_easyintrin.inc b/numpy/_core/src/_simd/_simd_easyintrin.inc index e300e54843a0..65c83279898d 100644 --- a/numpy/_core/src/_simd/_simd_easyintrin.inc +++ b/numpy/_core/src/_simd/_simd_easyintrin.inc @@ -243,7 +243,6 @@ NPY_EXPAND(FN(8, __VA_ARGS__)) #define SIMD__IMPL_COUNT_15(FN, ...) \ - NPY_EXPAND(FN(0, __VA_ARGS__)) \ SIMD__IMPL_COUNT_15_(FN, __VA_ARGS__) #define SIMD__IMPL_COUNT_16(FN, ...) \ @@ -251,7 +250,6 @@ NPY_EXPAND(FN(16, __VA_ARGS__)) #define SIMD__IMPL_COUNT_31(FN, ...) \ - NPY_EXPAND(FN(0, __VA_ARGS__)) \ SIMD__IMPL_COUNT_31_(FN, __VA_ARGS__) #define SIMD__IMPL_COUNT_32(FN, ...) \ @@ -267,7 +265,6 @@ NPY_EXPAND(FN(48, __VA_ARGS__)) #define SIMD__IMPL_COUNT_63(FN, ...) \ - NPY_EXPAND(FN(0, __VA_ARGS__)) \ SIMD__IMPL_COUNT_63_(FN, __VA_ARGS__) #define SIMD__IMPL_COUNT_64(FN, ...) \ diff --git a/numpy/_core/src/_simd/_simd_vector.inc b/numpy/_core/src/_simd/_simd_vector.inc index 3d0c15375074..4911402bc568 100644 --- a/numpy/_core/src/_simd/_simd_vector.inc +++ b/numpy/_core/src/_simd/_simd_vector.inc @@ -92,7 +92,7 @@ static PyTypeObject PySIMDVectorType = { * miss-align load variable of 256/512-bit vector from non-aligned * 256/512-bit stack pointer. 
* - * check the following links for more clearification: + * check the following links for more clarification: * https://github.com/numpy/numpy/pull/18330#issuecomment-821539919 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=49001 */ diff --git a/numpy/distutils/checks/cpu_asimd.c b/numpy/_core/src/_simd/checks/cpu_asimd.c similarity index 100% rename from numpy/distutils/checks/cpu_asimd.c rename to numpy/_core/src/_simd/checks/cpu_asimd.c diff --git a/numpy/distutils/checks/cpu_asimddp.c b/numpy/_core/src/_simd/checks/cpu_asimddp.c similarity index 100% rename from numpy/distutils/checks/cpu_asimddp.c rename to numpy/_core/src/_simd/checks/cpu_asimddp.c diff --git a/numpy/distutils/checks/cpu_asimdfhm.c b/numpy/_core/src/_simd/checks/cpu_asimdfhm.c similarity index 100% rename from numpy/distutils/checks/cpu_asimdfhm.c rename to numpy/_core/src/_simd/checks/cpu_asimdfhm.c diff --git a/numpy/distutils/checks/cpu_asimdhp.c b/numpy/_core/src/_simd/checks/cpu_asimdhp.c similarity index 100% rename from numpy/distutils/checks/cpu_asimdhp.c rename to numpy/_core/src/_simd/checks/cpu_asimdhp.c diff --git a/numpy/distutils/checks/cpu_avx.c b/numpy/_core/src/_simd/checks/cpu_avx.c similarity index 100% rename from numpy/distutils/checks/cpu_avx.c rename to numpy/_core/src/_simd/checks/cpu_avx.c diff --git a/numpy/distutils/checks/cpu_avx2.c b/numpy/_core/src/_simd/checks/cpu_avx2.c similarity index 100% rename from numpy/distutils/checks/cpu_avx2.c rename to numpy/_core/src/_simd/checks/cpu_avx2.c diff --git a/numpy/distutils/checks/cpu_avx512_clx.c b/numpy/_core/src/_simd/checks/cpu_avx512_clx.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512_clx.c rename to numpy/_core/src/_simd/checks/cpu_avx512_clx.c diff --git a/numpy/distutils/checks/cpu_avx512_cnl.c b/numpy/_core/src/_simd/checks/cpu_avx512_cnl.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512_cnl.c rename to numpy/_core/src/_simd/checks/cpu_avx512_cnl.c diff --git 
a/numpy/distutils/checks/cpu_avx512_icl.c b/numpy/_core/src/_simd/checks/cpu_avx512_icl.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512_icl.c rename to numpy/_core/src/_simd/checks/cpu_avx512_icl.c diff --git a/numpy/distutils/checks/cpu_avx512_knl.c b/numpy/_core/src/_simd/checks/cpu_avx512_knl.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512_knl.c rename to numpy/_core/src/_simd/checks/cpu_avx512_knl.c diff --git a/numpy/distutils/checks/cpu_avx512_knm.c b/numpy/_core/src/_simd/checks/cpu_avx512_knm.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512_knm.c rename to numpy/_core/src/_simd/checks/cpu_avx512_knm.c diff --git a/numpy/distutils/checks/cpu_avx512_skx.c b/numpy/_core/src/_simd/checks/cpu_avx512_skx.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512_skx.c rename to numpy/_core/src/_simd/checks/cpu_avx512_skx.c diff --git a/numpy/distutils/checks/cpu_avx512_spr.c b/numpy/_core/src/_simd/checks/cpu_avx512_spr.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512_spr.c rename to numpy/_core/src/_simd/checks/cpu_avx512_spr.c diff --git a/numpy/distutils/checks/cpu_avx512cd.c b/numpy/_core/src/_simd/checks/cpu_avx512cd.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512cd.c rename to numpy/_core/src/_simd/checks/cpu_avx512cd.c diff --git a/numpy/distutils/checks/cpu_avx512f.c b/numpy/_core/src/_simd/checks/cpu_avx512f.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512f.c rename to numpy/_core/src/_simd/checks/cpu_avx512f.c diff --git a/numpy/distutils/checks/cpu_f16c.c b/numpy/_core/src/_simd/checks/cpu_f16c.c similarity index 100% rename from numpy/distutils/checks/cpu_f16c.c rename to numpy/_core/src/_simd/checks/cpu_f16c.c diff --git a/numpy/distutils/checks/cpu_fma3.c b/numpy/_core/src/_simd/checks/cpu_fma3.c similarity index 100% rename from numpy/distutils/checks/cpu_fma3.c rename to numpy/_core/src/_simd/checks/cpu_fma3.c 
diff --git a/numpy/distutils/checks/cpu_fma4.c b/numpy/_core/src/_simd/checks/cpu_fma4.c similarity index 100% rename from numpy/distutils/checks/cpu_fma4.c rename to numpy/_core/src/_simd/checks/cpu_fma4.c diff --git a/numpy/_core/src/_simd/checks/cpu_lsx.c b/numpy/_core/src/_simd/checks/cpu_lsx.c new file mode 100644 index 000000000000..5993c93a5f86 --- /dev/null +++ b/numpy/_core/src/_simd/checks/cpu_lsx.c @@ -0,0 +1,11 @@ +#ifndef __loongarch_sx +#error "HOST/ARCH doesn't support LSX" +#endif + +#include + +int main(void) +{ + __m128i a = __lsx_vadd_d(__lsx_vldi(0), __lsx_vldi(0)); + return __lsx_vpickve2gr_w(a, 0); +} diff --git a/numpy/distutils/checks/cpu_neon.c b/numpy/_core/src/_simd/checks/cpu_neon.c similarity index 100% rename from numpy/distutils/checks/cpu_neon.c rename to numpy/_core/src/_simd/checks/cpu_neon.c diff --git a/numpy/distutils/checks/cpu_neon_fp16.c b/numpy/_core/src/_simd/checks/cpu_neon_fp16.c similarity index 100% rename from numpy/distutils/checks/cpu_neon_fp16.c rename to numpy/_core/src/_simd/checks/cpu_neon_fp16.c diff --git a/numpy/distutils/checks/cpu_neon_vfpv4.c b/numpy/_core/src/_simd/checks/cpu_neon_vfpv4.c similarity index 100% rename from numpy/distutils/checks/cpu_neon_vfpv4.c rename to numpy/_core/src/_simd/checks/cpu_neon_vfpv4.c diff --git a/numpy/distutils/checks/cpu_popcnt.c b/numpy/_core/src/_simd/checks/cpu_popcnt.c similarity index 100% rename from numpy/distutils/checks/cpu_popcnt.c rename to numpy/_core/src/_simd/checks/cpu_popcnt.c diff --git a/numpy/distutils/checks/cpu_rvv.c b/numpy/_core/src/_simd/checks/cpu_rvv.c similarity index 100% rename from numpy/distutils/checks/cpu_rvv.c rename to numpy/_core/src/_simd/checks/cpu_rvv.c diff --git a/numpy/distutils/checks/cpu_sse.c b/numpy/_core/src/_simd/checks/cpu_sse.c similarity index 100% rename from numpy/distutils/checks/cpu_sse.c rename to numpy/_core/src/_simd/checks/cpu_sse.c diff --git a/numpy/distutils/checks/cpu_sse2.c 
b/numpy/_core/src/_simd/checks/cpu_sse2.c similarity index 100% rename from numpy/distutils/checks/cpu_sse2.c rename to numpy/_core/src/_simd/checks/cpu_sse2.c diff --git a/numpy/distutils/checks/cpu_sse3.c b/numpy/_core/src/_simd/checks/cpu_sse3.c similarity index 100% rename from numpy/distutils/checks/cpu_sse3.c rename to numpy/_core/src/_simd/checks/cpu_sse3.c diff --git a/numpy/distutils/checks/cpu_sse41.c b/numpy/_core/src/_simd/checks/cpu_sse41.c similarity index 100% rename from numpy/distutils/checks/cpu_sse41.c rename to numpy/_core/src/_simd/checks/cpu_sse41.c diff --git a/numpy/distutils/checks/cpu_sse42.c b/numpy/_core/src/_simd/checks/cpu_sse42.c similarity index 100% rename from numpy/distutils/checks/cpu_sse42.c rename to numpy/_core/src/_simd/checks/cpu_sse42.c diff --git a/numpy/distutils/checks/cpu_ssse3.c b/numpy/_core/src/_simd/checks/cpu_ssse3.c similarity index 100% rename from numpy/distutils/checks/cpu_ssse3.c rename to numpy/_core/src/_simd/checks/cpu_ssse3.c diff --git a/numpy/distutils/checks/cpu_sve.c b/numpy/_core/src/_simd/checks/cpu_sve.c similarity index 100% rename from numpy/distutils/checks/cpu_sve.c rename to numpy/_core/src/_simd/checks/cpu_sve.c diff --git a/numpy/distutils/checks/cpu_vsx.c b/numpy/_core/src/_simd/checks/cpu_vsx.c similarity index 100% rename from numpy/distutils/checks/cpu_vsx.c rename to numpy/_core/src/_simd/checks/cpu_vsx.c diff --git a/numpy/distutils/checks/cpu_vsx2.c b/numpy/_core/src/_simd/checks/cpu_vsx2.c similarity index 100% rename from numpy/distutils/checks/cpu_vsx2.c rename to numpy/_core/src/_simd/checks/cpu_vsx2.c diff --git a/numpy/distutils/checks/cpu_vsx3.c b/numpy/_core/src/_simd/checks/cpu_vsx3.c similarity index 100% rename from numpy/distutils/checks/cpu_vsx3.c rename to numpy/_core/src/_simd/checks/cpu_vsx3.c diff --git a/numpy/distutils/checks/cpu_vsx4.c b/numpy/_core/src/_simd/checks/cpu_vsx4.c similarity index 100% rename from numpy/distutils/checks/cpu_vsx4.c rename to 
numpy/_core/src/_simd/checks/cpu_vsx4.c diff --git a/numpy/distutils/checks/cpu_vx.c b/numpy/_core/src/_simd/checks/cpu_vx.c similarity index 100% rename from numpy/distutils/checks/cpu_vx.c rename to numpy/_core/src/_simd/checks/cpu_vx.c diff --git a/numpy/distutils/checks/cpu_vxe.c b/numpy/_core/src/_simd/checks/cpu_vxe.c similarity index 100% rename from numpy/distutils/checks/cpu_vxe.c rename to numpy/_core/src/_simd/checks/cpu_vxe.c diff --git a/numpy/distutils/checks/cpu_vxe2.c b/numpy/_core/src/_simd/checks/cpu_vxe2.c similarity index 100% rename from numpy/distutils/checks/cpu_vxe2.c rename to numpy/_core/src/_simd/checks/cpu_vxe2.c diff --git a/numpy/distutils/checks/cpu_xop.c b/numpy/_core/src/_simd/checks/cpu_xop.c similarity index 100% rename from numpy/distutils/checks/cpu_xop.c rename to numpy/_core/src/_simd/checks/cpu_xop.c diff --git a/numpy/distutils/checks/extra_avx512bw_mask.c b/numpy/_core/src/_simd/checks/extra_avx512bw_mask.c similarity index 100% rename from numpy/distutils/checks/extra_avx512bw_mask.c rename to numpy/_core/src/_simd/checks/extra_avx512bw_mask.c diff --git a/numpy/distutils/checks/extra_avx512dq_mask.c b/numpy/_core/src/_simd/checks/extra_avx512dq_mask.c similarity index 100% rename from numpy/distutils/checks/extra_avx512dq_mask.c rename to numpy/_core/src/_simd/checks/extra_avx512dq_mask.c diff --git a/numpy/distutils/checks/extra_avx512f_reduce.c b/numpy/_core/src/_simd/checks/extra_avx512f_reduce.c similarity index 100% rename from numpy/distutils/checks/extra_avx512f_reduce.c rename to numpy/_core/src/_simd/checks/extra_avx512f_reduce.c diff --git a/numpy/distutils/checks/extra_vsx3_half_double.c b/numpy/_core/src/_simd/checks/extra_vsx3_half_double.c similarity index 100% rename from numpy/distutils/checks/extra_vsx3_half_double.c rename to numpy/_core/src/_simd/checks/extra_vsx3_half_double.c diff --git a/numpy/distutils/checks/extra_vsx4_mma.c b/numpy/_core/src/_simd/checks/extra_vsx4_mma.c similarity index 100% 
rename from numpy/distutils/checks/extra_vsx4_mma.c rename to numpy/_core/src/_simd/checks/extra_vsx4_mma.c diff --git a/numpy/distutils/checks/extra_vsx_asm.c b/numpy/_core/src/_simd/checks/extra_vsx_asm.c similarity index 100% rename from numpy/distutils/checks/extra_vsx_asm.c rename to numpy/_core/src/_simd/checks/extra_vsx_asm.c diff --git a/numpy/distutils/checks/test_flags.c b/numpy/_core/src/_simd/checks/test_flags.c similarity index 100% rename from numpy/distutils/checks/test_flags.c rename to numpy/_core/src/_simd/checks/test_flags.c diff --git a/numpy/_core/src/common/array_assign.h b/numpy/_core/src/common/array_assign.h index 8a28ed1d3a01..cc5f044ef080 100644 --- a/numpy/_core/src/common/array_assign.h +++ b/numpy/_core/src/common/array_assign.h @@ -46,7 +46,7 @@ PyArray_AssignRawScalar(PyArrayObject *dst, NPY_NO_EXPORT int raw_array_assign_scalar(int ndim, npy_intp const *shape, PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides, - PyArray_Descr *src_dtype, char *src_data); + PyArray_Descr *src_dtype, char *src_data, NPY_CASTING casting); /* * Assigns the scalar value to every element of the destination raw array @@ -59,7 +59,7 @@ raw_array_wheremasked_assign_scalar(int ndim, npy_intp const *shape, PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides, PyArray_Descr *src_dtype, char *src_data, PyArray_Descr *wheremask_dtype, char *wheremask_data, - npy_intp const *wheremask_strides); + npy_intp const *wheremask_strides, NPY_CASTING casting); /******** LOW-LEVEL ARRAY MANIPULATION HELPERS ********/ diff --git a/numpy/_core/src/common/binop_override.h b/numpy/_core/src/common/binop_override.h index ec3d046796ab..e17b147c1d0a 100644 --- a/numpy/_core/src/common/binop_override.h +++ b/numpy/_core/src/common/binop_override.h @@ -1,11 +1,12 @@ #ifndef NUMPY_CORE_SRC_COMMON_BINOP_OVERRIDE_H_ #define NUMPY_CORE_SRC_COMMON_BINOP_OVERRIDE_H_ -#include #include +#include #include "numpy/arrayobject.h" #include 
"get_attr_string.h" +#include "npy_static_data.h" /* * Logic for deciding when binops should return NotImplemented versus when @@ -128,15 +129,15 @@ binop_should_defer(PyObject *self, PyObject *other, int inplace) * Classes with __array_ufunc__ are living in the future, and only need to * check whether __array_ufunc__ equals None. */ - attr = PyArray_LookupSpecial(other, npy_um_str_array_ufunc); - if (attr != NULL) { + if (PyArray_LookupSpecial(other, npy_interned_str.array_ufunc, &attr) < 0) { + PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */ + } + else if (attr != NULL) { defer = !inplace && (attr == Py_None); Py_DECREF(attr); return defer; } - else if (PyErr_Occurred()) { - PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */ - } + /* * Otherwise, we need to check for the legacy __array_priority__. But if * other.__class__ is a subtype of self.__class__, then it's already had diff --git a/numpy/_core/src/common/blas_utils.c b/numpy/_core/src/common/blas_utils.c new file mode 100644 index 000000000000..43d4b1e845f0 --- /dev/null +++ b/numpy/_core/src/common/blas_utils.c @@ -0,0 +1,61 @@ +#include "numpy/npy_math.h" // npy_get_floatstatus_barrier +#include "numpy/numpyconfig.h" // NPY_VISIBILITY_HIDDEN +#include "blas_utils.h" +#include "npy_cblas.h" + +#include +#include +#include + +#ifdef __APPLE__ +#include +#endif + +#if NPY_BLAS_CHECK_FPE_SUPPORT +/* + * Static variable to cache runtime check of BLAS FPE support. 
+ * Will always be false (ignore all FPE) when accelerate is the compiled backend + */ + #if defined(ACCELERATE_NEW_LAPACK) +static bool blas_supports_fpe = false; + #else +static bool blas_supports_fpe = true; + #endif // ACCELERATE_NEW_LAPACK + +#endif // NPY_BLAS_CHECK_FPE_SUPPORT + + +NPY_VISIBILITY_HIDDEN bool +npy_blas_supports_fpe(void) +{ +#if NPY_BLAS_CHECK_FPE_SUPPORT + return blas_supports_fpe; +#else + return true; +#endif +} + +NPY_VISIBILITY_HIDDEN bool +npy_set_blas_supports_fpe(bool value) +{ +#if NPY_BLAS_CHECK_FPE_SUPPORT + blas_supports_fpe = (bool)value; + return blas_supports_fpe; +#endif + return true; // ignore input not set up on this platform +} + +NPY_VISIBILITY_HIDDEN int +npy_get_floatstatus_after_blas(void) +{ +#if NPY_BLAS_CHECK_FPE_SUPPORT + if (!blas_supports_fpe){ + // BLAS does not support FPE and we need to return FPE state. + // Instead of clearing and then grabbing state, just return + // that no flags are set. + return 0; + } +#endif + char *param = NULL; + return npy_get_floatstatus_barrier(param); +} diff --git a/numpy/_core/src/common/blas_utils.h b/numpy/_core/src/common/blas_utils.h new file mode 100644 index 000000000000..79d1e5ce274c --- /dev/null +++ b/numpy/_core/src/common/blas_utils.h @@ -0,0 +1,33 @@ +#include "numpy/numpyconfig.h" // for NPY_VISIBILITY_HIDDEN + +#include + +/* + * NPY_BLAS_CHECK_FPE_SUPPORT controls whether we need a runtime check + * for floating-point error (FPE) support in BLAS. + * The known culprit right now is SVM likely only on mac, but that is not + * quite clear. + * This checks always on all ARM (it is a small check overall). + */ +#if defined(__APPLE__) && defined(__aarch64__) && defined(HAVE_CBLAS) +#define NPY_BLAS_CHECK_FPE_SUPPORT 1 +#else +#define NPY_BLAS_CHECK_FPE_SUPPORT 0 +#endif + +/* Runtime check if BLAS supports floating-point errors. + * true - BLAS supports FPE and one can rely on them to indicate errors + * false - BLAS does not support FPE. 
Special handling needed for FPE state + */ +NPY_VISIBILITY_HIDDEN bool +npy_blas_supports_fpe(void); + +/* Allow setting the BLAS FPE flag from Python.*/ +NPY_VISIBILITY_HIDDEN bool +npy_set_blas_supports_fpe(bool value); + +/* If BLAS supports FPE, exactly the same as npy_get_floatstatus_barrier(). + * Otherwise, we can't rely on FPE state and need special handling. + */ +NPY_VISIBILITY_HIDDEN int +npy_get_floatstatus_after_blas(void); diff --git a/numpy/_core/src/common/cblasfuncs.c b/numpy/_core/src/common/cblasfuncs.c index 1b0eb91a0b10..66a215dfeb64 100644 --- a/numpy/_core/src/common/cblasfuncs.c +++ b/numpy/_core/src/common/cblasfuncs.c @@ -3,6 +3,7 @@ * inner product and dot for numpy arrays */ #define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _UMATHMODULE #define _MULTIARRAYMODULE #define PY_SSIZE_T_CLEAN @@ -10,6 +11,8 @@ #include "numpy/arrayobject.h" #include "numpy/npy_math.h" +#include "numpy/ufuncobject.h" +#include "blas_utils.h" #include "npy_cblas.h" #include "arraytypes.h" #include "common.h" @@ -375,6 +378,8 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2, return PyArray_Return(result); } + npy_clear_floatstatus_barrier((char *) out_buf); + if (ap2shape == _scalar) { /* * Multiplication by a scalar -- Level 1 BLAS @@ -689,6 +694,10 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2, NPY_END_ALLOW_THREADS; } + int fpes = npy_get_floatstatus_after_blas(); + if (fpes && PyUFunc_GiveFloatingpointErrors("dot", fpes) < 0) { + goto fail; + } Py_DECREF(ap1); Py_DECREF(ap2); diff --git a/numpy/_core/src/common/common.hpp b/numpy/_core/src/common/common.hpp index 44ba449d8e0e..fdc453d2fe5f 100644 --- a/numpy/_core/src/common/common.hpp +++ b/numpy/_core/src/common/common.hpp @@ -5,8 +5,8 @@ * they are gathered to make it easy for us and for the future need to support PCH. 
*/ #include "npdef.hpp" -#include "utils.hpp" #include "npstd.hpp" +#include "utils.hpp" #include "half.hpp" #include "meta.hpp" #include "float_status.hpp" diff --git a/numpy/_core/src/common/dlpack/dlpack.h b/numpy/_core/src/common/dlpack/dlpack.h index e05e600304d9..4dc164fe9c1b 100644 --- a/numpy/_core/src/common/dlpack/dlpack.h +++ b/numpy/_core/src/common/dlpack/dlpack.h @@ -109,7 +109,7 @@ typedef enum { */ kDLCUDAManaged = 13, /*! - * \brief Unified shared memory allocated on a oneAPI non-partititioned + * \brief Unified shared memory allocated on a oneAPI non-partitioned * device. Call to oneAPI runtime is required to determine the device * type, the USM allocation type and the sycl context it is bound to. * @@ -270,7 +270,7 @@ typedef struct DLManagedTensor { void (*deleter)(struct DLManagedTensor * self); } DLManagedTensor; -// bit masks used in in the DLManagedTensorVersioned +// bit masks used in the DLManagedTensorVersioned /*! \brief bit mask to indicate that the tensor is read only. */ #define DLPACK_FLAG_BITMASK_READ_ONLY (1UL << 0UL) diff --git a/numpy/_core/src/common/get_attr_string.h b/numpy/_core/src/common/get_attr_string.h index 36d39189f9e7..324a92c5ef0c 100644 --- a/numpy/_core/src/common/get_attr_string.h +++ b/numpy/_core/src/common/get_attr_string.h @@ -2,7 +2,8 @@ #define NUMPY_CORE_SRC_COMMON_GET_ATTR_STRING_H_ #include -#include "ufunc_object.h" +#include "npy_pycompat.h" + static inline npy_bool _is_basic_python_type(PyTypeObject *tp) @@ -44,24 +45,21 @@ _is_basic_python_type(PyTypeObject *tp) * Assumes that the special method is a numpy-specific one, so does not look * at builtin types. It does check base ndarray and numpy scalar types. * - * In future, could be made more like _Py_LookupSpecial + * It may make sense to just replace this with `PyObject_GetOptionalAttr`. 
*/ -static inline PyObject * -PyArray_LookupSpecial(PyObject *obj, PyObject *name_unicode) +static inline int +PyArray_LookupSpecial( + PyObject *obj, PyObject *name_unicode, PyObject **res) { PyTypeObject *tp = Py_TYPE(obj); /* We do not need to check for special attributes on trivial types */ if (_is_basic_python_type(tp)) { - return NULL; - } - PyObject *res = PyObject_GetAttr((PyObject *)tp, name_unicode); - - if (res == NULL && PyErr_ExceptionMatches(PyExc_AttributeError)) { - PyErr_Clear(); + *res = NULL; + return 0; } - return res; + return PyObject_GetOptionalAttr((PyObject *)tp, name_unicode, res); } @@ -73,23 +71,20 @@ PyArray_LookupSpecial(PyObject *obj, PyObject *name_unicode) * * Kept for backwards compatibility. In future, we should deprecate this. */ -static inline PyObject * -PyArray_LookupSpecial_OnInstance(PyObject *obj, PyObject *name_unicode) +static inline int +PyArray_LookupSpecial_OnInstance( + PyObject *obj, PyObject *name_unicode, PyObject **res) { PyTypeObject *tp = Py_TYPE(obj); /* We do not need to check for special attributes on trivial types */ + /* Note: This check should likely be reduced on Python 3.13+ */ if (_is_basic_python_type(tp)) { - return NULL; - } - - PyObject *res = PyObject_GetAttr(obj, name_unicode); - - if (res == NULL && PyErr_ExceptionMatches(PyExc_AttributeError)) { - PyErr_Clear(); + *res = NULL; + return 0; } - return res; + return PyObject_GetOptionalAttr(obj, name_unicode, res); } #endif /* NUMPY_CORE_SRC_COMMON_GET_ATTR_STRING_H_ */ diff --git a/numpy/_core/src/common/gil_utils.c b/numpy/_core/src/common/gil_utils.c index 45008b367807..c87cbe2d64ae 100644 --- a/numpy/_core/src/common/gil_utils.c +++ b/numpy/_core/src/common/gil_utils.c @@ -16,9 +16,6 @@ npy_gil_error(PyObject *type, const char *format, ...) 
NPY_ALLOW_C_API_DEF; NPY_ALLOW_C_API; if (!PyErr_Occurred()) { -#if !defined(PYPY_VERSION) - PyErr_FormatV(type, format, va); -#else PyObject *exc_str = PyUnicode_FromFormatV(format, va); if (exc_str == NULL) { // no reason to have special handling for this error case, since @@ -29,8 +26,19 @@ npy_gil_error(PyObject *type, const char *format, ...) } PyErr_SetObject(type, exc_str); Py_DECREF(exc_str); -#endif } NPY_DISABLE_C_API; va_end(va); } + +// Acquire the GIL before emitting a warning containing a message of +// the given category and stacklevel. +NPY_NO_EXPORT int +npy_gil_warning(PyObject *category, int stacklevel, const char *message) +{ + NPY_ALLOW_C_API_DEF; + NPY_ALLOW_C_API; + int result = PyErr_WarnEx(category, message, stacklevel); + NPY_DISABLE_C_API; + return result; +} diff --git a/numpy/_core/src/common/gil_utils.h b/numpy/_core/src/common/gil_utils.h index fd77fa6058f0..a6dc5ad99bc0 100644 --- a/numpy/_core/src/common/gil_utils.h +++ b/numpy/_core/src/common/gil_utils.h @@ -8,6 +8,9 @@ extern "C" { NPY_NO_EXPORT void npy_gil_error(PyObject *type, const char *format, ...); +NPY_NO_EXPORT int +npy_gil_warning(PyObject *category, int stacklevel, const char *message); + #ifdef __cplusplus } #endif diff --git a/numpy/_core/src/common/half.hpp b/numpy/_core/src/common/half.hpp index 484750ad84cd..14dabbe79d7f 100644 --- a/numpy/_core/src/common/half.hpp +++ b/numpy/_core/src/common/half.hpp @@ -9,8 +9,6 @@ // TODO(@seiko2plus): // - covers half-precision operations that being supported by numpy/halffloat.h // - add support for arithmetic operations -// - enables __fp16 causes massive FP exceptions on aarch64, -// needs a deep investigation namespace np { @@ -19,42 +17,19 @@ namespace np { /// Provides a type that implements 16-bit floating point (half-precision). /// This type is ensured to be 16-bit size. -#if 1 // ndef __ARM_FP16_FORMAT_IEEE class Half final { public: - /// Whether `Half` has a full native HW support. 
- static constexpr bool kNative = false; - /// Whether `Half` has a native HW support for single/double conversion. - template - static constexpr bool kNativeConversion = ( - ( - std::is_same_v && - #if defined(NPY_HAVE_FP16) || defined(NPY_HAVE_VSX3) - true - #else - false - #endif - ) || ( - std::is_same_v && - #if defined(NPY_HAVE_AVX512FP16) || (defined(NPY_HAVE_VSX3) && defined(NPY_HAVE_VSX3_HALF_DOUBLE)) - true - #else - false - #endif - ) - ); - /// Default constructor. initialize nothing. Half() = default; /// Construct from float /// If there are no hardware optimization available, rounding will always /// be set to ties to even. - explicit Half(float f) + NPY_FINLINE explicit Half(float f) { #if defined(NPY_HAVE_FP16) __m128 mf = _mm_load_ss(&f); - bits_ = static_cast(_mm_cvtsi128_si32(_mm_cvtps_ph(mf, _MM_FROUND_TO_NEAREST_INT))); + bits_ = _mm_extract_epi16(_mm_cvtps_ph(mf, _MM_FROUND_TO_NEAREST_INT), 0); #elif defined(NPY_HAVE_VSX3) && defined(NPY_HAVE_VSX_ASM) __vector float vf32 = vec_splats(f); __vector unsigned short vf16; @@ -64,6 +39,9 @@ class Half final { #else bits_ = vec_extract(vf16, 0); #endif + #elif defined(__ARM_FP16_FORMAT_IEEE) + __fp16 f16 = __fp16(f); + bits_ = BitCast(f16); #else bits_ = half_private::FromFloatBits(BitCast(f)); #endif @@ -72,20 +50,23 @@ class Half final { /// Construct from double. /// If there are no hardware optimization available, rounding will always /// be set to ties to even. 
- explicit Half(double f) + NPY_FINLINE explicit Half(double f) { #if defined(NPY_HAVE_AVX512FP16) __m128d md = _mm_load_sd(&f); - bits_ = static_cast(_mm_cvtsi128_si32(_mm_castph_si128(_mm_cvtpd_ph(md)))); + bits_ = _mm_extract_epi16(_mm_castph_si128(_mm_cvtpd_ph(md)), 0); #elif defined(NPY_HAVE_VSX3) && defined(NPY_HAVE_VSX3_HALF_DOUBLE) __asm__ __volatile__ ("xscvdphp %x0,%x1" : "=wa" (bits_) : "wa" (f)); + #elif defined(__ARM_FP16_FORMAT_IEEE) + __fp16 f16 = __fp16(f); + bits_ = BitCast(f16); #else bits_ = half_private::FromDoubleBits(BitCast(f)); #endif } /// Cast to float - explicit operator float() const + NPY_FINLINE explicit operator float() const { #if defined(NPY_HAVE_FP16) float ret; @@ -99,13 +80,15 @@ class Half final { : "=wa"(vf32) : "wa"(vec_splats(bits_))); return vec_extract(vf32, 0); + #elif defined(__ARM_FP16_FORMAT_IEEE) + return float(BitCast<__fp16>(bits_)); #else return BitCast(half_private::ToFloatBits(bits_)); #endif } /// Cast to double - explicit operator double() const + NPY_FINLINE explicit operator double() const { #if defined(NPY_HAVE_AVX512FP16) double ret; @@ -117,6 +100,8 @@ class Half final { : "=wa"(f64) : "wa"(bits_)); return f64; + #elif defined(__ARM_FP16_FORMAT_IEEE) + return double(BitCast<__fp16>(bits_)); #else return BitCast(half_private::ToDoubleBits(bits_)); #endif @@ -223,40 +208,6 @@ class Half final { private: uint16_t bits_; }; -#else // __ARM_FP16_FORMAT_IEEE -class Half final { - public: - static constexpr bool kNative = true; - template - static constexpr bool kNativeConversion = ( - std::is_same_v || std::is_same_v - ); - Half() = default; - constexpr Half(__fp16 h) : half_(h) - {} - constexpr operator __fp16() const - { return half_; } - static Half FromBits(uint16_t bits) - { - Half h; - h.half_ = BitCast<__fp16>(bits); - return h; - } - uint16_t Bits() const - { return BitCast(half_); } - constexpr bool Less(Half r) const - { return half_ < r.half_; } - constexpr bool LessEqual(Half r) const - { return half_ 
<= r.half_; } - constexpr bool Equal(Half r) const - { return half_ == r.half_; } - constexpr bool IsNaN() const - { return half_ != half_; } - - private: - __fp16 half_; -}; -#endif // __ARM_FP16_FORMAT_IEEE /// @} cpp_core_types diff --git a/numpy/_core/src/common/npstd.hpp b/numpy/_core/src/common/npstd.hpp index e5f9afbf29b3..93c89d7065f7 100644 --- a/numpy/_core/src/common/npstd.hpp +++ b/numpy/_core/src/common/npstd.hpp @@ -1,6 +1,8 @@ #ifndef NUMPY_CORE_SRC_COMMON_NPSTD_HPP #define NUMPY_CORE_SRC_COMMON_NPSTD_HPP +#include + #include #include #include @@ -14,8 +16,6 @@ #include #include -#include - #include "npy_config.h" namespace np { diff --git a/numpy/_core/src/common/npy_argparse.c b/numpy/_core/src/common/npy_argparse.c index 2be17483ec28..ea15ec68026b 100644 --- a/numpy/_core/src/common/npy_argparse.c +++ b/numpy/_core/src/common/npy_argparse.c @@ -1,17 +1,41 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE + #define PY_SSIZE_T_CLEAN #include +#include #include "numpy/ndarraytypes.h" #include "numpy/npy_2_compat.h" #include "npy_argparse.h" - #include "npy_import.h" #include "arrayfunction_override.h" +#if PY_VERSION_HEX < 0x30d00b3 +static PyThread_type_lock argparse_mutex; +#define LOCK_ARGPARSE_MUTEX \ + PyThread_acquire_lock(argparse_mutex, WAIT_LOCK) +#define UNLOCK_ARGPARSE_MUTEX \ + PyThread_release_lock(argparse_mutex) +#else +static PyMutex argparse_mutex = {0}; +#define LOCK_ARGPARSE_MUTEX PyMutex_Lock(&argparse_mutex) +#define UNLOCK_ARGPARSE_MUTEX PyMutex_Unlock(&argparse_mutex) +#endif + +NPY_NO_EXPORT int +init_argparse_mutex(void) { +#if PY_VERSION_HEX < 0x30d00b3 + argparse_mutex = PyThread_allocate_lock(); + if (argparse_mutex == NULL) { + PyErr_NoMemory(); + return -1; + } +#endif + return 0; +} /** * Small wrapper converting to array just like CPython does. 
@@ -220,16 +244,18 @@ static int raise_incorrect_number_of_positional_args(const char *funcname, const _NpyArgParserCache *cache, Py_ssize_t len_args) { + const char *verb = (len_args == 1) ? "was" : "were"; if (cache->npositional == cache->nrequired) { PyErr_Format(PyExc_TypeError, - "%s() takes %d positional arguments but %zd were given", - funcname, cache->npositional, len_args); + "%s() takes %d positional arguments but %zd %s given", + funcname, cache->npositional, len_args, verb); } else { PyErr_Format(PyExc_TypeError, "%s() takes from %d to %d positional arguments but " - "%zd were given", - funcname, cache->nrequired, cache->npositional, len_args); + "%zd %s given", + funcname, cache->nrequired, cache->npositional, + len_args, verb); } return -1; } @@ -257,11 +283,11 @@ raise_missing_argument(const char *funcname, * * See macro version for an example pattern of how to use this function. * - * @param funcname - * @param cache + * @param funcname Function name + * @param cache a NULL initialized persistent storage for data * @param args Python passed args (METH_FASTCALL) - * @param len_args - * @param kwnames + * @param len_args Number of arguments (not flagged) + * @param kwnames Tuple as passed by METH_FASTCALL or NULL. * @param ... List of arguments (see macro version). * * @return Returns 0 on success and -1 on failure. @@ -274,15 +300,20 @@ _npy_parse_arguments(const char *funcname, /* ... is NULL, NULL, NULL terminated: name, converter, value */ ...) 
{ - if (NPY_UNLIKELY(cache->npositional == -1)) { - va_list va; - va_start(va, kwnames); - - int res = initialize_keywords(funcname, cache, va); - va_end(va); - if (res < 0) { - return -1; + if (!atomic_load_explicit((_Atomic(uint8_t) *)&cache->initialized, memory_order_acquire)) { + LOCK_ARGPARSE_MUTEX; + if (!atomic_load_explicit((_Atomic(uint8_t) *)&cache->initialized, memory_order_acquire)) { + va_list va; + va_start(va, kwnames); + int res = initialize_keywords(funcname, cache, va); + va_end(va); + if (res < 0) { + UNLOCK_ARGPARSE_MUTEX; + return -1; + } + atomic_store_explicit((_Atomic(uint8_t) *)&cache->initialized, 1, memory_order_release); } + UNLOCK_ARGPARSE_MUTEX; } if (NPY_UNLIKELY(len_args > cache->npositional)) { diff --git a/numpy/_core/src/common/npy_argparse.h b/numpy/_core/src/common/npy_argparse.h index f4122103d22b..e1eef918cb33 100644 --- a/numpy/_core/src/common/npy_argparse.h +++ b/numpy/_core/src/common/npy_argparse.h @@ -20,7 +20,6 @@ NPY_NO_EXPORT int PyArray_PythonPyIntFromInt(PyObject *obj, int *value); - #define _NPY_MAX_KWARGS 15 typedef struct { @@ -28,16 +27,18 @@ typedef struct { int nargs; int npositional_only; int nrequired; + npy_uint8 initialized; /* Null terminated list of keyword argument name strings */ PyObject *kw_strings[_NPY_MAX_KWARGS+1]; } _NpyArgParserCache; +NPY_NO_EXPORT int init_argparse_mutex(void); /* * The sole purpose of this macro is to hide the argument parsing cache. * Since this cache must be static, this also removes a source of error. */ -#define NPY_PREPARE_ARGPARSER static _NpyArgParserCache __argparse_cache = {-1} +#define NPY_PREPARE_ARGPARSER static _NpyArgParserCache __argparse_cache; /** * Macro to help with argument parsing. @@ -68,7 +69,7 @@ typedef struct { * used in cunjunction with the macro defined in the same scope. * (No two `npy_parse_arguments` may share a single `NPY_PREPARE_ARGPARSER`.) 
* - * @param funcname + * @param funcname Function name * @param args Python passed args (METH_FASTCALL) * @param len_args Number of arguments (not flagged) * @param kwnames Tuple as passed by METH_FASTCALL or NULL. diff --git a/numpy/_core/src/common/npy_config.h b/numpy/_core/src/common/npy_config.h index 82641a85509e..ccb81ca7110b 100644 --- a/numpy/_core/src/common/npy_config.h +++ b/numpy/_core/src/common/npy_config.h @@ -1,6 +1,11 @@ #ifndef NUMPY_CORE_SRC_COMMON_NPY_CONFIG_H_ #define NUMPY_CORE_SRC_COMMON_NPY_CONFIG_H_ +#if defined(_MSC_VER) +// Suppress warn C4146: -x is valid for unsigned (wraps around) +#pragma warning(disable:4146) +#endif + #include "config.h" #include "npy_cpu_dispatch.h" // brings NPY_HAVE_[CPU features] #include "numpy/numpyconfig.h" diff --git a/numpy/_core/src/common/npy_cpu_dispatch.c b/numpy/_core/src/common/npy_cpu_dispatch.c index 992a470ada04..2cb3cd817d2a 100644 --- a/numpy/_core/src/common/npy_cpu_dispatch.c +++ b/numpy/_core/src/common/npy_cpu_dispatch.c @@ -1,12 +1,15 @@ -#include "npy_cpu_dispatch.h" +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE -static PyObject *npy__cpu_dispatch_registery = NULL; +#include "npy_cpu_dispatch.h" +#include "numpy/ndarraytypes.h" +#include "npy_static_data.h" NPY_VISIBILITY_HIDDEN int npy_cpu_dispatch_tracer_init(PyObject *mod) { - if (npy__cpu_dispatch_registery != NULL) { - PyErr_Format(PyExc_RuntimeError, "CPU dispatcher tracer already initlized"); + if (npy_static_pydata.cpu_dispatch_registry != NULL) { + PyErr_Format(PyExc_RuntimeError, "CPU dispatcher tracer already initialized"); return -1; } PyObject *mod_dict = PyModule_GetDict(mod); @@ -22,7 +25,7 @@ npy_cpu_dispatch_tracer_init(PyObject *mod) if (err != 0) { return -1; } - npy__cpu_dispatch_registery = reg_dict; + npy_static_pydata.cpu_dispatch_registry = reg_dict; return 0; } @@ -30,13 +33,13 @@ NPY_VISIBILITY_HIDDEN void npy_cpu_dispatch_trace(const char *fname, const char *signature, const char 
**dispatch_info) { - PyObject *func_dict = PyDict_GetItemString(npy__cpu_dispatch_registery, fname); + PyObject *func_dict = PyDict_GetItemString(npy_static_pydata.cpu_dispatch_registry, fname); // noqa: borrowed-ref OK if (func_dict == NULL) { func_dict = PyDict_New(); if (func_dict == NULL) { return; } - int err = PyDict_SetItemString(npy__cpu_dispatch_registery, fname, func_dict); + int err = PyDict_SetItemString(npy_static_pydata.cpu_dispatch_registry, fname, func_dict); Py_DECREF(func_dict); if (err != 0) { return; diff --git a/numpy/_core/src/common/npy_cpu_dispatch.h b/numpy/_core/src/common/npy_cpu_dispatch.h index ddf6bd554492..49d29b8aa655 100644 --- a/numpy/_core/src/common/npy_cpu_dispatch.h +++ b/numpy/_core/src/common/npy_cpu_dispatch.h @@ -7,51 +7,19 @@ * To get a better understanding of the mechanism behind it. */ #include "npy_cpu_features.h" // NPY_CPU_HAVE -#if (defined(__s390x__) || defined(__powerpc64__)) && !defined(__cplusplus) && defined(bool) /* - * "altivec.h" header contains the definitions(bool, vector, pixel), - * usually in c++ we undefine them after including the header. - * It's better anyway to take them off and use built-in types(__vector, __pixel, __bool) instead, - * since c99 supports bool variables which may lead to ambiguous errors. - */ - // backup 'bool' before including 'npy_cpu_dispatch_config.h', since it may not defined as a compiler token. - #define NPY__CPU_DISPATCH_GUARD_BOOL - typedef bool npy__cpu_dispatch_guard_bool; -#endif /** - * Including the main configuration header 'npy_cpu_dispatch_config.h'. - * This header is generated by the 'ccompiler_opt' distutils module and the Meson build system. + * This header is generated by the build system and contains: * - * For the distutils-generated version, it contains: * - Headers for platform-specific instruction sets. * - Feature #definitions, e.g. NPY_HAVE_AVX2.
- * - Helper macros that encapsulate enabled features through user-defined build options - * '--cpu-baseline' and '--cpu-dispatch'. These options are essential for implementing - * attributes like `__cpu_baseline__` and `__cpu_dispatch__` in the NumPy module. - * - * For the Meson-generated version, it contains: * - Headers for platform-specific instruction sets. * - Helper macros that encapsulate enabled features through user-defined build options * '--cpu-baseline' and '--cpu-dispatch'. These options remain crucial for implementing * attributes like `__cpu_baseline__` and `__cpu_dispatch__` in the NumPy module. * - Additional helper macros necessary for runtime dispatching. * - * Note: In the Meson build, features #definitions are conveyed via compiler arguments. + * Note: features #definitions are conveyed via compiler arguments. */ #include "npy_cpu_dispatch_config.h" -#ifndef NPY__CPU_MESON_BUILD - // Define helper macros necessary for runtime dispatching for distutils. - #include "npy_cpu_dispatch_distutils.h" -#endif -#if defined(NPY_HAVE_VSX) || defined(NPY_HAVE_VX) - #undef bool - #undef vector - #undef pixel - #ifdef NPY__CPU_DISPATCH_GUARD_BOOL - #define bool npy__cpu_dispatch_guard_bool - #undef NPY__CPU_DISPATCH_GUARD_BOOL - #endif -#endif + /** * Initialize the CPU dispatch tracer. * diff --git a/numpy/_core/src/common/npy_cpu_dispatch_distutils.h b/numpy/_core/src/common/npy_cpu_dispatch_distutils.h deleted file mode 100644 index 8db995412f4b..000000000000 --- a/numpy/_core/src/common/npy_cpu_dispatch_distutils.h +++ /dev/null @@ -1,116 +0,0 @@ -#ifndef NUMPY_CORE_SRC_COMMON_NPY_CPU_DISPATCH_DISTUTILS_H_ -#define NUMPY_CORE_SRC_COMMON_NPY_CPU_DISPATCH_DISTUTILS_H_ -#ifndef NUMPY_CORE_SRC_COMMON_NPY_CPU_DISPATCH_H_ - #error "Not standalone header please use 'npy_cpu_dispatch.h'" -#endif -/** - * This header should be removed after support for distutils is removed. 
- * It provides helper macros required for CPU runtime dispatching, - * which are already defined within `meson_cpu/main_config.h.in`. - * - * The following macros are explained within `meson_cpu/main_config.h.in`, - * although there are some differences in their usage: - * - * - Dispatched targets must be defined at the top of each dispatch-able - * source file within an inline or multi-line comment block. - * For example: //@targets baseline SSE2 AVX2 AVX512_SKX - * - * - The generated configuration derived from each dispatch-able source - * file must be guarded with `#ifndef NPY_DISABLE_OPTIMIZATION`. - * For example: - * #ifndef NPY_DISABLE_OPTIMIZATION - * #include "arithmetic.dispatch.h" - * #endif - */ -#include "npy_cpu_features.h" // NPY_CPU_HAVE -#include "numpy/utils.h" // NPY_EXPAND, NPY_CAT - -#ifdef NPY__CPU_TARGET_CURRENT - // 'NPY__CPU_TARGET_CURRENT': only defined by the dispatch-able sources - #define NPY_CPU_DISPATCH_CURFX(NAME) NPY_CAT(NPY_CAT(NAME, _), NPY__CPU_TARGET_CURRENT) -#else - #define NPY_CPU_DISPATCH_CURFX(NAME) NPY_EXPAND(NAME) -#endif -/** - * Defining the default behavior for the configurable macros of dispatch-able sources, - * 'NPY__CPU_DISPATCH_CALL(...)' and 'NPY__CPU_DISPATCH_BASELINE_CALL(...)' - * - * These macros are defined inside the generated config files that been derived from - * the configuration statements of the dispatch-able sources. - * - * The generated config file takes the same name of the dispatch-able source with replacing - * the extension to '.h' instead of '.c', and it should be treated as a header template. - */ -#ifndef NPY_DISABLE_OPTIMIZATION - #define NPY__CPU_DISPATCH_BASELINE_CALL(CB, ...) \ - &&"Expected config header of the dispatch-able source"; - #define NPY__CPU_DISPATCH_CALL(CHK, CB, ...) 
\ - &&"Expected config header of the dispatch-able source"; -#else - /** - * We assume by default that all configuration statements contains 'baseline' option however, - * if the dispatch-able source doesn't require it, then the dispatch-able source and following macros - * need to be guard it with '#ifndef NPY_DISABLE_OPTIMIZATION' - */ - #define NPY__CPU_DISPATCH_BASELINE_CALL(CB, ...) \ - NPY_EXPAND(CB(__VA_ARGS__)) - #define NPY__CPU_DISPATCH_CALL(CHK, CB, ...) -#endif // !NPY_DISABLE_OPTIMIZATION - -#define NPY_CPU_DISPATCH_DECLARE(...) \ - NPY__CPU_DISPATCH_CALL(NPY_CPU_DISPATCH_DECLARE_CHK_, NPY_CPU_DISPATCH_DECLARE_CB_, __VA_ARGS__) \ - NPY__CPU_DISPATCH_BASELINE_CALL(NPY_CPU_DISPATCH_DECLARE_BASE_CB_, __VA_ARGS__) -// Preprocessor callbacks -#define NPY_CPU_DISPATCH_DECLARE_CB_(DUMMY, TARGET_NAME, LEFT, ...) \ - NPY_CAT(NPY_CAT(LEFT, _), TARGET_NAME) __VA_ARGS__; -#define NPY_CPU_DISPATCH_DECLARE_BASE_CB_(LEFT, ...) \ - LEFT __VA_ARGS__; -// Dummy CPU runtime checking -#define NPY_CPU_DISPATCH_DECLARE_CHK_(FEATURE) - -#define NPY_CPU_DISPATCH_DECLARE_XB(...) \ - NPY__CPU_DISPATCH_CALL(NPY_CPU_DISPATCH_DECLARE_CHK_, NPY_CPU_DISPATCH_DECLARE_CB_, __VA_ARGS__) -#define NPY_CPU_DISPATCH_CALL(...) \ - NPY__CPU_DISPATCH_CALL(NPY_CPU_HAVE, NPY_CPU_DISPATCH_CALL_CB_, __VA_ARGS__) \ - NPY__CPU_DISPATCH_BASELINE_CALL(NPY_CPU_DISPATCH_CALL_BASE_CB_, __VA_ARGS__) -// Preprocessor callbacks -#define NPY_CPU_DISPATCH_CALL_CB_(TESTED_FEATURES, TARGET_NAME, LEFT, ...) \ - (TESTED_FEATURES) ? (NPY_CAT(NPY_CAT(LEFT, _), TARGET_NAME) __VA_ARGS__) : -#define NPY_CPU_DISPATCH_CALL_BASE_CB_(LEFT, ...) \ - (LEFT __VA_ARGS__) - -#define NPY_CPU_DISPATCH_CALL_XB(...) \ - NPY__CPU_DISPATCH_CALL(NPY_CPU_HAVE, NPY_CPU_DISPATCH_CALL_XB_CB_, __VA_ARGS__) \ - ((void) 0 /* discarded expression value */) -#define NPY_CPU_DISPATCH_CALL_XB_CB_(TESTED_FEATURES, TARGET_NAME, LEFT, ...) \ - (TESTED_FEATURES) ? 
(void) (NPY_CAT(NPY_CAT(LEFT, _), TARGET_NAME) __VA_ARGS__) : - -#define NPY_CPU_DISPATCH_CALL_ALL(...) \ - (NPY__CPU_DISPATCH_CALL(NPY_CPU_HAVE, NPY_CPU_DISPATCH_CALL_ALL_CB_, __VA_ARGS__) \ - NPY__CPU_DISPATCH_BASELINE_CALL(NPY_CPU_DISPATCH_CALL_ALL_BASE_CB_, __VA_ARGS__)) -// Preprocessor callbacks -#define NPY_CPU_DISPATCH_CALL_ALL_CB_(TESTED_FEATURES, TARGET_NAME, LEFT, ...) \ - ((TESTED_FEATURES) ? (NPY_CAT(NPY_CAT(LEFT, _), TARGET_NAME) __VA_ARGS__) : (void) 0), -#define NPY_CPU_DISPATCH_CALL_ALL_BASE_CB_(LEFT, ...) \ - ( LEFT __VA_ARGS__ ) - -#define NPY_CPU_DISPATCH_INFO() \ - { \ - NPY__CPU_DISPATCH_CALL(NPY_CPU_HAVE, NPY_CPU_DISPATCH_INFO_HIGH_CB_, DUMMY) \ - NPY__CPU_DISPATCH_BASELINE_CALL(NPY_CPU_DISPATCH_INFO_BASE_HIGH_CB_, DUMMY) \ - "", \ - NPY__CPU_DISPATCH_CALL(NPY_CPU_HAVE, NPY_CPU_DISPATCH_INFO_CB_, DUMMY) \ - NPY__CPU_DISPATCH_BASELINE_CALL(NPY_CPU_DISPATCH_INFO_BASE_CB_, DUMMY) \ - ""\ - } -#define NPY_CPU_DISPATCH_INFO_HIGH_CB_(TESTED_FEATURES, TARGET_NAME, ...) \ - (TESTED_FEATURES) ? NPY_TOSTRING(TARGET_NAME) : -#define NPY_CPU_DISPATCH_INFO_BASE_HIGH_CB_(...) \ - (1) ? "baseline(" NPY_WITH_CPU_BASELINE ")" : -// Preprocessor callbacks -#define NPY_CPU_DISPATCH_INFO_CB_(TESTED_FEATURES, TARGET_NAME, ...) \ - NPY_TOSTRING(TARGET_NAME) " " -#define NPY_CPU_DISPATCH_INFO_BASE_CB_(...) \ - "baseline(" NPY_WITH_CPU_BASELINE ")" - -#endif // NUMPY_CORE_SRC_COMMON_NPY_CPU_DISPATCH_DISTUTILS_H_ diff --git a/numpy/_core/src/common/npy_cpu_features.c b/numpy/_core/src/common/npy_cpu_features.c index 4f4448d13bcd..faffb7fc0781 100644 --- a/numpy/_core/src/common/npy_cpu_features.c +++ b/numpy/_core/src/common/npy_cpu_features.c @@ -5,6 +5,11 @@ /******************** Private Definitions *********************/ +// This is initialized during module initialization and thereafter immutable. 
+// We don't include it in the global data struct because the definitions in +// this file are shared by the _simd, _umath_tests, and +// _multiarray_umath modules + // Hold all CPU features boolean values static unsigned char npy__cpu_have[NPY_CPU_FEATURE_MAX]; @@ -75,12 +80,23 @@ static struct { {NPY_CPU_FEATURE_SSE41, "SSE41"}, {NPY_CPU_FEATURE_POPCNT, "POPCNT"}, {NPY_CPU_FEATURE_SSE42, "SSE42"}, + {NPY_CPU_FEATURE_X86_V2, "X86_V2"}, {NPY_CPU_FEATURE_AVX, "AVX"}, {NPY_CPU_FEATURE_F16C, "F16C"}, {NPY_CPU_FEATURE_XOP, "XOP"}, {NPY_CPU_FEATURE_FMA4, "FMA4"}, {NPY_CPU_FEATURE_FMA3, "FMA3"}, {NPY_CPU_FEATURE_AVX2, "AVX2"}, + {NPY_CPU_FEATURE_LAHF, "LAHF"}, + {NPY_CPU_FEATURE_CX16, "CX16"}, + {NPY_CPU_FEATURE_MOVBE, "MOVBE"}, + {NPY_CPU_FEATURE_BMI, "BMI"}, + {NPY_CPU_FEATURE_BMI2, "BMI2"}, + {NPY_CPU_FEATURE_LZCNT, "LZCNT"}, + {NPY_CPU_FEATURE_GFNI, "GFNI"}, + {NPY_CPU_FEATURE_VPCLMULQDQ, "VPCLMULQDQ"}, + {NPY_CPU_FEATURE_VAES, "VAES"}, + {NPY_CPU_FEATURE_X86_V3, "X86_V3"}, {NPY_CPU_FEATURE_AVX512F, "AVX512F"}, {NPY_CPU_FEATURE_AVX512CD, "AVX512CD"}, {NPY_CPU_FEATURE_AVX512ER, "AVX512ER"}, @@ -97,9 +113,11 @@ static struct { {NPY_CPU_FEATURE_AVX512VBMI2, "AVX512VBMI2"}, {NPY_CPU_FEATURE_AVX512BITALG, "AVX512BITALG"}, {NPY_CPU_FEATURE_AVX512FP16 , "AVX512FP16"}, + {NPY_CPU_FEATURE_AVX512BF16 , "AVX512BF16"}, {NPY_CPU_FEATURE_AVX512_KNL, "AVX512_KNL"}, {NPY_CPU_FEATURE_AVX512_KNM, "AVX512_KNM"}, {NPY_CPU_FEATURE_AVX512_SKX, "AVX512_SKX"}, + {NPY_CPU_FEATURE_X86_V4, "X86_V4"}, {NPY_CPU_FEATURE_AVX512_CLX, "AVX512_CLX"}, {NPY_CPU_FEATURE_AVX512_CNL, "AVX512_CNL"}, {NPY_CPU_FEATURE_AVX512_ICL, "AVX512_ICL"}, @@ -120,7 +138,8 @@ static struct { {NPY_CPU_FEATURE_ASIMDDP, "ASIMDDP"}, {NPY_CPU_FEATURE_ASIMDFHM, "ASIMDFHM"}, {NPY_CPU_FEATURE_SVE, "SVE"}, - {NPY_CPU_FEATURE_RVV, "RVV"}}; + {NPY_CPU_FEATURE_RVV, "RVV"}, + {NPY_CPU_FEATURE_LSX, "LSX"}}; NPY_VISIBILITY_HIDDEN PyObject * @@ -216,14 +235,13 @@ npy__cpu_validate_baseline(void) #define NPY__CPU_VALIDATE_CB(FEATURE, 
DUMMY) \ if (!npy__cpu_have[NPY_CAT(NPY_CPU_FEATURE_, FEATURE)]) { \ - const int size = sizeof(NPY_TOSTRING(FEATURE)); \ + const int size = sizeof(NPY_TOSTRING(FEATURE)) - 1; \ memcpy(fptr, NPY_TOSTRING(FEATURE), size); \ fptr[size] = ' '; fptr += size + 1; \ } NPY_WITH_CPU_BASELINE_CALL(NPY__CPU_VALIDATE_CB, DUMMY) // extra arg for msvc - *fptr = '\0'; - if (baseline_failure[0] != '\0') { + if (fptr > baseline_failure) { *(fptr-1) = '\0'; // trim the last space PyErr_Format(PyExc_RuntimeError, "NumPy was built with baseline optimizations: \n" @@ -240,7 +258,7 @@ npy__cpu_validate_baseline(void) static int npy__cpu_check_env(int disable, const char *env) { - static const char *names[] = { + static const char *const names[] = { "enable", "disable", "NPY_ENABLE_CPU_FEATURES", "NPY_DISABLE_CPU_FEATURES", "During parsing environment variable: 'NPY_ENABLE_CPU_FEATURES':\n", @@ -271,7 +289,7 @@ npy__cpu_check_env(int disable, const char *env) { char *notsupp_cur = ¬supp[0]; //comma and space including (htab, vtab, CR, LF, FF) - const char *delim = ", \t\v\r\n\f"; + const char delim[] = ", \t\v\r\n\f"; char *feature = strtok(features, delim); while (feature) { if (npy__cpu_baseline_fid(feature) > 0){ @@ -392,12 +410,18 @@ npy__cpu_getxcr0(void) } static void -npy__cpu_cpuid(int reg[4], int func_id) +npy__cpu_cpuid_count(int reg[4], int func_id, int count) { #if defined(_MSC_VER) - __cpuidex(reg, func_id, 0); + __cpuidex(reg, func_id, count); #elif defined(__INTEL_COMPILER) __cpuid(reg, func_id); + // classic Intel compilers do not support count + if (count != 0) { + for (int i = 0; i < 4; i++) { + reg[i] = 0; + } + } #elif defined(__GNUC__) || defined(__clang__) #if defined(NPY_CPU_X86) && defined(__PIC__) // %ebx may be the PIC register @@ -406,13 +430,13 @@ npy__cpu_cpuid(int reg[4], int func_id) "xchg{l}\t{%%}ebx, %1\n\t" : "=a" (reg[0]), "=r" (reg[1]), "=c" (reg[2]), "=d" (reg[3]) - : "a" (func_id), "c" (0) + : "a" (func_id), "c" (count) ); #else __asm__("cpuid\n\t" : 
"=a" (reg[0]), "=b" (reg[1]), "=c" (reg[2]), "=d" (reg[3]) - : "a" (func_id), "c" (0) + : "a" (func_id), "c" (count) ); #endif #else @@ -420,6 +444,12 @@ npy__cpu_cpuid(int reg[4], int func_id) #endif } +static void +npy__cpu_cpuid(int reg[4], int func_id) +{ + npy__cpu_cpuid_count(reg, func_id, 0); +} + static void npy__cpu_init_features(void) { @@ -435,7 +465,13 @@ npy__cpu_init_features(void) #ifdef NPY_CPU_AMD64 npy__cpu_have[NPY_CPU_FEATURE_SSE3] = 1; #endif - return; + // For unsupported compilers, we default to NPY_CPU_X86_V2 availability + // as this is the minimum baseline required to bypass initial capability checks. + // However, we deliberately don't set any additional CPU feature flags, + // allowing us to detect this fallback behavior later via the Python + // __cpu_features__ dictionary. + npy__cpu_have[NPY_CPU_FEATURE_X86_V2] = 1; + return; } npy__cpu_cpuid(reg, 1); @@ -447,34 +483,42 @@ npy__cpu_init_features(void) npy__cpu_have[NPY_CPU_FEATURE_SSE41] = (reg[2] & (1 << 19)) != 0; npy__cpu_have[NPY_CPU_FEATURE_POPCNT] = (reg[2] & (1 << 23)) != 0; npy__cpu_have[NPY_CPU_FEATURE_SSE42] = (reg[2] & (1 << 20)) != 0; + npy__cpu_have[NPY_CPU_FEATURE_CX16] = (reg[2] & (1 << 13)) != 0; npy__cpu_have[NPY_CPU_FEATURE_F16C] = (reg[2] & (1 << 29)) != 0; + npy__cpu_have[NPY_CPU_FEATURE_MOVBE] = (reg[2] & (1 << 22)) != 0; - // check OSXSAVE - if ((reg[2] & (1 << 27)) == 0) - return; - // check AVX OS support - int xcr = npy__cpu_getxcr0(); - if ((xcr & 6) != 6) - return; - npy__cpu_have[NPY_CPU_FEATURE_AVX] = (reg[2] & (1 << 28)) != 0; - if (!npy__cpu_have[NPY_CPU_FEATURE_AVX]) - return; + int osxsave = (reg[2] & (1 << 27)) != 0; + int xcr = 0; + if (osxsave) { + xcr = npy__cpu_getxcr0(); + } + int avx_os = (xcr & 6) == 6; + npy__cpu_have[NPY_CPU_FEATURE_AVX] = (reg[2] & (1 << 28)) != 0 && avx_os; npy__cpu_have[NPY_CPU_FEATURE_FMA3] = (reg[2] & (1 << 12)) != 0; // second call to the cpuid to get extended AMD feature bits npy__cpu_cpuid(reg, 0x80000001); - 
npy__cpu_have[NPY_CPU_FEATURE_XOP] = (reg[2] & (1 << 11)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_FMA4] = (reg[2] & (1 << 16)) != 0; +#ifdef NPY_CPU_AMD64 + // long mode only + npy__cpu_have[NPY_CPU_FEATURE_LAHF] = (reg[2] & (1 << 0)) != 0; +#else + // alawys available + npy__cpu_have[NPY_CPU_FEATURE_LAHF] = 1; +#endif + npy__cpu_have[NPY_CPU_FEATURE_LZCNT] = (reg[2] & (1 << 5)) != 0; + npy__cpu_have[NPY_CPU_FEATURE_POPCNT] |= npy__cpu_have[NPY_CPU_FEATURE_LZCNT]; + npy__cpu_have[NPY_CPU_FEATURE_XOP] = (reg[2] & (1 << 11)) != 0 && npy__cpu_have[NPY_CPU_FEATURE_AVX]; + npy__cpu_have[NPY_CPU_FEATURE_FMA4] = (reg[2] & (1 << 16)) != 0 && npy__cpu_have[NPY_CPU_FEATURE_AVX]; // third call to the cpuid to get extended AVX2 & AVX512 feature bits npy__cpu_cpuid(reg, 7); - npy__cpu_have[NPY_CPU_FEATURE_AVX2] = (reg[1] & (1 << 5)) != 0; - if (!npy__cpu_have[NPY_CPU_FEATURE_AVX2]) - return; - // detect AVX2 & FMA3 - npy__cpu_have[NPY_CPU_FEATURE_FMA] = npy__cpu_have[NPY_CPU_FEATURE_FMA3]; + npy__cpu_have[NPY_CPU_FEATURE_AVX2] = (reg[1] & (1 << 5)) != 0 && npy__cpu_have[NPY_CPU_FEATURE_AVX]; + npy__cpu_have[NPY_CPU_FEATURE_BMI] = (reg[1] & (1 << 3)) != 0; + npy__cpu_have[NPY_CPU_FEATURE_BMI2] = (reg[1] & (1 << 8)) != 0 && npy__cpu_have[NPY_CPU_FEATURE_BMI]; + npy__cpu_have[NPY_CPU_FEATURE_GFNI] = (reg[2] & (1 << 8)) != 0; + npy__cpu_have[NPY_CPU_FEATURE_VAES] = (reg[2] & (1 << 9)) != 0; + npy__cpu_have[NPY_CPU_FEATURE_VPCLMULQDQ] = (reg[2] & (1 << 10)) != 0; - // check AVX512 OS support int avx512_os = (xcr & 0xe6) == 0xe6; #if defined(__APPLE__) && defined(__x86_64__) /** @@ -486,7 +530,7 @@ npy__cpu_init_features(void) * - https://github.com/golang/go/issues/43089 * - https://github.com/numpy/numpy/issues/19319 */ - if (!avx512_os) { + if (!avx512_os && avx_os) { npy_uintp commpage64_addr = 0x00007fffffe00000ULL; npy_uint16 commpage64_ver = *((npy_uint16*)(commpage64_addr + 0x01E)); // cpu_capabilities64 undefined in versions < 13 @@ -496,65 +540,110 @@ 
npy__cpu_init_features(void) } } #endif - if (!avx512_os) { - return; - } - npy__cpu_have[NPY_CPU_FEATURE_AVX512F] = (reg[1] & (1 << 16)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX512CD] = (reg[1] & (1 << 28)) != 0; - if (npy__cpu_have[NPY_CPU_FEATURE_AVX512F] && npy__cpu_have[NPY_CPU_FEATURE_AVX512CD]) { + npy__cpu_have[NPY_CPU_FEATURE_AVX512F] = (reg[1] & (1 << 16)) != 0 && avx512_os; + if (npy__cpu_have[NPY_CPU_FEATURE_AVX512F]) { + npy__cpu_have[NPY_CPU_FEATURE_AVX512CD] = (reg[1] & (1 << 28)) != 0; // Knights Landing npy__cpu_have[NPY_CPU_FEATURE_AVX512PF] = (reg[1] & (1 << 26)) != 0; npy__cpu_have[NPY_CPU_FEATURE_AVX512ER] = (reg[1] & (1 << 27)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX512_KNL] = npy__cpu_have[NPY_CPU_FEATURE_AVX512ER] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512PF]; // Knights Mill npy__cpu_have[NPY_CPU_FEATURE_AVX512VPOPCNTDQ] = (reg[2] & (1 << 14)) != 0; npy__cpu_have[NPY_CPU_FEATURE_AVX5124VNNIW] = (reg[3] & (1 << 2)) != 0; npy__cpu_have[NPY_CPU_FEATURE_AVX5124FMAPS] = (reg[3] & (1 << 3)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX512_KNM] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_KNL] && - npy__cpu_have[NPY_CPU_FEATURE_AVX5124FMAPS] && - npy__cpu_have[NPY_CPU_FEATURE_AVX5124VNNIW] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512VPOPCNTDQ]; - // Skylake-X npy__cpu_have[NPY_CPU_FEATURE_AVX512DQ] = (reg[1] & (1 << 17)) != 0; npy__cpu_have[NPY_CPU_FEATURE_AVX512BW] = (reg[1] & (1 << 30)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX512VL] = (reg[1] & (1 << 31)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX512_SKX] = npy__cpu_have[NPY_CPU_FEATURE_AVX512BW] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512DQ] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512VL]; + // cast and use of unsigned int literal silences UBSan warning: + // "runtime error: left shift of 1 by 31 places cannot be represented in type 'int'" + npy__cpu_have[NPY_CPU_FEATURE_AVX512VL] = (reg[1] & (int)(1u << 31)) != 0; // Cascade Lake npy__cpu_have[NPY_CPU_FEATURE_AVX512VNNI] = (reg[2] & (1 << 11)) != 0; - 
npy__cpu_have[NPY_CPU_FEATURE_AVX512_CLX] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_SKX] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512VNNI]; - // Cannon Lake npy__cpu_have[NPY_CPU_FEATURE_AVX512IFMA] = (reg[1] & (1 << 21)) != 0; npy__cpu_have[NPY_CPU_FEATURE_AVX512VBMI] = (reg[2] & (1 << 1)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX512_CNL] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_SKX] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512IFMA] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512VBMI]; // Ice Lake npy__cpu_have[NPY_CPU_FEATURE_AVX512VBMI2] = (reg[2] & (1 << 6)) != 0; npy__cpu_have[NPY_CPU_FEATURE_AVX512BITALG] = (reg[2] & (1 << 12)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX512_ICL] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_CLX] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512_CNL] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512VBMI2] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512BITALG] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512VPOPCNTDQ]; // Sapphire Rapids - npy__cpu_have[NPY_CPU_FEATURE_AVX512FP16] = (reg[3] & (1 << 23)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX512_SPR] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_ICL] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512FP16]; - + npy__cpu_have[NPY_CPU_FEATURE_AVX512FP16] = (reg[3] & (1 << 23)) != 0; + npy__cpu_cpuid_count(reg, 7, 1); + npy__cpu_have[NPY_CPU_FEATURE_AVX512BF16] = (reg[0] & (1 << 5)) != 0; } + + // Groups + npy__cpu_have[NPY_CPU_FEATURE_X86_V2] = npy__cpu_have[NPY_CPU_FEATURE_SSE] && + npy__cpu_have[NPY_CPU_FEATURE_SSE2] && + npy__cpu_have[NPY_CPU_FEATURE_SSE3] && + npy__cpu_have[NPY_CPU_FEATURE_SSSE3] && + npy__cpu_have[NPY_CPU_FEATURE_SSE41] && + npy__cpu_have[NPY_CPU_FEATURE_SSE42] && + npy__cpu_have[NPY_CPU_FEATURE_POPCNT] && + #ifdef NPY_CPU_AMD64 + npy__cpu_have[NPY_CPU_FEATURE_CX16] && + #endif + npy__cpu_have[NPY_CPU_FEATURE_LAHF]; + + npy__cpu_have[NPY_CPU_FEATURE_X86_V3] = npy__cpu_have[NPY_CPU_FEATURE_X86_V2] && + npy__cpu_have[NPY_CPU_FEATURE_AVX] && + npy__cpu_have[NPY_CPU_FEATURE_AVX2] && + npy__cpu_have[NPY_CPU_FEATURE_F16C] && + 
npy__cpu_have[NPY_CPU_FEATURE_FMA3] && + npy__cpu_have[NPY_CPU_FEATURE_BMI] && + npy__cpu_have[NPY_CPU_FEATURE_BMI2] && + npy__cpu_have[NPY_CPU_FEATURE_LZCNT] && + npy__cpu_have[NPY_CPU_FEATURE_MOVBE]; + + + npy__cpu_have[NPY_CPU_FEATURE_X86_V4] = npy__cpu_have[NPY_CPU_FEATURE_X86_V3] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512F] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512CD] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512BW] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512DQ] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512VL]; + + + npy__cpu_have[NPY_CPU_FEATURE_AVX512_ICL] = npy__cpu_have[NPY_CPU_FEATURE_X86_V4] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512VNNI] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512IFMA] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512VBMI] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512VBMI2] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512BITALG] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512VPOPCNTDQ] && + npy__cpu_have[NPY_CPU_FEATURE_GFNI] && + npy__cpu_have[NPY_CPU_FEATURE_VAES] && + npy__cpu_have[NPY_CPU_FEATURE_VPCLMULQDQ]; + + npy__cpu_have[NPY_CPU_FEATURE_AVX512_SPR] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_ICL] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512FP16] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512BF16]; + + + + // Legacy groups + npy__cpu_have[NPY_CPU_FEATURE_AVX512_KNL] = npy__cpu_have[NPY_CPU_FEATURE_AVX512F] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512CD] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512ER] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512PF]; + + npy__cpu_have[NPY_CPU_FEATURE_AVX512_KNM] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_KNL] && + npy__cpu_have[NPY_CPU_FEATURE_AVX5124FMAPS] && + npy__cpu_have[NPY_CPU_FEATURE_AVX5124VNNIW] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512VPOPCNTDQ]; + + npy__cpu_have[NPY_CPU_FEATURE_AVX512_CLX] = npy__cpu_have[NPY_CPU_FEATURE_X86_V4] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512VNNI]; + + npy__cpu_have[NPY_CPU_FEATURE_AVX512_CNL] = npy__cpu_have[NPY_CPU_FEATURE_X86_V4] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512IFMA] && + 
npy__cpu_have[NPY_CPU_FEATURE_AVX512VBMI]; + } /***************** POWER ******************/ #elif defined(NPY_CPU_PPC64) || defined(NPY_CPU_PPC64LE) -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) #ifdef __FreeBSD__ #include // defines PPC_FEATURE_HAS_VSX #endif @@ -577,7 +666,7 @@ static void npy__cpu_init_features(void) { memset(npy__cpu_have, 0, sizeof(npy__cpu_have[0]) * NPY_CPU_FEATURE_MAX); -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) #ifdef __linux__ unsigned int hwcap = getauxval(AT_HWCAP); if ((hwcap & PPC_FEATURE_HAS_VSX) == 0) @@ -604,7 +693,7 @@ npy__cpu_init_features(void) npy__cpu_have[NPY_CPU_FEATURE_VSX2] = (hwcap & PPC_FEATURE2_ARCH_2_07) != 0; npy__cpu_have[NPY_CPU_FEATURE_VSX3] = (hwcap & PPC_FEATURE2_ARCH_3_00) != 0; npy__cpu_have[NPY_CPU_FEATURE_VSX4] = (hwcap & PPC_FEATURE2_ARCH_3_1) != 0; -// TODO: AIX, OpenBSD +// TODO: AIX #else npy__cpu_have[NPY_CPU_FEATURE_VSX] = 1; #if defined(NPY_CPU_PPC64LE) || defined(NPY_HAVE_VSX2) @@ -624,10 +713,14 @@ npy__cpu_init_features(void) #elif defined(__s390x__) #include -#ifndef HWCAP_S390_VXE - #define HWCAP_S390_VXE 8192 -#endif +/* kernel HWCAP names, available in musl, not available in glibc<2.33: https://sourceware.org/bugzilla/show_bug.cgi?id=25971 */ +#ifndef HWCAP_S390_VXRS + #define HWCAP_S390_VXRS 2048 +#endif +#ifndef HWCAP_S390_VXRS_EXT + #define HWCAP_S390_VXRS_EXT 8192 +#endif #ifndef HWCAP_S390_VXRS_EXT2 #define HWCAP_S390_VXRS_EXT2 32768 #endif @@ -636,9 +729,9 @@ static void npy__cpu_init_features(void) { memset(npy__cpu_have, 0, sizeof(npy__cpu_have[0]) * NPY_CPU_FEATURE_MAX); - + unsigned int hwcap = getauxval(AT_HWCAP); - if ((hwcap & HWCAP_S390_VX) == 0) { + if ((hwcap & HWCAP_S390_VXRS) == 0) { return; } @@ -648,12 +741,30 @@ npy__cpu_init_features(void) npy__cpu_have[NPY_CPU_FEATURE_VXE2] = 1; return; } - - 
npy__cpu_have[NPY_CPU_FEATURE_VXE] = (hwcap & HWCAP_S390_VXE) != 0; + + npy__cpu_have[NPY_CPU_FEATURE_VXE] = (hwcap & HWCAP_S390_VXRS_EXT) != 0; npy__cpu_have[NPY_CPU_FEATURE_VX] = 1; } +/***************** LoongArch ******************/ + +#elif defined(__loongarch_lp64) + +#include +#include + +static void +npy__cpu_init_features(void) +{ + memset(npy__cpu_have, 0, sizeof(npy__cpu_have[0]) * NPY_CPU_FEATURE_MAX); + unsigned int hwcap = getauxval(AT_HWCAP); + + if ((hwcap & HWCAP_LOONGARCH_LSX)) { + npy__cpu_have[NPY_CPU_FEATURE_LSX] = 1; + return; + } +} /***************** ARM ******************/ @@ -668,7 +779,7 @@ npy__cpu_init_features_arm8(void) npy__cpu_have[NPY_CPU_FEATURE_ASIMD] = 1; } -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) /* * we aren't sure of what kind kernel or clib we deal with * so we play it safe @@ -679,7 +790,7 @@ npy__cpu_init_features_arm8(void) #if defined(__linux__) __attribute__((weak)) unsigned long getauxval(unsigned long); // linker should handle it #endif -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) || defined(__OpenBSD__) __attribute__((weak)) int elf_aux_info(int, void *, int); // linker should handle it static unsigned long getauxval(unsigned long k) @@ -742,34 +853,33 @@ npy__cpu_init_features_linux(void) #endif } #ifdef __arm__ + npy__cpu_have[NPY_CPU_FEATURE_NEON] = (hwcap & NPY__HWCAP_NEON) != 0; + if (npy__cpu_have[NPY_CPU_FEATURE_NEON]) { + npy__cpu_have[NPY_CPU_FEATURE_NEON_FP16] = (hwcap & NPY__HWCAP_HALF) != 0; + npy__cpu_have[NPY_CPU_FEATURE_NEON_VFPV4] = (hwcap & NPY__HWCAP_VFPv4) != 0; + } // Detect Arm8 (aarch32 state) if ((hwcap2 & NPY__HWCAP2_AES) || (hwcap2 & NPY__HWCAP2_SHA1) || (hwcap2 & NPY__HWCAP2_SHA2) || (hwcap2 & NPY__HWCAP2_PMULL) || (hwcap2 & NPY__HWCAP2_CRC32)) { - hwcap = hwcap2; + npy__cpu_have[NPY_CPU_FEATURE_ASIMD] = npy__cpu_have[NPY_CPU_FEATURE_NEON]; + } #else - if (1) - { - if (!(hwcap & (NPY__HWCAP_FP | 
NPY__HWCAP_ASIMD))) { - // Is this could happen? maybe disabled by kernel - // BTW this will break the baseline of AARCH64 - return 1; - } -#endif - npy__cpu_have[NPY_CPU_FEATURE_FPHP] = (hwcap & NPY__HWCAP_FPHP) != 0; - npy__cpu_have[NPY_CPU_FEATURE_ASIMDHP] = (hwcap & NPY__HWCAP_ASIMDHP) != 0; - npy__cpu_have[NPY_CPU_FEATURE_ASIMDDP] = (hwcap & NPY__HWCAP_ASIMDDP) != 0; - npy__cpu_have[NPY_CPU_FEATURE_ASIMDFHM] = (hwcap & NPY__HWCAP_ASIMDFHM) != 0; - npy__cpu_have[NPY_CPU_FEATURE_SVE] = (hwcap & NPY__HWCAP_SVE) != 0; - npy__cpu_init_features_arm8(); - } else { - npy__cpu_have[NPY_CPU_FEATURE_NEON] = (hwcap & NPY__HWCAP_NEON) != 0; - if (npy__cpu_have[NPY_CPU_FEATURE_NEON]) { - npy__cpu_have[NPY_CPU_FEATURE_NEON_FP16] = (hwcap & NPY__HWCAP_HALF) != 0; - npy__cpu_have[NPY_CPU_FEATURE_NEON_VFPV4] = (hwcap & NPY__HWCAP_VFPv4) != 0; - } + if (!(hwcap & (NPY__HWCAP_FP | NPY__HWCAP_ASIMD))) { + // Is this could happen? maybe disabled by kernel + // BTW this will break the baseline of AARCH64 + return 1; } + npy__cpu_init_features_arm8(); +#endif + npy__cpu_have[NPY_CPU_FEATURE_FPHP] = (hwcap & NPY__HWCAP_FPHP) != 0; + npy__cpu_have[NPY_CPU_FEATURE_ASIMDHP] = (hwcap & NPY__HWCAP_ASIMDHP) != 0; + npy__cpu_have[NPY_CPU_FEATURE_ASIMDDP] = (hwcap & NPY__HWCAP_ASIMDDP) != 0; + npy__cpu_have[NPY_CPU_FEATURE_ASIMDFHM] = (hwcap & NPY__HWCAP_ASIMDFHM) != 0; +#ifndef __arm__ + npy__cpu_have[NPY_CPU_FEATURE_SVE] = (hwcap & NPY__HWCAP_SVE) != 0; +#endif return 1; } #endif @@ -778,7 +888,7 @@ static void npy__cpu_init_features(void) { memset(npy__cpu_have, 0, sizeof(npy__cpu_have[0]) * NPY_CPU_FEATURE_MAX); -#ifdef __linux__ +#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) if (npy__cpu_init_features_linux()) return; #endif @@ -817,22 +927,30 @@ npy__cpu_init_features(void) #elif defined(__riscv) && __riscv_xlen == 64 -#include +#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) + #include -#ifndef HWCAP_RVV - // 
https://github.com/torvalds/linux/blob/v6.8/arch/riscv/include/uapi/asm/hwcap.h#L24 - #define COMPAT_HWCAP_ISA_V (1 << ('V' - 'A')) + #ifndef HWCAP_RVV + // https://github.com/torvalds/linux/blob/v6.8/arch/riscv/include/uapi/asm/hwcap.h#L24 + #define COMPAT_HWCAP_ISA_V (1 << ('V' - 'A')) + #endif #endif static void npy__cpu_init_features(void) { memset(npy__cpu_have, 0, sizeof(npy__cpu_have[0]) * NPY_CPU_FEATURE_MAX); - +#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) +#ifdef __linux__ unsigned int hwcap = getauxval(AT_HWCAP); +#else + unsigned long hwcap; + elf_aux_info(AT_HWCAP, &hwcap, sizeof(hwcap)); +#endif if (hwcap & COMPAT_HWCAP_ISA_V) { npy__cpu_have[NPY_CPU_FEATURE_RVV] = 1; } +#endif } /*********** Unsupported ARCH ***********/ @@ -842,7 +960,7 @@ npy__cpu_init_features(void) { /* * just in case if the compiler doesn't respect ANSI - * but for knowing platforms it still nessecery, because @npy__cpu_init_features + * but for knowing platforms it still necessary, because @npy__cpu_init_features * may called multiple of times and we need to clear the disabled features by * ENV Var or maybe in the future we can support other methods like * global variables, go back to @npy__cpu_try_disable_env for more understanding diff --git a/numpy/_core/src/common/npy_cpu_features.h b/numpy/_core/src/common/npy_cpu_features.h index d1e9d7e60d9f..de05a17afdb8 100644 --- a/numpy/_core/src/common/npy_cpu_features.h +++ b/numpy/_core/src/common/npy_cpu_features.h @@ -26,8 +26,15 @@ enum npy_cpu_features NPY_CPU_FEATURE_FMA4 = 12, NPY_CPU_FEATURE_FMA3 = 13, NPY_CPU_FEATURE_AVX2 = 14, - NPY_CPU_FEATURE_FMA = 15, // AVX2 & FMA3, provides backward compatibility - + NPY_CPU_FEATURE_LAHF = 15, + NPY_CPU_FEATURE_CX16 = 16, + NPY_CPU_FEATURE_MOVBE = 17, + NPY_CPU_FEATURE_BMI = 18, + NPY_CPU_FEATURE_BMI2 = 19, + NPY_CPU_FEATURE_LZCNT = 20, + NPY_CPU_FEATURE_GFNI = 21, + NPY_CPU_FEATURE_VAES = 22, + NPY_CPU_FEATURE_VPCLMULQDQ = 23, NPY_CPU_FEATURE_AVX512F = 30, 
NPY_CPU_FEATURE_AVX512CD = 31, NPY_CPU_FEATURE_AVX512ER = 32, @@ -44,6 +51,8 @@ enum npy_cpu_features NPY_CPU_FEATURE_AVX512VBMI2 = 43, NPY_CPU_FEATURE_AVX512BITALG = 44, NPY_CPU_FEATURE_AVX512FP16 = 45, + NPY_CPU_FEATURE_AVX512BF16 = 46, + // X86 CPU Groups // Knights Landing (F,CD,ER,PF) @@ -56,10 +65,17 @@ enum npy_cpu_features NPY_CPU_FEATURE_AVX512_CLX = 104, // Cannon Lake (F,CD,BW,DQ,VL,IFMA,VBMI) NPY_CPU_FEATURE_AVX512_CNL = 105, - // Ice Lake (F,CD,BW,DQ,VL,IFMA,VBMI,VNNI,VBMI2,BITALG,VPOPCNTDQ) + // Ice Lake (F,CD,BW,DQ,VL,IFMA,VBMI,VNNI,VBMI2,BITALG,VPOPCNTDQ,GFNI,VPCLMULDQ,VAES) NPY_CPU_FEATURE_AVX512_ICL = 106, - // Sapphire Rapids (Ice Lake, AVX512FP16) + // Sapphire Rapids (Ice Lake, AVX512FP16, AVX512BF16) NPY_CPU_FEATURE_AVX512_SPR = 107, + // x86-64-v2 microarchitectures (SSE[1-4.*], POPCNT, LAHF, CX16) + // On 32-bit, cx16 is not available so it is not included + NPY_CPU_FEATURE_X86_V2 = 108, + // x86-64-v3 microarchitectures (X86_V2, AVX, AVX2, FMA3, BMI, BMI2, LZCNT, F16C, MOVBE) + NPY_CPU_FEATURE_X86_V3 = 109, + // x86-64-v4 microarchitectures (X86_V3, AVX512F, AVX512CD, AVX512VL, AVX512BW, AVX512DQ) + NPY_CPU_FEATURE_X86_V4 = NPY_CPU_FEATURE_AVX512_SKX, // IBM/POWER VSX // POWER7 @@ -91,7 +107,7 @@ enum npy_cpu_features // IBM/ZARCH NPY_CPU_FEATURE_VX = 350, - + // Vector-Enhancements Facility 1 NPY_CPU_FEATURE_VXE = 351, @@ -101,6 +117,9 @@ enum npy_cpu_features // RISC-V NPY_CPU_FEATURE_RVV = 400, + // LOONGARCH + NPY_CPU_FEATURE_LSX = 500, + NPY_CPU_FEATURE_MAX }; @@ -113,7 +132,7 @@ enum npy_cpu_features * - uses 'NPY_DISABLE_CPU_FEATURES' to disable dispatchable features * - uses 'NPY_ENABLE_CPU_FEATURES' to enable dispatchable features * - * It will set a RuntimeError when + * It will set a RuntimeError when * - CPU baseline features from the build are not supported at runtime * - 'NPY_DISABLE_CPU_FEATURES' tries to disable a baseline feature * - 'NPY_DISABLE_CPU_FEATURES' and 'NPY_ENABLE_CPU_FEATURES' are @@ -122,14 +141,14 @@ enum 
npy_cpu_features * by the machine or build * - 'NPY_ENABLE_CPU_FEATURES' tries to enable a feature when the project was * not built with any feature optimization support - * + * * It will set an ImportWarning when: * - 'NPY_DISABLE_CPU_FEATURES' tries to disable a feature that is not supported * by the machine or build * - 'NPY_DISABLE_CPU_FEATURES' or 'NPY_ENABLE_CPU_FEATURES' tries to * disable/enable a feature when the project was not built with any feature * optimization support - * + * * return 0 on success otherwise return -1 */ NPY_VISIBILITY_HIDDEN int diff --git a/numpy/_core/src/common/npy_cpuinfo_parser.h b/numpy/_core/src/common/npy_cpuinfo_parser.h index 154c4245ba2b..30f2976d28b6 100644 --- a/numpy/_core/src/common/npy_cpuinfo_parser.h +++ b/numpy/_core/src/common/npy_cpuinfo_parser.h @@ -36,25 +36,43 @@ #define NPY__HWCAP 16 #define NPY__HWCAP2 26 -// arch/arm/include/uapi/asm/hwcap.h -#define NPY__HWCAP_HALF (1 << 1) -#define NPY__HWCAP_NEON (1 << 12) -#define NPY__HWCAP_VFPv3 (1 << 13) -#define NPY__HWCAP_VFPv4 (1 << 16) -#define NPY__HWCAP2_AES (1 << 0) -#define NPY__HWCAP2_PMULL (1 << 1) -#define NPY__HWCAP2_SHA1 (1 << 2) -#define NPY__HWCAP2_SHA2 (1 << 3) -#define NPY__HWCAP2_CRC32 (1 << 4) -// arch/arm64/include/uapi/asm/hwcap.h -#define NPY__HWCAP_FP (1 << 0) -#define NPY__HWCAP_ASIMD (1 << 1) -#define NPY__HWCAP_FPHP (1 << 9) -#define NPY__HWCAP_ASIMDHP (1 << 10) -#define NPY__HWCAP_ASIMDDP (1 << 20) -#define NPY__HWCAP_SVE (1 << 22) -#define NPY__HWCAP_ASIMDFHM (1 << 23) -/* +#ifdef __arm__ + // arch/arm/include/uapi/asm/hwcap.h + #define NPY__HWCAP_HALF (1 << 1) + #define NPY__HWCAP_NEON (1 << 12) + #define NPY__HWCAP_VFPv3 (1 << 13) + #define NPY__HWCAP_VFPv4 (1 << 16) + + #define NPY__HWCAP_FPHP (1 << 22) + #define NPY__HWCAP_ASIMDHP (1 << 23) + #define NPY__HWCAP_ASIMDDP (1 << 24) + #define NPY__HWCAP_ASIMDFHM (1 << 25) + + #define NPY__HWCAP2_AES (1 << 0) + #define NPY__HWCAP2_PMULL (1 << 1) + #define NPY__HWCAP2_SHA1 (1 << 2) + #define 
NPY__HWCAP2_SHA2 (1 << 3) + #define NPY__HWCAP2_CRC32 (1 << 4) +#else + // arch/arm64/include/uapi/asm/hwcap.h + #define NPY__HWCAP_FP (1 << 0) + #define NPY__HWCAP_ASIMD (1 << 1) + + #define NPY__HWCAP_FPHP (1 << 9) + #define NPY__HWCAP_ASIMDHP (1 << 10) + #define NPY__HWCAP_ASIMDDP (1 << 20) + #define NPY__HWCAP_ASIMDFHM (1 << 23) + + #define NPY__HWCAP_AES (1 << 3) + #define NPY__HWCAP_PMULL (1 << 4) + #define NPY__HWCAP_SHA1 (1 << 5) + #define NPY__HWCAP_SHA2 (1 << 6) + #define NPY__HWCAP_CRC32 (1 << 7) + #define NPY__HWCAP_SVE (1 << 22) +#endif + + +/* * Get the size of a file by reading it until the end. This is needed * because files under /proc do not always return a valid size when * using fseek(0, SEEK_END) + ftell(). Nor can they be mmap()-ed. @@ -87,7 +105,7 @@ get_file_size(const char* pathname) return result; } -/* +/* * Read the content of /proc/cpuinfo into a user-provided buffer. * Return the length of the data, or -1 on error. Does *not* * zero-terminate the content. Will not read more @@ -123,7 +141,7 @@ read_file(const char* pathname, char* buffer, size_t buffsize) return count; } -/* +/* * Extract the content of a the first occurrence of a given field in * the content of /proc/cpuinfo and return it as a heap-allocated * string that must be freed by the caller. @@ -182,7 +200,7 @@ extract_cpuinfo_field(const char* buffer, int buflen, const char* field) return result; } -/* +/* * Checks that a space-separated list of items contains one given 'item'. * Returns 1 if found, 0 otherwise. */ @@ -220,44 +238,51 @@ has_list_item(const char* list, const char* item) return 0; } -static void setHwcap(char* cpuFeatures, unsigned long* hwcap) { - *hwcap |= has_list_item(cpuFeatures, "neon") ? NPY__HWCAP_NEON : 0; - *hwcap |= has_list_item(cpuFeatures, "half") ? NPY__HWCAP_HALF : 0; - *hwcap |= has_list_item(cpuFeatures, "vfpv3") ? NPY__HWCAP_VFPv3 : 0; - *hwcap |= has_list_item(cpuFeatures, "vfpv4") ? 
NPY__HWCAP_VFPv4 : 0; - - *hwcap |= has_list_item(cpuFeatures, "asimd") ? NPY__HWCAP_ASIMD : 0; - *hwcap |= has_list_item(cpuFeatures, "fp") ? NPY__HWCAP_FP : 0; - *hwcap |= has_list_item(cpuFeatures, "fphp") ? NPY__HWCAP_FPHP : 0; - *hwcap |= has_list_item(cpuFeatures, "asimdhp") ? NPY__HWCAP_ASIMDHP : 0; - *hwcap |= has_list_item(cpuFeatures, "asimddp") ? NPY__HWCAP_ASIMDDP : 0; - *hwcap |= has_list_item(cpuFeatures, "asimdfhm") ? NPY__HWCAP_ASIMDFHM : 0; -} - static int get_feature_from_proc_cpuinfo(unsigned long *hwcap, unsigned long *hwcap2) { - char* cpuinfo = NULL; - int cpuinfo_len; - cpuinfo_len = get_file_size("/proc/cpuinfo"); + *hwcap = 0; + *hwcap2 = 0; + + int cpuinfo_len = get_file_size("/proc/cpuinfo"); if (cpuinfo_len < 0) { return 0; } - cpuinfo = malloc(cpuinfo_len); + char *cpuinfo = malloc(cpuinfo_len); if (cpuinfo == NULL) { return 0; } + cpuinfo_len = read_file("/proc/cpuinfo", cpuinfo, cpuinfo_len); - char* cpuFeatures = extract_cpuinfo_field(cpuinfo, cpuinfo_len, "Features"); - if(cpuFeatures == NULL) { + char *cpuFeatures = extract_cpuinfo_field(cpuinfo, cpuinfo_len, "Features"); + if (cpuFeatures == NULL) { + free(cpuinfo); return 0; } - setHwcap(cpuFeatures, hwcap); - *hwcap2 |= *hwcap; + *hwcap |= has_list_item(cpuFeatures, "fphp") ? NPY__HWCAP_FPHP : 0; + *hwcap |= has_list_item(cpuFeatures, "asimdhp") ? NPY__HWCAP_ASIMDHP : 0; + *hwcap |= has_list_item(cpuFeatures, "asimddp") ? NPY__HWCAP_ASIMDDP : 0; + *hwcap |= has_list_item(cpuFeatures, "asimdfhm") ? NPY__HWCAP_ASIMDFHM : 0; +#ifdef __arm__ + *hwcap |= has_list_item(cpuFeatures, "neon") ? NPY__HWCAP_NEON : 0; + *hwcap |= has_list_item(cpuFeatures, "half") ? NPY__HWCAP_HALF : 0; + *hwcap |= has_list_item(cpuFeatures, "vfpv3") ? NPY__HWCAP_VFPv3 : 0; + *hwcap |= has_list_item(cpuFeatures, "vfpv4") ? NPY__HWCAP_VFPv4 : 0; *hwcap2 |= has_list_item(cpuFeatures, "aes") ? NPY__HWCAP2_AES : 0; *hwcap2 |= has_list_item(cpuFeatures, "pmull") ? 
NPY__HWCAP2_PMULL : 0; *hwcap2 |= has_list_item(cpuFeatures, "sha1") ? NPY__HWCAP2_SHA1 : 0; *hwcap2 |= has_list_item(cpuFeatures, "sha2") ? NPY__HWCAP2_SHA2 : 0; *hwcap2 |= has_list_item(cpuFeatures, "crc32") ? NPY__HWCAP2_CRC32 : 0; +#else + *hwcap |= has_list_item(cpuFeatures, "asimd") ? NPY__HWCAP_ASIMD : 0; + *hwcap |= has_list_item(cpuFeatures, "fp") ? NPY__HWCAP_FP : 0; + *hwcap |= has_list_item(cpuFeatures, "aes") ? NPY__HWCAP_AES : 0; + *hwcap |= has_list_item(cpuFeatures, "pmull") ? NPY__HWCAP_PMULL : 0; + *hwcap |= has_list_item(cpuFeatures, "sha1") ? NPY__HWCAP_SHA1 : 0; + *hwcap |= has_list_item(cpuFeatures, "sha2") ? NPY__HWCAP_SHA2 : 0; + *hwcap |= has_list_item(cpuFeatures, "crc32") ? NPY__HWCAP_CRC32 : 0; +#endif + free(cpuinfo); + free(cpuFeatures); return 1; } #endif /* NUMPY_CORE_SRC_COMMON_NPY_CPUINFO_PARSER_H_ */ diff --git a/numpy/_core/src/common/npy_ctypes.h b/numpy/_core/src/common/npy_ctypes.h index 578de06397bd..78809732416c 100644 --- a/numpy/_core/src/common/npy_ctypes.h +++ b/numpy/_core/src/common/npy_ctypes.h @@ -4,6 +4,7 @@ #include #include "npy_import.h" +#include "multiarraymodule.h" /* * Check if a python type is a ctypes class. 
@@ -17,16 +18,18 @@ static inline int npy_ctypes_check(PyTypeObject *obj) { - static PyObject *py_func = NULL; PyObject *ret_obj; int ret; - npy_cache_import("numpy._core._internal", "npy_ctypes_check", &py_func); - if (py_func == NULL) { + + if (npy_cache_import_runtime( + "numpy._core._internal", "npy_ctypes_check", + &npy_runtime_imports.npy_ctypes_check) == -1) { goto fail; } - ret_obj = PyObject_CallFunctionObjArgs(py_func, (PyObject *)obj, NULL); + ret_obj = PyObject_CallFunctionObjArgs( + npy_runtime_imports.npy_ctypes_check, (PyObject *)obj, NULL); if (ret_obj == NULL) { goto fail; } diff --git a/numpy/_core/src/common/npy_hashtable.c b/numpy/_core/src/common/npy_hashtable.c index 02fe5ca29751..5086fd26af69 100644 --- a/numpy/_core/src/common/npy_hashtable.c +++ b/numpy/_core/src/common/npy_hashtable.c @@ -1,21 +1,56 @@ -/* - * This functionality is designed specifically for the ufunc machinery to - * dispatch based on multiple DTypes. Since this is designed to be used - * as purely a cache, it currently does no reference counting. - * Even though this is a cache, there is currently no maximum size. It may - * make sense to limit the size, or count collisions: If too many collisions - * occur, we could grow the cache, otherwise, just replace an old item that - * was presumably not used for a long time. +/* Lock-free hash table implementation for identity based keys + * (C arrays of pointers) used for ufunc dispatching cache. + * + * This cache does not do any reference counting of the stored objects, + * and the stored pointers must remain valid while in the cache. + * The cache entries cannot be changed or deleted once added, only new + * entries can be added. It is thread safe and lock-free for reading, and + * uses a mutex for writing (adding new entries). See below for the details + * of thread safety. + * + * The actual hash table is stored in the `buckets` struct which contains + * a flexible array member for the keys and values. 
It avoids multiple + * atomic operations as resizing the hash table only requires a single atomic + * store to swap in the new buckets pointer. * - * If a different part of NumPy requires a custom hashtable, the code should - * be reused with care since specializing it more for the ufunc dispatching - * case is likely desired. + * Thread safety notes for free-threading builds: + * - Reading from the cache (getting items) is lock-free and thread safe. + * The reader reads the current `buckets` pointer using an atomic load + * with memory_order_acquire order. This ensures that the reader + * synchronizes with any concurrent writers that may be resizing the cache. + * The value of item is then read using an atomic load with memory_order_acquire + * order so that it sees the key written by the writer before the value. + * + * - Writing to the cache (adding new items) uses ``tb->mutex`` mutex to + * ensure only one thread writes at a time. The new items are added + * concurrently with readers and synchronized using atomic operations. + * The key is stored first (using memcpy), and then the value is stored + * using an atomic store with memory_order_release order so that + * the store of key is visible to readers that see the value. + * + * - Resizing the cache uses the same mutex to ensure only one thread + * resizes at a time. The new larger cache is built while holding the + * mutex, and then swapped in using an atomic operation. Because, + * readers can be reading from the old cache while the new one is + * swapped in, the old cache is not free immediately. Instead, it is + * kept in a linked list of old caches using the `prev` pointer in the + * `buckets` struct. The old caches are only freed when the identity + * hash table is deallocated, ensuring that no readers are using them + * anymore. 
*/ -#include "templ_common.h" #include "npy_hashtable.h" +#include "templ_common.h" +#include +// It is defined here instead of header to avoid flexible array member warning in C++. +struct buckets { + struct buckets *prev; /* linked list of old buckets */ + npy_intp size; /* current size */ + npy_intp nelem; /* number of elements */ + PyObject *array[]; /* array of keys and values */ +}; #if SIZEOF_PY_UHASH_T > 4 #define _NpyHASH_XXPRIME_1 ((Py_uhash_t)11400714785074694791ULL) @@ -30,30 +65,13 @@ #endif #ifdef Py_GIL_DISABLED -// TODO: replace with PyMutex when it is public -#define LOCK_TABLE(tb) \ - if (!PyThread_acquire_lock(tb->mutex, NOWAIT_LOCK)) { \ - PyThread_acquire_lock(tb->mutex, WAIT_LOCK); \ - } -#define UNLOCK_TABLE(tb) PyThread_release_lock(tb->mutex); -#define INITIALIZE_LOCK(tb) \ - tb->mutex = PyThread_allocate_lock(); \ - if (tb->mutex == NULL) { \ - PyErr_NoMemory(); \ - PyMem_Free(res); \ - return NULL; \ - } -#define FREE_LOCK(tb) \ - if (tb->mutex != NULL) { \ - PyThread_free_lock(tb->mutex); \ - } +#define FT_ATOMIC_LOAD_PTR_ACQUIRE(ptr) \ + atomic_load_explicit((_Atomic(void *) *)&(ptr), memory_order_acquire) +#define FT_ATOMIC_STORE_PTR_RELEASE(ptr, val) \ + atomic_store_explicit((_Atomic(void *) *)&(ptr), (void *)(val), memory_order_release) #else -// the GIL serializes access to the table so no need -// for locking if it is enabled -#define LOCK_TABLE(tb) -#define UNLOCK_TABLE(tb) -#define INITIALIZE_LOCK(tb) -#define FREE_LOCK(tb) +#define FT_ATOMIC_LOAD_PTR_ACQUIRE(ptr) (ptr) +#define FT_ATOMIC_STORE_PTR_RELEASE(ptr, val) (ptr) = (val) #endif /* @@ -86,23 +104,25 @@ identity_list_hash(PyObject *const *v, int len) static inline PyObject ** -find_item(PyArrayIdentityHash const *tb, PyObject *const *key) +find_item_buckets(struct buckets *buckets, int key_len, PyObject *const *key, + PyObject **pvalue) { - Py_hash_t hash = identity_list_hash(key, tb->key_len); + Py_hash_t hash = identity_list_hash(key, key_len); npy_uintp perturb = 
(npy_uintp)hash; - npy_intp bucket; - npy_intp mask = tb->size - 1 ; - PyObject **item; + npy_intp mask = buckets->size - 1; + npy_intp bucket = (npy_intp)hash & mask; - bucket = (npy_intp)hash & mask; while (1) { - item = &(tb->buckets[bucket * (tb->key_len + 1)]); - - if (item[0] == NULL) { + PyObject **item = &(buckets->array[bucket * (key_len + 1)]); + PyObject *val = FT_ATOMIC_LOAD_PTR_ACQUIRE(item[0]); + if (pvalue != NULL) { + *pvalue = val; + } + if (val == NULL) { /* The item is not in the cache; return the empty bucket */ return item; } - if (memcmp(item+1, key, tb->key_len * sizeof(PyObject *)) == 0) { + if (memcmp(item+1, key, key_len * sizeof(PyObject *)) == 0) { /* This is a match, so return the item/bucket */ return item; } @@ -113,10 +133,18 @@ find_item(PyArrayIdentityHash const *tb, PyObject *const *key) } +static inline PyObject ** +find_item(PyArrayIdentityHash const *tb, PyObject *const *key, PyObject **pvalue) +{ + struct buckets *buckets = FT_ATOMIC_LOAD_PTR_ACQUIRE(tb->buckets); + return find_item_buckets(buckets, tb->key_len, key, pvalue); +} + + NPY_NO_EXPORT PyArrayIdentityHash * PyArrayIdentityHash_New(int key_len) { - PyArrayIdentityHash *res = PyMem_Malloc(sizeof(PyArrayIdentityHash)); + PyArrayIdentityHash *res = (PyArrayIdentityHash *)PyMem_Malloc(sizeof(PyArrayIdentityHash)); if (res == NULL) { PyErr_NoMemory(); return NULL; @@ -124,17 +152,23 @@ PyArrayIdentityHash_New(int key_len) assert(key_len > 0); res->key_len = key_len; - res->size = 4; /* Start with a size of 4 */ - res->nelem = 0; - INITIALIZE_LOCK(res); + npy_intp initial_size = 4; /* Start with a size of 4 */ - res->buckets = PyMem_Calloc(4 * (key_len + 1), sizeof(PyObject *)); + res->buckets = PyMem_Calloc(1, sizeof(struct buckets) + + initial_size * (key_len + 1) * sizeof(PyObject *)); if (res->buckets == NULL) { PyErr_NoMemory(); PyMem_Free(res); return NULL; } + res->buckets->prev = NULL; + res->buckets->size = initial_size; + res->buckets->nelem = 0; + +#ifdef 
Py_GIL_DISABLED + res->mutex = (PyMutex){0}; +#endif return res; } @@ -142,8 +176,18 @@ PyArrayIdentityHash_New(int key_len) NPY_NO_EXPORT void PyArrayIdentityHash_Dealloc(PyArrayIdentityHash *tb) { - PyMem_Free(tb->buckets); - FREE_LOCK(tb); + struct buckets *b = tb->buckets; +#ifdef Py_GIL_DISABLED + // free all old buckets + while (b != NULL) { + struct buckets *prev = b->prev; + PyMem_Free(b); + b = prev; + } +#else + assert(b->prev == NULL); + PyMem_Free(b); +#endif PyMem_Free(tb); } @@ -151,108 +195,110 @@ PyArrayIdentityHash_Dealloc(PyArrayIdentityHash *tb) static int _resize_if_necessary(PyArrayIdentityHash *tb) { - npy_intp new_size, prev_size = tb->size; - PyObject **old_table = tb->buckets; +#ifdef Py_GIL_DISABLED + assert(PyMutex_IsLocked(&tb->mutex)); +#endif + struct buckets *old_buckets = tb->buckets; + int key_len = tb->key_len; + npy_intp prev_size = old_buckets->size; assert(prev_size > 0); - if ((tb->nelem + 1) * 2 > prev_size) { - /* Double in size */ - new_size = prev_size * 2; - } - else { - new_size = prev_size; - while ((tb->nelem + 8) * 2 < new_size / 2) { - /* - * Should possibly be improved. However, we assume that we - * almost never shrink. Still if we do, do not shrink as much - * as possible to avoid growing right away. 
- */ - new_size /= 2; - } - assert(new_size >= 4); - } - if (new_size == prev_size) { + if ((old_buckets->nelem + 1) * 2 <= old_buckets->size) { + /* No resize necessary if load factor is not more than 0.5 */ return 0; } + /* Double in size */ + npy_intp new_size = old_buckets->size * 2; + npy_intp alloc_size; - if (npy_mul_sizes_with_overflow(&alloc_size, new_size, tb->key_len + 1)) { + if (npy_mul_sizes_with_overflow(&alloc_size, new_size, key_len + 1)) { return -1; } - tb->buckets = PyMem_Calloc(alloc_size, sizeof(PyObject *)); - if (tb->buckets == NULL) { - tb->buckets = old_table; + struct buckets *new_buckets = (struct buckets *)PyMem_Calloc( + 1, sizeof(struct buckets) + alloc_size * sizeof(PyObject *)); + if (new_buckets == NULL) { PyErr_NoMemory(); return -1; } - - tb->size = new_size; + new_buckets->size = new_size; + new_buckets->nelem = 0; for (npy_intp i = 0; i < prev_size; i++) { - PyObject **item = &old_table[i * (tb->key_len + 1)]; + PyObject **item = &old_buckets->array[i * (key_len + 1)]; if (item[0] != NULL) { - PyObject **tb_item = find_item(tb, item + 1); + PyObject **tb_item = find_item_buckets(new_buckets, key_len, item + 1, NULL); + memcpy(tb_item+1, item+1, key_len * sizeof(PyObject *)); + new_buckets->nelem++; tb_item[0] = item[0]; - memcpy(tb_item+1, item+1, tb->key_len * sizeof(PyObject *)); } } - PyMem_Free(old_table); +#ifdef Py_GIL_DISABLED + new_buckets->prev = old_buckets; +#else + PyMem_Free(old_buckets); +#endif + FT_ATOMIC_STORE_PTR_RELEASE(tb->buckets, new_buckets); return 0; } /** - * Add an item to the identity cache. The storage location must not change - * unless the cache is cleared. + * Set an item in the identity hash table if it does not already exist. + * If it does exist, return the existing item. * * @param tb The mapping. * @param key The key, must be a C-array of pointers of the length * corresponding to the mapping. - * @param value Normally a Python object, no reference counting is done. 
- * use NULL to clear an item. If the item does not exist, no - * action is performed for NULL. - * @param replace If 1, allow replacements. - * @returns 0 on success, -1 with a MemoryError or RuntimeError (if an item - * is added which is already in the cache). The caller should avoid - * the RuntimeError. + * @param value Normally a Python object, no reference counting is done + * and it should not be NULL. + * @param result The resulting value, either the existing one or the + * newly added value. + * @returns 0 on success, -1 with a MemoryError set on failure. */ -NPY_NO_EXPORT int -PyArrayIdentityHash_SetItem(PyArrayIdentityHash *tb, - PyObject *const *key, PyObject *value, int replace) +static inline int +PyArrayIdentityHash_SetItemDefaultLockHeld(PyArrayIdentityHash *tb, + PyObject *const *key, PyObject *default_value, PyObject **result) { - LOCK_TABLE(tb); - if (value != NULL && _resize_if_necessary(tb) < 0) { - /* Shrink, only if a new value is added. */ - UNLOCK_TABLE(tb); +#ifdef Py_GIL_DISABLED + assert(PyMutex_IsLocked(&tb->mutex)); +#endif + assert(default_value != NULL); + if (_resize_if_necessary(tb) < 0) { return -1; } - PyObject **tb_item = find_item(tb, key); - if (value != NULL) { - if (tb_item[0] != NULL && !replace) { - UNLOCK_TABLE(tb); - PyErr_SetString(PyExc_RuntimeError, - "Identity cache already includes the item."); - return -1; - } - tb_item[0] = value; + PyObject **tb_item = find_item(tb, key, NULL); + if (tb_item[0] == NULL) { memcpy(tb_item+1, key, tb->key_len * sizeof(PyObject *)); - tb->nelem += 1; - } - else { - /* Clear the bucket -- just the value should be enough though. 
*/ - memset(tb_item, 0, (tb->key_len + 1) * sizeof(PyObject *)); + tb->buckets->nelem++; + FT_ATOMIC_STORE_PTR_RELEASE(tb_item[0], default_value); + *result = default_value; + } else { + *result = tb_item[0]; } - UNLOCK_TABLE(tb); return 0; } +NPY_NO_EXPORT int +PyArrayIdentityHash_SetItemDefault(PyArrayIdentityHash *tb, + PyObject *const *key, PyObject *default_value, PyObject **result) +{ +#ifdef Py_GIL_DISABLED + PyMutex_Lock(&tb->mutex); +#endif + int ret = PyArrayIdentityHash_SetItemDefaultLockHeld(tb, key, default_value, result); +#ifdef Py_GIL_DISABLED + PyMutex_Unlock(&tb->mutex); +#endif + return ret; +} + NPY_NO_EXPORT PyObject * -PyArrayIdentityHash_GetItem(PyArrayIdentityHash const *tb, PyObject *const *key) +PyArrayIdentityHash_GetItem(PyArrayIdentityHash *tb, PyObject *const *key) { - LOCK_TABLE(tb); - PyObject *res = find_item(tb, key)[0]; - UNLOCK_TABLE(tb); - return res; + PyObject *value = NULL; + find_item(tb, key, &value); + return value; } diff --git a/numpy/_core/src/common/npy_hashtable.h b/numpy/_core/src/common/npy_hashtable.h index fdb241667164..a369ba1ba59b 100644 --- a/numpy/_core/src/common/npy_hashtable.h +++ b/numpy/_core/src/common/npy_hashtable.h @@ -7,24 +7,27 @@ #include "numpy/ndarraytypes.h" +#ifdef __cplusplus +extern "C" { +#endif + +struct buckets; + typedef struct { - int key_len; /* number of identities used */ - /* Buckets stores: val1, key1[0], key1[1], ..., val2, key2[0], ... 
*/ - PyObject **buckets; - npy_intp size; /* current size */ - npy_intp nelem; /* number of elements */ + int key_len; /* number of identities used */ + struct buckets *buckets; /* current buckets */ #ifdef Py_GIL_DISABLED - PyThread_type_lock *mutex; + PyMutex mutex; #endif } PyArrayIdentityHash; NPY_NO_EXPORT int -PyArrayIdentityHash_SetItem(PyArrayIdentityHash *tb, - PyObject *const *key, PyObject *value, int replace); +PyArrayIdentityHash_SetItemDefault(PyArrayIdentityHash *tb, + PyObject *const *key, PyObject *default_value, PyObject **result); NPY_NO_EXPORT PyObject * -PyArrayIdentityHash_GetItem(PyArrayIdentityHash const *tb, PyObject *const *key); +PyArrayIdentityHash_GetItem(PyArrayIdentityHash *tb, PyObject *const *key); NPY_NO_EXPORT PyArrayIdentityHash * PyArrayIdentityHash_New(int key_len); @@ -32,4 +35,8 @@ PyArrayIdentityHash_New(int key_len); NPY_NO_EXPORT void PyArrayIdentityHash_Dealloc(PyArrayIdentityHash *tb); +#ifdef __cplusplus +} +#endif + #endif /* NUMPY_CORE_SRC_COMMON_NPY_NPY_HASHTABLE_H_ */ diff --git a/numpy/_core/src/common/npy_import.c b/numpy/_core/src/common/npy_import.c new file mode 100644 index 000000000000..534d7b34020b --- /dev/null +++ b/numpy/_core/src/common/npy_import.c @@ -0,0 +1,88 @@ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + +#include "numpy/ndarraytypes.h" +#include "npy_import.h" +#include + + +NPY_VISIBILITY_HIDDEN npy_runtime_imports_struct npy_runtime_imports; + +NPY_NO_EXPORT int +init_import_mutex(void) { +#if PY_VERSION_HEX < 0x30d00b3 + npy_runtime_imports.import_mutex = PyThread_allocate_lock(); + if (npy_runtime_imports.import_mutex == NULL) { + PyErr_NoMemory(); + return -1; + } +#endif + return 0; +} + + +/*! \brief Import a Python object from an entry point string. + + * The name should be of the form "(module ':')? (object '.')* attr". + * If no module is present, it is assumed to be "numpy". + * On error, returns NULL. 
+ */ +NPY_NO_EXPORT PyObject* +npy_import_entry_point(const char *entry_point) { + PyObject *result; + const char *item; + + const char *colon = strchr(entry_point, ':'); + if (colon) { // there is a module. + result = PyUnicode_FromStringAndSize(entry_point, colon - entry_point); + if (result != NULL) { + Py_SETREF(result, PyImport_Import(result)); + } + item = colon + 1; + } + else { + result = PyImport_ImportModule("numpy"); + item = entry_point; + } + + const char *dot = item - 1; + while (result != NULL && dot != NULL) { + item = dot + 1; + dot = strchr(item, '.'); + PyObject *string = PyUnicode_FromStringAndSize( + item, dot ? dot - item : strlen(item)); + if (string == NULL) { + Py_DECREF(result); + return NULL; + } + Py_SETREF(result, PyObject_GetAttr(result, string)); + Py_DECREF(string); + } + return result; +} + + +NPY_NO_EXPORT int +npy_cache_import_runtime(const char *module, const char *attr, PyObject **obj) { + if (!atomic_load_explicit((_Atomic(PyObject *) *)obj, memory_order_acquire)) { + PyObject* value = npy_import(module, attr); + if (value == NULL) { + return -1; + } +#if PY_VERSION_HEX < 0x30d00b3 + PyThread_acquire_lock(npy_runtime_imports.import_mutex, WAIT_LOCK); +#else + PyMutex_Lock(&npy_runtime_imports.import_mutex); +#endif + if (!atomic_load_explicit((_Atomic(PyObject *) *)obj, memory_order_acquire)) { + atomic_store_explicit((_Atomic(PyObject *) *)obj, Py_NewRef(value), memory_order_release); + } +#if PY_VERSION_HEX < 0x30d00b3 + PyThread_release_lock(npy_runtime_imports.import_mutex); +#else + PyMutex_Unlock(&npy_runtime_imports.import_mutex); +#endif + Py_DECREF(value); + } + return 0; +} diff --git a/numpy/_core/src/common/npy_import.h b/numpy/_core/src/common/npy_import.h index 58b4ba0bc7e5..9eab510726aa 100644 --- a/numpy/_core/src/common/npy_import.h +++ b/numpy/_core/src/common/npy_import.h @@ -3,7 +3,94 @@ #include -/*! \brief Fetch and cache Python function. 
+#include "numpy/npy_common.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Cached references to objects obtained via an import. All of these are + * can be initialized at any time by npy_cache_import_runtime. + */ +typedef struct npy_runtime_imports_struct { +#if PY_VERSION_HEX < 0x30d00b3 + PyThread_type_lock import_mutex; +#else + PyMutex import_mutex; +#endif + PyObject *_add_dtype_helper; + PyObject *_all; + PyObject *_amax; + PyObject *_amin; + PyObject *_any; + PyObject *array_function_errmsg_formatter; + PyObject *array_ufunc_errmsg_formatter; + PyObject *_clip; + PyObject *_commastring; + PyObject *_convert_to_stringdtype_kwargs; + PyObject *_default_array_repr; + PyObject *_default_array_str; + PyObject *_dump; + PyObject *_dumps; + PyObject *_getfield_is_safe; + PyObject *internal_gcd_func; + PyObject *_mean; + PyObject *NO_NEP50_WARNING; + PyObject *npy_ctypes_check; + PyObject *numpy_matrix; + PyObject *_prod; + PyObject *_promote_fields; + PyObject *_std; + PyObject *_sum; + PyObject *_ufunc_doc_signature_formatter; + PyObject *_ufunc_inspect_signature_builder; + PyObject *_usefields; + PyObject *_var; + PyObject *_view_is_safe; + PyObject *_void_scalar_to_string; + PyObject *sort; + PyObject *argsort; +} npy_runtime_imports_struct; + +NPY_VISIBILITY_HIDDEN extern npy_runtime_imports_struct npy_runtime_imports; + +/*! \brief Import a Python object. + + * This function imports the Python function specified by + * \a module and \a function, increments its reference count, and returns + * the result. On error, returns NULL. + * + * @param module Absolute module name. + * @param attr module attribute to cache. + */ +static inline PyObject* +npy_import(const char *module, const char *attr) +{ + PyObject *ret = NULL; + PyObject *mod = PyImport_ImportModule(module); + + if (mod != NULL) { + ret = PyObject_GetAttrString(mod, attr); + Py_DECREF(mod); + } + return ret; +} + +NPY_NO_EXPORT int +init_import_mutex(void); + +/*! 
\brief Import a Python object from an entry point string. + + * The name should be of the form "(module ':')? (object '.')* attr". + * If no module is present, it is assumed to be "numpy". + * On error, returns NULL. + */ +NPY_NO_EXPORT PyObject* +npy_import_entry_point(const char *entry_point); + + +/*! \brief Fetch and cache Python object at runtime. * * Import a Python function and cache it for use. The function checks if * cache is NULL, and if not NULL imports the Python function specified by @@ -14,19 +101,13 @@ * * @param module Absolute module name. * @param attr module attribute to cache. - * @param cache Storage location for imported function. + * @param obj Storage location for imported function. */ -static inline void -npy_cache_import(const char *module, const char *attr, PyObject **cache) -{ - if (NPY_UNLIKELY(*cache == NULL)) { - PyObject *mod = PyImport_ImportModule(module); +NPY_NO_EXPORT int +npy_cache_import_runtime(const char *module, const char *attr, PyObject **obj); - if (mod != NULL) { - *cache = PyObject_GetAttrString(mod, attr); - Py_DECREF(mod); - } - } +#ifdef __cplusplus } +#endif #endif /* NUMPY_CORE_SRC_COMMON_NPY_IMPORT_H_ */ diff --git a/numpy/_core/src/common/npy_longdouble.c b/numpy/_core/src/common/npy_longdouble.c index ce80a9ae2bc3..644af776f9a9 100644 --- a/numpy/_core/src/common/npy_longdouble.c +++ b/numpy/_core/src/common/npy_longdouble.c @@ -144,8 +144,8 @@ npy_longdouble_from_PyLong(PyObject *long_obj) { result = NumPyOS_ascii_strtold(cstr, &end); if (errno == ERANGE) { /* strtold returns INFINITY of the correct sign. 
*/ - if (PyErr_Warn(PyExc_RuntimeWarning, - "overflow encountered in conversion from python long") < 0) { + if (PyErr_WarnEx(PyExc_RuntimeWarning, + "overflow encountered in conversion from python long", 1) < 0) { goto fail; } } diff --git a/numpy/_core/src/common/npy_pycompat.h b/numpy/_core/src/common/npy_pycompat.h index 67d4f6f625a0..52d44b17283a 100644 --- a/numpy/_core/src/common/npy_pycompat.h +++ b/numpy/_core/src/common/npy_pycompat.h @@ -4,18 +4,38 @@ #include "numpy/npy_3kcompat.h" #include "pythoncapi-compat/pythoncapi_compat.h" -/* - * In Python 3.10a7 (or b1), python started using the identity for the hash - * when a value is NaN. See https://bugs.python.org/issue43475 - */ -#if PY_VERSION_HEX > 0x030a00a6 #define Npy_HashDouble _Py_HashDouble + +#ifdef Py_GIL_DISABLED +// Specialized version of critical section locking to safely use +// PySequence_Fast APIs without the GIL. For performance, the argument *to* +// PySequence_Fast() is provided to the macro, not the *result* of +// PySequence_Fast(), which would require an extra test to determine if the +// lock must be acquired. +// +// These are tweaked versions of macros defined in CPython in +// pycore_critical_section.h, originally added in CPython commit baf347d91643. +// They should behave identically to the versions in CPython. Once the +// macros are expanded, the only difference relative to those versions is the +// use of public C API symbols that are equivalent to the ones used in the +// corresponding CPython definitions. 
+#define NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(original) \ + { \ + PyObject *_orig_seq = (PyObject *)(original); \ + const int _should_lock_cs = \ + PyList_CheckExact(_orig_seq); \ + PyCriticalSection _cs_fast; \ + if (_should_lock_cs) { \ + PyCriticalSection_Begin(&_cs_fast, _orig_seq); \ + } +#define NPY_END_CRITICAL_SECTION_SEQUENCE_FAST() \ + if (_should_lock_cs) { \ + PyCriticalSection_End(&_cs_fast); \ + } \ + } #else -static inline Py_hash_t -Npy_HashDouble(PyObject *NPY_UNUSED(identity), double val) -{ - return _Py_HashDouble(val); -} +#define NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(original) { do { (void)(original); } while (0) +#define NPY_END_CRITICAL_SECTION_SEQUENCE_FAST() } #endif diff --git a/numpy/_core/src/common/npy_sort.c b/numpy/_core/src/common/npy_sort.c new file mode 100644 index 000000000000..632962e884dd --- /dev/null +++ b/numpy/_core/src/common/npy_sort.c @@ -0,0 +1,67 @@ +#include +#include +#include +#include "npy_sort.h" +#include "dtypemeta.h" + +#ifdef __cplusplus +extern "C" { +#endif + +NPY_NO_EXPORT int +npy_default_sort_loop(PyArrayMethod_Context *context, + char *const *data, const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *transferdata) +{ + PyArray_CompareFunc *cmp = (PyArray_CompareFunc *)context->method->static_data; + + PyArrayMethod_SortParameters *sort_params = + (PyArrayMethod_SortParameters *)context->parameters; + PyArray_SortImpl *sort_func = NULL; + + switch (sort_params->flags) { + case NPY_SORT_DEFAULT: + sort_func = npy_quicksort_impl; + break; + case NPY_SORT_STABLE: + sort_func = npy_mergesort_impl; + break; + default: + PyErr_SetString(PyExc_ValueError, "Invalid sort kind"); + return -1; + } + + return sort_func(data[0], dimensions[0], context, + context->descriptors[0]->elsize, cmp); +} + +NPY_NO_EXPORT int +npy_default_argsort_loop(PyArrayMethod_Context *context, + char *const *data, const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *transferdata) +{ + PyArray_CompareFunc 
*cmp = (PyArray_CompareFunc *)context->method->static_data; + + PyArrayMethod_SortParameters *sort_params = + (PyArrayMethod_SortParameters *)context->parameters; + PyArray_ArgSortImpl *argsort_func = NULL; + + switch (sort_params->flags) { + case NPY_SORT_DEFAULT: + argsort_func = npy_aquicksort_impl; + break; + case NPY_SORT_STABLE: + argsort_func = npy_amergesort_impl; + break; + default: + PyErr_SetString(PyExc_ValueError, "Invalid sort kind"); + return -1; + } + + return argsort_func(data[0], (npy_intp *)data[1], dimensions[0], context, + context->descriptors[0]->elsize, cmp); +} + +#ifdef __cplusplus +} +#endif diff --git a/numpy/_core/src/common/npy_sort.h.src b/numpy/_core/src/common/npy_sort.h.src index d6e4357225a8..95d6f9d1ee70 100644 --- a/numpy/_core/src/common/npy_sort.h.src +++ b/numpy/_core/src/common/npy_sort.h.src @@ -5,6 +5,7 @@ #include #include #include +#include #define NPY_ENOMEM 1 #define NPY_ECOMP 2 @@ -107,6 +108,41 @@ NPY_NO_EXPORT int npy_aheapsort(void *vec, npy_intp *ind, npy_intp cnt, void *ar NPY_NO_EXPORT int npy_amergesort(void *vec, npy_intp *ind, npy_intp cnt, void *arr); NPY_NO_EXPORT int npy_atimsort(void *vec, npy_intp *ind, npy_intp cnt, void *arr); +/* + ***************************************************************************** + ** NEW-STYLE GENERIC SORT ** + ***************************************************************************** + */ + +NPY_NO_EXPORT int npy_default_sort_loop(PyArrayMethod_Context *context, + char *const *data, const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *transferdata); +NPY_NO_EXPORT int npy_default_argsort_loop(PyArrayMethod_Context *context, + char *const *data, const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *transferdata); + +/* + ***************************************************************************** + ** GENERIC SORT IMPLEMENTATIONS ** + ***************************************************************************** + */ + +typedef int 
(PyArray_SortImpl)(void *start, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp); +typedef int (PyArray_ArgSortImpl)(void *vv, npy_intp *tosort, npy_intp n, + void *varr, npy_intp elsize, + PyArray_CompareFunc *cmp); + +NPY_NO_EXPORT int npy_quicksort_impl(void *start, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp); +NPY_NO_EXPORT int npy_mergesort_impl(void *start, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp); +NPY_NO_EXPORT int npy_aquicksort_impl(void *vv, npy_intp *tosort, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp); +NPY_NO_EXPORT int npy_amergesort_impl(void *v, npy_intp *tosort, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp); + + #ifdef __cplusplus } #endif diff --git a/numpy/_core/src/common/numpyos.c b/numpy/_core/src/common/numpyos.c index 319f5dcc395f..a5ca28081d52 100644 --- a/numpy/_core/src/common/numpyos.c +++ b/numpy/_core/src/common/numpyos.c @@ -282,7 +282,7 @@ fix_ascii_format(char* buf, size_t buflen, int decimal) * - format: The printf()-style format to use for the code to use for * converting. * - value: The value to convert - * - decimal: if != 0, always has a decimal, and at leasat one digit after + * - decimal: if != 0, always has a decimal, and at least one digit after * the decimal. 
This has the same effect as passing 'Z' in the original * PyOS_ascii_formatd * diff --git a/numpy/_core/src/common/numpyos.h b/numpy/_core/src/common/numpyos.h index fac82f7d438c..8fbecb122577 100644 --- a/numpy/_core/src/common/numpyos.h +++ b/numpy/_core/src/common/numpyos.h @@ -51,7 +51,7 @@ NPY_NO_EXPORT int NumPyOS_ascii_isupper(char c); NPY_NO_EXPORT int -NumPyOS_ascii_tolower(char c); +NumPyOS_ascii_tolower(int c); /* Convert a string to an int in an arbitrary base */ NPY_NO_EXPORT npy_longlong diff --git a/numpy/_core/src/common/pythoncapi-compat b/numpy/_core/src/common/pythoncapi-compat index 01341acbbef0..8636bccf29ad 160000 --- a/numpy/_core/src/common/pythoncapi-compat +++ b/numpy/_core/src/common/pythoncapi-compat @@ -1 +1 @@ -Subproject commit 01341acbbef0ca85cf2fa31b63307ddf4d9a87fb +Subproject commit 8636bccf29adfa23463f810b3c2830f7cff1e933 diff --git a/numpy/_core/src/common/raii_utils.hpp b/numpy/_core/src/common/raii_utils.hpp new file mode 100644 index 000000000000..1049e97387f0 --- /dev/null +++ b/numpy/_core/src/common/raii_utils.hpp @@ -0,0 +1,171 @@ +#ifndef NUMPY_CORE_SRC_COMMON_RAII_UTILS_HPP_ +#define NUMPY_CORE_SRC_COMMON_RAII_UTILS_HPP_ + +// +// Utilities for RAII management of resources. +// +// Another (and arguably clearer) name for this resource management pattern +// is "Scope-Bound Resource Management", but RAII is much more common, so we +// use the familiar acronym. +// + +#include + +// For npy_string_allocator, PyArray_StringDTypeObject, NPY_NO_EXPORT: +#include "numpy/ndarraytypes.h" + +// Forward declarations not currently in a header. +// XXX Where should these be moved? +NPY_NO_EXPORT npy_string_allocator * +NpyString_acquire_allocator(const PyArray_StringDTypeObject *descr); +NPY_NO_EXPORT void +NpyString_release_allocator(npy_string_allocator *allocator); + + +namespace np { namespace raii { + +// +// RAII for PyGILState_* API. +// +// In C++ code, use this at the beginning of a scope, e.g. 
+// +// { +// np::raii::EnsureGIL ensure_gil{}; +// [code that uses the Python C API here] +// } +// +// instead of +// +// PyGILState_STATE gil_state = PyGILState_Ensure(); +// [code that uses the Python C API here] +// PyGILState_Release(gil_state); +// +// or +// NPY_ALLOW_C_API_DEF +// NPY_ALLOW_C_API +// [code that uses the Python C API here] +// NPY_DISABLE_C_API +// +// This ensures that PyGILState_Release(gil_state) is called, even if the +// wrapped code throws an exception or executes a return or a goto. +// +class EnsureGIL +{ + PyGILState_STATE gil_state; + +public: + + EnsureGIL() { + gil_state = PyGILState_Ensure(); + } + + ~EnsureGIL() { + PyGILState_Release(gil_state); + } + + EnsureGIL(const EnsureGIL&) = delete; + EnsureGIL(EnsureGIL&& other) = delete; + EnsureGIL& operator=(const EnsureGIL&) = delete; + EnsureGIL& operator=(EnsureGIL&&) = delete; +}; + + +// +// RAII for Python thread state. +// +// In C++ code, use this at the beginning of a scope, e.g. +// +// { +// np::raii::SaveThreadState save_thread_state{}; +// [code...] +// } +// +// instead of +// +// PyThreadState *thread_state = PyEval_SaveThread(); +// [code...] +// PyEval_RestoreThread(thread_state); +// +// or +// Py_BEGIN_ALLOW_THREADS +// [code...] +// Py_END_ALLOW_THREADS +// +// or +// NPY_BEGIN_THREADS_DEF +// NPY_BEGIN_THREADS +// [code...] +// NPY_END_THREADS +// +// This ensures that PyEval_RestoreThread(thread_state) is called, even +// if the wrapped code throws an exception or executes a return or a goto. 
+// +class SaveThreadState +{ + PyThreadState *thread_state; + +public: + + SaveThreadState() { + thread_state = PyEval_SaveThread(); + } + + ~SaveThreadState() { + PyEval_RestoreThread(thread_state); + } + + SaveThreadState(const SaveThreadState&) = delete; + SaveThreadState(SaveThreadState&& other) = delete; + SaveThreadState& operator=(const SaveThreadState&) = delete; + SaveThreadState& operator=(SaveThreadState&&) = delete; +}; + + +// +// RAII for npy_string_allocator. +// +// Instead of +// +// Py_INCREF(descr); +// npy_string_allocator *allocator = NpyString_acquire_allocator(descr); +// [code that uses allocator] +// NpyString_release_allocator(allocator); +// Py_DECREF(descr); +// +// use +// +// { +// np::raii::NpyStringAcquireAllocator alloc(descr); +// [code that uses alloc.allocator()] +// } +// +class NpyStringAcquireAllocator +{ + PyArray_StringDTypeObject *_descr; + npy_string_allocator *_allocator; + +public: + + NpyStringAcquireAllocator(PyArray_StringDTypeObject *descr) : _descr(descr) { + Py_INCREF(_descr); + _allocator = NpyString_acquire_allocator(_descr); + } + + ~NpyStringAcquireAllocator() { + NpyString_release_allocator(_allocator); + Py_DECREF(_descr); + } + + NpyStringAcquireAllocator(const NpyStringAcquireAllocator&) = delete; + NpyStringAcquireAllocator(NpyStringAcquireAllocator&& other) = delete; + NpyStringAcquireAllocator& operator=(const NpyStringAcquireAllocator&) = delete; + NpyStringAcquireAllocator& operator=(NpyStringAcquireAllocator&&) = delete; + + npy_string_allocator *allocator() { + return _allocator; + } +}; + +}} // namespace np { namespace raii { + +#endif diff --git a/numpy/_core/src/common/simd/README.md b/numpy/_core/src/common/simd/README.md new file mode 100644 index 000000000000..a13a0f75b6fc --- /dev/null +++ b/numpy/_core/src/common/simd/README.md @@ -0,0 +1,266 @@ +# NumPy SIMD Wrapper for Highway + +This directory contains a lightweight C++ wrapper over Google's [Highway](https://github.com/google/highway) 
SIMD library, designed specifically for NumPy's needs. + +> **Note**: This directory also contains the C interface of universal intrinsics (under `simd.h`) which is no longer supported. The Highway wrapper described in this document should be used instead for all new SIMD code. + +## Overview + +The wrapper simplifies Highway's SIMD interface by eliminating class tags and using lane types directly, which can be deduced from arguments in most cases. This design makes the SIMD code more intuitive and easier to maintain while still leveraging Highway generic intrinsics. + +## Architecture + +The wrapper consists of two main headers: + +1. `simd.hpp`: The main header that defines namespaces and includes configuration macros +2. `simd.inc.hpp`: Implementation details included by `simd.hpp` multiple times for different namespaces + +Additionally, this directory contains legacy C interface files for universal intrinsics (`simd.h` and related files) which are deprecated and should not be used for new code. All new SIMD code should use the Highway wrapper. + + +## Usage + +### Basic Usage + +```cpp +#include "simd/simd.hpp" + +// Use np::simd for maximum width SIMD operations +using namespace np::simd; +float *data = /* ... 
*/; +Vec v = LoadU(data); +v = Add(v, v); +StoreU(v, data); + +// Use np::simd128 for fixed 128-bit SIMD operations +using namespace np::simd128; +Vec v128 = LoadU(data); +v128 = Add(v128, v128); +StoreU(v128, data); +``` + +### Checking for SIMD Support + +```cpp +#include "simd/simd.hpp" + +// Check if SIMD is enabled +#if NPY_HWY + // SIMD code +#else + // Scalar fallback code +#endif + +// Check for float64 support +#if NPY_HWY_F64 + // Use float64 SIMD operations +#endif + +// Check for FMA support +#if NPY_HWY_FMA + // Use FMA operations +#endif +``` + +## Type Support and Constraints + +The wrapper provides type constraints to help with SFINAE (Substitution Failure Is Not An Error) and compile-time type checking: + +- `kSupportLane`: Determines whether the specified lane type is supported by the SIMD extension. + ```cpp + // Base template - always defined, even when SIMD is not enabled (for SFINAE) + template + constexpr bool kSupportLane = NPY_HWY != 0; + template <> + constexpr bool kSupportLane = NPY_HWY_F64 != 0; + ``` + +- `kMaxLanes`: Maximum number of lanes supported by the SIMD extension for the specified lane type. + ```cpp + template + constexpr size_t kMaxLanes = HWY_MAX_LANES_D(_Tag); + ``` + +```cpp +#include "simd/simd.hpp" + +// Check if float64 operations are supported +if constexpr (np::simd::kSupportLane) { + // Use float64 operations +} +``` + +These constraints allow for compile-time checking of which lane types are supported, which can be used in SFINAE contexts to enable or disable functions based on type support. 
+ +## Available Operations + +The wrapper provides the following common operations that are used in NumPy: + +- Vector creation operations: + - `Zero`: Returns a vector with all lanes set to zero + - `Set`: Returns a vector with all lanes set to the given value + - `Undefined`: Returns an uninitialized vector + +- Memory operations: + - `LoadU`: Unaligned load of a vector from memory + - `StoreU`: Unaligned store of a vector to memory + +- Vector information: + - `Lanes`: Returns the number of vector lanes based on the lane type + +- Type conversion: + - `BitCast`: Reinterprets a vector to a different type without modifying the underlying data + - `VecFromMask`: Converts a mask to a vector + +- Comparison operations: + - `Eq`: Element-wise equality comparison + - `Le`: Element-wise less than or equal comparison + - `Lt`: Element-wise less than comparison + - `Gt`: Element-wise greater than comparison + - `Ge`: Element-wise greater than or equal comparison + +- Arithmetic operations: + - `Add`: Element-wise addition + - `Sub`: Element-wise subtraction + - `Mul`: Element-wise multiplication + - `Div`: Element-wise division + - `Min`: Element-wise minimum + - `Max`: Element-wise maximum + - `Abs`: Element-wise absolute value + - `Sqrt`: Element-wise square root + +- Logical operations: + - `And`: Bitwise AND + - `Or`: Bitwise OR + - `Xor`: Bitwise XOR + - `AndNot`: Bitwise AND NOT (a & ~b) + +Additional Highway operations can be accessed via the `hn` namespace alias inside the `simd` or `simd128` namespaces. + +## Extending + +To add more operations from Highway: + +1. Import them in the `simd.inc.hpp` file using the `using` directive if they don't require a tag: + ```cpp + // For operations that don't require a tag + using hn::FunctionName; + ``` + +2. Define wrapper functions for intrinsics that require a class tag: + ```cpp + // For operations that require a tag + template + HWY_API ReturnType FunctionName(Args... 
args) { + return hn::FunctionName(_Tag(), args...); + } + ``` + +3. Add appropriate documentation and SFINAE constraints if needed + + +## Build Configuration + +The SIMD wrapper automatically disables SIMD operations when optimizations are disabled: + +- When `NPY_DISABLE_OPTIMIZATION` is defined, SIMD operations are disabled +- SIMD is enabled only when the Highway target is not scalar (`HWY_TARGET != HWY_SCALAR`) + and not EMU128 (`HWY_TARGET != HWY_EMU128`) + +## Design Notes + +1. **Why avoid Highway scalar operations?** + - NumPy already provides kernels for scalar operations + - Compilers can better optimize standard library implementations + - Not all Highway intrinsics are fully supported in scalar mode + - For strict IEEE 754 floating-point compliance requirements, direct scalar + implementations offer more predictable behavior than EMU128 + +2. **Legacy Universal Intrinsics** + - The older universal intrinsics C interface (in `simd.h` and accessible via `NPY_SIMD` macros) is deprecated + - All new SIMD code should use this Highway-based wrapper (accessible via `NPY_HWY` macros) + - The legacy code is maintained for compatibility but will eventually be removed + +3. **Feature Detection Constants vs. 
Highway Constants** + - NumPy-specific constants (`NPY_HWY_F16`, `NPY_HWY_F64`, `NPY_HWY_FMA`) provide additional safety beyond raw Highway constants + - Highway constants (e.g., `HWY_HAVE_FLOAT16`) only check platform capabilities but don't consider NumPy's build configuration + - Our constants combine both checks: + ```cpp + #define NPY_HWY_F16 (NPY_HWY && HWY_HAVE_FLOAT16) + ``` + - This ensures SIMD features won't be used when: + - Platform supports it but NumPy optimization is disabled via meson option: + ``` + option('disable-optimization', type: 'boolean', value: false, + description: 'Disable CPU optimized code (dispatch,simd,unroll...)') + ``` + - Highway target is scalar (`HWY_TARGET == HWY_SCALAR`) + - Using these constants ensures consistent behavior across different compilation settings + - Without this additional layer, code might incorrectly try to use SIMD paths in scalar mode + +4. **Namespace Design** + - `np::simd`: Maximum width SIMD operations (scalable) + - `np::simd128`: Fixed 128-bit SIMD operations + - `hn`: Highway namespace alias (available within the SIMD namespaces) + +5. **Why Namespaces and Why Not Just Use Highway Directly?** + - Highway's design uses class tag types as template parameters (e.g., `Vec>`) when defining vector types + - Many Highway functions require explicitly passing a tag instance as the first parameter + - This class tag-based approach increases verbosity and complexity in user code + - Our wrapper eliminates this by internally managing tags through namespaces, letting users directly use types e.g. `Vec` + - Simple example with raw Highway: + ```cpp + // Highway's approach + float *data = /* ... 
*/; + + namespace hn = hwy::HWY_NAMESPACE; + using namespace hn; + + // Full-width operations + ScalableTag df; // Create a tag instance + Vec v = LoadU(df, data); // LoadU requires a tag instance + StoreU(v, df, data); // StoreU requires a tag instance + + // 128-bit operations + Full128 df128; // Create a 128-bit tag instance + Vec v128 = LoadU(df128, data); // LoadU requires a tag instance + StoreU(v128, df128, data); // StoreU requires a tag instance + ``` + + - Simple example with our wrapper: + ```cpp + // Our wrapper approach + float *data = /* ... */; + + // Full-width operations + using namespace np::simd; + Vec v = LoadU(data); // Full-width vector load + StoreU(v, data); + + // 128-bit operations + using namespace np::simd128; + Vec v128 = LoadU(data); // 128-bit vector load + StoreU(v128, data); + ``` + + - The namespaced approach simplifies code, reduces errors, and provides a more intuitive interface + - It preserves all Highway operations benefits while reducing cognitive overhead + +6. **Why Namespaces Are Essential for This Design?** + - Namespaces allow us to define different internal tag types (`hn::ScalableTag` in `np::simd` vs `hn::Full128` in `np::simd128`) + - This provides a consistent type-based interface (`Vec`) without requiring users to manually create tags + - Enables using the same function names (like `LoadU`) with different implementations based on SIMD width + - Without namespaces, we'd have to either reintroduce tags (defeating the purpose of the wrapper) or create different function names for each variant (e.g., `LoadU` vs `LoadU128`) + +7. 
**Template Type Parameters** + - `TLane`: The scalar type for each vector lane (e.g., uint8_t, float, double) + + +## Requirements + +- C++17 or later +- Google Highway library + +## License + +Same as NumPy's license diff --git a/numpy/_core/src/common/simd/avx2/arithmetic.h b/numpy/_core/src/common/simd/avx2/arithmetic.h index 58d842a6d3a4..15b9be85dc51 100644 --- a/numpy/_core/src/common/simd/avx2/arithmetic.h +++ b/numpy/_core/src/common/simd/avx2/arithmetic.h @@ -215,9 +215,9 @@ NPY_FINLINE npyv_s64 npyv_divc_s64(npyv_s64 a, const npyv_s64x3 divisor) // q = (a + mulhi) >> sh __m256i q = _mm256_add_epi64(a, mulhi); // emulate arithmetic right shift - const __m256i sigb = npyv_setall_s64(1LL << 63); - q = _mm256_srl_epi64(_mm256_add_epi64(q, sigb), shf1); - q = _mm256_sub_epi64(q, _mm256_srl_epi64(sigb, shf1)); + const __m256i sbit = npyv_setall_s64(0x8000000000000000); + q = _mm256_srl_epi64(_mm256_add_epi64(q, sbit), shf1); + q = _mm256_sub_epi64(q, _mm256_srl_epi64(sbit, shf1)); // q = q - XSIGN(a) // trunc(a/d) = (q ^ dsign) - dsign q = _mm256_sub_epi64(q, asign); diff --git a/numpy/_core/src/common/simd/avx2/memory.h b/numpy/_core/src/common/simd/avx2/memory.h index f18636538174..8b30cb4cdf6c 100644 --- a/numpy/_core/src/common/simd/avx2/memory.h +++ b/numpy/_core/src/common/simd/avx2/memory.h @@ -705,7 +705,7 @@ NPYV_IMPL_AVX2_REST_PARTIAL_TYPES_PAIR(u64, s64) NPYV_IMPL_AVX2_REST_PARTIAL_TYPES_PAIR(f64, s64) /************************************************************ - * de-interlave load / interleave contiguous store + * de-interleave load / interleave contiguous store ************************************************************/ // two channels #define NPYV_IMPL_AVX2_MEM_INTERLEAVE(SFX, ZSFX) \ diff --git a/numpy/_core/src/common/simd/avx512/avx512.h b/numpy/_core/src/common/simd/avx512/avx512.h index aa6abe256424..2a4a20b2970d 100644 --- a/numpy/_core/src/common/simd/avx512/avx512.h +++ b/numpy/_core/src/common/simd/avx512/avx512.h @@ -11,6 +11,8 @@ 
// Enough limit to allow us to use _mm512_i32gather_* and _mm512_i32scatter_* #define NPY_SIMD_MAXLOAD_STRIDE32 (0x7fffffff / 16) #define NPY_SIMD_MAXSTORE_STRIDE32 (0x7fffffff / 16) +#define NPY_SIMD_MAXLOAD_STRIDE64 (0x7fffffff / 16) +#define NPY_SIMD_MAXSTORE_STRIDE64 (0x7fffffff / 16) typedef __m512i npyv_u8; typedef __m512i npyv_s8; diff --git a/numpy/_core/src/common/simd/avx512/memory.h b/numpy/_core/src/common/simd/avx512/memory.h index e981ef8f6dd1..53e24477e6ac 100644 --- a/numpy/_core/src/common/simd/avx512/memory.h +++ b/numpy/_core/src/common/simd/avx512/memory.h @@ -651,7 +651,7 @@ NPYV_IMPL_AVX512_REST_PARTIAL_TYPES_PAIR(u64, s64) NPYV_IMPL_AVX512_REST_PARTIAL_TYPES_PAIR(f64, s64) /************************************************************ - * de-interlave load / interleave contiguous store + * de-interleave load / interleave contiguous store ************************************************************/ // two channels #define NPYV_IMPL_AVX512_MEM_INTERLEAVE(SFX, ZSFX) \ diff --git a/numpy/_core/src/common/simd/intdiv.h b/numpy/_core/src/common/simd/intdiv.h index d843eaf4c9d9..0284d49d23bb 100644 --- a/numpy/_core/src/common/simd/intdiv.h +++ b/numpy/_core/src/common/simd/intdiv.h @@ -216,6 +216,10 @@ NPY_FINLINE npyv_u8x3 npyv_divisor_u8(npy_uint8 d) divisor.val[0] = npyv_setall_u8(m); divisor.val[1] = npyv_reinterpret_u8_s8(npyv_setall_s8(-sh1)); divisor.val[2] = npyv_reinterpret_u8_s8(npyv_setall_s8(-sh2)); +#elif defined(NPY_HAVE_LSX) + divisor.val[0] = npyv_setall_u8(m); + divisor.val[1] = npyv_setall_u8(sh1); + divisor.val[2] = npyv_setall_u8(sh2); #else #error "please initialize the shifting operand for the new architecture" #endif @@ -225,7 +229,7 @@ NPY_FINLINE npyv_u8x3 npyv_divisor_u8(npy_uint8 d) NPY_FINLINE npyv_s16x3 npyv_divisor_s16(npy_int16 d); NPY_FINLINE npyv_s8x3 npyv_divisor_s8(npy_int8 d) { -#ifdef NPY_HAVE_SSE2 // SSE/AVX2/AVX512 +#if defined(NPY_HAVE_SSE2) // SSE/AVX2/AVX512 npyv_s16x3 p = npyv_divisor_s16(d); npyv_s8x3 r; 
r.val[0] = npyv_reinterpret_s8_s16(p.val[0]); @@ -249,7 +253,7 @@ NPY_FINLINE npyv_s8x3 npyv_divisor_s8(npy_int8 d) npyv_s8x3 divisor; divisor.val[0] = npyv_setall_s8(m); divisor.val[2] = npyv_setall_s8(d < 0 ? -1 : 0); - #if defined(NPY_HAVE_VSX2) || defined(NPY_HAVE_VX) + #if defined(NPY_HAVE_VSX2) || defined(NPY_HAVE_VX) || defined(NPY_HAVE_LSX) divisor.val[1] = npyv_setall_s8(sh); #elif defined(NPY_HAVE_NEON) divisor.val[1] = npyv_setall_s8(-sh); @@ -291,6 +295,9 @@ NPY_FINLINE npyv_u16x3 npyv_divisor_u16(npy_uint16 d) #elif defined(NPY_HAVE_NEON) divisor.val[1] = npyv_reinterpret_u16_s16(npyv_setall_s16(-sh1)); divisor.val[2] = npyv_reinterpret_u16_s16(npyv_setall_s16(-sh2)); +#elif defined(NPY_HAVE_LSX) + divisor.val[1] = npyv_setall_u16(sh1); + divisor.val[2] = npyv_setall_u16(sh2); #else #error "please initialize the shifting operand for the new architecture" #endif @@ -321,6 +328,8 @@ NPY_FINLINE npyv_s16x3 npyv_divisor_s16(npy_int16 d) divisor.val[1] = npyv_setall_s16(sh); #elif defined(NPY_HAVE_NEON) divisor.val[1] = npyv_setall_s16(-sh); +#elif defined(NPY_HAVE_LSX) + divisor.val[1] = npyv_setall_s16(sh); #else #error "please initialize the shifting operand for the new architecture" #endif @@ -358,6 +367,9 @@ NPY_FINLINE npyv_u32x3 npyv_divisor_u32(npy_uint32 d) #elif defined(NPY_HAVE_NEON) divisor.val[1] = npyv_reinterpret_u32_s32(npyv_setall_s32(-sh1)); divisor.val[2] = npyv_reinterpret_u32_s32(npyv_setall_s32(-sh2)); +#elif defined(NPY_HAVE_LSX) + divisor.val[1] = npyv_setall_u32(sh1); + divisor.val[2] = npyv_setall_u32(sh2); #else #error "please initialize the shifting operand for the new architecture" #endif @@ -393,6 +405,8 @@ NPY_FINLINE npyv_s32x3 npyv_divisor_s32(npy_int32 d) divisor.val[1] = npyv_setall_s32(sh); #elif defined(NPY_HAVE_NEON) divisor.val[1] = npyv_setall_s32(-sh); +#elif defined(NPY_HAVE_LSX) + divisor.val[1] = npyv_setall_s32(sh); #else #error "please initialize the shifting operand for the new architecture" #endif @@ -427,6 
+441,9 @@ NPY_FINLINE npyv_u64x3 npyv_divisor_u64(npy_uint64 d) #ifdef NPY_HAVE_SSE2 // SSE/AVX2/AVX512 divisor.val[1] = npyv_set_u64(sh1); divisor.val[2] = npyv_set_u64(sh2); + #elif defined(NPY_HAVE_LSX) + divisor.val[1] = npyv_setall_u64(sh1); + divisor.val[2] = npyv_setall_u64(sh2); #else #error "please initialize the shifting operand for the new architecture" #endif @@ -465,6 +482,8 @@ NPY_FINLINE npyv_s64x3 npyv_divisor_s64(npy_int64 d) divisor.val[2] = npyv_setall_s64(d < 0 ? -1 : 0); // sign of divisor #ifdef NPY_HAVE_SSE2 // SSE/AVX2/AVX512 divisor.val[1] = npyv_set_s64(sh); + #elif defined(NPY_HAVE_LSX) + divisor.val[1] = npyv_setall_s64(sh); #else #error "please initialize the shifting operand for the new architecture" #endif diff --git a/numpy/_core/src/common/simd/lsx/arithmetic.h b/numpy/_core/src/common/simd/lsx/arithmetic.h new file mode 100644 index 000000000000..33aad40871bd --- /dev/null +++ b/numpy/_core/src/common/simd/lsx/arithmetic.h @@ -0,0 +1,257 @@ +#ifndef NPY_SIMD + #error "Not a standalone header" +#endif + +#ifndef _NPY_SIMD_LSX_ARITHMETIC_H +#define _NPY_SIMD_LSX_ARITHMETIC_H + +/*************************** + * Addition + ***************************/ +// non-saturated +#define npyv_add_u8 __lsx_vadd_b +#define npyv_add_s8 __lsx_vadd_b +#define npyv_add_u16 __lsx_vadd_h +#define npyv_add_s16 __lsx_vadd_h +#define npyv_add_u32 __lsx_vadd_w +#define npyv_add_s32 __lsx_vadd_w +#define npyv_add_u64 __lsx_vadd_d +#define npyv_add_s64 __lsx_vadd_d +#define npyv_add_f32 __lsx_vfadd_s +#define npyv_add_f64 __lsx_vfadd_d + +// saturated +#define npyv_adds_u8 __lsx_vsadd_bu +#define npyv_adds_s8 __lsx_vsadd_b +#define npyv_adds_u16 __lsx_vsadd_hu +#define npyv_adds_s16 __lsx_vsadd_h +#define npyv_adds_u32 __lsx_vsadd_wu +#define npyv_adds_s32 __lsx_vsadd_w +#define npyv_adds_u64 __lsx_vsadd_du +#define npyv_adds_s64 __lsx_vsadd_d + + +/*************************** + * Subtraction + ***************************/ +// non-saturated +#define 
npyv_sub_u8 __lsx_vsub_b +#define npyv_sub_s8 __lsx_vsub_b +#define npyv_sub_u16 __lsx_vsub_h +#define npyv_sub_s16 __lsx_vsub_h +#define npyv_sub_u32 __lsx_vsub_w +#define npyv_sub_s32 __lsx_vsub_w +#define npyv_sub_u64 __lsx_vsub_d +#define npyv_sub_s64 __lsx_vsub_d +#define npyv_sub_f32 __lsx_vfsub_s +#define npyv_sub_f64 __lsx_vfsub_d + +// saturated +#define npyv_subs_u8 __lsx_vssub_bu +#define npyv_subs_s8 __lsx_vssub_b +#define npyv_subs_u16 __lsx_vssub_hu +#define npyv_subs_s16 __lsx_vssub_h +#define npyv_subs_u32 __lsx_vssub_wu +#define npyv_subs_s32 __lsx_vssub_w +#define npyv_subs_u64 __lsx_vssub_du +#define npyv_subs_s64 __lsx_vssub_d + + +/*************************** + * Multiplication + ***************************/ +// non-saturated +#define npyv_mul_u8 __lsx_vmul_b +#define npyv_mul_s8 __lsx_vmul_b +#define npyv_mul_u16 __lsx_vmul_h +#define npyv_mul_s16 __lsx_vmul_h +#define npyv_mul_u32 __lsx_vmul_w +#define npyv_mul_s32 __lsx_vmul_w +#define npyv_mul_f32 __lsx_vfmul_s +#define npyv_mul_f64 __lsx_vfmul_d + + +/*************************** + * Integer Division + ***************************/ +// See simd/intdiv.h for more clarification +// divide each unsigned 8-bit element by a precomputed divisor +NPY_FINLINE npyv_u8 npyv_divc_u8(npyv_u8 a, const npyv_u8x3 divisor) +{ + // high part of unsigned multiplication + __m128i mulhi = __lsx_vmuh_bu(a, divisor.val[0]); + // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2 + __m128i q = __lsx_vsub_b(a, mulhi); + q = __lsx_vsrl_b(q, divisor.val[1]); + q = __lsx_vadd_b(mulhi, q); + q = __lsx_vsrl_b(q, divisor.val[2]); + + return q; +} +// divide each signed 8-bit element by a precomputed divisor (round towards zero) +NPY_FINLINE npyv_s16 npyv_divc_s16(npyv_s16 a, const npyv_s16x3 divisor); +NPY_FINLINE npyv_s8 npyv_divc_s8(npyv_s8 a, const npyv_s8x3 divisor) +{ + __m128i mulhi = __lsx_vmuh_b(a, divisor.val[0]); + // q = ((a + mulhi) >> sh1) - XSIGN(a) + // trunc(a/d) = (q ^ dsign) - dsign + __m128i q = 
__lsx_vsra_b(__lsx_vadd_b(a, mulhi), divisor.val[1]); + q = __lsx_vsub_b(q, __lsx_vsrai_b(a, 7)); + q = __lsx_vsub_b(__lsx_vxor_v(q, divisor.val[2]), divisor.val[2]); + return q; +} +// divide each unsigned 16-bit element by a precomputed divisor +NPY_FINLINE npyv_u16 npyv_divc_u16(npyv_u16 a, const npyv_u16x3 divisor) +{ + // high part of unsigned multiplication + __m128i mulhi = __lsx_vmuh_hu(a, divisor.val[0]); + // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2 + __m128i q = __lsx_vsub_h(a, mulhi); + q = __lsx_vsrl_h(q, divisor.val[1]); + q = __lsx_vadd_h(mulhi, q); + q = __lsx_vsrl_h(q, divisor.val[2]); + return q; +} +// divide each signed 16-bit element by a precomputed divisor (round towards zero) +NPY_FINLINE npyv_s16 npyv_divc_s16(npyv_s16 a, const npyv_s16x3 divisor) +{ + // high part of signed multiplication + __m128i mulhi = __lsx_vmuh_h(a, divisor.val[0]); + // q = ((a + mulhi) >> sh1) - XSIGN(a) + // trunc(a/d) = (q ^ dsign) - dsign + __m128i q = __lsx_vsra_h(__lsx_vadd_h(a, mulhi), divisor.val[1]); + q = __lsx_vsub_h(q, __lsx_vsrai_h(a, 15)); + q = __lsx_vsub_h(__lsx_vxor_v(q, divisor.val[2]), divisor.val[2]); + return q; +} +// divide each unsigned 32-bit element by a precomputed divisor +NPY_FINLINE npyv_u32 npyv_divc_u32(npyv_u32 a, const npyv_u32x3 divisor) +{ + // high part of unsigned multiplication + __m128i mulhi = __lsx_vmuh_wu(a, divisor.val[0]); + // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2 + __m128i q = __lsx_vsub_w(a, mulhi); + q = __lsx_vsrl_w(q, divisor.val[1]); + q = __lsx_vadd_w(mulhi, q); + q = __lsx_vsrl_w(q, divisor.val[2]); + return q; +} +// divide each signed 32-bit element by a precomputed divisor (round towards zero) +NPY_FINLINE npyv_s32 npyv_divc_s32(npyv_s32 a, const npyv_s32x3 divisor) +{ + __m128i mulhi = __lsx_vmuh_w(a, divisor.val[0]); + __m128i q = __lsx_vsra_w(__lsx_vadd_w(a, mulhi), divisor.val[1]); + q = __lsx_vsub_w(q, __lsx_vsrai_w(a, 31)); + q = __lsx_vsub_w(__lsx_vxor_v(q, divisor.val[2]), 
divisor.val[2]);; + return q; +} +// returns the high 64 bits of unsigned 64-bit multiplication +// xref https://stackoverflow.com/a/28827013 +NPY_FINLINE npyv_u64 npyv__mullhi_u64(npyv_u64 a, npyv_u64 b) +{ + __m128i hi = __lsx_vmuh_du(a, b); + return hi; +} +// divide each unsigned 64-bit element by a precomputed divisor +NPY_FINLINE npyv_u64 npyv_divc_u64(npyv_u64 a, const npyv_u64x3 divisor) +{ + // high part of unsigned multiplication + __m128i mulhi = __lsx_vmuh_du(a, divisor.val[0]); + // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2 + __m128i q = __lsx_vsub_d(a, mulhi); + q = __lsx_vsrl_d(q, divisor.val[1]); + q = __lsx_vadd_d(mulhi, q); + q = __lsx_vsrl_d(q, divisor.val[2]); + return q; +} +// divide each signed 64-bit element by a precomputed divisor (round towards zero) +NPY_FINLINE npyv_s64 npyv_divc_s64(npyv_s64 a, const npyv_s64x3 divisor) +{ + __m128i mulhi = __lsx_vmuh_d(a, divisor.val[0]); + __m128i q = __lsx_vsra_d(__lsx_vadd_d(a, mulhi), divisor.val[1]); + q = __lsx_vsub_d(q, __lsx_vsrai_d(a, 63)); + q = __lsx_vsub_d(__lsx_vxor_v(q, divisor.val[2]), divisor.val[2]); + return q; +} +/*************************** + * Division + ***************************/ +#define npyv_div_f32 __lsx_vfdiv_s +#define npyv_div_f64 __lsx_vfdiv_d +/*************************** + * FUSED + ***************************/ +// multiply and add, a*b + c +#define npyv_muladd_f32 __lsx_vfmadd_s +#define npyv_muladd_f64 __lsx_vfmadd_d +// multiply and subtract, a*b - c +#define npyv_mulsub_f32 __lsx_vfmsub_s +#define npyv_mulsub_f64 __lsx_vfmsub_d +// negate multiply and add, -(a*b) + c equal to -(a*b - c) +#define npyv_nmuladd_f32 __lsx_vfnmsub_s +#define npyv_nmuladd_f64 __lsx_vfnmsub_d +// negate multiply and subtract, -(a*b) - c equal to -(a*b +c) +#define npyv_nmulsub_f32 __lsx_vfnmadd_s +#define npyv_nmulsub_f64 __lsx_vfnmadd_d + // multiply, add for odd elements and subtract even elements. 
+ // (a * b) -+ c +NPY_FINLINE npyv_f32 npyv_muladdsub_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c) + { + return __lsx_vfmadd_s(a, b, (__m128)__lsx_vxor_v((__m128i)c, (__m128i)(v4f32){-0.0, 0.0, -0.0, 0.0})); + + } +NPY_FINLINE npyv_f64 npyv_muladdsub_f64(npyv_f64 a, npyv_f64 b, npyv_f64 c) + { + return __lsx_vfmadd_d(a, b, (__m128d)__lsx_vxor_v((__m128i)c, (__m128i)(v2f64){-0.0, 0.0})); + + } + +/*************************** + * Summation + ***************************/ +// reduce sum across vector +NPY_FINLINE npy_uint32 npyv_sum_u32(npyv_u32 a) +{ + __m128i t1 = __lsx_vhaddw_du_wu(a, a); + __m128i t2 = __lsx_vhaddw_qu_du(t1, t1); + return __lsx_vpickve2gr_wu(t2, 0); +} + +NPY_FINLINE npy_uint64 npyv_sum_u64(npyv_u64 a) +{ + __m128i t = __lsx_vhaddw_qu_du(a, a); + return __lsx_vpickve2gr_du(t, 0); +} + +NPY_FINLINE float npyv_sum_f32(npyv_f32 a) +{ + __m128 ft = __lsx_vfadd_s(a, (__m128)__lsx_vbsrl_v((__m128i)a, 8)); + ft = __lsx_vfadd_s(ft, (__m128)__lsx_vbsrl_v(ft, 4)); + return ft[0]; +} + +NPY_FINLINE double npyv_sum_f64(npyv_f64 a) +{ + __m128d fd = __lsx_vfadd_d(a, (__m128d)__lsx_vreplve_d((__m128i)a, 1)); + return fd[0]; +} + +// expand the source vector and performs sum reduce +NPY_FINLINE npy_uint16 npyv_sumup_u8(npyv_u8 a) +{ + __m128i first = __lsx_vhaddw_hu_bu((__m128i)a,(__m128i)a); + __m128i second = __lsx_vhaddw_wu_hu((__m128i)first,(__m128i)first); + __m128i third = __lsx_vhaddw_du_wu((__m128i)second,(__m128i)second); + __m128i four = __lsx_vhaddw_qu_du((__m128i)third,(__m128i)third); + return four[0]; +} + +NPY_FINLINE npy_uint32 npyv_sumup_u16(npyv_u16 a) +{ + __m128i t1 = __lsx_vhaddw_wu_hu(a, a); + __m128i t2 = __lsx_vhaddw_du_wu(t1, t1); + __m128i t3 = __lsx_vhaddw_qu_du(t2, t2); + return __lsx_vpickve2gr_w(t3, 0); +} + +#endif // _NPY_SIMD_LSX_ARITHMETIC_H diff --git a/numpy/_core/src/common/simd/lsx/conversion.h b/numpy/_core/src/common/simd/lsx/conversion.h new file mode 100644 index 000000000000..72c22e90701c --- /dev/null +++ 
b/numpy/_core/src/common/simd/lsx/conversion.h @@ -0,0 +1,100 @@ +#ifndef NPY_SIMD + #error "Not a standalone header" +#endif + +#ifndef _NPY_SIMD_LSX_CVT_H +#define _NPY_SIMD_LSX_CVT_H + +// convert mask types to integer types +#define npyv_cvt_u8_b8(BL) BL +#define npyv_cvt_s8_b8(BL) BL +#define npyv_cvt_u16_b16(BL) BL +#define npyv_cvt_s16_b16(BL) BL +#define npyv_cvt_u32_b32(BL) BL +#define npyv_cvt_s32_b32(BL) BL +#define npyv_cvt_u64_b64(BL) BL +#define npyv_cvt_s64_b64(BL) BL +#define npyv_cvt_f32_b32(BL) (__m128)(BL) +#define npyv_cvt_f64_b64(BL) (__m128d)(BL) + +// convert integer types to mask types +#define npyv_cvt_b8_u8(A) A +#define npyv_cvt_b8_s8(A) A +#define npyv_cvt_b16_u16(A) A +#define npyv_cvt_b16_s16(A) A +#define npyv_cvt_b32_u32(A) A +#define npyv_cvt_b32_s32(A) A +#define npyv_cvt_b64_u64(A) A +#define npyv_cvt_b64_s64(A) A +#define npyv_cvt_b32_f32(A) (__m128i)(A) +#define npyv_cvt_b64_f64(A) (__m128i)(A) + +// convert boolean vector to integer bitfield +NPY_FINLINE npy_uint64 npyv_tobits_b8(npyv_b8 a) +{ return (npy_uint16)__lsx_vmsknz_b(a)[0]; } +NPY_FINLINE npy_uint64 npyv_tobits_b16(npyv_b16 a) +{ + __m128i b = __lsx_vsat_hu(a, 7); + __m128i pack = __lsx_vpickev_b(b, b); + return (npy_uint8)__lsx_vmsknz_b(pack)[0]; +} +NPY_FINLINE npy_uint64 npyv_tobits_b32(npyv_b32 a) +{ + __m128i b = __lsx_vmskltz_w(a); + v4i32 ret = (v4i32)b; + return ret[0]; +} + +NPY_FINLINE npy_uint64 npyv_tobits_b64(npyv_b64 a) +{ + __m128i b = __lsx_vmskltz_d(a); + v2i64 ret = (v2i64)b; + return ret[0]; +} + +// expand +NPY_FINLINE npyv_u16x2 npyv_expand_u16_u8(npyv_u8 data) { + npyv_u16x2 r; + r.val[0] = __lsx_vsllwil_hu_bu(data, 0); + r.val[1] = __lsx_vexth_hu_bu(data); + return r; +} + +NPY_FINLINE npyv_u32x2 npyv_expand_u32_u16(npyv_u16 data) { + npyv_u32x2 r; + r.val[0] = __lsx_vsllwil_wu_hu(data, 0); + r.val[1] = __lsx_vexth_wu_hu(data); + return r; +} + +// pack two 16-bit boolean into one 8-bit boolean vector +NPY_FINLINE npyv_b8 
npyv_pack_b8_b16(npyv_b16 a, npyv_b16 b) { + return __lsx_vpickev_b(__lsx_vsat_h(b, 7),__lsx_vsat_h(a, 7)); +} + +// pack four 32-bit boolean vectors into one 8-bit boolean vector +NPY_FINLINE npyv_b8 +npyv_pack_b8_b32(npyv_b32 a, npyv_b32 b, npyv_b32 c, npyv_b32 d) { + __m128i ab = __lsx_vpickev_h(__lsx_vsat_w(b, 15), __lsx_vsat_w(a, 15)); + __m128i cd = __lsx_vpickev_h(__lsx_vsat_w(d, 15), __lsx_vsat_w(c, 15)); + return npyv_pack_b8_b16(ab, cd); +} + +// pack eight 64-bit boolean vectors into one 8-bit boolean vector +NPY_FINLINE npyv_b8 +npyv_pack_b8_b64(npyv_b64 a, npyv_b64 b, npyv_b64 c, npyv_b64 d, + npyv_b64 e, npyv_b64 f, npyv_b64 g, npyv_b64 h) { + __m128i ab = __lsx_vpickev_h(__lsx_vsat_w(b, 15), __lsx_vsat_w(a, 15)); + __m128i cd = __lsx_vpickev_h(__lsx_vsat_w(d, 15), __lsx_vsat_w(c, 15)); + __m128i ef = __lsx_vpickev_h(__lsx_vsat_w(f, 15), __lsx_vsat_w(e, 15)); + __m128i gh = __lsx_vpickev_h(__lsx_vsat_w(h, 15), __lsx_vsat_w(g, 15)); + return npyv_pack_b8_b32(ab, cd, ef, gh); +} + +// round to nearest integer (assuming even) +#define npyv_round_s32_f32 __lsx_vftintrne_w_s +NPY_FINLINE npyv_s32 npyv_round_s32_f64(npyv_f64 a, npyv_f64 b) +{ + return __lsx_vftintrne_w_d(b, a); +} +#endif // _NPY_SIMD_LSX_CVT_H diff --git a/numpy/_core/src/common/simd/lsx/lsx.h b/numpy/_core/src/common/simd/lsx/lsx.h new file mode 100644 index 000000000000..80017296fc98 --- /dev/null +++ b/numpy/_core/src/common/simd/lsx/lsx.h @@ -0,0 +1,77 @@ +#ifndef _NPY_SIMD_H_ + #error "Not a standalone header" +#endif + +#ifndef _NPY_SIMD_LSX_LSX_H +#define _NPY_SIMD_LSX_LSX_H + +#define NPY_SIMD 128 +#define NPY_SIMD_WIDTH 16 +#define NPY_SIMD_F64 1 +#define NPY_SIMD_F32 1 +#define NPY_SIMD_FMA3 1 +#define NPY_SIMD_BIGENDIAN 0 +#define NPY_SIMD_CMPSIGNAL 1 + +typedef __m128i npyv_u8; +typedef __m128i npyv_s8; +typedef __m128i npyv_u16; +typedef __m128i npyv_s16; +typedef __m128i npyv_u32; +typedef __m128i npyv_s32; +typedef __m128i npyv_u64; +typedef __m128i npyv_s64; +typedef __m128 
npyv_f32; +typedef __m128d npyv_f64; + +typedef __m128i npyv_b8; +typedef __m128i npyv_b16; +typedef __m128i npyv_b32; +typedef __m128i npyv_b64; + +typedef struct { __m128i val[2]; } npyv_m128ix2; +typedef npyv_m128ix2 npyv_u8x2; +typedef npyv_m128ix2 npyv_s8x2; +typedef npyv_m128ix2 npyv_u16x2; +typedef npyv_m128ix2 npyv_s16x2; +typedef npyv_m128ix2 npyv_u32x2; +typedef npyv_m128ix2 npyv_s32x2; +typedef npyv_m128ix2 npyv_u64x2; +typedef npyv_m128ix2 npyv_s64x2; + +typedef struct { __m128i val[3]; } npyv_m128ix3; +typedef npyv_m128ix3 npyv_u8x3; +typedef npyv_m128ix3 npyv_s8x3; +typedef npyv_m128ix3 npyv_u16x3; +typedef npyv_m128ix3 npyv_s16x3; +typedef npyv_m128ix3 npyv_u32x3; +typedef npyv_m128ix3 npyv_s32x3; +typedef npyv_m128ix3 npyv_u64x3; +typedef npyv_m128ix3 npyv_s64x3; + +typedef struct { __m128 val[2]; } npyv_f32x2; +typedef struct { __m128d val[2]; } npyv_f64x2; +typedef struct { __m128 val[3]; } npyv_f32x3; +typedef struct { __m128d val[3]; } npyv_f64x3; + +#define npyv_nlanes_u8 16 +#define npyv_nlanes_s8 16 +#define npyv_nlanes_u16 8 +#define npyv_nlanes_s16 8 +#define npyv_nlanes_u32 4 +#define npyv_nlanes_s32 4 +#define npyv_nlanes_u64 2 +#define npyv_nlanes_s64 2 +#define npyv_nlanes_f32 4 +#define npyv_nlanes_f64 2 + + +#include "memory.h" +#include "misc.h" +#include "reorder.h" +#include "operators.h" +#include "conversion.h" +#include "arithmetic.h" +#include "math.h" + +#endif //#ifndef _NPY_SIMD_LSX_LSX_H \ No newline at end of file diff --git a/numpy/_core/src/common/simd/lsx/math.h b/numpy/_core/src/common/simd/lsx/math.h new file mode 100644 index 000000000000..6109fb4e8260 --- /dev/null +++ b/numpy/_core/src/common/simd/lsx/math.h @@ -0,0 +1,228 @@ +#ifndef NPY_SIMD + #error "Not a standalone header" +#endif + +#ifndef _NPY_SIMD_LSX_MATH_H +#define _NPY_SIMD_LSX_MATH_H +/*************************** + * Elementary + ***************************/ +// Square root +#define npyv_sqrt_f32 __lsx_vfsqrt_s +#define npyv_sqrt_f64 __lsx_vfsqrt_d + 
+// Reciprocal +NPY_FINLINE npyv_f32 npyv_recip_f32(npyv_f32 a) +{ return __lsx_vfrecip_s(a); } +NPY_FINLINE npyv_f64 npyv_recip_f64(npyv_f64 a) +{ return __lsx_vfrecip_d(a); } + +// Absolute +NPY_FINLINE npyv_f32 npyv_abs_f32(npyv_f32 a) +{ + return (npyv_f32)__lsx_vbitclri_w(a, 0x1F); +} +NPY_FINLINE npyv_f64 npyv_abs_f64(npyv_f64 a) +{ + return (npyv_f64)__lsx_vbitclri_d(a, 0x3F); +} + +// Square +NPY_FINLINE npyv_f32 npyv_square_f32(npyv_f32 a) +{ return __lsx_vfmul_s(a, a); } +NPY_FINLINE npyv_f64 npyv_square_f64(npyv_f64 a) +{ return __lsx_vfmul_d(a, a); } + +// Maximum, natively mapping with no guarantees to handle NaN. +#define npyv_max_f32 __lsx_vfmax_s +#define npyv_max_f64 __lsx_vfmax_d +// Maximum, supports IEEE floating-point arithmetic (IEC 60559), +// - If one of the two vectors contains NaN, the equivalent element of the other vector is set +// - Only if both corresponded elements are NaN, NaN is set. +NPY_FINLINE npyv_f32 npyv_maxp_f32(npyv_f32 a, npyv_f32 b) +{ + return __lsx_vfmax_s(a, b); +} +NPY_FINLINE npyv_f64 npyv_maxp_f64(npyv_f64 a, npyv_f64 b) +{ + return __lsx_vfmax_d(a, b); +} +// If any of corresponded element is NaN, NaN is set. 
+NPY_FINLINE npyv_f32 npyv_maxn_f32(npyv_f32 a, npyv_f32 b) +{ + __m128i mask = __lsx_vand_v(npyv_notnan_f32(a), npyv_notnan_f32(b)); + __m128 max = __lsx_vfmax_s(a, b); + return npyv_select_f32(mask, max, (__m128){NAN, NAN, NAN, NAN}); +} +NPY_FINLINE npyv_f64 npyv_maxn_f64(npyv_f64 a, npyv_f64 b) +{ + __m128i mask = __lsx_vand_v(npyv_notnan_f64(a), npyv_notnan_f64(b)); + __m128d max = __lsx_vfmax_d(a, b); + return npyv_select_f64(mask, max, (__m128d){NAN, NAN}); +} + +// Maximum, integer operations +#define npyv_max_u8 __lsx_vmax_bu +#define npyv_max_s8 __lsx_vmax_b +#define npyv_max_u16 __lsx_vmax_hu +#define npyv_max_s16 __lsx_vmax_h +#define npyv_max_u32 __lsx_vmax_wu +#define npyv_max_s32 __lsx_vmax_w +#define npyv_max_u64 __lsx_vmax_du +#define npyv_max_s64 __lsx_vmax_d + +// Minimum, natively mapping with no guarantees to handle NaN. +#define npyv_min_f32 __lsx_vfmin_s +#define npyv_min_f64 __lsx_vfmin_d + +// Minimum, supports IEEE floating-point arithmetic (IEC 60559), +// - If one of the two vectors contains NaN, the equivalent element of the other vector is set +// - Only if both corresponded elements are NaN, NaN is set. 
+NPY_FINLINE npyv_f32 npyv_minp_f32(npyv_f32 a, npyv_f32 b) +{ + return __lsx_vfmin_s(a, b); +} +NPY_FINLINE npyv_f64 npyv_minp_f64(npyv_f64 a, npyv_f64 b) +{ + return __lsx_vfmin_d(a, b); +} +NPY_FINLINE npyv_f32 npyv_minn_f32(npyv_f32 a, npyv_f32 b) +{ + __m128i mask = __lsx_vand_v(npyv_notnan_f32(a), npyv_notnan_f32(b)); + __m128 min = __lsx_vfmin_s(a, b); + return npyv_select_f32(mask, min, (__m128){NAN, NAN, NAN, NAN}); +} +NPY_FINLINE npyv_f64 npyv_minn_f64(npyv_f64 a, npyv_f64 b) +{ + __m128i mask = __lsx_vand_v(npyv_notnan_f64(a), npyv_notnan_f64(b)); + __m128d min = __lsx_vfmin_d(a, b); + return npyv_select_f64(mask, min, (__m128d){NAN, NAN}); +} + +// Minimum, integer operations +#define npyv_min_u8 __lsx_vmin_bu +#define npyv_min_s8 __lsx_vmin_b +#define npyv_min_u16 __lsx_vmin_hu +#define npyv_min_s16 __lsx_vmin_h +#define npyv_min_u32 __lsx_vmin_wu +#define npyv_min_s32 __lsx_vmin_w +#define npyv_min_u64 __lsx_vmin_du +#define npyv_min_s64 __lsx_vmin_d + +// reduce min&max for ps & pd +#define NPY_IMPL_LSX_REDUCE_MINMAX(INTRIN, INF, INF64) \ + NPY_FINLINE float npyv_reduce_##INTRIN##_f32(npyv_f32 a) \ + { \ + __m128i vector2 = {0, 0}; \ + v4i32 index1 = {2, 3, 0, 0}; \ + v4i32 index2 = {1, 0, 0, 0}; \ + __m128 v64 = __lsx_vf##INTRIN##_s(a, (__m128)__lsx_vshuf_w((__m128i)index1, (__m128i)vector2, (__m128i)a)); \ + __m128 v32 = __lsx_vf##INTRIN##_s(v64, (__m128)__lsx_vshuf_w((__m128i)index2, (__m128i)vector2, (__m128i)v64)); \ + return v32[0]; \ + } \ + NPY_FINLINE float npyv_reduce_##INTRIN##n_f32(npyv_f32 a) \ + { \ + npyv_b32 notnan = npyv_notnan_f32(a); \ + if (NPY_UNLIKELY(!npyv_all_b32(notnan))) { \ + const union { npy_uint32 i; float f;} pnan = {0x7fc00000UL}; \ + return pnan.f; \ + } \ + return npyv_reduce_##INTRIN##_f32(a); \ + } \ + NPY_FINLINE float npyv_reduce_##INTRIN##p_f32(npyv_f32 a) \ + { \ + npyv_b32 notnan = npyv_notnan_f32(a); \ + if (NPY_UNLIKELY(!npyv_any_b32(notnan))) { \ + return a[0]; \ + } \ + a = npyv_select_f32(notnan, a, 
npyv_reinterpret_f32_u32(npyv_setall_u32(INF))); \ + return npyv_reduce_##INTRIN##_f32(a); \ + } \ + NPY_FINLINE double npyv_reduce_##INTRIN##_f64(npyv_f64 a) \ + { \ + __m128i index2 = {1, 0}; \ + __m128d v64 = __lsx_vf##INTRIN##_d(a, (__m128d)__lsx_vshuf_d(index2, (__m128i){0, 0}, (__m128i)a)); \ + return (double)v64[0]; \ + } \ + NPY_FINLINE double npyv_reduce_##INTRIN##p_f64(npyv_f64 a) \ + { \ + npyv_b64 notnan = npyv_notnan_f64(a); \ + if (NPY_UNLIKELY(!npyv_any_b64(notnan))) { \ + return a[0]; \ + } \ + a = npyv_select_f64(notnan, a, npyv_reinterpret_f64_u64(npyv_setall_u64(INF64))); \ + return npyv_reduce_##INTRIN##_f64(a); \ + } \ + NPY_FINLINE double npyv_reduce_##INTRIN##n_f64(npyv_f64 a) \ + { \ + npyv_b64 notnan = npyv_notnan_f64(a); \ + if (NPY_UNLIKELY(!npyv_all_b64(notnan))) { \ + const union { npy_uint64 i; double d;} pnan = {0x7ff8000000000000ull}; \ + return pnan.d; \ + } \ + return npyv_reduce_##INTRIN##_f64(a); \ + } + +NPY_IMPL_LSX_REDUCE_MINMAX(min, 0x7f800000, 0x7ff0000000000000) +NPY_IMPL_LSX_REDUCE_MINMAX(max, 0xff800000, 0xfff0000000000000) +#undef NPY_IMPL_LSX_REDUCE_MINMAX + +// reduce min&max for 8&16&32&64-bits +#define NPY_IMPL_LSX_REDUCE_MINMAX(STYPE, INTRIN, TFLAG) \ + NPY_FINLINE STYPE##64 npyv_reduce_##INTRIN##64(__m128i a) \ + { \ + __m128i vector2 = {0, 0}; \ + v4i32 index1 = {2, 3, 0, 0}; \ + __m128i v64 = npyv_##INTRIN##64(a, __lsx_vshuf_w((__m128i)index1, (__m128i)vector2, a)); \ + return (STYPE##64)__lsx_vpickve2gr_d##TFLAG(v64, 0); \ + } \ + NPY_FINLINE STYPE##32 npyv_reduce_##INTRIN##32(__m128i a) \ + { \ + __m128i vector2 = {0, 0}; \ + v4i32 index1 = {2, 3, 0, 0}; \ + v4i32 index2 = {1, 0, 0, 0}; \ + __m128i v64 = npyv_##INTRIN##32(a, __lsx_vshuf_w((__m128i)index1, (__m128i)vector2, a)); \ + __m128i v32 = npyv_##INTRIN##32(v64, __lsx_vshuf_w((__m128i)index2, (__m128i)vector2, v64)); \ + return (STYPE##32)__lsx_vpickve2gr_w##TFLAG(v32, 0); \ + } \ + NPY_FINLINE STYPE##16 npyv_reduce_##INTRIN##16(__m128i a) \ + { \ + 
__m128i vector2 = {0, 0}; \ + v4i32 index1 = {2, 3, 0, 0}; \ + v4i32 index2 = {1, 0, 0, 0}; \ + v8i16 index3 = {1, 0, 0, 0, 4, 5, 6, 7 }; \ + __m128i v64 = npyv_##INTRIN##16(a, __lsx_vshuf_w((__m128i)index1, (__m128i)vector2, a)); \ + __m128i v32 = npyv_##INTRIN##16(v64, __lsx_vshuf_w((__m128i)index2, (__m128i)vector2, v64)); \ + __m128i v16 = npyv_##INTRIN##16(v32, __lsx_vshuf_h((__m128i)index3, (__m128i)vector2, v32)); \ + return (STYPE##16)__lsx_vpickve2gr_h##TFLAG(v16, 0); \ + } \ + NPY_FINLINE STYPE##8 npyv_reduce_##INTRIN##8(__m128i a) \ + { \ + __m128i val =npyv_##INTRIN##8((__m128i)a, __lsx_vbsrl_v(a, 8)); \ + val = npyv_##INTRIN##8(val, __lsx_vbsrl_v(val, 4)); \ + val = npyv_##INTRIN##8(val, __lsx_vbsrl_v(val, 2)); \ + val = npyv_##INTRIN##8(val, __lsx_vbsrl_v(val, 1)); \ + return (STYPE##8)__lsx_vpickve2gr_b##TFLAG(val, 0); \ + } +NPY_IMPL_LSX_REDUCE_MINMAX(npy_uint, min_u, u) +NPY_IMPL_LSX_REDUCE_MINMAX(npy_int, min_s,) +NPY_IMPL_LSX_REDUCE_MINMAX(npy_uint, max_u, u) +NPY_IMPL_LSX_REDUCE_MINMAX(npy_int, max_s,) +#undef NPY_IMPL_LSX_REDUCE_MINMAX + +// round to nearest integer even +#define npyv_rint_f32 (__m128)__lsx_vfrintrne_s +#define npyv_rint_f64 (__m128d)__lsx_vfrintrne_d +// ceil +#define npyv_ceil_f32 (__m128)__lsx_vfrintrp_s +#define npyv_ceil_f64 (__m128d)__lsx_vfrintrp_d + +// trunc +#define npyv_trunc_f32 (__m128)__lsx_vfrintrz_s +#define npyv_trunc_f64 (__m128d)__lsx_vfrintrz_d + +// floor +#define npyv_floor_f32 (__m128)__lsx_vfrintrm_s +#define npyv_floor_f64 (__m128d)__lsx_vfrintrm_d + +#endif diff --git a/numpy/_core/src/common/simd/lsx/memory.h b/numpy/_core/src/common/simd/lsx/memory.h new file mode 100644 index 000000000000..9c3e6442c6d6 --- /dev/null +++ b/numpy/_core/src/common/simd/lsx/memory.h @@ -0,0 +1,594 @@ +#ifndef NPY_SIMD + #error "Not a standalone header" +#endif + +#ifndef _NPY_SIMD_LSX_MEMORY_H +#define _NPY_SIMD_LSX_MEMORY_H + +#include +#include "misc.h" + +/*************************** + * load/store + 
***************************/ +#define NPYV_IMPL_LSX_MEM(SFX, CTYPE) \ + NPY_FINLINE npyv_##SFX npyv_load_##SFX(const CTYPE *ptr) \ + { return (npyv_##SFX)(__lsx_vld(ptr, 0)); } \ + NPY_FINLINE npyv_##SFX npyv_loada_##SFX(const CTYPE *ptr) \ + { return (npyv_##SFX)(__lsx_vld(ptr, 0)); } \ + NPY_FINLINE npyv_##SFX npyv_loads_##SFX(const CTYPE *ptr) \ + { return (npyv_##SFX)(__lsx_vld(ptr, 0)); } \ + NPY_FINLINE npyv_##SFX npyv_loadl_##SFX(const CTYPE *ptr) \ + { return (npyv_##SFX)__lsx_vldrepl_d(ptr, 0); } \ + NPY_FINLINE void npyv_store_##SFX(CTYPE *ptr, npyv_##SFX vec) \ + { __lsx_vst(vec, ptr, 0); } \ + NPY_FINLINE void npyv_storea_##SFX(CTYPE *ptr, npyv_##SFX vec) \ + { __lsx_vst(vec, ptr, 0); } \ + NPY_FINLINE void npyv_stores_##SFX(CTYPE *ptr, npyv_##SFX vec) \ + { __lsx_vst(vec, ptr, 0); } \ + NPY_FINLINE void npyv_storel_##SFX(CTYPE *ptr, npyv_##SFX vec) \ + { __lsx_vstelm_d(vec, ptr, 0, 0); } \ + NPY_FINLINE void npyv_storeh_##SFX(CTYPE *ptr, npyv_##SFX vec) \ + { __lsx_vstelm_d(vec, ptr, 0, 1); } + +NPYV_IMPL_LSX_MEM(u8, npy_uint8) +NPYV_IMPL_LSX_MEM(s8, npy_int8) +NPYV_IMPL_LSX_MEM(u16, npy_uint16) +NPYV_IMPL_LSX_MEM(s16, npy_int16) +NPYV_IMPL_LSX_MEM(u32, npy_uint32) +NPYV_IMPL_LSX_MEM(s32, npy_int32) +NPYV_IMPL_LSX_MEM(u64, npy_uint64) +NPYV_IMPL_LSX_MEM(s64, npy_int64) +NPYV_IMPL_LSX_MEM(f32, float) +NPYV_IMPL_LSX_MEM(f64, double) + +/*************************** + * Non-contiguous Load + ***************************/ +//// 32 +NPY_FINLINE npyv_s32 npyv_loadn_s32(const npy_int32 *ptr, npy_intp stride) +{ + __m128i a = __lsx_vreplgr2vr_w(*ptr); + a = __lsx_vinsgr2vr_w(a, ptr[stride], 1); + a = __lsx_vinsgr2vr_w(a, ptr[stride*2], 2); + a = __lsx_vinsgr2vr_w(a, ptr[stride*3], 3); + return a; +} +NPY_FINLINE npyv_u32 npyv_loadn_u32(const npy_uint32 *ptr, npy_intp stride) +{ return npyv_reinterpret_u32_s32(npyv_loadn_s32((const npy_int32*)ptr, stride)); } +NPY_FINLINE npyv_f32 npyv_loadn_f32(const float *ptr, npy_intp stride) //ok +{ return 
npyv_reinterpret_f32_s32(npyv_loadn_s32((const npy_int32*)ptr, stride)); } +//// 64 +NPY_FINLINE npyv_f64 npyv_loadn_f64(const double *ptr, npy_intp stride) +{ return (npyv_f64)__lsx_vilvl_d((__m128i)(v2f64)__lsx_vld((ptr + stride), 0), (__m128i)(v2f64)__lsx_vld(ptr, 0)); } +NPY_FINLINE npyv_u64 npyv_loadn_u64(const npy_uint64 *ptr, npy_intp stride) +{ return npyv_reinterpret_u64_f64(npyv_loadn_f64((const double*)ptr, stride)); } +NPY_FINLINE npyv_s64 npyv_loadn_s64(const npy_int64 *ptr, npy_intp stride) +{ return npyv_reinterpret_s64_f64(npyv_loadn_f64((const double*)ptr, stride)); } + +//// 64-bit load over 32-bit stride +NPY_FINLINE npyv_f32 npyv_loadn2_f32(const float *ptr, npy_intp stride) +{ return (npyv_f32)__lsx_vilvl_d(__lsx_vld((const double *)(ptr + stride), 0), __lsx_vld((const double *)ptr, 0)); } +NPY_FINLINE npyv_u32 npyv_loadn2_u32(const npy_uint32 *ptr, npy_intp stride) +{ return npyv_reinterpret_u32_f32(npyv_loadn2_f32((const float*)ptr, stride)); } +NPY_FINLINE npyv_s32 npyv_loadn2_s32(const npy_int32 *ptr, npy_intp stride) +{ return npyv_reinterpret_s32_f32(npyv_loadn2_f32((const float*)ptr, stride)); } + +//// 128-bit load over 64-bit stride +NPY_FINLINE npyv_f64 npyv_loadn2_f64(const double *ptr, npy_intp stride) +{ (void)stride; return npyv_load_f64(ptr); } +NPY_FINLINE npyv_u64 npyv_loadn2_u64(const npy_uint64 *ptr, npy_intp stride) +{ (void)stride; return npyv_load_u64(ptr); } +NPY_FINLINE npyv_s64 npyv_loadn2_s64(const npy_int64 *ptr, npy_intp stride) +{ (void)stride; return npyv_load_s64(ptr); } + +/*************************** + * Non-contiguous Store + ***************************/ +//// 32 +NPY_FINLINE void npyv_storen_s32(npy_int32 *ptr, npy_intp stride, npyv_s32 a) +{ + + __lsx_vstelm_w(a, ptr, 0, 0); + __lsx_vstelm_w(a, ptr + stride, 0, 1); + __lsx_vstelm_w(a, ptr + stride*2, 0, 2); + __lsx_vstelm_w(a, ptr + stride*3, 0, 3); +} +NPY_FINLINE void npyv_storen_u32(npy_uint32 *ptr, npy_intp stride, npyv_u32 a) +{ 
npyv_storen_s32((npy_int32*)ptr, stride, a); } +NPY_FINLINE void npyv_storen_f32(float *ptr, npy_intp stride, npyv_f32 a) +{ npyv_storen_s32((npy_int32*)ptr, stride, (npyv_s32)a); } +//// 64 +NPY_FINLINE void npyv_storen_f64(double *ptr, npy_intp stride, npyv_f64 a) +{ + __lsx_vstelm_d(a, ptr, 0, 0); + __lsx_vstelm_d(a, ptr + stride, 0, 1); +} +NPY_FINLINE void npyv_storen_u64(npy_uint64 *ptr, npy_intp stride, npyv_u64 a) +{ npyv_storen_f64((double*)ptr, stride, (npyv_f64)a); } +NPY_FINLINE void npyv_storen_s64(npy_int64 *ptr, npy_intp stride, npyv_s64 a) +{ npyv_storen_f64((double*)ptr, stride, (npyv_f64)a); } +//// 64-bit store over 32-bit stride +NPY_FINLINE void npyv_storen2_u32(npy_uint32 *ptr, npy_intp stride, npyv_u32 a) +{ + __lsx_vstelm_d(npyv_reinterpret_u64_u32(a), ptr, 0, 0); + __lsx_vstelm_d(npyv_reinterpret_u64_u32(a), ptr+stride, 0, 1); // zn:TODO +} +NPY_FINLINE void npyv_storen2_s32(npy_int32 *ptr, npy_intp stride, npyv_s32 a) +{ npyv_storen2_u32((npy_uint32*)ptr, stride, a); } +NPY_FINLINE void npyv_storen2_f32(float *ptr, npy_intp stride, npyv_f32 a) +{ npyv_storen2_u32((npy_uint32*)ptr, stride, (npyv_u32)a); } + +//// 128-bit store over 64-bit stride +NPY_FINLINE void npyv_storen2_u64(npy_uint64 *ptr, npy_intp stride, npyv_u64 a) +{ (void)stride; npyv_store_u64(ptr, a); } +NPY_FINLINE void npyv_storen2_s64(npy_int64 *ptr, npy_intp stride, npyv_s64 a) +{ (void)stride; npyv_store_s64(ptr, a); } +NPY_FINLINE void npyv_storen2_f64(double *ptr, npy_intp stride, npyv_f64 a) +{ (void)stride; npyv_store_f64(ptr, a); } +/********************************* + * Partial Load + *********************************/ +//// 32 +NPY_FINLINE npyv_s32 npyv_load_till_s32(const npy_int32 *ptr, npy_uintp nlane, npy_int32 fill) +{ + assert(nlane > 0); + const __m128i vfill = npyv_setall_s32(fill); + switch(nlane) { + case 1: + return __lsx_vinsgr2vr_w(vfill, ptr[0], 0); + case 2: + return __lsx_vinsgr2vr_d(vfill, *(unsigned long *)ptr, 0); + case 3: + return 
__lsx_vinsgr2vr_w(__lsx_vld(ptr, 0), fill, 3); + default: + return npyv_load_s32(ptr); + } +} +// fill zero to rest lanes +NPY_FINLINE npyv_s32 npyv_load_tillz_s32(const npy_int32 *ptr, npy_uintp nlane) +{ + assert(nlane > 0); + __m128i zfill = __lsx_vldi(0); + switch(nlane) { + case 1: + return __lsx_vinsgr2vr_w(zfill, ptr[0], 0); + case 2: + return __lsx_vinsgr2vr_d(zfill, *(unsigned long *)ptr, 0); + case 3: + return __lsx_vinsgr2vr_w(__lsx_vld(ptr, 0), 0, 3); + default: + return npyv_load_s32(ptr); + } +} +//// 64 +NPY_FINLINE npyv_s64 npyv_load_till_s64(const npy_int64 *ptr, npy_uintp nlane, npy_int64 fill) +{ + assert(nlane > 0); + if (nlane == 1) { + const __m128i vfill = npyv_setall_s64(fill); + return __lsx_vinsgr2vr_d(vfill, ptr[0], 0); + } + return npyv_load_s64(ptr); +} +// fill zero to rest lanes +NPY_FINLINE npyv_s64 npyv_load_tillz_s64(const npy_int64 *ptr, npy_uintp nlane) +{ + assert(nlane > 0); + if (nlane == 1) { + return __lsx_vinsgr2vr_d(__lsx_vld(ptr, 0), 0, 1); + } + return npyv_load_s64(ptr); +} + +//// 64-bit nlane +NPY_FINLINE npyv_s32 npyv_load2_till_s32(const npy_int32 *ptr, npy_uintp nlane, + npy_int32 fill_lo, npy_int32 fill_hi) +{ + assert(nlane > 0); + if (nlane == 1) { + const __m128i vfill = npyv_set_s32(fill_lo, fill_hi, fill_lo, fill_hi); + return (npyv_s32)__lsx_vinsgr2vr_d(vfill, *(long *)ptr, 0); + } + return npyv_load_s32(ptr); +} +// fill zero to rest lanes +NPY_FINLINE npyv_s32 npyv_load2_tillz_s32(const npy_int32 *ptr, npy_uintp nlane) +{ return (npyv_s32)npyv_load_tillz_s64((const npy_int64*)ptr, nlane); } + +//// 128-bit nlane +NPY_FINLINE npyv_s64 npyv_load2_till_s64(const npy_int64 *ptr, npy_uintp nlane, + npy_int64 fill_lo, npy_int64 fill_hi) +{ (void)nlane; (void)fill_lo; (void)fill_hi; return npyv_load_s64(ptr); } + +NPY_FINLINE npyv_s64 npyv_load2_tillz_s64(const npy_int64 *ptr, npy_uintp nlane) +{ (void)nlane; return npyv_load_s64(ptr); } + +/********************************* + * Non-contiguous partial load + 
*********************************/ +//// 32 +NPY_FINLINE npyv_s32 +npyv_loadn_till_s32(const npy_int32 *ptr, npy_intp stride, npy_uintp nlane, npy_int32 fill) +{ + assert(nlane > 0); + __m128i vfill = npyv_setall_s32(fill); + switch(nlane) { + case 3: + vfill = __lsx_vinsgr2vr_w(vfill, ptr[stride*2], 2); + case 2: + vfill = __lsx_vinsgr2vr_w(vfill, ptr[stride], 1); + case 1: + vfill = __lsx_vinsgr2vr_w(vfill, ptr[0], 0); + break; + default: + return npyv_loadn_s32(ptr, stride); + } // switch + return vfill; +} +// fill zero to rest lanes +NPY_FINLINE npyv_s32 +npyv_loadn_tillz_s32(const npy_int32 *ptr, npy_intp stride, npy_uintp nlane) +{ + assert(nlane > 0); + switch(nlane) { + case 1: + return __lsx_vinsgr2vr_w(__lsx_vldi(0), ptr[0], 0); + case 2: + { + npyv_s32 a = __lsx_vinsgr2vr_w(__lsx_vldi(0), ptr[0], 0); + return __lsx_vinsgr2vr_w(a, ptr[stride], 1); + } + case 3: + { + npyv_s32 a = __lsx_vinsgr2vr_w(__lsx_vldi(0), ptr[0], 0); + a = __lsx_vinsgr2vr_w(a, ptr[stride], 1); + a = __lsx_vinsgr2vr_w(a, ptr[stride*2], 2); + return a; + } + default: + return npyv_loadn_s32(ptr, stride); + } +} +//// 64 +NPY_FINLINE npyv_s64 +npyv_loadn_till_s64(const npy_int64 *ptr, npy_intp stride, npy_uintp nlane, npy_int64 fill) +{ + assert(nlane > 0); + if (nlane == 1) { + const __m128i vfill = npyv_setall_s64(fill); + return __lsx_vinsgr2vr_d(vfill, ptr[0], 0); + } + return npyv_loadn_s64(ptr, stride); +} +// fill zero to rest lanes +NPY_FINLINE npyv_s64 npyv_loadn_tillz_s64(const npy_int64 *ptr, npy_intp stride, npy_uintp nlane) +{ + assert(nlane > 0); + if (nlane == 1) { + return __lsx_vinsgr2vr_d(__lsx_vldi(0), ptr[0], 0); + } + return npyv_loadn_s64(ptr, stride); +} + +//// 64-bit load over 32-bit stride +NPY_FINLINE npyv_s32 npyv_loadn2_till_s32(const npy_int32 *ptr, npy_intp stride, npy_uintp nlane, + npy_int32 fill_lo, npy_int32 fill_hi) +{ + assert(nlane > 0); + if (nlane == 1) { + const __m128i vfill = npyv_set_s32(0, 0, fill_lo, fill_hi); + return 
(npyv_s32)__lsx_vinsgr2vr_d(vfill, *(long *)ptr, 0); + } + return npyv_loadn2_s32(ptr, stride); +} +NPY_FINLINE npyv_s32 npyv_loadn2_tillz_s32(const npy_int32 *ptr, npy_intp stride, npy_uintp nlane) +{ + assert(nlane > 0); + if (nlane == 1) { + return (npyv_s32)__lsx_vinsgr2vr_d(__lsx_vldi(0), *(long *)ptr, 0); + } + return npyv_loadn2_s32(ptr, stride); +} + +//// 128-bit load over 64-bit stride +NPY_FINLINE npyv_s64 npyv_loadn2_till_s64(const npy_int64 *ptr, npy_intp stride, npy_uintp nlane, + npy_int64 fill_lo, npy_int64 fill_hi) +{ assert(nlane > 0); (void)stride; (void)nlane; (void)fill_lo; (void)fill_hi; return npyv_load_s64(ptr); } + +NPY_FINLINE npyv_s64 npyv_loadn2_tillz_s64(const npy_int64 *ptr, npy_intp stride, npy_uintp nlane) +{ assert(nlane > 0); (void)stride; (void)nlane; return npyv_load_s64(ptr); } + +/********************************* + * Partial store + *********************************/ +//// 32 +NPY_FINLINE void npyv_store_till_s32(npy_int32 *ptr, npy_uintp nlane, npyv_s32 a) +{ + assert(nlane > 0); + switch(nlane) { + case 1: + __lsx_vstelm_w(a, ptr, 0, 0); + break; + case 2: + __lsx_vstelm_d(a, (long *)ptr, 0, 0); + break; + case 3: + __lsx_vstelm_d(a, (long *)ptr, 0, 0); + __lsx_vstelm_w(a, ptr, 2<<2, 2); + break; + default: + npyv_store_s32(ptr, a); + } +} +//// 64 +NPY_FINLINE void npyv_store_till_s64(npy_int64 *ptr, npy_uintp nlane, npyv_s64 a) +{ + assert(nlane > 0); + if (nlane == 1) { + __lsx_vstelm_d(a, ptr, 0, 0); + return; + } + npyv_store_s64(ptr, a); +} +//// 64-bit nlane +NPY_FINLINE void npyv_store2_till_s32(npy_int32 *ptr, npy_uintp nlane, npyv_s32 a) +{ npyv_store_till_s64((npy_int64*)ptr, nlane, a); } + +//// 128-bit nlane +NPY_FINLINE void npyv_store2_till_s64(npy_int64 *ptr, npy_uintp nlane, npyv_s64 a) +{ + assert(nlane > 0); (void)nlane; + npyv_store_s64(ptr, a); +} + +/********************************* + * Non-contiguous partial store + *********************************/ +//// 32 +NPY_FINLINE void 
npyv_storen_till_s32(npy_int32 *ptr, npy_intp stride, npy_uintp nlane, npyv_s32 a) +{ + assert(nlane > 0); + __lsx_vstelm_w(a, ptr, 0, 0); + switch(nlane) { + case 1: + return; + case 2: + ptr[stride*1] = __lsx_vpickve2gr_w(a, 1); + return; + case 3: + ptr[stride*1] = __lsx_vpickve2gr_w(a, 1); + ptr[stride*2] = __lsx_vpickve2gr_w(a, 2); + return; + default: + ptr[stride*1] = __lsx_vpickve2gr_w(a, 1); + ptr[stride*2] = __lsx_vpickve2gr_w(a, 2); + ptr[stride*3] = __lsx_vpickve2gr_w(a, 3); + } +} +//// 64 +NPY_FINLINE void npyv_storen_till_s64(npy_int64 *ptr, npy_intp stride, npy_uintp nlane, npyv_s64 a) +{ + assert(nlane > 0); + if (nlane == 1) { + __lsx_vstelm_d(a, ptr, 0, 0); + return; + } + npyv_storen_s64(ptr, stride, a); +} + +//// 64-bit store over 32-bit stride +NPY_FINLINE void npyv_storen2_till_s32(npy_int32 *ptr, npy_intp stride, npy_uintp nlane, npyv_s32 a) +{ + assert(nlane > 0); + npyv_storel_s32(ptr, a); + if (nlane > 1) { + npyv_storeh_s32(ptr + stride, a); + } +} + +//// 128-bit store over 64-bit stride +NPY_FINLINE void npyv_storen2_till_s64(npy_int64 *ptr, npy_intp stride, npy_uintp nlane, npyv_s64 a) +{ assert(nlane > 0); (void)stride; (void)nlane; npyv_store_s64(ptr, a); } + +/***************************************************************** + * Implement partial load/store for u32/f32/u64/f64... 
via casting + *****************************************************************/ +#define NPYV_IMPL_LSX_REST_PARTIAL_TYPES(F_SFX, T_SFX) \ + NPY_FINLINE npyv_##F_SFX npyv_load_till_##F_SFX \ + (const npyv_lanetype_##F_SFX *ptr, npy_uintp nlane, npyv_lanetype_##F_SFX fill) \ + { \ + union { \ + npyv_lanetype_##F_SFX from_##F_SFX; \ + npyv_lanetype_##T_SFX to_##T_SFX; \ + } pun; \ + pun.from_##F_SFX = fill; \ + return npyv_reinterpret_##F_SFX##_##T_SFX(npyv_load_till_##T_SFX( \ + (const npyv_lanetype_##T_SFX *)ptr, nlane, pun.to_##T_SFX \ + )); \ + } \ + NPY_FINLINE npyv_##F_SFX npyv_loadn_till_##F_SFX \ + (const npyv_lanetype_##F_SFX *ptr, npy_intp stride, npy_uintp nlane, \ + npyv_lanetype_##F_SFX fill) \ + { \ + union { \ + npyv_lanetype_##F_SFX from_##F_SFX; \ + npyv_lanetype_##T_SFX to_##T_SFX; \ + } pun; \ + pun.from_##F_SFX = fill; \ + return npyv_reinterpret_##F_SFX##_##T_SFX(npyv_loadn_till_##T_SFX( \ + (const npyv_lanetype_##T_SFX *)ptr, stride, nlane, pun.to_##T_SFX \ + )); \ + } \ + NPY_FINLINE npyv_##F_SFX npyv_load_tillz_##F_SFX \ + (const npyv_lanetype_##F_SFX *ptr, npy_uintp nlane) \ + { \ + return npyv_reinterpret_##F_SFX##_##T_SFX(npyv_load_tillz_##T_SFX( \ + (const npyv_lanetype_##T_SFX *)ptr, nlane \ + )); \ + } \ + NPY_FINLINE npyv_##F_SFX npyv_loadn_tillz_##F_SFX \ + (const npyv_lanetype_##F_SFX *ptr, npy_intp stride, npy_uintp nlane) \ + { \ + return npyv_reinterpret_##F_SFX##_##T_SFX(npyv_loadn_tillz_##T_SFX( \ + (const npyv_lanetype_##T_SFX *)ptr, stride, nlane \ + )); \ + } \ + NPY_FINLINE void npyv_store_till_##F_SFX \ + (npyv_lanetype_##F_SFX *ptr, npy_uintp nlane, npyv_##F_SFX a) \ + { \ + npyv_store_till_##T_SFX( \ + (npyv_lanetype_##T_SFX *)ptr, nlane, \ + npyv_reinterpret_##T_SFX##_##F_SFX(a) \ + ); \ + } \ + NPY_FINLINE void npyv_storen_till_##F_SFX \ + (npyv_lanetype_##F_SFX *ptr, npy_intp stride, npy_uintp nlane, npyv_##F_SFX a) \ + { \ + npyv_storen_till_##T_SFX( \ + (npyv_lanetype_##T_SFX *)ptr, stride, nlane, \ + 
npyv_reinterpret_##T_SFX##_##F_SFX(a) \ + ); \ + } + +NPYV_IMPL_LSX_REST_PARTIAL_TYPES(u32, s32) +NPYV_IMPL_LSX_REST_PARTIAL_TYPES(f32, s32) +NPYV_IMPL_LSX_REST_PARTIAL_TYPES(u64, s64) +NPYV_IMPL_LSX_REST_PARTIAL_TYPES(f64, s64) + +// 128-bit/64-bit stride +#define NPYV_IMPL_LSX_REST_PARTIAL_TYPES_PAIR(F_SFX, T_SFX) \ + NPY_FINLINE npyv_##F_SFX npyv_load2_till_##F_SFX \ + (const npyv_lanetype_##F_SFX *ptr, npy_uintp nlane, \ + npyv_lanetype_##F_SFX fill_lo, npyv_lanetype_##F_SFX fill_hi) \ + { \ + union pun { \ + npyv_lanetype_##F_SFX from_##F_SFX; \ + npyv_lanetype_##T_SFX to_##T_SFX; \ + }; \ + union pun pun_lo; \ + union pun pun_hi; \ + pun_lo.from_##F_SFX = fill_lo; \ + pun_hi.from_##F_SFX = fill_hi; \ + return npyv_reinterpret_##F_SFX##_##T_SFX(npyv_load2_till_##T_SFX( \ + (const npyv_lanetype_##T_SFX *)ptr, nlane, pun_lo.to_##T_SFX, pun_hi.to_##T_SFX \ + )); \ + } \ + NPY_FINLINE npyv_##F_SFX npyv_loadn2_till_##F_SFX \ + (const npyv_lanetype_##F_SFX *ptr, npy_intp stride, npy_uintp nlane, \ + npyv_lanetype_##F_SFX fill_lo, npyv_lanetype_##F_SFX fill_hi) \ + { \ + union pun { \ + npyv_lanetype_##F_SFX from_##F_SFX; \ + npyv_lanetype_##T_SFX to_##T_SFX; \ + }; \ + union pun pun_lo; \ + union pun pun_hi; \ + pun_lo.from_##F_SFX = fill_lo; \ + pun_hi.from_##F_SFX = fill_hi; \ + return npyv_reinterpret_##F_SFX##_##T_SFX(npyv_loadn2_till_##T_SFX( \ + (const npyv_lanetype_##T_SFX *)ptr, stride, nlane, pun_lo.to_##T_SFX, \ + pun_hi.to_##T_SFX \ + )); \ + } \ + NPY_FINLINE npyv_##F_SFX npyv_load2_tillz_##F_SFX \ + (const npyv_lanetype_##F_SFX *ptr, npy_uintp nlane) \ + { \ + return npyv_reinterpret_##F_SFX##_##T_SFX(npyv_load2_tillz_##T_SFX( \ + (const npyv_lanetype_##T_SFX *)ptr, nlane \ + )); \ + } \ + NPY_FINLINE npyv_##F_SFX npyv_loadn2_tillz_##F_SFX \ + (const npyv_lanetype_##F_SFX *ptr, npy_intp stride, npy_uintp nlane) \ + { \ + return npyv_reinterpret_##F_SFX##_##T_SFX(npyv_loadn2_tillz_##T_SFX( \ + (const npyv_lanetype_##T_SFX *)ptr, stride, nlane \ + )); \ + 
} \ + NPY_FINLINE void npyv_store2_till_##F_SFX \ + (npyv_lanetype_##F_SFX *ptr, npy_uintp nlane, npyv_##F_SFX a) \ + { \ + npyv_store2_till_##T_SFX( \ + (npyv_lanetype_##T_SFX *)ptr, nlane, \ + npyv_reinterpret_##T_SFX##_##F_SFX(a) \ + ); \ + } \ + NPY_FINLINE void npyv_storen2_till_##F_SFX \ + (npyv_lanetype_##F_SFX *ptr, npy_intp stride, npy_uintp nlane, npyv_##F_SFX a) \ + { \ + npyv_storen2_till_##T_SFX( \ + (npyv_lanetype_##T_SFX *)ptr, stride, nlane, \ + npyv_reinterpret_##T_SFX##_##F_SFX(a) \ + ); \ + } + +NPYV_IMPL_LSX_REST_PARTIAL_TYPES_PAIR(u32, s32) +NPYV_IMPL_LSX_REST_PARTIAL_TYPES_PAIR(f32, s32) +NPYV_IMPL_LSX_REST_PARTIAL_TYPES_PAIR(u64, s64) +NPYV_IMPL_LSX_REST_PARTIAL_TYPES_PAIR(f64, s64) + +/************************************************************ + * de-interlave load / interleave contiguous store + ************************************************************/ +// two channels +#define NPYV_IMPL_LSX_MEM_INTERLEAVE(SFX, ZSFX) \ + NPY_FINLINE npyv_##SFX##x2 npyv_zip_##SFX(npyv_##SFX, npyv_##SFX); \ + NPY_FINLINE npyv_##SFX##x2 npyv_unzip_##SFX(npyv_##SFX, npyv_##SFX); \ + NPY_FINLINE npyv_##SFX##x2 npyv_load_##SFX##x2( \ + const npyv_lanetype_##SFX *ptr \ + ) { \ + return npyv_unzip_##SFX( \ + npyv_load_##SFX(ptr), npyv_load_##SFX(ptr+npyv_nlanes_##SFX) \ + ); \ + } \ + NPY_FINLINE void npyv_store_##SFX##x2( \ + npyv_lanetype_##SFX *ptr, npyv_##SFX##x2 v \ + ) { \ + npyv_##SFX##x2 zip = npyv_zip_##SFX(v.val[0], v.val[1]); \ + npyv_store_##SFX(ptr, zip.val[0]); \ + npyv_store_##SFX(ptr + npyv_nlanes_##SFX, zip.val[1]); \ + } + +NPYV_IMPL_LSX_MEM_INTERLEAVE(u8, uint8_t); +NPYV_IMPL_LSX_MEM_INTERLEAVE(s8, int8_t) +NPYV_IMPL_LSX_MEM_INTERLEAVE(u16, uint16_t) +NPYV_IMPL_LSX_MEM_INTERLEAVE(s16, int16_t) +NPYV_IMPL_LSX_MEM_INTERLEAVE(u32, uint32_t) +NPYV_IMPL_LSX_MEM_INTERLEAVE(s32, int32_t) +NPYV_IMPL_LSX_MEM_INTERLEAVE(u64, uint64_t) +NPYV_IMPL_LSX_MEM_INTERLEAVE(s64, int64_t) +NPYV_IMPL_LSX_MEM_INTERLEAVE(f32, float) 
+NPYV_IMPL_LSX_MEM_INTERLEAVE(f64, double) + +/********************************* + * Lookup table + *********************************/ +// uses vector as indexes into a table +// that contains 32 elements of float32. +NPY_FINLINE npyv_f32 npyv_lut32_f32(const float *table, npyv_u32 idx) +{ + const int i0 = __lsx_vpickve2gr_wu(idx, 0); + const int i1 = __lsx_vpickve2gr_wu(idx, 1); + const int i2 = __lsx_vpickve2gr_wu(idx, 2); + const int i3 = __lsx_vpickve2gr_wu(idx, 3); + return npyv_set_f32(table[i0], table[i1], table[i2], table[i3]); +} +NPY_FINLINE npyv_u32 npyv_lut32_u32(const npy_uint32 *table, npyv_u32 idx) +{ return npyv_reinterpret_u32_f32(npyv_lut32_f32((const float*)table, idx)); } +NPY_FINLINE npyv_s32 npyv_lut32_s32(const npy_int32 *table, npyv_u32 idx) +{ return npyv_reinterpret_s32_f32(npyv_lut32_f32((const float*)table, idx)); } + +// uses vector as indexes into a table +// that contains 16 elements of float64. +NPY_FINLINE npyv_f64 npyv_lut16_f64(const double *table, npyv_u64 idx) +{ + const int i0 = __lsx_vpickve2gr_wu(idx, 0); + const int i1 = __lsx_vpickve2gr_wu(idx, 2); + return npyv_set_f64(table[i0], table[i1]); +} +NPY_FINLINE npyv_u64 npyv_lut16_u64(const npy_uint64 *table, npyv_u64 idx) +{ return npyv_reinterpret_u64_f64(npyv_lut16_f64((const double*)table, idx)); } +NPY_FINLINE npyv_s64 npyv_lut16_s64(const npy_int64 *table, npyv_u64 idx) +{ return npyv_reinterpret_s64_f64(npyv_lut16_f64((const double*)table, idx)); } + +#endif // _NPY_SIMD_LSX_MEMORY_H diff --git a/numpy/_core/src/common/simd/lsx/misc.h b/numpy/_core/src/common/simd/lsx/misc.h new file mode 100644 index 000000000000..a65eda3c5573 --- /dev/null +++ b/numpy/_core/src/common/simd/lsx/misc.h @@ -0,0 +1,268 @@ +#ifndef NPY_SIMD + #error "Not a standalone header" +#endif +#include +#ifndef _NPY_SIMD_LSX_MISC_H +#define _NPY_SIMD_LSX_MISC_H + +// vector with zero lanes +#define npyv_zero_u8() __lsx_vldi(0) +#define npyv_zero_s8() __lsx_vldi(0) +#define npyv_zero_u16() 
__lsx_vldi(0) +#define npyv_zero_s16() __lsx_vldi(0) +#define npyv_zero_u32() __lsx_vldi(0) +#define npyv_zero_s32() __lsx_vldi(0) +#define npyv_zero_u64() __lsx_vldi(0) +#define npyv_zero_s64() __lsx_vldi(0) +#define npyv_zero_f32() (__m128)__lsx_vldi(0) +#define npyv_zero_f64() (__m128d)__lsx_vldi(0) + +// vector with a specific value set to all lanes +#define npyv_setall_u8(VAL) __lsx_vreplgr2vr_b((unsigned char)(VAL)) +#define npyv_setall_s8(VAL) __lsx_vreplgr2vr_b((signed char)(VAL)) +#define npyv_setall_u16(VAL) __lsx_vreplgr2vr_h((unsigned short)(VAL)) +#define npyv_setall_s16(VAL) __lsx_vreplgr2vr_h((signed short)(VAL)) +#define npyv_setall_u32(VAL) __lsx_vreplgr2vr_w((unsigned int)(VAL)) +#define npyv_setall_s32(VAL) __lsx_vreplgr2vr_w((signed int)(VAL)) +#define npyv_setall_u64(VAL) __lsx_vreplgr2vr_d((unsigned long long)(VAL)) +#define npyv_setall_s64(VAL) __lsx_vreplgr2vr_d((long long)(VAL)) +#define npyv_setall_f32(VAL) (__m128)(v4f32){VAL, VAL, VAL, VAL} +#define npyv_setall_f64(VAL) (__m128d)(v2f64){VAL, VAL} + +/** + * vector with specific values set to each lane and + * set a specific value to all remained lanes + * + * Args that generated by NPYV__SET_FILL_* not going to expand if + * _mm_setr_* are defined as macros. 
+ */ +NPY_FINLINE __m128i npyv__set_u8( + npy_uint8 i0, npy_uint8 i1, npy_uint8 i2, npy_uint8 i3, npy_uint8 i4, npy_uint8 i5, npy_uint8 i6, npy_uint8 i7, + npy_uint8 i8, npy_uint8 i9, npy_uint8 i10, npy_uint8 i11, npy_uint8 i12, npy_uint8 i13, npy_uint8 i14, npy_uint8 i15) +{ + v16u8 vec = {i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11, i12, i13, i14, i15}; + return (__m128i)vec; +} +NPY_FINLINE __m128i npyv__set_s8( + npy_int8 i0, npy_int8 i1, npy_int8 i2, npy_int8 i3, npy_int8 i4, npy_int8 i5, npy_int8 i6, npy_int8 i7, + npy_int8 i8, npy_int8 i9, npy_int8 i10, npy_int8 i11, npy_int8 i12, npy_int8 i13, npy_int8 i14, npy_int8 i15) +{ + v16i8 vec = {i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11, i12, i13, i14, i15}; + return (__m128i)vec; +} +NPY_FINLINE __m128i npyv__set_u16(npy_uint16 i0, npy_uint16 i1, npy_uint16 i2, npy_uint16 i3, npy_uint16 i4, npy_uint16 i5, + npy_uint16 i6, npy_uint16 i7) +{ + v8u16 vec = {i0, i1, i2, i3, i4, i5, i6, i7}; + return (__m128i)vec; +} +NPY_FINLINE __m128i npyv__set_s16(npy_int16 i0, npy_int16 i1, npy_int16 i2, npy_int16 i3, npy_int16 i4, npy_int16 i5, + npy_int16 i6, npy_int16 i7) +{ + v8i16 vec = {i0, i1, i2, i3, i4, i5, i6, i7}; + return (__m128i)vec; +} +NPY_FINLINE __m128i npyv__set_u32(npy_uint32 i0, npy_uint32 i1, npy_uint32 i2, npy_uint32 i3) +{ + v4u32 vec = {i0, i1, i2, i3}; + return (__m128i)vec; +} +NPY_FINLINE __m128i npyv__set_s32(npy_int32 i0, npy_int32 i1, npy_int32 i2, npy_int32 i3) +{ + v4i32 vec = {i0, i1, i2, i3}; + return (__m128i)vec; +} +NPY_FINLINE __m128i npyv__set_u64(npy_uint64 i0, npy_uint64 i1) +{ + v2u64 vec = {i0, i1}; + return (__m128i)vec; +} +NPY_FINLINE __m128i npyv__set_s64(npy_int64 i0, npy_int64 i1) +{ + v2i64 vec = {i0, i1}; + return (__m128i)vec; +} +NPY_FINLINE __m128 npyv__set_f32(float i0, float i1, float i2, float i3) +{ + __m128 vec = {i0, i1, i2, i3}; + return vec; +} +NPY_FINLINE __m128d npyv__set_f64(double i0, double i1) +{ + __m128d vec = {i0, i1}; + return vec; +} +#define 
npyv_setf_u8(FILL, ...) npyv__set_u8(NPYV__SET_FILL_16(char, FILL, __VA_ARGS__)) +#define npyv_setf_s8(FILL, ...) npyv__set_s8(NPYV__SET_FILL_16(char, FILL, __VA_ARGS__)) +#define npyv_setf_u16(FILL, ...) npyv__set_u16(NPYV__SET_FILL_8(short, FILL, __VA_ARGS__)) +#define npyv_setf_s16(FILL, ...) npyv__set_s16(NPYV__SET_FILL_8(short, FILL, __VA_ARGS__)) +#define npyv_setf_u32(FILL, ...) npyv__set_u32(NPYV__SET_FILL_4(int, FILL, __VA_ARGS__)) +#define npyv_setf_s32(FILL, ...) npyv__set_s32(NPYV__SET_FILL_4(int, FILL, __VA_ARGS__)) +#define npyv_setf_u64(FILL, ...) npyv__set_u64(NPYV__SET_FILL_2(npy_int64, FILL, __VA_ARGS__)) +#define npyv_setf_s64(FILL, ...) npyv__set_s64(NPYV__SET_FILL_2(npy_int64, FILL, __VA_ARGS__)) +#define npyv_setf_f32(FILL, ...) npyv__set_f32(NPYV__SET_FILL_4(float, FILL, __VA_ARGS__)) +#define npyv_setf_f64(FILL, ...) npyv__set_f64(NPYV__SET_FILL_2(double, FILL, __VA_ARGS__)) + +// vector with specific values set to each lane and +// set zero to all remained lanes +#define npyv_set_u8(...) npyv_setf_u8(0, __VA_ARGS__) +#define npyv_set_s8(...) npyv_setf_s8(0, __VA_ARGS__) +#define npyv_set_u16(...) npyv_setf_u16(0, __VA_ARGS__) +#define npyv_set_s16(...) npyv_setf_s16(0, __VA_ARGS__) +#define npyv_set_u32(...) npyv_setf_u32(0, __VA_ARGS__) +#define npyv_set_s32(...) npyv_setf_s32(0, __VA_ARGS__) +#define npyv_set_u64(...) npyv_setf_u64(0, __VA_ARGS__) +#define npyv_set_s64(...) npyv_setf_s64(0, __VA_ARGS__) +#define npyv_set_f32(...) npyv_setf_f32(0, __VA_ARGS__) +#define npyv_set_f64(...) 
npyv_setf_f64(0, __VA_ARGS__) + +// Per lane select +NPY_FINLINE __m128i npyv_select_u8(__m128i mask, __m128i a, __m128i b) +{ + return __lsx_vbitsel_v(b, a, mask); +} + +NPY_FINLINE __m128 npyv_select_f32(__m128i mask, __m128 a, __m128 b) +{ + return (__m128)__lsx_vbitsel_v((__m128i)b, (__m128i)a, mask); +} +NPY_FINLINE __m128d npyv_select_f64(__m128i mask, __m128d a, __m128d b) +{ + return (__m128d)__lsx_vbitsel_v((__m128i)b, (__m128i)a, mask); +} + +#define npyv_select_s8 npyv_select_u8 +#define npyv_select_u16 npyv_select_u8 +#define npyv_select_s16 npyv_select_u8 +#define npyv_select_u32 npyv_select_u8 +#define npyv_select_s32 npyv_select_u8 +#define npyv_select_u64 npyv_select_u8 +#define npyv_select_s64 npyv_select_u8 + +// extract the first vector's lane +#define npyv_extract0_u8(A) ((npy_uint8)__lsx_vpickve2gr_bu(A, 0)) +#define npyv_extract0_s8(A) ((npy_int8)__lsx_vpickve2gr_b(A, 0)) +#define npyv_extract0_u16(A) ((npy_uint16)__lsx_vpickve2gr_hu(A, 0)) +#define npyv_extract0_s16(A) ((npy_int16)__lsx_vpickve2gr_h(A, 0)) +#define npyv_extract0_u32(A) ((npy_uint32)__lsx_vpickve2gr_wu(A, 0)) +#define npyv_extract0_s32(A) ((npy_int32)__lsx_vpickve2gr_w(A, 0)) +#define npyv_extract0_u64(A) ((npy_uint64)__lsx_vpickve2gr_du(A, 0)) +#define npyv_extract0_s64(A) ((npy_int64)__lsx_vpickve2gr_d(A, 0)) +#define npyv_extract0_f32(A) A[0] +#define npyv_extract0_f64(A) A[0] + +// Reinterpret +#define npyv_reinterpret_u8_u8(X) X +#define npyv_reinterpret_u8_s8(X) X +#define npyv_reinterpret_u8_u16(X) X +#define npyv_reinterpret_u8_s16(X) X +#define npyv_reinterpret_u8_u32(X) X +#define npyv_reinterpret_u8_s32(X) X +#define npyv_reinterpret_u8_u64(X) X +#define npyv_reinterpret_u8_s64(X) X +#define npyv_reinterpret_u8_f32(X) (__m128i)X +#define npyv_reinterpret_u8_f64(X) (__m128i)X + +#define npyv_reinterpret_s8_s8(X) X +#define npyv_reinterpret_s8_u8(X) X +#define npyv_reinterpret_s8_u16(X) X +#define npyv_reinterpret_s8_s16(X) X +#define npyv_reinterpret_s8_u32(X) X 
+#define npyv_reinterpret_s8_s32(X) X +#define npyv_reinterpret_s8_u64(X) X +#define npyv_reinterpret_s8_s64(X) X +#define npyv_reinterpret_s8_f32(X) (__m128i)X +#define npyv_reinterpret_s8_f64(X) (__m128i)X + +#define npyv_reinterpret_u16_u16(X) X +#define npyv_reinterpret_u16_u8(X) X +#define npyv_reinterpret_u16_s8(X) X +#define npyv_reinterpret_u16_s16(X) X +#define npyv_reinterpret_u16_u32(X) X +#define npyv_reinterpret_u16_s32(X) X +#define npyv_reinterpret_u16_u64(X) X +#define npyv_reinterpret_u16_s64(X) X +#define npyv_reinterpret_u16_f32(X) (__m128i)X +#define npyv_reinterpret_u16_f64(X) (__m128i)X + +#define npyv_reinterpret_s16_s16(X) X +#define npyv_reinterpret_s16_u8(X) X +#define npyv_reinterpret_s16_s8(X) X +#define npyv_reinterpret_s16_u16(X) X +#define npyv_reinterpret_s16_u32(X) X +#define npyv_reinterpret_s16_s32(X) X +#define npyv_reinterpret_s16_u64(X) X +#define npyv_reinterpret_s16_s64(X) X +#define npyv_reinterpret_s16_f32(X) (__m128i)X +#define npyv_reinterpret_s16_f64(X) (__m128i)X + +#define npyv_reinterpret_u32_u32(X) X +#define npyv_reinterpret_u32_u8(X) X +#define npyv_reinterpret_u32_s8(X) X +#define npyv_reinterpret_u32_u16(X) X +#define npyv_reinterpret_u32_s16(X) X +#define npyv_reinterpret_u32_s32(X) X +#define npyv_reinterpret_u32_u64(X) X +#define npyv_reinterpret_u32_s64(X) X +#define npyv_reinterpret_u32_f32(X) (__m128i)X +#define npyv_reinterpret_u32_f64(X) (__m128i)X + +#define npyv_reinterpret_s32_s32(X) X +#define npyv_reinterpret_s32_u8(X) X +#define npyv_reinterpret_s32_s8(X) X +#define npyv_reinterpret_s32_u16(X) X +#define npyv_reinterpret_s32_s16(X) X +#define npyv_reinterpret_s32_u32(X) X +#define npyv_reinterpret_s32_u64(X) X +#define npyv_reinterpret_s32_s64(X) X +#define npyv_reinterpret_s32_f32(X) (__m128i)X +#define npyv_reinterpret_s32_f64(X) (__m128i)X + +#define npyv_reinterpret_u64_u64(X) X +#define npyv_reinterpret_u64_u8(X) X +#define npyv_reinterpret_u64_s8(X) X +#define npyv_reinterpret_u64_u16(X) X 
+#define npyv_reinterpret_u64_s16(X) X +#define npyv_reinterpret_u64_u32(X) X +#define npyv_reinterpret_u64_s32(X) X +#define npyv_reinterpret_u64_s64(X) X +#define npyv_reinterpret_u64_f32(X) (__m128i)X +#define npyv_reinterpret_u64_f64(X) (__m128i)X + +#define npyv_reinterpret_s64_s64(X) X +#define npyv_reinterpret_s64_u8(X) X +#define npyv_reinterpret_s64_s8(X) X +#define npyv_reinterpret_s64_u16(X) X +#define npyv_reinterpret_s64_s16(X) X +#define npyv_reinterpret_s64_u32(X) X +#define npyv_reinterpret_s64_s32(X) X +#define npyv_reinterpret_s64_u64(X) X +#define npyv_reinterpret_s64_f32(X) (__m128i)X +#define npyv_reinterpret_s64_f64(X) (__m128i)X + +#define npyv_reinterpret_f32_f32(X) X +#define npyv_reinterpret_f32_u8(X) (__m128)X +#define npyv_reinterpret_f32_s8(X) (__m128)X +#define npyv_reinterpret_f32_u16(X) (__m128)X +#define npyv_reinterpret_f32_s16(X) (__m128)X +#define npyv_reinterpret_f32_u32(X) (__m128)X +#define npyv_reinterpret_f32_s32(X) (__m128)X +#define npyv_reinterpret_f32_u64(X) (__m128)X +#define npyv_reinterpret_f32_s64(X) (__m128)X +#define npyv_reinterpret_f32_f64(X) (__m128)X + +#define npyv_reinterpret_f64_f64(X) X +#define npyv_reinterpret_f64_u8(X) (__m128d)X +#define npyv_reinterpret_f64_s8(X) (__m128d)X +#define npyv_reinterpret_f64_u16(X) (__m128d)X +#define npyv_reinterpret_f64_s16(X) (__m128d)X +#define npyv_reinterpret_f64_u32(X) (__m128d)X +#define npyv_reinterpret_f64_s32(X) (__m128d)X +#define npyv_reinterpret_f64_u64(X) (__m128d)X +#define npyv_reinterpret_f64_s64(X) (__m128d)X +#define npyv_reinterpret_f64_f32(X) (__m128d)X + +// Only required by AVX2/AVX512 +#define npyv_cleanup() ((void)0) + +#endif diff --git a/numpy/_core/src/common/simd/lsx/operators.h b/numpy/_core/src/common/simd/lsx/operators.h new file mode 100644 index 000000000000..f2af02d52632 --- /dev/null +++ b/numpy/_core/src/common/simd/lsx/operators.h @@ -0,0 +1,263 @@ +#ifndef NPY_SIMD + #error "Not a standalone header" +#endif + +#ifndef 
_NPY_SIMD_LSX_OPERATORS_H +#define _NPY_SIMD_LSX_OPERATORS_H + +/*************************** + * Shifting + ***************************/ + +// left +#define npyv_shl_u16(A, C) __lsx_vsll_h(A, npyv_setall_s16(C)) +#define npyv_shl_s16(A, C) __lsx_vsll_h(A, npyv_setall_s16(C)) +#define npyv_shl_u32(A, C) __lsx_vsll_w(A, npyv_setall_s32(C)) +#define npyv_shl_s32(A, C) __lsx_vsll_w(A, npyv_setall_s32(C)) +#define npyv_shl_u64(A, C) __lsx_vsll_d(A, npyv_setall_s64(C)) +#define npyv_shl_s64(A, C) __lsx_vsll_d(A, npyv_setall_s64(C)) + +// left by an immediate constant +#define npyv_shli_u16 __lsx_vslli_h +#define npyv_shli_s16 __lsx_vslli_h +#define npyv_shli_u32 __lsx_vslli_w +#define npyv_shli_s32 __lsx_vslli_w +#define npyv_shli_u64 __lsx_vslli_d +#define npyv_shli_s64 __lsx_vslli_d + +// right +#define npyv_shr_u16(A, C) __lsx_vsrl_h(A, npyv_setall_u16(C)) +#define npyv_shr_s16(A, C) __lsx_vsra_h(A, npyv_setall_u16(C)) +#define npyv_shr_u32(A, C) __lsx_vsrl_w(A, npyv_setall_u32(C)) +#define npyv_shr_s32(A, C) __lsx_vsra_w(A, npyv_setall_u32(C)) +#define npyv_shr_u64(A, C) __lsx_vsrl_d(A, npyv_setall_u64(C)) +#define npyv_shr_s64(A, C) __lsx_vsra_d(A, npyv_setall_u64(C)) + +// Right by an immediate constant +#define npyv_shri_u16 __lsx_vsrli_h +#define npyv_shri_s16 __lsx_vsrai_h +#define npyv_shri_u32 __lsx_vsrli_w +#define npyv_shri_s32 __lsx_vsrai_w +#define npyv_shri_u64 __lsx_vsrli_d +#define npyv_shri_s64 __lsx_vsrai_d + +/*************************** + * Logical + ***************************/ + +// AND +#define npyv_and_u8 __lsx_vand_v +#define npyv_and_s8 __lsx_vand_v +#define npyv_and_u16 __lsx_vand_v +#define npyv_and_s16 __lsx_vand_v +#define npyv_and_u32 __lsx_vand_v +#define npyv_and_s32 __lsx_vand_v +#define npyv_and_u64 __lsx_vand_v +#define npyv_and_s64 __lsx_vand_v +#define npyv_and_f32(A, B) \ + (__m128)__lsx_vand_v((__m128i)A, (__m128i)B) +#define npyv_and_f64(A, B) \ + (__m128d)__lsx_vand_v((__m128i)A, (__m128i)B) +#define npyv_and_b8 __lsx_vand_v 
+#define npyv_and_b16 __lsx_vand_v +#define npyv_and_b32 __lsx_vand_v +#define npyv_and_b64 __lsx_vand_v + +// OR +#define npyv_or_u8 __lsx_vor_v +#define npyv_or_s8 __lsx_vor_v +#define npyv_or_u16 __lsx_vor_v +#define npyv_or_s16 __lsx_vor_v +#define npyv_or_u32 __lsx_vor_v +#define npyv_or_s32 __lsx_vor_v +#define npyv_or_u64 __lsx_vor_v +#define npyv_or_s64 __lsx_vor_v +#define npyv_or_f32(A, B) \ + (__m128)__lsx_vor_v((__m128i)A, (__m128i)B) +#define npyv_or_f64(A, B) \ + (__m128d)__lsx_vor_v((__m128i)A, (__m128i)B) +#define npyv_or_b8 __lsx_vor_v +#define npyv_or_b16 __lsx_vor_v +#define npyv_or_b32 __lsx_vor_v +#define npyv_or_b64 __lsx_vor_v + +// XOR +#define npyv_xor_u8 __lsx_vxor_v +#define npyv_xor_s8 __lsx_vxor_v +#define npyv_xor_u16 __lsx_vxor_v +#define npyv_xor_s16 __lsx_vxor_v +#define npyv_xor_u32 __lsx_vxor_v +#define npyv_xor_s32 __lsx_vxor_v +#define npyv_xor_u64 __lsx_vxor_v +#define npyv_xor_s64 __lsx_vxor_v +#define npyv_xor_f32(A, B) \ + (__m128)__lsx_vxor_v((__m128i)A, (__m128i)B) +#define npyv_xor_f64(A, B) \ + (__m128d)__lsx_vxor_v((__m128i)A, (__m128i)B) +#define npyv_xor_b8 __lsx_vxor_v +#define npyv_xor_b16 __lsx_vxor_v +#define npyv_xor_b32 __lsx_vxor_v +#define npyv_xor_b64 __lsx_vxor_v + +// NOT +#define npyv_not_u8(A) __lsx_vxori_b((__m128i)A, 0xff) +#define npyv_not_s8 npyv_not_u8 +#define npyv_not_u16 npyv_not_u8 +#define npyv_not_s16 npyv_not_u8 +#define npyv_not_u32 npyv_not_u8 +#define npyv_not_s32 npyv_not_u8 +#define npyv_not_u64 npyv_not_u8 +#define npyv_not_s64 npyv_not_u8 +#define npyv_not_f32 (__m128)npyv_not_u8 +#define npyv_not_f64 (__m128d)npyv_not_u8 +#define npyv_not_b8 npyv_not_u8 +#define npyv_not_b16 npyv_not_u8 +#define npyv_not_b32 npyv_not_u8 +#define npyv_not_b64 npyv_not_u8 + +// ANDC, ORC and XNOR +#define npyv_andc_u8(A, B) __lsx_vandn_v(B, A) +#define npyv_andc_b8(A, B) __lsx_vandn_v(B, A) +#define npyv_orc_b8(A, B) npyv_or_b8(npyv_not_b8(B), A) +#define npyv_xnor_b8 __lsx_vseq_b + 
+/*************************** + * Comparison + ***************************/ + +// Int Equal +#define npyv_cmpeq_u8 __lsx_vseq_b +#define npyv_cmpeq_s8 __lsx_vseq_b +#define npyv_cmpeq_u16 __lsx_vseq_h +#define npyv_cmpeq_s16 __lsx_vseq_h +#define npyv_cmpeq_u32 __lsx_vseq_w +#define npyv_cmpeq_s32 __lsx_vseq_w +#define npyv_cmpeq_u64 __lsx_vseq_d +#define npyv_cmpeq_s64 __lsx_vseq_d + +// Int Not Equal +#define npyv_cmpneq_u8(A, B) npyv_not_u8(npyv_cmpeq_u8(A, B)) +#define npyv_cmpneq_u16(A, B) npyv_not_u16(npyv_cmpeq_u16(A, B)) +#define npyv_cmpneq_u32(A, B) npyv_not_u32(npyv_cmpeq_u32(A, B)) +#define npyv_cmpneq_u64(A, B) npyv_not_u64(npyv_cmpeq_u64(A, B)) +#define npyv_cmpneq_s8 npyv_cmpneq_u8 +#define npyv_cmpneq_s16 npyv_cmpneq_u16 +#define npyv_cmpneq_s32 npyv_cmpneq_u32 +#define npyv_cmpneq_s64 npyv_cmpneq_u64 + +// signed greater than +#define npyv_cmpgt_s8(A, B) __lsx_vslt_b(B, A) +#define npyv_cmpgt_s16(A, B) __lsx_vslt_h(B, A) +#define npyv_cmpgt_s32(A, B) __lsx_vslt_w(B, A) +#define npyv_cmpgt_s64(A, B) __lsx_vslt_d(B, A) + +// signed greater than or equal +#define npyv_cmpge_s8(A, B) __lsx_vsle_b(B, A) +#define npyv_cmpge_s16(A, B) __lsx_vsle_h(B, A) +#define npyv_cmpge_s32(A, B) __lsx_vsle_w(B, A) +#define npyv_cmpge_s64(A, B) __lsx_vsle_d(B, A) + +// unsigned greater than +#define npyv_cmpgt_u8(A, B) __lsx_vslt_bu(B, A) +#define npyv_cmpgt_u16(A, B) __lsx_vslt_hu(B, A) +#define npyv_cmpgt_u32(A, B) __lsx_vslt_wu(B, A) +#define npyv_cmpgt_u64(A, B) __lsx_vslt_du(B, A) + +// unsigned greater than or equal +#define npyv_cmpge_u8(A, B) __lsx_vsle_bu(B, A) +#define npyv_cmpge_u16(A, B) __lsx_vsle_hu(B, A) +#define npyv_cmpge_u32(A, B) __lsx_vsle_wu(B, A) +#define npyv_cmpge_u64(A, B) __lsx_vsle_du(B, A) + +// less than +#define npyv_cmplt_u8 __lsx_vslt_bu +#define npyv_cmplt_s8 __lsx_vslt_b +#define npyv_cmplt_u16 __lsx_vslt_hu +#define npyv_cmplt_s16 __lsx_vslt_h +#define npyv_cmplt_u32 __lsx_vslt_wu +#define npyv_cmplt_s32 __lsx_vslt_w +#define 
npyv_cmplt_u64 __lsx_vslt_du +#define npyv_cmplt_s64 __lsx_vslt_d + +// less than or equal +#define npyv_cmple_u8 __lsx_vsle_bu +#define npyv_cmple_s8 __lsx_vsle_b +#define npyv_cmple_u16 __lsx_vsle_hu +#define npyv_cmple_s16 __lsx_vsle_h +#define npyv_cmple_u32 __lsx_vsle_wu +#define npyv_cmple_s32 __lsx_vsle_w +#define npyv_cmple_u64 __lsx_vsle_du +#define npyv_cmple_s64 __lsx_vsle_d + +// precision comparison +#define npyv_cmpeq_f32 __lsx_vfcmp_ceq_s +#define npyv_cmpeq_f64 __lsx_vfcmp_ceq_d +#define npyv_cmpneq_f32 __lsx_vfcmp_cune_s +#define npyv_cmpneq_f64 __lsx_vfcmp_cune_d +#define npyv_cmplt_f32 __lsx_vfcmp_clt_s +#define npyv_cmplt_f64 __lsx_vfcmp_clt_d +#define npyv_cmple_f32 __lsx_vfcmp_cle_s +#define npyv_cmple_f64 __lsx_vfcmp_cle_d +#define npyv_cmpgt_f32(A, B) npyv_cmplt_f32(B, A) +#define npyv_cmpgt_f64(A, B) npyv_cmplt_f64(B, A) +#define npyv_cmpge_f32(A, B) npyv_cmple_f32(B, A) +#define npyv_cmpge_f64(A, B) npyv_cmple_f64(B, A) + +// check special cases +NPY_FINLINE npyv_b32 npyv_notnan_f32(npyv_f32 a) +{ return __lsx_vfcmp_cor_s(a, a); } //!nan,return:ffffffff +NPY_FINLINE npyv_b64 npyv_notnan_f64(npyv_f64 a) +{ return __lsx_vfcmp_cor_d(a, a); } + +// Test cross all vector lanes +// any: returns true if any of the elements is not equal to zero +// all: returns true if all elements are not equal to zero +#define NPYV_IMPL_LSX_ANYALL(SFX) \ + NPY_FINLINE bool npyv_any_##SFX(npyv_##SFX a) \ + { return __lsx_vmsknz_b((__m128i)a)[0] != 0; } \ + NPY_FINLINE bool npyv_all_##SFX(npyv_##SFX a) \ + { return __lsx_vmsknz_b((__m128i)a)[0] == 0xffff; } +NPYV_IMPL_LSX_ANYALL(b8) +NPYV_IMPL_LSX_ANYALL(b16) +NPYV_IMPL_LSX_ANYALL(b32) +NPYV_IMPL_LSX_ANYALL(b64) +#undef NPYV_IMPL_LSX_ANYALL + +#define NPYV_IMPL_LSX_ANYALL(SFX, TSFX, MASK) \ + NPY_FINLINE bool npyv_any_##SFX(npyv_##SFX a) \ + { \ + return __lsx_vmsknz_b(a)[0] != 0; \ + } \ + NPY_FINLINE bool npyv_all_##SFX(npyv_##SFX a) \ + { \ + return __lsx_vmsknz_b( \ + __lsx_vseq_##TSFX(a, npyv_zero_##SFX()) \ 
+ )[0] == 0; \ + } +NPYV_IMPL_LSX_ANYALL(u8, b, 0xffff) +NPYV_IMPL_LSX_ANYALL(s8, b, 0xffff) +NPYV_IMPL_LSX_ANYALL(u16, h, 0xffff) +NPYV_IMPL_LSX_ANYALL(s16, h, 0xffff) +NPYV_IMPL_LSX_ANYALL(u32, w, 0xffff) +NPYV_IMPL_LSX_ANYALL(s32, w, 0xffff) +NPYV_IMPL_LSX_ANYALL(u64, d, 0xffff) +NPYV_IMPL_LSX_ANYALL(s64, d, 0xffff) +#undef NPYV_IMPL_LSX_ANYALL + +NPY_FINLINE bool npyv_any_f32(npyv_f32 a) +{ + return __lsx_vmsknz_b(__lsx_vfcmp_ceq_s(a, npyv_zero_f32()))[0] != 0xffff; +} +NPY_FINLINE bool npyv_all_f32(npyv_f32 a) +{ + return __lsx_vmsknz_b(__lsx_vfcmp_ceq_s(a, npyv_zero_f32()))[0] == 0; +} +NPY_FINLINE bool npyv_any_f64(npyv_f64 a) +{ + return __lsx_vmsknz_b(__lsx_vfcmp_ceq_d(a, npyv_zero_f64()))[0] != 0xffff; +} +NPY_FINLINE bool npyv_all_f64(npyv_f64 a) +{ + return __lsx_vmsknz_b(__lsx_vfcmp_ceq_d(a, npyv_zero_f64()))[0] == 0; +} +#endif // _NPY_SIMD_LSX_OPERATORS_H diff --git a/numpy/_core/src/common/simd/lsx/reorder.h b/numpy/_core/src/common/simd/lsx/reorder.h new file mode 100644 index 000000000000..0c8f07a8c207 --- /dev/null +++ b/numpy/_core/src/common/simd/lsx/reorder.h @@ -0,0 +1,186 @@ +#ifndef NPY_SIMD + #error "Not a standalone header" +#endif + +#ifndef _NPY_SIMD_LSX_REORDER_H +#define _NPY_SIMD_LSX_REORDER_H + +// combine lower part of two vectors +#define npyv_combinel_u8(A, B) __lsx_vilvl_d(B, A) +#define npyv_combinel_s8(A, B) __lsx_vilvl_d(B, A) +#define npyv_combinel_u16(A, B) __lsx_vilvl_d(B, A) +#define npyv_combinel_s16(A, B) __lsx_vilvl_d(B, A) +#define npyv_combinel_u32(A, B) __lsx_vilvl_d(B, A) +#define npyv_combinel_s32(A, B) __lsx_vilvl_d(B, A) +#define npyv_combinel_u64(A, B) __lsx_vilvl_d(B, A) +#define npyv_combinel_s64(A, B) __lsx_vilvl_d(B, A) +#define npyv_combinel_f32(A, B) (__m128)(__lsx_vilvl_d((__m128i)B, (__m128i)A)) +#define npyv_combinel_f64(A, B) (__m128d)(__lsx_vilvl_d((__m128i)B, (__m128i)A)) + +// combine higher part of two vectors +#define npyv_combineh_u8(A, B) __lsx_vilvh_d(B, A) +#define npyv_combineh_s8(A, B) 
__lsx_vilvh_d(B, A) +#define npyv_combineh_u16(A, B) __lsx_vilvh_d(B, A) +#define npyv_combineh_s16(A, B) __lsx_vilvh_d(B, A) +#define npyv_combineh_u32(A, B) __lsx_vilvh_d(B, A) +#define npyv_combineh_s32(A, B) __lsx_vilvh_d(B, A) +#define npyv_combineh_u64(A, B) __lsx_vilvh_d(B, A) +#define npyv_combineh_s64(A, B) __lsx_vilvh_d(B, A) +#define npyv_combineh_f32(A, B) (__m128)(__lsx_vilvh_d((__m128i)B, (__m128i)A)) +#define npyv_combineh_f64(A, B) (__m128d)(__lsx_vilvh_d((__m128i)B, (__m128i)A)) + +// combine two vectors from lower and higher parts of two other vectors +NPY_FINLINE npyv_s64x2 npyv__combine(__m128i a, __m128i b) +{ + npyv_s64x2 r; + r.val[0] = npyv_combinel_u8(a, b); + r.val[1] = npyv_combineh_u8(a, b); + return r; +} +NPY_FINLINE npyv_f32x2 npyv_combine_f32(__m128 a, __m128 b) +{ + npyv_f32x2 r; + r.val[0] = npyv_combinel_f32(a, b); + r.val[1] = npyv_combineh_f32(a, b); + return r; +} +NPY_FINLINE npyv_f64x2 npyv_combine_f64(__m128d a, __m128d b) +{ + npyv_f64x2 r; + r.val[0] = npyv_combinel_f64(a, b); + r.val[1] = npyv_combineh_f64(a, b); + return r; +} +#define npyv_combine_u8 npyv__combine +#define npyv_combine_s8 npyv__combine +#define npyv_combine_u16 npyv__combine +#define npyv_combine_s16 npyv__combine +#define npyv_combine_u32 npyv__combine +#define npyv_combine_s32 npyv__combine +#define npyv_combine_u64 npyv__combine +#define npyv_combine_s64 npyv__combine + +// interleave two vectors +#define NPYV_IMPL_LSX_ZIP(T_VEC, SFX, INTR_SFX) \ + NPY_FINLINE T_VEC##x2 npyv_zip_##SFX(T_VEC a, T_VEC b) \ + { \ + T_VEC##x2 r; \ + r.val[0] = __lsx_vilvl_##INTR_SFX(b, a); \ + r.val[1] = __lsx_vilvh_##INTR_SFX(b, a); \ + return r; \ + } + +NPYV_IMPL_LSX_ZIP(npyv_u8, u8, b) +NPYV_IMPL_LSX_ZIP(npyv_s8, s8, b) +NPYV_IMPL_LSX_ZIP(npyv_u16, u16, h) +NPYV_IMPL_LSX_ZIP(npyv_s16, s16, h) +NPYV_IMPL_LSX_ZIP(npyv_u32, u32, w) +NPYV_IMPL_LSX_ZIP(npyv_s32, s32, w) +NPYV_IMPL_LSX_ZIP(npyv_u64, u64, d) +NPYV_IMPL_LSX_ZIP(npyv_s64, s64, d) + +NPY_FINLINE npyv_f32x2 
npyv_zip_f32(__m128 a, __m128 b) +{ + npyv_f32x2 r; + r.val[0] = (__m128)(__lsx_vilvl_w((__m128i)b, (__m128i)a)); + r.val[1] = (__m128)(__lsx_vilvh_w((__m128i)b, (__m128i)a)); + return r; +} +NPY_FINLINE npyv_f64x2 npyv_zip_f64(__m128d a, __m128d b) +{ + npyv_f64x2 r; + r.val[0] = (__m128d)(__lsx_vilvl_d((__m128i)b, (__m128i)a)); + r.val[1] = (__m128d)(__lsx_vilvh_d((__m128i)b, (__m128i)a)); + return r; +} + +// deinterleave two vectors +#define NPYV_IMPL_LSX_UNZIP(T_VEC, SFX, INTR_SFX) \ + NPY_FINLINE T_VEC##x2 npyv_unzip_##SFX(T_VEC a, T_VEC b) \ + { \ + T_VEC##x2 r; \ + r.val[0] = __lsx_vpickev_##INTR_SFX(b, a); \ + r.val[1] = __lsx_vpickod_##INTR_SFX(b, a); \ + return r; \ + } + +NPYV_IMPL_LSX_UNZIP(npyv_u8, u8, b) +NPYV_IMPL_LSX_UNZIP(npyv_s8, s8, b) +NPYV_IMPL_LSX_UNZIP(npyv_u16, u16, h) +NPYV_IMPL_LSX_UNZIP(npyv_s16, s16, h) +NPYV_IMPL_LSX_UNZIP(npyv_u32, u32, w) +NPYV_IMPL_LSX_UNZIP(npyv_s32, s32, w) +NPYV_IMPL_LSX_UNZIP(npyv_u64, u64, d) +NPYV_IMPL_LSX_UNZIP(npyv_s64, s64, d) + +NPY_FINLINE npyv_f32x2 npyv_unzip_f32(__m128 a, __m128 b) +{ + npyv_f32x2 r; + r.val[0] = (__m128)(__lsx_vpickev_w((__m128i)b, (__m128i)a)); + r.val[1] = (__m128)(__lsx_vpickod_w((__m128i)b, (__m128i)a)); + return r; +} +NPY_FINLINE npyv_f64x2 npyv_unzip_f64(__m128d a, __m128d b) +{ + npyv_f64x2 r; + r.val[0] = (__m128d)(__lsx_vpickev_d((__m128i)b, (__m128i)a)); + r.val[1] = (__m128d)(__lsx_vpickod_d((__m128i)b, (__m128i)a)); + return r; +} + +// Reverse elements of each 64-bit lane +NPY_FINLINE npyv_u8 npyv_rev64_u8(npyv_u8 a) +{ + v16u8 idx = {7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}; + return __lsx_vshuf_b(a, a, (__m128i)idx); +} + +#define npyv_rev64_s8 npyv_rev64_u8 + +NPY_FINLINE npyv_u16 npyv_rev64_u16(npyv_u16 a) +{ + v8u16 idx = {3, 2, 1, 0, 7, 6, 5, 4}; + return __lsx_vshuf_h((__m128i)idx, a, a); +} + +#define npyv_rev64_s16 npyv_rev64_u16 + +NPY_FINLINE npyv_u32 npyv_rev64_u32(npyv_u32 a) +{ + v4u32 idx = {1, 0, 3, 2}; + return __lsx_vshuf_w((__m128i)idx, a, 
a); +} +#define npyv_rev64_s32 npyv_rev64_u32 + +NPY_FINLINE npyv_f32 npyv_rev64_f32(npyv_f32 a) +{ + v4i32 idx = {1, 0, 3, 2}; + return (v4f32)__lsx_vshuf_w((__m128i)idx, (__m128i)a, (__m128i)a); +} + +// Permuting the elements of each 128-bit lane by immediate index for +// each element. +#define npyv_permi128_u32(A, E0, E1, E2, E3) \ + npyv_set_u32( \ + __lsx_vpickve2gr_wu(A, E0), __lsx_vpickve2gr_wu(A, E1), \ + __lsx_vpickve2gr_wu(A, E2), __lsx_vpickve2gr_wu(A, E3) \ + ) +#define npyv_permi128_s32(A, E0, E1, E2, E3) \ + npyv_set_s32( \ + __lsx_vpickve2gr_w(A, E0), __lsx_vpickve2gr_w(A, E1), \ + __lsx_vpickve2gr_w(A, E2), __lsx_vpickve2gr_w(A, E3) \ + ) +#define npyv_permi128_u64(A, E0, E1) \ + npyv_set_u64( \ + __lsx_vpickve2gr_du(A, E0), __lsx_vpickve2gr_du(A, E1) \ + ) +#define npyv_permi128_s64(A, E0, E1) \ + npyv_set_s64( \ + __lsx_vpickve2gr_d(A, E0), __lsx_vpickve2gr_d(A, E1) \ + ) +#define npyv_permi128_f32(A, E0, E1, E2, E3) \ + (__m128)__lsx_vshuf_w((__m128i)(v4u32){E0, E1, E2, E3}, (__m128i)A, (__m128i)A) + +#define npyv_permi128_f64(A, E0, E1) \ + (__m128d)__lsx_vshuf_d((__m128i){E0, E1}, (__m128i)A, (__m128i)A) +#endif // _NPY_SIMD_LSX_REORDER_H diff --git a/numpy/_core/src/common/simd/neon/math.h b/numpy/_core/src/common/simd/neon/math.h index 58d14809fbfe..76c5b58be788 100644 --- a/numpy/_core/src/common/simd/neon/math.h +++ b/numpy/_core/src/common/simd/neon/math.h @@ -28,11 +28,13 @@ NPY_FINLINE npyv_f32 npyv_square_f32(npyv_f32 a) // Based on ARM doc, see https://developer.arm.com/documentation/dui0204/j/CIHDIACI NPY_FINLINE npyv_f32 npyv_sqrt_f32(npyv_f32 a) { + const npyv_f32 one = vdupq_n_f32(1.0f); const npyv_f32 zero = vdupq_n_f32(0.0f); const npyv_u32 pinf = vdupq_n_u32(0x7f800000); npyv_u32 is_zero = vceqq_f32(a, zero), is_inf = vceqq_u32(vreinterpretq_u32_f32(a), pinf); - // guard against floating-point division-by-zero error - npyv_f32 guard_byz = vbslq_f32(is_zero, vreinterpretq_f32_u32(pinf), a); + npyv_u32 is_special = 
vorrq_u32(is_zero, is_inf); + // guard against division-by-zero and infinity input to vrsqrte to avoid invalid fp error + npyv_f32 guard_byz = vbslq_f32(is_special, one, a); // estimate to (1/√a) npyv_f32 rsqrte = vrsqrteq_f32(guard_byz); /** @@ -47,10 +49,8 @@ NPY_FINLINE npyv_f32 npyv_square_f32(npyv_f32 a) rsqrte = vmulq_f32(vrsqrtsq_f32(vmulq_f32(a, rsqrte), rsqrte), rsqrte); // a * (1/√a) npyv_f32 sqrt = vmulq_f32(a, rsqrte); - // return zero if the a is zero - // - return zero if a is zero. - // - return positive infinity if a is positive infinity - return vbslq_f32(vorrq_u32(is_zero, is_inf), a, sqrt); + // Handle special cases: return a for zeros and positive infinities + return vbslq_f32(is_special, a, sqrt); } #endif // NPY_SIMD_F64 diff --git a/numpy/_core/src/common/simd/neon/memory.h b/numpy/_core/src/common/simd/neon/memory.h index e7503b822e03..777cb87f5bab 100644 --- a/numpy/_core/src/common/simd/neon/memory.h +++ b/numpy/_core/src/common/simd/neon/memory.h @@ -584,7 +584,7 @@ NPYV_IMPL_NEON_REST_PARTIAL_TYPES_PAIR(f64, s64) #endif /************************************************************ - * de-interlave load / interleave contiguous store + * de-interleave load / interleave contiguous store ************************************************************/ // two channels #define NPYV_IMPL_NEON_MEM_INTERLEAVE(SFX, T_PTR) \ diff --git a/numpy/_core/src/common/simd/simd.h b/numpy/_core/src/common/simd/simd.h index 2d9d48cf1cdd..fd1be6e0c867 100644 --- a/numpy/_core/src/common/simd/simd.h +++ b/numpy/_core/src/common/simd/simd.h @@ -1,5 +1,7 @@ #ifndef _NPY_SIMD_H_ #define _NPY_SIMD_H_ + +#include /* for alignof until C23 */ /** * the NumPy C SIMD vectorization interface "NPYV" are types and functions intended * to simplify vectorization of code on different platforms, currently supports @@ -21,7 +23,7 @@ extern "C" { /* * clang commit an aggressive optimization behaviour when flag `-ftrapping-math` * isn't fully supported that's present at -O1 or 
greater. When partially loading a - * vector register for a operations that requires to fill up the remaining lanes + * vector register for an operation that requires to fill up the remaining lanes * with certain value for example divide operation needs to fill the remaining value * with non-zero integer to avoid fp exception divide-by-zero. * clang optimizer notices that the entire register is not needed for the store @@ -85,6 +87,10 @@ typedef double npyv_lanetype_f64; #include "neon/neon.h" #endif +#ifdef NPY_HAVE_LSX + #include "lsx/lsx.h" +#endif + #ifndef NPY_SIMD /// SIMD width in bits or 0 if there's no SIMD extension available. #define NPY_SIMD 0 @@ -123,10 +129,11 @@ typedef double npyv_lanetype_f64; * acceptable limit of strides before using any of non-contiguous load/store intrinsics. * * For instance: - * npy_intp ld_stride = step[0] / sizeof(float); - * npy_intp st_stride = step[1] / sizeof(float); * - * if (npyv_loadable_stride_f32(ld_stride) && npyv_storable_stride_f32(st_stride)) { + * if (npyv_loadable_stride_f32(steps[0]) && npyv_storable_stride_f32(steps[1])) { + * // Strides are now guaranteed to be a multiple and compatible + * npy_intp ld_stride = steps[0] / sizeof(float); + * npy_intp st_stride = steps[1] / sizeof(float); * for (;;) * npyv_f32 a = npyv_loadn_f32(ld_pointer, ld_stride); * // ... @@ -134,7 +141,7 @@ typedef double npyv_lanetype_f64; * } * else { * for (;;) - * // C scalars + * // C scalars, use byte steps/strides. * } */ #ifndef NPY_SIMD_MAXLOAD_STRIDE32 @@ -149,11 +156,29 @@ typedef double npyv_lanetype_f64; #ifndef NPY_SIMD_MAXSTORE_STRIDE64 #define NPY_SIMD_MAXSTORE_STRIDE64 0 #endif -#define NPYV_IMPL_MAXSTRIDE(SFX, MAXLOAD, MAXSTORE) \ - NPY_FINLINE int npyv_loadable_stride_##SFX(npy_intp stride) \ - { return MAXLOAD > 0 ? llabs(stride) <= MAXLOAD : 1; } \ - NPY_FINLINE int npyv_storable_stride_##SFX(npy_intp stride) \ - { return MAXSTORE > 0 ? 
llabs(stride) <= MAXSTORE : 1; } +#define NPYV_IMPL_MAXSTRIDE(SFX, MAXLOAD, MAXSTORE) \ + NPY_FINLINE int \ + npyv_loadable_stride_##SFX(npy_intp stride) \ + { \ + if (alignof(npyv_lanetype_##SFX) != sizeof(npyv_lanetype_##SFX) && \ + stride % sizeof(npyv_lanetype_##SFX) != 0) { \ + /* stride not a multiple of itemsize, cannot handle. */ \ + return 0; \ + } \ + stride = stride / sizeof(npyv_lanetype_##SFX); \ + return MAXLOAD > 0 ? llabs(stride) <= MAXLOAD : 1; \ + } \ + NPY_FINLINE int \ + npyv_storable_stride_##SFX(npy_intp stride) \ + { \ + if (alignof(npyv_lanetype_##SFX) != sizeof(npyv_lanetype_##SFX) && \ + stride % sizeof(npyv_lanetype_##SFX) != 0) { \ + /* stride not a multiple of itemsize, cannot handle. */ \ + return 0; \ + } \ + stride = stride / sizeof(npyv_lanetype_##SFX); \ + return MAXSTORE > 0 ? llabs(stride) <= MAXSTORE : 1; \ + } #if NPY_SIMD NPYV_IMPL_MAXSTRIDE(u32, NPY_SIMD_MAXLOAD_STRIDE32, NPY_SIMD_MAXSTORE_STRIDE32) NPYV_IMPL_MAXSTRIDE(s32, NPY_SIMD_MAXLOAD_STRIDE32, NPY_SIMD_MAXSTORE_STRIDE32) diff --git a/numpy/_core/src/common/simd/simd.hpp b/numpy/_core/src/common/simd/simd.hpp new file mode 100644 index 000000000000..40556a68c59d --- /dev/null +++ b/numpy/_core/src/common/simd/simd.hpp @@ -0,0 +1,86 @@ +#ifndef NUMPY__CORE_SRC_COMMON_SIMD_SIMD_HPP_ +#define NUMPY__CORE_SRC_COMMON_SIMD_SIMD_HPP_ + +/** + * This header provides a thin wrapper over Google's Highway SIMD library. + * + * The wrapper aims to simplify the SIMD interface of Google's Highway by + * get ride of its class tags and use lane types directly which can be deduced + * from the args in most cases. + */ +/** + * Since `NPY_SIMD` is only limited to NumPy C universal intrinsics, + * `NPY_HWY` is defined to indicate the SIMD availability for Google's Highway + * C++ code. + * + * Highway SIMD is only available when optimization is enabled. + * When NPY_DISABLE_OPTIMIZATION is defined, SIMD operations are disabled + * and the code falls back to scalar implementations. 
+ */ +#ifndef NPY_DISABLE_OPTIMIZATION +#include + +/** + * We avoid using Highway scalar operations for the following reasons: + * + * 1. NumPy already provides optimized kernels for scalar operations. Using these + * existing implementations is more consistent with NumPy's architecture and + * allows for compiler optimizations specific to standard library calls. + * + * 2. Not all Highway intrinsics are fully supported in scalar mode, which could + * lead to compilation errors or unexpected behavior for certain operations. + * + * 3. For NumPy's strict IEEE 754 floating-point compliance requirements, direct scalar + * implementations offer more predictable behavior than EMU128. + * + * Therefore, we only enable Highway SIMD when targeting actual SIMD instruction sets. + */ +#define NPY_HWY ((HWY_TARGET != HWY_SCALAR) && (HWY_TARGET != HWY_EMU128)) + +// Indicates if the SIMD operations are available for float16. +#define NPY_HWY_F16 (NPY_HWY && HWY_HAVE_FLOAT16) +// Note: Highway requires SIMD extentions with native float32 support, so we don't need +// to check for it. + +// Indicates if the SIMD operations are available for float64. +#define NPY_HWY_F64 (NPY_HWY && HWY_HAVE_FLOAT64) + +// Indicates if the SIMD floating operations are natively supports fma. +#define NPY_HWY_FMA (NPY_HWY && HWY_NATIVE_FMA) + +#else +#define NPY_HWY 0 +#define NPY_HWY_F16 0 +#define NPY_HWY_F64 0 +#define NPY_HWY_FMA 0 +#endif + +namespace np { + +/// Represents the max SIMD width supported by the platform. +namespace simd { +#if NPY_HWY +/// The highway namespace alias. +/// We can not import all the symbols from the HWY_NAMESPACE because it will +/// conflict with the existing symbols in the numpy namespace. +namespace hn = hwy::HWY_NAMESPACE; +// internaly used by the template header +template +using _Tag = hn::ScalableTag; +#endif +#include "simd.inc.hpp" +} // namespace simd + +/// Represents the 128-bit SIMD width. 
+namespace simd128 { +#if NPY_HWY +namespace hn = hwy::HWY_NAMESPACE; +template +using _Tag = hn::Full128; +#endif +#include "simd.inc.hpp" +} // namespace simd128 + +} // namespace np + +#endif // NUMPY__CORE_SRC_COMMON_SIMD_SIMD_HPP_ diff --git a/numpy/_core/src/common/simd/simd.inc.hpp b/numpy/_core/src/common/simd/simd.inc.hpp new file mode 100644 index 000000000000..f4a2540927dd --- /dev/null +++ b/numpy/_core/src/common/simd/simd.inc.hpp @@ -0,0 +1,132 @@ +#ifndef NPY_HWY +#error "This is not a standalone header. Include simd.hpp instead." +#define NPY_HWY 1 // Prevent editors from graying out the happy branch +#endif + +// Using anonymous namespace instead of inline to ensure each translation unit +// gets its own copy of constants based on local compilation flags +namespace { + +// NOTE: This file is included by simd.hpp multiple times with different namespaces +// so avoid including any headers here + +/** + * Determines whether the specified lane type is supported by the SIMD extension. + * Always defined as false when SIMD is not enabled, so it can be used in SFINAE. + * + * @tparam TLane The lane type to check for support. + */ +template +constexpr bool kSupportLane = NPY_HWY != 0; + +#if NPY_HWY +// Define lane type support based on Highway capabilities +template <> +constexpr bool kSupportLane = HWY_HAVE_FLOAT16 != 0; +template <> +constexpr bool kSupportLane = HWY_HAVE_FLOAT64 != 0; +template <> +constexpr bool kSupportLane = + HWY_HAVE_FLOAT64 != 0 && sizeof(long double) == sizeof(double); + +/// Maximum number of lanes supported by the SIMD extension for the specified lane type. +template +constexpr size_t kMaxLanes = HWY_MAX_LANES_D(_Tag); + +/// Represents an N-lane vector based on the specified lane type. +/// @tparam TLane The scalar type for each vector lane +template +using Vec = hn::Vec<_Tag>; + +/// Represents a mask vector with boolean values or as a bitmask. 
+/// @tparam TLane The scalar type the mask corresponds to +template +using Mask = hn::Mask<_Tag>; + +/// Unaligned load of a vector from memory. +template +HWY_API Vec +LoadU(const TLane *ptr) +{ + return hn::LoadU(_Tag(), ptr); +} + +/// Unaligned store of a vector to memory. +template +HWY_API void +StoreU(const Vec &a, TLane *ptr) +{ + hn::StoreU(a, _Tag(), ptr); +} + +/// Returns the number of vector lanes based on the lane type. +template +HWY_API HWY_LANES_CONSTEXPR size_t +Lanes(TLane tag = 0) +{ + return hn::Lanes(_Tag()); +} + +/// Returns an uninitialized N-lane vector. +template +HWY_API Vec +Undefined(TLane tag = 0) +{ + return hn::Undefined(_Tag()); +} + +/// Returns N-lane vector with all lanes equal to zero. +template +HWY_API Vec +Zero(TLane tag = 0) +{ + return hn::Zero(_Tag()); +} + +/// Returns N-lane vector with all lanes equal to the given value of type `TLane`. +template +HWY_API Vec +Set(TLane val) +{ + return hn::Set(_Tag(), val); +} + +/// Converts a mask to a vector based on the specified lane type. +template +HWY_API Vec +VecFromMask(const TMask &m) +{ + return hn::VecFromMask(_Tag(), m); +} + +/// Convert (Reinterpret) an N-lane vector to a different type without modifying the +/// underlying data. 
+template +HWY_API Vec +BitCast(const TVec &v) +{ + return hn::BitCast(_Tag(), v); +} + +// Import common Highway intrinsics +using hn::Abs; +using hn::Add; +using hn::And; +using hn::AndNot; +using hn::Div; +using hn::Eq; +using hn::Ge; +using hn::Gt; +using hn::Le; +using hn::Lt; +using hn::Max; +using hn::Min; +using hn::Mul; +using hn::Or; +using hn::Sqrt; +using hn::Sub; +using hn::Xor; + +#endif // NPY_HWY + +} // namespace diff --git a/numpy/_core/src/common/simd/sse/arithmetic.h b/numpy/_core/src/common/simd/sse/arithmetic.h index 357b136d25cd..b50942ab75ad 100644 --- a/numpy/_core/src/common/simd/sse/arithmetic.h +++ b/numpy/_core/src/common/simd/sse/arithmetic.h @@ -251,9 +251,9 @@ NPY_FINLINE npyv_s64 npyv_divc_s64(npyv_s64 a, const npyv_s64x3 divisor) // q = (a + mulhi) >> sh __m128i q = _mm_add_epi64(a, mulhi); // emulate arithmetic right shift - const __m128i sigb = npyv_setall_s64(1LL << 63); - q = _mm_srl_epi64(_mm_add_epi64(q, sigb), divisor.val[1]); - q = _mm_sub_epi64(q, _mm_srl_epi64(sigb, divisor.val[1])); + const __m128i sbit = npyv_setall_s64(0x8000000000000000); + q = _mm_srl_epi64(_mm_add_epi64(q, sbit), divisor.val[1]); + q = _mm_sub_epi64(q, _mm_srl_epi64(sbit, divisor.val[1])); // q = q - XSIGN(a) // trunc(a/d) = (q ^ dsign) - dsign q = _mm_sub_epi64(q, asign); diff --git a/numpy/_core/src/common/simd/sse/memory.h b/numpy/_core/src/common/simd/sse/memory.h index 90c01ffefedb..0cd52a88fb89 100644 --- a/numpy/_core/src/common/simd/sse/memory.h +++ b/numpy/_core/src/common/simd/sse/memory.h @@ -683,7 +683,7 @@ NPYV_IMPL_SSE_REST_PARTIAL_TYPES_PAIR(u64, s64) NPYV_IMPL_SSE_REST_PARTIAL_TYPES_PAIR(f64, s64) /************************************************************ - * de-interlave load / interleave contiguous store + * de-interleave load / interleave contiguous store ************************************************************/ // two channels #define NPYV_IMPL_SSE_MEM_INTERLEAVE(SFX, ZSFX) \ diff --git 
a/numpy/_core/src/common/simd/vec/memory.h b/numpy/_core/src/common/simd/vec/memory.h index dbcdc16da395..3e8583bed1e0 100644 --- a/numpy/_core/src/common/simd/vec/memory.h +++ b/numpy/_core/src/common/simd/vec/memory.h @@ -623,7 +623,7 @@ NPYV_IMPL_VEC_REST_PARTIAL_TYPES_PAIR(u64, s64) NPYV_IMPL_VEC_REST_PARTIAL_TYPES_PAIR(f64, s64) /************************************************************ - * de-interlave load / interleave contiguous store + * de-interleave load / interleave contiguous store ************************************************************/ // two channels #define NPYV_IMPL_VEC_MEM_INTERLEAVE(SFX) \ diff --git a/numpy/_core/src/common/simd/vec/operators.h b/numpy/_core/src/common/simd/vec/operators.h index 50dac20f6d7d..3a402689d02f 100644 --- a/numpy/_core/src/common/simd/vec/operators.h +++ b/numpy/_core/src/common/simd/vec/operators.h @@ -44,6 +44,10 @@ /*************************** * Logical ***************************/ +#define NPYV_IMPL_VEC_BIN_WRAP(INTRIN, SFX) \ + NPY_FINLINE npyv_##SFX npyv_##INTRIN##_##SFX(npyv_##SFX a, npyv_##SFX b) \ + { return vec_##INTRIN(a, b); } + #define NPYV_IMPL_VEC_BIN_CAST(INTRIN, SFX, CAST) \ NPY_FINLINE npyv_##SFX npyv_##INTRIN##_##SFX(npyv_##SFX a, npyv_##SFX b) \ { return (npyv_##SFX)vec_##INTRIN((CAST)a, (CAST)b); } @@ -54,6 +58,15 @@ #else #define NPYV_IMPL_VEC_BIN_B64(INTRIN) NPYV_IMPL_VEC_BIN_CAST(INTRIN, b64, npyv_b64) #endif + +// Up to clang __VEC__ 10305 logical intrinsics do not support f32 or f64 +#if defined(NPY_HAVE_VX) && defined(__clang__) && __VEC__ < 10305 + #define NPYV_IMPL_VEC_BIN_F32(INTRIN) NPYV_IMPL_VEC_BIN_CAST(INTRIN, f32, npyv_u32) + #define NPYV_IMPL_VEC_BIN_F64(INTRIN) NPYV_IMPL_VEC_BIN_CAST(INTRIN, f64, npyv_u64) +#else + #define NPYV_IMPL_VEC_BIN_F32(INTRIN) NPYV_IMPL_VEC_BIN_WRAP(INTRIN, f32) + #define NPYV_IMPL_VEC_BIN_F64(INTRIN) NPYV_IMPL_VEC_BIN_WRAP(INTRIN, f64) +#endif // AND #define npyv_and_u8 vec_and #define npyv_and_s8 vec_and @@ -64,9 +77,9 @@ #define npyv_and_u64 
vec_and #define npyv_and_s64 vec_and #if NPY_SIMD_F32 - #define npyv_and_f32 vec_and + NPYV_IMPL_VEC_BIN_F32(and) #endif -#define npyv_and_f64 vec_and +NPYV_IMPL_VEC_BIN_F64(and) #define npyv_and_b8 vec_and #define npyv_and_b16 vec_and #define npyv_and_b32 vec_and @@ -82,9 +95,9 @@ NPYV_IMPL_VEC_BIN_B64(and) #define npyv_or_u64 vec_or #define npyv_or_s64 vec_or #if NPY_SIMD_F32 - #define npyv_or_f32 vec_or + NPYV_IMPL_VEC_BIN_F32(or) #endif -#define npyv_or_f64 vec_or +NPYV_IMPL_VEC_BIN_F64(or) #define npyv_or_b8 vec_or #define npyv_or_b16 vec_or #define npyv_or_b32 vec_or @@ -100,9 +113,9 @@ NPYV_IMPL_VEC_BIN_B64(or) #define npyv_xor_u64 vec_xor #define npyv_xor_s64 vec_xor #if NPY_SIMD_F32 - #define npyv_xor_f32 vec_xor + NPYV_IMPL_VEC_BIN_F32(xor) #endif -#define npyv_xor_f64 vec_xor +NPYV_IMPL_VEC_BIN_F64(xor) #define npyv_xor_b8 vec_xor #define npyv_xor_b16 vec_xor #define npyv_xor_b32 vec_xor diff --git a/numpy/_core/src/common/simd/vec/utils.h b/numpy/_core/src/common/simd/vec/utils.h index f8b28cfebd8c..7e4a7b8de8fa 100644 --- a/numpy/_core/src/common/simd/vec/utils.h +++ b/numpy/_core/src/common/simd/vec/utils.h @@ -25,14 +25,16 @@ #ifndef vec_neg #define vec_neg(a) (-(a)) #endif - #ifndef vec_and - #define vec_and(a, b) ((a) & (b)) // Vector AND - #endif - #ifndef vec_or - #define vec_or(a, b) ((a) | (b)) // Vector OR - #endif - #ifndef vec_xor - #define vec_xor(a, b) ((a) ^ (b)) // Vector XOR + #if !(defined(__clang__) && __VEC__ >= 10305) + #ifndef vec_and + #define vec_and(a, b) ((a) & (b)) // Vector AND + #endif + #ifndef vec_or + #define vec_or(a, b) ((a) | (b)) // Vector OR + #endif + #ifndef vec_xor + #define vec_xor(a, b) ((a) ^ (b)) // Vector XOR + #endif #endif #ifndef vec_sl #define vec_sl(a, b) ((a) << (b)) // Vector Shift Left diff --git a/numpy/_core/src/common/ucsnarrow.c b/numpy/_core/src/common/ucsnarrow.c deleted file mode 100644 index 203e02fbb3dd..000000000000 --- a/numpy/_core/src/common/ucsnarrow.c +++ /dev/null @@ -1,71 +0,0 @@ 
-#define NPY_NO_DEPRECATED_API NPY_API_VERSION -#define _MULTIARRAYMODULE - -#define PY_SSIZE_T_CLEAN -#include - -#include "numpy/arrayobject.h" -#include "numpy/npy_math.h" - -#include "npy_config.h" - - -#include "ctors.h" - -/* - * This file originally contained functions only needed on narrow builds of - * Python for converting back and forth between the NumPy Unicode data-type - * (always 4-bytes) and the Python Unicode scalar (2-bytes on a narrow build). - * - * This "narrow" interface is now deprecated in python and unused in NumPy. - */ - -/* - * Returns a PyUnicodeObject initialized from a buffer containing - * UCS4 unicode. - * - * Parameters - * ---------- - * src: char * - * Pointer to buffer containing UCS4 unicode. - * size: Py_ssize_t - * Size of buffer in bytes. - * swap: int - * If true, the data will be swapped. - * align: int - * If true, the data will be aligned. - * - * Returns - * ------- - * new_reference: PyUnicodeObject - */ -NPY_NO_EXPORT PyUnicodeObject * -PyUnicode_FromUCS4(char const *src_char, Py_ssize_t size, int swap, int align) -{ - Py_ssize_t ucs4len = size / sizeof(npy_ucs4); - npy_ucs4 const *src = (npy_ucs4 const *)src_char; - npy_ucs4 *buf = NULL; - - /* swap and align if needed */ - if (swap || align) { - buf = (npy_ucs4 *)malloc(size); - if (buf == NULL) { - PyErr_NoMemory(); - return NULL; - } - memcpy(buf, src, size); - if (swap) { - byte_swap_vector(buf, ucs4len, sizeof(npy_ucs4)); - } - src = buf; - } - - /* trim trailing zeros */ - while (ucs4len > 0 && src[ucs4len - 1] == 0) { - ucs4len--; - } - PyUnicodeObject *ret = (PyUnicodeObject *)PyUnicode_FromKindAndData( - PyUnicode_4BYTE_KIND, src, ucs4len); - free(buf); - return ret; -} diff --git a/numpy/_core/src/common/ucsnarrow.h b/numpy/_core/src/common/ucsnarrow.h deleted file mode 100644 index 4b17a2809c1d..000000000000 --- a/numpy/_core/src/common/ucsnarrow.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef NUMPY_CORE_SRC_COMMON_NPY_UCSNARROW_H_ -#define 
NUMPY_CORE_SRC_COMMON_NPY_UCSNARROW_H_ - -NPY_NO_EXPORT PyUnicodeObject * -PyUnicode_FromUCS4(char const *src, Py_ssize_t size, int swap, int align); - -#endif /* NUMPY_CORE_SRC_COMMON_NPY_UCSNARROW_H_ */ diff --git a/numpy/_core/src/common/ufunc_override.c b/numpy/_core/src/common/ufunc_override.c index dd7706d41475..1ed7165a4e83 100644 --- a/numpy/_core/src/common/ufunc_override.c +++ b/numpy/_core/src/common/ufunc_override.c @@ -1,12 +1,14 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE +#include "numpy/ndarrayobject.h" #include "numpy/ndarraytypes.h" #include "npy_pycompat.h" #include "get_attr_string.h" #include "npy_import.h" #include "ufunc_override.h" #include "scalartypes.h" +#include "npy_static_data.h" /* * Check whether an object has __array_ufunc__ defined on its class and it @@ -19,15 +21,8 @@ NPY_NO_EXPORT PyObject * PyUFuncOverride_GetNonDefaultArrayUfunc(PyObject *obj) { - static PyObject *ndarray_array_ufunc = NULL; PyObject *cls_array_ufunc; - /* On first entry, cache ndarray's __array_ufunc__ */ - if (ndarray_array_ufunc == NULL) { - ndarray_array_ufunc = PyObject_GetAttrString((PyObject *)&PyArray_Type, - "__array_ufunc__"); - } - /* Fast return for ndarray */ if (PyArray_CheckExact(obj)) { return NULL; @@ -41,15 +36,13 @@ PyUFuncOverride_GetNonDefaultArrayUfunc(PyObject *obj) * Does the class define __array_ufunc__? (Note that LookupSpecial has fast * return for basic python types, so no need to worry about those here) */ - cls_array_ufunc = PyArray_LookupSpecial(obj, npy_um_str_array_ufunc); - if (cls_array_ufunc == NULL) { - if (PyErr_Occurred()) { - PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */ - } + if (PyArray_LookupSpecial( + obj, npy_interned_str.array_ufunc, &cls_array_ufunc) < 0) { + PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? 
*/ return NULL; } - /* Ignore if the same as ndarray.__array_ufunc__ */ - if (cls_array_ufunc == ndarray_array_ufunc) { + /* Ignore if the same as ndarray.__array_ufunc__ (it may be NULL here) */ + if (cls_array_ufunc == npy_static_pydata.ndarray_array_ufunc) { Py_DECREF(cls_array_ufunc); return NULL; } @@ -112,10 +105,10 @@ PyUFuncOverride_GetOutObjects(PyObject *kwds, PyObject **out_kwd_obj, PyObject * if (PyTuple_CheckExact(*out_kwd_obj)) { /* * The C-API recommends calling PySequence_Fast before any of the other - * PySequence_Fast* functions. This is required for PyPy + * PySequence_Fast* functions. */ PyObject *seq; - seq = PySequence_Fast(*out_kwd_obj, + seq = PySequence_Fast(*out_kwd_obj, // noqa: borrowed-ref OK "Could not convert object to sequence"); if (seq == NULL) { Py_CLEAR(*out_kwd_obj); diff --git a/numpy/_core/src/common/umathmodule.h b/numpy/_core/src/common/umathmodule.h index 73d853341cda..9fc693685e70 100644 --- a/numpy/_core/src/common/umathmodule.h +++ b/numpy/_core/src/common/umathmodule.h @@ -9,7 +9,6 @@ NPY_NO_EXPORT PyObject * get_sfloat_dtype(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)); -PyObject * add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args); PyObject * ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUSED(kwds)); diff --git a/numpy/_core/src/dummymodule.c b/numpy/_core/src/dummymodule.c index 2f293d6c4cd6..e1ef80ab3af3 100644 --- a/numpy/_core/src/dummymodule.c +++ b/numpy/_core/src/dummymodule.c @@ -14,25 +14,27 @@ static struct PyMethodDef methods[] = { {NULL, NULL, 0, NULL} }; +static struct PyModuleDef_Slot dummy_slots[] = { +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + // signal that this module can be imported in isolated subinterpreters + {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED}, +#endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, 
Py_MOD_GIL_NOT_USED}, +#endif + {0, NULL}, +}; static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "dummy", - NULL, - -1, - methods, - NULL, - NULL, - NULL, - NULL + .m_base = PyModuleDef_HEAD_INIT, + .m_name = "dummy", + .m_size = 0, + .m_methods = methods, + .m_slots = dummy_slots, }; /* Initialization function for the module */ PyMODINIT_FUNC PyInit__dummy(void) { - PyObject *m; - m = PyModule_Create(&moduledef); - if (!m) { - return NULL; - } - return m; + return PyModuleDef_Init(&moduledef); } diff --git a/numpy/_core/src/highway b/numpy/_core/src/highway index 3af6ba57bf82..ee36c8371293 160000 --- a/numpy/_core/src/highway +++ b/numpy/_core/src/highway @@ -1 +1 @@ -Subproject commit 3af6ba57bf82c861870f92f0483149439007d652 +Subproject commit ee36c837129310be19c17c9108c6dc3f6ae06942 diff --git a/numpy/_core/src/multiarray/_datetime.h b/numpy/_core/src/multiarray/_datetime.h index c477d334e19d..5261e8232a08 100644 --- a/numpy/_core/src/multiarray/_datetime.h +++ b/numpy/_core/src/multiarray/_datetime.h @@ -174,8 +174,8 @@ convert_datetime_metadata_tuple_to_datetime_metadata(PyObject *tuple, npy_bool from_pickle); /* - * Gets a tzoffset in minutes by calling the fromutc() function on - * the Python datetime.tzinfo object. + * Gets a tzoffset in minutes by calling the astimezone() function on + * the Python datetime.datetime object. */ NPY_NO_EXPORT int get_tzoffset_from_pytzinfo(PyObject *timezone, npy_datetimestruct *dts); @@ -242,9 +242,10 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj, /* * Converts a datetime into a PyObject *. * - * For days or coarser, returns a datetime.date. - * For microseconds or coarser, returns a datetime.datetime. - * For units finer than microseconds, returns an integer. + * NaT (Not-a-time) is returned as None. + * For D/W/Y/M (days or coarser), returns a datetime.date. + * For Îŧs/ms/s/m/h/D/W (microseconds or coarser), returns a datetime.datetime. 
+ * For ns/ps/fs/as (units shorter than microseconds), returns an integer. */ NPY_NO_EXPORT PyObject * convert_datetime_to_pyobject(npy_datetime dt, PyArray_DatetimeMetaData *meta); @@ -252,9 +253,9 @@ convert_datetime_to_pyobject(npy_datetime dt, PyArray_DatetimeMetaData *meta); /* * Converts a timedelta into a PyObject *. * - * Not-a-time is returned as the string "NaT". - * For microseconds or coarser, returns a datetime.timedelta. - * For units finer than microseconds, returns an integer. + * NaT (Not-a-time) is returned as None. + * For Îŧs/ms/s/m/h/D/W (microseconds or coarser), returns a datetime.timedelta. + * For Y/M (non-linear units), generic units and ns/ps/fs/as (units shorter than microseconds), returns an integer. */ NPY_NO_EXPORT PyObject * convert_timedelta_to_pyobject(npy_timedelta td, PyArray_DatetimeMetaData *meta); @@ -328,4 +329,10 @@ find_object_datetime_type(PyObject *obj, int type_num); NPY_NO_EXPORT int PyArray_InitializeDatetimeCasts(void); +NPY_NO_EXPORT npy_hash_t +datetime_hash(PyArray_DatetimeMetaData *meta, npy_datetime dt); + +NPY_NO_EXPORT npy_hash_t +timedelta_hash(PyArray_DatetimeMetaData *meta, npy_timedelta td); + #endif /* NUMPY_CORE_SRC_MULTIARRAY__DATETIME_H_ */ diff --git a/numpy/_core/src/multiarray/_multiarray_tests.c.src b/numpy/_core/src/multiarray/_multiarray_tests.c.src index fbd5fc445a2c..f79ff9486fe4 100644 --- a/numpy/_core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/_core/src/multiarray/_multiarray_tests.c.src @@ -7,6 +7,7 @@ #include "numpy/arrayscalars.h" #include "numpy/npy_math.h" #include "numpy/halffloat.h" +#include "npy_import.h" #include "common.h" #include "npy_argparse.h" #include "mem_overlap.h" @@ -42,6 +43,28 @@ argparse_example_function(PyObject *NPY_UNUSED(mod), Py_RETURN_NONE; } +/* + * Tests that argparse cache creation is thread-safe. 
*must* be called only + * by the python-level test_thread_safe_argparse_cache function, otherwise + * the cache might be created before the test to make sure cache creation is + * thread-safe runs + */ +static PyObject * +threaded_argparse_example_function(PyObject *NPY_UNUSED(mod), + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) +{ + NPY_PREPARE_ARGPARSER; + int arg1; + PyObject *arg2; + if (npy_parse_arguments("thread_func", args, len_args, kwnames, + "$arg1", &PyArray_PythonPyIntFromInt, &arg1, + "$arg2", NULL, &arg2, + NULL, NULL, NULL) < 0) { + return NULL; + } + Py_RETURN_NONE; +} + /* test PyArray_IsPythonScalar, before including private py3 compat header */ static PyObject * IsPythonScalar(PyObject * dummy, PyObject *args) @@ -622,7 +645,7 @@ incref_elide_l(PyObject *dummy, PyObject *args) } /* get item without increasing refcount, item may still be on the python * stack but above the inaccessible top */ - r = PyList_GetItem(arg, 4); + r = PyList_GetItem(arg, 4); // noqa: borrowed-ref OK res = PyNumber_Add(r, r); return res; @@ -653,7 +676,7 @@ npy_create_writebackifcopy(PyObject* NPY_UNUSED(self), PyObject* args) return array; } -/* used to test WRITEBACKIFCOPY without resolution emits runtime warning */ +/* used to test WRITEBACKIFCOPY without resolution, emits runtime warning */ static PyObject* npy_abuse_writebackifcopy(PyObject* NPY_UNUSED(self), PyObject* args) { @@ -667,7 +690,7 @@ npy_abuse_writebackifcopy(PyObject* NPY_UNUSED(self), PyObject* args) array = PyArray_FromArray((PyArrayObject*)args, NULL, flags); if (array == NULL) return NULL; - Py_DECREF(array); /* calls array_dealloc even on PyPy */ + Py_DECREF(array); /* calls array_dealloc */ Py_RETURN_NONE; } @@ -841,7 +864,7 @@ get_all_cast_information(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)) if (classes == NULL) { goto fail; } - Py_SETREF(classes, PySequence_Fast(classes, NULL)); + Py_SETREF(classes, PySequence_Fast(classes, NULL)); // noqa: borrowed-ref OK if 
(classes == NULL) { goto fail; } @@ -861,7 +884,7 @@ get_all_cast_information(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)) PyObject *to_dtype, *cast_obj; Py_ssize_t pos = 0; - while (PyDict_Next(NPY_DT_SLOTS(from_dtype)->castingimpls, + while (PyDict_Next(NPY_DT_SLOTS(from_dtype)->castingimpls, // noqa: borrowed-ref OK &pos, &to_dtype, &cast_obj)) { if (cast_obj == Py_None) { continue; @@ -900,36 +923,24 @@ get_all_cast_information(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)) return NULL; } +static void +identity_cache_destructor(PyObject *capsule) +{ + PyArrayIdentityHash *tb = (PyArrayIdentityHash *)PyCapsule_GetPointer(capsule, "PyArrayIdentityHash"); + assert(tb != NULL); + PyArrayIdentityHash_Dealloc(tb); +} /* - * Helper to test the identity cache, takes a list of values and adds - * all to the cache except the last key/value pair. The last value is - * ignored, instead the last key is looked up. - * None is returned, if the key is not found. - * If `replace` is True, duplicate entries are ignored when adding to the - * hashtable. + * Create an identity hash table with the given key length and return it + * as a capsule. 
*/ static PyObject * -identityhash_tester(PyObject *NPY_UNUSED(mod), - PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) +create_identity_hash(PyObject *NPY_UNUSED(self), PyObject *args) { - NPY_PREPARE_ARGPARSER; - int key_len; - int replace; - PyObject *replace_obj = Py_False; - PyObject *sequence; - PyObject *result = NULL; - if (npy_parse_arguments("identityhash_tester", args, len_args, kwnames, - "key_len", &PyArray_PythonPyIntFromInt, &key_len, - "sequence", NULL, &sequence, - "|replace", NULL, &replace_obj, - NULL, NULL, NULL) < 0) { - return NULL; - } - replace = PyObject_IsTrue(replace_obj); - if (error_converting(replace)) { + if (!PyArg_ParseTuple(args, "i", &key_len)) { return NULL; } @@ -937,52 +948,91 @@ identityhash_tester(PyObject *NPY_UNUSED(mod), PyErr_SetString(PyExc_ValueError, "must have 1 to max-args keys."); return NULL; } + PyArrayIdentityHash *tb = PyArrayIdentityHash_New(key_len); if (tb == NULL) { return NULL; } - /* Replace the sequence with a guaranteed fast-sequence */ - sequence = PySequence_Fast(sequence, "converting sequence."); - if (sequence == NULL) { - goto finish; + PyObject *capsule = PyCapsule_New((void *)tb, "PyArrayIdentityHash", + identity_cache_destructor); + if (capsule == NULL) { + PyArrayIdentityHash_Dealloc(tb); + return NULL; } - Py_ssize_t length = PySequence_Fast_GET_SIZE(sequence); - for (Py_ssize_t i = 0; i < length; i++) { - PyObject *key_val = PySequence_Fast_GET_ITEM(sequence, i); - if (!PyTuple_CheckExact(key_val) || PyTuple_GET_SIZE(key_val) != 2) { - PyErr_SetString(PyExc_TypeError, "bad key-value pair."); - goto finish; - } - PyObject *key = PyTuple_GET_ITEM(key_val, 0); - PyObject *value = PyTuple_GET_ITEM(key_val, 1); - if (!PyTuple_CheckExact(key) || PyTuple_GET_SIZE(key) != key_len) { - PyErr_SetString(PyExc_TypeError, "bad key tuple."); - goto finish; - } + return capsule; +} - PyObject *keys[NPY_MAXARGS]; - for (int j = 0; j < key_len; j++) { - keys[j] = PyTuple_GET_ITEM(key, j); - } - 
if (i != length - 1) { - if (PyArrayIdentityHash_SetItem(tb, keys, value, replace) < 0) { - goto finish; - } - } - else { - result = PyArrayIdentityHash_GetItem(tb, keys); - if (result == NULL) { - result = Py_None; - } - Py_INCREF(result); - } +/* + * Set default item in identity hash table provided as capsule and key as tuple. + * If the key is already present, return the existing value else set to value and + * return that. + */ +static PyObject * +identity_hash_set_item_default(PyObject *NPY_UNUSED(self), PyObject *args) +{ + PyObject *capsule, *key_tuple, *value; + if (!PyArg_ParseTuple(args, "OOO", &capsule, &key_tuple, &value)) { + return NULL; } - finish: - Py_DECREF(sequence); - PyArrayIdentityHash_Dealloc(tb); + if (!PyCapsule_IsValid(capsule, "PyArrayIdentityHash")) { + PyErr_SetString(PyExc_TypeError, + "First argument must be a valid PyArrayIdentityHash capsule."); + return NULL; + } + + PyArrayIdentityHash *tb = (PyArrayIdentityHash *)PyCapsule_GetPointer(capsule, "PyArrayIdentityHash"); + assert(tb != NULL); + + if (!PyTuple_CheckExact(key_tuple) || PyTuple_GET_SIZE(key_tuple) != tb->key_len) { + PyErr_Format(PyExc_TypeError, + "key must be a tuple of length %d", tb->key_len); + return NULL; + } + + PyObject *result = NULL; + if (PyArrayIdentityHash_SetItemDefault(tb, &PyTuple_GET_ITEM(key_tuple, 0), value, &result) < 0) { + return NULL; + } + Py_INCREF(result); + return result; +} + + +/* + * Get item from identity hash table provided as capsule and key as tuple. 
+ */ +static PyObject * +identity_hash_get_item(PyObject *NPY_UNUSED(self), PyObject *args) +{ + PyObject *capsule, *key_tuple; + + if (!PyArg_ParseTuple(args, "OO", &capsule, &key_tuple)) { + return NULL; + } + + if (!PyCapsule_IsValid(capsule, "PyArrayIdentityHash")) { + PyErr_SetString(PyExc_TypeError, + "First argument must be a valid PyArrayIdentityHash capsule."); + return NULL; + } + + PyArrayIdentityHash *tb = (PyArrayIdentityHash *)PyCapsule_GetPointer(capsule, "PyArrayIdentityHash"); + assert(tb != NULL); + + if (!PyTuple_CheckExact(key_tuple) || PyTuple_GET_SIZE(key_tuple) != tb->key_len) { + PyErr_Format(PyExc_TypeError, + "key must be a tuple of length %d", tb->key_len); + return NULL; + } + + PyObject *result = PyArrayIdentityHash_GetItem(tb, &PyTuple_GET_ITEM(key_tuple, 0)); + if (result == NULL) { + Py_RETURN_NONE; + } + Py_INCREF(result); return result; } @@ -1855,7 +1905,9 @@ get_fpu_mode(PyObject *NPY_UNUSED(self), PyObject *args) result = _controlfp(0, 0); return PyLong_FromLongLong(result); } -#elif (defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))) || (defined(_MSC_VER) && defined(__clang__)) +#elif (defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))) \ + || (defined(_MSC_VER) && defined(__clang__) && \ + (defined(_M_IX86) || defined(_M_AMD64))) { unsigned short cw = 0; __asm__("fstcw %w0" : "=m" (cw)); @@ -2065,10 +2117,18 @@ run_sortkind_converter(PyObject* NPY_UNUSED(self), PyObject *args) return NULL; } switch (kind) { - case _NPY_SORT_UNDEFINED: return PyUnicode_FromString("_NPY_SORT_UNDEFINED"); - case NPY_QUICKSORT: return PyUnicode_FromString("NPY_QUICKSORT"); - case NPY_HEAPSORT: return PyUnicode_FromString("NPY_HEAPSORT"); - case NPY_STABLESORT: return PyUnicode_FromString("NPY_STABLESORT"); + case _NPY_SORT_UNDEFINED: + return PyUnicode_FromString("_NPY_SORT_UNDEFINED"); + case NPY_QUICKSORT: + return PyUnicode_FromString("NPY_QUICKSORT"); + case NPY_HEAPSORT: + return 
PyUnicode_FromString("NPY_HEAPSORT"); + case NPY_STABLESORT: + return PyUnicode_FromString("NPY_STABLESORT"); + default: + // the other possible values in NPY_SORTKIND can only + // be set with keywords. + break; } return PyLong_FromLong(kind); } @@ -2138,12 +2198,17 @@ run_casting_converter(PyObject* NPY_UNUSED(self), PyObject *args) if (!PyArg_ParseTuple(args, "O&", PyArray_CastingConverter, &casting)) { return NULL; } - switch (casting) { + switch ((int)casting) { case NPY_NO_CASTING: return PyUnicode_FromString("NPY_NO_CASTING"); case NPY_EQUIV_CASTING: return PyUnicode_FromString("NPY_EQUIV_CASTING"); case NPY_SAFE_CASTING: return PyUnicode_FromString("NPY_SAFE_CASTING"); case NPY_SAME_KIND_CASTING: return PyUnicode_FromString("NPY_SAME_KIND_CASTING"); case NPY_UNSAFE_CASTING: return PyUnicode_FromString("NPY_UNSAFE_CASTING"); + case NPY_SAME_VALUE_CASTING: return PyUnicode_FromString("NPY_SAME_VALUE_CASTING"); + case NPY_NO_CASTING | NPY_SAME_VALUE_CASTING_FLAG: return PyUnicode_FromString("NPY_NO_CASTING | NPY_SAME_VALUE_CASTING_FLAG"); + case NPY_EQUIV_CASTING | NPY_SAME_VALUE_CASTING_FLAG: return PyUnicode_FromString("NPY_EQUIV_CASTING | NPY_SAME_VALUE_CASTING_FLAG"); + case NPY_SAFE_CASTING | NPY_SAME_VALUE_CASTING_FLAG: return PyUnicode_FromString("NPY_SAFE_CASTING | NPY_SAME_VALUE_CASTING_FLAG"); + case NPY_SAME_KIND_CASTING | NPY_SAME_VALUE_CASTING_FLAG: return PyUnicode_FromString("NPY_SAME_KIND_CASTING | NPY_SAME_VALUE_CASTING_FLAG"); default: return PyLong_FromLong(casting); } } @@ -2201,10 +2266,22 @@ run_scalar_intp_from_sequence(PyObject *NPY_UNUSED(self), PyObject *obj) return PyArray_IntTupleFromIntp(1, vals); } +static PyObject * +_npy_import_entry_point(PyObject *NPY_UNUSED(self), PyObject *obj) { + PyObject *res = PyUnicode_AsASCIIString(obj); + if (res != NULL) { + Py_SETREF(res, npy_import_entry_point(PyBytes_AS_STRING(res))); + } + return res; +} + static PyMethodDef Multiarray_TestsMethods[] = { {"argparse_example_function", 
(PyCFunction)argparse_example_function, METH_KEYWORDS | METH_FASTCALL, NULL}, + {"threaded_argparse_example_function", + (PyCFunction)threaded_argparse_example_function, + METH_KEYWORDS | METH_FASTCALL, NULL}, {"IsPythonScalar", IsPythonScalar, METH_VARARGS, NULL}, @@ -2259,9 +2336,15 @@ static PyMethodDef Multiarray_TestsMethods[] = { "Return a list with info on all available casts. Some of the info" "may differ for an actual cast if it uses value-based casting " "(flexible types)."}, - {"identityhash_tester", - (PyCFunction)identityhash_tester, - METH_KEYWORDS | METH_FASTCALL, NULL}, + {"create_identity_hash", + create_identity_hash, + METH_VARARGS, "Create a new PyArrayIdentityHash wrapped in a PyCapsule."}, + {"identity_hash_set_item_default", + (PyCFunction)identity_hash_set_item_default, + METH_VARARGS, "Set a default item in a PyArrayIdentityHash capsule."}, + {"identity_hash_get_item", + identity_hash_get_item, + METH_VARARGS, "Get an item from a PyArrayIdentityHash capsule."}, {"array_indexing", array_indexing, METH_VARARGS, NULL}, @@ -2382,36 +2465,63 @@ static PyMethodDef Multiarray_TestsMethods[] = { {"run_intp_converter", run_intp_converter, METH_VARARGS, NULL}, + {"npy_import_entry_point", + _npy_import_entry_point, + METH_O, NULL}, {NULL, NULL, 0, NULL} /* Sentinel */ }; -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_multiarray_tests", - NULL, - -1, - Multiarray_TestsMethods, - NULL, - NULL, - NULL, - NULL -}; +static int module_loaded = 0; -PyMODINIT_FUNC PyInit__multiarray_tests(void) +static int +_multiarray_tests_exec(PyObject *m) { - PyObject *m; + // https://docs.python.org/3/howto/isolating-extensions.html#opt-out-limiting-to-one-module-object-per-process + if (module_loaded) { + PyErr_SetString(PyExc_ImportError, + "cannot load module more than once per process"); + return -1; + } + module_loaded = 1; - m = PyModule_Create(&moduledef); - if (m == NULL) { - return m; + if (PyArray_ImportNumPyAPI() < 0) { + return -1; + } 
+ if (init_argparse_mutex() < 0) { + return -1; } - import_array(); if (PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "cannot load _multiarray_tests module."); } - return m; + + return 0; +} + +static struct PyModuleDef_Slot _multiarray_tests_slots[] = { + {Py_mod_exec, _multiarray_tests_exec}, +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED}, +#endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, +#endif + {0, NULL}, +}; + +static struct PyModuleDef moduledef = { + .m_base = PyModuleDef_HEAD_INIT, + .m_name = "_multiarray_tests", + .m_size = 0, + .m_methods = Multiarray_TestsMethods, + .m_slots = _multiarray_tests_slots, +}; + +PyMODINIT_FUNC PyInit__multiarray_tests(void) +{ + return PyModuleDef_Init(&moduledef); } NPY_NO_EXPORT int diff --git a/numpy/_core/src/multiarray/abstractdtypes.c b/numpy/_core/src/multiarray/abstractdtypes.c index 8d00084f0efe..120ada551e7f 100644 --- a/numpy/_core/src/multiarray/abstractdtypes.c +++ b/numpy/_core/src/multiarray/abstractdtypes.c @@ -139,16 +139,16 @@ initialize_and_map_pytypes_to_dtypes() * the same could be achieved e.g. with additional abstract DTypes. 
*/ PyArray_DTypeMeta *dtype; - dtype = NPY_DTYPE(PyArray_DescrFromType(NPY_UNICODE)); + dtype = typenum_to_dtypemeta(NPY_UNICODE); if (_PyArray_MapPyTypeToDType(dtype, &PyUnicode_Type, NPY_FALSE) < 0) { return -1; } - dtype = NPY_DTYPE(PyArray_DescrFromType(NPY_STRING)); + dtype = typenum_to_dtypemeta(NPY_STRING); if (_PyArray_MapPyTypeToDType(dtype, &PyBytes_Type, NPY_FALSE) < 0) { return -1; } - dtype = NPY_DTYPE(PyArray_DescrFromType(NPY_BOOL)); + dtype = typenum_to_dtypemeta(NPY_BOOL); if (_PyArray_MapPyTypeToDType(dtype, &PyBool_Type, NPY_FALSE) < 0) { return -1; } @@ -177,7 +177,6 @@ int_common_dtype(PyArray_DTypeMeta *NPY_UNUSED(cls), PyArray_DTypeMeta *other) /* This is a back-compat fallback to usually do the right thing... */ PyArray_DTypeMeta *uint8_dt = &PyArray_UInt8DType; PyArray_DTypeMeta *res = NPY_DT_CALL_common_dtype(other, uint8_dt); - Py_DECREF(uint8_dt); if (res == NULL) { PyErr_Clear(); } @@ -378,3 +377,116 @@ NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyComplexDType = {{{ .dt_slots = &pycomplexdtype_slots, .scalar_type = NULL, /* set in initialize_and_map_pytypes_to_dtypes */ }; + + +/* + * Additional functions to deal with Python literal int, float, complex + */ +/* + * This function takes an existing array operand and if the new descr does + * not match, replaces it with a new array that has the correct descriptor + * and holds exactly the scalar value. + */ +NPY_NO_EXPORT int +npy_update_operand_for_scalar( + PyArrayObject **operand, PyObject *scalar, PyArray_Descr *descr, + NPY_CASTING casting) +{ + if (PyArray_EquivTypes(PyArray_DESCR(*operand), descr)) { + /* + * TODO: This is an unfortunate work-around for legacy type resolvers + * (see `convert_ufunc_arguments` in `ufunc_object.c`), that + * currently forces us to replace the array. 
+ */ + if (!(PyArray_FLAGS(*operand) & NPY_ARRAY_WAS_PYTHON_INT)) { + return 0; + } + } + else if (NPY_UNLIKELY(casting == NPY_EQUIV_CASTING) && + descr->type_num != NPY_OBJECT) { + /* + * incredibly niche, but users could pass equiv casting and we + * actually need to cast. Let object pass (technically correct) but + * in all other cases, we don't technically consider equivalent. + * NOTE(seberg): I don't think we should be beholden to this logic. + */ + PyErr_Format(PyExc_TypeError, + "cannot cast Python %s to %S under the casting rule 'equiv'", + Py_TYPE(scalar)->tp_name, descr); + return -1; + } + + Py_INCREF(descr); + PyArrayObject *new = (PyArrayObject *)PyArray_NewFromDescr( + &PyArray_Type, descr, 0, NULL, NULL, NULL, 0, NULL); + Py_SETREF(*operand, new); + if (*operand == NULL) { + return -1; + } + if (scalar == NULL) { + /* The ufunc.resolve_dtypes paths can go here. Anything should go. */ + return 0; + } + return PyArray_SETITEM(new, PyArray_BYTES(*operand), scalar); +} + + +/* + * When a user passed a Python literal (int, float, complex), special promotion + * rules mean that we don't know the exact descriptor that should be used. + * + * Typically, this just doesn't really matter. Unfortunately, there are two + * exceptions: + * 1. The user might have passed `signature=` which may not be compatible. + * In that case, we cannot really assume "safe" casting. + * 2. It is at least fathomable that a DType doesn't deal with this directly. + * or that using the original int64/object is wrong in the type resolution. + * + * The solution is to assume that we can use the common DType of the signature + * and the Python scalar DType (`in_DT`) as a safe intermediate. + */ +NPY_NO_EXPORT PyArray_Descr * +npy_find_descr_for_scalar( + PyObject *scalar, PyArray_Descr *original_descr, + PyArray_DTypeMeta *in_DT, PyArray_DTypeMeta *op_DT) +{ + PyArray_Descr *res; + /* There is a good chance, descriptors already match... 
*/ + if (NPY_DTYPE(original_descr) == op_DT) { + Py_INCREF(original_descr); + return original_descr; + } + + PyArray_DTypeMeta *common = PyArray_CommonDType(in_DT, op_DT); + if (common == NULL) { + PyErr_Clear(); + /* This is fine. We simply assume the original descr is viable. */ + Py_INCREF(original_descr); + return original_descr; + } + /* A very likely case is that there is nothing to do: */ + if (NPY_DTYPE(original_descr) == common) { + Py_DECREF(common); + Py_INCREF(original_descr); + return original_descr; + } + if (!NPY_DT_is_parametric(common) || + /* In some paths we only have a scalar type, can't discover */ + scalar == NULL || + /* If the DType doesn't know the scalar type, guess at default. */ + !NPY_DT_CALL_is_known_scalar_type(common, Py_TYPE(scalar))) { + if (common->singleton != NULL) { + res = common->singleton; + Py_INCREF(res); + } + else { + res = NPY_DT_CALL_default_descr(common); + } + } + else { + res = NPY_DT_CALL_discover_descr_from_pyobject(common, scalar); + } + + Py_DECREF(common); + return res; +} diff --git a/numpy/_core/src/multiarray/abstractdtypes.h b/numpy/_core/src/multiarray/abstractdtypes.h index 4533e99b635f..63efea9580db 100644 --- a/numpy/_core/src/multiarray/abstractdtypes.h +++ b/numpy/_core/src/multiarray/abstractdtypes.h @@ -1,6 +1,7 @@ #ifndef NUMPY_CORE_SRC_MULTIARRAY_ABSTRACTDTYPES_H_ #define NUMPY_CORE_SRC_MULTIARRAY_ABSTRACTDTYPES_H_ +#include "numpy/ndarraytypes.h" #include "arrayobject.h" #include "dtypemeta.h" @@ -41,14 +42,7 @@ static inline int npy_mark_tmp_array_if_pyscalar( PyObject *obj, PyArrayObject *arr, PyArray_DTypeMeta **dtype) { - /* - * We check the array dtype for two reasons: First, booleans are - * integer subclasses. Second, an int, float, or complex could have - * a custom DType registered, and then we should use that. - * Further, `np.float64` is a double subclass, so must reject it. 
- */ - if (PyLong_Check(obj) - && (PyArray_ISINTEGER(arr) || PyArray_ISOBJECT(arr))) { + if (PyLong_CheckExact(obj)) { ((PyArrayObject_fields *)arr)->flags |= NPY_ARRAY_WAS_PYTHON_INT; if (dtype != NULL) { Py_INCREF(&PyArray_PyLongDType); @@ -56,8 +50,7 @@ npy_mark_tmp_array_if_pyscalar( } return 1; } - else if (PyFloat_Check(obj) && !PyArray_IsScalar(obj, Double) - && PyArray_TYPE(arr) == NPY_DOUBLE) { + else if (PyFloat_CheckExact(obj)) { ((PyArrayObject_fields *)arr)->flags |= NPY_ARRAY_WAS_PYTHON_FLOAT; if (dtype != NULL) { Py_INCREF(&PyArray_PyFloatDType); @@ -65,8 +58,7 @@ npy_mark_tmp_array_if_pyscalar( } return 1; } - else if (PyComplex_Check(obj) && !PyArray_IsScalar(obj, CDouble) - && PyArray_TYPE(arr) == NPY_CDOUBLE) { + else if (PyComplex_CheckExact(obj)) { ((PyArrayObject_fields *)arr)->flags |= NPY_ARRAY_WAS_PYTHON_COMPLEX; if (dtype != NULL) { Py_INCREF(&PyArray_PyComplexDType); @@ -77,6 +69,18 @@ npy_mark_tmp_array_if_pyscalar( return 0; } + +NPY_NO_EXPORT int +npy_update_operand_for_scalar( + PyArrayObject **operand, PyObject *scalar, PyArray_Descr *descr, + NPY_CASTING casting); + + +NPY_NO_EXPORT PyArray_Descr * +npy_find_descr_for_scalar( + PyObject *scalar, PyArray_Descr *original_descr, + PyArray_DTypeMeta *in_DT, PyArray_DTypeMeta *op_DT); + #ifdef __cplusplus } #endif diff --git a/numpy/_core/src/multiarray/alloc.c b/numpy/_core/src/multiarray/alloc.cpp similarity index 70% rename from numpy/_core/src/multiarray/alloc.c rename to numpy/_core/src/multiarray/alloc.cpp index df64a13a26e8..ca2027403b96 100644 --- a/numpy/_core/src/multiarray/alloc.c +++ b/numpy/_core/src/multiarray/alloc.cpp @@ -1,5 +1,6 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE +extern "C" { #define PY_SSIZE_T_CLEAN #include @@ -11,6 +12,9 @@ #include "numpy/npy_common.h" #include "npy_config.h" #include "alloc.h" +#include "npy_static_data.h" +#include "templ_common.h" +#include "multiarraymodule.h" #include #ifdef NPY_OS_LINUX @@ -24,24 +28,73 
@@ #endif #endif -#define NBUCKETS 1024 /* number of buckets for data*/ -#define NBUCKETS_DIM 16 /* number of buckets for dimensions/strides */ -#define NCACHE 7 /* number of cache entries per bucket */ + +/* Do not enable the alloc cache if ASAN or MSAN instrumentation is enabled. + * The cache makes ASAN use-after-free or MSAN + * use-of-uninitialized-memory warnings less useful. */ +#if defined(__has_feature) +# if __has_feature(address_sanitizer) || __has_feature(memory_sanitizer) +# define USE_ALLOC_CACHE 0 +# endif +#endif +#ifndef USE_ALLOC_CACHE +# define USE_ALLOC_CACHE 1 +#endif + + +# define NBUCKETS 1024 /* number of buckets for data*/ +# define NBUCKETS_DIM 16 /* number of buckets for dimensions/strides */ +# define NCACHE 7 /* number of cache entries per bucket */ /* this structure fits neatly into a cacheline */ typedef struct { npy_uintp available; /* number of cached pointers */ void * ptrs[NCACHE]; } cache_bucket; -static cache_bucket datacache[NBUCKETS]; -static cache_bucket dimcache[NBUCKETS_DIM]; -static int _madvise_hugepage = 1; +static NPY_TLS cache_bucket _datacache[NBUCKETS]; +static NPY_TLS cache_bucket _dimcache[NBUCKETS_DIM]; + +// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61991 +// gcc has a bug where if the thread local variable +// is unused then in some cases it's destructor may not get +// called at thread exit. So to workaround this, we access the +// datacache and dimcache through this struct so that +// cache_destructor gets initialized and used, ensuring that +// the destructor gets called properly at thread exit. +// The datacache and dimcache are not embedded in this struct +// because that would make this struct very large and certain +// platforms like armhf can crash while allocating that large +// TLS block. 
+typedef struct cache_destructor { + cache_bucket *dimcache; + cache_bucket *datacache; + cache_destructor() { + dimcache = &_dimcache[0]; + datacache = &_datacache[0]; + } + ~cache_destructor() { + for (npy_uint i = 0; i < NBUCKETS; ++i) { + while (datacache[i].available > 0) { + PyMem_RawFree(datacache[i].ptrs[--datacache[i].available]); + } + } + for (npy_uint i = 0; i < NBUCKETS_DIM; ++i) { + while (dimcache[i].available > 0) { + PyMem_RawFree(dimcache[i].ptrs[--dimcache[i].available]); + } + } + } +} cache_destructor; + +static NPY_TLS cache_destructor tls_cache_destructor; +#define datacache tls_cache_destructor.datacache +#define dimcache tls_cache_destructor.dimcache /* * This function tells whether NumPy attempts to call `madvise` with * `MADV_HUGEPAGE`. `madvise` is only ever used on linux, so the value - * of `_madvise_hugepage` may be ignored. + * of `madvise_hugepage` may be ignored. * * It is exposed to Python as `np._core.multiarray._get_madvise_hugepage`. */ @@ -49,7 +102,7 @@ NPY_NO_EXPORT PyObject * _get_madvise_hugepage(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { #ifdef NPY_OS_LINUX - if (_madvise_hugepage) { + if (npy_global_state.madvise_hugepage) { Py_RETURN_TRUE; } #endif @@ -59,20 +112,20 @@ _get_madvise_hugepage(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) /* * This function enables or disables the use of `MADV_HUGEPAGE` on Linux - * by modifying the global static `_madvise_hugepage`. - * It returns the previous value of `_madvise_hugepage`. + * by modifying the global static `madvise_hugepage`. + * It returns the previous value of `madvise_hugepage`. * * It is exposed to Python as `np._core.multiarray._set_madvise_hugepage`. 
*/ NPY_NO_EXPORT PyObject * _set_madvise_hugepage(PyObject *NPY_UNUSED(self), PyObject *enabled_obj) { - int was_enabled = _madvise_hugepage; + int was_enabled = npy_global_state.madvise_hugepage; int enabled = PyObject_IsTrue(enabled_obj); if (enabled < 0) { return NULL; } - _madvise_hugepage = enabled; + npy_global_state.madvise_hugepage = enabled; if (was_enabled) { Py_RETURN_TRUE; } @@ -80,6 +133,24 @@ _set_madvise_hugepage(PyObject *NPY_UNUSED(self), PyObject *enabled_obj) } +NPY_FINLINE void +indicate_hugepages(void *p, size_t size) { +#ifdef NPY_OS_LINUX + /* allow kernel allocating huge pages for large arrays */ + if (NPY_UNLIKELY(size >= ((1u<<22u))) && + npy_global_state.madvise_hugepage) { + npy_uintp offset = 4096u - (npy_uintp)p % (4096u); + npy_uintp length = size - offset; + /** + * Intentionally not checking for errors that may be returned by + * older kernel versions; optimistically tries enabling huge pages. + */ + madvise((void*)((npy_uintp)p + offset), length, MADV_HUGEPAGE); + } +#endif +} + + /* as the cache is managed in global variables verify the GIL is held */ /* @@ -96,7 +167,7 @@ _npy_alloc_cache(npy_uintp nelem, npy_uintp esz, npy_uint msz, assert((esz == 1 && cache == datacache) || (esz == sizeof(npy_intp) && cache == dimcache)); assert(PyGILState_Check()); -#ifndef Py_GIL_DISABLED +#if USE_ALLOC_CACHE if (nelem < msz) { if (cache[nelem].available > 0) { return cache[nelem].ptrs[--(cache[nelem].available)]; @@ -105,21 +176,7 @@ _npy_alloc_cache(npy_uintp nelem, npy_uintp esz, npy_uint msz, #endif p = alloc(nelem * esz); if (p) { -#ifdef _PyPyGC_AddMemoryPressure - _PyPyPyGC_AddMemoryPressure(nelem * esz); -#endif -#ifdef NPY_OS_LINUX - /* allow kernel allocating huge pages for large arrays */ - if (NPY_UNLIKELY(nelem * esz >= ((1u<<22u))) && _madvise_hugepage) { - npy_uintp offset = 4096u - (npy_uintp)p % (4096u); - npy_uintp length = nelem * esz - offset; - /** - * Intentionally not checking for errors that may be returned by - * 
older kernel versions; optimistically tries enabling huge pages. - */ - madvise((void*)((npy_uintp)p + offset), length, MADV_HUGEPAGE); - } -#endif + indicate_hugepages(p, nelem * esz); } return p; } @@ -133,7 +190,7 @@ _npy_free_cache(void * p, npy_uintp nelem, npy_uint msz, cache_bucket * cache, void (*dealloc)(void *)) { assert(PyGILState_Check()); -#ifndef Py_GIL_DISABLED +#if USE_ALLOC_CACHE if (p != NULL && nelem < msz) { if (cache[nelem].available < NCACHE) { cache[nelem].ptrs[cache[nelem].available++] = p; @@ -160,7 +217,6 @@ npy_alloc_cache_zero(size_t nmemb, size_t size) { void * p; size_t sz = nmemb * size; - NPY_BEGIN_THREADS_DEF; if (sz < NBUCKETS) { p = _npy_alloc_cache(sz, 1, NBUCKETS, datacache, &PyDataMem_NEW); if (p) { @@ -168,9 +224,10 @@ npy_alloc_cache_zero(size_t nmemb, size_t size) } return p; } - NPY_BEGIN_THREADS; p = PyDataMem_NEW_ZEROED(nmemb, size); - NPY_END_THREADS; + if (p) { + indicate_hugepages(p, sz); + } return p; } @@ -195,7 +252,7 @@ npy_alloc_cache_dim(npy_uintp sz) sz = 2; } return _npy_alloc_cache(sz, sizeof(npy_intp), NBUCKETS_DIM, dimcache, - &PyArray_malloc); + &PyMem_RawMalloc); } NPY_NO_EXPORT void @@ -236,8 +293,12 @@ PyDataMem_NEW(size_t size) void *result; assert(size != 0); - result = malloc(size); - PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + result = PyMem_RawMalloc(size); + int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + if (ret == -1) { + PyMem_RawFree(result); + return NULL; + } return result; } @@ -249,8 +310,12 @@ PyDataMem_NEW_ZEROED(size_t nmemb, size_t size) { void *result; - result = calloc(nmemb, size); - PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, nmemb * size); + result = PyMem_RawCalloc(nmemb, size); + int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, nmemb * size); + if (ret == -1) { + PyMem_RawFree(result); + return NULL; + } return result; } @@ -261,7 +326,7 @@ NPY_NO_EXPORT void PyDataMem_FREE(void *ptr) { 
PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); - free(ptr); + PyMem_RawFree(ptr); } /*NUMPY_API @@ -273,11 +338,13 @@ PyDataMem_RENEW(void *ptr, size_t size) void *result; assert(size != 0); - result = realloc(ptr, size); - if (result != ptr) { - PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); + PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); + result = PyMem_RawRealloc(ptr, size); + int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + if (ret == -1) { + PyMem_RawFree(result); + return NULL; } - PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); return result; } @@ -287,7 +354,7 @@ PyDataMem_RENEW(void *ptr, size_t size) static inline void * default_malloc(void *NPY_UNUSED(ctx), size_t size) { - return _npy_alloc_cache(size, 1, NBUCKETS, datacache, &malloc); + return _npy_alloc_cache(size, 1, NBUCKETS, datacache, &PyMem_RawMalloc); } // The default data mem allocator calloc routine does not make use of a ctx. @@ -298,17 +365,17 @@ default_calloc(void *NPY_UNUSED(ctx), size_t nelem, size_t elsize) { void * p; size_t sz = nelem * elsize; - NPY_BEGIN_THREADS_DEF; if (sz < NBUCKETS) { - p = _npy_alloc_cache(sz, 1, NBUCKETS, datacache, &malloc); + p = _npy_alloc_cache(sz, 1, NBUCKETS, datacache, &PyMem_RawMalloc); if (p) { memset(p, 0, sz); } return p; } - NPY_BEGIN_THREADS; - p = calloc(nelem, elsize); - NPY_END_THREADS; + p = PyMem_RawCalloc(nelem, elsize); + if (p) { + indicate_hugepages(p, sz); + } return p; } @@ -318,7 +385,7 @@ default_calloc(void *NPY_UNUSED(ctx), size_t nelem, size_t elsize) static inline void * default_realloc(void *NPY_UNUSED(ctx), void *ptr, size_t new_size) { - return realloc(ptr, new_size); + return PyMem_RawRealloc(ptr, new_size); } // The default data mem allocator free routine does not make use of a ctx. 
@@ -327,7 +394,7 @@ default_realloc(void *NPY_UNUSED(ctx), void *ptr, size_t new_size) static inline void default_free(void *NPY_UNUSED(ctx), void *ptr, size_t size) { - _npy_free_cache(ptr, size, NBUCKETS, datacache, &free); + _npy_free_cache(ptr, size, NBUCKETS, datacache, &PyMem_RawFree); } /* Memory handler global default */ @@ -361,7 +428,11 @@ PyDataMem_UserNEW(size_t size, PyObject *mem_handler) } assert(size != 0); result = handler->allocator.malloc(handler->allocator.ctx, size); - PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + if (ret == -1) { + handler->allocator.free(handler->allocator.ctx, result, size); + return NULL; + } return result; } @@ -375,7 +446,11 @@ PyDataMem_UserNEW_ZEROED(size_t nmemb, size_t size, PyObject *mem_handler) return NULL; } result = handler->allocator.calloc(handler->allocator.ctx, nmemb, size); - PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, nmemb * size); + int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, nmemb * size); + if (ret == -1) { + handler->allocator.free(handler->allocator.ctx, result, size); + return NULL; + } return result; } @@ -405,11 +480,13 @@ PyDataMem_UserRENEW(void *ptr, size_t size, PyObject *mem_handler) } assert(size != 0); + PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); result = handler->allocator.realloc(handler->allocator.ctx, ptr, size); - if (result != ptr) { - PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); + int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + if (ret == -1) { + handler->allocator.free(handler->allocator.ctx, result, size); + return NULL; } - PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); return result; } @@ -533,3 +610,19 @@ get_handler_version(PyObject *NPY_UNUSED(self), PyObject *args) Py_DECREF(mem_handler); return version; } + + +/* + * Internal function to malloc, but add an overflow check 
similar to Calloc + */ +NPY_NO_EXPORT void * +_Npy_MallocWithOverflowCheck(npy_intp size, npy_intp elsize) +{ + npy_intp total_size; + if (npy_mul_sizes_with_overflow(&total_size, size, elsize)) { + return NULL; + } + return PyMem_MALLOC(total_size); +} + +} /* extern "C" */ diff --git a/numpy/_core/src/multiarray/alloc.h b/numpy/_core/src/multiarray/alloc.h index aed2095fe73c..c7c0f6d2154e 100644 --- a/numpy/_core/src/multiarray/alloc.h +++ b/numpy/_core/src/multiarray/alloc.h @@ -1,6 +1,9 @@ #ifndef NUMPY_CORE_SRC_MULTIARRAY_ALLOC_H_ #define NUMPY_CORE_SRC_MULTIARRAY_ALLOC_H_ +#ifdef __cplusplus +extern "C" { +#endif #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE #include "numpy/ndarraytypes.h" @@ -52,4 +55,72 @@ get_handler_name(PyObject *NPY_UNUSED(self), PyObject *obj); NPY_NO_EXPORT PyObject * get_handler_version(PyObject *NPY_UNUSED(self), PyObject *obj); +/* Helper to add an overflow check (and avoid inlininig this probably) */ +NPY_NO_EXPORT void * +_Npy_MallocWithOverflowCheck(npy_intp size, npy_intp elsize); + + +static inline void +_npy_init_workspace( + void **buf, void *static_buf, size_t static_buf_size, size_t elsize, size_t size) +{ + if (NPY_LIKELY(size <= static_buf_size)) { + *buf = static_buf; + } + else { + *buf = _Npy_MallocWithOverflowCheck(size, elsize); + if (*buf == NULL) { + PyErr_NoMemory(); + } + } +} + + +/* + * Helper definition macro for a small work/scratchspace. + * The `NAME` is the C array to be defined of with the type `TYPE`. + * + * The usage pattern for this is: + * + * NPY_ALLOC_WORKSPACE(arr, PyObject *, 14, n_objects); + * if (arr == NULL) { + * return -1; // Memory error is set + * } + * ... + * npy_free_workspace(arr); + * + * Notes + * ----- + * The reason is to avoid allocations in most cases, but gracefully + * succeed for large sizes as well. + * With some caches, it may be possible to malloc/calloc very quickly in which + * case we should not hesitate to replace this pattern. 
+ */ +#define NPY_DEFINE_WORKSPACE(NAME, TYPE, fixed_size) \ + TYPE NAME##_static[fixed_size]; \ + TYPE *NAME; +#define NPY_INIT_WORKSPACE(NAME, TYPE, fixed_size, size) \ + _npy_init_workspace((void **)&NAME, NAME##_static, (fixed_size), sizeof(TYPE), (size)) + +#define NPY_ALLOC_WORKSPACE(NAME, TYPE, fixed_size, size) \ + NPY_DEFINE_WORKSPACE(NAME, TYPE, fixed_size) \ + NPY_INIT_WORKSPACE(NAME, TYPE, fixed_size, size) + + +static inline void +_npy_free_workspace(void *buf, void *static_buf) +{ + if (buf != static_buf) { + PyMem_FREE(buf); + } +} + +/* Free a small workspace allocation (macro to fetch the _static name) */ +#define npy_free_workspace(NAME) \ + _npy_free_workspace(NAME, NAME##_static) + +#ifdef __cplusplus +} /* extern "C" */ +#endif + #endif /* NUMPY_CORE_SRC_MULTIARRAY_ALLOC_H_ */ diff --git a/numpy/_core/src/multiarray/argfunc.dispatch.c.src b/numpy/_core/src/multiarray/argfunc.dispatch.c.src index d79be1df5034..79dc111d2438 100644 --- a/numpy/_core/src/multiarray/argfunc.dispatch.c.src +++ b/numpy/_core/src/multiarray/argfunc.dispatch.c.src @@ -1,12 +1,4 @@ /* -*- c -*- */ -/*@targets - ** $maxopt baseline - ** sse2 sse42 xop avx2 avx512_skx - ** vsx2 - ** neon asimd - ** vx vxe - **/ - #define NPY_NO_DEPRECATED_API NPY_API_VERSION #include "simd/simd.h" diff --git a/numpy/_core/src/multiarray/array_api_standard.c b/numpy/_core/src/multiarray/array_api_standard.c new file mode 100644 index 000000000000..317fd8a69bb4 --- /dev/null +++ b/numpy/_core/src/multiarray/array_api_standard.c @@ -0,0 +1,79 @@ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + +#include + + +NPY_NO_EXPORT PyObject * +array_device(PyObject *NPY_UNUSED(self), void *NPY_UNUSED(ignored)) +{ + return PyUnicode_FromString("cpu"); +} + +NPY_NO_EXPORT PyObject * +array_to_device(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"", "stream", NULL}; + char *device = ""; + PyObject *stream = Py_None; + + if 
(!PyArg_ParseTupleAndKeywords(args, kwds, "s|$O:to_device", kwlist, + &device, + &stream)) { + return NULL; + } + + if (stream != Py_None) { + PyErr_SetString(PyExc_ValueError, + "The stream argument in to_device() " + "is not supported"); + return NULL; + } + + if (strcmp(device, "cpu") != 0) { + PyErr_Format(PyExc_ValueError, + "Unsupported device: %s. Only 'cpu' is accepted.", device); + return NULL; + } + + Py_INCREF(self); + return self; +} + +NPY_NO_EXPORT PyObject * +array_array_namespace(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"api_version", NULL}; + PyObject *array_api_version = Py_None; + + if (!PyArg_ParseTupleAndKeywords(args, kwds, "|$O:__array_namespace__", kwlist, + &array_api_version)) { + return NULL; + } + + if (array_api_version != Py_None) { + if (!PyUnicode_Check(array_api_version)) + { + PyErr_Format(PyExc_ValueError, + "Only None and strings are allowed as the Array API version, " + "but received: %S.", array_api_version); + return NULL; + } else if (PyUnicode_CompareWithASCIIString(array_api_version, "2021.12") != 0 && + PyUnicode_CompareWithASCIIString(array_api_version, "2022.12") != 0 && + PyUnicode_CompareWithASCIIString(array_api_version, "2023.12") != 0 && + PyUnicode_CompareWithASCIIString(array_api_version, "2024.12") != 0) + { + PyErr_Format(PyExc_ValueError, + "Version \"%U\" of the Array API Standard is not supported.", + array_api_version); + return NULL; + } + } + + PyObject *numpy_module = PyImport_ImportModule("numpy"); + if (numpy_module == NULL){ + return NULL; + } + + return numpy_module; +} diff --git a/numpy/_core/src/multiarray/array_api_standard.h b/numpy/_core/src/multiarray/array_api_standard.h new file mode 100644 index 000000000000..6776863701b8 --- /dev/null +++ b/numpy/_core/src/multiarray/array_api_standard.h @@ -0,0 +1,14 @@ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_ARRAY_API_STANDARD_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_ARRAY_API_STANDARD_H_ + + +NPY_NO_EXPORT 
PyObject * +array_device(PyObject *NPY_UNUSED(self), void *NPY_UNUSED(ignored)); + +NPY_NO_EXPORT PyObject * +array_to_device(PyObject *self, PyObject *args, PyObject *kwds); + +NPY_NO_EXPORT PyObject * +array_array_namespace(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds); + +#endif /* NUMPY_CORE_SRC_MULTIARRAY_ARRAY_API_STANDARD_H_ */ diff --git a/numpy/_core/src/multiarray/array_assign_array.c b/numpy/_core/src/multiarray/array_assign_array.c index 8886d1cacb40..306ed07b0ace 100644 --- a/numpy/_core/src/multiarray/array_assign_array.c +++ b/numpy/_core/src/multiarray/array_assign_array.c @@ -29,6 +29,8 @@ #include "umathmodule.h" +#define NPY_ALIGNED_CASTING_FLAG 1 + /* * Check that array data is both uint-aligned and true-aligned for all array * elements, as required by the copy/casting code in lowlevel_strided_loops.c @@ -79,7 +81,8 @@ copycast_isaligned(int ndim, npy_intp const *shape, NPY_NO_EXPORT int raw_array_assign_array(int ndim, npy_intp const *shape, PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides, - PyArray_Descr *src_dtype, char *src_data, npy_intp const *src_strides) + PyArray_Descr *src_dtype, char *src_data, npy_intp const *src_strides, + int flags) { int idim; npy_intp shape_it[NPY_MAXDIMS]; @@ -87,14 +90,11 @@ raw_array_assign_array(int ndim, npy_intp const *shape, npy_intp src_strides_it[NPY_MAXDIMS]; npy_intp coord[NPY_MAXDIMS]; - int aligned; + int aligned = (flags & NPY_ALIGNED_CASTING_FLAG) != 0; + int same_value_cast = (flags & NPY_SAME_VALUE_CASTING_FLAG) != 0; NPY_BEGIN_THREADS_DEF; - aligned = - copycast_isaligned(ndim, shape, dst_dtype, dst_data, dst_strides) && - copycast_isaligned(ndim, shape, src_dtype, src_data, src_strides); - /* Use raw iteration with no heap allocation */ if (PyArray_PrepareTwoRawArrayIter( ndim, shape, @@ -120,21 +120,25 @@ raw_array_assign_array(int ndim, npy_intp const *shape, /* Get the function to do the casting */ NPY_cast_info cast_info; - NPY_ARRAYMETHOD_FLAGS flags; + 
NPY_ARRAYMETHOD_FLAGS method_flags; if (PyArray_GetDTypeTransferFunction(aligned, src_strides_it[0], dst_strides_it[0], src_dtype, dst_dtype, 0, - &cast_info, &flags) != NPY_SUCCEED) { + &cast_info, &method_flags) != NPY_SUCCEED) { return -1; } - if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { + if (!(method_flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { npy_clear_floatstatus_barrier((char*)&src_data); } + if (same_value_cast) { + cast_info.context.flags |= NPY_SAME_VALUE_CONTEXT_FLAG; + } + /* Ensure number of elements exceeds threshold for threading */ - if (!(flags & NPY_METH_REQUIRES_PYAPI)) { + if (!(method_flags & NPY_METH_REQUIRES_PYAPI)) { npy_intp nitems = 1, i; for (i = 0; i < ndim; i++) { nitems *= shape_it[i]; @@ -144,11 +148,14 @@ raw_array_assign_array(int ndim, npy_intp const *shape, npy_intp strides[2] = {src_strides_it[0], dst_strides_it[0]}; + int result = 0; NPY_RAW_ITER_START(idim, ndim, coord, shape_it) { /* Process the innermost dimension */ char *args[2] = {src_data, dst_data}; - if (cast_info.func(&cast_info.context, - args, &shape_it[0], strides, cast_info.auxdata) < 0) { + result = cast_info.func(&cast_info.context, + args, &shape_it[0], strides, + cast_info.auxdata); + if (result < 0) { goto fail; } } NPY_RAW_ITER_TWO_NEXT(idim, ndim, coord, shape_it, @@ -158,7 +165,7 @@ raw_array_assign_array(int ndim, npy_intp const *shape, NPY_END_THREADS; NPY_cast_info_xfree(&cast_info); - if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { + if (!(method_flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { int fpes = npy_get_floatstatus_barrier((char*)&src_data); if (fpes && PyUFunc_GiveFloatingpointErrors("cast", fpes) < 0) { return -1; @@ -183,7 +190,7 @@ raw_array_wheremasked_assign_array(int ndim, npy_intp const *shape, PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides, PyArray_Descr *src_dtype, char *src_data, npy_intp const *src_strides, PyArray_Descr *wheremask_dtype, char *wheremask_data, - npy_intp const *wheremask_strides) + 
npy_intp const *wheremask_strides, int flags) { int idim; npy_intp shape_it[NPY_MAXDIMS]; @@ -192,14 +199,11 @@ raw_array_wheremasked_assign_array(int ndim, npy_intp const *shape, npy_intp wheremask_strides_it[NPY_MAXDIMS]; npy_intp coord[NPY_MAXDIMS]; - int aligned; + int aligned = (flags & NPY_ALIGNED_CASTING_FLAG) != 0; + int same_value_cast = (flags & NPY_SAME_VALUE_CASTING_FLAG) != 0; NPY_BEGIN_THREADS_DEF; - aligned = - copycast_isaligned(ndim, shape, dst_dtype, dst_data, dst_strides) && - copycast_isaligned(ndim, shape, src_dtype, src_data, src_strides); - /* Use raw iteration with no heap allocation */ if (PyArray_PrepareThreeRawArrayIter( ndim, shape, @@ -229,39 +233,45 @@ raw_array_wheremasked_assign_array(int ndim, npy_intp const *shape, /* Get the function to do the casting */ NPY_cast_info cast_info; - NPY_ARRAYMETHOD_FLAGS flags; + NPY_ARRAYMETHOD_FLAGS method_flags; if (PyArray_GetMaskedDTypeTransferFunction(aligned, src_strides_it[0], dst_strides_it[0], wheremask_strides_it[0], src_dtype, dst_dtype, wheremask_dtype, 0, - &cast_info, &flags) != NPY_SUCCEED) { + &cast_info, &method_flags) != NPY_SUCCEED) { return -1; } + if (same_value_cast) { + cast_info.context.flags |= NPY_SAME_VALUE_CONTEXT_FLAG; + } - if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { + if (!(method_flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { npy_clear_floatstatus_barrier(src_data); } - if (!(flags & NPY_METH_REQUIRES_PYAPI)) { + if (!(method_flags & NPY_METH_REQUIRES_PYAPI)) { npy_intp nitems = 1, i; for (i = 0; i < ndim; i++) { nitems *= shape_it[i]; } NPY_BEGIN_THREADS_THRESHOLDED(nitems); } + npy_intp strides[2] = {src_strides_it[0], dst_strides_it[0]}; + int result = 0; NPY_RAW_ITER_START(idim, ndim, coord, shape_it) { PyArray_MaskedStridedUnaryOp *stransfer; stransfer = (PyArray_MaskedStridedUnaryOp *)cast_info.func; /* Process the innermost dimension */ char *args[2] = {src_data, dst_data}; - if (stransfer(&cast_info.context, - args, &shape_it[0], strides, - (npy_bool 
*)wheremask_data, wheremask_strides_it[0], - cast_info.auxdata) < 0) { + result = stransfer(&cast_info.context, + args, &shape_it[0], strides, + (npy_bool *)wheremask_data, wheremask_strides_it[0], + cast_info.auxdata); + if (result < 0) { goto fail; } } NPY_RAW_ITER_THREE_NEXT(idim, ndim, coord, shape_it, @@ -272,15 +282,13 @@ raw_array_wheremasked_assign_array(int ndim, npy_intp const *shape, NPY_END_THREADS; NPY_cast_info_xfree(&cast_info); - if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { + if (!(method_flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { int fpes = npy_get_floatstatus_barrier(src_data); if (fpes && PyUFunc_GiveFloatingpointErrors("cast", fpes) < 0) { return -1; } } - return 0; - fail: NPY_END_THREADS; NPY_cast_info_xfree(&cast_info); @@ -307,7 +315,6 @@ PyArray_AssignArray(PyArrayObject *dst, PyArrayObject *src, NPY_CASTING casting) { int copied_src = 0; - npy_intp src_strides[NPY_MAXDIMS]; /* Use array_assign_scalar if 'src' NDIM is 0 */ @@ -438,12 +445,21 @@ PyArray_AssignArray(PyArrayObject *dst, PyArrayObject *src, } } + int flags = (NPY_SAME_VALUE_CASTING_FLAG & casting); + if (copycast_isaligned(PyArray_NDIM(dst), PyArray_DIMS(dst), PyArray_DESCR(dst), + PyArray_DATA(dst), PyArray_STRIDES(dst)) && + copycast_isaligned(PyArray_NDIM(dst), PyArray_DIMS(dst), PyArray_DESCR(src), + PyArray_DATA(src), src_strides)) { + /* NPY_ALIGNED_CASTING_FLAG is internal to this file */ + flags |= NPY_ALIGNED_CASTING_FLAG; + } + if (wheremask == NULL) { /* A straightforward value assignment */ /* Do the assignment with raw array iteration */ if (raw_array_assign_array(PyArray_NDIM(dst), PyArray_DIMS(dst), PyArray_DESCR(dst), PyArray_DATA(dst), PyArray_STRIDES(dst), - PyArray_DESCR(src), PyArray_DATA(src), src_strides) < 0) { + PyArray_DESCR(src), PyArray_DATA(src), src_strides, flags) < 0){ goto fail; } } @@ -465,7 +481,7 @@ PyArray_AssignArray(PyArrayObject *dst, PyArrayObject *src, PyArray_DESCR(dst), PyArray_DATA(dst), PyArray_STRIDES(dst), 
PyArray_DESCR(src), PyArray_DATA(src), src_strides, PyArray_DESCR(wheremask), PyArray_DATA(wheremask), - wheremask_strides) < 0) { + wheremask_strides, flags) < 0) { goto fail; } } diff --git a/numpy/_core/src/multiarray/array_assign_scalar.c b/numpy/_core/src/multiarray/array_assign_scalar.c index 6818c1aa2a1b..f7d04ed0a39f 100644 --- a/numpy/_core/src/multiarray/array_assign_scalar.c +++ b/numpy/_core/src/multiarray/array_assign_scalar.c @@ -37,7 +37,7 @@ NPY_NO_EXPORT int raw_array_assign_scalar(int ndim, npy_intp const *shape, PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides, - PyArray_Descr *src_dtype, char *src_data) + PyArray_Descr *src_dtype, char *src_data, NPY_CASTING casting) { int idim; npy_intp shape_it[NPY_MAXDIMS], dst_strides_it[NPY_MAXDIMS]; @@ -86,13 +86,19 @@ raw_array_assign_scalar(int ndim, npy_intp const *shape, NPY_BEGIN_THREADS_THRESHOLDED(nitems); } + if (((int)casting & NPY_SAME_VALUE_CASTING_FLAG) > 0) { + cast_info.context.flags |= NPY_SAME_VALUE_CONTEXT_FLAG; + } + npy_intp strides[2] = {0, dst_strides_it[0]}; + int result = 0; NPY_RAW_ITER_START(idim, ndim, coord, shape_it) { /* Process the innermost dimension */ char *args[2] = {src_data, dst_data}; - if (cast_info.func(&cast_info.context, - args, &shape_it[0], strides, cast_info.auxdata) < 0) { + result = cast_info.func(&cast_info.context, + args, &shape_it[0], strides, cast_info.auxdata); + if (result < 0) { goto fail; } } NPY_RAW_ITER_ONE_NEXT(idim, ndim, coord, @@ -126,7 +132,7 @@ raw_array_wheremasked_assign_scalar(int ndim, npy_intp const *shape, PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides, PyArray_Descr *src_dtype, char *src_data, PyArray_Descr *wheremask_dtype, char *wheremask_data, - npy_intp const *wheremask_strides) + npy_intp const *wheremask_strides, NPY_CASTING casting) { int idim; npy_intp shape_it[NPY_MAXDIMS], dst_strides_it[NPY_MAXDIMS]; @@ -177,8 +183,12 @@ raw_array_wheremasked_assign_scalar(int ndim, npy_intp const 
*shape, } NPY_BEGIN_THREADS_THRESHOLDED(nitems); } + if (((int)casting & NPY_SAME_VALUE_CASTING_FLAG) != 0) { + cast_info.context.flags |= NPY_SAME_VALUE_CONTEXT_FLAG; + } npy_intp strides[2] = {0, dst_strides_it[0]}; + int result = 0; NPY_RAW_ITER_START(idim, ndim, coord, shape_it) { /* Process the innermost dimension */ @@ -186,10 +196,11 @@ raw_array_wheremasked_assign_scalar(int ndim, npy_intp const *shape, stransfer = (PyArray_MaskedStridedUnaryOp *)cast_info.func; char *args[2] = {src_data, dst_data}; - if (stransfer(&cast_info.context, + result = stransfer(&cast_info.context, args, &shape_it[0], strides, (npy_bool *)wheremask_data, wheremask_strides_it[0], - cast_info.auxdata) < 0) { + cast_info.auxdata); + if (result < 0) { goto fail; } } NPY_RAW_ITER_TWO_NEXT(idim, ndim, coord, shape_it, @@ -243,8 +254,7 @@ PyArray_AssignRawScalar(PyArrayObject *dst, } /* Check the casting rule */ - if (!can_cast_scalar_to(src_dtype, src_data, - PyArray_DESCR(dst), casting)) { + if (!PyArray_CanCastTypeTo(src_dtype, PyArray_DESCR(dst), casting)) { npy_set_invalid_cast_error( src_dtype, PyArray_DESCR(dst), casting, NPY_TRUE); return -1; @@ -299,7 +309,7 @@ PyArray_AssignRawScalar(PyArrayObject *dst, /* Do the assignment with raw array iteration */ if (raw_array_assign_scalar(PyArray_NDIM(dst), PyArray_DIMS(dst), PyArray_DESCR(dst), PyArray_DATA(dst), PyArray_STRIDES(dst), - src_dtype, src_data) < 0) { + src_dtype, src_data, casting) < 0) { goto fail; } } @@ -320,7 +330,7 @@ PyArray_AssignRawScalar(PyArrayObject *dst, PyArray_DESCR(dst), PyArray_DATA(dst), PyArray_STRIDES(dst), src_dtype, src_data, PyArray_DESCR(wheremask), PyArray_DATA(wheremask), - wheremask_strides) < 0) { + wheremask_strides, casting) < 0) { goto fail; } } diff --git a/numpy/_core/src/multiarray/array_coercion.c b/numpy/_core/src/multiarray/array_coercion.c index 51aa874bf934..24982a4fdc1e 100644 --- a/numpy/_core/src/multiarray/array_coercion.c +++ b/numpy/_core/src/multiarray/array_coercion.c @@ -6,6 
+6,7 @@ #include #include "numpy/npy_3kcompat.h" +#include "npy_pycompat.h" #include "lowlevel_strided_loops.h" #include "numpy/arrayobject.h" @@ -224,36 +225,39 @@ npy_discover_dtype_from_pytype(PyTypeObject *pytype) PyObject *DType; if (pytype == &PyArray_Type) { - DType = Py_None; + DType = Py_NewRef(Py_None); } else if (pytype == &PyFloat_Type) { - DType = (PyObject *)&PyArray_PyFloatDType; + DType = Py_NewRef((PyObject *)&PyArray_PyFloatDType); } else if (pytype == &PyLong_Type) { - DType = (PyObject *)&PyArray_PyLongDType; + DType = Py_NewRef((PyObject *)&PyArray_PyLongDType); } else { - DType = PyDict_GetItem(_global_pytype_to_type_dict, - (PyObject *)pytype); + int res = PyDict_GetItemRef(_global_pytype_to_type_dict, + (PyObject *)pytype, (PyObject **)&DType); - if (DType == NULL) { - /* the python type is not known */ + if (res <= 0) { + /* the python type is not known or an error was set */ return NULL; } } - Py_INCREF(DType); assert(DType == Py_None || PyObject_TypeCheck(DType, (PyTypeObject *)&PyArrayDTypeMeta_Type)); return (PyArray_DTypeMeta *)DType; } /* - * Note: This function never fails, but will return `NULL` for unknown scalars - * and `None` for known array-likes (e.g. tuple, list, ndarray). + * Note: This function never fails, but will return `NULL` for unknown scalars or + * known array-likes (e.g. tuple, list, ndarray). */ NPY_NO_EXPORT PyObject * PyArray_DiscoverDTypeFromScalarType(PyTypeObject *pytype) { - return (PyObject *)npy_discover_dtype_from_pytype(pytype); + PyObject *DType = (PyObject *)npy_discover_dtype_from_pytype(pytype); + if (DType == NULL || DType == Py_None) { + return NULL; + } + return DType; } @@ -475,20 +479,13 @@ npy_cast_raw_scalar_item( NPY_NO_EXPORT int PyArray_Pack(PyArray_Descr *descr, void *item, PyObject *value) { - PyArrayObject_fields arr_fields = { - .flags = NPY_ARRAY_WRITEABLE, /* assume array is not behaved. 
*/ - }; - Py_SET_TYPE(&arr_fields, &PyArray_Type); - Py_SET_REFCNT(&arr_fields, 1); - if (NPY_UNLIKELY(descr->type_num == NPY_OBJECT)) { /* * We always have store objects directly, casting will lose some * type information. Any other dtype discards the type information. * TODO: For a Categorical[object] this path may be necessary? */ - arr_fields.descr = descr; - return PyDataType_GetArrFuncs(descr)->setitem(value, item, &arr_fields); + return NPY_DT_CALL_setitem(descr, value, item); } /* discover_dtype_from_pyobject includes a check for is_known_scalar_type */ @@ -523,8 +520,7 @@ PyArray_Pack(PyArray_Descr *descr, void *item, PyObject *value) if (DType == NPY_DTYPE(descr) || DType == (PyArray_DTypeMeta *)Py_None) { /* We can set the element directly (or at least will try to) */ Py_XDECREF(DType); - arr_fields.descr = descr; - return PyDataType_GetArrFuncs(descr)->setitem(value, item, &arr_fields); + return NPY_DT_CALL_setitem(descr, value, item); } PyArray_Descr *tmp_descr; tmp_descr = NPY_DT_CALL_discover_descr_from_pyobject(DType, value); @@ -542,8 +538,7 @@ PyArray_Pack(PyArray_Descr *descr, void *item, PyObject *value) if (PyDataType_FLAGCHK(tmp_descr, NPY_NEEDS_INIT)) { memset(data, 0, tmp_descr->elsize); } - arr_fields.descr = tmp_descr; - if (PyDataType_GetArrFuncs(tmp_descr)->setitem(value, data, &arr_fields) < 0) { + if (NPY_DT_CALL_setitem(tmp_descr, value, data) < 0) { PyObject_Free(data); Py_DECREF(tmp_descr); return -1; @@ -660,8 +655,8 @@ npy_new_coercion_cache( /** * Unlink coercion cache item. * - * @param current - * @return next coercion cache object (or NULL) + * @param current This coercion cache object + * @return next Next coercion cache object (or NULL) */ NPY_NO_EXPORT coercion_cache_obj * npy_unlink_coercion_cache(coercion_cache_obj *current) @@ -905,7 +900,7 @@ find_descriptor_from_array( * it supports inspecting the elements when the array has object dtype * (and the given datatype describes a parametric DType class). 
* - * @param arr + * @param arr The array object. * @param dtype NULL or a dtype class * @param descr A dtype instance, if the dtype is NULL the dtype class is * found and e.g. "S0" is converted to denote only String. @@ -1143,8 +1138,8 @@ PyArray_DiscoverDTypeAndShape_Recursive( force_sequence_due_to_char_dtype: - /* Ensure we have a sequence (required for PyPy) */ - seq = PySequence_Fast(obj, "Could not convert object to sequence"); + /* Ensure we have a sequence */ + seq = PySequence_Fast(obj, "Could not convert object to sequence"); // noqa: borrowed-ref - manual fix needed if (seq == NULL) { /* * Specifically do not fail on things that look like a dictionary, @@ -1164,6 +1159,10 @@ PyArray_DiscoverDTypeAndShape_Recursive( return -1; } + int ret = -1; + + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(obj); + npy_intp size = PySequence_Fast_GET_SIZE(seq); PyObject **objects = PySequence_Fast_ITEMS(seq); @@ -1171,21 +1170,23 @@ PyArray_DiscoverDTypeAndShape_Recursive( out_shape, 1, &size, NPY_TRUE, flags) < 0) { /* But do update, if there this is a ragged case */ *flags |= FOUND_RAGGED_ARRAY; - return max_dims; + ret = max_dims; + goto finish; } if (size == 0) { /* If the sequence is empty, this must be the last dimension */ *flags |= MAX_DIMS_WAS_REACHED; - return curr_dims + 1; + ret = curr_dims + 1; + goto finish; } /* Allow keyboard interrupts. See gh issue 18117. */ if (PyErr_CheckSignals() < 0) { - return -1; + goto finish; } /* - * For a sequence we need to make a copy of the final aggreate anyway. + * For a sequence we need to make a copy of the final aggregate anyway. * There's no need to pass explicit `copy=True`, so we switch * to `copy=None` (copy if needed). 
*/ @@ -1201,10 +1202,16 @@ PyArray_DiscoverDTypeAndShape_Recursive( flags, copy); if (max_dims < 0) { - return -1; + goto finish; } } - return max_dims; + ret = max_dims; + + finish:; + + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST(); + + return ret; } diff --git a/numpy/_core/src/multiarray/array_converter.c b/numpy/_core/src/multiarray/array_converter.c index 5dea748688e9..578e7b1554f4 100644 --- a/numpy/_core/src/multiarray/array_converter.c +++ b/numpy/_core/src/multiarray/array_converter.c @@ -21,7 +21,7 @@ #include "abstractdtypes.h" #include "convert_datatype.h" #include "descriptor.h" -#include "multiarraymodule.h" +#include "npy_static_data.h" #include "ctors.h" #include "npy_config.h" @@ -83,7 +83,7 @@ array_converter_new( } else { item->array = (PyArrayObject *)PyArray_FromAny_int( - item->object, NULL, NULL, 0, 0, 0, NULL, + item->object, NULL, NULL, 0, NPY_MAXDIMS, 0, NULL, &item->scalar_input); if (item->array == NULL) { goto fail; @@ -97,7 +97,7 @@ array_converter_new( Py_INCREF(item->DType); /* - * Check whether we were passed a an int/float/complex Python scalar. + * Check whether we were passed an int/float/complex Python scalar. * If not, set `descr` and clear pyscalar/scalar flags as needed. */ if (item->scalar_input && npy_mark_tmp_array_if_pyscalar( @@ -186,8 +186,8 @@ static int pyscalar_mode_conv(PyObject *obj, scalar_policy *policy) { PyObject *strings[3] = { - npy_ma_str_convert, npy_ma_str_preserve, - npy_ma_str_convert_if_no_array}; + npy_interned_str.convert, npy_interned_str.preserve, + npy_interned_str.convert_if_no_array}; /* First quick pass using the identity (should practically always match) */ for (int i = 0; i < 3; i++) { diff --git a/numpy/_core/src/multiarray/array_method.c b/numpy/_core/src/multiarray/array_method.c index ac8a73aea005..c7280435d3c3 100644 --- a/numpy/_core/src/multiarray/array_method.c +++ b/numpy/_core/src/multiarray/array_method.c @@ -4,7 +4,7 @@ * pointers to do fast operations on the given input functions. 
* It thus adds an abstraction layer around individual ufunc loops. * - * Unlike methods, a ArrayMethod can have multiple inputs and outputs. + * Unlike methods, an ArrayMethod can have multiple inputs and outputs. * This has some serious implication for garbage collection, and as far * as I (@seberg) understands, it is not possible to always guarantee correct * cyclic garbage collection of dynamically created DTypes with methods. @@ -30,6 +30,7 @@ #define _UMATHMODULE #define _MULTIARRAYMODULE +#include #include #include "arrayobject.h" #include "array_coercion.h" @@ -38,6 +39,7 @@ #include "convert_datatype.h" #include "common.h" #include "numpy/ufuncobject.h" +#include "dtype_transfer.h" /* @@ -122,15 +124,16 @@ is_contiguous( * true, i.e., for cast safety "no-cast". It will not recognize view as an * option for other casts (e.g., viewing '>i8' as '>i4' with an offset of 4). * - * @param context - * @param aligned - * @param move_references UNUSED. - * @param strides - * @param descriptors - * @param out_loop - * @param out_transferdata - * @param flags - * @return 0 on success -1 on failure. + * @param context The arraymethod context + * @param aligned Flag indicating data is aligned (1) or not (0) + * param move_references UNUSED -- listed below but doxygen doesn't see as a parameter + * @param strides Array of step sizes for each dimension of the arrays involved + * @param out_loop Output pointer to the function that will perform the strided loop. + * @param out_transferdata Output pointer to auxiliary data (if any) + * needed by the out_loop function. + * @param flags Output pointer to additional flags (if any) + * needed by the out_loop function + * @returns 0 on success -1 on failure. */ NPY_NO_EXPORT int npy_default_get_strided_loop( @@ -168,7 +171,7 @@ npy_default_get_strided_loop( /** * Validate that the input is usable to create a new ArrayMethod. 
* - * @param spec + * @param spec Array method specification to be validated * @return 0 on success -1 on error. */ static int @@ -182,12 +185,17 @@ validate_spec(PyArrayMethod_Spec *spec) "not exceed %d. (method: %s)", NPY_MAXARGS, spec->name); return -1; } - switch (spec->casting) { + switch ((int)spec->casting) { case NPY_NO_CASTING: case NPY_EQUIV_CASTING: case NPY_SAFE_CASTING: case NPY_SAME_KIND_CASTING: case NPY_UNSAFE_CASTING: + case NPY_NO_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + case NPY_EQUIV_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + case NPY_SAFE_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + case NPY_SAME_KIND_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + case NPY_UNSAFE_CASTING | NPY_SAME_VALUE_CASTING_FLAG: break; default: if (spec->casting != -1) { @@ -666,10 +674,11 @@ boundarraymethod__resolve_descripors( if (!parametric) { /* * Non-parametric can only mismatch if it switches from equiv to no - * (e.g. due to byteorder changes). + * (e.g. due to byteorder changes). Throw away same_value casting flag */ + int method_casting = self->method->casting & ~NPY_SAME_VALUE_CASTING_FLAG; if (cast != self->method->casting && - self->method->casting != NPY_EQUIV_CASTING) { + method_casting != NPY_EQUIV_CASTING) { PyErr_Format(PyExc_RuntimeError, "resolve_descriptors cast level changed even though " "the cast is non-parametric where the only possible " @@ -790,11 +799,10 @@ boundarraymethod__simple_strided_call( return NULL; } - PyArrayMethod_Context context = { - .caller = NULL, - .method = self->method, - .descriptors = descrs, - }; + PyArrayMethod_Context context; + NPY_context_init(&context, descrs); + context.method = self->method; + PyArrayMethod_StridedLoop *strided_loop = NULL; NpyAuxData *loop_data = NULL; NPY_ARRAYMETHOD_FLAGS flags = 0; @@ -982,3 +990,4 @@ NPY_NO_EXPORT PyTypeObject PyBoundArrayMethod_Type = { .tp_methods = boundarraymethod_methods, .tp_getset = boundarraymethods_getters, }; + diff --git a/numpy/_core/src/multiarray/array_method.h 
b/numpy/_core/src/multiarray/array_method.h index bcf270899f13..303425e38274 100644 --- a/numpy/_core/src/multiarray/array_method.h +++ b/numpy/_core/src/multiarray/array_method.h @@ -69,7 +69,7 @@ typedef struct PyArrayMethodObject_tag { /* - * We will sometimes have to create a ArrayMethod and allow passing it around, + * We will sometimes have to create an ArrayMethod and allow passing it around, * similar to `instance.method` returning a bound method, e.g. a function like * `ufunc.resolve()` can return a bound object. * The current main purpose of the BoundArrayMethod is that it holds on to the diff --git a/numpy/_core/src/multiarray/arrayfunction_override.c b/numpy/_core/src/multiarray/arrayfunction_override.c index 20223e1449fb..72211e2a6d62 100644 --- a/numpy/_core/src/multiarray/arrayfunction_override.c +++ b/numpy/_core/src/multiarray/arrayfunction_override.c @@ -4,24 +4,15 @@ #include #include "structmember.h" +#include "numpy/ndarrayobject.h" #include "numpy/ndarraytypes.h" #include "get_attr_string.h" #include "npy_import.h" +#include "npy_static_data.h" #include "multiarraymodule.h" #include "arrayfunction_override.h" -/* Return the ndarray.__array_function__ method. */ -static PyObject * -get_ndarray_array_function(void) -{ - PyObject* method = PyObject_GetAttrString((PyObject *)&PyArray_Type, - "__array_function__"); - assert(method != NULL); - return method; -} - - /* * Get an object's __array_function__ method in the fastest way possible. * Never raises an exception. Returns NULL if the method doesn't exist. 
@@ -29,20 +20,15 @@ get_ndarray_array_function(void) static PyObject * get_array_function(PyObject *obj) { - static PyObject *ndarray_array_function = NULL; - - if (ndarray_array_function == NULL) { - ndarray_array_function = get_ndarray_array_function(); - } - /* Fast return for ndarray */ if (PyArray_CheckExact(obj)) { - Py_INCREF(ndarray_array_function); - return ndarray_array_function; + Py_INCREF(npy_static_pydata.ndarray_array_function); + return npy_static_pydata.ndarray_array_function; } - PyObject *array_function = PyArray_LookupSpecial(obj, npy_ma_str_array_function); - if (array_function == NULL && PyErr_Occurred()) { + PyObject *array_function; + if (PyArray_LookupSpecial( + obj, npy_interned_str.array_function, &array_function) < 0) { PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */ } @@ -142,12 +128,7 @@ get_implementing_args_and_methods(PyObject *relevant_args, static int is_default_array_function(PyObject *obj) { - static PyObject *ndarray_array_function = NULL; - - if (ndarray_array_function == NULL) { - ndarray_array_function = get_ndarray_array_function(); - } - return obj == ndarray_array_function; + return obj == npy_static_pydata.ndarray_array_function; } @@ -174,11 +155,22 @@ array_function_method_impl(PyObject *func, PyObject *types, PyObject *args, return Py_NotImplemented; } } - - PyObject *implementation = PyObject_GetAttr(func, npy_ma_str_implementation); - if (implementation == NULL) { + /* + * Python functions are wrapped, and we should now call their + * implementation, so that we do not dispatch a second time + * on possible subclasses. + * C functions that can be overridden with "like" are not wrapped and + * thus do not have an _implementation attribute, but since the like + * keyword has been removed, we can safely call those directly. 
+ */ + PyObject *implementation; + if (PyObject_GetOptionalAttr( + func, npy_interned_str.implementation, &implementation) < 0) { return NULL; } + else if (implementation == NULL) { + return PyObject_Call(func, args, kwargs); + } PyObject *result = PyObject_Call(implementation, args, kwargs); Py_DECREF(implementation); return result; @@ -252,14 +244,14 @@ get_args_and_kwargs( static void set_no_matching_types_error(PyObject *public_api, PyObject *types) { - static PyObject *errmsg_formatter = NULL; /* No acceptable override found, raise TypeError. */ - npy_cache_import("numpy._core._internal", - "array_function_errmsg_formatter", - &errmsg_formatter); - if (errmsg_formatter != NULL) { + if (npy_cache_import_runtime( + "numpy._core._internal", + "array_function_errmsg_formatter", + &npy_runtime_imports.array_function_errmsg_formatter) == 0) { PyObject *errmsg = PyObject_CallFunctionObjArgs( - errmsg_formatter, public_api, types, NULL); + npy_runtime_imports.array_function_errmsg_formatter, + public_api, types, NULL); if (errmsg != NULL) { PyErr_SetObject(PyExc_TypeError, errmsg); Py_DECREF(errmsg); @@ -321,12 +313,12 @@ array_implement_c_array_function_creation( } /* The like argument must be present in the keyword arguments, remove it */ - if (PyDict_DelItem(kwargs, npy_ma_str_like) < 0) { + if (PyDict_DelItem(kwargs, npy_interned_str.like) < 0) { goto finish; } /* Fetch the actual symbol (the long way right now) */ - numpy_module = PyImport_Import(npy_ma_str_numpy); + numpy_module = PyImport_Import(npy_interned_str.numpy); if (numpy_module == NULL) { goto finish; } @@ -379,7 +371,7 @@ array__get_implementing_args( return NULL; } - relevant_args = PySequence_Fast( + relevant_args = PySequence_Fast( // noqa: borrowed-ref OK relevant_args, "dispatcher for __array_function__ did not return an iterable"); if (relevant_args == NULL) { @@ -526,7 +518,7 @@ dispatcher_vectorcall(PyArray_ArrayFunctionDispatcherObject *self, fix_name_if_typeerror(self); return NULL; } - 
Py_SETREF(relevant_args, PySequence_Fast(relevant_args, + Py_SETREF(relevant_args, PySequence_Fast(relevant_args, // noqa: borrowed-ref OK "dispatcher for __array_function__ did not return an iterable")); if (relevant_args == NULL) { return NULL; diff --git a/numpy/_core/src/multiarray/arrayobject.c b/numpy/_core/src/multiarray/arrayobject.c index 5139bc8b4f00..460b007d7a7d 100644 --- a/numpy/_core/src/multiarray/arrayobject.c +++ b/numpy/_core/src/multiarray/arrayobject.c @@ -62,9 +62,7 @@ maintainer email: oliphant.travis@ieee.org #include "binop_override.h" #include "array_coercion.h" - - -NPY_NO_EXPORT npy_bool numpy_warn_if_no_mem_policy = 0; +#include "multiarraymodule.h" /*NUMPY_API Compute the size of an array (in number of items) @@ -251,7 +249,7 @@ PyArray_CopyObject(PyArrayObject *dest, PyObject *src_object) */ ndim = PyArray_DiscoverDTypeAndShape(src_object, PyArray_NDIM(dest), dims, &cache, - NPY_DTYPE(PyArray_DESCR(dest)), PyArray_DESCR(dest), &dtype, 1, NULL); + NPY_DTYPE(PyArray_DESCR(dest)), PyArray_DESCR(dest), &dtype, -1, NULL); if (ndim < 0) { return -1; } @@ -360,80 +358,87 @@ PyArray_ResolveWritebackIfCopy(PyArrayObject * self) /*********************** end C-API functions **********************/ -/* dealloc must not raise an error, best effort try to write - to stderr and clear the error -*/ - -static inline void -WARN_IN_DEALLOC(PyObject* warning, const char * msg) { - if (PyErr_WarnEx(warning, msg, 1) < 0) { - PyObject * s; - - s = PyUnicode_FromString("array_dealloc"); - if (s) { - PyErr_WriteUnraisable(s); - Py_DECREF(s); - } - else { - PyErr_WriteUnraisable(Py_None); - } +/* + * During dealloc we cannot propagate errors so if unraisable is set + * we simply print out the error message and convert the error into + * success (returning 0). 
+ */ +static inline int +write_and_clear_error_if_unraisable(int status, npy_bool unraisable) +{ + if (status < 0 && unraisable) { + PyErr_WriteUnraisable(npy_interned_str.array_dealloc); + return 0; } + return status; } /* array object functions */ -static void -array_dealloc(PyArrayObject *self) +/* + * Much of the actual work for dealloc, split off for use in __setstate__ + * via clear_array_attributes function defined below. + * If not unraisable, will return -1 on error, 0 on success. + * If unraisable, always succeeds, though may print errors and warnings. + */ +static int +_clear_array_attributes(PyArrayObject *self, npy_bool unraisable) { PyArrayObject_fields *fa = (PyArrayObject_fields *)self; if (_buffer_info_free(fa->_buffer_info, (PyObject *)self) < 0) { - PyErr_WriteUnraisable(NULL); + if (write_and_clear_error_if_unraisable(-1, unraisable) < 0) { + return -1; + } } + fa->_buffer_info = NULL; - if (fa->weakreflist != NULL) { - PyObject_ClearWeakRefs((PyObject *)self); - } if (fa->base) { - int retval; if (PyArray_FLAGS(self) & NPY_ARRAY_WRITEBACKIFCOPY) { - char const * msg = "WRITEBACKIFCOPY detected in array_dealloc. " + char const * msg = "WRITEBACKIFCOPY detected in clearing of array. " " Required call to PyArray_ResolveWritebackIfCopy or " "PyArray_DiscardWritebackIfCopy is missing."; + int retval = PyErr_WarnEx(PyExc_RuntimeWarning, msg, 1); + if (write_and_clear_error_if_unraisable(retval, unraisable) < 0) { + return -1; + } /* * prevent reaching 0 twice and thus recursing into dealloc. * Increasing sys.gettotalrefcount, but path should not be taken. 
*/ Py_INCREF(self); - WARN_IN_DEALLOC(PyExc_RuntimeWarning, msg); retval = PyArray_ResolveWritebackIfCopy(self); - if (retval < 0) - { - PyErr_Print(); - PyErr_Clear(); + if (write_and_clear_error_if_unraisable(retval, unraisable) < 0) { + return -1; } } /* * If fa->base is non-NULL, it is something * to DECREF -- either a view or a buffer object */ - Py_XDECREF(fa->base); + Py_CLEAR(fa->base); } if ((fa->flags & NPY_ARRAY_OWNDATA) && fa->data) { /* Free any internal references */ if (PyDataType_REFCHK(fa->descr)) { if (PyArray_ClearArray(self) < 0) { - PyErr_WriteUnraisable(NULL); + if (write_and_clear_error_if_unraisable(-1, unraisable) < 0) { + return -1; + } } } + /* mem_handler can be absent if NPY_ARRAY_OWNDATA arbitrarily set */ if (fa->mem_handler == NULL) { - if (numpy_warn_if_no_mem_policy) { + if (npy_global_state.warn_if_no_mem_policy) { char const *msg = "Trying to dealloc data, but a memory policy " "is not set. If you take ownership of the data, you must " "set a base owning the data (e.g. a PyCapsule)."; - WARN_IN_DEALLOC(PyExc_RuntimeWarning, msg); + int retval = PyErr_WarnEx(PyExc_RuntimeWarning, msg, 1); + if (write_and_clear_error_if_unraisable(retval, unraisable) < 0) { + return -1; + } } // Guess at malloc/free ??? free(fa->data); @@ -444,16 +449,40 @@ array_dealloc(PyArrayObject *self) nbytes = 1; } PyDataMem_UserFREE(fa->data, nbytes, fa->mem_handler); - Py_DECREF(fa->mem_handler); + Py_CLEAR(fa->mem_handler); } + fa->data = NULL; } /* must match allocation in PyArray_NewFromDescr */ npy_free_cache_dim(fa->dimensions, 2 * fa->nd); - Py_DECREF(fa->descr); + fa->dimensions = NULL; + Py_CLEAR(fa->descr); + return 0; +} + +static void +array_dealloc(PyArrayObject *self) +{ + // NPY_TRUE flags that errors are unraisable. + int ret = _clear_array_attributes(self, NPY_TRUE); + // silence unused variable warning in release builds + (void)ret; + assert(ret == 0); // should always succeed if unraisable. 
+ // Only done on actual deallocation, nothing allocated by numpy. + if (((PyArrayObject_fields *)self)->weakreflist != NULL) { + PyObject_ClearWeakRefs((PyObject *)self); + } Py_TYPE(self)->tp_free((PyObject *)self); } +NPY_NO_EXPORT int +clear_array_attributes(PyArrayObject *self) +{ + // NPY_FALSE flags that errors can be raised. + return _clear_array_attributes(self, NPY_FALSE); +} + /*NUMPY_API * Prints the raw data of the ndarray in a form useful for debugging * low-level C issues. @@ -927,10 +956,14 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op) */ if (result == NULL && (cmp_op == Py_EQ || cmp_op == Py_NE) - && PyErr_ExceptionMatches(npy_UFuncNoLoopError)) { + && PyErr_ExceptionMatches( + npy_static_pydata._UFuncNoLoopError)) { PyErr_Clear(); PyArrayObject *array_other = (PyArrayObject *)PyArray_FROM_O(other); + if (array_other == NULL) { + return NULL; + } if (PyArray_TYPE(array_other) == NPY_VOID) { /* * Void arrays are currently not handled by ufuncs, so if the other @@ -941,13 +974,17 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op) } if (PyArray_NDIM(self) == 0 && PyArray_NDIM(array_other) == 0) { - /* - * (seberg) not sure that this is best, but we preserve Python - * bool result for "scalar" inputs for now by returning - * `NotImplemented`. - */ + // we have scalar arrays with different types + // we return a numpy bool directly instead of NotImplemented, + // which would mean a fallback to the python default __eq__/__neq__ + // see gh-27271 Py_DECREF(array_other); - Py_RETURN_NOTIMPLEMENTED; + if (cmp_op == Py_EQ) { + return Py_NewRef(PyArrayScalar_False); + } + else { + return Py_NewRef(PyArrayScalar_True); + } } /* Hack warning: using NpyIter to allocate broadcasted result. 
*/ @@ -1229,7 +1266,7 @@ NPY_NO_EXPORT PyTypeObject PyArray_Type = { .tp_as_mapping = &array_as_mapping, .tp_str = (reprfunc)array_str, .tp_as_buffer = &array_as_buffer, - .tp_flags =(Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE), + .tp_flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_SEQUENCE), .tp_richcompare = (richcmpfunc)array_richcompare, .tp_weaklistoffset = offsetof(PyArrayObject_fields, weakreflist), diff --git a/numpy/_core/src/multiarray/arrayobject.h b/numpy/_core/src/multiarray/arrayobject.h index 03e59c41ca92..d0f95dc228c7 100644 --- a/numpy/_core/src/multiarray/arrayobject.h +++ b/numpy/_core/src/multiarray/arrayobject.h @@ -9,8 +9,6 @@ extern "C" { #endif -extern NPY_NO_EXPORT npy_bool numpy_warn_if_no_mem_policy; - NPY_NO_EXPORT PyObject * _strings_richcompare(PyArrayObject *self, PyArrayObject *other, int cmp_op, int rstrip); @@ -21,6 +19,13 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op); NPY_NO_EXPORT int array_might_be_written(PyArrayObject *obj); +/* + * For use in __setstate__, where pickle gives us an instance on which we + * have to replace all the actual data. Returns 0 on success, -1 on error. + */ +NPY_NO_EXPORT int +clear_array_attributes(PyArrayObject *self); + /* * This flag is used to mark arrays which we would like to, in the future, * turn into views. 
It causes a warning to be issued on the first attempt to diff --git a/numpy/_core/src/multiarray/arraytypes.c.src b/numpy/_core/src/multiarray/arraytypes.c.src index 1ecfc6d94cd7..d67bdd046c6d 100644 --- a/numpy/_core/src/multiarray/arraytypes.c.src +++ b/numpy/_core/src/multiarray/arraytypes.c.src @@ -3,6 +3,7 @@ #include #include #include +#include #include #define NPY_NO_DEPRECATED_API NPY_API_VERSION @@ -15,6 +16,7 @@ #include "npy_pycompat.h" #include "numpy/npy_math.h" #include "numpy/halffloat.h" +#include "numpy/dtype_api.h" #include "npy_config.h" #include "npy_sort.h" @@ -42,6 +44,21 @@ #include "arraytypes.h" #include "umathmodule.h" +#include "npy_static_data.h" + +/**begin repeat + * #NAME = BOOL, + * BYTE, UBYTE, SHORT, USHORT, INT, UINT, + * LONG, ULONG, LONGLONG, ULONGLONG, + * HALF, FLOAT, DOUBLE, LONGDOUBLE, + * CFLOAT, CDOUBLE, CLONGDOUBLE, + * DATETIME, TIMEDELTA# + */ +static inline void +@NAME@_copyswap(void *dst, void *src, int swap, void *arr); + +/**end repeat**/ + /* * Define a stack allocated dummy array with only the minimum information set: @@ -95,7 +112,7 @@ MyPyFloat_AsDouble(PyObject *obj) } num = PyNumber_Float(obj); if (num == NULL) { - return NPY_NAN; + return -1; } ret = PyFloat_AS_DOUBLE(num); Py_DECREF(num); @@ -107,6 +124,9 @@ static float MyPyFloat_AsFloat(PyObject *obj) { double d_val = MyPyFloat_AsDouble(obj); + if (error_converting(d_val)) { + return -1; + } float res = (float)d_val; if (NPY_UNLIKELY(npy_isinf(res) && !npy_isinf(d_val))) { if (PyUFunc_GiveFloatingpointErrors("cast", NPY_FPE_OVERFLOW) < 0) { @@ -121,10 +141,13 @@ static npy_half MyPyFloat_AsHalf(PyObject *obj) { double d_val = MyPyFloat_AsDouble(obj); + if (error_converting(d_val)) { + return -1; + } npy_half res = npy_double_to_half(d_val); if (NPY_UNLIKELY(npy_half_isinf(res) && !npy_isinf(d_val))) { if (PyUFunc_GiveFloatingpointErrors("cast", NPY_FPE_OVERFLOW) < 0) { - return npy_double_to_half(-1.); + return -1; // exception return as integer } } return 
res; @@ -136,10 +159,16 @@ MyPyFloat_FromHalf(npy_half h) return PyFloat_FromDouble(npy_half_to_double(h)); } -/* Handle case of assigning from an array scalar in setitem */ +/* + * Handle case of assigning from an array scalar in setitem. + * NOTE/TODO(seberg): This was important, but is now only used + * for *nested* 0-D arrays which makes it dubious whether it should + * remain used. + * (At the point of writing, I did not want to worry about BC though.) + */ static int -convert_to_scalar_and_retry(PyObject *op, void *ov, void *vap, - int (*setitem)(PyObject *op, void *ov, void *vap)) +convert_to_scalar_and_retry(PyArray_Descr *descr, PyObject *op, char *ov, + int (*setitem)(PyArray_Descr *descr, PyObject *op, char *ov)) { PyObject *temp; @@ -150,7 +179,7 @@ convert_to_scalar_and_retry(PyObject *op, void *ov, void *vap, return -1; } else { - int res = setitem(temp, ov, vap); + int res = setitem(descr, temp, ov); Py_DECREF(temp); return res; } @@ -274,41 +303,10 @@ static int #endif ) { PyArray_Descr *descr = PyArray_DescrFromType(NPY_@TYPE@); - int promotion_state = get_npy_promotion_state(); - if (promotion_state == NPY_USE_LEGACY_PROMOTION || ( - promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN - && !npy_give_promotion_warnings())) { - /* - * This path will be taken both for the "promotion" case such as - * `uint8_arr + 123` as well as the assignment case. - * The "legacy" path should only ever be taken for assignment - * (legacy promotion will prevent overflows by promoting up) - * so a normal deprecation makes sense. - * When weak promotion is active, we use "future" behavior unless - * warnings were explicitly opt-in. - */ - if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, - "NumPy will stop allowing conversion of out-of-bound " - "Python integers to integer arrays. 
The conversion " - "of %.100R to %S will fail in the future.\n" - "For the old behavior, usually:\n" - " np.array(value).astype(dtype)\n" - "will give the desired result (the cast overflows).", - obj, descr) < 0) { - Py_DECREF(descr); - return -1; - } - Py_DECREF(descr); - return 0; - } - else { - /* Live in the future, outright error: */ - PyErr_Format(PyExc_OverflowError, - "Python integer %R out of bounds for %S", obj, descr); - Py_DECREF(descr); - return -1; - } - assert(0); + PyErr_Format(PyExc_OverflowError, + "Python integer %R out of bounds for %S", obj, descr); + Py_DECREF(descr); + return -1; } return 0; } @@ -354,9 +352,8 @@ static PyObject * } NPY_NO_EXPORT int -@TYPE@_setitem(PyObject *op, void *ov, void *vap) +@TYPE@_setitem(PyArray_Descr *descr, PyObject *op, char *ov) { - PyArrayObject *ap = vap; @type@ temp; /* ensures alignment */ #if @is_int@ @@ -394,28 +391,23 @@ NPY_NO_EXPORT int } else { temp = (@type@)@func2@(op); - } - if (PyErr_Occurred()) { - PyObject *type, *value, *traceback; - PyErr_Fetch(&type, &value, &traceback); - if (PySequence_NoString_Check(op)) { - PyErr_SetString(PyExc_ValueError, - "setting an array element with a sequence."); - npy_PyErr_ChainExceptionsCause(type, value, traceback); - } - else { - PyErr_Restore(type, value, traceback); + if (temp == (@type@)-1 && PyErr_Occurred()) { + PyObject *type, *value, *traceback; + PyErr_Fetch(&type, &value, &traceback); + if (PySequence_NoString_Check(op)) { + PyErr_SetString(PyExc_ValueError, + "setting an array element with a sequence."); + npy_PyErr_ChainExceptionsCause(type, value, traceback); + } + else { + PyErr_Restore(type, value, traceback); + } + return -1; } - return -1; - } - if (ap == NULL || PyArray_ISBEHAVED(ap)) { - assert(npy_is_aligned(ov, NPY_ALIGNOF(@type@))); - *((@type@ *)ov)=temp; - } - else { - PyDataType_GetArrFuncs(PyArray_DESCR(ap))->copyswap(ov, &temp, PyArray_ISBYTESWAPPED(ap), - ap); } + // Support descr == NULL for some scalarmath paths. 
+ @TYPE@_copyswap( + ov, &temp, descr != NULL && PyDataType_ISBYTESWAPPED(descr), NULL); return 0; } @@ -461,19 +453,17 @@ static PyObject * * #suffix = f, , l# */ NPY_NO_EXPORT int -@NAME@_setitem(PyObject *op, void *ov, void *vap) +@NAME@_setitem(PyArray_Descr *descr, PyObject *op, char *ov) { - PyArrayObject *ap = vap; Py_complex oop; @type@ temp; - if (PyArray_IsZeroDim(op)) { - return convert_to_scalar_and_retry(op, ov, vap, @NAME@_setitem); - } - if (PyArray_IsScalar(op, @kind@)){ temp = PyArrayScalar_VAL(op, @kind@); } + else if (PyArray_IsZeroDim(op)) { + return convert_to_scalar_and_retry(descr, op, ov, @NAME@_setitem); + } else { if (op == Py_None) { oop.real = NPY_NAN; @@ -532,10 +522,8 @@ NPY_NO_EXPORT int #endif } - memcpy(ov, &temp, NPY_SIZEOF_@NAME@); - if (ap != NULL && PyArray_ISBYTESWAPPED(ap)) { - byte_swap_vector(ov, 2, sizeof(@ftype@)); - } + @NAME@_copyswap( + ov, &temp, descr != NULL && PyDataType_ISBYTESWAPPED(descr), NULL); return 0; } @@ -571,8 +559,8 @@ string_to_long_double(PyObject*op) errno = 0; temp = NumPyOS_ascii_strtold(s, &end); if (errno == ERANGE) { - if (PyErr_Warn(PyExc_RuntimeWarning, - "overflow encountered in conversion from string") < 0) { + if (PyErr_WarnEx(PyExc_RuntimeWarning, + "overflow encountered in conversion from string", 1) < 0) { Py_XDECREF(b); return 0; } @@ -617,19 +605,17 @@ LONGDOUBLE_getitem(void *ip, void *ap) } NPY_NO_EXPORT int -LONGDOUBLE_setitem(PyObject *op, void *ov, void *vap) +LONGDOUBLE_setitem(PyArray_Descr *descr, PyObject *op, char *ov) { - PyArrayObject *ap = vap; /* ensure alignment */ npy_longdouble temp; - if (PyArray_IsZeroDim(op)) { - return convert_to_scalar_and_retry(op, ov, vap, LONGDOUBLE_setitem); - } - if (PyArray_IsScalar(op, LongDouble)) { temp = PyArrayScalar_VAL(op, LongDouble); } + else if (PyArray_IsZeroDim(op)) { + return convert_to_scalar_and_retry(descr, op, ov, LONGDOUBLE_setitem); + } else { /* In case something funny happened in PyArray_IsScalar */ if (PyErr_Occurred()) 
{ @@ -640,13 +626,9 @@ LONGDOUBLE_setitem(PyObject *op, void *ov, void *vap) if (PyErr_Occurred()) { return -1; } - if (ap == NULL || PyArray_ISBEHAVED(ap)) { - *((npy_longdouble *)ov) = temp; - } - else { - copy_and_swap(ov, &temp, PyArray_ITEMSIZE(ap), 1, 0, - PyArray_ISBYTESWAPPED(ap)); - } + // Support descr == NULL for scalarmath paths + LONGDOUBLE_copyswap( + ov, &temp, descr != NULL && PyDataType_ISBYTESWAPPED(descr), NULL); return 0; } @@ -662,19 +644,40 @@ UNICODE_getitem(void *ip, void *vap) { PyArrayObject *ap = vap; Py_ssize_t size = PyArray_ITEMSIZE(ap); + Py_ssize_t ucs4len = size / sizeof(npy_ucs4); int swap = PyArray_ISBYTESWAPPED(ap); int align = !PyArray_ISALIGNED(ap); + npy_ucs4 const *src = (npy_ucs4 const*)ip; + npy_ucs4 *buf = NULL; - return (PyObject *)PyUnicode_FromUCS4(ip, size, swap, align); + /* swap and align if needed */ + if (swap || align) { + buf = (npy_ucs4 *)malloc(size); + if (buf == NULL) { + PyErr_NoMemory(); + return NULL; + } + memcpy(buf, src, size); + if (swap) { + byte_swap_vector(buf, ucs4len, sizeof(npy_ucs4)); + } + src = buf; + } + + /* trim trailing zeros */ + while (ucs4len > 0 && src[ucs4len - 1] == 0) { + ucs4len--; + } + PyObject *ret = PyUnicode_FromKindAndData(PyUnicode_4BYTE_KIND, src, ucs4len); + free(buf); + return ret; } static int -UNICODE_setitem(PyObject *op, void *ov, void *vap) +UNICODE_setitem(PyArray_Descr *descr, PyObject *op, char *ov) { - PyArrayObject *ap = vap; - if (PyArray_IsZeroDim(op)) { - return convert_to_scalar_and_retry(op, ov, vap, UNICODE_setitem); + return convert_to_scalar_and_retry(descr, op, ov, UNICODE_setitem); } if (PySequence_NoString_Check(op)) { @@ -696,7 +699,7 @@ UNICODE_setitem(PyObject *op, void *ov, void *vap) } /* truncate if needed */ - Py_ssize_t max_len = PyArray_ITEMSIZE(ap) >> 2; + Py_ssize_t max_len = descr->elsize >> 2; Py_ssize_t actual_len = PyUnicode_GetLength(temp); if (actual_len < 0) { Py_DECREF(temp); @@ -713,7 +716,8 @@ UNICODE_setitem(PyObject *op, void 
*ov, void *vap) Py_ssize_t num_bytes = actual_len * 4; char *buffer; - if (!PyArray_ISALIGNED(ap)) { + int aligned = npy_is_aligned(ov, NPY_ALIGNOF(Py_UCS4)); + if (!aligned) { buffer = PyArray_malloc(num_bytes); if (buffer == NULL) { Py_DECREF(temp); @@ -730,16 +734,16 @@ UNICODE_setitem(PyObject *op, void *ov, void *vap) return -1; } - if (!PyArray_ISALIGNED(ap)) { + if (!aligned) { memcpy(ov, buffer, num_bytes); PyArray_free(buffer); } /* Fill in the rest of the space with 0 */ - if (PyArray_ITEMSIZE(ap) > num_bytes) { - memset((char*)ov + num_bytes, 0, (PyArray_ITEMSIZE(ap) - num_bytes)); + if (descr->elsize > num_bytes) { + memset((char*)ov + num_bytes, 0, (descr->elsize - num_bytes)); } - if (PyArray_ISBYTESWAPPED(ap)) { + if (PyDataType_ISBYTESWAPPED(descr)) { byte_swap_vector(ov, actual_len, 4); } Py_DECREF(temp); @@ -767,15 +771,14 @@ STRING_getitem(void *ip, void *vap) } static int -STRING_setitem(PyObject *op, void *ov, void *vap) +STRING_setitem(PyArray_Descr *descr, PyObject *op, char *ov) { - PyArrayObject *ap = vap; char *ptr; Py_ssize_t len; PyObject *temp = NULL; if (PyArray_IsZeroDim(op)) { - return convert_to_scalar_and_retry(op, ov, vap, STRING_setitem); + return convert_to_scalar_and_retry(descr, op, ov, STRING_setitem); } if (PySequence_NoString_Check(op)) { @@ -813,13 +816,13 @@ STRING_setitem(PyObject *op, void *ov, void *vap) Py_DECREF(temp); return -1; } - memcpy(ov, ptr, PyArray_MIN(PyArray_ITEMSIZE(ap),len)); + memcpy(ov, ptr, PyArray_MIN(descr->elsize, len)); /* * If string length is smaller than room in array * Then fill the rest of the element size with NULL */ - if (PyArray_ITEMSIZE(ap) > len) { - memset((char *)ov + len, 0, (PyArray_ITEMSIZE(ap) - len)); + if (descr->elsize > len) { + memset((char *)ov + len, 0, (descr->elsize - len)); } Py_DECREF(temp); return 0; @@ -846,7 +849,7 @@ OBJECT_getitem(void *ip, void *NPY_UNUSED(ap)) static int -OBJECT_setitem(PyObject *op, void *ov, void *NPY_UNUSED(ap)) +OBJECT_setitem(PyArray_Descr 
*descr, PyObject *op, char *ov) { PyObject *obj; @@ -872,11 +875,9 @@ VOID_getitem(void *input, void *vap) _PyArray_LegacyDescr *descr = (_PyArray_LegacyDescr *)PyArray_DESCR(vap); if (PyDataType_HASFIELDS(descr)) { - PyObject *key; PyObject *names; int i, n; PyObject *ret; - PyObject *tup; PyArrayObject_fields dummy_fields = get_dummy_stack_array(ap); PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields; @@ -887,9 +888,7 @@ VOID_getitem(void *input, void *vap) for (i = 0; i < n; i++) { npy_intp offset; PyArray_Descr *new; - key = PyTuple_GET_ITEM(names, i); - tup = PyDict_GetItem(descr->fields, key); - if (_unpack_field(tup, &new, &offset) < 0) { + if (_unpack_field_index(descr, i, &new, &offset) < 0) { Py_DECREF(ret); return NULL; } @@ -974,14 +973,10 @@ NPY_NO_EXPORT int _setup_field(int i, _PyArray_LegacyDescr *descr, PyArrayObject *arr, npy_intp *offset_p, char *dstdata) { - PyObject *key; - PyObject *tup; PyArray_Descr *new; npy_intp offset; - key = PyTuple_GET_ITEM(descr->names, i); - tup = PyDict_GetItem(descr->fields, key); - if (_unpack_field(tup, &new, &offset) < 0) { + if (_unpack_field_index(descr, i, &new, &offset) < 0) { return -1; } @@ -1034,18 +1029,17 @@ _copy_and_return_void_setitem(_PyArray_LegacyDescr *dstdescr, char *dstdata, } static int -VOID_setitem(PyObject *op, void *input, void *vap) +VOID_setitem(PyArray_Descr *descr_, PyObject *op, char *ip) { - char *ip = input; - PyArrayObject *ap = vap; - int itemsize = PyArray_ITEMSIZE(ap); + _PyArray_LegacyDescr *descr = (_PyArray_LegacyDescr *)descr_; + int itemsize = descr->elsize; int res; - _PyArray_LegacyDescr *descr = (_PyArray_LegacyDescr *)PyArray_DESCR(ap); if (PyDataType_HASFIELDS(descr)) { PyObject *errmsg; npy_int i; npy_intp offset; + PyArray_Descr *field_descr; int failed = 0; /* If op is 0d-ndarray or numpy scalar, directly get dtype & data ptr */ @@ -1078,23 +1072,18 @@ VOID_setitem(PyObject *op, void *input, void *vap) return -1; } - PyArrayObject_fields dummy_fields = 
get_dummy_stack_array(ap); - PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields; - for (i = 0; i < names_size; i++) { - PyObject *item; - - if (_setup_field(i, descr, dummy_arr, &offset, ip) == -1) { + PyObject *item = PyTuple_GetItem(op, i); + if (item == NULL) { failed = 1; break; } - item = PyTuple_GetItem(op, i); - if (item == NULL) { + if (_unpack_field_index(descr, i, &field_descr, &offset) < 0) { failed = 1; break; } /* use setitem to set this field */ - if (PyArray_SETITEM(dummy_arr, ip + offset, item) < 0) { + if (NPY_DT_CALL_setitem(field_descr, item, ip + offset) < 0) { failed = 1; break; } @@ -1104,17 +1093,13 @@ VOID_setitem(PyObject *op, void *input, void *vap) /* Otherwise must be non-void scalar. Try to assign to each field */ npy_intp names_size = PyTuple_GET_SIZE(descr->names); - PyArrayObject_fields dummy_fields = get_dummy_stack_array(ap); - PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields; - for (i = 0; i < names_size; i++) { - /* temporarily make ap have only this field */ - if (_setup_field(i, descr, dummy_arr, &offset, ip) == -1) { + if (_unpack_field_index(descr, i, &field_descr, &offset) < 0) { failed = 1; break; } /* use setitem to set this field */ - if (PyArray_SETITEM(dummy_arr, ip + offset, op) < 0) { + if (NPY_DT_CALL_setitem(field_descr, op, ip + offset) < 0) { failed = 1; break; } @@ -1144,7 +1129,7 @@ VOID_setitem(PyObject *op, void *input, void *vap) PyArrayObject *ret = (PyArrayObject *)PyArray_NewFromDescrAndBase( &PyArray_Type, descr->subarray->base, shape.len, shape.ptr, NULL, ip, - PyArray_FLAGS(ap), NULL, NULL); + NPY_ARRAY_WRITEABLE, NULL, NULL); npy_free_cache_dim_obj(shape); if (!ret) { return -1; @@ -1222,15 +1207,14 @@ TIMEDELTA_getitem(void *ip, void *vap) } static int -DATETIME_setitem(PyObject *op, void *ov, void *vap) +DATETIME_setitem(PyArray_Descr *descr, PyObject *op, char *ov) { - PyArrayObject *ap = vap; /* ensure alignment */ npy_datetime temp = 0; PyArray_DatetimeMetaData *meta = NULL; /* Get 
the datetime units metadata */ - meta = get_datetime_metadata_from_dtype(PyArray_DESCR(ap)); + meta = get_datetime_metadata_from_dtype(descr); if (meta == NULL) { return -1; } @@ -1242,27 +1226,20 @@ DATETIME_setitem(PyObject *op, void *ov, void *vap) } /* Copy the value into the output */ - if (ap == NULL || PyArray_ISBEHAVED(ap)) { - *((npy_datetime *)ov)=temp; - } - else { - PyDataType_GetArrFuncs(PyArray_DESCR(ap))->copyswap(ov, &temp, PyArray_ISBYTESWAPPED(ap), - ap); - } - + DATETIME_copyswap( + ov, &temp, descr != NULL && PyDataType_ISBYTESWAPPED(descr), NULL); return 0; } static int -TIMEDELTA_setitem(PyObject *op, void *ov, void *vap) +TIMEDELTA_setitem(PyArray_Descr *descr, PyObject *op, char *ov) { - PyArrayObject *ap = vap; /* ensure alignment */ npy_timedelta temp = 0; PyArray_DatetimeMetaData *meta = NULL; /* Get the datetime units metadata */ - meta = get_datetime_metadata_from_dtype(PyArray_DESCR(ap)); + meta = get_datetime_metadata_from_dtype(descr); if (meta == NULL) { return -1; } @@ -1273,19 +1250,39 @@ TIMEDELTA_setitem(PyObject *op, void *ov, void *vap) return -1; } - /* Copy the value into the output */ - if (ap == NULL || PyArray_ISBEHAVED(ap)) { - *((npy_timedelta *)ov)=temp; - } - else { - PyDataType_GetArrFuncs(PyArray_DESCR(ap))->copyswap(ov, &temp, PyArray_ISBYTESWAPPED(ap), - ap); - } - + TIMEDELTA_copyswap( + ov, &temp, descr != NULL && PyDataType_ISBYTESWAPPED(descr), NULL); return 0; } +/**begin repeat + * + * #NAME = BOOL, + * BYTE, UBYTE, SHORT, USHORT, INT, UINT, + * LONG, ULONG, LONGLONG, ULONGLONG, + * HALF, FLOAT, DOUBLE, LONGDOUBLE, + * CFLOAT, CDOUBLE, CLONGDOUBLE, + * OBJECT, STRING, UNICODE, VOID, + * DATETIME, TIMEDELTA# + */ + +/* + * Legacy fallback setitem, should be deprecated, but if anyone calls + * our setitem *without* an array (or stealing it for their dtype) + * they might need it. E.g. a NumPy 3 should probably just dump it all, though. 
+ */ +static int +@NAME@_legacy_setitem(PyObject *value, void *data, void *vap) +{ + // Most builtins allow descr to be NULL traditionally, so assume it's OK + PyArray_Descr *descr = vap == NULL ? NULL : PyArray_DESCR((PyArrayObject *)vap); + return @NAME@_setitem(descr, value, data); +} + +/**end repeat**/ + + /* ***************************************************************************** ** TYPE TO TYPE CONVERSIONS ** @@ -1358,7 +1355,7 @@ static void while (n--) { @type1@ t = (@type1@)*ip++; - *op++ = t; + *op++ = (@type2@)t; #if @steps@ == 2 /* complex type */ *op++ = 0; #endif @@ -1541,7 +1538,7 @@ static void if (temp == NULL) { return; } - if (@to@_setitem(temp, op, aop)) { + if (@to@_setitem(PyArray_DESCR(aop), temp, (char *)op)) { Py_DECREF(temp); return; } @@ -1589,7 +1586,7 @@ static void Py_INCREF(Py_False); temp = Py_False; } - if (@to@_setitem(temp, op, aop)) { + if (@to@_setitem(PyArray_DESCR(aop), temp, (char *)op)) { Py_DECREF(temp); return; } @@ -1954,7 +1951,7 @@ _basic_copy(void *dst, void *src, int elsize) { * npy_half, npy_float, npy_double, npy_longdouble, * npy_datetime, npy_timedelta# */ -static void +static inline void @fname@_copyswapn (void *dst, npy_intp dstride, void *src, npy_intp sstride, npy_intp n, int swap, void *NPY_UNUSED(arr)) { @@ -1965,7 +1962,7 @@ static void } } -static void +static inline void @fname@_copyswap (void *dst, void *src, int swap, void *NPY_UNUSED(arr)) { /* copy first if needed */ @@ -2047,7 +2044,7 @@ static void /* ignore swap */ } -static void +static inline void @fname@_copyswap (void *dst, void *src, int NPY_UNUSED(swap), void *NPY_UNUSED(arr)) { @@ -2080,7 +2077,7 @@ static void } } -static void +static inline void @fname@_copyswap (void *dst, void *src, int swap, void *NPY_UNUSED(arr)) { /* copy first if needed */ @@ -2225,7 +2222,7 @@ OBJECT_copyswapn(PyObject **dst, npy_intp dstride, PyObject **src, return; } -static void +static inline void OBJECT_copyswap(PyObject **dst, PyObject **src, int 
NPY_UNUSED(swap), void *NPY_UNUSED(arr)) { @@ -2281,7 +2278,7 @@ VOID_copyswapn (char *dst, npy_intp dstride, char *src, npy_intp sstride, PyArrayObject_fields dummy_fields = get_dummy_stack_array(arr); PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields; - while (PyDict_Next(descr->fields, &pos, &key, &value)) { + while (PyDict_Next(descr->fields, &pos, &key, &value)) { // noqa: borrowed-ref OK npy_intp offset; PyArray_Descr *new; if (NPY_TITLE_KEY(key, value)) { @@ -2366,7 +2363,7 @@ VOID_copyswap (char *dst, char *src, int swap, PyArrayObject *arr) PyArrayObject_fields dummy_fields = get_dummy_stack_array(arr); PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields; - while (PyDict_Next(descr->fields, &pos, &key, &value)) { + while (PyDict_Next(descr->fields, &pos, &key, &value)) { // noqa: borrowed-ref OK npy_intp offset; PyArray_Descr * new; @@ -2456,7 +2453,7 @@ UNICODE_copyswapn (char *dst, npy_intp dstride, char *src, npy_intp sstride, } -static void +static inline void STRING_copyswap(char *dst, char *src, int NPY_UNUSED(swap), PyArrayObject *arr) { assert(arr != NULL); @@ -2467,7 +2464,7 @@ STRING_copyswap(char *dst, char *src, int NPY_UNUSED(swap), PyArrayObject *arr) _basic_copy(dst, src, PyArray_ITEMSIZE(arr)); } -static void +static inline void UNICODE_copyswap (char *dst, char *src, int swap, PyArrayObject *arr) { int itemsize; @@ -2541,6 +2538,42 @@ static npy_bool } /**end repeat**/ +/**begin repeat + * + * #name = BOOL, BYTE, UBYTE, USHORT, SHORT, UINT, INT, ULONG, LONG, FLOAT, DOUBLE# + * #type = npy_bool, npy_byte, npy_byte, npy_uint16, npy_int16, npy_uint32, npy_int32, npy_uint64, npy_int64, npy_float, npy_double# + * #nonzero = _NONZERO*11# + */ +static npy_intp +count_nonzero_trivial_@name@(npy_intp count, const char *data, npy_int stride) +{ + npy_intp nonzero_count = 0; + while (count--) { + @type@ *ptmp = (@type@ *)data; + nonzero_count += (npy_bool) @nonzero@(*ptmp); + data += stride; + } + return nonzero_count; +} +/**end 
repeat**/ + +NPY_NO_EXPORT npy_intp +count_nonzero_trivial_dispatcher(npy_intp count, const char* data, npy_intp stride, int dtype_num) { + switch(dtype_num) { + /**begin repeat + * + * #dtypeID = NPY_BOOL, NPY_UINT8, NPY_INT8, NPY_UINT16, NPY_INT16, NPY_UINT32, NPY_INT32, NPY_UINT64, NPY_INT64, NPY_FLOAT32, NPY_FLOAT64# + * #name = BOOL, BYTE, UBYTE, USHORT, SHORT, UINT, INT, ULONG, LONG, FLOAT, DOUBLE# + */ + case @dtypeID@: + { + return count_nonzero_trivial_@name@(count, data, stride); + } + /**end repeat**/ + } + return -1; +} + /**begin repeat * * #fname = CFLOAT, CDOUBLE, CLONGDOUBLE# @@ -2650,7 +2683,7 @@ VOID_nonzero (char *ip, PyArrayObject *ap) PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields; _PyArray_LegacyDescr *descr = (_PyArray_LegacyDescr *)PyArray_DESCR(ap); - while (PyDict_Next(descr->fields, &pos, &key, &value)) { + while (PyDict_Next(descr->fields, &pos, &key, &value)) { // noqa: borrowed-ref OK PyArray_Descr * new; npy_intp offset; if (NPY_TITLE_KEY(key, value)) { @@ -2987,9 +3020,8 @@ UNICODE_compare(npy_ucs4 *ip1, npy_ucs4 *ip2, static int VOID_compare(char *ip1, char *ip2, PyArrayObject *ap) { - PyArray_Descr *descr; - PyObject *names, *key; - PyObject *tup; + _PyArray_LegacyDescr *descr; + PyObject *names; PyArrayObject_fields dummy_struct; PyArrayObject *dummy = (PyArrayObject *)&dummy_struct; char *nip1, *nip2; @@ -3002,18 +3034,16 @@ VOID_compare(char *ip1, char *ip2, PyArrayObject *ap) if (mem_handler == NULL) { goto finish; } - descr = PyArray_DESCR(ap); + descr = (_PyArray_LegacyDescr *)PyArray_DESCR(ap); /* * Compare on the first-field. If equal, then * compare on the second-field, etc. 
*/ - names = PyDataType_NAMES(descr); + names = descr->names; for (i = 0; i < PyTuple_GET_SIZE(names); i++) { PyArray_Descr *new; npy_intp offset; - key = PyTuple_GET_ITEM(names, i); - tup = PyDict_GetItem(PyDataType_FIELDS(descr), key); - if (_unpack_field(tup, &new, &offset) < 0) { + if (_unpack_field_index(descr, i, &new, &offset) < 0) { goto finish; } /* Set the fields needed by compare or copyswap */ @@ -3998,7 +4028,7 @@ static PyArray_ArrFuncs _Py@NAME@_ArrFuncs = { @from@_to_VOID }, @from@_getitem, - @from@_setitem, + @from@_legacy_setitem, (PyArray_CopySwapNFunc*)@from@_copyswapn, (PyArray_CopySwapFunc*)@from@_copyswap, (PyArray_CompareFunc*)@from@_compare, @@ -4117,7 +4147,7 @@ static PyArray_ArrFuncs _Py@NAME@_ArrFuncs = { @from@_to_VOID }, @from@_getitem, - @from@_setitem, + @from@_legacy_setitem, (PyArray_CopySwapNFunc*)@from@_copyswapn, (PyArray_CopySwapFunc*)@from@_copyswap, (PyArray_CompareFunc*)@from@_compare, @@ -4185,8 +4215,7 @@ NPY_NO_EXPORT _PyArray_LegacyDescr @from@_Descr = { /* The smallest type number is ?, the largest bounded by 'z'. 
*/ #define _MAX_LETTER ('z' + 1) -static npy_int16 _letter_to_num[_MAX_LETTER - '?']; -#define LETTER_TO_NUM(letter) _letter_to_num[letter - '?'] +#define LETTER_TO_NUM(letter) npy_static_cdata._letter_to_num[letter - '?'] static _PyArray_LegacyDescr *_builtin_descrs[] = { &BOOL_Descr, @@ -4290,10 +4319,202 @@ PyArray_DescrFromType(int type) /* ***************************************************************************** - ** SETUP TYPE INFO ** + ** NEWSTYLE TYPE METHODS ** ***************************************************************************** */ +static int +BOOL_get_constant(PyArray_Descr *descr, int constant_id, void *ptr) +{ + switch (constant_id) { + case NPY_CONSTANT_zero: + case NPY_CONSTANT_minimum_finite: + *(npy_bool *)ptr = NPY_FALSE; + return 1; + case NPY_CONSTANT_one: + case NPY_CONSTANT_maximum_finite: + *(npy_bool *)ptr = NPY_TRUE; + return 1; + default: + return 0; + } +} + +/**begin repeat + * #NAME = BYTE, UBYTE, SHORT, USHORT, INT, UINT, + * LONG, ULONG, LONGLONG, ULONGLONG# + * #type = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint, + * npy_long, npy_ulong, npy_longlong, npy_ulonglong# + * #IS_UNSIGNED = 0, 1, 0, 1, 0, 1, + * 0, 1, 0, 1# + * #MIN = NPY_MIN_BYTE, 0, NPY_MIN_SHORT, 0, NPY_MIN_INT, 0, + * NPY_MIN_LONG, 0, NPY_MIN_LONGLONG, 0# + */ +static int +@NAME@_get_constant(PyArray_Descr *descr, int constant_id, void *ptr) +{ + @type@ val; + switch (constant_id) { + case NPY_CONSTANT_zero: + val = 0; + break; + case NPY_CONSTANT_one: + val = 1; + break; + case NPY_CONSTANT_minimum_finite: +#if @IS_UNSIGNED@ + val = 0; +#else + val = @MIN@; +#endif + break; + case NPY_CONSTANT_maximum_finite: + val = NPY_MAX_@NAME@; + break; + default: + return 0; + } + @NAME@_copyswap(ptr, &val, !PyArray_ISNBO(descr->byteorder), NULL); + return 1; +} +/**end repeat**/ + +/* +Keeping Half macros consistent with standard C +Refernce: https://en.cppreference.com/w/c/types/limits.html +*/ +#define HALF_MAX 31743 /* Bit pattern for 65504.0 
*/ +#define HALF_MIN 1024 /* Bit pattern for smallest positive normal: 2^-14 */ +#define HALF_NEG_MAX 64511 /* Bit pattern for -65504.0 */ +#define HALF_EPSILON 5120 +#define HALF_TRUE_MIN 0x0001 /* Bit pattern for smallest positive subnormal: 2^-24 */ +#define HALF_MAX_EXP 16 +#define HALF_MIN_EXP -13 +#define HALF_MANT_DIG 11 /* 10 + 1 (implicit) */ +#define HALF_DIG 3 + +/* + * On PPC64 systems with IBM double-double format pair of IEEE binary64 + * values (not a true IEEE quad). We derived the values based on the Interval machine epsilon definition of epsilon, + * difference between 1.0 and the next representable floating-point number larger than 1.0 + * ~106 bits of mantissa precision (53+53) gives epsilon of 2^-105, but glibc returns 2^-1074 (DBL_TRUE_MIN). + */ +#if defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_LE) || \ + defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_BE) + #undef LDBL_EPSILON + #define LDBL_EPSILON 0x1p-105L /* 2^-105 */ +#endif +/* + * Define *_TRUE_MIN macros for smallest subnormal values if not available. + * Use nextafter(0, 1) to get the smallest positive representable value. 
+ */ +#ifndef FLT_TRUE_MIN + #define FLT_TRUE_MIN npy_nextafterf(0.0f, 1.0f) +#endif +#ifndef DBL_TRUE_MIN + #define DBL_TRUE_MIN npy_nextafter(0.0, 1.0) +#endif +#ifndef LDBL_TRUE_MIN + #define LDBL_TRUE_MIN npy_nextafterl(0.0L, 1.0L) +#endif + +/**begin repeat + * #NAME = HALF,FLOAT, DOUBLE, LONGDOUBLE# + * #ABB = HALF, FLT, DBL, LDBL# + * #type = npy_half, npy_float, npy_double, npy_longdouble# + * #RADIX = 16384, 2, 2, 2# + * #NEG_MAX = HALF_NEG_MAX, -FLT_MAX, -DBL_MAX, -LDBL_MAX# + */ +static int +@NAME@_get_constant(PyArray_Descr *descr, int constant_id, void *ptr) +{ + @type@ val; + switch (constant_id) { + case NPY_CONSTANT_zero: + val = 0; + break; + case NPY_CONSTANT_one: + val = 1; + break; + case NPY_CONSTANT_minimum_finite: + val = @NEG_MAX@; + break; + case NPY_CONSTANT_maximum_finite: + #if defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_LE) || \ + defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_BE) + /* For IBM double-double, use nextafter(inf, 0) to get the true + * maximum representable value (matches old MachArLike behavior) */ + if (sizeof(@type@) == sizeof(npy_longdouble)) { + val = npy_nextafterl((@type@)NPY_INFINITY, (@type@)0.0L); + break; + } + #endif + val = @ABB@_MAX; + break; + case NPY_CONSTANT_inf: + val = (@type@)NPY_INFINITYF; + break; + case NPY_CONSTANT_nan: + val = (@type@)NPY_NANF; + break; + case NPY_CONSTANT_finfo_radix: + val = @RADIX@; + break; + case NPY_CONSTANT_finfo_eps: + val = @ABB@_EPSILON; + break; + case NPY_CONSTANT_finfo_smallest_normal: + val = @ABB@_MIN; + break; + case NPY_CONSTANT_finfo_smallest_subnormal: + val = @ABB@_TRUE_MIN; + break; + case NPY_CONSTANT_finfo_nmant: + *(npy_intp *)ptr = @ABB@_MANT_DIG - 1; + return 1; + case NPY_CONSTANT_finfo_min_exp: + /* + Definition: Minimum negative integer such that FLT_RADIX raised by power one less than that integer is a normalized float, double and long double respectively + + refernce: https://en.cppreference.com/w/c/types/limits.html + */ + *(npy_intp *)ptr = @ABB@_MIN_EXP - 1; + 
return 1; + case NPY_CONSTANT_finfo_max_exp: + *(npy_intp *)ptr = @ABB@_MAX_EXP; + return 1; + case NPY_CONSTANT_finfo_decimal_digits: + *(npy_intp *)ptr = @ABB@_DIG; + return 1; + default: + return 0; + } + @NAME@_copyswap(ptr, &val, !PyArray_ISNBO(descr->byteorder), NULL); + return 1; +} +/**end repeat**/ + + +/**begin repeat + * #NAME = CFLOAT, CDOUBLE, CLONGDOUBLE, + * OBJECT, STRING, UNICODE, VOID, + * DATETIME, TIMEDELTA# + */ +static int +@NAME@_get_constant(PyArray_Descr *descr, int constant_id, void *ptr) +{ + // TODO: We currently don't use this, but we could quickly for + // reduction identity/initial value so should implement these. + return 0; +} +/**end repeat**/ + + +/* + ***************************************************************************** + ** SETUP TYPE INFO ** + ***************************************************************************** + */ /* * This function is called during numpy module initialization, @@ -4303,15 +4524,14 @@ NPY_NO_EXPORT int set_typeinfo(PyObject *dict) { PyObject *infodict = NULL; + PyArray_DTypeMeta *dtypemeta; // borrowed int i; _PyArray_LegacyDescr *dtype; PyObject *cobj, *key; // SIMD runtime dispatching - #ifndef NPY_DISABLE_OPTIMIZATION - #include "argfunc.dispatch.h" - #endif + #include "argfunc.dispatch.h" /**begin repeat * #FROM = BYTE, UBYTE, SHORT, USHORT, INT, UINT, * LONG, ULONG, LONGLONG, ULONGLONG, @@ -4359,7 +4579,7 @@ set_typeinfo(PyObject *dict) * PyArray_ComplexAbstractDType*3, * PyArrayDescr_Type*6 # */ - if (dtypemeta_wrap_legacy_descriptor( + dtypemeta = dtypemeta_wrap_legacy_descriptor( _builtin_descrs[NPY_@NAME@], &_Py@Name@_ArrFuncs, (PyTypeObject *)&@scls@, @@ -4369,9 +4589,12 @@ set_typeinfo(PyObject *dict) #else NULL #endif - ) < 0) { + ); + if (dtypemeta == NULL) { return -1; } + NPY_DT_SLOTS(dtypemeta)->setitem = @NAME@_setitem; + NPY_DT_SLOTS(dtypemeta)->get_constant = @NAME@_get_constant; /**end repeat**/ diff --git a/numpy/_core/src/multiarray/arraytypes.h.src 
b/numpy/_core/src/multiarray/arraytypes.h.src index 35764dc1b253..59dc836a2de5 100644 --- a/numpy/_core/src/multiarray/arraytypes.h.src +++ b/numpy/_core/src/multiarray/arraytypes.h.src @@ -1,6 +1,10 @@ #ifndef NUMPY_CORE_SRC_MULTIARRAY_ARRAYTYPES_H_ #define NUMPY_CORE_SRC_MULTIARRAY_ARRAYTYPES_H_ +#ifdef __cplusplus +extern "C" { +#endif + #include "common.h" NPY_NO_EXPORT int @@ -40,14 +44,12 @@ small_correlate(const char * d_, npy_intp dstride, */ NPY_NO_EXPORT int -@TYPE@_setitem(PyObject *obj, void *data_ptr, void *arr); +@TYPE@_setitem(PyArray_Descr *descr, PyObject *obj, char *data_ptr); /**end repeat**/ -#ifndef NPY_DISABLE_OPTIMIZATION - #include "argfunc.dispatch.h" -#endif +#include "argfunc.dispatch.h" /**begin repeat * #TYPE = BYTE, UBYTE, SHORT, USHORT, INT, UINT, * LONG, ULONG, LONGLONG, ULONGLONG, @@ -164,4 +166,11 @@ NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT int BOOL_argmax, #undef INT_not_size_named #undef LONGLONG_not_size_named +NPY_NO_EXPORT npy_intp +count_nonzero_trivial_dispatcher(npy_intp count, const char* data, npy_intp stride, int dtype_num); + +#ifdef __cplusplus +} +#endif + #endif /* NUMPY_CORE_SRC_MULTIARRAY_ARRAYTYPES_H_ */ diff --git a/numpy/_core/src/multiarray/arraywrap.c b/numpy/_core/src/multiarray/arraywrap.c index 8b37798f983b..09e46bd4d3e7 100644 --- a/numpy/_core/src/multiarray/arraywrap.c +++ b/numpy/_core/src/multiarray/arraywrap.c @@ -9,11 +9,11 @@ #include "numpy/arrayobject.h" #include "numpy/npy_3kcompat.h" +#include "numpy/npy_math.h" #include "get_attr_string.h" #include "arraywrap.h" -#include "multiarraymodule.h" - +#include "npy_static_data.h" /* * Find the array wrap or array prepare method that applies to the inputs. 
@@ -33,7 +33,7 @@ npy_find_array_wrap( PyObject *wrap = NULL; PyObject *wrap_type = NULL; - double priority = 0; /* silence uninitialized warning */ + double priority = -NPY_INFINITY; /* * Iterate through all inputs taking the first one with an __array_wrap__ @@ -43,34 +43,32 @@ npy_find_array_wrap( for (int i = 0; i < nin; i++) { PyObject *obj = inputs[i]; if (PyArray_CheckExact(obj)) { - if (wrap == NULL || priority < NPY_PRIORITY) { - Py_INCREF(Py_None); - Py_XSETREF(wrap, Py_None); - priority = 0; + if (priority < NPY_PRIORITY) { + Py_XSETREF(wrap, Py_NewRef(Py_None)); + priority = NPY_PRIORITY; } } else if (PyArray_IsAnyScalar(obj)) { - if (wrap == NULL || priority < NPY_SCALAR_PRIORITY) { - Py_INCREF(Py_None); - Py_XSETREF(wrap, Py_None); + if (priority < NPY_SCALAR_PRIORITY) { + Py_XSETREF(wrap, Py_NewRef(Py_None)); priority = NPY_SCALAR_PRIORITY; } } else { - PyObject *new_wrap = PyArray_LookupSpecial_OnInstance(obj, npy_ma_str_array_wrap); - if (new_wrap == NULL) { - if (PyErr_Occurred()) { - goto fail; - } + PyObject *new_wrap; + if (PyArray_LookupSpecial_OnInstance( + obj, npy_interned_str.array_wrap, &new_wrap) < 0) { + goto fail; + } + else if (new_wrap == NULL) { continue; } - double curr_priority = PyArray_GetPriority(obj, 0); + double curr_priority = PyArray_GetPriority(obj, NPY_PRIORITY); if (wrap == NULL || priority < curr_priority /* Prefer subclasses `__array_wrap__`: */ - || (curr_priority == 0 && wrap == Py_None)) { + || (curr_priority == NPY_PRIORITY && wrap == Py_None)) { Py_XSETREF(wrap, new_wrap); - Py_INCREF(Py_TYPE(obj)); - Py_XSETREF(wrap_type, (PyObject *)Py_TYPE(obj)); + Py_XSETREF(wrap_type, Py_NewRef(Py_TYPE(obj))); priority = curr_priority; } else { @@ -80,12 +78,10 @@ npy_find_array_wrap( } if (wrap == NULL) { - Py_INCREF(Py_None); - wrap = Py_None; + wrap = Py_NewRef(Py_None); } if (wrap_type == NULL) { - Py_INCREF(&PyArray_Type); - wrap_type = (PyObject *)&PyArray_Type; + wrap_type = Py_NewRef(&PyArray_Type); } *out_wrap = wrap; 
@@ -145,7 +141,7 @@ npy_apply_wrap( /* If provided, we prefer the actual out objects wrap: */ if (original_out != NULL && original_out != Py_None) { - /* + /* * If an original output object was passed, wrapping shouldn't * change it. In particular, it doesn't make sense to convert to * scalar. So replace the passed in wrap and wrap_type. @@ -159,15 +155,14 @@ npy_apply_wrap( } else { /* Replace passed wrap/wrap_type (borrowed refs) with new_wrap/type. */ - new_wrap = PyArray_LookupSpecial_OnInstance( - original_out, npy_ma_str_array_wrap); - if (new_wrap != NULL) { + if (PyArray_LookupSpecial_OnInstance( + original_out, npy_interned_str.array_wrap, &new_wrap) < 0) { + return NULL; + } + else if (new_wrap != NULL) { wrap = new_wrap; wrap_type = (PyObject *)Py_TYPE(original_out); } - else if (PyErr_Occurred()) { - return NULL; - } } } /* @@ -186,7 +181,7 @@ npy_apply_wrap( Py_XDECREF(new_wrap); Py_INCREF(obj); if (return_scalar) { - /* + /* * Use PyArray_Return to convert to scalar when necessary * (PyArray_Return actually checks for non-arrays). */ @@ -267,7 +262,7 @@ npy_apply_wrap( } } - /* + /* * Retry without passing context and return_scalar parameters. * If that succeeds, we give a DeprecationWarning. */ diff --git a/numpy/_core/src/multiarray/buffer.c b/numpy/_core/src/multiarray/buffer.c index f83e7b918e4e..908553462bfe 100644 --- a/numpy/_core/src/multiarray/buffer.c +++ b/numpy/_core/src/multiarray/buffer.c @@ -26,7 +26,7 @@ /************************************************************************* * PEP 3118 buffer protocol * - * Implementing PEP 3118 is somewhat convoluted because of the desirata: + * Implementing PEP 3118 is somewhat convoluted because of the requirements: * * - Don't add new members to ndarray or descr structs, to preserve binary * compatibility. 
(Also, adding the items is actually not very useful, @@ -268,7 +268,7 @@ _buffer_format_string(PyArray_Descr *descr, _tmp_string_t *str, int ret; name = PyTuple_GET_ITEM(ldescr->names, k); - item = PyDict_GetItem(ldescr->fields, name); + item = PyDict_GetItem(ldescr->fields, name); // noqa: borrowed-ref OK child = (PyArray_Descr*)PyTuple_GetItem(item, 0); offset_obj = PyTuple_GetItem(item, 1); @@ -793,8 +793,10 @@ array_getbuffer(PyObject *obj, Py_buffer *view, int flags) } /* Fill in information (and add it to _buffer_info if necessary) */ + Py_BEGIN_CRITICAL_SECTION(self); info = _buffer_get_info( &((PyArrayObject_fields *)self)->_buffer_info, obj, flags); + Py_END_CRITICAL_SECTION(); if (info == NULL) { goto fail; } @@ -880,7 +882,10 @@ void_getbuffer(PyObject *self, Py_buffer *view, int flags) * to find the correct format. This format must also be stored, since * at least in theory it can change (in practice it should never change). */ - _buffer_info_t *info = _buffer_get_info(&scalar->_buffer_info, self, flags); + _buffer_info_t *info = NULL; + Py_BEGIN_CRITICAL_SECTION(scalar); + info = _buffer_get_info(&scalar->_buffer_info, self, flags); + Py_END_CRITICAL_SECTION(); if (info == NULL) { Py_DECREF(self); return -1; diff --git a/numpy/_core/src/multiarray/calculation.c b/numpy/_core/src/multiarray/calculation.c index cf77ce90902d..b95b37987f8e 100644 --- a/numpy/_core/src/multiarray/calculation.c +++ b/numpy/_core/src/multiarray/calculation.c @@ -308,7 +308,7 @@ PyArray_Ptp(PyArrayObject *ap, int axis, PyArrayObject *out) /*NUMPY_API - * Set variance to 1 to by-pass square-root calculation and return variance + * Set variance to 1 to bypass square-root calculation and return variance * Std */ NPY_NO_EXPORT PyObject * @@ -576,7 +576,7 @@ PyArray_Round(PyArrayObject *a, int decimals, PyArrayObject *out) Py_INCREF(arr); } else { - arr = PyArray_Copy(a); + arr = PyArray_NewCopy(a, NPY_KEEPORDER); if (arr == NULL) { return NULL; } @@ -637,8 +637,7 @@ 
PyArray_Round(PyArrayObject *a, int decimals, PyArrayObject *out) return (PyObject *)out; } else { - Py_INCREF(a); - return (PyObject *)a; + return PyArray_NewCopy(a, NPY_KEEPORDER); } } if (decimals == 0) { @@ -653,7 +652,15 @@ PyArray_Round(PyArrayObject *a, int decimals, PyArrayObject *out) else { op1 = n_ops.true_divide; op2 = n_ops.multiply; - decimals = -decimals; + if (decimals == INT_MIN) { + // not technically correct but it doesn't matter because no one in + // this millenium is using floating point numbers with enough + // accuracy for this to matter + decimals = INT_MAX; + } + else { + decimals = -decimals; + } } if (!out) { if (PyArray_ISINTEGER(a)) { @@ -836,12 +843,9 @@ PyArray_Conjugate(PyArrayObject *self, PyArrayObject *out) else { PyArrayObject *ret; if (!PyArray_ISNUMBER(self)) { - /* 2017-05-04, 1.13 */ - if (DEPRECATE("attempting to conjugate non-numeric dtype; this " - "will error in the future to match the behavior of " - "np.conjugate") < 0) { - return NULL; - } + PyErr_SetString(PyExc_TypeError, + "cannot conjugate non-numeric dtype"); + return NULL; } if (out) { if (PyArray_AssignArray(out, self, diff --git a/numpy/_core/src/multiarray/common.c b/numpy/_core/src/multiarray/common.c index 655122ff7f09..fd4f24151331 100644 --- a/numpy/_core/src/multiarray/common.c +++ b/numpy/_core/src/multiarray/common.c @@ -25,15 +25,6 @@ * variable is misnamed, but it's part of the public API so I'm not sure we * can just change it. Maybe someone should try and see if anyone notices. */ -/* - * In numpy 1.6 and earlier, this was NPY_UNSAFE_CASTING. In a future - * release, it will become NPY_SAME_KIND_CASTING. Right now, during the - * transitional period, we continue to follow the NPY_UNSAFE_CASTING rules (to - * avoid breaking people's code), but we also check for whether the cast would - * be allowed under the NPY_SAME_KIND_CASTING rules, and if not we issue a - * warning (that people's code will be broken in a future release.) 
- */ - NPY_NO_EXPORT NPY_CASTING NPY_DEFAULT_ASSIGN_CASTING = NPY_SAME_KIND_CASTING; @@ -62,7 +53,7 @@ NPY_NO_EXPORT PyArray_Descr * PyArray_DTypeFromObjectStringDiscovery( PyObject *obj, PyArray_Descr *last_dtype, int string_type) { - int itemsize; + npy_intp itemsize; if (string_type == NPY_STRING) { PyObject *temp = PyObject_Str(obj); @@ -75,6 +66,12 @@ PyArray_DTypeFromObjectStringDiscovery( if (itemsize < 0) { return NULL; } + if (itemsize > NPY_MAX_INT) { + /* We can allow this, but should audit code paths before we do. */ + PyErr_Format(PyExc_TypeError, + "string of length %zd is too large to store inside array.", itemsize); + return NULL; + } } else if (string_type == NPY_UNICODE) { PyObject *temp = PyObject_Str(obj); @@ -86,6 +83,11 @@ PyArray_DTypeFromObjectStringDiscovery( if (itemsize < 0) { return NULL; } + if (itemsize > NPY_MAX_INT / 4) { + PyErr_Format(PyExc_TypeError, + "string of length %zd is too large to store inside array.", itemsize); + return NULL; + } itemsize *= 4; /* convert UCS4 codepoints to bytes */ } else { @@ -188,9 +190,9 @@ _IsWriteable(PyArrayObject *ap) /** * Convert an array shape to a string such as "(1, 2)". * - * @param Dimensionality of the shape - * @param npy_intp pointer to shape array - * @param String to append after the shape `(1, 2)%s`. + * @param n Dimensionality of the shape + * @param vals npy_intp pointer to shape array + * @param ending String to append after the shape `(1, 2)%s`. * * @return Python unicode string */ @@ -299,12 +301,11 @@ dot_alignment_error(PyArrayObject *a, int i, PyArrayObject *b, int j) /** * unpack tuple of PyDataType_FIELDS(dtype) (descr, offset, title[not-needed]) * - * @param "value" should be the tuple. - * - * @return "descr" will be set to the field's dtype - * @return "offset" will be set to the field's offset + * @param value should be the tuple. 
+ * @param descr will be set to the field's dtype + * @param offset will be set to the field's offset * - * returns -1 on failure, 0 on success. + * @return -1 on failure, 0 on success. */ NPY_NO_EXPORT int _unpack_field(PyObject *value, PyArray_Descr **descr, npy_intp *offset) @@ -327,6 +328,30 @@ _unpack_field(PyObject *value, PyArray_Descr **descr, npy_intp *offset) return 0; } + +/** + * Unpack a field from a structured dtype. The field index must be valid. + * + * @param descr The dtype to unpack. + * @param index The index of the field to unpack. + * @param odescr will be set to the field's dtype + * @param offset will be set to the field's offset + * + * @return -1 on failure, 0 on success. + */ + NPY_NO_EXPORT int + _unpack_field_index( + _PyArray_LegacyDescr *descr, + npy_intp index, + PyArray_Descr **odescr, + npy_intp *offset) + { + PyObject *key = PyTuple_GET_ITEM(descr->names, index); + PyObject *tup = PyDict_GetItem(descr->fields, key); // noqa: borrowed-ref OK + return _unpack_field(tup, odescr, offset); + } + + /* * check whether arrays with datatype dtype might have object fields. This will * only happen for structured dtypes (which may have hidden objects even if the @@ -393,8 +418,8 @@ new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out, /* set copy-back */ Py_INCREF(out); if (PyArray_SetWritebackIfCopyBase(out_buf, out) < 0) { - Py_DECREF(out); Py_DECREF(out_buf); + // PyArray_SetWritebackIfCopyBase steals reference to second argument return NULL; } } @@ -448,25 +473,65 @@ check_is_convertible_to_scalar(PyArrayObject *v) return 0; } - /* Remove this if-else block when the deprecation expires */ - if (PyArray_SIZE(v) == 1) { - /* Numpy 1.25.0, 2023-01-02 */ - if (DEPRECATE( - "Conversion of an array with ndim > 0 to a scalar " - "is deprecated, and will error in future. " - "Ensure you extract a single element from your array " - "before performing this operation. 
" - "(Deprecated NumPy 1.25.)") < 0) { - return -1; - } - return 0; - } else { - PyErr_SetString(PyExc_TypeError, - "only length-1 arrays can be converted to Python scalars"); - return -1; - } - PyErr_SetString(PyExc_TypeError, "only 0-dimensional arrays can be converted to Python scalars"); return -1; } + +NPY_NO_EXPORT PyObject * +build_array_interface(PyObject *dataptr, PyObject *descr, PyObject *strides, + PyObject *typestr, PyObject *shape) +{ + PyObject *inter = NULL; + PyObject *version = NULL; + int ret; + + inter = PyDict_New(); + if (inter == NULL) { + goto fail; + } + + ret = PyDict_SetItemString(inter, "data", dataptr); + if (ret < 0) { + goto fail; + } + + ret = PyDict_SetItemString(inter, "strides", strides); + if (ret < 0) { + goto fail; + } + + ret = PyDict_SetItemString(inter, "descr", descr); + if (ret < 0) { + goto fail; + } + + ret = PyDict_SetItemString(inter, "typestr", typestr); + if (ret < 0) { + goto fail; + } + + ret = PyDict_SetItemString(inter, "shape", shape); + if (ret < 0) { + goto fail; + } + + version = PyLong_FromLong(3); + if (version == NULL) { + goto fail; + } + + ret = PyDict_SetItemString(inter, "version", version); + if (ret < 0) { + goto fail; + } + Py_XDECREF(version); + return inter; + + +fail: + Py_XDECREF(inter); + Py_XDECREF(version); + return NULL; + +} diff --git a/numpy/_core/src/multiarray/common.h b/numpy/_core/src/multiarray/common.h index 1a01224b1670..c0b5c043c7a3 100644 --- a/numpy/_core/src/multiarray/common.h +++ b/numpy/_core/src/multiarray/common.h @@ -1,6 +1,8 @@ #ifndef NUMPY_CORE_SRC_MULTIARRAY_COMMON_H_ #define NUMPY_CORE_SRC_MULTIARRAY_COMMON_H_ +#include + #include #include "numpy/npy_common.h" #include "numpy/ndarraytypes.h" @@ -8,22 +10,21 @@ #include "npy_cpu_dispatch.h" #include "numpy/npy_cpu.h" +#include "npy_static_data.h" #include "npy_import.h" #include +#include + +#ifdef __cplusplus +extern "C" { +#endif #define error_converting(x) (((x) == -1) && PyErr_Occurred()) -#ifdef NPY_ALLOW_THREADS 
-#define NPY_BEGIN_THREADS_NDITER(iter) \ - do { \ - if (!NpyIter_IterationNeedsAPI(iter)) { \ - NPY_BEGIN_THREADS_THRESHOLDED(NpyIter_GetIterSize(iter)); \ - } \ - } while(0) -#else -#define NPY_BEGIN_THREADS_NDITER(iter) -#endif +NPY_NO_EXPORT PyObject * +build_array_interface(PyObject *dataptr, PyObject *descr, PyObject *strides, + PyObject *typestr, PyObject *shape); NPY_NO_EXPORT PyArray_Descr * PyArray_DTypeFromObjectStringDiscovery( @@ -68,19 +69,23 @@ convert_shape_to_string(npy_intp n, npy_intp const *vals, char *ending); NPY_NO_EXPORT void dot_alignment_error(PyArrayObject *a, int i, PyArrayObject *b, int j); + /** * unpack tuple of PyDataType_FIELDS(dtype) (descr, offset, title[not-needed]) - * - * @param "value" should be the tuple. - * - * @return "descr" will be set to the field's dtype - * @return "offset" will be set to the field's offset - * - * returns -1 on failure, 0 on success. */ NPY_NO_EXPORT int _unpack_field(PyObject *value, PyArray_Descr **descr, npy_intp *offset); +/** + * Unpack a field from a structured dtype by index. + */ +NPY_NO_EXPORT int +_unpack_field_index( + _PyArray_LegacyDescr *descr, + npy_intp index, + PyArray_Descr **odescr, + npy_intp *offset); + /* * check whether arrays with datatype dtype might have object fields. This will * only happen for structured dtypes (which may have hidden objects even if the @@ -110,13 +115,13 @@ check_and_adjust_index(npy_intp *index, npy_intp max_item, int axis, /* Try to be as clear as possible about what went wrong. 
*/ if (axis >= 0) { PyErr_Format(PyExc_IndexError, - "index %"NPY_INTP_FMT" is out of bounds " - "for axis %d with size %"NPY_INTP_FMT, + "index %" NPY_INTP_FMT" is out of bounds " + "for axis %d with size %" NPY_INTP_FMT, *index, axis, max_item); } else { PyErr_Format(PyExc_IndexError, - "index %"NPY_INTP_FMT" is out of bounds " - "for size %"NPY_INTP_FMT, *index, max_item); + "index %" NPY_INTP_FMT " is out of bounds " + "for size %" NPY_INTP_FMT, *index, max_item); } return -1; } @@ -139,25 +144,14 @@ check_and_adjust_axis_msg(int *axis, int ndim, PyObject *msg_prefix) { /* Check that index is valid, taking into account negative indices */ if (NPY_UNLIKELY((*axis < -ndim) || (*axis >= ndim))) { - /* - * Load the exception type, if we don't already have it. Unfortunately - * we don't have access to npy_cache_import here - */ - static PyObject *AxisError_cls = NULL; - PyObject *exc; - - npy_cache_import("numpy.exceptions", "AxisError", &AxisError_cls); - if (AxisError_cls == NULL) { - return -1; - } - /* Invoke the AxisError constructor */ - exc = PyObject_CallFunction(AxisError_cls, "iiO", - *axis, ndim, msg_prefix); + PyObject *exc = PyObject_CallFunction( + npy_static_pydata.AxisError, "iiO", *axis, ndim, + msg_prefix); if (exc == NULL) { return -1; } - PyErr_SetObject(AxisError_cls, exc); + PyErr_SetObject(npy_static_pydata.AxisError, exc); Py_DECREF(exc); return -1; @@ -180,7 +174,9 @@ check_and_adjust_axis(int *axis, int ndim) * . * clang versions < 8.0.0 have the same bug. */ -#if (!defined __STDC_VERSION__ || __STDC_VERSION__ < 201112 \ +#ifdef __cplusplus +#define NPY_ALIGNOF(type) alignof(type) +#elif (!defined __STDC_VERSION__ || __STDC_VERSION__ < 201112 \ || (defined __GNUC__ && __GNUC__ < 4 + (__GNUC_MINOR__ < 9) \ && !defined __clang__) \ || (defined __clang__ && __clang_major__ < 8)) @@ -252,15 +248,6 @@ npy_uint_alignment(int itemsize) * compared to memchr it returns one stride past end instead of NULL if needle * is not found. 
*/ -#ifdef __clang__ - /* - * The code below currently makes use of !NPY_ALIGNMENT_REQUIRED, which - * should be OK but causes the clang sanitizer to warn. It may make - * sense to modify the code to avoid this "unaligned" access but - * it would be good to carefully check the performance changes. - */ - __attribute__((no_sanitize("alignment"))) -#endif static inline char * npy_memchr(char * haystack, char needle, npy_intp stride, npy_intp size, npy_intp * psubloopsize, int invert) @@ -281,11 +268,12 @@ npy_memchr(char * haystack, char needle, } else { /* usually find elements to skip path */ - if (!NPY_ALIGNMENT_REQUIRED && needle == 0 && stride == 1) { + if (needle == 0 && stride == 1) { /* iterate until last multiple of 4 */ char * block_end = haystack + size - (size % sizeof(unsigned int)); while (p < block_end) { - unsigned int v = *(unsigned int*)p; + unsigned int v; + memcpy(&v, p, sizeof(v)); if (v != 0) { break; } @@ -340,8 +328,6 @@ NPY_NO_EXPORT int check_is_convertible_to_scalar(PyArrayObject *v); -#include "ucsnarrow.h" - /* * Make a new empty array, of the passed size, of a type that takes the * priority of ap1 and ap2 into account. 
@@ -364,4 +350,8 @@ new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out, */ #define NPY_ITER_REDUCTION_AXIS(axis) (axis + (1 << (NPY_BITSOF_INT - 2))) +#ifdef __cplusplus +} +#endif + #endif /* NUMPY_CORE_SRC_MULTIARRAY_COMMON_H_ */ diff --git a/numpy/_core/src/multiarray/common_dtype.c b/numpy/_core/src/multiarray/common_dtype.c index f2ec41e0c7aa..fa3328e8f276 100644 --- a/numpy/_core/src/multiarray/common_dtype.c +++ b/numpy/_core/src/multiarray/common_dtype.c @@ -7,9 +7,11 @@ #include "numpy/npy_common.h" #include "numpy/arrayobject.h" +#include "alloc.h" #include "convert_datatype.h" #include "dtypemeta.h" #include "abstractdtypes.h" +#include "npy_static_data.h" /* @@ -63,7 +65,7 @@ PyArray_CommonDType(PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtype2) } if (common_dtype == (PyArray_DTypeMeta *)Py_NotImplemented) { Py_DECREF(Py_NotImplemented); - PyErr_Format(npy_DTypePromotionError, + PyErr_Format(npy_static_pydata.DTypePromotionError, "The DTypes %S and %S do not have a common DType. 
" "For example they cannot be stored in a single array unless " "the dtype is `object`.", dtype1, dtype2); @@ -105,7 +107,7 @@ PyArray_CommonDType(PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtype2) * default_builtin_common_dtype * * @param length Number of DTypes - * @param dtypes + * @param dtypes List of DTypes to be reduced */ static PyArray_DTypeMeta * reduce_dtypes_to_most_knowledgeable( @@ -131,7 +133,7 @@ reduce_dtypes_to_most_knowledgeable( } if (res == (PyArray_DTypeMeta *)Py_NotImplemented) { - /* guess at other being more "knowledgable" */ + /* guess at other being more "knowledgeable" */ PyArray_DTypeMeta *tmp = dtypes[low]; dtypes[low] = dtypes[high]; dtypes[high] = tmp; @@ -210,19 +212,10 @@ PyArray_PromoteDTypeSequence( PyArray_DTypeMeta *result = NULL; /* Copy dtypes so that we can reorder them (only allocate when many) */ - PyObject *_scratch_stack[NPY_MAXARGS]; - PyObject **_scratch_heap = NULL; - PyArray_DTypeMeta **dtypes = (PyArray_DTypeMeta **)_scratch_stack; - - if (length > NPY_MAXARGS) { - _scratch_heap = PyMem_Malloc(length * sizeof(PyObject *)); - if (_scratch_heap == NULL) { - PyErr_NoMemory(); - return NULL; - } - dtypes = (PyArray_DTypeMeta **)_scratch_heap; + NPY_ALLOC_WORKSPACE(dtypes, PyArray_DTypeMeta *, 16, length); + if (dtypes == NULL) { + return NULL; } - memcpy(dtypes, dtypes_in, length * sizeof(PyObject *)); /* @@ -284,7 +277,7 @@ PyArray_PromoteDTypeSequence( Py_INCREF(dtypes_in[l]); PyTuple_SET_ITEM(dtypes_in_tuple, l, (PyObject *)dtypes_in[l]); } - PyErr_Format(npy_DTypePromotionError, + PyErr_Format(npy_static_pydata.DTypePromotionError, "The DType %S could not be promoted by %S. This means that " "no common DType exists for the given inputs. 
" "For example they cannot be stored in a single array unless " @@ -310,6 +303,6 @@ PyArray_PromoteDTypeSequence( } finish: - PyMem_Free(_scratch_heap); + npy_free_workspace(dtypes); return result; } diff --git a/numpy/_core/src/multiarray/compiled_base.c b/numpy/_core/src/multiarray/compiled_base.c index 27455797cfa3..23e922d470d0 100644 --- a/numpy/_core/src/multiarray/compiled_base.c +++ b/numpy/_core/src/multiarray/compiled_base.c @@ -101,7 +101,7 @@ minmax(const npy_intp *data, npy_intp data_len, npy_intp *mn, npy_intp *mx) * arr_bincount is registered as bincount. * * bincount accepts one, two or three arguments. The first is an array of - * non-negative integers The second, if present, is an array of weights, + * non-negative integers. The second, if present, is an array of weights, * which must be promotable to double. Call these arguments list and * weight. Both must be one-dimensional with len(weight) == len(list). If * weight is not present then bincount(list)[i] is the number of occurrences @@ -130,22 +130,64 @@ arr_bincount(PyObject *NPY_UNUSED(self), PyObject *const *args, return NULL; } - lst = (PyArrayObject *)PyArray_ContiguousFromAny(list, NPY_INTP, 1, 1); + /* + * Accepting arbitrary lists that are cast to NPY_INTP, possibly + * losing precision because of unsafe casts, is deprecated. We + * continue to use PyArray_ContiguousFromAny(list, NPY_INTP, 1, 1) + * to convert the input during the deprecation period, but we also + * check to see if a deprecation warning should be generated. + * Some refactoring will be needed when the deprecation expires. + */ + + /* Check to see if we should generate a deprecation warning. */ + if (!PyArray_Check(list)) { + /* list is not a numpy array, so convert it. */ + PyArrayObject *tmp1 = (PyArrayObject *)PyArray_FromAny( + list, NULL, 1, 1, + NPY_ARRAY_DEFAULT, NULL); + if (tmp1 == NULL) { + goto fail; + } + if (PyArray_SIZE(tmp1) > 0) { + /* The input is not empty, so convert it to NPY_INTP. 
*/ + int flags = NPY_ARRAY_WRITEABLE | NPY_ARRAY_ALIGNED | NPY_ARRAY_C_CONTIGUOUS; + if (PyArray_ISINTEGER(tmp1)) { + flags = flags | NPY_ARRAY_FORCECAST; + } + PyArray_Descr* local_dtype = PyArray_DescrFromType(NPY_INTP); + lst = (PyArrayObject *)PyArray_FromAny((PyObject *)tmp1, local_dtype, 1, 1, flags, NULL); + Py_DECREF(tmp1); + if (lst == NULL) { + goto fail; + } + } + else { + /* Got an empty list. */ + Py_DECREF(tmp1); + } + } + if (lst == NULL) { - goto fail; + int flags = NPY_ARRAY_WRITEABLE | NPY_ARRAY_ALIGNED | NPY_ARRAY_C_CONTIGUOUS; + if (PyArray_Check((PyObject *)list) && + PyArray_ISINTEGER((PyArrayObject *)list)) { + flags = flags | NPY_ARRAY_FORCECAST; + } + PyArray_Descr* local_dtype = PyArray_DescrFromType(NPY_INTP); + lst = (PyArrayObject *)PyArray_FromAny(list, local_dtype, 1, 1, flags, NULL); + if (lst == NULL) { + goto fail; + } } len = PyArray_SIZE(lst); /* - * This if/else if can be removed by changing the argspec to O|On above, - * once we retire the deprecation + * This if/else if can be removed by changing the argspec above, */ if (mlength == Py_None) { - /* NumPy 1.14, 2017-06-01 */ - if (DEPRECATE("0 should be passed as minlength instead of None; " - "this will error in future.") < 0) { - goto fail; - } + PyErr_SetString(PyExc_TypeError, + "use 0 instead of None for minlength"); + goto fail; } else if (mlength != NULL) { minlength = PyArray_PyIntAsIntp(mlength); @@ -395,7 +437,7 @@ _linear_search(const npy_double key, const npy_double *arr, const npy_intp len, /** @brief find index of a sorted array such that arr[i] <= key < arr[i + 1]. * - * If an starting index guess is in-range, the array values around this + * If a starting index guess is in-range, the array values around this * index are first checked. This allows for repeated calls for well-ordered * keys (a very common case) to use the previous index as a very good guess. 
* @@ -865,11 +907,11 @@ arr_interp_complex(PyObject *NPY_UNUSED(self), PyObject *const *args, Py_ssize_t return NULL; } -static const char *EMPTY_SEQUENCE_ERR_MSG = "indices must be integral: the provided " \ +static const char EMPTY_SEQUENCE_ERR_MSG[] = "indices must be integral: the provided " \ "empty sequence was inferred as float. Wrap it with " \ "'np.array(indices, dtype=np.intp)'"; -static const char *NON_INTEGRAL_ERROR_MSG = "only int indices permitted"; +static const char NON_INTEGRAL_ERROR_MSG[] = "only int indices permitted"; /* Convert obj to an ndarray with integer dtype or fail */ static PyArrayObject * @@ -1410,18 +1452,11 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t PyObject *obj; PyObject *str; const char *docstr; - static char *msg = "already has a different docstring"; + static const char msg[] = "already has a different docstring"; /* Don't add docstrings */ #if PY_VERSION_HEX > 0x030b0000 - static long optimize = -1000; - if (optimize < 0) { - PyObject *flags = PySys_GetObject("flags"); /* borrowed object */ - PyObject *level = PyObject_GetAttrString(flags, "optimize"); - optimize = PyLong_AsLong(level); - Py_DECREF(level); - } - if (optimize > 1) { + if (npy_static_cdata.optimize > 1) { #else if (Py_OptimizeFlag > 1) { #endif @@ -1474,7 +1509,7 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t PyTypeObject *new = (PyTypeObject *)obj; _ADDDOC(new->tp_doc, new->tp_name); if (new->tp_dict != NULL && PyDict_CheckExact(new->tp_dict) && - PyDict_GetItemString(new->tp_dict, "__doc__") == Py_None) { + PyDict_GetItemString(new->tp_dict, "__doc__") == Py_None) { // noqa: borrowed-ref - manual fix needed /* Warning: Modifying `tp_dict` is not generally safe! 
*/ if (PyDict_SetItemString(new->tp_dict, "__doc__", str) < 0) { return NULL; @@ -1572,19 +1607,15 @@ pack_inner(const char *inptr, bb[1] = npyv_tobits_b8(npyv_cmpneq_u8(v1, v_zero)); bb[2] = npyv_tobits_b8(npyv_cmpneq_u8(v2, v_zero)); bb[3] = npyv_tobits_b8(npyv_cmpneq_u8(v3, v_zero)); - if(out_stride == 1 && - (!NPY_ALIGNMENT_REQUIRED || isAligned)) { - npy_uint64 *ptr64 = (npy_uint64*)outptr; + if(out_stride == 1 && isAligned) { #if NPY_SIMD_WIDTH == 16 - npy_uint64 bcomp = bb[0] | (bb[1] << 16) | (bb[2] << 32) | (bb[3] << 48); - ptr64[0] = bcomp; + npy_uint64 arr[1] = {bb[0] | (bb[1] << 16) | (bb[2] << 32) | (bb[3] << 48)}; #elif NPY_SIMD_WIDTH == 32 - ptr64[0] = bb[0] | (bb[1] << 32); - ptr64[1] = bb[2] | (bb[3] << 32); + npy_uint64 arr[2] = {bb[0] | (bb[1] << 32), bb[2] | (bb[3] << 32)}; #else - ptr64[0] = bb[0]; ptr64[1] = bb[1]; - ptr64[2] = bb[2]; ptr64[3] = bb[3]; + npy_uint64 arr[4] = {bb[0], bb[1], bb[2], bb[3]}; #endif + memcpy(outptr, arr, sizeof(arr)); outptr += vstepx4; } else { for(int i = 0; i < 4; i++) { @@ -1754,15 +1785,6 @@ pack_bits(PyObject *input, int axis, char order) static PyObject * unpack_bits(PyObject *input, int axis, PyObject *count_obj, char order) { - static int unpack_init = 0; - /* - * lookuptable for bitorder big as it has been around longer - * bitorder little is handled via byteswapping in the loop - */ - static union { - npy_uint8 bytes[8]; - npy_uint64 uint64; - } unpack_lookup_big[256]; PyArrayObject *inp; PyArrayObject *new = NULL; PyArrayObject *out = NULL; @@ -1848,22 +1870,6 @@ unpack_bits(PyObject *input, int axis, PyObject *count_obj, char order) goto fail; } - /* - * setup lookup table under GIL, 256 8 byte blocks representing 8 bits - * expanded to 1/0 bytes - */ - if (unpack_init == 0) { - npy_intp j; - for (j=0; j < 256; j++) { - npy_intp k; - for (k=0; k < 8; k++) { - npy_uint8 v = (j & (1 << k)) == (1 << k); - unpack_lookup_big[j].bytes[7 - k] = v; - } - } - unpack_init = 1; - } - count = PyArray_DIM(new, axis) 
* 8; if (outdims[axis] > count) { in_n = count / 8; @@ -1890,7 +1896,7 @@ unpack_bits(PyObject *input, int axis, PyObject *count_obj, char order) /* for unity stride we can just copy out of the lookup table */ if (order == 'b') { for (index = 0; index < in_n; index++) { - npy_uint64 v = unpack_lookup_big[*inptr].uint64; + npy_uint64 v = npy_static_cdata.unpack_lookup_big[*inptr].uint64; memcpy(outptr, &v, 8); outptr += 8; inptr += in_stride; @@ -1898,7 +1904,7 @@ unpack_bits(PyObject *input, int axis, PyObject *count_obj, char order) } else { for (index = 0; index < in_n; index++) { - npy_uint64 v = unpack_lookup_big[*inptr].uint64; + npy_uint64 v = npy_static_cdata.unpack_lookup_big[*inptr].uint64; if (order != 'b') { v = npy_bswap8(v); } @@ -1909,7 +1915,7 @@ unpack_bits(PyObject *input, int axis, PyObject *count_obj, char order) } /* Clean up the tail portion */ if (in_tail) { - npy_uint64 v = unpack_lookup_big[*inptr].uint64; + npy_uint64 v = npy_static_cdata.unpack_lookup_big[*inptr].uint64; if (order != 'b') { v = npy_bswap8(v); } diff --git a/numpy/_core/src/multiarray/compiled_base.h b/numpy/_core/src/multiarray/compiled_base.h index e0e73ac798bf..b8081c8d3a55 100644 --- a/numpy/_core/src/multiarray/compiled_base.h +++ b/numpy/_core/src/multiarray/compiled_base.h @@ -10,9 +10,9 @@ arr_bincount(PyObject *, PyObject *const *, Py_ssize_t, PyObject *); NPY_NO_EXPORT PyObject * arr__monotonicity(PyObject *, PyObject *, PyObject *kwds); NPY_NO_EXPORT PyObject * -arr_interp(PyObject *, PyObject *const *, Py_ssize_t, PyObject *, PyObject *); +arr_interp(PyObject *, PyObject *const *, Py_ssize_t, PyObject *); NPY_NO_EXPORT PyObject * -arr_interp_complex(PyObject *, PyObject *const *, Py_ssize_t, PyObject *, PyObject *); +arr_interp_complex(PyObject *, PyObject *const *, Py_ssize_t, PyObject *); NPY_NO_EXPORT PyObject * arr_ravel_multi_index(PyObject *, PyObject *, PyObject *); NPY_NO_EXPORT PyObject * diff --git a/numpy/_core/src/multiarray/conversion_utils.c 
b/numpy/_core/src/multiarray/conversion_utils.c index 9eba190323ea..41405beef9a8 100644 --- a/numpy/_core/src/multiarray/conversion_utils.c +++ b/numpy/_core/src/multiarray/conversion_utils.c @@ -18,6 +18,7 @@ #include "conversion_utils.h" #include "alloc.h" #include "npy_buffer.h" +#include "npy_static_data.h" #include "multiarraymodule.h" static int @@ -116,18 +117,10 @@ PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq) seq->ptr = NULL; seq->len = 0; - /* - * When the deprecation below expires, remove the `if` statement, and - * update the comment for PyArray_OptionalIntpConverter. - */ if (obj == Py_None) { - /* Numpy 1.20, 2020-05-31 */ - if (DEPRECATE( - "Passing None into shape arguments as an alias for () is " - "deprecated.") < 0){ - return NPY_FAIL; - } - return NPY_SUCCEED; + PyErr_SetString(PyExc_TypeError, + "Use () not None as shape arguments"); + return NPY_FAIL; } PyObject *seq_obj = NULL; @@ -137,7 +130,7 @@ PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq) * dimension_from_scalar as soon as possible. */ if (!PyLong_CheckExact(obj) && PySequence_Check(obj)) { - seq_obj = PySequence_Fast(obj, + seq_obj = PySequence_Fast(obj, // noqa: borrowed-ref - manual fix needed "expected a sequence of integers or a single integer."); if (seq_obj == NULL) { /* continue attempting to parse as a single integer. */ @@ -214,7 +207,6 @@ PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq) /* * Like PyArray_IntpConverter, but leaves `seq` untouched if `None` is passed - * rather than treating `None` as `()`. 
*/ NPY_NO_EXPORT int PyArray_OptionalIntpConverter(PyObject *obj, PyArray_Dims *seq) @@ -234,10 +226,8 @@ PyArray_CopyConverter(PyObject *obj, NPY_COPYMODE *copymode) { } int int_copymode; - static PyObject* numpy_CopyMode = NULL; - npy_cache_import("numpy", "_CopyMode", &numpy_CopyMode); - if (numpy_CopyMode != NULL && (PyObject *)Py_TYPE(obj) == numpy_CopyMode) { + if ((PyObject *)Py_TYPE(obj) == npy_static_pydata._CopyMode) { PyObject* mode_value = PyObject_GetAttrString(obj, "value"); if (mode_value == NULL) { return NPY_FAIL; @@ -271,10 +261,8 @@ NPY_NO_EXPORT int PyArray_AsTypeCopyConverter(PyObject *obj, NPY_ASTYPECOPYMODE *copymode) { int int_copymode; - static PyObject* numpy_CopyMode = NULL; - npy_cache_import("numpy", "_CopyMode", &numpy_CopyMode); - if (numpy_CopyMode != NULL && (PyObject *)Py_TYPE(obj) == numpy_CopyMode) { + if ((PyObject *)Py_TYPE(obj) == npy_static_pydata._CopyMode) { PyErr_SetString(PyExc_ValueError, "_CopyMode enum is not allowed for astype function. " "Use true/false instead."); @@ -330,7 +318,7 @@ PyArray_BufferConverter(PyObject *obj, PyArray_Chunk *buf) buf->len = (npy_intp) view.len; /* - * In Python 3 both of the deprecated functions PyObject_AsWriteBuffer and + * Both of the deprecated functions PyObject_AsWriteBuffer and * PyObject_AsReadBuffer that this code replaces release the buffer. It is * up to the object that supplies the buffer to guarantee that the buffer * sticks around after the release. 
@@ -450,15 +438,11 @@ PyArray_ConvertMultiAxis(PyObject *axis_in, int ndim, npy_bool *out_axis_flags) NPY_NO_EXPORT int PyArray_BoolConverter(PyObject *object, npy_bool *val) { - if (PyObject_IsTrue(object)) { - *val = NPY_TRUE; - } - else { - *val = NPY_FALSE; - } - if (PyErr_Occurred()) { + int bool_val = PyObject_IsTrue(object); + if (bool_val == -1) { return NPY_FAIL; } + *val = (npy_bool)bool_val; return NPY_SUCCEED; } @@ -472,15 +456,11 @@ PyArray_OptionalBoolConverter(PyObject *object, int *val) if (object == Py_None) { return NPY_SUCCEED; } - if (PyObject_IsTrue(object)) { - *val = 1; - } - else { - *val = 0; - } - if (PyErr_Occurred()) { + int bool_val = PyObject_IsTrue(object); + if (bool_val == -1) { return NPY_FAIL; } + *val = (npy_bool)bool_val; return NPY_SUCCEED; } @@ -680,15 +660,12 @@ static int searchside_parser(char const *str, Py_ssize_t length, void *data) } /* Filters out the case sensitive/non-exact - * match inputs and other inputs and outputs DeprecationWarning + * match inputs and other inputs and outputs */ if (!is_exact) { - /* NumPy 1.20, 2020-05-19 */ - if (DEPRECATE("inexact matches and case insensitive matches " - "for search side are deprecated, please use " - "one of 'left' or 'right' instead.") < 0) { - return -1; - } + PyErr_SetString(PyExc_ValueError, + "search side must be one of 'left' or 'right'"); + return -1; } return 0; @@ -772,15 +749,12 @@ static int clipmode_parser(char const *str, Py_ssize_t length, void *data) } /* Filters out the case sensitive/non-exact - * match inputs and other inputs and outputs DeprecationWarning + * match inputs and other inputs and outputs */ if (!is_exact) { - /* Numpy 1.20, 2020-05-19 */ - if (DEPRECATE("inexact matches and case insensitive matches " - "for clip mode are deprecated, please use " - "one of 'clip', 'raise', or 'wrap' instead.") < 0) { - return -1; - } + PyErr_SetString(PyExc_ValueError, + "Use one of 'clip', 'raise', or 'wrap' for clip mode"); + return -1; } return 0; @@ 
-896,12 +870,9 @@ static int correlatemode_parser(char const *str, Py_ssize_t length, void *data) * match inputs and other inputs and outputs DeprecationWarning */ if (!is_exact) { - /* Numpy 1.21, 2021-01-19 */ - if (DEPRECATE("inexact matches and case insensitive matches for " - "convolve/correlate mode are deprecated, please " - "use one of 'valid', 'same', or 'full' instead.") < 0) { - return -1; - } + PyErr_SetString(PyExc_ValueError, + "Use one of 'valid', 'same', or 'full' for convolve/correlate mode"); + return -1; } return 0; @@ -940,7 +911,7 @@ PyArray_CorrelatemodeConverter(PyObject *object, NPY_CORRELATEMODE *val) } } -static int casting_parser(char const *str, Py_ssize_t length, void *data) +static int casting_parser_full(char const *str, Py_ssize_t length, void *data, int can_use_same_value) { NPY_CASTING *casting = (NPY_CASTING *)data; if (length < 2) { @@ -970,6 +941,10 @@ static int casting_parser(char const *str, Py_ssize_t length, void *data) *casting = NPY_SAME_KIND_CASTING; return 0; } + if (can_use_same_value && length == 10 && strcmp(str, "same_value") == 0) { + *casting = NPY_SAME_VALUE_CASTING; + return 0; + } break; case 's': if (length == 6 && strcmp(str, "unsafe") == 0) { @@ -981,6 +956,11 @@ static int casting_parser(char const *str, Py_ssize_t length, void *data) return -1; } +static int casting_parser(char const *str, Py_ssize_t length, void *data) +{ + return casting_parser_full(str, length, data, 0); +} + /*NUMPY_API * Convert any Python object, *obj*, to an NPY_CASTING enum. 
*/ @@ -990,10 +970,26 @@ PyArray_CastingConverter(PyObject *obj, NPY_CASTING *casting) return string_converter_helper( obj, (void *)casting, casting_parser, "casting", "must be one of 'no', 'equiv', 'safe', " - "'same_kind', or 'unsafe'"); + "'same_kind', 'unsafe'"); + return 0; +} + +static int casting_parser_same_value(char const *str, Py_ssize_t length, void *data) +{ + return casting_parser_full(str, length, data, 1); +} + +NPY_NO_EXPORT int +PyArray_CastingConverterSameValue(PyObject *obj, NPY_CASTING *casting) +{ + return string_converter_helper( + obj, (void *)casting, casting_parser_same_value, "casting", + "must be one of 'no', 'equiv', 'safe', " + "'same_kind', 'unsafe', 'same_value'"); return 0; } + /***************************** * Other conversion functions *****************************/ @@ -1126,7 +1122,7 @@ PyArray_IntpFromPyIntConverter(PyObject *o, npy_intp *val) * @param seq A sequence created using `PySequence_Fast`. * @param vals Array used to store dimensions (must be large enough to * hold `maxvals` values). - * @param max_vals Maximum number of dimensions that can be written into `vals`. + * @param maxvals Maximum number of dimensions that can be written into `vals`. * @return Number of dimensions or -1 if an error occurred. * * .. note:: @@ -1164,7 +1160,7 @@ PyArray_IntpFromSequence(PyObject *seq, npy_intp *vals, int maxvals) { PyObject *seq_obj = NULL; if (!PyLong_CheckExact(seq) && PySequence_Check(seq)) { - seq_obj = PySequence_Fast(seq, + seq_obj = PySequence_Fast(seq, // noqa: borrowed-ref - manual fix needed "expected a sequence of integers or a single integer"); if (seq_obj == NULL) { /* continue attempting to parse as a single integer. */ @@ -1203,7 +1199,7 @@ PyArray_IntpFromSequence(PyObject *seq, npy_intp *vals, int maxvals) * that it is in an unpickle context instead of a normal context without * evil global state like we create here. 
*/ -NPY_NO_EXPORT int evil_global_disable_warn_O4O8_flag = 0; +NPY_NO_EXPORT NPY_TLS int evil_global_disable_warn_O4O8_flag = 0; /* * Convert a gentype (that is actually a generic kind character) and @@ -1236,11 +1232,6 @@ PyArray_TypestrConvert(int itemsize, int gentype) case 8: newtype = NPY_INT64; break; -#ifdef NPY_INT128 - case 16: - newtype = NPY_INT128; - break; -#endif } break; @@ -1258,11 +1249,6 @@ PyArray_TypestrConvert(int itemsize, int gentype) case 8: newtype = NPY_UINT64; break; -#ifdef NPY_INT128 - case 16: - newtype = NPY_UINT128; - break; -#endif } break; @@ -1346,20 +1332,6 @@ PyArray_TypestrConvert(int itemsize, int gentype) newtype = NPY_STRING; break; - case NPY_DEPRECATED_STRINGLTR2: - { - /* - * raise a deprecation warning, which might be an exception - * if warnings are errors, so leave newtype unset in that - * case - */ - int ret = DEPRECATE("Data type alias 'a' was deprecated in NumPy 2.0. " - "Use the 'S' alias instead."); - if (ret == 0) { - newtype = NPY_STRING; - } - break; - } case NPY_UNICODELTR: newtype = NPY_UNICODE; break; @@ -1415,12 +1387,7 @@ PyArray_IntTupleFromIntp(int len, npy_intp const *vals) NPY_NO_EXPORT int _not_NoValue(PyObject *obj, PyObject **out) { - static PyObject *NoValue = NULL; - npy_cache_import("numpy", "_NoValue", &NoValue); - if (NoValue == NULL) { - return 0; - } - if (obj == NoValue) { + if (obj == npy_static_pydata._NoValue) { *out = NULL; } else { @@ -1440,7 +1407,7 @@ PyArray_DeviceConverterOptional(PyObject *object, NPY_DEVICE *device) } if (PyUnicode_Check(object) && - PyUnicode_Compare(object, npy_ma_str_cpu) == 0) { + PyUnicode_Compare(object, npy_interned_str.cpu) == 0) { *device = NPY_DEVICE_CPU; return NPY_SUCCEED; } diff --git a/numpy/_core/src/multiarray/conversion_utils.h b/numpy/_core/src/multiarray/conversion_utils.h index f138c3b98529..bff1db0c069d 100644 --- a/numpy/_core/src/multiarray/conversion_utils.h +++ b/numpy/_core/src/multiarray/conversion_utils.h @@ -113,7 +113,7 @@ 
PyArray_DeviceConverterOptional(PyObject *object, NPY_DEVICE *device); * that it is in an unpickle context instead of a normal context without * evil global state like we create here. */ -extern NPY_NO_EXPORT int evil_global_disable_warn_O4O8_flag; +extern NPY_NO_EXPORT NPY_TLS int evil_global_disable_warn_O4O8_flag; /* * Convert function which replaces np._NoValue with NULL. diff --git a/numpy/_core/src/multiarray/convert.c b/numpy/_core/src/multiarray/convert.c index aad40cab9593..ccd883f2b0f4 100644 --- a/numpy/_core/src/multiarray/convert.c +++ b/numpy/_core/src/multiarray/convert.c @@ -11,6 +11,7 @@ #include "numpy/arrayscalars.h" +#include "alloc.h" #include "common.h" #include "arrayobject.h" #include "ctors.h" @@ -43,6 +44,7 @@ npy_fallocate(npy_intp nbytes, FILE * fp) */ #if defined(HAVE_FALLOCATE) && defined(__linux__) int r; + npy_intp offset; /* small files not worth the system call */ if (nbytes < 16 * 1024 * 1024) { return 0; @@ -59,7 +61,8 @@ npy_fallocate(npy_intp nbytes, FILE * fp) * the flag "1" (=FALLOC_FL_KEEP_SIZE) is needed for the case of files * opened in append mode (issue #8329) */ - r = fallocate(fileno(fp), 1, npy_ftell(fp), nbytes); + offset = npy_ftell(fp); + r = fallocate(fileno(fp), 1, offset, nbytes); NPY_END_ALLOW_THREADS; /* @@ -67,7 +70,8 @@ npy_fallocate(npy_intp nbytes, FILE * fp) */ if (r == -1 && errno == ENOSPC) { PyErr_Format(PyExc_OSError, "Not enough free space to write " - "%"NPY_INTP_FMT" bytes", nbytes); + "%"NPY_INTP_FMT" bytes after offset %"NPY_INTP_FMT, + nbytes, offset); return -1; } #endif @@ -331,11 +335,7 @@ NPY_NO_EXPORT PyObject * PyArray_ToString(PyArrayObject *self, NPY_ORDER order) { npy_intp numbytes; - npy_intp i; - char *dptr; - int elsize; PyObject *ret; - PyArrayIterObject *it; if (order == NPY_ANYORDER) order = PyArray_ISFORTRAN(self) ? 
NPY_FORTRANORDER : NPY_CORDER; @@ -350,41 +350,65 @@ PyArray_ToString(PyArrayObject *self, NPY_ORDER order) numbytes = PyArray_NBYTES(self); if ((PyArray_IS_C_CONTIGUOUS(self) && (order == NPY_CORDER)) || (PyArray_IS_F_CONTIGUOUS(self) && (order == NPY_FORTRANORDER))) { - ret = PyBytes_FromStringAndSize(PyArray_DATA(self), (Py_ssize_t) numbytes); + return PyBytes_FromStringAndSize(PyArray_DATA(self), (Py_ssize_t) numbytes); } - else { - PyObject *new; - if (order == NPY_FORTRANORDER) { - /* iterators are always in C-order */ - new = PyArray_Transpose(self, NULL); - if (new == NULL) { - return NULL; - } + + /* Avoid Ravel where possible for fewer copies. */ + if (!PyDataType_REFCHK(PyArray_DESCR(self)) && + ((PyArray_DESCR(self)->flags & NPY_NEEDS_INIT) == 0)) { + + /* Allocate final Bytes Object */ + ret = PyBytes_FromStringAndSize(NULL, (Py_ssize_t) numbytes); + if (ret == NULL) { + return NULL; } - else { - Py_INCREF(self); - new = (PyObject *)self; + + /* Writable Buffer */ + char* dest = PyBytes_AS_STRING(ret); + + int flags = NPY_ARRAY_WRITEABLE; + if (order == NPY_FORTRANORDER) { + flags |= NPY_ARRAY_F_CONTIGUOUS; } - it = (PyArrayIterObject *)PyArray_IterNew(new); - Py_DECREF(new); - if (it == NULL) { + + Py_INCREF(PyArray_DESCR(self)); + /* Array view */ + PyArrayObject *dest_array = (PyArrayObject *)PyArray_NewFromDescr( + &PyArray_Type, + PyArray_DESCR(self), + PyArray_NDIM(self), + PyArray_DIMS(self), + NULL, // strides + dest, + flags, + NULL + ); + + if (dest_array == NULL) { + Py_DECREF(ret); return NULL; } - ret = PyBytes_FromStringAndSize(NULL, (Py_ssize_t) numbytes); - if (ret == NULL) { - Py_DECREF(it); + + /* Copy directly from source to destination with proper ordering */ + if (PyArray_CopyInto(dest_array, self) < 0) { + Py_DECREF(dest_array); + Py_DECREF(ret); return NULL; } - dptr = PyBytes_AS_STRING(ret); - i = it->size; - elsize = PyArray_ITEMSIZE(self); - while (i--) { - memcpy(dptr, it->dataptr, elsize); - dptr += elsize; - 
PyArray_ITER_NEXT(it); - } - Py_DECREF(it); + + Py_DECREF(dest_array); + return ret; + } + + /* Non-contiguous, Has References and/or Init Path. */ + PyArrayObject *contig = (PyArrayObject *)PyArray_Ravel(self, order); + if (contig == NULL) { + return NULL; + } + + ret = PyBytes_FromStringAndSize(PyArray_DATA(contig), numbytes); + Py_DECREF(contig); return ret; } @@ -397,28 +421,24 @@ PyArray_FillWithScalar(PyArrayObject *arr, PyObject *obj) return -1; } + PyArray_Descr *descr = PyArray_DESCR(arr); + /* * If we knew that the output array has at least one element, we would * not actually need a helping buffer, we always null it, just in case. - * - * (The longlong here should help with alignment.) + * Use `long double` to ensure that the heap allocation is aligned. */ - npy_longlong value_buffer_stack[4] = {0}; - char *value_buffer_heap = NULL; - char *value = (char *)value_buffer_stack; - PyArray_Descr *descr = PyArray_DESCR(arr); - - if ((size_t)descr->elsize > sizeof(value_buffer_stack)) { - /* We need a large temporary buffer... 
*/ - value_buffer_heap = PyObject_Calloc(1, descr->elsize); - if (value_buffer_heap == NULL) { - PyErr_NoMemory(); - return -1; - } - value = value_buffer_heap; + size_t n_max_align_t = (descr->elsize + sizeof(long double) - 1) / sizeof(long double); + NPY_ALLOC_WORKSPACE(value, long double, 2, n_max_align_t); + if (value == NULL) { + return -1; } + if (PyDataType_FLAGCHK(descr, NPY_NEEDS_INIT)) { + memset(value, 0, descr->elsize); + } + if (PyArray_Pack(descr, value, obj) < 0) { - PyMem_FREE(value_buffer_heap); + npy_free_workspace(value); return -1; } @@ -429,12 +449,12 @@ PyArray_FillWithScalar(PyArrayObject *arr, PyObject *obj) int retcode = raw_array_assign_scalar( PyArray_NDIM(arr), PyArray_DIMS(arr), descr, PyArray_BYTES(arr), PyArray_STRIDES(arr), - descr, value); + descr, (void *)value, NPY_UNSAFE_CASTING); if (PyDataType_REFCHK(descr)) { - PyArray_ClearBuffer(descr, value, 0, 1, 1); + PyArray_ClearBuffer(descr, (void *)value, 0, 1, 1); } - PyMem_FREE(value_buffer_heap); + npy_free_workspace(value); return retcode; } diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index 5711bce7bc08..0e976009a767 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -26,6 +26,7 @@ #include "legacy_dtype_implementation.h" #include "stringdtype/dtype.h" +#include "alloc.h" #include "abstractdtypes.h" #include "convert_datatype.h" #include "_datetime.h" @@ -35,7 +36,8 @@ #include "dtype_transfer.h" #include "dtype_traversal.h" #include "arrayobject.h" - +#include "npy_static_data.h" +#include "multiarraymodule.h" /* * Required length of string when converting from unsigned integer type. @@ -48,26 +50,6 @@ */ NPY_NO_EXPORT npy_intp REQUIRED_STR_LEN[] = {0, 3, 5, 10, 10, 20, 20, 20, 20}; -/* - * Whether or not legacy value-based promotion/casting is used. 
- */ - -NPY_NO_EXPORT PyObject *NO_NEP50_WARNING_CTX = NULL; -NPY_NO_EXPORT PyObject *npy_DTypePromotionError = NULL; -NPY_NO_EXPORT PyObject *npy_UFuncNoLoopError = NULL; - -static NPY_TLS int npy_promotion_state = NPY_USE_LEGACY_PROMOTION; - -NPY_NO_EXPORT int -get_npy_promotion_state() { - return npy_promotion_state; -} - -NPY_NO_EXPORT void -set_npy_promotion_state(int new_promotion_state) { - npy_promotion_state = new_promotion_state; -} - static PyObject * PyArray_GetGenericToVoidCastingImpl(void); @@ -81,120 +63,24 @@ static PyObject * PyArray_GetObjectToGenericCastingImpl(void); -/* - * Return 1 if promotion warnings should be given and 0 if they are currently - * suppressed in the local context. - */ -NPY_NO_EXPORT int -npy_give_promotion_warnings(void) -{ - PyObject *val; - - npy_cache_import( - "numpy._core._ufunc_config", "NO_NEP50_WARNING", - &NO_NEP50_WARNING_CTX); - if (NO_NEP50_WARNING_CTX == NULL) { - PyErr_WriteUnraisable(NULL); - return 1; - } - - if (PyContextVar_Get(NO_NEP50_WARNING_CTX, Py_False, &val) < 0) { - /* Errors should not really happen, but if it does assume we warn. 
*/ - PyErr_WriteUnraisable(NULL); - return 1; - } - Py_DECREF(val); - /* only when the no-warnings context is false, we give warnings */ - return val == Py_False; -} - - -NPY_NO_EXPORT PyObject * -npy__get_promotion_state(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(arg)) { - int promotion_state = get_npy_promotion_state(); - if (promotion_state == NPY_USE_WEAK_PROMOTION) { - return PyUnicode_FromString("weak"); - } - else if (promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN) { - return PyUnicode_FromString("weak_and_warn"); - } - else if (promotion_state == NPY_USE_LEGACY_PROMOTION) { - return PyUnicode_FromString("legacy"); - } - PyErr_SetString(PyExc_SystemError, "invalid promotion state!"); - return NULL; -} - - -NPY_NO_EXPORT PyObject * -npy__set_promotion_state(PyObject *NPY_UNUSED(mod), PyObject *arg) -{ - if (!PyUnicode_Check(arg)) { - PyErr_SetString(PyExc_TypeError, - "_set_promotion_state() argument or NPY_PROMOTION_STATE " - "must be a string."); - return NULL; - } - int new_promotion_state; - if (PyUnicode_CompareWithASCIIString(arg, "weak") == 0) { - new_promotion_state = NPY_USE_WEAK_PROMOTION; - } - else if (PyUnicode_CompareWithASCIIString(arg, "weak_and_warn") == 0) { - new_promotion_state = NPY_USE_WEAK_PROMOTION_AND_WARN; - } - else if (PyUnicode_CompareWithASCIIString(arg, "legacy") == 0) { - new_promotion_state = NPY_USE_LEGACY_PROMOTION; - } - else { - PyErr_Format(PyExc_TypeError, - "_set_promotion_state() argument or NPY_PROMOTION_STATE must be " - "'weak', 'legacy', or 'weak_and_warn' but got '%.100S'", arg); - return NULL; - } - set_npy_promotion_state(new_promotion_state); - Py_RETURN_NONE; -} - -/** - * Fetch the casting implementation from one DType to another. - * - * @params from - * @params to - * - * @returns A castingimpl (PyArrayDTypeMethod *), None or NULL with an - * error set. 
- */ -NPY_NO_EXPORT PyObject * -PyArray_GetCastingImpl(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) +static PyObject * +create_casting_impl(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) { - PyObject *res; - if (from == to) { - res = (PyObject *)NPY_DT_SLOTS(from)->within_dtype_castingimpl; - } - else { - res = PyDict_GetItemWithError(NPY_DT_SLOTS(from)->castingimpls, (PyObject *)to); - } - if (res != NULL || PyErr_Occurred()) { - Py_XINCREF(res); - return res; - } /* - * The following code looks up CastingImpl based on the fact that anything + * Look up CastingImpl based on the fact that anything * can be cast to and from objects or structured (void) dtypes. - * - * The last part adds casts dynamically based on legacy definition */ if (from->type_num == NPY_OBJECT) { - res = PyArray_GetObjectToGenericCastingImpl(); + return PyArray_GetObjectToGenericCastingImpl(); } else if (to->type_num == NPY_OBJECT) { - res = PyArray_GetGenericToObjectCastingImpl(); + return PyArray_GetGenericToObjectCastingImpl(); } else if (from->type_num == NPY_VOID) { - res = PyArray_GetVoidToGenericCastingImpl(); + return PyArray_GetVoidToGenericCastingImpl(); } else if (to->type_num == NPY_VOID) { - res = PyArray_GetGenericToVoidCastingImpl(); + return PyArray_GetGenericToVoidCastingImpl(); } /* * Reject non-legacy dtypes. They need to use the new API to add casts and @@ -218,50 +104,113 @@ PyArray_GetCastingImpl(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) from->singleton, to->type_num); if (castfunc == NULL) { PyErr_Clear(); - /* Remember that this cast is not possible */ - if (PyDict_SetItem(NPY_DT_SLOTS(from)->castingimpls, - (PyObject *) to, Py_None) < 0) { - return NULL; - } Py_RETURN_NONE; } } - - /* PyArray_AddLegacyWrapping_CastingImpl find the correct casting level: */ - /* - * TODO: Possibly move this to the cast registration time. But if we do - * that, we have to also update the cast when the casting safety - * is registered. 
+ /* Create a cast using the state of the legacy casting setup defined + * during the setup of the DType. + * + * Ideally we would do this when we create the DType, but legacy user + * DTypes don't have a way to signal that a DType is done setting up + * casts. Without such a mechanism, the safest way to know that a + * DType is done setting up is to register the cast lazily the first + * time a user does the cast. + * + * We *could* register the casts when we create the wrapping + * DTypeMeta, but that means the internals of the legacy user DType + * system would need to update the state of the casting safety flags + * in the cast implementations stored on the DTypeMeta. That's an + * inversion of abstractions and would be tricky to do without + * creating circular dependencies inside NumPy. */ if (PyArray_AddLegacyWrapping_CastingImpl(from, to, -1) < 0) { return NULL; } + /* castingimpls is unconditionally filled by + * AddLegacyWrapping_CastingImpl, so this won't create a recursive + * critical section + */ return PyArray_GetCastingImpl(from, to); } +} - if (res == NULL) { +static PyObject * +ensure_castingimpl_exists(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) +{ + int return_error = 0; + PyObject *res = NULL; + + /* Need to create the cast. 
This might happen at runtime so we enter a + critical section to avoid races */ + + Py_BEGIN_CRITICAL_SECTION(NPY_DT_SLOTS(from)->castingimpls); + + /* check if another thread filled it while this thread was blocked on + acquiring the critical section */ + if (PyDict_GetItemRef(NPY_DT_SLOTS(from)->castingimpls, (PyObject *)to, + &res) < 0) { + return_error = 1; + } + else if (res == NULL) { + res = create_casting_impl(from, to); + if (res == NULL) { + return_error = 1; + } + else if (PyDict_SetItem(NPY_DT_SLOTS(from)->castingimpls, + (PyObject *)to, res) < 0) { + return_error = 1; + } + } + Py_END_CRITICAL_SECTION(); + if (return_error) { + Py_XDECREF(res); return NULL; } - if (from == to) { + if (from == to && res == Py_None) { PyErr_Format(PyExc_RuntimeError, "Internal NumPy error, within-DType cast missing for %S!", from); Py_DECREF(res); return NULL; } - if (PyDict_SetItem(NPY_DT_SLOTS(from)->castingimpls, - (PyObject *)to, res) < 0) { - Py_DECREF(res); + return res; +} + +/** + * Fetch the casting implementation from one DType to another. + * + * @param from The implementation to cast from + * @param to The implementation to cast to + * + * @returns A castingimpl (PyArrayDTypeMethod *), None or NULL with an + * error set. + */ +NPY_NO_EXPORT PyObject * +PyArray_GetCastingImpl(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) +{ + PyObject *res = NULL; + if (from == to) { + if ((NPY_DT_SLOTS(from)->within_dtype_castingimpl) != NULL) { + res = Py_XNewRef( + (PyObject *)NPY_DT_SLOTS(from)->within_dtype_castingimpl); + } + } + else if (PyDict_GetItemRef(NPY_DT_SLOTS(from)->castingimpls, + (PyObject *)to, &res) < 0) { return NULL; } - return res; + if (res != NULL) { + return res; + } + + return ensure_castingimpl_exists(from, to); } /** * Fetch the (bound) casting implementation from one DType to another. 
* - * @params from - * @params to + * @params from source DType + * @params to destination DType * * @returns A bound casting implementation or None (or NULL for error). */ @@ -311,9 +260,13 @@ _get_castingimpl(PyObject *NPY_UNUSED(module), PyObject *args) * Supports the NPY_CAST_IS_VIEW check, and should be preferred to allow * extending cast-levels if necessary. * It is not valid for one of the arguments to be -1 to indicate an error. + * Pass through NPY_SAME_VALUE_CASTING_FLAG on casting1, unless both have the + * flag, in which case return max_casting | NPY_SAME_VALUE_CASTING_FLAG. + * Usually this will be exactly NPY_SAME_VALUE_CASTING, but the logic here + * should handle other 'casting with same_value' options * - * @param casting1 - * @param casting2 + * @param casting1 First (left-hand) casting level to compare + * @param casting2 Second (right-hand) casting level to compare * @return The minimal casting error (can be -1). */ NPY_NO_EXPORT NPY_CASTING @@ -322,11 +275,14 @@ PyArray_MinCastSafety(NPY_CASTING casting1, NPY_CASTING casting2) if (casting1 < 0 || casting2 < 0) { return -1; } + int both_same_casting = casting1 & casting2 & NPY_SAME_VALUE_CASTING_FLAG; + casting1 &= ~NPY_SAME_VALUE_CASTING_FLAG; + casting2 &= ~NPY_SAME_VALUE_CASTING_FLAG; /* larger casting values are less safe */ if (casting1 > casting2) { - return casting1; + return casting1 | both_same_casting; } - return casting2; + return casting2 | both_same_casting; } @@ -395,7 +351,7 @@ PyArray_GetCastFunc(PyArray_Descr *descr, int type_num) PyObject *cobj; key = PyLong_FromLong(type_num); - cobj = PyDict_GetItem(obj, key); + cobj = PyDict_GetItem(obj, key); // noqa: borrowed-ref OK Py_DECREF(key); if (cobj && PyCapsule_CheckExact(cobj)) { castfunc = PyCapsule_GetPointer(cobj, NULL); @@ -409,12 +365,7 @@ PyArray_GetCastFunc(PyArray_Descr *descr, int type_num) !PyTypeNum_ISCOMPLEX(type_num) && PyTypeNum_ISNUMBER(type_num) && !PyTypeNum_ISBOOL(type_num)) { - static PyObject *cls = NULL; - 
npy_cache_import("numpy.exceptions", "ComplexWarning", &cls); - if (cls == NULL) { - return NULL; - } - int ret = PyErr_WarnEx(cls, + int ret = PyErr_WarnEx(npy_static_pydata.ComplexWarning, "Casting complex values to real discards " "the imaginary part", 1); if (ret < 0) { @@ -507,11 +458,13 @@ _get_cast_safety_from_castingimpl(PyArrayMethodObject *castingimpl, * implementations fully to have them available for doing the actual cast * later. * - * @param from + * @param from The descriptor to cast from * @param to The descriptor to cast to (may be NULL) * @param to_dtype If `to` is NULL, must pass the to_dtype (otherwise this * is ignored). - * @param[out] view_offset + * @param view_offset If set, the cast can be described by a view with + * this byte offset. For example, casting "i8" to "i8," + * (the structured dtype) can be described with `*view_offset = 0`. * @return NPY_CASTING or -1 on error or if the cast is not possible. */ NPY_NO_EXPORT NPY_CASTING @@ -556,7 +509,7 @@ PyArray_GetCastInfo( * user would have to guess the string length.) * * @param casting the requested casting safety. - * @param from + * @param from The descriptor to cast from * @param to The descriptor to cast to (may be NULL) * @param to_dtype If `to` is NULL, must pass the to_dtype (otherwise this * is ignored). 
@@ -716,7 +669,6 @@ dtype_kind_to_ordering(char kind) return 5; /* String kind */ case 'S': - case 'a': return 6; /* Unicode kind */ case 'U': @@ -736,26 +688,6 @@ dtype_kind_to_ordering(char kind) } } -/* Converts a type number from unsigned to signed */ -static int -type_num_unsigned_to_signed(int type_num) -{ - switch (type_num) { - case NPY_UBYTE: - return NPY_BYTE; - case NPY_USHORT: - return NPY_SHORT; - case NPY_UINT: - return NPY_INT; - case NPY_ULONG: - return NPY_LONG; - case NPY_ULONGLONG: - return NPY_LONGLONG; - default: - return type_num; - } -} - /*NUMPY_API * Returns true if data of type 'from' may be cast to data of type @@ -801,83 +733,6 @@ static int min_scalar_type_num(char *valueptr, int type_num, int *is_small_unsigned); -/* - * NOTE: This function uses value based casting logic for scalars. It will - * require updates when we phase out value-based-casting. - */ -NPY_NO_EXPORT npy_bool -can_cast_scalar_to(PyArray_Descr *scal_type, char *scal_data, - PyArray_Descr *to, NPY_CASTING casting) -{ - /* - * If the two dtypes are actually references to the same object - * or if casting type is forced unsafe then always OK. - * - * TODO: Assuming that unsafe casting always works is not actually correct - */ - if (scal_type == to || casting == NPY_UNSAFE_CASTING ) { - return 1; - } - - int valid = PyArray_CheckCastSafety(casting, scal_type, to, NPY_DTYPE(to)); - if (valid == 1) { - /* This is definitely a valid cast. */ - return 1; - } - if (valid < 0) { - /* Probably must return 0, but just keep trying for now. */ - PyErr_Clear(); - } - - /* - * If the scalar isn't a number, value-based casting cannot kick in and - * we must not attempt it. - * (Additional fast-checks would be possible, but probably unnecessary.) - */ - if (!PyTypeNum_ISNUMBER(scal_type->type_num)) { - return 0; - } - - /* - * At this point we have to check value-based casting. 
- */ - PyArray_Descr *dtype; - int is_small_unsigned = 0, type_num; - /* An aligned memory buffer large enough to hold any builtin numeric type */ - npy_longlong value[4]; - - int swap = !PyArray_ISNBO(scal_type->byteorder); - PyDataType_GetArrFuncs(scal_type)->copyswap(&value, scal_data, swap, NULL); - - type_num = min_scalar_type_num((char *)&value, scal_type->type_num, - &is_small_unsigned); - - /* - * If we've got a small unsigned scalar, and the 'to' type - * is not unsigned, then make it signed to allow the value - * to be cast more appropriately. - */ - if (is_small_unsigned && !(PyTypeNum_ISUNSIGNED(to->type_num))) { - type_num = type_num_unsigned_to_signed(type_num); - } - - dtype = PyArray_DescrFromType(type_num); - if (dtype == NULL) { - return 0; - } -#if 0 - printf("min scalar cast "); - PyObject_Print(dtype, stdout, 0); - printf(" to "); - PyObject_Print(to, stdout, 0); - printf("\n"); -#endif - npy_bool ret = PyArray_CanCastTypeTo(dtype, to, casting); - Py_DECREF(dtype); - return ret; -} - - NPY_NO_EXPORT npy_bool can_cast_pyscalar_scalar_to( int flags, PyArray_Descr *to, NPY_CASTING casting) @@ -897,30 +752,41 @@ can_cast_pyscalar_scalar_to( } else if (PyDataType_ISFLOAT(to)) { if (flags & NPY_ARRAY_WAS_PYTHON_COMPLEX) { - return casting == NPY_UNSAFE_CASTING; + return ((casting == NPY_UNSAFE_CASTING) || ((casting & NPY_SAME_VALUE_CASTING_FLAG) > 0)); } return 1; } else if (PyDataType_ISINTEGER(to)) { if (!(flags & NPY_ARRAY_WAS_PYTHON_INT)) { - return casting == NPY_UNSAFE_CASTING; + return ((casting == NPY_UNSAFE_CASTING) || ((casting & NPY_SAME_VALUE_CASTING_FLAG) > 0)); } return 1; } /* - * For all other cases we use the default dtype. + * For all other cases we need to make a bit of a dance to find the cast + * safety. We do so by finding the descriptor for the "scalar" (without + * a value; for parametric user dtypes a value may be needed eventually). 
*/ - PyArray_Descr *from; + PyArray_DTypeMeta *from_DType; + PyArray_Descr *default_dtype; if (flags & NPY_ARRAY_WAS_PYTHON_INT) { - from = PyArray_DescrFromType(NPY_LONG); + default_dtype = PyArray_DescrNewFromType(NPY_INTP); + from_DType = &PyArray_PyLongDType; } else if (flags & NPY_ARRAY_WAS_PYTHON_FLOAT) { - from = PyArray_DescrFromType(NPY_DOUBLE); + default_dtype = PyArray_DescrNewFromType(NPY_FLOAT64); + from_DType = &PyArray_PyFloatDType; } else { - from = PyArray_DescrFromType(NPY_CDOUBLE); + default_dtype = PyArray_DescrNewFromType(NPY_COMPLEX128); + from_DType = &PyArray_PyComplexDType; } + + PyArray_Descr *from = npy_find_descr_for_scalar( + NULL, default_dtype, from_DType, NPY_DTYPE(to)); + Py_DECREF(default_dtype); + int res = PyArray_CanCastTypeTo(from, to, casting); Py_DECREF(from); return res; @@ -944,25 +810,14 @@ PyArray_CanCastArrayTo(PyArrayObject *arr, PyArray_Descr *to, to = NULL; } - if (get_npy_promotion_state() == NPY_USE_LEGACY_PROMOTION) { - /* - * If it's a scalar, check the value. (This only currently matters for - * numeric types and for `to == NULL` it can't be numeric.) - */ - if (PyArray_NDIM(arr) == 0 && !PyArray_HASFIELDS(arr) && to != NULL) { - return can_cast_scalar_to(from, PyArray_DATA(arr), to, casting); - } - } - else { - /* - * If it's a scalar, check the value. (This only currently matters for - * numeric types and for `to == NULL` it can't be numeric.) - */ - if (PyArray_FLAGS(arr) & NPY_ARRAY_WAS_PYTHON_LITERAL && to != NULL) { - return can_cast_pyscalar_scalar_to( - PyArray_FLAGS(arr) & NPY_ARRAY_WAS_PYTHON_LITERAL, to, - casting); - } + /* + * If it's a scalar, check the value. (This only currently matters for + * numeric types and for `to == NULL` it can't be numeric.) 
+ */ + if (PyArray_FLAGS(arr) & NPY_ARRAY_WAS_PYTHON_LITERAL && to != NULL) { + return can_cast_pyscalar_scalar_to( + PyArray_FLAGS(arr) & NPY_ARRAY_WAS_PYTHON_LITERAL, to, + casting); } /* Otherwise, use the standard rules (same as `PyArray_CanCastTypeTo`) */ @@ -979,7 +834,7 @@ PyArray_CanCastArrayTo(PyArrayObject *arr, PyArray_Descr *to, NPY_NO_EXPORT const char * npy_casting_to_string(NPY_CASTING casting) { - switch (casting) { + switch ((int)casting) { case NPY_NO_CASTING: return "'no'"; case NPY_EQUIV_CASTING: @@ -990,6 +845,16 @@ npy_casting_to_string(NPY_CASTING casting) return "'same_kind'"; case NPY_UNSAFE_CASTING: return "'unsafe'"; + case NPY_NO_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + return "'no and same_value'"; + case NPY_EQUIV_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + return "'equiv and same_value'"; + case NPY_SAFE_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + return "'safe and same_value'"; + case NPY_SAME_KIND_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + return "'same_kind and same_value'"; + case NPY_UNSAFE_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + return "'same_value'"; default: return ""; } @@ -999,11 +864,10 @@ npy_casting_to_string(NPY_CASTING casting) /** * Helper function to set a useful error when casting is not possible. * - * @param src_dtype - * @param dst_dtype - * @param casting - * @param scalar Whether this was a "scalar" cast (includes 0-D array with - * PyArray_CanCastArrayTo result). + * @param src_dtype The source descriptor to cast from + * @param dst_dtype The destination descriptor trying to cast to + * @param casting The casting rule that was violated + * @param scalar Boolean flag indicating if this was a "scalar" cast. 
*/ NPY_NO_EXPORT void npy_set_invalid_cast_error( @@ -1042,58 +906,6 @@ PyArray_CanCastScalar(PyTypeObject *from, PyTypeObject *to) return (npy_bool) PyArray_CanCastSafely(fromtype, totype); } -/* - * Internal promote types function which handles unsigned integers which - * fit in same-sized signed integers specially. - */ -static PyArray_Descr * -promote_types(PyArray_Descr *type1, PyArray_Descr *type2, - int is_small_unsigned1, int is_small_unsigned2) -{ - if (is_small_unsigned1) { - int type_num1 = type1->type_num; - int type_num2 = type2->type_num; - int ret_type_num; - - if (type_num2 < NPY_NTYPES_LEGACY && !(PyTypeNum_ISBOOL(type_num2) || - PyTypeNum_ISUNSIGNED(type_num2))) { - /* Convert to the equivalent-sized signed integer */ - type_num1 = type_num_unsigned_to_signed(type_num1); - - ret_type_num = _npy_type_promotion_table[type_num1][type_num2]; - /* The table doesn't handle string/unicode/void, check the result */ - if (ret_type_num >= 0) { - return PyArray_DescrFromType(ret_type_num); - } - } - - return PyArray_PromoteTypes(type1, type2); - } - else if (is_small_unsigned2) { - int type_num1 = type1->type_num; - int type_num2 = type2->type_num; - int ret_type_num; - - if (type_num1 < NPY_NTYPES_LEGACY && !(PyTypeNum_ISBOOL(type_num1) || - PyTypeNum_ISUNSIGNED(type_num1))) { - /* Convert to the equivalent-sized signed integer */ - type_num2 = type_num_unsigned_to_signed(type_num2); - - ret_type_num = _npy_type_promotion_table[type_num1][type_num2]; - /* The table doesn't handle string/unicode/void, check the result */ - if (ret_type_num >= 0) { - return PyArray_DescrFromType(ret_type_num); - } - } - - return PyArray_PromoteTypes(type1, type2); - } - else { - return PyArray_PromoteTypes(type1, type2); - } - -} - /** * This function should possibly become public API eventually. 
At this @@ -1588,11 +1400,19 @@ static int min_scalar_type_num(char *valueptr, int type_num, } +/*NUMPY_API + * If arr is a scalar (has 0 dimensions) with a built-in number data type, + * finds the smallest type size/kind which can still represent its data. + * Otherwise, returns the array's data type. + * + * NOTE: This API is a left over from before NumPy 2 (and NEP 50) and should + * probably be eventually deprecated and removed. + */ NPY_NO_EXPORT PyArray_Descr * -PyArray_MinScalarType_internal(PyArrayObject *arr, int *is_small_unsigned) +PyArray_MinScalarType(PyArrayObject *arr) { + int is_small_unsigned; PyArray_Descr *dtype = PyArray_DESCR(arr); - *is_small_unsigned = 0; /* * If the array isn't a numeric scalar, just return the array's dtype. */ @@ -1609,23 +1429,11 @@ PyArray_MinScalarType_internal(PyArrayObject *arr, int *is_small_unsigned) return PyArray_DescrFromType( min_scalar_type_num((char *)&value, - dtype->type_num, is_small_unsigned)); + dtype->type_num, &is_small_unsigned)); } } -/*NUMPY_API - * If arr is a scalar (has 0 dimensions) with a built-in number data type, - * finds the smallest type size/kind which can still represent its data. - * Otherwise, returns the array's data type. 
- * - */ -NPY_NO_EXPORT PyArray_Descr * -PyArray_MinScalarType(PyArrayObject *arr) -{ - int is_small_unsigned; - return PyArray_MinScalarType_internal(arr, &is_small_unsigned); -} /* * Provides an ordering for the dtype 'kind' character codes, to help @@ -1800,40 +1608,21 @@ PyArray_ResultType( return NPY_DT_CALL_ensure_canonical(result); } - void **info_on_heap = NULL; - void *_info_on_stack[NPY_MAXARGS * 2]; - PyArray_DTypeMeta **all_DTypes; - PyArray_Descr **all_descriptors; - - if (narrs + ndtypes > NPY_MAXARGS) { - info_on_heap = PyMem_Malloc(2 * (narrs+ndtypes) * sizeof(PyObject *)); - if (info_on_heap == NULL) { - PyErr_NoMemory(); - return NULL; - } - all_DTypes = (PyArray_DTypeMeta **)info_on_heap; - all_descriptors = (PyArray_Descr **)(info_on_heap + narrs + ndtypes); - } - else { - all_DTypes = (PyArray_DTypeMeta **)_info_on_stack; - all_descriptors = (PyArray_Descr **)(_info_on_stack + narrs + ndtypes); + NPY_ALLOC_WORKSPACE(workspace, void *, 2 * 8, 2 * (narrs + ndtypes)); + if (workspace == NULL) { + return NULL; } + PyArray_DTypeMeta **all_DTypes = (PyArray_DTypeMeta **)workspace; // borrowed references + PyArray_Descr **all_descriptors = (PyArray_Descr **)(&all_DTypes[narrs+ndtypes]); + /* Copy all dtypes into a single array defining non-value-based behaviour */ for (npy_intp i=0; i < ndtypes; i++) { all_DTypes[i] = NPY_DTYPE(descrs[i]); - Py_INCREF(all_DTypes[i]); all_descriptors[i] = descrs[i]; } - int at_least_one_scalar = 0; - int all_pyscalar = ndtypes == 0; for (npy_intp i=0, i_all=ndtypes; i < narrs; i++, i_all++) { - /* Array descr is also the correct "default" for scalars: */ - if (PyArray_NDIM(arrs[i]) == 0) { - at_least_one_scalar = 1; - } - /* * If the original was a Python scalar/literal, we use only the * corresponding abstract DType (and no descriptor) below. 
@@ -1843,10 +1632,6 @@ PyArray_ResultType( if (PyArray_FLAGS(arrs[i]) & NPY_ARRAY_WAS_PYTHON_INT) { /* This could even be an object dtype here for large ints */ all_DTypes[i_all] = &PyArray_PyLongDType; - if (PyArray_TYPE(arrs[i]) != NPY_LONG) { - /* Not a "normal" scalar, so we cannot avoid the legacy path */ - all_pyscalar = 0; - } } else if (PyArray_FLAGS(arrs[i]) & NPY_ARRAY_WAS_PYTHON_FLOAT) { all_DTypes[i_all] = &PyArray_PyFloatDType; @@ -1857,16 +1642,11 @@ PyArray_ResultType( else { all_descriptors[i_all] = PyArray_DTYPE(arrs[i]); all_DTypes[i_all] = NPY_DTYPE(all_descriptors[i_all]); - all_pyscalar = 0; } - Py_INCREF(all_DTypes[i_all]); } PyArray_DTypeMeta *common_dtype = PyArray_PromoteDTypeSequence( narrs+ndtypes, all_DTypes); - for (npy_intp i=0; i < narrs+ndtypes; i++) { - Py_DECREF(all_DTypes[i]); - } if (common_dtype == NULL) { goto error; } @@ -1918,182 +1698,25 @@ PyArray_ResultType( } } - /* - * Unfortunately, when 0-D "scalar" arrays are involved and mixed, we *may* - * have to use the value-based logic. - * `PyArray_CheckLegacyResultType` may behave differently based on the - * current value of `npy_legacy_promotion`: - * 1. It does nothing (we use the "new" behavior) - * 2. It does nothing, but warns if there the result would differ. - * 3. It replaces the result based on the legacy value-based logic. - */ - if (at_least_one_scalar && !all_pyscalar && result->type_num < NPY_NTYPES_LEGACY) { - if (PyArray_CheckLegacyResultType( - &result, narrs, arrs, ndtypes, descrs) < 0) { - Py_DECREF(common_dtype); - Py_DECREF(result); - return NULL; - } - } - Py_DECREF(common_dtype); - PyMem_Free(info_on_heap); + npy_free_workspace(workspace); return result; error: Py_XDECREF(result); Py_XDECREF(common_dtype); - PyMem_Free(info_on_heap); + npy_free_workspace(workspace); return NULL; } -/* - * Produces the result type of a bunch of inputs, using the UFunc - * type promotion rules. 
Use this function when you have a set of - * input arrays, and need to determine an output array dtype. - * - * If all the inputs are scalars (have 0 dimensions) or the maximum "kind" - * of the scalars is greater than the maximum "kind" of the arrays, does - * a regular type promotion. - * - * Otherwise, does a type promotion on the MinScalarType - * of all the inputs. Data types passed directly are treated as array - * types. - */ -NPY_NO_EXPORT int -PyArray_CheckLegacyResultType( - PyArray_Descr **new_result, - npy_intp narrs, PyArrayObject **arr, - npy_intp ndtypes, PyArray_Descr **dtypes) -{ - PyArray_Descr *ret = NULL; - int promotion_state = get_npy_promotion_state(); - if (promotion_state == NPY_USE_WEAK_PROMOTION) { - return 0; - } - if (promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN - && !npy_give_promotion_warnings()) { - return 0; - } - - npy_intp i; - - /* If there's just one type, results must match */ - if (narrs + ndtypes == 1) { - return 0; - } - - int use_min_scalar = should_use_min_scalar(narrs, arr, ndtypes, dtypes); - - /* Loop through all the types, promoting them */ - if (!use_min_scalar) { - - /* Build a single array of all the dtypes */ - PyArray_Descr **all_dtypes = PyArray_malloc( - sizeof(*all_dtypes) * (narrs + ndtypes)); - if (all_dtypes == NULL) { - PyErr_NoMemory(); - return -1; - } - for (i = 0; i < narrs; ++i) { - all_dtypes[i] = PyArray_DESCR(arr[i]); - } - for (i = 0; i < ndtypes; ++i) { - all_dtypes[narrs + i] = dtypes[i]; - } - ret = PyArray_PromoteTypeSequence(all_dtypes, narrs + ndtypes); - PyArray_free(all_dtypes); - } - else { - int ret_is_small_unsigned = 0; - - for (i = 0; i < narrs; ++i) { - int tmp_is_small_unsigned; - PyArray_Descr *tmp = PyArray_MinScalarType_internal( - arr[i], &tmp_is_small_unsigned); - if (tmp == NULL) { - Py_XDECREF(ret); - return -1; - } - /* Combine it with the existing type */ - if (ret == NULL) { - ret = tmp; - ret_is_small_unsigned = tmp_is_small_unsigned; - } - else { - PyArray_Descr 
*tmpret = promote_types( - tmp, ret, tmp_is_small_unsigned, ret_is_small_unsigned); - Py_DECREF(tmp); - Py_DECREF(ret); - ret = tmpret; - if (ret == NULL) { - return -1; - } - - ret_is_small_unsigned = tmp_is_small_unsigned && - ret_is_small_unsigned; - } - } - - for (i = 0; i < ndtypes; ++i) { - PyArray_Descr *tmp = dtypes[i]; - /* Combine it with the existing type */ - if (ret == NULL) { - ret = tmp; - Py_INCREF(ret); - } - else { - PyArray_Descr *tmpret = promote_types( - tmp, ret, 0, ret_is_small_unsigned); - Py_DECREF(ret); - ret = tmpret; - if (ret == NULL) { - return -1; - } - } - } - /* None of the above loops ran */ - if (ret == NULL) { - PyErr_SetString(PyExc_TypeError, - "no arrays or types available to calculate result type"); - } - } - - if (ret == NULL) { - return -1; - } - - int unchanged_result = PyArray_EquivTypes(*new_result, ret); - if (unchanged_result) { - Py_DECREF(ret); - return 0; - } - - if (promotion_state == NPY_USE_LEGACY_PROMOTION) { - Py_SETREF(*new_result, ret); - return 0; - } - - assert(promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN); - if (PyErr_WarnFormat(PyExc_UserWarning, 1, - "result dtype changed due to the removal of value-based " - "promotion from NumPy. Changed from %S to %S.", - ret, *new_result) < 0) { - Py_DECREF(ret); - return -1; - } - Py_DECREF(ret); - return 0; -} - /** * Promotion of descriptors (of arbitrary DType) to their correctly * promoted instances of the given DType. * I.e. the given DType could be a string, which then finds the correct * string length, given all `descrs`. * - * @param ndescrs number of descriptors to cast and find the common instance. + * @param ndescr number of descriptors to cast and find the common instance. * At least one must be passed in. * @param descrs The descriptors to work with. * @param DType The DType of the desired output descriptor. 
@@ -2179,7 +1802,6 @@ PyArray_Zero(PyArrayObject *arr) { char *zeroval; int ret, storeflags; - static PyObject * zero_obj = NULL; if (_check_object_rec(PyArray_DESCR(arr)) < 0) { return NULL; @@ -2190,25 +1812,19 @@ PyArray_Zero(PyArrayObject *arr) return NULL; } - if (zero_obj == NULL) { - zero_obj = PyLong_FromLong((long) 0); - if (zero_obj == NULL) { - return NULL; - } - } if (PyArray_ISOBJECT(arr)) { /* XXX this is dangerous, the caller probably is not aware that zeroval is actually a static PyObject* In the best case they will only use it as-is, but - if they simply memcpy it into a ndarray without using + if they simply memcpy it into an ndarray without using setitem(), refcount errors will occur */ - memcpy(zeroval, &zero_obj, sizeof(PyObject *)); + memcpy(zeroval, &npy_static_pydata.zero_obj, sizeof(PyObject *)); return zeroval; } storeflags = PyArray_FLAGS(arr); PyArray_ENABLEFLAGS(arr, NPY_ARRAY_BEHAVED); - ret = PyArray_SETITEM(arr, zeroval, zero_obj); + ret = PyArray_SETITEM(arr, zeroval, npy_static_pydata.zero_obj); ((PyArrayObject_fields *)arr)->flags = storeflags; if (ret < 0) { PyDataMem_FREE(zeroval); @@ -2225,7 +1841,6 @@ PyArray_One(PyArrayObject *arr) { char *oneval; int ret, storeflags; - static PyObject * one_obj = NULL; if (_check_object_rec(PyArray_DESCR(arr)) < 0) { return NULL; @@ -2236,26 +1851,20 @@ PyArray_One(PyArrayObject *arr) return NULL; } - if (one_obj == NULL) { - one_obj = PyLong_FromLong((long) 1); - if (one_obj == NULL) { - return NULL; - } - } if (PyArray_ISOBJECT(arr)) { /* XXX this is dangerous, the caller probably is not aware that oneval is actually a static PyObject* In the best case they will only use it as-is, but - if they simply memcpy it into a ndarray without using + if they simply memcpy it into an ndarray without using setitem(), refcount errors will occur */ - memcpy(oneval, &one_obj, sizeof(PyObject *)); + memcpy(oneval, &npy_static_pydata.one_obj, sizeof(PyObject *)); return oneval; } storeflags = 
PyArray_FLAGS(arr); PyArray_ENABLEFLAGS(arr, NPY_ARRAY_BEHAVED); - ret = PyArray_SETITEM(arr, oneval, one_obj); + ret = PyArray_SETITEM(arr, oneval, npy_static_pydata.one_obj); ((PyArrayObject_fields *)arr)->flags = storeflags; if (ret < 0) { PyDataMem_FREE(oneval); @@ -2412,7 +2021,7 @@ PyArray_ConvertToCommonType(PyObject *op, int *retn) * Private function to add a casting implementation by unwrapping a bound * array method. * - * @param meth + * @param meth The array method to be unwrapped * @return 0 on success -1 on failure. */ NPY_NO_EXPORT int @@ -2464,7 +2073,12 @@ PyArray_AddCastingImplementation(PyBoundArrayMethodObject *meth) /** * Add a new casting implementation using a PyArrayMethod_Spec. * - * @param spec + * Using this function outside of module initialization without holding a + * critical section on the castingimpls dict may lead to a race to fill the + * dict. Use PyArray_GetCastingImpl to lazily register casts at runtime + * safely. + * + * @param spec The specification to use as a source + * @param private If private, allow slots not publicly exposed. 
* @return 0 on success -1 on failure */ @@ -2518,9 +2132,9 @@ legacy_same_dtype_resolve_descriptors( if (PyDataType_ISNOTSWAPPED(loop_descrs[0]) == PyDataType_ISNOTSWAPPED(loop_descrs[1])) { *view_offset = 0; - return NPY_NO_CASTING; + return NPY_NO_CASTING | NPY_SAME_VALUE_CASTING_FLAG; } - return NPY_EQUIV_CASTING; + return NPY_EQUIV_CASTING | NPY_SAME_VALUE_CASTING_FLAG; } @@ -2638,13 +2252,7 @@ complex_to_noncomplex_get_loop( PyArrayMethod_StridedLoop **out_loop, NpyAuxData **out_transferdata, NPY_ARRAYMETHOD_FLAGS *flags) { - static PyObject *cls = NULL; - int ret; - npy_cache_import("numpy.exceptions", "ComplexWarning", &cls); - if (cls == NULL) { - return -1; - } - ret = PyErr_WarnEx(cls, + int ret = PyErr_WarnEx(npy_static_pydata.ComplexWarning, "Casting complex values to real discards " "the imaginary part", 1); if (ret < 0) { @@ -2713,6 +2321,7 @@ add_numeric_cast(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) if (dtypes[0]->singleton->kind == dtypes[1]->singleton->kind && from_itemsize == to_itemsize) { spec.casting = NPY_EQUIV_CASTING; + spec.casting |= NPY_SAME_VALUE_CASTING_FLAG; /* When there is no casting (equivalent C-types) use byteswap loops */ slots[0].slot = NPY_METH_resolve_descriptors; @@ -2727,13 +2336,17 @@ add_numeric_cast(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) } else if (_npy_can_cast_safely_table[from->type_num][to->type_num]) { spec.casting = NPY_SAFE_CASTING; - } - else if (dtype_kind_to_ordering(dtypes[0]->singleton->kind) <= - dtype_kind_to_ordering(dtypes[1]->singleton->kind)) { - spec.casting = NPY_SAME_KIND_CASTING; + spec.casting |= NPY_SAME_VALUE_CASTING_FLAG; } else { - spec.casting = NPY_UNSAFE_CASTING; + if (dtype_kind_to_ordering(dtypes[0]->singleton->kind) <= + dtype_kind_to_ordering(dtypes[1]->singleton->kind)) { + spec.casting = NPY_SAME_KIND_CASTING; + } + else { + spec.casting = NPY_UNSAFE_CASTING; + } + spec.casting |= NPY_SAME_VALUE_CASTING_FLAG; } /* Create a bound method, unbind and store it */ @@ -2842,6 
+2455,11 @@ cast_to_string_resolve_descriptors( return -1; } if (dtypes[1]->type_num == NPY_UNICODE) { + if (size > NPY_MAX_INT / 4) { + PyErr_Format(PyExc_TypeError, + "string of length %zd is too large to store inside array.", size); + return -1; + } size *= 4; } @@ -2866,10 +2484,10 @@ cast_to_string_resolve_descriptors( return -1; } - if (self->casting == NPY_UNSAFE_CASTING) { + if ((self->casting == NPY_UNSAFE_CASTING) || ((self->casting & NPY_SAME_VALUE_CASTING_FLAG) > 0)){ assert(dtypes[0]->type_num == NPY_UNICODE && dtypes[1]->type_num == NPY_STRING); - return NPY_UNSAFE_CASTING; + return self->casting; } if (loop_descrs[1]->elsize >= size) { @@ -3152,7 +2770,7 @@ nonstructured_to_structured_resolve_descriptors( Py_ssize_t pos = 0; PyObject *key, *tuple; - while (PyDict_Next(to_descr->fields, &pos, &key, &tuple)) { + while (PyDict_Next(to_descr->fields, &pos, &key, &tuple)) { // noqa: borrowed-ref OK PyArray_Descr *field_descr = (PyArray_Descr *)PyTuple_GET_ITEM(tuple, 0); npy_intp field_view_off = NPY_MIN_INTP; NPY_CASTING field_casting = PyArray_GetCastInfo( @@ -3268,31 +2886,11 @@ nonstructured_to_structured_get_loop( return 0; } - static PyObject * PyArray_GetGenericToVoidCastingImpl(void) { - static PyArrayMethodObject *method = NULL; - - if (method != NULL) { - Py_INCREF(method); - return (PyObject *)method; - } - - method = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); - if (method == NULL) { - return PyErr_NoMemory(); - } - - method->name = "any_to_void_cast"; - method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; - method->casting = -1; - method->resolve_descriptors = &nonstructured_to_structured_resolve_descriptors; - method->get_strided_loop = &nonstructured_to_structured_get_loop; - method->nin = 1; - method->nout = 1; - - return (PyObject *)method; + Py_INCREF(npy_static_pydata.GenericToVoidMethod); + return npy_static_pydata.GenericToVoidMethod; } @@ -3321,7 +2919,7 @@ 
structured_to_nonstructured_resolve_descriptors( return -1; } PyObject *key = PyTuple_GetItem(PyDataType_NAMES(given_descrs[0]), 0); - PyObject *base_tup = PyDict_GetItem(PyDataType_FIELDS(given_descrs[0]), key); + PyObject *base_tup = PyDict_GetItem(PyDataType_FIELDS(given_descrs[0]), key); // noqa: borrowed-ref OK base_descr = (PyArray_Descr *)PyTuple_GET_ITEM(base_tup, 0); struct_view_offset = PyLong_AsSsize_t(PyTuple_GET_ITEM(base_tup, 1)); if (error_converting(struct_view_offset)) { @@ -3429,27 +3027,8 @@ structured_to_nonstructured_get_loop( static PyObject * PyArray_GetVoidToGenericCastingImpl(void) { - static PyArrayMethodObject *method = NULL; - - if (method != NULL) { - Py_INCREF(method); - return (PyObject *)method; - } - - method = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); - if (method == NULL) { - return PyErr_NoMemory(); - } - - method->name = "void_to_any_cast"; - method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; - method->casting = -1; - method->resolve_descriptors = &structured_to_nonstructured_resolve_descriptors; - method->get_strided_loop = &structured_to_nonstructured_get_loop; - method->nin = 1; - method->nout = 1; - - return (PyObject *)method; + Py_INCREF(npy_static_pydata.VoidToGenericMethod); + return npy_static_pydata.VoidToGenericMethod; } @@ -3475,7 +3054,7 @@ can_cast_fields_safety( for (Py_ssize_t i = 0; i < field_count; i++) { npy_intp field_view_off = NPY_MIN_INTP; PyObject *from_key = PyTuple_GET_ITEM(PyDataType_NAMES(from), i); - PyObject *from_tup = PyDict_GetItemWithError(PyDataType_FIELDS(from), from_key); + PyObject *from_tup = PyDict_GetItemWithError(PyDataType_FIELDS(from), from_key); // noqa: borrowed-ref OK if (from_tup == NULL) { return give_bad_field_error(from_key); } @@ -3483,7 +3062,7 @@ can_cast_fields_safety( /* Check whether the field names match */ PyObject *to_key = PyTuple_GET_ITEM(PyDataType_NAMES(to), i); - PyObject *to_tup = PyDict_GetItem(PyDataType_FIELDS(to), to_key); + 
PyObject *to_tup = PyDict_GetItem(PyDataType_FIELDS(to), to_key); // noqa: borrowed-ref OK if (to_tup == NULL) { return give_bad_field_error(from_key); } @@ -3813,31 +3392,11 @@ object_to_any_resolve_descriptors( static PyObject * PyArray_GetObjectToGenericCastingImpl(void) { - static PyArrayMethodObject *method = NULL; - - if (method != NULL) { - Py_INCREF(method); - return (PyObject *)method; - } - - method = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); - if (method == NULL) { - return PyErr_NoMemory(); - } - - method->nin = 1; - method->nout = 1; - method->name = "object_to_any_cast"; - method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; - method->casting = NPY_UNSAFE_CASTING; - method->resolve_descriptors = &object_to_any_resolve_descriptors; - method->get_strided_loop = &object_to_any_get_loop; - - return (PyObject *)method; + Py_INCREF(npy_static_pydata.ObjectToGenericMethod); + return npy_static_pydata.ObjectToGenericMethod; } - /* Any object is simple (could even use the default) */ static NPY_CASTING any_to_object_resolve_descriptors( @@ -3870,27 +3429,8 @@ any_to_object_resolve_descriptors( static PyObject * PyArray_GetGenericToObjectCastingImpl(void) { - static PyArrayMethodObject *method = NULL; - - if (method != NULL) { - Py_INCREF(method); - return (PyObject *)method; - } - - method = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); - if (method == NULL) { - return PyErr_NoMemory(); - } - - method->nin = 1; - method->nout = 1; - method->name = "any_to_object_cast"; - method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; - method->casting = NPY_SAFE_CASTING; - method->resolve_descriptors = &any_to_object_resolve_descriptors; - method->get_strided_loop = &any_to_object_get_loop; - - return (PyObject *)method; + Py_INCREF(npy_static_pydata.GenericToObjectMethod); + return npy_static_pydata.GenericToObjectMethod; } @@ -3942,6 +3482,75 @@ PyArray_InitializeObjectToObjectCast(void) return res; } +static 
int +initialize_void_and_object_globals(void) { + PyArrayMethodObject *method = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); + if (method == NULL) { + PyErr_NoMemory(); + return -1; + } + + method->name = "void_to_any_cast"; + method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; + method->casting = -1; + method->resolve_descriptors = &structured_to_nonstructured_resolve_descriptors; + method->get_strided_loop = &structured_to_nonstructured_get_loop; + method->nin = 1; + method->nout = 1; + npy_static_pydata.VoidToGenericMethod = (PyObject *)method; + + method = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); + if (method == NULL) { + PyErr_NoMemory(); + return -1; + } + + method->name = "any_to_void_cast"; + method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; + method->casting = -1; + method->resolve_descriptors = &nonstructured_to_structured_resolve_descriptors; + method->get_strided_loop = &nonstructured_to_structured_get_loop; + method->nin = 1; + method->nout = 1; + npy_static_pydata.GenericToVoidMethod = (PyObject *)method; + + method = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); + if (method == NULL) { + PyErr_NoMemory(); + return -1; + } + + method->nin = 1; + method->nout = 1; + method->name = "object_to_any_cast"; + method->flags = (NPY_METH_SUPPORTS_UNALIGNED + | NPY_METH_REQUIRES_PYAPI + | NPY_METH_NO_FLOATINGPOINT_ERRORS); + method->casting = NPY_UNSAFE_CASTING; + method->resolve_descriptors = &object_to_any_resolve_descriptors; + method->get_strided_loop = &object_to_any_get_loop; + npy_static_pydata.ObjectToGenericMethod = (PyObject *)method; + + method = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); + if (method == NULL) { + PyErr_NoMemory(); + return -1; + } + + method->nin = 1; + method->nout = 1; + method->name = "any_to_object_cast"; + method->flags = (NPY_METH_SUPPORTS_UNALIGNED + | NPY_METH_REQUIRES_PYAPI + | NPY_METH_NO_FLOATINGPOINT_ERRORS); + method->casting = 
NPY_SAFE_CASTING; + method->resolve_descriptors = &any_to_object_resolve_descriptors; + method->get_strided_loop = &any_to_object_get_loop; + npy_static_pydata.GenericToObjectMethod = (PyObject *)method; + + return 0; +} + NPY_NO_EXPORT int PyArray_InitializeCasts() @@ -3962,5 +3571,10 @@ PyArray_InitializeCasts() if (PyArray_InitializeDatetimeCasts() < 0) { return -1; } + + if (initialize_void_and_object_globals() < 0) { + return -1; + } + return 0; } diff --git a/numpy/_core/src/multiarray/convert_datatype.h b/numpy/_core/src/multiarray/convert_datatype.h index 02f25ad0b383..5dc6b4deacb6 100644 --- a/numpy/_core/src/multiarray/convert_datatype.h +++ b/numpy/_core/src/multiarray/convert_datatype.h @@ -9,23 +9,6 @@ extern "C" { extern NPY_NO_EXPORT npy_intp REQUIRED_STR_LEN[]; -#define NPY_USE_LEGACY_PROMOTION 0 -#define NPY_USE_WEAK_PROMOTION 1 -#define NPY_USE_WEAK_PROMOTION_AND_WARN 2 - -extern NPY_NO_EXPORT PyObject *NO_NEP50_WARNING_CTX; -extern NPY_NO_EXPORT PyObject *npy_DTypePromotionError; -extern NPY_NO_EXPORT PyObject *npy_UFuncNoLoopError; - -NPY_NO_EXPORT int -npy_give_promotion_warnings(void); - -NPY_NO_EXPORT PyObject * -npy__get_promotion_state(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(arg)); - -NPY_NO_EXPORT PyObject * -npy__set_promotion_state(PyObject *NPY_UNUSED(mod), PyObject *arg); - NPY_NO_EXPORT PyObject * PyArray_GetCastingImpl(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to); @@ -57,11 +40,6 @@ PyArray_ValidType(int type); NPY_NO_EXPORT int dtype_kind_to_ordering(char kind); -/* Used by PyArray_CanCastArrayTo and in the legacy ufunc type resolution */ -NPY_NO_EXPORT npy_bool -can_cast_scalar_to(PyArray_Descr *scal_type, char *scal_data, - PyArray_Descr *to, NPY_CASTING casting); - NPY_NO_EXPORT npy_bool can_cast_pyscalar_scalar_to( int flags, PyArray_Descr *to, NPY_CASTING casting); @@ -137,12 +115,6 @@ simple_cast_resolve_descriptors( NPY_NO_EXPORT int PyArray_InitializeCasts(void); -NPY_NO_EXPORT int -get_npy_promotion_state(); - 
-NPY_NO_EXPORT void -set_npy_promotion_state(int new_promotion_state); - #ifdef __cplusplus } #endif diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 25319f2f6bf5..91a3db2d6e5f 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -16,7 +16,7 @@ #include "npy_pycompat.h" #include "npy_ctypes.h" -#include "multiarraymodule.h" +#include "npy_static_data.h" #include "common.h" #include "ctors.h" @@ -493,33 +493,49 @@ NPY_NO_EXPORT int PyArray_AssignFromCache_Recursive( PyArrayObject *self, const int ndim, coercion_cache_obj **cache) { + int ret = -1; /* Consume first cache element by extracting information and freeing it */ PyObject *obj = (*cache)->arr_or_sequence; Py_INCREF(obj); - npy_bool sequence = (*cache)->sequence; + npy_bool is_sequence = (*cache)->sequence; + /* + If it is a sequence, this object is the argument to PySequence_Fast, e.g. + the iterable that the user wants to coerce into an array + */ + PyObject *orig_seq = (*cache)->converted_obj; + /* Owned reference to an item in the sequence */ + PyObject *item_pyvalue = NULL; int depth = (*cache)->depth; *cache = npy_unlink_coercion_cache(*cache); - /* The element is either a sequence, or an array */ - if (!sequence) { + /* The element is either a sequence or an array */ + if (!is_sequence) { /* Straight forward array assignment */ assert(PyArray_Check(obj)); if (PyArray_CopyInto(self, (PyArrayObject *)obj) < 0) { - goto fail; + goto finish; } } else { assert(depth != ndim); - npy_intp length = PySequence_Length(obj); - if (length != PyArray_DIMS(self)[0]) { - PyErr_SetString(PyExc_RuntimeError, - "Inconsistent object during array creation? 
" - "Content of sequences changed (length inconsistent)."); - goto fail; - } - - for (npy_intp i = 0; i < length; i++) { - PyObject *value = PySequence_Fast_GET_ITEM(obj, i); + npy_intp orig_length = PyArray_DIMS(self)[0]; + int err = 1; + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(orig_seq); + for (npy_intp i = 0; i < orig_length; i++) { + // this macro takes *the argument* of PySequence_Fast, which is orig_seq; + // not the object returned by PySequence_Fast, which is a proxy object + // with its own per-object PyMutex lock. + // We want to lock the list object exposed to users, not the proxy. + npy_intp length = PySequence_Fast_GET_SIZE(obj); + if (length != orig_length) { + PyErr_SetString(PyExc_RuntimeError, + "Inconsistent object during array creation? " + "Content of sequences changed (length inconsistent)."); + goto finish_critical_section; + } + else { + Py_XSETREF(item_pyvalue, Py_NewRef(PySequence_Fast_GET_ITEM(obj, i))); + } if (ndim == depth + 1) { /* @@ -532,11 +548,11 @@ PyArray_AssignFromCache_Recursive( */ char *item; item = (PyArray_BYTES(self) + i * PyArray_STRIDES(self)[0]); - if (PyArray_Pack(PyArray_DESCR(self), item, value) < 0) { - goto fail; + if (PyArray_Pack(PyArray_DESCR(self), item, item_pyvalue) < 0) { + goto finish_critical_section; } /* If this was an array(-like) we still need to unlike int: */ - if (*cache != NULL && (*cache)->converted_obj == value) { + if (*cache != NULL && (*cache)->converted_obj == item_pyvalue) { *cache = npy_unlink_coercion_cache(*cache); } } @@ -544,22 +560,30 @@ PyArray_AssignFromCache_Recursive( PyArrayObject *view; view = (PyArrayObject *)array_item_asarray(self, i); if (view == NULL) { - goto fail; + goto finish_critical_section; } if (PyArray_AssignFromCache_Recursive(view, ndim, cache) < 0) { Py_DECREF(view); - goto fail; + goto finish_critical_section; } Py_DECREF(view); } } + err = 0; + finish_critical_section:; + + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST(); + if (err) { + goto finish; + } + } - 
Py_DECREF(obj); - return 0; + ret = 0; - fail: + finish:; + Py_XDECREF(item_pyvalue); Py_DECREF(obj); - return -1; + return ret; } @@ -611,15 +635,6 @@ PyArray_AssignFromCache(PyArrayObject *self, coercion_cache_obj *cache) { static void raise_memory_error(int nd, npy_intp const *dims, PyArray_Descr *descr) { - static PyObject *exc_type = NULL; - - npy_cache_import( - "numpy._core._exceptions", "_ArrayMemoryError", - &exc_type); - if (exc_type == NULL) { - goto fail; - } - PyObject *shape = PyArray_IntTupleFromIntp(nd, dims); if (shape == NULL) { goto fail; @@ -631,7 +646,7 @@ raise_memory_error(int nd, npy_intp const *dims, PyArray_Descr *descr) if (exc_value == NULL){ goto fail; } - PyErr_SetObject(exc_type, exc_value); + PyErr_SetObject(npy_static_pydata._ArrayMemoryError, exc_value); Py_DECREF(exc_value); return; @@ -831,6 +846,12 @@ PyArray_NewFromDescr_int( if (data == NULL) { + /* This closely follows PyArray_ZeroContiguousBuffer. We can't use + * that because here we need to allocate after checking if there is + * custom zeroing logic and that function accepts an already-allocated + * array + */ + /* float errors do not matter and we do not release GIL */ NPY_ARRAYMETHOD_FLAGS zero_flags; PyArrayMethod_GetTraverseLoop *get_fill_zero_loop = @@ -937,34 +958,13 @@ PyArray_NewFromDescr_int( */ if (subtype != &PyArray_Type) { PyObject *res, *func; - static PyObject *ndarray_array_finalize = NULL; - /* First time, cache ndarray's __array_finalize__ */ - if (ndarray_array_finalize == NULL) { - ndarray_array_finalize = PyObject_GetAttr( - (PyObject *)&PyArray_Type, npy_ma_str_array_finalize); - } - func = PyObject_GetAttr((PyObject *)subtype, npy_ma_str_array_finalize); + func = PyObject_GetAttr((PyObject *)subtype, npy_interned_str.array_finalize); if (func == NULL) { goto fail; } - else if (func == ndarray_array_finalize) { + else if (func == npy_static_pydata.ndarray_array_finalize) { Py_DECREF(func); } - else if (func == Py_None) { - Py_DECREF(func); - /* - * 
2022-01-08, NumPy 1.23; when deprecation period is over, remove this - * whole stanza so one gets a "NoneType object is not callable" TypeError. - */ - if (DEPRECATE( - "Setting __array_finalize__ = None to indicate no finalization" - "should be done is deprecated. Instead, just inherit from " - "ndarray or, if that is not possible, explicitly set to " - "ndarray.__array_function__; this will raise a TypeError " - "in the future. (Deprecated since NumPy 1.23)") < 0) { - goto fail; - } - } else { if (PyCapsule_CheckExact(func)) { /* A C-function is stored here */ @@ -1016,15 +1016,16 @@ PyArray_NewFromDescr( int nd, npy_intp const *dims, npy_intp const *strides, void *data, int flags, PyObject *obj) { - if (subtype == NULL) { + if (descr == NULL) { PyErr_SetString(PyExc_ValueError, - "subtype is NULL in PyArray_NewFromDescr"); + "descr is NULL in PyArray_NewFromDescr"); return NULL; } - if (descr == NULL) { + if (subtype == NULL) { PyErr_SetString(PyExc_ValueError, - "descr is NULL in PyArray_NewFromDescr"); + "subtype is NULL in PyArray_NewFromDescr"); + Py_DECREF(descr); return NULL; } @@ -1328,12 +1329,12 @@ _array_from_buffer_3118(PyObject *memoryview) return NULL; } - if (PyErr_Warn( + if (PyErr_WarnEx( PyExc_RuntimeWarning, "A builtin ctypes object gave a PEP3118 format " "string that does not match its itemsize, so a " "best-guess will be made of the data type. " - "Newer versions of python may behave correctly.") < 0) { + "Newer versions of python may behave correctly.", 1) < 0) { Py_DECREF(descr); return NULL; } @@ -1425,7 +1426,7 @@ _array_from_buffer_3118(PyObject *memoryview) * * an object with an __array__ function. * * @param op The object to convert to an array - * @param requested_type a requested dtype instance, may be NULL; The result + * @param requested_dtype a requested dtype instance, may be NULL; The result * DType may be used, but is not enforced. * @param writeable whether the result must be writeable. 
* @param context Unused parameter, must be NULL (should be removed later). @@ -1532,6 +1533,16 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, return NULL; } + /* + * The internal implementation treats 0 as actually wanting a zero-dimensional + * array, but the API for this function has typically treated it as + * "anything is fine", so convert here. + * TODO: should we use another value as a placeholder instead? + */ + if (max_depth == 0 || max_depth > NPY_MAXDIMS) { + max_depth = NPY_MAXDIMS; + } + int was_scalar; PyObject* ret = PyArray_FromAny_int( op, dt_info.descr, dt_info.dtype, @@ -1563,7 +1574,7 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, * This is the main code to make a NumPy array from a Python * Object. It is called from many different places. */ - PyArrayObject *arr = NULL, *ret; + PyArrayObject *arr = NULL, *ret = NULL; PyArray_Descr *dtype = NULL; coercion_cache_obj *cache = NULL; int ndim = 0; @@ -1585,11 +1596,11 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, } ndim = PyArray_DiscoverDTypeAndShape( - op, NPY_MAXDIMS, dims, &cache, in_DType, in_descr, &dtype, + op, max_depth, dims, &cache, in_DType, in_descr, &dtype, copy, &was_copied_by__array__); if (ndim < 0) { - return NULL; + goto cleanup; } /* If the cache is NULL, then the object is considered a scalar */ @@ -1602,16 +1613,14 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, if (min_depth != 0 && ndim < min_depth) { PyErr_SetString(PyExc_ValueError, "object of too small depth for desired array"); - Py_DECREF(dtype); npy_free_coercion_cache(cache); - return NULL; + goto cleanup; } - if (max_depth != 0 && ndim > max_depth) { + if (ndim > max_depth) { PyErr_SetString(PyExc_ValueError, "object too deep for desired array"); - Py_DECREF(dtype); npy_free_coercion_cache(cache); - return NULL; + goto cleanup; } /* Got the correct parameters, but the cache may already hold the result */ @@ -1626,9 +1635,11 @@ 
PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, if (was_copied_by__array__ == 1) { flags = flags & ~NPY_ARRAY_ENSURECOPY; } - PyObject *res = PyArray_FromArray(arr, dtype, flags); + // PyArray_FromArray steals a reference to the dtype + Py_INCREF(dtype); + ret = (PyArrayObject *)PyArray_FromArray(arr, dtype, flags); npy_unlink_coercion_cache(cache); - return res; + goto cleanup; } else if (cache == NULL && PyArray_IsScalar(op, Void) && !(((PyVoidScalarObject *)op)->flags & NPY_ARRAY_OWNDATA) && @@ -1643,13 +1654,15 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, * provide a dtype (newtype is NULL). */ assert(ndim == 0); - - return PyArray_NewFromDescrAndBase( + // PyArray_NewFromDescrAndBase steals a reference to the dtype + Py_INCREF(dtype); + ret = (PyArrayObject *)PyArray_NewFromDescrAndBase( &PyArray_Type, dtype, 0, NULL, NULL, ((PyVoidScalarObject *)op)->obval, ((PyVoidScalarObject *)op)->flags, NULL, op); + goto cleanup; } /* * If we got this far, we definitely have to create a copy, since we are @@ -1657,9 +1670,8 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, */ if (flags & NPY_ARRAY_ENSURENOCOPY) { PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg); - Py_DECREF(dtype); npy_free_coercion_cache(cache); - return NULL; + goto cleanup; } if (cache == NULL && in_descr != NULL && @@ -1686,16 +1698,18 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, * have a better solution at some point): * https://github.com/pandas-dev/pandas/issues/35481 */ - return PyArray_FromScalar(op, dtype); + // PyArray_FromScalar steals a reference to dtype + Py_INCREF(dtype); + ret = (PyArrayObject *)PyArray_FromScalar(op, dtype); + goto cleanup; } /* There was no array (or array-like) passed in directly. 
*/ if (flags & NPY_ARRAY_WRITEBACKIFCOPY) { PyErr_SetString(PyExc_TypeError, "WRITEBACKIFCOPY used for non-array input."); - Py_DECREF(dtype); npy_free_coercion_cache(cache); - return NULL; + goto cleanup; } /* Create a new array and copy the data */ @@ -1705,8 +1719,7 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, flags&NPY_ARRAY_F_CONTIGUOUS, NULL); if (ret == NULL) { npy_free_coercion_cache(cache); - Py_DECREF(dtype); - return NULL; + goto cleanup; } if (ndim == PyArray_NDIM(ret)) { /* @@ -1723,12 +1736,10 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, assert(ndim == 0); if (PyArray_Pack(dtype, PyArray_BYTES(ret), op) < 0) { - Py_DECREF(dtype); - Py_DECREF(ret); - return NULL; + Py_CLEAR(ret); + goto cleanup; } - Py_DECREF(dtype); - return (PyObject *)ret; + goto cleanup; } assert(ndim != 0); assert(op == cache->converted_obj); @@ -1741,15 +1752,17 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, ((PyArrayObject_fields *)ret)->descr = dtype; } - int success = PyArray_AssignFromCache(ret, cache); + int succeed = PyArray_AssignFromCache(ret, cache); ((PyArrayObject_fields *)ret)->nd = out_ndim; ((PyArrayObject_fields *)ret)->descr = out_descr; - Py_DECREF(dtype); - if (success < 0) { - Py_DECREF(ret); - return NULL; + if (succeed < 0) { + Py_CLEAR(ret); } + +cleanup:; + + Py_XDECREF(dtype); return (PyObject *)ret; } @@ -1802,7 +1815,7 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, */ NPY_NO_EXPORT PyObject * PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth, - int max_depth, int requires, PyObject *context) + int max_depth, int requirements, PyObject *context) { npy_dtype_info dt_info = {NULL, NULL}; @@ -1817,8 +1830,13 @@ PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth, return NULL; } + /* See comment in PyArray_FromAny for rationale */ + if (max_depth == 0 || max_depth > NPY_MAXDIMS) { + max_depth = NPY_MAXDIMS; + } + PyObject* ret = PyArray_CheckFromAny_int( - 
op, dt_info.descr, dt_info.dtype, min_depth, max_depth, requires, + op, dt_info.descr, dt_info.dtype, min_depth, max_depth, requirements, context); Py_XDECREF(dt_info.descr); @@ -1830,40 +1848,38 @@ PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth, * Internal version of PyArray_CheckFromAny that accepts a dtypemeta. Borrows * references to the descriptor and dtype. */ - NPY_NO_EXPORT PyObject * PyArray_CheckFromAny_int(PyObject *op, PyArray_Descr *in_descr, PyArray_DTypeMeta *in_DType, int min_depth, - int max_depth, int requires, PyObject *context) + int max_depth, int requirements, PyObject *context) { PyObject *obj; - if (requires & NPY_ARRAY_NOTSWAPPED) { - if (!in_descr && PyArray_Check(op) && - PyArray_ISBYTESWAPPED((PyArrayObject* )op)) { - in_descr = PyArray_DescrNew(PyArray_DESCR((PyArrayObject *)op)); + Py_XINCREF(in_descr); /* take ownership as we may replace it */ + if (requirements & NPY_ARRAY_NOTSWAPPED) { + if (!in_descr && PyArray_Check(op)) { + in_descr = PyArray_DESCR((PyArrayObject *)op); + Py_INCREF(in_descr); + } + if (in_descr) { + PyArray_DESCR_REPLACE_CANONICAL(in_descr); if (in_descr == NULL) { return NULL; } } - else if (in_descr && !PyArray_ISNBO(in_descr->byteorder)) { - PyArray_DESCR_REPLACE(in_descr); - } - if (in_descr && in_descr->byteorder != NPY_IGNORE) { - in_descr->byteorder = NPY_NATIVE; - } } int was_scalar; obj = PyArray_FromAny_int(op, in_descr, in_DType, min_depth, - max_depth, requires, context, &was_scalar); + max_depth, requirements, context, &was_scalar); + Py_XDECREF(in_descr); if (obj == NULL) { return NULL; } - if ((requires & NPY_ARRAY_ELEMENTSTRIDES) + if ((requirements & NPY_ARRAY_ELEMENTSTRIDES) && !PyArray_ElementStrides(obj)) { PyObject *ret; - if (requires & NPY_ARRAY_ENSURENOCOPY) { + if (requirements & NPY_ARRAY_ENSURENOCOPY) { PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg); return NULL; } @@ -2045,13 +2061,12 @@ PyArray_FromStructInterface(PyObject *input) PyObject *attr; char 
endian = NPY_NATBYTE; - attr = PyArray_LookupSpecial_OnInstance(input, npy_ma_str_array_struct); - if (attr == NULL) { - if (PyErr_Occurred()) { - return NULL; - } else { - return Py_NotImplemented; - } + if (PyArray_LookupSpecial_OnInstance( + input, npy_interned_str.array_struct, &attr) < 0) { + return NULL; + } + else if (attr == NULL) { + return Py_NotImplemented; } if (!PyCapsule_CheckExact(attr)) { if (PyType_Check(input) && PyObject_HasAttrString(attr, "__get__")) { @@ -2141,7 +2156,7 @@ _is_default_descr(PyObject *descr, PyObject *typestr) { if (!PyList_Check(descr) || PyList_GET_SIZE(descr) != 1) { return 0; } - PyObject *tuple = PyList_GET_ITEM(descr, 0); + PyObject *tuple = PyList_GET_ITEM(descr, 0); // noqa: borrowed-ref - manual fix needed if (!(PyTuple_Check(tuple) && PyTuple_GET_SIZE(tuple) == 2)) { return 0; } @@ -2165,16 +2180,16 @@ PyArray_FromInterface(PyObject *origin) PyArray_Descr *dtype = NULL; char *data = NULL; Py_buffer view; - int i, n; + Py_ssize_t i, n; npy_intp dims[NPY_MAXDIMS], strides[NPY_MAXDIMS]; int dataflags = NPY_ARRAY_BEHAVED; + int use_scalar_assign = 0; - iface = PyArray_LookupSpecial_OnInstance(origin, npy_ma_str_array_interface); - - if (iface == NULL) { - if (PyErr_Occurred()) { - return NULL; - } + if (PyArray_LookupSpecial_OnInstance( + origin, npy_interned_str.array_interface, &iface) < 0) { + return NULL; + } + else if (iface == NULL) { return Py_NotImplemented; } if (!PyDict_Check(iface)) { @@ -2243,8 +2258,8 @@ PyArray_FromInterface(PyObject *origin) Py_SETREF(dtype, new_dtype); } } + Py_DECREF(descr); } - Py_DECREF(descr); } Py_CLEAR(attr); @@ -2257,12 +2272,10 @@ PyArray_FromInterface(PyObject *origin) /* Shape must be specified when 'data' is specified */ int result = PyDict_ContainsString(iface, "data"); if (result < 0) { - Py_DECREF(attr); return NULL; } else if (result == 1) { Py_DECREF(iface); - Py_DECREF(attr); PyErr_SetString(PyExc_ValueError, "Missing __array_interface__ shape"); return NULL; @@ -2282,6 
+2295,12 @@ PyArray_FromInterface(PyObject *origin) /* Get dimensions from shape tuple */ else { n = PyTuple_GET_SIZE(attr); + if (n > NPY_MAXDIMS) { + PyErr_Format(PyExc_ValueError, + "number of dimensions must be within [0, %d], got %d", + NPY_MAXDIMS, n); + goto fail; + } for (i = 0; i < n; i++) { PyObject *tmp = PyTuple_GET_ITEM(attr, i); dims[i] = PyArray_PyIntAsIntp(tmp); @@ -2299,7 +2318,10 @@ PyArray_FromInterface(PyObject *origin) } /* Case for data access through pointer */ - if (attr && PyTuple_Check(attr)) { + if (attr == NULL) { + use_scalar_assign = 1; + } + else if (PyTuple_Check(attr)) { PyObject *dataptr; if (PyTuple_GET_SIZE(attr) != 2) { PyErr_SetString(PyExc_TypeError, @@ -2331,7 +2353,7 @@ PyArray_FromInterface(PyObject *origin) } /* Case for data access through buffer */ - else if (attr) { + else { if (attr != Py_None) { base = attr; } @@ -2349,7 +2371,7 @@ PyArray_FromInterface(PyObject *origin) } data = (char *)view.buf; /* - * In Python 3 both of the deprecated functions PyObject_AsWriteBuffer and + * Both of the deprecated functions PyObject_AsWriteBuffer and * PyObject_AsReadBuffer that this code replaces release the buffer. It is * up to the object that supplies the buffer to guarantee that the buffer * sticks around after the release. @@ -2388,18 +2410,32 @@ PyArray_FromInterface(PyObject *origin) if (ret == NULL) { goto fail; } - if (data == NULL) { + if (use_scalar_assign) { + /* + * NOTE(seberg): I honestly doubt anyone is using this scalar path and we + * could probably just deprecate (or just remove it in a 3.0 version). 
+ */ if (PyArray_SIZE(ret) > 1) { PyErr_SetString(PyExc_ValueError, "cannot coerce scalar to array with size > 1"); Py_DECREF(ret); goto fail; } - if (PyArray_SETITEM(ret, PyArray_DATA(ret), origin) < 0) { + if (PyArray_Pack(PyArray_DESCR(ret), PyArray_DATA(ret), origin) < 0) { Py_DECREF(ret); goto fail; } } + else if (data == NULL && PyArray_NBYTES(ret) != 0) { + /* Caller should ensure this, but <2.4 used the above scalar coerction path */ + PyErr_SetString(PyExc_ValueError, + "data is NULL but array contains data, in older versions of NumPy " + "this may have used the scalar path. To get the scalar path " + "you must leave the data field undefined."); + Py_DECREF(ret); + goto fail; + } + result = PyDict_GetItemStringRef(iface, "strides", &attr); if (result == -1){ return NULL; @@ -2472,7 +2508,7 @@ check_or_clear_and_warn_error_if_due_to_copy_kwarg(PyObject *kwnames) goto restore_error; } int copy_kwarg_unsupported = PyUnicode_Contains( - str_value, npy_ma_str_array_err_msg_substr); + str_value, npy_interned_str.array_err_msg_substr); Py_DECREF(str_value); if (copy_kwarg_unsupported == -1) { goto restore_error; @@ -2487,7 +2523,10 @@ check_or_clear_and_warn_error_if_due_to_copy_kwarg(PyObject *kwnames) Py_XDECREF(traceback); if (DEPRECATE("__array__ implementation doesn't accept a copy keyword, " "so passing copy=False failed. __array__ must implement " - "'dtype' and 'copy' keyword arguments.") < 0) { + "'dtype' and 'copy' keyword arguments. 
" + "To learn more, see the migration guide " + "https://numpy.org/devdocs/numpy_2_0_migration_guide.html" + "#adapting-to-changes-in-the-copy-keyword") < 0) { return -1; } return 0; @@ -2524,11 +2563,11 @@ PyArray_FromArrayAttr_int(PyObject *op, PyArray_Descr *descr, int copy, PyObject *new; PyObject *array_meth; - array_meth = PyArray_LookupSpecial_OnInstance(op, npy_ma_str_array); - if (array_meth == NULL) { - if (PyErr_Occurred()) { - return NULL; - } + if (PyArray_LookupSpecial_OnInstance( + op, npy_interned_str.array, &array_meth) < 0) { + return NULL; + } + else if (array_meth == NULL) { return Py_NotImplemented; } @@ -2543,15 +2582,6 @@ PyArray_FromArrayAttr_int(PyObject *op, PyArray_Descr *descr, int copy, return Py_NotImplemented; } - static PyObject *kwnames_is_copy = NULL; - if (kwnames_is_copy == NULL) { - kwnames_is_copy = Py_BuildValue("(s)", "copy"); - if (kwnames_is_copy == NULL) { - Py_DECREF(array_meth); - return NULL; - } - } - Py_ssize_t nargs = 0; PyObject *arguments[2]; PyObject *kwnames = NULL; @@ -2567,7 +2597,7 @@ PyArray_FromArrayAttr_int(PyObject *op, PyArray_Descr *descr, int copy, * signature of the __array__ method being called does not have `copy`. */ if (copy != -1) { - kwnames = kwnames_is_copy; + kwnames = npy_static_pydata.kwnames_is_copy; arguments[nargs] = copy == 1 ? Py_True : Py_False; } @@ -2713,7 +2743,6 @@ PyArray_CopyAsFlat(PyArrayObject *dst, PyArrayObject *src, NPY_ORDER order) npy_intp dst_count, src_count, count; npy_intp dst_size, src_size; - int needs_api; NPY_BEGIN_THREADS_DEF; @@ -2774,13 +2803,13 @@ PyArray_CopyAsFlat(PyArrayObject *dst, PyArrayObject *src, NPY_ORDER order) /* Get all the values needed for the inner loop */ dst_iternext = NpyIter_GetIterNext(dst_iter, NULL); dst_dataptr = NpyIter_GetDataPtrArray(dst_iter); - /* Since buffering is disabled, we can cache the stride */ + /* The inner stride is also the fixed stride for the whole iteration. 
*/ dst_stride = NpyIter_GetInnerStrideArray(dst_iter)[0]; dst_countptr = NpyIter_GetInnerLoopSizePtr(dst_iter); src_iternext = NpyIter_GetIterNext(src_iter, NULL); src_dataptr = NpyIter_GetDataPtrArray(src_iter); - /* Since buffering is disabled, we can cache the stride */ + /* The inner stride is also the fixed stride for the whole iteration. */ src_stride = NpyIter_GetInnerStrideArray(src_iter)[0]; src_countptr = NpyIter_GetInnerLoopSizePtr(src_iter); @@ -2790,15 +2819,6 @@ PyArray_CopyAsFlat(PyArrayObject *dst, PyArrayObject *src, NPY_ORDER order) return -1; } - needs_api = NpyIter_IterationNeedsAPI(dst_iter) || - NpyIter_IterationNeedsAPI(src_iter); - - /* - * Because buffering is disabled in the iterator, the inner loop - * strides will be the same throughout the iteration loop. Thus, - * we can pass them to this function to take advantage of - * contiguous strides, etc. - */ NPY_cast_info cast_info; NPY_ARRAYMETHOD_FLAGS flags; if (PyArray_GetDTypeTransferFunction( @@ -2812,7 +2832,8 @@ PyArray_CopyAsFlat(PyArrayObject *dst, PyArrayObject *src, NPY_ORDER order) NpyIter_Deallocate(src_iter); return -1; } - needs_api |= (flags & NPY_METH_REQUIRES_PYAPI) != 0; + /* No need to worry about API use in unbuffered iterator */ + int needs_api = (flags & NPY_METH_REQUIRES_PYAPI) != 0; if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { npy_clear_floatstatus_barrier((char *)src_iter); } @@ -2831,7 +2852,6 @@ PyArray_CopyAsFlat(PyArrayObject *dst, PyArrayObject *src, NPY_ORDER order) count = (src_count < dst_count) ? 
src_count : dst_count; if (cast_info.func(&cast_info.context, args, &count, strides, cast_info.auxdata) < 0) { - res = -1; break; } @@ -3653,12 +3673,9 @@ array_from_text(PyArray_Descr *dtype, npy_intp num, char const *sep, size_t *nre Py_DECREF(r); return NULL; } - /* 2019-09-12, NumPy 1.18 */ - if (DEPRECATE( - "string or file could not be read to its end due to unmatched " - "data; this will raise a ValueError in the future.") < 0) { - goto fail; - } + PyErr_SetString(PyExc_ValueError, + "string or file could not be read to its end due to unmatched data"); + goto fail; } fail: @@ -3736,21 +3753,20 @@ PyArray_FromFile(FILE *fp, PyArray_Descr *dtype, npy_intp num, char *sep) } if (((npy_intp) nread) < num) { /* - * Realloc memory for smaller number of elements, use original dtype - * which may have include a subarray (and is used for `nread`). + * Resize array to smaller number of elements. Note that original + * dtype may have included a subarray, so we may not be 1-d. */ - const size_t nsize = PyArray_MAX(nread,1) * dtype->elsize; - char *tmp; - - /* The handler is always valid */ - if((tmp = PyDataMem_UserRENEW(PyArray_DATA(ret), nsize, - PyArray_HANDLER(ret))) == NULL) { + npy_intp dims[NPY_MAXDIMS]; + dims[0] = (npy_intp)nread; + for (int i = 1; i < PyArray_NDIM(ret); i++) { + dims[i] = PyArray_DIMS(ret)[i]; + } + PyArray_Dims new_dims = {dims, PyArray_NDIM(ret)}; + if (PyArray_Resize_int(ret, &new_dims, 0) < 0) { Py_DECREF(dtype); Py_DECREF(ret); - return PyErr_NoMemory(); + return NULL; } - ((PyArrayObject_fields *)ret)->data = tmp; - PyArray_DIMS(ret)[0] = nread; } Py_DECREF(dtype); return (PyObject *)ret; @@ -4002,6 +4018,7 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count) PyObject *iter = NULL; PyArrayObject *ret = NULL; npy_intp i, elsize, elcount; + npy_intp dims[NPY_MAXDIMS]; if (dtype == NULL) { return NULL; @@ -4041,6 +4058,9 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count) if (ret == NULL) { goto done; 
} + /* set up for possible resizing */ + memcpy(dims, PyArray_DIMS(ret), PyArray_NDIM(ret)*sizeof(npy_intp)); + PyArray_Dims new_dims = {dims, PyArray_NDIM(ret)}; char *item = PyArray_BYTES(ret); for (i = 0; i < count || count == -1; i++, item += elsize) { @@ -4048,14 +4068,12 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count) if (value == NULL) { if (PyErr_Occurred()) { /* Fetching next item failed perhaps due to exhausting iterator */ - goto done; + goto fail; } break; } if (NPY_UNLIKELY(i >= elcount) && elsize != 0) { - char *new_data = NULL; - npy_intp nbytes; /* Grow PyArray_DATA(ret): this is similar for the strategy for PyListObject, but we use @@ -4064,31 +4082,18 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count) be suitable to reuse here. */ elcount = (i >> 1) + (i < 4 ? 4 : 2) + i; - if (!npy_mul_sizes_with_overflow(&nbytes, elcount, elsize)) { - /* The handler is always valid */ - new_data = PyDataMem_UserRENEW( - PyArray_BYTES(ret), nbytes, PyArray_HANDLER(ret)); - } - if (new_data == NULL) { - PyErr_SetString(PyExc_MemoryError, - "cannot allocate array memory"); + dims[0] = elcount; + if (PyArray_Resize_int(ret, &new_dims, 0) < 0) { Py_DECREF(value); - goto done; + goto fail; } - ((PyArrayObject_fields *)ret)->data = new_data; - /* resize array for cleanup: */ - PyArray_DIMS(ret)[0] = elcount; /* Reset `item` pointer to point into realloc'd chunk */ - item = new_data + i * elsize; - if (PyDataType_FLAGCHK(dtype, NPY_NEEDS_INIT)) { - /* Initialize new chunk: */ - memset(item, 0, nbytes - i * elsize); - } + item = ((char *)PyArray_DATA(ret)) + i * elsize; } if (PyArray_Pack(dtype, item, value) < 0) { Py_DECREF(value); - goto done; + goto fail; } Py_DECREF(value); } @@ -4097,46 +4102,22 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count) PyErr_Format(PyExc_ValueError, "iterator too short: Expected %zd but iterator had only %zd " "items.", (Py_ssize_t)count, (Py_ssize_t)i); - goto done; + 
goto fail; } /* * Realloc the data so that don't keep extra memory tied up and fix * the arrays first dimension (there could be more than one). */ - if (i == 0 || elsize == 0) { - /* The size cannot be zero for realloc. */ + dims[0] = i; + if (!PyArray_Resize_int(ret, &new_dims, 0)) { + goto done; } - else { - /* Resize array to actual final size (it may be too large) */ - /* The handler is always valid */ - char *new_data = PyDataMem_UserRENEW( - PyArray_DATA(ret), i * elsize, PyArray_HANDLER(ret)); - - if (new_data == NULL) { - PyErr_SetString(PyExc_MemoryError, - "cannot allocate array memory"); - goto done; - } - ((PyArrayObject_fields *)ret)->data = new_data; - if (count < 0) { - /* - * If the count was smaller than zero, the strides may be all 0 - * (even in the later dimensions for `count < 0`! - * Thus, fix all strides here again for C-contiguity. - */ - int oflags; - _array_fill_strides( - PyArray_STRIDES(ret), PyArray_DIMS(ret), PyArray_NDIM(ret), - PyArray_ITEMSIZE(ret), NPY_ARRAY_C_CONTIGUOUS, &oflags); - PyArray_STRIDES(ret)[0] = elsize; - assert(oflags & NPY_ARRAY_C_CONTIGUOUS); - } - } - PyArray_DIMS(ret)[0] = i; + fail: + Py_CLEAR(ret); - done: + done: Py_XDECREF(iter); Py_XDECREF(dtype); if (PyErr_Occurred()) { diff --git a/numpy/_core/src/multiarray/ctors.h b/numpy/_core/src/multiarray/ctors.h index 094589968b66..b7a60e0065e0 100644 --- a/numpy/_core/src/multiarray/ctors.h +++ b/numpy/_core/src/multiarray/ctors.h @@ -68,11 +68,11 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, NPY_NO_EXPORT PyObject * PyArray_CheckFromAny_int(PyObject *op, PyArray_Descr *in_descr, PyArray_DTypeMeta *in_DType, int min_depth, - int max_depth, int requires, PyObject *context); + int max_depth, int requirements, PyObject *context); NPY_NO_EXPORT PyObject * PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth, - int max_depth, int requires, PyObject *context); + int max_depth, int requirements, PyObject *context); NPY_NO_EXPORT 
PyObject * PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags); diff --git a/numpy/_core/src/multiarray/datetime.c b/numpy/_core/src/multiarray/datetime.c index 474c048db6cf..9489e2b92c6a 100644 --- a/numpy/_core/src/multiarray/datetime.c +++ b/numpy/_core/src/multiarray/datetime.c @@ -1233,6 +1233,10 @@ can_cast_datetime64_units(NPY_DATETIMEUNIT src_unit, NPY_DATETIMEUNIT dst_unit, NPY_CASTING casting) { + if ((casting & NPY_SAME_VALUE_CASTING_FLAG) > 0) { + /* TODO: support this */ + return 0; + } switch (casting) { /* Allow anything with unsafe casting */ case NPY_UNSAFE_CASTING: @@ -1278,6 +1282,10 @@ can_cast_timedelta64_units(NPY_DATETIMEUNIT src_unit, NPY_DATETIMEUNIT dst_unit, NPY_CASTING casting) { + if ((casting & NPY_SAME_VALUE_CASTING_FLAG) > 0) { + /* Use SAFE_CASTING, which implies SAME_VALUE */ + casting = NPY_SAFE_CASTING; + } switch (casting) { /* Allow anything with unsafe casting */ case NPY_UNSAFE_CASTING: @@ -1325,6 +1333,10 @@ can_cast_datetime64_metadata(PyArray_DatetimeMetaData *src_meta, PyArray_DatetimeMetaData *dst_meta, NPY_CASTING casting) { + if ((casting & NPY_SAME_VALUE_CASTING_FLAG) > 0) { + /* Force SAFE_CASTING */ + casting = NPY_SAFE_CASTING; + } switch (casting) { case NPY_UNSAFE_CASTING: return 1; @@ -1352,6 +1364,10 @@ can_cast_timedelta64_metadata(PyArray_DatetimeMetaData *src_meta, PyArray_DatetimeMetaData *dst_meta, NPY_CASTING casting) { + if ((casting & NPY_SAME_VALUE_CASTING_FLAG) > 0) { + /* Use SAFE_CASTING, which implies SAME_VALUE */ + casting = NPY_SAFE_CASTING; + } switch (casting) { case NPY_UNSAFE_CASTING: return 1; @@ -1795,12 +1811,9 @@ convert_datetime_metadata_tuple_to_datetime_metadata(PyObject *tuple, /* (unit, num, event) */ if (tuple_size == 3) { - /* Numpy 1.14, 2017-08-11 */ - if (DEPRECATE( - "When passing a 3-tuple as (unit, num, event), the event " - "is ignored (since 1.7) - use (unit, num) instead") < 0) { - return -1; - } + PyErr_SetString(PyExc_ValueError, + "Use (unit, num) 
with no event"); + return -1; } /* (unit, num, den, event) */ else if (tuple_size == 4) { @@ -1830,13 +1843,11 @@ convert_datetime_metadata_tuple_to_datetime_metadata(PyObject *tuple, } } else if (event != Py_None) { - /* Numpy 1.14, 2017-08-11 */ - if (DEPRECATE( + PyErr_SetString(PyExc_ValueError, "When passing a 4-tuple as (unit, num, den, event), the " - "event argument is ignored (since 1.7), so should be None" - ) < 0) { - return -1; - } + "event argument must be None" + ); + return -1; } den = PyLong_AsLong(PyTuple_GET_ITEM(tuple, 2)); if (error_converting(den)) { @@ -2250,8 +2261,8 @@ NpyDatetime_ConvertPyDateTimeToDatetimeStruct( } /* - * Gets a tzoffset in minutes by calling the fromutc() function on - * the Python datetime.tzinfo object. + * Gets a tzoffset in minutes by calling the astimezone() function on + * the Python datetime.datetime object. */ NPY_NO_EXPORT int get_tzoffset_from_pytzinfo(PyObject *timezone_obj, npy_datetimestruct *dts) @@ -2260,14 +2271,14 @@ get_tzoffset_from_pytzinfo(PyObject *timezone_obj, npy_datetimestruct *dts) npy_datetimestruct loc_dts; /* Create a Python datetime to give to the timezone object */ - dt = PyDateTime_FromDateAndTime((int)dts->year, dts->month, dts->day, - dts->hour, dts->min, 0, 0); + dt = PyDateTimeAPI->DateTime_FromDateAndTime((int)dts->year, dts->month, dts->day, + dts->hour, dts->min, 0, 0, PyDateTime_TimeZone_UTC, PyDateTimeAPI->DateTimeType); if (dt == NULL) { return -1; } /* Convert the datetime from UTC to local time */ - loc_dt = PyObject_CallMethod(timezone_obj, "fromutc", "O", dt); + loc_dt = PyObject_CallMethod(dt, "astimezone", "O", timezone_obj); Py_DECREF(dt); if (loc_dt == NULL) { return -1; @@ -2766,10 +2777,10 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj, /* * Converts a datetime into a PyObject *. * - * Not-a-time is returned as the string "NaT". - * For days or coarser, returns a datetime.date. - * For microseconds or coarser, returns a datetime.datetime. 
- * For units finer than microseconds, returns an integer. + * NaT (Not-a-time) is returned as None. + * For D/W/Y/M (days or coarser), returns a datetime.date. + * For Îŧs/ms/s/m/h/D/W (microseconds or coarser), returns a datetime.datetime. + * For ns/ps/fs/as (units shorter than microseconds), returns an integer. */ NPY_NO_EXPORT PyObject * convert_datetime_to_pyobject(npy_datetime dt, PyArray_DatetimeMetaData *meta) @@ -2818,85 +2829,232 @@ convert_datetime_to_pyobject(npy_datetime dt, PyArray_DatetimeMetaData *meta) } /* - * Converts a timedelta into a PyObject *. + * We require that if d is a PyDateTime, then + * hash(numpy.datetime64(d)) == hash(d). + * Where possible, convert dt to a PyDateTime and hash it. + * + * NOTE: "equals" across PyDate, PyDateTime and np.datetime64 is not transitive: + * datetime.datetime(1970, 1, 1) == np.datetime64(0, 'us') + * np.datetime64(0, 'us') == np.datetime64(0, 'D') + * datetime.datetime(1970, 1, 1) != np.datetime64(0, 'D') # date, not datetime! + * + * But: + * datetime.date(1970, 1, 1) == np.datetime64(0, 'D') * - * Not-a-time is returned as the string "NaT". - * For microseconds or coarser, returns a datetime.timedelta. - * For units finer than microseconds, returns an integer. + * For hash(datetime64(0, 'D')) we could return either PyDate.hash or PyDateTime.hash. + * We choose PyDateTime.hash to match datetime64(0, 'us') */ -NPY_NO_EXPORT PyObject * -convert_timedelta_to_pyobject(npy_timedelta td, PyArray_DatetimeMetaData *meta) +NPY_NO_EXPORT npy_hash_t +datetime_hash(PyArray_DatetimeMetaData *meta, npy_datetime dt) { - npy_timedelta value; - int days = 0, seconds = 0, useconds = 0; + PyObject *obj; + npy_hash_t res; + npy_datetimestruct dts; - /* - * Convert NaT (not-a-time) into None. 
- */ - if (td == NPY_DATETIME_NAT) { - Py_RETURN_NONE; + if (dt == NPY_DATETIME_NAT) { + return -1; /* should have been handled by caller */ } - /* - * If the type's precision is greater than microseconds, is - * Y/M/B (nonlinear units), or is generic units, return an int - */ - if (meta->base > NPY_FR_us || - meta->base == NPY_FR_Y || - meta->base == NPY_FR_M || - meta->base == NPY_FR_GENERIC) { - return PyLong_FromLongLong(td); + if (meta->base == NPY_FR_GENERIC) { + obj = PyLong_FromLongLong(dt); + } else { + if (NpyDatetime_ConvertDatetime64ToDatetimeStruct(meta, dt, &dts) < 0) { + return -1; + } + + if (dts.year < 1 || dts.year > 9999 + || dts.ps != 0 || dts.as != 0) { + /* NpyDatetime_ConvertDatetime64ToDatetimeStruct does memset, + * so this is safe from loose struct packing. */ + obj = PyBytes_FromStringAndSize((const char *)&dts, sizeof(dts)); + } else { + obj = PyDateTime_FromDateAndTime(dts.year, dts.month, dts.day, + dts.hour, dts.min, dts.sec, dts.us); + } + } + + if (obj == NULL) { + return -1; } - value = td; + res = PyObject_Hash(obj); + + Py_DECREF(obj); + + return res; +} + +static int +convert_timedelta_to_timedeltastruct(PyArray_DatetimeMetaData *meta, + npy_timedelta td, + npy_timedeltastruct *out) +{ + memset(out, 0, sizeof(npy_timedeltastruct)); /* Apply the unit multiplier (TODO: overflow treatment...) 
*/ - value *= meta->num; + td *= meta->num; /* Convert to days/seconds/useconds */ switch (meta->base) { case NPY_FR_W: - days = value * 7; + out->day = td * 7; break; case NPY_FR_D: - days = value; + out->day = td; break; case NPY_FR_h: - days = extract_unit_64(&value, 24ULL); - seconds = value*60*60; + out->day = extract_unit_64(&td, 24LL); + out->sec = (npy_int32)(td * 60*60); break; case NPY_FR_m: - days = extract_unit_64(&value, 60ULL*24); - seconds = value*60; + out->day = extract_unit_64(&td, 60LL*24); + out->sec = (npy_int32)(td * 60); break; case NPY_FR_s: - days = extract_unit_64(&value, 60ULL*60*24); - seconds = value; + out->day = extract_unit_64(&td, 60LL*60*24); + out->sec = (npy_int32)td; break; case NPY_FR_ms: - days = extract_unit_64(&value, 1000ULL*60*60*24); - seconds = extract_unit_64(&value, 1000ULL); - useconds = value*1000; + out->day = extract_unit_64(&td, 1000LL*60*60*24); + out->sec = (npy_int32)extract_unit_64(&td, 1000LL); + out->us = (npy_int32)(td * 1000LL); break; case NPY_FR_us: - days = extract_unit_64(&value, 1000ULL*1000*60*60*24); - seconds = extract_unit_64(&value, 1000ULL*1000); - useconds = value; + out->day = extract_unit_64(&td, 1000LL*1000*60*60*24); + out->sec = (npy_int32)extract_unit_64(&td, 1000LL*1000); + out->us = (npy_int32)td; break; - default: - // unreachable, handled by the `if` above - assert(NPY_FALSE); + case NPY_FR_ns: + out->day = extract_unit_64(&td, 1000LL*1000*1000*60*60*24); + out->sec = (npy_int32)extract_unit_64(&td, 1000LL*1000*1000); + out->us = (npy_int32)extract_unit_64(&td, 1000LL); + out->ps = (npy_int32)(td * 1000LL); + break; + case NPY_FR_ps: + out->day = extract_unit_64(&td, 1000LL*1000*1000*1000*60*60*24); + out->sec = (npy_int32)extract_unit_64(&td, 1000LL*1000*1000*1000); + out->us = (npy_int32)extract_unit_64(&td, 1000LL*1000); + out->ps = (npy_int32)td; + break; + case NPY_FR_fs: + out->sec = (npy_int32)extract_unit_64(&td, 1000LL*1000*1000*1000*1000); + out->us = 
(npy_int32)extract_unit_64(&td, 1000LL*1000*1000); + out->ps = (npy_int32)extract_unit_64(&td, 1000LL); + out->as = (npy_int32)(td * 1000LL); break; + case NPY_FR_as: + out->sec = (npy_int32)extract_unit_64(&td, 1000LL*1000*1000*1000*1000*1000); + out->us = (npy_int32)extract_unit_64(&td, 1000LL*1000*1000*1000); + out->ps = (npy_int32)extract_unit_64(&td, 1000LL*1000); + out->as = (npy_int32)td; + break; + default: + PyErr_SetString(PyExc_RuntimeError, + "NumPy timedelta metadata is corrupted with invalid " + "base unit"); + return -1; + } + + return 0; +} + +/* + * Converts a timedelta into a PyObject *. + * + * NaT (Not-a-time) is returned as None. + * For Îŧs/ms/s/m/h/D/W (microseconds or coarser), returns a datetime.timedelta. + * For Y/M (non-linear units), generic units and ns/ps/fs/as (units shorter than microseconds), returns an integer. + */ +NPY_NO_EXPORT PyObject * +convert_timedelta_to_pyobject(npy_timedelta td, PyArray_DatetimeMetaData *meta) +{ + npy_timedeltastruct tds; + + /* + * Convert NaT (not-a-time) into None. + */ + if (td == NPY_DATETIME_NAT) { + Py_RETURN_NONE; } + + /* + * If the type's precision is greater than microseconds, is + * Y/M (nonlinear units), or is generic units, return an int + */ + if (meta->base > NPY_FR_us || + meta->base == NPY_FR_Y || + meta->base == NPY_FR_M || + meta->base == NPY_FR_GENERIC) { + return PyLong_FromLongLong(td); + } + + if (convert_timedelta_to_timedeltastruct(meta, td, &tds) < 0) { + return NULL; + } + /* * If it would overflow the datetime.timedelta days, return a raw int */ - if (days < -999999999 || days > 999999999) { + if (tds.day < -999999999 || tds.day > 999999999) { return PyLong_FromLongLong(td); } else { - return PyDelta_FromDSU(days, seconds, useconds); + return PyDelta_FromDSU(tds.day, tds.sec, tds.us); + } +} + +/* + * We require that if d is a PyDelta, then + * hash(numpy.timedelta64(d)) == hash(d). + * Where possible, convert dt to a PyDelta and hash it. 
+ */ +NPY_NO_EXPORT npy_hash_t +timedelta_hash(PyArray_DatetimeMetaData *meta, npy_timedelta td) +{ + PyObject *obj; + npy_hash_t res; + npy_timedeltastruct tds; + + if (td == NPY_DATETIME_NAT) { + return -1; /* should have been handled by caller */ + } + + if (meta->base == NPY_FR_GENERIC) { + /* generic compares equal to *every* other base, so no single hash works. */ + PyErr_SetString(PyExc_ValueError, "Can't hash generic timedelta64"); + return -1; } + + /* Y and M can be converted to each other but not to other units */ + + if (meta->base == NPY_FR_Y) { + obj = PyLong_FromLongLong(td * 12); + } else if (meta->base == NPY_FR_M) { + obj = PyLong_FromLongLong(td); + } else { + if (convert_timedelta_to_timedeltastruct(meta, td, &tds) < 0) { + return -1; + } + + if (tds.day < -999999999 || tds.day > 999999999 + || tds.ps != 0 || tds.as != 0) { + /* convert_timedelta_to_timedeltastruct does memset, + * so this is safe from loose struct packing. */ + obj = PyBytes_FromStringAndSize((const char *)&tds, sizeof(tds)); + } else { + obj = PyDelta_FromDSU(tds.day, tds.sec, tds.us); + } + } + + if (obj == NULL) { + return -1; + } + + res = PyObject_Hash(obj); + + Py_DECREF(obj); + + return res; } /* @@ -2976,15 +3134,18 @@ cast_datetime_to_datetime(PyArray_DatetimeMetaData *src_meta, */ NPY_NO_EXPORT int cast_timedelta_to_timedelta(PyArray_DatetimeMetaData *src_meta, - PyArray_DatetimeMetaData *dst_meta, - npy_timedelta src_dt, - npy_timedelta *dst_dt) + PyArray_DatetimeMetaData *dst_meta, + npy_timedelta src_dt, + npy_timedelta *dst_dt) { npy_int64 num = 0, denom = 0; - /* If the metadata is the same, short-circuit the conversion */ - if (src_meta->base == dst_meta->base && - src_meta->num == dst_meta->num) { + /* + * If the metadata is the same or if src_dt is NAT, short-circuit + * the conversion. 
+ */ + if ((src_meta->base == dst_meta->base && src_meta->num == dst_meta->num) + || src_dt == NPY_DATETIME_NAT) { *dst_dt = src_dt; return 0; } diff --git a/numpy/_core/src/multiarray/datetime_busdaycal.c b/numpy/_core/src/multiarray/datetime_busdaycal.c index 3a7e3a383dca..4c6986544f6c 100644 --- a/numpy/_core/src/multiarray/datetime_busdaycal.c +++ b/numpy/_core/src/multiarray/datetime_busdaycal.c @@ -159,15 +159,15 @@ PyArray_WeekMaskConverter(PyObject *weekmask_in, npy_bool *weekmask) int i; for (i = 0; i < 7; ++i) { - long val; + int val; PyObject *f = PySequence_GetItem(obj, i); if (f == NULL) { Py_DECREF(obj); return 0; } - val = PyLong_AsLong(f); - if (error_converting(val)) { + val = PyObject_IsTrue(f); + if (val == -1) { Py_DECREF(f); Py_DECREF(obj); return 0; diff --git a/numpy/_core/src/multiarray/datetime_strings.c b/numpy/_core/src/multiarray/datetime_strings.c index f92eec3f5a59..97f24cfe821e 100644 --- a/numpy/_core/src/multiarray/datetime_strings.c +++ b/numpy/_core/src/multiarray/datetime_strings.c @@ -984,7 +984,7 @@ NpyDatetime_MakeISO8601Datetime( * the string representation, so ensure that the data * is being cast according to the casting rule. 
*/ - if (casting != NPY_UNSAFE_CASTING) { + if ((casting != NPY_UNSAFE_CASTING) && ((casting & NPY_SAME_VALUE_CASTING_FLAG) == 0)) { /* Producing a date as a local time is always 'unsafe' */ if (base <= NPY_FR_D && local) { PyErr_SetString(PyExc_TypeError, "Cannot create a local " diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index cb031cc43c58..0bbc6358f75b 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -6,6 +6,8 @@ #include #include +#include + #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" #include "numpy/npy_math.h" @@ -13,6 +15,7 @@ #include "npy_config.h" #include "npy_ctypes.h" #include "npy_import.h" +#include "npy_pycompat.h" // PyObject_GetOptionalAttr #include "_datetime.h" @@ -20,12 +23,14 @@ #include "conversion_utils.h" /* for PyArray_TypestrConvert */ #include "templ_common.h" /* for npy_mul_sizes_with_overflow */ #include "descriptor.h" -#include "multiarraymodule.h" +#include "npy_static_data.h" +#include "multiarraymodule.h" // for thread unsafe state access #include "alloc.h" #include "assert.h" #include "npy_buffer.h" #include "dtypemeta.h" #include "stringdtype/dtype.h" +#include "array_coercion.h" #ifndef PyDictProxy_Check #define PyDictProxy_Check(obj) (Py_TYPE(obj) == &PyDictProxy_Type) @@ -80,70 +85,54 @@ _try_convert_from_ctypes_type(PyTypeObject *type) } /* - * This function creates a dtype object when the object has a "dtype" attribute, - * and it can be converted to a dtype object. + * This function creates a dtype object when the object has a "__numpy_dtype__" + * or "dtype" attribute which must be valid NumPy dtype instance. * * Returns `Py_NotImplemented` if this is not possible. - * Currently the only failure mode for a NULL return is a RecursionError. 
*/ static PyArray_Descr * _try_convert_from_dtype_attr(PyObject *obj) { + int used_dtype_attr = 0; /* For arbitrary objects that have a "dtype" attribute */ - PyObject *dtypedescr = PyObject_GetAttrString(obj, "dtype"); - if (dtypedescr == NULL) { - /* - * This can be reached due to recursion limit being hit while fetching - * the attribute (tested for py3.7). This removes the custom message. - */ - goto fail; - } - - if (PyArray_DescrCheck(dtypedescr)) { - /* The dtype attribute is already a valid descriptor */ - return (PyArray_Descr *)dtypedescr; - } - - if (Py_EnterRecursiveCall( - " while trying to convert the given data type from its " - "`.dtype` attribute.") != 0) { - Py_DECREF(dtypedescr); + PyObject *attr; + int res = PyObject_GetOptionalAttr(obj, npy_interned_str.numpy_dtype, &attr); + if (res < 0) { return NULL; } - - PyArray_Descr *newdescr = _convert_from_any(dtypedescr, 0); - Py_DECREF(dtypedescr); - Py_LeaveRecursiveCall(); - if (newdescr == NULL) { - goto fail; + else if (res == 0) { + /* + * When "__numpy_dtype__" does not exist, also check "dtype". This should + * be removed in the future. + * We do however support a weird `class myclass(np.void): dtype = ...` + * syntax. + */ + used_dtype_attr = 1; + int res = PyObject_GetOptionalAttr(obj, npy_interned_str.dtype, &attr); + if (res < 0) { + return NULL; + } + else if (res == 0) { + Py_INCREF(Py_NotImplemented); + return (PyArray_Descr *)Py_NotImplemented; + } } - - /* Deprecated 2021-01-05, NumPy 1.21 */ - if (DEPRECATE("in the future the `.dtype` attribute of a given data" - "type object must be a valid dtype instance. " - "`data_type.dtype` may need to be coerced using " - "`np.dtype(data_type.dtype)`. (Deprecated NumPy 1.20)") < 0) { - Py_DECREF(newdescr); + if (!PyArray_DescrCheck(attr)) { + if (PyType_Check(obj) && PyObject_HasAttrString(attr, "__get__")) { + /* If the object has a __get__, assume this is a class property. 
*/ + Py_DECREF(attr); + Py_INCREF(Py_NotImplemented); + return (PyArray_Descr *)Py_NotImplemented; + } + PyErr_Format(PyExc_ValueError, + "Could not convert %R to a NumPy dtype (via `.%S` value %R).", obj, + used_dtype_attr ? npy_interned_str.dtype : npy_interned_str.numpy_dtype, + attr); + Py_DECREF(attr); return NULL; } - - return newdescr; - - fail: - /* Ignore all but recursion errors, to give ctypes a full try. */ - if (!PyErr_ExceptionMatches(PyExc_RecursionError)) { - PyErr_Clear(); - Py_INCREF(Py_NotImplemented); - return (PyArray_Descr *)Py_NotImplemented; - } - return NULL; -} - -/* Expose to another file with a prefixed name */ -NPY_NO_EXPORT PyArray_Descr * -_arraydescr_try_convert_from_dtype_attr(PyObject *obj) -{ - return _try_convert_from_dtype_attr(obj); + /* The dtype attribute is already a valid descriptor */ + return (PyArray_Descr *)attr; } /* @@ -270,8 +259,16 @@ _convert_from_tuple(PyObject *obj, int align) if (PyDataType_ISUNSIZED(type)) { /* interpret next item as a typesize */ int itemsize = PyArray_PyIntAsInt(PyTuple_GET_ITEM(obj,1)); - - if (error_converting(itemsize)) { + if (type->type_num == NPY_UNICODE) { + if (itemsize > NPY_MAX_INT / 4) { + itemsize = -1; + } + else { + itemsize *= 4; + } + } + if (itemsize < 0) { + /* Error may or may not be set by PyIntAsInt. 
*/ PyErr_SetString(PyExc_ValueError, "invalid itemsize in generic type tuple"); Py_DECREF(type); @@ -281,12 +278,8 @@ _convert_from_tuple(PyObject *obj, int align) if (type == NULL) { return NULL; } - if (type->type_num == NPY_UNICODE) { - type->elsize = itemsize << 2; - } - else { - type->elsize = itemsize; - } + + type->elsize = itemsize; return type; } else if (type->metadata && (PyDict_Check(val) || PyDictProxy_Check(val))) { @@ -423,7 +416,7 @@ _convert_from_array_descr(PyObject *obj, int align) return NULL; } for (int i = 0; i < n; i++) { - PyObject *item = PyList_GET_ITEM(obj, i); + PyObject *item = PyList_GET_ITEM(obj, i); // noqa: borrowed-ref - manual fix needed if (!PyTuple_Check(item) || (PyTuple_GET_SIZE(item) < 2)) { PyErr_Format(PyExc_TypeError, "Field elements must be 2- or 3-tuples, got '%R'", @@ -506,10 +499,10 @@ _convert_from_array_descr(PyObject *obj, int align) "StringDType is not currently supported for structured dtype fields."); goto fail; } - if ((PyDict_GetItemWithError(fields, name) != NULL) + if ((PyDict_GetItemWithError(fields, name) != NULL) // noqa: borrowed-ref OK || (title && PyUnicode_Check(title) - && (PyDict_GetItemWithError(fields, title) != NULL))) { + && (PyDict_GetItemWithError(fields, title) != NULL))) { // noqa: borrowed-ref OK PyErr_Format(PyExc_ValueError, "field %R occurs more than once", name); Py_DECREF(conv); @@ -547,7 +540,7 @@ _convert_from_array_descr(PyObject *obj, int align) goto fail; } if (PyUnicode_Check(title)) { - PyObject *existing = PyDict_GetItemWithError(fields, title); + PyObject *existing = PyDict_GetItemWithError(fields, title); // noqa: borrowed-ref OK if (existing == NULL && PyErr_Occurred()) { goto fail; } @@ -612,7 +605,7 @@ _convert_from_list(PyObject *obj, int align) * Ignore any empty string at end which _internal._commastring * can produce */ - PyObject *last_item = PyList_GET_ITEM(obj, n-1); + PyObject *last_item = PyList_GET_ITEM(obj, n-1); // noqa: borrowed-ref OK if 
(PyUnicode_Check(last_item)) { Py_ssize_t s = PySequence_Size(last_item); if (s < 0) { @@ -642,7 +635,7 @@ _convert_from_list(PyObject *obj, int align) int totalsize = 0; for (int i = 0; i < n; i++) { PyArray_Descr *conv = _convert_from_any( - PyList_GET_ITEM(obj, i), align); + PyList_GET_ITEM(obj, i), align); // noqa: borrowed-ref OK if (conv == NULL) { goto fail; } @@ -722,13 +715,13 @@ _convert_from_commastring(PyObject *obj, int align) { PyObject *parsed; PyArray_Descr *res; - static PyObject *_commastring = NULL; assert(PyUnicode_Check(obj)); - npy_cache_import("numpy._core._internal", "_commastring", &_commastring); - if (_commastring == NULL) { + if (npy_cache_import_runtime( + "numpy._core._internal", "_commastring", + &npy_runtime_imports._commastring) == -1) { return NULL; } - parsed = PyObject_CallOneArg(_commastring, obj); + parsed = PyObject_CallOneArg(npy_runtime_imports._commastring, obj); if (parsed == NULL) { return NULL; } @@ -793,7 +786,7 @@ _validate_union_object_dtype(_PyArray_LegacyDescr *new, _PyArray_LegacyDescr *co if (name == NULL) { return -1; } - tup = PyDict_GetItemWithError(conv->fields, name); + tup = PyDict_GetItemWithError(conv->fields, name); // noqa: borrowed-ref OK if (tup == NULL) { if (!PyErr_Occurred()) { /* fields was missing the name it claimed to contain */ @@ -851,7 +844,7 @@ _try_convert_from_inherit_tuple(PyArray_Descr *type, PyObject *newobj) return (PyArray_Descr *)Py_NotImplemented; } if (!PyDataType_ISLEGACY(type) || !PyDataType_ISLEGACY(conv)) { - /* + /* * This specification should probably be never supported, but * certainly not for new-style DTypes. 
*/ @@ -939,7 +932,7 @@ _validate_object_field_overlap(_PyArray_LegacyDescr *dtype) if (key == NULL) { return -1; } - tup = PyDict_GetItemWithError(fields, key); + tup = PyDict_GetItemWithError(fields, key); // noqa: borrowed-ref OK if (tup == NULL) { if (!PyErr_Occurred()) { /* fields was missing the name it claimed to contain */ @@ -959,7 +952,7 @@ _validate_object_field_overlap(_PyArray_LegacyDescr *dtype) if (key == NULL) { return -1; } - tup = PyDict_GetItemWithError(fields, key); + tup = PyDict_GetItemWithError(fields, key); // noqa: borrowed-ref OK if (tup == NULL) { if (!PyErr_Occurred()) { /* fields was missing the name it claimed to contain */ @@ -1028,17 +1021,13 @@ _validate_object_field_overlap(_PyArray_LegacyDescr *dtype) static PyArray_Descr * _convert_from_field_dict(PyObject *obj, int align) { - PyObject *_numpy_internal; - PyArray_Descr *res; - - _numpy_internal = PyImport_ImportModule("numpy._core._internal"); - if (_numpy_internal == NULL) { + if (npy_cache_import_runtime( + "numpy._core._internal", "_usefields", &npy_runtime_imports._usefields) < 0) { return NULL; } - res = (PyArray_Descr *)PyObject_CallMethod(_numpy_internal, - "_usefields", "Oi", obj, align); - Py_DECREF(_numpy_internal); - return res; + + return (PyArray_Descr *)PyObject_CallFunctionObjArgs( + npy_runtime_imports._usefields, obj, align ? 
Py_True : Py_False, NULL); } /* @@ -1216,7 +1205,7 @@ _convert_from_dict(PyObject *obj, int align) } /* Insert into dictionary */ - if (PyDict_GetItemWithError(fields, name) != NULL) { + if (PyDict_GetItemWithError(fields, name) != NULL) { // noqa: borrowed-ref OK PyErr_SetString(PyExc_ValueError, "name already used as a name or title"); Py_DECREF(tup); @@ -1235,7 +1224,7 @@ _convert_from_dict(PyObject *obj, int align) } if (len == 3) { if (PyUnicode_Check(title)) { - if (PyDict_GetItemWithError(fields, title) != NULL) { + if (PyDict_GetItemWithError(fields, title) != NULL) { // noqa: borrowed-ref OK PyErr_SetString(PyExc_ValueError, "title already used as a name or title."); Py_DECREF(tup); @@ -1406,7 +1395,8 @@ PyArray_DescrConverter2(PyObject *obj, PyArray_Descr **at) * TODO: This function should eventually receive a deprecation warning and * be removed. * - * @param descr + * @param descr descriptor to be checked + * @param DType pointer to the DType of the descriptor * @return 1 if this is not a concrete dtype instance 0 otherwise */ static int @@ -1438,9 +1428,9 @@ descr_is_legacy_parametric_instance(PyArray_Descr *descr, * both results can be NULL (if the input is). But it always sets the DType * when a descriptor is set. * - * @param dtype - * @param out_descr - * @param out_DType + * @param dtype Input descriptor to be converted + * @param out_descr Output descriptor + * @param out_DType DType of the output descriptor * @return 0 on success -1 on failure */ NPY_NO_EXPORT int @@ -1467,7 +1457,7 @@ PyArray_ExtractDTypeAndDescriptor(PyArray_Descr *dtype, * Converter function filling in an npy_dtype_info struct on success. * * @param obj representing a dtype instance (descriptor) or DType class. - * @param[out] npy_dtype_info filled with the DType class and dtype/descriptor + * @param[out] dt_info npy_dtype_info filled with the DType class and dtype/descriptor * instance. The class is always set while the instance may be NULL. * On error, both will be NULL. 
* @return 0 on failure and 1 on success (as a converter) @@ -1519,7 +1509,7 @@ PyArray_DTypeOrDescrConverterRequired(PyObject *obj, npy_dtype_info *dt_info) * NULL anyway). * * @param obj None or obj representing a dtype instance (descr) or DType class. - * @param[out] npy_dtype_info filled with the DType class and dtype/descriptor + * @param[out] dt_info filled with the DType class and dtype/descriptor * instance. If `obj` is None, is not modified. Otherwise the class * is always set while the instance may be NULL. * On error, both will be NULL. @@ -1596,6 +1586,10 @@ _convert_from_type(PyObject *obj) { return PyArray_DescrFromType(NPY_OBJECT); } else { + PyObject *DType = PyArray_DiscoverDTypeFromScalarType(typ); + if (DType != NULL) { + return PyArray_GetDefaultDescr((PyArray_DTypeMeta *)DType); + } PyArray_Descr *ret = _try_convert_from_dtype_attr(obj); if ((PyObject *)ret != Py_NotImplemented) { return ret; @@ -1807,19 +1801,27 @@ _convert_from_str(PyObject *obj, int align) /* Python byte string characters are unsigned */ check_num = (unsigned char) type[0]; } - /* A kind + size like 'f8' */ + /* Possibly a kind + size like 'f8' but also could be 'bool' */ else { char *typeend = NULL; int kind; - /* Parse the integer, make sure it's the rest of the string */ - elsize = (int)strtol(type + 1, &typeend, 10); - /* Make sure size is not negative */ - if (elsize < 0) { + /* Attempt to parse the integer, make sure it's the rest of the string */ + errno = 0; + long result = strtol(type + 1, &typeend, 10); + npy_bool some_parsing_happened = !(type == typeend); + npy_bool entire_string_consumed = *typeend == '\0'; + npy_bool parsing_succeeded = + (errno == 0) && some_parsing_happened && entire_string_consumed; + // make sure it doesn't overflow or go negative + if (result > INT_MAX || result < 0) { goto fail; } - if (typeend - type == len) { + elsize = (int)result; + + + if (parsing_succeeded && typeend - type == len) { kind = type[0]; switch (kind) { @@ -1827,14 
+1829,6 @@ _convert_from_str(PyObject *obj, int align) check_num = NPY_STRING; break; - case NPY_DEPRECATED_STRINGLTR2: - if (DEPRECATE("Data type alias 'a' was deprecated in NumPy 2.0. " - "Use the 'S' alias instead.") < 0) { - return NULL; - } - check_num = NPY_STRING; - break; - /* * When specifying length of UNICODE * the number of characters is given to match @@ -1844,7 +1838,10 @@ _convert_from_str(PyObject *obj, int align) */ case NPY_UNICODELTR: check_num = NPY_UNICODE; - elsize <<= 2; + if (elsize > (NPY_MAX_INT / 4)) { + goto fail; + } + elsize *= 4; break; case NPY_VOIDLTR: @@ -1865,6 +1862,9 @@ _convert_from_str(PyObject *obj, int align) } } } + else if (parsing_succeeded) { + goto fail; + } } if (PyErr_Occurred()) { @@ -1879,7 +1879,7 @@ _convert_from_str(PyObject *obj, int align) if (typeDict == NULL) { goto fail; } - PyObject *item = PyDict_GetItemWithError(typeDict, obj); + PyObject *item = PyDict_GetItemWithError(typeDict, obj); // noqa: borrowed-ref - manual fix needed if (item == NULL) { if (PyErr_Occurred()) { return NULL; @@ -1899,13 +1899,6 @@ _convert_from_str(PyObject *obj, int align) goto fail; } - if (strcmp(type, "a") == 0) { - if (DEPRECATE("Data type alias 'a' was deprecated in NumPy 2.0. " - "Use the 'S' alias instead.") < 0) { - return NULL; - } - } - /* * Probably only ever dispatches to `_convert_from_type`, but who * knows what users are injecting into `np.typeDict`. @@ -1958,7 +1951,7 @@ NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNew(PyArray_Descr *base_descr) { if (!PyDataType_ISLEGACY(base_descr)) { - /* + /* * The main use of this function is mutating strings, so probably * disallowing this is fine in practice. */ @@ -2025,6 +2018,7 @@ arraydescr_dealloc(PyArray_Descr *self) { Py_XDECREF(self->typeobj); if (!PyDataType_ISLEGACY(self)) { + /* non legacy dtypes must not have fields, etc. 
*/ Py_TYPE(self)->tp_free((PyObject *)self); return; } @@ -2074,7 +2068,7 @@ static PyMemberDef arraydescr_members[] = { {"alignment", T_PYSSIZET, offsetof(PyArray_Descr, alignment), READONLY, NULL}, {"flags", -#if NPY_ULONGLONG == NPY_UINT64 +#if NPY_SIZEOF_LONGLONG == 8 T_ULONGLONG, offsetof(PyArray_Descr, flags), READONLY, NULL}, #else #error Assuming long long is 64bit, if not replace with getter function. @@ -2095,6 +2089,10 @@ arraydescr_subdescr_get(PyArray_Descr *self, void *NPY_UNUSED(ignored)) NPY_NO_EXPORT PyObject * arraydescr_protocol_typestr_get(PyArray_Descr *self, void *NPY_UNUSED(ignored)) { + if (!PyDataType_ISLEGACY(NPY_DTYPE(self))) { + return (PyObject *) Py_TYPE(self)->tp_str((PyObject *)self); + } + char basic_ = self->kind; char endian = self->byteorder; int size = self->elsize; @@ -2255,7 +2253,7 @@ _arraydescr_isnative(PyArray_Descr *self) PyArray_Descr *new; int offset; Py_ssize_t pos = 0; - while (PyDict_Next(PyDataType_FIELDS(self), &pos, &key, &value)) { + while (PyDict_Next(PyDataType_FIELDS(self), &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } @@ -2359,20 +2357,6 @@ arraydescr_names_set( return -1; } - /* - * FIXME - * - * This deprecation has been temporarily removed for the NumPy 1.7 - * release. It should be re-added after the 1.7 branch is done, - * and a convenience API to replace the typical use-cases for - * mutable names should be implemented. - * - * if (DEPRECATE("Setting NumPy dtype names is deprecated, the dtype " - * "will become immutable in a future version") < 0) { - * return -1; - * } - */ - N = PyTuple_GET_SIZE(self->names); if (!PySequence_Check(val) || PyObject_Size((PyObject *)val) != N) { /* Should be a TypeError, but this should be deprecated anyway. 
*/ @@ -2415,7 +2399,7 @@ arraydescr_names_set( int ret; key = PyTuple_GET_ITEM(self->names, i); /* Borrowed references to item and new_key */ - item = PyDict_GetItemWithError(self->fields, key); + item = PyDict_GetItemWithError(self->fields, key); // noqa: borrowed-ref OK if (item == NULL) { if (!PyErr_Occurred()) { /* fields was missing the name it claimed to contain */ @@ -2543,7 +2527,9 @@ arraydescr_new(PyTypeObject *subtype, return NULL; } - PyObject *odescr, *metadata=NULL; + PyObject *odescr; + PyObject *oalign = NULL; + PyObject *metadata = NULL; PyArray_Descr *conv; npy_bool align = NPY_FALSE; npy_bool copy = NPY_FALSE; @@ -2551,14 +2537,33 @@ arraydescr_new(PyTypeObject *subtype, static char *kwlist[] = {"dtype", "align", "copy", "metadata", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&O&O!:dtype", kwlist, + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OO&O!:dtype", kwlist, &odescr, - PyArray_BoolConverter, &align, + &oalign, PyArray_BoolConverter, ©, &PyDict_Type, &metadata)) { return NULL; } + if (oalign != NULL) { + /* + * In the future, reject non Python (or NumPy) boolean, including integers to avoid any + * possibility of thinking that an integer alignment makes sense here. + */ + if (!PyBool_Check(oalign) && !PyArray_IsScalar(oalign, Bool)) { + /* Deprecated 2025-07-01: NumPy 2.4 */ + if (PyErr_WarnFormat(npy_static_pydata.VisibleDeprecationWarning, 1, + "dtype(): align should be passed as Python or NumPy boolean but got `align=%.100R`. " + "Did you mean to pass a tuple to create a subarray type? 
(Deprecated NumPy 2.4)", + oalign) < 0) { + return NULL; + } + } + if (!PyArray_BoolConverter(oalign, &align)) { + return NULL; + } + } + conv = _convert_from_any(odescr, align); if (conv == NULL) { return NULL; @@ -2643,8 +2648,10 @@ _get_pickleabletype_from_datetime_metadata(PyArray_Descr *dtype) if (dtype->metadata != NULL) { Py_INCREF(dtype->metadata); PyTuple_SET_ITEM(ret, 0, dtype->metadata); - } else { - PyTuple_SET_ITEM(ret, 0, PyDict_New()); + } + else { + PyTuple_SET_ITEM(ret, 0, Py_None); + Py_INCREF(Py_None); } /* Convert the datetime metadata into a tuple */ @@ -2703,7 +2710,7 @@ arraydescr_reduce(PyArray_Descr *self, PyObject *NPY_UNUSED(args)) Py_DECREF(ret); return NULL; } - obj = PyObject_GetAttr(mod, npy_ma_str_dtype); + obj = PyObject_GetAttr(mod, npy_interned_str.dtype); Py_DECREF(mod); if (obj == NULL) { Py_DECREF(ret); @@ -2820,7 +2827,7 @@ _descr_find_object(PyArray_Descr *self) int offset; Py_ssize_t pos = 0; - while (PyDict_Next(PyDataType_FIELDS(self), &pos, &key, &value)) { + while (PyDict_Next(PyDataType_FIELDS(self), &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } @@ -2844,13 +2851,13 @@ _descr_find_object(PyArray_Descr *self) static PyObject * arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) { - int elsize = -1, alignment = -1; + Py_ssize_t elsize = -1, alignment = -1; int version = 4; char endian; PyObject *endian_obj; PyObject *subarray, *fields, *names = NULL, *metadata=NULL; int incref_names = 1; - int int_dtypeflags = 0; + npy_int64 signed_dtypeflags = 0; npy_uint64 dtypeflags; if (!PyDataType_ISLEGACY(self)) { @@ -2869,24 +2876,24 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) } switch (PyTuple_GET_SIZE(PyTuple_GET_ITEM(args,0))) { case 9: - if (!PyArg_ParseTuple(args, "(iOOOOiiiO):__setstate__", + if (!PyArg_ParseTuple(args, "(iOOOOnnkO):__setstate__", &version, &endian_obj, &subarray, &names, &fields, &elsize, - &alignment, &int_dtypeflags, 
&metadata)) { + &alignment, &signed_dtypeflags, &metadata)) { PyErr_Clear(); return NULL; } break; case 8: - if (!PyArg_ParseTuple(args, "(iOOOOiii):__setstate__", + if (!PyArg_ParseTuple(args, "(iOOOOnnk):__setstate__", &version, &endian_obj, &subarray, &names, &fields, &elsize, - &alignment, &int_dtypeflags)) { + &alignment, &signed_dtypeflags)) { return NULL; } break; case 7: - if (!PyArg_ParseTuple(args, "(iOOOOii):__setstate__", + if (!PyArg_ParseTuple(args, "(iOOOOnn):__setstate__", &version, &endian_obj, &subarray, &names, &fields, &elsize, &alignment)) { @@ -2894,7 +2901,7 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) } break; case 6: - if (!PyArg_ParseTuple(args, "(iOOOii):__setstate__", + if (!PyArg_ParseTuple(args, "(iOOOnn):__setstate__", &version, &endian_obj, &subarray, &fields, &elsize, &alignment)) { @@ -2903,20 +2910,17 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) break; case 5: version = 0; - if (!PyArg_ParseTuple(args, "(OOOii):__setstate__", + if (!PyArg_ParseTuple(args, "(OOOnn):__setstate__", &endian_obj, &subarray, &fields, &elsize, &alignment)) { return NULL; } break; default: - /* raise an error */ - if (PyTuple_GET_SIZE(PyTuple_GET_ITEM(args,0)) > 5) { - version = PyLong_AsLong(PyTuple_GET_ITEM(args, 0)); - } - else { - version = -1; - } + PyErr_SetString(PyExc_ValueError, + "Invalid state while unpickling. 
Is the pickle corrupted " + "or created with a newer NumPy version?"); + return NULL; } /* @@ -2936,7 +2940,7 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) if (fields != Py_None) { PyObject *key, *list; key = PyLong_FromLong(-1); - list = PyDict_GetItemWithError(fields, key); + list = PyDict_GetItemWithError(fields, key); // noqa: borrowed-ref OK if (!list) { if (!PyErr_Occurred()) { /* fields was missing the name it claimed to contain */ @@ -3112,7 +3116,7 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) for (i = 0; i < PyTuple_GET_SIZE(names); ++i) { name = PyTuple_GET_ITEM(names, i); - field = PyDict_GetItemWithError(fields, name); + field = PyDict_GetItemWithError(fields, name); // noqa: borrowed-ref OK if (!field) { if (!PyErr_Occurred()) { /* fields was missing the name it claimed to contain */ @@ -3151,12 +3155,12 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) * flags as an int even though it actually was a char in the PyArray_Descr * structure */ - if (int_dtypeflags < 0 && int_dtypeflags >= -128) { + if (signed_dtypeflags < 0 && signed_dtypeflags >= -128) { /* NumPy used to use a char. So normalize if signed. */ - int_dtypeflags += 128; + signed_dtypeflags += 128; } - dtypeflags = int_dtypeflags; - if (dtypeflags != int_dtypeflags) { + dtypeflags = (npy_uint64)signed_dtypeflags; + if (dtypeflags != signed_dtypeflags) { PyErr_Format(PyExc_ValueError, "incorrect value for flags variable (overflow)"); return NULL; @@ -3169,16 +3173,8 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) self->flags = _descr_find_object((PyArray_Descr *)self); } - /* - * We have a borrowed reference to metadata so no need - * to alter reference count when throwing away Py_None. 
- */ - if (metadata == Py_None) { - metadata = NULL; - } - - if (PyDataType_ISDATETIME(self) && (metadata != NULL)) { - PyObject *old_metadata; + PyObject *old_metadata, *new_metadata; + if (PyDataType_ISDATETIME(self)) { PyArray_DatetimeMetaData temp_dt_data; if ((! PyTuple_Check(metadata)) || (PyTuple_Size(metadata) != 2)) { @@ -3195,20 +3191,26 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) return NULL; } - old_metadata = self->metadata; - self->metadata = PyTuple_GET_ITEM(metadata, 0); + new_metadata = PyTuple_GET_ITEM(metadata, 0); memcpy((char *) &((PyArray_DatetimeDTypeMetaData *)self->c_metadata)->meta, - (char *) &temp_dt_data, - sizeof(PyArray_DatetimeMetaData)); - Py_XINCREF(self->metadata); - Py_XDECREF(old_metadata); + (char *) &temp_dt_data, + sizeof(PyArray_DatetimeMetaData)); } else { - PyObject *old_metadata = self->metadata; - self->metadata = metadata; - Py_XINCREF(self->metadata); - Py_XDECREF(old_metadata); + new_metadata = metadata; + } + + old_metadata = self->metadata; + /* + * We have a borrowed reference to metadata so no need + * to alter reference count when throwing away Py_None. 
+ */ + if (new_metadata == Py_None) { + new_metadata = NULL; } + self->metadata = new_metadata; + Py_XINCREF(new_metadata); + Py_XDECREF(old_metadata); Py_RETURN_NONE; } @@ -3316,7 +3318,7 @@ PyArray_DescrNewByteorder(PyArray_Descr *oself, char newendian) return NULL; } /* make new dictionary with replaced PyArray_Descr Objects */ - while (PyDict_Next(self->fields, &pos, &key, &value)) { + while (PyDict_Next(self->fields, &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } @@ -3442,7 +3444,7 @@ is_dtype_struct_simple_unaligned_layout(PyArray_Descr *dtype) if (key == NULL) { return 0; } - tup = PyDict_GetItem(fields, key); + tup = PyDict_GetItem(fields, key); // noqa: borrowed-ref OK if (tup == NULL) { return 0; } @@ -3607,7 +3609,7 @@ _check_has_fields(PyArray_Descr *self) static PyObject * _subscript_by_name(_PyArray_LegacyDescr *self, PyObject *op) { - PyObject *obj = PyDict_GetItemWithError(self->fields, op); + PyObject *obj = PyDict_GetItemWithError(self->fields, op); // noqa: borrowed-ref OK if (obj == NULL) { if (!PyErr_Occurred()) { PyErr_Format(PyExc_KeyError, @@ -3644,7 +3646,7 @@ _is_list_of_strings(PyObject *obj) } seqlen = PyList_GET_SIZE(obj); for (i = 0; i < seqlen; i++) { - PyObject *item = PyList_GET_ITEM(obj, i); + PyObject *item = PyList_GET_ITEM(obj, i); // noqa: borrowed-ref - manual fix needed if (!PyUnicode_Check(item)) { return NPY_FALSE; } @@ -3688,7 +3690,7 @@ arraydescr_field_subset_view(_PyArray_LegacyDescr *self, PyObject *ind) */ PyTuple_SET_ITEM(names, i, name); - tup = PyDict_GetItemWithError(self->fields, name); + tup = PyDict_GetItemWithError(self->fields, name); // noqa: borrowed-ref OK if (tup == NULL) { if (!PyErr_Occurred()) { PyErr_SetObject(PyExc_KeyError, name); @@ -3777,6 +3779,42 @@ descr_subscript(PyArray_Descr *self, PyObject *op) } } +static PyObject * +array_typestr_get(PyArray_Descr *self) +{ + return arraydescr_protocol_typestr_get(self, NULL); +} + + +NPY_NO_EXPORT PyObject * 
+array_protocol_descr_get(PyArray_Descr *self) +{ + PyObject *res; + PyObject *dobj; + + res = arraydescr_protocol_descr_get(self, NULL); + if (res) { + return res; + } + PyErr_Clear(); + + /* get default */ + dobj = PyTuple_New(2); + if (dobj == NULL) { + return NULL; + } + PyTuple_SET_ITEM(dobj, 0, PyUnicode_FromString("")); + PyTuple_SET_ITEM(dobj, 1, array_typestr_get(self)); + res = PyList_New(1); + if (res == NULL) { + Py_DECREF(dobj); + return NULL; + } + PyList_SET_ITEM(res, 0, dobj); + return res; +} + + static PySequenceMethods descr_as_sequence = { (lenfunc) descr_length, /* sq_length */ (binaryfunc) NULL, /* sq_concat */ diff --git a/numpy/_core/src/multiarray/descriptor.h b/numpy/_core/src/multiarray/descriptor.h index 820e53f0c3e8..233c434fd6b4 100644 --- a/numpy/_core/src/multiarray/descriptor.h +++ b/numpy/_core/src/multiarray/descriptor.h @@ -29,6 +29,8 @@ NPY_NO_EXPORT PyObject *arraydescr_protocol_typestr_get( NPY_NO_EXPORT PyObject *arraydescr_protocol_descr_get( PyArray_Descr *self, void *); +NPY_NO_EXPORT PyObject *array_protocol_descr_get(PyArray_Descr *self); + /* * offset: A starting offset. * alignment: A power-of-two alignment. 
@@ -44,9 +46,6 @@ NPY_NO_EXPORT PyObject *arraydescr_protocol_descr_get( NPY_NO_EXPORT PyObject * array_set_typeDict(PyObject *NPY_UNUSED(ignored), PyObject *args); -NPY_NO_EXPORT PyArray_Descr * -_arraydescr_try_convert_from_dtype_attr(PyObject *obj); - NPY_NO_EXPORT int is_dtype_struct_simple_unaligned_layout(PyArray_Descr *dtype); diff --git a/numpy/_core/src/multiarray/dlpack.c b/numpy/_core/src/multiarray/dlpack.c index 05935f608a29..29e5aecec5d5 100644 --- a/numpy/_core/src/multiarray/dlpack.c +++ b/numpy/_core/src/multiarray/dlpack.c @@ -8,7 +8,7 @@ #include "numpy/arrayobject.h" #include "npy_argparse.h" #include "npy_dlpack.h" -#include "multiarraymodule.h" +#include "npy_static_data.h" #include "conversion_utils.h" @@ -57,7 +57,7 @@ array_dlpack_deleter_unversioned(DLManagedTensor *self) /* - * Deleter for a DLPack capsule wrapping a DLManagedTensor(Versioed). + * Deleter for a DLPack capsule wrapping a DLManagedTensor(Versioned). * * This is exactly as mandated by dlpack */ @@ -285,10 +285,6 @@ fill_dl_tensor_information( } dl_tensor->ndim = ndim; - if (PyArray_IS_C_CONTIGUOUS(self)) { - /* No need to pass strides, so just NULL it again */ - dl_tensor->strides = NULL; - } dl_tensor->byte_offset = 0; return 0; @@ -351,9 +347,8 @@ create_dlpack_capsule( dl_tensor = &managed->dl_tensor; } - dl_tensor->shape = (int64_t *)((char *)ptr + offset); - /* Note that strides may be set to NULL later if C-contiguous */ - dl_tensor->strides = dl_tensor->shape + ndim; + dl_tensor->shape = (ndim > 0) ? (int64_t *)((char *)ptr + offset) : NULL; + dl_tensor->strides = (ndim > 0) ? 
dl_tensor->shape + ndim : NULL; if (fill_dl_tensor_information(dl_tensor, self, result_device) < 0) { PyMem_Free(ptr); @@ -397,7 +392,8 @@ device_converter(PyObject *obj, DLDevice *result_device) return NPY_SUCCEED; } - PyErr_SetString(PyExc_ValueError, "unsupported device requested"); + /* Must be a BufferError */ + PyErr_SetString(PyExc_BufferError, "unsupported device requested"); return NPY_FAIL; } @@ -504,36 +500,12 @@ from_dlpack(PyObject *NPY_UNUSED(self), return NULL; } - /* Prepare the arguments to call objects __dlpack__() method */ - static PyObject *call_kwnames = NULL; - static PyObject *dl_cpu_device_tuple = NULL; - static PyObject *max_version = NULL; - - if (call_kwnames == NULL) { - call_kwnames = Py_BuildValue("(sss)", "dl_device", "copy", "max_version"); - if (call_kwnames == NULL) { - return NULL; - } - } - if (dl_cpu_device_tuple == NULL) { - dl_cpu_device_tuple = Py_BuildValue("(i,i)", 1, 0); - if (dl_cpu_device_tuple == NULL) { - return NULL; - } - } - if (max_version == NULL) { - max_version = Py_BuildValue("(i,i)", 1, 0); - if (max_version == NULL) { - return NULL; - } - } - /* * Prepare arguments for the full call. We always forward copy and pass * our max_version. `device` is always passed as `None`, but if the user * provided a device, we will replace it with the "cpu": (1, 0). 
*/ - PyObject *call_args[] = {obj, Py_None, copy, max_version}; + PyObject *call_args[] = {obj, Py_None, copy, npy_static_pydata.dl_max_version}; Py_ssize_t nargsf = 1 | PY_VECTORCALL_ARGUMENTS_OFFSET; /* If device is passed it must be "cpu" and replace it with (1, 0) */ @@ -544,12 +516,13 @@ from_dlpack(PyObject *NPY_UNUSED(self), return NULL; } assert(device_request == NPY_DEVICE_CPU); - call_args[1] = dl_cpu_device_tuple; + call_args[1] = npy_static_pydata.dl_cpu_device_tuple; } PyObject *capsule = PyObject_VectorcallMethod( - npy_ma_str___dlpack__, call_args, nargsf, call_kwnames); + npy_interned_str.__dlpack__, call_args, nargsf, + npy_static_pydata.dl_call_kwnames); if (capsule == NULL) { /* * TODO: This path should be deprecated in NumPy 2.1. Once deprecated @@ -563,7 +536,7 @@ from_dlpack(PyObject *NPY_UNUSED(self), /* max_version may be unsupported, try without kwargs */ PyErr_Clear(); capsule = PyObject_VectorcallMethod( - npy_ma_str___dlpack__, call_args, nargsf, NULL); + npy_interned_str.__dlpack__, call_args, nargsf, NULL); } if (capsule == NULL) { return NULL; @@ -601,7 +574,7 @@ from_dlpack(PyObject *NPY_UNUSED(self), return NULL; } dl_tensor = managed->dl_tensor; - readonly = 0; + readonly = 1; } const int ndim = dl_tensor.ndim; @@ -702,14 +675,13 @@ from_dlpack(PyObject *NPY_UNUSED(self), } PyObject *ret = PyArray_NewFromDescr(&PyArray_Type, descr, ndim, shape, - dl_tensor.strides != NULL ? strides : NULL, data, 0, NULL); + dl_tensor.strides != NULL ? strides : NULL, data, readonly ? 
0 : + NPY_ARRAY_WRITEABLE, NULL); + if (ret == NULL) { Py_DECREF(capsule); return NULL; } - if (readonly) { - PyArray_CLEARFLAGS((PyArrayObject *)ret, NPY_ARRAY_WRITEABLE); - } PyObject *new_capsule; if (versioned) { diff --git a/numpy/_core/src/multiarray/dragon4.c b/numpy/_core/src/multiarray/dragon4.c index 480b78bdbb32..8783ec71e4af 100644 --- a/numpy/_core/src/multiarray/dragon4.c +++ b/numpy/_core/src/multiarray/dragon4.c @@ -163,28 +163,7 @@ typedef struct { char repr[16384]; } Dragon4_Scratch; -static int _bigint_static_in_use = 0; -static Dragon4_Scratch _bigint_static; - -static Dragon4_Scratch* -get_dragon4_bigint_scratch(void) { - /* this test+set is not threadsafe, but no matter because we have GIL */ - if (_bigint_static_in_use) { - PyErr_SetString(PyExc_RuntimeError, - "numpy float printing code is not re-entrant. " - "Ping the devs to fix it."); - return NULL; - } - _bigint_static_in_use = 1; - - /* in this dummy implementation we only return the static allocation */ - return &_bigint_static; -} - -static void -free_dragon4_bigint_scratch(Dragon4_Scratch *mem){ - _bigint_static_in_use = 0; -} +static NPY_TLS Dragon4_Scratch _bigint_static; /* Copy integer */ static void @@ -1636,7 +1615,8 @@ typedef struct Dragon4_Options { * * See Dragon4_Options for description of remaining arguments. 
*/ -static npy_uint32 + +static npy_int32 FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, npy_int32 exponent, char signbit, npy_uint32 mantissaBit, npy_bool hasUnequalMargins, DigitMode digit_mode, @@ -1667,7 +1647,7 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, buffer[pos++] = '-'; has_sign = 1; } - + numDigits = Dragon4(mantissa, exponent, mantissaBit, hasUnequalMargins, digit_mode, cutoff_mode, precision, min_digits, buffer + has_sign, maxPrintLen - has_sign, @@ -1679,14 +1659,14 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, /* if output has a whole number */ if (printExponent >= 0) { /* leave the whole number at the start of the buffer */ - numWholeDigits = printExponent+1; + numWholeDigits = printExponent+1; if (numDigits <= numWholeDigits) { npy_int32 count = numWholeDigits - numDigits; pos += numDigits; - /* don't overflow the buffer */ - if (pos + count > maxPrintLen) { - count = maxPrintLen - pos; + if (count > maxPrintLen - pos) { + PyErr_SetString(PyExc_RuntimeError, "Float formatting result too large"); + return -1; } /* add trailing zeros up to the decimal point */ @@ -1788,9 +1768,12 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, pos < maxPrintLen) { /* add trailing zeros up to add_digits length */ /* compute the number of trailing zeros needed */ + npy_int32 count = desiredFractionalDigits - numFractionDigits; - if (pos + count > maxPrintLen) { - count = maxPrintLen - pos; + + if (count > maxPrintLen - pos) { + PyErr_SetString(PyExc_RuntimeError, "Float formatting result too large"); + return -1; } numFractionDigits += count; @@ -1823,7 +1806,7 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, } /* add any whitespace padding to right side */ - if (digits_right >= numFractionDigits) { + if (digits_right >= numFractionDigits) { npy_int32 count = digits_right - numFractionDigits; /* in trim_mode DptZeros, if right 
padding, add a space for the . */ @@ -1832,8 +1815,9 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, buffer[pos++] = ' '; } - if (pos + count > maxPrintLen) { - count = maxPrintLen - pos; + if (count > maxPrintLen - pos) { + PyErr_SetString(PyExc_RuntimeError, "Float formatting result too large"); + return -1; } for ( ; count > 0; count--) { @@ -1844,14 +1828,16 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, if (digits_left > numWholeDigits + has_sign) { npy_int32 shift = digits_left - (numWholeDigits + has_sign); npy_int32 count = pos; - - if (count + shift > maxPrintLen) { - count = maxPrintLen - shift; + + if (count > maxPrintLen - shift) { + PyErr_SetString(PyExc_RuntimeError, "Float formatting result too large"); + return -1; } if (count > 0) { memmove(buffer + shift, buffer, count); } + pos = shift + count; for ( ; shift > 0; shift--) { buffer[shift - 1] = ' '; @@ -1881,7 +1867,7 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, * * See Dragon4_Options for description of remaining arguments. */ -static npy_uint32 +static npy_int32 FormatScientific (char *buffer, npy_uint32 bufferSize, BigInt *mantissa, npy_int32 exponent, char signbit, npy_uint32 mantissaBit, npy_bool hasUnequalMargins, DigitMode digit_mode, @@ -2179,7 +2165,7 @@ PrintInfNan(char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa, * Helper function that takes Dragon4 parameters and options and * calls Dragon4. 
*/ -static npy_uint32 +static npy_int32 Format_floatbits(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, npy_int32 exponent, char signbit, npy_uint32 mantissaBit, npy_bool hasUnequalMargins, Dragon4_Options *opt) @@ -2208,13 +2194,13 @@ Format_floatbits(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, * exponent: 5 bits * mantissa: 10 bits */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_IEEE_binary16( - Dragon4_Scratch *scratch, npy_half *value, Dragon4_Options *opt) + npy_half *value, Dragon4_Options *opt) { - char *buffer = scratch->repr; - const npy_uint32 bufferSize = sizeof(scratch->repr); - BigInt *bigints = scratch->bigints; + char *buffer = _bigint_static.repr; + const npy_uint32 bufferSize = sizeof(_bigint_static.repr); + BigInt *bigints = _bigint_static.bigints; npy_uint16 val = *value; npy_uint32 floatExponent, floatMantissa, floatSign; @@ -2295,14 +2281,14 @@ Dragon4_PrintFloat_IEEE_binary16( * exponent: 8 bits * mantissa: 23 bits */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_IEEE_binary32( - Dragon4_Scratch *scratch, npy_float32 *value, + npy_float32 *value, Dragon4_Options *opt) { - char *buffer = scratch->repr; - const npy_uint32 bufferSize = sizeof(scratch->repr); - BigInt *bigints = scratch->bigints; + char *buffer = _bigint_static.repr; + const npy_uint32 bufferSize = sizeof(_bigint_static.repr); + BigInt *bigints = _bigint_static.bigints; union { @@ -2388,13 +2374,13 @@ Dragon4_PrintFloat_IEEE_binary32( * exponent: 11 bits * mantissa: 52 bits */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_IEEE_binary64( - Dragon4_Scratch *scratch, npy_float64 *value, Dragon4_Options *opt) + npy_float64 *value, Dragon4_Options *opt) { - char *buffer = scratch->repr; - const npy_uint32 bufferSize = sizeof(scratch->repr); - BigInt *bigints = scratch->bigints; + char *buffer = _bigint_static.repr; + const npy_uint32 bufferSize = sizeof(_bigint_static.repr); + BigInt *bigints = _bigint_static.bigints; union { @@ 
-2503,13 +2489,13 @@ typedef struct FloatVal128 { * intbit 1 bit, first u64 * mantissa: 63 bits, first u64 */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_Intel_extended( - Dragon4_Scratch *scratch, FloatVal128 value, Dragon4_Options *opt) + FloatVal128 value, Dragon4_Options *opt) { - char *buffer = scratch->repr; - const npy_uint32 bufferSize = sizeof(scratch->repr); - BigInt *bigints = scratch->bigints; + char *buffer = _bigint_static.repr; + const npy_uint32 bufferSize = sizeof(_bigint_static.repr); + BigInt *bigints = _bigint_static.bigints; npy_uint32 floatExponent, floatSign; npy_uint64 floatMantissa; @@ -2601,9 +2587,9 @@ Dragon4_PrintFloat_Intel_extended( * system. But numpy defines NPY_FLOAT80, so if we come across it, assume it is * an Intel extended format. */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_Intel_extended80( - Dragon4_Scratch *scratch, npy_float80 *value, Dragon4_Options *opt) + npy_float80 *value, Dragon4_Options *opt) { FloatVal128 val128; union { @@ -2619,15 +2605,15 @@ Dragon4_PrintFloat_Intel_extended80( val128.lo = buf80.integer.a; val128.hi = buf80.integer.b; - return Dragon4_PrintFloat_Intel_extended(scratch, val128, opt); + return Dragon4_PrintFloat_Intel_extended(val128, opt); } #endif /* HAVE_LDOUBLE_INTEL_EXTENDED_10_BYTES_LE */ #ifdef HAVE_LDOUBLE_INTEL_EXTENDED_12_BYTES_LE /* Intel's 80-bit IEEE extended precision format, 96-bit storage */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_Intel_extended96( - Dragon4_Scratch *scratch, npy_float96 *value, Dragon4_Options *opt) + npy_float96 *value, Dragon4_Options *opt) { FloatVal128 val128; union { @@ -2643,15 +2629,15 @@ Dragon4_PrintFloat_Intel_extended96( val128.lo = buf96.integer.a; val128.hi = buf96.integer.b; - return Dragon4_PrintFloat_Intel_extended(scratch, val128, opt); + return Dragon4_PrintFloat_Intel_extended(val128, opt); } #endif /* HAVE_LDOUBLE_INTEL_EXTENDED_12_BYTES_LE */ #ifdef HAVE_LDOUBLE_MOTOROLA_EXTENDED_12_BYTES_BE /* 
Motorola Big-endian equivalent of the Intel-extended 96 fp format */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_Motorola_extended96( - Dragon4_Scratch *scratch, npy_float96 *value, Dragon4_Options *opt) + npy_float96 *value, Dragon4_Options *opt) { FloatVal128 val128; union { @@ -2668,7 +2654,7 @@ Dragon4_PrintFloat_Motorola_extended96( val128.hi = buf96.integer.a >> 16; /* once again we assume the int has same endianness as the float */ - return Dragon4_PrintFloat_Intel_extended(scratch, val128, opt); + return Dragon4_PrintFloat_Intel_extended(val128, opt); } #endif /* HAVE_LDOUBLE_MOTOROLA_EXTENDED_12_BYTES_BE */ @@ -2686,9 +2672,9 @@ typedef union FloatUnion128 #ifdef HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE /* Intel's 80-bit IEEE extended precision format, 128-bit storage */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_Intel_extended128( - Dragon4_Scratch *scratch, npy_float128 *value, Dragon4_Options *opt) + npy_float128 *value, Dragon4_Options *opt) { FloatVal128 val128; FloatUnion128 buf128; @@ -2698,7 +2684,7 @@ Dragon4_PrintFloat_Intel_extended128( val128.lo = buf128.integer.a; val128.hi = buf128.integer.b; - return Dragon4_PrintFloat_Intel_extended(scratch, val128, opt); + return Dragon4_PrintFloat_Intel_extended(val128, opt); } #endif /* HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE */ @@ -2715,13 +2701,13 @@ Dragon4_PrintFloat_Intel_extended128( * I am not sure if the arch also supports uint128, and C does not seem to * support int128 literals. So we use uint64 to do manipulation. 
*/ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_IEEE_binary128( - Dragon4_Scratch *scratch, FloatVal128 val128, Dragon4_Options *opt) + FloatVal128 val128, Dragon4_Options *opt) { - char *buffer = scratch->repr; - const npy_uint32 bufferSize = sizeof(scratch->repr); - BigInt *bigints = scratch->bigints; + char *buffer = _bigint_static.repr; + const npy_uint32 bufferSize = sizeof(_bigint_static.repr); + BigInt *bigints = _bigint_static.bigints; npy_uint32 floatExponent, floatSign; @@ -2800,9 +2786,9 @@ Dragon4_PrintFloat_IEEE_binary128( } #if defined(HAVE_LDOUBLE_IEEE_QUAD_LE) -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_IEEE_binary128_le( - Dragon4_Scratch *scratch, npy_float128 *value, Dragon4_Options *opt) + npy_float128 *value, Dragon4_Options *opt) { FloatVal128 val128; FloatUnion128 buf128; @@ -2811,7 +2797,7 @@ Dragon4_PrintFloat_IEEE_binary128_le( val128.lo = buf128.integer.a; val128.hi = buf128.integer.b; - return Dragon4_PrintFloat_IEEE_binary128(scratch, val128, opt); + return Dragon4_PrintFloat_IEEE_binary128(val128, opt); } #endif /* HAVE_LDOUBLE_IEEE_QUAD_LE */ @@ -2820,9 +2806,9 @@ Dragon4_PrintFloat_IEEE_binary128_le( * This function is untested, very few, if any, architectures implement * big endian IEEE binary128 floating point. 
*/ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_IEEE_binary128_be( - Dragon4_Scratch *scratch, npy_float128 *value, Dragon4_Options *opt) + npy_float128 *value, Dragon4_Options *opt) { FloatVal128 val128; FloatUnion128 buf128; @@ -2831,7 +2817,7 @@ Dragon4_PrintFloat_IEEE_binary128_be( val128.lo = buf128.integer.b; val128.hi = buf128.integer.a; - return Dragon4_PrintFloat_IEEE_binary128(scratch, val128, opt); + return Dragon4_PrintFloat_IEEE_binary128(val128, opt); } #endif /* HAVE_LDOUBLE_IEEE_QUAD_BE */ @@ -2875,13 +2861,13 @@ Dragon4_PrintFloat_IEEE_binary128_be( * https://gcc.gnu.org/wiki/Ieee128PowerPCA * https://www.ibm.com/support/knowledgecenter/en/ssw_aix_71/com.ibm.aix.genprogc/128bit_long_double_floating-point_datatype.htm */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_IBM_double_double( - Dragon4_Scratch *scratch, npy_float128 *value, Dragon4_Options *opt) + npy_float128 *value, Dragon4_Options *opt) { - char *buffer = scratch->repr; - const npy_uint32 bufferSize = sizeof(scratch->repr); - BigInt *bigints = scratch->bigints; + char *buffer = _bigint_static.repr; + const npy_uint32 bufferSize = sizeof(_bigint_static.repr); + BigInt *bigints = _bigint_static.bigints; FloatVal128 val128; FloatUnion128 buf128; @@ -3062,22 +3048,17 @@ Dragon4_PrintFloat_IBM_double_double( * which goes up to about 10^4932. The Dragon4_scratch struct provides a string * buffer of this size. 
*/ + #define make_dragon4_typefuncs_inner(Type, npy_type, format) \ \ PyObject *\ Dragon4_Positional_##Type##_opt(npy_type *val, Dragon4_Options *opt)\ {\ PyObject *ret;\ - Dragon4_Scratch *scratch = get_dragon4_bigint_scratch();\ - if (scratch == NULL) {\ - return NULL;\ - }\ - if (Dragon4_PrintFloat_##format(scratch, val, opt) < 0) {\ - free_dragon4_bigint_scratch(scratch);\ + if (Dragon4_PrintFloat_##format(val, opt) < 0) {\ return NULL;\ }\ - ret = PyUnicode_FromString(scratch->repr);\ - free_dragon4_bigint_scratch(scratch);\ + ret = PyUnicode_FromString(_bigint_static.repr);\ return ret;\ }\ \ @@ -3106,16 +3087,10 @@ PyObject *\ Dragon4_Scientific_##Type##_opt(npy_type *val, Dragon4_Options *opt)\ {\ PyObject *ret;\ - Dragon4_Scratch *scratch = get_dragon4_bigint_scratch();\ - if (scratch == NULL) {\ - return NULL;\ - }\ - if (Dragon4_PrintFloat_##format(scratch, val, opt) < 0) {\ - free_dragon4_bigint_scratch(scratch);\ + if (Dragon4_PrintFloat_##format(val, opt) < 0) { \ return NULL;\ }\ - ret = PyUnicode_FromString(scratch->repr);\ - free_dragon4_bigint_scratch(scratch);\ + ret = PyUnicode_FromString(_bigint_static.repr);\ return ret;\ }\ PyObject *\ diff --git a/numpy/_core/src/multiarray/dragon4_LICENSE.txt b/numpy/_core/src/multiarray/dragon4_LICENSE.txt new file mode 100644 index 000000000000..7bd49e7074a8 --- /dev/null +++ b/numpy/_core/src/multiarray/dragon4_LICENSE.txt @@ -0,0 +1,27 @@ +Copyright (c) 2014 Ryan Juckett + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or 
substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. + +dragon4.c|h h contains a modified version of Ryan Juckett's Dragon4 +implementation, obtained from https://www.ryanjuckett.com, +which has been ported from C++ to C and which has +modifications specific to printing floats in numpy. + +Ryan Juckett's original code was under the Zlib license; he gave numpy +permission to include it under the MIT license instead. diff --git a/numpy/_core/src/multiarray/dtype_transfer.c b/numpy/_core/src/multiarray/dtype_transfer.c index d7a5e80800b6..dbad10842aff 100644 --- a/numpy/_core/src/multiarray/dtype_transfer.c +++ b/numpy/_core/src/multiarray/dtype_transfer.c @@ -235,8 +235,8 @@ any_to_object_get_loop( NpyAuxData **out_transferdata, NPY_ARRAYMETHOD_FLAGS *flags) { - - *flags = NPY_METH_REQUIRES_PYAPI; /* No need for floating point errors */ + /* Python API doesn't use FPEs and this also attempts to hide spurious ones. */ + *flags = NPY_METH_REQUIRES_PYAPI | NPY_METH_NO_FLOATINGPOINT_ERRORS; *out_loop = _strided_to_strided_any_to_object; *out_transferdata = PyMem_Malloc(sizeof(_any_to_object_auxdata)); @@ -342,7 +342,8 @@ object_to_any_get_loop( NpyAuxData **out_transferdata, NPY_ARRAYMETHOD_FLAGS *flags) { - *flags = NPY_METH_REQUIRES_PYAPI; + /* Python API doesn't use FPEs and this also attempts to hide spurious ones. 
*/ + *flags = NPY_METH_REQUIRES_PYAPI | NPY_METH_NO_FLOATINGPOINT_ERRORS; /* NOTE: auxdata is only really necessary to flag `move_references` */ _object_to_any_auxdata *data = PyMem_Malloc(sizeof(*data)); @@ -2318,7 +2319,7 @@ get_fields_transfer_function(int NPY_UNUSED(aligned), *out_flags = PyArrayMethod_MINIMAL_FLAGS; for (i = 0; i < field_count; ++i) { key = PyTuple_GET_ITEM(PyDataType_NAMES(dst_dtype), i); - tup = PyDict_GetItem(PyDataType_FIELDS(dst_dtype), key); + tup = PyDict_GetItem(PyDataType_FIELDS(dst_dtype), key); // noqa: borrowed-ref OK if (!PyArg_ParseTuple(tup, "Oi|O", &dst_fld_dtype, &dst_offset, &title)) { PyMem_Free(data); @@ -2382,7 +2383,7 @@ get_fields_transfer_function(int NPY_UNUSED(aligned), NPY_traverse_info_init(&data->decref_src); key = PyTuple_GET_ITEM(PyDataType_NAMES(src_dtype), 0); - tup = PyDict_GetItem(PyDataType_FIELDS(src_dtype), key); + tup = PyDict_GetItem(PyDataType_FIELDS(src_dtype), key); // noqa: borrowed-ref OK if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype, &src_offset, &title)) { PyMem_Free(data); @@ -2434,14 +2435,14 @@ get_fields_transfer_function(int NPY_UNUSED(aligned), /* set up the transfer function for each field */ for (i = 0; i < field_count; ++i) { key = PyTuple_GET_ITEM(PyDataType_NAMES(dst_dtype), i); - tup = PyDict_GetItem(PyDataType_FIELDS(dst_dtype), key); + tup = PyDict_GetItem(PyDataType_FIELDS(dst_dtype), key); // noqa: borrowed-ref OK if (!PyArg_ParseTuple(tup, "Oi|O", &dst_fld_dtype, &dst_offset, &title)) { NPY_AUXDATA_FREE((NpyAuxData *)data); return NPY_FAIL; } key = PyTuple_GET_ITEM(PyDataType_NAMES(src_dtype), i); - tup = PyDict_GetItem(PyDataType_FIELDS(src_dtype), key); + tup = PyDict_GetItem(PyDataType_FIELDS(src_dtype), key); // noqa: borrowed-ref OK if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype, &src_offset, &title)) { NPY_AUXDATA_FREE((NpyAuxData *)data); @@ -2909,8 +2910,6 @@ _clear_cast_info_after_get_loop_failure(NPY_cast_info *cast_info) * TODO: Expand the view functionality for 
general offsets, not just 0: * Partial casts could be skipped also for `view_offset != 0`. * - * The `out_needs_api` flag must be initialized. - * * NOTE: In theory casting errors here could be slightly misleading in case * of a multi-step casting scenario. It should be possible to improve * this in the future. @@ -3427,11 +3426,13 @@ PyArray_CastRawArrays(npy_intp count, /* Cast */ char *args[2] = {src, dst}; npy_intp strides[2] = {src_stride, dst_stride}; - cast_info.func(&cast_info.context, args, &count, strides, cast_info.auxdata); + int result = cast_info.func(&cast_info.context, args, &count, strides, cast_info.auxdata); /* Cleanup */ NPY_cast_info_xfree(&cast_info); - + if (result < 0) { + return NPY_FAIL; + } if (flags & NPY_METH_REQUIRES_PYAPI && PyErr_Occurred()) { return NPY_FAIL; } diff --git a/numpy/_core/src/multiarray/dtype_transfer.h b/numpy/_core/src/multiarray/dtype_transfer.h index 04df5cb64c22..a354820e5d45 100644 --- a/numpy/_core/src/multiarray/dtype_transfer.h +++ b/numpy/_core/src/multiarray/dtype_transfer.h @@ -25,6 +25,15 @@ typedef struct { } NPY_cast_info; +static inline void +NPY_context_init(PyArrayMethod_Context *context, PyArray_Descr *descr[2]) +{ + context->descriptors = descr; + context->caller = NULL; + context->_reserved = NULL; + context->flags = 0; +} + /* * Create a new cast-info struct with cast_info->context.descriptors linked. * Compilers should inline this to ensure the whole struct is not actually @@ -40,13 +49,9 @@ NPY_cast_info_init(NPY_cast_info *cast_info) * a scratch space to `NPY_cast_info` and link to that instead. */ cast_info->auxdata = NULL; - cast_info->context.descriptors = cast_info->descriptors; - - // TODO: Delete this again probably maybe create a new minimal init macro - cast_info->context.caller = NULL; + NPY_context_init(&(cast_info->context), cast_info->descriptors); } - /* * Free's all references and data held inside the struct (not the struct). 
* First checks whether `cast_info.func == NULL`, and assume it is @@ -100,6 +105,7 @@ NPY_cast_info_copy(NPY_cast_info *cast_info, NPY_cast_info *original) Py_XINCREF(cast_info->descriptors[1]); cast_info->context.caller = original->context.caller; Py_XINCREF(cast_info->context.caller); + cast_info->context.flags = original->context.flags; cast_info->context.method = original->context.method; Py_XINCREF(cast_info->context.method); if (original->auxdata == NULL) { diff --git a/numpy/_core/src/multiarray/dtype_traversal.c b/numpy/_core/src/multiarray/dtype_traversal.c index 91b1889b7d1f..e86aab7411d4 100644 --- a/numpy/_core/src/multiarray/dtype_traversal.c +++ b/numpy/_core/src/multiarray/dtype_traversal.c @@ -346,7 +346,7 @@ get_fields_traverse_function( int offset; key = PyTuple_GET_ITEM(names, i); - tup = PyDict_GetItem(dtype->fields, key); + tup = PyDict_GetItem(dtype->fields, key); // noqa: borrowed-ref OK if (!PyArg_ParseTuple(tup, "Oi|O", &fld_dtype, &offset, &title)) { NPY_AUXDATA_FREE((NpyAuxData *)data); return -1; diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index 062243aa1402..c8fb6a1c8490 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -26,6 +26,8 @@ #include "templ_common.h" #include "refcount.h" #include "dtype_traversal.h" +#include "npy_static_data.h" +#include "multiarraymodule.h" #include @@ -94,7 +96,7 @@ use_new_as_default(PyArray_DTypeMeta *self) return NULL; } /* - * Lets not trust that the DType is implemented correctly + * Let's not trust that the DType is implemented correctly * TODO: Should probably do an exact type-check (at least unless this is * an abstract DType). */ @@ -116,6 +118,31 @@ use_new_as_default(PyArray_DTypeMeta *self) } +/* + * By default fill in zero, one, and negative one via the Python casts, + * users should override this, but this allows us to use it for legacy user dtypes. 
+ */ +static int +default_get_constant(PyArray_Descr *descr, int constant_id, void *data) +{ + return 0; +} + + +static int +legacy_fallback_setitem(PyArray_Descr *descr, PyObject *value, char *data) +{ + PyArrayObject_fields arr_fields = { + .flags = NPY_ARRAY_WRITEABLE, /* assume array is not behaved. */ + .descr = descr, + }; + Py_SET_TYPE(&arr_fields, &PyArray_Type); + Py_SET_REFCNT(&arr_fields, 1); + + return PyDataType_GetArrFuncs(descr)->setitem(value, data, &arr_fields); +} + + static int legacy_setitem_using_DType(PyObject *obj, void *data, void *arr) { @@ -125,9 +152,7 @@ legacy_setitem_using_DType(PyObject *obj, void *data, void *arr) "supported for basic NumPy DTypes."); return -1; } - PyArrayDTypeMeta_SetItem *setitem; - setitem = NPY_DT_SLOTS(NPY_DTYPE(PyArray_DESCR(arr)))->setitem; - return setitem(PyArray_DESCR(arr), obj, data); + return NPY_DT_CALL_setitem(PyArray_DESCR(arr), obj, data); } @@ -157,9 +182,8 @@ PyArray_ArrFuncs default_funcs = { /* * Internal version of PyArrayInitDTypeMeta_FromSpec. * - * See the documentation of that function for more details. Does not do any - * error checking. - + * See the documentation of that function for more details. + * * Setting priv to a nonzero value indicates that a dtypemeta is being * initialized from inside NumPy, otherwise this function is being called by * the public implementation. @@ -194,6 +218,7 @@ dtypemeta_initialize_struct_from_spec( NPY_DT_SLOTS(DType)->get_clear_loop = NULL; NPY_DT_SLOTS(DType)->get_fill_zero_loop = NULL; NPY_DT_SLOTS(DType)->finalize_descr = NULL; + NPY_DT_SLOTS(DType)->get_constant = default_get_constant; NPY_DT_SLOTS(DType)->f = default_funcs; PyType_Slot *spec_slot = spec->slots; @@ -373,7 +398,7 @@ dtypemeta_initialize_struct_from_spec( * if the Py_TPFLAGS_HEAPTYPE flag is set (they are created from Python). * They are not for legacy DTypes or np.dtype itself. 
* - * @param self + * @param dtype_class Pointer to the Python type object * @return nonzero if the object is garbage collected */ static inline int @@ -493,12 +518,14 @@ string_discover_descr_from_pyobject( itemsize = PyUnicode_GetLength(obj); } if (itemsize != -1) { - if (cls->type_num == NPY_UNICODE) { - itemsize *= 4; - } - if (itemsize > NPY_MAX_INT) { + if (itemsize > NPY_MAX_INT || ( + cls->type_num == NPY_UNICODE && itemsize > NPY_MAX_INT / 4)) { PyErr_SetString(PyExc_TypeError, "string too large to store inside array."); + return NULL; + } + if (cls->type_num == NPY_UNICODE) { + itemsize *= 4; } PyArray_Descr *res = PyArray_DescrNewFromType(cls->type_num); if (res == NULL) { @@ -690,7 +717,7 @@ void_ensure_canonical(_PyArray_LegacyDescr *self) int maxalign = 1; for (Py_ssize_t i = 0; i < field_num; i++) { PyObject *name = PyTuple_GET_ITEM(self->names, i); - PyObject *tuple = PyDict_GetItem(self->fields, name); + PyObject *tuple = PyDict_GetItem(self->fields, name); // noqa: borrowed-ref OK PyObject *new_tuple = PyTuple_New(PyTuple_GET_SIZE(tuple)); PyArray_Descr *field_descr = NPY_DT_CALL_ensure_canonical( (PyArray_Descr *)PyTuple_GET_ITEM(tuple, 0)); @@ -752,7 +779,7 @@ void_common_instance(_PyArray_LegacyDescr *descr1, _PyArray_LegacyDescr *descr2) if (descr1->subarray == NULL && descr1->names == NULL && descr2->subarray == NULL && descr2->names == NULL) { if (descr1->elsize != descr2->elsize) { - PyErr_SetString(npy_DTypePromotionError, + PyErr_SetString(npy_static_pydata.DTypePromotionError, "Invalid type promotion with void datatypes of different " "lengths. 
Use the `np.bytes_` datatype instead to pad the " "shorter value with trailing zero bytes."); @@ -764,13 +791,13 @@ void_common_instance(_PyArray_LegacyDescr *descr1, _PyArray_LegacyDescr *descr2) if (descr1->names != NULL && descr2->names != NULL) { /* If both have fields promoting individual fields may be possible */ - static PyObject *promote_fields_func = NULL; - npy_cache_import("numpy._core._internal", "_promote_fields", - &promote_fields_func); - if (promote_fields_func == NULL) { + if (npy_cache_import_runtime( + "numpy._core._internal", "_promote_fields", + &npy_runtime_imports._promote_fields) == -1) { return NULL; } - PyObject *result = PyObject_CallFunctionObjArgs(promote_fields_func, + PyObject *result = PyObject_CallFunctionObjArgs( + npy_runtime_imports._promote_fields, descr1, descr2, NULL); if (result == NULL) { return NULL; @@ -791,7 +818,7 @@ void_common_instance(_PyArray_LegacyDescr *descr1, _PyArray_LegacyDescr *descr2) return NULL; } if (!cmp) { - PyErr_SetString(npy_DTypePromotionError, + PyErr_SetString(npy_static_pydata.DTypePromotionError, "invalid type promotion with subarray datatypes " "(shape mismatch)."); return NULL; @@ -821,7 +848,7 @@ void_common_instance(_PyArray_LegacyDescr *descr1, _PyArray_LegacyDescr *descr2) return new_descr; } - PyErr_SetString(npy_DTypePromotionError, + PyErr_SetString(npy_static_pydata.DTypePromotionError, "invalid type promotion with structured datatype(s)."); return NULL; } @@ -1065,9 +1092,9 @@ object_common_dtype( * Some may have more aliases, as `intp` is not its own thing, * as of writing this, these are not added here. * - * @returns 0 on success, -1 on failure. + * @returns A borrowed references to the new DType or NULL. 
*/ -NPY_NO_EXPORT int +NPY_NO_EXPORT PyArray_DTypeMeta * dtypemeta_wrap_legacy_descriptor( _PyArray_LegacyDescr *descr, PyArray_ArrFuncs *arr_funcs, PyTypeObject *dtype_super_class, const char *name, const char *alias) @@ -1094,19 +1121,20 @@ dtypemeta_wrap_legacy_descriptor( "that of an existing dtype (with the assumption it is just " "copied over and can be replaced).", descr->typeobj, Py_TYPE(descr)); - return -1; + return NULL; } NPY_DType_Slots *dt_slots = PyMem_Malloc(sizeof(NPY_DType_Slots)); if (dt_slots == NULL) { - return -1; + return NULL; } memset(dt_slots, '\0', sizeof(NPY_DType_Slots)); + dt_slots->get_constant = default_get_constant; PyArray_DTypeMeta *dtype_class = PyMem_Malloc(sizeof(PyArray_DTypeMeta)); if (dtype_class == NULL) { PyMem_Free(dt_slots); - return -1; + return NULL; } /* @@ -1114,7 +1142,7 @@ dtypemeta_wrap_legacy_descriptor( * a prototype instances for everything except our own fields which * vary between the DTypes. * In particular any Object initialization must be strictly copied from - * the untouched prototype to avoid complexities (e.g. with PyPy). + * the untouched prototype to avoid complexities. * Any Type slots need to be fixed before PyType_Ready, although most * will be inherited automatically there. 
*/ @@ -1126,12 +1154,7 @@ dtypemeta_wrap_legacy_descriptor( .tp_flags = Py_TPFLAGS_DEFAULT, .tp_base = NULL, /* set below */ .tp_new = (newfunc)legacy_dtype_default_new, - .tp_doc = ( - "DType class corresponding to the scalar type and dtype of " - "the same name.\n\n" - "Please see `numpy.dtype` for the typical way to create\n" - "dtype instances and :ref:`arrays.dtypes` for additional\n" - "information."), + .tp_doc = NULL, /* set in python */ },}, .flags = NPY_DT_LEGACY, /* Further fields are not common between DTypes */ @@ -1145,12 +1168,12 @@ dtypemeta_wrap_legacy_descriptor( /* Let python finish the initialization */ if (PyType_Ready((PyTypeObject *)dtype_class) < 0) { Py_DECREF(dtype_class); - return -1; + return NULL; } dt_slots->castingimpls = PyDict_New(); if (dt_slots->castingimpls == NULL) { Py_DECREF(dtype_class); - return -1; + return NULL; } /* @@ -1166,13 +1189,20 @@ dtypemeta_wrap_legacy_descriptor( /* Set default functions (correct for most dtypes, override below) */ dt_slots->default_descr = nonparametric_default_descr; dt_slots->discover_descr_from_pyobject = ( - nonparametric_discover_descr_from_pyobject); + nonparametric_discover_descr_from_pyobject); dt_slots->is_known_scalar_type = python_builtins_are_known_scalar_types; dt_slots->common_dtype = default_builtin_common_dtype; dt_slots->common_instance = NULL; dt_slots->ensure_canonical = ensure_native_byteorder; dt_slots->get_fill_zero_loop = NULL; dt_slots->finalize_descr = NULL; + // May be overwritten, but if not provide fallback via array struct hack. + // `getitem` is a trickier because of structured dtypes returning views. + if (dt_slots->f.setitem == NULL) { + dt_slots->f.setitem = legacy_setitem_using_DType; + } + dt_slots->setitem = legacy_fallback_setitem; + dt_slots->getitem = NULL; if (PyTypeNum_ISSIGNED(dtype_class->type_num)) { /* Convert our scalars (raise on too large unsigned and NaN, etc.) 
*/ @@ -1230,7 +1260,7 @@ dtypemeta_wrap_legacy_descriptor( if (_PyArray_MapPyTypeToDType(dtype_class, descr->typeobj, PyTypeNum_ISUSERDEF(dtype_class->type_num)) < 0) { Py_DECREF(dtype_class); - return -1; + return NULL; } /* Finally, replace the current class of the descr */ @@ -1238,40 +1268,45 @@ dtypemeta_wrap_legacy_descriptor( /* And it to the types submodule if it is a builtin dtype */ if (!PyTypeNum_ISUSERDEF(descr->type_num)) { - static PyObject *add_dtype_helper = NULL; - npy_cache_import("numpy.dtypes", "_add_dtype_helper", &add_dtype_helper); - if (add_dtype_helper == NULL) { - return -1; + if (npy_cache_import_runtime("numpy.dtypes", "_add_dtype_helper", + &npy_runtime_imports._add_dtype_helper) == -1) { + return NULL; } if (PyObject_CallFunction( - add_dtype_helper, + npy_runtime_imports._add_dtype_helper, "Os", (PyObject *)dtype_class, alias) == NULL) { - return -1; + return NULL; + } + } + else { + // ensure the within dtype cast is populated for legacy user dtypes + if (PyArray_GetCastingImpl(dtype_class, dtype_class) == NULL) { + return NULL; } } - return 0; + return dtype_class; } static PyObject * -dtypemeta_get_abstract(PyArray_DTypeMeta *self) { +dtypemeta_get_abstract(PyArray_DTypeMeta *self, void *NPY_UNUSED(ignored)) { return PyBool_FromLong(NPY_DT_is_abstract(self)); } static PyObject * -dtypemeta_get_legacy(PyArray_DTypeMeta *self) { +dtypemeta_get_legacy(PyArray_DTypeMeta *self, void *NPY_UNUSED(ignored)) { return PyBool_FromLong(NPY_DT_is_legacy(self)); } static PyObject * -dtypemeta_get_parametric(PyArray_DTypeMeta *self) { +dtypemeta_get_parametric(PyArray_DTypeMeta *self, void *NPY_UNUSED(ignored)) { return PyBool_FromLong(NPY_DT_is_parametric(self)); } static PyObject * -dtypemeta_get_is_numeric(PyArray_DTypeMeta *self) { +dtypemeta_get_is_numeric(PyArray_DTypeMeta *self, void *NPY_UNUSED(ignored)) { return PyBool_FromLong(NPY_DT_is_numeric(self)); } @@ -1390,7 +1425,7 @@ PyArray_DTypeMeta *_Void_dtype = NULL; /*NUMPY_API - * Fetch 
the ArrFuncs struct which new lives on the DType and not the + * Fetch the ArrFuncs struct which now lives on the DType and not the * descriptor. Use of this struct should be avoided but remains necessary * for certain functionality. * @@ -1401,7 +1436,7 @@ PyArray_DTypeMeta *_Void_dtype = NULL; * This function is exposed with an underscore "privately" because the * public version is a static inline function which only calls the function * on 2.x but directly accesses the `descr` struct on 1.x. - * Once 1.x backwards compatibility is gone, it shoudl be exported without + * Once 1.x backwards compatibility is gone, it should be exported without * the underscore directly. * Internally, we define a private inline function `PyDataType_GetArrFuncs` * for convenience as we are allowed to access the `DType` slots directly. diff --git a/numpy/_core/src/multiarray/dtypemeta.h b/numpy/_core/src/multiarray/dtypemeta.h index 344b440b38e8..bf0acb48b899 100644 --- a/numpy/_core/src/multiarray/dtypemeta.h +++ b/numpy/_core/src/multiarray/dtypemeta.h @@ -67,6 +67,11 @@ typedef struct { * parameters, if any, as the operand dtype. */ PyArrayDTypeMeta_FinalizeDescriptor *finalize_descr; + /* + * Function to fetch constants. Always defined, but may return "undefined" + * for all values. + */ + PyArrayDTypeMeta_GetConstant *get_constant; /* * The casting implementation (ArrayMethod) to convert between two * instances of this DType, stored explicitly for fast access: @@ -80,16 +85,22 @@ typedef struct { PyObject *castingimpls; /* - * Storage for `descr->f`, since we may need to allow some customizatoin + * Storage for `descr->f`, since we may need to allow some customization * here at least in a transition period and we need to set it on every * dtype instance for backward compatibility. (Keep this at end) */ PyArray_ArrFuncs f; + + /* + * Hidden slots for the sort and argsort arraymethods. 
+ */ + PyArrayMethodObject *sort_meth; + PyArrayMethodObject *argsort_meth; } NPY_DType_Slots; // This must be updated if new slots before within_dtype_castingimpl // are added -#define NPY_NUM_DTYPE_SLOTS 11 +#define NPY_NUM_DTYPE_SLOTS 12 #define NPY_NUM_DTYPE_PYARRAY_ARRFUNCS_SLOTS 22 #define NPY_DT_MAX_ARRFUNCS_SLOT \ NPY_NUM_DTYPE_PYARRAY_ARRFUNCS_SLOTS + _NPY_DT_ARRFUNCS_OFFSET @@ -124,6 +135,8 @@ typedef struct { NPY_DT_SLOTS(NPY_DTYPE(descr))->getitem(descr, data_ptr) #define NPY_DT_CALL_setitem(descr, value, data_ptr) \ NPY_DT_SLOTS(NPY_DTYPE(descr))->setitem(descr, value, data_ptr) +#define NPY_DT_CALL_get_constant(descr, constant_id, data_ptr) \ + NPY_DT_SLOTS(NPY_DTYPE(descr))->get_constant(descr, constant_id, data_ptr) /* @@ -153,7 +166,7 @@ NPY_NO_EXPORT int python_builtins_are_known_scalar_types( PyArray_DTypeMeta *cls, PyTypeObject *pytype); -NPY_NO_EXPORT int +NPY_NO_EXPORT PyArray_DTypeMeta * dtypemeta_wrap_legacy_descriptor( _PyArray_LegacyDescr *descr, PyArray_ArrFuncs *arr_funcs, PyTypeObject *dtype_super_class, const char *name, const char *alias); @@ -281,10 +294,23 @@ PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr) static inline int PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v) { - return PyDataType_GetArrFuncs(((PyArrayObject_fields *)arr)->descr)->setitem( - v, itemptr, arr); + return NPY_DT_CALL_setitem(PyArray_DESCR(arr), v, itemptr); } +// Like PyArray_DESCR_REPLACE, but calls ensure_canonical instead of DescrNew +#define PyArray_DESCR_REPLACE_CANONICAL(descr) do { \ + PyArray_Descr *_new_ = NPY_DT_CALL_ensure_canonical(descr); \ + Py_XSETREF(descr, _new_); \ + } while(0) + + +// Get the pointer to the PyArray_DTypeMeta for the type associated with the typenum. 
+static inline PyArray_DTypeMeta * +typenum_to_dtypemeta(enum NPY_TYPES typenum) { + PyArray_Descr * descr = PyArray_DescrFromType(typenum); + Py_DECREF(descr); + return NPY_DTYPE(descr); +} #endif /* NUMPY_CORE_SRC_MULTIARRAY_DTYPEMETA_H_ */ diff --git a/numpy/_core/src/multiarray/einsum.c.src b/numpy/_core/src/multiarray/einsum.cpp similarity index 97% rename from numpy/_core/src/multiarray/einsum.c.src rename to numpy/_core/src/multiarray/einsum.cpp index 81d3f3e1d79b..f12cec7824d7 100644 --- a/numpy/_core/src/multiarray/einsum.c.src +++ b/numpy/_core/src/multiarray/einsum.cpp @@ -20,14 +20,14 @@ #include //PyArray_AssignRawScalar #include - +extern "C" { #include "convert.h" #include "common.h" #include "ctors.h" #include "einsum_sumprod.h" #include "einsum_debug.h" - +} /* * Parses the subscripts for one operand into an output of 'ndim' @@ -40,7 +40,6 @@ * - subscripts="abbcbc", ndim=6 -> op_labels=[97, 98, -1, 99, -3, -2] * - subscripts="ab...bc", ndim=6 -> op_labels=[97, 98, 0, 0, -3, 99] */ - static int parse_operand_subscripts(char *subscripts, int length, int ndim, int iop, char *op_labels, @@ -131,13 +130,13 @@ parse_operand_subscripts(char *subscripts, int length, /* If it is a proper label, find any duplicates of it. */ if (label > 0) { /* Search for the next matching label. */ - char *next = memchr(op_labels + idim + 1, label, ndim - idim - 1); + char *next = (char*)memchr(op_labels + idim + 1, label, ndim - idim - 1); while (next != NULL) { /* The offset from next to op_labels[idim] (negative). */ *next = (char)((op_labels + idim) - next); /* Search for the next matching label. */ - next = memchr(next + 1, label, op_labels + ndim - 1 - next); + next = (char*)memchr(next + 1, label, op_labels + ndim - 1 - next); } } } @@ -322,7 +321,7 @@ get_single_op_view(PyArrayObject *op, char *labels, Py_TYPE(op), PyArray_DESCR(op), ndim_output, new_dims, new_strides, PyArray_DATA(op), PyArray_ISWRITEABLE(op) ? 
NPY_ARRAY_WRITEABLE : 0, - (PyObject *)op, (PyObject *)op, 0); + (PyObject *)op, (PyObject *)op, (_NPY_CREATION_FLAGS)0); if (*ret == NULL) { return -1; @@ -472,7 +471,7 @@ prepare_op_axes(int ndim, int iop, char *labels, int *axes, } /* It's a labeled dimension, find the matching one */ else { - char *match = memchr(labels, label, ndim); + char *match = (char*)memchr(labels, label, ndim); /* If the op doesn't have the label, broadcast it */ if (match == NULL) { axes[i] = -1; @@ -520,14 +519,16 @@ unbuffered_loop_nop1_ndim2(NpyIter *iter) return -1; } - /* - * Since the iterator wasn't tracking coordinates, the - * loop provided by the iterator is in Fortran-order. - */ + /* IterationNeedsAPI effectively only checks for object dtype here. */ int needs_api = NpyIter_IterationNeedsAPI(iter); if (!needs_api) { NPY_BEGIN_THREADS_THRESHOLDED(shape[1] * shape[0]); } + + /* + * Since the iterator wasn't tracking coordinates, the + * loop provided by the iterator is in Fortran-order. + */ for (coord = shape[1]; coord > 0; --coord) { sop(1, ptrs[0], strides[0], shape[0]); @@ -581,14 +582,16 @@ unbuffered_loop_nop1_ndim3(NpyIter *iter) return -1; } - /* - * Since the iterator wasn't tracking coordinates, the - * loop provided by the iterator is in Fortran-order. - */ + /* IterationNeedsAPI effectively only checks for object dtype here. */ int needs_api = NpyIter_IterationNeedsAPI(iter); if (!needs_api) { NPY_BEGIN_THREADS_THRESHOLDED(shape[2] * shape[1] * shape[0]); } + + /* + * Since the iterator wasn't tracking coordinates, the + * loop provided by the iterator is in Fortran-order. + */ for (coords[1] = shape[2]; coords[1] > 0; --coords[1]) { for (coords[0] = shape[1]; coords[0] > 0; --coords[0]) { sop(1, ptrs[0], strides[0], shape[0]); @@ -645,14 +648,16 @@ unbuffered_loop_nop2_ndim2(NpyIter *iter) return -1; } - /* - * Since the iterator wasn't tracking coordinates, the - * loop provided by the iterator is in Fortran-order. 
- */ + /* IterationNeedsAPI effectively only checks for object dtype here. */ int needs_api = NpyIter_IterationNeedsAPI(iter); if (!needs_api) { NPY_BEGIN_THREADS_THRESHOLDED(shape[1] * shape[0]); } + + /* + * Since the iterator wasn't tracking coordinates, the + * loop provided by the iterator is in Fortran-order. + */ for (coord = shape[1]; coord > 0; --coord) { sop(2, ptrs[0], strides[0], shape[0]); @@ -708,14 +713,16 @@ unbuffered_loop_nop2_ndim3(NpyIter *iter) return -1; } - /* - * Since the iterator wasn't tracking coordinates, the - * loop provided by the iterator is in Fortran-order. - */ + /* IterationNeedsAPI effectively only checks for object dtype here. */ int needs_api = NpyIter_IterationNeedsAPI(iter); if (!needs_api) { NPY_BEGIN_THREADS_THRESHOLDED(shape[2] * shape[1] * shape[0]); } + + /* + * Since the iterator wasn't tracking coordinates, the + * loop provided by the iterator is in Fortran-order. + */ for (coords[1] = shape[2]; coords[1] > 0; --coords[1]) { for (coords[0] = shape[1]; coords[0] > 0; --coords[0]) { sop(2, ptrs[0], strides[0], shape[0]); @@ -808,7 +815,7 @@ PyArray_EinsteinSum(char *subscripts, npy_intp nop, NpyIter *iter = NULL; sum_of_products_fn sop; - npy_intp fixed_strides[NPY_MAXARGS]; + npy_intp *stride; /* nop+1 (+1 is for the output) must fit in NPY_MAXARGS */ if (nop >= NPY_MAXARGS) { @@ -1024,10 +1031,13 @@ PyArray_EinsteinSum(char *subscripts, npy_intp nop, NPY_ITER_NBO| NPY_ITER_ALIGNED| NPY_ITER_ALLOCATE; + /* + * Note: We skip GROWINNER here because this gives a partially stable + * summation for float64. Pairwise summation would be better. + */ iter_flags = NPY_ITER_EXTERNAL_LOOP| NPY_ITER_BUFFERED| NPY_ITER_DELAY_BUFALLOC| - NPY_ITER_GROWINNER| NPY_ITER_REFS_OK| NPY_ITER_ZEROSIZE_OK; if (out != NULL) { @@ -1100,11 +1110,12 @@ PyArray_EinsteinSum(char *subscripts, npy_intp nop, * Get an inner loop function, specializing it based on * the strides that are fixed for the whole loop. 
*/ - NpyIter_GetInnerFixedStrideArray(iter, fixed_strides); + stride = NpyIter_GetInnerStrideArray(iter); + sop = get_sum_of_products_function(nop, NpyIter_GetDescrArray(iter)[0]->type_num, NpyIter_GetDescrArray(iter)[0]->elsize, - fixed_strides); + stride); #if NPY_EINSUM_DBG_TRACING NpyIter_DebugPrint(iter); @@ -1118,9 +1129,7 @@ PyArray_EinsteinSum(char *subscripts, npy_intp nop, else if (NpyIter_GetIterSize(iter) != 0) { NpyIter_IterNextFunc *iternext; char **dataptr; - npy_intp *stride; npy_intp *countptr; - int needs_api; NPY_BEGIN_THREADS_DEF; iternext = NpyIter_GetIterNext(iter, NULL); @@ -1128,11 +1137,13 @@ PyArray_EinsteinSum(char *subscripts, npy_intp nop, goto fail; } dataptr = NpyIter_GetDataPtrArray(iter); - stride = NpyIter_GetInnerStrideArray(iter); countptr = NpyIter_GetInnerLoopSizePtr(iter); - needs_api = NpyIter_IterationNeedsAPI(iter); + /* IterationNeedsAPI additionally checks for object dtype here. */ + int needs_api = NpyIter_IterationNeedsAPI(iter); + if (!needs_api) { + NPY_BEGIN_THREADS_THRESHOLDED(NpyIter_GetIterSize(iter)); + } - NPY_BEGIN_THREADS_NDITER(iter); NPY_EINSUM_DBG_PRINT("Einsum loop\n"); do { sop(nop, dataptr, stride, *countptr); @@ -1140,7 +1151,7 @@ PyArray_EinsteinSum(char *subscripts, npy_intp nop, NPY_END_THREADS; /* If the API was needed, it may have thrown an error */ - if (NpyIter_IterationNeedsAPI(iter) && PyErr_Occurred()) { + if (needs_api && PyErr_Occurred()) { goto fail; } } diff --git a/numpy/_core/src/multiarray/flagsobject.c b/numpy/_core/src/multiarray/flagsobject.c index 8257727030c0..2570d3ec5d16 100644 --- a/numpy/_core/src/multiarray/flagsobject.c +++ b/numpy/_core/src/multiarray/flagsobject.c @@ -185,7 +185,7 @@ static char *msg = "future versions will not create a writeable " PyArrayFlagsObject *self, void *NPY_UNUSED(ignored)) \ { \ if (self->flags & NPY_ARRAY_WARN_ON_WRITE) { \ - if (PyErr_Warn(PyExc_FutureWarning, msg) < 0) {\ + if (PyErr_WarnEx(PyExc_FutureWarning, msg, 1) < 0) {\ return NULL; \ } 
\ }\ diff --git a/numpy/_core/src/multiarray/fnv.c b/numpy/_core/src/multiarray/fnv.c new file mode 100644 index 000000000000..2b7848519e61 --- /dev/null +++ b/numpy/_core/src/multiarray/fnv.c @@ -0,0 +1,85 @@ +/* + FNV-1a hash algorithm implementation + Based on the implementation from: + https://github.com/lcn2/fnv +*/ + +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + +#include +#include "numpy/npy_common.h" +#include "fnv.h" + + +#define FNV1A_32_INIT ((npy_uint32)0x811c9dc5) +#define FNV1A_64_INIT ((npy_uint64)0xcbf29ce484222325ULL) + +/* + Compute a 32-bit FNV-1a hash of buffer + original implementation from: + https://github.com/lcn2/fnv/blob/b7fcbee95538ee6a15744e756e7e7f1c02862cb0/hash_32a.c +*/ +npy_uint32 +npy_fnv1a_32(const void *buf, size_t len, npy_uint32 hval) +{ + const unsigned char *bp = (const unsigned char *)buf; /* start of buffer */ + const unsigned char *be = bp + len; /* beyond end of buffer */ + + /* + FNV-1a hash each octet in the buffer + */ + while (bp < be) { + + /* xor the bottom with the current octet */ + hval ^= (npy_uint32)*bp++; + + /* multiply by the 32 bit FNV magic prime */ + /* hval *= 0x01000193; */ + hval += (hval<<1) + (hval<<4) + (hval<<7) + (hval<<8) + (hval<<24); + } + + return hval; +} + +/* + Compute a 64-bit FNV-1a hash of the given data + original implementation from: + https://github.com/lcn2/fnv/blob/b7fcbee95538ee6a15744e756e7e7f1c02862cb0/hash_64a.c +*/ +npy_uint64 +npy_fnv1a_64(const void *buf, size_t len, npy_uint64 hval) +{ + const unsigned char *bp = (const unsigned char *)buf; /* start of buffer */ + const unsigned char *be = bp + len; /* beyond end of buffer */ + + /* + FNV-1a hash each octet in the buffer + */ + while (bp < be) { + + /* xor the bottom with the current octet */ + hval ^= (npy_uint64)*bp++; + + /* multiply by the 64 bit FNV magic prime */ + /* hval *= 0x100000001b3ULL; */ + hval += (hval << 1) + (hval << 4) + (hval << 5) + + (hval << 7) + (hval << 8) + (hval << 
40); + } + + return hval; +} + +/* + * Compute a size_t FNV-1a hash of the given data + * This will use 32-bit or 64-bit hash depending on the size of size_t + */ +size_t +npy_fnv1a(const void *buf, size_t len) +{ +#if NPY_SIZEOF_SIZE_T == 8 + return (size_t)npy_fnv1a_64(buf, len, FNV1A_64_INIT); +#else /* NPY_SIZEOF_SIZE_T == 4 */ + return (size_t)npy_fnv1a_32(buf, len, FNV1A_32_INIT); +#endif +} diff --git a/numpy/_core/src/multiarray/fnv.h b/numpy/_core/src/multiarray/fnv.h new file mode 100644 index 000000000000..c76f54a645b9 --- /dev/null +++ b/numpy/_core/src/multiarray/fnv.h @@ -0,0 +1,26 @@ +/* + FNV-1a hash algorithm implementation + Based on the implementation from: + https://github.com/lcn2/fnv +*/ + +#ifndef NUMPY_CORE_INCLUDE_NUMPY_MULTIARRAY_FNV_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_MULTIARRAY_FNV_H_ + + +/* + Compute a size_t FNV-1a hash of the given data + This will use 32-bit or 64-bit hash depending on the size of size_t + + Parameters: + ----------- + buf - pointer to the data to be hashed + len - length of the data in bytes + + Returns: + ----------- + size_t hash value +*/ +size_t npy_fnv1a(const void *buf, size_t len); + +#endif // NUMPY_CORE_INCLUDE_NUMPY_MULTIARRAY_FNV_H_ diff --git a/numpy/_core/src/multiarray/getset.c b/numpy/_core/src/multiarray/getset.c index d18463f27bb5..cec1ae275b71 100644 --- a/numpy/_core/src/multiarray/getset.c +++ b/numpy/_core/src/multiarray/getset.c @@ -25,6 +25,8 @@ #include "alloc.h" #include "npy_buffer.h" #include "shape.h" +#include "multiarraymodule.h" +#include "array_api_standard.h" /******************* array attribute get and set routines ******************/ @@ -47,17 +49,13 @@ array_shape_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) } -static int -array_shape_set(PyArrayObject *self, PyObject *val, void* NPY_UNUSED(ignored)) +NPY_NO_EXPORT int +array_shape_set_internal(PyArrayObject *self, PyObject *val) { int nd; PyArrayObject *ret; + assert(val); - if (val == NULL) { - 
PyErr_SetString(PyExc_AttributeError, - "Cannot delete array shape"); - return -1; - } /* Assumes C-order */ ret = (PyArrayObject *)PyArray_Reshape(self, val); if (ret == NULL) { @@ -83,7 +81,7 @@ array_shape_set(PyArrayObject *self, PyObject *val, void* NPY_UNUSED(ignored)) /* Free old dimensions and strides */ npy_free_cache_dim_array(self); ((PyArrayObject_fields *)self)->nd = nd; - ((PyArrayObject_fields *)self)->dimensions = _dimensions; + ((PyArrayObject_fields *)self)->dimensions = _dimensions; ((PyArrayObject_fields *)self)->strides = _dimensions + nd; if (nd) { @@ -93,7 +91,7 @@ array_shape_set(PyArrayObject *self, PyObject *val, void* NPY_UNUSED(ignored)) } else { /* Free old dimensions and strides */ - npy_free_cache_dim_array(self); + npy_free_cache_dim_array(self); ((PyArrayObject_fields *)self)->nd = 0; ((PyArrayObject_fields *)self)->dimensions = NULL; ((PyArrayObject_fields *)self)->strides = NULL; @@ -104,6 +102,25 @@ array_shape_set(PyArrayObject *self, PyObject *val, void* NPY_UNUSED(ignored)) return 0; } +static int +array_shape_set(PyArrayObject *self, PyObject *val, void* NPY_UNUSED(ignored)) +{ + if (val == NULL) { + PyErr_SetString(PyExc_AttributeError, + "Cannot delete array shape"); + return -1; + } + + /* Deprecated NumPy 2.5, 2026-01-05 */ + if (DEPRECATE("Setting the shape on a NumPy array has been deprecated" + " in NumPy 2.5.\nAs an alternative, you can create a new" + " view using np.reshape (with copy=False if needed)." 
+ ) < 0 ) { + return -1; + } + + return array_shape_set_internal(self, val); +} static PyObject * array_strides_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) @@ -114,6 +131,19 @@ array_strides_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) static int array_strides_set(PyArrayObject *self, PyObject *obj, void *NPY_UNUSED(ignored)) { + if (obj == NULL) { + PyErr_SetString(PyExc_AttributeError, + "Cannot delete array strides"); + return -1; + } + + /* Deprecated NumPy 2.4, 2025-05-11 */ + if (DEPRECATE("Setting the strides on a NumPy array has been deprecated in NumPy 2.4.\n" + "As an alternative, you can create a new view using np.lib.stride_tricks.as_strided." + ) < 0 ) { + return -1; + } + PyArray_Dims newstrides = {NULL, -1}; PyArrayObject *new; npy_intp numbytes = 0; @@ -122,11 +152,6 @@ array_strides_set(PyArrayObject *self, PyObject *obj, void *NPY_UNUSED(ignored)) npy_intp upper_offset = 0; Py_buffer view; - if (obj == NULL) { - PyErr_SetString(PyExc_AttributeError, - "Cannot delete array strides"); - return -1; - } if (!PyArray_OptionalIntpConverter(obj, &newstrides) || newstrides.len == -1) { PyErr_SetString(PyExc_TypeError, "invalid strides"); @@ -190,12 +215,6 @@ array_priority_get(PyArrayObject *NPY_UNUSED(self), void *NPY_UNUSED(ignored)) return PyFloat_FromDouble(NPY_PRIORITY); } -static PyObject * -array_typestr_get(PyArrayObject *self) -{ - return arraydescr_protocol_typestr_get(PyArray_DESCR(self), NULL); -} - static PyObject * array_descr_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) { @@ -203,34 +222,6 @@ array_descr_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) return (PyObject *)PyArray_DESCR(self); } -static PyObject * -array_protocol_descr_get(PyArrayObject *self) -{ - PyObject *res; - PyObject *dobj; - - res = arraydescr_protocol_descr_get(PyArray_DESCR(self), NULL); - if (res) { - return res; - } - PyErr_Clear(); - - /* get default */ - dobj = PyTuple_New(2); - if (dobj == NULL) { - return NULL; - } - 
PyTuple_SET_ITEM(dobj, 0, PyUnicode_FromString("")); - PyTuple_SET_ITEM(dobj, 1, array_typestr_get(self)); - res = PyList_New(1); - if (res == NULL) { - Py_DECREF(dobj); - return NULL; - } - PyList_SET_ITEM(res, 0, dobj); - return res; -} - static PyObject * array_protocol_strides_get(PyArrayObject *self) { @@ -270,65 +261,49 @@ array_ctypes_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) static PyObject * array_interface_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) { - PyObject *dict; - PyObject *obj; - - dict = PyDict_New(); - if (dict == NULL) { - return NULL; - } - - int ret; + PyObject *dataptr = NULL; + PyObject *strides = NULL; + PyObject *shape = NULL; + PyObject *descr = NULL; + PyObject *typestr = NULL; + PyObject *dict = NULL; - /* dataptr */ - obj = array_dataptr_get(self, NULL); - ret = PyDict_SetItemString(dict, "data", obj); - Py_DECREF(obj); - if (ret < 0) { - Py_DECREF(dict); - return NULL; + dataptr = array_dataptr_get(self, NULL); + if (dataptr == NULL) { + goto finish; } - obj = array_protocol_strides_get(self); - ret = PyDict_SetItemString(dict, "strides", obj); - Py_DECREF(obj); - if (ret < 0) { - Py_DECREF(dict); - return NULL; + strides = array_protocol_strides_get(self); + if (strides == NULL) { + goto finish; } - obj = array_protocol_descr_get(self); - ret = PyDict_SetItemString(dict, "descr", obj); - Py_DECREF(obj); - if (ret < 0) { - Py_DECREF(dict); - return NULL; + descr = array_protocol_descr_get(PyArray_DESCR(self)); + if (descr == NULL) { + goto finish; } - obj = arraydescr_protocol_typestr_get(PyArray_DESCR(self), NULL); - ret = PyDict_SetItemString(dict, "typestr", obj); - Py_DECREF(obj); - if (ret < 0) { - Py_DECREF(dict); - return NULL; + typestr = arraydescr_protocol_typestr_get(PyArray_DESCR(self), NULL); + if (typestr == NULL) { + goto finish; } - obj = array_shape_get(self, NULL); - ret = PyDict_SetItemString(dict, "shape", obj); - Py_DECREF(obj); - if (ret < 0) { - Py_DECREF(dict); - return NULL; + shape = 
array_shape_get(self, NULL); + if (shape == NULL) { + goto finish; } - obj = PyLong_FromLong(3); - ret = PyDict_SetItemString(dict, "version", obj); - Py_DECREF(obj); - if (ret < 0) { - Py_DECREF(dict); - return NULL; - } + dict = build_array_interface( + dataptr, descr, strides, typestr, shape + ); + goto finish; +finish: + Py_XDECREF(dataptr); + Py_XDECREF(strides); + Py_XDECREF(shape); + Py_XDECREF(descr); + Py_XDECREF(typestr); return dict; } @@ -385,16 +360,16 @@ array_descr_set(PyArrayObject *self, PyObject *arg, void *NPY_UNUSED(ignored)) /* check that we are not reinterpreting memory containing Objects. */ if (_may_have_objects(PyArray_DESCR(self)) || _may_have_objects(newtype)) { - static PyObject *checkfunc = NULL; PyObject *safe; - npy_cache_import("numpy._core._internal", "_view_is_safe", &checkfunc); - if (checkfunc == NULL) { + if (npy_cache_import_runtime( + "numpy._core._internal", "_view_is_safe", + &npy_runtime_imports._view_is_safe) == -1) { goto fail; } - safe = PyObject_CallFunction(checkfunc, "OO", - PyArray_DESCR(self), newtype); + safe = PyObject_CallFunction(npy_runtime_imports._view_is_safe, + "OO", PyArray_DESCR(self), newtype); if (safe == NULL) { goto fail; } @@ -490,15 +465,23 @@ array_descr_set(PyArrayObject *self, PyObject *arg, void *NPY_UNUSED(ignored)) if (temp == NULL) { return -1; } + /* create new dimensions cache and fill it */ + npy_intp new_nd = PyArray_NDIM(temp); + npy_intp *new_dims = npy_alloc_cache_dim(2 * new_nd); + if (new_dims == NULL) { + Py_DECREF(temp); + PyErr_NoMemory(); + return -1; + } + memcpy(new_dims, PyArray_DIMS(temp), new_nd * sizeof(npy_intp)); + memcpy(new_dims + new_nd, PyArray_STRIDES(temp), new_nd * sizeof(npy_intp)); + /* Update self with new cache */ npy_free_cache_dim_array(self); - ((PyArrayObject_fields *)self)->dimensions = PyArray_DIMS(temp); - ((PyArrayObject_fields *)self)->nd = PyArray_NDIM(temp); - ((PyArrayObject_fields *)self)->strides = PyArray_STRIDES(temp); + ((PyArrayObject_fields 
*)self)->nd = new_nd; + ((PyArrayObject_fields *)self)->dimensions = new_dims; + ((PyArrayObject_fields *)self)->strides = new_dims + new_nd; newtype = PyArray_DESCR(temp); - Py_INCREF(PyArray_DESCR(temp)); - /* Fool deallocator not to delete these*/ - ((PyArrayObject_fields *)temp)->nd = 0; - ((PyArrayObject_fields *)temp)->dimensions = NULL; + Py_INCREF(newtype); Py_DECREF(temp); } @@ -855,40 +838,6 @@ array_matrix_transpose_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) return PyArray_MatrixTranspose(self); } -static PyObject * -array_ptp(PyArrayObject *self, void *NPY_UNUSED(ignored)) -{ - PyErr_SetString(PyExc_AttributeError, - "`ptp` was removed from the ndarray class in NumPy 2.0. " - "Use np.ptp(arr, ...) instead."); - return NULL; -} - -static PyObject * -array_newbyteorder(PyArrayObject *self, PyObject *args) -{ - PyErr_SetString(PyExc_AttributeError, - "`newbyteorder` was removed from the ndarray class " - "in NumPy 2.0. " - "Use `arr.view(arr.dtype.newbyteorder(order))` instead."); - return NULL; -} - -static PyObject * -array_itemset(PyArrayObject *self, PyObject *args) -{ - PyErr_SetString(PyExc_AttributeError, - "`itemset` was removed from the ndarray class in " - "NumPy 2.0. 
Use `arr[index] = value` instead."); - return NULL; -} - -static PyObject * -array_device(PyArrayObject *self, void *NPY_UNUSED(ignored)) -{ - return PyUnicode_FromString("cpu"); -} - NPY_NO_EXPORT PyGetSetDef array_getsetlist[] = { {"ndim", (getter)array_ndim_get, @@ -954,18 +903,6 @@ NPY_NO_EXPORT PyGetSetDef array_getsetlist[] = { (getter)array_matrix_transpose_get, NULL, NULL, NULL}, - {"ptp", - (getter)array_ptp, - NULL, - NULL, NULL}, - {"newbyteorder", - (getter)array_newbyteorder, - NULL, - NULL, NULL}, - {"itemset", - (getter)array_itemset, - NULL, - NULL, NULL}, {"device", (getter)array_device, NULL, diff --git a/numpy/_core/src/multiarray/getset.h b/numpy/_core/src/multiarray/getset.h index a95c98020a18..5436efaa325b 100644 --- a/numpy/_core/src/multiarray/getset.h +++ b/numpy/_core/src/multiarray/getset.h @@ -3,4 +3,6 @@ extern NPY_NO_EXPORT PyGetSetDef array_getsetlist[]; +NPY_NO_EXPORT int array_shape_set_internal(PyArrayObject *self, PyObject *val); + #endif /* NUMPY_CORE_SRC_MULTIARRAY_GETSET_H_ */ diff --git a/numpy/_core/src/multiarray/hashdescr.c b/numpy/_core/src/multiarray/hashdescr.c index f570caf1588f..be203eb197c3 100644 --- a/numpy/_core/src/multiarray/hashdescr.c +++ b/numpy/_core/src/multiarray/hashdescr.c @@ -1,8 +1,10 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE + #define PY_SSIZE_T_CLEAN #include +#include #include @@ -78,7 +80,7 @@ static int _array_descr_builtin(PyArray_Descr* descr, PyObject *l) * For builtin type, hash relies on : kind + byteorder + flags + * type_num + elsize + alignment */ - t = Py_BuildValue("(cccii)", descr->kind, nbyteorder, + t = Py_BuildValue("(ccKnn)", descr->kind, nbyteorder, descr->flags, descr->elsize, descr->alignment); for(i = 0; i < PyTuple_Size(t); ++i) { @@ -127,7 +129,7 @@ static int _array_descr_walk_fields(PyObject *names, PyObject* fields, PyObject* * For each field, add the key + descr + offset to l */ key = PyTuple_GET_ITEM(names, pos); - value = 
PyDict_GetItem(fields, key); + value = PyDict_GetItem(fields, key); // noqa: borrowed-ref OK /* XXX: are those checks necessary ? */ if (value == NULL) { PyErr_SetString(PyExc_SystemError, @@ -256,12 +258,13 @@ static int _array_descr_walk(PyArray_Descr* descr, PyObject *l) } /* - * Return 0 if successful + * Return hash on success, -1 on failure */ -static int _PyArray_DescrHashImp(PyArray_Descr *descr, npy_hash_t *hash) +static npy_hash_t _PyArray_DescrHashImp(PyArray_Descr *descr) { PyObject *l, *tl; int st; + npy_hash_t hash; l = PyList_New(0); if (l == NULL) { @@ -283,25 +286,16 @@ static int _PyArray_DescrHashImp(PyArray_Descr *descr, npy_hash_t *hash) if (tl == NULL) return -1; - *hash = PyObject_Hash(tl); + hash = PyObject_Hash(tl); Py_DECREF(tl); - if (*hash == -1) { - /* XXX: does PyObject_Hash set an exception on failure ? */ -#if 0 - PyErr_SetString(PyExc_SystemError, - "(Hash) Error while hashing final tuple"); -#endif - return -1; - } - - return 0; + return hash; } NPY_NO_EXPORT npy_hash_t PyArray_DescrHash(PyObject* odescr) { PyArray_Descr *descr; - int st; + npy_hash_t hash; if (!PyArray_DescrCheck(odescr)) { PyErr_SetString(PyExc_ValueError, @@ -310,12 +304,15 @@ PyArray_DescrHash(PyObject* odescr) } descr = (PyArray_Descr*)odescr; - if (descr->hash == -1) { - st = _PyArray_DescrHashImp(descr, &descr->hash); - if (st) { + hash = atomic_load_explicit((_Atomic(npy_hash_t) *)&descr->hash, memory_order_relaxed); + + if (hash == -1) { + hash = _PyArray_DescrHashImp(descr); + if (hash == -1) { return -1; } + atomic_store_explicit((_Atomic(npy_hash_t) *)&descr->hash, hash, memory_order_relaxed); } - return descr->hash; + return hash; } diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index b4943851938d..4751db9b4705 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -4,6 +4,7 @@ #define PY_SSIZE_T_CLEAN #include #include +#include #include 
"numpy/arrayobject.h" #include "numpy/arrayscalars.h" @@ -15,7 +16,7 @@ -#include "multiarraymodule.h" +#include "npy_static_data.h" #include "common.h" #include "dtype_transfer.h" #include "dtypemeta.h" @@ -24,6 +25,7 @@ #include "lowlevel_strided_loops.h" #include "array_assign.h" #include "refcount.h" +#include "methods.h" #include "npy_sort.h" #include "npy_partition.h" @@ -398,7 +400,7 @@ PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0, } ni = PyArray_SIZE(indices); if ((ni > 0) && (PyArray_Size((PyObject *)self) == 0)) { - PyErr_SetString(PyExc_IndexError, + PyErr_SetString(PyExc_IndexError, "cannot replace elements of an empty array"); goto fail; } @@ -624,25 +626,26 @@ npy_fastputmask( npy_intp ni, npy_intp nv, npy_intp chunk) { if (chunk == 1) { - return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); + npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); } - if (chunk == 2) { - return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); + else if (chunk == 2) { + npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); } - if (chunk == 4) { - return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); + else if (chunk == 4) { + npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); } - if (chunk == 8) { - return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); + else if (chunk == 8) { + npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); } - if (chunk == 16) { - return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); + else if (chunk == 16) { + npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); } - if (chunk == 32) { - return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); + else if (chunk == 32) { + npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); + } + else { + npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); } - - return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); } @@ -785,21 +788,21 @@ static NPY_GCC_OPT_3 
inline int npy_fastrepeat_impl( npy_intp n_outer, npy_intp n, npy_intp nel, npy_intp chunk, npy_bool broadcast, npy_intp* counts, char* new_data, char* old_data, - npy_intp elsize, NPY_cast_info cast_info, int needs_refcounting) + npy_intp elsize, NPY_cast_info *cast_info, int needs_custom_copy) { npy_intp i, j, k; for (i = 0; i < n_outer; i++) { for (j = 0; j < n; j++) { npy_intp tmp = broadcast ? counts[0] : counts[j]; for (k = 0; k < tmp; k++) { - if (!needs_refcounting) { + if (!needs_custom_copy) { memcpy(new_data, old_data, chunk); } else { char *data[2] = {old_data, new_data}; npy_intp strides[2] = {elsize, elsize}; - if (cast_info.func(&cast_info.context, data, &nel, - strides, cast_info.auxdata) < 0) { + if (cast_info->func(&cast_info->context, data, &nel, + strides, cast_info->auxdata) < 0) { return -1; } } @@ -811,48 +814,53 @@ npy_fastrepeat_impl( return 0; } + +/* + * Helper to allow the compiler to specialize for all direct element copy + * cases (e.g. all numerical dtypes). 
+ */ static NPY_GCC_OPT_3 int npy_fastrepeat( npy_intp n_outer, npy_intp n, npy_intp nel, npy_intp chunk, npy_bool broadcast, npy_intp* counts, char* new_data, char* old_data, - npy_intp elsize, NPY_cast_info cast_info, int needs_refcounting) + npy_intp elsize, NPY_cast_info *cast_info, int needs_custom_copy) { - if (!needs_refcounting) { + if (!needs_custom_copy) { if (chunk == 1) { return npy_fastrepeat_impl( n_outer, n, nel, chunk, broadcast, counts, new_data, old_data, - elsize, cast_info, needs_refcounting); + elsize, cast_info, needs_custom_copy); } if (chunk == 2) { return npy_fastrepeat_impl( n_outer, n, nel, chunk, broadcast, counts, new_data, old_data, - elsize, cast_info, needs_refcounting); + elsize, cast_info, needs_custom_copy); } if (chunk == 4) { return npy_fastrepeat_impl( n_outer, n, nel, chunk, broadcast, counts, new_data, old_data, - elsize, cast_info, needs_refcounting); + elsize, cast_info, needs_custom_copy); } if (chunk == 8) { return npy_fastrepeat_impl( n_outer, n, nel, chunk, broadcast, counts, new_data, old_data, - elsize, cast_info, needs_refcounting); + elsize, cast_info, needs_custom_copy); } if (chunk == 16) { return npy_fastrepeat_impl( n_outer, n, nel, chunk, broadcast, counts, new_data, old_data, - elsize, cast_info, needs_refcounting); + elsize, cast_info, needs_custom_copy); } if (chunk == 32) { return npy_fastrepeat_impl( n_outer, n, nel, chunk, broadcast, counts, new_data, old_data, - elsize, cast_info, needs_refcounting); + elsize, cast_info, needs_custom_copy); } } return npy_fastrepeat_impl( n_outer, n, nel, chunk, broadcast, counts, new_data, old_data, elsize, - cast_info, needs_refcounting); + cast_info, needs_custom_copy); } @@ -872,7 +880,6 @@ PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis) char *new_data, *old_data; NPY_cast_info cast_info; NPY_ARRAYMETHOD_FLAGS flags; - int needs_refcounting; repeats = (PyArrayObject *)PyArray_ContiguousFromAny(op, NPY_INTP, 0, 1); if (repeats == NULL) { @@ -897,7 +904,6 @@ 
PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis) aop = (PyArrayObject *)ap; n = PyArray_DIM(aop, axis); NPY_cast_info_init(&cast_info); - needs_refcounting = PyDataType_REFCHK(PyArray_DESCR(aop)); if (!broadcast && PyArray_SIZE(repeats) != n) { PyErr_Format(PyExc_ValueError, @@ -919,16 +925,23 @@ PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis) } } + /* Fill in dimensions of new array */ + npy_intp dims[NPY_MAXDIMS] = {0}; + + for (int i = 0; i < PyArray_NDIM(aop); i++) { + dims[i] = PyArray_DIMS(aop)[i]; + } + + dims[axis] = total; + /* Construct new array */ - PyArray_DIMS(aop)[axis] = total; Py_INCREF(PyArray_DESCR(aop)); ret = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(aop), PyArray_DESCR(aop), PyArray_NDIM(aop), - PyArray_DIMS(aop), + dims, NULL, NULL, 0, (PyObject *)aop); - PyArray_DIMS(aop)[axis] = n; if (ret == NULL) { goto fail; } @@ -947,16 +960,18 @@ PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis) n_outer *= PyArray_DIMS(aop)[i]; } - if (needs_refcounting) { + int needs_custom_copy = 0; + if (PyDataType_REFCHK(PyArray_DESCR(ret))) { + needs_custom_copy = 1; if (PyArray_GetDTypeTransferFunction( - 1, elsize, elsize, PyArray_DESCR(aop), PyArray_DESCR(aop), 0, + 1, elsize, elsize, PyArray_DESCR(aop), PyArray_DESCR(ret), 0, &cast_info, &flags) < 0) { goto fail; } } if (npy_fastrepeat(n_outer, n, nel, chunk, broadcast, counts, new_data, - old_data, elsize, cast_info, needs_refcounting) < 0) { + old_data, elsize, &cast_info, needs_custom_copy) < 0) { goto fail; } @@ -1016,6 +1031,7 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, } dtype = PyArray_DESCR(mps[0]); + int copy_existing_out = 0; /* Set-up return array */ if (out == NULL) { Py_INCREF(dtype); @@ -1027,10 +1043,6 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, (PyObject *)ap); } else { - int flags = NPY_ARRAY_CARRAY | - NPY_ARRAY_WRITEBACKIFCOPY | - NPY_ARRAY_FORCECAST; - if ((PyArray_NDIM(out) != multi->nd) || 
!PyArray_CompareLists(PyArray_DIMS(out), multi->dimensions, @@ -1040,9 +1052,13 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, goto fail; } + if (PyArray_FailUnlessWriteable(out, "output array") < 0) { + goto fail; + } + for (i = 0; i < n; i++) { if (arrays_overlap(out, mps[i])) { - flags |= NPY_ARRAY_ENSURECOPY; + copy_existing_out = 1; } } @@ -1052,10 +1068,25 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, * so the input array is not changed * before the error is called */ - flags |= NPY_ARRAY_ENSURECOPY; + copy_existing_out = 1; + } + + if (!PyArray_EquivTypes(dtype, PyArray_DESCR(out))) { + copy_existing_out = 1; + } + + if (copy_existing_out) { + Py_INCREF(dtype); + obj = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, + dtype, + multi->nd, + multi->dimensions, + NULL, NULL, 0, + (PyObject *)out); + } + else { + obj = (PyArrayObject *)Py_NewRef(out); } - Py_INCREF(dtype); - obj = (PyArrayObject *)PyArray_FromArray(out, dtype, flags); } if (obj == NULL) { @@ -1068,12 +1099,13 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, NPY_ARRAYMETHOD_FLAGS transfer_flags = 0; if (PyDataType_REFCHK(dtype)) { int is_aligned = IsUintAligned(obj); + PyArray_Descr *obj_dtype = PyArray_DESCR(obj); PyArray_GetDTypeTransferFunction( is_aligned, dtype->elsize, - dtype->elsize, + obj_dtype->elsize, dtype, - dtype, 0, &cast_info, + obj_dtype, 0, &cast_info, &transfer_flags); } @@ -1130,11 +1162,13 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, } Py_DECREF(ap); PyDataMem_FREE(mps); - if (out != NULL && out != obj) { - Py_INCREF(out); - PyArray_ResolveWritebackIfCopy(obj); + if (copy_existing_out) { + int res = PyArray_CopyInto(out, obj); Py_DECREF(obj); - obj = out; + if (res < 0) { + return NULL; + } + return Py_NewRef(out); } return (PyObject *)obj; @@ -1161,6 +1195,8 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, */ static int _new_sortlike(PyArrayObject *op, 
int axis, PyArray_SortFunc *sort, + PyArrayMethod_StridedLoop *strided_loop, PyArrayMethod_Context *context, + NpyAuxData *auxdata, NPY_ARRAYMETHOD_FLAGS *method_flags, PyArray_PartitionFunc *part, npy_intp const *kth, npy_intp nkth) { npy_intp N = PyArray_DIM(op, axis); @@ -1168,8 +1204,8 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, npy_intp astride = PyArray_STRIDE(op, axis); int swap = PyArray_ISBYTESWAPPED(op); int is_aligned = IsAligned(op); - int needcopy = !is_aligned || swap || astride != elsize; - int needs_api = PyDataType_FLAGCHK(PyArray_DESCR(op), NPY_NEEDS_PYAPI); + int needcopy = 0; + int needs_api; char *buffer = NULL; @@ -1191,6 +1227,13 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, return 0; } + if (strided_loop != NULL) { + needs_api = *method_flags & NPY_METH_REQUIRES_PYAPI; + } + else { + needs_api = PyDataType_FLAGCHK(PyArray_DESCR(op), NPY_NEEDS_PYAPI); + } + PyObject *mem_handler = PyDataMem_GetHandler(); if (mem_handler == NULL) { return -1; @@ -1202,6 +1245,26 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, } size = it->size; + if (strided_loop != NULL) { + // Descriptors have already been resolved + odescr = context->descriptors[0]; + Py_INCREF(odescr); + } + else { + if (swap) { + odescr = PyArray_DescrNewByteorder(descr, NPY_SWAP); + } + else { + odescr = descr; + Py_INCREF(odescr); + } + } + + needcopy = !is_aligned || astride != elsize; + if (!PyArray_EquivTypes(descr, odescr)) { + needcopy = 1; + } + if (needcopy) { buffer = PyDataMem_UserNEW(N * elsize, mem_handler); if (buffer == NULL) { @@ -1212,14 +1275,6 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, memset(buffer, 0, N * elsize); } - if (swap) { - odescr = PyArray_DescrNewByteorder(descr, NPY_SWAP); - } - else { - odescr = descr; - Py_INCREF(odescr); - } - NPY_ARRAYMETHOD_FLAGS to_transfer_flags; if (PyArray_GetDTypeTransferFunction( @@ -1237,7 +1292,9 @@ _new_sortlike(PyArrayObject 
*op, int axis, PyArray_SortFunc *sort, } } - NPY_BEGIN_THREADS_DESCR(descr); + if (!needs_api) { + NPY_BEGIN_THREADS; + } while (size--) { char *bufptr = it->dataptr; @@ -1262,7 +1319,14 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, */ if (part == NULL) { - ret = sort(bufptr, N, op); + if (strided_loop != NULL) { + char *const data[2] = {bufptr, bufptr}; + npy_intp strides[2] = {elsize, elsize}; + ret = strided_loop(context, data, &N, strides, NULL); + } + else { + ret = sort(bufptr, N, op); + } if (needs_api && PyErr_Occurred()) { ret = -1; } @@ -1300,12 +1364,13 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, } fail: - NPY_END_THREADS_DESCR(descr); + if (!needs_api) { + NPY_END_THREADS; + } /* cleanup internal buffer */ if (needcopy) { PyArray_ClearBuffer(odescr, buffer, elsize, N, 1); PyDataMem_UserFREE(buffer, N * elsize, mem_handler); - Py_DECREF(odescr); } if (ret < 0 && !PyErr_Occurred()) { /* Out of memory during sorting or buffer creation */ @@ -1318,6 +1383,7 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, if (PyErr_Occurred() && ret == 0) { ret = -1; } + Py_XDECREF(odescr); Py_DECREF(it); Py_DECREF(mem_handler); NPY_cast_info_xfree(&to_cast_info); @@ -1328,16 +1394,17 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, static PyObject* _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, - PyArray_ArgPartitionFunc *argpart, - npy_intp const *kth, npy_intp nkth) + PyArrayMethod_StridedLoop *strided_loop, PyArrayMethod_Context *context, + NpyAuxData *auxdata, NPY_ARRAYMETHOD_FLAGS *method_flags, + PyArray_ArgPartitionFunc *argpart, npy_intp const *kth, npy_intp nkth) { npy_intp N = PyArray_DIM(op, axis); npy_intp elsize = (npy_intp)PyArray_ITEMSIZE(op); npy_intp astride = PyArray_STRIDE(op, axis); int swap = PyArray_ISBYTESWAPPED(op); int is_aligned = IsAligned(op); - int needcopy = !is_aligned || swap || astride != elsize; - int needs_api = 
PyDataType_FLAGCHK(PyArray_DESCR(op), NPY_NEEDS_PYAPI); + int needcopy = 0; + int needs_api; int needidxbuffer; char *valbuffer = NULL; @@ -1374,6 +1441,13 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, rstride = PyArray_STRIDE(rop, axis); needidxbuffer = rstride != sizeof(npy_intp); + if (strided_loop != NULL) { + needs_api = *method_flags & NPY_METH_REQUIRES_PYAPI; + } + else { + needs_api = PyDataType_FLAGCHK(PyArray_DESCR(op), NPY_NEEDS_PYAPI); + } + /* Check if there is any argsorting to do */ if (N <= 1 || PyArray_SIZE(op) == 0) { Py_DECREF(mem_handler); @@ -1389,6 +1463,26 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, } size = it->size; + if (strided_loop != NULL) { + // Descriptors have already been resolved + odescr = context->descriptors[0]; + Py_INCREF(odescr); + } + else { + if (swap) { + odescr = PyArray_DescrNewByteorder(descr, NPY_SWAP); + } + else { + odescr = descr; + Py_INCREF(odescr); + } + } + + needcopy = !is_aligned || astride != elsize; + if (!PyArray_EquivTypes(descr, odescr)) { + needcopy = 1; + } + if (needcopy) { valbuffer = PyDataMem_UserNEW(N * elsize, mem_handler); if (valbuffer == NULL) { @@ -1399,14 +1493,6 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, memset(valbuffer, 0, N * elsize); } - if (swap) { - odescr = PyArray_DescrNewByteorder(descr, NPY_SWAP); - } - else { - odescr = descr; - Py_INCREF(odescr); - } - if (PyArray_GetDTypeTransferFunction( is_aligned, astride, elsize, descr, odescr, 0, &cast_info, &transfer_flags) != NPY_SUCCEED) { @@ -1423,7 +1509,9 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, } } - NPY_BEGIN_THREADS_DESCR(descr); + if (!needs_api) { + NPY_BEGIN_THREADS; + } while (size--) { char *valptr = it->dataptr; @@ -1452,8 +1540,15 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, } if (argpart == NULL) { - ret = argsort(valptr, idxptr, N, op); - /* Object 
comparisons may raise an exception in Python 3 */ + if (strided_loop != NULL) { + char *const data[2] = {valptr, (char *)idxptr}; + npy_intp strides[2] = {elsize, sizeof(npy_intp)}; + ret = strided_loop(context, data, &N, strides, NULL); + } + else { + ret = argsort(valptr, idxptr, N, op); + } + /* Object comparisons may raise an exception */ if (needs_api && PyErr_Occurred()) { ret = -1; } @@ -1467,7 +1562,7 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, for (i = 0; i < nkth; ++i) { ret = argpart(valptr, idxptr, N, kth[i], pivots, &npiv, nkth, op); - /* Object comparisons may raise an exception in Python 3 */ + /* Object comparisons may raise an exception */ if (needs_api && PyErr_Occurred()) { ret = -1; } @@ -1492,12 +1587,13 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, } fail: - NPY_END_THREADS_DESCR(descr); + if (!needs_api) { + NPY_END_THREADS; + } /* cleanup internal buffers */ if (needcopy) { PyArray_ClearBuffer(odescr, valbuffer, elsize, N, 1); PyDataMem_UserFREE(valbuffer, N * elsize, mem_handler); - Py_DECREF(odescr); } PyDataMem_UserFREE(idxbuffer, N * sizeof(npy_intp), mem_handler); if (ret < 0) { @@ -1508,6 +1604,7 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, Py_XDECREF(rop); rop = NULL; } + Py_XDECREF(odescr); Py_XDECREF(it); Py_XDECREF(rit); Py_DECREF(mem_handler); @@ -1517,56 +1614,6 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, } -/*NUMPY_API - * Sort an array in-place - */ -NPY_NO_EXPORT int -PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND which) -{ - PyArray_SortFunc *sort = NULL; - int n = PyArray_NDIM(op); - - if (check_and_adjust_axis(&axis, n) < 0) { - return -1; - } - - if (PyArray_FailUnlessWriteable(op, "sort array") < 0) { - return -1; - } - - if (which < 0 || which >= NPY_NSORTS) { - PyErr_SetString(PyExc_ValueError, "not a valid sort kind"); - return -1; - } - - sort = 
PyDataType_GetArrFuncs(PyArray_DESCR(op))->sort[which]; - - if (sort == NULL) { - if (PyDataType_GetArrFuncs(PyArray_DESCR(op))->compare) { - switch (which) { - default: - case NPY_QUICKSORT: - sort = npy_quicksort; - break; - case NPY_HEAPSORT: - sort = npy_heapsort; - break; - case NPY_STABLESORT: - sort = npy_timsort; - break; - } - } - else { - PyErr_SetString(PyExc_TypeError, - "type does not have compare function"); - return -1; - } - } - - return _new_sortlike(op, axis, sort, NULL, NULL, 0); -} - - /* * make kth array positive, ravel and sort it */ @@ -1581,12 +1628,9 @@ partition_prep_kth_array(PyArrayObject * ktharray, npy_intp nkth, i; if (PyArray_ISBOOL(ktharray)) { - /* 2021-09-29, NumPy 1.22 */ - if (DEPRECATE( - "Passing booleans as partition index is deprecated" - " (warning added in NumPy 1.22)") < 0) { - return NULL; - } + PyErr_SetString(PyExc_ValueError, + "Booleans unacceptable as partition index"); + return NULL; } else if (!PyArray_ISINTEGER(ktharray)) { PyErr_Format(PyExc_TypeError, "Partition index must be integer"); @@ -1674,7 +1718,7 @@ PyArray_Partition(PyArrayObject *op, PyArrayObject * ktharray, int axis, return -1; } - ret = _new_sortlike(op, axis, sort, part, + ret = _new_sortlike(op, axis, sort, NULL, NULL, NULL, NULL, part, PyArray_DATA(kthrvl), PyArray_SIZE(kthrvl)); Py_DECREF(kthrvl); @@ -1683,52 +1727,6 @@ PyArray_Partition(PyArrayObject *op, PyArrayObject * ktharray, int axis, } -/*NUMPY_API - * ArgSort an array - */ -NPY_NO_EXPORT PyObject * -PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND which) -{ - PyArrayObject *op2; - PyArray_ArgSortFunc *argsort = NULL; - PyObject *ret; - - argsort = PyDataType_GetArrFuncs(PyArray_DESCR(op))->argsort[which]; - - if (argsort == NULL) { - if (PyDataType_GetArrFuncs(PyArray_DESCR(op))->compare) { - switch (which) { - default: - case NPY_QUICKSORT: - argsort = npy_aquicksort; - break; - case NPY_HEAPSORT: - argsort = npy_aheapsort; - break; - case NPY_STABLESORT: - argsort = 
npy_atimsort; - break; - } - } - else { - PyErr_SetString(PyExc_TypeError, - "type does not have compare function"); - return NULL; - } - } - - op2 = (PyArrayObject *)PyArray_CheckAxis(op, &axis, 0); - if (op2 == NULL) { - return NULL; - } - - ret = _new_argsortlike(op2, axis, argsort, NULL, NULL, 0); - - Py_DECREF(op2); - return ret; -} - - /*NUMPY_API * ArgPartition an array */ @@ -1776,7 +1774,7 @@ PyArray_ArgPartition(PyArrayObject *op, PyArrayObject *ktharray, int axis, return NULL; } - ret = _new_argsortlike(op2, axis, argsort, argpart, + ret = _new_argsortlike(op2, axis, argsort, NULL, NULL, NULL, NULL, argpart, PyArray_DATA(kthrvl), PyArray_SIZE(kthrvl)); Py_DECREF(kthrvl); @@ -2009,8 +2007,7 @@ PyArray_LexSort(PyObject *sort_keys, int axis) } rcode = argsort(its[j]->dataptr, (npy_intp *)rit->dataptr, N, mps[j]); - if (rcode < 0 || (PyDataType_REFCHK(PyArray_DESCR(mps[j])) - && PyErr_Occurred())) { + if (rcode < 0 || (object && PyErr_Occurred())) { goto fail; } PyArray_ITER_NEXT(its[j]); @@ -2100,7 +2097,6 @@ PyArray_SearchSorted(PyArrayObject *op1, PyObject *op2, if (dtype == NULL) { return NULL; } - /* refs to dtype we own = 1 */ /* Look for binary search function */ if (perm) { @@ -2111,26 +2107,23 @@ PyArray_SearchSorted(PyArrayObject *op1, PyObject *op2, } if (binsearch == NULL && argbinsearch == NULL) { PyErr_SetString(PyExc_TypeError, "compare not supported for type"); - /* refs to dtype we own = 1 */ Py_DECREF(dtype); - /* refs to dtype we own = 0 */ return NULL; } - /* need ap2 as contiguous array and of right type */ - /* refs to dtype we own = 1 */ - Py_INCREF(dtype); - /* refs to dtype we own = 2 */ + /* need ap2 as contiguous array and of right dtype (note: steals dtype reference) */ ap2 = (PyArrayObject *)PyArray_CheckFromAny(op2, dtype, 0, 0, NPY_ARRAY_CARRAY_RO | NPY_ARRAY_NOTSWAPPED, NULL); - /* refs to dtype we own = 1, array creation steals one even on failure */ if (ap2 == NULL) { - Py_DECREF(dtype); - /* refs to dtype we own = 0 */ 
return NULL; } + /* + * The dtype reference we had was used for creating ap2, which may have + * replaced it with another. So here we copy the dtype of ap2 and use it for `ap1`. + */ + dtype = (PyArray_Descr *)Py_NewRef(PyArray_DESCR(ap2)); /* * If the needle (ap2) is larger than the haystack (op1) we copy the @@ -2139,9 +2132,9 @@ PyArray_SearchSorted(PyArrayObject *op1, PyObject *op2, if (PyArray_SIZE(ap2) > PyArray_SIZE(op1)) { ap1_flags |= NPY_ARRAY_CARRAY_RO; } + /* dtype is stolen, after this we have no reference */ ap1 = (PyArrayObject *)PyArray_CheckFromAny((PyObject *)op1, dtype, 1, 1, ap1_flags, NULL); - /* refs to dtype we own = 0, array creation steals one even on failure */ if (ap1 == NULL) { goto fail; } @@ -2262,10 +2255,10 @@ PyArray_Diagonal(PyArrayObject *self, int offset, int axis1, int axis2) } /* Handle negative axes with standard Python indexing rules */ - if (check_and_adjust_axis_msg(&axis1, ndim, npy_ma_str_axis1) < 0) { + if (check_and_adjust_axis_msg(&axis1, ndim, npy_interned_str.axis1) < 0) { return NULL; } - if (check_and_adjust_axis_msg(&axis2, ndim, npy_ma_str_axis2) < 0) { + if (check_and_adjust_axis_msg(&axis2, ndim, npy_interned_str.axis2) < 0) { return NULL; } if (axis1 == axis2) { @@ -2502,11 +2495,13 @@ count_nonzero_u8(const char *data, npy_intp bstride, npy_uintp len) len -= len_m; count = len_m - zcount; #else - if (!NPY_ALIGNMENT_REQUIRED || npy_is_aligned(data, sizeof(npy_uint64))) { + if (npy_is_aligned(data, sizeof(npy_uint64))) { int step = 6 * sizeof(npy_uint64); int left_bytes = len % step; for (const char *end = data + len; data < end - left_bytes; data += step) { - count += count_nonzero_bytes_384((const npy_uint64 *)data); + npy_uint64 arr[6]; + memcpy(arr, data, step); + count += count_nonzero_bytes_384(arr); } len = left_bytes; } @@ -2606,7 +2601,7 @@ count_nonzero_u64(const char *data, npy_intp bstride, npy_uintp len) return count; } /* - * Counts the number of True values in a raw boolean array. 
This + * Counts the number of non-zero values in a raw int array. This * is a low-overhead function which does no heap allocations. * * Returns -1 on error. @@ -2716,6 +2711,15 @@ PyArray_CountNonzero(PyArrayObject *self) } } else { + /* Special low-overhead version specific to the float types (and some others) */ + if (PyArray_ISNOTSWAPPED(self) && PyArray_ISALIGNED(self)) { + npy_intp dispatched_nonzero_count = count_nonzero_trivial_dispatcher(count, + data, stride, dtype->type_num); + if (dispatched_nonzero_count >= 0) { + return dispatched_nonzero_count; + } + } + NPY_BEGIN_THREADS_THRESHOLDED(count); while (count--) { if (nonzero(data, self)) { @@ -2748,6 +2752,7 @@ PyArray_CountNonzero(PyArrayObject *self) if (iter == NULL) { return -1; } + /* IterationNeedsAPI also checks dtype for whether `nonzero` may need it */ needs_api = NpyIter_IterationNeedsAPI(iter); /* Get the pointers for inner loop iteration */ @@ -2757,7 +2762,9 @@ PyArray_CountNonzero(PyArrayObject *self) return -1; } - NPY_BEGIN_THREADS_NDITER(iter); + if (!needs_api) { + NPY_BEGIN_THREADS_THRESHOLDED(NpyIter_GetIterSize(iter)); + } dataptr = NpyIter_GetDataPtrArray(iter); strideptr = NpyIter_GetInnerStrideArray(iter); @@ -2882,10 +2889,11 @@ PyArray_Nonzero(PyArrayObject *self) * the fast bool count is followed by this sparse path is faster * than combining the two loops, even for larger arrays */ + npy_intp * multi_index_end = multi_index + nonzero_count; if (((double)nonzero_count / count) <= 0.1) { npy_intp subsize; npy_intp j = 0; - while (1) { + while (multi_index < multi_index_end) { npy_memchr(data + j * stride, 0, stride, count - j, &subsize, 1); j += subsize; @@ -2900,11 +2908,10 @@ PyArray_Nonzero(PyArrayObject *self) * stalls that are very expensive on most modern processors. 
*/ else { - npy_intp *multi_index_end = multi_index + nonzero_count; npy_intp j = 0; /* Manually unroll for GCC and maybe other compilers */ - while (multi_index + 4 < multi_index_end) { + while (multi_index + 4 < multi_index_end && (j < count - 4) ) { *multi_index = j; multi_index += data[0] != 0; *multi_index = j + 1; @@ -2917,7 +2924,7 @@ PyArray_Nonzero(PyArrayObject *self) j += 4; } - while (multi_index < multi_index_end) { + while (multi_index < multi_index_end && (j < count) ) { *multi_index = j; multi_index += *data != 0; data += stride; @@ -2978,9 +2985,12 @@ PyArray_Nonzero(PyArrayObject *self) return NULL; } + /* IterationNeedsAPI also checks dtype for whether `nonzero` may need it */ needs_api = NpyIter_IterationNeedsAPI(iter); - NPY_BEGIN_THREADS_NDITER(iter); + if (!needs_api) { + NPY_BEGIN_THREADS_THRESHOLDED(NpyIter_GetIterSize(iter)); + } dataptr = NpyIter_GetDataPtrArray(iter); @@ -3113,3 +3123,241 @@ PyArray_MultiIndexSetItem(PyArrayObject *self, const npy_intp *multi_index, return PyArray_Pack(PyArray_DESCR(self), data, obj); } + + +/* Table of generic sort functions for use in PyArray_SortEx*/ +static PyArray_SortFunc* const generic_sort_table[] = {npy_quicksort, + npy_heapsort, + npy_timsort}; + +/*NUMPY_API + * Sort an array in-place with extended parameters + */ +NPY_NO_EXPORT int +PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND flags) +{ + PyArrayMethodObject *sort_method = NULL; + PyArrayMethod_StridedLoop *strided_loop = NULL; + PyArrayMethod_SortParameters sort_params = {.flags = flags}; + PyArrayMethod_Context context = {0}; + PyArray_Descr *loop_descrs[2]; + NpyAuxData *auxdata = NULL; + NPY_ARRAYMETHOD_FLAGS method_flags = 0; + + PyArray_SortFunc **sort_table = NULL; + PyArray_SortFunc *sort = NULL; + + int ret; + + if (check_and_adjust_axis(&axis, PyArray_NDIM(op)) < 0) { + return -1; + } + + if (PyArray_FailUnlessWriteable(op, "sort array") < 0) { + return -1; + } + + // Zero the NPY_HEAPSORT bit, maps NPY_HEAPSORT to 
NPY_QUICKSORT + flags &= ~_NPY_SORT_HEAPSORT; + + // Look for type specific functions + sort_method = NPY_DT_SLOTS(NPY_DTYPE(PyArray_DESCR(op)))->sort_meth; + if (sort_method != NULL) { + PyArray_Descr *descr = PyArray_DESCR(op); + PyArray_DTypeMeta *dt = NPY_DTYPE(descr); + + PyArray_DTypeMeta *dtypes[2] = {dt, dt}; + PyArray_Descr *given_descrs[2] = {descr, descr}; + // Sort cannot be a view, so view_offset is unused + npy_intp view_offset = 0; + + if (sort_method->resolve_descriptors( + sort_method, dtypes, given_descrs, loop_descrs, &view_offset) < 0) { + PyErr_SetString(PyExc_RuntimeError, + "unable to resolve descriptors for sort"); + return -1; + } + context.descriptors = loop_descrs; + context.parameters = &sort_params; + context.method = sort_method; + + // Arrays are always contiguous for sorting + npy_intp strides[2] = {loop_descrs[0]->elsize, loop_descrs[1]->elsize}; + + if (sort_method->get_strided_loop( + &context, 1, 0, strides, &strided_loop, &auxdata, &method_flags) < 0) { + ret = -1; + goto fail; + } + } + else { + sort_table = PyDataType_GetArrFuncs(PyArray_DESCR(op))->sort; + switch (flags) { + case NPY_SORT_DEFAULT: + sort = sort_table[NPY_QUICKSORT]; + break; + case NPY_SORT_STABLE: + sort = sort_table[NPY_STABLESORT]; + break; + default: + break; + } + + // Look for appropriate generic function if no type specific version + if (sort == NULL) { + if (!PyDataType_GetArrFuncs(PyArray_DESCR(op))->compare) { + PyErr_SetString(PyExc_TypeError, + "type does not have compare function"); + return -1; + } + switch (flags) { + case NPY_SORT_DEFAULT: + sort = generic_sort_table[NPY_QUICKSORT]; + break; + case NPY_SORT_STABLE: + sort = generic_sort_table[NPY_STABLESORT]; + break; + default: + break; + } + } + + if (sort == NULL) { + PyErr_SetString(PyExc_TypeError, + "no current sort function meets the requirements"); + return -1; + } + } + + ret = _new_sortlike(op, axis, sort, strided_loop, + &context, auxdata, &method_flags, NULL, NULL, 0); + +fail: + 
if (sort_method != NULL) { + NPY_AUXDATA_FREE(auxdata); + Py_DECREF(context.descriptors[0]); + Py_DECREF(context.descriptors[1]); + } + return ret; +} + +/* Table of generic argsort function for use by PyArray_ArgSortEx */ +static PyArray_ArgSortFunc* const generic_argsort_table[] = {npy_aquicksort, + npy_aheapsort, + npy_atimsort}; + +/*NUMPY_API + * ArgSort an array with extended parameters + */ +NPY_NO_EXPORT PyObject * +PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND flags) +{ + PyArrayObject *op2; + PyObject *ret; + PyArrayMethodObject *argsort_method = NULL; + PyArrayMethod_StridedLoop *strided_loop = NULL; + PyArrayMethod_SortParameters sort_params = {.flags = flags}; + PyArrayMethod_Context context = {0}; + PyArray_Descr *loop_descrs[2]; + NpyAuxData *auxdata = NULL; + NPY_ARRAYMETHOD_FLAGS method_flags = 0; + + PyArray_ArgSortFunc **argsort_table = NULL; + PyArray_ArgSortFunc *argsort = NULL; + + // Zero the NPY_HEAPSORT bit, maps NPY_HEAPSORT to NPY_QUICKSORT + flags &= ~_NPY_SORT_HEAPSORT; + + // Look for type specific functions + argsort_method = NPY_DT_SLOTS(NPY_DTYPE(PyArray_DESCR(op)))->argsort_meth; + if (argsort_method != NULL) { + PyArray_Descr *descr = PyArray_DESCR(op); + PyArray_Descr *odescr = PyArray_DescrFromType(NPY_INTP); + PyArray_DTypeMeta *dt = NPY_DTYPE(descr); + PyArray_DTypeMeta *odt = NPY_DTYPE(odescr); + + PyArray_DTypeMeta *dtypes[2] = {dt, odt}; + PyArray_Descr *given_descrs[2] = {descr, odescr}; + // we can ignore the view_offset for sorting + npy_intp view_offset = 0; + + int resolve_ret = argsort_method->resolve_descriptors( + argsort_method, dtypes, given_descrs, loop_descrs, &view_offset); + Py_DECREF(odescr); + if (resolve_ret < 0) { + PyErr_SetString(PyExc_RuntimeError, + "unable to resolve descriptors for argsort"); + return NULL; + } + context.descriptors = loop_descrs; + context.parameters = &sort_params; + context.method = argsort_method; + + // Arrays are always contiguous for sorting + npy_intp strides[2] = 
{loop_descrs[0]->elsize, loop_descrs[1]->elsize}; + + if (argsort_method->get_strided_loop( + &context, 1, 0, strides, &strided_loop, &auxdata, &method_flags) < 0) { + ret = NULL; + goto fail; + } + } + else { + argsort_table = PyDataType_GetArrFuncs(PyArray_DESCR(op))->argsort; + switch (flags) { + case NPY_SORT_DEFAULT: + argsort = argsort_table[NPY_QUICKSORT]; + break; + case NPY_SORT_STABLE: + argsort = argsort_table[NPY_STABLESORT]; + break; + default: + break; + } + + // Look for generic function if no type specific version + if (argsort == NULL) { + if (!PyDataType_GetArrFuncs(PyArray_DESCR(op))->compare) { + PyErr_SetString(PyExc_TypeError, + "type does not have compare function"); + return NULL; + } + switch (flags) { + case NPY_SORT_DEFAULT: + argsort = generic_argsort_table[NPY_QUICKSORT]; + break; + case NPY_SORT_STABLE: + argsort = generic_argsort_table[NPY_STABLESORT]; + break; + default: + break; + } + } + + if (argsort == NULL) { + PyErr_SetString(PyExc_TypeError, + "no current argsort function meets the requirements"); + return NULL; + } + } + + op2 = (PyArrayObject *)PyArray_CheckAxis(op, &axis, 0); + if (op2 == NULL) { + ret = NULL; + goto fail; + } + + ret = _new_argsortlike(op2, axis, argsort, strided_loop, + &context, auxdata, &method_flags, NULL, NULL, 0); + Py_DECREF(op2); + +fail: + if (argsort_method != NULL) { + NPY_AUXDATA_FREE(auxdata); + Py_DECREF(context.descriptors[0]); + Py_DECREF(context.descriptors[1]); + } + return ret; +} + + diff --git a/numpy/_core/src/multiarray/iterators.c b/numpy/_core/src/multiarray/iterators.c index 2806670d3e07..ae4797f59a86 100644 --- a/numpy/_core/src/multiarray/iterators.c +++ b/numpy/_core/src/multiarray/iterators.c @@ -23,83 +23,12 @@ #include "item_selection.h" #include "lowlevel_strided_loops.h" #include "array_assign.h" +#include "npy_pycompat.h" #define NEWAXIS_INDEX -1 #define ELLIPSIS_INDEX -2 #define SINGLE_INDEX -3 -/* - * Tries to convert 'o' into an npy_intp interpreted as an - * index. 
Returns 1 if it was successful, 0 otherwise. Does - * not set an exception. - */ -static int -coerce_index(PyObject *o, npy_intp *v) -{ - *v = PyArray_PyIntAsIntp(o); - - if ((*v) == -1 && PyErr_Occurred()) { - PyErr_Clear(); - return 0; - } - return 1; -} - -/* - * This function converts one element of the indexing tuple - * into a step size and a number of steps, returning the - * starting index. Non-slices are signalled in 'n_steps', - * as NEWAXIS_INDEX, ELLIPSIS_INDEX, or SINGLE_INDEX. - */ -NPY_NO_EXPORT npy_intp -parse_index_entry(PyObject *op, npy_intp *step_size, - npy_intp *n_steps, npy_intp max, - int axis, int check_index) -{ - npy_intp i; - - if (op == Py_None) { - *n_steps = NEWAXIS_INDEX; - i = 0; - } - else if (op == Py_Ellipsis) { - *n_steps = ELLIPSIS_INDEX; - i = 0; - } - else if (PySlice_Check(op)) { - npy_intp stop; - if (PySlice_GetIndicesEx(op, max, &i, &stop, step_size, n_steps) < 0) { - goto fail; - } - if (*n_steps <= 0) { - *n_steps = 0; - *step_size = 1; - i = 0; - } - } - else if (coerce_index(op, &i)) { - *n_steps = SINGLE_INDEX; - *step_size = 0; - if (check_index) { - if (check_and_adjust_index(&i, max, axis, NULL) < 0) { - goto fail; - } - } - } - else { - PyErr_SetString(PyExc_IndexError, - "each index entry must be either a " - "slice, an integer, Ellipsis, or " - "newaxis"); - goto fail; - } - return i; - - fail: - return -1; -} - - /*********************** Element-wise Array Iterator ***********************/ /* Aided by Peter J. 
Verveer's nd_image package and numpy's arraymap ****/ /* and Python's array iterator ***/ @@ -136,7 +65,6 @@ PyArray_RawIterBaseInit(PyArrayIterObject *it, PyArrayObject *ao) nd = PyArray_NDIM(ao); /* The legacy iterator only supports 32 dimensions */ assert(nd <= NPY_MAXDIMS_LEGACY_ITERS); - PyArray_UpdateFlags(ao, NPY_ARRAY_C_CONTIGUOUS); if (PyArray_ISCONTIGUOUS(ao)) { it->contiguous = 1; } @@ -427,7 +355,7 @@ iter_length(PyArrayIterObject *self) } -static PyArrayObject * +static PyObject * iter_subscript_Bool(PyArrayIterObject *self, PyArrayObject *ind, NPY_cast_info *cast_info) { @@ -484,7 +412,7 @@ iter_subscript_Bool(PyArrayIterObject *self, PyArrayObject *ind, } PyArray_ITER_RESET(self); } - return ret; + return (PyObject *) ret; } static PyObject * @@ -562,181 +490,154 @@ iter_subscript_int(PyArrayIterObject *self, PyArrayObject *ind, NPY_NO_EXPORT PyObject * iter_subscript(PyArrayIterObject *self, PyObject *ind) { - PyArray_Descr *indtype = NULL; - PyArray_Descr *dtype; - npy_intp start, step_size; - npy_intp n_steps; - PyArrayObject *ret; - char *dptr; - int size; - PyObject *obj = NULL; - PyObject *new; + PyObject *ret = NULL; + + int index_type; + int index_num = -1; + int ndim, fancy_ndim; + npy_intp start, stop, step, n_steps; + npy_index_info indices[NPY_MAXDIMS * 2 + 1]; + + PyArray_Descr *dtype = PyArray_DESCR(self->ao); + npy_intp dtype_size = dtype->elsize; NPY_cast_info cast_info = {.func = NULL}; - if (ind == Py_Ellipsis) { - ind = PySlice_New(NULL, NULL, NULL); - obj = iter_subscript(self, ind); - Py_DECREF(ind); - return obj; + /* Prepare the indices */ + index_type = prepare_index_noarray(1, &self->size, ind, indices, &index_num, + &ndim, &fancy_ndim, 1, 1); + + if (index_type < 0) { + return NULL; } - if (PyTuple_Check(ind)) { - int len; - len = PyTuple_GET_SIZE(ind); - if (len > 1) { - goto fail; - } - if (len == 0) { - Py_INCREF(self->ao); - return (PyObject *)self->ao; - } - ind = PyTuple_GET_ITEM(ind, 0); + + else if (indices[0].type 
== HAS_NEWAXIS) { + PyErr_SetString(PyExc_IndexError, + "only integers, slices (`:`), ellipsis (`...`) and integer or boolean " + "arrays are valid indices" + ); + goto finish; } - /* - * Tuples >1d not accepted --- i.e. no newaxis - * Could implement this with adjusted strides and dimensions in iterator - * Check for Boolean -- this is first because Bool is a subclass of Int - */ - PyArray_ITER_RESET(self); + // Single ellipsis index + else if (index_type == HAS_ELLIPSIS) { + ind = PySlice_New(NULL, NULL, NULL); + if (ind == NULL) { + goto finish; + } - if (PyBool_Check(ind)) { - int istrue = PyObject_IsTrue(ind); - if (istrue == -1) { - goto fail; + ret = iter_subscript(self, ind); + Py_DECREF(ind); + goto finish; + } + + // Single boolean index + else if (indices[0].type == HAS_0D_BOOL) { + /* Deprecated 2025-07, NumPy 2.4 */ + if (DEPRECATE("Indexing flat iterators with a 0-dimensional boolean index is deprecated " + "and may be removed in a future version. (Deprecated NumPy 2.4)") < 0) { + goto finish; } - if (istrue) { - return PyArray_ToScalar(self->dataptr, self->ao); + if (indices[0].value) { + ret = PyArray_ToScalar(self->dataptr, self->ao); + goto finish; } else { /* empty array */ npy_intp ii = 0; - dtype = PyArray_DESCR(self->ao); Py_INCREF(dtype); - ret = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(self->ao), - dtype, - 1, &ii, - NULL, NULL, 0, - (PyObject *)self->ao); - return (PyObject *)ret; + ret = PyArray_NewFromDescr(Py_TYPE(self->ao), + dtype, + 1, &ii, + NULL, NULL, 0, + (PyObject *)self->ao); + goto finish; } } - dtype = PyArray_DESCR(self->ao); - size = dtype->elsize; + PyArray_ITER_RESET(self); - /* set up a cast to handle item copying */ + if (index_type == HAS_INTEGER) { + if (check_and_adjust_index(&indices[0].value, self->size, -1, NULL) < 0) { + goto finish; + } + PyArray_ITER_GOTO1D(self, indices[0].value); + ret = PyArray_ToScalar(self->dataptr, self->ao); + PyArray_ITER_RESET(self); + goto finish; + } + /* set up a cast to handle 
item copying */ NPY_ARRAYMETHOD_FLAGS transfer_flags = 0; npy_intp one = 1; + /* We can assume the newly allocated output array is aligned */ int is_aligned = IsUintAligned(self->ao); if (PyArray_GetDTypeTransferFunction( - is_aligned, size, size, dtype, dtype, 0, &cast_info, + is_aligned, dtype_size, dtype_size, dtype, dtype, 0, &cast_info, &transfer_flags) < 0) { - goto fail; + goto finish; } - /* Check for Integer or Slice */ - if (PyLong_Check(ind) || PySlice_Check(ind)) { - start = parse_index_entry(ind, &step_size, &n_steps, - self->size, 0, 1); - if (start == -1) { - goto fail; - } - if (n_steps == ELLIPSIS_INDEX || n_steps == NEWAXIS_INDEX) { - PyErr_SetString(PyExc_IndexError, - "cannot use Ellipsis or newaxes here"); - goto fail; + if (index_type == HAS_SLICE) { + if (PySlice_GetIndicesEx(indices[0].object, + self->size, + &start, &stop, &step, &n_steps) < 0) { + goto finish; } + PyArray_ITER_GOTO1D(self, start); - if (n_steps == SINGLE_INDEX) { /* Integer */ - PyObject *tmp; - tmp = PyArray_ToScalar(self->dataptr, self->ao); - PyArray_ITER_RESET(self); - NPY_cast_info_xfree(&cast_info); - return tmp; - } Py_INCREF(dtype); - ret = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(self->ao), + ret = PyArray_NewFromDescr(Py_TYPE(self->ao), dtype, 1, &n_steps, NULL, NULL, 0, (PyObject *)self->ao); if (ret == NULL) { - goto fail; + goto finish; } - dptr = PyArray_DATA(ret); + + char *dptr = PyArray_DATA((PyArrayObject *) ret); while (n_steps--) { char *args[2] = {self->dataptr, dptr}; - npy_intp transfer_strides[2] = {size, size}; + npy_intp transfer_strides[2] = {dtype_size, dtype_size}; if (cast_info.func(&cast_info.context, args, &one, transfer_strides, cast_info.auxdata) < 0) { - goto fail; + goto finish; } - start += step_size; + start += step; PyArray_ITER_GOTO1D(self, start); - dptr += size; + dptr += dtype_size; } PyArray_ITER_RESET(self); - NPY_cast_info_xfree(&cast_info); - return (PyObject *)ret; - } - - /* convert to INTP array if Integer array scalar 
or List */ - indtype = PyArray_DescrFromType(NPY_INTP); - if (PyArray_IsScalar(ind, Integer) || PyList_Check(ind)) { - Py_INCREF(indtype); - obj = PyArray_FromAny(ind, indtype, 0, 0, NPY_ARRAY_FORCECAST, NULL); - if (obj == NULL) { - goto fail; - } - } - else { - Py_INCREF(ind); - obj = ind; - } - - /* Any remaining valid input is an array or has been turned into one */ - if (!PyArray_Check(obj)) { - goto fail; - } - - /* Check for Boolean array */ - if (PyArray_TYPE((PyArrayObject *)obj) == NPY_BOOL) { - ret = iter_subscript_Bool(self, (PyArrayObject *)obj, &cast_info); goto finish; } - /* Only integer arrays left */ - if (!PyArray_ISINTEGER((PyArrayObject *)obj)) { - goto fail; + if (index_type == HAS_BOOL) { + ret = iter_subscript_Bool(self, (PyArrayObject *) indices[0].object, &cast_info); + goto finish; } - Py_INCREF(indtype); - new = PyArray_FromAny(obj, indtype, 0, 0, - NPY_ARRAY_FORCECAST | NPY_ARRAY_ALIGNED, NULL); - if (new == NULL) { - goto fail; + if (index_type == HAS_FANCY) { + PyArray_Descr *indtype = PyArray_DescrFromType(NPY_INTP); + PyArrayObject *cast_array = (PyArrayObject *) + PyArray_FromArray((PyArrayObject *) indices[0].object, indtype, NPY_ARRAY_FORCECAST); + if (cast_array == NULL) { + goto finish; + } + ret = iter_subscript_int(self, cast_array, &cast_info); + Py_DECREF(cast_array); + goto finish; } - ret = (PyArrayObject *)iter_subscript_int(self, (PyArrayObject *)new, - &cast_info); - Py_DECREF(new); - finish: - Py_DECREF(indtype); - Py_DECREF(obj); + PyErr_SetString(PyExc_IndexError, + "only integers, slices (`:`), ellipsis (`...`) and integer or boolean " + "arrays are valid indices" + ); +finish: NPY_cast_info_xfree(&cast_info); - return (PyObject *)ret; - - fail: - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_IndexError, "unsupported iterator index"); + for (int i = 0; i < index_num; i++) { + Py_XDECREF(indices[i].object); } - Py_XDECREF(indtype); - Py_XDECREF(obj); - NPY_cast_info_xfree(&cast_info); - - return NULL; - + return 
ret; } @@ -844,140 +745,134 @@ iter_ass_sub_int(PyArrayIterObject *self, PyArrayObject *ind, NPY_NO_EXPORT int iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val) { - PyArrayObject *arrval = NULL; - PyArrayIterObject *val_it = NULL; - PyArray_Descr *type; - PyArray_Descr *indtype = NULL; - int retval = -1; - npy_intp start, step_size; - npy_intp n_steps; - PyObject *obj = NULL; - NPY_cast_info cast_info = {.func = NULL}; - if (val == NULL) { PyErr_SetString(PyExc_TypeError, "Cannot delete iterator elements"); return -1; } - if (PyArray_FailUnlessWriteable(self->ao, "underlying array") < 0) + if (PyArray_FailUnlessWriteable(self->ao, "underlying array") < 0) { return -1; + } - if (ind == Py_Ellipsis) { - ind = PySlice_New(NULL, NULL, NULL); - retval = iter_ass_subscript(self, ind, val); - Py_DECREF(ind); - return retval; + int ret = -1; + + int index_type; + int index_num = -1; + int ndim, fancy_ndim; + npy_intp start, stop, step, n_steps; + npy_index_info indices[NPY_MAXDIMS * 2 + 1]; + + PyArray_Descr *dtype = PyArray_DESCR(self->ao); + PyArrayObject *arrval = NULL; + PyArrayIterObject *val_it = NULL; + npy_intp dtype_size = dtype->elsize; + NPY_cast_info cast_info = {.func = NULL}; + + /* Prepare the indices */ + index_type = prepare_index_noarray(1, &self->size, ind, indices, &index_num, + &ndim, &fancy_ndim, 1, 1); + + if (index_type < 0) { + goto finish; + } + else if (indices[0].type == HAS_NEWAXIS) { + PyErr_SetString(PyExc_IndexError, + "only integers, slices (`:`), ellipsis (`...`) and integer or boolean " + "arrays are valid indices" + ); + goto finish; } - if (PyTuple_Check(ind)) { - int len; - len = PyTuple_GET_SIZE(ind); - if (len > 1) { + // Single ellipsis index + else if (index_type == HAS_ELLIPSIS) { + if (PyTuple_Check(ind)) { + PyErr_SetString(PyExc_IndexError, "Assigning to a flat iterator with a 0-D index is not supported"); goto finish; } - ind = PyTuple_GET_ITEM(ind, 0); - } - type = PyArray_DESCR(self->ao); + ind = 
PySlice_New(NULL, NULL, NULL); + if (ind == NULL) { + goto finish; + } - /* - * Check for Boolean -- this is first because - * Bool is a subclass of Int - */ - - if (PyBool_Check(ind)) { - retval = 0; - int istrue = PyObject_IsTrue(ind); - if (istrue == -1) { - return -1; + ret = iter_ass_subscript(self, ind, val); + Py_DECREF(ind); + goto finish; + } + + // Single boolean index + else if (indices[0].type == HAS_0D_BOOL) { + /* Deprecated 2025-07, NumPy 2.4 */ + if (DEPRECATE("Indexing flat iterators with a 0-dimensional boolean index is deprecated " + "and may be removed in a future version. (Deprecated NumPy 2.4)") < 0) { + goto finish; } - if (istrue) { - retval = PyArray_Pack( - PyArray_DESCR(self->ao), self->dataptr, val); + ret = 0; + if (indices[0].value) { + ret = PyArray_Pack(PyArray_DESCR(self->ao), self->dataptr, val); } goto finish; } - if (PySequence_Check(ind) || PySlice_Check(ind)) { - goto skip; - } - start = PyArray_PyIntAsIntp(ind); - if (error_converting(start)) { - PyErr_Clear(); - } - else { - if (check_and_adjust_index(&start, self->size, -1, NULL) < 0) { + PyArray_ITER_RESET(self); + + if (index_type == HAS_INTEGER) { + if (check_and_adjust_index(&indices[0].value, self->size, -1, NULL) < 0) { goto finish; } - PyArray_ITER_GOTO1D(self, start); - retval = PyArray_Pack(PyArray_DESCR(self->ao), self->dataptr, val); + PyArray_ITER_GOTO1D(self, indices[0].value); + ret = PyArray_Pack(PyArray_DESCR(self->ao), self->dataptr, val); PyArray_ITER_RESET(self); - if (retval < 0) { + if (ret < 0) { PyErr_SetString(PyExc_ValueError, "Error setting single item of array."); } goto finish; } - skip: - Py_INCREF(type); - arrval = (PyArrayObject *)PyArray_FromAny(val, type, 0, 0, + Py_INCREF(dtype); + arrval = (PyArrayObject *)PyArray_FromAny(val, dtype, 0, 0, NPY_ARRAY_FORCECAST, NULL); if (arrval == NULL) { - return -1; + goto finish; } val_it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)arrval); if (val_it == NULL) { goto finish; } if (val_it->size == 
0) { - retval = 0; + ret = 0; goto finish; } /* set up cast to handle single-element copies into arrval */ NPY_ARRAYMETHOD_FLAGS transfer_flags = 0; npy_intp one = 1; - int itemsize = type->elsize; /* We can assume the newly allocated array is aligned */ int is_aligned = IsUintAligned(self->ao); if (PyArray_GetDTypeTransferFunction( - is_aligned, itemsize, itemsize, type, type, 0, + is_aligned, dtype_size, dtype_size, PyArray_DESCR(arrval), dtype, 0, &cast_info, &transfer_flags) < 0) { goto finish; } - /* Check Slice */ - if (PySlice_Check(ind)) { - start = parse_index_entry(ind, &step_size, &n_steps, self->size, 0, 0); - if (start == -1) { - goto finish; - } - if (n_steps == ELLIPSIS_INDEX || n_steps == NEWAXIS_INDEX) { - PyErr_SetString(PyExc_IndexError, - "cannot use Ellipsis or newaxes here"); + if (index_type == HAS_SLICE) { + if (PySlice_GetIndicesEx(indices[0].object, + self->size, + &start, &stop, &step, &n_steps) < 0) { goto finish; } + PyArray_ITER_GOTO1D(self, start); - npy_intp transfer_strides[2] = {itemsize, itemsize}; - if (n_steps == SINGLE_INDEX) { - char *args[2] = {PyArray_DATA(arrval), self->dataptr}; - if (cast_info.func(&cast_info.context, args, &one, - transfer_strides, cast_info.auxdata) < 0) { - goto finish; - } - PyArray_ITER_RESET(self); - retval = 0; - goto finish; - } + npy_intp transfer_strides[2] = {dtype_size, dtype_size}; while (n_steps--) { char *args[2] = {val_it->dataptr, self->dataptr}; if (cast_info.func(&cast_info.context, args, &one, transfer_strides, cast_info.auxdata) < 0) { goto finish; } - start += step_size; + start += step; PyArray_ITER_GOTO1D(self, start); PyArray_ITER_NEXT(val_it); if (val_it->index == val_it->size) { @@ -985,60 +880,39 @@ iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val) } } PyArray_ITER_RESET(self); - retval = 0; + ret = 0; goto finish; } - /* convert to INTP array if Integer array scalar or List */ - indtype = PyArray_DescrFromType(NPY_INTP); - if (PyList_Check(ind)) { - 
Py_INCREF(indtype); - obj = PyArray_FromAny(ind, indtype, 0, 0, NPY_ARRAY_FORCECAST, NULL); - } - else { - Py_INCREF(ind); - obj = ind; + if (index_type == HAS_BOOL) { + ret = iter_ass_sub_Bool(self, (PyArrayObject *) indices[0].object, val_it, &cast_info); + goto finish; } - if (obj != NULL && PyArray_Check(obj)) { - /* Check for Boolean object */ - if (PyArray_TYPE((PyArrayObject *)obj)==NPY_BOOL) { - if (iter_ass_sub_Bool(self, (PyArrayObject *)obj, - val_it, &cast_info) < 0) { - goto finish; - } - retval=0; - } - /* Check for integer array */ - else if (PyArray_ISINTEGER((PyArrayObject *)obj)) { - PyObject *new; - Py_INCREF(indtype); - new = PyArray_CheckFromAny(obj, indtype, 0, 0, - NPY_ARRAY_FORCECAST | NPY_ARRAY_BEHAVED_NS, NULL); - Py_DECREF(obj); - obj = new; - if (new == NULL) { - goto finish; - } - if (iter_ass_sub_int(self, (PyArrayObject *)obj, - val_it, &cast_info) < 0) { - goto finish; - } - retval = 0; + if (index_type == HAS_FANCY) { + PyArray_Descr *indtype = PyArray_DescrFromType(NPY_INTP); + PyArrayObject *cast_array = (PyArrayObject *) + PyArray_FromArray((PyArrayObject *) indices[0].object, indtype, NPY_ARRAY_FORCECAST); + if (cast_array == NULL) { + goto finish; } + ret = iter_ass_sub_int(self, cast_array, val_it, &cast_info); + Py_DECREF(cast_array); + goto finish; } - finish: - if (!PyErr_Occurred() && retval < 0) { - PyErr_SetString(PyExc_IndexError, "unsupported iterator index"); + PyErr_SetString(PyExc_IndexError, + "only integers, slices (`:`), ellipsis (`...`) and integer or boolean " + "arrays are valid indices" + ); +finish: + NPY_cast_info_xfree(&cast_info); + for (int i = 0; i < index_num; i++) { + Py_XDECREF(indices[i].object); } - Py_XDECREF(indtype); - Py_XDECREF(obj); Py_XDECREF(val_it); Py_XDECREF(arrval); - NPY_cast_info_xfree(&cast_info); - return retval; - + return ret; } @@ -1463,6 +1337,7 @@ PyArray_MultiIterNew(int n, ...) 
return multiiter_new_impl(n, args_impl); } + static PyObject* arraymultiter_new(PyTypeObject *NPY_UNUSED(subtype), PyObject *args, PyObject *kwds) @@ -1475,18 +1350,19 @@ arraymultiter_new(PyTypeObject *NPY_UNUSED(subtype), PyObject *args, "keyword arguments not accepted."); return NULL; } - - fast_seq = PySequence_Fast(args, ""); // needed for pypy + fast_seq = PySequence_Fast(args, ""); // noqa: borrowed-ref OK if (fast_seq == NULL) { return NULL; } + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(args); n = PySequence_Fast_GET_SIZE(fast_seq); if (n > NPY_MAXARGS) { - Py_DECREF(fast_seq); - return multiiter_wrong_number_of_args(); + ret = multiiter_wrong_number_of_args(); + } else { + ret = multiiter_new_impl(n, PySequence_Fast_ITEMS(fast_seq)); } - ret = multiiter_new_impl(n, PySequence_Fast_ITEMS(fast_seq)); Py_DECREF(fast_seq); + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST(); return ret; } diff --git a/numpy/_core/src/multiarray/legacy_dtype_implementation.c b/numpy/_core/src/multiarray/legacy_dtype_implementation.c index 70b4fa1e49db..eee7ce492fab 100644 --- a/numpy/_core/src/multiarray/legacy_dtype_implementation.c +++ b/numpy/_core/src/multiarray/legacy_dtype_implementation.c @@ -320,8 +320,8 @@ can_cast_fields(PyObject *field1, PyObject *field2, NPY_CASTING casting) /* Iterate over all the fields and compare for castability */ ppos = 0; - while (PyDict_Next(field1, &ppos, &key, &tuple1)) { - if ((tuple2 = PyDict_GetItem(field2, key)) == NULL) { + while (PyDict_Next(field1, &ppos, &key, &tuple1)) { // noqa: borrowed-ref OK + if ((tuple2 = PyDict_GetItem(field2, key)) == NULL) { // noqa: borrowed-ref OK return 0; } /* Compare the dtype of the field for castability */ @@ -367,12 +367,12 @@ PyArray_LegacyCanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to, * field; recurse just in case the single field is itself structured. 
*/ if (!PyDataType_HASFIELDS(to) && !PyDataType_ISOBJECT(to)) { - if (casting == NPY_UNSAFE_CASTING && + if ((casting == NPY_UNSAFE_CASTING || ((casting & NPY_SAME_VALUE_CASTING_FLAG) > 0)) && PyDict_Size(lfrom->fields) == 1) { Py_ssize_t ppos = 0; PyObject *tuple; PyArray_Descr *field; - PyDict_Next(lfrom->fields, &ppos, NULL, &tuple); + PyDict_Next(lfrom->fields, &ppos, NULL, &tuple); // noqa: borrowed-ref OK field = (PyArray_Descr *)PyTuple_GET_ITEM(tuple, 0); /* * For a subarray, we need to get the underlying type; @@ -399,7 +399,7 @@ PyArray_LegacyCanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to, * casting; this is not correct, but needed since the treatment in can_cast * below got out of sync with astype; see gh-13667. */ - if (casting == NPY_UNSAFE_CASTING) { + if (casting == NPY_UNSAFE_CASTING || (casting & NPY_SAME_VALUE_CASTING_FLAG) > 0) { return 1; } } @@ -408,14 +408,14 @@ PyArray_LegacyCanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to, * If "from" is a simple data type and "to" has fields, then only * unsafe casting works (and that works always, even to multiple fields). */ - return casting == NPY_UNSAFE_CASTING; + return (casting == NPY_UNSAFE_CASTING || (casting & NPY_SAME_VALUE_CASTING_FLAG) > 0); } /* * Everything else we consider castable for unsafe for now. * FIXME: ensure what we do here is consistent with "astype", * i.e., deal more correctly with subarrays and user-defined dtype. 
*/ - else if (casting == NPY_UNSAFE_CASTING) { + else if (casting == NPY_UNSAFE_CASTING || (casting & NPY_SAME_VALUE_CASTING_FLAG) > 0) { return 1; } /* diff --git a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src index 1299e55b4258..050207ea188c 100644 --- a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src +++ b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src @@ -17,6 +17,7 @@ #include #include #include +#include #include "lowlevel_strided_loops.h" #include "array_assign.h" @@ -24,6 +25,7 @@ #include "usertypes.h" #include "umathmodule.h" +#include "gil_utils.h" /* * x86 platform works with unaligned access but the compiler is allowed to @@ -33,11 +35,7 @@ * instructions (16 byte). * So this flag can only be enabled if autovectorization is disabled. */ -#if NPY_ALIGNMENT_REQUIRED -# define NPY_USE_UNALIGNED_ACCESS 0 -#else -# define NPY_USE_UNALIGNED_ACCESS 0 -#endif +#define NPY_USE_UNALIGNED_ACCESS 0 #define _NPY_NOP1(x) (x) #define _NPY_NOP2(x) (x) @@ -708,6 +706,69 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * /************* STRIDED CASTING SPECIALIZED FUNCTIONS *************/ +#if defined(NPY_HAVE_NEON_FP16) + #define EMULATED_FP16 0 + #define NATIVE_FP16 1 + typedef _Float16 _npy_half; +#else + #define EMULATED_FP16 1 + #define NATIVE_FP16 0 + typedef npy_half _npy_half; +#endif + +#if EMULATED_FP16 +/* half-to-double, copied from CPP to allow inlining */ + +static NPY_GCC_OPT_3 +uint64_t ToDoubleBits(uint16_t h) +{ + uint16_t h_exp = (h&0x7c00u); + uint64_t d_sgn = ((uint64_t)h&0x8000u) << 48; + switch (h_exp) { + case 0x0000u: { // 0 or subnormal + uint16_t h_sig = (h&0x03ffu); + // Signed zero + if (h_sig == 0) { + return d_sgn; + } + // Subnormal + h_sig <<= 1; + while ((h_sig&0x0400u) == 0) { + h_sig <<= 1; + h_exp++; + } + uint64_t d_exp = ((uint64_t)(1023 - 15 - h_exp)) << 52; + uint64_t d_sig = ((uint64_t)(h_sig&0x03ffu)) << 42; + return d_sgn + d_exp + d_sig; + } + case 
0x7c00u: // inf or NaN + // All-ones exponent and a copy of the significand + return d_sgn + 0x7ff0000000000000ULL + (((uint64_t)(h&0x03ffu)) << 42); + default: // normalized + // Just need to adjust the exponent and shift + return d_sgn + (((uint64_t)(h&0x7fffu) + 0xfc000u) << 42); + } +} + +NPY_FINLINE +npy_uint64 _npy_halfbits_to_doublebits(npy_uint16 h){ + /* + * Use npymath versions for all the special cases, only inline the + * x86_64 non-intrinsic case. Someday we will rewrite this in CPP and + * can then explore inlining more + */ + #if defined(NPY_HAVE_AVX512FP16) + return npy_halfbits_to_doublebits(h); + #elif defined(NPY_HAVE_VSX3) && defined(NPY_HAVE_VSX3_HALF_DOUBLE) + return npy_halfbits_to_doublebits(h); + #elif defined(__ARM_FP16_FORMAT_IEEE) + return npy_halfbits_to_doublebits(h); + #else + return ToDoubleBits(h); + #endif +} +#endif + /**begin repeat * * #NAME1 = BOOL, @@ -723,18 +784,20 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * * #type1 = npy_bool, * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong, * npy_byte, npy_short, npy_int, npy_long, npy_longlong, - * npy_half, npy_float, npy_double, npy_longdouble, + * _npy_half, npy_float, npy_double, npy_longdouble, * npy_cfloat, npy_cdouble, npy_clongdouble# * #rtype1 = npy_bool, * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong, * npy_byte, npy_short, npy_int, npy_long, npy_longlong, - * npy_half, npy_float, npy_double, npy_longdouble, + * _npy_half, npy_float, npy_double, npy_longdouble, * npy_float, npy_double, npy_longdouble# * #is_bool1 = 1, 0*17# - * #is_half1 = 0*11, 1, 0*6# + * #is_emu_half1 = 0*11, EMULATED_FP16, 0*6# + * #is_native_half1 = 0*11, NATIVE_FP16, 0*6# * #is_float1 = 0*12, 1, 0, 0, 1, 0, 0# * #is_double1 = 0*13, 1, 0, 0, 1, 0# * #is_complex1 = 0*15, 1*3# + * #is_unsigned1 = 1*6, 0*12# */ /**begin repeat1 @@ -752,15 +815,26 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * * #type2 = npy_bool, * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong, * npy_byte, 
npy_short, npy_int, npy_long, npy_longlong, - * npy_half, npy_float, npy_double, npy_longdouble, + * _npy_half, npy_float, npy_double, npy_longdouble, * npy_cfloat, npy_cdouble, npy_clongdouble# * #rtype2 = npy_bool, * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong, * npy_byte, npy_short, npy_int, npy_long, npy_longlong, - * npy_half, npy_float, npy_double, npy_longdouble, + * _npy_half, npy_float, npy_double, npy_longdouble, * npy_float, npy_double, npy_longdouble# + * #type2max = 0, + * UCHAR_MAX, USHRT_MAX, UINT_MAX, ULONG_MAX, ULLONG_MAX, + * SCHAR_MAX, SHRT_MAX, INT_MAX, LONG_MAX, LLONG_MAX, + * 65500.0f, FLT_MAX, DBL_MAX, LDBL_MAX, + * FLT_MAX, DBL_MAX, LDBL_MAX# + * #type2min = 0, + * 0, 0, 0, 0, 0, + * SCHAR_MIN, SHRT_MIN, INT_MIN, LONG_MIN, LLONG_MIN, + * -65500.0f, -FLT_MAX, -DBL_MAX, -LDBL_MAX, + * -FLT_MAX, -DBL_MAX, -LDBL_MAX# * #is_bool2 = 1, 0*17# - * #is_half2 = 0*11, 1, 0*6# + * #is_emu_half2 = 0*11, EMULATED_FP16, 0*6# + * #is_native_half2 = 0*11, NATIVE_FP16, 0*6# * #is_float2 = 0*12, 1, 0, 0, 1, 0, 0# * #is_double2 = 0*13, 1, 0, 0, 1, 0# * #is_complex2 = 0*15, 1*3# @@ -774,8 +848,11 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * #if !(NPY_USE_UNALIGNED_ACCESS && !@aligned@) -/* For half types, don't use actual double/float types in conversion */ -#if @is_half1@ || @is_half2@ +/* + * For emulated half types, don't use actual double/float types in conversion + * except for *_check_same_value_*(), follow _ROUND_TRIP and _TO_RTYPE1. 
+ */ +#if @is_emu_half1@ || @is_emu_half2@ # if @is_float1@ # define _TYPE1 npy_uint32 @@ -801,46 +878,182 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * #endif /* Determine an appropriate casting conversion function */ -#if @is_half1@ - +#if @is_emu_half1@ +# define _TO_RTYPE1(x) npy_half_to_float(x) # if @is_float2@ # define _CONVERT_FN(x) npy_halfbits_to_floatbits(x) +# define _ROUND_TRIP(x) npy_floatbits_to_halfbits(_CONVERT_FN(x)) # elif @is_double2@ -# define _CONVERT_FN(x) npy_halfbits_to_doublebits(x) -# elif @is_half2@ +# define _CONVERT_FN(x) _npy_halfbits_to_doublebits(x) +# define _ROUND_TRIP(x) npy_doublebits_to_halfbits(_CONVERT_FN(x)) +# elif @is_emu_half2@ # define _CONVERT_FN(x) (x) +# define _ROUND_TRIP(x) (x) # elif @is_bool2@ # define _CONVERT_FN(x) ((npy_bool)!npy_half_iszero(x)) +# define _ROUND_TRIP(x) npy_float_to_half((float)(!npy_half_iszero(x))) # else # define _CONVERT_FN(x) ((_TYPE2)npy_half_to_float(x)) +# define _ROUND_TRIP(x) npy_float_to_half((float)_CONVERT_FN(x)) # endif -#elif @is_half2@ - +#elif @is_emu_half2@ +# define _TO_RTYPE1(x) (@rtype1@)(x) # if @is_float1@ # define _CONVERT_FN(x) npy_floatbits_to_halfbits(x) +# define _ROUND_TRIP(x) npy_half_to_float(npy_float_to_half(x)) # elif @is_double1@ # define _CONVERT_FN(x) npy_doublebits_to_halfbits(x) -# elif @is_half1@ +# define _ROUND_TRIP(x) npy_half_to_double(npy_double_to_half(x)) +# elif @is_emu_half1@ # define _CONVERT_FN(x) (x) +# define _ROUND_TRIP(x) (x) # elif @is_bool1@ # define _CONVERT_FN(x) npy_float_to_half((float)(x!=0)) +# define _ROUND_TRIP(x) (x) # else # define _CONVERT_FN(x) npy_float_to_half((float)x) +# define _ROUND_TRIP(x) ((@rtype1@)npy_half_to_float(_CONVERT_FN(x))) # endif #else - # if @is_bool2@ || @is_bool1@ # define _CONVERT_FN(x) ((npy_bool)(x != 0)) # else # define _CONVERT_FN(x) ((_TYPE2)x) # endif +# define _TO_RTYPE1(x) (@rtype1@)(x) +# define _ROUND_TRIP(x) _TO_RTYPE1(_CONVERT_FN(x)) #endif -static NPY_GCC_OPT_3 int +// Enable 
auto-vectorization for floating point casts with clang +#if @is_native_half1@ || @is_float1@ || @is_double1@ + #if @is_native_half2@ || @is_float2@ || @is_double2@ + #if defined(__clang__) && !defined(__EMSCRIPTEN__) && !defined(__wasm__) + #if __clang_major__ >= 12 + _Pragma("clang fp exceptions(ignore)") + #endif + #endif + #endif +#endif + +// Work around GCC bug for double->half casts. For SVE and +// OPT_LEVEL > 1, it implements this as double->single->half +// which is incorrect as it introduces double rounding with +// narrowing casts. +#if (@is_double1@ && @is_native_half2@) && \ + defined(NPY_HAVE_SVE) && defined(__GNUC__) + #define GCC_CAST_OPT_LEVEL __attribute__((optimize("O1"))) +#else + #define GCC_CAST_OPT_LEVEL NPY_GCC_OPT_3 +#endif + +#define _RETURN_SAME_VALUE_FAILURE \ + npy_gil_error(PyExc_ValueError, "could not cast 'same_value' @name1@ to @name2@"); \ + return -1 + +#if !@is_bool2@ +/* + * Check various modes of failure to accurately cast src_value to dst + */ +static GCC_CAST_OPT_LEVEL int +@prefix@_check_same_value_@name1@_to_@name2@(@rtype1@ *src_valueP) { + + @rtype1@ src_value = *src_valueP; + + /* 1. NaN/Infs always work for float to float and otherwise never */ +#if (@is_float1@ || @is_emu_half1@ || @is_double1@ || @is_native_half1@) + if (!npy_isfinite(_TO_RTYPE1(src_value))) { +# if (@is_float2@ || @is_emu_half2@ || @is_double2@ || @is_native_half2@) + return 0; /* float to float can preserve NaN/Inf */ +# else + _RETURN_SAME_VALUE_FAILURE; /* cannot preserve NaN/Inf */ +# endif + } +#endif + /* + * 2. Check that the src does not overflow the dst. 
+ * This is complicated by a warning that, for instance, int8 cannot + * overflow int64max + */ +# ifdef __GNUC__ +# pragma GCC diagnostic push +# ifdef __clang__ +# pragma GCC diagnostic ignored "-Wtautological-constant-out-of-range-compare" +# endif +# pragma GCC diagnostic ignored "-Wtautological-compare" +# endif +# if !@is_bool1@ + if (_TO_RTYPE1(src_value) > @type2max@) { + _RETURN_SAME_VALUE_FAILURE; + } +# if !@is_unsigned1@ + if (_TO_RTYPE1(src_value) < @type2min@) { + _RETURN_SAME_VALUE_FAILURE; + } +# endif +# endif /* !is_bool1 */ + /* 3. Check that the value can round trip exactly */ + if (src_value != _ROUND_TRIP(src_value)) { + _RETURN_SAME_VALUE_FAILURE; + } +# ifdef __GNUC__ +# pragma GCC diagnostic pop +# endif /* __GNUC__ */ + return 0; +} +#endif + +/* + * Use a declaration instead of moving the function definition to here to make reviewing + * easier. TODO: move the repeat3 up here instead of these declarations + */ + +static GCC_CAST_OPT_LEVEL int +@prefix@_cast_@name1@_to_@name2@_no_same_value( + PyArrayMethod_Context *context, char *const *args, + const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *NPY_UNUSED(data)); + +#if !@is_bool2@ +static GCC_CAST_OPT_LEVEL int +@prefix@_cast_@name1@_to_@name2@_same_value( + PyArrayMethod_Context *context, char *const *args, + const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *NPY_UNUSED(data)); +#endif + +/* + * This is the entry point function called outside this file + */ + +static GCC_CAST_OPT_LEVEL int @prefix@_cast_@name1@_to_@name2@( + PyArrayMethod_Context *context, char *const *args, + const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *data) +{ +#if !@is_bool2@ + int same_value_casting = ((context->flags & NPY_SAME_VALUE_CONTEXT_FLAG) == NPY_SAME_VALUE_CONTEXT_FLAG); + if (same_value_casting) { + return @prefix@_cast_@name1@_to_@name2@_same_value(context, args, dimensions, strides, data); + } else { +#else + { +#endif + return 
@prefix@_cast_@name1@_to_@name2@_no_same_value(context, args, dimensions, strides, data); +}} + +/**begin repeat3 + * #func_name = no_same_value,same_value# + * #same_value = 0,1# + */ + + +#if !(@is_bool2@ && @same_value@) +static GCC_CAST_OPT_LEVEL int +@prefix@_cast_@name1@_to_@name2@_@func_name@( PyArrayMethod_Context *context, char *const *args, const npy_intp *dimensions, const npy_intp *strides, NpyAuxData *NPY_UNUSED(data)) @@ -868,7 +1081,7 @@ static NPY_GCC_OPT_3 int assert(N == 0 || npy_is_aligned(dst, NPY_ALIGNOF(_TYPE2))); #endif - /*printf("@prefix@_cast_@name1@_to_@name2@\n");*/ + /* printf("@prefix@_cast_@name1@_to_@name2@_@func_name@, N=%ld\n", N); */ while (N--) { #if @aligned@ @@ -885,31 +1098,81 @@ static NPY_GCC_OPT_3 int # if @is_complex2@ dst_value[0] = _CONVERT_FN(src_value[0]); dst_value[1] = _CONVERT_FN(src_value[1]); -# elif !@aligned@ +# if @same_value@ + if (@prefix@_check_same_value_@name1@_to_@name2@((@rtype1@ *)&src_value[0]) < 0) { + return -1; + } + if (@prefix@_check_same_value_@name1@_to_@name2@((@rtype1@ *)&src_value[1]) < 0) { + return -1; + } +# endif //same_value +# elif !@aligned@ # if @is_bool2@ dst_value = _CONVERT_FN(src_value[0]) || _CONVERT_FN(src_value[1]); # else dst_value = _CONVERT_FN(src_value[0]); +# if @same_value@ + if (@prefix@_check_same_value_@name1@_to_@name2@((@rtype1@ *)&src_value[0]) < 0) { + return -1; + } + if (src_value[1] != 0) { + npy_gil_error(PyExc_ValueError, "could not cast 'same_value' @name1@ to @name2@: imag is not 0"); + return -1; + } +# endif //same_value # endif # else # if @is_bool2@ *(_TYPE2 *)dst = _CONVERT_FN(src_value[0]) || _CONVERT_FN(src_value[1]); # else *(_TYPE2 *)dst = _CONVERT_FN(src_value[0]); +# if @same_value@ + if (@prefix@_check_same_value_@name1@_to_@name2@((@rtype1@ *)&src_value[0]) < 0) { + return -1; + } + if (src_value[1] != 0) { + npy_gil_error(PyExc_ValueError, "could not cast 'same_value' @name1@ to @name2@: imag is not 0"); + return -1; + } +# endif //same_value # 
endif # endif -#else +#else // @is_complex1@ # if @is_complex2@ # if !@aligned@ dst_value[0] = _CONVERT_FN(src_value); -# else +# if @same_value@ + if (@prefix@_check_same_value_@name1@_to_@name2@((@rtype1@ *)&src_value) < 0) { + return -1; + } +# endif //same_value +# else //!aligned dst_value[0] = _CONVERT_FN(*(_TYPE1 *)src); +# if @same_value@ + if (@prefix@_check_same_value_@name1@_to_@name2@((@rtype1@ *)src) < 0) { + return -1; + } +# endif //same_value # endif dst_value[1] = 0; # elif !@aligned@ dst_value = _CONVERT_FN(src_value); +# if !@is_bool2@ +# if @same_value@ + if (@prefix@_check_same_value_@name1@_to_@name2@((@rtype1@ *)&src_value) < 0) { + return -1; + } +# endif //same_value +# endif // @is_bool2@ # else *(_TYPE2 *)dst = _CONVERT_FN(*(_TYPE1 *)src); +# if !@is_bool2@ +# if @same_value@ + if (@prefix@_check_same_value_@name1@_to_@name2@(((@rtype1@ *)src)) < 0) { + return -1; + } +# endif //same_value +# endif // @is_bool2@ # endif #endif @@ -932,10 +1195,27 @@ static NPY_GCC_OPT_3 int } return 0; } +#endif // !@is_bool2@ + +/**end repeat3**/ + +#if @is_native_half1@ || @is_float1@ || @is_double1@ + #if @is_native_half2@ || @is_float2@ || @is_double2@ + #if defined(__clang__) && !defined(__EMSCRIPTEN__) && !defined(__wasm__) + #if __clang_major__ >= 12 + _Pragma("clang fp exceptions(strict)") + #endif + #endif + #endif +#endif +#undef GCC_CAST_OPT_LEVEL #undef _CONVERT_FN #undef _TYPE2 #undef _TYPE1 +#undef _TO_RTYPE1 +#undef _ROUND_TRIP +#undef _RETURN_SAME_VALUE_FAILURE #endif diff --git a/numpy/_core/src/multiarray/mapping.c b/numpy/_core/src/multiarray/mapping.c index 1861241a040e..5a8ec64664ac 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -33,23 +33,6 @@ #include "umathmodule.h" -#define HAS_INTEGER 1 -#define HAS_NEWAXIS 2 -#define HAS_SLICE 4 -#define HAS_ELLIPSIS 8 -/* HAS_FANCY can be mixed with HAS_0D_BOOL, be careful when to use & or == */ -#define HAS_FANCY 16 -#define HAS_BOOL 32 -/* 
NOTE: Only set if it is neither fancy nor purely integer index! */ -#define HAS_SCALAR_ARRAY 64 -/* - * Indicate that this is a fancy index that comes from a 0d boolean. - * This means that the index does not operate along a real axis. The - * corresponding index type is just HAS_FANCY. - */ -#define HAS_0D_BOOL (HAS_FANCY | 128) - - static int _nonzero_indices(PyObject *myBool, PyArrayObject **arrays); @@ -263,20 +246,22 @@ unpack_indices(PyObject *index, PyObject **result, npy_intp result_n) * * Checks everything but the bounds. * - * @param the array being indexed - * @param the index object - * @param index info struct being filled (size of NPY_MAXDIMS * 2 + 1) - * @param number of indices found - * @param dimension of the indexing result - * @param dimension of the fancy/advanced indices part - * @param whether to allow the boolean special case + * @param array_ndims The number of dimensions of the array being indexed (1 for iterators) + * @param array_dims The dimensions of the array being indexed (self->size for iterators) + * @param index the index object + * @param indices index info struct being filled (size of NPY_MAXDIMS * 2 + 1) + * @param num number of indices found + * @param ndim dimension of the indexing result + * @param out_fancy_ndim dimension of the fancy/advanced indices part + * @param allow_boolean whether to allow the boolean special case + * @param is_flatiter_object Whether the object indexed is an iterator * * @returns the index_type or -1 on failure and fills the number of indices. 
*/ NPY_NO_EXPORT int -prepare_index(PyArrayObject *self, PyObject *index, - npy_index_info *indices, - int *num, int *ndim, int *out_fancy_ndim, int allow_boolean) +prepare_index_noarray(int array_ndims, npy_intp *array_dims, PyObject *index, + npy_index_info *indices, int *num, int *ndim, int *out_fancy_ndim, + int allow_boolean, int is_flatiter_object) { int new_ndim, fancy_ndim, used_ndim, index_ndim; int curr_idx, get_idx; @@ -285,7 +270,7 @@ prepare_index(PyArrayObject *self, PyObject *index, npy_intp n; PyObject *obj = NULL; - PyArrayObject *arr; + PyArrayObject *arr = NULL; // free'd on error use Py_CLEAR to decref. int index_type = 0; int ellipsis_pos = -1; @@ -314,8 +299,8 @@ prepare_index(PyArrayObject *self, PyObject *index, while (get_idx < index_ndim) { if (curr_idx > NPY_MAXDIMS * 2) { - PyErr_SetString(PyExc_IndexError, - "too many indices for array"); + PyErr_Format(PyExc_IndexError, + "too many indices for %s", is_flatiter_object ? "flat iterator" : "array"); goto failed_building_indices; } @@ -377,7 +362,7 @@ prepare_index(PyArrayObject *self, PyObject *index, * Since this is always an error if it was not a boolean, we can * allow the 0-d special case before the rest. */ - else if (PyArray_NDIM(self) != 0) { + else if (array_ndims != 0) { /* * Single integer index, there are two cases here. * It could be an array, a 0-d array is handled @@ -418,17 +403,55 @@ prepare_index(PyArrayObject *self, PyObject *index, goto failed_building_indices; } + // We raise here because we changed the behavior for boolean + // indices for flat iterators from being handled as integers + // to being regular boolean indices. + // TODO: This should go away fairly soon and lists of booleans + // should be handled as regular boolean indices. 
+ if (is_flatiter_object && PyArray_ISBOOL(tmp_arr) && !PyBool_Check(index)) { + Py_DECREF(tmp_arr); + PyErr_Format(PyExc_IndexError, + "boolean indices for iterators are not supported because " + "of previous behavior that was confusing (valid boolean " + "indices are expected to work in the future)" + ); + goto failed_building_indices; + } + /* * For example an empty list can be cast to an integer array, * however it will default to a float one. */ - if (PyArray_SIZE(tmp_arr) == 0) { - PyArray_Descr *indtype = PyArray_DescrFromType(NPY_INTP); + if (PyArray_SIZE(tmp_arr) == 0 + || (is_flatiter_object && !PyArray_ISINTEGER(tmp_arr) && !PyArray_ISBOOL(tmp_arr))) { + PyArray_Descr *indtype = PyArray_DescrFromType(NPY_INTP); arr = (PyArrayObject *)PyArray_FromArray(tmp_arr, indtype, NPY_ARRAY_FORCECAST); + + // If the cast succeeded (which means that the previous flat iterator + // indexing routine would have succeeded as well), we need to issue a + // deprecation warning. + if (arr + && is_flatiter_object + && PyArray_SIZE(tmp_arr) != 0 + && !PyArray_ISINTEGER(tmp_arr) + && !PyArray_ISBOOL(tmp_arr) + && DEPRECATE("Invalid non-array indices for iterator objects are deprecated and will be " + "removed in a future version. (Deprecated NumPy 2.4)") < 0) { + Py_DECREF(tmp_arr); + goto failed_building_indices; + } Py_DECREF(tmp_arr); if (arr == NULL) { + // Raise a helpful error if this was a ValueError (i.e. could not cast) + if (PyErr_ExceptionMatches(PyExc_ValueError)) { + PyErr_Format(PyExc_IndexError, + "only integers, slices (`:`), ellipsis (`...`)%s and integer or boolean " + "arrays are valid indices", + is_flatiter_object ? "" : ", numpy.newaxis (`None`)" + ); + } goto failed_building_indices; } } @@ -458,18 +481,19 @@ prepare_index(PyArrayObject *self, PyObject *index, * this is always an error. The check ensures that these errors are raised * and match those of the generic path. 
*/ - if ((PyArray_NDIM(arr) == PyArray_NDIM(self)) + if ((PyArray_NDIM(arr) == array_ndims) && PyArray_CompareLists(PyArray_DIMS(arr), - PyArray_DIMS(self), + array_dims, PyArray_NDIM(arr))) { index_type = HAS_BOOL; indices[curr_idx].type = HAS_BOOL; indices[curr_idx].object = (PyObject *)arr; + arr = NULL; // Reference moved, clean up for error path. /* keep track anyway, just to be complete */ - used_ndim = PyArray_NDIM(self); - fancy_ndim = PyArray_NDIM(self); + used_ndim = array_ndims; + fancy_ndim = array_ndims; curr_idx += 1; break; } @@ -500,7 +524,7 @@ prepare_index(PyArrayObject *self, PyObject *index, indices[curr_idx].value = n; indices[curr_idx].object = PyArray_Zeros(1, &n, PyArray_DescrFromType(NPY_INTP), 0); - Py_DECREF(arr); + Py_CLEAR(arr); if (indices[curr_idx].object == NULL) { goto failed_building_indices; @@ -518,18 +542,16 @@ prepare_index(PyArrayObject *self, PyObject *index, n = _nonzero_indices((PyObject *)arr, nonzero_result); if (n < 0) { - Py_DECREF(arr); goto failed_building_indices; } /* Check that we will not run out of indices to store new ones */ if (curr_idx + n >= NPY_MAXDIMS * 2) { - PyErr_SetString(PyExc_IndexError, - "too many indices for array"); + PyErr_Format(PyExc_IndexError, + "too many indices for %s", is_flatiter_object ? 
"flat iterator" : "array"); for (i=0; i < n; i++) { Py_DECREF(nonzero_result[i]); } - Py_DECREF(arr); goto failed_building_indices; } @@ -543,7 +565,7 @@ prepare_index(PyArrayObject *self, PyObject *index, used_ndim += 1; curr_idx += 1; } - Py_DECREF(arr); + Py_CLEAR(arr); /* All added indices have 1 dimension */ if (fancy_ndim < 1) { @@ -564,7 +586,7 @@ prepare_index(PyArrayObject *self, PyObject *index, */ npy_intp ind = PyArray_PyIntAsIntp((PyObject *)arr); - Py_DECREF(arr); + Py_CLEAR(arr); if (error_converting(ind)) { goto failed_building_indices; } @@ -580,15 +602,17 @@ prepare_index(PyArrayObject *self, PyObject *index, } } + if (fancy_ndim < PyArray_NDIM(arr)) { + fancy_ndim = PyArray_NDIM(arr); + } + index_type |= HAS_FANCY; indices[curr_idx].type = HAS_FANCY; indices[curr_idx].value = -1; indices[curr_idx].object = (PyObject *)arr; + arr = NULL; // Reference moved, clean up for error path. used_ndim += 1; - if (fancy_ndim < PyArray_NDIM(arr)) { - fancy_ndim = PyArray_NDIM(arr); - } curr_idx += 1; continue; } @@ -603,12 +627,12 @@ prepare_index(PyArrayObject *self, PyObject *index, } else { /* The input was not an array, so give a general error message */ - PyErr_SetString(PyExc_IndexError, - "only integers, slices (`:`), ellipsis (`...`), " - "numpy.newaxis (`None`) and integer or boolean " - "arrays are valid indices"); + PyErr_Format(PyExc_IndexError, + "only integers, slices (`:`), ellipsis (`...`)%s and integer or boolean " + "arrays are valid indices", + is_flatiter_object ? "" : ", numpy.newaxis (`None`)" + ); } - Py_DECREF(arr); goto failed_building_indices; } @@ -616,10 +640,10 @@ prepare_index(PyArrayObject *self, PyObject *index, * Compare dimension of the index to the real ndim. this is * to find the ellipsis value or append an ellipsis if necessary. 
*/ - if (used_ndim < PyArray_NDIM(self)) { + if (used_ndim < array_ndims) { if (index_type & HAS_ELLIPSIS) { - indices[ellipsis_pos].value = PyArray_NDIM(self) - used_ndim; - used_ndim = PyArray_NDIM(self); + indices[ellipsis_pos].value = array_ndims - used_ndim; + used_ndim = array_ndims; new_ndim += indices[ellipsis_pos].value; } else { @@ -630,19 +654,21 @@ prepare_index(PyArrayObject *self, PyObject *index, index_type |= HAS_ELLIPSIS; indices[curr_idx].object = NULL; indices[curr_idx].type = HAS_ELLIPSIS; - indices[curr_idx].value = PyArray_NDIM(self) - used_ndim; + indices[curr_idx].value = array_ndims - used_ndim; ellipsis_pos = curr_idx; - used_ndim = PyArray_NDIM(self); + used_ndim = array_ndims; new_ndim += indices[curr_idx].value; curr_idx += 1; } } - else if (used_ndim > PyArray_NDIM(self)) { + else if (used_ndim > array_ndims) { PyErr_Format(PyExc_IndexError, - "too many indices for array: " - "array is %d-dimensional, but %d were indexed", - PyArray_NDIM(self), + "too many indices for %s: " + "%s is %d-dimensional, but %d were indexed", + is_flatiter_object ? "flat iterator" : "array", + is_flatiter_object ? "flat iterator" : "array", + array_ndims, used_ndim); goto failed_building_indices; } @@ -697,14 +723,15 @@ prepare_index(PyArrayObject *self, PyObject *index, used_ndim = 0; for (i = 0; i < curr_idx; i++) { if ((indices[i].type == HAS_FANCY) && indices[i].value > 0) { - if (indices[i].value != PyArray_DIM(self, used_ndim)) { + if (indices[i].value != array_dims[used_ndim]) { char err_msg[174]; PyOS_snprintf(err_msg, sizeof(err_msg), - "boolean index did not match indexed array along " + "boolean index did not match indexed %s along " "axis %d; size of axis is %" NPY_INTP_FMT " but size of corresponding boolean axis is %" NPY_INTP_FMT, - used_ndim, PyArray_DIM(self, used_ndim), + is_flatiter_object ? 
"flat iterator" : "array", + used_ndim, array_dims[used_ndim], indices[i].value); PyErr_SetString(PyExc_IndexError, err_msg); goto failed_building_indices; @@ -733,6 +760,7 @@ prepare_index(PyArrayObject *self, PyObject *index, return index_type; failed_building_indices: + Py_XDECREF(arr); for (i=0; i < curr_idx; i++) { Py_XDECREF(indices[i].object); } @@ -740,6 +768,16 @@ prepare_index(PyArrayObject *self, PyObject *index, return -1; } +NPY_NO_EXPORT int +prepare_index(PyArrayObject *self, PyObject *index, + npy_index_info *indices, + int *num, int *ndim, int *out_fancy_ndim, int allow_boolean) +{ + return prepare_index_noarray(PyArray_NDIM(self), PyArray_DIMS(self), + index, indices, num, ndim, out_fancy_ndim, + allow_boolean, 0); +} + /** * Check if self has memory overlap with one of the index arrays, or with extra_op. @@ -782,10 +820,10 @@ index_has_memory_overlap(PyArrayObject *self, * The caller must ensure that the index is a full integer * one. * - * @param Array being indexed - * @param result pointer - * @param parsed index information - * @param number of indices + * @param self Array being indexed + * @param ptr result pointer + * @param indices parsed index information + * @param index_num number of indices * * @return 0 on success -1 on failure */ @@ -814,11 +852,12 @@ get_item_pointer(PyArrayObject *self, char **ptr, * Ensure_array allows to fetch a safe subspace view for advanced * indexing. 
* - * @param Array being indexed - * @param resulting array (new reference) - * @param parsed index information - * @param number of indices - * @param Whether result should inherit the type from self + * @param self Array being indexed + * @param view Resulting array (new reference) + * @param indices parsed index information + * @param index_num number of indices + * @param ensure_array true if result should be a base class array, + * false if result should inherit type from self * * @return 0 on success -1 on failure */ @@ -975,10 +1014,7 @@ array_boolean_subscript(PyArrayObject *self, /* Get a dtype transfer function */ NpyIter_GetInnerFixedStrideArray(iter, fixed_strides); NPY_cast_info cast_info; - /* - * TODO: Ignoring cast flags, since this is only ever a copy. In - * principle that may not be quite right in some future? - */ + NPY_ARRAYMETHOD_FLAGS cast_flags; if (PyArray_GetDTypeTransferFunction( IsUintAligned(self) && IsAligned(self), @@ -991,6 +1027,8 @@ array_boolean_subscript(PyArrayObject *self, NpyIter_Deallocate(iter); return NULL; } + cast_flags = PyArrayMethod_COMBINED_FLAGS( + cast_flags, NpyIter_GetTransferFlags(iter)); /* Get the values needed for the inner loop */ iternext = NpyIter_GetIterNext(iter, NULL); @@ -1001,7 +1039,10 @@ array_boolean_subscript(PyArrayObject *self, return NULL; } - NPY_BEGIN_THREADS_NDITER(iter); + /* NOTE: Don't worry about floating point errors as this is a copy. 
*/ + if (!(cast_flags & NPY_METH_REQUIRES_PYAPI)) { + NPY_BEGIN_THREADS_THRESHOLDED(NpyIter_GetIterSize(iter)); + } innerstrides = NpyIter_GetInnerStrideArray(iter); dataptrs = NpyIter_GetDataPtrArray(iter); @@ -1057,7 +1098,7 @@ array_boolean_subscript(PyArrayObject *self, ret = (PyArrayObject *)PyArray_NewFromDescrAndBase( Py_TYPE(self), ret_dtype, 1, &size, PyArray_STRIDES(ret), PyArray_BYTES(ret), - PyArray_FLAGS(self), (PyObject *)self, (PyObject *)tmp); + PyArray_FLAGS(ret), (PyObject *)self, (PyObject *)tmp); Py_DECREF(tmp); if (ret == NULL) { @@ -1133,6 +1174,8 @@ array_assign_boolean_subscript(PyArrayObject *self, } else { v_stride = 0; + /* If the same value is repeated, iteration order does not matter */ + order = NPY_KEEPORDER; } v_data = PyArray_DATA(v); @@ -1194,8 +1237,11 @@ array_assign_boolean_subscript(PyArrayObject *self, return -1; } + cast_flags = PyArrayMethod_COMBINED_FLAGS( + cast_flags, NpyIter_GetTransferFlags(iter)); + if (!(cast_flags & NPY_METH_REQUIRES_PYAPI)) { - NPY_BEGIN_THREADS_NDITER(iter); + NPY_BEGIN_THREADS_THRESHOLDED(NpyIter_GetIterSize(iter)); } if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { npy_clear_floatstatus_barrier((char *)self); @@ -1345,7 +1391,7 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view) npy_intp offset; /* get the field offset and dtype */ - tup = PyDict_GetItemWithError(PyDataType_FIELDS(PyArray_DESCR(arr)), ind); + tup = PyDict_GetItemWithError(PyDataType_FIELDS(PyArray_DESCR(arr)), ind); // noqa: borrowed-ref OK if (tup == NULL && PyErr_Occurred()) { return 0; } @@ -1667,7 +1713,7 @@ array_subscript(PyArrayObject *self, PyObject *op) if (PyArray_GetDTypeTransferFunction(1, itemsize, itemsize, - PyArray_DESCR(self), PyArray_DESCR(self), + PyArray_DESCR(self), PyArray_DESCR(mit->extra_op), 0, &cast_info, &transfer_flags) != NPY_SUCCEED) { goto finish; } @@ -1960,6 +2006,19 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) tmp_arr = (PyArrayObject *)op; 
} + if (tmp_arr && solve_may_share_memory(self, tmp_arr, 1) != 0) { + Py_SETREF(tmp_arr, (PyArrayObject *)PyArray_NewCopy(tmp_arr, NPY_ANYORDER)); + } + for (i = 0; i < index_num; ++i) { + if (indices[i].object != NULL && PyArray_Check(indices[i].object) && + solve_may_share_memory(self, (PyArrayObject *)indices[i].object, 1) != 0) { + Py_SETREF(indices[i].object, PyArray_Copy((PyArrayObject*)indices[i].object)); + if (indices[i].object == NULL) { + goto fail; + } + } + } + /* * Special case for very simple 1-d fancy indexing, which however * is quite common. This saves not only a lot of setup time in the @@ -1992,9 +2051,9 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) npy_intp itemsize = PyArray_ITEMSIZE(self); int is_aligned = IsUintAligned(self) && IsUintAligned(tmp_arr); - if (PyArray_GetDTypeTransferFunction(is_aligned, - itemsize, itemsize, - PyArray_DESCR(self), PyArray_DESCR(self), + if (PyArray_GetDTypeTransferFunction( + is_aligned, itemsize, itemsize, + PyArray_DESCR(tmp_arr), PyArray_DESCR(self), 0, &cast_info, &transfer_flags) != NPY_SUCCEED) { goto fail; } @@ -2043,6 +2102,11 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) if (PyArray_CopyObject(tmp_arr, op) < 0) { goto fail; } + /* + * In this branch we copy directly from a newly allocated array which + * may have a new descr: + */ + descr = PyArray_DESCR(tmp_arr); } if (PyArray_MapIterCheckIndices(mit) < 0) { @@ -2086,10 +2150,9 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) /* May need a generic copy function (only for refs and odd sizes) */ NPY_ARRAYMETHOD_FLAGS transfer_flags; npy_intp itemsize = PyArray_ITEMSIZE(self); - - if (PyArray_GetDTypeTransferFunction(1, - itemsize, itemsize, - PyArray_DESCR(self), PyArray_DESCR(self), + if (PyArray_GetDTypeTransferFunction( + 1, itemsize, itemsize, + descr, PyArray_DESCR(self), 0, &cast_info, &transfer_flags) != NPY_SUCCEED) { goto fail; } @@ -2109,7 +2172,8 @@ 
array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) * Could add a casting check, but apparently most assignments do * not care about safe casting. */ - if (mapiter_set(mit, &cast_info, meth_flags, is_aligned) < 0) { + int result = mapiter_set(mit, &cast_info, meth_flags, is_aligned); + if (result < 0) { goto fail; } @@ -2404,10 +2468,10 @@ PyArray_MapIterNext(PyArrayMapIterObject *mit) * * mit->dimensions: Broadcast dimension of the fancy indices and * the subspace iteration dimension. * - * @param MapIterObject - * @param The parsed indices object - * @param Number of indices - * @param The array that is being iterated + * @param mit pointer to the MapIterObject + * @param indices The parsed indices object + * @param index_num Number of indices + * @param arr The array that is being iterated * * @return 0 on success -1 on failure (broadcasting or too many fancy indices) */ @@ -2651,7 +2715,9 @@ PyArray_MapIterCheckIndices(PyArrayMapIterObject *mit) return -1; } - NPY_BEGIN_THREADS_NDITER(op_iter); + if (!(NpyIter_GetTransferFlags(op_iter) & NPY_METH_REQUIRES_PYAPI)) { + NPY_BEGIN_THREADS_THRESHOLDED(NpyIter_GetIterSize(op_iter)); + } iterptr = NpyIter_GetDataPtrArray(op_iter); iterstride = NpyIter_GetInnerStrideArray(op_iter); do { @@ -2677,29 +2743,6 @@ PyArray_MapIterCheckIndices(PyArrayMapIterObject *mit) return 0; indexing_error: - - if (mit->size == 0) { - PyObject *err_type = NULL, *err_value = NULL, *err_traceback = NULL; - PyErr_Fetch(&err_type, &err_value, &err_traceback); - /* 2020-05-27, NumPy 1.20 */ - if (DEPRECATE( - "Out of bound index found. This was previously ignored " - "when the indexing result contained no elements. " - "In the future the index error will be raised. 
This error " - "occurs either due to an empty slice, or if an array has zero " - "elements even before indexing.\n" - "(Use `warnings.simplefilter('error')` to turn this " - "DeprecationWarning into an error and get more details on " - "the invalid index.)") < 0) { - npy_PyErr_ChainExceptions(err_type, err_value, err_traceback); - return -1; - } - Py_DECREF(err_type); - Py_DECREF(err_value); - Py_XDECREF(err_traceback); - return 0; - } - return -1; } @@ -3009,6 +3052,8 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type, if (extra_op == NULL) { goto fail; } + // extra_op_dtype might have been replaced, so get a new reference + extra_op_dtype = PyArray_DESCR(extra_op); } /* diff --git a/numpy/_core/src/multiarray/mapping.h b/numpy/_core/src/multiarray/mapping.h index 528cb6604892..d4577c78fdbb 100644 --- a/numpy/_core/src/multiarray/mapping.h +++ b/numpy/_core/src/multiarray/mapping.h @@ -3,6 +3,23 @@ extern NPY_NO_EXPORT PyMappingMethods array_as_mapping; +/* Indexing types */ +#define HAS_INTEGER 1 +#define HAS_NEWAXIS 2 +#define HAS_SLICE 4 +#define HAS_ELLIPSIS 8 +/* HAS_FANCY can be mixed with HAS_0D_BOOL, be careful when to use & or == */ +#define HAS_FANCY 16 +#define HAS_BOOL 32 +/* NOTE: Only set if it is neither fancy nor purely integer index! */ +#define HAS_SCALAR_ARRAY 64 +/* + * Indicate that this is a fancy index that comes from a 0d boolean. + * This means that the index does not operate along a real axis. The + * corresponding index type is just HAS_FANCY. + */ +#define HAS_0D_BOOL (HAS_FANCY | 128) + /* * Object to store information needed for advanced (also fancy) indexing. 
@@ -113,6 +130,11 @@ typedef struct { } npy_index_info; +NPY_NO_EXPORT int +prepare_index_noarray(int array_ndims, npy_intp *array_dims, PyObject *index, + npy_index_info *indices, int *num, int *ndim, int *out_fancy_ndim, + int allow_boolean, int is_flatiter_object); + NPY_NO_EXPORT Py_ssize_t array_length(PyArrayObject *self); diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index da9bd30c8b10..5333ea7b7538 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -25,14 +25,17 @@ #include "dtypemeta.h" #include "item_selection.h" #include "conversion_utils.h" +#include "getset.h" #include "shape.h" #include "strfuncs.h" #include "array_assign.h" #include "npy_dlpack.h" +#include "npy_static_data.h" #include "multiarraymodule.h" #include "methods.h" #include "alloc.h" +#include "array_api_standard.h" #include @@ -72,36 +75,28 @@ npy_forward_method( PyObject *callable, PyObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - PyObject *args_buffer[NPY_MAXARGS]; - /* Practically guaranteed NPY_MAXARGS is enough. */ - PyObject **new_args = args_buffer; - /* * `PY_VECTORCALL_ARGUMENTS_OFFSET` seems never set, probably `args[-1]` * is always `self` but do not rely on it unless Python documents that. */ npy_intp len_kwargs = kwnames != NULL ? PyTuple_GET_SIZE(kwnames) : 0; - size_t original_arg_size = (len_args + len_kwargs) * sizeof(PyObject *); - - if (NPY_UNLIKELY(len_args + len_kwargs > NPY_MAXARGS)) { - new_args = (PyObject **)PyMem_MALLOC(original_arg_size + sizeof(PyObject *)); - if (new_args == NULL) { - /* - * If this fails Python uses `PY_VECTORCALL_ARGUMENTS_OFFSET` and - * we should probably add a fast-path for that (hopefully almost) - * always taken. 
- */ - return PyErr_NoMemory(); - } + npy_intp total_nargs = (len_args + len_kwargs); + + NPY_ALLOC_WORKSPACE(new_args, PyObject *, 14, total_nargs + 1); + if (new_args == NULL) { + /* + * This may fail if Python starts passing `PY_VECTORCALL_ARGUMENTS_OFFSET` + * and we should probably add a fast-path for that (hopefully almost) + * always taken. + */ + return NULL; } new_args[0] = self; - memcpy(&new_args[1], args, original_arg_size); + memcpy(&new_args[1], args, total_nargs * sizeof(PyObject *)); PyObject *res = PyObject_Vectorcall(callable, new_args, len_args+1, kwnames); - if (NPY_UNLIKELY(len_args + len_kwargs > NPY_MAXARGS)) { - PyMem_FREE(new_args); - } + npy_free_workspace(new_args); return res; } @@ -111,13 +106,13 @@ npy_forward_method( * initialization is not thread-safe, but relies on the CPython GIL to * be correct. */ -#define NPY_FORWARD_NDARRAY_METHOD(name) \ - static PyObject *callable = NULL; \ - npy_cache_import("numpy._core._methods", name, &callable); \ - if (callable == NULL) { \ - return NULL; \ - } \ - return npy_forward_method(callable, (PyObject *)self, args, len_args, kwnames) +#define NPY_FORWARD_NDARRAY_METHOD(name) \ + if (npy_cache_import_runtime("numpy._core._methods", #name, \ + &npy_runtime_imports.name) == -1) { \ + return NULL; \ + } \ + return npy_forward_method(npy_runtime_imports.name, \ + (PyObject *)self, args, len_args, kwnames) static PyObject * @@ -356,14 +351,14 @@ static PyObject * array_max(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_amax"); + NPY_FORWARD_NDARRAY_METHOD(_amax); } static PyObject * array_min(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_amin"); + NPY_FORWARD_NDARRAY_METHOD(_amin); } static PyObject * @@ -387,7 +382,6 @@ PyArray_GetField(PyArrayObject *self, PyArray_Descr *typed, int offset) { PyObject *ret = NULL; PyObject *safe; - static PyObject *checkfunc = 
NULL; int self_elsize, typed_elsize; if (self == NULL) { @@ -404,15 +398,16 @@ PyArray_GetField(PyArrayObject *self, PyArray_Descr *typed, int offset) /* check that we are not reinterpreting memory containing Objects. */ if (_may_have_objects(PyArray_DESCR(self)) || _may_have_objects(typed)) { - npy_cache_import("numpy._core._internal", "_getfield_is_safe", - &checkfunc); - if (checkfunc == NULL) { + if (npy_cache_import_runtime( + "numpy._core._internal", "_getfield_is_safe", + &npy_runtime_imports._getfield_is_safe) == -1) { Py_DECREF(typed); return NULL; } /* only returns True or raises */ - safe = PyObject_CallFunction(checkfunc, "OOi", PyArray_DESCR(self), + safe = PyObject_CallFunction(npy_runtime_imports._getfield_is_safe, + "OOi", PyArray_DESCR(self), typed, offset); if (safe == NULL) { Py_DECREF(typed); @@ -616,22 +611,6 @@ array_tobytes(PyArrayObject *self, PyObject *args, PyObject *kwds) return PyArray_ToString(self, order); } -static PyObject * -array_tostring(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - NPY_ORDER order = NPY_CORDER; - static char *kwlist[] = {"order", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&:tostring", kwlist, - PyArray_OrderConverter, &order)) { - return NULL; - } - /* 2020-03-30, NumPy 1.19 */ - if (DEPRECATE("tostring() is deprecated. 
Use tobytes() instead.") < 0) { - return NULL; - } - return PyArray_ToString(self, order); -} /* Like PyArray_ToFile but takes the file as a python object */ static int @@ -774,6 +753,10 @@ array_toscalar(PyArrayObject *self, PyObject *args) return PyArray_MultiIndexGetItem(self, multi_index); } + +NPY_NO_EXPORT int +PyArray_CastingConverterSameValue(PyObject *obj, NPY_CASTING *casting); + static PyObject * array_astype(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) @@ -792,7 +775,7 @@ array_astype(PyArrayObject *self, if (npy_parse_arguments("astype", args, len_args, kwnames, "dtype", &PyArray_DTypeOrDescrConverterRequired, &dt_info, "|order", &PyArray_OrderConverter, &order, - "|casting", &PyArray_CastingConverter, &casting, + "|casting", &PyArray_CastingConverterSameValue, &casting, "|subok", &PyArray_PythonPyIntFromInt, &subok, "|copy", &PyArray_AsTypeCopyConverter, &forcecopy, NULL, NULL, NULL) < 0) { @@ -862,7 +845,12 @@ array_astype(PyArrayObject *self, ((PyArrayObject_fields *)ret)->nd = PyArray_NDIM(self); ((PyArrayObject_fields *)ret)->descr = dtype; } - int success = PyArray_CopyInto(ret, self); + int success; + if (((int)casting & NPY_SAME_VALUE_CASTING_FLAG) > 0) { + success = PyArray_AssignArray(ret, self, NULL, casting); + } else { + success = PyArray_AssignArray(ret, self, NULL, NPY_UNSAFE_CASTING); + } Py_DECREF(dtype); ((PyArrayObject_fields *)ret)->nd = out_ndim; @@ -886,28 +874,39 @@ array_finalizearray(PyArrayObject *self, PyObject *obj) } +/* + * Default `__array_wrap__` implementation. + * + * If `self` is not a base class, we always create a new view, even if + * `return_scalar` is set. This way we preserve the (presumably important) + * subclass information. + * If the type is a base class array, we honor `return_scalar` and call + * PyArray_Return to convert any array with ndim=0 to scalar. + * + * By default, do not return a scalar (because this was always the default). 
+ */ static PyObject * array_wraparray(PyArrayObject *self, PyObject *args) { PyArrayObject *arr; - PyObject *obj; + PyObject *UNUSED = NULL; /* for the context argument */ + int return_scalar = 0; - if (PyTuple_Size(args) < 1) { - PyErr_SetString(PyExc_TypeError, - "only accepts 1 argument"); - return NULL; - } - obj = PyTuple_GET_ITEM(args, 0); - if (obj == NULL) { + if (!PyArg_ParseTuple(args, "O!|OO&:__array_wrap__", + &PyArray_Type, &arr, &UNUSED, + &PyArray_OptionalBoolConverter, &return_scalar)) { return NULL; } - if (!PyArray_Check(obj)) { - PyErr_SetString(PyExc_TypeError, - "can only be called with ndarray object"); - return NULL; + + if (return_scalar && Py_TYPE(self) == &PyArray_Type && PyArray_NDIM(arr) == 0) { + /* Strict scalar return here (but go via PyArray_Return anyway) */ + Py_INCREF(arr); + return PyArray_Return(arr); } - arr = (PyArrayObject *)obj; + /* + * Return an array, but should ensure it has the type of self + */ if (Py_TYPE(self) != Py_TYPE(arr)) { PyArray_Descr *dtype = PyArray_DESCR(arr); Py_INCREF(dtype); @@ -917,7 +916,7 @@ array_wraparray(PyArrayObject *self, PyObject *args) PyArray_NDIM(arr), PyArray_DIMS(arr), PyArray_STRIDES(arr), PyArray_DATA(arr), - PyArray_FLAGS(arr), (PyObject *)self, obj); + PyArray_FLAGS(arr), (PyObject *)self, (PyObject *)arr); } else { /* @@ -939,12 +938,17 @@ array_getarray(PyArrayObject *self, PyObject *args, PyObject *kwds) PyObject *ret; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&$O&:__array__", kwlist, - PyArray_DescrConverter, &newtype, + PyArray_DescrConverter2, &newtype, PyArray_CopyConverter, ©)) { Py_XDECREF(newtype); return NULL; } + if (newtype == NULL) { + newtype = PyArray_DESCR(self); + Py_INCREF(newtype); // newtype is owned. 
+ } + /* convert to PyArray_Type */ if (!PyArray_CheckExact(self)) { PyArrayObject *new; @@ -962,6 +966,7 @@ array_getarray(PyArrayObject *self, PyObject *args, PyObject *kwds) (PyObject *)self ); if (new == NULL) { + Py_DECREF(newtype); return NULL; } self = new; @@ -971,22 +976,21 @@ array_getarray(PyArrayObject *self, PyObject *args, PyObject *kwds) } if (copy == NPY_COPY_ALWAYS) { - if (newtype == NULL) { - newtype = PyArray_DESCR(self); - } - ret = PyArray_CastToType(self, newtype, 0); + ret = PyArray_CastToType(self, newtype, 0); // steals newtype reference Py_DECREF(self); return ret; } else { // copy == NPY_COPY_IF_NEEDED || copy == NPY_COPY_NEVER - if (newtype == NULL || PyArray_EquivTypes(PyArray_DESCR(self), newtype)) { + if (PyArray_EquivTypes(PyArray_DESCR(self), newtype)) { + Py_DECREF(newtype); return (PyObject *)self; } if (copy == NPY_COPY_IF_NEEDED) { - ret = PyArray_CastToType(self, newtype, 0); + ret = PyArray_CastToType(self, newtype, 0); // steals newtype reference. 
Py_DECREF(self); return ret; } else { // copy == NPY_COPY_NEVER PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg); + Py_DECREF(newtype); Py_DECREF(self); return NULL; } @@ -1008,26 +1012,26 @@ any_array_ufunc_overrides(PyObject *args, PyObject *kwds) int i; int nin, nout; PyObject *out_kwd_obj; - PyObject *fast; - PyObject **in_objs, **out_objs, *where_obj; + PyObject **out_objs, *where_obj; /* check inputs */ nin = PyTuple_Size(args); if (nin < 0) { return -1; } - fast = PySequence_Fast(args, "Could not convert object to sequence"); - if (fast == NULL) { - return -1; - } - in_objs = PySequence_Fast_ITEMS(fast); for (i = 0; i < nin; ++i) { - if (PyUFunc_HasOverride(in_objs[i])) { - Py_DECREF(fast); +#if defined(Py_LIMITED_API) + PyObject *obj = PyTuple_GetItem(args, i); + if (obj == NULL) { + return -1; + } +#else + PyObject *obj = PyTuple_GET_ITEM(args, i); +#endif + if (PyUFunc_HasOverride(obj)) { return 1; } } - Py_DECREF(fast); if (kwds == NULL) { return 0; } @@ -1044,7 +1048,7 @@ any_array_ufunc_overrides(PyObject *args, PyObject *kwds) } Py_DECREF(out_kwd_obj); /* check where if it exists */ - where_obj = PyDict_GetItemWithError(kwds, npy_ma_str_where); + where_obj = PyDict_GetItemWithError(kwds, npy_interned_str.where); // noqa: borrowed-ref OK if (where_obj == NULL) { if (PyErr_Occurred()) { return -1; @@ -1118,15 +1122,23 @@ array_function(PyArrayObject *NPY_UNUSED(self), PyObject *c_args, PyObject *c_kw &func, &types, &args, &kwargs)) { return NULL; } - - types = PySequence_Fast( + if (!PyTuple_CheckExact(args)) { + PyErr_SetString(PyExc_TypeError, "args must be a tuple."); + return NULL; + } + if (!PyDict_CheckExact(kwargs)) { + PyErr_SetString(PyExc_TypeError, "kwargs must be a dict."); + return NULL; + } + types = PySequence_Fast( // noqa: borrowed-ref OK types, "types argument to ndarray.__array_function__ must be iterable"); if (types == NULL) { return NULL; } - + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(types); result = 
array_function_method_impl(func, types, args, kwargs); + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST(); Py_DECREF(types); return result; } @@ -1157,7 +1169,6 @@ array_copy_keeporder(PyArrayObject *self, PyObject *args) return PyArray_NewCopy(self, NPY_KEEPORDER); } -#include static PyObject * array_resize(PyArrayObject *self, PyObject *args, PyObject *kwds) { @@ -1165,8 +1176,7 @@ array_resize(PyArrayObject *self, PyObject *args, PyObject *kwds) Py_ssize_t size = PyTuple_Size(args); int refcheck = 1; PyArray_Dims newshape; - PyObject *ret, *obj; - + PyObject *obj; if (!NpyArg_ParseKeywords(kwds, "|i", kwlist, &refcheck)) { return NULL; @@ -1189,12 +1199,11 @@ array_resize(PyArrayObject *self, PyObject *args, PyObject *kwds) return NULL; } - ret = PyArray_Resize(self, &newshape, refcheck, NPY_ANYORDER); + int ret = PyArray_Resize_int(self, &newshape, refcheck); npy_free_cache_dim_obj(newshape); - if (ret == NULL) { + if (ret < 0) { return NULL; } - Py_DECREF(ret); Py_RETURN_NONE; } @@ -1253,11 +1262,12 @@ array_sort(PyArrayObject *self, { int axis = -1; int val; - NPY_SORTKIND sortkind = _NPY_SORT_UNDEFINED; PyObject *order = NULL; PyArray_Descr *saved = NULL; PyArray_Descr *newd; + NPY_SORTKIND sortkind = _NPY_SORT_UNDEFINED; int stable = -1; + int descending = -1; NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("sort", args, len_args, kwnames, @@ -1265,18 +1275,39 @@ array_sort(PyArrayObject *self, "|kind", &PyArray_SortkindConverter, &sortkind, "|order", NULL, &order, "$stable", &PyArray_OptionalBoolConverter, &stable, +// "$descending", &PyArray_OptionalBoolConverter, &descending, NULL, NULL, NULL) < 0) { return NULL; } - if (order == Py_None) { - order = NULL; + + if (sortkind == _NPY_SORT_UNDEFINED) { + // keywords only if sortkind not passed + sortkind = 0; + sortkind |= (stable > 0)? NPY_SORT_STABLE: 0; + sortkind |= (descending > 0)? 
NPY_SORT_DESCENDING: 0; } + else { + // Check that no keywords are used + int keywords_used = 0; + keywords_used |= (stable != -1); + keywords_used |= (descending != -1); + if (keywords_used) { + PyErr_SetString(PyExc_ValueError, + "`kind` and keyword parameters can't be provided at " + "the same time. Use only one of them."); + return NULL; + } + } + + order = (order != Py_None)? order: NULL; + // Reorder field names if required. if (order != NULL) { PyObject *new_name; PyObject *_numpy_internal; saved = PyArray_DESCR(self); if (!PyDataType_HASFIELDS(saved)) { - PyErr_SetString(PyExc_ValueError, "Cannot specify " \ + PyErr_SetString(PyExc_ValueError, + "Cannot specify " "order when the array has no fields."); return NULL; } @@ -1299,20 +1330,9 @@ array_sort(PyArrayObject *self, ((_PyArray_LegacyDescr *)newd)->names = new_name; ((PyArrayObject_fields *)self)->descr = newd; } - if (sortkind != _NPY_SORT_UNDEFINED && stable != -1) { - PyErr_SetString(PyExc_ValueError, - "`kind` and `stable` parameters can't be provided at " - "the same time. 
Use only one of them."); - return NULL; - } - else if ((sortkind == _NPY_SORT_UNDEFINED && stable == -1) || (stable == 0)) { - sortkind = NPY_QUICKSORT; - } - else if (stable == 1) { - sortkind = NPY_STABLESORT; - } val = PyArray_Sort(self, axis, sortkind); + if (order != NULL) { Py_XDECREF(PyArray_DESCR(self)); ((PyArrayObject_fields *)self)->descr = saved; @@ -1323,6 +1343,7 @@ array_sort(PyArrayObject *self, Py_RETURN_NONE; } + static PyObject * array_partition(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) @@ -1396,15 +1417,19 @@ array_partition(PyArrayObject *self, Py_RETURN_NONE; } + static PyObject * array_argsort(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { int axis = -1; + PyObject *res; + PyObject *order = NULL; + PyArray_Descr *saved = NULL; + PyArray_Descr *newd; NPY_SORTKIND sortkind = _NPY_SORT_UNDEFINED; - PyObject *order = NULL, *res; - PyArray_Descr *newd, *saved=NULL; int stable = -1; + int descending = -1; NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("argsort", args, len_args, kwnames, @@ -1412,12 +1437,32 @@ array_argsort(PyArrayObject *self, "|kind", &PyArray_SortkindConverter, &sortkind, "|order", NULL, &order, "$stable", &PyArray_OptionalBoolConverter, &stable, +// "$descending", &PyArray_OptionalBoolConverter, &descending, NULL, NULL, NULL) < 0) { return NULL; } - if (order == Py_None) { - order = NULL; + + if (sortkind == _NPY_SORT_UNDEFINED) { + // keywords only if sortkind not passed + sortkind = 0; + sortkind |= (stable > 0)? NPY_SORT_STABLE: 0; + sortkind |= (descending > 0)? NPY_SORT_DESCENDING: 0; } + else { + // Check that no keywords are used + int keywords_used = 0; + keywords_used |= (stable != -1); + keywords_used |= (descending != -1); + if (keywords_used) { + PyErr_SetString(PyExc_ValueError, + "`kind` and keyword parameters can't be provided at " + "the same time. 
Use only one of them."); + return NULL; + } + } + + // Reorder field names if required. + order = (order != Py_None)? order: NULL; if (order != NULL) { PyObject *new_name; PyObject *_numpy_internal; @@ -1446,20 +1491,9 @@ array_argsort(PyArrayObject *self, ((_PyArray_LegacyDescr *)newd)->names = new_name; ((PyArrayObject_fields *)self)->descr = newd; } - if (sortkind != _NPY_SORT_UNDEFINED && stable != -1) { - PyErr_SetString(PyExc_ValueError, - "`kind` and `stable` parameters can't be provided at " - "the same time. Use only one of them."); - return NULL; - } - else if ((sortkind == _NPY_SORT_UNDEFINED && stable == -1) || (stable == 0)) { - sortkind = NPY_QUICKSORT; - } - else if (stable == 1) { - sortkind = NPY_STABLESORT; - } res = PyArray_ArgSort(self, axis, sortkind); + if (order != NULL) { Py_XDECREF(PyArray_DESCR(self)); ((PyArrayObject_fields *)self)->descr = saved; @@ -1570,7 +1604,7 @@ _deepcopy_call(char *iptr, char *optr, PyArray_Descr *dtype, PyArray_Descr *new; int offset, res; Py_ssize_t pos = 0; - while (PyDict_Next(PyDataType_FIELDS(dtype), &pos, &key, &value)) { + while (PyDict_Next(PyDataType_FIELDS(dtype), &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } @@ -1585,7 +1619,7 @@ _deepcopy_call(char *iptr, char *optr, PyArray_Descr *dtype, } } } - else { + else if (PyDataType_ISOBJECT(dtype)) { PyObject *itemp, *otemp; PyObject *res; memcpy(&itemp, iptr, sizeof(itemp)); @@ -1737,7 +1771,7 @@ _setlist_pkl(PyArrayObject *self, PyObject *list) return -1; } while(iter->index < iter->size) { - theobject = PyList_GET_ITEM(list, iter->index); + theobject = PyList_GET_ITEM(list, iter->index); // noqa: borrowed-ref OK setitem(theobject, iter->dataptr, self); PyArray_ITER_NEXT(iter); } @@ -1849,77 +1883,115 @@ array_reduce_ex_regular(PyArrayObject *self, int NPY_UNUSED(protocol)) static PyObject * array_reduce_ex_picklebuffer(PyArrayObject *self, int protocol) { - PyObject *numeric_mod = NULL, *from_buffer_func = 
NULL; - PyObject *pickle_module = NULL, *picklebuf_class = NULL; - PyObject *picklebuf_args = NULL; + PyObject *from_buffer_func = NULL; + PyObject *picklebuf_class = NULL; PyObject *buffer = NULL, *transposed_array = NULL; PyArray_Descr *descr = NULL; + PyObject *rev_perm = NULL; // only used in 'K' order char order; descr = PyArray_DESCR(self); - /* we expect protocol 5 to be available in Python 3.8 */ - pickle_module = PyImport_ImportModule("pickle"); - if (pickle_module == NULL){ - return NULL; - } - picklebuf_class = PyObject_GetAttrString(pickle_module, "PickleBuffer"); - Py_DECREF(pickle_module); - if (picklebuf_class == NULL) { + if (npy_cache_import_runtime("pickle", "PickleBuffer", &picklebuf_class) == -1) { return NULL; } /* Construct a PickleBuffer of the array */ - - if (!PyArray_IS_C_CONTIGUOUS((PyArrayObject*) self) && - PyArray_IS_F_CONTIGUOUS((PyArrayObject*) self)) { + if (PyArray_IS_C_CONTIGUOUS((PyArrayObject *)self)) { + order = 'C'; + } + else if (PyArray_IS_F_CONTIGUOUS((PyArrayObject *)self)) { /* if the array if Fortran-contiguous and not C-contiguous, * the PickleBuffer instance will hold a view on the transpose * of the initial array, that is C-contiguous. 
*/ order = 'F'; - transposed_array = PyArray_Transpose((PyArrayObject*)self, NULL); - picklebuf_args = Py_BuildValue("(N)", transposed_array); + transposed_array = PyArray_Transpose((PyArrayObject *)self, NULL); + if (transposed_array == NULL) { + return NULL; + } } else { - order = 'C'; - picklebuf_args = Py_BuildValue("(O)", self); - } - if (picklebuf_args == NULL) { - Py_DECREF(picklebuf_class); - return NULL; + order = 'K'; + const int n = PyArray_NDIM(self); + npy_stride_sort_item items[NPY_MAXDIMS]; + // sort (strde, perm) as descending = transpose to C + PyArray_CreateSortedStridePerm(n, PyArray_STRIDES(self), items); + rev_perm = PyTuple_New(n); + if (rev_perm == NULL) { + return NULL; + } + PyArray_Dims perm; + npy_intp dims[NPY_MAXDIMS]; + for (int i = 0; i < n; i++) { + dims[i] = items[i].perm; + PyObject *idx = PyLong_FromLong(i); + if (idx == NULL) { + Py_DECREF(rev_perm); + return NULL; + } + PyTuple_SET_ITEM(rev_perm, items[i].perm, idx); + } + perm.ptr = dims; + perm.len = n; + transposed_array = PyArray_Transpose((PyArrayObject *)self, &perm); + if (transposed_array == NULL) { + Py_DECREF(rev_perm); + return NULL; + } + if (!PyArray_IS_C_CONTIGUOUS((PyArrayObject *)transposed_array)) { + // self is non-contiguous + Py_DECREF(rev_perm); + Py_DECREF(transposed_array); + return array_reduce_ex_regular(self, protocol); + } } - - buffer = PyObject_CallObject(picklebuf_class, picklebuf_args); - Py_DECREF(picklebuf_class); - Py_DECREF(picklebuf_args); + buffer = PyObject_CallOneArg(picklebuf_class, transposed_array == NULL ? (PyObject*) self: transposed_array); if (buffer == NULL) { /* Some arrays may refuse to export a buffer, in which case * just fall back on regular __reduce_ex__ implementation * (gh-12745). 
*/ + Py_XDECREF(rev_perm); + Py_XDECREF(transposed_array); PyErr_Clear(); return array_reduce_ex_regular(self, protocol); } /* Get the _frombuffer() function for reconstruction */ - - numeric_mod = PyImport_ImportModule("numpy._core.numeric"); - if (numeric_mod == NULL) { + if (npy_cache_import_runtime("numpy._core.numeric", "_frombuffer", + &from_buffer_func) == -1) { + Py_XDECREF(rev_perm); + Py_XDECREF(transposed_array); Py_DECREF(buffer); return NULL; } - from_buffer_func = PyObject_GetAttrString(numeric_mod, - "_frombuffer"); - Py_DECREF(numeric_mod); - if (from_buffer_func == NULL) { + + PyObject *shape = NULL; + if (order == 'K') { + shape = PyArray_IntTupleFromIntp( + PyArray_NDIM((PyArrayObject *)transposed_array), + PyArray_SHAPE((PyArrayObject *)transposed_array)); + } + else { + shape = PyArray_IntTupleFromIntp(PyArray_NDIM(self), + PyArray_SHAPE(self)); + } + Py_XDECREF(transposed_array); + if (shape == NULL) { + Py_XDECREF(rev_perm); Py_DECREF(buffer); return NULL; } - - return Py_BuildValue("N(NONN)", - from_buffer_func, buffer, (PyObject *)descr, - PyObject_GetAttrString((PyObject *)self, "shape"), - PyUnicode_FromStringAndSize(&order, 1)); + if (order == 'K') { + return Py_BuildValue("N(NONNN)", from_buffer_func, buffer, + (PyObject *)descr, shape, + PyUnicode_FromStringAndSize(&order, 1), rev_perm); + } + else { + return Py_BuildValue("N(NONN)", from_buffer_func, buffer, + (PyObject *)descr, shape, + PyUnicode_FromStringAndSize(&order, 1)); + } } static PyObject * @@ -1934,8 +2006,6 @@ array_reduce_ex(PyArrayObject *self, PyObject *args) descr = PyArray_DESCR(self); if ((protocol < 5) || - (!PyArray_IS_C_CONTIGUOUS((PyArrayObject*)self) && - !PyArray_IS_F_CONTIGUOUS((PyArrayObject*)self)) || PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT) || (PyType_IsSubtype(((PyObject*)self)->ob_type, &PyArray_Type) && ((PyObject*)self)->ob_type != &PyArray_Type) || @@ -1947,6 +2017,11 @@ array_reduce_ex(PyArrayObject *self, PyObject *args) return 
array_reduce_ex_regular(self, protocol); } else { + /* The func will check internally + * if the array isn't backed by a contiguous data buffer or + * if the array refuses to export a buffer + * In either case, fall back to `array_reduce_ex_regular` + */ return array_reduce_ex_picklebuffer(self, protocol); } } @@ -1998,17 +2073,6 @@ array_setstate(PyArrayObject *self, PyObject *args) return NULL; } - /* - * Reassigning fa->descr messes with the reallocation strategy, - * since fa could be a 0-d or scalar, and then - * PyDataMem_UserFREE will be confused - */ - size_t n_tofree = PyArray_NBYTES(self); - if (n_tofree == 0) { - n_tofree = 1; - } - Py_XDECREF(PyArray_DESCR(self)); - fa->descr = typecode; Py_INCREF(typecode); nd = PyArray_IntpFromSequence(shape, dimensions, NPY_MAXDIMS); if (nd < 0) { @@ -2022,31 +2086,19 @@ array_setstate(PyArrayObject *self, PyObject *args) * copy from the pickled data (may not match allocation currently if 0). * Compare with `PyArray_NewFromDescr`, raise MemoryError for simplicity. 
*/ - npy_bool empty = NPY_FALSE; - nbytes = 1; + nbytes = typecode->elsize; for (int i = 0; i < nd; i++) { if (dimensions[i] < 0) { PyErr_SetString(PyExc_TypeError, "impossible dimension while unpickling array"); return NULL; } - if (dimensions[i] == 0) { - empty = NPY_TRUE; - } overflowed = npy_mul_sizes_with_overflow( &nbytes, nbytes, dimensions[i]); if (overflowed) { return PyErr_NoMemory(); } } - overflowed = npy_mul_sizes_with_overflow( - &nbytes, nbytes, PyArray_ITEMSIZE(self)); - if (overflowed) { - return PyErr_NoMemory(); - } - if (empty) { - nbytes = 0; - } if (PyDataType_FLAGCHK(typecode, NPY_LIST_PICKLE)) { if (!PyList_Check(rawdata)) { @@ -2060,11 +2112,8 @@ array_setstate(PyArrayObject *self, PyObject *args) /* Backward compatibility with Python 2 NumPy pickles */ if (PyUnicode_Check(rawdata)) { - PyObject *tmp; - tmp = PyUnicode_AsLatin1String(rawdata); - Py_DECREF(rawdata); - rawdata = tmp; - if (tmp == NULL) { + Py_SETREF(rawdata, PyUnicode_AsLatin1String(rawdata)); + if (rawdata == NULL) { /* More informative error message */ PyErr_SetString(PyExc_ValueError, ("Failed to encode latin1 string when unpickling a Numpy array. 
" @@ -2092,32 +2141,13 @@ array_setstate(PyArrayObject *self, PyObject *args) return NULL; } } - - if ((PyArray_FLAGS(self) & NPY_ARRAY_OWNDATA)) { - /* - * Allocation will never be 0, see comment in ctors.c - * line 820 - */ - PyObject *handler = PyArray_HANDLER(self); - if (handler == NULL) { - /* This can happen if someone arbitrarily sets NPY_ARRAY_OWNDATA */ - PyErr_SetString(PyExc_RuntimeError, - "no memory handler found but OWNDATA flag set"); - return NULL; - } - PyDataMem_UserFREE(PyArray_DATA(self), n_tofree, handler); - PyArray_CLEARFLAGS(self, NPY_ARRAY_OWNDATA); - } - Py_XDECREF(PyArray_BASE(self)); - fa->base = NULL; - - PyArray_CLEARFLAGS(self, NPY_ARRAY_WRITEBACKIFCOPY); - - if (PyArray_DIMS(self) != NULL) { - npy_free_cache_dim_array(self); - fa->dimensions = NULL; + /* + * Get rid of everything on self, and then populate with pickle data. + */ + if (clear_array_attributes(self) < 0) { + return NULL; } - + fa->descr = typecode; fa->flags = NPY_ARRAY_DEFAULT; fa->nd = nd; @@ -2147,11 +2177,8 @@ array_setstate(PyArrayObject *self, PyObject *args) if (num == 0) { num = 1; } - /* Store the handler in case the default is modified */ - Py_XDECREF(fa->mem_handler); fa->mem_handler = PyDataMem_GetHandler(); if (fa->mem_handler == NULL) { - Py_CLEAR(fa->mem_handler); Py_DECREF(rawdata); return NULL; } @@ -2199,7 +2226,6 @@ array_setstate(PyArrayObject *self, PyObject *args) } else { /* The handlers should never be called in this case */ - Py_XDECREF(fa->mem_handler); fa->mem_handler = NULL; fa->data = datastr; if (PyArray_SetBaseObject(self, rawdata) < 0) { @@ -2213,9 +2239,7 @@ array_setstate(PyArrayObject *self, PyObject *args) if (num == 0) { num = 1; } - /* Store the functions in case the default handler is modified */ - Py_XDECREF(fa->mem_handler); fa->mem_handler = PyDataMem_GetHandler(); if (fa->mem_handler == NULL) { return NULL; @@ -2244,17 +2268,20 @@ array_setstate(PyArrayObject *self, PyObject *args) NPY_NO_EXPORT int PyArray_Dump(PyObject 
*self, PyObject *file, int protocol) { - static PyObject *method = NULL; PyObject *ret; - npy_cache_import("numpy._core._methods", "_dump", &method); - if (method == NULL) { + if (npy_cache_import_runtime( + "numpy._core._methods", "_dump", + &npy_runtime_imports._dump) == -1) { return -1; } + if (protocol < 0) { - ret = PyObject_CallFunction(method, "OO", self, file); + ret = PyObject_CallFunction( + npy_runtime_imports._dump, "OO", self, file); } else { - ret = PyObject_CallFunction(method, "OOi", self, file, protocol); + ret = PyObject_CallFunction( + npy_runtime_imports._dump, "OOi", self, file, protocol); } if (ret == NULL) { return -1; @@ -2267,16 +2294,16 @@ PyArray_Dump(PyObject *self, PyObject *file, int protocol) NPY_NO_EXPORT PyObject * PyArray_Dumps(PyObject *self, int protocol) { - static PyObject *method = NULL; - npy_cache_import("numpy._core._methods", "_dumps", &method); - if (method == NULL) { + if (npy_cache_import_runtime("numpy._core._methods", "_dumps", + &npy_runtime_imports._dumps) == -1) { return NULL; } if (protocol < 0) { - return PyObject_CallFunction(method, "O", self); + return PyObject_CallFunction(npy_runtime_imports._dumps, "O", self); } else { - return PyObject_CallFunction(method, "Oi", self, protocol); + return PyObject_CallFunction( + npy_runtime_imports._dumps, "Oi", self, protocol); } } @@ -2285,7 +2312,7 @@ static PyObject * array_dump(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_dump"); + NPY_FORWARD_NDARRAY_METHOD(_dump); } @@ -2293,7 +2320,7 @@ static PyObject * array_dumps(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_dumps"); + NPY_FORWARD_NDARRAY_METHOD(_dumps); } @@ -2345,14 +2372,14 @@ static PyObject * array_mean(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_mean"); + NPY_FORWARD_NDARRAY_METHOD(_mean); } 
static PyObject * array_sum(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_sum"); + NPY_FORWARD_NDARRAY_METHOD(_sum); } @@ -2382,7 +2409,7 @@ static PyObject * array_prod(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_prod"); + NPY_FORWARD_NDARRAY_METHOD(_prod); } static PyObject * @@ -2442,7 +2469,7 @@ static PyObject * array_any(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_any"); + NPY_FORWARD_NDARRAY_METHOD(_any); } @@ -2450,21 +2477,21 @@ static PyObject * array_all(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_all"); + NPY_FORWARD_NDARRAY_METHOD(_all); } static PyObject * array_stddev(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_std"); + NPY_FORWARD_NDARRAY_METHOD(_std); } static PyObject * array_variance(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_var"); + NPY_FORWARD_NDARRAY_METHOD(_var); } static PyObject * @@ -2545,7 +2572,7 @@ static PyObject * array_clip(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_clip"); + NPY_FORWARD_NDARRAY_METHOD(_clip); } @@ -2710,12 +2737,10 @@ array_setflags(PyArrayObject *self, PyObject *args, PyObject *kwds) if ((PyArray_BASE(self) == NULL) && !PyArray_CHKFLAGS(self, NPY_ARRAY_OWNDATA) && !PyArray_CHKFLAGS(self, NPY_ARRAY_WRITEABLE)) { - /* 2017-05-03, NumPy 1.17.0 */ - if (DEPRECATE("making a non-writeable array writeable " - "is deprecated for arrays without a base " - "which do not own their data.") < 0) { - return NULL; - } + PyErr_SetString(PyExc_ValueError, + "Cannot make a non-writeable array writeable " + "for arrays with a base 
that do not own their data."); + return NULL; } PyArray_ENABLEFLAGS(self, NPY_ARRAY_WRITEABLE); PyArray_CLEARFLAGS(self, NPY_ARRAY_WARN_ON_WRITE); @@ -2800,70 +2825,14 @@ array_class_getitem(PyObject *cls, PyObject *args) return Py_GenericAlias(cls, args); } -static PyObject * -array_array_namespace(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"api_version", NULL}; - PyObject *array_api_version = Py_None; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|$O:__array_namespace__", kwlist, - &array_api_version)) { - return NULL; - } - - if (array_api_version != Py_None) { - if (!PyUnicode_Check(array_api_version)) - { - PyErr_Format(PyExc_ValueError, - "Only None and strings are allowed as the Array API version, " - "but received: %S.", array_api_version); - return NULL; - } else if (PyUnicode_CompareWithASCIIString(array_api_version, "2021.12") != 0 && - PyUnicode_CompareWithASCIIString(array_api_version, "2022.12") != 0) - { - PyErr_Format(PyExc_ValueError, - "Version \"%U\" of the Array API Standard is not supported.", - array_api_version); - return NULL; - } - } - - PyObject *numpy_module = PyImport_ImportModule("numpy"); - if (numpy_module == NULL){ - return NULL; - } - - return numpy_module; -} - -static PyObject * -array_to_device(PyArrayObject *self, PyObject *args, PyObject *kwds) +static PyObject* array__set_shape(PyObject *self, PyObject *args) { - static char *kwlist[] = {"", "stream", NULL}; - char *device = ""; - PyObject *stream = Py_None; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "s|$O:to_device", kwlist, - &device, - &stream)) { - return NULL; - } + int r = array_shape_set_internal((PyArrayObject *)self, args); - if (stream != Py_None) { - PyErr_SetString(PyExc_ValueError, - "The stream argument in to_device() " - "is not supported"); + if (r < 0) { return NULL; } - - if (strcmp(device, "cpu") != 0) { - PyErr_Format(PyExc_ValueError, - "Unsupported device: %s.", device); - return NULL; - } - - 
Py_INCREF(self); - return (PyObject *)self; + Py_RETURN_NONE; } NPY_NO_EXPORT PyMethodDef array_methods[] = { @@ -2888,7 +2857,8 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = { /* for the sys module */ {"__sizeof__", (PyCFunction) array_sizeof, - METH_NOARGS, NULL}, + METH_NOARGS, + "__sizeof__($self, /)\n--\n\nSize in memory."}, /* for the copy module */ {"__copy__", @@ -2917,13 +2887,14 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = { {"__complex__", (PyCFunction) array_complex, - METH_VARARGS, NULL}, + METH_VARARGS, + "__complex__($self, /)\n--\n\ncomplex(self)"}, {"__format__", (PyCFunction) array_format, - METH_VARARGS, NULL}, + METH_VARARGS, + "__format__($self, spec, /)\n--\n\nformat(self[, spec])"}, - /* for typing; requires python >= 3.9 */ {"__class_getitem__", (PyCFunction)array_class_getitem, METH_CLASS | METH_O, NULL}, @@ -3067,9 +3038,6 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = { {"tolist", (PyCFunction)array_tolist, METH_VARARGS, NULL}, - {"tostring", - (PyCFunction)array_tostring, - METH_VARARGS | METH_KEYWORDS, NULL}, {"trace", (PyCFunction)array_trace, METH_FASTCALL | METH_KEYWORDS, NULL}, @@ -3091,6 +3059,10 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = { (PyCFunction)array_dlpack_device, METH_NOARGS, NULL}, + // For deprecation of ndarray setters + {"_set_shape", + (PyCFunction)array__set_shape, + METH_O, NULL}, // For Array API compatibility {"__array_namespace__", (PyCFunction)array_array_namespace, diff --git a/numpy/_core/src/multiarray/methods.h b/numpy/_core/src/multiarray/methods.h index 9d06794de2aa..f49e0205894d 100644 --- a/numpy/_core/src/multiarray/methods.h +++ b/numpy/_core/src/multiarray/methods.h @@ -1,6 +1,7 @@ #ifndef NUMPY_CORE_SRC_MULTIARRAY_METHODS_H_ #define NUMPY_CORE_SRC_MULTIARRAY_METHODS_H_ +#include "npy_static_data.h" #include "npy_import.h" extern NPY_NO_EXPORT PyMethodDef array_methods[]; @@ -13,22 +14,12 @@ extern NPY_NO_EXPORT PyMethodDef array_methods[]; static inline PyObject * 
NpyPath_PathlikeToFspath(PyObject *file) { - static PyObject *os_PathLike = NULL; - static PyObject *os_fspath = NULL; - npy_cache_import("os", "PathLike", &os_PathLike); - if (os_PathLike == NULL) { - return NULL; - } - npy_cache_import("os", "fspath", &os_fspath); - if (os_fspath == NULL) { - return NULL; - } - - if (!PyObject_IsInstance(file, os_PathLike)) { + if (!PyObject_IsInstance(file, npy_static_pydata.os_PathLike)) { Py_INCREF(file); return file; } - return PyObject_CallFunctionObjArgs(os_fspath, file, NULL); + return PyObject_CallFunctionObjArgs(npy_static_pydata.os_fspath, + file, NULL); } #endif /* NUMPY_CORE_SRC_MULTIARRAY_METHODS_H_ */ diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index e11b560a93cd..9587ea5753c7 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -23,11 +23,13 @@ #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" +#include "multiarraymodule.h" #include "numpy/npy_math.h" #include "npy_argparse.h" #include "npy_config.h" #include "npy_pycompat.h" #include "npy_import.h" +#include "npy_static_data.h" #include "convert_datatype.h" #include "legacy_dtype_implementation.h" @@ -41,6 +43,7 @@ NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0; #include "arraytypes.h" #include "arrayobject.h" #include "array_converter.h" +#include "blas_utils.h" #include "hashdescr.h" #include "descriptor.h" #include "dragon4.h" @@ -63,7 +66,6 @@ NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0; #include "ctors.h" #include "array_assign.h" #include "common.h" -#include "multiarraymodule.h" #include "cblasfuncs.h" #include "vdot.h" #include "templ_common.h" /* for npy_mul_sizes_with_overflow */ @@ -82,6 +84,8 @@ NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0; #include "umathmodule.h" +#include "unique.h" + /* ***************************************************************************** ** INCLUDE GENERATED CODE ** @@ -97,26 +101,47 @@ NPY_NO_EXPORT 
PyObject * _umath_strings_richcompare( PyArrayObject *self, PyArrayObject *other, int cmp_op, int rstrip); -/* - * global variable to determine if legacy printing is enabled, accessible from - * C. For simplicity the mode is encoded as an integer where INT_MAX means no - * legacy mode, and '113'/'121' means 1.13/1.21 legacy mode; and 0 maps to - * INT_MAX. We can upgrade this if we have more complex requirements in the - * future. - */ -int npy_legacy_print_mode = INT_MAX; - -static PyObject * -set_legacy_print_mode(PyObject *NPY_UNUSED(self), PyObject *args) -{ - if (!PyArg_ParseTuple(args, "i", &npy_legacy_print_mode)) { - return NULL; +NPY_NO_EXPORT int +get_legacy_print_mode(void) { + /* Get the C value of the legacy printing mode. + * + * It is stored as a Python context variable so we access it via the C + * API. For simplicity the mode is encoded as an integer where INT_MAX + * means no legacy mode, and '113'/'121'/'125' means 1.13/1.21/1.25 legacy + * mode; and 0 maps to INT_MAX. We can upgrade this if we have more + * complex requirements in the future. 
+ */ + PyObject *format_options = NULL; + PyContextVar_Get(npy_static_pydata.format_options, NULL, &format_options); + if (format_options == NULL) { + PyErr_SetString(PyExc_SystemError, + "NumPy internal error: unable to get format_options " + "context variable"); + return -1; + } + PyObject *legacy_print_mode = NULL; + if (PyDict_GetItemRef(format_options, npy_interned_str.legacy, + &legacy_print_mode) == -1) { + Py_DECREF(format_options); + return -1; } - if (!npy_legacy_print_mode) { - npy_legacy_print_mode = INT_MAX; + Py_DECREF(format_options); + if (legacy_print_mode == NULL) { + PyErr_SetString(PyExc_SystemError, + "NumPy internal error: unable to get legacy print " + "mode"); + return -1; } - Py_RETURN_NONE; + Py_ssize_t ret = PyLong_AsSsize_t(legacy_print_mode); + Py_DECREF(legacy_print_mode); + if (error_converting(ret)) { + return -1; + } + if (ret > INT_MAX) { + return INT_MAX; + } + return (int)ret; } @@ -136,12 +161,13 @@ PyArray_GetPriority(PyObject *obj, double default_) return NPY_SCALAR_PRIORITY; } - ret = PyArray_LookupSpecial_OnInstance(obj, npy_ma_str_array_priority); - if (ret == NULL) { - if (PyErr_Occurred()) { - /* TODO[gh-14801]: propagate crashes during attribute access? */ - PyErr_Clear(); - } + if (PyArray_LookupSpecial_OnInstance( + obj, npy_interned_str.array_priority, &ret) < 0) { + /* TODO[gh-14801]: propagate crashes during attribute access? 
*/ + PyErr_Clear(); + return default_; + } + else if (ret == NULL) { return default_; } @@ -278,6 +304,7 @@ PyArray_AsCArray(PyObject **op, void *ptr, npy_intp *dims, int nd, n = PyArray_DIMS(ap)[0]; ptr2 = (char **)PyArray_malloc(n * sizeof(char *)); if (!ptr2) { + Py_DECREF(ap); PyErr_NoMemory(); return -1; } @@ -291,6 +318,7 @@ PyArray_AsCArray(PyObject **op, void *ptr, npy_intp *dims, int nd, m = PyArray_DIMS(ap)[1]; ptr3 = (char ***)PyArray_malloc(n*(m+1) * sizeof(char *)); if (!ptr3) { + Py_DECREF(ap); PyErr_NoMemory(); return -1; } @@ -506,8 +534,7 @@ PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis, NPY_NO_EXPORT PyArrayObject * PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays, NPY_ORDER order, PyArrayObject *ret, - PyArray_Descr *dtype, NPY_CASTING casting, - npy_bool casting_not_passed) + PyArray_Descr *dtype, NPY_CASTING casting) { int iarrays; npy_intp shape = 0; @@ -534,10 +561,8 @@ PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays, } } - int out_passed = 0; if (ret != NULL) { assert(dtype == NULL); - out_passed = 1; if (PyArray_NDIM(ret) != 1) { PyErr_SetString(PyExc_ValueError, "Output array must be 1D"); @@ -585,35 +610,18 @@ PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays, return NULL; } - int give_deprecation_warning = 1; /* To give warning for just one input array. */ for (iarrays = 0; iarrays < narrays; ++iarrays) { /* Adjust the window dimensions for this array */ sliding_view->dimensions[0] = PyArray_SIZE(arrays[iarrays]); if (!PyArray_CanCastArrayTo( arrays[iarrays], PyArray_DESCR(ret), casting)) { - /* This should be an error, but was previously allowed here. */ - if (casting_not_passed && out_passed) { - /* NumPy 1.20, 2020-09-03 */ - if (give_deprecation_warning && DEPRECATE( - "concatenate() with `axis=None` will use same-kind " - "casting by default in the future. Please use " - "`casting='unsafe'` to retain the old behaviour. 
" - "In the future this will be a TypeError.") < 0) { - Py_DECREF(sliding_view); - Py_DECREF(ret); - return NULL; - } - give_deprecation_warning = 0; - } - else { - npy_set_invalid_cast_error( - PyArray_DESCR(arrays[iarrays]), PyArray_DESCR(ret), - casting, PyArray_NDIM(arrays[iarrays]) == 0); - Py_DECREF(sliding_view); - Py_DECREF(ret); - return NULL; - } + npy_set_invalid_cast_error( + PyArray_DESCR(arrays[iarrays]), PyArray_DESCR(ret), + casting, PyArray_NDIM(arrays[iarrays]) == 0); + Py_DECREF(sliding_view); + Py_DECREF(ret); + return NULL; } /* Copy the data for this array */ @@ -642,12 +650,11 @@ PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays, * @param ret output array to fill * @param dtype Forced output array dtype (cannot be combined with ret) * @param casting Casting mode used - * @param casting_not_passed Deprecation helper */ NPY_NO_EXPORT PyObject * PyArray_ConcatenateInto(PyObject *op, int axis, PyArrayObject *ret, PyArray_Descr *dtype, - NPY_CASTING casting, npy_bool casting_not_passed) + NPY_CASTING casting) { int iarrays, narrays; PyArrayObject **arrays; @@ -665,10 +672,17 @@ PyArray_ConcatenateInto(PyObject *op, } /* Convert the input list into arrays */ - narrays = PySequence_Size(op); - if (narrays < 0) { + Py_ssize_t narrays_true = PySequence_Size(op); + if (narrays_true < 0) { return NULL; } + else if (narrays_true > NPY_MAX_INT) { + PyErr_Format(PyExc_ValueError, + "concatenate() only supports up to %d arrays but got %zd.", + NPY_MAX_INT, narrays_true); + return NULL; + } + narrays = (int)narrays_true; arrays = PyArray_malloc(narrays * sizeof(arrays[0])); if (arrays == NULL) { PyErr_NoMemory(); @@ -693,7 +707,7 @@ PyArray_ConcatenateInto(PyObject *op, if (axis == NPY_RAVEL_AXIS) { ret = PyArray_ConcatenateFlattenedArrays( narrays, arrays, NPY_CORDER, ret, dtype, - casting, casting_not_passed); + casting); } else { ret = PyArray_ConcatenateArrays( @@ -738,7 +752,7 @@ PyArray_Concatenate(PyObject *op, int axis) casting = 
NPY_SAME_KIND_CASTING; } return PyArray_ConcatenateInto( - op, axis, NULL, NULL, casting, 0); + op, axis, NULL, NULL, casting); } static int @@ -1080,6 +1094,8 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out) Py_DECREF(it1); goto fail; } + + npy_clear_floatstatus_barrier((char *) result); NPY_BEGIN_THREADS_DESCR(PyArray_DESCR(ap2)); while (it1->index < it1->size) { while (it2->index < it2->size) { @@ -1097,6 +1113,11 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out) /* only for OBJECT arrays */ goto fail; } + + int fpes = npy_get_floatstatus_barrier((char *) result); + if (fpes && PyUFunc_GiveFloatingpointErrors("dot", fpes) < 0) { + goto fail; + } Py_DECREF(ap1); Py_DECREF(ap2); @@ -1193,6 +1214,7 @@ _pyarray_correlate(PyArrayObject *ap1, PyArrayObject *ap2, int typenum, goto clean_ret; } + int needs_pyapi = PyDataType_FLAGCHK(PyArray_DESCR(ret), NPY_NEEDS_PYAPI); NPY_BEGIN_THREADS_DESCR(PyArray_DESCR(ret)); is1 = PyArray_STRIDES(ap1)[0]; is2 = PyArray_STRIDES(ap2)[0]; @@ -1203,6 +1225,9 @@ _pyarray_correlate(PyArrayObject *ap1, PyArrayObject *ap2, int typenum, n = n - n_left; for (i = 0; i < n_left; i++) { dot(ip1, is1, ip2, is2, op, n, ret); + if (needs_pyapi && PyErr_Occurred()) { + goto done; + } n++; ip2 -= is2; op += os; @@ -1214,19 +1239,21 @@ _pyarray_correlate(PyArrayObject *ap1, PyArrayObject *ap2, int typenum, op += os * (n1 - n2 + 1); } else { - for (i = 0; i < (n1 - n2 + 1); i++) { + for (i = 0; i < (n1 - n2 + 1) && (!needs_pyapi || !PyErr_Occurred()); + i++) { dot(ip1, is1, ip2, is2, op, n, ret); ip1 += is1; op += os; } } - for (i = 0; i < n_right; i++) { + for (i = 0; i < n_right && (!needs_pyapi || !PyErr_Occurred()); i++) { n--; dot(ip1, is1, ip2, is2, op, n, ret); ip1 += is1; op += os; } +done: NPY_END_THREADS_DESCR(PyArray_DESCR(ret)); if (PyErr_Occurred()) { goto clean_ret; @@ -1536,7 +1563,7 @@ _prepend_ones(PyArrayObject *arr, int nd, int ndmin, NPY_ORDER order) static inline PyObject * 
_array_fromobject_generic( PyObject *op, PyArray_Descr *in_descr, PyArray_DTypeMeta *in_DType, - NPY_COPYMODE copy, NPY_ORDER order, npy_bool subok, int ndmin) + NPY_COPYMODE copy, NPY_ORDER order, npy_bool subok, int ndmin, int ndmax) { PyArrayObject *oparr = NULL, *ret = NULL; PyArray_Descr *oldtype = NULL; @@ -1546,10 +1573,9 @@ _array_fromobject_generic( Py_XINCREF(in_descr); PyArray_Descr *dtype = in_descr; - if (ndmin > NPY_MAXDIMS) { + if (ndmin > ndmax) { PyErr_Format(PyExc_ValueError, - "ndmin bigger than allowable number of dimensions " - "NPY_MAXDIMS (=%d)", NPY_MAXDIMS); + "ndmin must be <= ndmax (%d)", ndmax); goto finish; } /* fast exit if simple call */ @@ -1658,7 +1684,7 @@ _array_fromobject_generic( flags |= NPY_ARRAY_FORCECAST; ret = (PyArrayObject *)PyArray_CheckFromAny_int( - op, dtype, in_DType, 0, 0, flags, NULL); + op, dtype, in_DType, 0, ndmax, flags, NULL); finish: Py_XDECREF(dtype); @@ -1689,6 +1715,7 @@ array_array(PyObject *NPY_UNUSED(ignored), npy_bool subok = NPY_FALSE; NPY_COPYMODE copy = NPY_COPY_ALWAYS; int ndmin = 0; + int ndmax = NPY_MAXDIMS; npy_dtype_info dt_info = {NULL, NULL}; NPY_ORDER order = NPY_KEEPORDER; PyObject *like = Py_None; @@ -1702,6 +1729,7 @@ array_array(PyObject *NPY_UNUSED(ignored), "$order", &PyArray_OrderConverter, &order, "$subok", &PyArray_BoolConverter, &subok, "$ndmin", &PyArray_PythonPyIntFromInt, &ndmin, + "$ndmax", &PyArray_PythonPyIntFromInt, &ndmax, "$like", NULL, &like, NULL, NULL, NULL) < 0) { Py_XDECREF(dt_info.descr); @@ -1723,8 +1751,15 @@ array_array(PyObject *NPY_UNUSED(ignored), op = args[0]; } + if (ndmax > NPY_MAXDIMS || ndmax < 0) { + PyErr_Format(PyExc_ValueError, "ndmax must be in the range [0, NPY_MAXDIMS (%d)] ", NPY_MAXDIMS); + Py_XDECREF(dt_info.descr); + Py_XDECREF(dt_info.dtype); + return NULL; + } + PyObject *res = _array_fromobject_generic( - op, dt_info.descr, dt_info.dtype, copy, order, subok, ndmin); + op, dt_info.descr, dt_info.dtype, copy, order, subok, ndmin, ndmax); 
Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return res; @@ -1770,7 +1805,7 @@ array_asarray(PyObject *NPY_UNUSED(ignored), } PyObject *res = _array_fromobject_generic( - op, dt_info.descr, dt_info.dtype, copy, order, NPY_FALSE, 0); + op, dt_info.descr, dt_info.dtype, copy, order, NPY_FALSE, 0, NPY_MAXDIMS); Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return res; @@ -1816,7 +1851,7 @@ array_asanyarray(PyObject *NPY_UNUSED(ignored), } PyObject *res = _array_fromobject_generic( - op, dt_info.descr, dt_info.dtype, copy, order, NPY_TRUE, 0); + op, dt_info.descr, dt_info.dtype, copy, order, NPY_TRUE, 0, NPY_MAXDIMS); Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return res; @@ -1858,7 +1893,7 @@ array_ascontiguousarray(PyObject *NPY_UNUSED(ignored), PyObject *res = _array_fromobject_generic( op, dt_info.descr, dt_info.dtype, NPY_COPY_IF_NEEDED, NPY_CORDER, NPY_FALSE, - 1); + 1, NPY_MAXDIMS); Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return res; @@ -1900,7 +1935,7 @@ array_asfortranarray(PyObject *NPY_UNUSED(ignored), PyObject *res = _array_fromobject_generic( op, dt_info.descr, dt_info.dtype, NPY_COPY_IF_NEEDED, NPY_FORTRANORDER, - NPY_FALSE, 1); + NPY_FALSE, 1, NPY_MAXDIMS); Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return res; @@ -1908,29 +1943,64 @@ array_asfortranarray(PyObject *NPY_UNUSED(ignored), static PyObject * -array_copyto(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds) +array_copyto(PyObject *NPY_UNUSED(ignored), + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - static char *kwlist[] = {"dst", "src", "casting", "where", NULL}; - PyObject *wheremask_in = NULL; - PyArrayObject *dst = NULL, *src = NULL, *wheremask = NULL; + PyObject *dst_obj, *src_obj, *wheremask_in = NULL; + PyArrayObject *src = NULL, *wheremask = NULL; NPY_CASTING casting = NPY_SAME_KIND_CASTING; + NPY_PREPARE_ARGPARSER; + + if (npy_parse_arguments("copyto", args, len_args, kwnames, + "dst", NULL, &dst_obj, 
+ "src", NULL, &src_obj, + "|casting", &PyArray_CastingConverter, &casting, + "|where", NULL, &wheremask_in, + NULL, NULL, NULL) < 0) { + goto fail; + } + + if (!PyArray_Check(dst_obj)) { + PyErr_Format(PyExc_TypeError, + "copyto() argument 1 must be a numpy.ndarray, not %s", + Py_TYPE(dst_obj)->tp_name); + goto fail; + } + PyArrayObject *dst = (PyArrayObject *)dst_obj; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!O&|O&O:copyto", kwlist, - &PyArray_Type, &dst, - &PyArray_Converter, &src, - &PyArray_CastingConverter, &casting, - &wheremask_in)) { + src = (PyArrayObject *)PyArray_FromAny(src_obj, NULL, 0, 0, 0, NULL); + if (src == NULL) { goto fail; } + PyArray_DTypeMeta *DType = NPY_DTYPE(PyArray_DESCR(src)); + Py_INCREF(DType); + if (npy_mark_tmp_array_if_pyscalar(src_obj, src, &DType)) { + /* The user passed a Python scalar */ + PyArray_Descr *descr = npy_find_descr_for_scalar( + src_obj, PyArray_DESCR(src), DType, + NPY_DTYPE(PyArray_DESCR(dst))); + Py_DECREF(DType); + if (descr == NULL) { + goto fail; + } + int res = npy_update_operand_for_scalar(&src, src_obj, descr, casting); + Py_DECREF(descr); + if (res < 0) { + goto fail; + } + } + else { + Py_DECREF(DType); + } if (wheremask_in != NULL) { /* Get the boolean where mask */ - PyArray_Descr *dtype = PyArray_DescrFromType(NPY_BOOL); - if (dtype == NULL) { + PyArray_Descr *descr = PyArray_DescrFromType(NPY_BOOL); + if (descr == NULL) { goto fail; } wheremask = (PyArrayObject *)PyArray_FromAny(wheremask_in, - dtype, 0, 0, 0, NULL); + descr, 0, 0, 0, NULL); if (wheremask == NULL) { goto fail; } @@ -2071,16 +2141,15 @@ array_scalar(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds) } if (PyDataType_FLAGCHK(typecode, NPY_LIST_PICKLE)) { if (typecode->type_num == NPY_OBJECT) { - /* Deprecated 2020-11-24, NumPy 1.20 */ - if (DEPRECATE( - "Unpickling a scalar with object dtype is deprecated. " - "Object scalars should never be created. 
If this was a " - "properly created pickle, please open a NumPy issue. In " - "a best effort this returns the original object.") < 0) { - return NULL; - } - Py_INCREF(obj); - return obj; + PyErr_SetString(PyExc_TypeError, + "Cannot unpickle a scalar with object dtype."); + return NULL; + } + if (typecode->type_num == NPY_VSTRING) { + // TODO: if we ever add a StringDType scalar, this might need to change + PyErr_SetString(PyExc_TypeError, + "Cannot unpickle a StringDType scalar"); + return NULL; } /* We store the full array to unpack it here: */ if (!PyArray_CheckExact(obj)) { @@ -2228,14 +2297,20 @@ array_count_nonzero(PyObject *NPY_UNUSED(self), PyObject *const *args, Py_ssize_ return NULL; } - count = PyArray_CountNonzero(array); - + count = PyArray_CountNonzero(array); Py_DECREF(array); if (count == -1) { return NULL; } - return PyLong_FromSsize_t(count); + + PyArray_Descr *descr = PyArray_DescrFromType(NPY_INTP); + if (descr == NULL) { + return NULL; + } + PyObject *result = PyArray_Scalar(&count, descr, NULL); + Py_DECREF(descr); + return result; } static PyObject * @@ -2266,13 +2341,10 @@ array_fromstring(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds /* binary mode, condition copied from PyArray_FromString */ if (sep == NULL || strlen(sep) == 0) { - /* Numpy 1.14, 2017-10-19 */ - if (DEPRECATE( - "The binary mode of fromstring is deprecated, as it behaves " - "surprisingly on unicode inputs. 
Use frombuffer instead") < 0) { - Py_XDECREF(descr); - return NULL; - } + PyErr_SetString(PyExc_ValueError, + "The binary mode of fromstring is removed, use frombuffer instead"); + Py_XDECREF(descr); + return NULL; } return PyArray_FromString(data, (npy_intp)s, descr, (npy_intp)nin, sep); } @@ -2340,6 +2412,7 @@ array_fromfile(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds) } if (npy_fseek(fp, offset, SEEK_CUR) != 0) { PyErr_SetFromErrno(PyExc_OSError); + Py_XDECREF(type); goto cleanup; } if (type == NULL) { @@ -2437,7 +2510,6 @@ array_concatenate(PyObject *NPY_UNUSED(dummy), PyObject *out = NULL; PyArray_Descr *dtype = NULL; NPY_CASTING casting = NPY_SAME_KIND_CASTING; - PyObject *casting_obj = NULL; PyObject *res; int axis = 0; @@ -2447,22 +2519,10 @@ array_concatenate(PyObject *NPY_UNUSED(dummy), "|axis", &PyArray_AxisConverter, &axis, "|out", NULL, &out, "$dtype", &PyArray_DescrConverter2, &dtype, - "$casting", NULL, &casting_obj, + "$casting", &PyArray_CastingConverter, &casting, NULL, NULL, NULL) < 0) { return NULL; } - int casting_not_passed = 0; - if (casting_obj == NULL) { - /* - * Casting was not passed in, needed for deprecation only. - * This should be simplified once the deprecation is finished. 
- */ - casting_not_passed = 1; - } - else if (!PyArray_CastingConverter(casting_obj, &casting)) { - Py_XDECREF(dtype); - return NULL; - } if (out != NULL) { if (out == Py_None) { out = NULL; @@ -2474,7 +2534,7 @@ array_concatenate(PyObject *NPY_UNUSED(dummy), } } res = PyArray_ConcatenateInto(a0, axis, (PyArrayObject *)out, dtype, - casting, casting_not_passed); + casting); Py_XDECREF(dtype); return res; } @@ -2647,13 +2707,13 @@ array_vdot(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t len_ar } static int -einsum_sub_op_from_str(PyObject *args, PyObject **str_obj, char **subscripts, - PyArrayObject **op) +einsum_sub_op_from_str( + Py_ssize_t nargs, PyObject *const *args, + PyObject **str_obj, char **subscripts, PyArrayObject **op) { - int i, nop; + Py_ssize_t nop = nargs - 1; PyObject *subscripts_str; - nop = PyTuple_GET_SIZE(args) - 1; if (nop <= 0) { PyErr_SetString(PyExc_ValueError, "must specify the einstein sum subscripts string " @@ -2666,7 +2726,7 @@ einsum_sub_op_from_str(PyObject *args, PyObject **str_obj, char **subscripts, } /* Get the subscripts string */ - subscripts_str = PyTuple_GET_ITEM(args, 0); + subscripts_str = args[0]; if (PyUnicode_Check(subscripts_str)) { *str_obj = PyUnicode_AsASCIIString(subscripts_str); if (*str_obj == NULL) { @@ -2683,15 +2743,13 @@ einsum_sub_op_from_str(PyObject *args, PyObject **str_obj, char **subscripts, } /* Set the operands to NULL */ - for (i = 0; i < nop; ++i) { + for (Py_ssize_t i = 0; i < nop; ++i) { op[i] = NULL; } /* Get the operands */ - for (i = 0; i < nop; ++i) { - PyObject *obj = PyTuple_GET_ITEM(args, i+1); - - op[i] = (PyArrayObject *)PyArray_FROM_OF(obj, NPY_ARRAY_ENSUREARRAY); + for (Py_ssize_t i = 0; i < nop; ++i) { + op[i] = (PyArrayObject *)PyArray_FROM_OF(args[i+1], NPY_ARRAY_ENSUREARRAY); if (op[i] == NULL) { goto fail; } @@ -2700,7 +2758,7 @@ einsum_sub_op_from_str(PyObject *args, PyObject **str_obj, char **subscripts, return nop; fail: - for (i = 0; i < nop; ++i) { + for 
(Py_ssize_t i = 0; i < nop; ++i) { Py_XDECREF(op[i]); op[i] = NULL; } @@ -2717,32 +2775,33 @@ einsum_sub_op_from_str(PyObject *args, PyObject **str_obj, char **subscripts, static int einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) { - int ellipsis = 0, subindex = 0; + int ellipsis = 0, subindex = 0, ret = -1; npy_intp i, size; - PyObject *item; + PyObject *item, *seq; - obj = PySequence_Fast(obj, "the subscripts for each operand must " + seq = PySequence_Fast(obj, "the subscripts for each operand must " // noqa: borrowed-ref OK "be a list or a tuple"); - if (obj == NULL) { + if (seq == NULL) { return -1; } - size = PySequence_Size(obj); + + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(obj); + + size = PySequence_Size(seq); for (i = 0; i < size; ++i) { - item = PySequence_Fast_GET_ITEM(obj, i); + item = PySequence_Fast_GET_ITEM(seq, i); /* Ellipsis */ if (item == Py_Ellipsis) { if (ellipsis) { PyErr_SetString(PyExc_ValueError, "each subscripts list may have only one ellipsis"); - Py_DECREF(obj); - return -1; + goto cleanup; } if (subindex + 3 >= subsize) { PyErr_SetString(PyExc_ValueError, "subscripts list is too long"); - Py_DECREF(obj); - return -1; + goto cleanup; } subscripts[subindex++] = '.'; subscripts[subindex++] = '.'; @@ -2757,16 +2816,14 @@ einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) PyErr_SetString(PyExc_TypeError, "each subscript must be either an integer " "or an ellipsis"); - Py_DECREF(obj); - return -1; + goto cleanup; } npy_bool bad_input = 0; if (subindex + 1 >= subsize) { PyErr_SetString(PyExc_ValueError, "subscripts list is too long"); - Py_DECREF(obj); - return -1; + goto cleanup; } if (s < 0) { @@ -2785,16 +2842,19 @@ einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) if (bad_input) { PyErr_SetString(PyExc_ValueError, "subscript is not within the valid range [0, 52)"); - Py_DECREF(obj); - return -1; + goto cleanup; } } - } - Py_DECREF(obj); + ret = subindex; + + cleanup:; - 
return subindex; + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST(); + Py_DECREF(seq); + + return ret; } /* @@ -2804,13 +2864,12 @@ einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) * Returns -1 on error, number of operands placed in op otherwise. */ static int -einsum_sub_op_from_lists(PyObject *args, - char *subscripts, int subsize, PyArrayObject **op) +einsum_sub_op_from_lists(Py_ssize_t nargs, PyObject *const *args, + char *subscripts, int subsize, PyArrayObject **op) { int subindex = 0; - npy_intp i, nop; - nop = PyTuple_Size(args)/2; + Py_ssize_t nop = nargs / 2; if (nop == 0) { PyErr_SetString(PyExc_ValueError, "must provide at least an " @@ -2823,15 +2882,12 @@ einsum_sub_op_from_lists(PyObject *args, } /* Set the operands to NULL */ - for (i = 0; i < nop; ++i) { + for (Py_ssize_t i = 0; i < nop; ++i) { op[i] = NULL; } /* Get the operands and build the subscript string */ - for (i = 0; i < nop; ++i) { - PyObject *obj = PyTuple_GET_ITEM(args, 2*i); - int n; - + for (Py_ssize_t i = 0; i < nop; ++i) { /* Comma between the subscripts for each operand */ if (i != 0) { subscripts[subindex++] = ','; @@ -2842,14 +2898,13 @@ einsum_sub_op_from_lists(PyObject *args, } } - op[i] = (PyArrayObject *)PyArray_FROM_OF(obj, NPY_ARRAY_ENSUREARRAY); + op[i] = (PyArrayObject *)PyArray_FROM_OF(args[2*i], NPY_ARRAY_ENSUREARRAY); if (op[i] == NULL) { goto fail; } - obj = PyTuple_GET_ITEM(args, 2*i+1); - n = einsum_list_to_subscripts(obj, subscripts+subindex, - subsize-subindex); + int n = einsum_list_to_subscripts( + args[2*i + 1], subscripts+subindex, subsize-subindex); if (n < 0) { goto fail; } @@ -2857,10 +2912,7 @@ einsum_sub_op_from_lists(PyObject *args, } /* Add the '->' to the string if provided */ - if (PyTuple_Size(args) == 2*nop+1) { - PyObject *obj; - int n; - + if (nargs == 2*nop+1) { if (subindex + 2 >= subsize) { PyErr_SetString(PyExc_ValueError, "subscripts list is too long"); @@ -2869,9 +2921,8 @@ einsum_sub_op_from_lists(PyObject *args, 
subscripts[subindex++] = '-'; subscripts[subindex++] = '>'; - obj = PyTuple_GET_ITEM(args, 2*nop); - n = einsum_list_to_subscripts(obj, subscripts+subindex, - subsize-subindex); + int n = einsum_list_to_subscripts( + args[2*nop], subscripts+subindex, subsize-subindex); if (n < 0) { goto fail; } @@ -2884,7 +2935,7 @@ einsum_sub_op_from_lists(PyObject *args, return nop; fail: - for (i = 0; i < nop; ++i) { + for (Py_ssize_t i = 0; i < nop; ++i) { Py_XDECREF(op[i]); op[i] = NULL; } @@ -2893,36 +2944,39 @@ einsum_sub_op_from_lists(PyObject *args, } static PyObject * -array_einsum(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) +array_einsum(PyObject *NPY_UNUSED(dummy), + PyObject *const *args, Py_ssize_t nargsf, PyObject *kwnames) { char *subscripts = NULL, subscripts_buffer[256]; PyObject *str_obj = NULL, *str_key_obj = NULL; - PyObject *arg0; - int i, nop; + int nop; PyArrayObject *op[NPY_MAXARGS]; NPY_ORDER order = NPY_KEEPORDER; NPY_CASTING casting = NPY_SAFE_CASTING; + PyObject *out_obj = NULL; PyArrayObject *out = NULL; PyArray_Descr *dtype = NULL; PyObject *ret = NULL; + NPY_PREPARE_ARGPARSER; - if (PyTuple_GET_SIZE(args) < 1) { + Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); + + if (nargs < 1) { PyErr_SetString(PyExc_ValueError, "must specify the einstein sum subscripts string " "and at least one operand, or at least one operand " "and its corresponding subscripts list"); return NULL; } - arg0 = PyTuple_GET_ITEM(args, 0); /* einsum('i,j', a, b), einsum('i,j->ij', a, b) */ - if (PyBytes_Check(arg0) || PyUnicode_Check(arg0)) { - nop = einsum_sub_op_from_str(args, &str_obj, &subscripts, op); + if (PyBytes_Check(args[0]) || PyUnicode_Check(args[0])) { + nop = einsum_sub_op_from_str(nargs, args, &str_obj, &subscripts, op); } /* einsum(a, [0], b, [1]), einsum(a, [0], b, [1], [0,1]) */ else { - nop = einsum_sub_op_from_lists(args, subscripts_buffer, - sizeof(subscripts_buffer), op); + nop = einsum_sub_op_from_lists(nargs, args, subscripts_buffer, + 
sizeof(subscripts_buffer), op); subscripts = subscripts_buffer; } if (nop <= 0) { @@ -2930,63 +2984,26 @@ array_einsum(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) } /* Get the keyword arguments */ - if (kwds != NULL) { - PyObject *key, *value; - Py_ssize_t pos = 0; - while (PyDict_Next(kwds, &pos, &key, &value)) { - char *str = NULL; - - Py_XDECREF(str_key_obj); - str_key_obj = PyUnicode_AsASCIIString(key); - if (str_key_obj != NULL) { - key = str_key_obj; - } - - str = PyBytes_AsString(key); - - if (str == NULL) { - PyErr_Clear(); - PyErr_SetString(PyExc_TypeError, "invalid keyword"); - goto finish; - } - - if (strcmp(str,"out") == 0) { - if (PyArray_Check(value)) { - out = (PyArrayObject *)value; - } - else { - PyErr_SetString(PyExc_TypeError, - "keyword parameter out must be an " - "array for einsum"); - goto finish; - } - } - else if (strcmp(str,"order") == 0) { - if (!PyArray_OrderConverter(value, &order)) { - goto finish; - } - } - else if (strcmp(str,"casting") == 0) { - if (!PyArray_CastingConverter(value, &casting)) { - goto finish; - } - } - else if (strcmp(str,"dtype") == 0) { - if (!PyArray_DescrConverter2(value, &dtype)) { - goto finish; - } - } - else { - PyErr_Format(PyExc_TypeError, - "'%s' is an invalid keyword for einsum", - str); - goto finish; - } + if (kwnames != NULL) { + if (npy_parse_arguments("einsum", args+nargs, 0, kwnames, + "$out", NULL, &out_obj, + "$order", &PyArray_OrderConverter, &order, + "$casting", &PyArray_CastingConverter, &casting, + "$dtype", &PyArray_DescrConverter2, &dtype, + NULL, NULL, NULL) < 0) { + goto finish; + } + if (out_obj != NULL && !PyArray_Check(out_obj)) { + PyErr_SetString(PyExc_TypeError, + "keyword parameter out must be an " + "array for einsum"); + goto finish; } + out = (PyArrayObject *)out_obj; } ret = (PyObject *)PyArray_EinsteinSum(subscripts, nop, op, dtype, - order, casting, out); + order, casting, out); /* If no output was supplied, possibly convert to a scalar */ if (ret != NULL && 
out == NULL) { @@ -2994,7 +3011,7 @@ array_einsum(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) } finish: - for (i = 0; i < nop; ++i) { + for (Py_ssize_t i = 0; i < nop; ++i) { Py_XDECREF(op[i]); } Py_XDECREF(dtype); @@ -3176,31 +3193,6 @@ array__reconstruct(PyObject *NPY_UNUSED(dummy), PyObject *args) return NULL; } -static PyObject * -array_set_string_function(PyObject *NPY_UNUSED(self), PyObject *args, - PyObject *kwds) -{ - PyObject *op = NULL; - int repr = 1; - static char *kwlist[] = {"f", "repr", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|Oi:set_string_function", kwlist, &op, &repr)) { - return NULL; - } - /* reset the array_repr function to built-in */ - if (op == Py_None) { - op = NULL; - } - if (op != NULL && !PyCallable_Check(op)) { - PyErr_SetString(PyExc_TypeError, - "Argument must be callable."); - return NULL; - } - PyArray_SetStringFunction(op, repr); - Py_RETURN_NONE; -} - - static PyObject * array_set_datetimeparse_function(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args), PyObject *NPY_UNUSED(kwds)) @@ -3241,6 +3233,7 @@ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) PyArrayObject *arr = NULL, *ax = NULL, *ay = NULL; PyObject *ret = NULL; PyArray_Descr *common_dt = NULL; + NpyIter *iter = NULL; arr = (PyArrayObject *)PyArray_FROM_O(condition); if (arr == NULL) { @@ -3310,7 +3303,6 @@ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) /* `PyArray_DescrFromType` cannot fail for simple builtin types: */ PyArray_Descr * op_dt[4] = {common_dt, PyArray_DescrFromType(NPY_BOOL), x_dt, y_dt}; - NpyIter * iter; NPY_BEGIN_THREADS_DEF; iter = NpyIter_MultiNew( @@ -3444,6 +3436,9 @@ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) Py_XDECREF(common_dt); NPY_cast_info_xfree(&x_cast_info); NPY_cast_info_xfree(&y_cast_info); + if (iter != NULL) { + NpyIter_Deallocate(iter); + } return NULL; } @@ -3517,30 +3512,18 @@ array_can_cast_safely(PyObject *NPY_UNUSED(self), * TODO: `PyArray_IsScalar` 
should not be required for new dtypes. * weak-promotion branch is in practice identical to dtype one. */ - if (get_npy_promotion_state() == NPY_USE_WEAK_PROMOTION) { - PyObject *descr = PyObject_GetAttr(from_obj, npy_ma_str_dtype); - if (descr == NULL) { - goto finish; - } - if (!PyArray_DescrCheck(descr)) { - Py_DECREF(descr); - PyErr_SetString(PyExc_TypeError, - "numpy_scalar.dtype did not return a dtype instance."); - goto finish; - } - ret = PyArray_CanCastTypeTo((PyArray_Descr *)descr, d2, casting); - Py_DECREF(descr); + PyObject *descr = PyObject_GetAttr(from_obj, npy_interned_str.dtype); + if (descr == NULL) { + goto finish; } - else { - /* need to convert to object to consider old value-based logic */ - PyArrayObject *arr; - arr = (PyArrayObject *)PyArray_FROM_O(from_obj); - if (arr == NULL) { - goto finish; - } - ret = PyArray_CanCastArrayTo(arr, d2, casting); - Py_DECREF(arr); + if (!PyArray_DescrCheck(descr)) { + Py_DECREF(descr); + PyErr_SetString(PyExc_TypeError, + "numpy_scalar.dtype did not return a dtype instance."); + goto finish; } + ret = PyArray_CanCastTypeTo((PyArray_Descr *)descr, d2, casting); + Py_DECREF(descr); } else if (PyArray_IsPythonNumber(from_obj)) { PyErr_SetString(PyExc_TypeError, @@ -3623,24 +3606,28 @@ static PyObject * array_result_type(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t len) { npy_intp i, narr = 0, ndtypes = 0; - PyArrayObject **arr = NULL; - PyArray_Descr **dtypes = NULL; PyObject *ret = NULL; if (len == 0) { PyErr_SetString(PyExc_ValueError, "at least one array or dtype is required"); - goto finish; + return NULL; } - arr = PyArray_malloc(2 * len * sizeof(void *)); + NPY_ALLOC_WORKSPACE(arr, PyArrayObject *, 2 * 3, 2 * len); if (arr == NULL) { - return PyErr_NoMemory(); + return NULL; } - dtypes = (PyArray_Descr**)&arr[len]; + PyArray_Descr **dtypes = (PyArray_Descr**)&arr[len]; + + PyObject *previous_obj = NULL; for (i = 0; i < len; ++i) { PyObject *obj = args[i]; + if (obj == previous_obj) { + 
continue; + } + if (PyArray_Check(obj)) { Py_INCREF(obj); arr[narr] = (PyArrayObject *)obj; @@ -3676,7 +3663,7 @@ array_result_type(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t for (i = 0; i < ndtypes; ++i) { Py_DECREF(dtypes[i]); } - PyArray_free(arr); + npy_free_workspace(arr); return ret; } @@ -4290,11 +4277,8 @@ array_shares_memory_impl(PyObject *args, PyObject *kwds, Py_ssize_t default_max_ } else if (result == MEM_OVERLAP_TOO_HARD) { if (raise_exceptions) { - static PyObject *too_hard_cls = NULL; - npy_cache_import("numpy.exceptions", "TooHardError", &too_hard_cls); - if (too_hard_cls) { - PyErr_SetString(too_hard_cls, "Exceeded max_work"); - } + PyErr_SetString(npy_static_pydata.TooHardError, + "Exceeded max_work"); return NULL; } else { @@ -4353,6 +4337,108 @@ normalize_axis_index(PyObject *NPY_UNUSED(self), } +static PyObject * +_populate_finfo_constants(PyObject *NPY_UNUSED(self), PyObject *args) +{ + if (PyTuple_Size(args) != 2) { + PyErr_SetString(PyExc_TypeError, "Expected 2 arguments"); + return NULL; + } + PyObject *finfo = PyTuple_GetItem(args, 0); + if (finfo == NULL || finfo == Py_None) { + PyErr_SetString(PyExc_TypeError, "First argument cannot be None"); + return NULL; + } + PyArray_Descr *descr = (PyArray_Descr *)PyTuple_GetItem(args, 1); + if (!PyArray_DescrCheck(descr)) { + PyErr_SetString(PyExc_TypeError, "Second argument must be a dtype"); + return NULL; + } + + static const struct { + char *name; + int id; + npy_bool is_int; + } finfo_constants[] = { + {"max", NPY_CONSTANT_maximum_finite, 0}, + {"min", NPY_CONSTANT_minimum_finite, 0}, + {"_radix", NPY_CONSTANT_finfo_radix, 0}, + {"eps", NPY_CONSTANT_finfo_eps, 0}, + {"smallest_normal", NPY_CONSTANT_finfo_smallest_normal, 0}, + {"smallest_subnormal", NPY_CONSTANT_finfo_smallest_subnormal, 0}, + {"nmant", NPY_CONSTANT_finfo_nmant, 1}, + {"minexp", NPY_CONSTANT_finfo_min_exp, 1}, + {"maxexp", NPY_CONSTANT_finfo_max_exp, 1}, + {"precision", NPY_CONSTANT_finfo_decimal_digits, 
1}, + }; + static const int n_finfo_constants = sizeof(finfo_constants) / sizeof(finfo_constants[0]); + + int n_float_constants = 0; + for (int i = 0; i < n_finfo_constants; i++) { + if (!finfo_constants[i].is_int) { + n_float_constants++; + } + } + + PyArrayObject *buffer_array = NULL; + char *buffer_data = NULL; + npy_intp dims[1] = {n_float_constants}; + + Py_INCREF(descr); + buffer_array = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, + descr, 1, dims, NULL, NULL, 0, NULL); + if (buffer_array == NULL) { + return NULL; + } + buffer_data = PyArray_BYTES(buffer_array); + npy_intp elsize = PyArray_DESCR(buffer_array)->elsize; + + for (int i = 0; i < n_finfo_constants; i++) + { + PyObject *value_obj; + if (!finfo_constants[i].is_int) { + int res = NPY_DT_CALL_get_constant(descr, + finfo_constants[i].id, buffer_data); + if (res < 0) { + goto fail; + } + if (res == 0) { + buffer_data += elsize; // Move to next element + continue; + } + // Return as 0-d array item to preserve numpy scalar type + value_obj = PyArray_ToScalar(buffer_data, buffer_array); + buffer_data += elsize; // Move to next element + } + else { + npy_intp int_value; + int res = NPY_DT_CALL_get_constant(descr, finfo_constants[i].id, &int_value); + if (res < 0) { + goto fail; + } + if (res == 0) { + continue; + } + value_obj = PyLong_FromSsize_t(int_value); + } + if (value_obj == NULL) { + goto fail; + } + int res = PyObject_SetAttrString(finfo, finfo_constants[i].name, value_obj); + Py_DECREF(value_obj); + if (res < 0) { + goto fail; + } + } + + Py_DECREF(buffer_array); + Py_RETURN_NONE; + fail: + Py_XDECREF(buffer_array); + return NULL; +} + + static PyObject * _set_numpy_warn_if_no_mem_policy(PyObject *NPY_UNUSED(self), PyObject *arg) { @@ -4360,8 +4446,8 @@ _set_numpy_warn_if_no_mem_policy(PyObject *NPY_UNUSED(self), PyObject *arg) if (res < 0) { return NULL; } - int old_value = numpy_warn_if_no_mem_policy; - numpy_warn_if_no_mem_policy = res; + int old_value = 
npy_global_state.warn_if_no_mem_policy; + npy_global_state.warn_if_no_mem_policy = res; if (old_value) { Py_RETURN_TRUE; } @@ -4372,10 +4458,26 @@ _set_numpy_warn_if_no_mem_policy(PyObject *NPY_UNUSED(self), PyObject *arg) static PyObject * -_reload_guard(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { - static int initialized = 0; +_blas_supports_fpe(PyObject *NPY_UNUSED(self), PyObject *arg) { + if (arg == Py_None) { + return PyBool_FromLong(npy_blas_supports_fpe()); + } + else if (arg == Py_True) { + return PyBool_FromLong(npy_set_blas_supports_fpe(true)); + } + else if (arg == Py_False) { + return PyBool_FromLong(npy_set_blas_supports_fpe(false)); + } + else { + PyErr_SetString(PyExc_TypeError, + "BLAS FPE support must be None, True, or False"); + return NULL; + } +} + -#if !defined(PYPY_VERSION) +static PyObject * +_reload_guard(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { if (PyThreadState_Get()->interp != PyInterpreterState_Main()) { if (PyErr_WarnEx(PyExc_UserWarning, "NumPy was imported from a Python sub-interpreter but " @@ -4390,11 +4492,10 @@ _reload_guard(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { return NULL; } /* No need to give the other warning in a sub-interpreter as well... */ - initialized = 1; + npy_global_state.reload_guard_initialized = 1; Py_RETURN_NONE; } -#endif - if (initialized) { + if (npy_global_state.reload_guard_initialized) { if (PyErr_WarnEx(PyExc_UserWarning, "The NumPy module was reloaded (imported a second time). 
" "This can in some cases result in small but subtle issues " @@ -4402,7 +4503,7 @@ _reload_guard(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { return NULL; } } - initialized = 1; + npy_global_state.reload_guard_initialized = 1; Py_RETURN_NONE; } @@ -4417,9 +4518,6 @@ static struct PyMethodDef array_module_methods[] = { {"_reconstruct", (PyCFunction)array__reconstruct, METH_VARARGS, NULL}, - {"set_string_function", - (PyCFunction)array_set_string_function, - METH_VARARGS|METH_KEYWORDS, NULL}, {"set_datetimeparse_function", (PyCFunction)array_set_datetimeparse_function, METH_VARARGS|METH_KEYWORDS, NULL}, @@ -4443,7 +4541,7 @@ static struct PyMethodDef array_module_methods[] = { METH_FASTCALL | METH_KEYWORDS, NULL}, {"copyto", (PyCFunction)array_copyto, - METH_VARARGS|METH_KEYWORDS, NULL}, + METH_FASTCALL | METH_KEYWORDS, NULL}, {"nested_iters", (PyCFunction)NpyIter_NestedIters, METH_VARARGS|METH_KEYWORDS, NULL}, @@ -4494,7 +4592,7 @@ static struct PyMethodDef array_module_methods[] = { METH_FASTCALL, NULL}, {"c_einsum", (PyCFunction)array_einsum, - METH_VARARGS|METH_KEYWORDS, NULL}, + METH_FASTCALL|METH_KEYWORDS, NULL}, {"correlate", (PyCFunction)array_correlate, METH_FASTCALL | METH_KEYWORDS, NULL}, @@ -4581,14 +4679,14 @@ static struct PyMethodDef array_module_methods[] = { METH_VARARGS | METH_KEYWORDS, NULL}, {"normalize_axis_index", (PyCFunction)normalize_axis_index, METH_FASTCALL | METH_KEYWORDS, NULL}, - {"set_legacy_print_mode", (PyCFunction)set_legacy_print_mode, - METH_VARARGS, NULL}, {"_discover_array_parameters", (PyCFunction)_discover_array_parameters, METH_FASTCALL | METH_KEYWORDS, NULL}, {"_get_castingimpl", (PyCFunction)_get_castingimpl, METH_VARARGS | METH_KEYWORDS, NULL}, {"_load_from_filelike", (PyCFunction)_load_from_filelike, METH_FASTCALL | METH_KEYWORDS, NULL}, + {"_populate_finfo_constants", (PyCFunction)_populate_finfo_constants, + METH_VARARGS, NULL}, /* from umath */ {"frompyfunc", (PyCFunction) ufunc_frompyfunc, @@ -4605,30 
+4703,24 @@ static struct PyMethodDef array_module_methods[] = { {"get_handler_version", (PyCFunction) get_handler_version, METH_VARARGS, NULL}, - {"_get_promotion_state", - (PyCFunction)npy__get_promotion_state, - METH_NOARGS, "Get the current NEP 50 promotion state."}, - {"_set_promotion_state", - (PyCFunction)npy__set_promotion_state, - METH_O, "Set the NEP 50 promotion state. This is not thread-safe.\n" - "The optional warnings can be safely silenced using the \n" - "`np._no_nep50_warning()` context manager."}, {"_set_numpy_warn_if_no_mem_policy", (PyCFunction)_set_numpy_warn_if_no_mem_policy, METH_O, "Change the warn if no mem policy flag for testing."}, - {"_add_newdoc_ufunc", (PyCFunction)add_newdoc_ufunc, - METH_VARARGS, NULL}, {"_get_sfloat_dtype", get_sfloat_dtype, METH_NOARGS, NULL}, {"_get_madvise_hugepage", (PyCFunction)_get_madvise_hugepage, METH_NOARGS, NULL}, {"_set_madvise_hugepage", (PyCFunction)_set_madvise_hugepage, METH_O, NULL}, + {"_blas_supports_fpe", (PyCFunction)_blas_supports_fpe, + METH_O, "BLAS FPE support pass None, True, or False and returns new value"}, {"_reload_guard", (PyCFunction)_reload_guard, METH_NOARGS, "Give a warning on reload and big warning in sub-interpreters."}, {"from_dlpack", (PyCFunction)from_dlpack, METH_FASTCALL | METH_KEYWORDS, NULL}, + {"_unique_hash", (PyCFunction)array__unique_hash, + METH_FASTCALL | METH_KEYWORDS, "Collect unique values via a hash map."}, {NULL, NULL, 0, NULL} /* sentinel */ }; @@ -4743,7 +4835,7 @@ setup_scalartypes(PyObject *NPY_UNUSED(dict)) DUAL_INHERIT(CDouble, Complex, ComplexFloating); SINGLE_INHERIT(CLongDouble, ComplexFloating); - DUAL_INHERIT2(String, String, Character); + DUAL_INHERIT2(String, Bytes, Character); DUAL_INHERIT2(Unicode, Unicode, Character); SINGLE_INHERIT(Void, Flexible); @@ -4799,190 +4891,43 @@ set_flaginfo(PyObject *d) return; } -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_current_allocator = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array = NULL; 
-NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_function = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_struct = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_interface = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_priority = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_wrap = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_finalize = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_implementation = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_axis1 = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_axis2 = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_like = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_numpy = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_where = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_convert = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_preserve = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_convert_if_no_array = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_cpu = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_dtype = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_err_msg_substr = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str___dlpack__ = NULL; - -static int -intern_strings(void) -{ - npy_ma_str_current_allocator = PyUnicode_InternFromString("current_allocator"); - if (npy_ma_str_current_allocator == NULL) { - return -1; - } - npy_ma_str_array = PyUnicode_InternFromString("__array__"); - if (npy_ma_str_array == NULL) { - return -1; - } - npy_ma_str_array_function = PyUnicode_InternFromString("__array_function__"); - if (npy_ma_str_array_function == NULL) { - return -1; - } - npy_ma_str_array_struct = PyUnicode_InternFromString("__array_struct__"); - if (npy_ma_str_array_struct == NULL) { - return -1; - } - npy_ma_str_array_priority = PyUnicode_InternFromString("__array_priority__"); - if (npy_ma_str_array_priority == NULL) { - return -1; - } - npy_ma_str_array_interface = 
PyUnicode_InternFromString("__array_interface__"); - if (npy_ma_str_array_interface == NULL) { - return -1; - } - npy_ma_str_array_wrap = PyUnicode_InternFromString("__array_wrap__"); - if (npy_ma_str_array_wrap == NULL) { - return -1; - } - npy_ma_str_array_finalize = PyUnicode_InternFromString("__array_finalize__"); - if (npy_ma_str_array_finalize == NULL) { - return -1; - } - npy_ma_str_implementation = PyUnicode_InternFromString("_implementation"); - if (npy_ma_str_implementation == NULL) { - return -1; - } - npy_ma_str_axis1 = PyUnicode_InternFromString("axis1"); - if (npy_ma_str_axis1 == NULL) { - return -1; - } - npy_ma_str_axis2 = PyUnicode_InternFromString("axis2"); - if (npy_ma_str_axis2 == NULL) { - return -1; - } - npy_ma_str_like = PyUnicode_InternFromString("like"); - if (npy_ma_str_like == NULL) { - return -1; - } - npy_ma_str_numpy = PyUnicode_InternFromString("numpy"); - if (npy_ma_str_numpy == NULL) { - return -1; - } - npy_ma_str_where = PyUnicode_InternFromString("where"); - if (npy_ma_str_where == NULL) { - return -1; - } - /* scalar policies */ - npy_ma_str_convert = PyUnicode_InternFromString("convert"); - if (npy_ma_str_convert == NULL) { - return -1; - } - npy_ma_str_preserve = PyUnicode_InternFromString("preserve"); - if (npy_ma_str_preserve == NULL) { - return -1; - } - npy_ma_str_convert_if_no_array = PyUnicode_InternFromString("convert_if_no_array"); - if (npy_ma_str_convert_if_no_array == NULL) { - return -1; - } - npy_ma_str_cpu = PyUnicode_InternFromString("cpu"); - if (npy_ma_str_cpu == NULL) { - return -1; - } - npy_ma_str_dtype = PyUnicode_InternFromString("dtype"); - if (npy_ma_str_dtype == NULL) { - return -1; - } - npy_ma_str_array_err_msg_substr = PyUnicode_InternFromString( - "__array__() got an unexpected keyword argument 'copy'"); - if (npy_ma_str_array_err_msg_substr == NULL) { - return -1; - } - npy_ma_str___dlpack__ = PyUnicode_InternFromString("__dlpack__"); - if (npy_ma_str___dlpack__ == NULL) { - return -1; - } - 
return 0; -} - +// static variables are automatically zero-initialized +NPY_VISIBILITY_HIDDEN npy_global_state_struct npy_global_state; -/* - * Initializes global constants. At some points these need to be cleaned - * up, and sometimes we also import them where they are needed. But for - * some things, adding an `npy_cache_import` everywhere seems inconvenient. - * - * These globals should not need the C-layer at all and will be imported - * before anything on the C-side is initialized. - */ static int -initialize_static_globals(void) -{ - assert(npy_DTypePromotionError == NULL); - npy_cache_import( - "numpy.exceptions", "DTypePromotionError", - &npy_DTypePromotionError); - if (npy_DTypePromotionError == NULL) { - return -1; - } - - assert(npy_UFuncNoLoopError == NULL); - npy_cache_import( - "numpy._core._exceptions", "_UFuncNoLoopError", - &npy_UFuncNoLoopError); - if (npy_UFuncNoLoopError == NULL) { - return -1; - } - +initialize_global_state(void) { char *env = getenv("NUMPY_WARN_IF_NO_MEM_POLICY"); if ((env != NULL) && (strncmp(env, "1", 1) == 0)) { - numpy_warn_if_no_mem_policy = 1; + npy_global_state.warn_if_no_mem_policy = 1; } else { - numpy_warn_if_no_mem_policy = 0; + npy_global_state.warn_if_no_mem_policy = 0; } return 0; } +static int module_loaded = 0; -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_multiarray_umath", - NULL, - -1, - array_module_methods, - NULL, - NULL, - NULL, - NULL -}; - -/* Initialization function for the module */ -PyMODINIT_FUNC PyInit__multiarray_umath(void) { - PyObject *m, *d, *s; - PyObject *c_api; +static int +_multiarray_umath_exec(PyObject *m) { + PyObject *d, *s, *c_api; - /* Create the module and add the functions */ - m = PyModule_Create(&moduledef); - if (!m) { - return NULL; + // https://docs.python.org/3/howto/isolating-extensions.html#opt-out-limiting-to-one-module-object-per-process + if (module_loaded) { + PyErr_SetString(PyExc_ImportError, + "cannot load module more than once per process"); 
+ return -1; } + module_loaded = 1; /* Initialize CPU features */ if (npy_cpu_init() < 0) { - goto err; + return -1; } /* Initialize CPU dispatch tracer */ if (npy_cpu_dispatch_tracer_init(m) < 0) { - goto err; + return -1; } #if defined(MS_WIN64) && defined(__GNUC__) @@ -4998,54 +4943,73 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { numpy_pydatetime_import(); if (PyErr_Occurred()) { - goto err; + return -1; } /* Add some symbolic constants to the module */ d = PyModule_GetDict(m); if (!d) { - goto err; + return -1; } if (intern_strings() < 0) { - goto err; + return -1; } if (initialize_static_globals() < 0) { - goto err; + return -1; + } + + if (initialize_global_state() < 0) { + return -1; + } + + if (init_import_mutex() < 0) { + return -1; } if (init_extobj() < 0) { - goto err; + return -1; } + /* Set __signature__ to None on the type (the instance has a property) */ + s = npy_import("numpy._globals", "_signature_descriptor"); + if (s == NULL) { + return -1; + } + PyUFunc_Type.tp_dict = Py_BuildValue( + "{ON}", npy_interned_str.__signature__, s); + if (PyUFunc_Type.tp_dict == NULL) { + return -1; + } if (PyType_Ready(&PyUFunc_Type) < 0) { - goto err; + Py_CLEAR(PyUFunc_Type.tp_dict); + return -1; } PyArrayDTypeMeta_Type.tp_base = &PyType_Type; if (PyType_Ready(&PyArrayDTypeMeta_Type) < 0) { - goto err; + return -1; } PyArrayDescr_Type.tp_hash = PyArray_DescrHash; Py_SET_TYPE(&PyArrayDescr_Type, &PyArrayDTypeMeta_Type); if (PyType_Ready(&PyArrayDescr_Type) < 0) { - goto err; + return -1; } initialize_casting_tables(); initialize_numeric_types(); if (initscalarmath(m) < 0) { - goto err; + return -1; } if (PyType_Ready(&PyArray_Type) < 0) { - goto err; + return -1; } if (setup_scalartypes(d) < 0) { - goto err; + return -1; } PyArrayIter_Type.tp_iter = PyObject_SelfIter; @@ -5053,28 +5017,28 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { PyArrayMultiIter_Type.tp_iter = PyObject_SelfIter; PyArrayMultiIter_Type.tp_free = PyArray_free; if 
(PyType_Ready(&PyArrayIter_Type) < 0) { - goto err; + return -1; } if (PyType_Ready(&PyArrayMapIter_Type) < 0) { - goto err; + return -1; } if (PyType_Ready(&PyArrayMultiIter_Type) < 0) { - goto err; + return -1; } PyArrayNeighborhoodIter_Type.tp_new = PyType_GenericNew; if (PyType_Ready(&PyArrayNeighborhoodIter_Type) < 0) { - goto err; + return -1; } if (PyType_Ready(&NpyIter_Type) < 0) { - goto err; + return -1; } if (PyType_Ready(&PyArrayFlags_Type) < 0) { - goto err; + return -1; } NpyBusDayCalendar_Type.tp_new = PyType_GenericNew; if (PyType_Ready(&NpyBusDayCalendar_Type) < 0) { - goto err; + return -1; } /* @@ -5095,43 +5059,43 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { s = npy_cpu_features_dict(); if (s == NULL) { - goto err; + return -1; } if (PyDict_SetItemString(d, "__cpu_features__", s) < 0) { Py_DECREF(s); - goto err; + return -1; } Py_DECREF(s); s = npy_cpu_baseline_list(); if (s == NULL) { - goto err; + return -1; } if (PyDict_SetItemString(d, "__cpu_baseline__", s) < 0) { Py_DECREF(s); - goto err; + return -1; } Py_DECREF(s); s = npy_cpu_dispatch_list(); if (s == NULL) { - goto err; + return -1; } if (PyDict_SetItemString(d, "__cpu_dispatch__", s) < 0) { Py_DECREF(s); - goto err; + return -1; } Py_DECREF(s); s = PyCapsule_New((void *)_datetime_strings, NULL, NULL); if (s == NULL) { - goto err; + return -1; } PyDict_SetItemString(d, "DATETIMEUNITS", s); Py_DECREF(s); #define ADDCONST(NAME) \ - s = PyLong_FromLong(NPY_##NAME); \ + s = PyLong_FromLong(NPY_##NAME); \ PyDict_SetItemString(d, #NAME, s); \ Py_DECREF(s) @@ -5171,47 +5135,88 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { /* Finalize scalar types and expose them via namespace or typeinfo dict */ if (set_typeinfo(d) != 0) { - goto err; + return -1; } if (PyType_Ready(&PyArrayFunctionDispatcher_Type) < 0) { - goto err; + return -1; } PyDict_SetItemString( d, "_ArrayFunctionDispatcher", (PyObject *)&PyArrayFunctionDispatcher_Type); if (PyType_Ready(&PyArrayArrayConverter_Type) < 0) { 
- goto err; + return -1; } PyDict_SetItemString( d, "_array_converter", (PyObject *)&PyArrayArrayConverter_Type); if (PyType_Ready(&PyArrayMethod_Type) < 0) { - goto err; + return -1; } if (PyType_Ready(&PyBoundArrayMethod_Type) < 0) { - goto err; + return -1; } if (initialize_and_map_pytypes_to_dtypes() < 0) { - goto err; + return -1; } if (PyArray_InitializeCasts() < 0) { - goto err; + return -1; } if (init_string_dtype() < 0) { - goto err; + return -1; + } + + /* + * Initialize the default PyDataMem_Handler capsule singleton. + */ + PyDataMem_DefaultHandler = PyCapsule_New( + &default_handler, MEM_HANDLER_CAPSULE_NAME, NULL); + if (PyDataMem_DefaultHandler == NULL) { + return -1; + } +#ifdef Py_GIL_DISABLED + if (PyUnstable_SetImmortal(PyDataMem_DefaultHandler) == 0) { + PyErr_SetString(PyExc_RuntimeError, + "Could not mark memory handler capsule as immortal"); + return -1; + } +#endif + /* + * Initialize the context-local current handler + * with the default PyDataMem_Handler capsule. 
+ */ + current_handler = PyContextVar_New("current_allocator", PyDataMem_DefaultHandler); + if (current_handler == NULL) { + return -1; } if (initumath(m) != 0) { - goto err; + return -1; } if (set_matmul_flags(d) < 0) { - goto err; + return -1; + } + + // initialize static references to ndarray.__array_*__ special methods + npy_static_pydata.ndarray_array_finalize = PyObject_GetAttrString( + (PyObject *)&PyArray_Type, "__array_finalize__"); + if (npy_static_pydata.ndarray_array_finalize == NULL) { + return -1; + } + npy_static_pydata.ndarray_array_ufunc = PyObject_GetAttrString( + (PyObject *)&PyArray_Type, "__array_ufunc__"); + if (npy_static_pydata.ndarray_array_ufunc == NULL) { + return -1; + } + npy_static_pydata.ndarray_array_function = PyObject_GetAttrString( + (PyObject *)&PyArray_Type, "__array_function__"); + if (npy_static_pydata.ndarray_array_function == NULL) { + return -1; } /* @@ -5223,34 +5228,31 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { * init_string_dtype() but that needs to happen after * the legacy dtypemeta classes are available. */ - static PyObject *add_dtype_helper = NULL; - npy_cache_import("numpy.dtypes", "_add_dtype_helper", &add_dtype_helper); - if (add_dtype_helper == NULL) { - goto err; + + if (npy_cache_import_runtime( + "numpy.dtypes", "_add_dtype_helper", + &npy_runtime_imports._add_dtype_helper) == -1) { + return -1; } if (PyObject_CallFunction( - add_dtype_helper, + npy_runtime_imports._add_dtype_helper, "Os", (PyObject *)&PyArray_StringDType, NULL) == NULL) { - goto err; + return -1; } PyDict_SetItemString(d, "StringDType", (PyObject *)&PyArray_StringDType); - /* - * Initialize the default PyDataMem_Handler capsule singleton. 
- */ - PyDataMem_DefaultHandler = PyCapsule_New( - &default_handler, MEM_HANDLER_CAPSULE_NAME, NULL); - if (PyDataMem_DefaultHandler == NULL) { - goto err; + // initialize static reference to a zero-like array + npy_static_pydata.zero_pyint_like_arr = PyArray_ZEROS( + 0, NULL, NPY_DEFAULT_INT, NPY_FALSE); + if (npy_static_pydata.zero_pyint_like_arr == NULL) { + return -1; } - /* - * Initialize the context-local current handler - * with the default PyDataMem_Handler capsule. - */ - current_handler = PyContextVar_New("current_allocator", PyDataMem_DefaultHandler); - if (current_handler == NULL) { - goto err; + ((PyArrayObject_fields *)npy_static_pydata.zero_pyint_like_arr)->flags |= + (NPY_ARRAY_WAS_PYTHON_INT|NPY_ARRAY_WAS_INT_AND_REPLACED); + + if (verify_static_structs_initialized() < 0) { + return -1; } /* @@ -5260,28 +5262,44 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { /* The dtype API is not auto-filled/generated via Python scripts: */ _fill_dtype_api(PyArray_API); if (c_api == NULL) { - goto err; + return -1; } PyDict_SetItemString(d, "_ARRAY_API", c_api); Py_DECREF(c_api); c_api = PyCapsule_New((void *)PyUFunc_API, NULL, NULL); if (c_api == NULL) { - goto err; + return -1; } PyDict_SetItemString(d, "_UFUNC_API", c_api); Py_DECREF(c_api); if (PyErr_Occurred()) { - goto err; + return -1; } - return m; + return 0; +} - err: - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_RuntimeError, - "cannot load multiarray module."); - } - Py_DECREF(m); - return NULL; +static struct PyModuleDef_Slot _multiarray_umath_slots[] = { + {Py_mod_exec, _multiarray_umath_exec}, +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED}, +#endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, +#endif + {0, NULL}, +}; + +static struct PyModuleDef moduledef = { + .m_base = PyModuleDef_HEAD_INIT, + .m_name 
= "_multiarray_umath", + .m_size = 0, + .m_methods = array_module_methods, + .m_slots = _multiarray_umath_slots, +}; + +PyMODINIT_FUNC PyInit__multiarray_umath(void) { + return PyModuleDef_Init(&moduledef); } diff --git a/numpy/_core/src/multiarray/multiarraymodule.h b/numpy/_core/src/multiarray/multiarraymodule.h index b3f15686dfe0..4ce211f4339b 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.h +++ b/numpy/_core/src/multiarray/multiarraymodule.h @@ -1,26 +1,48 @@ #ifndef NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ #define NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_current_allocator; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_function; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_struct; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_priority; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_interface; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_wrap; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_finalize; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_implementation; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_axis1; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_axis2; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_like; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_numpy; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_where; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_convert; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_preserve; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_convert_if_no_array; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_cpu; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_dtype; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_err_msg_substr; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str___dlpack__; +#ifdef __cplusplus +extern "C" { +#endif + +/* + * A struct storing 
global state for the _multiarray_umath + * module. The state is initialized when the module is imported + * so no locking is necessary to access it. + * + * These globals will need to move to per-module state to + * support reloading or subinterpreters. + */ +typedef struct npy_global_state_struct { + /* + * Used to test the internal-only scaled float test dtype + */ + npy_bool get_sfloat_dtype_initialized; + + /* + * controls the global madvise hugepage setting + */ + int madvise_hugepage; + + /* + * used to detect module reloading in the reload guard + */ + int reload_guard_initialized; + + /* + * Holds the user-defined setting for whether or not to warn + * if there is no memory policy set + */ + int warn_if_no_mem_policy; +} npy_global_state_struct; + + +NPY_VISIBILITY_HIDDEN extern npy_global_state_struct npy_global_state; + +NPY_NO_EXPORT int +get_legacy_print_mode(void); +#ifdef __cplusplus +} +#endif #endif /* NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ */ diff --git a/numpy/_core/src/multiarray/nditer_api.c b/numpy/_core/src/multiarray/nditer_api.c index 28b7bf6e632f..da58489c6b9d 100644 --- a/numpy/_core/src/multiarray/nditer_api.c +++ b/numpy/_core/src/multiarray/nditer_api.c @@ -17,13 +17,7 @@ #include "nditer_impl.h" #include "templ_common.h" #include "ctors.h" -#include "refcount.h" -/* Internal helper functions private to this file */ -static npy_intp -npyiter_checkreducesize(NpyIter *iter, npy_intp count, - npy_intp *reduce_innersize, - npy_intp *reduce_outerdim); /*NUMPY_API * Removes an axis from iteration. This requires that NPY_ITER_MULTI_INDEX @@ -299,6 +293,10 @@ NpyIter_Reset(NpyIter *iter, char **errmsg) return NPY_FAIL; } } + else if (itflags&NPY_ITFLAG_EXLOOP) { + /* make sure to update the user pointers (buffer copy does it above). 
*/ + memcpy(NIT_USERPTRS(iter), NIT_DATAPTRS(iter), NPY_SIZEOF_INTP*nop); + } return NPY_SUCCEED; } @@ -654,7 +652,7 @@ NpyIter_GotoIterIndex(NpyIter *iter, npy_intp iterindex) char **ptrs; strides = NBF_STRIDES(bufferdata); - ptrs = NBF_PTRS(bufferdata); + ptrs = NIT_USERPTRS(iter); delta = iterindex - NIT_ITERINDEX(iter); for (iop = 0; iop < nop; ++iop) { @@ -828,6 +826,9 @@ NpyIter_IsFirstVisit(NpyIter *iter, int iop) /*NUMPY_API * Whether the iteration could be done with no buffering. + * + * Note that the iterator may use buffering to increase the inner loop size + * even when buffering is not required. */ NPY_NO_EXPORT npy_bool NpyIter_RequiresBuffering(NpyIter *iter) @@ -869,18 +870,37 @@ NpyIter_RequiresBuffering(NpyIter *iter) NPY_NO_EXPORT npy_bool NpyIter_IterationNeedsAPI(NpyIter *iter) { - return (NIT_ITFLAGS(iter)&NPY_ITFLAG_NEEDSAPI) != 0; + int nop = NIT_NOP(iter); + /* If any of the buffer filling need the API, flag it as well. */ + if (NpyIter_GetTransferFlags(iter) & NPY_METH_REQUIRES_PYAPI) { + return NPY_TRUE; + } + + for (int iop = 0; iop < nop; ++iop) { + PyArray_Descr *rdt = NIT_DTYPES(iter)[iop]; + if ((rdt->flags & (NPY_ITEM_REFCOUNT | + NPY_ITEM_IS_POINTER | + NPY_NEEDS_PYAPI)) != 0) { + /* Iteration needs API access */ + return NPY_TRUE; + } + } + + return NPY_FALSE; } -/* - * Fetch the ArrayMethod (runtime) flags for all "transfer functions' (i.e. - * copy to buffer/casts). +/*NUMPY_API + * Fetch the NPY_ARRAYMETHOD_FLAGS (runtime) flags for all "transfer functions' + * (i.e. copy to buffer/casts). + * + * It is the preferred way to check whether the iteration requires to hold the + * GIL or may set floating point errors during buffer copies. * - * TODO: This should be public API, but that only makes sense when the - * ArrayMethod API is made public. + * I.e. use `NpyIter_GetTransferFlags(iter) & NPY_METH_REQUIRES_PYAPI` to check + * if you cannot release the GIL. 
*/ -NPY_NO_EXPORT int +NPY_NO_EXPORT NPY_ARRAYMETHOD_FLAGS NpyIter_GetTransferFlags(NpyIter *iter) { return NIT_ITFLAGS(iter) >> NPY_ITFLAG_TRANSFERFLAGS_SHIFT; @@ -1091,13 +1111,11 @@ NpyIter_GetDataPtrArray(NpyIter *iter) /*int ndim = NIT_NDIM(iter);*/ int nop = NIT_NOP(iter); - if (itflags&NPY_ITFLAG_BUFFER) { - NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter); - return NBF_PTRS(bufferdata); + if (itflags&(NPY_ITFLAG_BUFFER|NPY_ITFLAG_EXLOOP)) { + return NIT_USERPTRS(iter); } else { - NpyIter_AxisData *axisdata = NIT_AXISDATA(iter); - return NAD_PTRS(axisdata); + return NIT_DATAPTRS(iter); } } @@ -1214,11 +1232,9 @@ NpyIter_GetIndexPtr(NpyIter *iter) /*int ndim = NIT_NDIM(iter);*/ int nop = NIT_NOP(iter); - NpyIter_AxisData *axisdata = NIT_AXISDATA(iter); - if (itflags&NPY_ITFLAG_HASINDEX) { /* The index is just after the data pointers */ - return (npy_intp*)NAD_PTRS(axisdata) + nop; + return (npy_intp*)(NpyIter_GetDataPtrArray(iter) + nop); } else { return NULL; @@ -1329,8 +1345,10 @@ NpyIter_GetAxisStrideArray(NpyIter *iter, int axis) /*NUMPY_API * Get an array of strides which are fixed. Any strides which may - * change during iteration receive the value NPY_MAX_INTP. Once - * the iterator is ready to iterate, call this to get the strides + * change during iteration receive the value NPY_MAX_INTP + * (as of NumPy 2.3, `NPY_MAX_INTP` will never happen but must be supported; + * we could guarantee this, but not sure if we should). + * Once the iterator is ready to iterate, call this to get the strides * which will always be fixed in the inner loop, then choose optimized * inner loop functions which take advantage of those fixed strides. 
* @@ -1340,75 +1358,16 @@ NPY_NO_EXPORT void NpyIter_GetInnerFixedStrideArray(NpyIter *iter, npy_intp *out_strides) { npy_uint32 itflags = NIT_ITFLAGS(iter); - int ndim = NIT_NDIM(iter); - int iop, nop = NIT_NOP(iter); + int nop = NIT_NOP(iter); NpyIter_AxisData *axisdata0 = NIT_AXISDATA(iter); - npy_intp sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); if (itflags&NPY_ITFLAG_BUFFER) { - NpyIter_BufferData *data = NIT_BUFFERDATA(iter); - npyiter_opitflags *op_itflags = NIT_OPITFLAGS(iter); - npy_intp stride, *strides = NBF_STRIDES(data), - *ad_strides = NAD_STRIDES(axisdata0); - PyArray_Descr **dtypes = NIT_DTYPES(iter); - - for (iop = 0; iop < nop; ++iop) { - stride = strides[iop]; - /* - * Operands which are always/never buffered have fixed strides, - * and everything has fixed strides when ndim is 0 or 1 - */ - if (ndim <= 1 || (op_itflags[iop]& - (NPY_OP_ITFLAG_CAST|NPY_OP_ITFLAG_BUFNEVER))) { - out_strides[iop] = stride; - } - /* If it's a reduction, 0-stride inner loop may have fixed stride */ - else if (stride == 0 && (itflags&NPY_ITFLAG_REDUCE)) { - /* If it's a reduction operand, definitely fixed stride */ - if (op_itflags[iop]&NPY_OP_ITFLAG_REDUCE) { - out_strides[iop] = stride; - } - /* - * Otherwise it's guaranteed to be a fixed stride if the - * stride is 0 for all the dimensions. 
- */ - else { - NpyIter_AxisData *axisdata = axisdata0; - int idim; - for (idim = 0; idim < ndim; ++idim) { - if (NAD_STRIDES(axisdata)[iop] != 0) { - break; - } - NIT_ADVANCE_AXISDATA(axisdata, 1); - } - /* If all the strides were 0, the stride won't change */ - if (idim == ndim) { - out_strides[iop] = stride; - } - else { - out_strides[iop] = NPY_MAX_INTP; - } - } - } - /* - * Inner loop contiguous array means its stride won't change when - * switching between buffering and not buffering - */ - else if (ad_strides[iop] == dtypes[iop]->elsize) { - out_strides[iop] = ad_strides[iop]; - } - /* - * Otherwise the strides can change if the operand is sometimes - * buffered, sometimes not. - */ - else { - out_strides[iop] = NPY_MAX_INTP; - } - } + /* If there is buffering we wrote the strides into the bufferdata. */ + memcpy(out_strides, NBF_STRIDES(NIT_BUFFERDATA(iter)), nop*NPY_SIZEOF_INTP); } else { - /* If there's no buffering, the strides are always fixed */ + /* If there's no buffering, the strides come from the operands. 
*/ memcpy(out_strides, NAD_STRIDES(axisdata0), nop*NPY_SIZEOF_INTP); } } @@ -1477,8 +1436,6 @@ NpyIter_DebugPrint(NpyIter *iter) printf("ONEITERATION "); if (itflags&NPY_ITFLAG_DELAYBUF) printf("DELAYBUF "); - if (itflags&NPY_ITFLAG_NEEDSAPI) - printf("NEEDSAPI "); if (itflags&NPY_ITFLAG_REDUCE) printf("REDUCE "); if (itflags&NPY_ITFLAG_REUSE_REDUCE_LOOPS) @@ -1531,6 +1488,18 @@ NpyIter_DebugPrint(NpyIter *iter) printf("%i ", (int)NIT_BASEOFFSETS(iter)[iop]); } printf("\n"); + printf("| Ptrs: "); + for (iop = 0; iop < nop; ++iop) { + printf("%p ", (void *)NIT_DATAPTRS(iter)[iop]); + } + printf("\n"); + if (itflags&(NPY_ITFLAG_EXLOOP|NPY_ITFLAG_BUFFER)) { + printf("| User/buffer ptrs: "); + for (iop = 0; iop < nop; ++iop) { + printf("%p ", (void *)NIT_USERPTRS(iter)[iop]); + } + printf("\n"); + } if (itflags&NPY_ITFLAG_HASINDEX) { printf("| InitIndex: %d\n", (int)(npy_intp)NIT_RESETDATAPTR(iter)[nop]); @@ -1567,14 +1536,16 @@ NpyIter_DebugPrint(NpyIter *iter) printf("CAST "); if ((NIT_OPITFLAGS(iter)[iop])&NPY_OP_ITFLAG_BUFNEVER) printf("BUFNEVER "); - if ((NIT_OPITFLAGS(iter)[iop])&NPY_OP_ITFLAG_ALIGNED) - printf("ALIGNED "); if ((NIT_OPITFLAGS(iter)[iop])&NPY_OP_ITFLAG_REDUCE) printf("REDUCE "); if ((NIT_OPITFLAGS(iter)[iop])&NPY_OP_ITFLAG_VIRTUAL) printf("VIRTUAL "); if ((NIT_OPITFLAGS(iter)[iop])&NPY_OP_ITFLAG_WRITEMASKED) printf("WRITEMASKED "); + if ((NIT_OPITFLAGS(iter)[iop])&NPY_OP_ITFLAG_BUF_SINGLESTRIDE) + printf("BUF_SINGLESTRIDE "); + if ((NIT_OPITFLAGS(iter)[iop])&NPY_OP_ITFLAG_CONTIG) + printf("CONTIG "); printf("\n"); } printf("|\n"); @@ -1587,13 +1558,15 @@ NpyIter_DebugPrint(NpyIter *iter) printf("| BufferSize: %d\n", (int)NBF_BUFFERSIZE(bufferdata)); printf("| Size: %d\n", (int)NBF_SIZE(bufferdata)); printf("| BufIterEnd: %d\n", (int)NBF_BUFITEREND(bufferdata)); + printf("| BUFFER CoreSize: %d\n", + (int)NBF_CORESIZE(bufferdata)); if (itflags&NPY_ITFLAG_REDUCE) { printf("| REDUCE Pos: %d\n", (int)NBF_REDUCE_POS(bufferdata)); printf("| REDUCE 
OuterSize: %d\n", (int)NBF_REDUCE_OUTERSIZE(bufferdata)); printf("| REDUCE OuterDim: %d\n", - (int)NBF_REDUCE_OUTERDIM(bufferdata)); + (int)NBF_OUTERDIM(bufferdata)); } printf("| Strides: "); for (iop = 0; iop < nop; ++iop) @@ -1608,10 +1581,6 @@ NpyIter_DebugPrint(NpyIter *iter) printf("%d ", (int)fixedstrides[iop]); printf("\n"); } - printf("| Ptrs: "); - for (iop = 0; iop < nop; ++iop) - printf("%p ", (void *)NBF_PTRS(bufferdata)[iop]); - printf("\n"); if (itflags&NPY_ITFLAG_REDUCE) { printf("| REDUCE Outer Strides: "); for (iop = 0; iop < nop; ++iop) @@ -1659,14 +1628,9 @@ NpyIter_DebugPrint(NpyIter *iter) if (itflags&NPY_ITFLAG_HASINDEX) { printf("| Index Stride: %d\n", (int)NAD_STRIDES(axisdata)[nop]); } - printf("| Ptrs: "); - for (iop = 0; iop < nop; ++iop) { - printf("%p ", (void *)NAD_PTRS(axisdata)[iop]); - } - printf("\n"); if (itflags&NPY_ITFLAG_HASINDEX) { printf("| Index Value: %d\n", - (int)((npy_intp*)NAD_PTRS(axisdata))[nop]); + (int)((npy_intp*)NIT_DATAPTRS(iter))[nop]); } } @@ -1815,7 +1779,8 @@ npyiter_goto_iterindex(NpyIter *iter, npy_intp iterindex) int idim, ndim = NIT_NDIM(iter); int nop = NIT_NOP(iter); - char **dataptr; + char **dataptrs = NIT_DATAPTRS(iter); + NpyIter_AxisData *axisdata; npy_intp sizeof_axisdata; npy_intp istrides, nstrides, i, shape; @@ -1828,17 +1793,13 @@ npyiter_goto_iterindex(NpyIter *iter, npy_intp iterindex) ndim = ndim ? 
ndim : 1; - if (iterindex == 0) { - dataptr = NIT_RESETDATAPTR(iter); + for (istrides = 0; istrides < nstrides; ++istrides) { + dataptrs[istrides] = NIT_RESETDATAPTR(iter)[istrides]; + } + if (iterindex == 0) { for (idim = 0; idim < ndim; ++idim) { - char **ptrs; NAD_INDEX(axisdata) = 0; - ptrs = NAD_PTRS(axisdata); - for (istrides = 0; istrides < nstrides; ++istrides) { - ptrs[istrides] = dataptr[istrides]; - } - NIT_ADVANCE_AXISDATA(axisdata, 1); } } @@ -1847,47 +1808,113 @@ npyiter_goto_iterindex(NpyIter *iter, npy_intp iterindex) * Set the multi-index, from the fastest-changing to the * slowest-changing. */ - axisdata = NIT_AXISDATA(iter); - shape = NAD_SHAPE(axisdata); - i = iterindex; - iterindex /= shape; - NAD_INDEX(axisdata) = i - iterindex * shape; - for (idim = 0; idim < ndim-1; ++idim) { - NIT_ADVANCE_AXISDATA(axisdata, 1); - + for (idim = 0; idim < ndim; ++idim, NIT_ADVANCE_AXISDATA(axisdata, 1)) { shape = NAD_SHAPE(axisdata); i = iterindex; iterindex /= shape; NAD_INDEX(axisdata) = i - iterindex * shape; + + npy_intp *strides = NAD_STRIDES(axisdata); + for (istrides = 0; istrides < nstrides; ++istrides) { + dataptrs[istrides] += NAD_INDEX(axisdata) * strides[istrides]; + } } + } - dataptr = NIT_RESETDATAPTR(iter); + if (itflags&NPY_ITFLAG_BUFFER) { + /* Find the remainder if chunking to the buffers coresize */ + npy_intp fact = NIT_ITERINDEX(iter) / NIT_BUFFERDATA(iter)->coresize; + npy_intp offset = NIT_ITERINDEX(iter) - fact * NIT_BUFFERDATA(iter)->coresize; + NIT_BUFFERDATA(iter)->coreoffset = offset; + } + else if (itflags&NPY_ITFLAG_EXLOOP) { + /* If buffered, user pointers are updated during buffer copy. */ + memcpy(NIT_USERPTRS(iter), dataptrs, nstrides * sizeof(void *)); + } +} - /* - * Accumulate the successive pointers with their - * offsets in the opposite order, starting from the - * original data pointers. 
- */ - for (idim = 0; idim < ndim; ++idim) { - npy_intp *strides; - char **ptrs; - strides = NAD_STRIDES(axisdata); - ptrs = NAD_PTRS(axisdata); +/* + * This helper fills the bufferdata copy information for an operand. It + * is very specific to copy from and to buffers. + */ +static inline void +npyiter_fill_buffercopy_params( + int nop, int iop, int ndim, npy_uint32 opitflags, npy_intp transfersize, + NpyIter_BufferData *bufferdata, + NpyIter_AxisData *axisdata, + NpyIter_AxisData *outer_axisdata, + int *ndim_transfer, + npy_intp *op_transfersize, + npy_intp *buf_stride, + npy_intp *op_strides[], npy_intp *op_shape[], npy_intp *op_coords[]) +{ + /* + * Set up if we had to do the full generic copy. + * NOTE: Except the transfersize itself everything here is fixed + * and we could create it once early on. + */ + *ndim_transfer = ndim; + *op_transfersize = transfersize; - i = NAD_INDEX(axisdata); + if ((opitflags & NPY_OP_ITFLAG_REDUCE) && (NAD_STRIDES(outer_axisdata)[iop] != 0)) { + /* + * Reduce with all inner strides ==0 (outer !=0). We buffer the outer + * stride which also means buffering only outersize items. + * (If the outer stride is 0, some inner ones are guaranteed nonzero.) + */ + assert(NAD_STRIDES(axisdata)[iop] == 0); + *ndim_transfer = 1; + *op_transfersize = NBF_REDUCE_OUTERSIZE(bufferdata); + *buf_stride = NBF_REDUCE_OUTERSTRIDES(bufferdata)[iop]; + + *op_shape = op_transfersize; + assert(**op_coords == 0); /* initialized by caller currently */ + *op_strides = &NAD_STRIDES(outer_axisdata)[iop]; + return; + } - for (istrides = 0; istrides < nstrides; ++istrides) { - ptrs[istrides] = dataptr[istrides] + i*strides[istrides]; - } + /* + * The copy is now a typical copy into a contiguous buffer. + * If it is a reduce, we only copy the inner part (i.e. less). + * The buffer strides are now always contiguous. 
+ */ + *buf_stride = NBF_STRIDES(bufferdata)[iop]; - dataptr = ptrs; + if (opitflags & NPY_OP_ITFLAG_REDUCE) { + /* Outer dim is reduced, so omit it from copying */ + *ndim_transfer -= 1; + if (*op_transfersize > bufferdata->coresize) { + *op_transfersize = bufferdata->coresize; + } + /* copy setup is identical to non-reduced now. */ + } - NIT_ADVANCE_AXISDATA(axisdata, -1); + if (opitflags & NPY_OP_ITFLAG_BUF_SINGLESTRIDE) { + *ndim_transfer = 1; + *op_shape = op_transfersize; + assert(**op_coords == 0); /* initialized by caller currently */ + *op_strides = &NAD_STRIDES(axisdata)[iop]; + if ((*op_strides)[0] == 0 && ( + !(opitflags & NPY_OP_ITFLAG_CONTIG) || + (opitflags & NPY_OP_ITFLAG_WRITE))) { + /* + * If the user didn't force contig, optimize single element. + * (Unless CONTIG was requested and this is not a write/reduce!) + */ + *op_transfersize = 1; + *buf_stride = 0; } } + else { + /* We do a full multi-dimensional copy */ + *op_shape = &NAD_SHAPE(axisdata); + *op_coords = &NAD_INDEX(axisdata); + *op_strides = &NAD_STRIDES(axisdata)[iop]; + } } + /* * This gets called after the buffers have been exhausted, and * their data needs to be written back to the arrays. 
The multi-index @@ -1903,21 +1930,17 @@ npyiter_copy_from_buffers(NpyIter *iter) npyiter_opitflags *op_itflags = NIT_OPITFLAGS(iter); NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter); - NpyIter_AxisData *axisdata = NIT_AXISDATA(iter), - *reduce_outeraxisdata = NULL; + NpyIter_AxisData *axisdata = NIT_AXISDATA(iter); + NpyIter_AxisData *outer_axisdata = NULL; PyArray_Descr **dtypes = NIT_DTYPES(iter); + npy_intp *strides = NBF_STRIDES(bufferdata); npy_intp transfersize = NBF_SIZE(bufferdata); - npy_intp *strides = NBF_STRIDES(bufferdata), - *ad_strides = NAD_STRIDES(axisdata); - npy_intp sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); - char **ad_ptrs = NAD_PTRS(axisdata); + + char **dataptrs = NIT_DATAPTRS(iter); char **buffers = NBF_BUFFERS(bufferdata); char *buffer; - npy_intp reduce_outerdim = 0; - npy_intp *reduce_outerstrides = NULL; - npy_intp axisdata_incr = NIT_AXISDATA_SIZEOF(itflags, ndim, nop) / NPY_SIZEOF_INTP; @@ -1926,17 +1949,25 @@ npyiter_copy_from_buffers(NpyIter *iter) return 0; } - NPY_IT_DBG_PRINT("Iterator: Copying buffers to outputs\n"); - - if (itflags&NPY_ITFLAG_REDUCE) { - reduce_outerdim = NBF_REDUCE_OUTERDIM(bufferdata); - reduce_outerstrides = NBF_REDUCE_OUTERSTRIDES(bufferdata); - reduce_outeraxisdata = NIT_INDEX_AXISDATA(axisdata, reduce_outerdim); + if (itflags & NPY_ITFLAG_REDUCE) { + npy_intp sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); + outer_axisdata = NIT_INDEX_AXISDATA(axisdata, NBF_OUTERDIM(bufferdata)); transfersize *= NBF_REDUCE_OUTERSIZE(bufferdata); } + NPY_IT_DBG_PRINT("Iterator: Copying buffers to outputs\n"); + NpyIter_TransferInfo *transferinfo = NBF_TRANSFERINFO(bufferdata); for (iop = 0; iop < nop; ++iop) { + if (op_itflags[iop]&NPY_OP_ITFLAG_BUFNEVER) { + continue; + } + + /* Currently, we always trash the buffer if there are references */ + if (PyDataType_REFCHK(dtypes[iop])) { + NIT_OPITFLAGS(iter)[iop] &= ~NPY_OP_ITFLAG_BUF_REUSABLE; + } + buffer = buffers[iop]; /* * Copy the data 
back to the arrays. If the type has refs, @@ -1945,73 +1976,27 @@ npyiter_copy_from_buffers(NpyIter *iter) * The flag USINGBUFFER is set when the buffer was used, so * only copy back when this flag is on. */ - if ((transferinfo[iop].write.func != NULL) && - (op_itflags[iop]&NPY_OP_ITFLAG_USINGBUFFER)) { - npy_intp op_transfersize; - - npy_intp src_stride, *dst_strides, *dst_coords, *dst_shape; - int ndim_transfer; - + if (transferinfo[iop].write.func != NULL) { NPY_IT_DBG_PRINT1("Iterator: Operand %d was buffered\n", (int)iop); - /* - * If this operand is being reduced in the inner loop, - * its buffering stride was set to zero, and just - * one element was copied. - */ - if (op_itflags[iop]&NPY_OP_ITFLAG_REDUCE) { - if (strides[iop] == 0) { - if (reduce_outerstrides[iop] == 0) { - op_transfersize = 1; - src_stride = 0; - dst_strides = &src_stride; - dst_coords = &NAD_INDEX(reduce_outeraxisdata); - dst_shape = &NAD_SHAPE(reduce_outeraxisdata); - ndim_transfer = 1; - } - else { - op_transfersize = NBF_REDUCE_OUTERSIZE(bufferdata); - src_stride = reduce_outerstrides[iop]; - dst_strides = - &NAD_STRIDES(reduce_outeraxisdata)[iop]; - dst_coords = &NAD_INDEX(reduce_outeraxisdata); - dst_shape = &NAD_SHAPE(reduce_outeraxisdata); - ndim_transfer = ndim - reduce_outerdim; - } - } - else { - if (reduce_outerstrides[iop] == 0) { - op_transfersize = NBF_SIZE(bufferdata); - src_stride = strides[iop]; - dst_strides = &ad_strides[iop]; - dst_coords = &NAD_INDEX(axisdata); - dst_shape = &NAD_SHAPE(axisdata); - ndim_transfer = reduce_outerdim ? 
- reduce_outerdim : 1; - } - else { - op_transfersize = transfersize; - src_stride = strides[iop]; - dst_strides = &ad_strides[iop]; - dst_coords = &NAD_INDEX(axisdata); - dst_shape = &NAD_SHAPE(axisdata); - ndim_transfer = ndim; - } - } - } - else { - op_transfersize = transfersize; - src_stride = strides[iop]; - dst_strides = &ad_strides[iop]; - dst_coords = &NAD_INDEX(axisdata); - dst_shape = &NAD_SHAPE(axisdata); - ndim_transfer = ndim; - } + npy_intp zero = 0; /* used as coord for 1-D copies */ + int ndim_transfer; + npy_intp op_transfersize; + npy_intp src_stride; + npy_intp *dst_strides; + npy_intp *dst_coords = &zero; + npy_intp *dst_shape; - NPY_IT_DBG_PRINT2("Iterator: Copying buffer to " - "operand %d (%d items)\n", - (int)iop, (int)op_transfersize); + npyiter_fill_buffercopy_params(nop, iop, ndim, op_itflags[iop], + transfersize, bufferdata, axisdata, outer_axisdata, + &ndim_transfer, &op_transfersize, &src_stride, + &dst_strides, &dst_shape, &dst_coords); + + NPY_IT_DBG_PRINT( + "Iterator: Copying buffer to operand %d (%zd items):\n" + " transfer ndim: %d, inner stride: %zd, inner shape: %zd, buffer stride: %zd\n", + iop, op_transfersize, ndim_transfer, dst_strides[0], dst_shape[0], src_stride); /* WRITEMASKED operand */ if (op_itflags[iop] & NPY_OP_ITFLAG_WRITEMASKED) { @@ -2021,15 +2006,15 @@ npyiter_copy_from_buffers(NpyIter *iter) * The mask pointer may be in the buffer or in * the array, detect which one. 
*/ - if ((op_itflags[maskop]&NPY_OP_ITFLAG_USINGBUFFER) != 0) { - maskptr = (npy_bool *)buffers[maskop]; + if ((op_itflags[maskop]&NPY_OP_ITFLAG_BUFNEVER)) { + maskptr = (npy_bool *)dataptrs[maskop]; } else { - maskptr = (npy_bool *)ad_ptrs[maskop]; + maskptr = (npy_bool *)buffers[maskop]; } if (PyArray_TransferMaskedStridedToNDim(ndim_transfer, - ad_ptrs[iop], dst_strides, axisdata_incr, + dataptrs[iop], dst_strides, axisdata_incr, buffer, src_stride, maskptr, strides[maskop], dst_coords, axisdata_incr, @@ -2042,7 +2027,7 @@ npyiter_copy_from_buffers(NpyIter *iter) /* Regular operand */ else { if (PyArray_TransferStridedToNDim(ndim_transfer, - ad_ptrs[iop], dst_strides, axisdata_incr, + dataptrs[iop], dst_strides, axisdata_incr, buffer, src_stride, dst_coords, axisdata_incr, dst_shape, axisdata_incr, @@ -2059,11 +2044,11 @@ npyiter_copy_from_buffers(NpyIter *iter) * The flag USINGBUFFER is set when the buffer was used, so * only decrement refs when this flag is on. */ - else if (transferinfo[iop].clear.func != NULL && - (op_itflags[iop]&NPY_OP_ITFLAG_USINGBUFFER)) { + else if (transferinfo[iop].clear.func != NULL) { NPY_IT_DBG_PRINT1( "Iterator: clearing refs of operand %d\n", (int)iop); npy_intp buf_stride = dtypes[iop]->elsize; + // TODO: transfersize is too large for reductions if (transferinfo[iop].clear.func( NULL, transferinfo[iop].clear.descr, buffer, transfersize, buf_stride, transferinfo[iop].clear.auxdata) < 0) { @@ -2082,6 +2067,9 @@ npyiter_copy_from_buffers(NpyIter *iter) * This gets called after the iterator has been positioned to a multi-index * for the start of a buffer. It decides which operands need a buffer, * and copies the data into the buffers. + * + * If passed, this function expects `prev_dataptrs` to be `NIT_USERPTRS` + * (they are reset after querying `prev_dataptrs`). 
*/ NPY_NO_EXPORT int npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs) @@ -2092,510 +2080,170 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs) npyiter_opitflags *op_itflags = NIT_OPITFLAGS(iter); NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter); - NpyIter_AxisData *axisdata = NIT_AXISDATA(iter), - *reduce_outeraxisdata = NULL; + NpyIter_AxisData *axisdata = NIT_AXISDATA(iter); + NpyIter_AxisData *outer_axisdata = NULL; - PyArray_Descr **dtypes = NIT_DTYPES(iter); PyArrayObject **operands = NIT_OPERANDS(iter); - npy_intp *strides = NBF_STRIDES(bufferdata), - *ad_strides = NAD_STRIDES(axisdata); + npy_intp sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); - char **ptrs = NBF_PTRS(bufferdata), **ad_ptrs = NAD_PTRS(axisdata); + char **user_ptrs = NIT_USERPTRS(iter), **dataptrs = NIT_DATAPTRS(iter); char **buffers = NBF_BUFFERS(bufferdata); - npy_intp iterindex, iterend, transfersize, - singlestridesize, reduce_innersize = 0, reduce_outerdim = 0; - int is_onestride = 0, any_buffered = 0; - - npy_intp *reduce_outerstrides = NULL; - char **reduce_outerptrs = NULL; - - /* - * Have to get this flag before npyiter_checkreducesize sets - * it for the next iteration. 
- */ - npy_bool reuse_reduce_loops = (prev_dataptrs != NULL) && - ((itflags&NPY_ITFLAG_REUSE_REDUCE_LOOPS) != 0); + npy_intp iterindex, iterend, transfersize; npy_intp axisdata_incr = NIT_AXISDATA_SIZEOF(itflags, ndim, nop) / NPY_SIZEOF_INTP; - NPY_IT_DBG_PRINT("Iterator: Copying inputs to buffers\n"); - - /* Calculate the size if using any buffers */ + /* Fetch the maximum size we may wish to copy (or use if unbuffered) */ iterindex = NIT_ITERINDEX(iter); iterend = NIT_ITEREND(iter); transfersize = NBF_BUFFERSIZE(bufferdata); - if (transfersize > iterend - iterindex) { - transfersize = iterend - iterindex; - } - - /* If last time around, the reduce loop structure was full, we reuse it */ - if (reuse_reduce_loops) { - npy_intp full_transfersize, prev_reduce_outersize; + outer_axisdata = NIT_INDEX_AXISDATA(axisdata, bufferdata->outerdim); + npy_intp remaining_outersize = ( + outer_axisdata->shape - outer_axisdata->index); - prev_reduce_outersize = NBF_REDUCE_OUTERSIZE(bufferdata); - reduce_outerstrides = NBF_REDUCE_OUTERSTRIDES(bufferdata); - reduce_outerptrs = NBF_REDUCE_OUTERPTRS(bufferdata); - reduce_outerdim = NBF_REDUCE_OUTERDIM(bufferdata); - reduce_outeraxisdata = NIT_INDEX_AXISDATA(axisdata, reduce_outerdim); - reduce_innersize = NBF_SIZE(bufferdata); - NBF_REDUCE_POS(bufferdata) = 0; - /* - * Try to do make the outersize as big as possible. This allows - * it to shrink when processing the last bit of the outer reduce loop, - * then grow again at the beginning of the next outer reduce loop. 
- */ - NBF_REDUCE_OUTERSIZE(bufferdata) = (NAD_SHAPE(reduce_outeraxisdata)- - NAD_INDEX(reduce_outeraxisdata)); - full_transfersize = NBF_REDUCE_OUTERSIZE(bufferdata)*reduce_innersize; - /* If the full transfer size doesn't fit in the buffer, truncate it */ - if (full_transfersize > NBF_BUFFERSIZE(bufferdata)) { - NBF_REDUCE_OUTERSIZE(bufferdata) = transfersize/reduce_innersize; - transfersize = NBF_REDUCE_OUTERSIZE(bufferdata)*reduce_innersize; - } - else { - transfersize = full_transfersize; - } - if (prev_reduce_outersize < NBF_REDUCE_OUTERSIZE(bufferdata)) { - /* - * If the previous time around less data was copied it may not - * be safe to reuse the buffers even if the pointers match. - */ - reuse_reduce_loops = 0; - } - NBF_BUFITEREND(bufferdata) = iterindex + reduce_innersize; + NPY_IT_DBG_PRINT("Iterator: Copying inputs to buffers\n"); + NPY_IT_DBG_PRINT(" Max transfersize=%zd, coresize=%zd\n", + transfersize, bufferdata->coresize); - NPY_IT_DBG_PRINT3("Reused reduce transfersize: %d innersize: %d " - "itersize: %d\n", - (int)transfersize, - (int)reduce_innersize, - (int)NpyIter_GetIterSize(iter)); - NPY_IT_DBG_PRINT1("Reduced reduce outersize: %d", - (int)NBF_REDUCE_OUTERSIZE(bufferdata)); - } /* - * If there are any reduction operands, we may have to make - * the size smaller so we don't copy the same value into - * a buffer twice, as the buffering does not have a mechanism - * to combine values itself. + * If there is a coreoffset just copy to the end of a single coresize + * NB: Also if the size is shrunk, we definitely won't set buffer re-use. 
*/ - else if (itflags&NPY_ITFLAG_REDUCE) { - NPY_IT_DBG_PRINT("Iterator: Calculating reduce loops\n"); - transfersize = npyiter_checkreducesize(iter, transfersize, - &reduce_innersize, - &reduce_outerdim); - NPY_IT_DBG_PRINT3("Reduce transfersize: %d innersize: %d " - "itersize: %d\n", - (int)transfersize, - (int)reduce_innersize, - (int)NpyIter_GetIterSize(iter)); - - reduce_outerstrides = NBF_REDUCE_OUTERSTRIDES(bufferdata); - reduce_outerptrs = NBF_REDUCE_OUTERPTRS(bufferdata); - reduce_outeraxisdata = NIT_INDEX_AXISDATA(axisdata, reduce_outerdim); - NBF_SIZE(bufferdata) = reduce_innersize; - NBF_REDUCE_POS(bufferdata) = 0; - NBF_REDUCE_OUTERDIM(bufferdata) = reduce_outerdim; - NBF_BUFITEREND(bufferdata) = iterindex + reduce_innersize; - if (reduce_innersize == 0) { - NBF_REDUCE_OUTERSIZE(bufferdata) = 0; - return 0; - } - else { - NBF_REDUCE_OUTERSIZE(bufferdata) = transfersize/reduce_innersize; - } + if (bufferdata->coreoffset) { + prev_dataptrs = NULL; /* No way we can re-use the buffers safely. */ + transfersize = bufferdata->coresize - bufferdata->coreoffset; + NPY_IT_DBG_PRINT(" Shrunk transfersize due to coreoffset=%zd: %zd\n", + bufferdata->coreoffset, transfersize); } - else { - NBF_SIZE(bufferdata) = transfersize; - NBF_BUFITEREND(bufferdata) = iterindex + transfersize; + else if (transfersize > bufferdata->coresize * remaining_outersize) { + /* + * Shrink transfersize to not go beyond outer axis size. If not + * a reduction, it is unclear that this is necessary. 
+ */ + transfersize = bufferdata->coresize * remaining_outersize; + NPY_IT_DBG_PRINT(" Shrunk transfersize outer size: %zd\n", transfersize); + } + + /* And ensure that we don't go beyond the iterator end (if ranged) */ + if (transfersize > iterend - iterindex) { + transfersize = iterend - iterindex; + NPY_IT_DBG_PRINT(" Shrunk transfersize to itersize: %zd\n", transfersize); } - /* Calculate the maximum size if using a single stride and no buffers */ - singlestridesize = NAD_SHAPE(axisdata)-NAD_INDEX(axisdata); - if (singlestridesize > iterend - iterindex) { - singlestridesize = iterend - iterindex; + bufferdata->size = transfersize; + NBF_BUFITEREND(bufferdata) = iterindex + transfersize; + + if (transfersize == 0) { + return 0; } - if (singlestridesize >= transfersize) { - is_onestride = 1; + + NPY_IT_DBG_PRINT("Iterator: Buffer transfersize=%zd\n", transfersize); + + if (itflags & NPY_ITFLAG_REDUCE) { + NBF_REDUCE_OUTERSIZE(bufferdata) = transfersize / bufferdata->coresize; + if (NBF_REDUCE_OUTERSIZE(bufferdata) > 1) { + /* WARNING: bufferdata->size does not include reduce-outersize */ + bufferdata->size = bufferdata->coresize; + NBF_BUFITEREND(bufferdata) = iterindex + bufferdata->coresize; + } + NBF_REDUCE_POS(bufferdata) = 0; } NpyIter_TransferInfo *transferinfo = NBF_TRANSFERINFO(bufferdata); for (iop = 0; iop < nop; ++iop) { + NPY_IT_DBG_PRINT("Iterator: buffer prep for op=%d @ %p inner-stride=%zd\n", + iop, dataptrs[iop], NBF_STRIDES(bufferdata)[iop]); - switch (op_itflags[iop]& - (NPY_OP_ITFLAG_BUFNEVER| - NPY_OP_ITFLAG_CAST| - NPY_OP_ITFLAG_REDUCE)) { - /* Never need to buffer this operand */ - case NPY_OP_ITFLAG_BUFNEVER: - ptrs[iop] = ad_ptrs[iop]; - if (itflags&NPY_ITFLAG_REDUCE) { - reduce_outerstrides[iop] = reduce_innersize * - strides[iop]; - reduce_outerptrs[iop] = ptrs[iop]; - } - /* - * Should not adjust the stride - ad_strides[iop] - * could be zero, but strides[iop] was initialized - * to the first non-trivial stride. 
- */ - /* The flag NPY_OP_ITFLAG_USINGBUFFER can be ignored here */ - assert(!(op_itflags[iop] & NPY_OP_ITFLAG_USINGBUFFER)); - break; - /* Never need to buffer this operand */ - case NPY_OP_ITFLAG_BUFNEVER|NPY_OP_ITFLAG_REDUCE: - ptrs[iop] = ad_ptrs[iop]; - reduce_outerptrs[iop] = ptrs[iop]; - reduce_outerstrides[iop] = 0; - /* - * Should not adjust the stride - ad_strides[iop] - * could be zero, but strides[iop] was initialized - * to the first non-trivial stride. - */ - /* The flag NPY_OP_ITFLAG_USINGBUFFER can be ignored here */ - assert(!(op_itflags[iop] & NPY_OP_ITFLAG_USINGBUFFER)); - break; - /* Just a copy */ - case 0: - /* Do not reuse buffer if it did not exist */ - if (!(op_itflags[iop] & NPY_OP_ITFLAG_USINGBUFFER) && - (prev_dataptrs != NULL)) { - prev_dataptrs[iop] = NULL; - } - /* - * No copyswap or cast was requested, so all we're - * doing is copying the data to fill the buffer and - * produce a single stride. If the underlying data - * already does that, no need to copy it. 
- */ - if (is_onestride) { - ptrs[iop] = ad_ptrs[iop]; - strides[iop] = ad_strides[iop]; - /* Signal that the buffer is not being used */ - op_itflags[iop] &= (~NPY_OP_ITFLAG_USINGBUFFER); - } - /* If some other op is reduced, we have a double reduce loop */ - else if ((itflags&NPY_ITFLAG_REDUCE) && - (reduce_outerdim == 1) && - (transfersize/reduce_innersize <= - NAD_SHAPE(reduce_outeraxisdata) - - NAD_INDEX(reduce_outeraxisdata))) { - ptrs[iop] = ad_ptrs[iop]; - reduce_outerptrs[iop] = ptrs[iop]; - strides[iop] = ad_strides[iop]; - reduce_outerstrides[iop] = - NAD_STRIDES(reduce_outeraxisdata)[iop]; - /* Signal that the buffer is not being used */ - op_itflags[iop] &= (~NPY_OP_ITFLAG_USINGBUFFER); - } - else { - /* In this case, the buffer is being used */ - ptrs[iop] = buffers[iop]; - strides[iop] = dtypes[iop]->elsize; - if (itflags&NPY_ITFLAG_REDUCE) { - reduce_outerstrides[iop] = reduce_innersize * - strides[iop]; - reduce_outerptrs[iop] = ptrs[iop]; - } - /* Signal that the buffer is being used */ - op_itflags[iop] |= NPY_OP_ITFLAG_USINGBUFFER; - } - break; - /* Just a copy, but with a reduction */ - case NPY_OP_ITFLAG_REDUCE: - /* Do not reuse buffer if it did not exist */ - if (!(op_itflags[iop] & NPY_OP_ITFLAG_USINGBUFFER) && - (prev_dataptrs != NULL)) { - prev_dataptrs[iop] = NULL; - } - if (ad_strides[iop] == 0) { - strides[iop] = 0; - /* It's all in one stride in the inner loop dimension */ - if (is_onestride) { - NPY_IT_DBG_PRINT1("reduce op %d all one stride\n", (int)iop); - ptrs[iop] = ad_ptrs[iop]; - reduce_outerstrides[iop] = 0; - /* Signal that the buffer is not being used */ - op_itflags[iop] &= (~NPY_OP_ITFLAG_USINGBUFFER); - } - /* It's all in one stride in the reduce outer loop */ - else if ((reduce_outerdim > 0) && - (transfersize/reduce_innersize <= - NAD_SHAPE(reduce_outeraxisdata) - - NAD_INDEX(reduce_outeraxisdata))) { - NPY_IT_DBG_PRINT1("reduce op %d all one outer stride\n", - (int)iop); - ptrs[iop] = ad_ptrs[iop]; - /* Outer reduce 
loop advances by one item */ - reduce_outerstrides[iop] = - NAD_STRIDES(reduce_outeraxisdata)[iop]; - /* Signal that the buffer is not being used */ - op_itflags[iop] &= (~NPY_OP_ITFLAG_USINGBUFFER); - } - /* In this case, the buffer is being used */ - else { - NPY_IT_DBG_PRINT1("reduce op %d must buffer\n", (int)iop); - ptrs[iop] = buffers[iop]; - /* Both outer and inner reduce loops have stride 0 */ - if (NAD_STRIDES(reduce_outeraxisdata)[iop] == 0) { - reduce_outerstrides[iop] = 0; - } - /* Outer reduce loop advances by one item */ - else { - reduce_outerstrides[iop] = dtypes[iop]->elsize; - } - /* Signal that the buffer is being used */ - op_itflags[iop] |= NPY_OP_ITFLAG_USINGBUFFER; - } - - } - else if (is_onestride) { - NPY_IT_DBG_PRINT1("reduce op %d all one stride in dim 0\n", (int)iop); - ptrs[iop] = ad_ptrs[iop]; - strides[iop] = ad_strides[iop]; - reduce_outerstrides[iop] = 0; - /* Signal that the buffer is not being used */ - op_itflags[iop] &= (~NPY_OP_ITFLAG_USINGBUFFER); - } - else { - /* It's all in one stride in the reduce outer loop */ - if ((reduce_outerdim == 1) && - (transfersize/reduce_innersize <= - NAD_SHAPE(reduce_outeraxisdata) - - NAD_INDEX(reduce_outeraxisdata))) { - ptrs[iop] = ad_ptrs[iop]; - strides[iop] = ad_strides[iop]; - /* Outer reduce loop advances by one item */ - reduce_outerstrides[iop] = - NAD_STRIDES(reduce_outeraxisdata)[iop]; - /* Signal that the buffer is not being used */ - op_itflags[iop] &= (~NPY_OP_ITFLAG_USINGBUFFER); - } - /* In this case, the buffer is being used */ - else { - ptrs[iop] = buffers[iop]; - strides[iop] = dtypes[iop]->elsize; - - if (NAD_STRIDES(reduce_outeraxisdata)[iop] == 0) { - /* Reduction in outer reduce loop */ - reduce_outerstrides[iop] = 0; - } - else { - /* Advance to next items in outer reduce loop */ - reduce_outerstrides[iop] = reduce_innersize * - dtypes[iop]->elsize; - } - /* Signal that the buffer is being used */ - op_itflags[iop] |= NPY_OP_ITFLAG_USINGBUFFER; - } - } - 
reduce_outerptrs[iop] = ptrs[iop]; - break; - default: - /* In this case, the buffer is always being used */ - any_buffered = 1; - - /* Signal that the buffer is being used */ - op_itflags[iop] |= NPY_OP_ITFLAG_USINGBUFFER; - - if (!(op_itflags[iop]&NPY_OP_ITFLAG_REDUCE)) { - ptrs[iop] = buffers[iop]; - strides[iop] = dtypes[iop]->elsize; - if (itflags&NPY_ITFLAG_REDUCE) { - reduce_outerstrides[iop] = reduce_innersize * - strides[iop]; - reduce_outerptrs[iop] = ptrs[iop]; - } - } - /* The buffer is being used with reduction */ - else { - ptrs[iop] = buffers[iop]; - if (ad_strides[iop] == 0) { - NPY_IT_DBG_PRINT1("cast op %d has innermost stride 0\n", (int)iop); - strides[iop] = 0; - /* Both outer and inner reduce loops have stride 0 */ - if (NAD_STRIDES(reduce_outeraxisdata)[iop] == 0) { - NPY_IT_DBG_PRINT1("cast op %d has outermost stride 0\n", (int)iop); - reduce_outerstrides[iop] = 0; - } - /* Outer reduce loop advances by one item */ - else { - NPY_IT_DBG_PRINT1("cast op %d has outermost stride !=0\n", (int)iop); - reduce_outerstrides[iop] = dtypes[iop]->elsize; - } - } - else { - NPY_IT_DBG_PRINT1("cast op %d has innermost stride !=0\n", (int)iop); - strides[iop] = dtypes[iop]->elsize; - - if (NAD_STRIDES(reduce_outeraxisdata)[iop] == 0) { - NPY_IT_DBG_PRINT1("cast op %d has outermost stride 0\n", (int)iop); - /* Reduction in outer reduce loop */ - reduce_outerstrides[iop] = 0; - } - else { - NPY_IT_DBG_PRINT1("cast op %d has outermost stride !=0\n", (int)iop); - /* Advance to next items in outer reduce loop */ - reduce_outerstrides[iop] = reduce_innersize * - dtypes[iop]->elsize; - } - } - reduce_outerptrs[iop] = ptrs[iop]; - } - break; + if (op_itflags[iop]&NPY_OP_ITFLAG_BUFNEVER) { + user_ptrs[iop] = dataptrs[iop]; + NBF_REDUCE_OUTERPTRS(bufferdata)[iop] = dataptrs[iop]; + NPY_IT_DBG_PRINT(" unbuffered op (skipping)\n"); + continue; } /* - * If OP_ITFLAG_USINGBUFFER is enabled and the read func is not NULL, - * the buffer needs to be read. 
+ * We may be able to reuse buffers if the pointer is unchanged and + * there is no coreoffset (which sets `prev_dataptrs = NULL` above). + * We re-use `user_ptrs` for `prev_dataptrs` to simplify `iternext()`. */ - if (op_itflags[iop] & NPY_OP_ITFLAG_USINGBUFFER && - transferinfo[iop].read.func != NULL) { - npy_intp src_itemsize; - npy_intp op_transfersize; - - npy_intp dst_stride, *src_strides, *src_coords, *src_shape; - int ndim_transfer; + assert(prev_dataptrs == NULL || prev_dataptrs == user_ptrs); + int unchanged_ptr_and_no_offset = ( + prev_dataptrs != NULL && prev_dataptrs[iop] == dataptrs[iop]); - npy_bool skip_transfer = 0; + user_ptrs[iop] = buffers[iop]; + NBF_REDUCE_OUTERPTRS(bufferdata)[iop] = buffers[iop]; - src_itemsize = PyArray_DTYPE(operands[iop])->elsize; - - /* If we reach here, buffering is required */ - any_buffered = 1; + if (!(op_itflags[iop]&NPY_OP_ITFLAG_READ)) { + NPY_IT_DBG_PRINT(" non-reading op (skipping)\n"); + continue; + } + if (unchanged_ptr_and_no_offset && op_itflags[iop]&NPY_OP_ITFLAG_BUF_REUSABLE) { + NPY_IT_DBG_PRINT2("Iterator: skipping operands %d " + "copy (%d items) because the data pointer didn't change\n", + (int)iop, (int)transfersize); + continue; + } + else if (transfersize == NBF_BUFFERSIZE(bufferdata) + || (transfersize >= NBF_CORESIZE(bufferdata) + && op_itflags[iop]&NPY_OP_ITFLAG_REDUCE + && NAD_STRIDES(outer_axisdata)[iop] == 0)) { /* - * If this operand is being reduced in the inner loop, - * set its buffering stride to zero, and just copy - * one element. + * If we have a full copy or a reduce with 0 stride outer and + * a copy larger than the coresize, this is now re-usable. + * NB: With a core-offset, we always copy less than the core-size. 
*/ - if (op_itflags[iop]&NPY_OP_ITFLAG_REDUCE) { - if (ad_strides[iop] == 0) { - strides[iop] = 0; - if (reduce_outerstrides[iop] == 0) { - op_transfersize = 1; - dst_stride = 0; - src_strides = &dst_stride; - src_coords = &NAD_INDEX(reduce_outeraxisdata); - src_shape = &NAD_SHAPE(reduce_outeraxisdata); - ndim_transfer = 1; - - /* - * When we're reducing a single element, and - * it's still the same element, don't overwrite - * it even when reuse reduce loops is unset. - * This preserves the precision of the - * intermediate calculation. - */ - if (prev_dataptrs && - prev_dataptrs[iop] == ad_ptrs[iop]) { - NPY_IT_DBG_PRINT1("Iterator: skipping operand %d" - " copy because it's a 1-element reduce\n", - (int)iop); - - skip_transfer = 1; - } - } - else { - op_transfersize = NBF_REDUCE_OUTERSIZE(bufferdata); - dst_stride = reduce_outerstrides[iop]; - src_strides = &NAD_STRIDES(reduce_outeraxisdata)[iop]; - src_coords = &NAD_INDEX(reduce_outeraxisdata); - src_shape = &NAD_SHAPE(reduce_outeraxisdata); - ndim_transfer = ndim - reduce_outerdim; - } - } - else { - if (reduce_outerstrides[iop] == 0) { - op_transfersize = NBF_SIZE(bufferdata); - dst_stride = strides[iop]; - src_strides = &ad_strides[iop]; - src_coords = &NAD_INDEX(axisdata); - src_shape = &NAD_SHAPE(axisdata); - ndim_transfer = reduce_outerdim ? 
reduce_outerdim : 1; - } - else { - op_transfersize = transfersize; - dst_stride = strides[iop]; - src_strides = &ad_strides[iop]; - src_coords = &NAD_INDEX(axisdata); - src_shape = &NAD_SHAPE(axisdata); - ndim_transfer = ndim; - } - } - } - else { - op_transfersize = transfersize; - dst_stride = strides[iop]; - src_strides = &ad_strides[iop]; - src_coords = &NAD_INDEX(axisdata); - src_shape = &NAD_SHAPE(axisdata); - ndim_transfer = ndim; - } + NPY_IT_DBG_PRINT(" marking operand %d for buffer reuse\n", iop); + NIT_OPITFLAGS(iter)[iop] |= NPY_OP_ITFLAG_BUF_REUSABLE; + } + else { + NPY_IT_DBG_PRINT(" marking operand %d as not reusable\n", iop); + NIT_OPITFLAGS(iter)[iop] &= ~NPY_OP_ITFLAG_BUF_REUSABLE; + } - /* - * If the whole buffered loop structure remains the same, - * and the source pointer for this data didn't change, - * we don't have to copy the data again. - */ - if (reuse_reduce_loops && prev_dataptrs[iop] == ad_ptrs[iop]) { - NPY_IT_DBG_PRINT2("Iterator: skipping operands %d " - "copy (%d items) because loops are reused and the data " - "pointer didn't change\n", - (int)iop, (int)op_transfersize); - skip_transfer = 1; - } + npy_intp zero = 0; /* used as coord for 1-D copies */ + int ndim_transfer; + npy_intp op_transfersize; + npy_intp dst_stride; + npy_intp *src_strides; + npy_intp *src_coords = &zero; + npy_intp *src_shape; + npy_intp src_itemsize = PyArray_DTYPE(operands[iop])->elsize; - /* - * Copy data to the buffers if necessary. - * - * We always copy if the operand has references. In that case - * a "write" function must be in use that either copies or clears - * the buffer. - * This write from buffer call does not check for skip-transfer - * so we have to assume the buffer is cleared. For dtypes that - * do not have references, we can assume that the write function - * will leave the source (buffer) unmodified. 
- */ - if (!skip_transfer || PyDataType_REFCHK(dtypes[iop])) { - NPY_IT_DBG_PRINT2("Iterator: Copying operand %d to " - "buffer (%d items)\n", - (int)iop, (int)op_transfersize); - - if (PyArray_TransferNDimToStrided( - ndim_transfer, ptrs[iop], dst_stride, - ad_ptrs[iop], src_strides, axisdata_incr, - src_coords, axisdata_incr, - src_shape, axisdata_incr, - op_transfersize, src_itemsize, - &transferinfo[iop].read) < 0) { - return -1; - } - } - } - } + npyiter_fill_buffercopy_params(nop, iop, ndim, op_itflags[iop], + transfersize, bufferdata, axisdata, outer_axisdata, + &ndim_transfer, &op_transfersize, &dst_stride, + &src_strides, &src_shape, &src_coords); - /* - * If buffering wasn't needed, we can grow the inner - * loop to as large as possible. - * - * TODO: Could grow REDUCE loop too with some more logic above. - */ - if (!any_buffered && (itflags&NPY_ITFLAG_GROWINNER) && - !(itflags&NPY_ITFLAG_REDUCE)) { - if (singlestridesize > transfersize) { - NPY_IT_DBG_PRINT2("Iterator: Expanding inner loop size " - "from %d to %d since buffering wasn't needed\n", - (int)NBF_SIZE(bufferdata), (int)singlestridesize); - NBF_SIZE(bufferdata) = singlestridesize; - NBF_BUFITEREND(bufferdata) = iterindex + singlestridesize; + /* + * Copy data to the buffers if necessary. + * + * We always copy if the operand has references. In that case + * a "write" function must be in use that either copies or clears + * the buffer. + * This write from buffer call does not check for skip-transfer + * so we have to assume the buffer is cleared. For dtypes that + * do not have references, we can assume that the write function + * will leave the source (buffer) unmodified. 
+ */ + NPY_IT_DBG_PRINT( + "Iterator: Copying operand %d to buffer (%zd items):\n" + " transfer ndim: %d, inner stride: %zd, inner shape: %zd, buffer stride: %zd\n", + iop, op_transfersize, ndim_transfer, src_strides[0], src_shape[0], dst_stride); + + if (PyArray_TransferNDimToStrided( + ndim_transfer, buffers[iop], dst_stride, + dataptrs[iop], src_strides, axisdata_incr, + src_coords, axisdata_incr, + src_shape, axisdata_incr, + op_transfersize, src_itemsize, + &transferinfo[iop].read) < 0) { + return -1; } } - NPY_IT_DBG_PRINT1("Any buffering needed: %d\n", any_buffered); - NPY_IT_DBG_PRINT1("Iterator: Finished copying inputs to buffers " - "(buffered size is %d)\n", (int)NBF_SIZE(bufferdata)); + "(buffered size is %zd)\n", transfersize); return 0; } @@ -2633,13 +2281,16 @@ npyiter_clear_buffers(NpyIter *iter) PyArray_Descr **dtypes = NIT_DTYPES(iter); npyiter_opitflags *op_itflags = NIT_OPITFLAGS(iter); for (int iop = 0; iop < nop; ++iop, ++buffers) { - if (transferinfo[iop].clear.func == NULL || - !(op_itflags[iop]&NPY_OP_ITFLAG_USINGBUFFER)) { + if (transferinfo[iop].clear.func == NULL) { continue; } if (*buffers == 0) { continue; } + assert(!(op_itflags[iop]&NPY_OP_ITFLAG_BUFNEVER)); + /* Buffer cannot be re-used (not that we should ever try!) */ + op_itflags[iop] &= ~NPY_OP_ITFLAG_BUF_REUSABLE; + int itemsize = dtypes[iop]->elsize; if (transferinfo[iop].clear.func(NULL, dtypes[iop], *buffers, NBF_SIZE(bufferdata), itemsize, @@ -2654,236 +2305,6 @@ npyiter_clear_buffers(NpyIter *iter) } -/* - * This checks how much space can be buffered without encountering the - * same value twice, or for operands whose innermost stride is zero, - * without encountering a different value. By reducing the buffered - * amount to this size, reductions can be safely buffered. - * - * Reductions are buffered with two levels of looping, to avoid - * frequent copying to the buffers. 
The return value is the over-all - * buffer size, and when the flag NPY_ITFLAG_REDUCE is set, reduce_innersize - * receives the size of the inner of the two levels of looping. - * - * The value placed in reduce_outerdim is the index into the AXISDATA - * for where the second level of the double loop begins. - * - * The return value is always a multiple of the value placed in - * reduce_innersize. - */ -static npy_intp -npyiter_checkreducesize(NpyIter *iter, npy_intp count, - npy_intp *reduce_innersize, - npy_intp *reduce_outerdim) -{ - npy_uint32 itflags = NIT_ITFLAGS(iter); - int idim, ndim = NIT_NDIM(iter); - int iop, nop = NIT_NOP(iter); - - NpyIter_AxisData *axisdata; - npy_intp sizeof_axisdata; - npy_intp coord, shape, *strides; - npy_intp reducespace = 1, factor; - npy_bool nonzerocoord; - - npyiter_opitflags *op_itflags = NIT_OPITFLAGS(iter); - char stride0op[NPY_MAXARGS]; - - /* Default to no outer axis */ - *reduce_outerdim = 0; - - /* If there's only one dimension, no need to calculate anything */ - if (ndim == 1 || count == 0) { - *reduce_innersize = count; - return count; - } - - sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); - axisdata = NIT_AXISDATA(iter); - - /* Indicate which REDUCE operands have stride 0 in the inner loop */ - strides = NAD_STRIDES(axisdata); - for (iop = 0; iop < nop; ++iop) { - stride0op[iop] = (op_itflags[iop]&NPY_OP_ITFLAG_REDUCE) && - (strides[iop] == 0); - NPY_IT_DBG_PRINT2("Iterator: Operand %d has stride 0 in " - "the inner loop? 
%d\n", iop, (int)stride0op[iop]); - } - shape = NAD_SHAPE(axisdata); - coord = NAD_INDEX(axisdata); - reducespace += (shape-coord-1); - factor = shape; - NIT_ADVANCE_AXISDATA(axisdata, 1); - - /* Initialize nonzerocoord based on the first coordinate */ - nonzerocoord = (coord != 0); - - /* Go forward through axisdata, calculating the space available */ - for (idim = 1; idim < ndim && reducespace < count; - ++idim, NIT_ADVANCE_AXISDATA(axisdata, 1)) { - NPY_IT_DBG_PRINT2("Iterator: inner loop reducespace %d, count %d\n", - (int)reducespace, (int)count); - - strides = NAD_STRIDES(axisdata); - for (iop = 0; iop < nop; ++iop) { - /* - * If a reduce stride switched from zero to non-zero, or - * vice versa, that's the point where the data will stop - * being the same element or will repeat, and if the - * buffer starts with an all zero multi-index up to this - * point, gives us the reduce_innersize. - */ - if((stride0op[iop] && (strides[iop] != 0)) || - (!stride0op[iop] && - (strides[iop] == 0) && - (op_itflags[iop]&NPY_OP_ITFLAG_REDUCE))) { - NPY_IT_DBG_PRINT1("Iterator: Reduce operation limits " - "buffer to %d\n", (int)reducespace); - /* - * If we already found more elements than count, or - * the starting coordinate wasn't zero, the two-level - * looping is unnecessary/can't be done, so return. - */ - if (count <= reducespace) { - *reduce_innersize = count; - NIT_ITFLAGS(iter) |= NPY_ITFLAG_REUSE_REDUCE_LOOPS; - return count; - } - else if (nonzerocoord) { - if (reducespace < count) { - count = reducespace; - } - *reduce_innersize = count; - /* NOTE: This is similar to the (coord != 0) case below. 
*/ - NIT_ITFLAGS(iter) &= ~NPY_ITFLAG_REUSE_REDUCE_LOOPS; - return count; - } - else { - *reduce_innersize = reducespace; - break; - } - } - } - /* If we broke out of the loop early, we found reduce_innersize */ - if (iop != nop) { - NPY_IT_DBG_PRINT2("Iterator: Found first dim not " - "reduce (%d of %d)\n", iop, nop); - break; - } - - shape = NAD_SHAPE(axisdata); - coord = NAD_INDEX(axisdata); - if (coord != 0) { - nonzerocoord = 1; - } - reducespace += (shape-coord-1) * factor; - factor *= shape; - } - - /* - * If there was any non-zero coordinate, the reduction inner - * loop doesn't fit in the buffersize, or the reduction inner loop - * covered the entire iteration size, can't do the double loop. - */ - if (nonzerocoord || count < reducespace || idim == ndim) { - if (reducespace < count) { - count = reducespace; - } - *reduce_innersize = count; - /* In this case, we can't reuse the reduce loops */ - NIT_ITFLAGS(iter) &= ~NPY_ITFLAG_REUSE_REDUCE_LOOPS; - return count; - } - - coord = NAD_INDEX(axisdata); - if (coord != 0) { - /* - * In this case, it is only safe to reuse the buffer if the amount - * of data copied is not more than the current axes, as is the - * case when reuse_reduce_loops was active already. - * It should be in principle OK when the idim loop returns immediately. - */ - NIT_ITFLAGS(iter) &= ~NPY_ITFLAG_REUSE_REDUCE_LOOPS; - } - else { - /* In this case, we can reuse the reduce loops */ - NIT_ITFLAGS(iter) |= NPY_ITFLAG_REUSE_REDUCE_LOOPS; - } - - *reduce_innersize = reducespace; - count /= reducespace; - - NPY_IT_DBG_PRINT2("Iterator: reduce_innersize %d count /ed %d\n", - (int)reducespace, (int)count); - - /* - * Continue through the rest of the dimensions. If there are - * two separated reduction axes, we may have to cut the buffer - * short again. 
- */ - *reduce_outerdim = idim; - reducespace = 1; - factor = 1; - /* Indicate which REDUCE operands have stride 0 at the current level */ - strides = NAD_STRIDES(axisdata); - for (iop = 0; iop < nop; ++iop) { - stride0op[iop] = (op_itflags[iop]&NPY_OP_ITFLAG_REDUCE) && - (strides[iop] == 0); - NPY_IT_DBG_PRINT2("Iterator: Operand %d has stride 0 in " - "the outer loop? %d\n", iop, (int)stride0op[iop]); - } - shape = NAD_SHAPE(axisdata); - reducespace += (shape-coord-1) * factor; - factor *= shape; - NIT_ADVANCE_AXISDATA(axisdata, 1); - ++idim; - - for (; idim < ndim && reducespace < count; - ++idim, NIT_ADVANCE_AXISDATA(axisdata, 1)) { - NPY_IT_DBG_PRINT2("Iterator: outer loop reducespace %d, count %d\n", - (int)reducespace, (int)count); - strides = NAD_STRIDES(axisdata); - for (iop = 0; iop < nop; ++iop) { - /* - * If a reduce stride switched from zero to non-zero, or - * vice versa, that's the point where the data will stop - * being the same element or will repeat, and if the - * buffer starts with an all zero multi-index up to this - * point, gives us the reduce_innersize. - */ - if((stride0op[iop] && (strides[iop] != 0)) || - (!stride0op[iop] && - (strides[iop] == 0) && - (op_itflags[iop]&NPY_OP_ITFLAG_REDUCE))) { - NPY_IT_DBG_PRINT1("Iterator: Reduce operation limits " - "buffer to %d\n", (int)reducespace); - /* - * This terminates the outer level of our double loop. 
- */ - if (count <= reducespace) { - return count * (*reduce_innersize); - } - else { - return reducespace * (*reduce_innersize); - } - } - } - - shape = NAD_SHAPE(axisdata); - coord = NAD_INDEX(axisdata); - if (coord != 0) { - nonzerocoord = 1; - } - reducespace += (shape-coord-1) * factor; - factor *= shape; - } - - if (reducespace < count) { - count = reducespace; - } - return count * (*reduce_innersize); -} - NPY_NO_EXPORT npy_bool npyiter_has_writeback(NpyIter *iter) { diff --git a/numpy/_core/src/multiarray/nditer_constr.c b/numpy/_core/src/multiarray/nditer_constr.c index 427dd3d876bc..ffe37e80c9be 100644 --- a/numpy/_core/src/multiarray/nditer_constr.c +++ b/numpy/_core/src/multiarray/nditer_constr.c @@ -14,6 +14,7 @@ /* Allow this .c file to include nditer_impl.h */ #define NPY_ITERATOR_IMPLEMENTATION_CODE +#include "alloc.h" #include "nditer_impl.h" #include "arrayobject.h" #include "array_coercion.h" @@ -49,7 +50,7 @@ npyiter_prepare_operands(int nop, PyArray_Descr **op_dtype, npy_uint32 flags, npy_uint32 *op_flags, npyiter_opitflags *op_itflags, - npy_int8 *out_maskop); + int *out_maskop); static int npyiter_check_casting(int nop, PyArrayObject **op, PyArray_Descr **op_dtype, @@ -99,6 +100,8 @@ npyiter_get_priority_subtype(int nop, PyArrayObject **op, static int npyiter_allocate_transfer_functions(NpyIter *iter); +static int +npyiter_find_buffering_setup(NpyIter *iter, npy_intp buffersize); /*NUMPY_API * Allocate a new iterator for multiple array objects, and advanced @@ -155,13 +158,6 @@ NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32 flags, NPY_IT_TIME_POINT(c_start); - if (nop > NPY_MAXARGS) { - PyErr_Format(PyExc_ValueError, - "Cannot construct an iterator with more than %d operands " - "(%d were requested)", NPY_MAXARGS, nop); - return NULL; - } - /* * Before 1.8, if `oa_ndim == 0`, this meant `op_axes != NULL` was an error. 
* With 1.8, `oa_ndim == -1` takes this role, while op_axes in that case @@ -239,7 +235,6 @@ NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32 flags, bufferdata = NIT_BUFFERDATA(iter); NBF_SIZE(bufferdata) = 0; memset(NBF_BUFFERS(bufferdata), 0, nop*NPY_SIZEOF_INTP); - memset(NBF_PTRS(bufferdata), 0, nop*NPY_SIZEOF_INTP); /* Ensure that the transferdata/auxdata is NULLed */ memset(NBF_TRANSFERINFO(bufferdata), 0, nop * sizeof(NpyIter_TransferInfo)); } @@ -253,28 +248,6 @@ NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32 flags, NPY_IT_TIME_POINT(c_fill_axisdata); - if (itflags & NPY_ITFLAG_BUFFER) { - /* - * If buffering is enabled and no buffersize was given, use a default - * chosen to be big enough to get some amortization benefits, but - * small enough to be cache-friendly. - */ - if (buffersize <= 0) { - buffersize = NPY_BUFSIZE; - } - /* No point in a buffer bigger than the iteration size */ - if (buffersize > NIT_ITERSIZE(iter)) { - buffersize = NIT_ITERSIZE(iter); - } - NBF_BUFFERSIZE(bufferdata) = buffersize; - - /* - * Initialize for use in FirstVisit, which may be called before - * the buffers are filled and the reduce pos is updated. - */ - NBF_REDUCE_POS(bufferdata) = 0; - } - /* * If an index was requested, compute the strides for it. * Note that we must do this before changing the order of the @@ -451,35 +424,25 @@ NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32 flags, } } - /* - * If REFS_OK was specified, check whether there are any - * reference arrays and flag it if so. - * - * NOTE: This really should be unnecessary, but chances are someone relies - * on it. The iterator itself does not require the API here - * as it only does so for casting/buffering. But in almost all - * use-cases the API will be required for whatever operation is done. 
- */ - if (flags & NPY_ITER_REFS_OK) { - for (iop = 0; iop < nop; ++iop) { - PyArray_Descr *rdt = op_dtype[iop]; - if ((rdt->flags & (NPY_ITEM_REFCOUNT | - NPY_ITEM_IS_POINTER | - NPY_NEEDS_PYAPI)) != 0) { - /* Iteration needs API access */ - NIT_ITFLAGS(iter) |= NPY_ITFLAG_NEEDSAPI; - } + /* If buffering is set prepare it */ + if (itflags & NPY_ITFLAG_BUFFER) { + if (npyiter_find_buffering_setup(iter, buffersize) < 0) { + NpyIter_Deallocate(iter); + return NULL; } - } - /* If buffering is set without delayed allocation */ - if (itflags & NPY_ITFLAG_BUFFER) { + /* + * Initialize for use in FirstVisit, which may be called before + * the buffers are filled and the reduce pos is updated. + */ + NBF_REDUCE_POS(bufferdata) = 0; + if (!npyiter_allocate_transfer_functions(iter)) { NpyIter_Deallocate(iter); return NULL; } if (!(itflags & NPY_ITFLAG_DELAYBUF)) { - /* Allocate the buffers */ + /* Allocate the buffers if that is not delayed */ if (!npyiter_allocate_buffers(iter, NULL)) { NpyIter_Deallocate(iter); return NULL; @@ -492,6 +455,11 @@ NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32 flags, } } } + else if (itflags&NPY_ITFLAG_EXLOOP) { + /* make sure to update the user pointers (when buffering, it does this). 
*/ + assert(!(itflags & NPY_ITFLAG_HASINDEX)); + memcpy(NIT_USERPTRS(iter), NIT_DATAPTRS(iter), nop * sizeof(void *)); + } NPY_IT_TIME_POINT(c_prepare_buffers); @@ -1006,6 +974,10 @@ npyiter_check_per_op_flags(npy_uint32 op_flags, npyiter_opitflags *op_itflags) *op_itflags |= NPY_OP_ITFLAG_VIRTUAL; } + if (op_flags & NPY_ITER_CONTIG) { + *op_itflags |= NPY_OP_ITFLAG_CONTIG; + } + return 1; } @@ -1103,14 +1075,25 @@ npyiter_prepare_one_operand(PyArrayObject **op, return 0; } *op_dataptr = PyArray_BYTES(*op); - /* PyArray_DESCR does not give us a reference */ - *op_dtype = PyArray_DESCR(*op); - if (*op_dtype == NULL) { - PyErr_SetString(PyExc_ValueError, - "Iterator input operand has no dtype descr"); - return 0; + + /* + * Checking whether casts are valid is done later, once the + * final data types have been selected. For now, just store the + * requested type. + */ + if (op_request_dtype != NULL && op_request_dtype != PyArray_DESCR(*op)) { + /* We just have a borrowed reference to op_request_dtype */ + *op_dtype = PyArray_AdaptDescriptorToArray( + *op, NULL, op_request_dtype); + if (*op_dtype == NULL) { + return 0; + } + } + else { + *op_dtype = PyArray_DESCR(*op); + Py_INCREF(*op_dtype); } - Py_INCREF(*op_dtype); + /* * If references weren't specifically allowed, make sure there * are no references in the inputs or requested dtypes. @@ -1129,19 +1112,6 @@ npyiter_prepare_one_operand(PyArrayObject **op, return 0; } } - /* - * Checking whether casts are valid is done later, once the - * final data types have been selected. For now, just store the - * requested type. 
- */ - if (op_request_dtype != NULL) { - /* We just have a borrowed reference to op_request_dtype */ - Py_SETREF(*op_dtype, PyArray_AdaptDescriptorToArray( - *op, NULL, op_request_dtype)); - if (*op_dtype == NULL) { - return 0; - } - } /* Check if the operand is in the byte order requested */ if (op_flags & NPY_ITER_NBO) { @@ -1162,7 +1132,7 @@ npyiter_prepare_one_operand(PyArrayObject **op, /* Check if the operand is aligned */ if (op_flags & NPY_ITER_ALIGNED) { /* Check alignment */ - if (!IsAligned(*op)) { + if (!PyArray_ISALIGNED(*op)) { NPY_IT_DBG_PRINT("Iterator: Setting NPY_OP_ITFLAG_CAST " "because of NPY_ITER_ALIGNED\n"); *op_itflags |= NPY_OP_ITFLAG_CAST; @@ -1194,10 +1164,10 @@ npyiter_prepare_operands(int nop, PyArrayObject **op_in, PyArray_Descr **op_dtype, npy_uint32 flags, npy_uint32 *op_flags, npyiter_opitflags *op_itflags, - npy_int8 *out_maskop) + int *out_maskop) { int iop, i; - npy_int8 maskop = -1; + int maskop = -1; int any_writemasked_ops = 0; /* @@ -1315,8 +1285,10 @@ npyiter_check_casting(int nop, PyArrayObject **op, printf("\n"); #endif /* If the types aren't equivalent, a cast is necessary */ - if (op[iop] != NULL && !PyArray_EquivTypes(PyArray_DESCR(op[iop]), - op_dtype[iop])) { + npy_intp view_offset = NPY_MIN_INTP; + if (op[iop] != NULL && !(PyArray_SafeCast( + PyArray_DESCR(op[iop]), op_dtype[iop], &view_offset, + NPY_NO_CASTING, 1) && view_offset == 0)) { /* Check read (op -> temp) casting */ if ((op_itflags[iop] & NPY_OP_ITFLAG_READ) && !PyArray_CanCastArrayTo(op[iop], @@ -1594,11 +1566,11 @@ npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itf axisdata = NIT_AXISDATA(iter); sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); + memcpy(NIT_DATAPTRS(iter), op_dataptr, nop * sizeof(void *)); if (ndim == 0) { /* Need to fill the first axisdata, even if the iterator is 0-d */ NAD_SHAPE(axisdata) = 1; NAD_INDEX(axisdata) = 0; - memcpy(NAD_PTRS(axisdata), op_dataptr, NPY_SIZEOF_INTP*nop); 
memset(NAD_STRIDES(axisdata), 0, NPY_SIZEOF_INTP*nop); } @@ -1609,7 +1581,6 @@ npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itf NAD_SHAPE(axisdata) = bshape; NAD_INDEX(axisdata) = 0; - memcpy(NAD_PTRS(axisdata), op_dataptr, NPY_SIZEOF_INTP*nop); for (iop = 0; iop < nop; ++iop) { op_cur = op[iop]; @@ -1931,6 +1902,428 @@ operand_different_than_broadcast: { } } + +/* + * At this point we (presumably) use a buffered iterator and here we want + * to find out the best way to buffer the iterator in a fashion that we don't + * have to figure out a lot of things on every outer iteration. + * + * How do we iterate? + * ------------------ + * There are currently two modes of "buffered" iteration: + * 1. The normal mode, where we either buffer each operand or not and + * then do a 1-D loop on those buffers (or operands). + * 2. The "reduce" mode. In reduce mode (ITFLAG_REDUCE) we internally use a + * a double iteration where for "reduce" operands we have: + * - One outer iteration with stride == 0 and a core with at least one + * stride != 0 (all of them if this is a true reduce/writeable operand). + * - One outer iteration with stride != 0 and a core of all strides == 0. + * This setup allows filling the buffer with only the stride != 0 and then + * doing the double loop. + * An Example for these two cases is: + * arr = np.ones((100, 10, 10))[::2, :, :] + * arr.sum(-1) + * arr.sum(-2) + * Where the slice prevents the iterator from collapsing axes and the + * result has stride 0 either along the last or the second to last axis. + * In both cases we can buffer 10x10 elements in reduce mode. + * (This iteration needs no buffer, add a cast to ensure actual buffering.) + * + * Only a writeable (reduce) operand require this reduce mode because for + * reading it is OK if the buffer holds duplicated elements. 
+ * The benefit of the reduce mode is that it allows for larger core sizes and + * buffers since the zero strides do not allow a single 1-d iteration. + * If we use reduce-mode, we can apply it also to read-only operands as an + * optimization. + * + * The function here finds the first "outer" dimension and it's "core" to use + * that works with reductions. + * While iterating, we will fill the buffers making sure that we: + * - Never buffer beyond the first outer dimension (optimize chance of re-use). + * - If the iterator is manually set to an offset into what is part of the + * core (see second example below), then we only fill the buffer to finish + * that one core. This re-aligns us with the core and is necessary for + * reductions. (Such manual setting should be rare or happens exactly once + * for splitting the iteration into worker chunks.) + * + * And examples for these two constraints: + * Given the iteration shape is (100, 10, 10) and the core size 10 with a + * buffer size of 60 (due to limits), making dimension 1 the "outer" one. + * The first iterations/buffers would then range (excluding end-point): + * - (0, 0, 0) -> (0, 6, 0) + * - (0, 6, 0) -> (1, 0, 0) # Buffer only holds 40 of 60 possible elements. + * - (1, 0, 0) -> (1, 6, 0) + * - ... + * If the user limits to a range starting from 75, we use: + * - (0, 7, 5) -> (0, 8, 0) # Only 5 elements to re-align with core. + * - (0, 8, 0) -> (1, 0, 0) + * - ... # continue as above + * + * This means that the data stored in the buffer has always the same structure + * (except when manually moved), which allows us to fill the buffer more simply + * and optimally in some cases, and makes it easier to determine whether buffer + * content is re-usable (e.g., because it represents broadcasted operands). 
+ * + * Best buffer and core size + * ------------------------- + * To avoid having to figure out what to copy every time we fill buffers, + * we here want to find the outer iteration dimension such that: + * - Its core size is <= the maximum buffersize if buffering is needed; + * - Reductions are possible (with or without reduce mode); + * - Iteration overhead is minimized. We estimate the total overhead with + * the number "outer" iterations: + * + * N_o = full_iterator_size / min(core_size * outer_dim_size, buffersize) + * + * This is approximately how often `iternext()` is called when the user + * is using an external-loop and how often we would fill buffers. + * The total overhead is then estimated as: + * + * (1 + n_buffers) * N_o + * + * Since the iterator size is a constant, we can estimate the overhead as: + * + * (1 + n_buffers) / min(core_size * outer_dim_size, buffersize) + * + * And when comparing two options multiply by the others divisor/size to + * avoid the division. + * + * TODO: Probably should tweak or simplify? The formula is clearly not + * the actual cost (Buffers add a constant total cost as well). + * Right now, it mostly rejects growing the core size when we are already + * close to the maximum buffersize (even overhead wise not worth it). + * That may be good enough, but maybe it can be spelled simpler? + * + * In theory, the reduction could also span multiple axes if other operands + * are buffered. We do not try to discover this. 
+ */ +static int +npyiter_find_buffering_setup(NpyIter *iter, npy_intp buffersize) +{ + int nop = iter->nop; + int ndim = iter->ndim; + npy_uint32 itflags = iter->itflags; + NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter); + + /* Per operand space; could also reuse an iterator field initialized later */ + NPY_ALLOC_WORKSPACE(dim_scratch_space, int, 10, 2 * nop); + if (dim_scratch_space == NULL) { + return -1; + } + /* + * We check two things here, first how many operand dimensions can be + * iterated using a single stride (all dimensions are consistent), + * and second, whether we found a reduce dimension for the operand. + * That is an outer dimension a reduce would have to take place on. + */ + int *op_single_stride_dims = dim_scratch_space; + int *op_reduce_outer_dim = dim_scratch_space + nop; + + npy_intp sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); + NpyIter_AxisData *axisdata = NIT_AXISDATA(iter); + npyiter_opitflags *op_itflags = NIT_OPITFLAGS(iter); + + /* + * We can only continue as long as we are within the maximum allowed size. + * When no buffering is needed and GROWINNER is set, we don't have to + * worry about this maximum. + * + * If the user passed no buffersize, default to one small enough that it + * should be cache friendly and big enough to amortize overheads. + */ + npy_intp maximum_size = buffersize <= 0 ? NPY_BUFSIZE : buffersize; + + /* The cost factor defined by: (1 + n_buffered) */ + int cost = 1; + + for (int iop = 0; iop < nop; ++iop) { + op_single_stride_dims[iop] = 1; + op_reduce_outer_dim[iop] = 0; + if (op_itflags[iop] & NPY_OP_ITFLAG_CAST) { + cost += 1; + } + } + + /* + * Once a reduce operand reaches a ==0/!=0 stride flip, this dimension + * becomes the outer reduce dimension. 
+ */ + int outer_reduce_dim = 0; + + npy_intp size = axisdata->shape; /* the current total size */ + + /* Note that there is always one axidata that we use (even with ndim =0) */ + int best_dim = 0; + int best_cost = cost; + /* The size of the "outer" iteration and all previous dimensions: */ + npy_intp best_size = size; + npy_intp best_coresize = 1; + + NPY_IT_DBG_PRINT("Iterator: discovering best core size\n"); + for (int idim = 1; idim < ndim; idim++) { + if (outer_reduce_dim) { + /* Cannot currently expand beyond reduce dim! */ + break; + } + if (size >= maximum_size && + (cost > 1 || !(itflags & NPY_ITFLAG_GROWINNER))) { + /* Exceeded buffer size, can only improve without buffers and growinner. */ + break; + } + + npy_intp *prev_strides = NAD_STRIDES(axisdata); + npy_intp prev_shape = NAD_SHAPE(axisdata); + NIT_ADVANCE_AXISDATA(axisdata, 1); + npy_intp *strides = NAD_STRIDES(axisdata); + + for (int iop = 0; iop < nop; iop++) { + /* + * Check that we set things up nicely so strides coalesc. Except + * for index operands, which currently disrupts coalescing. + * NOTE(seberg): presumably `npyiter_compute_index_strides` should + * not set the strides to 0, but this was safer for backporting. + */ + assert((axisdata->shape != 1) || (prev_strides[iop] == strides[iop]) + || (op_itflags[iop] & (NPY_ITER_C_INDEX|NPY_ITER_F_INDEX))); + + if (op_single_stride_dims[iop] == idim) { + /* Best case: the strides still collapse for this operand. */ + if (prev_strides[iop] * prev_shape == strides[iop]) { + op_single_stride_dims[iop] += 1; + continue; + } + + /* + * Operand now requires buffering (if it was not already). + * NOTE: This is technically not true since we may still use + * an outer reduce at this point. + * So it prefers a non-reduce setup, which seems not + * ideal, but OK. 
+ */ + if (!(op_itflags[iop] & NPY_OP_ITFLAG_CAST)) { + cost += 1; + } + } + + /* + * If this operand is a reduction operand and the stride swapped + * between !=0 and ==0 then this is the `outer_reduce_dim` and + * we will never continue further (see break at start of op loop). + */ + if ((op_itflags[iop] & NPY_OP_ITFLAG_REDUCE) + && (strides[iop] == 0 || prev_strides[iop] == 0)) { + assert(outer_reduce_dim == 0 || outer_reduce_dim == idim); + op_reduce_outer_dim[iop] = idim; + outer_reduce_dim = idim; + } + /* For clarity: op_reduce_outer_dim[iop] if set always matches. */ + assert(!op_reduce_outer_dim[iop] || op_reduce_outer_dim[iop] == outer_reduce_dim); + } + + npy_intp coresize = size; /* if we iterate here, this is the core */ + size *= axisdata->shape; + if (size == 0) { + break; /* Avoid a zero coresize. */ + } + + double bufsize = (double)size; + if (size > maximum_size && + (cost > 1 || !(itflags & NPY_ITFLAG_GROWINNER))) { + /* If we need buffering, limit size in cost calculation. */ + bufsize = (double)maximum_size; + } + + NPY_IT_DBG_PRINT(" dim=%d, n_buffered=%d, cost=%g @bufsize=%g (prev scaled cost=%g)\n", + idim, cost - 1, cost * (double)best_size, bufsize, best_cost * bufsize); + + /* + * Compare cost (use double to avoid overflows), as explained above + * the cost is compared via the other buffersize. + */ + if (cost * (double)best_size <= best_cost * bufsize) { + /* This dimension is better! */ + best_cost = cost; + best_coresize = coresize; + best_size = size; + best_dim = idim; + } + } + + npy_bool using_reduce = outer_reduce_dim && (best_dim == outer_reduce_dim); + npy_bool iterator_must_buffer = 0; + + /* We found the best chunking store the information */ + assert(best_coresize != 0); + NIT_BUFFERDATA(iter)->coresize = best_coresize; + NIT_BUFFERDATA(iter)->outerdim = best_dim; + + /* + * We found the best dimensions to iterate on and now need to fill + * in all the buffer information related to the iteration. 
+ * This includes filling in information about reduce outer dims + * (we do this even if it is not a reduce for simplicity). + */ + axisdata = NIT_AXISDATA(iter); + NpyIter_AxisData *reduce_axisdata = NIT_INDEX_AXISDATA(axisdata, outer_reduce_dim); + + NPY_IT_DBG_PRINT("Iterator: Found core size=%zd, outer=%zd at dim=%d:\n", + best_coresize, reduce_axisdata->shape, best_dim); + + /* If we are not using a reduce axes mark it and shrink. */ + if (using_reduce) { + assert(NIT_ITFLAGS(iter) & NPY_ITFLAG_REDUCE); + NPY_IT_DBG_PRINT(" using reduce logic\n"); + } + else { + NIT_ITFLAGS(iter) &= ~NPY_ITFLAG_REDUCE; + NPY_IT_DBG_PRINT(" not using reduce logic\n"); + } + + for (int iop = 0; iop < nop; iop++) { + /* We need to fill in the following information */ + npy_bool is_reduce_op; + npy_bool op_is_buffered = (op_itflags[iop]&NPY_OP_ITFLAG_CAST) != 0; + + /* If contig was requested and this is not writeable avoid zero strides */ + npy_bool avoid_zero_strides = ( + (op_itflags[iop] & NPY_OP_ITFLAG_CONTIG) + && !(op_itflags[iop] & NPY_OP_ITFLAG_WRITE)); + + /* + * Figure out if this is iterated as a reduce op. Even one marked + * for reduction may not be iterated as one. + */ + if (!using_reduce) { + is_reduce_op = 0; + } + else if (op_reduce_outer_dim[iop] == best_dim) { + /* This op *must* use reduce semantics. */ + is_reduce_op = 1; + } + else if (op_single_stride_dims[iop] == best_dim && !op_is_buffered) { + /* + * Optimization: This operand is not buffered and we might as well + * iterate it as an unbuffered reduce operand. + */ + is_reduce_op = 1; + } + else if (NAD_STRIDES(reduce_axisdata)[iop] == 0 + && op_single_stride_dims[iop] <= best_dim + && !avoid_zero_strides) { + /* + * Optimization: If the outer (reduce) stride is 0 on the operand + * then we can iterate this in a reduce way: buffer the core only + * and repeat it in the "outer" dimension. + * If user requested contig, we may have to avoid 0 strides, this + * is incompatible with the reduce path. 
+ */ + is_reduce_op = 1; + } + else { + is_reduce_op = 0; + } + + /* + * See if the operand is a single stride (if we use reduce logic) + * we don't need to worry about the outermost dimension. + * If it is not a single stride, we must buffer the operand. + */ + if (op_single_stride_dims[iop] + is_reduce_op > best_dim) { + NIT_OPITFLAGS(iter)[iop] |= NPY_OP_ITFLAG_BUF_SINGLESTRIDE; + } + else { + op_is_buffered = 1; + } + + npy_intp inner_stride; + npy_intp reduce_outer_stride; + if (op_is_buffered) { + npy_intp itemsize = NIT_DTYPES(iter)[iop]->elsize; + /* + * A buffered operand has a stride of itemsize unless we use + * reduce logic. In that case, either the inner or outer stride + * is 0. + */ + if (is_reduce_op) { + if (NAD_STRIDES(reduce_axisdata)[iop] == 0) { + inner_stride = itemsize; + reduce_outer_stride = 0; + } + else { + inner_stride = 0; + reduce_outer_stride = itemsize; + } + } + else { + if (NIT_OPITFLAGS(iter)[iop] & NPY_OP_ITFLAG_BUF_SINGLESTRIDE + && NAD_STRIDES(axisdata)[iop] == 0 + && !avoid_zero_strides) { + /* This op is always 0 strides, so even the buffer is that. 
*/ + inner_stride = 0; + reduce_outer_stride = 0; + } + else { + /* normal buffered op */ + inner_stride = itemsize; + reduce_outer_stride = itemsize * best_coresize; + } + } + } + else { + inner_stride = NAD_STRIDES(axisdata)[iop]; + reduce_outer_stride = NAD_STRIDES(reduce_axisdata)[iop]; + } + + if (!using_reduce) { + /* invalidate for now, since we should not use it */ + reduce_outer_stride = NPY_MIN_INTP; + } + + NPY_IT_DBG_PRINT( + "Iterator: op=%d (buffered=%d, reduce=%d, single-stride=%d):\n" + " inner stride: %zd\n" + " reduce outer stride: %zd (if iterator uses reduce)\n", + iop, op_is_buffered, is_reduce_op, + (NIT_OPITFLAGS(iter)[iop] & NPY_OP_ITFLAG_BUF_SINGLESTRIDE) != 0, + inner_stride, reduce_outer_stride); + + NBF_STRIDES(bufferdata)[iop] = inner_stride; + NBF_REDUCE_OUTERSTRIDES(bufferdata)[iop] = reduce_outer_stride; + + /* The actual reduce usage may have changed! */ + if (is_reduce_op) { + NIT_OPITFLAGS(iter)[iop] |= NPY_OP_ITFLAG_REDUCE; + } + else { + NIT_OPITFLAGS(iter)[iop] &= ~NPY_OP_ITFLAG_REDUCE; + } + + if (!op_is_buffered) { + NIT_OPITFLAGS(iter)[iop] |= NPY_OP_ITFLAG_BUFNEVER; + } + else { + iterator_must_buffer = 1; + } + } + + /* + * If we buffer or do not have grow-inner, make sure that the size is + * below the maximum_size, but a multiple of the coresize. + */ + if (iterator_must_buffer || !(itflags & NPY_ITFLAG_GROWINNER)) { + if (maximum_size < best_size) { + best_size = best_coresize * (maximum_size / best_coresize); + } + } + NIT_BUFFERDATA(iter)->buffersize = best_size; + /* Core starts at 0 initially, if needed it is set in goto index. */ + NIT_BUFFERDATA(iter)->coreoffset = 0; + + npy_free_workspace(dim_scratch_space); + return 0; +} + + /* * Replaces the AXISDATA for the iop'th operand, broadcasting * the dimensions as necessary. 
Assumes the replacement array is @@ -2021,13 +2414,7 @@ npyiter_replace_axisdata( /* Now the base data pointer is calculated, set it everywhere it's needed */ NIT_RESETDATAPTR(iter)[iop] = op_dataptr; NIT_BASEOFFSETS(iter)[iop] = baseoffset; - axisdata = axisdata0; - /* Fill at least one axisdata, for the 0-d case */ - NAD_PTRS(axisdata)[iop] = op_dataptr; - NIT_ADVANCE_AXISDATA(axisdata, 1); - for (idim = 1; idim < ndim; ++idim, NIT_ADVANCE_AXISDATA(axisdata, 1)) { - NAD_PTRS(axisdata)[iop] = op_dataptr; - } + NIT_DATAPTRS(iter)[iop] = op_dataptr; } /* @@ -2048,15 +2435,15 @@ npyiter_compute_index_strides(NpyIter *iter, npy_uint32 flags) NpyIter_AxisData *axisdata; npy_intp sizeof_axisdata; + NIT_DATAPTRS(iter)[nop] = 0; /* * If there is only one element being iterated, we just have - * to touch the first AXISDATA because nothing will ever be - * incremented. This also initializes the data for the 0-d case. + * to touch the first set the "dataptr". + * This also initializes the data for the 0-d case. 
*/ if (NIT_ITERSIZE(iter) == 1) { if (itflags & NPY_ITFLAG_HASINDEX) { axisdata = NIT_AXISDATA(iter); - NAD_PTRS(axisdata)[nop] = 0; } return; } @@ -2074,7 +2461,6 @@ npyiter_compute_index_strides(NpyIter *iter, npy_uint32 flags) else { NAD_STRIDES(axisdata)[nop] = indexstride; } - NAD_PTRS(axisdata)[nop] = 0; indexstride *= shape; } } @@ -2091,7 +2477,6 @@ npyiter_compute_index_strides(NpyIter *iter, npy_uint32 flags) else { NAD_STRIDES(axisdata)[nop] = indexstride; } - NAD_PTRS(axisdata)[nop] = 0; indexstride *= shape; } } @@ -2160,12 +2545,12 @@ npyiter_flip_negative_strides(NpyIter *iter) int iop, nop = NIT_NOP(iter); npy_intp istrides, nstrides = NAD_NSTRIDES(); - NpyIter_AxisData *axisdata, *axisdata0; + NpyIter_AxisData *axisdata; npy_intp *baseoffsets; npy_intp sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); int any_flipped = 0; - axisdata0 = axisdata = NIT_AXISDATA(iter); + axisdata = NIT_AXISDATA(iter); baseoffsets = NIT_BASEOFFSETS(iter); for (idim = 0; idim < ndim; ++idim, NIT_ADVANCE_AXISDATA(axisdata, 1)) { npy_intp *strides = NAD_STRIDES(axisdata); @@ -2216,13 +2601,7 @@ npyiter_flip_negative_strides(NpyIter *iter) for (istrides = 0; istrides < nstrides; ++istrides) { resetdataptr[istrides] += baseoffsets[istrides]; - } - axisdata = axisdata0; - for (idim = 0; idim < ndim; ++idim, NIT_ADVANCE_AXISDATA(axisdata, 1)) { - char **ptrs = NAD_PTRS(axisdata); - for (istrides = 0; istrides < nstrides; ++istrides) { - ptrs[istrides] = resetdataptr[istrides]; - } + NIT_DATAPTRS(iter)[istrides] = resetdataptr[istrides]; } /* * Indicate that some of the perm entries are negative, @@ -2425,9 +2804,14 @@ npyiter_get_common_dtype(int nop, PyArrayObject **op, { int iop; npy_intp narrs = 0, ndtypes = 0; - PyArrayObject *arrs[NPY_MAXARGS]; - PyArray_Descr *dtypes[NPY_MAXARGS]; PyArray_Descr *ret; + NPY_ALLOC_WORKSPACE(arrs_and_dtypes, void *, 2 * 4, 2 * nop); + if (arrs_and_dtypes == NULL) { + return NULL; + } + + PyArrayObject **arrs = (PyArrayObject 
**)arrs_and_dtypes; + PyArray_Descr **dtypes = (PyArray_Descr **)arrs_and_dtypes + nop; NPY_IT_DBG_PRINT("Iterator: Getting a common data type from operands\n"); @@ -2470,6 +2854,7 @@ npyiter_get_common_dtype(int nop, PyArrayObject **op, ret = PyArray_ResultType(narrs, arrs, ndtypes, dtypes); } + npy_free_workspace(arrs_and_dtypes); return ret; } @@ -2688,18 +3073,13 @@ npyiter_allocate_arrays(NpyIter *iter, int **op_axes) { npy_uint32 itflags = NIT_ITFLAGS(iter); - int idim, ndim = NIT_NDIM(iter); + int ndim = NIT_NDIM(iter); int iop, nop = NIT_NOP(iter); int check_writemasked_reductions = 0; - NpyIter_BufferData *bufferdata = NULL; PyArrayObject **op = NIT_OPERANDS(iter); - if (itflags & NPY_ITFLAG_BUFFER) { - bufferdata = NIT_BUFFERDATA(iter); - } - if (flags & NPY_ITER_COPY_IF_OVERLAP) { /* * Perform operand memory overlap checks, if requested. @@ -2834,13 +3214,6 @@ npyiter_allocate_arrays(NpyIter *iter, npyiter_replace_axisdata(iter, iop, op[iop], ndim, op_axes ? op_axes[iop] : NULL); - /* - * New arrays are guaranteed true-aligned, but copy/cast code - * needs uint-alignment in addition. - */ - if (IsUintAligned(out)) { - op_itflags[iop] |= NPY_OP_ITFLAG_ALIGNED; - } /* New arrays need no cast */ op_itflags[iop] &= ~NPY_OP_ITFLAG_CAST; } @@ -2875,22 +3248,8 @@ npyiter_allocate_arrays(NpyIter *iter, */ npyiter_replace_axisdata(iter, iop, op[iop], 0, NULL); - /* - * New arrays are guaranteed true-aligned, but copy/cast code - * needs uint-alignment in addition. 
- */ - if (IsUintAligned(temp)) { - op_itflags[iop] |= NPY_OP_ITFLAG_ALIGNED; - } - /* - * New arrays need no cast, and in the case - * of scalars, always have stride 0 so never need buffering - */ - op_itflags[iop] |= NPY_OP_ITFLAG_BUFNEVER; + /* New arrays need no cast */ op_itflags[iop] &= ~NPY_OP_ITFLAG_CAST; - if (itflags & NPY_ITFLAG_BUFFER) { - NBF_STRIDES(bufferdata)[iop] = 0; - } } /* * Make a temporary copy if, @@ -2947,13 +3306,6 @@ npyiter_allocate_arrays(NpyIter *iter, npyiter_replace_axisdata(iter, iop, op[iop], ondim, op_axes ? op_axes[iop] : NULL); - /* - * New arrays are guaranteed true-aligned, but copy/cast code - * additionally needs uint-alignment in addition. - */ - if (IsUintAligned(temp)) { - op_itflags[iop] |= NPY_OP_ITFLAG_ALIGNED; - } /* The temporary copy needs no cast */ op_itflags[iop] &= ~NPY_OP_ITFLAG_CAST; } @@ -2969,22 +3321,19 @@ npyiter_allocate_arrays(NpyIter *iter, "but neither copying nor buffering was enabled"); return 0; } - - /* - * If the operand is aligned, any buffering can use aligned - * optimizations. - */ - if (IsUintAligned(op[iop])) { - op_itflags[iop] |= NPY_OP_ITFLAG_ALIGNED; - } } /* Here we can finally check for contiguous iteration */ - if (op_flags[iop] & NPY_ITER_CONTIG) { + if (op_itflags[iop] & NPY_OP_ITFLAG_CONTIG) { NpyIter_AxisData *axisdata = NIT_AXISDATA(iter); npy_intp stride = NAD_STRIDES(axisdata)[iop]; if (stride != op_dtype[iop]->elsize) { + /* + * Need to copy to buffer (cast) to ensure contiguous + * NOTE: This is the wrong place in case of axes reorder + * (there is an xfailing test for this). + */ NPY_IT_DBG_PRINT("Iterator: Setting NPY_OP_ITFLAG_CAST " "because of NPY_ITER_CONTIG\n"); op_itflags[iop] |= NPY_OP_ITFLAG_CAST; @@ -2997,63 +3346,6 @@ npyiter_allocate_arrays(NpyIter *iter, } } } - - /* - * If no alignment, byte swap, or casting is needed, - * the inner stride of this operand works for the whole - * array, we can set NPY_OP_ITFLAG_BUFNEVER. 
- */ - if ((itflags & NPY_ITFLAG_BUFFER) && - !(op_itflags[iop] & NPY_OP_ITFLAG_CAST)) { - NpyIter_AxisData *axisdata = NIT_AXISDATA(iter); - if (ndim <= 1) { - op_itflags[iop] |= NPY_OP_ITFLAG_BUFNEVER; - NBF_STRIDES(bufferdata)[iop] = NAD_STRIDES(axisdata)[iop]; - } - else if (PyArray_NDIM(op[iop]) > 0) { - npy_intp stride, shape, innerstride = 0, innershape; - npy_intp sizeof_axisdata = - NIT_AXISDATA_SIZEOF(itflags, ndim, nop); - /* Find stride of the first non-empty shape */ - for (idim = 0; idim < ndim; ++idim) { - innershape = NAD_SHAPE(axisdata); - if (innershape != 1) { - innerstride = NAD_STRIDES(axisdata)[iop]; - break; - } - NIT_ADVANCE_AXISDATA(axisdata, 1); - } - ++idim; - NIT_ADVANCE_AXISDATA(axisdata, 1); - /* Check that everything could have coalesced together */ - for (; idim < ndim; ++idim) { - stride = NAD_STRIDES(axisdata)[iop]; - shape = NAD_SHAPE(axisdata); - if (shape != 1) { - /* - * If N times the inner stride doesn't equal this - * stride, the multi-dimensionality is needed. - */ - if (innerstride*innershape != stride) { - break; - } - else { - innershape *= shape; - } - } - NIT_ADVANCE_AXISDATA(axisdata, 1); - } - /* - * If we looped all the way to the end, one stride works. - * Set that stride, because it may not belong to the first - * dimension. 
- */ - if (idim == ndim) { - op_itflags[iop] |= NPY_OP_ITFLAG_BUFNEVER; - NBF_STRIDES(bufferdata)[iop] = innerstride; - } - } - } } if (check_writemasked_reductions) { @@ -3124,28 +3416,39 @@ npyiter_allocate_transfer_functions(NpyIter *iter) npy_intp *strides = NAD_STRIDES(axisdata), op_stride; NpyIter_TransferInfo *transferinfo = NBF_TRANSFERINFO(bufferdata); + npy_intp sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); + NpyIter_AxisData *reduce_axisdata = NIT_INDEX_AXISDATA(axisdata, bufferdata->outerdim); + npy_intp *reduce_strides = NAD_STRIDES(reduce_axisdata); + /* combined cast flags, the new cast flags for each cast: */ NPY_ARRAYMETHOD_FLAGS cflags = PyArrayMethod_MINIMAL_FLAGS; NPY_ARRAYMETHOD_FLAGS nc_flags; for (iop = 0; iop < nop; ++iop) { npyiter_opitflags flags = op_itflags[iop]; + /* - * Reduction operands may be buffered with a different stride, - * so we must pass NPY_MAX_INTP to the transfer function factory. + * Reduce operands buffer the outer stride if it is nonzero; compare + * `npyiter_fill_buffercopy_params`. + * (Inner strides cannot _all_ be zero if the outer is, but some.) */ - op_stride = (flags & NPY_OP_ITFLAG_REDUCE) ? NPY_MAX_INTP : - strides[iop]; + if ((op_itflags[iop] & NPY_OP_ITFLAG_REDUCE) && reduce_strides[iop] != 0) { + op_stride = reduce_strides[iop]; + } + else { + op_stride = strides[iop]; + } /* * If we have determined that a buffer may be needed, * allocate the appropriate transfer functions */ if (!(flags & NPY_OP_ITFLAG_BUFNEVER)) { + int aligned = IsUintAligned(op[iop]); if (flags & NPY_OP_ITFLAG_READ) { int move_references = 0; if (PyArray_GetDTypeTransferFunction( - (flags & NPY_OP_ITFLAG_ALIGNED) != 0, + aligned, op_stride, op_dtype[iop]->elsize, PyArray_DESCR(op[iop]), @@ -3175,7 +3478,7 @@ npyiter_allocate_transfer_functions(NpyIter *iter) * could be inconsistent. 
*/ if (PyArray_GetMaskedDTypeTransferFunction( - (flags & NPY_OP_ITFLAG_ALIGNED) != 0, + aligned, op_dtype[iop]->elsize, op_stride, (strides[maskop] == mask_dtype->elsize) ? @@ -3192,7 +3495,7 @@ npyiter_allocate_transfer_functions(NpyIter *iter) } else { if (PyArray_GetDTypeTransferFunction( - (flags & NPY_OP_ITFLAG_ALIGNED) != 0, + aligned, op_dtype[iop]->elsize, op_stride, op_dtype[iop], @@ -3217,7 +3520,7 @@ npyiter_allocate_transfer_functions(NpyIter *iter) * src references. */ if (PyArray_GetClearFunction( - (flags & NPY_OP_ITFLAG_ALIGNED) != 0, + aligned, op_dtype[iop]->elsize, op_dtype[iop], &transferinfo[iop].clear, &nc_flags) < 0) { goto fail; @@ -3239,11 +3542,6 @@ npyiter_allocate_transfer_functions(NpyIter *iter) NIT_ITFLAGS(iter) |= cflags << NPY_ITFLAG_TRANSFERFLAGS_SHIFT; assert(NIT_ITFLAGS(iter) >> NPY_ITFLAG_TRANSFERFLAGS_SHIFT == cflags); - /* If any of the dtype transfer functions needed the API, flag it. */ - if (cflags & NPY_METH_REQUIRES_PYAPI) { - NIT_ITFLAGS(iter) |= NPY_ITFLAG_NEEDSAPI; - } - return 1; fail: diff --git a/numpy/_core/src/multiarray/nditer_impl.h b/numpy/_core/src/multiarray/nditer_impl.h index 790ddcb11f83..ab3724d67d11 100644 --- a/numpy/_core/src/multiarray/nditer_impl.h +++ b/numpy/_core/src/multiarray/nditer_impl.h @@ -55,13 +55,14 @@ /********** PRINTF DEBUG TRACING **************/ #define NPY_IT_DBG_TRACING 0 +/* TODO: Can remove the n-args macros, old C89 didn't have variadic macros. */ #if NPY_IT_DBG_TRACING -#define NPY_IT_DBG_PRINT(s) printf("%s", s) -#define NPY_IT_DBG_PRINT1(s, p1) printf(s, p1) -#define NPY_IT_DBG_PRINT2(s, p1, p2) printf(s, p1, p2) -#define NPY_IT_DBG_PRINT3(s, p1, p2, p3) printf(s, p1, p2, p3) +#define NPY_IT_DBG_PRINT(...) 
printf(__VA_ARGS__) +#define NPY_IT_DBG_PRINT1(s, p1) NPY_IT_DBG_PRINT(s, p1) +#define NPY_IT_DBG_PRINT2(s, p1, p2) NPY_IT_DBG_PRINT(s, p1, p2) +#define NPY_IT_DBG_PRINT3(s, p1, p2, p3) NPY_IT_DBG_PRINT(s, p1, p2, p3) #else -#define NPY_IT_DBG_PRINT(s) +#define NPY_IT_DBG_PRINT(...) #define NPY_IT_DBG_PRINT1(s, p1) #define NPY_IT_DBG_PRINT2(s, p1, p2) #define NPY_IT_DBG_PRINT3(s, p1, p2, p3) @@ -99,12 +100,10 @@ #define NPY_ITFLAG_ONEITERATION (1 << 9) /* Delay buffer allocation until first Reset* call */ #define NPY_ITFLAG_DELAYBUF (1 << 10) -/* Iteration needs API access during iternext */ -#define NPY_ITFLAG_NEEDSAPI (1 << 11) /* Iteration includes one or more operands being reduced */ -#define NPY_ITFLAG_REDUCE (1 << 12) +#define NPY_ITFLAG_REDUCE (1 << 11) /* Reduce iteration doesn't need to recalculate reduce loops next time */ -#define NPY_ITFLAG_REUSE_REDUCE_LOOPS (1 << 13) +#define NPY_ITFLAG_REUSE_REDUCE_LOOPS (1 << 12) /* * Offset of (combined) ArrayMethod flags for all transfer functions. * For now, we use the top 8 bits. 
@@ -119,22 +118,27 @@ #define NPY_OP_ITFLAG_READ 0x0002 /* The operand needs type conversion/byte swapping/alignment */ #define NPY_OP_ITFLAG_CAST 0x0004 -/* The operand never needs buffering */ +/* The operand never needs buffering (implies BUF_SINGLESTRIDE) */ #define NPY_OP_ITFLAG_BUFNEVER 0x0008 -/* The operand is aligned */ -#define NPY_OP_ITFLAG_ALIGNED 0x0010 +/* Whether the buffer filling can use a single stride (minus reduce if reduce) */ +#define NPY_OP_ITFLAG_BUF_SINGLESTRIDE 0x0010 /* The operand is being reduced */ #define NPY_OP_ITFLAG_REDUCE 0x0020 /* The operand is for temporary use, does not have a backing array */ #define NPY_OP_ITFLAG_VIRTUAL 0x0040 /* The operand requires masking when copying buffer -> array */ #define NPY_OP_ITFLAG_WRITEMASKED 0x0080 -/* The operand's data pointer is pointing into its buffer */ -#define NPY_OP_ITFLAG_USINGBUFFER 0x0100 +/* + * Whether the buffer is *fully* filled and thus ready for reuse. + * (Must check if the start pointer matches until copy-from-buffer checks) + */ +#define NPY_OP_ITFLAG_BUF_REUSABLE 0x0100 /* The operand must be copied (with UPDATEIFCOPY if also ITFLAG_WRITE) */ #define NPY_OP_ITFLAG_FORCECOPY 0x0200 /* The operand has temporary data, write it back at dealloc */ #define NPY_OP_ITFLAG_HAS_WRITEBACK 0x0400 +/* Whether the user requested a contiguous operand */ +#define NPY_OP_ITFLAG_CONTIG 0x0800 /* * The data layout of the iterator is fully specified by @@ -148,8 +152,8 @@ struct NpyIter_InternalOnly { /* Initial fixed position data */ npy_uint32 itflags; - npy_uint8 ndim, nop; - npy_int8 maskop; + npy_uint8 ndim; + int nop, maskop; npy_intp itersize, iterstart, iterend; /* iterindex is only used if RANGED or BUFFERED is set */ npy_intp iterindex; @@ -176,9 +180,13 @@ typedef npy_int16 npyiter_opitflags; ((NPY_SIZEOF_PY_INTPTR_T)*(nop)) #define NIT_OPITFLAGS_SIZEOF(itflags, ndim, nop) \ (NPY_PTR_ALIGNED(sizeof(npyiter_opitflags) * nop)) +#define NIT_DATAPTRS_SIZEOF(itflags, ndim, nop) \ + 
((NPY_SIZEOF_PY_INTPTR_T)*(nop+1)) +#define NIT_USERPTRS_SIZEOF(itflags, ndim, nop) \ + ((NPY_SIZEOF_PY_INTPTR_T)*(nop+1)) #define NIT_BUFFERDATA_SIZEOF(itflags, ndim, nop) \ ((itflags&NPY_ITFLAG_BUFFER) ? ( \ - (NPY_SIZEOF_PY_INTPTR_T)*(6 + 5*nop) + sizeof(NpyIter_TransferInfo) * nop) : 0) + (NPY_SIZEOF_PY_INTPTR_T)*(8 + 4*nop) + sizeof(NpyIter_TransferInfo) * nop) : 0) /* Byte offsets of the iterator members starting from iter->iter_flexdata */ #define NIT_PERM_OFFSET() \ @@ -201,9 +209,15 @@ typedef npy_int16 npyiter_opitflags; #define NIT_BUFFERDATA_OFFSET(itflags, ndim, nop) \ (NIT_OPITFLAGS_OFFSET(itflags, ndim, nop) + \ NIT_OPITFLAGS_SIZEOF(itflags, ndim, nop)) -#define NIT_AXISDATA_OFFSET(itflags, ndim, nop) \ +#define NIT_DATAPTRS_OFFSET(itflags, ndim, nop) + \ (NIT_BUFFERDATA_OFFSET(itflags, ndim, nop) + \ NIT_BUFFERDATA_SIZEOF(itflags, ndim, nop)) +#define NIT_USERPTRS_OFFSET(itflags, ndim, nop) + \ + (NIT_DATAPTRS_OFFSET(itflags, ndim, nop) + \ + NIT_DATAPTRS_SIZEOF(itflags, ndim, nop)) +#define NIT_AXISDATA_OFFSET(itflags, ndim, nop) \ + (NIT_USERPTRS_OFFSET(itflags, ndim, nop) + \ + NIT_USERPTRS_SIZEOF(itflags, ndim, nop)) /* Internal-only ITERATOR DATA MEMBER ACCESS */ #define NIT_ITFLAGS(iter) \ @@ -236,6 +250,10 @@ typedef npy_int16 npyiter_opitflags; iter->iter_flexdata + NIT_OPITFLAGS_OFFSET(itflags, ndim, nop))) #define NIT_BUFFERDATA(iter) ((NpyIter_BufferData *)( \ iter->iter_flexdata + NIT_BUFFERDATA_OFFSET(itflags, ndim, nop))) +#define NIT_DATAPTRS(iter) ((char **)( \ + iter->iter_flexdata + NIT_DATAPTRS_OFFSET(itflags, ndim, nop))) +#define NIT_USERPTRS(iter) ((char **)( \ + iter->iter_flexdata + NIT_USERPTRS_OFFSET(itflags, ndim, nop))) #define NIT_AXISDATA(iter) ((NpyIter_AxisData *)( \ iter->iter_flexdata + NIT_AXISDATA_OFFSET(itflags, ndim, nop))) @@ -251,7 +269,7 @@ struct NpyIter_TransferInfo_tag { struct NpyIter_BufferData_tag { npy_intp buffersize, size, bufiterend, - reduce_pos, reduce_outersize, reduce_outerdim; + reduce_pos, 
coresize, outersize, coreoffset, outerdim; Py_intptr_t bd_flexdata; }; @@ -259,20 +277,20 @@ struct NpyIter_BufferData_tag { #define NBF_SIZE(bufferdata) ((bufferdata)->size) #define NBF_BUFITEREND(bufferdata) ((bufferdata)->bufiterend) #define NBF_REDUCE_POS(bufferdata) ((bufferdata)->reduce_pos) -#define NBF_REDUCE_OUTERSIZE(bufferdata) ((bufferdata)->reduce_outersize) -#define NBF_REDUCE_OUTERDIM(bufferdata) ((bufferdata)->reduce_outerdim) +#define NBF_CORESIZE(bufferdata) ((bufferdata)->coresize) +#define NBF_COREOFFSET(bufferdata) ((bufferdata)->coreoffset) +#define NBF_REDUCE_OUTERSIZE(bufferdata) ((bufferdata)->outersize) +#define NBF_OUTERDIM(bufferdata) ((bufferdata)->outerdim) #define NBF_STRIDES(bufferdata) ( \ &(bufferdata)->bd_flexdata + 0) -#define NBF_PTRS(bufferdata) ((char **) \ - (&(bufferdata)->bd_flexdata + 1*(nop))) #define NBF_REDUCE_OUTERSTRIDES(bufferdata) ( \ - (&(bufferdata)->bd_flexdata + 2*(nop))) + (&(bufferdata)->bd_flexdata + 1*(nop))) #define NBF_REDUCE_OUTERPTRS(bufferdata) ((char **) \ - (&(bufferdata)->bd_flexdata + 3*(nop))) + (&(bufferdata)->bd_flexdata + 2*(nop))) #define NBF_BUFFERS(bufferdata) ((char **) \ - (&(bufferdata)->bd_flexdata + 4*(nop))) + (&(bufferdata)->bd_flexdata + 3*(nop))) #define NBF_TRANSFERINFO(bufferdata) ((NpyIter_TransferInfo *) \ - (&(bufferdata)->bd_flexdata + 5*(nop))) + (&(bufferdata)->bd_flexdata + 4*(nop))) /* Internal-only AXISDATA MEMBER ACCESS. */ struct NpyIter_AxisData_tag { @@ -283,8 +301,6 @@ struct NpyIter_AxisData_tag { #define NAD_INDEX(axisdata) ((axisdata)->index) #define NAD_STRIDES(axisdata) ( \ &(axisdata)->ad_flexdata + 0) -#define NAD_PTRS(axisdata) ((char **) \ - (&(axisdata)->ad_flexdata + 1*(nop+1))) #define NAD_NSTRIDES() \ ((nop) + ((itflags&NPY_ITFLAG_HASINDEX) ? 
1 : 0)) @@ -296,7 +312,7 @@ struct NpyIter_AxisData_tag { /* intp index */ \ 1 + \ /* intp stride[nop+1] AND char* ptr[nop+1] */ \ - 2*((nop)+1) \ + 1*((nop)+1) \ )*(size_t)NPY_SIZEOF_PY_INTPTR_T) /* @@ -364,12 +380,4 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs); NPY_NO_EXPORT void npyiter_clear_buffers(NpyIter *iter); -/* - * Function to get the ArrayMethod flags of the transfer functions. - * TODO: This function should be public and removed from `nditer_impl.h`, but - * this requires making the ArrayMethod flags public API first. - */ -NPY_NO_EXPORT int -NpyIter_GetTransferFlags(NpyIter *iter); - #endif /* NUMPY_CORE_SRC_MULTIARRAY_NDITER_IMPL_H_ */ diff --git a/numpy/_core/src/multiarray/nditer_pywrap.c b/numpy/_core/src/multiarray/nditer_pywrap.c index ad20194f308f..992bc013af3a 100644 --- a/numpy/_core/src/multiarray/nditer_pywrap.c +++ b/numpy/_core/src/multiarray/nditer_pywrap.c @@ -20,6 +20,7 @@ #include "common.h" #include "conversion_utils.h" #include "ctors.h" +#include "npy_pycompat.h" /* Functions not part of the public NumPy C API */ npy_bool npyiter_has_writeback(NpyIter *iter); @@ -42,8 +43,7 @@ struct NewNpyArrayIterObject_tag { PyArray_Descr **dtypes; PyArrayObject **operands; npy_intp *innerstrides, *innerloopsizeptr; - char readflags[NPY_MAXARGS]; - char writeflags[NPY_MAXARGS]; + char *writeflags; /* could inline allocation with variable sized object */ }; static int npyiter_cache_values(NewNpyArrayIterObject *self) @@ -77,8 +77,14 @@ static int npyiter_cache_values(NewNpyArrayIterObject *self) self->innerloopsizeptr = NULL; } - /* The read/write settings */ - NpyIter_GetReadFlags(iter, self->readflags); + if (self->writeflags == NULL) { + self->writeflags = PyMem_Malloc(sizeof(char) * NpyIter_GetNOp(iter)); + if (self->writeflags == NULL) { + PyErr_NoMemory(); + return -1; + } + } + /* The write flags settings (not-readable cannot be signalled to Python) */ NpyIter_GetWriteFlags(iter, self->writeflags); return 0; } @@ -93,6 
+99,7 @@ npyiter_new(PyTypeObject *subtype, PyObject *NPY_UNUSED(args), if (self != NULL) { self->iter = NULL; self->nested_child = NULL; + self->writeflags = NULL; } return (PyObject *)self; @@ -576,66 +583,61 @@ npyiter_convert_op_axes(PyObject *op_axes_in, int nop, return 1; } -/* - * Converts the operand array and op_flags array into the form - * NpyIter_AdvancedNew needs. Sets nop, and on success, each - * op[i] owns a reference to an array object. - */ + static int -npyiter_convert_ops(PyObject *op_in, PyObject *op_flags_in, - PyArrayObject **op, npy_uint32 *op_flags, - int *nop_out) +npyiter_prepare_ops(PyObject *op_in, PyObject **out_owner, PyObject ***out_objs) { - int iop, nop; - - /* nop and op */ + /* Take ownership of op_in (either a tuple/list or single element): */ if (PyTuple_Check(op_in) || PyList_Check(op_in)) { - nop = PySequence_Size(op_in); - if (nop == 0) { + PyObject *seq = PySequence_Fast(op_in, "failed accessing item list"); // noqa: borrowed-ref OK + if (op_in == NULL) { + Py_DECREF(op_in); + return -1; + } + Py_ssize_t length = PySequence_Fast_GET_SIZE(op_in); + if (length == 0) { PyErr_SetString(PyExc_ValueError, "Must provide at least one operand"); - return 0; - } - if (nop > NPY_MAXARGS) { - PyErr_SetString(PyExc_ValueError, "Too many operands"); - return 0; + Py_DECREF(op_in); + return -1; } - - for (iop = 0; iop < nop; ++iop) { - PyObject *item = PySequence_GetItem(op_in, iop); - if (item == NULL) { - npy_intp i; - for (i = 0; i < iop; ++i) { - Py_XDECREF(op[i]); - } - return 0; - } - else if (item == Py_None) { - Py_DECREF(item); - item = NULL; - } - /* This is converted to an array after op flags are retrieved */ - op[iop] = (PyArrayObject *)item; + if (length > NPY_MAX_INT) { + /* NpyIter supports fewer args, but deal with it there. 
*/ + PyErr_Format(PyExc_ValueError, + "Too many operands to nditer, found %zd.", length); + Py_DECREF(op_in); + return -1; } + *out_objs = PySequence_Fast_ITEMS(op_in); + *out_owner = seq; + return (int)length; } else { - nop = 1; - /* Is converted to an array after op flags are retrieved */ Py_INCREF(op_in); - op[0] = (PyArrayObject *)op_in; + *out_objs = out_owner; /* `out_owner` is in caller stack space */ + *out_owner = op_in; + return 1; } +} - *nop_out = nop; - +/* + * Converts the operand array and op_flags array into the form + * NpyIter_AdvancedNew needs. On success, each op[i] owns a reference + * to an array object. + */ +static int +npyiter_convert_ops(int nop, PyObject **op_objs, PyObject *op_flags_in, + PyArrayObject **op, npy_uint32 *op_flags) +{ /* op_flags */ if (op_flags_in == NULL || op_flags_in == Py_None) { - for (iop = 0; iop < nop; ++iop) { + for (int iop = 0; iop < nop; ++iop) { /* * By default, make NULL operands writeonly and flagged for * allocation, and everything else readonly. To write * to a provided operand, you must specify the write flag manually. 
*/ - if (op[iop] == NULL) { + if (op_objs[iop] == Py_None) { op_flags[iop] = NPY_ITER_WRITEONLY | NPY_ITER_ALLOCATE; } else { @@ -645,23 +647,19 @@ npyiter_convert_ops(PyObject *op_in, PyObject *op_flags_in, } else if (npyiter_convert_op_flags_array(op_flags_in, op_flags, nop) != 1) { - for (iop = 0; iop < nop; ++iop) { - Py_XDECREF(op[iop]); - } - *nop_out = 0; return 0; } /* Now that we have the flags - convert all the ops to arrays */ - for (iop = 0; iop < nop; ++iop) { - if (op[iop] != NULL) { + for (int iop = 0; iop < nop; ++iop) { + if (op_objs[iop] != Py_None) { PyArrayObject *ao; int fromanyflags = 0; if (op_flags[iop]&(NPY_ITER_READWRITE|NPY_ITER_WRITEONLY)) { fromanyflags |= NPY_ARRAY_WRITEBACKIFCOPY; } - ao = (PyArrayObject *)PyArray_FROM_OF((PyObject *)op[iop], + ao = (PyArrayObject *)PyArray_FROM_OF((PyObject *)op_objs[iop], fromanyflags); if (ao == NULL) { if (PyErr_Occurred() && @@ -671,13 +669,8 @@ npyiter_convert_ops(PyObject *op_in, PyObject *op_flags_in, "but is an object which cannot be written " "back to via WRITEBACKIFCOPY"); } - for (iop = 0; iop < nop; ++iop) { - Py_DECREF(op[iop]); - } - *nop_out = 0; return 0; } - Py_DECREF(op[iop]); op[iop] = ao; } } @@ -696,19 +689,15 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds) PyObject *op_in = NULL, *op_flags_in = NULL, *op_dtypes_in = NULL, *op_axes_in = NULL; - int iop, nop = 0; - PyArrayObject *op[NPY_MAXARGS]; npy_uint32 flags = 0; NPY_ORDER order = NPY_KEEPORDER; NPY_CASTING casting = NPY_SAFE_CASTING; - npy_uint32 op_flags[NPY_MAXARGS]; - PyArray_Descr *op_request_dtypes[NPY_MAXARGS]; int oa_ndim = -1; - int op_axes_arrays[NPY_MAXARGS][NPY_MAXDIMS]; - int *op_axes[NPY_MAXARGS]; PyArray_Dims itershape = {NULL, -1}; int buffersize = 0; + int res = -1; + if (self->iter != NULL) { PyErr_SetString(PyExc_ValueError, "Iterator was already initialized"); @@ -729,32 +718,84 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds) return -1; } - /* Set 
the dtypes and ops to all NULL to start */ - memset(op_request_dtypes, 0, sizeof(op_request_dtypes)); + /* Need nop to set up workspaces */ + PyObject **op_objs = NULL; + PyObject *op_in_owned = NULL; /* Sequence/object owning op_objs. */ + PyArray_Descr **op_request_dtypes = NULL; + int pre_alloc_fail = 0; + int post_alloc_fail = 0; + int nop; + NPY_DEFINE_WORKSPACE(op, PyArrayObject *, 2 * 8); + NPY_DEFINE_WORKSPACE(op_flags, npy_uint32, 8); + NPY_DEFINE_WORKSPACE(op_axes_storage, int, 8 * NPY_MAXDIMS); + NPY_DEFINE_WORKSPACE(op_axes, int *, 8); + + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(op_in); + + nop = npyiter_prepare_ops(op_in, &op_in_owned, &op_objs); + if (nop < 0) { + pre_alloc_fail = 1; + goto cleanup; + } + + /* allocate workspace for Python objects (operands and dtypes) */ + NPY_INIT_WORKSPACE(op, PyArrayObject *, 2 * 8, 2 * nop); + if (op == NULL) { + pre_alloc_fail = 1; + goto cleanup; + } + memset(op, 0, sizeof(PyObject *) * 2 * nop); + op_request_dtypes = (PyArray_Descr **)(op + nop); + + /* And other workspaces (that do not need to clean up their content) */ + NPY_INIT_WORKSPACE(op_flags, npy_uint32, 8, nop); + NPY_INIT_WORKSPACE(op_axes_storage, int, 8 * NPY_MAXDIMS, nop * NPY_MAXDIMS); + NPY_INIT_WORKSPACE(op_axes, int *, 8, nop); + /* + * Trying to allocate should be OK if one failed, check for error now + * that we can use `goto finish` to clean up everything. + * (NPY_ALLOC_WORKSPACE has to be done before a goto fail currently.) 
+ */ + if (op_flags == NULL || op_axes_storage == NULL || op_axes == NULL) { + post_alloc_fail = 1; + goto cleanup; + } /* op and op_flags */ - if (npyiter_convert_ops(op_in, op_flags_in, op, op_flags, &nop) - != 1) { - goto fail; + if (npyiter_convert_ops(nop, op_objs, op_flags_in, op, op_flags) != 1) { + post_alloc_fail = 1; + goto cleanup; + } + +cleanup:; + + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST(); + + if (pre_alloc_fail) { + goto pre_alloc_fail; + } + + if (post_alloc_fail) { + goto finish; } /* op_request_dtypes */ if (op_dtypes_in != NULL && op_dtypes_in != Py_None && npyiter_convert_dtypes(op_dtypes_in, op_request_dtypes, nop) != 1) { - goto fail; + goto finish; } /* op_axes */ if (op_axes_in != NULL && op_axes_in != Py_None) { /* Initialize to point to the op_axes arrays */ - for (iop = 0; iop < nop; ++iop) { - op_axes[iop] = op_axes_arrays[iop]; + for (int iop = 0; iop < nop; ++iop) { + op_axes[iop] = &op_axes_storage[iop * NPY_MAXDIMS]; } if (npyiter_convert_op_axes(op_axes_in, nop, op_axes, &oa_ndim) != 1) { - goto fail; + goto finish; } } @@ -767,7 +808,7 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds) PyErr_SetString(PyExc_ValueError, "'op_axes' and 'itershape' must have the same number " "of entries equal to the iterator ndim"); - goto fail; + goto finish; } } @@ -778,12 +819,12 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds) buffersize); if (self->iter == NULL) { - goto fail; + goto finish; } /* Cache some values for the member functions to use */ if (npyiter_cache_values(self) < 0) { - goto fail; + goto finish; } if (NpyIter_GetIterSize(self->iter) == 0) { @@ -795,25 +836,25 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds) self->finished = 0; } - npy_free_cache_dim_obj(itershape); + res = 0; - /* Release the references we got to the ops and dtypes */ - for (iop = 0; iop < nop; ++iop) { + finish: + for (int iop = 0; iop < nop; ++iop) { Py_XDECREF(op[iop]); 
Py_XDECREF(op_request_dtypes[iop]); } + npy_free_workspace(op); + npy_free_workspace(op_flags); + npy_free_workspace(op_axes_storage); + npy_free_workspace(op_axes); - return 0; - -fail: + pre_alloc_fail: + Py_XDECREF(op_in_owned); npy_free_cache_dim_obj(itershape); - for (iop = 0; iop < nop; ++iop) { - Py_XDECREF(op[iop]); - Py_XDECREF(op_request_dtypes[iop]); - } - return -1; + return res; } + NPY_NO_EXPORT PyObject * NpyIter_NestedIters(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds) @@ -826,14 +867,11 @@ NpyIter_NestedIters(PyObject *NPY_UNUSED(self), PyObject *op_in = NULL, *axes_in = NULL, *op_flags_in = NULL, *op_dtypes_in = NULL; - int iop, nop = 0, inest, nnest = 0; - PyArrayObject *op[NPY_MAXARGS]; + int iop, inest, nnest = 0; npy_uint32 flags = 0, flags_inner; NPY_ORDER order = NPY_KEEPORDER; NPY_CASTING casting = NPY_SAFE_CASTING; - npy_uint32 op_flags[NPY_MAXARGS], op_flags_inner[NPY_MAXARGS]; - PyArray_Descr *op_request_dtypes[NPY_MAXARGS], - *op_request_dtypes_inner[NPY_MAXARGS]; + int op_axes_data[NPY_MAXDIMS]; int *nested_op_axes[NPY_MAXDIMS]; int nested_naxes[NPY_MAXDIMS], iaxes, naxes; @@ -841,7 +879,8 @@ NpyIter_NestedIters(PyObject *NPY_UNUSED(self), char used_axes[NPY_MAXDIMS]; int buffersize = 0; - PyObject *ret = NULL; + PyObject *res = NULL; /* returned */ + PyObject *ret = NULL; /* intermediate object on failure */ if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|O&OOO&O&i", kwlist, &op_in, @@ -877,7 +916,7 @@ NpyIter_NestedIters(PyObject *NPY_UNUSED(self), } if (!PyTuple_Check(item) && !PyList_Check(item)) { PyErr_SetString(PyExc_ValueError, - "Each item in axes must be a an integer tuple"); + "Each item in axes must be an integer tuple"); Py_DECREF(item); return NULL; } @@ -921,27 +960,55 @@ NpyIter_NestedIters(PyObject *NPY_UNUSED(self), Py_DECREF(item); } - /* op and op_flags */ - if (npyiter_convert_ops(op_in, op_flags_in, op, op_flags, &nop) - != 1) { + /* Need nop to set up workspaces */ + PyObject **op_objs = NULL; + 
PyObject *op_in_owned; /* Sequence/object owning op_objs. */ + int nop = npyiter_prepare_ops(op_in, &op_in_owned, &op_objs); + if (nop < 0) { return NULL; } - /* Set the dtypes to all NULL to start as well */ - memset(op_request_dtypes, 0, sizeof(op_request_dtypes[0])*nop); - memset(op_request_dtypes_inner, 0, - sizeof(op_request_dtypes_inner[0])*nop); + /* allocate workspace for Python objects (operands and dtypes) */ + NPY_ALLOC_WORKSPACE(op, PyArrayObject *, 3 * 8, 3 * nop); + if (op == NULL) { + Py_DECREF(op_in_owned); + return NULL; + } + memset(op, 0, sizeof(PyObject *) * 3 * nop); + PyArray_Descr **op_request_dtypes = (PyArray_Descr **)(op + nop); + PyArray_Descr **op_request_dtypes_inner = op_request_dtypes + nop; + + /* And other workspaces (that do not need to clean up their content) */ + NPY_ALLOC_WORKSPACE(op_flags, npy_uint32, 8, nop); + NPY_ALLOC_WORKSPACE(op_flags_inner, npy_uint32, 8, nop); + NPY_ALLOC_WORKSPACE(op_axes_storage, int, 8 * NPY_MAXDIMS, nop * NPY_MAXDIMS); + NPY_ALLOC_WORKSPACE(op_axes, int *, 2 * 8, 2 * nop); + /* + * Trying to allocate should be OK if one failed, check for error now + * that we can use `goto finish` to clean up everything. + * (NPY_ALLOC_WORKSPACE has to be done before a goto fail currently.) 
+ */ + if (op_flags == NULL || op_axes_storage == NULL || op_axes == NULL) { + goto finish; + } + /* Finalize shared workspace: */ + int **op_axes_nop = op_axes + nop; + + /* op and op_flags */ + if (npyiter_convert_ops(nop, op_objs, op_flags_in, op, op_flags) != 1) { + goto finish; + } /* op_request_dtypes */ if (op_dtypes_in != NULL && op_dtypes_in != Py_None && npyiter_convert_dtypes(op_dtypes_in, op_request_dtypes, nop) != 1) { - goto fail; + goto finish; } ret = PyTuple_New(nnest); if (ret == NULL) { - goto fail; + goto finish; } /* For broadcasting allocated arrays */ @@ -988,8 +1055,6 @@ NpyIter_NestedIters(PyObject *NPY_UNUSED(self), for (inest = 0; inest < nnest; ++inest) { NewNpyArrayIterObject *iter; - int *op_axes_nop[NPY_MAXARGS]; - /* * All the operands' op_axes are the same, except for * allocated outputs. @@ -1023,10 +1088,12 @@ NpyIter_NestedIters(PyObject *NPY_UNUSED(self), /* Allocate the iterator */ iter = (NewNpyArrayIterObject *)npyiter_new(&NpyIter_Type, NULL, NULL); if (iter == NULL) { - Py_DECREF(ret); - goto fail; + goto finish; } + /* Store iter into return tuple (owns the reference). 
*/ + PyTuple_SET_ITEM(ret, inest, (PyObject *)iter); + if (inest < nnest-1) { iter->iter = NpyIter_AdvancedNew(nop, op, flags, order, casting, op_flags, op_request_dtypes, @@ -1044,15 +1111,12 @@ NpyIter_NestedIters(PyObject *NPY_UNUSED(self), } if (iter->iter == NULL) { - Py_DECREF(ret); - Py_DECREF(iter); - goto fail; + goto finish; } /* Cache some values for the member functions to use */ if (npyiter_cache_values(iter) < 0) { - Py_DECREF(ret); - goto fail; + goto finish; } if (NpyIter_GetIterSize(iter->iter) == 0) { @@ -1087,15 +1151,6 @@ NpyIter_NestedIters(PyObject *NPY_UNUSED(self), /* Clear the common dtype flag for the rest of the iterators */ flags &= ~NPY_ITER_COMMON_DTYPE; } - - PyTuple_SET_ITEM(ret, inest, (PyObject *)iter); - } - - /* Release our references to the ops and dtypes */ - for (iop = 0; iop < nop; ++iop) { - Py_XDECREF(op[iop]); - Py_XDECREF(op_request_dtypes[iop]); - Py_XDECREF(op_request_dtypes_inner[iop]); } /* Set up the nested child references */ @@ -1115,20 +1170,29 @@ NpyIter_NestedIters(PyObject *NPY_UNUSED(self), */ if (NpyIter_ResetBasePointers(iter->nested_child->iter, iter->dataptrs, NULL) != NPY_SUCCEED) { - Py_DECREF(ret); - return NULL; + goto finish; } } - return ret; + res = Py_NewRef(ret); + +finish: + Py_DECREF(op_in_owned); + Py_XDECREF(ret); -fail: for (iop = 0; iop < nop; ++iop) { Py_XDECREF(op[iop]); Py_XDECREF(op_request_dtypes[iop]); Py_XDECREF(op_request_dtypes_inner[iop]); } - return NULL; + + npy_free_workspace(op); + npy_free_workspace(op_flags); + npy_free_workspace(op_flags_inner); + npy_free_workspace(op_axes_storage); + npy_free_workspace(op_axes); + + return res; } @@ -1165,6 +1229,7 @@ npyiter_dealloc(NewNpyArrayIterObject *self) self->nested_child = NULL; PyErr_Restore(exc, val, tb); } + PyMem_Free(self->writeflags); Py_TYPE(self)->tp_free((PyObject*)self); } @@ -1332,7 +1397,9 @@ npyiter_remove_multi_index( NpyIter_RemoveMultiIndex(self->iter); /* RemoveMultiIndex invalidates cached values */ - 
npyiter_cache_values(self); + if (npyiter_cache_values(self) < 0) { + return NULL; + } /* RemoveMultiIndex also resets the iterator */ if (NpyIter_GetIterSize(self->iter) == 0) { self->started = 1; @@ -1356,9 +1423,14 @@ npyiter_enable_external_loop( return NULL; } - NpyIter_EnableExternalLoop(self->iter); + if (NpyIter_EnableExternalLoop(self->iter) != NPY_SUCCEED) { + return NULL; + } + /* EnableExternalLoop invalidates cached values */ - npyiter_cache_values(self); + if (npyiter_cache_values(self) < 0) { + return NULL; + } /* EnableExternalLoop also resets the iterator */ if (NpyIter_GetIterSize(self->iter) == 0) { self->started = 1; @@ -2001,21 +2073,6 @@ npyiter_seq_item(NewNpyArrayIterObject *self, Py_ssize_t i) return NULL; } -#if 0 - /* - * This check is disabled because it prevents things like - * np.add(it[0], it[1], it[2]), where it[2] is a write-only - * parameter. When write-only, the value of it[i] is - * likely random junk, as if it were allocated with an - * np.empty(...) call. 
- */ - if (!self->readflags[i]) { - PyErr_Format(PyExc_RuntimeError, - "Iterator operand %zd is write-only", i); - return NULL; - } -#endif - dataptr = self->dataptrs[i]; dtype = self->dtypes[i]; has_external_loop = NpyIter_HasExternalLoop(self->iter); diff --git a/numpy/_core/src/multiarray/nditer_templ.c.src b/numpy/_core/src/multiarray/nditer_templ.c.src index 3f91a482b461..7c6146538bb2 100644 --- a/numpy/_core/src/multiarray/nditer_templ.c.src +++ b/numpy/_core/src/multiarray/nditer_templ.c.src @@ -36,10 +36,11 @@ static int npyiter_iternext_itflags@tag_itflags@_dims@tag_ndim@_iters@tag_nop@( NpyIter *iter) { -#if !(@const_itflags@&NPY_ITFLAG_EXLOOP) || (@const_ndim@ > 1) const npy_uint32 itflags = @const_itflags@; -# if @const_ndim@ >= NPY_MAXDIMS - int idim, ndim = NIT_NDIM(iter); +# if @const_ndim@ <= 2 + int ndim = @const_ndim@; +# else + int ndim = NIT_NDIM(iter); # endif # if @const_nop@ < NPY_MAXDIMS const int nop = @const_nop@; @@ -47,16 +48,8 @@ npyiter_iternext_itflags@tag_itflags@_dims@tag_ndim@_iters@tag_nop@( int nop = NIT_NOP(iter); # endif - NpyIter_AxisData *axisdata0; + NpyIter_AxisData *axisdata; npy_intp istrides, nstrides = NAD_NSTRIDES(); -#endif -#if @const_ndim@ > 1 - NpyIter_AxisData *axisdata1; - npy_intp sizeof_axisdata; -#endif -#if @const_ndim@ > 2 - NpyIter_AxisData *axisdata2; -#endif #if (@const_itflags@&NPY_ITFLAG_RANGE) /* When ranged iteration is enabled, use the iterindex */ @@ -65,114 +58,60 @@ npyiter_iternext_itflags@tag_itflags@_dims@tag_ndim@_iters@tag_nop@( } #endif -#if @const_ndim@ > 1 - sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); -#endif - -# if !(@const_itflags@&NPY_ITFLAG_EXLOOP) || (@const_ndim@ > 1) - axisdata0 = NIT_AXISDATA(iter); -# endif -# if !(@const_itflags@&NPY_ITFLAG_EXLOOP) - /* Increment index 0 */ - NAD_INDEX(axisdata0)++; - /* Increment pointer 0 */ - for (istrides = 0; istrides < nstrides; ++istrides) { - NAD_PTRS(axisdata0)[istrides] += NAD_STRIDES(axisdata0)[istrides]; - } -# endif - 
-#if @const_ndim@ == 1 + npy_intp sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop); + char **ptrs = NIT_DATAPTRS(iter); + axisdata = NIT_AXISDATA(iter); -# if !(@const_itflags@&NPY_ITFLAG_EXLOOP) - /* Finished when the index equals the shape */ - return NAD_INDEX(axisdata0) < NAD_SHAPE(axisdata0); -# else - return 0; +# if @const_itflags@&NPY_ITFLAG_EXLOOP + /* If an external loop is used, the first dimension never changes. */ + NIT_ADVANCE_AXISDATA(axisdata, 1); + ndim--; # endif -#else - -# if !(@const_itflags@&NPY_ITFLAG_EXLOOP) - if (NAD_INDEX(axisdata0) < NAD_SHAPE(axisdata0)) { - return 1; - } -# endif - - axisdata1 = NIT_INDEX_AXISDATA(axisdata0, 1); - /* Increment index 1 */ - NAD_INDEX(axisdata1)++; - /* Increment pointer 1 */ + /* + * Unroll the first dimension. + */ + NAD_INDEX(axisdata) += 1; for (istrides = 0; istrides < nstrides; ++istrides) { - NAD_PTRS(axisdata1)[istrides] += NAD_STRIDES(axisdata1)[istrides]; + ptrs[istrides] += NAD_STRIDES(axisdata)[istrides]; +# if (@const_itflags@&NPY_ITFLAG_EXLOOP) + NIT_USERPTRS(iter)[istrides] = ptrs[istrides]; +# endif } - if (NAD_INDEX(axisdata1) < NAD_SHAPE(axisdata1)) { - /* Reset the 1st index to 0 */ - NAD_INDEX(axisdata0) = 0; - /* Reset the 1st pointer to the value of the 2nd */ - for (istrides = 0; istrides < nstrides; ++istrides) { - NAD_PTRS(axisdata0)[istrides] = NAD_PTRS(axisdata1)[istrides]; - } + if (NAD_INDEX(axisdata) < NAD_SHAPE(axisdata)) { return 1; } -# if @const_ndim@ == 2 - return 0; -# else - - axisdata2 = NIT_INDEX_AXISDATA(axisdata1, 1); - /* Increment index 2 */ - NAD_INDEX(axisdata2)++; - /* Increment pointer 2 */ - for (istrides = 0; istrides < nstrides; ++istrides) { - NAD_PTRS(axisdata2)[istrides] += NAD_STRIDES(axisdata2)[istrides]; - } - - if (NAD_INDEX(axisdata2) < NAD_SHAPE(axisdata2)) { - /* Reset the 1st and 2nd indices to 0 */ - NAD_INDEX(axisdata0) = 0; - NAD_INDEX(axisdata1) = 0; - /* Reset the 1st and 2nd pointers to the value of the 3rd */ + /* + * Now 
continue (with resetting) + */ + for (int idim = 1; idim < ndim; idim++) { + /* reset index and pointers on this dimension to 0 */ + NAD_INDEX(axisdata) = 0; for (istrides = 0; istrides < nstrides; ++istrides) { - NAD_PTRS(axisdata0)[istrides] = NAD_PTRS(axisdata2)[istrides]; - NAD_PTRS(axisdata1)[istrides] = NAD_PTRS(axisdata2)[istrides]; + ptrs[istrides] -= NAD_SHAPE(axisdata) * NAD_STRIDES(axisdata)[istrides]; } - return 1; - } - for (idim = 3; idim < ndim; ++idim) { - NIT_ADVANCE_AXISDATA(axisdata2, 1); - /* Increment the index */ - NAD_INDEX(axisdata2)++; - /* Increment the pointer */ + /* And continue with the next dimension. */ + NIT_ADVANCE_AXISDATA(axisdata, 1); + + /* Increment index and pointers */ + NAD_INDEX(axisdata) += 1; for (istrides = 0; istrides < nstrides; ++istrides) { - NAD_PTRS(axisdata2)[istrides] += NAD_STRIDES(axisdata2)[istrides]; + ptrs[istrides] += NAD_STRIDES(axisdata)[istrides]; +# if (@const_itflags@&NPY_ITFLAG_EXLOOP) + NIT_USERPTRS(iter)[istrides] = ptrs[istrides]; +# endif } - - if (NAD_INDEX(axisdata2) < NAD_SHAPE(axisdata2)) { - /* Reset the indices and pointers of all previous axisdatas */ - axisdata1 = axisdata2; - do { - NIT_ADVANCE_AXISDATA(axisdata1, -1); - /* Reset the index to 0 */ - NAD_INDEX(axisdata1) = 0; - /* Reset the pointer to the updated value */ - for (istrides = 0; istrides < nstrides; ++istrides) { - NAD_PTRS(axisdata1)[istrides] = - NAD_PTRS(axisdata2)[istrides]; - } - } while (axisdata1 != axisdata0); - + if (NAD_INDEX(axisdata) < NAD_SHAPE(axisdata)) { return 1; } } + /* If the loop terminated, ran out of dimensions (end of array) */ return 0; - -# endif /* ndim != 2 */ - -#endif /* ndim != 1 */ } /**end repeat2**/ @@ -202,12 +141,10 @@ npyiter_buffered_reduce_iternext_iters@tag_nop@(NpyIter *iter) int iop; - NpyIter_AxisData *axisdata; NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter); char **ptrs; - char *prev_dataptrs[NPY_MAXARGS]; - ptrs = NBF_PTRS(bufferdata); + ptrs = NIT_USERPTRS(iter); /* * If 
the iterator handles the inner loop, need to increment all @@ -244,9 +181,8 @@ npyiter_buffered_reduce_iternext_iters@tag_nop@(NpyIter *iter) return 1; } - /* Save the previously used data pointers */ - axisdata = NIT_AXISDATA(iter); - memcpy(prev_dataptrs, NAD_PTRS(axisdata), NPY_SIZEOF_INTP*nop); + /* Save the previously used data pointers in the user pointers */ + memcpy(ptrs, NIT_DATAPTRS(iter), NPY_SIZEOF_INTP*nop); /* Write back to the arrays */ if (npyiter_copy_from_buffers(iter) < 0) { @@ -265,7 +201,7 @@ npyiter_buffered_reduce_iternext_iters@tag_nop@(NpyIter *iter) } /* Prepare the next buffers and set iterend/size */ - if (npyiter_copy_to_buffers(iter, prev_dataptrs) < 0) { + if (npyiter_copy_to_buffers(iter, ptrs) < 0) { npyiter_clear_buffers(iter); return 0; } @@ -297,7 +233,7 @@ npyiter_buffered_iternext(NpyIter *iter) char **ptrs; strides = NBF_STRIDES(bufferdata); - ptrs = NBF_PTRS(bufferdata); + ptrs = NIT_USERPTRS(iter); for (iop = 0; iop < nop; ++iop) { ptrs[iop] += strides[iop]; } diff --git a/numpy/_core/src/multiarray/npy_static_data.c b/numpy/_core/src/multiarray/npy_static_data.c new file mode 100644 index 000000000000..1c31dcd5d810 --- /dev/null +++ b/numpy/_core/src/multiarray/npy_static_data.c @@ -0,0 +1,289 @@ +/* numpy static data structs and initialization */ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _UMATHMODULE +#define _MULTIARRAYMODULE + +#define PY_SSIZE_T_CLEAN +#include +#include + +#include "numpy/ndarraytypes.h" +#include "numpy/npy_common.h" +#include "numpy/arrayobject.h" +#include "npy_import.h" +#include "npy_static_data.h" +#include "extobj.h" + +// static variables are zero-filled by default, no need to explicitly do so +NPY_VISIBILITY_HIDDEN npy_interned_str_struct npy_interned_str; +NPY_VISIBILITY_HIDDEN npy_static_pydata_struct npy_static_pydata; +NPY_VISIBILITY_HIDDEN npy_static_cdata_struct npy_static_cdata; + +#define INTERN_STRING(struct_member, string) \ + assert(npy_interned_str.struct_member == 
NULL); \ + npy_interned_str.struct_member = PyUnicode_InternFromString(string); \ + if (npy_interned_str.struct_member == NULL) { \ + return -1; \ + } \ + +NPY_NO_EXPORT int +intern_strings(void) +{ + INTERN_STRING(current_allocator, "current_allocator"); + INTERN_STRING(array, "__array__"); + INTERN_STRING(array_function, "__array_function__"); + INTERN_STRING(array_struct, "__array_struct__"); + INTERN_STRING(array_priority, "__array_priority__"); + INTERN_STRING(array_interface, "__array_interface__"); + INTERN_STRING(array_ufunc, "__array_ufunc__"); + INTERN_STRING(array_wrap, "__array_wrap__"); + INTERN_STRING(array_finalize, "__array_finalize__"); + INTERN_STRING(numpy_dtype, "__numpy_dtype__"); + INTERN_STRING(implementation, "_implementation"); + INTERN_STRING(axis1, "axis1"); + INTERN_STRING(axis2, "axis2"); + INTERN_STRING(item, "item"); + INTERN_STRING(like, "like"); + INTERN_STRING(numpy, "numpy"); + INTERN_STRING(where, "where"); + INTERN_STRING(convert, "convert"); + INTERN_STRING(preserve, "preserve"); + INTERN_STRING(convert_if_no_array, "convert_if_no_array"); + INTERN_STRING(cpu, "cpu"); + INTERN_STRING(dtype, "dtype"); + INTERN_STRING( + array_err_msg_substr, + "__array__() got an unexpected keyword argument 'copy'"); + INTERN_STRING(out, "out"); + INTERN_STRING(errmode_strings[0], "ignore"); + INTERN_STRING(errmode_strings[1], "warn"); + INTERN_STRING(errmode_strings[2], "raise"); + INTERN_STRING(errmode_strings[3], "call"); + INTERN_STRING(errmode_strings[4], "print"); + INTERN_STRING(errmode_strings[5], "log"); + INTERN_STRING(__dlpack__, "__dlpack__"); + INTERN_STRING(pyvals_name, "UFUNC_PYVALS_NAME"); + INTERN_STRING(legacy, "legacy"); + INTERN_STRING(__doc__, "__doc__"); + INTERN_STRING(__signature__, "__signature__"); + INTERN_STRING(copy, "copy"); + INTERN_STRING(dl_device, "dl_device"); + INTERN_STRING(max_version, "max_version"); + INTERN_STRING(array_dealloc, "array_dealloc"); + return 0; +} + +#define IMPORT_GLOBAL(base_path, name, 
object) \ + assert(object == NULL); \ + object = npy_import(base_path, name); \ + if (object == NULL) { \ + return -1; \ + } + + +/* + * Initializes global constants. + * + * All global constants should live inside the npy_static_pydata + * struct. + * + * Not all entries in the struct are initialized here, some are + * initialized later but care must be taken in those cases to initialize + * the constant in a thread-safe manner, ensuring it is initialized + * exactly once. + * + * Anything initialized here is initialized during module import which + * the python interpreter ensures is done in a single thread. + * + * Anything imported here should not need the C-layer at all and will be + * imported before anything on the C-side is initialized. + */ +NPY_NO_EXPORT int +initialize_static_globals(void) +{ + /* + * Initialize contents of npy_static_pydata struct + * + * This struct holds cached references to python objects + * that we want to keep alive for the lifetime of the + * module for performance reasons + */ + + IMPORT_GLOBAL("math", "floor", + npy_static_pydata.math_floor_func); + + IMPORT_GLOBAL("math", "ceil", + npy_static_pydata.math_ceil_func); + + IMPORT_GLOBAL("math", "trunc", + npy_static_pydata.math_trunc_func); + + IMPORT_GLOBAL("math", "gcd", + npy_static_pydata.math_gcd_func); + + IMPORT_GLOBAL("numpy.exceptions", "AxisError", + npy_static_pydata.AxisError); + + IMPORT_GLOBAL("numpy.exceptions", "ComplexWarning", + npy_static_pydata.ComplexWarning); + + IMPORT_GLOBAL("numpy.exceptions", "DTypePromotionError", + npy_static_pydata.DTypePromotionError); + + IMPORT_GLOBAL("numpy.exceptions", "TooHardError", + npy_static_pydata.TooHardError); + + IMPORT_GLOBAL("numpy.exceptions", "VisibleDeprecationWarning", + npy_static_pydata.VisibleDeprecationWarning); + + IMPORT_GLOBAL("numpy._globals", "_CopyMode", + npy_static_pydata._CopyMode); + + IMPORT_GLOBAL("numpy._globals", "_NoValue", + npy_static_pydata._NoValue); + + 
IMPORT_GLOBAL("numpy._core._exceptions", "_ArrayMemoryError", + npy_static_pydata._ArrayMemoryError); + + IMPORT_GLOBAL("numpy._core._exceptions", "_UFuncBinaryResolutionError", + npy_static_pydata._UFuncBinaryResolutionError); + + IMPORT_GLOBAL("numpy._core._exceptions", "_UFuncInputCastingError", + npy_static_pydata._UFuncInputCastingError); + + IMPORT_GLOBAL("numpy._core._exceptions", "_UFuncNoLoopError", + npy_static_pydata._UFuncNoLoopError); + + IMPORT_GLOBAL("numpy._core._exceptions", "_UFuncOutputCastingError", + npy_static_pydata._UFuncOutputCastingError); + + IMPORT_GLOBAL("numpy._core.printoptions", "format_options", + npy_static_pydata.format_options); + + IMPORT_GLOBAL("os", "fspath", + npy_static_pydata.os_fspath); + + IMPORT_GLOBAL("os", "PathLike", + npy_static_pydata.os_PathLike); + + // default_truediv_type_tup + PyArray_Descr *tmp = PyArray_DescrFromType(NPY_DOUBLE); + npy_static_pydata.default_truediv_type_tup = + PyTuple_Pack(3, tmp, tmp, tmp); + Py_DECREF(tmp); + if (npy_static_pydata.default_truediv_type_tup == NULL) { + return -1; + } + + npy_static_pydata.kwnames_is_copy = + Py_BuildValue("(O)", npy_interned_str.copy); + if (npy_static_pydata.kwnames_is_copy == NULL) { + return -1; + } + + npy_static_pydata.one_obj = PyLong_FromLong((long) 1); + if (npy_static_pydata.one_obj == NULL) { + return -1; + } + + npy_static_pydata.zero_obj = PyLong_FromLong((long) 0); + if (npy_static_pydata.zero_obj == NULL) { + return -1; + } + + npy_static_pydata.dl_call_kwnames = + Py_BuildValue("(OOO)", npy_interned_str.dl_device, + npy_interned_str.copy, + npy_interned_str.max_version); + if (npy_static_pydata.dl_call_kwnames == NULL) { + return -1; + } + + npy_static_pydata.dl_cpu_device_tuple = Py_BuildValue("(i,i)", 1, 0); + if (npy_static_pydata.dl_cpu_device_tuple == NULL) { + return -1; + } + + npy_static_pydata.dl_max_version = Py_BuildValue("(i,i)", 1, 0); + if (npy_static_pydata.dl_max_version == NULL) { + return -1; + } + + /* + * Initialize 
contents of npy_static_cdata struct + * + * Note that some entries are initialized elsewhere. Care + * must be taken to ensure all entries are initialized during + * module initialization and immutable thereafter. + * + * This struct holds global static caches. These are set + * up this way for performance reasons. + */ + + PyObject *flags = PySys_GetObject("flags"); /* borrowed object */ + if (flags == NULL) { + PyErr_SetString(PyExc_AttributeError, "cannot get sys.flags"); + return -1; + } + PyObject *level = PyObject_GetAttrString(flags, "optimize"); + if (level == NULL) { + return -1; + } + npy_static_cdata.optimize = PyLong_AsLong(level); + Py_DECREF(level); + + /* + * see unpack_bits for how this table is used. + * + * LUT for bigendian bitorder, littleendian is handled via + * byteswapping in the loop. + * + * 256 8 byte blocks representing 8 bits expanded to 1 or 0 bytes + */ + npy_intp j; + for (j=0; j < 256; j++) { + npy_intp k; + for (k=0; k < 8; k++) { + npy_uint8 v = (j & (1 << k)) == (1 << k); + npy_static_cdata.unpack_lookup_big[j].bytes[7 - k] = v; + } + } + + return 0; +} + + +/* + * Verifies all entries in npy_interned_str and npy_static_pydata are + * non-NULL. + * + * Called at the end of initialization for _multiarray_umath. Some + * entries are initialized outside of this file because they depend on + * items that are initialized late in module initialization but they + * should all be initialized by the time this function is called. 
+ */ +NPY_NO_EXPORT int +verify_static_structs_initialized(void) { + // verify all entries in npy_interned_str are filled in + for (int i=0; i < (sizeof(npy_interned_str_struct)/sizeof(PyObject *)); i++) { + if (*(((PyObject **)&npy_interned_str) + i) == NULL) { + PyErr_Format( + PyExc_SystemError, + "NumPy internal error: NULL entry detected in " + "npy_interned_str at index %d", i); + return -1; + } + } + + // verify all entries in npy_static_pydata are filled in + for (int i=0; i < (sizeof(npy_static_pydata_struct)/sizeof(PyObject *)); i++) { + if (*(((PyObject **)&npy_static_pydata) + i) == NULL) { + PyErr_Format( + PyExc_SystemError, + "NumPy internal error: NULL entry detected in " + "npy_static_pydata at index %d", i); + return -1; + } + } + return 0; +} diff --git a/numpy/_core/src/multiarray/npy_static_data.h b/numpy/_core/src/multiarray/npy_static_data.h new file mode 100644 index 000000000000..68b3d27c8160 --- /dev/null +++ b/numpy/_core/src/multiarray/npy_static_data.h @@ -0,0 +1,192 @@ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_STATIC_DATA_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_STATIC_DATA_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +NPY_NO_EXPORT int +initialize_static_globals(void); + +NPY_NO_EXPORT int +intern_strings(void); + +NPY_NO_EXPORT int +verify_static_structs_initialized(void); + +typedef struct npy_interned_str_struct { + PyObject *current_allocator; + PyObject *array; + PyObject *array_function; + PyObject *array_struct; + PyObject *array_priority; + PyObject *array_interface; + PyObject *array_wrap; + PyObject *array_finalize; + PyObject *array_ufunc; + PyObject *numpy_dtype; + PyObject *implementation; + PyObject *axis1; + PyObject *axis2; + PyObject *item; + PyObject *like; + PyObject *numpy; + PyObject *where; + PyObject *convert; + PyObject *preserve; + PyObject *convert_if_no_array; + PyObject *cpu; + PyObject *dtype; + PyObject *array_err_msg_substr; + PyObject *out; + PyObject *errmode_strings[6]; + PyObject *__dlpack__; + PyObject 
*pyvals_name; + PyObject *legacy; + PyObject *__doc__; + PyObject *__signature__; + PyObject *copy; + PyObject *dl_device; + PyObject *max_version; + PyObject *array_dealloc; +} npy_interned_str_struct; + +/* + * A struct that stores static global data used throughout + * _multiarray_umath, mostly to cache results that would be + * prohibitively expensive to compute at runtime in a tight loop. + * + * All items in this struct should be initialized during module + * initialization and thereafter should be immutable. Mutating items in + * this struct after module initialization is likely not thread-safe. + */ + +typedef struct npy_static_pydata_struct { + /* + * Used in ufunc_type_resolution.c to avoid reconstructing a tuple + * storing the default true division return types. + */ + PyObject *default_truediv_type_tup; + + /* + * Used to set up the default extobj context variable + */ + PyObject *default_extobj_capsule; + + /* + * The global ContextVar to store the extobject. It is exposed to Python + * as `_extobj_contextvar`. 
+ */ + PyObject *npy_extobj_contextvar; + + /* + * A reference to ndarray's implementations for __array_*__ special methods + */ + PyObject *ndarray_array_ufunc; + PyObject *ndarray_array_finalize; + PyObject *ndarray_array_function; + + /* + * References to the '1' and '0' PyLong objects + */ + PyObject *one_obj; + PyObject *zero_obj; + + /* + * Reference to an np.array(0, dtype=np.long) instance + */ + PyObject *zero_pyint_like_arr; + + /* + * References to items obtained via an import at module initialization + */ + PyObject *AxisError; + PyObject *ComplexWarning; + PyObject *DTypePromotionError; + PyObject *TooHardError; + PyObject *VisibleDeprecationWarning; + PyObject *_CopyMode; + PyObject *_NoValue; + PyObject *_ArrayMemoryError; + PyObject *_UFuncBinaryResolutionError; + PyObject *_UFuncInputCastingError; + PyObject *_UFuncNoLoopError; + PyObject *_UFuncOutputCastingError; + PyObject *math_floor_func; + PyObject *math_ceil_func; + PyObject *math_trunc_func; + PyObject *math_gcd_func; + PyObject *os_PathLike; + PyObject *os_fspath; + PyObject *format_options; + + /* + * Used in the __array__ internals to avoid building a tuple inline + */ + PyObject *kwnames_is_copy; + + /* + * Used in __imatmul__ to avoid building tuples inline + */ + PyObject *axes_1d_obj_kwargs; + PyObject *axes_2d_obj_kwargs; + + /* + * Used for CPU feature detection and dispatch + */ + PyObject *cpu_dispatch_registry; + + /* + * references to ArrayMethod implementations that are cached + * to avoid repeatedly creating them + */ + PyObject *VoidToGenericMethod; + PyObject *GenericToVoidMethod; + PyObject *ObjectToGenericMethod; + PyObject *GenericToObjectMethod; + + /* + * Used in from_dlpack + */ + PyObject *dl_call_kwnames; + PyObject *dl_cpu_device_tuple; + PyObject *dl_max_version; +} npy_static_pydata_struct; + + +typedef struct npy_static_cdata_struct { + /* + * stores sys.flags.optimize as a long, which is used in the add_docstring + * implementation + */ + long optimize; + + /* 
+ * LUT used by unpack_bits + */ + union { + npy_uint8 bytes[8]; + npy_uint64 uint64; + } unpack_lookup_big[256]; + + /* + * A look-up table to recover integer type numbers from type characters. + * + * See the _MAX_LETTER and LETTER_TO_NUM macros in arraytypes.c.src. + * + * The smallest type number is ?, the largest is bounded by 'z'. + * + * This is initialized alongside the built-in dtypes + */ + npy_int16 _letter_to_num['z' + 1 - '?']; +} npy_static_cdata_struct; + +NPY_VISIBILITY_HIDDEN extern npy_interned_str_struct npy_interned_str; +NPY_VISIBILITY_HIDDEN extern npy_static_pydata_struct npy_static_pydata; +NPY_VISIBILITY_HIDDEN extern npy_static_cdata_struct npy_static_cdata; + +#ifdef __cplusplus +} +#endif + +#endif // NUMPY_CORE_SRC_MULTIARRAY_STATIC_DATA_H_ diff --git a/numpy/_core/src/multiarray/number.c b/numpy/_core/src/multiarray/number.c index 9532662b327a..e27079a569ef 100644 --- a/numpy/_core/src/multiarray/number.c +++ b/numpy/_core/src/multiarray/number.c @@ -23,6 +23,10 @@ **************** Implement Number Protocol **************************** *************************************************************************/ +// this is not in the global data struct to avoid needing to include the +// definition of the NumericOps struct in multiarraymodule.h +// +// it is filled in during module initialization in a thread-safe manner NPY_NO_EXPORT NumericOps n_ops; /* NB: static objects initialized to zero */ /* @@ -118,6 +122,20 @@ _PyArray_SetNumericOps(PyObject *dict) SET(conjugate); SET(matmul); SET(clip); + + // initialize static globals needed for matmul + npy_static_pydata.axes_1d_obj_kwargs = Py_BuildValue( + "{s, [(i), (i, i), (i)]}", "axes", -1, -2, -1, -1); + if (npy_static_pydata.axes_1d_obj_kwargs == NULL) { + return -1; + } + + npy_static_pydata.axes_2d_obj_kwargs = Py_BuildValue( + "{s, [(i, i), (i, i), (i, i)]}", "axes", -2, -1, -2, -1, -2, -1); + if (npy_static_pydata.axes_2d_obj_kwargs == NULL) { + return -1; + } + return 0; } @@ 
-128,6 +146,9 @@ _get_keywords(int rtype, PyArrayObject *out) PyObject *kwds = NULL; if (rtype != NPY_NOTYPE || out != NULL) { kwds = PyDict_New(); + if (kwds == NULL) { + return NULL; + } if (rtype != NPY_NOTYPE) { PyArray_Descr *descr; descr = PyArray_DescrFromType(rtype); @@ -151,13 +172,16 @@ PyArray_GenericReduceFunction(PyArrayObject *m1, PyObject *op, int axis, PyObject *kwds; args = Py_BuildValue("(Oi)", m1, axis); + if (args == NULL) { + return NULL; + } kwds = _get_keywords(rtype, out); meth = PyObject_GetAttrString(op, "reduce"); if (meth && PyCallable_Check(meth)) { ret = PyObject_Call(meth, args, kwds); } Py_DECREF(args); - Py_DECREF(meth); + Py_XDECREF(meth); Py_XDECREF(kwds); return ret; } @@ -171,13 +195,16 @@ PyArray_GenericAccumulateFunction(PyArrayObject *m1, PyObject *op, int axis, PyObject *kwds; args = Py_BuildValue("(Oi)", m1, axis); + if (args == NULL) { + return NULL; + } kwds = _get_keywords(rtype, out); meth = PyObject_GetAttrString(op, "accumulate"); if (meth && PyCallable_Check(meth)) { ret = PyObject_Call(meth, args, kwds); } Py_DECREF(args); - Py_DECREF(meth); + Py_XDECREF(meth); Py_XDECREF(kwds); return ret; } @@ -268,15 +295,15 @@ array_matrix_multiply(PyObject *m1, PyObject *m2) static PyObject * array_inplace_matrix_multiply(PyArrayObject *self, PyObject *other) { - static PyObject *AxisError_cls = NULL; - npy_cache_import("numpy.exceptions", "AxisError", &AxisError_cls); - if (AxisError_cls == NULL) { - return NULL; - } - INPLACE_GIVE_UP_IF_NEEDED(self, other, nb_inplace_matrix_multiply, array_inplace_matrix_multiply); + PyObject *args = PyTuple_Pack(3, self, other, self); + if (args == NULL) { + return NULL; + } + PyObject *kwargs; + /* * Unlike `matmul(a, b, out=a)` we ensure that the result is not broadcast * if the result without `out` would have less dimensions than `a`. 
@@ -286,33 +313,11 @@ array_inplace_matrix_multiply(PyArrayObject *self, PyObject *other) * The error here will be confusing, but for now, we enforce this by * passing the correct `axes=`. */ - static PyObject *axes_1d_obj_kwargs = NULL; - static PyObject *axes_2d_obj_kwargs = NULL; - if (NPY_UNLIKELY(axes_1d_obj_kwargs == NULL)) { - axes_1d_obj_kwargs = Py_BuildValue( - "{s, [(i), (i, i), (i)]}", "axes", -1, -2, -1, -1); - if (axes_1d_obj_kwargs == NULL) { - return NULL; - } - } - if (NPY_UNLIKELY(axes_2d_obj_kwargs == NULL)) { - axes_2d_obj_kwargs = Py_BuildValue( - "{s, [(i, i), (i, i), (i, i)]}", "axes", -2, -1, -2, -1, -2, -1); - if (axes_2d_obj_kwargs == NULL) { - return NULL; - } - } - - PyObject *args = PyTuple_Pack(3, self, other, self); - if (args == NULL) { - return NULL; - } - PyObject *kwargs; if (PyArray_NDIM(self) == 1) { - kwargs = axes_1d_obj_kwargs; + kwargs = npy_static_pydata.axes_1d_obj_kwargs; } else { - kwargs = axes_2d_obj_kwargs; + kwargs = npy_static_pydata.axes_2d_obj_kwargs; } PyObject *res = PyObject_Call(n_ops.matmul, args, kwargs); Py_DECREF(args); @@ -322,7 +327,7 @@ array_inplace_matrix_multiply(PyArrayObject *self, PyObject *other) * AxisError should indicate that the axes argument didn't work out * which should mean the second operand not being 2 dimensional. */ - if (PyErr_ExceptionMatches(AxisError_cls)) { + if (PyErr_ExceptionMatches(npy_static_pydata.AxisError)) { PyErr_SetString(PyExc_ValueError, "inplace matrix multiplication requires the first operand to " "have at least one and the second at least two dimensions."); @@ -332,165 +337,59 @@ array_inplace_matrix_multiply(PyArrayObject *self, PyObject *other) return res; } -/* - * Determine if object is a scalar and if so, convert the object - * to a double and place it in the out_exponent argument - * and return the "scalar kind" as a result. If the object is - * not a scalar (or if there are other error conditions) - * return NPY_NOSCALAR, and out_exponent is undefined. 
- */ -static NPY_SCALARKIND -is_scalar_with_conversion(PyObject *o2, double* out_exponent) +static int +fast_scalar_power(PyObject *o1, PyObject *o2, int inplace, PyObject **result) { - PyObject *temp; - const int optimize_fpexps = 1; + PyObject *fastop = NULL; - if (PyLong_Check(o2)) { - long tmp = PyLong_AsLong(o2); - if (error_converting(tmp)) { - PyErr_Clear(); - return NPY_NOSCALAR; + if (PyLong_CheckExact(o2)) { + int overflow = 0; + long exp = PyLong_AsLongAndOverflow(o2, &overflow); + if (overflow != 0) { + return -1; } - *out_exponent = (double)tmp; - return NPY_INTPOS_SCALAR; - } - if (optimize_fpexps && PyFloat_Check(o2)) { - *out_exponent = PyFloat_AsDouble(o2); - return NPY_FLOAT_SCALAR; - } - - if (PyArray_Check(o2)) { - if ((PyArray_NDIM((PyArrayObject *)o2) == 0) && - ((PyArray_ISINTEGER((PyArrayObject *)o2) || - (optimize_fpexps && PyArray_ISFLOAT((PyArrayObject *)o2))))) { - temp = Py_TYPE(o2)->tp_as_number->nb_float(o2); - if (temp == NULL) { - return NPY_NOSCALAR; - } - *out_exponent = PyFloat_AsDouble(o2); - Py_DECREF(temp); - if (PyArray_ISINTEGER((PyArrayObject *)o2)) { - return NPY_INTPOS_SCALAR; - } - else { /* ISFLOAT */ - return NPY_FLOAT_SCALAR; - } + if (exp == -1) { + fastop = n_ops.reciprocal; } - } - else if (PyArray_IsScalar(o2, Integer) || - (optimize_fpexps && PyArray_IsScalar(o2, Floating))) { - temp = Py_TYPE(o2)->tp_as_number->nb_float(o2); - if (temp == NULL) { - return NPY_NOSCALAR; - } - *out_exponent = PyFloat_AsDouble(o2); - Py_DECREF(temp); - - if (PyArray_IsScalar(o2, Integer)) { - return NPY_INTPOS_SCALAR; + else if (exp == 2) { + fastop = n_ops.square; } - else { /* IsScalar(o2, Floating) */ - return NPY_FLOAT_SCALAR; + else { + return 1; } } - else if (PyIndex_Check(o2)) { - PyObject* value = PyNumber_Index(o2); - Py_ssize_t val; - if (value == NULL) { - if (PyErr_Occurred()) { - PyErr_Clear(); - } - return NPY_NOSCALAR; + else if (PyFloat_CheckExact(o2)) { + double exp = PyFloat_AsDouble(o2); + if (exp == 0.5) { + 
fastop = n_ops.sqrt; } - val = PyLong_AsSsize_t(value); - Py_DECREF(value); - if (error_converting(val)) { - PyErr_Clear(); - return NPY_NOSCALAR; + else { + return 1; } - *out_exponent = (double) val; - return NPY_INTPOS_SCALAR; } - return NPY_NOSCALAR; -} + else { + return 1; + } -/* - * optimize float array or complex array to a scalar power - * returns 0 on success, -1 if no optimization is possible - * the result is in value (can be NULL if an error occurred) - */ -static int -fast_scalar_power(PyObject *o1, PyObject *o2, int inplace, - PyObject **value) -{ - double exponent; - NPY_SCALARKIND kind; /* NPY_NOSCALAR is not scalar */ - - if (PyArray_Check(o1) && - !PyArray_ISOBJECT((PyArrayObject *)o1) && - ((kind=is_scalar_with_conversion(o2, &exponent))>0)) { - PyArrayObject *a1 = (PyArrayObject *)o1; - PyObject *fastop = NULL; - if (PyArray_ISFLOAT(a1) || PyArray_ISCOMPLEX(a1)) { - if (exponent == 1.0) { - fastop = n_ops.positive; - } - else if (exponent == -1.0) { - fastop = n_ops.reciprocal; - } - else if (exponent == 0.0) { - fastop = n_ops._ones_like; - } - else if (exponent == 0.5) { - fastop = n_ops.sqrt; - } - else if (exponent == 2.0) { - fastop = n_ops.square; - } - else { - return -1; - } + PyArrayObject *a1 = (PyArrayObject *)o1; + if (PyArray_ISOBJECT(a1)) { + return 1; + } + if (fastop != n_ops.square && !PyArray_ISFLOAT(a1) && !PyArray_ISCOMPLEX(a1)) { + // we special-case squaring for any array type + // gh-29388 + return 1; + } - if (inplace || can_elide_temp_unary(a1)) { - *value = PyArray_GenericInplaceUnaryFunction(a1, fastop); - } - else { - *value = PyArray_GenericUnaryFunction(a1, fastop); - } - return 0; - } - /* Because this is called with all arrays, we need to - * change the output if the kind of the scalar is different - * than that of the input and inplace is not on --- - * (thus, the input should be up-cast) - */ - else if (exponent == 2.0) { - fastop = n_ops.square; - if (inplace) { - *value = 
PyArray_GenericInplaceUnaryFunction(a1, fastop); - } - else { - /* We only special-case the FLOAT_SCALAR and integer types */ - if (kind == NPY_FLOAT_SCALAR && PyArray_ISINTEGER(a1)) { - PyArray_Descr *dtype = PyArray_DescrFromType(NPY_DOUBLE); - a1 = (PyArrayObject *)PyArray_CastToType(a1, dtype, - PyArray_ISFORTRAN(a1)); - if (a1 != NULL) { - /* cast always creates a new array */ - *value = PyArray_GenericInplaceUnaryFunction(a1, fastop); - Py_DECREF(a1); - } - } - else { - *value = PyArray_GenericUnaryFunction(a1, fastop); - } - } - return 0; - } + if (inplace || can_elide_temp_unary(a1)) { + *result = PyArray_GenericInplaceUnaryFunction(a1, fastop); + } + else { + *result = PyArray_GenericUnaryFunction(a1, fastop); } - /* no fast operation found */ - return -1; + + return 0; } static PyObject * @@ -647,7 +546,8 @@ array_inplace_power(PyArrayObject *a1, PyObject *o2, PyObject *NPY_UNUSED(modulo INPLACE_GIVE_UP_IF_NEEDED( a1, o2, nb_inplace_power, array_inplace_power); - if (fast_scalar_power((PyObject *)a1, o2, 1, &value) != 0) { + + if (fast_scalar_power((PyObject *) a1, o2, 1, &value) != 0) { value = PyArray_GenericInplaceBinaryFunction(a1, o2, n_ops.power); } return value; @@ -759,13 +659,10 @@ _array_nonzero(PyArrayObject *mp) return res; } else if (n == 0) { - /* 2017-09-25, 1.14 */ - if (DEPRECATE("The truth value of an empty array is ambiguous. " - "Returning False, but in future this will result in an error. " - "Use `array.size > 0` to check that an array is not empty.") < 0) { - return -1; - } - return 0; + PyErr_SetString(PyExc_ValueError, + "The truth value of an empty array is ambiguous. 
" + "Use `array.size > 0` to check that an array is not empty."); + return -1; } else { PyErr_SetString(PyExc_ValueError, diff --git a/numpy/_core/src/multiarray/public_dtype_api.c b/numpy/_core/src/multiarray/public_dtype_api.c index 60dceae3275d..9b2d7a393842 100644 --- a/numpy/_core/src/multiarray/public_dtype_api.c +++ b/numpy/_core/src/multiarray/public_dtype_api.c @@ -71,7 +71,9 @@ PyArrayInitDTypeMeta_FromSpec( return -1; } - dtypemeta_initialize_struct_from_spec(DType, spec, 0); + if (dtypemeta_initialize_struct_from_spec(DType, spec, 0) < 0) { + return -1; + } if (NPY_DT_SLOTS(DType)->setitem == NULL || NPY_DT_SLOTS(DType)->getitem == NULL) { diff --git a/numpy/_core/src/multiarray/refcount.c b/numpy/_core/src/multiarray/refcount.c index 1bc693532646..9c5a15da03f5 100644 --- a/numpy/_core/src/multiarray/refcount.c +++ b/numpy/_core/src/multiarray/refcount.c @@ -18,8 +18,8 @@ #include "iterators.h" #include "dtypemeta.h" #include "refcount.h" - #include "npy_config.h" +#include "templ_common.h" /* for npy_mul_sizes_with_overflow */ @@ -56,6 +56,53 @@ PyArray_ClearBuffer( } +/* + * Helper function to zero an array buffer. + * + * Here "zeroing" means an abstract zeroing operation, implementing the + * the behavior of `np.zeros`. E.g. for an of references this is more + * complicated than zero-filling the buffer. + * + * Failure (returns -1) indicates some sort of programming or logical + * error and should not happen for a data type that has been set up + * correctly. In principle a sufficiently weird dtype might run out of + * memory but in practice this likely won't happen. 
+ */ +NPY_NO_EXPORT int +PyArray_ZeroContiguousBuffer( + PyArray_Descr *descr, char *data, + npy_intp stride, npy_intp size, int aligned) +{ + NPY_traverse_info zero_info; + NPY_traverse_info_init(&zero_info); + /* Flags unused: float errors do not matter and we do not release GIL */ + NPY_ARRAYMETHOD_FLAGS flags_unused; + PyArrayMethod_GetTraverseLoop *get_fill_zero_loop = + NPY_DT_SLOTS(NPY_DTYPE(descr))->get_fill_zero_loop; + if (get_fill_zero_loop != NULL) { + if (get_fill_zero_loop( + NULL, descr, aligned, descr->elsize, &(zero_info.func), + &(zero_info.auxdata), &flags_unused) < 0) { + return -1; + } + } + else { + assert(zero_info.func == NULL); + } + if (zero_info.func == NULL) { + /* the multiply here should never overflow, since we already + checked if the new array size doesn't overflow */ + memset(data, 0, size*stride); + return 0; + } + + int res = zero_info.func( + NULL, descr, data, size, stride, zero_info.auxdata); + NPY_traverse_info_xfree(&zero_info); + return res; +} + + /* * Helper function to clear whole array. It seems plausible that we should * be able to get away with assuming the array is contiguous. @@ -77,7 +124,7 @@ PyArray_ClearBuffer( * and only arrays which own their memory should clear it. 
*/ int aligned = PyArray_ISALIGNED(arr); - if (PyArray_ISCONTIGUOUS(arr)) { + if (PyArray_ISCONTIGUOUS(arr) || PyArray_IS_F_CONTIGUOUS(arr)) { return PyArray_ClearBuffer( descr, PyArray_BYTES(arr), descr->elsize, PyArray_SIZE(arr), aligned); @@ -105,10 +152,12 @@ PyArray_ClearBuffer( /* Process the innermost dimension */ if (clear_info.func(NULL, clear_info.descr, data_it, inner_shape, inner_stride, clear_info.auxdata) < 0) { + NPY_traverse_info_xfree(&clear_info); return -1; } } NPY_RAW_ITER_ONE_NEXT(idim, ndim, coord, shape_it, data_it, strides_it); + NPY_traverse_info_xfree(&clear_info); return 0; } @@ -137,7 +186,7 @@ PyArray_Item_INCREF(char *data, PyArray_Descr *descr) int offset; Py_ssize_t pos = 0; - while (PyDict_Next(PyDataType_FIELDS(descr), &pos, &key, &value)) { + while (PyDict_Next(PyDataType_FIELDS(descr), &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } @@ -199,7 +248,7 @@ PyArray_Item_XDECREF(char *data, PyArray_Descr *descr) int offset; Py_ssize_t pos = 0; - while (PyDict_Next(PyDataType_FIELDS(descr), &pos, &key, &value)) { + while (PyDict_Next(PyDataType_FIELDS(descr), &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } @@ -433,7 +482,7 @@ _fill_with_none(char *optr, PyArray_Descr *dtype) int offset; Py_ssize_t pos = 0; - while (PyDict_Next(PyDataType_FIELDS(dtype), &pos, &key, &value)) { + while (PyDict_Next(PyDataType_FIELDS(dtype), &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } diff --git a/numpy/_core/src/multiarray/refcount.h b/numpy/_core/src/multiarray/refcount.h index d9f472b2697e..41c428f321e4 100644 --- a/numpy/_core/src/multiarray/refcount.h +++ b/numpy/_core/src/multiarray/refcount.h @@ -6,6 +6,11 @@ PyArray_ClearBuffer( PyArray_Descr *descr, char *data, npy_intp stride, npy_intp size, int aligned); +NPY_NO_EXPORT int +PyArray_ZeroContiguousBuffer( + PyArray_Descr *descr, char *data, + npy_intp stride, 
npy_intp size, int aligned); + NPY_NO_EXPORT int PyArray_ClearArray(PyArrayObject *arr); diff --git a/numpy/_core/src/multiarray/scalarapi.c b/numpy/_core/src/multiarray/scalarapi.c index 9ca83d8a57f5..a602e312727b 100644 --- a/numpy/_core/src/multiarray/scalarapi.c +++ b/numpy/_core/src/multiarray/scalarapi.c @@ -11,8 +11,7 @@ #include "numpy/npy_math.h" #include "npy_config.h" - - +#include "npy_pycompat.h" // PyObject_GetOptionalAttr #include "array_coercion.h" #include "ctors.h" @@ -294,64 +293,42 @@ PyArray_DescrFromTypeObject(PyObject *type) return PyArray_DescrFromType(typenum); } - /* Check the generic types */ + /* Check the generic types, was deprecated in 1.19 and removed for 2.3 */ if ((type == (PyObject *) &PyNumberArrType_Type) || (type == (PyObject *) &PyInexactArrType_Type) || (type == (PyObject *) &PyFloatingArrType_Type)) { - if (DEPRECATE("Converting `np.inexact` or `np.floating` to " - "a dtype is deprecated. The current result is `float64` " - "which is not strictly correct.") < 0) { - return NULL; - } - typenum = NPY_DOUBLE; + PyErr_SetString(PyExc_TypeError, + "Converting `np.inexact` or `np.floating` to " + "a dtype not allowed"); + return NULL; } else if (type == (PyObject *)&PyComplexFloatingArrType_Type) { - if (DEPRECATE("Converting `np.complex` to a dtype is deprecated. " - "The current result is `complex128` which is not " - "strictly correct.") < 0) { - return NULL; - } - typenum = NPY_CDOUBLE; + PyErr_SetString(PyExc_TypeError, + "Converting `np.complex` to a dtype is not allowed."); + return NULL; } else if ((type == (PyObject *)&PyIntegerArrType_Type) || (type == (PyObject *)&PySignedIntegerArrType_Type)) { - if (DEPRECATE("Converting `np.integer` or `np.signedinteger` to " - "a dtype is deprecated. The current result is " - "`np.dtype(np.int_)` which is not strictly correct. " - "Note that the result depends on the system. 
To ensure " - "stable results use may want to use `np.int64` or " - "`np.int32`.") < 0) { - return NULL; - } - typenum = NPY_LONG; + PyErr_SetString(PyExc_TypeError, + "Converting 'np.integer' or 'np.signedinteger' to " + "a dtype is not allowed"); + return NULL; } else if (type == (PyObject *) &PyUnsignedIntegerArrType_Type) { - if (DEPRECATE("Converting `np.unsignedinteger` to a dtype is " - "deprecated. The current result is `np.dtype(np.uint)` " - "which is not strictly correct. Note that the result " - "depends on the system. To ensure stable results you may " - "want to use `np.uint64` or `np.uint32`.") < 0) { - return NULL; - } - typenum = NPY_ULONG; + PyErr_SetString(PyExc_TypeError, + "Converting `np.unsignedinteger` to a dtype is not allowed"); + return NULL; } else if (type == (PyObject *) &PyCharacterArrType_Type) { - if (DEPRECATE("Converting `np.character` to a dtype is deprecated. " - "The current result is `np.dtype(np.str_)` " - "which is not strictly correct. Note that `np.character` " - "is generally deprecated and 'S1' should be used.") < 0) { - return NULL; - } - typenum = NPY_STRING; + PyErr_SetString(PyExc_TypeError, + "Converting `np.character` to a dtype is not allowed"); + return NULL; } else if ((type == (PyObject *) &PyGenericArrType_Type) || (type == (PyObject *) &PyFlexibleArrType_Type)) { - if (DEPRECATE("Converting `np.generic` to a dtype is " - "deprecated. 
The current result is `np.dtype(np.void)` " - "which is not strictly correct.") < 0) { - return NULL; - } - typenum = NPY_VOID; + PyErr_SetString(PyExc_TypeError, + "Converting `np.generic` to a dtype is not allowed."); + return NULL; } if (typenum != NPY_NOTYPE) { @@ -365,17 +342,34 @@ PyArray_DescrFromTypeObject(PyObject *type) /* Do special thing for VOID sub-types */ if (PyType_IsSubtype((PyTypeObject *)type, &PyVoidArrType_Type)) { + PyObject *attr; + _PyArray_LegacyDescr *conv = NULL; + int res = PyObject_GetOptionalAttr(type, npy_interned_str.dtype, &attr); + if (res < 0) { + return NULL; // Should be a rather criticial error, so just fail. + } + if (res == 1) { + if (!PyArray_DescrCheck(attr)) { + if (PyObject_HasAttrString(attr, "__get__")) { + /* If the object has a __get__, assume this is a class property. */ + Py_DECREF(attr); + conv = NULL; + } + else { + PyErr_Format(PyExc_ValueError, + "`.dtype` attribute %R is not a valid dtype instance", + attr); + Py_DECREF(attr); + return NULL; + } + } + } + _PyArray_LegacyDescr *new = (_PyArray_LegacyDescr *)PyArray_DescrNewFromType(NPY_VOID); if (new == NULL) { return NULL; } - _PyArray_LegacyDescr *conv = (_PyArray_LegacyDescr *)( - _arraydescr_try_convert_from_dtype_attr(type)); - if (conv == NULL) { - Py_DECREF(new); - return NULL; - } - if ((PyObject *)conv != Py_NotImplemented && PyDataType_ISLEGACY(conv)) { + if (conv != NULL && PyDataType_ISLEGACY(conv)) { new->fields = conv->fields; Py_XINCREF(new->fields); new->names = conv->names; @@ -384,12 +378,18 @@ PyArray_DescrFromTypeObject(PyObject *type) new->subarray = conv->subarray; conv->subarray = NULL; } - Py_DECREF(conv); + Py_XDECREF(conv); Py_XDECREF(new->typeobj); new->typeobj = (PyTypeObject *)type; Py_INCREF(type); return (PyArray_Descr *)new; } + + PyObject *DType = PyArray_DiscoverDTypeFromScalarType((PyTypeObject *)type); + if (DType != NULL) { + return PyArray_GetDefaultDescr((PyArray_DTypeMeta *)DType); + } + return _descr_from_subtype(type); 
} @@ -535,15 +535,10 @@ PyArray_Scalar(void *data, PyArray_Descr *descr, PyObject *base) if (buff == NULL) { return PyErr_NoMemory(); } - /* copyswap needs an array object, but only actually cares about the - * dtype - */ - PyArrayObject_fields dummy_arr; - if (base == NULL) { - dummy_arr.descr = descr; - base = (PyObject *)&dummy_arr; + memcpy(buff, data, itemsize); + if (swap) { + byte_swap_vector(buff, itemsize / 4, 4); } - copyswap(buff, data, swap, base); /* truncation occurs here */ PyObject *u = PyUnicode_FromKindAndData(PyUnicode_4BYTE_KIND, buff, itemsize / 4); @@ -586,9 +581,6 @@ PyArray_Scalar(void *data, PyArray_Descr *descr, PyObject *base) if (PyTypeNum_ISFLEXIBLE(type_num)) { if (type_num == NPY_STRING) { destptr = PyBytes_AS_STRING(obj); - #if PY_VERSION_HEX < 0x030b00b0 - ((PyBytesObject *)obj)->ob_shash = -1; - #endif memcpy(destptr, data, itemsize); return obj; } diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index a5185cba60aa..e23fcef06574 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -20,10 +20,13 @@ #include "mapping.h" #include "ctors.h" #include "dtypemeta.h" +#include "descriptor.h" #include "usertypes.h" +#include "number.h" #include "numpyos.h" #include "can_cast_table.h" #include "common.h" +#include "conversion_utils.h" #include "flagsobject.h" #include "scalartypes.h" #include "_datetime.h" @@ -33,14 +36,14 @@ #include "dragon4.h" #include "npy_longdouble.h" #include "npy_buffer.h" +#include "npy_static_data.h" +#include "multiarraymodule.h" +#include "array_api_standard.h" #include #include "binop_override.h" -/* determines if legacy mode is enabled, global set in multiarraymodule.c */ -extern int npy_legacy_print_mode; - /* * used for allocating a single scalar, so use the default numpy * memory allocators instead of the (maybe) user overrides @@ -121,19 +124,6 @@ gentype_free(PyObject *v) } -static PyObject * 
-gentype_power(PyObject *m1, PyObject *m2, PyObject *modulo) -{ - if (modulo != Py_None) { - /* modular exponentiation is not implemented (gh-8804) */ - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } - - BINOP_GIVE_UP_IF_NEEDED(m1, m2, nb_power, gentype_power); - return PyArray_Type.tp_as_number->nb_power(m1, m2, Py_None); -} - static PyObject * gentype_generic_method(PyObject *self, PyObject *args, PyObject *kwds, char *str) @@ -165,33 +155,216 @@ gentype_generic_method(PyObject *self, PyObject *args, PyObject *kwds, } } -static PyObject * -gentype_add(PyObject *m1, PyObject* m2) -{ - /* special case str.__radd__, which should not call array_add */ - if (PyBytes_Check(m1) || PyUnicode_Check(m1)) { - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; + +/* + * Helper function to deal with binary operator deferral. Must be passed a + * valid self (a generic scalar) and an other item. + * May fill self_item and/or other_arr (but not both) with non-NULL values. + * + * Why this dance? When the other object is exactly a Python scalar something + * awkward happens historically in NumPy. + * NumPy doesn't define a result, but the ufunc would cast to `astype(object)` + * which is the same as `scalar.item()`. And that operation converts e.g. + * float32 or float64 to Python floats. + * It then retries. And because it is a builtin type now the operation may + * succeed. + * + * This retrying pass only makes sense if the other object is a Python + * scalar (otherwise we fill in `other_arr` which can be used to call the + * ufunc). + * Additionally, if `self.item()` has the same type as `self` we would end up + * in an infinite recursion. + * + * So the result of this function means the following: + * - < 0 error return. + * - self_op is filled in: Retry the Python operator. + * - other_op is filled in: Use the array operator (goes into ufuncs) + * (This may be the original generic if it is one.) + * - neither is filled in: Return NotImplemented. 
+ * + * It is not possible for both to be filled. If `other` is also a generics, + * it is returned. + */ +static inline int +find_binary_operation_path( + PyObject *self, PyObject *other, PyObject **self_op, PyObject **other_op) +{ + *other_op = NULL; + *self_op = NULL; + + if (PyArray_IsScalar(other, Generic) || + PyLong_CheckExact(other) || + PyFloat_CheckExact(other) || + PyComplex_CheckExact(other) || + PyBool_Check(other) || + PyArray_Check(other)) { + /* + * The other operand is ready for the operation already. Must pass on + * on float/long/complex mainly for weak promotion (NEP 50). + */ + *other_op = Py_NewRef(other); + return 0; } - BINOP_GIVE_UP_IF_NEEDED(m1, m2, nb_add, gentype_add); - return PyArray_Type.tp_as_number->nb_add(m1, m2); + /* + * If other has __array_ufunc__ always use ufunc. If array-ufunc was None + * we already deferred. And any custom object with array-ufunc cannot call + * our ufuncs without preventing recursion. + * It may be nice to avoid double lookup in `BINOP_GIVE_UP_IF_NEEDED`. + */ + PyObject *attr; + if (PyArray_LookupSpecial(other, npy_interned_str.array_ufunc, &attr) < 0) { + PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */ + } + else if (attr != NULL) { + Py_DECREF(attr); + *other_op = Py_NewRef(other); + return 0; + } + + /* + * Now check `other`. We want to know whether it is an object scalar + * and the easiest way is by converting to an array here. + */ + int was_scalar; + PyArrayObject *arr = (PyArrayObject *)PyArray_FromAny_int( + other, NULL, NULL, 0, NPY_MAXDIMS, 0, NULL, &was_scalar); + if (arr == NULL) { + return -1; + } + + if (!was_scalar || PyArray_DESCR(arr)->type_num != NPY_OBJECT) { + /* + * The array is OK for usage and we can simply forward it. There + * is a theoretical subtlety here: If the other object implements + * `__array_wrap__`, we may ignore that. 
However, this only matters + * if the other object has the identical `__array_priority__` and + * additionally already deferred back to us. + * (`obj + scalar` and `scalar + obj` are not symmetric.) + * + * NOTE: Future NumPy may need to distinguish scalars here, one option + * could be marking the array. + */ + *other_op = (PyObject *)arr; + return 0; + } + Py_DECREF(arr); + + /* + * If we are here, we need to operate on Python scalars. In general + * that would just fails since NumPy doesn't know the other object! + * + * However, NumPy (historically) made this often work magically because + * ufuncs for object dtype end up casting to object with `.item()`. This in + * turn often returns a Python type (e.g. float for float32, float64)! + * Retrying then succeeds. So if (and only if) `self.item()` returns a new + * type, we can safely attempt the operation (again) with that. + */ + PyObject *self_item = PyObject_CallMethodNoArgs(self, npy_interned_str.item); + if (self_item == NULL) { + return -1; + } + if (Py_TYPE(self_item) != Py_TYPE(self)) { + /* self_item can be used to retry the operation */ + *self_op = self_item; + return 0; + } + /* The operation can't work and we will return NotImplemented */ + Py_DECREF(self_item); + return 0; } + +/* + * These are defined below as they require special handling, we still define + * a _gen version here. `power` is special as it has three arguments. 
+ */ +static PyObject * +gentype_add(PyObject *m1, PyObject *m2); + +static PyObject * +gentype_multiply(PyObject *m1, PyObject *m2); + + /**begin repeat * - * #name = subtract, remainder, divmod, lshift, rshift, - * and, xor, or, floor_divide, true_divide# + * #name = add, multiply, subtract, remainder, divmod, + * lshift, rshift, and, xor, or, floor_divide, true_divide# + * #ufunc = add, multiply, subtract, remainder, divmod, + * left_shift, right_shift, bitwise_and, bitwise_xor, bitwise_or, + * floor_divide, true_divide# + * #func = Add, Multiply, Subtract, Remainder, Divmod, + * Lshift, Rshift, And, Xor, Or, FloorDivide, TrueDivide# + * #suff = _gen, _gen,,,,,,,,,,# */ +/* NOTE: We suffix the name for functions requiring special handling first. */ static PyObject * -gentype_@name@(PyObject *m1, PyObject *m2) +gentype_@name@@suff@(PyObject *m1, PyObject *m2) { BINOP_GIVE_UP_IF_NEEDED(m1, m2, nb_@name@, gentype_@name@); - return PyArray_Type.tp_as_number->nb_@name@(m1, m2); + + PyObject *self = NULL; + PyObject *other = NULL; + PyObject *self_op, *other_op; + + if (!PyArray_IsScalar(m2, Generic)) { + self = m1; + other = m2; + } + else { + self = m2; + other = m1; + } + if (find_binary_operation_path(self, other, &self_op, &other_op) < 0) { + return NULL; + } + if (self_op != NULL) { + PyObject *res; + if (self == m1) { + res = PyNumber_@func@(self_op, m2); + } + else { + res = PyNumber_@func@(m1, self_op); + } + Py_DECREF(self_op); + return res; + } + else if (other_op != NULL) { + /* Call the corresponding ufunc (with the array) */ + PyObject *res; + if (self == m1) { + res = PyArray_GenericBinaryFunction(m1, other_op, n_ops.@ufunc@); + } + else { + res = PyArray_GenericBinaryFunction(other_op, m2, n_ops.@ufunc@); + } + Py_DECREF(other_op); + return res; + } + else { + assert(other_op == NULL); + Py_RETURN_NOTIMPLEMENTED; + } } /**end repeat**/ -/* Get a nested slot, or NULL if absent */ +/* + * The following operators use the above, but require specialization. 
+ */ + +static PyObject * +gentype_add(PyObject *m1, PyObject *m2) +{ + /* special case str.__radd__, which should not call array_add */ + if (PyBytes_Check(m1) || PyUnicode_Check(m1)) { + Py_INCREF(Py_NotImplemented); + return Py_NotImplemented; + } + + return gentype_add_gen(m1, m2); +} + +/* Get a nested slot, or NULL if absent (for multiply implementation) */ #define GET_NESTED_SLOT(type, group, slot) \ ((type)->group == NULL ? NULL : (type)->group->slot) @@ -220,11 +393,75 @@ gentype_multiply(PyObject *m1, PyObject *m2) Py_INCREF(Py_NotImplemented); return Py_NotImplemented; } - /* All normal cases are handled by PyArray's multiply */ - BINOP_GIVE_UP_IF_NEEDED(m1, m2, nb_multiply, gentype_multiply); - return PyArray_Type.tp_as_number->nb_multiply(m1, m2); + + return gentype_multiply_gen(m1, m2); +} + + +/* + * NOTE: The three argument nature of power requires code duplication here. + */ +static PyObject * +gentype_power(PyObject *m1, PyObject *m2, PyObject *modulo) +{ + if (modulo != Py_None) { + /* modular exponentiation is not implemented (gh-8804) */ + Py_INCREF(Py_NotImplemented); + return Py_NotImplemented; + } + + BINOP_GIVE_UP_IF_NEEDED(m1, m2, nb_power, gentype_power); + + PyObject *self = NULL; + PyObject *other = NULL; + PyObject *self_op, *other_op; + + if (!PyArray_IsScalar(m2, Generic)) { + self = m1; + other = m2; + } + else { + self = m2; + other = m1; + } + if (find_binary_operation_path(self, other, &self_op, &other_op) < 0) { + return NULL; + } + if (self_op != NULL) { + PyObject *res; + if (self == m1) { + res = PyNumber_Power(self_op, m2, Py_None); + } + else { + res = PyNumber_Power(m1, self_op, Py_None); + } + Py_DECREF(self_op); + return res; + } + else if (other_op != NULL) { + /* Call the corresponding ufunc (with the array) + * NOTE: As of NumPy 2.0 there are inconsistencies in array_power + * calling it would fail a (niche) test because an array is + * returned in one of the fast-paths. 
+ * (once NumPy propagates 0-D arrays, this is irrelevant) + */ + PyObject *res; + if (self == m1) { + res = PyArray_GenericBinaryFunction(m1, other_op, n_ops.power); + } + else { + res = PyArray_GenericBinaryFunction(other_op, m2, n_ops.power); + } + Py_DECREF(other_op); + return res; + } + else { + assert(other_op == NULL); + Py_RETURN_NOTIMPLEMENTED; + } } + /**begin repeat * #TYPE = BYTE, UBYTE, SHORT, USHORT, INT, UINT, * LONG, ULONG, LONGLONG, ULONGLONG# @@ -337,16 +574,20 @@ genint_type_repr(PyObject *self) if (value_string == NULL) { return NULL; } - if (npy_legacy_print_mode <= 125) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode <= 125) { return value_string; } int num = _typenum_fromtypeobj((PyObject *)Py_TYPE(self), 0); PyObject *repr; - if (num == 0) { + if (num == NPY_NOTYPE) { /* Not a builtin scalar (presumably), just use the name */ - repr = PyUnicode_FromFormat("%S(%S)", Py_TYPE(self)->tp_name, value_string); + repr = PyUnicode_FromFormat("%s(%S)", Py_TYPE(self)->tp_name, value_string); Py_DECREF(value_string); return repr; } @@ -374,7 +615,11 @@ genbool_type_str(PyObject *self) static PyObject * genbool_type_repr(PyObject *self) { - if (npy_legacy_print_mode <= 125) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode <= 125) { return genbool_type_str(self); } return PyUnicode_FromString( @@ -500,7 +745,11 @@ stringtype_@form@(PyObject *self) if (ret == NULL) { return NULL; } - if (npy_legacy_print_mode > 125) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode > 125) { Py_SETREF(ret, PyUnicode_FromFormat("np.bytes_(%S)", ret)); } #endif /* IS_repr */ @@ -547,7 +796,11 @@ unicodetype_@form@(PyObject *self) if (ret == NULL) { return NULL; } - if (npy_legacy_print_mode > 125) { + int legacy_print_mode = 
get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode > 125) { Py_SETREF(ret, PyUnicode_FromFormat("np.str_(%S)", ret)); } #endif /* IS_repr */ @@ -609,14 +862,14 @@ _void_to_hex(const char* argbuf, const Py_ssize_t arglen, static PyObject * _void_scalar_to_string(PyObject *obj, int repr) { - static PyObject *tostring_func = NULL; - npy_cache_import("numpy._core.arrayprint", - "_void_scalar_to_string", &tostring_func); - if (tostring_func == NULL) { + if (npy_cache_import_runtime( + "numpy._core.arrayprint", "_void_scalar_to_string", + &npy_runtime_imports._void_scalar_to_string) == -1) { return NULL; } PyObject *is_repr = repr ? Py_True : Py_False; - return PyObject_CallFunctionObjArgs(tostring_func, obj, is_repr, NULL); + return PyObject_CallFunctionObjArgs( + npy_runtime_imports._void_scalar_to_string, obj, is_repr, NULL); } static PyObject * @@ -627,7 +880,11 @@ voidtype_repr(PyObject *self) /* Python helper checks for the legacy mode printing */ return _void_scalar_to_string(self, 1); } - if (npy_legacy_print_mode > 125) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode > 125) { return _void_to_hex(s->obval, s->descr->elsize, "np.void(b'", "\\x", "')"); } else { @@ -679,19 +936,38 @@ datetimetype_repr(PyObject *self) */ if ((scal->obmeta.num == 1 && scal->obmeta.base != NPY_FR_h) || scal->obmeta.base == NPY_FR_GENERIC) { - if (npy_legacy_print_mode > 125) { - ret = PyUnicode_FromFormat("np.datetime64('%s')", iso); + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; } - else { - ret = PyUnicode_FromFormat("numpy.datetime64('%s')", iso); + + PyObject *meta = metastr_to_unicode(&scal->obmeta, 1); + if((scal->obval == NPY_DATETIME_NAT) && (meta != NULL)){ + if (legacy_print_mode > 125) { + ret = PyUnicode_FromFormat("np.datetime64('%s','%S')", iso, meta); + } else { + ret = 
PyUnicode_FromFormat("numpy.datetime64('%s','%S')", iso, meta); + } + } else { + if (legacy_print_mode > 125) { + ret = PyUnicode_FromFormat("np.datetime64('%s')", iso); + } + else { + ret = PyUnicode_FromFormat("numpy.datetime64('%s')", iso); + } } + Py_DECREF(meta); } else { PyObject *meta = metastr_to_unicode(&scal->obmeta, 1); if (meta == NULL) { return NULL; } - if (npy_legacy_print_mode > 125) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode > 125) { ret = PyUnicode_FromFormat("np.datetime64('%s','%S')", iso, meta); } else { @@ -735,7 +1011,11 @@ timedeltatype_repr(PyObject *self) /* The metadata unit */ if (scal->obmeta.base == NPY_FR_GENERIC) { - if (npy_legacy_print_mode > 125) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode > 125) { ret = PyUnicode_FromFormat("np.timedelta64(%S)", val); } else { @@ -748,7 +1028,11 @@ timedeltatype_repr(PyObject *self) Py_DECREF(val); return NULL; } - if (npy_legacy_print_mode > 125) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode > 125) { ret = PyUnicode_FromFormat("np.timedelta64(%S,'%S')", val, meta); } else { @@ -1034,6 +1318,7 @@ legacy_@name@_format@kind@(npy_@name@ val){ /**begin repeat1 * #name = float, double, longdouble# + * #max_positional = 1.e6L, 1.e16L, 1.e16L# * #Name = Float, Double, LongDouble# * #NAME = FLOAT, DOUBLE, LONGDOUBLE# * #n = f, , l# @@ -1050,9 +1335,20 @@ static PyObject * npy_bool sign) { - if (npy_legacy_print_mode <= 113) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode <= 113) { return legacy_@name@_format@kind@(val); } + long double max_positional; + if (legacy_print_mode <= 202) { + max_positional = 1.e16L; + } + else { + max_positional = @max_positional@; + } int 
use_positional; if (npy_isnan(val) || val == 0) { @@ -1060,7 +1356,7 @@ static PyObject * } else { npy_@name@ absval = val < 0 ? -val : val; - use_positional = absval < 1.e16L && absval >= 1.e-4L; + use_positional = absval < max_positional && absval >= 1.e-4L; } if (use_positional) { @@ -1081,7 +1377,11 @@ static PyObject * if (string == NULL) { return NULL; } - if (npy_legacy_print_mode > 125) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode > 125) { Py_SETREF(string, PyUnicode_FromFormat("@repr_format@", string)); } #endif /* IS_repr */ @@ -1096,7 +1396,11 @@ c@name@type_@kind@(PyObject *self) npy_c@name@ val = PyArrayScalar_VAL(self, C@Name@); TrimMode trim = TrimMode_DptZeros; - if (npy_legacy_print_mode <= 113) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode <= 113) { return legacy_c@name@_format@kind@(val); } @@ -1109,7 +1413,11 @@ c@name@type_@kind@(PyObject *self) #ifdef IS_str ret = PyUnicode_FromFormat("%Sj", istr); #else /* IS_repr */ - if (npy_legacy_print_mode <= 125) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode <= 125) { ret = PyUnicode_FromFormat("%Sj", istr); } else { @@ -1157,7 +1465,11 @@ c@name@type_@kind@(PyObject *self) #ifdef IS_str string = PyUnicode_FromFormat("(%S%Sj)", rstr, istr); #else /* IS_repr */ - if (npy_legacy_print_mode > 125) { + legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode > 125) { string = PyUnicode_FromFormat("@crepr_format@", rstr, istr); } else { @@ -1182,14 +1494,25 @@ halftype_@kind@(PyObject *self) float floatval = npy_half_to_float(val); float absval; - if (npy_legacy_print_mode <= 113) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if 
(legacy_print_mode <= 113) { return legacy_float_format@kind@(floatval); } + long double max_positional; + if (legacy_print_mode <= 202) { + max_positional = 1.e16L; + } + else { + max_positional = 1.e3L; + } absval = floatval < 0 ? -floatval : floatval; PyObject *string; - if (absval == 0 || (absval < 1.e16 && absval >= 1.e-4) ) { + if (absval == 0 || (absval < max_positional && absval >= 1.e-4) ) { string = format_half(val, 0, -1, 0, TrimMode_LeaveOneZero, -1, -1, -1); } else { @@ -1198,7 +1521,11 @@ halftype_@kind@(PyObject *self) #ifdef IS_str return string; #else - if (string == NULL || npy_legacy_print_mode <= 125) { + legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (string == NULL || legacy_print_mode <= 125) { return string; } PyObject *res = PyUnicode_FromFormat("np.float16(%S)", string); @@ -1265,8 +1592,6 @@ static PyNumberMethods gentype_as_number = { static PyObject * gentype_richcompare(PyObject *self, PyObject *other, int cmp_op) { - PyObject *arr, *ret; - /* * If the other object is None, False is always right. This avoids * the array None comparison, at least until deprecation it is fixed. 
@@ -1287,17 +1612,35 @@ gentype_richcompare(PyObject *self, PyObject *other, int cmp_op) RICHCMP_GIVE_UP_IF_NEEDED(self, other); - arr = PyArray_FromScalar(self, NULL); - if (arr == NULL) { + PyObject *self_op; + PyObject *other_op; + if (find_binary_operation_path(self, other, &self_op, &other_op) < 0) { return NULL; } - /* - * Call via PyObject_RichCompare to ensure that other.__eq__ - * has a chance to run when necessary - */ - ret = PyObject_RichCompare(arr, other, cmp_op); - Py_DECREF(arr); - return ret; + + /* We can always just call RichCompare again */ + if (other_op != NULL) { + /* If we use richcompare again, need to ensure that one op is array */ + self_op = PyArray_FromScalar(self, NULL); + if (self_op == NULL) { + Py_DECREF(other_op); + return NULL; + } + PyObject *res = PyObject_RichCompare(self_op, other_op, cmp_op); + Py_DECREF(self_op); + Py_DECREF(other_op); + return res; + } + else if (self_op != NULL) { + /* Try again, since other is an object scalar and this one mutated */ + PyObject *res = PyObject_RichCompare(self_op, other, cmp_op); + Py_DECREF(self_op); + return res; + } + else { + /* Comparison with arbitrary objects cannot be defined. 
*/ + Py_RETURN_NOTIMPLEMENTED; + } } static PyObject * @@ -1445,26 +1788,78 @@ gentype_shape_get(PyObject *NPY_UNUSED(self), void *NPY_UNUSED(ignored)) } +static PyObject * +gentype_dataptr_get(PyObject *self) +{ + return Py_BuildValue( + "NO", + PyLong_FromVoidPtr(scalar_value(self, NULL)), + Py_True + ); +} + + static PyObject * gentype_interface_get(PyObject *self, void *NPY_UNUSED(ignored)) { - PyArrayObject *arr; - PyObject *inter; + PyObject *dataptr = NULL; + PyObject *strides = NULL; + PyObject *shape = NULL; + PyObject *descr = NULL; + PyObject *typestr = NULL; + PyArray_Descr *array_descr = NULL; + PyObject *inter = NULL; - arr = (PyArrayObject *)PyArray_FromScalar(self, NULL); - if (arr == NULL) { - return NULL; + + array_descr = PyArray_DescrFromScalar(self); + if (array_descr == NULL) { + goto finish; } - inter = PyObject_GetAttrString((PyObject *)arr, "__array_interface__"); - if (inter != NULL) { - PyDict_SetItemString(inter, "__ref", (PyObject *)arr); + + /* dataptr */ + dataptr = gentype_dataptr_get(self); + if (dataptr == NULL) { + goto finish; } - Py_DECREF(arr); + + /* strides */ + strides = gentype_shape_get(self, NULL); + if (strides == NULL) { + goto finish; + } + + /* descr */ + descr = array_protocol_descr_get(array_descr); + if (descr == NULL) { + goto finish; + } + + /* typestr */ + typestr = arraydescr_protocol_typestr_get(array_descr, NULL); + if (typestr == NULL) { + goto finish; + } + + /* shape */ + shape = gentype_shape_get(self, NULL); + if (shape == NULL) { + goto finish; + } + + inter = build_array_interface(dataptr, descr, strides, typestr, shape); + goto finish; + +finish: + Py_XDECREF(descr); + Py_XDECREF(dataptr); + Py_XDECREF(strides); + Py_XDECREF(shape); + Py_XDECREF(typestr); + Py_XDECREF(array_descr); return inter; } - static PyObject * gentype_typedescr_get(PyObject *self, void *NPY_UNUSED(ignored)) { @@ -1598,33 +1993,6 @@ gentype_transpose_get(PyObject *self, void *NPY_UNUSED(ignored)) return self; } -static PyObject 
* -gentype_newbyteorder(PyObject *NPY_UNUSED(self), void *NPY_UNUSED(ignored)) -{ - PyErr_SetString(PyExc_AttributeError, - "`newbyteorder` was removed from scalar types in NumPy 2.0. " - "Use `sc.view(sc.dtype.newbyteorder(order))` instead."); - return NULL; -} - -static PyObject * -gentype_itemset(PyObject *NPY_UNUSED(self), void *NPY_UNUSED(ignored)) -{ - PyErr_SetString(PyExc_AttributeError, - "`itemset` was removed from scalar types in NumPy 2.0 " - "because scalars are immutable."); - return NULL; -} - -static PyObject * -gentype_ptp(PyObject *NPY_UNUSED(self), void *NPY_UNUSED(ignored)) -{ - PyErr_SetString(PyExc_AttributeError, - "`ptp` was removed from scalar types in NumPy 2.0. " - "For a scalar, the range of values always equals 0."); - return NULL; -} - static PyGetSetDef gentype_getsets[] = { {"ndim", @@ -1669,14 +2037,8 @@ static PyGetSetDef gentype_getsets[] = { {"T", (getter)gentype_transpose_get, (setter)0, NULL, NULL}, - {"newbyteorder", - (getter)gentype_newbyteorder, - (setter)0, NULL, NULL}, - {"itemset", - (getter)gentype_itemset, - (setter)0, NULL, NULL}, - {"ptp", - (getter)gentype_ptp, + {"device", + (getter)array_device, (setter)0, NULL, NULL}, {"__array_interface__", (getter)gentype_interface_get, @@ -1717,39 +2079,42 @@ gentype_getarray(PyObject *scalar, PyObject *args) return ret; } -static char doc_sc_wraparray[] = "sc.__array_wrap__(obj) return scalar from array"; +static char doc_sc_wraparray[] = "__array_wrap__ implementation for scalar types"; +/* + * __array_wrap__ for scalars, returning a scalar if possible. + * (note that NumPy itself may well never call this itself). 
+ */ static PyObject * gentype_wraparray(PyObject *NPY_UNUSED(scalar), PyObject *args) { - PyObject *obj; PyArrayObject *arr; + PyObject *UNUSED = NULL; /* for the context argument */ + /* return_scalar should be passed, but we're scalar, so return scalar by default */ + int return_scalar = 1; - if (PyTuple_Size(args) < 1) { - PyErr_SetString(PyExc_TypeError, - "only accepts 1 argument."); - return NULL; - } - obj = PyTuple_GET_ITEM(args, 0); - if (!PyArray_Check(obj)) { - PyErr_SetString(PyExc_TypeError, - "can only be called with ndarray object"); + if (!PyArg_ParseTuple(args, "O!|OO&:__array_wrap__", + &PyArray_Type, &arr, &UNUSED, + &PyArray_OptionalBoolConverter, &return_scalar)) { return NULL; } - arr = (PyArrayObject *)obj; - return PyArray_Scalar(PyArray_DATA(arr), - PyArray_DESCR(arr), (PyObject *)arr); + Py_INCREF(arr); + if (!return_scalar) { + return (PyObject *)arr; + } + else { + return PyArray_Return(arr); + } } /* * These gentype_* functions do not take keyword arguments. - * The proper flag is METH_VARARGS. + * The proper flag is METH_VARARGS or METH_NOARGS. 
*/ /**begin repeat * - * #name = tolist, item, __deepcopy__, __copy__, - * swapaxes, conj, conjugate, nonzero, + * #name = tolist, item, swapaxes, conj, conjugate, nonzero, * fill, transpose# */ static PyObject * @@ -1759,6 +2124,34 @@ gentype_@name@(PyObject *self, PyObject *args) } /**end repeat**/ +static PyObject * +gentype___copy__(PyObject *self) +{ + // scalars are immutable, so we can return a new reference + // the only expections are scalars with void dtype + if (PyObject_IsInstance(self, (PyObject *)&PyVoidArrType_Type)) { + // path via array + return gentype_generic_method(self, NULL, NULL, "__copy__"); + } + return Py_NewRef(self); +} + +static PyObject * +gentype___deepcopy__(PyObject *self, PyObject *args) +{ + // note: maybe the signature needs to be updated as __deepcopy__ can accept the keyword memo + + // scalars are immutable, so we can return a new reference + // the only expections are scalars with void dtype + // if the number of arguments is not 1, we let gentype_generic_method do the + // error handling + if (PyObject_IsInstance(self, (PyObject *)&PyVoidArrType_Type) || (PyTuple_Size(args)!=1)) { + // path via array + return gentype_generic_method(self, args, NULL, "__deepcopy__"); + } + return Py_NewRef(self); +} + static PyObject * gentype_byteswap(PyObject *self, PyObject *args, PyObject *kwds) { @@ -1809,7 +2202,7 @@ gentype_byteswap(PyObject *self, PyObject *args, PyObject *kwds) * #name = take, getfield, put, repeat, tofile, mean, trace, diagonal, clip, * std, var, sum, cumsum, prod, cumprod, compress, sort, argsort, * round, argmax, argmin, max, min, any, all, astype, resize, - * reshape, choose, tostring, tobytes, copy, searchsorted, view, + * reshape, choose, tobytes, copy, searchsorted, view, * flatten, ravel, squeeze# */ static PyObject * @@ -1821,8 +2214,7 @@ gentype_@name@(PyObject *self, PyObject *args, PyObject *kwds) /**begin repeat - * #name = integer, floating, complexfloating# - * #complex = 0, 0, 1# + * #name = integer, 
floating# */ static PyObject * @name@type_dunder_round(PyObject *self, PyObject *args, PyObject *kwds) @@ -1833,14 +2225,6 @@ static PyObject * return NULL; } -#if @complex@ - if (DEPRECATE("The Python built-in `round` is deprecated for complex " - "scalars, and will raise a `TypeError` in a future release. " - "Use `np.round` or `scalar.round` instead.") < 0) { - return NULL; - } -#endif - PyObject *tup; if (ndigits == Py_None) { tup = PyTuple_Pack(0); @@ -1859,13 +2243,11 @@ static PyObject * return NULL; } -#if !@complex@ if (ndigits == Py_None) { PyObject *ret = PyNumber_Long(obj); Py_DECREF(obj); return ret; } -#endif return obj; } @@ -1983,7 +2365,7 @@ gentype_reduce(PyObject *self, PyObject *NPY_UNUSED(args)) buffer = view.buf; buflen = view.len; /* - * In Python 3 both of the deprecated functions PyObject_AsWriteBuffer and + * Both of the deprecated functions PyObject_AsWriteBuffer and * PyObject_AsReadBuffer that this code replaces release the buffer. It is * up to the object that supplies the buffer to guarantee that the buffer * sticks around after the release. 
@@ -2278,9 +2660,6 @@ static PyMethodDef gentype_methods[] = { {"tofile", (PyCFunction)gentype_tofile, METH_VARARGS | METH_KEYWORDS, NULL}, - {"tostring", - (PyCFunction)gentype_tostring, - METH_VARARGS | METH_KEYWORDS, NULL}, {"byteswap", (PyCFunction)gentype_byteswap, METH_VARARGS | METH_KEYWORDS, NULL}, @@ -2314,7 +2693,7 @@ static PyMethodDef gentype_methods[] = { /* for the copy module */ {"__copy__", (PyCFunction)gentype___copy__, - METH_VARARGS, NULL}, + METH_NOARGS, NULL}, {"__deepcopy__", (PyCFunction)gentype___deepcopy__, METH_VARARGS, NULL}, @@ -2451,6 +2830,15 @@ static PyMethodDef gentype_methods[] = { {"setflags", (PyCFunction)gentype_setflags, METH_VARARGS | METH_KEYWORDS, NULL}, + + /* For Array API compatibility */ + {"__array_namespace__", + (PyCFunction)array_array_namespace, + METH_VARARGS | METH_KEYWORDS, NULL}, + {"to_device", + (PyCFunction)array_to_device, + METH_VARARGS | METH_KEYWORDS, NULL}, + {NULL, NULL, 0, NULL} /* sentinel */ }; @@ -2506,6 +2894,16 @@ static PyMethodDef numbertype_methods[] = { {NULL, NULL, 0, NULL} /* sentinel */ }; +/**begin repeat + * #name = boolean,datetime# + */ +static PyMethodDef @name@type_methods[] = { + /* for typing */ + {"__class_getitem__", Py_GenericAlias, METH_CLASS | METH_O, NULL}, + {NULL, NULL, 0, NULL} /* sentinel */ +}; +/**end repeat**/ + /**begin repeat * #name = cfloat,clongdouble# */ @@ -2521,17 +2919,13 @@ static PyMethodDef @name@type_methods[] = { }; /**end repeat**/ -/**begin repeat - * #name = floating, complexfloating# - */ -static PyMethodDef @name@type_methods[] = { +static PyMethodDef floatingtype_methods[] = { /* Hook for the round() builtin */ {"__round__", - (PyCFunction)@name@type_dunder_round, + (PyCFunction)floatingtype_dunder_round, METH_VARARGS | METH_KEYWORDS, NULL}, {NULL, NULL, 0, NULL} /* sentinel */ }; -/**end repeat**/ static PyMethodDef integertype_methods[] = { /* Hook for the round() builtin */ @@ -3036,13 +3430,7 @@ object_arrtype_alloc(PyTypeObject *type, Py_ssize_t 
items) * Object scalars should not actually exist, if they exist we should * consider it to be a bug. */ - static PyObject *visibleDeprecationWarning = NULL; - npy_cache_import("numpy", "VisibleDeprecationWarning", - &visibleDeprecationWarning); - if (visibleDeprecationWarning == NULL) { - return NULL; - } - if (PyErr_WarnEx(visibleDeprecationWarning, + if (PyErr_WarnEx(npy_static_pydata.VisibleDeprecationWarning, "Creating a NumPy object scalar. NumPy object scalars should " "never be created. If you see this message please inform the " "NumPy developers. Since this message should never be shown " @@ -3398,19 +3786,6 @@ static PyObject * static PyNumberMethods @name@_arrtype_as_number; /**end repeat**/ -static PyObject * -bool_index(PyObject *a) -{ - if (DEPRECATE( - "In future, it will be an error for 'np.bool' scalars to be " - "interpreted as an index") < 0) { - return NULL; - } - else { - return PyLong_FromLong(PyArrayScalar_VAL(a, Bool)); - } -} - /* Arithmetic methods -- only so we can override &, |, ^. 
*/ NPY_NO_EXPORT PyNumberMethods bool_arrtype_as_number = { .nb_bool = (inquiry)bool_arrtype_nonzero, @@ -3583,45 +3958,25 @@ static inline npy_hash_t * #lname = datetime, timedelta# * #name = Datetime, Timedelta# */ -#if NPY_SIZEOF_HASH_T==NPY_SIZEOF_DATETIME static npy_hash_t @lname@_arrtype_hash(PyObject *obj) { - npy_hash_t x = (npy_hash_t)(PyArrayScalar_VAL(obj, @name@)); - if (x == -1) { - x = -2; - } - return x; -} -#elif NPY_SIZEOF_LONGLONG==NPY_SIZEOF_DATETIME -static npy_hash_t -@lname@_arrtype_hash(PyObject *obj) -{ - npy_hash_t y; - npy_longlong x = (PyArrayScalar_VAL(obj, @name@)); + PyArray_DatetimeMetaData *meta; + npy_@lname@ val = PyArrayScalar_VAL(obj, @name@); - if ((x <= LONG_MAX)) { - y = (npy_hash_t) x; + if (val == NPY_DATETIME_NAT) { + /* Use identity, similar to NaN */ + return PyBaseObject_Type.tp_hash(obj); } - else { - union Mask { - long hashvals[2]; - npy_longlong v; - } both; - both.v = x; - y = both.hashvals[0] + (1000003)*both.hashvals[1]; - } - if (y == -1) { - y = -2; - } - return y; + meta = &((PyDatetimeScalarObject *)obj)->obmeta; + + npy_hash_t res = @lname@_hash(meta, val); + return res; } -#endif /**end repeat**/ - /* Wrong thing to do for longdouble, but....*/ /**begin repeat @@ -4121,8 +4476,6 @@ initialize_numeric_types(void) /**end repeat**/ - PyBoolArrType_Type.tp_as_number->nb_index = (unaryfunc)bool_index; - PyStringArrType_Type.tp_alloc = NULL; PyStringArrType_Type.tp_free = NULL; @@ -4191,8 +4544,8 @@ initialize_numeric_types(void) /**end repeat**/ /**begin repeat - * #name = cfloat, clongdouble, floating, integer, complexfloating# - * #NAME = CFloat, CLongDouble, Floating, Integer, ComplexFloating# + * #name = cfloat, clongdouble, floating, integer# + * #NAME = CFloat, CLongDouble, Floating, Integer# */ Py@NAME@ArrType_Type.tp_methods = @name@type_methods; @@ -4256,6 +4609,7 @@ initialize_numeric_types(void) /**end repeat**/ + PyDatetimeArrType_Type.tp_methods = datetimetype_methods; /**begin repeat * #Type = Byte, 
UByte, Short, UShort, Int, UInt, Long, @@ -4269,6 +4623,7 @@ initialize_numeric_types(void) PyBoolArrType_Type.tp_str = genbool_type_str; PyBoolArrType_Type.tp_repr = genbool_type_repr; + PyBoolArrType_Type.tp_methods = booleantype_methods; /**begin repeat diff --git a/numpy/_core/src/multiarray/sequence.c b/numpy/_core/src/multiarray/sequence.c index 4c94bb798072..ce2e2059e218 100644 --- a/numpy/_core/src/multiarray/sequence.c +++ b/numpy/_core/src/multiarray/sequence.c @@ -56,7 +56,6 @@ array_concat(PyObject *self, PyObject *other) { /* * Throw a type error, when trying to concat NDArrays - * NOTE: This error is not Thrown when running with PyPy */ PyErr_SetString(PyExc_TypeError, "Concatenation operation is not implemented for NumPy arrays, " diff --git a/numpy/_core/src/multiarray/shape.c b/numpy/_core/src/multiarray/shape.c index e766a61ed12f..a34af9f9f12b 100644 --- a/numpy/_core/src/multiarray/shape.c +++ b/numpy/_core/src/multiarray/shape.c @@ -7,22 +7,17 @@ #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" - #include "numpy/npy_math.h" #include "npy_config.h" - - - #include "arraywrap.h" #include "ctors.h" - #include "shape.h" - -#include "multiarraymodule.h" /* for interned strings */ +#include "npy_static_data.h" /* for interned strings */ #include "templ_common.h" /* for npy_mul_sizes_with_overflow */ #include "common.h" /* for convert_shape_to_string */ #include "alloc.h" +#include "refcount.h" static int _fix_unknown_dimension(PyArray_Dims *newshape, PyArrayObject *arr); @@ -31,23 +26,12 @@ static int _attempt_nocopy_reshape(PyArrayObject *self, int newnd, const npy_intp *newdims, npy_intp *newstrides, int is_f_order); -static void -_putzero(char *optr, PyObject *zero, PyArray_Descr *dtype); - -/*NUMPY_API - * Resize (reallocate data). Only works if nothing else is referencing this - * array and it is contiguous. If refcheck is 0, then the reference count is - * not checked and assumed to be 1. 
You still must own this data and have no - * weak-references and no base object. - */ -NPY_NO_EXPORT PyObject * -PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, - NPY_ORDER NPY_UNUSED(order)) +NPY_NO_EXPORT int +PyArray_Resize_int(PyArrayObject *self, PyArray_Dims *newshape, int refcheck) { npy_intp oldnbytes, newnbytes; npy_intp oldsize, newsize; int new_nd=newshape->len, k, elsize; - int refcnt; npy_intp* new_dimensions=newshape->ptr; npy_intp new_strides[NPY_MAXDIMS]; npy_intp *dimptr; @@ -56,7 +40,7 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, if (!PyArray_ISONESEGMENT(self)) { PyErr_SetString(PyExc_ValueError, "resize only works on single-segment arrays"); - return NULL; + return -1; } /* Compute total size of old and new arrays. The new size might overflow */ @@ -70,10 +54,11 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, if (new_dimensions[k] < 0) { PyErr_SetString(PyExc_ValueError, "negative dimensions not allowed"); - return NULL; + return -1; } if (npy_mul_sizes_with_overflow(&newsize, newsize, new_dimensions[k])) { - return PyErr_NoMemory(); + PyErr_NoMemory(); + return -1; } } @@ -81,14 +66,15 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, elsize = PyArray_ITEMSIZE(self); oldnbytes = oldsize * elsize; if (npy_mul_sizes_with_overflow(&newnbytes, newsize, elsize)) { - return PyErr_NoMemory(); + PyErr_NoMemory(); + return -1; } if (oldnbytes != newnbytes) { if (!(PyArray_FLAGS(self) & NPY_ARRAY_OWNDATA)) { PyErr_SetString(PyExc_ValueError, "cannot resize this array: it does not own its data"); - return NULL; + return -1; } if (PyArray_BASE(self) != NULL @@ -97,64 +83,60 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, "cannot resize an array that " "references or is referenced\n" "by another array in this way. 
Use the np.resize function."); - return NULL; + return -1; } if (refcheck) { -#ifdef PYPY_VERSION - PyErr_SetString(PyExc_ValueError, - "cannot resize an array with refcheck=True on PyPy.\n" - "Use the np.resize function or refcheck=False"); - return NULL; +#if PY_VERSION_HEX >= 0x030E00B0 + if (!PyUnstable_Object_IsUniquelyReferenced((PyObject *)self)) { #else - refcnt = Py_REFCNT(self); -#endif /* PYPY_VERSION */ - } - else { - refcnt = 1; - } - if (refcnt > 2) { - PyErr_SetString(PyExc_ValueError, - "cannot resize an array that " - "references or is referenced\n" - "by another array in this way.\n" - "Use the np.resize function or refcheck=False"); - return NULL; + if (Py_REFCNT(self) > 2) { +#endif + PyErr_SetString( + PyExc_ValueError, + "cannot resize an array that " + "references or is referenced\n" + "by another array in this way.\n" + "Use the np.resize function or refcheck=False"); + return -1; + } } - /* Reallocate space if needed - allocating 0 is forbidden */ PyObject *handler = PyArray_HANDLER(self); if (handler == NULL) { /* This can happen if someone arbitrarily sets NPY_ARRAY_OWNDATA */ PyErr_SetString(PyExc_RuntimeError, "no memory handler found but OWNDATA flag set"); - return NULL; + return -1; } + if (newnbytes < oldnbytes) { + /* Clear now removed data (if dtype has references) */ + if (PyArray_ClearBuffer( + PyArray_DESCR(self), PyArray_BYTES(self) + newnbytes, + elsize, oldsize-newsize, PyArray_ISALIGNED(self)) < 0) { + return -1; + } + } + new_data = PyDataMem_UserRENEW(PyArray_DATA(self), - newnbytes == 0 ? elsize : newnbytes, + newnbytes == 0 ? 
1 : newnbytes, handler); if (new_data == NULL) { PyErr_SetString(PyExc_MemoryError, "cannot allocate memory for array"); - return NULL; + return -1; } ((PyArrayObject_fields *)self)->data = new_data; - } - if (newnbytes > oldnbytes && PyArray_ISWRITEABLE(self)) { - /* Fill new memory with zeros */ - if (PyDataType_FLAGCHK(PyArray_DESCR(self), NPY_ITEM_REFCOUNT)) { - PyObject *zero = PyLong_FromLong(0); - char *optr; - optr = PyArray_BYTES(self) + oldnbytes; - npy_intp n_new = newsize - oldsize; - for (npy_intp i = 0; i < n_new; i++) { - _putzero((char *)optr, zero, PyArray_DESCR(self)); - optr += elsize; + if (newnbytes > oldnbytes && PyArray_ISWRITEABLE(self)) { + /* Fill new memory with zeros (PyLong zero for object arrays) */ + npy_intp stride = elsize; + npy_intp size = newsize - oldsize; + char *data = PyArray_BYTES(self) + oldnbytes; + int aligned = PyArray_ISALIGNED(self); + if (PyArray_ZeroContiguousBuffer(PyArray_DESCR(self), data, + stride, size, aligned) < 0) { + return -1; } - Py_DECREF(zero); - } - else{ - memset(PyArray_BYTES(self) + oldnbytes, 0, newnbytes - oldnbytes); } } @@ -167,7 +149,7 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, if (dimptr == NULL) { PyErr_SetString(PyExc_MemoryError, "cannot allocate memory for array"); - return NULL; + return -1; } ((PyArrayObject_fields *)self)->dimensions = dimptr; ((PyArrayObject_fields *)self)->strides = dimptr + new_nd; @@ -180,11 +162,27 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, memmove(PyArray_STRIDES(self), new_strides, new_nd*sizeof(npy_intp)); } else { - PyDimMem_FREE(((PyArrayObject_fields *)self)->dimensions); + npy_free_cache_dim_array(self); ((PyArrayObject_fields *)self)->nd = 0; ((PyArrayObject_fields *)self)->dimensions = NULL; ((PyArrayObject_fields *)self)->strides = NULL; } + return 0; +} + +/*NUMPY_API + * Resize (reallocate data). Only works if nothing else is referencing this + * array and it is contiguous. 
If refcheck is 0, then the reference count is + * not checked and assumed to be 1. You still must own this data and have no + * weak-references and no base object. + */ +NPY_NO_EXPORT PyObject * +PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, + NPY_ORDER NPY_UNUSED(order)) +{ + if (PyArray_Resize_int(self, newshape, refcheck) < 0) { + return NULL; + } Py_RETURN_NONE; } @@ -338,41 +336,6 @@ PyArray_Reshape(PyArrayObject *self, PyObject *shape) } -static void -_putzero(char *optr, PyObject *zero, PyArray_Descr *dtype) -{ - if (!PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT)) { - memset(optr, 0, dtype->elsize); - } - else if (PyDataType_HASFIELDS(dtype)) { - PyObject *key, *value, *title = NULL; - PyArray_Descr *new; - int offset; - Py_ssize_t pos = 0; - while (PyDict_Next(PyDataType_FIELDS(dtype), &pos, &key, &value)) { - if (NPY_TITLE_KEY(key, value)) { - continue; - } - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, &title)) { - return; - } - _putzero(optr + offset, zero, new); - } - } - else { - npy_intp i; - npy_intp nsize = dtype->elsize / sizeof(zero); - - for (i = 0; i < nsize; i++) { - Py_INCREF(zero); - memcpy(optr, &zero, sizeof(zero)); - optr += sizeof(zero); - } - } - return; -} - - /* * attempt to reshape an array without copying data * @@ -668,10 +631,10 @@ PyArray_SwapAxes(PyArrayObject *ap, int a1, int a2) int n = PyArray_NDIM(ap); int i; - if (check_and_adjust_axis_msg(&a1, n, npy_ma_str_axis1) < 0) { + if (check_and_adjust_axis_msg(&a1, n, npy_interned_str.axis1) < 0) { return NULL; } - if (check_and_adjust_axis_msg(&a2, n, npy_ma_str_axis2) < 0) { + if (check_and_adjust_axis_msg(&a2, n, npy_interned_str.axis2) < 0) { return NULL; } diff --git a/numpy/_core/src/multiarray/shape.h b/numpy/_core/src/multiarray/shape.h index a9b91feb0b4a..5e87116f08df 100644 --- a/numpy/_core/src/multiarray/shape.h +++ b/numpy/_core/src/multiarray/shape.h @@ -3,6 +3,12 @@ #include "conversion_utils.h" +/* + * Internal version of 
PyArray_Resize that returns -1 on error, 0 otherwise. + */ +NPY_NO_EXPORT int +PyArray_Resize_int(PyArrayObject *self, PyArray_Dims *newshape, int refcheck); + /* * Creates a sorted stride perm matching the KEEPORDER behavior * of the NpyIter object. Because this operates based on multiple diff --git a/numpy/_core/src/multiarray/strfuncs.c b/numpy/_core/src/multiarray/strfuncs.c index 8b9966373466..efe5c8a4fdd8 100644 --- a/numpy/_core/src/multiarray/strfuncs.c +++ b/numpy/_core/src/multiarray/strfuncs.c @@ -7,12 +7,9 @@ #include "numpy/arrayobject.h" #include "npy_pycompat.h" #include "npy_import.h" +#include "multiarraymodule.h" #include "strfuncs.h" -static PyObject *PyArray_StrFunction = NULL; -static PyObject *PyArray_ReprFunction = NULL; - - static void npy_PyErr_SetStringChained(PyObject *type, const char *message) { @@ -30,68 +27,44 @@ npy_PyErr_SetStringChained(PyObject *type, const char *message) NPY_NO_EXPORT void PyArray_SetStringFunction(PyObject *op, int repr) { - if (repr) { - /* Dispose of previous callback */ - Py_XDECREF(PyArray_ReprFunction); - /* Add a reference to new callback */ - Py_XINCREF(op); - /* Remember new callback */ - PyArray_ReprFunction = op; - } - else { - /* Dispose of previous callback */ - Py_XDECREF(PyArray_StrFunction); - /* Add a reference to new callback */ - Py_XINCREF(op); - /* Remember new callback */ - PyArray_StrFunction = op; - } + PyErr_SetString(PyExc_ValueError, "PyArray_SetStringFunction was removed"); } NPY_NO_EXPORT PyObject * array_repr(PyArrayObject *self) { - static PyObject *repr = NULL; - - if (PyArray_ReprFunction != NULL) { - return PyObject_CallFunctionObjArgs(PyArray_ReprFunction, self, NULL); - } - /* * We need to do a delayed import here as initialization on module load * leads to circular import problems. 
*/ - npy_cache_import("numpy._core.arrayprint", "_default_array_repr", &repr); - if (repr == NULL) { + if (npy_cache_import_runtime("numpy._core.arrayprint", "_default_array_repr", + &npy_runtime_imports._default_array_repr) == -1) { npy_PyErr_SetStringChained(PyExc_RuntimeError, "Unable to configure default ndarray.__repr__"); return NULL; } - return PyObject_CallFunctionObjArgs(repr, self, NULL); + return PyObject_CallFunctionObjArgs( + npy_runtime_imports._default_array_repr, self, NULL); } NPY_NO_EXPORT PyObject * array_str(PyArrayObject *self) { - static PyObject *str = NULL; - - if (PyArray_StrFunction != NULL) { - return PyObject_CallFunctionObjArgs(PyArray_StrFunction, self, NULL); - } - /* * We need to do a delayed import here as initialization on module load leads * to circular import problems. */ - npy_cache_import("numpy._core.arrayprint", "_default_array_str", &str); - if (str == NULL) { + if (npy_cache_import_runtime( + "numpy._core.arrayprint", "_default_array_str", + &npy_runtime_imports._default_array_str) == -1) { npy_PyErr_SetStringChained(PyExc_RuntimeError, "Unable to configure default ndarray.__str__"); return NULL; } - return PyObject_CallFunctionObjArgs(str, self, NULL); + return PyObject_CallFunctionObjArgs( + npy_runtime_imports._default_array_str, self, NULL); } diff --git a/numpy/_core/src/multiarray/stringdtype/casts.c b/numpy/_core/src/multiarray/stringdtype/casts.cpp similarity index 50% rename from numpy/_core/src/multiarray/stringdtype/casts.c rename to numpy/_core/src/multiarray/stringdtype/casts.cpp index 44ae6c92d128..20ef3013bf86 100644 --- a/numpy/_core/src/multiarray/stringdtype/casts.c +++ b/numpy/_core/src/multiarray/stringdtype/casts.cpp @@ -1,9 +1,13 @@ #define PY_SSIZE_T_CLEAN #include +#include "numpy/npy_common.h" #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE #define _UMATHMODULE +#include +#include + #include "numpy/ndarraytypes.h" #include "numpy/arrayobject.h" #include "numpy/halffloat.h" @@ 
-14,41 +18,124 @@ #include "numpyos.h" #include "umathmodule.h" #include "gil_utils.h" +#include "raii_utils.hpp" #include "static_string.h" #include "dtypemeta.h" #include "dtype.h" #include "utf8_utils.h" +#include "casts.h" + +// Get a c string representation of a type number. +static const char * +typenum_to_cstr(NPY_TYPES typenum) { + switch (typenum) { + case NPY_BOOL: + return "bool"; + case NPY_BYTE: + return "byte"; + case NPY_UBYTE: + return "unsigned byte"; + case NPY_SHORT: + return "short"; + case NPY_USHORT: + return "unsigned short"; + case NPY_INT: + return "int"; + case NPY_UINT: + return "unsigned int"; + case NPY_LONG: + return "long"; + case NPY_ULONG: + return "unsigned long"; + case NPY_LONGLONG: + return "long long"; + case NPY_ULONGLONG: + return "unsigned long long"; + case NPY_HALF: + return "half"; + case NPY_FLOAT: + return "float"; + case NPY_DOUBLE: + return "double"; + case NPY_LONGDOUBLE: + return "long double"; + case NPY_CFLOAT: + return "complex float"; + case NPY_CDOUBLE: + return "complex double"; + case NPY_CLONGDOUBLE: + return "complex long double"; + case NPY_OBJECT: + return "object"; + case NPY_STRING: + return "string"; + case NPY_UNICODE: + return "unicode"; + case NPY_VOID: + return "void"; + case NPY_DATETIME: + return "datetime"; + case NPY_TIMEDELTA: + return "timedelta"; + case NPY_CHAR: + return "char"; + case NPY_NOTYPE: + return "no type"; + case NPY_USERDEF: + return "user defined"; + case NPY_VSTRING: + return "vstring"; + default: + return "unknown"; + } +} + +static PyArray_DTypeMeta ** +get_dtypes(PyArray_DTypeMeta *dt1, PyArray_DTypeMeta *dt2) +{ + // If either argument is NULL, an error has happened; return NULL. 
+ if ((dt1 == NULL) || (dt2 == NULL)) { + return NULL; + } + PyArray_DTypeMeta **ret = (PyArray_DTypeMeta **)PyMem_Malloc(2 * sizeof(PyArray_DTypeMeta *)); + if (ret == NULL) { + return reinterpret_cast(PyErr_NoMemory()); + } + + ret[0] = dt1; + ret[1] = dt2; -#define ANY_TO_STRING_RESOLVE_DESCRIPTORS(safety) \ - static NPY_CASTING any_to_string_##safety##_resolve_descriptors( \ - PyObject *NPY_UNUSED(self), \ - PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]), \ - PyArray_Descr *given_descrs[2], PyArray_Descr *loop_descrs[2], \ - npy_intp *NPY_UNUSED(view_offset)) \ - { \ - if (given_descrs[1] == NULL) { \ - PyArray_Descr *new = \ - (PyArray_Descr *)new_stringdtype_instance( \ - NULL, 1); \ - if (new == NULL) { \ - return (NPY_CASTING)-1; \ - } \ - loop_descrs[1] = new; \ - } \ - else { \ - Py_INCREF(given_descrs[1]); \ - loop_descrs[1] = given_descrs[1]; \ - } \ - \ - Py_INCREF(given_descrs[0]); \ - loop_descrs[0] = given_descrs[0]; \ - \ - return NPY_##safety##_CASTING; \ + return ret; +} + + +template +static NPY_CASTING +any_to_string_resolve_descriptors( + PyObject *NPY_UNUSED(self), + PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]), + PyArray_Descr *given_descrs[2], PyArray_Descr *loop_descrs[2], + npy_intp *NPY_UNUSED(view_offset)) +{ + if (given_descrs[1] == NULL) { + PyArray_Descr *new_instance = + (PyArray_Descr *)new_stringdtype_instance(NULL, 1); + if (new_instance == NULL) { + return (NPY_CASTING)-1; } + loop_descrs[1] = new_instance; + } + else { + Py_INCREF(given_descrs[1]); + loop_descrs[1] = given_descrs[1]; + } -ANY_TO_STRING_RESOLVE_DESCRIPTORS(SAFE) -ANY_TO_STRING_RESOLVE_DESCRIPTORS(SAME_KIND) + Py_INCREF(given_descrs[0]); + loop_descrs[0] = given_descrs[0]; + + return safety; +} static NPY_CASTING @@ -145,13 +232,11 @@ string_to_string(PyArrayMethod_Context *context, char *const data[], } static PyType_Slot s2s_slots[] = { - {NPY_METH_resolve_descriptors, &string_to_string_resolve_descriptors}, - {NPY_METH_strided_loop, &string_to_string}, - 
{NPY_METH_unaligned_strided_loop, &string_to_string}, + {NPY_METH_resolve_descriptors, (void *)&string_to_string_resolve_descriptors}, + {NPY_METH_strided_loop, (void *)&string_to_string}, + {NPY_METH_unaligned_strided_loop, (void *)&string_to_string}, {0, NULL}}; -static char *s2s_name = "cast_StringDType_to_StringDType"; - // unicode to string static int @@ -226,12 +311,10 @@ unicode_to_string(PyArrayMethod_Context *context, char *const data[], } static PyType_Slot u2s_slots[] = {{NPY_METH_resolve_descriptors, - &any_to_string_SAME_KIND_resolve_descriptors}, - {NPY_METH_strided_loop, &unicode_to_string}, + (void *)&any_to_string_resolve_descriptors}, + {NPY_METH_strided_loop, (void *)&unicode_to_string}, {0, NULL}}; -static char *u2s_name = "cast_Unicode_to_StringDType"; - // string to unicode static NPY_CASTING @@ -271,7 +354,7 @@ load_nullable_string(const npy_packed_static_string *ps, const npy_static_string *default_string, const npy_static_string *na_name, npy_string_allocator *allocator, - char *context) + const char *context) { int is_null = NpyString_load(allocator, ps, s); if (is_null == -1) { @@ -324,12 +407,12 @@ string_to_unicode(PyArrayMethod_Context *context, char *const data[], size_t tot_n_bytes = 0; if (n_bytes == 0) { - for (int i=0; i < max_out_size; i++) { + for (size_t i=0; i < max_out_size; i++) { out[i] = (Py_UCS4)0; } } else { - int i = 0; + size_t i = 0; for (; i < max_out_size && tot_n_bytes < n_bytes; i++) { int num_bytes = utf8_char_to_ucs4_code(this_string, &out[i]); @@ -357,12 +440,10 @@ string_to_unicode(PyArrayMethod_Context *context, char *const data[], } static PyType_Slot s2u_slots[] = { - {NPY_METH_resolve_descriptors, &string_to_fixed_width_resolve_descriptors}, - {NPY_METH_strided_loop, &string_to_unicode}, + {NPY_METH_resolve_descriptors, (void *)&string_to_fixed_width_resolve_descriptors}, + {NPY_METH_strided_loop, (void *)&string_to_unicode}, {0, NULL}}; -static char *s2u_name = "cast_StringDType_to_Unicode"; - // string 
to bool static NPY_CASTING @@ -451,12 +532,10 @@ string_to_bool(PyArrayMethod_Context *context, char *const data[], } static PyType_Slot s2b_slots[] = { - {NPY_METH_resolve_descriptors, &string_to_bool_resolve_descriptors}, - {NPY_METH_strided_loop, &string_to_bool}, + {NPY_METH_resolve_descriptors, (void *)&string_to_bool_resolve_descriptors}, + {NPY_METH_strided_loop, (void *)&string_to_bool}, {0, NULL}}; -static char *s2b_name = "cast_StringDType_to_Bool"; - // bool to string static int @@ -476,7 +555,7 @@ bool_to_string(PyArrayMethod_Context *context, char *const data[], while (N--) { npy_packed_static_string *out_pss = (npy_packed_static_string *)out; - char *ret_val = NULL; + const char *ret_val = NULL; size_t size = 0; if ((npy_bool)(*in) == NPY_TRUE) { ret_val = "True"; @@ -512,12 +591,10 @@ bool_to_string(PyArrayMethod_Context *context, char *const data[], } static PyType_Slot b2s_slots[] = {{NPY_METH_resolve_descriptors, - &any_to_string_SAFE_resolve_descriptors}, - {NPY_METH_strided_loop, &bool_to_string}, + (void *)&any_to_string_resolve_descriptors}, + {NPY_METH_strided_loop, (void *)&bool_to_string}, {0, NULL}}; -static char *b2s_name = "cast_Bool_to_StringDType"; - // casts between string and (u)int dtypes @@ -529,7 +606,7 @@ load_non_nullable_string(char *in, int has_null, const npy_static_string *defaul const npy_packed_static_string *ps = (npy_packed_static_string *)in; int isnull = NpyString_load(allocator, ps, string_to_load); if (isnull == -1) { - char *msg = "Failed to load string for conversion to a non-nullable type"; + const char msg[] = "Failed to load string for conversion to a non-nullable type"; if (has_gil) { PyErr_SetString(PyExc_MemoryError, msg); @@ -541,7 +618,7 @@ load_non_nullable_string(char *in, int has_null, const npy_static_string *defaul } else if (isnull) { if (has_null) { - char *msg = "Arrays with missing data cannot be converted to a non-nullable type"; + const char msg[] = "Arrays with missing data cannot be converted 
to a non-nullable type"; if (has_gil) { PyErr_SetString(PyExc_ValueError, msg); @@ -589,27 +666,9 @@ string_to_pylong(char *in, int has_null, return pylong_value; } +template static npy_longlong -stringbuf_to_uint(char *in, npy_ulonglong *value, int has_null, - const npy_static_string *default_string, - npy_string_allocator *allocator) -{ - PyObject *pylong_value = - string_to_pylong(in, has_null, default_string, allocator); - if (pylong_value == NULL) { - return -1; - } - *value = PyLong_AsUnsignedLongLong(pylong_value); - if (*value == (unsigned long long)-1 && PyErr_Occurred()) { - Py_DECREF(pylong_value); - return -1; - } - Py_DECREF(pylong_value); - return 0; -} - -static npy_longlong -stringbuf_to_int(char *in, npy_longlong *value, int has_null, +stringbuf_to_int(char *in, NpyLongType *value, int has_null, const npy_static_string *default_string, npy_string_allocator *allocator) { @@ -618,15 +677,27 @@ stringbuf_to_int(char *in, npy_longlong *value, int has_null, if (pylong_value == NULL) { return -1; } - *value = PyLong_AsLongLong(pylong_value); - if (*value == -1 && PyErr_Occurred()) { - Py_DECREF(pylong_value); - return -1; + + if constexpr (std::is_same_v) { + *value = PyLong_AsUnsignedLongLong(pylong_value); + if (*value == (unsigned long long)-1 && PyErr_Occurred()) { + goto fail; + } + } else { + *value = PyLong_AsLongLong(pylong_value); + if (*value == -1 && PyErr_Occurred()) { + goto fail; + } } Py_DECREF(pylong_value); return 0; + +fail: + Py_DECREF(pylong_value); + return -1; } +// steals reference to obj static int pyobj_to_string(PyObject *obj, char *out, npy_string_allocator *allocator) { @@ -658,205 +729,248 @@ pyobj_to_string(PyObject *obj, char *out, npy_string_allocator *allocator) return 0; } +template +static NPY_CASTING +string_to_int_resolve_descriptors( + PyObject *NPY_UNUSED(self), + PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]), + PyArray_Descr *given_descrs[2], + PyArray_Descr *loop_descrs[2], + npy_intp *NPY_UNUSED(view_offset) +) { + if 
(given_descrs[1] == NULL) { + loop_descrs[1] = PyArray_DescrNewFromType(typenum); + } + else { + Py_INCREF(given_descrs[1]); + loop_descrs[1] = given_descrs[1]; + } + + Py_INCREF(given_descrs[0]); + loop_descrs[0] = given_descrs[0]; + + return NPY_UNSAFE_CASTING; +} + +// Example template parameters: +// NpyType: npy_int8 +// NpyLongType: npy_longlong +// typenum: NPY_BYTE +template +static int +string_to_int( + PyArrayMethod_Context * context, + char *const data[], + npy_intp const dimensions[], + npy_intp const strides[], + NpyAuxData *NPY_UNUSED(auxdata) +) { + PyArray_StringDTypeObject *descr = + ((PyArray_StringDTypeObject *)context->descriptors[0]); + npy_string_allocator *allocator = + NpyString_acquire_allocator(descr); + int has_null = descr->na_object != NULL; + const npy_static_string *default_string = &descr->default_string; + + npy_intp N = dimensions[0]; + char *in = data[0]; + NpyType *out = (NpyType *)data[1]; + + npy_intp in_stride = strides[0]; + npy_intp out_stride = strides[1] / sizeof(NpyType); + + while (N--) { + NpyLongType value; + if (stringbuf_to_int(in, &value, has_null, default_string, allocator) != 0) { + npy_gil_error(PyExc_RuntimeError, "Encountered problem converting string dtype to integer dtype."); + goto fail; + } + *out = (NpyType)value; + + // Cast back to NpyLongType to check for out-of-bounds errors + if (static_cast(*out) != value) { + // out of bounds, raise error following NEP 50 behavior + const char *errmsg = NULL; + if constexpr (std::is_same_v) { + errmsg = "Integer %llu is out of bounds for %s"; + } else if constexpr (std::is_same_v) { + errmsg = "Integer %lli is out of bounds for %s"; + } else { + errmsg = "Unrecognized integer type %i is out of bounds for %s"; + } + npy_gil_error(PyExc_OverflowError, errmsg, value, typenum_to_cstr(typenum)); + goto fail; + } + in += in_stride; + out += out_stride; + } + + NpyString_release_allocator(allocator); + return 0; + + fail: + NpyString_release_allocator(allocator); + return 
-1;
+}
+
+template <typename NpyType, typename NpyLongType, NPY_TYPES typenum>
+static PyType_Slot s2int_slots[] = {
+    {NPY_METH_resolve_descriptors,
+     (void *)&string_to_int_resolve_descriptors<typenum>},
+    {NPY_METH_strided_loop,
+     (void *)&string_to_int<NpyType, NpyLongType, typenum>},
+    {0, NULL}
+};
+
+static const char *
+make_s2type_name(NPY_TYPES typenum) {
+    const char prefix[] = "cast_StringDType_to_";
+    size_t plen = sizeof(prefix)/sizeof(char) - 1;
+
+    const char *type_name = typenum_to_cstr(typenum);
+    size_t nlen = strlen(type_name);
+
+    char *buf = (char *)PyMem_RawCalloc(sizeof(char), plen + nlen + 1);
+    if (buf == NULL) {
+        npy_gil_error(PyExc_MemoryError, "Failed allocate memory for cast");
+        return NULL;
+    }
+
+    // memcpy instead of strcpy/strncat to avoid stringop-truncation warning,
+    // since we are not including the trailing null character
+    char *p = buf;
+    memcpy(p, prefix, plen);
+    p += plen;
+    memcpy(p, type_name, nlen);
+    return buf;
+}
+
+static const char *
+make_type2s_name(NPY_TYPES typenum) {
+    const char prefix[] = "cast_";
+    size_t plen = sizeof(prefix)/sizeof(char) - 1;
+
+    const char *type_name = typenum_to_cstr(typenum);
+    size_t nlen = strlen(type_name);
+
+    const char suffix[] = "_to_StringDType";
+    // NOTE(review): was sizeof(prefix), which truncated every generated
+    // "<type>_to_StringDType" name after 5 characters of the suffix
+    size_t slen = sizeof(suffix)/sizeof(char) - 1;
+
+    char *buf = (char *)PyMem_RawCalloc(sizeof(char), plen + nlen + slen + 1);
+    // NULL check added for parity with make_s2type_name; without it the
+    // memcpy below writes through NULL on allocation failure
+    if (buf == NULL) {
+        npy_gil_error(PyExc_MemoryError, "Failed allocate memory for cast");
+        return NULL;
+    }
+
+    // memcpy instead of strcpy/strncat to avoid stringop-truncation warning,
+    // since we are not including the trailing null character
+    char *p = buf;
+    memcpy(p, prefix, plen);
+    p += plen;
+    memcpy(p, type_name, nlen);
+    p += nlen;
+    memcpy(p, suffix, slen);
+    return buf;
+}
+
+
 static int
 int_to_stringbuf(long long in, char *out, npy_string_allocator *allocator)
 {
     PyObject *pylong_val = PyLong_FromLongLong(in);
+    // steals reference to pylong_val
     return pyobj_to_string(pylong_val, out, allocator);
 }

 static int
-uint_to_stringbuf(unsigned long long in, char *out,
-                  npy_string_allocator *allocator)
+int_to_stringbuf(unsigned long long in, char *out, npy_string_allocator *allocator)
 {
     PyObject *pylong_val
= PyLong_FromUnsignedLongLong(in); + // steals reference to pylong_val return pyobj_to_string(pylong_val, out, allocator); } -#define STRING_INT_CASTS(typename, typekind, shortname, numpy_tag, \ - printf_code, npy_longtype, longtype) \ - static NPY_CASTING string_to_##typename##_resolve_descriptors( \ - PyObject *NPY_UNUSED(self), \ - PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]), \ - PyArray_Descr *given_descrs[2], PyArray_Descr *loop_descrs[2], \ - npy_intp *NPY_UNUSED(view_offset)) \ - { \ - if (given_descrs[1] == NULL) { \ - loop_descrs[1] = PyArray_DescrNewFromType(numpy_tag); \ - } \ - else { \ - Py_INCREF(given_descrs[1]); \ - loop_descrs[1] = given_descrs[1]; \ - } \ - \ - Py_INCREF(given_descrs[0]); \ - loop_descrs[0] = given_descrs[0]; \ - \ - return NPY_UNSAFE_CASTING; \ - } \ - \ - static int string_to_## \ - typename(PyArrayMethod_Context * context, char *const data[], \ - npy_intp const dimensions[], npy_intp const strides[], \ - NpyAuxData *NPY_UNUSED(auxdata)) \ - { \ - PyArray_StringDTypeObject *descr = \ - ((PyArray_StringDTypeObject *)context->descriptors[0]); \ - npy_string_allocator *allocator = \ - NpyString_acquire_allocator(descr); \ - int has_null = descr->na_object != NULL; \ - const npy_static_string *default_string = &descr->default_string; \ - \ - npy_intp N = dimensions[0]; \ - char *in = data[0]; \ - npy_##typename *out = (npy_##typename *)data[1]; \ - \ - npy_intp in_stride = strides[0]; \ - npy_intp out_stride = strides[1] / sizeof(npy_##typename); \ - \ - while (N--) { \ - npy_longtype value; \ - if (stringbuf_to_##typekind(in, &value, has_null, default_string, \ - allocator) != 0) { \ - goto fail; \ - } \ - *out = (npy_##typename)value; \ - if (*out != value) { \ - /* out of bounds, raise error following NEP 50 behavior */ \ - npy_gil_error(PyExc_OverflowError, \ - "Integer %" #printf_code \ - " is out of bounds " \ - "for " #typename, \ - value); \ - goto fail; \ - } \ - in += in_stride; \ - out += out_stride; \ - } \ - \ - 
NpyString_release_allocator(allocator); \ - return 0; \ - \ - fail: \ - NpyString_release_allocator(allocator); \ - return -1; \ - } \ - \ - static PyType_Slot s2##shortname##_slots[] = { \ - {NPY_METH_resolve_descriptors, \ - &string_to_##typename##_resolve_descriptors}, \ - {NPY_METH_strided_loop, &string_to_##typename}, \ - {0, NULL}}; \ - \ - static char *s2##shortname##_name = "cast_StringDType_to_" #typename; \ - \ - static int typename##_to_string( \ - PyArrayMethod_Context *context, char *const data[], \ - npy_intp const dimensions[], npy_intp const strides[], \ - NpyAuxData *NPY_UNUSED(auxdata)) \ - { \ - npy_intp N = dimensions[0]; \ - npy_##typename *in = (npy_##typename *)data[0]; \ - char *out = data[1]; \ - \ - npy_intp in_stride = strides[0] / sizeof(npy_##typename); \ - npy_intp out_stride = strides[1]; \ - \ - PyArray_StringDTypeObject *descr = \ - (PyArray_StringDTypeObject *)context->descriptors[1]; \ - npy_string_allocator *allocator = \ - NpyString_acquire_allocator(descr); \ - \ - while (N--) { \ - if (typekind##_to_stringbuf( \ - (longtype)*in, out, allocator) != 0) { \ - goto fail; \ - } \ - \ - in += in_stride; \ - out += out_stride; \ - } \ - \ - NpyString_release_allocator(allocator); \ - return 0; \ - \ - fail: \ - NpyString_release_allocator(allocator); \ - return -1; \ - } \ - \ - static PyType_Slot shortname##2s_slots [] = { \ - {NPY_METH_resolve_descriptors, \ - &any_to_string_SAFE_resolve_descriptors}, \ - {NPY_METH_strided_loop, &typename##_to_string}, \ - {0, NULL}}; \ - \ - static char *shortname##2s_name = "cast_" #typename "_to_StringDType"; - -#define DTYPES_AND_CAST_SPEC(shortname, typename) \ - PyArray_DTypeMeta **s2##shortname##_dtypes = get_dtypes( \ - &PyArray_StringDType, \ - &PyArray_##typename##DType); \ - \ - PyArrayMethod_Spec *StringTo##typename##CastSpec = \ - get_cast_spec( \ - s2##shortname##_name, NPY_UNSAFE_CASTING, \ - NPY_METH_REQUIRES_PYAPI, s2##shortname##_dtypes, \ - s2##shortname##_slots); \ - \ - 
PyArray_DTypeMeta **shortname##2s_dtypes = get_dtypes( \ - &PyArray_##typename##DType, \ - &PyArray_StringDType); \ - \ - PyArrayMethod_Spec *typename##ToStringCastSpec = get_cast_spec( \ - shortname##2s_name, NPY_SAFE_CASTING, \ - NPY_METH_REQUIRES_PYAPI, shortname##2s_dtypes, \ - shortname##2s_slots); - -STRING_INT_CASTS(int8, int, i8, NPY_INT8, lli, npy_longlong, long long) -STRING_INT_CASTS(int16, int, i16, NPY_INT16, lli, npy_longlong, long long) -STRING_INT_CASTS(int32, int, i32, NPY_INT32, lli, npy_longlong, long long) -STRING_INT_CASTS(int64, int, i64, NPY_INT64, lli, npy_longlong, long long) - -STRING_INT_CASTS(uint8, uint, u8, NPY_UINT8, llu, npy_ulonglong, - unsigned long long) -STRING_INT_CASTS(uint16, uint, u16, NPY_UINT16, llu, npy_ulonglong, - unsigned long long) -STRING_INT_CASTS(uint32, uint, u32, NPY_UINT32, llu, npy_ulonglong, - unsigned long long) -STRING_INT_CASTS(uint64, uint, u64, NPY_UINT64, llu, npy_ulonglong, - unsigned long long) +template +static int +type_to_string( + PyArrayMethod_Context *context, + char *const data[], + npy_intp const dimensions[], + npy_intp const strides[], + NpyAuxData *NPY_UNUSED(auxdata) +) { + npy_intp N = dimensions[0]; + NpyType *in = (NpyType *)data[0]; + char *out = data[1]; -#if NPY_SIZEOF_BYTE == NPY_SIZEOF_SHORT -// byte doesn't have a bitsized alias -STRING_INT_CASTS(byte, int, byte, NPY_BYTE, lli, npy_longlong, long long) -STRING_INT_CASTS(ubyte, uint, ubyte, NPY_UBYTE, llu, npy_ulonglong, - unsigned long long) -#endif -#if NPY_SIZEOF_SHORT == NPY_SIZEOF_INT -// short doesn't have a bitsized alias -STRING_INT_CASTS(short, int, short, NPY_SHORT, lli, npy_longlong, long long) -STRING_INT_CASTS(ushort, uint, ushort, NPY_USHORT, llu, npy_ulonglong, - unsigned long long) -#endif -#if NPY_SIZEOF_INT == NPY_SIZEOF_LONG -// int doesn't have a bitsized alias -STRING_INT_CASTS(int, int, int, NPY_INT, lli, npy_longlong, long long) -STRING_INT_CASTS(uint, uint, uint, NPY_UINT, llu, npy_longlong, long long) -#endif 
-#if NPY_SIZEOF_LONGLONG == NPY_SIZEOF_LONG -// long long doesn't have a bitsized alias -STRING_INT_CASTS(longlong, int, longlong, NPY_LONGLONG, lli, npy_longlong, - long long) -STRING_INT_CASTS(ulonglong, uint, ulonglong, NPY_ULONGLONG, llu, npy_ulonglong, - unsigned long long) -#endif + npy_intp in_stride = strides[0] / sizeof(NpyType); + npy_intp out_stride = strides[1]; + + PyArray_StringDTypeObject *descr = + (PyArray_StringDTypeObject *)context->descriptors[1]; + npy_string_allocator *allocator = + NpyString_acquire_allocator(descr); + + while (N--) { + if (int_to_stringbuf((TClongType)*in, out, allocator) != 0) { + goto fail; + } + + in += in_stride; + out += out_stride; + } + + NpyString_release_allocator(allocator); + return 0; + + fail: + NpyString_release_allocator(allocator); + return -1; +} + +template +static PyType_Slot int2s_slots[] = { + {NPY_METH_resolve_descriptors, + (void *)&any_to_string_resolve_descriptors}, + {NPY_METH_strided_loop, (void *)&type_to_string}, + {0, NULL}}; + +static PyArray_DTypeMeta ** +get_s2type_dtypes(NPY_TYPES typenum) { + return get_dtypes(&PyArray_StringDType, typenum_to_dtypemeta(typenum)); +} + +template +static PyArrayMethod_Spec * +getStringToIntCastSpec() { + return get_cast_spec( + make_s2type_name(typenum), + NPY_UNSAFE_CASTING, + NPY_METH_REQUIRES_PYAPI, + get_s2type_dtypes(typenum), + s2int_slots + ); +} + + +static PyArray_DTypeMeta ** +get_type2s_dtypes(NPY_TYPES typenum) { + return get_dtypes(typenum_to_dtypemeta(typenum), &PyArray_StringDType); +} + +template +static PyArrayMethod_Spec * +getIntToStringCastSpec() { + return get_cast_spec( + make_type2s_name(typenum), + NPY_SAFE_CASTING, + NPY_METH_REQUIRES_PYAPI, + get_type2s_dtypes(typenum), + int2s_slots + ); +} static PyObject * -string_to_pyfloat(char *in, int has_null, - const npy_static_string *default_string, - npy_string_allocator *allocator) -{ +string_to_pyfloat( + char *in, + int has_null, + const npy_static_string *default_string, + 
npy_string_allocator *allocator +) { PyObject *val_obj = non_nullable_string_to_pystring( in, has_null, default_string, allocator); if (val_obj == NULL) { @@ -867,154 +981,53 @@ string_to_pyfloat(char *in, int has_null, return pyfloat_value; } -#define STRING_TO_FLOAT_CAST(typename, shortname, isinf_name, \ - double_to_float) \ - static int string_to_## \ - typename(PyArrayMethod_Context * context, char *const data[], \ - npy_intp const dimensions[], npy_intp const strides[], \ - NpyAuxData *NPY_UNUSED(auxdata)) \ - { \ - PyArray_StringDTypeObject *descr = \ - (PyArray_StringDTypeObject *)context->descriptors[0]; \ - npy_string_allocator *allocator = NpyString_acquire_allocator(descr); \ - int has_null = (descr->na_object != NULL); \ - const npy_static_string *default_string = &descr->default_string; \ - \ - npy_intp N = dimensions[0]; \ - char *in = data[0]; \ - npy_##typename *out = (npy_##typename *)data[1]; \ - \ - npy_intp in_stride = strides[0]; \ - npy_intp out_stride = strides[1] / sizeof(npy_##typename); \ - \ - while (N--) { \ - PyObject *pyfloat_value = string_to_pyfloat( \ - in, has_null, default_string, allocator); \ - if (pyfloat_value == NULL) { \ - goto fail; \ - } \ - double dval = PyFloat_AS_DOUBLE(pyfloat_value); \ - Py_DECREF(pyfloat_value); \ - npy_##typename fval = (double_to_float)(dval); \ - \ - if (NPY_UNLIKELY(isinf_name(fval) && !(npy_isinf(dval)))) { \ - if (PyUFunc_GiveFloatingpointErrors("cast", \ - NPY_FPE_OVERFLOW) < 0) { \ - goto fail; \ - } \ - } \ - \ - *out = fval; \ - \ - in += in_stride; \ - out += out_stride; \ - } \ - \ - NpyString_release_allocator(allocator); \ - return 0; \ - fail: \ - NpyString_release_allocator(allocator); \ - return -1; \ - } \ - \ - static PyType_Slot s2##shortname##_slots[] = { \ - {NPY_METH_resolve_descriptors, \ - &string_to_##typename##_resolve_descriptors}, \ - {NPY_METH_strided_loop, &string_to_##typename}, \ - {0, NULL}}; \ - \ - static char *s2##shortname##_name = "cast_StringDType_to_" 
#typename; - -#define STRING_TO_FLOAT_RESOLVE_DESCRIPTORS(typename, npy_typename) \ - static NPY_CASTING string_to_##typename##_resolve_descriptors( \ - PyObject *NPY_UNUSED(self), \ - PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]), \ - PyArray_Descr *given_descrs[2], PyArray_Descr *loop_descrs[2], \ - npy_intp *NPY_UNUSED(view_offset)) \ - { \ - if (given_descrs[1] == NULL) { \ - loop_descrs[1] = PyArray_DescrNewFromType(NPY_##npy_typename); \ - } \ - else { \ - Py_INCREF(given_descrs[1]); \ - loop_descrs[1] = given_descrs[1]; \ - } \ - \ - Py_INCREF(given_descrs[0]); \ - loop_descrs[0] = given_descrs[0]; \ - \ - return NPY_UNSAFE_CASTING; \ - } - -#define FLOAT_TO_STRING_CAST(typename, shortname, float_to_double) \ - static int typename##_to_string( \ - PyArrayMethod_Context *context, char *const data[], \ - npy_intp const dimensions[], npy_intp const strides[], \ - NpyAuxData *NPY_UNUSED(auxdata)) \ - { \ - npy_intp N = dimensions[0]; \ - npy_##typename *in = (npy_##typename *)data[0]; \ - char *out = data[1]; \ - PyArray_Descr *float_descr = context->descriptors[0]; \ - \ - npy_intp in_stride = strides[0] / sizeof(npy_##typename); \ - npy_intp out_stride = strides[1]; \ - \ - PyArray_StringDTypeObject *descr = \ - (PyArray_StringDTypeObject *)context->descriptors[1]; \ - npy_string_allocator *allocator = NpyString_acquire_allocator(descr); \ - \ - while (N--) { \ - PyObject *scalar_val = PyArray_Scalar(in, float_descr, NULL); \ - if (pyobj_to_string(scalar_val, out, allocator) == -1) { \ - goto fail; \ - } \ - \ - in += in_stride; \ - out += out_stride; \ - } \ - \ - NpyString_release_allocator(allocator); \ - return 0; \ - fail: \ - NpyString_release_allocator(allocator); \ - return -1; \ - } \ - \ - static PyType_Slot shortname##2s_slots [] = { \ - {NPY_METH_resolve_descriptors, \ - &any_to_string_SAFE_resolve_descriptors}, \ - {NPY_METH_strided_loop, &typename##_to_string}, \ - {0, NULL}}; \ - \ - static char *shortname##2s_name = "cast_" #typename 
"_to_StringDType"; - -STRING_TO_FLOAT_RESOLVE_DESCRIPTORS(float64, DOUBLE) - +template< + typename NpyType, + NPY_TYPES typenum, + bool (*npy_is_inf)(NpyType) = nullptr, + bool (*double_is_inf)(double) = nullptr, + NpyType (*double_to_float)(double) = nullptr +> static int -string_to_float64(PyArrayMethod_Context *context, char *const data[], - npy_intp const dimensions[], npy_intp const strides[], - NpyAuxData *NPY_UNUSED(auxdata)) -{ - PyArray_StringDTypeObject *descr = (PyArray_StringDTypeObject *)context->descriptors[0]; +string_to_float( + PyArrayMethod_Context * context, + char *const data[], + npy_intp const dimensions[], + npy_intp const strides[], + NpyAuxData *NPY_UNUSED(auxdata) +) { + PyArray_StringDTypeObject *descr = + (PyArray_StringDTypeObject *)context->descriptors[0]; npy_string_allocator *allocator = NpyString_acquire_allocator(descr); - int has_null = descr->na_object != NULL; + int has_null = (descr->na_object != NULL); const npy_static_string *default_string = &descr->default_string; + npy_intp N = dimensions[0]; char *in = data[0]; - npy_float64 *out = (npy_float64 *)data[1]; + NpyType *out = (NpyType *)data[1]; npy_intp in_stride = strides[0]; - npy_intp out_stride = strides[1] / sizeof(npy_float64); + npy_intp out_stride = strides[1] / sizeof(NpyType); while (N--) { - PyObject *pyfloat_value = - string_to_pyfloat(in, has_null, default_string, allocator); + PyObject *pyfloat_value = string_to_pyfloat( + in, has_null, default_string, allocator + ); if (pyfloat_value == NULL) { goto fail; } - *out = (npy_float64)PyFloat_AS_DOUBLE(pyfloat_value); + double dval = PyFloat_AS_DOUBLE(pyfloat_value); Py_DECREF(pyfloat_value); + NpyType fval = (double_to_float)(dval); + + if (NPY_UNLIKELY(npy_is_inf(fval) && !(double_is_inf(dval)))) { + if (PyUFunc_GiveFloatingpointErrors("cast", + NPY_FPE_OVERFLOW) < 0) { + goto fail; + } + } + + *out = fval; in += in_stride; out += out_stride; @@ -1022,38 +1035,68 @@ string_to_float64(PyArrayMethod_Context 
*context, char *const data[], NpyString_release_allocator(allocator); return 0; - fail: NpyString_release_allocator(allocator); return -1; } -static PyType_Slot s2f64_slots[] = { - {NPY_METH_resolve_descriptors, &string_to_float64_resolve_descriptors}, - {NPY_METH_strided_loop, &string_to_float64}, - {0, NULL}}; - -static char *s2f64_name = "cast_StringDType_to_float64"; +// Since PyFloat is already 64bit, there's no way it can overflow, making +// that check unnecessary - which is why we have a specialized template +// for this case and not the others. +template<> +int +string_to_float( + PyArrayMethod_Context * context, + char *const data[], + npy_intp const dimensions[], + npy_intp const strides[], + NpyAuxData *NPY_UNUSED(auxdata) +) { + PyArray_StringDTypeObject *descr = + (PyArray_StringDTypeObject *)context->descriptors[0]; + npy_string_allocator *allocator = NpyString_acquire_allocator(descr); + int has_null = (descr->na_object != NULL); + const npy_static_string *default_string = &descr->default_string; -FLOAT_TO_STRING_CAST(float64, f64, double) + npy_intp N = dimensions[0]; + char *in = data[0]; + npy_float64 *out = (npy_float64 *)data[1]; -STRING_TO_FLOAT_RESOLVE_DESCRIPTORS(float32, FLOAT) -STRING_TO_FLOAT_CAST(float32, f32, npy_isinf, npy_float32) -FLOAT_TO_STRING_CAST(float32, f32, double) + npy_intp in_stride = strides[0]; + npy_intp out_stride = strides[1] / sizeof(npy_float64); -STRING_TO_FLOAT_RESOLVE_DESCRIPTORS(float16, HALF) -STRING_TO_FLOAT_CAST(float16, f16, npy_half_isinf, npy_double_to_half) -FLOAT_TO_STRING_CAST(float16, f16, npy_half_to_double) + while (N--) { + PyObject *pyfloat_value = string_to_pyfloat( + in, has_null, default_string, allocator + ); + if (pyfloat_value == NULL) { + goto fail; + } + *out = (npy_float64)PyFloat_AS_DOUBLE(pyfloat_value); + Py_DECREF(pyfloat_value); -// string to longdouble + in += in_stride; + out += out_stride; + } -STRING_TO_FLOAT_RESOLVE_DESCRIPTORS(longdouble, LONGDOUBLE); + 
NpyString_release_allocator(allocator); + return 0; +fail: + NpyString_release_allocator(allocator); + return -1; +} -static int -string_to_longdouble(PyArrayMethod_Context *context, char *const data[], - npy_intp const dimensions[], npy_intp const strides[], - NpyAuxData *NPY_UNUSED(auxdata)) -{ +// Long double types do not fit in a (64-bit) PyFloat, so we handle this +// case specially here. +template<> +int +string_to_float( + PyArrayMethod_Context *context, + char *const data[], + npy_intp const dimensions[], + npy_intp const strides[], + NpyAuxData *NPY_UNUSED(auxdata) +) { PyArray_StringDTypeObject *descr = (PyArray_StringDTypeObject *)context->descriptors[0]; npy_string_allocator *allocator = NpyString_acquire_allocator(descr); int has_null = descr->na_object != NULL; @@ -1072,7 +1115,7 @@ string_to_longdouble(PyArrayMethod_Context *context, char *const data[], } // allocate temporary null-terminated copy - char *buf = PyMem_RawMalloc(s.size + 1); + char *buf = (char *)PyMem_RawMalloc(s.size + 1); memcpy(buf, s.buf, s.size); buf[s.size] = '\0'; @@ -1082,14 +1125,19 @@ string_to_longdouble(PyArrayMethod_Context *context, char *const data[], if (errno == ERANGE) { /* strtold returns INFINITY of the correct sign. 
*/ - if (PyErr_Warn(PyExc_RuntimeWarning, - "overflow encountered in conversion from string") < 0) { + if ( + npy_gil_warning( + PyExc_RuntimeWarning, + 1, + "overflow encountered in conversion from string" + ) < 0 + ) { PyMem_RawFree(buf); goto fail; } } else if (errno || end == buf || *end) { - PyErr_Format(PyExc_ValueError, + npy_gil_error(PyExc_ValueError, "invalid literal for long double: %s (%s)", buf, strerror(errno)); @@ -1111,23 +1159,107 @@ string_to_longdouble(PyArrayMethod_Context *context, char *const data[], return -1; } -static PyType_Slot s2ld_slots[] = { - {NPY_METH_resolve_descriptors, &string_to_longdouble_resolve_descriptors}, - {NPY_METH_strided_loop, &string_to_longdouble}, +template +static NPY_CASTING +string_to_float_resolve_descriptors( + PyObject *NPY_UNUSED(self), + PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]), + PyArray_Descr *given_descrs[2], + PyArray_Descr *loop_descrs[2], + npy_intp *NPY_UNUSED(view_offset) +) { + if (given_descrs[1] == NULL) { + loop_descrs[1] = PyArray_DescrNewFromType(typenum); + } + else { + Py_INCREF(given_descrs[1]); + loop_descrs[1] = given_descrs[1]; + } + + Py_INCREF(given_descrs[0]); + loop_descrs[0] = given_descrs[0]; + + return NPY_UNSAFE_CASTING; +} + +template< + typename NpyType, + NPY_TYPES typenum, + bool (*npy_is_inf)(NpyType) = nullptr, + bool (*double_is_inf)(double) = nullptr, + NpyType (*double_to_float)(double) = nullptr +> +static PyType_Slot s2float_slots[] = { + {NPY_METH_resolve_descriptors, (void *)&string_to_float_resolve_descriptors}, + {NPY_METH_strided_loop, (void *)&string_to_float}, {0, NULL}}; -static char *s2ld_name = "cast_StringDType_to_longdouble"; +template +static int +float_to_string( + PyArrayMethod_Context *context, + char *const data[], + npy_intp const dimensions[], + npy_intp const strides[], + NpyAuxData *NPY_UNUSED(auxdata) +) { + npy_intp N = dimensions[0]; + NpyType *in = (NpyType *)data[0]; + char *out = data[1]; + PyArray_Descr *float_descr = context->descriptors[0]; 
-// longdouble to string + npy_intp in_stride = strides[0] / sizeof(NpyType); + npy_intp out_stride = strides[1]; + + PyArray_StringDTypeObject *descr = + (PyArray_StringDTypeObject *)context->descriptors[1]; + npy_string_allocator *allocator = NpyString_acquire_allocator(descr); + // borrowed reference + PyObject *na_object = descr->na_object; + + while (N--) { + PyObject *scalar_val = PyArray_Scalar(in, float_descr, NULL); + if (descr->has_nan_na) { + // check for case when scalar_val is the na_object and store a null string + int na_cmp = na_eq_cmp(scalar_val, na_object); + if (na_cmp < 0) { + Py_DECREF(scalar_val); + goto fail; + } + if (na_cmp) { + Py_DECREF(scalar_val); + if (NpyString_pack_null(allocator, (npy_packed_static_string *)out) < 0) { + PyErr_SetString(PyExc_MemoryError, + "Failed to pack null string during float " + "to string cast"); + goto fail; + } + goto next_step; + } + } + // steals reference to scalar_val + if (pyobj_to_string(scalar_val, out, allocator) == -1) { + goto fail; + } -// TODO: this is incorrect. The longdouble to unicode cast is also broken in -// the same way. To fix this we'd need an ldtoa implementation in NumPy. It's -// not in the standard library. Another option would be to use `snprintf` but we'd -// need to somehow pre-calculate the size of the result string. 
+ next_step: + in += in_stride; + out += out_stride; + } -FLOAT_TO_STRING_CAST(longdouble, ld, npy_longdouble) + NpyString_release_allocator(allocator); + return 0; +fail: + NpyString_release_allocator(allocator); + return -1; +} -// string to cfloat +template +static PyType_Slot float2s_slots [] = { + {NPY_METH_resolve_descriptors, (void *)&any_to_string_resolve_descriptors}, + {NPY_METH_strided_loop, (void *)&float_to_string}, + {0, NULL} +}; static PyObject* string_to_pycomplex(char *in, int has_null, @@ -1149,85 +1281,128 @@ string_to_pycomplex(char *in, int has_null, return pycomplex_value; } -#define STRING_TO_CFLOAT_CAST(ctype, suffix, ftype) \ - static int \ - string_to_##ctype(PyArrayMethod_Context *context, char *const data[], \ - npy_intp const dimensions[], npy_intp const strides[], \ - NpyAuxData *NPY_UNUSED(auxdata)) \ - { \ - PyArray_StringDTypeObject *descr = \ - (PyArray_StringDTypeObject *)context->descriptors[0]; \ - npy_string_allocator *allocator = NpyString_acquire_allocator(descr); \ - int has_null = descr->na_object != NULL; \ - const npy_static_string *default_string = &descr->default_string; \ - npy_intp N = dimensions[0]; \ - char *in = data[0]; \ - npy_##ctype *out = (npy_##ctype *)data[1]; \ - \ - npy_intp in_stride = strides[0]; \ - npy_intp out_stride = strides[1] / sizeof(npy_##ctype); \ - \ - while (N--) { \ - PyObject *pycomplex_value = string_to_pycomplex( \ - in, has_null, default_string, allocator); \ - \ - if (pycomplex_value == NULL) { \ - goto fail; \ - } \ - \ - Py_complex complex_value = PyComplex_AsCComplex(pycomplex_value); \ - Py_DECREF(pycomplex_value); \ - \ - if (error_converting(complex_value.real)) { \ - goto fail; \ - } \ - \ - npy_csetreal##suffix(out, (npy_##ftype) complex_value.real); \ - npy_csetimag##suffix(out, (npy_##ftype) complex_value.imag); \ - in += in_stride; \ - out += out_stride; \ - } \ - \ - NpyString_release_allocator(allocator); \ - return 0; \ - \ -fail: \ - 
NpyString_release_allocator(allocator); \ - return -1; \ - } \ - \ - static PyType_Slot s2##ctype##_slots[] = { \ - {NPY_METH_resolve_descriptors, \ - &string_to_##ctype##_resolve_descriptors}, \ - {NPY_METH_strided_loop, &string_to_##ctype}, \ - {0, NULL}}; \ - \ - static char *s2##ctype##_name = "cast_StringDType_to_" #ctype; - -STRING_TO_FLOAT_RESOLVE_DESCRIPTORS(cfloat, CFLOAT) -STRING_TO_CFLOAT_CAST(cfloat, f, float) - -// cfloat to string - -FLOAT_TO_STRING_CAST(cfloat, cfloat, npy_cfloat) - -// string to cdouble - -STRING_TO_FLOAT_RESOLVE_DESCRIPTORS(cdouble, CDOUBLE) -STRING_TO_CFLOAT_CAST(cdouble, , double) - -// cdouble to string - -FLOAT_TO_STRING_CAST(cdouble, cdouble, npy_cdouble) - -// string to clongdouble - -STRING_TO_FLOAT_RESOLVE_DESCRIPTORS(clongdouble, CLONGDOUBLE) -STRING_TO_CFLOAT_CAST(clongdouble, l, longdouble) - -// longdouble to string - -FLOAT_TO_STRING_CAST(clongdouble, clongdouble, npy_clongdouble) +template < + typename NpyComplexType, + typename NpyFloatType, + void npy_csetrealfunc(NpyComplexType*, NpyFloatType), + void npy_csetimagfunc(NpyComplexType*, NpyFloatType) +> +static int +string_to_complex_float( + PyArrayMethod_Context *context, + char *const data[], + npy_intp const dimensions[], + npy_intp const strides[], + NpyAuxData *NPY_UNUSED(auxdata) +) { + PyArray_StringDTypeObject *descr = + (PyArray_StringDTypeObject *)context->descriptors[0]; + npy_string_allocator *allocator = NpyString_acquire_allocator(descr); + int has_null = descr->na_object != NULL; + const npy_static_string *default_string = &descr->default_string; + npy_intp N = dimensions[0]; + char *in = data[0]; + NpyComplexType *out = (NpyComplexType *)data[1]; + + npy_intp in_stride = strides[0]; + npy_intp out_stride = strides[1] / sizeof(NpyComplexType); + + while (N--) { + PyObject *pycomplex_value = string_to_pycomplex( + in, has_null, default_string, allocator); + + if (pycomplex_value == NULL) { + goto fail; + } + + Py_complex complex_value = 
PyComplex_AsCComplex(pycomplex_value); + Py_DECREF(pycomplex_value); + + if (error_converting(complex_value.real)) { + goto fail; + } + + npy_csetrealfunc(out, (NpyFloatType) complex_value.real); + npy_csetimagfunc(out, (NpyFloatType) complex_value.imag); + in += in_stride; + out += out_stride; + } + + NpyString_release_allocator(allocator); + return 0; + +fail: + NpyString_release_allocator(allocator); + return -1; +} + +template < + typename NpyComplexType, + typename NpyFloatType, + NPY_TYPES typenum, + void npy_csetrealfunc(NpyComplexType*, NpyFloatType), + void npy_csetimagfunc(NpyComplexType*, NpyFloatType) +> +static PyType_Slot s2ctype_slots[] = { + {NPY_METH_resolve_descriptors, (void *)&string_to_float_resolve_descriptors}, + {NPY_METH_strided_loop, (void *)&string_to_complex_float}, + {0, NULL} +}; + + +template < + typename NpyComplexType, + typename NpyFloatType, + NPY_TYPES typenum, + void npy_csetrealfunc(NpyComplexType*, NpyFloatType), + void npy_csetimagfunc(NpyComplexType*, NpyFloatType) +> +static PyArrayMethod_Spec * +getStringToComplexCastSpec() { + return get_cast_spec( + make_s2type_name(typenum), + NPY_UNSAFE_CASTING, + NPY_METH_REQUIRES_PYAPI, + get_s2type_dtypes(typenum), + s2ctype_slots + ); +} + +template< + typename NpyType, + NPY_TYPES typenum, + bool (*npy_is_inf)(NpyType) = nullptr, + bool (*double_is_inf)(double) = nullptr, + NpyType (*double_to_float)(double) = nullptr, + NPY_ARRAYMETHOD_FLAGS flags = NPY_METH_REQUIRES_PYAPI +> +static PyArrayMethod_Spec * +getStringToFloatCastSpec( +) { + return get_cast_spec( + make_s2type_name(typenum), + NPY_UNSAFE_CASTING, + flags, + get_s2type_dtypes(typenum), + s2float_slots + ); +} + +template< + typename NpyType, + NPY_TYPES typenum, + NPY_ARRAYMETHOD_FLAGS flags = NPY_METH_REQUIRES_PYAPI +> +static PyArrayMethod_Spec * +getFloatToStringCastSpec() { + return get_cast_spec( + make_type2s_name(typenum), + NPY_SAFE_CASTING, + flags, + get_type2s_dtypes(typenum), + float2s_slots + ); +} // 
string to datetime @@ -1283,8 +1458,8 @@ string_to_datetime(PyArrayMethod_Context *context, char *const data[], npy_intp out_stride = strides[1] / sizeof(npy_datetime); npy_datetimestruct dts; - NPY_DATETIMEUNIT in_unit = -1; - PyArray_DatetimeMetaData in_meta = {0, 1}; + NPY_DATETIMEUNIT in_unit = NPY_FR_ERROR; + PyArray_DatetimeMetaData in_meta = {NPY_FR_Y, 1}; npy_bool out_special; _PyArray_LegacyDescr *dt_descr = (_PyArray_LegacyDescr *)context->descriptors[1]; @@ -1338,12 +1513,10 @@ string_to_datetime(PyArrayMethod_Context *context, char *const data[], } static PyType_Slot s2dt_slots[] = { - {NPY_METH_resolve_descriptors, - &string_to_datetime_timedelta_resolve_descriptors}, - {NPY_METH_strided_loop, &string_to_datetime}, - {0, NULL}}; - -static char *s2dt_name = "cast_StringDType_to_Datetime"; + {NPY_METH_resolve_descriptors, (void *)&string_to_datetime_timedelta_resolve_descriptors}, + {NPY_METH_strided_loop, (void *)&string_to_datetime}, + {0, NULL} +}; // datetime to string @@ -1428,12 +1601,10 @@ datetime_to_string(PyArrayMethod_Context *context, char *const data[], } static PyType_Slot dt2s_slots[] = { - {NPY_METH_resolve_descriptors, - &any_to_string_SAFE_resolve_descriptors}, - {NPY_METH_strided_loop, &datetime_to_string}, - {0, NULL}}; - -static char *dt2s_name = "cast_Datetime_to_StringDType"; + {NPY_METH_resolve_descriptors, (void *)&any_to_string_resolve_descriptors}, + {NPY_METH_strided_loop, (void *)&datetime_to_string}, + {0, NULL} +}; // string to timedelta @@ -1468,13 +1639,17 @@ string_to_timedelta(PyArrayMethod_Context *context, char *const data[], if (is_null) { if (has_null && !has_string_na) { *out = NPY_DATETIME_NAT; - goto next_step; + in += in_stride; + out += out_stride; + continue; } s = *default_string; } if (is_nat_string(&s)) { *out = NPY_DATETIME_NAT; - goto next_step; + in += in_stride; + out += out_stride; + continue; } PyObject *pystr = PyUnicode_FromStringAndSize(s.buf, s.size); @@ -1497,7 +1672,6 @@ 
string_to_timedelta(PyArrayMethod_Context *context, char *const data[], *out = (npy_timedelta)value; - next_step: in += in_stride; out += out_stride; } @@ -1511,12 +1685,10 @@ string_to_timedelta(PyArrayMethod_Context *context, char *const data[], } static PyType_Slot s2td_slots[] = { - {NPY_METH_resolve_descriptors, - &string_to_datetime_timedelta_resolve_descriptors}, - {NPY_METH_strided_loop, &string_to_timedelta}, - {0, NULL}}; - -static char *s2td_name = "cast_StringDType_to_Timedelta"; + {NPY_METH_resolve_descriptors, (void *)&string_to_datetime_timedelta_resolve_descriptors}, + {NPY_METH_strided_loop, (void *)&string_to_timedelta}, + {0, NULL} +}; // timedelta to string @@ -1574,12 +1746,10 @@ timedelta_to_string(PyArrayMethod_Context *context, char *const data[], } static PyType_Slot td2s_slots[] = { - {NPY_METH_resolve_descriptors, - &any_to_string_SAFE_resolve_descriptors}, - {NPY_METH_strided_loop, &timedelta_to_string}, - {0, NULL}}; - -static char *td2s_name = "cast_Timedelta_to_StringDType"; + {NPY_METH_resolve_descriptors, (void *)&any_to_string_resolve_descriptors}, + {NPY_METH_strided_loop, (void *)&timedelta_to_string}, + {0, NULL} +}; // string to void @@ -1669,11 +1839,10 @@ string_to_void(PyArrayMethod_Context *context, char *const data[], } static PyType_Slot s2v_slots[] = { - {NPY_METH_resolve_descriptors, &string_to_void_resolve_descriptors}, - {NPY_METH_strided_loop, &string_to_void}, - {0, NULL}}; - -static char *s2v_name = "cast_StringDType_to_Void"; + {NPY_METH_resolve_descriptors, (void *)&string_to_void_resolve_descriptors}, + {NPY_METH_strided_loop, (void *)&string_to_void}, + {0, NULL} +}; // void to string @@ -1728,12 +1897,11 @@ void_to_string(PyArrayMethod_Context *context, char *const data[], return -1; } -static PyType_Slot v2s_slots[] = {{NPY_METH_resolve_descriptors, - &any_to_string_SAME_KIND_resolve_descriptors}, - {NPY_METH_strided_loop, &void_to_string}, - {0, NULL}}; - -static char *v2s_name = "cast_Void_to_StringDType"; 
+static PyType_Slot v2s_slots[] = { + {NPY_METH_resolve_descriptors, (void *)&any_to_string_resolve_descriptors}, + {NPY_METH_strided_loop, (void *)&void_to_string}, + {0, NULL} +}; // string to bytes @@ -1743,7 +1911,8 @@ string_to_bytes(PyArrayMethod_Context *context, char *const data[], NpyAuxData *NPY_UNUSED(auxdata)) { PyArray_StringDTypeObject *descr = (PyArray_StringDTypeObject *)context->descriptors[0]; - npy_string_allocator *allocator = NpyString_acquire_allocator(descr); + np::raii::NpyStringAcquireAllocator alloc(descr); + int has_null = descr->na_object != NULL; int has_string_na = descr->has_string_na; const npy_static_string *default_string = &descr->default_string; @@ -1759,22 +1928,43 @@ string_to_bytes(PyArrayMethod_Context *context, char *const data[], const npy_packed_static_string *ps = (npy_packed_static_string *)in; npy_static_string s = {0, NULL}; if (load_nullable_string(ps, &s, has_null, has_string_na, - default_string, na_name, allocator, + default_string, na_name, alloc.allocator(), "in string to bytes cast") == -1) { - goto fail; + return -1; } for (size_t i=0; i 127) { - NPY_ALLOW_C_API_DEF; - NPY_ALLOW_C_API; + np::raii::EnsureGIL ensure_gil{}; + + PyObject *str = PyUnicode_FromStringAndSize(s.buf, s.size); + + if (str == NULL) { + PyErr_SetString( + PyExc_UnicodeEncodeError, "Invalid character encountered during unicode encoding." 
+ ); + return -1; + } + PyObject *exc = PyObject_CallFunction( - PyExc_UnicodeEncodeError, "ss#nns", "ascii", s.buf, - (Py_ssize_t)s.size, (Py_ssize_t)i, (Py_ssize_t)(i+1), "ordinal not in range(128)"); + PyExc_UnicodeEncodeError, + "sOnns", + "ascii", + str, + (Py_ssize_t)i, + (Py_ssize_t)(i+1), + "ordinal not in range(128)" + ); + + if (exc == NULL) { + Py_DECREF(str); + return -1; + } + PyErr_SetObject(PyExceptionInstance_Class(exc), exc); Py_DECREF(exc); - NPY_DISABLE_C_API; - goto fail; + Py_DECREF(str); + return -1; } } @@ -1787,23 +1977,14 @@ string_to_bytes(PyArrayMethod_Context *context, char *const data[], out += out_stride; } - NpyString_release_allocator(allocator); - return 0; - -fail: - - NpyString_release_allocator(allocator); - - return -1; } static PyType_Slot s2bytes_slots[] = { - {NPY_METH_resolve_descriptors, &string_to_fixed_width_resolve_descriptors}, - {NPY_METH_strided_loop, &string_to_bytes}, - {0, NULL}}; - -static char *s2bytes_name = "cast_StringDType_to_Bytes"; + {NPY_METH_resolve_descriptors, (void *)&string_to_fixed_width_resolve_descriptors}, + {NPY_METH_strided_loop, (void *)&string_to_bytes}, + {0, NULL} +}; // bytes to string @@ -1862,19 +2043,28 @@ bytes_to_string(PyArrayMethod_Context *context, char *const data[], static PyType_Slot bytes2s_slots[] = { - {NPY_METH_resolve_descriptors, &any_to_string_SAME_KIND_resolve_descriptors}, - {NPY_METH_strided_loop, &bytes_to_string}, - {0, NULL}}; - -static char *bytes2s_name = "cast_Bytes_to_StringDType"; - + {NPY_METH_resolve_descriptors, (void *)&any_to_string_resolve_descriptors}, + {NPY_METH_strided_loop, (void *)&bytes_to_string}, + {0, NULL} +}; + +static PyArrayMethod_Spec * +get_cast_spec( + const char *name, + NPY_CASTING casting, + NPY_ARRAYMETHOD_FLAGS flags, + PyArray_DTypeMeta **dtypes, + PyType_Slot *slots +) { + // If dtypes or slots are NULL, an error has happened; return NULL. 
+ if ((slots == NULL) || (dtypes == NULL)) { + return NULL; + } -PyArrayMethod_Spec * -get_cast_spec(const char *name, NPY_CASTING casting, - NPY_ARRAYMETHOD_FLAGS flags, PyArray_DTypeMeta **dtypes, - PyType_Slot *slots) -{ - PyArrayMethod_Spec *ret = PyMem_Malloc(sizeof(PyArrayMethod_Spec)); + PyArrayMethod_Spec *ret = (PyArrayMethod_Spec *)PyMem_Malloc(sizeof(PyArrayMethod_Spec)); + if (ret == NULL) { + return reinterpret_cast(PyErr_NoMemory()); + } ret->name = name; ret->nin = 1; @@ -1887,29 +2077,43 @@ get_cast_spec(const char *name, NPY_CASTING casting, return ret; } -PyArray_DTypeMeta ** -get_dtypes(PyArray_DTypeMeta *dt1, PyArray_DTypeMeta *dt2) -{ - PyArray_DTypeMeta **ret = PyMem_Malloc(2 * sizeof(PyArray_DTypeMeta *)); - - ret[0] = dt1; - ret[1] = dt2; - - return ret; +// Check if the argument is inf using `isinf_func`, and cast the result +// to a bool; if `isinf_func` is unspecified, use std::isinf. +// Needed to ensure the right return type for getStringToFloatCastSpec. +template +static bool +is_inf(T x) { + return std::isinf(x); +} +template +static bool +is_inf(T x) { + return static_cast(isinf_func(x)); } -PyArrayMethod_Spec ** -get_casts() -{ - char *t2t_name = s2s_name; - - PyArray_DTypeMeta **t2t_dtypes = - get_dtypes(&PyArray_StringDType, - &PyArray_StringDType); +// Cast the argument to the given type. 
+// Needed because getStringToFloatCastSpec takes a function rather than +// a type (for casting) as its double_to_float template parameter +template +static NpyType +to_float(double x) { + return static_cast(x); +} - PyArrayMethod_Spec *ThisToThisCastSpec = - get_cast_spec(t2t_name, NPY_UNSAFE_CASTING, - NPY_METH_SUPPORTS_UNALIGNED, t2t_dtypes, s2s_slots); +NPY_NO_EXPORT PyArrayMethod_Spec ** +get_casts() { + PyArray_DTypeMeta **t2t_dtypes = get_dtypes( + &PyArray_StringDType, + &PyArray_StringDType + ); + + PyArrayMethod_Spec *ThisToThisCastSpec = get_cast_spec( + make_s2type_name(NPY_VSTRING), + NPY_UNSAFE_CASTING, + NPY_METH_SUPPORTS_UNALIGNED, + t2t_dtypes, + s2s_slots + ); int num_casts = 43; @@ -1930,140 +2134,140 @@ get_casts() &PyArray_UnicodeDType, &PyArray_StringDType); PyArrayMethod_Spec *UnicodeToStringCastSpec = get_cast_spec( - u2s_name, NPY_SAME_KIND_CASTING, NPY_METH_NO_FLOATINGPOINT_ERRORS, - u2s_dtypes, u2s_slots); + make_type2s_name(NPY_UNICODE), + NPY_SAME_KIND_CASTING, + NPY_METH_NO_FLOATINGPOINT_ERRORS, + u2s_dtypes, + u2s_slots + ); PyArray_DTypeMeta **s2u_dtypes = get_dtypes( &PyArray_StringDType, &PyArray_UnicodeDType); PyArrayMethod_Spec *StringToUnicodeCastSpec = get_cast_spec( - s2u_name, NPY_SAME_KIND_CASTING, NPY_METH_NO_FLOATINGPOINT_ERRORS, - s2u_dtypes, s2u_slots); + make_s2type_name(NPY_UNICODE), + NPY_SAME_KIND_CASTING, + NPY_METH_NO_FLOATINGPOINT_ERRORS, + s2u_dtypes, + s2u_slots + ); PyArray_DTypeMeta **s2b_dtypes = get_dtypes(&PyArray_StringDType, &PyArray_BoolDType); PyArrayMethod_Spec *StringToBoolCastSpec = get_cast_spec( - s2b_name, NPY_SAME_KIND_CASTING, NPY_METH_NO_FLOATINGPOINT_ERRORS, - s2b_dtypes, s2b_slots); + make_s2type_name(NPY_BOOL), + NPY_SAME_KIND_CASTING, + NPY_METH_NO_FLOATINGPOINT_ERRORS, + s2b_dtypes, + s2b_slots + ); PyArray_DTypeMeta **b2s_dtypes = get_dtypes(&PyArray_BoolDType, &PyArray_StringDType); PyArrayMethod_Spec *BoolToStringCastSpec = get_cast_spec( - b2s_name, NPY_SAME_KIND_CASTING, 
NPY_METH_NO_FLOATINGPOINT_ERRORS, - b2s_dtypes, b2s_slots); - - DTYPES_AND_CAST_SPEC(i8, Int8) - DTYPES_AND_CAST_SPEC(i16, Int16) - DTYPES_AND_CAST_SPEC(i32, Int32) - DTYPES_AND_CAST_SPEC(i64, Int64) - DTYPES_AND_CAST_SPEC(u8, UInt8) - DTYPES_AND_CAST_SPEC(u16, UInt16) - DTYPES_AND_CAST_SPEC(u32, UInt32) - DTYPES_AND_CAST_SPEC(u64, UInt64) -#if NPY_SIZEOF_BYTE == NPY_SIZEOF_SHORT - DTYPES_AND_CAST_SPEC(byte, Byte) - DTYPES_AND_CAST_SPEC(ubyte, UByte) -#endif -#if NPY_SIZEOF_SHORT == NPY_SIZEOF_INT - DTYPES_AND_CAST_SPEC(short, Short) - DTYPES_AND_CAST_SPEC(ushort, UShort) -#endif -#if NPY_SIZEOF_INT == NPY_SIZEOF_LONG - DTYPES_AND_CAST_SPEC(int, Int) - DTYPES_AND_CAST_SPEC(uint, UInt) -#endif -#if NPY_SIZEOF_LONGLONG == NPY_SIZEOF_LONG - DTYPES_AND_CAST_SPEC(longlong, LongLong) - DTYPES_AND_CAST_SPEC(ulonglong, ULongLong) -#endif - - DTYPES_AND_CAST_SPEC(f64, Double) - DTYPES_AND_CAST_SPEC(f32, Float) - DTYPES_AND_CAST_SPEC(f16, Half) + make_type2s_name(NPY_BOOL), + NPY_SAME_KIND_CASTING, + NPY_METH_NO_FLOATINGPOINT_ERRORS, + b2s_dtypes, + b2s_slots + ); PyArray_DTypeMeta **s2dt_dtypes = get_dtypes( &PyArray_StringDType, &PyArray_DatetimeDType); PyArrayMethod_Spec *StringToDatetimeCastSpec = get_cast_spec( - s2dt_name, NPY_UNSAFE_CASTING, - NPY_METH_NO_FLOATINGPOINT_ERRORS | NPY_METH_REQUIRES_PYAPI, - s2dt_dtypes, s2dt_slots); + make_s2type_name(NPY_DATETIME), + NPY_UNSAFE_CASTING, + static_cast(NPY_METH_NO_FLOATINGPOINT_ERRORS | NPY_METH_REQUIRES_PYAPI), + s2dt_dtypes, + s2dt_slots + ); PyArray_DTypeMeta **dt2s_dtypes = get_dtypes( &PyArray_DatetimeDType, &PyArray_StringDType); PyArrayMethod_Spec *DatetimeToStringCastSpec = get_cast_spec( - dt2s_name, NPY_SAFE_CASTING, - NPY_METH_NO_FLOATINGPOINT_ERRORS | NPY_METH_REQUIRES_PYAPI, - dt2s_dtypes, dt2s_slots); + make_type2s_name(NPY_DATETIME), + NPY_SAFE_CASTING, + static_cast(NPY_METH_NO_FLOATINGPOINT_ERRORS | NPY_METH_REQUIRES_PYAPI), + dt2s_dtypes, + dt2s_slots + ); PyArray_DTypeMeta **s2td_dtypes = get_dtypes( 
&PyArray_StringDType, &PyArray_TimedeltaDType); PyArrayMethod_Spec *StringToTimedeltaCastSpec = get_cast_spec( - s2td_name, NPY_UNSAFE_CASTING, - NPY_METH_NO_FLOATINGPOINT_ERRORS | NPY_METH_REQUIRES_PYAPI, - s2td_dtypes, s2td_slots); + make_s2type_name(NPY_TIMEDELTA), + NPY_UNSAFE_CASTING, + static_cast(NPY_METH_NO_FLOATINGPOINT_ERRORS | NPY_METH_REQUIRES_PYAPI), + s2td_dtypes, + s2td_slots + ); PyArray_DTypeMeta **td2s_dtypes = get_dtypes( &PyArray_TimedeltaDType, &PyArray_StringDType); PyArrayMethod_Spec *TimedeltaToStringCastSpec = get_cast_spec( - td2s_name, NPY_SAFE_CASTING, - NPY_METH_NO_FLOATINGPOINT_ERRORS | NPY_METH_REQUIRES_PYAPI, - td2s_dtypes, td2s_slots); - - PyArray_DTypeMeta **s2ld_dtypes = get_dtypes( - &PyArray_StringDType, &PyArray_LongDoubleDType); - - PyArrayMethod_Spec *StringToLongDoubleCastSpec = get_cast_spec( - s2ld_name, NPY_UNSAFE_CASTING, NPY_METH_NO_FLOATINGPOINT_ERRORS, - s2ld_dtypes, s2ld_slots); - - PyArray_DTypeMeta **ld2s_dtypes = get_dtypes( - &PyArray_LongDoubleDType, &PyArray_StringDType); - - PyArrayMethod_Spec *LongDoubleToStringCastSpec = get_cast_spec( - ld2s_name, NPY_SAFE_CASTING, - NPY_METH_NO_FLOATINGPOINT_ERRORS | NPY_METH_REQUIRES_PYAPI, - ld2s_dtypes, ld2s_slots); - - DTYPES_AND_CAST_SPEC(cfloat, CFloat) - DTYPES_AND_CAST_SPEC(cdouble, CDouble) - DTYPES_AND_CAST_SPEC(clongdouble, CLongDouble) + make_type2s_name(NPY_TIMEDELTA), + NPY_SAFE_CASTING, + static_cast(NPY_METH_NO_FLOATINGPOINT_ERRORS | NPY_METH_REQUIRES_PYAPI), + td2s_dtypes, + td2s_slots + ); PyArray_DTypeMeta **s2v_dtypes = get_dtypes( &PyArray_StringDType, &PyArray_VoidDType); PyArrayMethod_Spec *StringToVoidCastSpec = get_cast_spec( - s2v_name, NPY_SAME_KIND_CASTING, NPY_METH_NO_FLOATINGPOINT_ERRORS, - s2v_dtypes, s2v_slots); + make_s2type_name(NPY_VOID), + NPY_SAME_KIND_CASTING, + NPY_METH_NO_FLOATINGPOINT_ERRORS, + s2v_dtypes, + s2v_slots + ); PyArray_DTypeMeta **v2s_dtypes = get_dtypes( &PyArray_VoidDType, &PyArray_StringDType); PyArrayMethod_Spec 
*VoidToStringCastSpec = get_cast_spec( - v2s_name, NPY_SAME_KIND_CASTING, NPY_METH_NO_FLOATINGPOINT_ERRORS, - v2s_dtypes, v2s_slots); + make_type2s_name(NPY_VOID), + NPY_SAME_KIND_CASTING, + NPY_METH_NO_FLOATINGPOINT_ERRORS, + v2s_dtypes, + v2s_slots + ); PyArray_DTypeMeta **s2bytes_dtypes = get_dtypes( &PyArray_StringDType, &PyArray_BytesDType); PyArrayMethod_Spec *StringToBytesCastSpec = get_cast_spec( - s2bytes_name, NPY_SAME_KIND_CASTING, NPY_METH_NO_FLOATINGPOINT_ERRORS, - s2bytes_dtypes, s2bytes_slots); + make_s2type_name(NPY_BYTE), + NPY_SAME_KIND_CASTING, + NPY_METH_NO_FLOATINGPOINT_ERRORS, + s2bytes_dtypes, + s2bytes_slots + ); PyArray_DTypeMeta **bytes2s_dtypes = get_dtypes( &PyArray_BytesDType, &PyArray_StringDType); PyArrayMethod_Spec *BytesToStringCastSpec = get_cast_spec( - bytes2s_name, NPY_SAME_KIND_CASTING, NPY_METH_NO_FLOATINGPOINT_ERRORS, - bytes2s_dtypes, bytes2s_slots); - - PyArrayMethod_Spec **casts = - PyMem_Malloc((num_casts + 1) * sizeof(PyArrayMethod_Spec *)); + make_type2s_name(NPY_BYTE), + NPY_SAME_KIND_CASTING, + NPY_METH_NO_FLOATINGPOINT_ERRORS, + bytes2s_dtypes, + bytes2s_slots + ); + + PyArrayMethod_Spec **casts = (PyArrayMethod_Spec **)PyMem_Malloc( + (num_casts + 1) * sizeof(PyArrayMethod_Spec *) + ); + if (casts == NULL) { + return reinterpret_cast(PyErr_NoMemory()); + } int cast_i = 0; @@ -2072,70 +2276,93 @@ get_casts() casts[cast_i++] = StringToUnicodeCastSpec; casts[cast_i++] = StringToBoolCastSpec; casts[cast_i++] = BoolToStringCastSpec; - casts[cast_i++] = StringToInt8CastSpec; - casts[cast_i++] = Int8ToStringCastSpec; - casts[cast_i++] = StringToInt16CastSpec; - casts[cast_i++] = Int16ToStringCastSpec; - casts[cast_i++] = StringToInt32CastSpec; - casts[cast_i++] = Int32ToStringCastSpec; - casts[cast_i++] = StringToInt64CastSpec; - casts[cast_i++] = Int64ToStringCastSpec; - casts[cast_i++] = StringToUInt8CastSpec; - casts[cast_i++] = UInt8ToStringCastSpec; - casts[cast_i++] = StringToUInt16CastSpec; - casts[cast_i++] = 
UInt16ToStringCastSpec; - casts[cast_i++] = StringToUInt32CastSpec; - casts[cast_i++] = UInt32ToStringCastSpec; - casts[cast_i++] = StringToUInt64CastSpec; - casts[cast_i++] = UInt64ToStringCastSpec; + + casts[cast_i++] = getStringToIntCastSpec(); + casts[cast_i++] = getStringToIntCastSpec(); + casts[cast_i++] = getStringToIntCastSpec(); + casts[cast_i++] = getStringToIntCastSpec(); + casts[cast_i++] = getIntToStringCastSpec(); + casts[cast_i++] = getIntToStringCastSpec(); + casts[cast_i++] = getIntToStringCastSpec(); + casts[cast_i++] = getIntToStringCastSpec(); + + casts[cast_i++] = getStringToIntCastSpec(); + casts[cast_i++] = getStringToIntCastSpec(); + casts[cast_i++] = getStringToIntCastSpec(); + casts[cast_i++] = getStringToIntCastSpec(); + casts[cast_i++] = getIntToStringCastSpec(); + casts[cast_i++] = getIntToStringCastSpec(); + casts[cast_i++] = getIntToStringCastSpec(); + casts[cast_i++] = getIntToStringCastSpec(); + #if NPY_SIZEOF_BYTE == NPY_SIZEOF_SHORT - casts[cast_i++] = StringToByteCastSpec; - casts[cast_i++] = ByteToStringCastSpec; - casts[cast_i++] = StringToUByteCastSpec; - casts[cast_i++] = UByteToStringCastSpec; + casts[cast_i++] = getStringToIntCastSpec(); + casts[cast_i++] = getStringToIntCastSpec(); + casts[cast_i++] = getIntToStringCastSpec(); + casts[cast_i++] = getIntToStringCastSpec(); #endif #if NPY_SIZEOF_SHORT == NPY_SIZEOF_INT - casts[cast_i++] = StringToShortCastSpec; - casts[cast_i++] = ShortToStringCastSpec; - casts[cast_i++] = StringToUShortCastSpec; - casts[cast_i++] = UShortToStringCastSpec; + casts[cast_i++] = getStringToIntCastSpec(); + casts[cast_i++] = getStringToIntCastSpec(); + casts[cast_i++] = getIntToStringCastSpec(); + casts[cast_i++] = getIntToStringCastSpec(); #endif #if NPY_SIZEOF_INT == NPY_SIZEOF_LONG - casts[cast_i++] = StringToIntCastSpec; - casts[cast_i++] = IntToStringCastSpec; - casts[cast_i++] = StringToUIntCastSpec; - casts[cast_i++] = UIntToStringCastSpec; + casts[cast_i++] = getStringToIntCastSpec(); + 
casts[cast_i++] = getStringToIntCastSpec(); + casts[cast_i++] = getIntToStringCastSpec(); + casts[cast_i++] = getIntToStringCastSpec(); #endif #if NPY_SIZEOF_LONGLONG == NPY_SIZEOF_LONG - casts[cast_i++] = StringToLongLongCastSpec; - casts[cast_i++] = LongLongToStringCastSpec; - casts[cast_i++] = StringToULongLongCastSpec; - casts[cast_i++] = ULongLongToStringCastSpec; + casts[cast_i++] = getStringToIntCastSpec(); + casts[cast_i++] = getStringToIntCastSpec(); + casts[cast_i++] = getIntToStringCastSpec(); + casts[cast_i++] = getIntToStringCastSpec(); #endif - casts[cast_i++] = StringToDoubleCastSpec; - casts[cast_i++] = DoubleToStringCastSpec; - casts[cast_i++] = StringToFloatCastSpec; - casts[cast_i++] = FloatToStringCastSpec; - casts[cast_i++] = StringToHalfCastSpec; - casts[cast_i++] = HalfToStringCastSpec; + + casts[cast_i++] = getStringToFloatCastSpec, is_inf, npy_double_to_half>(); + casts[cast_i++] = getStringToFloatCastSpec, is_inf, to_float>(); + casts[cast_i++] = getFloatToStringCastSpec(); + casts[cast_i++] = getFloatToStringCastSpec(); + + // Special handling for f64 and longdouble types because they don't fit in a PyFloat + casts[cast_i++] = getStringToFloatCastSpec(); + casts[cast_i++] = getFloatToStringCastSpec(); + + // TODO: this is incorrect. The longdouble to unicode cast is also broken in + // the same way. To fix this we'd need an ldtoa implementation in NumPy. It's + // not in the standard library. Another option would be to use `snprintf` but we'd + // need to somehow pre-calculate the size of the result string. + // + // TODO: Add a concrete implementation to properly handle 80-bit long doubles on Linux. 
+ casts[cast_i++] = getStringToFloatCastSpec(NPY_METH_NO_FLOATINGPOINT_ERRORS | NPY_METH_REQUIRES_PYAPI)>(); + casts[cast_i++] = getFloatToStringCastSpec(NPY_METH_NO_FLOATINGPOINT_ERRORS | NPY_METH_REQUIRES_PYAPI)>(); + + casts[cast_i++] = getStringToComplexCastSpec(); + casts[cast_i++] = getStringToComplexCastSpec(); + casts[cast_i++] = getStringToComplexCastSpec(); + casts[cast_i++] = getFloatToStringCastSpec(); + casts[cast_i++] = getFloatToStringCastSpec(); + casts[cast_i++] = getFloatToStringCastSpec(); + casts[cast_i++] = StringToDatetimeCastSpec; casts[cast_i++] = DatetimeToStringCastSpec; casts[cast_i++] = StringToTimedeltaCastSpec; casts[cast_i++] = TimedeltaToStringCastSpec; - casts[cast_i++] = StringToLongDoubleCastSpec; - casts[cast_i++] = LongDoubleToStringCastSpec; - casts[cast_i++] = StringToCFloatCastSpec; - casts[cast_i++] = CFloatToStringCastSpec; - casts[cast_i++] = StringToCDoubleCastSpec; - casts[cast_i++] = CDoubleToStringCastSpec; - casts[cast_i++] = StringToCLongDoubleCastSpec; - casts[cast_i++] = CLongDoubleToStringCastSpec; casts[cast_i++] = StringToVoidCastSpec; casts[cast_i++] = VoidToStringCastSpec; casts[cast_i++] = StringToBytesCastSpec; casts[cast_i++] = BytesToStringCastSpec; casts[cast_i++] = NULL; + // Check that every cast spec is valid + if (PyErr_Occurred() != NULL) { + return NULL; + } + for (int i = 0; idescriptors[0]; + + int ret = _compare((void *)a, (void *)b, sdescr, sdescr); + return ret; +} + // PyArray_ArgFunc // The max element is the one with the highest unicode code point. 
int @@ -623,11 +639,16 @@ PyArray_Descr * stringdtype_finalize_descr(PyArray_Descr *dtype) { PyArray_StringDTypeObject *sdtype = (PyArray_StringDTypeObject *)dtype; + // acquire the allocator lock in case the descriptor we want to finalize + // is shared between threads, see gh-28813 + npy_string_allocator *allocator = NpyString_acquire_allocator(sdtype); if (sdtype->array_owned == 0) { sdtype->array_owned = 1; + NpyString_release_allocator(allocator); Py_INCREF(dtype); return dtype; } + NpyString_release_allocator(allocator); PyArray_StringDTypeObject *ret = (PyArray_StringDTypeObject *)new_stringdtype_instance( sdtype->na_object, sdtype->coerce); ret->array_owned = 1; @@ -652,6 +673,111 @@ static PyType_Slot PyArray_StringDType_Slots[] = { {0, NULL}}; +/* + * Wrap the sort loop to acquire/release the string allocator, + * and pick the correct internal implementation. + */ +static int +stringdtype_wrap_sort_loop( + PyArrayMethod_Context *context, + char *const *data, const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *transferdata) +{ + PyArray_StringDTypeObject *sdescr = + (PyArray_StringDTypeObject *)context->descriptors[0]; + + npy_string_allocator *allocator = NpyString_acquire_allocator(sdescr); + int ret = npy_default_sort_loop(context, data, dimensions, strides, transferdata); + NpyString_release_allocator(allocator); + return ret; +} + +/* + * This is currently required even though the default implementation would work, + * because the output, though enforced to be equal to the input, is parametric. 
+ */ +static NPY_CASTING +stringdtype_sort_resolve_descriptors( + PyArrayMethodObject *method, + PyArray_DTypeMeta *const *dtypes, + PyArray_Descr *const *input_descrs, + PyArray_Descr **output_descrs, + npy_intp *view_offset) +{ + output_descrs[0] = NPY_DT_CALL_ensure_canonical(input_descrs[0]); + if (NPY_UNLIKELY(output_descrs[0] == NULL)) { + return -1; + } + output_descrs[1] = NPY_DT_CALL_ensure_canonical(input_descrs[1]); + if (NPY_UNLIKELY(output_descrs[1] == NULL)) { + Py_XDECREF(output_descrs[0]); + return -1; + } + + return method->casting; +} + +static int +stringdtype_wrap_argsort_loop( + PyArrayMethod_Context *context, + char *const *data, const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *transferdata) +{ + PyArray_StringDTypeObject *sdescr = + (PyArray_StringDTypeObject *)context->descriptors[0]; + + npy_string_allocator *allocator = NpyString_acquire_allocator(sdescr); + int ret = npy_default_argsort_loop(context, data, dimensions, strides, transferdata); + NpyString_release_allocator(allocator); + return ret; +} + +static int +stringdtype_get_sort_loop( + PyArrayMethod_Context *context, + int aligned, int move_references, + const npy_intp *strides, + PyArrayMethod_StridedLoop **out_loop, + NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags) +{ + PyArrayMethod_SortParameters *parameters = (PyArrayMethod_SortParameters *)context->parameters; + *flags |= NPY_METH_NO_FLOATINGPOINT_ERRORS; + + if ((parameters->flags == NPY_SORT_STABLE) + || parameters->flags == NPY_SORT_DEFAULT) { + *out_loop = (PyArrayMethod_StridedLoop *)stringdtype_wrap_sort_loop; + } + else { + PyErr_SetString(PyExc_RuntimeError, "unsupported sort kind"); + return -1; + } + return 0; +} + +static int +stringdtype_get_argsort_loop( + PyArrayMethod_Context *context, + int aligned, int move_references, + const npy_intp *strides, + PyArrayMethod_StridedLoop **out_loop, + NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags) +{ + 
PyArrayMethod_SortParameters *parameters = (PyArrayMethod_SortParameters *)context->parameters; + *flags |= NPY_METH_NO_FLOATINGPOINT_ERRORS; + + if (parameters->flags == NPY_SORT_STABLE + || parameters->flags == NPY_SORT_DEFAULT) { + *out_loop = (PyArrayMethod_StridedLoop *)stringdtype_wrap_argsort_loop; + } + else { + PyErr_SetString(PyExc_RuntimeError, "unsupported sort kind"); + return -1; + } + return 0; +} + static PyObject * stringdtype_new(PyTypeObject *NPY_UNUSED(cls), PyObject *args, PyObject *kwds) { @@ -707,8 +833,6 @@ stringdtype_repr(PyArray_StringDTypeObject *self) return ret; } -static PyObject *_convert_to_stringdtype_kwargs = NULL; - // implementation of __reduce__ magic method to reconstruct a StringDType // object from the serialized data in the pickle. Uses the python // _convert_to_stringdtype_kwargs for convenience because this isn't @@ -716,19 +840,21 @@ static PyObject *_convert_to_stringdtype_kwargs = NULL; static PyObject * stringdtype__reduce__(PyArray_StringDTypeObject *self, PyObject *NPY_UNUSED(args)) { - npy_cache_import("numpy._core._internal", "_convert_to_stringdtype_kwargs", - &_convert_to_stringdtype_kwargs); - - if (_convert_to_stringdtype_kwargs == NULL) { + if (npy_cache_import_runtime( + "numpy._core._internal", "_convert_to_stringdtype_kwargs", + &npy_runtime_imports._convert_to_stringdtype_kwargs) == -1) { return NULL; } if (self->na_object != NULL) { - return Py_BuildValue("O(iO)", _convert_to_stringdtype_kwargs, - self->coerce, self->na_object); + return Py_BuildValue( + "O(iO)", npy_runtime_imports._convert_to_stringdtype_kwargs, + self->coerce, self->na_object); } - return Py_BuildValue("O(i)", _convert_to_stringdtype_kwargs, self->coerce); + return Py_BuildValue( + "O(i)", npy_runtime_imports._convert_to_stringdtype_kwargs, + self->coerce); } static PyMethodDef PyArray_StringDType_methods[] = { @@ -771,11 +897,9 @@ PyArray_StringDType_richcompare(PyObject *self, PyObject *other, int op) } if ((op == Py_EQ && eq) || 
(op == Py_NE && !eq)) { - Py_INCREF(Py_True); - return Py_True; + Py_RETURN_TRUE; } - Py_INCREF(Py_False); - return Py_False; + Py_RETURN_FALSE; } static Py_hash_t @@ -818,6 +942,62 @@ PyArray_DTypeMeta PyArray_StringDType = { /* rest, filled in during DTypeMeta initialization */ }; +NPY_NO_EXPORT int +init_stringdtype_sorts(void) +{ + PyArray_DTypeMeta *stringdtype = &PyArray_StringDType; + + PyArray_DTypeMeta *sort_dtypes[2] = {stringdtype, stringdtype}; + PyType_Slot sort_slots[4] = { + {NPY_METH_resolve_descriptors, &stringdtype_sort_resolve_descriptors}, + {NPY_METH_get_loop, &stringdtype_get_sort_loop}, + {_NPY_METH_static_data, &_sort_compare}, + {0, NULL} + }; + PyArrayMethod_Spec sort_spec = { + .name = "stringdtype_sort", + .nin = 1, + .nout = 1, + .dtypes = sort_dtypes, + .slots = sort_slots, + .flags = NPY_METH_NO_FLOATINGPOINT_ERRORS, + }; + + PyBoundArrayMethodObject *sort_method = PyArrayMethod_FromSpec_int( + &sort_spec, 1); + if (sort_method == NULL) { + return -1; + } + NPY_DT_SLOTS(stringdtype)->sort_meth = sort_method->method; + Py_INCREF(sort_method->method); + Py_DECREF(sort_method); + + PyArray_DTypeMeta *argsort_dtypes[2] = {stringdtype, &PyArray_IntpDType}; + PyType_Slot argsort_slots[3] = { + {NPY_METH_get_loop, &stringdtype_get_argsort_loop}, + {_NPY_METH_static_data, &_sort_compare}, + {0, NULL} + }; + PyArrayMethod_Spec argsort_spec = { + .name = "stringdtype_argsort", + .nin = 1, + .nout = 1, + .dtypes = argsort_dtypes, + .slots = argsort_slots, + .flags = NPY_METH_NO_FLOATINGPOINT_ERRORS, + }; + + PyBoundArrayMethodObject *argsort_method = PyArrayMethod_FromSpec_int( + &argsort_spec, 1); + if (argsort_method == NULL) { + return -1; + } + NPY_DT_SLOTS(stringdtype)->argsort_meth = argsort_method->method; + Py_INCREF(argsort_method->method); + Py_DECREF(argsort_method); + return 0; +} + NPY_NO_EXPORT int init_string_dtype(void) { @@ -842,23 +1022,31 @@ init_string_dtype(void) return -1; } - PyArray_Descr *singleton = - 
NPY_DT_CALL_default_descr(&PyArray_StringDType); + PyArray_StringDTypeObject *singleton = + (PyArray_StringDTypeObject *)NPY_DT_CALL_default_descr(&PyArray_StringDType); if (singleton == NULL) { return -1; } - PyArray_StringDType.singleton = singleton; + // never associate the singleton with an array + singleton->array_owned = 1; + + PyArray_StringDType.singleton = (PyArray_Descr *)singleton; PyArray_StringDType.type_num = NPY_VSTRING; for (int i = 0; PyArray_StringDType_casts[i] != NULL; i++) { PyMem_Free(PyArray_StringDType_casts[i]->dtypes); + PyMem_RawFree((void *)PyArray_StringDType_casts[i]->name); PyMem_Free(PyArray_StringDType_casts[i]); } PyMem_Free(PyArray_StringDType_casts); + if (init_stringdtype_sorts() < 0) { + return -1; + } + return 0; } diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.h b/numpy/_core/src/multiarray/stringdtype/dtype.h index 2c2719602c32..9baad65d5c88 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.h +++ b/numpy/_core/src/multiarray/stringdtype/dtype.h @@ -52,6 +52,9 @@ _eq_comparison(int scoerce, int ocoerce, PyObject *sna, PyObject *ona); NPY_NO_EXPORT int stringdtype_compatible_na(PyObject *na1, PyObject *na2, PyObject **out_na); +NPY_NO_EXPORT int +na_eq_cmp(PyObject *a, PyObject *b); + #ifdef __cplusplus } #endif diff --git a/numpy/_core/src/multiarray/stringdtype/static_string.c b/numpy/_core/src/multiarray/stringdtype/static_string.c index c9b5620211dc..c437fab2d336 100644 --- a/numpy/_core/src/multiarray/stringdtype/static_string.c +++ b/numpy/_core/src/multiarray/stringdtype/static_string.c @@ -17,9 +17,6 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE -// work around Python 3.10 and earlier issue, see -// the commit message of 82fd2b8 for more details -// also needed for the allocator mutex #define PY_SSIZE_T_CLEAN #include @@ -131,7 +128,11 @@ struct npy_string_allocator { npy_string_free_func free; npy_string_realloc_func realloc; npy_string_arena arena; +#if PY_VERSION_HEX < 
0x30d00b3 PyThread_type_lock *allocator_lock; +#else + PyMutex allocator_lock; +#endif }; static void @@ -154,8 +155,6 @@ vstring_buffer(npy_string_arena *arena, _npy_static_string_u *string) return (char *)((size_t)arena->buffer + string->vstring.offset); } -#define ARENA_EXPAND_FACTOR 1.25 - static char * arena_malloc(npy_string_arena *arena, npy_string_realloc_func r, size_t size) { @@ -167,24 +166,17 @@ arena_malloc(npy_string_arena *arena, npy_string_realloc_func r, size_t size) else { string_storage_size = size + sizeof(size_t); } - if ((arena->size - arena->cursor) <= string_storage_size) { - // realloc the buffer so there is enough room - // first guess is to double the size of the buffer - size_t newsize; - if (arena->size == 0) { - newsize = string_storage_size; - } - else if (((ARENA_EXPAND_FACTOR * arena->size) - arena->cursor) > - string_storage_size) { - newsize = ARENA_EXPAND_FACTOR * arena->size; + if ((arena->size - arena->cursor) < string_storage_size) { + size_t minsize = arena->cursor + string_storage_size; + if (minsize < arena->cursor) { + return NULL; // overflow means out of memory } - else { - newsize = arena->size + string_storage_size; - } - if ((arena->cursor + size) >= newsize) { - // need extra room beyond the expansion factor, leave some padding - newsize = ARENA_EXPAND_FACTOR * (arena->cursor + size); + // Allocate 25% more than needed for this string. 
+ size_t newsize = minsize + minsize / 4; + if (newsize < minsize) { + return NULL; // overflow means out of memory } + // passing a NULL buffer to realloc is the same as malloc char *newbuf = r(arena->buffer, newsize); if (newbuf == NULL) { @@ -245,18 +237,22 @@ NpyString_new_allocator(npy_string_malloc_func m, npy_string_free_func f, if (allocator == NULL) { return NULL; } +#if PY_VERSION_HEX < 0x30d00b3 PyThread_type_lock *allocator_lock = PyThread_allocate_lock(); if (allocator_lock == NULL) { f(allocator); PyErr_SetString(PyExc_MemoryError, "Unable to allocate thread lock"); return NULL; } + allocator->allocator_lock = allocator_lock; +#else + memset(&allocator->allocator_lock, 0, sizeof(PyMutex)); +#endif allocator->malloc = m; allocator->free = f; allocator->realloc = r; // arena buffer gets allocated in arena_malloc allocator->arena = NEW_ARENA; - allocator->allocator_lock = allocator_lock; return allocator; } @@ -269,9 +265,11 @@ NpyString_free_allocator(npy_string_allocator *allocator) if (allocator->arena.buffer != NULL) { f(allocator->arena.buffer); } +#if PY_VERSION_HEX < 0x30d00b3 if (allocator->allocator_lock != NULL) { PyThread_free_lock(allocator->allocator_lock); } +#endif f(allocator); } @@ -288,9 +286,13 @@ NpyString_free_allocator(npy_string_allocator *allocator) NPY_NO_EXPORT npy_string_allocator * NpyString_acquire_allocator(const PyArray_StringDTypeObject *descr) { +#if PY_VERSION_HEX < 0x30d00b3 if (!PyThread_acquire_lock(descr->allocator->allocator_lock, NOWAIT_LOCK)) { PyThread_acquire_lock(descr->allocator->allocator_lock, WAIT_LOCK); } +#else + PyMutex_Lock(&descr->allocator->allocator_lock); +#endif return descr->allocator; } @@ -358,7 +360,11 @@ NpyString_acquire_allocators(size_t n_descriptors, NPY_NO_EXPORT void NpyString_release_allocator(npy_string_allocator *allocator) { +#if PY_VERSION_HEX < 0x30d00b3 PyThread_release_lock(allocator->allocator_lock); +#else + PyMutex_Unlock(&allocator->allocator_lock); +#endif } /*NUMPY_API @@ 
-389,7 +395,7 @@ NpyString_release_allocators(size_t length, npy_string_allocator *allocators[]) } } -static const char * const EMPTY_STRING = ""; +static const char EMPTY_STRING[] = ""; /*NUMPY_API * Extract the packed contents of *packed_string* into *unpacked_string*. @@ -463,7 +469,7 @@ heap_or_arena_allocate(npy_string_allocator *allocator, if (*flags == 0) { // string isn't previously allocated, so add to existing arena allocation char *ret = arena_malloc(arena, allocator->realloc, sizeof(char) * size); - if (size < NPY_MEDIUM_STRING_MAX_SIZE) { + if (size <= NPY_MEDIUM_STRING_MAX_SIZE) { *flags = NPY_STRING_INITIALIZED; } else { diff --git a/numpy/_core/src/multiarray/stringdtype/utf8_utils.c b/numpy/_core/src/multiarray/stringdtype/utf8_utils.c index 2bbbb0caa6ba..b40d23841471 100644 --- a/numpy/_core/src/multiarray/stringdtype/utf8_utils.c +++ b/numpy/_core/src/multiarray/stringdtype/utf8_utils.c @@ -55,19 +55,6 @@ find_previous_utf8_character(const unsigned char *c, size_t nchar) return c; } -NPY_NO_EXPORT int -num_bytes_for_utf8_character(const unsigned char *c) { - if (c[0] <= 0x7F) { - return 1; - } - else if (c[0] <= 0xDF) { - return 2; - } - else if (c[0] <= 0xEF) { - return 3; - } - return 4; -} NPY_NO_EXPORT int num_utf8_bytes_for_codepoint(uint32_t code) diff --git a/numpy/_core/src/multiarray/stringdtype/utf8_utils.h b/numpy/_core/src/multiarray/stringdtype/utf8_utils.h index a2c231bf57f5..7901afb02bed 100644 --- a/numpy/_core/src/multiarray/stringdtype/utf8_utils.h +++ b/numpy/_core/src/multiarray/stringdtype/utf8_utils.h @@ -8,8 +8,16 @@ extern "C" { NPY_NO_EXPORT size_t utf8_char_to_ucs4_code(const unsigned char *c, Py_UCS4 *code); -NPY_NO_EXPORT int -num_bytes_for_utf8_character(const unsigned char *c); +static inline int num_bytes_for_utf8_character(const unsigned char *c) +{ + // adapted from https://github.com/skeeto/branchless-utf8 + // the first byte of a UTF-8 character encodes the length of the character + static const char 
LENGTHS_LUT[] = { + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 3, 3, 4, 0 + }; + return LENGTHS_LUT[c[0] >> 3]; +} NPY_NO_EXPORT const unsigned char* find_previous_utf8_character(const unsigned char *c, size_t nchar); diff --git a/numpy/_core/src/multiarray/temp_elide.c b/numpy/_core/src/multiarray/temp_elide.c index 662a2fa52b06..6a26ee1f7485 100644 --- a/numpy/_core/src/multiarray/temp_elide.c +++ b/numpy/_core/src/multiarray/temp_elide.c @@ -5,6 +5,7 @@ #include #include "npy_config.h" +#include "npy_pycompat.h" #include "numpy/arrayobject.h" #define NPY_NUMBER_MAX(a, b) ((a) > (b) ? (a) : (b)) @@ -58,7 +59,7 @@ * supported too by using the appropriate Windows APIs. */ -#if defined HAVE_BACKTRACE && defined HAVE_DLFCN_H && ! defined PYPY_VERSION +#if defined HAVE_BACKTRACE && defined HAVE_DLFCN_H #include @@ -109,6 +110,23 @@ find_addr(void * addresses[], npy_intp naddr, void * addr) return 0; } +static int +check_unique_temporary(PyObject *lhs) +{ +#if PY_VERSION_HEX == 0x030E00A7 +#error "NumPy is broken on CPython 3.14.0a7, please update to a newer version" +#elif PY_VERSION_HEX >= 0x030E00B1 + // Python 3.14 changed the semantics for reference counting temporaries + // see https://github.com/python/cpython/issues/133164 + return PyUnstable_Object_IsUniqueReferencedTemporary(lhs); +#else + // equivalent to Py_REFCNT(lhs) == 1 except on 3.13t + // we need to use the backport on 3.13t because + // this function was first exposed in 3.14 + return PyUnstable_Object_IsUniquelyReferenced(lhs); +#endif +} + static int check_callers(int * cannot) { @@ -290,7 +308,8 @@ can_elide_temp(PyObject *olhs, PyObject *orhs, int *cannot) * array of a basic type, own its data and size larger than threshold */ PyArrayObject *alhs = (PyArrayObject *)olhs; - if (Py_REFCNT(olhs) != 1 || !PyArray_CheckExact(olhs) || + if (!check_unique_temporary(olhs) || + !PyArray_CheckExact(olhs) || !PyArray_ISNUMBER(alhs) || !PyArray_CHKFLAGS(alhs, 
NPY_ARRAY_OWNDATA) || !PyArray_ISWRITEABLE(alhs) || @@ -368,7 +387,8 @@ NPY_NO_EXPORT int can_elide_temp_unary(PyArrayObject * m1) { int cannot; - if (Py_REFCNT(m1) != 1 || !PyArray_CheckExact(m1) || + if (!check_unique_temporary((PyObject *)m1) || + !PyArray_CheckExact(m1) || !PyArray_ISNUMBER(m1) || !PyArray_CHKFLAGS(m1, NPY_ARRAY_OWNDATA) || !PyArray_ISWRITEABLE(m1) || diff --git a/numpy/_core/src/multiarray/textreading/conversions.c b/numpy/_core/src/multiarray/textreading/conversions.c index e9bea72e0bd1..692b67a95264 100644 --- a/numpy/_core/src/multiarray/textreading/conversions.c +++ b/numpy/_core/src/multiarray/textreading/conversions.c @@ -13,6 +13,7 @@ #include "conversions.h" #include "str_to_int.h" +#include "alloc.h" #include "array_coercion.h" @@ -63,20 +64,13 @@ double_from_ucs4( return -1; /* empty or only whitespace: not a floating point number */ } - /* We convert to ASCII for the Python parser, use stack if small: */ - char stack_buf[128]; - char *heap_buf = NULL; - char *ascii = stack_buf; - size_t str_len = end - str + 1; - if (str_len > 128) { - heap_buf = PyMem_MALLOC(str_len); - if (heap_buf == NULL) { - PyErr_NoMemory(); - return -1; - } - ascii = heap_buf; + /* We convert to ASCII for the Python parser, use stack if small: */ + NPY_ALLOC_WORKSPACE(ascii, char, 128, str_len); + if (ascii == NULL) { + return -1; } + char *c = ascii; for (; str < end; str++, c++) { if (NPY_UNLIKELY(*str >= 128)) { @@ -93,7 +87,7 @@ double_from_ucs4( /* Rewind `end` to the first UCS4 character not parsed: */ end = end - (c - end_parsed); - PyMem_FREE(heap_buf); + npy_free_workspace(ascii); if (*result == -1. 
&& PyErr_Occurred()) { return -1; diff --git a/numpy/_core/src/multiarray/textreading/conversions.h b/numpy/_core/src/multiarray/textreading/conversions.h index 09f2510413b5..e30b28a9a7af 100644 --- a/numpy/_core/src/multiarray/textreading/conversions.h +++ b/numpy/_core/src/multiarray/textreading/conversions.h @@ -1,12 +1,12 @@ #ifndef NUMPY_CORE_SRC_MULTIARRAY_TEXTREADING_CONVERSIONS_H_ #define NUMPY_CORE_SRC_MULTIARRAY_TEXTREADING_CONVERSIONS_H_ -#include - #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE #include "numpy/arrayobject.h" +#include + #include "textreading/parser_config.h" NPY_NO_EXPORT int diff --git a/numpy/_core/src/multiarray/textreading/parser_config.h b/numpy/_core/src/multiarray/textreading/parser_config.h index 022ba952c796..67b5c848341b 100644 --- a/numpy/_core/src/multiarray/textreading/parser_config.h +++ b/numpy/_core/src/multiarray/textreading/parser_config.h @@ -59,11 +59,6 @@ typedef struct { */ bool python_byte_converters; bool c_byte_converters; - /* - * Flag to store whether a warning was already given for an integer being - * parsed by first converting to a float. 
- */ - bool gave_int_via_float_warning; } parser_config; diff --git a/numpy/_core/src/multiarray/textreading/readtext.c b/numpy/_core/src/multiarray/textreading/readtext.c index e8defcc4dd2d..4df2446302d6 100644 --- a/numpy/_core/src/multiarray/textreading/readtext.c +++ b/numpy/_core/src/multiarray/textreading/readtext.c @@ -201,7 +201,6 @@ _load_from_filelike(PyObject *NPY_UNUSED(mod), .imaginary_unit = 'j', .python_byte_converters = false, .c_byte_converters = false, - .gave_int_via_float_warning = false, }; bool filelike = true; diff --git a/numpy/_core/src/multiarray/textreading/rows.c b/numpy/_core/src/multiarray/textreading/rows.c index 19c07b18fb51..7f3797b58928 100644 --- a/numpy/_core/src/multiarray/textreading/rows.c +++ b/numpy/_core/src/multiarray/textreading/rows.c @@ -6,7 +6,9 @@ #define _MULTIARRAYMODULE #include "numpy/arrayobject.h" #include "numpy/npy_3kcompat.h" +#include "npy_pycompat.h" #include "alloc.h" +#include "shape.h" // For PyArray_Resize_int #include #include @@ -58,13 +60,16 @@ create_conv_funcs( PyObject *key, *value; Py_ssize_t pos = 0; - while (PyDict_Next(converters, &pos, &key, &value)) { + int error = 0; + Py_BEGIN_CRITICAL_SECTION(converters); + while (PyDict_Next(converters, &pos, &key, &value)) { // noqa: borrowed-ref OK Py_ssize_t column = PyNumber_AsSsize_t(key, PyExc_IndexError); if (column == -1 && PyErr_Occurred()) { PyErr_Format(PyExc_TypeError, "keys of the converters dictionary must be integers; " "got %.100R", key); - goto error; + error = 1; + break; } if (usecols != NULL) { /* @@ -92,7 +97,8 @@ create_conv_funcs( PyErr_Format(PyExc_ValueError, "converter specified for column %zd, which is invalid " "for the number of fields %zd.", column, num_fields); - goto error; + error = 1; + break; } if (column < 0) { column += num_fields; @@ -102,11 +108,18 @@ create_conv_funcs( PyErr_Format(PyExc_TypeError, "values of the converters dictionary must be callable, " "but the value associated with key %R is not", key); - goto 
error; + error = 1; + break; } Py_INCREF(value); conv_funcs[column] = value; } + Py_END_CRITICAL_SECTION(); + + if (error) { + goto error; + } + return conv_funcs; error: @@ -142,11 +155,9 @@ create_conv_funcs( * @param out_descr The dtype used for allocating a new array. This is not * used if `data_array` is provided. Note that the actual dtype of the * returned array can differ for strings. - * @param num_cols Pointer in which the actual (discovered) number of columns - * is returned. This is only relevant if `homogeneous` is true. * @param homogeneous Whether the datatype of the array is not homogeneous, * i.e. not structured. In this case the number of columns has to be - * discovered an the returned array will be 2-dimensional rather than + * discovered and the returned array will be 2-dimensional rather than * 1-dimensional. * * @returns Returns the result as an array object or NULL on error. The result @@ -165,10 +176,9 @@ read_rows(stream *s, npy_intp row_size = out_descr->elsize; PyObject **conv_funcs = NULL; - bool needs_init = PyDataType_FLAGCHK(out_descr, NPY_NEEDS_INIT); - int ndim = homogeneous ? 2 : 1; npy_intp result_shape[2] = {0, 1}; + PyArray_Dims new_dims = {result_shape, ndim}; /* for resizing */ bool data_array_allocated = data_array == NULL; /* Make sure we own `data_array` for the purpose of error handling */ @@ -301,9 +311,6 @@ read_rows(stream *s, if (data_array == NULL) { goto error; } - if (needs_init) { - memset(PyArray_BYTES(data_array), 0, PyArray_NBYTES(data_array)); - } } else { assert(max_rows >=0); @@ -344,22 +351,15 @@ read_rows(stream *s, "providing a maximum number of rows to read may help."); goto error; } - - char *new_data = PyDataMem_UserRENEW( - PyArray_BYTES(data_array), alloc_size ? alloc_size : 1, - PyArray_HANDLER(data_array)); - if (new_data == NULL) { - PyErr_NoMemory(); + /* + * Resize the array. 
+ */ + result_shape[0] = new_rows; + if (PyArray_Resize_int(data_array, &new_dims, 0) < 0) { goto error; } - /* Replace the arrays data since it may have changed */ - ((PyArrayObject_fields *)data_array)->data = new_data; - ((PyArrayObject_fields *)data_array)->dimensions[0] = new_rows; - data_ptr = new_data + row_count * row_size; + data_ptr = (char *)PyArray_DATA(data_array) + row_count * row_size; data_allocated_rows = new_rows; - if (needs_init) { - memset(data_ptr, '\0', (new_rows - row_count) * row_size); - } } for (Py_ssize_t i = 0; i < actual_num_fields; ++i) { @@ -464,22 +464,21 @@ read_rows(stream *s, /* * Note that if there is no data, `data_array` may still be NULL and - * row_count is 0. In that case, always realloc just in case. + * row_count is 0. In that case, always resize just in case. */ if (data_array_allocated && data_allocated_rows != row_count) { - size_t size = row_count * row_size; - char *new_data = PyDataMem_UserRENEW( - PyArray_BYTES(data_array), size ? size : 1, - PyArray_HANDLER(data_array)); - if (new_data == NULL) { - Py_DECREF(data_array); - PyErr_NoMemory(); - return NULL; + result_shape[0] = row_count; + if (PyArray_Resize_int(data_array, &new_dims, 0) < 0) { + goto error; } - ((PyArrayObject_fields *)data_array)->data = new_data; - ((PyArrayObject_fields *)data_array)->dimensions[0] = row_count; } + /* + * If row_size is too big, F_CONTIGUOUS is always set + * as array was created for only one row of data. + * We just update the contiguous flags here. 
+ */ + PyArray_UpdateFlags(data_array, NPY_ARRAY_F_CONTIGUOUS); return data_array; error: diff --git a/numpy/_core/src/multiarray/textreading/str_to_int.c b/numpy/_core/src/multiarray/textreading/str_to_int.c index 40b7c67a981c..5f58067228d1 100644 --- a/numpy/_core/src/multiarray/textreading/str_to_int.c +++ b/numpy/_core/src/multiarray/textreading/str_to_int.c @@ -11,16 +11,6 @@ #include "conversions.h" /* For the deprecated parse-via-float path */ -const char *deprecation_msg = ( - "loadtxt(): Parsing an integer via a float is deprecated. To avoid " - "this warning, you can:\n" - " * make sure the original data is stored as integers.\n" - " * use the `converters=` keyword argument. If you only use\n" - " NumPy 1.23 or later, `converters=float` will normally work.\n" - " * Use `np.loadtxt(...).astype(np.int64)` parsing the file as\n" - " floating point and then convert it. (On all NumPy versions.)\n" - " (Deprecated NumPy 1.23)"); - #define DECLARE_TO_INT(intw, INT_MIN, INT_MAX, byteswap_unaligned) \ NPY_NO_EXPORT int \ npy_to_##intw(PyArray_Descr *descr, \ @@ -32,22 +22,7 @@ const char *deprecation_msg = ( \ if (NPY_UNLIKELY( \ str_to_int64(str, end, INT_MIN, INT_MAX, &parsed) < 0)) { \ - /* DEPRECATED 2022-07-03, NumPy 1.23 */ \ - double fval; \ - PyArray_Descr *d_descr = PyArray_DescrFromType(NPY_DOUBLE); \ - Py_DECREF(d_descr); /* borrowed */ \ - if (npy_to_double(d_descr, str, end, (char *)&fval, pconfig) < 0) { \ - return -1; \ - } \ - if (!pconfig->gave_int_via_float_warning) { \ - pconfig->gave_int_via_float_warning = true; \ - if (PyErr_WarnEx(PyExc_DeprecationWarning, \ - deprecation_msg, 3) < 0) { \ - return -1; \ - } \ - } \ - pconfig->gave_int_via_float_warning = true; \ - x = (intw##_t)fval; \ + return -1; \ } \ else { \ x = (intw##_t)parsed; \ @@ -70,23 +45,8 @@ const char *deprecation_msg = ( \ if (NPY_UNLIKELY( \ str_to_uint64(str, end, UINT_MAX, &parsed) < 0)) { \ - /* DEPRECATED 2022-07-03, NumPy 1.23 */ \ - double fval; \ - PyArray_Descr 
*d_descr = PyArray_DescrFromType(NPY_DOUBLE); \ - Py_DECREF(d_descr); /* borrowed */ \ - if (npy_to_double(d_descr, str, end, (char *)&fval, pconfig) < 0) { \ - return -1; \ - } \ - if (!pconfig->gave_int_via_float_warning) { \ - pconfig->gave_int_via_float_warning = true; \ - if (PyErr_WarnEx(PyExc_DeprecationWarning, \ - deprecation_msg, 3) < 0) { \ - return -1; \ - } \ - } \ - pconfig->gave_int_via_float_warning = true; \ - x = (uintw##_t)fval; \ - } \ + return -1; \ + } \ else { \ x = (uintw##_t)parsed; \ } \ diff --git a/numpy/_core/src/multiarray/unique.cpp b/numpy/_core/src/multiarray/unique.cpp new file mode 100644 index 000000000000..a8b897446182 --- /dev/null +++ b/numpy/_core/src/multiarray/unique.cpp @@ -0,0 +1,527 @@ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "numpy/arrayobject.h" +#include "gil_utils.h" +#include "raii_utils.hpp" +extern "C" { + #include "fnv.h" + #include "npy_argparse.h" + #include "numpy/npy_math.h" + #include "numpy/halffloat.h" +} + +// HASH_TABLE_INITIAL_BUCKETS is the reserve hashset capacity used in the +// std::unordered_set instances in the various unique_* functions. +// We use min(input_size, HASH_TABLE_INITIAL_BUCKETS) as the initial bucket +// count: +// - Reserving for all elements (isize) may over-allocate when there are few +// unique values. +// - Using a moderate upper bound HASH_TABLE_INITIAL_BUCKETS(1024) keeps +// memory usage reasonable (4 KiB for pointers). +// See https://github.com/numpy/numpy/pull/28767#discussion_r2064267631 +const npy_intp HASH_TABLE_INITIAL_BUCKETS = 1024; + +// +// Create a 1-d array with the given length that has the same +// dtype as the input `arr`. +// +static inline PyArrayObject * +empty_array_like(PyArrayObject *arr, npy_intp length) +{ + PyArray_Descr *descr = PyArray_DESCR(arr); + Py_INCREF(descr); + + // Create the output array. 
+ PyArrayObject *res_obj = + reinterpret_cast( + PyArray_NewFromDescr( + &PyArray_Type, + descr, + 1, // ndim + &length, // shape + NULL, // strides + NULL, // data + NPY_ARRAY_WRITEABLE, // flags + NULL // obj + ) + ); + return res_obj; +} + +template +size_t hash_integer(const T *value, npy_bool equal_nan) { + return npy_fnv1a(reinterpret_cast(value), sizeof(T)); +} + +template +size_t hash_complex(const T *value, npy_bool equal_nan) { + std::complex z = *reinterpret_cast *>(value); + int hasnan = npy_isnan(z.real()) || npy_isnan(z.imag()); + if (equal_nan && hasnan) { + return 0; + } + + // Now, equal_nan is false or neither of the values is not NaN. + // So we don't need to worry about NaN here. + + // Convert -0.0 to 0.0. + if (z.real() == 0.0) { + z.real(NPY_PZERO); + } + if (z.imag() == 0.0) { + z.imag(NPY_PZERO); + } + + size_t hash = npy_fnv1a(reinterpret_cast(&z), sizeof(z)); + return hash; +} + +size_t hash_complex_clongdouble(const npy_clongdouble *value, npy_bool equal_nan) { + std::complex z = + *reinterpret_cast *>(value); + int hasnan = npy_isnan(z.real()) || npy_isnan(z.imag()); + if (equal_nan && hasnan) { + return 0; + } + + // Now, equal_nan is false or neither of the values is not NaN. + // So we don't need to worry about NaN here. + + // Convert -0.0 to 0.0. + if (z.real() == 0.0) { + z.real(NPY_PZEROL); + } + if (z.imag() == 0.0) { + z.imag(NPY_PZEROL); + } + + // Some floating-point complex dtypes (e.g., npy_complex256) include undefined or + // unused bits in their binary representation + // (see: https://github.com/numpy/numpy/blob/main/numpy/_core/src/npymath/npy_math_private.h#L254-L261). + // Because hashing the raw bit pattern would make the hash depend on those + // undefined bits, we extract the mantissa, exponent, and sign components + // explicitly and pack them into a buffer to ensure the hash is well-defined. 
+ #if defined(HAVE_LDOUBLE_INTEL_EXTENDED_12_BYTES_LE) || \ + defined(HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE) || \ + defined(HAVE_LDOUBLE_MOTOROLA_EXTENDED_12_BYTES_BE) + + constexpr size_t SIZEOF_LDOUBLE_MAN = sizeof(ldouble_man_t); + constexpr size_t SIZEOF_LDOUBLE_EXP = sizeof(ldouble_exp_t); + constexpr size_t SIZEOF_LDOUBLE_SIGN = sizeof(ldouble_sign_t); + constexpr size_t SIZEOF_BUFFER = 2 * (SIZEOF_LDOUBLE_MAN + SIZEOF_LDOUBLE_MAN + SIZEOF_LDOUBLE_EXP + SIZEOF_LDOUBLE_SIGN); + unsigned char buffer[SIZEOF_BUFFER]; + + union IEEEl2bitsrep bits_real{z.real()}, bits_imag{z.imag()}; + size_t offset = 0; + + for (const IEEEl2bitsrep &bits: {bits_real, bits_imag}) { + ldouble_man_t manh = GET_LDOUBLE_MANH(bits); + ldouble_man_t manl = GET_LDOUBLE_MANL(bits); + ldouble_exp_t exp = GET_LDOUBLE_EXP(bits); + ldouble_sign_t sign = GET_LDOUBLE_SIGN(bits); + + std::memcpy(buffer + offset, &manh, SIZEOF_LDOUBLE_MAN); + offset += SIZEOF_LDOUBLE_MAN; + std::memcpy(buffer + offset, &manl, SIZEOF_LDOUBLE_MAN); + offset += SIZEOF_LDOUBLE_MAN; + std::memcpy(buffer + offset, &exp, SIZEOF_LDOUBLE_EXP); + offset += SIZEOF_LDOUBLE_EXP; + std::memcpy(buffer + offset, &sign, SIZEOF_LDOUBLE_SIGN); + offset += SIZEOF_LDOUBLE_SIGN; + } + #else + + const unsigned char* buffer = reinterpret_cast(&z); + constexpr size_t SIZEOF_BUFFER = sizeof(z); + + #endif + + size_t hash = npy_fnv1a(buffer, SIZEOF_BUFFER); + + return hash; +} + +template +int equal_integer(const T *lhs, const T *rhs, npy_bool equal_nan) { + return *lhs == *rhs; +} + +template +int equal_complex(const T *lhs, const T *rhs, npy_bool equal_nan) { + S lhs_real = real(*lhs); + S lhs_imag = imag(*lhs); + int lhs_isnan = npy_isnan(lhs_real) || npy_isnan(lhs_imag); + S rhs_real = real(*rhs); + S rhs_imag = imag(*rhs); + int rhs_isnan = npy_isnan(rhs_real) || npy_isnan(rhs_imag); + + if (lhs_isnan && rhs_isnan) { + return equal_nan; + } + if (lhs_isnan || rhs_isnan) { + return false; + } + // Now both lhs and rhs are not NaN. 
+ return (lhs_real == rhs_real) && (lhs_imag == rhs_imag); +} + +template +void copy_integer(char *data, T *value) { + std::copy_n(value, 1, (T *)data); + return; +} + +template < + typename S, + typename T, + S (*real)(T), + S (*imag)(T), + void (*setreal)(T *, const S), + void (*setimag)(T *, const S) +> +void copy_complex(char *data, T *value) { + setreal((T *)data, real(*value)); + setimag((T *)data, imag(*value)); + return; +} + +template < + typename T, + size_t (*hash_func)(const T *, npy_bool), + int (*equal_func)(const T *, const T *, npy_bool), + void (*copy_func)(char *, T *) +> +static PyObject* +unique_numeric(PyArrayObject *self, npy_bool equal_nan) +{ + /* + * Returns a new NumPy array containing the unique values of the input + * array of numeric (integer or complex). + * This function uses hashing to identify uniqueness efficiently. + */ + + auto hash = [equal_nan](const T *value) -> size_t { + return hash_func(value, equal_nan); + }; + auto equal = [equal_nan](const T *lhs, const T *rhs) -> bool { + return equal_func(lhs, rhs, equal_nan); + }; + + using set_type = std::unordered_set; + + // number of elements in the input array + npy_intp isize = PyArray_SIZE(self); + set_type hashset(std::min(isize, HASH_TABLE_INITIAL_BUCKETS), hash, equal); + + { + np::raii::SaveThreadState save_thread_state{}; + + char *idata = PyArray_BYTES(self); + npy_intp istride = PyArray_STRIDES(self)[0]; + for (npy_intp i = 0; i < isize; i++, idata += istride) { + hashset.insert(reinterpret_cast(idata)); + } + } + + PyArrayObject *res_obj = empty_array_like(self, hashset.size()); + if (res_obj == NULL) { + return NULL; + } + + { + np::raii::SaveThreadState save_thread_state{}; + + char *odata = PyArray_BYTES(res_obj); + npy_intp ostride = PyArray_STRIDES(res_obj)[0]; + for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) { + copy_func(odata, *it); + } + } + + return reinterpret_cast(res_obj); +} + +template +static PyObject* 
+unique_string(PyArrayObject *self, npy_bool equal_nan) +{ + /* + * Returns a new NumPy array containing the unique values of the input + * array of fixed size strings. + * This function uses hashing to identify uniqueness efficiently. + */ + + PyArray_Descr *descr = PyArray_DESCR(self); + // variables for the string + npy_intp itemsize = descr->elsize; + npy_intp num_chars = itemsize / sizeof(T); + + auto hash = [num_chars](const T *value) -> size_t { + return npy_fnv1a(value, num_chars * sizeof(T)); + }; + auto equal = [itemsize](const T *lhs, const T *rhs) -> bool { + return std::memcmp(lhs, rhs, itemsize) == 0; + }; + + using set_type = std::unordered_set; + + // number of elements in the input array + npy_intp isize = PyArray_SIZE(self); + set_type hashset(std::min(isize, HASH_TABLE_INITIAL_BUCKETS), hash, equal); + + { + np::raii::SaveThreadState save_thread_state{}; + + char *idata = PyArray_BYTES(self); + npy_intp istride = PyArray_STRIDES(self)[0]; + for (npy_intp i = 0; i < isize; i++, idata += istride) { + hashset.insert(reinterpret_cast(idata)); + } + } + + PyArrayObject *res_obj = empty_array_like(self, hashset.size()); + if (res_obj == NULL) { + return NULL; + } + + { + np::raii::SaveThreadState save_thread_state{}; + + char *odata = PyArray_BYTES(res_obj); + npy_intp ostride = PyArray_STRIDES(res_obj)[0]; + for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) { + std::memcpy(odata, *it, itemsize); + } + } + + return reinterpret_cast(res_obj); +} + +static PyObject* +unique_vstring(PyArrayObject *self, npy_bool equal_nan) +{ + /* + * Returns a new NumPy array containing the unique values of the input array. + * This function uses hashing to identify uniqueness efficiently. 
+ */ + + auto hash = [equal_nan](const npy_static_string *value) -> size_t { + if (value->buf == NULL) { + if (equal_nan) { + return 0; + } else { + return std::hash{}(value); + } + } + return npy_fnv1a(value->buf, value->size * sizeof(char)); + }; + auto equal = [equal_nan](const npy_static_string *lhs, const npy_static_string *rhs) -> bool { + if (lhs->buf == NULL && rhs->buf == NULL) { + if (equal_nan) { + return true; + } else { + return lhs == rhs; + } + } + if (lhs->buf == NULL || rhs->buf == NULL) { + return false; + } + if (lhs->size != rhs->size) { + return false; + } + return std::memcmp(lhs->buf, rhs->buf, lhs->size) == 0; + }; + + npy_intp isize = PyArray_SIZE(self); + // unpacked_strings must live as long as hashset because hashset points + // to values in this vector. + std::vector unpacked_strings(isize, {0, NULL}); + + using set_type = std::unordered_set; + set_type hashset(std::min(isize, HASH_TABLE_INITIAL_BUCKETS), hash, equal); + + { + PyArray_StringDTypeObject *descr = + reinterpret_cast(PyArray_DESCR(self)); + np::raii::NpyStringAcquireAllocator alloc(descr); + np::raii::SaveThreadState save_thread_state{}; + + char *idata = PyArray_BYTES(self); + npy_intp istride = PyArray_STRIDES(self)[0]; + + for (npy_intp i = 0; i < isize; i++, idata += istride) { + npy_packed_static_string *packed_string = + reinterpret_cast(idata); + int is_null = NpyString_load(alloc.allocator(), packed_string, + &unpacked_strings[i]); + if (is_null == -1) { + // Unexpected error. Throw a C++ exception that will be caught + // by the caller of unique_vstring() and converted into a Python + // RuntimeError. 
+ throw std::runtime_error("Failed to load string from packed " + "static string."); + } + hashset.insert(&unpacked_strings[i]); + } + } + + PyArrayObject *res_obj = empty_array_like(self, hashset.size()); + if (res_obj == NULL) { + return NULL; + } + + { + PyArray_StringDTypeObject *res_descr = + reinterpret_cast(PyArray_DESCR(res_obj)); + np::raii::NpyStringAcquireAllocator alloc(res_descr); + np::raii::SaveThreadState save_thread_state{}; + + char *odata = PyArray_BYTES(res_obj); + npy_intp ostride = PyArray_STRIDES(res_obj)[0]; + for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) { + npy_packed_static_string *packed_string = + reinterpret_cast(odata); + int pack_status = 0; + if ((*it)->buf == NULL) { + pack_status = NpyString_pack_null(alloc.allocator(), packed_string); + } else { + pack_status = NpyString_pack(alloc.allocator(), packed_string, + (*it)->buf, (*it)->size); + } + if (pack_status == -1) { + // string packing failed + return NULL; + } + } + } + return reinterpret_cast(res_obj); +} + + +// this map contains the functions used for each item size. 
+typedef std::function function_type; +std::unordered_map unique_funcs = { + {NPY_BYTE, unique_numeric, equal_integer, copy_integer>}, + {NPY_UBYTE, unique_numeric, equal_integer, copy_integer>}, + {NPY_SHORT, unique_numeric, equal_integer, copy_integer>}, + {NPY_USHORT, unique_numeric, equal_integer, copy_integer>}, + {NPY_INT, unique_numeric, equal_integer, copy_integer>}, + {NPY_UINT, unique_numeric, equal_integer, copy_integer>}, + {NPY_LONG, unique_numeric, equal_integer, copy_integer>}, + {NPY_ULONG, unique_numeric, equal_integer, copy_integer>}, + {NPY_LONGLONG, unique_numeric, equal_integer, copy_integer>}, + {NPY_ULONGLONG, unique_numeric, equal_integer, copy_integer>}, + {NPY_CFLOAT, unique_numeric< + npy_cfloat, + hash_complex, + equal_complex, + copy_complex + > + }, + {NPY_CDOUBLE, unique_numeric< + npy_cdouble, + hash_complex, + equal_complex, + copy_complex + > + }, + {NPY_CLONGDOUBLE, unique_numeric< + npy_clongdouble, + hash_complex_clongdouble, + equal_complex, + copy_complex + > + }, + {NPY_INT8, unique_numeric, equal_integer, copy_integer>}, + {NPY_INT16, unique_numeric, equal_integer, copy_integer>}, + {NPY_INT32, unique_numeric, equal_integer, copy_integer>}, + {NPY_INT64, unique_numeric, equal_integer, copy_integer>}, + {NPY_UINT8, unique_numeric, equal_integer, copy_integer>}, + {NPY_UINT16, unique_numeric, equal_integer, copy_integer>}, + {NPY_UINT32, unique_numeric, equal_integer, copy_integer>}, + {NPY_UINT64, unique_numeric, equal_integer, copy_integer>}, + {NPY_DATETIME, unique_numeric, equal_integer, copy_integer>}, + {NPY_COMPLEX64, unique_numeric< + npy_complex64, + hash_complex, + equal_complex, + copy_complex + > + }, + {NPY_COMPLEX128, unique_numeric< + npy_complex128, + hash_complex, + equal_complex, + copy_complex + > + }, + {NPY_STRING, unique_string}, + {NPY_UNICODE, unique_string}, + {NPY_VSTRING, unique_vstring}, +}; + + +/** + * Python exposed implementation of `_unique_hash`. 
+ * + * This is a C only function wrapping code that may cause C++ exceptions into + * try/catch. + * + * @param arr NumPy array to find the unique values of. + * @return Base-class NumPy array with unique values, `NotImplemented` if the + * type is unsupported or `NULL` with an error set. + */ +extern "C" NPY_NO_EXPORT PyObject * +array__unique_hash(PyObject *NPY_UNUSED(module), + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) +{ + PyArrayObject *arr = NULL; + npy_bool equal_nan = NPY_TRUE; // default to True + + NPY_PREPARE_ARGPARSER; + if (npy_parse_arguments("_unique_hash", args, len_args, kwnames, + "arr", &PyArray_Converter, &arr, + "|equal_nan", &PyArray_BoolConverter, &equal_nan, + NULL, NULL, NULL + ) < 0 + ) { + Py_XDECREF(arr); + return NULL; + } + + PyObject *result = NULL; + try { + auto type = PyArray_TYPE(arr); + // we only support data types present in our unique_funcs map + if (unique_funcs.find(type) == unique_funcs.end()) { + result = Py_NewRef(Py_NotImplemented); + } + else { + result = unique_funcs[type](arr, equal_nan); + } + } + catch (const std::bad_alloc &e) { + PyErr_NoMemory(); + result = NULL; + } + catch (const std::exception &e) { + PyErr_SetString(PyExc_RuntimeError, e.what()); + result = NULL; + } + Py_DECREF(arr); + return result; +} diff --git a/numpy/_core/src/multiarray/unique.h b/numpy/_core/src/multiarray/unique.h new file mode 100644 index 000000000000..7b3fb143ada4 --- /dev/null +++ b/numpy/_core/src/multiarray/unique.h @@ -0,0 +1,15 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_MULTIARRAY_UNIQUE_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_MULTIARRAY_UNIQUE_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +PyObject* array__unique_hash(PyObject *NPY_UNUSED(dummy), + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames); + +#ifdef __cplusplus +} +#endif + +#endif // NUMPY_CORE_INCLUDE_NUMPY_MULTIARRAY_UNIQUE_H_ diff --git a/numpy/_core/src/multiarray/usertypes.c b/numpy/_core/src/multiarray/usertypes.c index 
8d90f5cc968f..71c95a8ae39c 100644 --- a/numpy/_core/src/multiarray/usertypes.c +++ b/numpy/_core/src/multiarray/usertypes.c @@ -306,8 +306,9 @@ PyArray_RegisterDataType(PyArray_DescrProto *descr_proto) descr->type_num = typenum; /* update prototype to notice duplicate registration */ descr_proto->type_num = typenum; - if (dtypemeta_wrap_legacy_descriptor( - descr, descr_proto->f, &PyArrayDescr_Type, name, NULL) < 0) { + PyArray_DTypeMeta *wrapped_dtype = dtypemeta_wrap_legacy_descriptor( + descr, descr_proto->f, &PyArrayDescr_Type, name, NULL); + if (wrapped_dtype == NULL) { descr->type_num = -1; NPY_NUMUSERTYPES--; /* Override the type, it might be wrong and then decref crashes */ @@ -344,7 +345,7 @@ static int _warn_if_cast_exists_already( if (to_DType == NULL) { return -1; } - PyObject *cast_impl = PyDict_GetItemWithError( + PyObject *cast_impl = PyDict_GetItemWithError( // noqa: borrowed-ref OK NPY_DT_SLOTS(NPY_DTYPE(descr))->castingimpls, (PyObject *)to_DType); Py_DECREF(to_DType); if (cast_impl == NULL) { @@ -618,8 +619,8 @@ legacy_userdtype_common_dtype_function( * used for legacy user-dtypes, but for example numeric to/from datetime * casts were only defined that way as well. * - * @param from - * @param to + * @param from Source DType + * @param to Destination DType * @param casting If `NPY_NO_CASTING` will check the legacy registered cast, * otherwise uses the provided cast. 
*/ diff --git a/numpy/_core/src/npymath/halffloat.cpp b/numpy/_core/src/npymath/halffloat.cpp index aa582c1b9517..9289a659f5f5 100644 --- a/numpy/_core/src/npymath/halffloat.cpp +++ b/numpy/_core/src/npymath/halffloat.cpp @@ -198,41 +198,21 @@ npy_half npy_half_divmod(npy_half h1, npy_half h2, npy_half *modulus) npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f) { - if constexpr (Half::kNativeConversion) { - return BitCast(Half(BitCast(f))); - } - else { - return half_private::FromFloatBits(f); - } + return BitCast(Half(BitCast(f))); } npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d) { - if constexpr (Half::kNativeConversion) { - return BitCast(Half(BitCast(d))); - } - else { - return half_private::FromDoubleBits(d); - } + return BitCast(Half(BitCast(d))); } npy_uint32 npy_halfbits_to_floatbits(npy_uint16 h) { - if constexpr (Half::kNativeConversion) { - return BitCast(static_cast(Half::FromBits(h))); - } - else { - return half_private::ToFloatBits(h); - } + return BitCast(static_cast(Half::FromBits(h))); } npy_uint64 npy_halfbits_to_doublebits(npy_uint16 h) { - if constexpr (Half::kNativeConversion) { - return BitCast(static_cast(Half::FromBits(h))); - } - else { - return half_private::ToDoubleBits(h); - } + return BitCast(static_cast(Half::FromBits(h))); } diff --git a/numpy/_core/src/npymath/npy_math_internal.h.src b/numpy/_core/src/npymath/npy_math_internal.h.src index 01f77b14a5b8..2f3849744688 100644 --- a/numpy/_core/src/npymath/npy_math_internal.h.src +++ b/numpy/_core/src/npymath/npy_math_internal.h.src @@ -506,6 +506,14 @@ NPY_INPLACE @type@ npy_logaddexp2@c@(@type@ x, @type@ y) } } + +/* Define a macro for the ARM64 Clang specific condition */ +#if defined(__aarch64__) && defined(__clang__) + #define IS_ARM64_CLANG 1 +#else + #define IS_ARM64_CLANG 0 +#endif + /* * Wrapper function for remainder edge cases * Internally calls npy_divmod* @@ -514,34 +522,48 @@ NPY_INPLACE @type@ npy_remainder@c@(@type@ a, @type@ b) { @type@ mod; - if (NPY_UNLIKELY(!b)) 
{ + + if (NPY_UNLIKELY(!b) || + NPY_UNLIKELY(IS_ARM64_CLANG && sizeof(@type@) == sizeof(long double) && (npy_isnan(a) || npy_isnan(b)))) { /* - * in2 == 0 (and not NaN): normal fmod will give the correct - * result (always NaN). `divmod` may set additional FPE for the - * division by zero creating an inf. + * Handle two cases: + * 1. in2 == 0 (and not NaN): normal fmod will give the correct + * result (always NaN). `divmod` may set additional FPE for the + * division by zero creating an inf. + * 2. ARM64 with Clang: Special handling to avoid FPE with float128 + * TODO: This is a workaround for a known Clang issue on ARM64 where + * float128 operations trigger incorrect FPE behavior. This can be + * removed once fixed: + * https://github.com/llvm/llvm-project/issues/59924 */ - mod = npy_fmod@c@(a, b); - } - else { - npy_divmod@c@(a, b, &mod); + return npy_fmod@c@(a, b); } + + npy_divmod@c@(a, b, &mod); return mod; } NPY_INPLACE @type@ npy_floor_divide@c@(@type@ a, @type@ b) { @type@ div, mod; - if (NPY_UNLIKELY(!b)) { + + if (NPY_UNLIKELY(!b) || + NPY_UNLIKELY(IS_ARM64_CLANG && sizeof(@type@) == sizeof(long double) && (npy_isnan(a) || npy_isnan(b)))) { /* - * in2 == 0 (and not NaN): normal division will give the correct - * result (Inf or NaN). `divmod` may set additional FPE for the modulo - * evaluating to NaN. + * Handle two cases: + * 1. in2 == 0 (and not NaN): normal division will give the correct + * result (Inf or NaN). `divmod` may set additional FPE for the modulo + * evaluating to NaN. + * 2. ARM64 with Clang: Special handling to avoid FPE with float128 + * TODO: This is a workaround for a known Clang issue on ARM64 where + * float128 operations trigger incorrect FPE behavior. 
This can be + * removed once fixed: + * https://github.com/llvm/llvm-project/issues/59924 */ - div = a / b; - } - else { - div = npy_divmod@c@(a, b, &mod); + return a / b; } + + div = npy_divmod@c@(a, b, &mod); return div; } diff --git a/numpy/_core/src/npysort/binsearch.cpp b/numpy/_core/src/npysort/binsearch.cpp index f3f091e99fca..3ec4fecef0c6 100644 --- a/numpy/_core/src/npysort/binsearch.cpp +++ b/numpy/_core/src/npysort/binsearch.cpp @@ -64,43 +64,111 @@ binsearch(const char *arr, const char *key, char *ret, npy_intp arr_len, { using T = typename Tag::type; auto cmp = side_to_cmp::value; - npy_intp min_idx = 0; - npy_intp max_idx = arr_len; - T last_key_val; - if (key_len == 0) { + // If the array length is 0 we return all 0s + if (arr_len <= 0) { + for (npy_intp i = 0; i < key_len; ++i) { + *(npy_intp *)(ret + i * ret_str) = 0; + } return; } - last_key_val = *(const T *)key; - for (; key_len > 0; key_len--, key += key_str, ret += ret_str) { - const T key_val = *(const T *)key; - /* - * Updating only one of the indices based on the previous key - * gives the search a big boost when keys are sorted, but slightly - * slows down things for purely random ones. - */ - if (cmp(last_key_val, key_val)) { - max_idx = arr_len; - } - else { - min_idx = 0; - max_idx = (max_idx < arr_len) ? (max_idx + 1) : arr_len; - } + /* + In this binary search implementation, the candidate insertion indices for + the j-th key are in the range [base_j, base_j+length] and on each + iteration we pick a pivot at the mid-point of the range to compare against + the j-th key. Depending on the comparison result, we adjust the base_j and + halve the length of the interval. + + To batch multiple queries, we process all bases with a fixed length. The + length is halved on each iteration of an outer loop and all bases are + updated in an inner loop. 
To avoid consuming extra memory, we use the + result array to store intermediate values of each base until they become + the final result in the last step. + + There are two benefits of this approach: + + 1. Cache locallity of pivots. In early iterations each key is compared + against the same set of pivots. For example, in the first iteration all + keys are compared against the median. In the second iteration, all keys + end up being compared against 1st and 3rd quartiles. + + 2. Independent calculations for out-of-order execution. In the single-key + version, step i+1 depends on computation of step i. Meaning that step i+1 + must wait for step i to complete before proceeding. When batching multiple + keys, we compute each step for all keys before continuing on the next + step. All the computations at a given step are independent across + different keys. Meaning that the CPU can execute multiple keys + out-of-order in parallel. + + Invariant (for every j): + - cmp(arr[i], key_val_j) == true for all i < base_j + - cmp(arr[i], key_val_j) == false for all i >= base_j + length + + where cmp(a, b) operator depends on side input: + - For side = "left", cmp operator is < + - For side = "right", cmp operator is <= + + The insertion index candidates are in range [base, base+length] and + on each iteration we shrink the range into either + [base, ceil(length / 2)] + or + [base + floor(length / 2), ceil(length / 2)] + + The outer loop terminates when length = 1. At that point, for each j + the insertion order is either base_j or base_j + 1. An additional + comparison is required to determine which of the two values. + If cmp(arr[base_j], key_val_j) == true, insertion index is base_j + 1. + Otherwise the insertion order is base_j. + + Optimization: we unroll the first iteration for the following reasons: + 1. ret is not initialized with the bases, so we save |keys| writes + by not having to intialize it with 0s. + 2. 
By assuming the initial base for every key is 0, we also save + |keys| reads. + 3. In the first iteration, all elements are compared against the + median. So we can store it in a variable and use it for all keys. + + This initial block replaces the initialization loop that is used for the + arr_len==0 case. Note that when arr_len = 1, then half is 0 so the + following block initializes the array as with 0s. + */ + npy_intp interval_length = arr_len; + npy_intp half = interval_length >> 1; + interval_length -= half; // length -> ceil(length / 2) + + npy_intp base = 0; + const T mid_val = *(const T *)(arr + (base + half) * arr_str); + + for (npy_intp i = 0; i < key_len; ++i) { + const T key_val = *(const T *)(key + i * key_str); + *(npy_intp *)(ret + i * ret_str) = cmp(mid_val, key_val) * half; + } - last_key_val = key_val; + while (interval_length > 1) { + npy_intp half = interval_length >> 1; + interval_length -= half; // length -> ceil(length / 2) - while (min_idx < max_idx) { - const npy_intp mid_idx = min_idx + ((max_idx - min_idx) >> 1); - const T mid_val = *(const T *)(arr + mid_idx * arr_str); - if (cmp(mid_val, key_val)) { - min_idx = mid_idx + 1; - } - else { - max_idx = mid_idx; - } + for (npy_intp i = 0; i < key_len; ++i) { + npy_intp &base = *(npy_intp *)(ret + i * ret_str); + const T mid_val = *(const T *)(arr + (base + half) * arr_str); + const T key_val = *(const T *)(key + i * key_str); + base += cmp(mid_val, key_val) * half; } - *(npy_intp *)ret = min_idx; + } + + /* + At this point interval_length == 1, so the candidates are in the + interval [base, base + 1]. 
+ + We have two options: + If cmp(arr[base], key_val) == true, insertion index is base + 1 + Otherwise the insertion order is just base + */ + for (npy_intp i = 0; i < key_len; ++i) { + npy_intp &base = *(npy_intp *)(ret + i * ret_str); + const T key_val = *(const T *)(key + i * key_str); + base += cmp(*(const T *)(arr + base * arr_str), key_val); } } @@ -113,51 +181,55 @@ argbinsearch(const char *arr, const char *key, const char *sort, char *ret, { using T = typename Tag::type; auto cmp = side_to_cmp::value; - npy_intp min_idx = 0; - npy_intp max_idx = arr_len; - T last_key_val; - if (key_len == 0) { + // If the array length is 0 we return all 0s + if (arr_len <= 0) { + for (npy_intp i = 0; i < key_len; ++i) { + *(npy_intp *)(ret + i * ret_str) = 0; + } return 0; } - last_key_val = *(const T *)key; - for (; key_len > 0; key_len--, key += key_str, ret += ret_str) { - const T key_val = *(const T *)key; - /* - * Updating only one of the indices based on the previous key - * gives the search a big boost when keys are sorted, but slightly - * slows down things for purely random ones. - */ - if (cmp(last_key_val, key_val)) { - max_idx = arr_len; - } - else { - min_idx = 0; - max_idx = (max_idx < arr_len) ? 
(max_idx + 1) : arr_len; - } + npy_intp interval_length = arr_len; + npy_intp half = interval_length >> 1; + interval_length -= half; // length -> ceil(length / 2) - last_key_val = key_val; + npy_intp base = 0; + npy_intp mid_idx = *(npy_intp *)(sort + (base + half) * sort_str); + if (mid_idx < 0 || mid_idx >= arr_len) { + return -1; + } + const T mid_val = *(const T *)(arr + mid_idx * arr_str); - while (min_idx < max_idx) { - const npy_intp mid_idx = min_idx + ((max_idx - min_idx) >> 1); - const npy_intp sort_idx = *(npy_intp *)(sort + mid_idx * sort_str); - T mid_val; + for (npy_intp i = 0; i < key_len; ++i) { + const T key_val = *(const T *)(key + i * key_str); + *(npy_intp *)(ret + i * ret_str) = cmp(mid_val, key_val) * half; + } - if (sort_idx < 0 || sort_idx >= arr_len) { + while (interval_length > 1) { + npy_intp half = interval_length >> 1; + interval_length -= half; // length -> ceil(length / 2) + + for (npy_intp i = 0; i < key_len; ++i) { + npy_intp &base = *(npy_intp *)(ret + i * ret_str); + npy_intp mid_idx = *(npy_intp *)(sort + (base + half) * sort_str); + if (mid_idx < 0 || mid_idx >= arr_len) { return -1; } + const T mid_val = *(const T *)(arr + mid_idx * arr_str); + const T key_val = *(const T *)(key + i * key_str); + base += cmp(mid_val, key_val) * half; + } + } - mid_val = *(const T *)(arr + sort_idx * arr_str); - - if (cmp(mid_val, key_val)) { - min_idx = mid_idx + 1; - } - else { - max_idx = mid_idx; - } + for (npy_intp i = 0; i < key_len; ++i) { + npy_intp &base = *(npy_intp *)(ret + i * ret_str); + npy_intp mid_idx = *(npy_intp *)(sort + base * sort_str); + if (mid_idx < 0 || mid_idx >= arr_len) { + return -1; } - *(npy_intp *)ret = min_idx; + const T key_val = *(const T *)(key + i * key_str); + base += cmp(*(const T *)(arr + mid_idx * arr_str), key_val); } return 0; } diff --git a/numpy/_core/src/npysort/highway_qsort.dispatch.cpp b/numpy/_core/src/npysort/highway_qsort.dispatch.cpp index 38adfc6de894..2893e817af08 100644 --- 
a/numpy/_core/src/npysort/highway_qsort.dispatch.cpp +++ b/numpy/_core/src/npysort/highway_qsort.dispatch.cpp @@ -1,20 +1,27 @@ -#include "highway_qsort.hpp" #define VQSORT_ONLY_STATIC 1 +#include "hwy/highway.h" #include "hwy/contrib/sort/vqsort-inl.h" -#define DISPATCH_VQSORT(TYPE) \ -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(TYPE *arr, intptr_t size) \ -{ \ - hwy::HWY_NAMESPACE::VQSortStatic(arr, size, hwy::SortAscending()); \ -} \ +#include "highway_qsort.hpp" +#include "quicksort.hpp" + +namespace np::highway::qsort_simd { +template +void NPY_CPU_DISPATCH_CURFX(QSort)(T *arr, npy_intp size) +{ +#if VQSORT_ENABLED + hwy::HWY_NAMESPACE::VQSortStatic(arr, size, hwy::SortAscending()); +#else + sort::Quick(arr, size); +#endif +} -namespace np { namespace highway { namespace qsort_simd { +template void NPY_CPU_DISPATCH_CURFX(QSort)(int32_t*, npy_intp); +template void NPY_CPU_DISPATCH_CURFX(QSort)(uint32_t*, npy_intp); +template void NPY_CPU_DISPATCH_CURFX(QSort)(int64_t*, npy_intp); +template void NPY_CPU_DISPATCH_CURFX(QSort)(uint64_t*, npy_intp); +template void NPY_CPU_DISPATCH_CURFX(QSort)(float*, npy_intp); +template void NPY_CPU_DISPATCH_CURFX(QSort)(double*, npy_intp); - DISPATCH_VQSORT(int32_t) - DISPATCH_VQSORT(uint32_t) - DISPATCH_VQSORT(int64_t) - DISPATCH_VQSORT(uint64_t) - DISPATCH_VQSORT(double) - DISPATCH_VQSORT(float) +} // np::highway::qsort_simd -} } } // np::highway::qsort_simd diff --git a/numpy/_core/src/npysort/highway_qsort.hpp b/numpy/_core/src/npysort/highway_qsort.hpp index e08fb3629ec8..371f2c2fbe7d 100644 --- a/numpy/_core/src/npysort/highway_qsort.hpp +++ b/numpy/_core/src/npysort/highway_qsort.hpp @@ -3,21 +3,14 @@ #include "common.hpp" -namespace np { namespace highway { namespace qsort_simd { +namespace np::highway::qsort_simd { -#ifndef NPY_DISABLE_OPTIMIZATION - #include "highway_qsort.dispatch.h" -#endif +#include "highway_qsort.dispatch.h" NPY_CPU_DISPATCH_DECLARE(template void QSort, (T *arr, npy_intp size)) 
-NPY_CPU_DISPATCH_DECLARE(template void QSelect, (T* arr, npy_intp num, npy_intp kth)) - -#ifndef NPY_DISABLE_OPTIMIZATION - #include "highway_qsort_16bit.dispatch.h" -#endif +#include "highway_qsort_16bit.dispatch.h" NPY_CPU_DISPATCH_DECLARE(template void QSort, (T *arr, npy_intp size)) -NPY_CPU_DISPATCH_DECLARE(template void QSelect, (T* arr, npy_intp num, npy_intp kth)) -} } } // np::highway::qsort_simd +} // np::highway::qsort_simd #endif // NUMPY_SRC_COMMON_NPYSORT_HWY_SIMD_QSORT_HPP diff --git a/numpy/_core/src/npysort/highway_qsort_16bit.dispatch.cpp b/numpy/_core/src/npysort/highway_qsort_16bit.dispatch.cpp index 35b6cc58c7e8..a7466709654d 100644 --- a/numpy/_core/src/npysort/highway_qsort_16bit.dispatch.cpp +++ b/numpy/_core/src/npysort/highway_qsort_16bit.dispatch.cpp @@ -1,26 +1,33 @@ -#include "highway_qsort.hpp" #define VQSORT_ONLY_STATIC 1 +#include "hwy/highway.h" #include "hwy/contrib/sort/vqsort-inl.h" +#include "highway_qsort.hpp" #include "quicksort.hpp" -namespace np { namespace highway { namespace qsort_simd { - -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(Half *arr, intptr_t size) +namespace np::highway::qsort_simd { +template +void NPY_CPU_DISPATCH_CURFX(QSort)(T *arr, npy_intp size) { -#if HWY_HAVE_FLOAT16 - hwy::HWY_NAMESPACE::VQSortStatic(reinterpret_cast(arr), size, hwy::SortAscending()); +#if VQSORT_ENABLED + using THwy = std::conditional_t, hwy::float16_t, T>; + hwy::HWY_NAMESPACE::VQSortStatic(reinterpret_cast(arr), size, hwy::SortAscending()); #else sort::Quick(arr, size); #endif } -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(uint16_t *arr, intptr_t size) -{ - hwy::HWY_NAMESPACE::VQSortStatic(arr, size, hwy::SortAscending()); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(int16_t *arr, intptr_t size) +#if !HWY_HAVE_FLOAT16 +template <> +void NPY_CPU_DISPATCH_CURFX(QSort)(Half *arr, npy_intp size) { - hwy::HWY_NAMESPACE::VQSortStatic(arr, size, hwy::SortAscending()); + sort::Quick(arr, size); } +#endif // !HWY_HAVE_FLOAT16 + 
+template void NPY_CPU_DISPATCH_CURFX(QSort)(int16_t*, npy_intp); +template void NPY_CPU_DISPATCH_CURFX(QSort)(uint16_t*, npy_intp); +#if HWY_HAVE_FLOAT16 +template void NPY_CPU_DISPATCH_CURFX(QSort)(Half*, npy_intp); +#endif -} } } // np::highway::qsort_simd +} // np::highway::qsort_simd diff --git a/numpy/_core/src/npysort/mergesort.cpp b/numpy/_core/src/npysort/mergesort.cpp index 2fac0ccfafcd..1cfe04b1d266 100644 --- a/numpy/_core/src/npysort/mergesort.cpp +++ b/numpy/_core/src/npysort/mergesort.cpp @@ -337,7 +337,7 @@ string_amergesort_(type *v, npy_intp *tosort, npy_intp num, void *varr) static void npy_mergesort0(char *pl, char *pr, char *pw, char *vp, npy_intp elsize, - PyArray_CompareFunc *cmp, PyArrayObject *arr) + PyArray_CompareFunc *cmp, void *arr) { char *pi, *pj, *pk, *pm; @@ -383,9 +383,19 @@ npy_mergesort0(char *pl, char *pr, char *pw, char *vp, npy_intp elsize, NPY_NO_EXPORT int npy_mergesort(void *start, npy_intp num, void *varr) { - PyArrayObject *arr = (PyArrayObject *)varr; - npy_intp elsize = PyArray_ITEMSIZE(arr); - PyArray_CompareFunc *cmp = PyDataType_GetArrFuncs(PyArray_DESCR(arr))->compare; + void *arr = varr; + npy_intp elsize; + PyArray_CompareFunc *cmp; + get_sort_data_from_array(arr, &elsize, &cmp); + + return npy_mergesort_impl(start, num, varr, elsize, cmp); +} + +NPY_NO_EXPORT int +npy_mergesort_impl(void *start, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp) +{ + void *arr = varr; char *pl = (char *)start; char *pr = pl + num * elsize; char *pw; @@ -413,7 +423,7 @@ npy_mergesort(void *start, npy_intp num, void *varr) static void npy_amergesort0(npy_intp *pl, npy_intp *pr, char *v, npy_intp *pw, - npy_intp elsize, PyArray_CompareFunc *cmp, PyArrayObject *arr) + npy_intp elsize, PyArray_CompareFunc *cmp, void *arr) { char *vp; npy_intp vi, *pi, *pj, *pk, *pm; @@ -459,9 +469,19 @@ npy_amergesort0(npy_intp *pl, npy_intp *pr, char *v, npy_intp *pw, NPY_NO_EXPORT int npy_amergesort(void *v, npy_intp *tosort, 
npy_intp num, void *varr) { - PyArrayObject *arr = (PyArrayObject *)varr; - npy_intp elsize = PyArray_ITEMSIZE(arr); - PyArray_CompareFunc *cmp = PyDataType_GetArrFuncs(PyArray_DESCR(arr))->compare; + void *arr = varr; + npy_intp elsize; + PyArray_CompareFunc *cmp; + get_sort_data_from_array(arr, &elsize, &cmp); + + return npy_amergesort_impl(v, tosort, num, varr, elsize, cmp); +} + +NPY_NO_EXPORT int +npy_amergesort_impl(void *v, npy_intp *tosort, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp) +{ + void *arr = varr; npy_intp *pl, *pr, *pw; /* Items that have zero size don't make sense to sort */ diff --git a/numpy/_core/src/npysort/npysort_common.h b/numpy/_core/src/npysort/npysort_common.h index 0680ae52afe3..f2b99e3b7f66 100644 --- a/numpy/_core/src/npysort/npysort_common.h +++ b/numpy/_core/src/npysort/npysort_common.h @@ -1,8 +1,8 @@ #ifndef __NPY_SORT_COMMON_H__ #define __NPY_SORT_COMMON_H__ -#include #include +#include #include #include "dtypemeta.h" @@ -40,6 +40,20 @@ extern "C" { /* Need this for the argsort functions */ #define INTP_SWAP(a,b) {npy_intp tmp = (b); (b)=(a); (a) = tmp;} +/* + ****************************************************************************** + ** SORTING WRAPPERS ** + ****************************************************************************** + */ + +static inline void +get_sort_data_from_array(void *varr, npy_intp *elsize, PyArray_CompareFunc **cmp) +{ + PyArrayObject *arr = (PyArrayObject *)varr; + *elsize = PyArray_ITEMSIZE(arr); + *cmp = PyDataType_GetArrFuncs(PyArray_DESCR(arr))->compare; +} + /* ***************************************************************************** ** COMPARISON FUNCTIONS ** diff --git a/numpy/_core/src/npysort/quicksort.cpp b/numpy/_core/src/npysort/quicksort.cpp index 15e5668f599d..3371c02aef49 100644 --- a/numpy/_core/src/npysort/quicksort.cpp +++ b/numpy/_core/src/npysort/quicksort.cpp @@ -79,27 +79,23 @@ inline bool quicksort_dispatch(T *start, npy_intp num) #if 
!defined(__CYGWIN__) using TF = typename np::meta::FixedWidth::Type; void (*dispfunc)(TF*, intptr_t) = nullptr; - if (sizeof(T) == sizeof(uint16_t)) { - #ifndef NPY_DISABLE_OPTIMIZATION - #if defined(NPY_CPU_AMD64) || defined(NPY_CPU_X86) // x86 32-bit and 64-bit - #include "x86_simd_qsort_16bit.dispatch.h" - NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::qsort_simd::template QSort, ); - #else - #include "highway_qsort_16bit.dispatch.h" - NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::highway::qsort_simd::template QSort, ); - #endif - #endif - } - else if (sizeof(T) == sizeof(uint32_t) || sizeof(T) == sizeof(uint64_t)) { - #ifndef NPY_DISABLE_OPTIMIZATION - #if defined(NPY_CPU_AMD64) || defined(NPY_CPU_X86) // x86 32-bit and 64-bit - #include "x86_simd_qsort.dispatch.h" - NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::qsort_simd::template QSort, ); - #else - #include "highway_qsort.dispatch.h" - NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::highway::qsort_simd::template QSort, ); - #endif - #endif + if constexpr (sizeof(T) == sizeof(uint16_t)) { + #if defined(NPY_CPU_AMD64) || defined(NPY_CPU_X86) // x86 32-bit and 64-bit + #include "x86_simd_qsort_16bit.dispatch.h" + NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::qsort_simd::template QSort, ); + #else + #include "highway_qsort_16bit.dispatch.h" + NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::highway::qsort_simd::template QSort, ); + #endif + } + else if constexpr (sizeof(T) == sizeof(uint32_t) || sizeof(T) == sizeof(uint64_t)) { + #if defined(NPY_CPU_AMD64) || defined(NPY_CPU_X86) // x86 32-bit and 64-bit + #include "x86_simd_qsort.dispatch.h" + NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::qsort_simd::template QSort, ); + #else + #include "highway_qsort.dispatch.h" + NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::highway::qsort_simd::template QSort, ); + #endif } if (dispfunc) { (*dispfunc)(reinterpret_cast(start), static_cast(num)); @@ -116,9 +112,7 @@ inline bool aquicksort_dispatch(T *start, npy_intp* arg, npy_intp num) #if !defined(__CYGWIN__) using TF = 
typename np::meta::FixedWidth::Type; void (*dispfunc)(TF*, npy_intp*, npy_intp) = nullptr; - #ifndef NPY_DISABLE_OPTIMIZATION - #include "x86_simd_argsort.dispatch.h" - #endif + #include "x86_simd_argsort.dispatch.h" NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::qsort_simd::template ArgQSort, ); if (dispfunc) { (*dispfunc)(reinterpret_cast(start), arg, num); @@ -514,9 +508,18 @@ string_aquicksort_(type *vv, npy_intp *tosort, npy_intp num, void *varr) NPY_NO_EXPORT int npy_quicksort(void *start, npy_intp num, void *varr) { - PyArrayObject *arr = (PyArrayObject *)varr; - npy_intp elsize = PyArray_ITEMSIZE(arr); - PyArray_CompareFunc *cmp = PyDataType_GetArrFuncs(PyArray_DESCR(arr))->compare; + npy_intp elsize; + PyArray_CompareFunc *cmp; + get_sort_data_from_array(varr, &elsize, &cmp); + + return npy_quicksort_impl(start, num, varr, elsize, cmp); +} + +NPY_NO_EXPORT int +npy_quicksort_impl(void *start, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp) +{ + void *arr = varr; char *vp; char *pl = (char *)start; char *pr = pl + (num - 1) * elsize; @@ -618,10 +621,19 @@ npy_quicksort(void *start, npy_intp num, void *varr) NPY_NO_EXPORT int npy_aquicksort(void *vv, npy_intp *tosort, npy_intp num, void *varr) { + npy_intp elsize; + PyArray_CompareFunc *cmp; + get_sort_data_from_array(varr, &elsize, &cmp); + + return npy_aquicksort_impl(vv, tosort, num, varr, elsize, cmp); +} + +NPY_NO_EXPORT int +npy_aquicksort_impl(void *vv, npy_intp *tosort, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp) +{ + void *arr = varr; char *v = (char *)vv; - PyArrayObject *arr = (PyArrayObject *)varr; - npy_intp elsize = PyArray_ITEMSIZE(arr); - PyArray_CompareFunc *cmp = PyDataType_GetArrFuncs(PyArray_DESCR(arr))->compare; char *vp; npy_intp *pl = tosort; npy_intp *pr = tosort + num - 1; diff --git a/numpy/_core/src/npysort/selection.cpp b/numpy/_core/src/npysort/selection.cpp index 225e932ac122..1a479178c9b5 100644 --- 
a/numpy/_core/src/npysort/selection.cpp +++ b/numpy/_core/src/npysort/selection.cpp @@ -44,15 +44,11 @@ inline bool quickselect_dispatch(T* v, npy_intp num, npy_intp kth) using TF = typename np::meta::FixedWidth::Type; void (*dispfunc)(TF*, npy_intp, npy_intp) = nullptr; if constexpr (sizeof(T) == sizeof(uint16_t)) { - #ifndef NPY_DISABLE_OPTIMIZATION - #include "x86_simd_qsort_16bit.dispatch.h" - #endif + #include "x86_simd_qsort_16bit.dispatch.h" NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::qsort_simd::template QSelect, ); } else if constexpr (sizeof(T) == sizeof(uint32_t) || sizeof(T) == sizeof(uint64_t)) { - #ifndef NPY_DISABLE_OPTIMIZATION - #include "x86_simd_qsort.dispatch.h" - #endif + #include "x86_simd_qsort.dispatch.h" NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::qsort_simd::template QSelect, ); } if (dispfunc) { @@ -76,9 +72,7 @@ inline bool argquickselect_dispatch(T* v, npy_intp* arg, npy_intp num, npy_intp (std::is_integral_v || std::is_floating_point_v) && (sizeof(T) == sizeof(uint32_t) || sizeof(T) == sizeof(uint64_t))) { using TF = typename np::meta::FixedWidth::Type; - #ifndef NPY_DISABLE_OPTIMIZATION - #include "x86_simd_argsort.dispatch.h" - #endif + #include "x86_simd_argsort.dispatch.h" void (*dispfunc)(TF*, npy_intp*, npy_intp, npy_intp) = nullptr; NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::qsort_simd::template ArgQSelect, ); if (dispfunc) { @@ -258,7 +252,7 @@ unguarded_partition_(type *v, npy_intp *tosort, const type pivot, npy_intp *ll, /* * select median of median of blocks of 5 * if used as partition pivot it splits the range into at least 30%/70% - * allowing linear time worstcase quickselect + * allowing linear time worst-case quickselect */ template static npy_intp diff --git a/numpy/_core/src/npysort/timsort.cpp b/numpy/_core/src/npysort/timsort.cpp index 0f0f5721e7cf..0dfb4d32f64a 100644 --- a/numpy/_core/src/npysort/timsort.cpp +++ b/numpy/_core/src/npysort/timsort.cpp @@ -39,8 +39,9 @@ #include #include -/* enough for 32 * 1.618 ** 128 
elements */ -#define TIMSORT_STACK_SIZE 128 +/* enough for 32 * 1.618 ** 128 elements. + If powersort was used in all cases, 90 would suffice, as 32 * 2 ** 90 >= 32 * 1.618 ** 128 */ +#define RUN_STACK_SIZE 128 static npy_intp compute_min_run(npy_intp num) @@ -58,6 +59,7 @@ compute_min_run(npy_intp num) typedef struct { npy_intp s; /* start pointer */ npy_intp l; /* length */ + int power; /* node "level" for powersort merge strategy */ } run; /* buffer for argsort. Declared here to avoid multiple declarations. */ @@ -383,60 +385,51 @@ merge_at_(type *arr, const run *stack, const npy_intp at, buffer_ *buffer) return 0; } -template +/* See https://github.com/python/cpython/blob/ea23c897cd25702e72a04e06664f6864f07a7c5d/Objects/listsort.txt +* for a detailed explanation. +* In CPython, *num* is called *n*, but we changed it for consistency with the NumPy implementation. +*/ static int -try_collapse_(type *arr, run *stack, npy_intp *stack_ptr, buffer_ *buffer) +powerloop(npy_intp s1, npy_intp n1, npy_intp n2, npy_intp num) { - int ret; - npy_intp A, B, C, top; - top = *stack_ptr; - - while (1 < top) { - B = stack[top - 2].l; - C = stack[top - 1].l; - - if ((2 < top && stack[top - 3].l <= B + C) || - (3 < top && stack[top - 4].l <= stack[top - 3].l + B)) { - A = stack[top - 3].l; - - if (A <= C) { - ret = merge_at_(arr, stack, top - 3, buffer); - - if (NPY_UNLIKELY(ret < 0)) { - return ret; - } - - stack[top - 3].l += B; - stack[top - 2] = stack[top - 1]; - --top; - } - else { - ret = merge_at_(arr, stack, top - 2, buffer); - - if (NPY_UNLIKELY(ret < 0)) { - return ret; - } - - stack[top - 2].l += C; - --top; - } + int result = 0; + npy_intp a = 2 * s1 + n1; /* 2*a */ + npy_intp b = a + n1 + n2; /* 2*b */ + for (;;) { + ++result; + if (a >= num) { /* both quotient bits are 1 */ + a -= num; + b -= num; } - else if (1 < top && B <= C) { - ret = merge_at_(arr, stack, top - 2, buffer); + else if (b >= num) { /* a/num bit is 0, b/num bit is 1 */ + break; + } + a <<= 1; + b 
<<= 1; + } + return result; +} +template +static int +found_new_run_(type *arr, run *stack, npy_intp *stack_ptr, npy_intp n2, + npy_intp num, buffer_ *buffer) +{ + int ret; + if (*stack_ptr > 0) { + npy_intp s1 = stack[*stack_ptr - 1].s; + npy_intp n1 = stack[*stack_ptr - 1].l; + int power = powerloop(s1, n1, n2, num); + while (*stack_ptr > 1 && stack[*stack_ptr - 2].power > power) { + ret = merge_at_(arr, stack, *stack_ptr - 2, buffer); if (NPY_UNLIKELY(ret < 0)) { return ret; } - - stack[top - 2].l += C; - --top; - } - else { - break; + stack[*stack_ptr - 2].l += stack[*stack_ptr - 1].l; + --(*stack_ptr); } + stack[*stack_ptr - 1].power = power; } - - *stack_ptr = top; return 0; } @@ -491,7 +484,7 @@ timsort_(void *start, npy_intp num) int ret; npy_intp l, n, stack_ptr, minrun; buffer_ buffer; - run stack[TIMSORT_STACK_SIZE]; + run stack[RUN_STACK_SIZE]; buffer.pw = NULL; buffer.size = 0; stack_ptr = 0; @@ -499,15 +492,14 @@ timsort_(void *start, npy_intp num) for (l = 0; l < num;) { n = count_run_((type *)start, l, num, minrun); + ret = found_new_run_((type *)start, stack, &stack_ptr, n, num, &buffer); + if (NPY_UNLIKELY(ret < 0)) + goto cleanup; + + // Push the new run onto the stack. 
stack[stack_ptr].s = l; stack[stack_ptr].l = n; ++stack_ptr; - ret = try_collapse_((type *)start, stack, &stack_ptr, &buffer); - - if (NPY_UNLIKELY(ret < 0)) { - goto cleanup; - } - l += n; } @@ -790,59 +782,24 @@ amerge_at_(type *arr, npy_intp *tosort, const run *stack, const npy_intp at, template static int -atry_collapse_(type *arr, npy_intp *tosort, run *stack, npy_intp *stack_ptr, - buffer_intp *buffer) +afound_new_run_(type *arr, npy_intp *tosort, run *stack, npy_intp *stack_ptr, npy_intp n2, + npy_intp num, buffer_intp *buffer) { int ret; - npy_intp A, B, C, top; - top = *stack_ptr; - - while (1 < top) { - B = stack[top - 2].l; - C = stack[top - 1].l; - - if ((2 < top && stack[top - 3].l <= B + C) || - (3 < top && stack[top - 4].l <= stack[top - 3].l + B)) { - A = stack[top - 3].l; - - if (A <= C) { - ret = amerge_at_(arr, tosort, stack, top - 3, buffer); - - if (NPY_UNLIKELY(ret < 0)) { - return ret; - } - - stack[top - 3].l += B; - stack[top - 2] = stack[top - 1]; - --top; - } - else { - ret = amerge_at_(arr, tosort, stack, top - 2, buffer); - - if (NPY_UNLIKELY(ret < 0)) { - return ret; - } - - stack[top - 2].l += C; - --top; - } - } - else if (1 < top && B <= C) { - ret = amerge_at_(arr, tosort, stack, top - 2, buffer); - + if (*stack_ptr > 0) { + npy_intp s1 = stack[*stack_ptr - 1].s; + npy_intp n1 = stack[*stack_ptr - 1].l; + int power = powerloop(s1, n1, n2, num); + while (*stack_ptr > 1 && stack[*stack_ptr - 2].power > power) { + ret = amerge_at_(arr, tosort, stack, *stack_ptr - 2, buffer); if (NPY_UNLIKELY(ret < 0)) { return ret; } - - stack[top - 2].l += C; - --top; - } - else { - break; + stack[*stack_ptr - 2].l += stack[*stack_ptr - 1].l; + --(*stack_ptr); } + stack[*stack_ptr - 1].power = power; } - - *stack_ptr = top; return 0; } @@ -897,7 +854,7 @@ atimsort_(void *v, npy_intp *tosort, npy_intp num) int ret; npy_intp l, n, stack_ptr, minrun; buffer_intp buffer; - run stack[TIMSORT_STACK_SIZE]; + run stack[RUN_STACK_SIZE]; buffer.pw = NULL; 
buffer.size = 0; stack_ptr = 0; @@ -905,16 +862,13 @@ atimsort_(void *v, npy_intp *tosort, npy_intp num) for (l = 0; l < num;) { n = acount_run_((type *)v, tosort, l, num, minrun); - stack[stack_ptr].s = l; - stack[stack_ptr].l = n; - ++stack_ptr; - ret = atry_collapse_((type *)v, tosort, stack, &stack_ptr, - &buffer); - + ret = afound_new_run_((type*)v, tosort, stack, &stack_ptr, n, num, &buffer); if (NPY_UNLIKELY(ret < 0)) { goto cleanup; } - + stack[stack_ptr].s = l; + stack[stack_ptr].l = n; + ++stack_ptr; l += n; } @@ -1371,7 +1325,7 @@ string_timsort_(void *start, npy_intp num, void *varr) size_t len = elsize / sizeof(type); int ret; npy_intp l, n, stack_ptr, minrun; - run stack[TIMSORT_STACK_SIZE]; + run stack[RUN_STACK_SIZE]; string_buffer_ buffer; /* Items that have zero size don't make sense to sort */ @@ -1800,7 +1754,7 @@ string_atimsort_(void *start, npy_intp *tosort, npy_intp num, void *varr) size_t len = elsize / sizeof(type); int ret; npy_intp l, n, stack_ptr, minrun; - run stack[TIMSORT_STACK_SIZE]; + run stack[RUN_STACK_SIZE]; buffer_intp buffer; /* Items that have zero size don't make sense to sort */ @@ -2253,7 +2207,7 @@ npy_timsort(void *start, npy_intp num, void *varr) PyArray_CompareFunc *cmp = PyDataType_GetArrFuncs(PyArray_DESCR(arr))->compare; int ret; npy_intp l, n, stack_ptr, minrun; - run stack[TIMSORT_STACK_SIZE]; + run stack[RUN_STACK_SIZE]; buffer_char buffer; /* Items that have zero size don't make sense to sort */ @@ -2689,7 +2643,7 @@ npy_atimsort(void *start, npy_intp *tosort, npy_intp num, void *varr) PyArray_CompareFunc *cmp = PyDataType_GetArrFuncs(PyArray_DESCR(arr))->compare; int ret; npy_intp l, n, stack_ptr, minrun; - run stack[TIMSORT_STACK_SIZE]; + run stack[RUN_STACK_SIZE]; buffer_intp buffer; /* Items that have zero size don't make sense to sort */ diff --git a/numpy/_core/src/npysort/x86-simd-sort b/numpy/_core/src/npysort/x86-simd-sort index aad3db19def3..5adb33411f3c 160000 --- 
a/numpy/_core/src/npysort/x86-simd-sort +++ b/numpy/_core/src/npysort/x86-simd-sort @@ -1 +1 @@ -Subproject commit aad3db19def3273843d4390808d63c2b6ebd1dbf +Subproject commit 5adb33411f3cea8bdbafa9d91bd75bc4bf19c7dd diff --git a/numpy/_core/src/npysort/x86_simd_qsort.hpp b/numpy/_core/src/npysort/x86_simd_qsort.hpp index 79ee48c91a55..e12385689deb 100644 --- a/numpy/_core/src/npysort/x86_simd_qsort.hpp +++ b/numpy/_core/src/npysort/x86_simd_qsort.hpp @@ -5,21 +5,15 @@ namespace np { namespace qsort_simd { -#ifndef NPY_DISABLE_OPTIMIZATION - #include "x86_simd_qsort.dispatch.h" -#endif +#include "x86_simd_qsort.dispatch.h" NPY_CPU_DISPATCH_DECLARE(template void QSort, (T *arr, npy_intp size)) NPY_CPU_DISPATCH_DECLARE(template void QSelect, (T* arr, npy_intp num, npy_intp kth)) -#ifndef NPY_DISABLE_OPTIMIZATION - #include "x86_simd_argsort.dispatch.h" -#endif +#include "x86_simd_argsort.dispatch.h" NPY_CPU_DISPATCH_DECLARE(template void ArgQSort, (T *arr, npy_intp* arg, npy_intp size)) NPY_CPU_DISPATCH_DECLARE(template void ArgQSelect, (T *arr, npy_intp* arg, npy_intp kth, npy_intp size)) -#ifndef NPY_DISABLE_OPTIMIZATION - #include "x86_simd_qsort_16bit.dispatch.h" -#endif +#include "x86_simd_qsort_16bit.dispatch.h" NPY_CPU_DISPATCH_DECLARE(template void QSort, (T *arr, npy_intp size)) NPY_CPU_DISPATCH_DECLARE(template void QSelect, (T* arr, npy_intp num, npy_intp kth)) diff --git a/numpy/_core/src/umath/_operand_flag_tests.c b/numpy/_core/src/umath/_operand_flag_tests.c index 11b74af72d28..c97668c4b118 100644 --- a/numpy/_core/src/umath/_operand_flag_tests.c +++ b/numpy/_core/src/umath/_operand_flag_tests.c @@ -57,14 +57,14 @@ PyMODINIT_FUNC PyInit__operand_flag_tests(void) PyObject *m = NULL; PyObject *ufunc; + import_array(); + import_umath(); + m = PyModule_Create(&moduledef); if (m == NULL) { goto fail; } - import_array(); - import_umath(); - ufunc = PyUFunc_FromFuncAndData(funcs, data, types, 1, 2, 0, PyUFunc_None, "inplace_add", "inplace_add_docstring", 0); 
@@ -77,6 +77,11 @@ PyMODINIT_FUNC PyInit__operand_flag_tests(void) ((PyUFuncObject*)ufunc)->iter_flags = NPY_ITER_REDUCE_OK; PyModule_AddObject(m, "inplace_add", (PyObject*)ufunc); +#ifdef Py_GIL_DISABLED + // signal this module supports running with the GIL disabled + PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); +#endif + return m; fail: diff --git a/numpy/_core/src/umath/_rational_tests.c b/numpy/_core/src/umath/_rational_tests.c index aa4250e4efc8..c00c8468b651 100644 --- a/numpy/_core/src/umath/_rational_tests.c +++ b/numpy/_core/src/umath/_rational_tests.c @@ -1097,7 +1097,7 @@ rational_ufunc_test_add_rationals(char** args, npy_intp const *dimensions, } -PyMethodDef module_methods[] = { +static PyMethodDef module_methods[] = { {0} /* sentinel */ }; @@ -1355,6 +1355,11 @@ PyMODINIT_FUNC PyInit__rational_tests(void) { GCD_LCM_UFUNC(gcd,NPY_INT64,"greatest common denominator of two integers"); GCD_LCM_UFUNC(lcm,NPY_INT64,"least common multiple of two integers"); +#ifdef Py_GIL_DISABLED + // signal this module supports running with the GIL disabled + PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); +#endif + return m; fail: diff --git a/numpy/_core/src/umath/_scaled_float_dtype.c b/numpy/_core/src/umath/_scaled_float_dtype.c index 02278806751f..9bf318d97b10 100644 --- a/numpy/_core/src/umath/_scaled_float_dtype.c +++ b/numpy/_core/src/umath/_scaled_float_dtype.c @@ -21,10 +21,12 @@ #include "array_method.h" #include "common.h" #include "numpy/npy_math.h" +#include "npy_sort.h" #include "convert_datatype.h" #include "dtypemeta.h" #include "dispatching.h" #include "gil_utils.h" +#include "multiarraymodule.h" typedef struct { PyArray_Descr base; @@ -106,7 +108,7 @@ sfloat_getitem(char *data, PyArrayObject *arr) static int -sfloat_setitem(PyObject *obj, char *data, PyArrayObject *arr) +sfloat_setitem(PyArray_Descr *descr_, PyObject *obj, char *data) { if (!PyFloat_CheckExact(obj)) { PyErr_SetString(PyExc_NotImplementedError, @@ -114,7 +116,7 @@ 
sfloat_setitem(PyObject *obj, char *data, PyArrayObject *arr) return -1; } - PyArray_SFloatDescr *descr = (PyArray_SFloatDescr *)PyArray_DESCR(arr); + PyArray_SFloatDescr *descr = (PyArray_SFloatDescr *)descr_; double value = PyFloat_AsDouble(obj); value /= descr->scaling; @@ -130,9 +132,10 @@ NPY_DType_Slots sfloat_slots = { .default_descr = &sfloat_default_descr, .common_dtype = &sfloat_common_dtype, .common_instance = &sfloat_common_instance, + .setitem = &sfloat_setitem, .f = { .getitem = (PyArray_GetItemFunc *)&sfloat_getitem, - .setitem = (PyArray_SetItemFunc *)&sfloat_setitem, + .setitem = NULL, } }; @@ -774,65 +777,272 @@ promote_to_sfloat(PyUFuncObject *NPY_UNUSED(ufunc), } +NPY_NO_EXPORT int +sfloat_stable_sort_loop( + PyArrayMethod_Context *context, + char *const *data, + const npy_intp *dimensions, + const npy_intp *strides, + NpyAuxData *NPY_UNUSED(auxdata)) +{ + assert(data[0] == data[1]); + assert(strides[0] == sizeof(npy_float64) && strides[1] == sizeof(npy_float64)); + assert(((PyArrayMethod_SortParameters *)context->parameters)->flags == NPY_SORT_STABLE); + + npy_intp N = dimensions[0]; + char *in = data[0]; + + return timsort_double(in, N, NULL); +} + + +NPY_NO_EXPORT int +sfloat_default_sort_loop( + PyArrayMethod_Context *context, + char *const *data, + const npy_intp *dimensions, + const npy_intp *strides, + NpyAuxData *NPY_UNUSED(auxdata)) +{ + assert(data[0] == data[1]); + assert(strides[0] == sizeof(npy_float64) && strides[1] == sizeof(npy_float64)); + assert(((PyArrayMethod_SortParameters *)context->parameters)->flags == NPY_SORT_DEFAULT); + + npy_intp N = dimensions[0]; + char *in = data[0]; + + return quicksort_double(in, N, NULL); +} + + +NPY_NO_EXPORT int +sfloat_sort_get_loop( + PyArrayMethod_Context *context, + int aligned, int move_references, + const npy_intp *strides, + PyArrayMethod_StridedLoop **out_loop, + NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags) +{ + PyArrayMethod_SortParameters *parameters = 
(PyArrayMethod_SortParameters *)context->parameters; + + if (PyDataType_FLAGCHK(context->descriptors[0], NPY_NEEDS_PYAPI)) { + *flags |= NPY_METH_REQUIRES_PYAPI; + } + + if (parameters->flags == NPY_SORT_STABLE) { + *out_loop = (PyArrayMethod_StridedLoop *)sfloat_stable_sort_loop; + } + else if (parameters->flags == NPY_SORT_DEFAULT) { + *out_loop = (PyArrayMethod_StridedLoop *)sfloat_default_sort_loop; + } + else { + PyErr_SetString(PyExc_RuntimeError, "unsupported sort kind"); + return -1; + } + return 0; +} + + +static NPY_CASTING +sfloat_sort_resolve_descriptors( + PyArrayMethodObject *NPY_UNUSED(self), + PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]), + PyArray_Descr *given_descrs[2], + PyArray_Descr *loop_descrs[2], + npy_intp *view_offset) +{ + assert(!(given_descrs[1] != given_descrs[0] && given_descrs[1] != NULL)); + assert(PyArray_IsNativeByteOrder(given_descrs[0]->byteorder)); + + loop_descrs[0] = given_descrs[0]; + Py_INCREF(loop_descrs[0]); + loop_descrs[1] = loop_descrs[0]; + Py_INCREF(loop_descrs[1]); + + return NPY_NO_CASTING; +} + + +NPY_NO_EXPORT int +sfloat_stable_argsort_loop( + PyArrayMethod_Context *context, + char *const *data, + const npy_intp *dimensions, + const npy_intp *strides, + NpyAuxData *NPY_UNUSED(auxdata)) +{ + assert(((PyArrayMethod_SortParameters *)context->parameters)->flags == NPY_SORT_STABLE); + assert(strides[0] == sizeof(npy_float64)); + assert(strides[1] == sizeof(npy_intp)); + + npy_intp N = dimensions[0]; + char *in = data[0]; + npy_intp *out = (npy_intp *)data[1]; + + return atimsort_double(in, out, N, NULL); +} + + +NPY_NO_EXPORT int +sfloat_default_argsort_loop( + PyArrayMethod_Context *context, + char *const *data, + const npy_intp *dimensions, + const npy_intp *strides, + NpyAuxData *NPY_UNUSED(auxdata)) +{ + assert(((PyArrayMethod_SortParameters *)context->parameters)->flags == NPY_SORT_DEFAULT); + assert(strides[0] == sizeof(npy_float64)); + assert(strides[1] == sizeof(npy_intp)); + + npy_intp N = dimensions[0]; + char 
*in = data[0]; + npy_intp *out = (npy_intp *)data[1]; + + return aquicksort_double(in, out, N, NULL); +} + + +NPY_NO_EXPORT int +sfloat_argsort_get_loop( + PyArrayMethod_Context *context, + int aligned, int move_references, + const npy_intp *strides, + PyArrayMethod_StridedLoop **out_loop, + NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags) +{ + PyArrayMethod_SortParameters *parameters = (PyArrayMethod_SortParameters *)context->parameters; + + if (PyDataType_FLAGCHK(context->descriptors[0], NPY_NEEDS_PYAPI)) { + *flags |= NPY_METH_REQUIRES_PYAPI; + } + + if (parameters->flags == NPY_SORT_STABLE) { + *out_loop = (PyArrayMethod_StridedLoop *)sfloat_stable_argsort_loop; + } + else if (parameters->flags == NPY_SORT_DEFAULT) { + *out_loop = (PyArrayMethod_StridedLoop *)sfloat_default_argsort_loop; + } + else { + PyErr_SetString(PyExc_RuntimeError, "unsupported sort kind"); + return -1; + } + return 0; +} + + +NPY_NO_EXPORT NPY_CASTING +sfloat_argsort_resolve_descriptors( + PyArrayMethodObject *NPY_UNUSED(self), + PyArray_DTypeMeta *dtypes[2], + PyArray_Descr *given_descrs[2], + PyArray_Descr *loop_descrs[2], + npy_intp *view_offset) +{ + assert(given_descrs[1] == NULL || given_descrs[1]->type_num == NPY_INTP); + assert(PyArray_IsNativeByteOrder(given_descrs[0]->byteorder)); + + loop_descrs[0] = given_descrs[0]; + Py_INCREF(loop_descrs[0]); + loop_descrs[1] = PyArray_DescrFromType(NPY_INTP); + if (loop_descrs[1] == NULL) { + return -1; + } + return NPY_NO_CASTING; +} + + /* * Add new ufunc loops (this is somewhat clumsy as of writing it, but should * get less so with the introduction of public API). 
*/ static int sfloat_init_ufuncs(void) { - PyArray_DTypeMeta *dtypes[3] = { + PyArray_DTypeMeta *all_sfloat_dtypes[3] = { &PyArray_SFloatDType, &PyArray_SFloatDType, &PyArray_SFloatDType}; - PyType_Slot slots[3] = {{0, NULL}}; - PyArrayMethod_Spec spec = { + PyType_Slot multiply_slots[3] = { + {NPY_METH_resolve_descriptors, &multiply_sfloats_resolve_descriptors}, + {NPY_METH_strided_loop, &multiply_sfloats}, + {0, NULL} + }; + PyArrayMethod_Spec multiply_spec = { .nin = 2, - .nout =1, - .dtypes = dtypes, - .slots = slots, + .nout = 1, + .dtypes = all_sfloat_dtypes, + .slots = multiply_slots, + .name = "sfloat_multiply", + .casting = NPY_NO_CASTING, }; - spec.name = "sfloat_multiply"; - spec.casting = NPY_NO_CASTING; - - slots[0].slot = NPY_METH_resolve_descriptors; - slots[0].pfunc = &multiply_sfloats_resolve_descriptors; - slots[1].slot = NPY_METH_strided_loop; - slots[1].pfunc = &multiply_sfloats; - PyBoundArrayMethodObject *bmeth = PyArrayMethod_FromSpec_int(&spec, 0); - if (bmeth == NULL) { - return -1; - } - int res = sfloat_add_loop("multiply", - bmeth->dtypes, (PyObject *)bmeth->method); - Py_DECREF(bmeth); - if (res < 0) { - return -1; - } - spec.name = "sfloat_add"; - spec.casting = NPY_SAME_KIND_CASTING; + PyType_Slot add_slots[3] = { + {NPY_METH_resolve_descriptors, &add_sfloats_resolve_descriptors}, + {NPY_METH_strided_loop, &add_sfloats}, + {0, NULL} + }; + PyArrayMethod_Spec add_spec = { + .nin = 2, + .nout = 1, + .dtypes = all_sfloat_dtypes, + .slots = add_slots, + .name = "sfloat_add", + .casting = NPY_SAME_KIND_CASTING, + }; - slots[0].slot = NPY_METH_resolve_descriptors; - slots[0].pfunc = &add_sfloats_resolve_descriptors; - slots[1].slot = NPY_METH_strided_loop; - slots[1].pfunc = &add_sfloats; - bmeth = PyArrayMethod_FromSpec_int(&spec, 0); - if (bmeth == NULL) { - return -1; - } - res = sfloat_add_loop("add", - bmeth->dtypes, (PyObject *)bmeth->method); - Py_DECREF(bmeth); - if (res < 0) { + PyArray_DTypeMeta *sort_dtypes[2] = 
{&PyArray_SFloatDType, &PyArray_SFloatDType}; + PyType_Slot sort_slots[3] = { + {NPY_METH_resolve_descriptors, &sfloat_sort_resolve_descriptors}, + {NPY_METH_get_loop, &sfloat_sort_get_loop}, + {0, NULL} + }; + PyArrayMethod_Spec sort_spec = { + .nin = 1, + .nout = 1, + .dtypes = sort_dtypes, + .slots = sort_slots, + }; + sort_spec.name = "sfloat_sort"; + sort_spec.casting = NPY_NO_CASTING; + sort_spec.flags = NPY_METH_NO_FLOATINGPOINT_ERRORS; + + PyArray_DTypeMeta *argsort_dtypes[2] = {&PyArray_SFloatDType, &PyArray_IntpDType}; + PyType_Slot argsort_slots[3] = { + {NPY_METH_resolve_descriptors, &sfloat_argsort_resolve_descriptors}, + {NPY_METH_get_loop, &sfloat_argsort_get_loop}, + {0, NULL} + }; + PyArrayMethod_Spec argsort_spec = { + .nin = 1, + .nout = 1, + .dtypes = argsort_dtypes, + .slots = argsort_slots, + }; + argsort_spec.name = "sfloat_argsort"; + argsort_spec.casting = NPY_NO_CASTING; + argsort_spec.flags = NPY_METH_NO_FLOATINGPOINT_ERRORS; + + /* here we chose weirdish names to test the lookup mechanism */ + PyUFunc_LoopSlot loops[] = { + {"multiply", &multiply_spec}, + {"_core._multiarray_umath.add", &add_spec}, + {"numpy:sort", &sort_spec}, + {"numpy._core.fromnumeric:argsort", &argsort_spec}, + {NULL, NULL} + }; + if (PyUFunc_AddLoopsFromSpecs(loops) < 0) { return -1; } /* N.B.: Wrapping isn't actually correct if scaling can be negative */ - if (sfloat_add_wrapping_loop("hypot", dtypes) < 0) { + if (sfloat_add_wrapping_loop("hypot", all_sfloat_dtypes) < 0) { return -1; } /* * Add a promoter for both directions of multiply with double. */ + int res = -1; PyArray_DTypeMeta *double_DType = &PyArray_DoubleDType; PyArray_DTypeMeta *promoter_dtypes[3] = { @@ -867,10 +1077,7 @@ sfloat_init_ufuncs(void) { NPY_NO_EXPORT PyObject * get_sfloat_dtype(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)) { - /* Allow calling the function multiple times. 
*/ - static npy_bool initialized = NPY_FALSE; - - if (initialized) { + if (npy_global_state.get_sfloat_dtype_initialized) { Py_INCREF(&PyArray_SFloatDType); return (PyObject *)&PyArray_SFloatDType; } @@ -899,6 +1106,6 @@ get_sfloat_dtype(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)) return NULL; } - initialized = NPY_TRUE; + npy_global_state.get_sfloat_dtype_initialized = NPY_TRUE; return (PyObject *)&PyArray_SFloatDType; } diff --git a/numpy/_core/src/umath/_struct_ufunc_tests.c b/numpy/_core/src/umath/_struct_ufunc_tests.c index ee71c4698f79..e85c67f9d903 100644 --- a/numpy/_core/src/umath/_struct_ufunc_tests.c +++ b/numpy/_core/src/umath/_struct_ufunc_tests.c @@ -123,18 +123,18 @@ PyMODINIT_FUNC PyInit__struct_ufunc_tests(void) PyArray_Descr *dtype; PyArray_Descr *dtypes[3]; + import_array(); + import_umath(); + m = PyModule_Create(&moduledef); if (m == NULL) { return NULL; } - import_array(); - import_umath(); - add_triplet = PyUFunc_FromFuncAndData(NULL, NULL, NULL, 0, 2, 1, - PyUFunc_None, "add_triplet", - "add_triplet_docstring", 0); + PyUFunc_None, "add_triplet", + NULL, 0); dtype_dict = Py_BuildValue("[(s, s), (s, s), (s, s)]", "f0", "u8", "f1", "u8", "f2", "u8"); @@ -156,5 +156,11 @@ PyMODINIT_FUNC PyInit__struct_ufunc_tests(void) PyDict_SetItemString(d, "add_triplet", add_triplet); Py_DECREF(add_triplet); + +#ifdef Py_GIL_DISABLED + // signal this module supports running with the GIL disabled + PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); +#endif + return m; } diff --git a/numpy/_core/src/umath/_umath_tests.c.src b/numpy/_core/src/umath/_umath_tests.c.src index a16a915c09d5..a1b64ecc0444 100644 --- a/numpy/_core/src/umath/_umath_tests.c.src +++ b/numpy/_core/src/umath/_umath_tests.c.src @@ -13,7 +13,7 @@ #undef NPY_INTERNAL_BUILD #endif // for add_INT32_negative_indexed -#define NPY_TARGET_VERSION NPY_2_0_API_VERSION +#define NPY_TARGET_VERSION NPY_2_1_API_VERSION #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include 
"numpy/ndarrayobject.h" @@ -460,6 +460,15 @@ addUfuncs(PyObject *dictionary) { } PyDict_SetItemString(dictionary, "always_error", f); Py_DECREF(f); + f = PyUFunc_FromFuncAndData(always_error_functions, always_error_data, + always_error_signatures, 1, 1, 1, PyUFunc_None, "always_error_unary", + "simply, broken, ufunc that sets an error (but releases the GIL).", + 0); + if (f == NULL) { + return -1; + } + PyDict_SetItemString(dictionary, "always_error_unary", f); + Py_DECREF(f); f = PyUFunc_FromFuncAndDataAndSignature(always_error_functions, always_error_data, always_error_signatures, 1, 2, 1, PyUFunc_None, "always_error_gufunc", @@ -682,9 +691,7 @@ fail: } // Testing the utilities of the CPU dispatcher -#ifndef NPY_DISABLE_OPTIMIZATION - #include "_umath_tests.dispatch.h" -#endif +#include "_umath_tests.dispatch.h" NPY_CPU_DISPATCH_DECLARE(extern const char *_umath_tests_dispatch_var) NPY_CPU_DISPATCH_DECLARE(const char *_umath_tests_dispatch_func, (void)) NPY_CPU_DISPATCH_DECLARE(void _umath_tests_dispatch_attach, (PyObject *list)) @@ -761,6 +768,95 @@ add_INT32_negative_indexed(PyObject *module, PyObject *dict) { return 0; } +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +// Define the gufunc 'conv1d_full' +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +#define MIN(a, b) (((a) < (b)) ? (a) : (b)) +#define MAX(a, b) (((a) < (b)) ? (b) : (a)) + +int conv1d_full_process_core_dims(PyUFuncObject *ufunc, + npy_intp *core_dim_sizes) +{ + // + // core_dim_sizes will hold the core dimensions [m, n, p]. + // p will be -1 if the caller did not provide the out argument. 
+ // + npy_intp m = core_dim_sizes[0]; + npy_intp n = core_dim_sizes[1]; + npy_intp p = core_dim_sizes[2]; + npy_intp required_p = m + n - 1; + + if (m == 0 && n == 0) { + PyErr_SetString(PyExc_ValueError, + "conv1d_full: both inputs have core dimension 0; the function " + "requires that at least one input has positive size."); + return -1; + } + if (p == -1) { + core_dim_sizes[2] = required_p; + return 0; + } + if (p != required_p) { + PyErr_Format(PyExc_ValueError, + "conv1d_full: the core dimension p of the out parameter " + "does not equal m + n - 1, where m and n are the core " + "dimensions of the inputs x and y; got m=%zd and n=%zd so " + "p must be %zd, but got p=%zd.", + m, n, required_p, p); + return -1; + } + return 0; +} + +static void +conv1d_full_double_loop(char **args, + npy_intp const *dimensions, + npy_intp const *steps, + void *NPY_UNUSED(func)) +{ + // Input and output arrays + char *p_x = args[0]; + char *p_y = args[1]; + char *p_out = args[2]; + // Number of loops of pdist calculations to execute. + npy_intp nloops = dimensions[0]; + // Core dimensions + npy_intp m = dimensions[1]; + npy_intp n = dimensions[2]; + npy_intp p = dimensions[3]; // Must be m + n - 1. 
+ // Core strides + npy_intp x_stride = steps[0]; + npy_intp y_stride = steps[1]; + npy_intp out_stride = steps[2]; + // Inner strides + npy_intp x_inner_stride = steps[3]; + npy_intp y_inner_stride = steps[4]; + npy_intp out_inner_stride = steps[5]; + + for (npy_intp loop = 0; loop < nloops; ++loop, p_x += x_stride, + p_y += y_stride, + p_out += out_stride) { + // Basic implementation of 1d convolution + for (npy_intp k = 0; k < p; ++k) { + double sum = 0.0; + for (npy_intp i = MAX(0, k - n + 1); i < MIN(m, k + 1); ++i) { + double x_i = *(double *)(p_x + i*x_inner_stride); + double y_k_minus_i = *(double *)(p_y + (k - i)*y_inner_stride); + sum += x_i * y_k_minus_i; + } + *(double *)(p_out + k*out_inner_stride) = sum; + } + } +} + +static PyUFuncGenericFunction conv1d_full_functions[] = { + (PyUFuncGenericFunction) &conv1d_full_double_loop +}; +static void *const conv1d_full_data[] = {NULL}; +static const char conv1d_full_typecodes[] = {NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE}; + + static PyMethodDef UMath_TestsMethods[] = { {"test_signature", UMath_Tests_test_signature, METH_VARARGS, "Test signature parsing of ufunc. \n" @@ -829,5 +925,38 @@ PyMODINIT_FUNC PyInit__umath_tests(void) { "cannot load _umath_tests module."); return NULL; } + + // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + // Define the gufunc 'conv1d_full' + // Shape signature is (m),(n)->(p) where p must be m + n - 1. 
+ // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + PyUFuncObject *gufunc = (PyUFuncObject *) PyUFunc_FromFuncAndDataAndSignature( + conv1d_full_functions, + conv1d_full_data, + conv1d_full_typecodes, + 1, 2, 1, PyUFunc_None, "conv1d_full", + "convolution of x and y ('full' mode)", + 0, "(m),(n)->(p)"); + if (gufunc == NULL) { + Py_DECREF(m); + return NULL; + } + gufunc->process_core_dims_func = &conv1d_full_process_core_dims; + + int status = PyModule_AddObject(m, "conv1d_full", (PyObject *) gufunc); + if (status == -1) { + Py_DECREF(gufunc); + Py_DECREF(m); + return NULL; + } + + // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +#ifdef Py_GIL_DISABLED + // signal this module supports running with the GIL disabled + PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); +#endif + return m; } diff --git a/numpy/_core/src/umath/_umath_tests.dispatch.c b/numpy/_core/src/umath/_umath_tests.dispatch.c index 70a4c6d825e3..e92356ac09f2 100644 --- a/numpy/_core/src/umath/_umath_tests.dispatch.c +++ b/numpy/_core/src/umath/_umath_tests.dispatch.c @@ -1,10 +1,5 @@ /** * Testing the utilities of the CPU dispatcher - * - * @targets $werror baseline - * SSE2 SSE41 AVX2 - * VSX VSX2 VSX3 - * NEON ASIMD ASIMDHP */ #define PY_SSIZE_T_CLEAN #include @@ -12,10 +7,7 @@ #include "npy_cpu_dispatch.h" #include "numpy/utils.h" // NPY_TOSTRING -#ifndef NPY_DISABLE_OPTIMIZATION - #include "_umath_tests.dispatch.h" -#endif - +#include "_umath_tests.dispatch.h" NPY_CPU_DISPATCH_DECLARE(const char *_umath_tests_dispatch_func, (void)) NPY_CPU_DISPATCH_DECLARE(extern const char *_umath_tests_dispatch_var) NPY_CPU_DISPATCH_DECLARE(void _umath_tests_dispatch_attach, (PyObject *list)) diff --git a/numpy/_core/src/umath/clip.cpp b/numpy/_core/src/umath/clip.cpp index e051692c6d48..127b019ef8ae 100644 --- a/numpy/_core/src/umath/clip.cpp +++ b/numpy/_core/src/umath/clip.cpp @@ -1,7 +1,6 @@ /** * This module provides the inner loops for the clip ufunc */ 
-#include #define _UMATHMODULE #define _MULTIARRAYMODULE @@ -10,6 +9,7 @@ #define PY_SSIZE_T_CLEAN #include +#include #include "numpy/halffloat.h" #include "numpy/ndarraytypes.h" #include "numpy/npy_common.h" diff --git a/numpy/_core/src/umath/dispatching.c b/numpy/_core/src/umath/dispatching.cpp similarity index 89% rename from numpy/_core/src/umath/dispatching.c rename to numpy/_core/src/umath/dispatching.cpp index 673d4fd68b5c..2998ad0465de 100644 --- a/numpy/_core/src/umath/dispatching.c +++ b/numpy/_core/src/umath/dispatching.cpp @@ -44,9 +44,11 @@ #include "numpy/ndarraytypes.h" #include "numpy/npy_3kcompat.h" +#include "npy_import.h" #include "common.h" #include "npy_pycompat.h" +#include "arrayobject.h" #include "dispatching.h" #include "dtypemeta.h" #include "npy_hashtable.h" @@ -64,7 +66,7 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, PyArrayObject *const ops[], PyArray_DTypeMeta *signature[], PyArray_DTypeMeta *op_dtypes[], - npy_bool allow_legacy_promotion); + npy_bool legacy_promotion_is_possible); /** @@ -177,6 +179,7 @@ PyUFunc_AddLoopFromSpec_int(PyObject *ufunc, PyArrayMethod_Spec *spec, int priv) PyObject *dtypes = PyArray_TupleFromItems( nargs, (PyObject **)bmeth->dtypes, 1); if (dtypes == NULL) { + Py_DECREF(bmeth); return -1; } PyObject *info = PyTuple_Pack(2, dtypes, bmeth->method); @@ -185,7 +188,78 @@ PyUFunc_AddLoopFromSpec_int(PyObject *ufunc, PyArrayMethod_Spec *spec, int priv) if (info == NULL) { return -1; } - return PyUFunc_AddLoop((PyUFuncObject *)ufunc, info, 0); + int res = PyUFunc_AddLoop((PyUFuncObject *)ufunc, info, 0); + Py_DECREF(info); + return res; +} + + +/*UFUNC_API + * Add multiple loops to ufuncs from ArrayMethod specs. This also + * handles the registration of sort and argsort methods for dtypes + * from ArrayMethod specs. 
+ */ +NPY_NO_EXPORT int +PyUFunc_AddLoopsFromSpecs(PyUFunc_LoopSlot *slots) +{ + if (npy_cache_import_runtime( + "numpy", "sort", &npy_runtime_imports.sort) < 0) { + return -1; + } + if (npy_cache_import_runtime( + "numpy", "argsort", &npy_runtime_imports.argsort) < 0) { + return -1; + } + + PyUFunc_LoopSlot *slot; + for (slot = slots; slot->name != NULL; slot++) { + PyObject *ufunc = npy_import_entry_point(slot->name); + if (ufunc == NULL) { + return -1; + } + + if (ufunc == npy_runtime_imports.sort) { + Py_DECREF(ufunc); + + PyArray_DTypeMeta *dtype = slot->spec->dtypes[0]; + PyBoundArrayMethodObject *sort_meth = PyArrayMethod_FromSpec_int(slot->spec, 0); + if (sort_meth == NULL) { + return -1; + } + + NPY_DT_SLOTS(dtype)->sort_meth = sort_meth->method; + Py_INCREF(sort_meth->method); + Py_DECREF(sort_meth); + } + else if (ufunc == npy_runtime_imports.argsort) { + Py_DECREF(ufunc); + + PyArray_DTypeMeta *dtype = slot->spec->dtypes[0]; + PyBoundArrayMethodObject *argsort_meth = PyArrayMethod_FromSpec_int(slot->spec, 0); + if (argsort_meth == NULL) { + return -1; + } + + NPY_DT_SLOTS(dtype)->argsort_meth = argsort_meth->method; + Py_INCREF(argsort_meth->method); + Py_DECREF(argsort_meth); + } + else { + if (!PyObject_TypeCheck(ufunc, &PyUFunc_Type)) { + PyErr_Format(PyExc_TypeError, "%s was not a ufunc!", slot->name); + Py_DECREF(ufunc); + return -1; + } + + int ret = PyUFunc_AddLoopFromSpec_int(ufunc, slot->spec, 0); + Py_DECREF(ufunc); + if (ret < 0) { + return -1; + } + } + } + + return 0; } @@ -212,7 +286,7 @@ PyUFunc_AddLoopFromSpec_int(PyObject *ufunc, PyArrayMethod_Spec *spec, int priv) * both are `(f4, f4, f8)`. The cache would need to store also which * output was provided by `dtype=`/`signature=`. * - * @param ufunc + * @param ufunc The universal function to be resolved * @param op_dtypes The DTypes that are either passed in (defined by an * operand) or defined by the `signature` as also passed in as * `fixed_DTypes`. 
@@ -503,8 +577,9 @@ call_promoter_and_recurse(PyUFuncObject *ufunc, PyObject *info, PyObject *promoter = PyTuple_GET_ITEM(info, 1); if (PyCapsule_CheckExact(promoter)) { /* We could also go the other way and wrap up the python function... */ - PyArrayMethod_PromoterFunction *promoter_function = PyCapsule_GetPointer( - promoter, "numpy._ufunc_promoter"); + PyArrayMethod_PromoterFunction *promoter_function = + (PyArrayMethod_PromoterFunction *)PyCapsule_GetPointer( + promoter, "numpy._ufunc_promoter"); if (promoter_function == NULL) { return NULL; } @@ -751,6 +826,13 @@ add_and_return_legacy_wrapping_ufunc_loop(PyUFuncObject *ufunc, * to use for a ufunc. This function may recurse with `do_legacy_fallback` * set to False. * + * The result is cached in the ufunc's dispatch cache for faster lookup next time. + * It is possible that multiple threads call this function at the same time, and + * there is cache miss, in that case all threads will do the full resolution, however + * only one will store the result in the cache (the others get the stored result). + * This is ensured by `PyArrayIdentityHash_SetItemDefault` which only sets the item + * if it is not already set otherwise returning the existing value. + * * If value-based promotion is necessary, this is handled ahead of time by * `promote_and_get_ufuncimpl`. */ @@ -759,7 +841,7 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, PyArrayObject *const ops[], PyArray_DTypeMeta *signature[], PyArray_DTypeMeta *op_dtypes[], - npy_bool allow_legacy_promotion) + npy_bool legacy_promotion_is_possible) { /* * Fetch the dispatching info which consists of the implementation and @@ -769,8 +851,9 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, * 2. Check all registered loops/promoters to find the best match. * 3. Fall back to the legacy implementation if no match was found. 
*/ - PyObject *info = PyArrayIdentityHash_GetItem(ufunc->_dispatch_cache, - (PyObject **)op_dtypes); + PyObject *info = PyArrayIdentityHash_GetItem( + (PyArrayIdentityHash *)ufunc->_dispatch_cache, + (PyObject **)op_dtypes); if (info != NULL && PyObject_TypeCheck( PyTuple_GET_ITEM(info, 1), &PyArrayMethod_Type)) { /* Found the ArrayMethod and NOT a promoter: return it */ @@ -792,11 +875,13 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, * Found the ArrayMethod and NOT promoter. Before returning it * add it to the cache for faster lookup in the future. */ - if (PyArrayIdentityHash_SetItem(ufunc->_dispatch_cache, - (PyObject **)op_dtypes, info, 0) < 0) { + PyObject *result = NULL; + if (PyArrayIdentityHash_SetItemDefault( + (PyArrayIdentityHash *)ufunc->_dispatch_cache, + (PyObject **)op_dtypes, info, &result) < 0) { return NULL; } - return info; + return result; } } @@ -814,11 +899,13 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, } else if (info != NULL) { /* Add result to the cache using the original types: */ - if (PyArrayIdentityHash_SetItem(ufunc->_dispatch_cache, - (PyObject **)op_dtypes, info, 0) < 0) { + PyObject *result = NULL; + if (PyArrayIdentityHash_SetItemDefault( + (PyArrayIdentityHash *)ufunc->_dispatch_cache, + (PyObject **)op_dtypes, info, &result) < 0) { return NULL; } - return info; + return result; } } @@ -828,7 +915,7 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, * However, we need to give the legacy implementation a chance here. * (it will modify `op_dtypes`). 
*/ - if (!allow_legacy_promotion || ufunc->type_resolver == NULL || + if (!legacy_promotion_is_possible || ufunc->type_resolver == NULL || (ufunc->ntypes == 0 && ufunc->userloops == NULL)) { /* Already tried or not a "legacy" ufunc (no loop found, return) */ return NULL; @@ -880,15 +967,21 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, Py_XDECREF(new_op_dtypes[i]); } - /* Add this to the cache using the original types: */ - if (cacheable && PyArrayIdentityHash_SetItem(ufunc->_dispatch_cache, - (PyObject **)op_dtypes, info, 0) < 0) { + if (info == NULL) { return NULL; } + if (cacheable) { + PyObject *result = NULL; + /* Add this to the cache using the original types: */ + if (PyArrayIdentityHash_SetItemDefault((PyArrayIdentityHash *)ufunc->_dispatch_cache, + (PyObject **)op_dtypes, info, &result) < 0) { + return NULL; + } + return result; + } return info; } - /** * The central entry-point for the promotion and dispatching machinery. * @@ -935,11 +1028,13 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, PyArray_DTypeMeta *signature[], PyArray_DTypeMeta *op_dtypes[], npy_bool force_legacy_promotion, - npy_bool allow_legacy_promotion, npy_bool promoting_pyscalars, npy_bool ensure_reduce_compatible) { int nin = ufunc->nin, nargs = ufunc->nargs; + npy_bool legacy_promotion_is_possible = NPY_TRUE; + PyObject *all_dtypes = NULL; + PyArrayMethodObject *method = NULL; /* * Get the actual DTypes we operate with by setting op_dtypes[i] from @@ -964,57 +1059,26 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, */ Py_CLEAR(op_dtypes[i]); } - } - - int current_promotion_state = get_npy_promotion_state(); - - if (force_legacy_promotion - && current_promotion_state == NPY_USE_LEGACY_PROMOTION - && (ufunc->ntypes != 0 || ufunc->userloops != NULL)) { /* - * We must use legacy promotion for value-based logic. Call the old - * resolver once up-front to get the "actual" loop dtypes. - * After this (additional) promotion, we can even use normal caching. 
+ * If the op_dtype ends up being a non-legacy one, then we cannot use + * legacy promotion (unless this is a python scalar). */ - int cacheable = 1; /* unused, as we modify the original `op_dtypes` */ - if (legacy_promote_using_legacy_type_resolver(ufunc, - ops, signature, op_dtypes, &cacheable, NPY_FALSE) < 0) { - goto handle_error; + if (op_dtypes[i] != NULL && !NPY_DT_is_legacy(op_dtypes[i]) && ( + signature[i] != NULL || // signature cannot be a pyscalar + !(PyArray_FLAGS(ops[i]) & NPY_ARRAY_WAS_PYTHON_LITERAL))) { + legacy_promotion_is_possible = NPY_FALSE; } } - /* Pause warnings and always use "new" path */ - set_npy_promotion_state(NPY_USE_WEAK_PROMOTION); PyObject *info = promote_and_get_info_and_ufuncimpl(ufunc, - ops, signature, op_dtypes, allow_legacy_promotion); - set_npy_promotion_state(current_promotion_state); + ops, signature, op_dtypes, legacy_promotion_is_possible); if (info == NULL) { goto handle_error; } - PyArrayMethodObject *method = (PyArrayMethodObject *)PyTuple_GET_ITEM(info, 1); - PyObject *all_dtypes = PyTuple_GET_ITEM(info, 0); - - /* If necessary, check if the old result would have been different */ - if (NPY_UNLIKELY(current_promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN) - && (force_legacy_promotion || promoting_pyscalars) - && npy_give_promotion_warnings()) { - PyArray_DTypeMeta *check_dtypes[NPY_MAXARGS]; - for (int i = 0; i < nargs; i++) { - check_dtypes[i] = (PyArray_DTypeMeta *)PyTuple_GET_ITEM( - all_dtypes, i); - } - /* Before calling to the legacy promotion, pretend that is the state: */ - set_npy_promotion_state(NPY_USE_LEGACY_PROMOTION); - int res = legacy_promote_using_legacy_type_resolver(ufunc, - ops, signature, check_dtypes, NULL, NPY_TRUE); - /* Reset the promotion state: */ - set_npy_promotion_state(NPY_USE_WEAK_PROMOTION_AND_WARN); - if (res < 0) { - goto handle_error; - } - } + method = (PyArrayMethodObject *)PyTuple_GET_ITEM(info, 1); + all_dtypes = PyTuple_GET_ITEM(info, 0); /* * In certain cases (only the 
logical ufuncs really), the loop we found may @@ -1032,7 +1096,7 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, Py_INCREF(signature[0]); return promote_and_get_ufuncimpl(ufunc, ops, signature, op_dtypes, - force_legacy_promotion, allow_legacy_promotion, + force_legacy_promotion, promoting_pyscalars, NPY_FALSE); } @@ -1062,7 +1126,7 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, * then we chain it, because DTypePromotionError effectively means that there * is no loop available. (We failed finding a loop by using promotion.) */ - else if (PyErr_ExceptionMatches(npy_DTypePromotionError)) { + else if (PyErr_ExceptionMatches(npy_static_pydata.DTypePromotionError)) { PyObject *err_type = NULL, *err_value = NULL, *err_traceback = NULL; PyErr_Fetch(&err_type, &err_value, &err_traceback); raise_no_loop_found_error(ufunc, (PyObject **)op_dtypes); @@ -1245,7 +1309,7 @@ install_logical_ufunc_promoter(PyObject *ufunc) if (dtype_tuple == NULL) { return -1; } - PyObject *promoter = PyCapsule_New(&logical_ufunc_promoter, + PyObject *promoter = PyCapsule_New((void *)&logical_ufunc_promoter, "numpy._ufunc_promoter", NULL); if (promoter == NULL) { Py_DECREF(dtype_tuple); @@ -1258,8 +1322,9 @@ install_logical_ufunc_promoter(PyObject *ufunc) if (info == NULL) { return -1; } - - return PyUFunc_AddLoop((PyUFuncObject *)ufunc, info, 0); + int res = PyUFunc_AddLoop((PyUFuncObject *)ufunc, info, 0); + Py_DECREF(info); + return res; } /* @@ -1333,5 +1398,7 @@ PyUFunc_AddPromoter( if (info == NULL) { return -1; } - return PyUFunc_AddLoop((PyUFuncObject *)ufunc, info, 0); + int res = PyUFunc_AddLoop((PyUFuncObject *)ufunc, info, 0); + Py_DECREF(info); + return res; } diff --git a/numpy/_core/src/umath/dispatching.h b/numpy/_core/src/umath/dispatching.h index c711a66688c6..7ca8bd7a1598 100644 --- a/numpy/_core/src/umath/dispatching.h +++ b/numpy/_core/src/umath/dispatching.h @@ -16,13 +16,15 @@ PyUFunc_AddLoop(PyUFuncObject *ufunc, PyObject *info, int ignore_duplicate); NPY_NO_EXPORT 
int PyUFunc_AddLoopFromSpec_int(PyObject *ufunc, PyArrayMethod_Spec *spec, int priv); +NPY_NO_EXPORT int +PyUFunc_AddLoopsFromSpecs(PyUFunc_LoopSlot *slots); + NPY_NO_EXPORT PyArrayMethodObject * promote_and_get_ufuncimpl(PyUFuncObject *ufunc, PyArrayObject *const ops[], PyArray_DTypeMeta *signature[], PyArray_DTypeMeta *op_dtypes[], npy_bool force_legacy_promotion, - npy_bool allow_legacy_promotion, npy_bool promote_pyscalars, npy_bool ensure_reduce_compatible); @@ -44,6 +46,10 @@ object_only_ufunc_promoter(PyObject *ufunc, NPY_NO_EXPORT int install_logical_ufunc_promoter(PyObject *ufunc); +NPY_NO_EXPORT PyObject * +get_info_no_cast(PyUFuncObject *ufunc, PyArray_DTypeMeta *op_dtype, + int ndtypes); + #ifdef __cplusplus } #endif diff --git a/numpy/_core/src/umath/extobj.c b/numpy/_core/src/umath/extobj.c index d32feaaa31da..77a76873d20f 100644 --- a/numpy/_core/src/umath/extobj.c +++ b/numpy/_core/src/umath/extobj.c @@ -14,16 +14,8 @@ #include "extobj.h" #include "numpy/ufuncobject.h" -#include "ufunc_object.h" /* for npy_um_str_pyvals_name */ #include "common.h" - - -/* - * The global ContextVar to store the extobject. It is exposed to Python - * as `_extobj_contextvar`. 
- */ -static PyObject *default_extobj_capsule = NULL; -NPY_NO_EXPORT PyObject *npy_extobj_contextvar = NULL; +#include "npy_pycompat.h" #define UFUNC_ERR_IGNORE 0 @@ -44,11 +36,6 @@ NPY_NO_EXPORT PyObject *npy_extobj_contextvar = NULL; #define UFUNC_SHIFT_UNDERFLOW 6 #define UFUNC_SHIFT_INVALID 9 -/* The python strings for the above error modes defined in extobj.h */ -const char *errmode_cstrings[] = { - "ignore", "warn", "raise", "call", "print", "log"}; -static PyObject *errmode_strings[6] = {NULL}; - /* Default user error mode (underflows are ignored, others warn) */ #define UFUNC_ERR_DEFAULT \ (UFUNC_ERR_WARN << UFUNC_SHIFT_DIVIDEBYZERO) + \ @@ -131,7 +118,8 @@ fetch_curr_extobj_state(npy_extobj *extobj) { PyObject *capsule; if (PyContextVar_Get( - npy_extobj_contextvar, default_extobj_capsule, &capsule) < 0) { + npy_static_pydata.npy_extobj_contextvar, + npy_static_pydata.default_extobj_capsule, &capsule) < 0) { return -1; } npy_extobj *obj = PyCapsule_GetPointer(capsule, "numpy.ufunc.extobj"); @@ -153,26 +141,22 @@ fetch_curr_extobj_state(npy_extobj *extobj) NPY_NO_EXPORT int init_extobj(void) { - /* - * First initialize the string constants we need to parse `errstate()` - * inputs. 
- */ - for (int i = 0; i <= UFUNC_ERR_LOG; i++) { - errmode_strings[i] = PyUnicode_InternFromString(errmode_cstrings[i]); - if (errmode_strings[i] == NULL) { - return -1; - } - } - - default_extobj_capsule = make_extobj_capsule( + npy_static_pydata.default_extobj_capsule = make_extobj_capsule( NPY_BUFSIZE, UFUNC_ERR_DEFAULT, Py_None); - if (default_extobj_capsule == NULL) { + if (npy_static_pydata.default_extobj_capsule == NULL) { + return -1; + } +#ifdef Py_GIL_DISABLED + if (PyUnstable_SetImmortal(npy_static_pydata.default_extobj_capsule) == 0) { + PyErr_SetString(PyExc_RuntimeError, "Could not mark extobj capsule as immortal"); + Py_CLEAR(npy_static_pydata.default_extobj_capsule); return -1; } - npy_extobj_contextvar = PyContextVar_New( - "numpy.ufunc.extobj", default_extobj_capsule); - if (npy_extobj_contextvar == NULL) { - Py_CLEAR(default_extobj_capsule); +#endif + npy_static_pydata.npy_extobj_contextvar = PyContextVar_New( + "numpy.ufunc.extobj", npy_static_pydata.default_extobj_capsule); + if (npy_static_pydata.npy_extobj_contextvar == NULL) { + Py_CLEAR(npy_static_pydata.default_extobj_capsule); return -1; } return 0; @@ -191,7 +175,8 @@ errmodeconverter(PyObject *obj, int *mode) } int i = 0; for (; i <= UFUNC_ERR_LOG; i++) { - int eq = PyObject_RichCompareBool(obj, errmode_strings[i], Py_EQ); + int eq = PyObject_RichCompareBool( + obj, npy_interned_str.errmode_strings[i], Py_EQ); if (eq == -1) { return 0; } @@ -212,7 +197,7 @@ errmodeconverter(PyObject *obj, int *mode) /* * This function is currently exposed as `umath._seterrobj()`, it is private * and returns a capsule representing the errstate. This capsule is then - * assigned to the `npy_extobj_contextvar` in Python. + * assigned to the `_extobj_contextvar` in Python. 
*/ NPY_NO_EXPORT PyObject * extobj_make_extobj(PyObject *NPY_UNUSED(mod), @@ -338,19 +323,23 @@ extobj_get_extobj_dict(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(noarg)) } /* Set all error modes: */ mode = (extobj.errmask & UFUNC_MASK_DIVIDEBYZERO) >> UFUNC_SHIFT_DIVIDEBYZERO; - if (PyDict_SetItemString(result, "divide", errmode_strings[mode]) < 0) { + if (PyDict_SetItemString(result, "divide", + npy_interned_str.errmode_strings[mode]) < 0) { goto fail; } mode = (extobj.errmask & UFUNC_MASK_OVERFLOW) >> UFUNC_SHIFT_OVERFLOW; - if (PyDict_SetItemString(result, "over", errmode_strings[mode]) < 0) { + if (PyDict_SetItemString(result, "over", + npy_interned_str.errmode_strings[mode]) < 0) { goto fail; } mode = (extobj.errmask & UFUNC_MASK_UNDERFLOW) >> UFUNC_SHIFT_UNDERFLOW; - if (PyDict_SetItemString(result, "under", errmode_strings[mode]) < 0) { + if (PyDict_SetItemString(result, "under", + npy_interned_str.errmode_strings[mode]) < 0) { goto fail; } mode = (extobj.errmask & UFUNC_MASK_INVALID) >> UFUNC_SHIFT_INVALID; - if (PyDict_SetItemString(result, "invalid", errmode_strings[mode]) < 0) { + if (PyDict_SetItemString(result, "invalid", + npy_interned_str.errmode_strings[mode]) < 0) { goto fail; } @@ -417,7 +406,7 @@ _error_handler(const char *name, int method, PyObject *pyfunc, char *errtype, switch(method) { case UFUNC_ERR_WARN: PyOS_snprintf(msg, sizeof(msg), "%s encountered in %s", errtype, name); - if (PyErr_Warn(PyExc_RuntimeWarning, msg) < 0) { + if (PyErr_WarnEx(PyExc_RuntimeWarning, msg, 1) < 0) { goto fail; } break; diff --git a/numpy/_core/src/umath/extobj.h b/numpy/_core/src/umath/extobj.h index 0cd5afd76218..9176af6a3539 100644 --- a/numpy/_core/src/umath/extobj.h +++ b/numpy/_core/src/umath/extobj.h @@ -4,9 +4,6 @@ #include /* for NPY_NO_EXPORT */ -/* For the private exposure of the extobject contextvar to Python */ -extern NPY_NO_EXPORT PyObject *npy_extobj_contextvar; - /* * Represent the current ufunc error (and buffer) state. 
we are using a * capsule for now to store this, but it could make sense to refactor it into diff --git a/numpy/_core/src/umath/fast_loop_macros.h b/numpy/_core/src/umath/fast_loop_macros.h index b8c1926b2f7e..42c2c9d8d04f 100644 --- a/numpy/_core/src/umath/fast_loop_macros.h +++ b/numpy/_core/src/umath/fast_loop_macros.h @@ -10,13 +10,13 @@ #ifndef _NPY_UMATH_FAST_LOOP_MACROS_H_ #define _NPY_UMATH_FAST_LOOP_MACROS_H_ -#include - #include "simd/simd.h" +#include + /* * largest simd vector size in bytes numpy supports - * it is currently a extremely large value as it is only used for memory + * it is currently an extremely large value as it is only used for memory * overlap checks */ #if NPY_SIMD > 0 @@ -315,7 +315,7 @@ abs_ptrdiff(char *a, char *b) /* * stride is equal to element size and input and destination are equal or * don't overlap within one register. The check of the steps against - * esize also quarantees that steps are >= 0. + * esize also guarantees that steps are >= 0. */ #define IS_BLOCKABLE_UNARY(esize, vsize) \ (steps[0] == (esize) && steps[0] == steps[1] && \ @@ -323,34 +323,6 @@ abs_ptrdiff(char *a, char *b) ((abs_ptrdiff(args[1], args[0]) >= (vsize)) || \ ((abs_ptrdiff(args[1], args[0]) == 0)))) -/* - * Avoid using SIMD for very large step sizes for several reasons: - * 1) Supporting large step sizes requires use of i64gather/scatter_ps instructions, - * in which case we need two i64gather instructions and an additional vinsertf32x8 - * instruction to load a single zmm register (since one i64gather instruction - * loads into a ymm register). This is not ideal for performance. - * 2) Gather and scatter instructions can be slow when the loads/stores - * cross page boundaries. - * - * We instead rely on i32gather/scatter_ps instructions which use a 32-bit index - * element. The index needs to be < INT_MAX to avoid overflow. MAX_STEP_SIZE - * ensures this. 
The condition also requires that the input and output arrays - * should have no overlap in memory. - */ -#define IS_BINARY_SMALL_STEPS_AND_NOMEMOVERLAP \ - ((labs(steps[0]) < MAX_STEP_SIZE) && \ - (labs(steps[1]) < MAX_STEP_SIZE) && \ - (labs(steps[2]) < MAX_STEP_SIZE) && \ - (nomemoverlap(args[0], steps[0] * dimensions[0], args[2], steps[2] * dimensions[0])) && \ - (nomemoverlap(args[1], steps[1] * dimensions[0], args[2], steps[2] * dimensions[0]))) - -#define IS_UNARY_TWO_OUT_SMALL_STEPS_AND_NOMEMOVERLAP \ - ((labs(steps[0]) < MAX_STEP_SIZE) && \ - (labs(steps[1]) < MAX_STEP_SIZE) && \ - (labs(steps[2]) < MAX_STEP_SIZE) && \ - (nomemoverlap(args[0], steps[0] * dimensions[0], args[2], steps[2] * dimensions[0])) && \ - (nomemoverlap(args[0], steps[0] * dimensions[0], args[1], steps[1] * dimensions[0]))) - /* * 1) Output should be contiguous, can handle strided input data * 2) Input step should be smaller than MAX_STEP_SIZE for performance @@ -359,7 +331,7 @@ abs_ptrdiff(char *a, char *b) #define IS_OUTPUT_BLOCKABLE_UNARY(esizein, esizeout, vsize) \ ((steps[0] & (esizein-1)) == 0 && \ steps[1] == (esizeout) && llabs(steps[0]) < MAX_STEP_SIZE && \ - (nomemoverlap(args[1], steps[1] * dimensions[0], args[0], steps[0] * dimensions[0]))) + (nomemoverlap(args[1], steps[1], args[0], steps[0], dimensions[0]))) #define IS_BLOCKABLE_REDUCE(esize, vsize) \ (steps[1] == (esize) && abs_ptrdiff(args[1], args[0]) >= (vsize) && \ diff --git a/numpy/_core/src/umath/funcs.inc.src b/numpy/_core/src/umath/funcs.inc.src index df81c835034a..d1b0b5522927 100644 --- a/numpy/_core/src/umath/funcs.inc.src +++ b/numpy/_core/src/umath/funcs.inc.src @@ -9,7 +9,8 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #include "npy_import.h" - +#include "npy_static_data.h" +#include "multiarraymodule.h" /* ***************************************************************************** @@ -145,47 +146,30 @@ npy_ObjectLogicalNot(PyObject *i1) return NULL; } else if (retcode) { - Py_INCREF(Py_True); - 
return Py_True; + Py_RETURN_TRUE; } else { - Py_INCREF(Py_False); - return Py_False; + Py_RETURN_FALSE; } } } static PyObject * npy_ObjectFloor(PyObject *obj) { - static PyObject *math_floor_func = NULL; - - npy_cache_import("math", "floor", &math_floor_func); - if (math_floor_func == NULL) { - return NULL; - } - return PyObject_CallFunction(math_floor_func, "O", obj); + return PyObject_CallFunction(npy_static_pydata.math_floor_func, + "O", obj); } static PyObject * npy_ObjectCeil(PyObject *obj) { - static PyObject *math_ceil_func = NULL; - - npy_cache_import("math", "ceil", &math_ceil_func); - if (math_ceil_func == NULL) { - return NULL; - } - return PyObject_CallFunction(math_ceil_func, "O", obj); + return PyObject_CallFunction(npy_static_pydata.math_ceil_func, + "O", obj); } static PyObject * npy_ObjectTrunc(PyObject *obj) { - static PyObject *math_trunc_func = NULL; - - npy_cache_import("math", "trunc", &math_trunc_func); - if (math_trunc_func == NULL) { - return NULL; - } - return PyObject_CallFunction(math_trunc_func, "O", obj); + return PyObject_CallFunction(npy_static_pydata.math_trunc_func, + "O", obj); } static PyObject * @@ -195,13 +179,8 @@ npy_ObjectGCD(PyObject *i1, PyObject *i2) /* use math.gcd if valid on the provided types */ { - static PyObject *math_gcd_func = NULL; - - npy_cache_import("math", "gcd", &math_gcd_func); - if (math_gcd_func == NULL) { - return NULL; - } - gcd = PyObject_CallFunction(math_gcd_func, "OO", i1, i2); + gcd = PyObject_CallFunction(npy_static_pydata.math_gcd_func, + "OO", i1, i2); if (gcd != NULL) { return gcd; } @@ -211,13 +190,12 @@ npy_ObjectGCD(PyObject *i1, PyObject *i2) /* otherwise, use our internal one, written in python */ { - static PyObject *internal_gcd_func = NULL; - - npy_cache_import("numpy._core._internal", "_gcd", &internal_gcd_func); - if (internal_gcd_func == NULL) { + if (npy_cache_import_runtime("numpy._core._internal", "_gcd", + &npy_runtime_imports.internal_gcd_func) == -1) { return NULL; } - gcd = 
PyObject_CallFunction(internal_gcd_func, "OO", i1, i2); + gcd = PyObject_CallFunction(npy_runtime_imports.internal_gcd_func, + "OO", i1, i2); if (gcd == NULL) { return NULL; } diff --git a/numpy/_core/src/umath/legacy_array_method.c b/numpy/_core/src/umath/legacy_array_method.c index 9592df0e1366..7a85937fcc8f 100644 --- a/numpy/_core/src/umath/legacy_array_method.c +++ b/numpy/_core/src/umath/legacy_array_method.c @@ -311,7 +311,7 @@ get_initial_from_ufunc( } } else if (context->descriptors[0]->type_num == NPY_OBJECT - && !reduction_is_empty) { + && !reduction_is_empty) { /* Allows `sum([object()])` to work, but use 0 when empty. */ Py_DECREF(identity_obj); return 0; @@ -323,13 +323,6 @@ get_initial_from_ufunc( return -1; } - if (PyTypeNum_ISNUMBER(context->descriptors[0]->type_num)) { - /* For numbers we can cache to avoid going via Python ints */ - memcpy(context->method->legacy_initial, initial, - context->descriptors[0]->elsize); - context->method->get_reduction_initial = ©_cached_initial; - } - /* Reduction can use the initial value */ return 1; } @@ -427,11 +420,46 @@ PyArray_NewLegacyWrappingArrayMethod(PyUFuncObject *ufunc, }; PyBoundArrayMethodObject *bound_res = PyArrayMethod_FromSpec_int(&spec, 1); + if (bound_res == NULL) { return NULL; } PyArrayMethodObject *res = bound_res->method; + + // set cached initial value for numeric reductions to avoid creating + // a python int in every reduction + if (PyTypeNum_ISNUMBER(bound_res->dtypes[0]->type_num) && + ufunc->nin == 2 && ufunc->nout == 1) { + + PyArray_Descr *descrs[3]; + + for (int i = 0; i < 3; i++) { + // only dealing with numeric legacy dtypes so this should always be + // valid + descrs[i] = bound_res->dtypes[i]->singleton; + } + + PyArrayMethod_Context context; + NPY_context_init(&context, descrs); + context.caller = (PyObject *)ufunc; + context.method = bound_res->method; + + int ret = get_initial_from_ufunc(&context, 0, context.method->legacy_initial); + + if (ret < 0) { + Py_DECREF(bound_res); 
+ return NULL; + } + + // only use the cached initial value if it's valid + if (ret > 0) { + context.method->get_reduction_initial = ©_cached_initial; + } + } + + Py_INCREF(res); Py_DECREF(bound_res); + return res; } diff --git a/numpy/_core/src/umath/loops.c.src b/numpy/_core/src/umath/loops.c.src index 5ac67fa3024b..1ad9cab4666e 100644 --- a/numpy/_core/src/umath/loops.c.src +++ b/numpy/_core/src/umath/loops.c.src @@ -486,6 +486,25 @@ _@TYPE@_squared_exponentiation_helper(@type@ base, @type@ exponent_two, int firs return out; } +static inline @type@ +_@TYPE@_power_fast_path_helper(@type@ in1, @type@ in2, @type@ *op1) { + // Fast path for power calculation + if (in2 == 0 || in1 == 1) { + *op1 = 1; + } + else if (in2 == 1) { + *op1 = in1; + } + else if (in2 == 2) { + *op1 = in1 * in1; + } + else { + return 1; + } + return 0; +} + + NPY_NO_EXPORT void @TYPE@_power(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { @@ -493,21 +512,28 @@ NPY_NO_EXPORT void // stride for second argument is 0 BINARY_DEFS const @type@ in2 = *(@type@ *)ip2; - #if @SIGNED@ - if (in2 < 0) { - npy_gil_error(PyExc_ValueError, - "Integers to negative integer powers are not allowed."); - return; - } - #endif + +#if @SIGNED@ + if (in2 < 0) { + npy_gil_error(PyExc_ValueError, + "Integers to negative integer powers are not allowed."); + return; + } +#endif int first_bit = in2 & 1; @type@ in2start = in2 >> 1; + int fastop_exists = (in2 == 0) || (in2 == 1) || (in2 == 2); + BINARY_LOOP_SLIDING { @type@ in1 = *(@type@ *)ip1; - - *((@type@ *) op1) = _@TYPE@_squared_exponentiation_helper(in1, in2start, first_bit); + if (fastop_exists) { + _@TYPE@_power_fast_path_helper(in1, in2, (@type@ *)op1); + } + else { + *((@type@ *) op1) = _@TYPE@_squared_exponentiation_helper(in1, in2start, first_bit); + } } return; } @@ -518,22 +544,16 @@ NPY_NO_EXPORT void #if @SIGNED@ if (in2 < 0) { npy_gil_error(PyExc_ValueError, - "Integers to negative integer powers are not allowed."); 
+ "Integers to negative integer powers are not allowed."); return; } #endif - if (in2 == 0) { - *((@type@ *)op1) = 1; - continue; - } - if (in1 == 1) { - *((@type@ *)op1) = 1; - continue; - } - int first_bit = in2 & 1; - in2 >>= 1; - *((@type@ *) op1) = _@TYPE@_squared_exponentiation_helper(in1, in2, first_bit); + if (_@TYPE@_power_fast_path_helper(in1, in2, (@type@ *)op1) != 0) { + int first_bit = in2 & 1; + in2 >>= 1; + *((@type@ *) op1) = _@TYPE@_squared_exponentiation_helper(in1, in2, first_bit); + } } } /**end repeat**/ @@ -668,7 +688,12 @@ TIMEDELTA_sign(char **args, npy_intp const *dimensions, npy_intp const *steps, v { UNARY_LOOP { const npy_timedelta in1 = *(npy_timedelta *)ip1; - *((npy_timedelta *)op1) = in1 > 0 ? 1 : (in1 < 0 ? -1 : 0); + if (in1 == NPY_DATETIME_NAT) { + *((npy_double *)op1) = NPY_NAN; + } + else { + *((npy_double *)op1) = in1 > 0 ? 1.0 : (in1 < 0 ? -1.0 : 0.0); + } } } diff --git a/numpy/_core/src/umath/loops.h.src b/numpy/_core/src/umath/loops.h.src index 55db18de4474..4163f2e65c29 100644 --- a/numpy/_core/src/umath/loops.h.src +++ b/numpy/_core/src/umath/loops.h.src @@ -10,6 +10,10 @@ #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN #endif +#ifdef __cplusplus +extern "C" { +#endif + /* ***************************************************************************** ** BOOLEAN LOOPS ** @@ -36,10 +40,8 @@ typedef struct PyArrayMethod_Context_tag PyArrayMethod_Context; typedef struct NpyAuxData_tag NpyAuxData; -#ifndef NPY_DISABLE_OPTIMIZATION - #include "loops_comparison.dispatch.h" -#endif +#include "loops_comparison.dispatch.h" /**begin repeat * #kind = equal, not_equal, greater, greater_equal, less, less_equal# */ @@ -47,10 +49,8 @@ NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void BOOL_@kind@, (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))) /**end repeat**/ -#ifndef NPY_DISABLE_OPTIMIZATION - #include "loops_logical.dispatch.h" -#endif +#include "loops_logical.dispatch.h" /**begin repeat * #kind = 
logical_and, logical_or, logical_not, absolute# */ @@ -69,11 +69,9 @@ BOOL_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void /**end repeat**/ -#ifndef NPY_DISABLE_OPTIMIZATION - #include "loops_autovec.dispatch.h" -#endif +#include "loops_autovec.dispatch.h" /**begin repeat - * #kind = isnan, isinf, isfinite# + * #kind = isnan, isinf, isfinite, floor, ceil, trunc# */ NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void BOOL_@kind@, (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))) @@ -85,10 +83,7 @@ NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void BOOL_@kind@, ***************************************************************************** */ -#ifndef NPY_DISABLE_OPTIMIZATION - #include "loops_arithmetic.dispatch.h" -#endif - +#include "loops_arithmetic.dispatch.h" /**begin repeat * #TYPE = UBYTE, USHORT, UINT, ULONG, ULONGLONG, BYTE, SHORT, INT, LONG, LONGLONG# @@ -102,10 +97,7 @@ NPY_NO_EXPORT int /**end repeat3**/ /**end repeat**/ -#ifndef NPY_DISABLE_OPTIMIZATION - #include "loops_modulo.dispatch.h" -#endif - +#include "loops_modulo.dispatch.h" /**begin repeat * #TYPE = UBYTE, USHORT, UINT, ULONG, ULONGLONG, BYTE, SHORT, INT, LONG, LONGLONG# @@ -118,10 +110,7 @@ NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void @TYPE@_@kind@, /**end repeat1**/ /**end repeat**/ -#ifndef NPY_DISABLE_OPTIMIZATION - #include "loops_comparison.dispatch.h" -#endif - +#include "loops_comparison.dispatch.h" /**begin repeat * #TYPE = UBYTE, USHORT, UINT, ULONG, ULONGLONG, BYTE, SHORT, INT, LONG, LONGLONG# @@ -135,9 +124,8 @@ NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void @TYPE@_@kind@, /**end repeat**/ -#ifndef NPY_DISABLE_OPTIMIZATION - #include "loops_autovec.dispatch.h" -#endif + +#include "loops_autovec.dispatch.h" /**begin repeat * #TYPE = UBYTE, USHORT, UINT, ULONG, ULONGLONG, BYTE, SHORT, INT, LONG, LONGLONG# @@ -175,6 +163,12 @@ NPY_NO_EXPORT void NPY_NO_EXPORT void @S@@TYPE@_positive(char **args, npy_intp const *dimensions, npy_intp const 
*steps, void *NPY_UNUSED(func)); +/**begin repeat2 + * #kind = floor, ceil, trunc# + */ +#define @S@@TYPE@_@kind@ @S@@TYPE@_positive +/**end repeat2**/ + /**begin repeat2 * Arithmetic * #kind = add, subtract, multiply, bitwise_and, bitwise_or, bitwise_xor, @@ -224,9 +218,7 @@ LONGLONG_qQ_bool_@kind@(char **args, npy_intp const *dimensions, npy_intp const /**end repeat**/ -#ifndef NPY_DISABLE_OPTIMIZATION - #include "loops_unary.dispatch.h" -#endif +#include "loops_unary.dispatch.h" /**begin repeat * #TYPE = UBYTE, USHORT, UINT, ULONG, ULONGLONG, * BYTE, SHORT, INT, LONG, LONGLONG# @@ -245,9 +237,7 @@ NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void @TYPE@_@kind@, ** FLOAT LOOPS ** ***************************************************************************** */ -#ifndef NPY_DISABLE_OPTIMIZATION - #include "loops_unary_fp.dispatch.h" -#endif +#include "loops_unary_fp.dispatch.h" /**begin repeat * #TYPE = FLOAT, DOUBLE# */ @@ -259,9 +249,7 @@ NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void @TYPE@_@kind@, /**end repeat1**/ /**end repeat**/ -#ifndef NPY_DISABLE_OPTIMIZATION - #include "loops_unary_fp_le.dispatch.h" -#endif +#include "loops_unary_fp_le.dispatch.h" /**begin repeat * #TYPE = FLOAT, DOUBLE# */ @@ -273,9 +261,7 @@ NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void @TYPE@_@kind@, /**end repeat1**/ /**end repeat**/ -#ifndef NPY_DISABLE_OPTIMIZATION - #include "loops_unary.dispatch.h" -#endif +#include "loops_unary.dispatch.h" /**begin repeat * #TYPE = FLOAT, DOUBLE, LONGDOUBLE# */ @@ -287,9 +273,7 @@ NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void @TYPE@_@kind@, /**end repeat1**/ /**end repeat**/ -#ifndef NPY_DISABLE_OPTIMIZATION - #include "loops_arithm_fp.dispatch.h" -#endif +#include "loops_arithm_fp.dispatch.h" /**begin repeat * #TYPE = FLOAT, DOUBLE# */ @@ -306,9 +290,7 @@ NPY_NO_EXPORT int /**end repeat1**/ /**end repeat**/ -#ifndef NPY_DISABLE_OPTIMIZATION - #include "loops_hyperbolic.dispatch.h" -#endif +#include "loops_hyperbolic.dispatch.h" /**begin repeat * #TYPE = 
FLOAT, DOUBLE# */ @@ -321,10 +303,7 @@ NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void @TYPE@_@func@, /**end repeat**/ // SVML -#ifndef NPY_DISABLE_OPTIMIZATION - #include "loops_umath_fp.dispatch.h" -#endif - +#include "loops_umath_fp.dispatch.h" /**begin repeat * #TYPE = FLOAT, DOUBLE# */ @@ -338,6 +317,9 @@ NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void @TYPE@_@func@, /**end repeat1**/ /**end repeat**/ +#ifndef NPY_DISABLE_OPTIMIZATION + #include "loops_half.dispatch.h" +#endif /**begin repeat * #func = sin, cos, tan, exp, exp2, log, log2, log10, expm1, log1p, cbrt, arcsin, arccos, arctan, sinh, cosh, tanh, arcsinh, arccosh, arctanh# */ @@ -360,10 +342,7 @@ NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void @TYPE@_@func@, /**end repeat1**/ /**end repeat**/ -#ifndef NPY_DISABLE_OPTIMIZATION - #include "loops_trigonometric.dispatch.h" -#endif - +#include "loops_trigonometric.dispatch.h" /**begin repeat * #TYPE = FLOAT, DOUBLE# */ @@ -376,9 +355,7 @@ NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void @TYPE@_@func@, ( /**end repeat1**/ /**end repeat**/ -#ifndef NPY_DISABLE_OPTIMIZATION - #include "loops_exponent_log.dispatch.h" -#endif +#include "loops_exponent_log.dispatch.h" /**begin repeat * #TYPE = FLOAT, DOUBLE# */ @@ -391,9 +368,7 @@ NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void @TYPE@_@kind@, ( /**end repeat1**/ /**end repeat**/ -#ifndef NPY_DISABLE_OPTIMIZATION - #include "loops_comparison.dispatch.h" -#endif +#include "loops_comparison.dispatch.h" /**begin repeat * #TYPE = FLOAT, DOUBLE# */ @@ -540,9 +515,7 @@ NPY_NO_EXPORT void /**end repeat1**/ /**end repeat**/ -#ifndef NPY_DISABLE_OPTIMIZATION - #include "loops_autovec.dispatch.h" -#endif +#include "loops_autovec.dispatch.h" /**begin repeat * #TYPE = HALF# */ @@ -558,9 +531,7 @@ NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void @TYPE@_@kind@, ** COMPLEX LOOPS ** ***************************************************************************** */ -#ifndef NPY_DISABLE_OPTIMIZATION - #include "loops_arithm_fp.dispatch.h" -#endif +#include 
"loops_arithm_fp.dispatch.h" /**begin repeat * #TYPE = CFLOAT, CDOUBLE# */ @@ -572,9 +543,7 @@ NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void @TYPE@_@kind@, /**end repeat1**/ /**end repeat**/ -#ifndef NPY_DISABLE_OPTIMIZATION - #include "loops_unary_complex.dispatch.h" -#endif +#include "loops_unary_complex.dispatch.h" /**begin repeat * #TYPE = CFLOAT, CDOUBLE# */ @@ -795,9 +764,7 @@ TIMEDELTA_mm_qm_divmod(char **args, npy_intp const *dimensions, npy_intp const * /* #define TIMEDELTA_mm_d_floor_divide TIMEDELTA_mm_d_divide */ -#ifndef NPY_DISABLE_OPTIMIZATION - #include "loops_autovec.dispatch.h" -#endif +#include "loops_autovec.dispatch.h" /**begin repeat * #TYPE = TIMEDELTA, DATETIME# */ @@ -839,9 +806,7 @@ PyUFunc_OOO_O(char **args, npy_intp const *dimensions, npy_intp const *steps, vo ***************************************************************************** */ -#ifndef NPY_DISABLE_OPTIMIZATION - #include "loops_minmax.dispatch.h" -#endif +#include "loops_minmax.dispatch.h" //---------- Integers ---------- @@ -875,5 +840,7 @@ PyUFunc_OOO_O(char **args, npy_intp const *dimensions, npy_intp const *steps, vo ** END LOOPS ** ***************************************************************************** */ - +#ifdef __cplusplus +} +#endif #endif diff --git a/numpy/_core/src/umath/loops_arithm_fp.dispatch.c.src b/numpy/_core/src/umath/loops_arithm_fp.dispatch.c.src index a5453501836e..94bc24811e1d 100644 --- a/numpy/_core/src/umath/loops_arithm_fp.dispatch.c.src +++ b/numpy/_core/src/umath/loops_arithm_fp.dispatch.c.src @@ -1,10 +1,3 @@ -/*@targets - ** $maxopt baseline - ** sse2 (avx2 fma3) - ** neon asimd - ** vsx2 vsx3 - ** vx vxe - **/ #define _UMATHMODULE #define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION @@ -22,7 +15,7 @@ * current one kinda slow and it can be optimized by * at least avoiding the division and keep sqrt. * - Vectorize reductions - * - Add support for ASIMD/VCMLA through universal intrinics. 
+ * - Add support for ASIMD/VCMLA through universal intrinsics. */ //############################################################################### @@ -346,14 +339,17 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) && __apple_build_version__ < 14030000 goto loop_scalar; #endif // end affected Apple clang. + if (is_mem_overlap(b_src0, b_ssrc0, b_dst, b_sdst, len) || is_mem_overlap(b_src1, b_ssrc1, b_dst, b_sdst, len) || - b_sdst % sizeof(@ftype@) != 0 || b_sdst == 0 || - b_ssrc0 % sizeof(@ftype@) != 0 || - b_ssrc1 % sizeof(@ftype@) != 0 + !npyv_loadable_stride_@sfx@(b_ssrc0) || + !npyv_loadable_stride_@sfx@(b_ssrc1) || + !npyv_storable_stride_@sfx@(b_sdst) || + b_sdst == 0 ) { goto loop_scalar; } + const @ftype@ *src0 = (@ftype@*)b_src0; const @ftype@ *src1 = (@ftype@*)b_src1; @ftype@ *dst = (@ftype@*)b_dst; @@ -366,10 +362,6 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) const int wstep = vstep * 2; const int hstep = vstep / 2; - const int loadable0 = npyv_loadable_stride_s64(ssrc0); - const int loadable1 = npyv_loadable_stride_s64(ssrc1); - const int storable = npyv_storable_stride_s64(sdst); - // lots**lots of specializations, to squeeze out max performance // contig if (ssrc0 == 2 && ssrc0 == ssrc1 && ssrc0 == sdst) { @@ -414,7 +406,7 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) } } // non-contig - else if (loadable1 && storable) { + else { for (; len >= vstep; len -= vstep, src1 += ssrc1*vstep, dst += sdst*vstep) { npyv_@sfx@ b0 = npyv_loadn2_@sfx@(src1, ssrc1); npyv_@sfx@ b1 = npyv_loadn2_@sfx@(src1 + ssrc1*hstep, ssrc1); @@ -433,9 +425,6 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) npyv_storen2_till_@sfx@(dst, sdst, len, r); } } - else { - goto loop_scalar; - } } // scalar 1 else if (ssrc1 == 0) { @@ -460,7 +449,7 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) } } // non-contig - else if (loadable0 && storable) { + else { for (; len >= vstep; len -= vstep, src0 += ssrc0*vstep, dst += 
sdst*vstep) { npyv_@sfx@ a0 = npyv_loadn2_@sfx@(src0, ssrc0); npyv_@sfx@ a1 = npyv_loadn2_@sfx@(src0 + ssrc0*hstep, ssrc0); @@ -479,13 +468,10 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) npyv_storen2_till_@sfx@(dst, sdst, len, r); } } - else { - goto loop_scalar; - } } #if @is_mul@ // non-contig - else if (loadable0 && loadable1 && storable) { + else { for (; len >= vstep; len -= vstep, src0 += ssrc0*vstep, src1 += ssrc1*vstep, dst += sdst*vstep ) { @@ -512,12 +498,16 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) npyv_storen2_till_@sfx@(dst, sdst, len, r); } } - #endif + #else /* @is_mul@ */ else { + // Only multiply is vectorized for the generic non-contig case. goto loop_scalar; } + #endif /* @is_mul@ */ + npyv_cleanup(); return; + loop_scalar: #endif for (; len > 0; --len, b_src0 += b_ssrc0, b_src1 += b_ssrc1, b_dst += b_sdst) { @@ -580,8 +570,8 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) npy_intp b_ssrc = steps[0], b_sdst = steps[1]; #if @VECTOR@ if (is_mem_overlap(b_src, b_ssrc, b_dst, b_sdst, len) || - b_sdst % sizeof(@ftype@) != 0 || - b_ssrc % sizeof(@ftype@) != 0 + !npyv_loadable_stride_@sfx@(b_ssrc) || + !npyv_storable_stride_@sfx@(b_sdst) ) { goto loop_scalar; } @@ -609,7 +599,7 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) npyv_store2_till_@sfx@(dst, len, r); } } - else if (ssrc == 2 && npyv_storable_stride_s64(sdst)) { + else if (ssrc == 2) { for (; len >= vstep; len -= vstep, src += wstep, dst += sdst*vstep) { npyv_@sfx@ a0 = npyv_load_@sfx@(src); npyv_@sfx@ a1 = npyv_load_@sfx@(src + vstep); @@ -624,7 +614,7 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) npyv_storen2_till_@sfx@(dst, sdst, len, r); } } - else if (sdst == 2 && npyv_loadable_stride_s64(ssrc)) { + else if (sdst == 2) { for (; len >= vstep; len -= vstep, src += ssrc*vstep, dst += wstep) { npyv_@sfx@ a0 = npyv_loadn2_@sfx@(src, ssrc); npyv_@sfx@ a1 = npyv_loadn2_@sfx@(src + ssrc*hstep, ssrc); diff --git 
a/numpy/_core/src/umath/loops_arithmetic.dispatch.c.src b/numpy/_core/src/umath/loops_arithmetic.dispatch.c.src index 16cb6ecb21ac..c9efe5579e71 100644 --- a/numpy/_core/src/umath/loops_arithmetic.dispatch.c.src +++ b/numpy/_core/src/umath/loops_arithmetic.dispatch.c.src @@ -1,10 +1,3 @@ -/*@targets - ** $maxopt baseline - ** sse2 sse41 avx2 avx512f avx512_skx - ** vsx2 vsx4 - ** neon - ** vx - **/ #define _UMATHMODULE #define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION @@ -36,7 +29,7 @@ * q = TRUNC((n - (-dsign ) + (-nsign))/d) - (-qsign); ********************************************************************************/ -#if (defined(NPY_HAVE_VSX) && !defined(NPY_HAVE_VSX4)) || defined(NPY_HAVE_NEON) +#if (defined(NPY_HAVE_VSX) && !defined(NPY_HAVE_VSX4)) || defined(NPY_HAVE_NEON) || defined(NPY_HAVE_LSX) // Due to integer 128-bit multiplication emulation, SIMD 64-bit division // may not perform well on both neon and up to VSX3 compared to scalar // division. @@ -452,7 +445,7 @@ NPY_NO_EXPORT int NPY_CPU_DISPATCH_CURFX(@TYPE@_divide_indexed) * Therefore it's better to disable NPYV in this special case to avoid any unnecessary shuffles. * Power10(VSX4) is an exception here since it has native support for integer vector division. 
*/ -#if NPY_BITSOF_@STYPE@ == 64 && !defined(NPY_HAVE_VSX4) && (defined(NPY_HAVE_VSX) || defined(NPY_HAVE_NEON)) +#if NPY_BITSOF_@STYPE@ == 64 && !defined(NPY_HAVE_VSX4) && (defined(NPY_HAVE_VSX) || defined(NPY_HAVE_NEON) || defined(NPY_HAVE_LSX)) #undef TO_SIMD_SFX #endif NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_divide) diff --git a/numpy/_core/src/umath/loops_autovec.dispatch.c.src b/numpy/_core/src/umath/loops_autovec.dispatch.c.src index 6ccafe577c72..d14b0fba93a3 100644 --- a/numpy/_core/src/umath/loops_autovec.dispatch.c.src +++ b/numpy/_core/src/umath/loops_autovec.dispatch.c.src @@ -1,10 +1,3 @@ -/*@targets - ** $maxopt $autovec baseline - ** sse2 avx2 - ** neon - ** vsx2 - ** vx - **/ #define _UMATHMODULE #define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION @@ -31,6 +24,8 @@ */ #define INT_left_shift_needs_clear_floatstatus #define UINT_left_shift_needs_clear_floatstatus +#define LONG_left_shift_needs_clear_floatstatus +#define ULONG_left_shift_needs_clear_floatstatus /**begin repeat * #TYPE = BYTE, UBYTE, SHORT, USHORT, INT, UINT, @@ -58,7 +53,7 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_square) NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_reciprocal) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) { - UNARY_LOOP_FAST(@type@, @type@, *out = 1.0 / in); + UNARY_LOOP_FAST(@type@, @type@, *out = (@type@)(1.0 / in)); } /**begin repeat1 @@ -264,6 +259,17 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(BOOL_@kind@) } /**end repeat**/ +/**begin repeat + * Identity + * #kind = floor, ceil, trunc# + */ +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(BOOL_@kind@) +(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) +{ + UNARY_LOOP_FAST(npy_bool, npy_bool, *out = in); +} +/**end repeat**/ + /* ***************************************************************************** ** HALF-FLOAT LOOPS ** diff --git a/numpy/_core/src/umath/loops_comparison.dispatch.c.src 
b/numpy/_core/src/umath/loops_comparison.dispatch.c.src index 7510808714a3..6450bed962b1 100644 --- a/numpy/_core/src/umath/loops_comparison.dispatch.c.src +++ b/numpy/_core/src/umath/loops_comparison.dispatch.c.src @@ -1,10 +1,3 @@ -/*@targets - ** $maxopt baseline - ** sse2 sse42 avx2 avx512f avx512_skx - ** vsx2 vsx3 - ** neon - ** vx vxe - **/ #define _UMATHMODULE #define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION diff --git a/numpy/_core/src/umath/loops_exponent_log.dispatch.c.src b/numpy/_core/src/umath/loops_exponent_log.dispatch.c.src index 159e275bd45e..316b612f1a02 100644 --- a/numpy/_core/src/umath/loops_exponent_log.dispatch.c.src +++ b/numpy/_core/src/umath/loops_exponent_log.dispatch.c.src @@ -1,8 +1,3 @@ -/*@targets - ** $maxopt baseline - ** (avx2 fma3) avx512f avx512_skx - **/ - #define _UMATHMODULE #define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION @@ -1074,10 +1069,14 @@ AVX512F_log_DOUBLE(npy_double * op, _mm512_mask_storeu_pd(op, load_mask, res); } - /* call glibc's log func when x around 1.0f */ + /* call glibc's log func when x around 1.0f. */ if (glibc_mask != 0) { double NPY_DECL_ALIGNED(64) ip_fback[8]; - _mm512_store_pd(ip_fback, x_in); + /* Using a mask_store_pd instead of store_pd to prevent a fatal + * compiler optimization bug. 
See + * https://github.com/numpy/numpy/issues/27745#issuecomment-2498684564 + * for details.*/ + _mm512_mask_store_pd(ip_fback, avx512_get_full_load_mask_pd(), x_in); for (int ii = 0; ii < 8; ++ii, glibc_mask >>= 1) { if (glibc_mask & 0x01) { @@ -1315,16 +1314,16 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(DOUBLE_@func@) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) { #if NPY_SIMD && defined(NPY_HAVE_AVX512_SKX) && defined(NPY_CAN_LINK_SVML) - const npy_double *src = (npy_double*)args[0]; - npy_double *dst = (npy_double*)args[1]; - const int lsize = sizeof(src[0]); - const npy_intp ssrc = steps[0] / lsize; - const npy_intp sdst = steps[1] / lsize; const npy_intp len = dimensions[0]; - assert(steps[0] % lsize == 0 && steps[1] % lsize == 0); - if (!is_mem_overlap(src, steps[0], dst, steps[1], len) && - npyv_loadable_stride_f64(ssrc) && - npyv_storable_stride_f64(sdst)) { + + if (!is_mem_overlap(args[0], steps[0], args[1], steps[1], len) && + npyv_loadable_stride_f64(steps[0]) && + npyv_storable_stride_f64(steps[1])) { + const npy_double *src = (npy_double*)args[0]; + npy_double *dst = (npy_double*)args[1]; + const npy_intp ssrc = steps[0] / sizeof(src[0]); + const npy_intp sdst = steps[1] / sizeof(src[0]); + simd_@func@_f64(src, ssrc, dst, sdst, len); return; } @@ -1350,12 +1349,17 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(DOUBLE_@func@) * #TYPE = FLOAT, DOUBLE# * #c = f, # * #C = F, # + * #suffix = f32, f64# */ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_frexp) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { #ifdef SIMD_AVX512_SKX - if (IS_UNARY_TWO_OUT_SMALL_STEPS_AND_NOMEMOVERLAP) { + if ((npyv_loadable_stride_@suffix@(steps[0])) && + (npyv_storable_stride_@suffix@(steps[1])) && + (npyv_storable_stride_@suffix@(steps[2])) && + (!is_mem_overlap(args[0], steps[0], args[2], steps[2], dimensions[0])) && + (!is_mem_overlap(args[0], steps[0], args[1], steps[1], 
dimensions[0]))) { AVX512_SKX_frexp_@TYPE@(args, dimensions, steps); return; } @@ -1370,7 +1374,11 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_ldexp) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { #ifdef SIMD_AVX512_SKX - if (IS_BINARY_SMALL_STEPS_AND_NOMEMOVERLAP) { + if ((npyv_loadable_stride_@suffix@(steps[0])) && + (npyv_storable_stride_@suffix@(steps[1])) && + (npyv_storable_stride_@suffix@(steps[2])) && + (!is_mem_overlap(args[0], steps[0], args[2], steps[2], dimensions[0])) && + (!is_mem_overlap(args[1], steps[1], args[2], steps[2], dimensions[0]))) { AVX512_SKX_ldexp_@TYPE@(args, dimensions, steps); return; } diff --git a/numpy/_core/src/umath/loops_half.dispatch.c.src b/numpy/_core/src/umath/loops_half.dispatch.c.src new file mode 100644 index 000000000000..a81a64ed0294 --- /dev/null +++ b/numpy/_core/src/umath/loops_half.dispatch.c.src @@ -0,0 +1,97 @@ +#include "numpy/npy_math.h" +#include "simd/simd.h" +#include "loops_utils.h" +#include "loops.h" +#include "npy_svml.h" +#include "fast_loop_macros.h" + + +#if NPY_SIMD && defined(NPY_HAVE_AVX512_SKX) && defined(NPY_CAN_LINK_SVML) + #define NPY__SVML_IS_ENABLED 1 +#else + #define NPY__SVML_IS_ENABLED 0 +#endif + +#if NPY__SVML_IS_ENABLED && !defined(NPY_HAVE_AVX512_SPR) + +typedef __m256i npyvh_f16; +#define npyv_cvt_f16_f32 _mm512_cvtph_ps +#define npyv_cvt_f32_f16 _mm512_cvtps_ph +#define npyvh_load_f16(PTR) _mm256_loadu_si256((const __m256i*)(PTR)) +#define npyvh_store_f16(PTR, data) _mm256_storeu_si256((__m256i*)PTR, data) +NPY_FINLINE npyvh_f16 npyvh_load_till_f16(const npy_half *ptr, npy_uintp nlane, npy_half fill) +{ + assert(nlane > 0); + const __m256i vfill = _mm256_set1_epi16(fill); + const __mmask16 mask = (0x0001 << nlane) - 0x0001; + return _mm256_mask_loadu_epi16(vfill, mask, ptr); +} +NPY_FINLINE void npyvh_store_till_f16(npy_half *ptr, npy_uintp nlane, npyvh_f16 data) +{ + assert(nlane > 0); + const __mmask16 mask = (0x0001 << nlane) - 
0x0001; + _mm256_mask_storeu_epi16(ptr, mask, data); +} + +/**begin repeat + * #func = sin, cos, tan, exp, exp2, expm1, log, log2, log10, log1p, cbrt, asin, acos, atan, sinh, cosh, tanh, asinh, acosh, atanh# + * #default_val = 0, 0, 0, 0, 0, 0x3c00, 0x3c00, 0x3c00, 0x3c00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x3c00, 0# + */ +static void +avx512_@func@_f16(const npy_half *src, npy_half *dst, npy_intp len) +{ + const int num_lanes = npyv_nlanes_f32; + npyvh_f16 x, out; + npyv_f32 x_ps, out_ps; + for (; len > 0; len -= num_lanes, src += num_lanes, dst += num_lanes) { + if (len >= num_lanes) { + x = npyvh_load_f16(src); + x_ps = npyv_cvt_f16_f32(x); + out_ps = __svml_@func@f16(x_ps); + out = npyv_cvt_f32_f16(out_ps, 0); + npyvh_store_f16(dst, out); + } + else { + x = npyvh_load_till_f16(src, len, @default_val@); + x_ps = npyv_cvt_f16_f32(x); + out_ps = __svml_@func@f16(x_ps); + out = npyv_cvt_f32_f16(out_ps, 0); + npyvh_store_till_f16(dst, len, out); + } + } + npyv_cleanup(); +} +/**end repeat**/ +#endif // NPY__SVML_IS_ENABLED + +/**begin repeat + * #func = sin, cos, tan, exp, exp2, expm1, log, log2, log10, log1p, cbrt, arcsin, arccos, arctan, sinh, cosh, tanh, arcsinh, arccosh, arctanh# + * #intrin = sin, cos, tan, exp, exp2, expm1, log, log2, log10, log1p, cbrt, asin, acos, atan, sinh, cosh, tanh, asinh, acosh, atanh# + */ +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(HALF_@func@) +(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) +{ +#if NPY__SVML_IS_ENABLED + const npy_half *src = (npy_half*)args[0]; + npy_half *dst = (npy_half*)args[1]; + + const npy_intp len = dimensions[0]; + + if (!is_mem_overlap(src, steps[0], dst, steps[1], len) && + (steps[0] == sizeof(npy_half)) && + (steps[1] == sizeof(npy_half))) { + #ifdef NPY_HAVE_AVX512_SPR + __svml_@intrin@s32(src, dst, len); + #else + avx512_@intrin@_f16(src, dst, len); + #endif + return; + } +#endif // NPY__SVML_IS_ENABLED + UNARY_LOOP { + const npy_float in1 = 
npy_half_to_float(*(npy_half *)ip1); + *((npy_half *)op1) = npy_float_to_half(npy_@intrin@f(in1)); + } +} +/**end repeat**/ + diff --git a/numpy/_core/src/umath/loops_hyperbolic.dispatch.c.src b/numpy/_core/src/umath/loops_hyperbolic.dispatch.cpp.src old mode 100644 new mode 100755 similarity index 66% rename from numpy/_core/src/umath/loops_hyperbolic.dispatch.c.src rename to numpy/_core/src/umath/loops_hyperbolic.dispatch.cpp.src index 8e09de941168..93d288fbdb2e --- a/numpy/_core/src/umath/loops_hyperbolic.dispatch.c.src +++ b/numpy/_core/src/umath/loops_hyperbolic.dispatch.cpp.src @@ -1,16 +1,15 @@ -/*@targets - ** $maxopt baseline - ** (avx2 fma3) AVX512_SKX - ** vsx2 vsx4 - ** neon_vfpv4 - ** vx vxe - **/ #include "numpy/npy_math.h" #include "simd/simd.h" #include "loops_utils.h" #include "loops.h" -#if NPY_SIMD_FMA3 // native support +// Provides the various *_LOOP macros +#include "fast_loop_macros.h" +#include +namespace hn = hwy::HWY_NAMESPACE; + +#if HWY_NATIVE_FMA // native support + /* * NOTE: The following implementation of tanh(f32, f64) have been converted from * Intel SVML to universal intrinsics, and the original code can be found in: @@ -73,21 +72,103 @@ * achieve wider than target precision. 
* */ + +const hn::ScalableTag f32; +const hn::ScalableTag s32; +const hn::ScalableTag u32; +using vec_f32 = hn::Vec; +using vec_s32 = hn::Vec; +using vec_u32 = hn::Vec; + +const hn::ScalableTag f64; +const hn::ScalableTag s64; +const hn::ScalableTag u64; +using vec_f64 = hn::Vec; +using vec_s64 = hn::Vec; +using vec_u64 = hn::Vec; + +template +HWY_ATTR NPY_FINLINE vtype +load_vector(type_t* src, npy_intp ssrc, npy_intp len){ + auto D = hn::DFromV(); + using DI = hn::RebindToSigned; + DI di; + + auto indices = hn::Iota(di, 0); + auto stride = hn::Set(di, ssrc); + indices = hn::Mul(indices, stride); + + const int nlanes = hn::Lanes(D); + if (len < nlanes){ + if (ssrc == 1) { + return hn::LoadN(D, src, len); + } else { + return hn::GatherIndexN(D, src, indices, len); + } + }else{ + if (ssrc == 1) { + return hn::LoadU(D, src); + } else { + return hn::GatherIndex(D, src, indices); + } + } +} + +template +HWY_ATTR NPY_FINLINE void +store_vector(vtype vec, type_t* dst, npy_intp sdst, npy_intp len){ + auto D = hn::DFromV(); + using DI = hn::RebindToSigned; + DI di; + + auto indices = hn::Iota(di, 0); + auto stride = hn::Set(di, sdst); + indices = hn::Mul(indices, stride); + + const int nlanes = hn::Lanes(D); + if (len < nlanes){ + if (sdst == 1) { + hn::StoreN(vec, D, dst, len); + } else { + hn::ScatterIndexN(vec, D, dst, indices, len); + } + }else{ + if (sdst == 1) { + hn::StoreU(vec, D, dst); + } else { + hn::ScatterIndex(vec, D, dst, indices); + } + } +} + #if NPY_SIMD_F64 - // For architectures without efficient gather / scatter instructions, it is - // better to use a transposed LUT where we can load all coefficients for an - // index linearly. In order to keep the same vertical calculation, we - // transpose the coef. into lanes. 2 lane transpose is all that's - // implemented so we require `npyv_nlanes_f64` == 2. 
- #if npyv_nlanes_f64 == 2 - #define TANH_TRANSPOSED_LUT - #endif // npyv_nlanes_f64 == 2 +[[maybe_unused]] HWY_ATTR NPY_FINLINE vec_f64 lut_16_f64(const double * lut, vec_u64 idx){ + if constexpr(hn::MaxLanes(f64) == 8){ + const vec_f64 lut0 = hn::Load(f64, lut); + const vec_f64 lut1 = hn::Load(f64, lut + 8); + return hn::TwoTablesLookupLanes(f64, lut0, lut1, hn::IndicesFromVec(f64, idx)); + }else if constexpr (hn::MaxLanes(f64) == 4){ + const vec_f64 lut0 = hn::Load(f64, lut); + const vec_f64 lut1 = hn::Load(f64, lut + 4); + const vec_f64 lut2 = hn::Load(f64, lut + 8); + const vec_f64 lut3 = hn::Load(f64, lut + 12); + + const auto high_mask = hn::Ne(hn::ShiftRight<3>(idx), hn::Zero(u64)); + const auto load_mask = hn::And(idx, hn::Set(u64, 0b111)); + + const vec_f64 lut_low = hn::TwoTablesLookupLanes(f64, lut0, lut1, hn::IndicesFromVec(f64, load_mask)); + const vec_f64 lut_high = hn::TwoTablesLookupLanes(f64, lut2, lut3, hn::IndicesFromVec(f64, load_mask)); + + return hn::IfThenElse(hn::RebindMask(f64, high_mask), lut_high, lut_low); + }else{ + return hn::GatherIndex(f64, lut, hn::BitCast(s64, idx)); + } +} -static void +HWY_ATTR static void simd_tanh_f64(const double *src, npy_intp ssrc, double *dst, npy_intp sdst, npy_intp len) { -#if defined(TANH_TRANSPOSED_LUT) static const npy_uint64 NPY_DECL_ALIGNED(NPY_SIMD_WIDTH) lut18x16[] = { // 0 0x0ull, 0x0ull, 0x3ff0000000000000ull, 0xbbf0b3ea3fdfaa19ull, // b, c0, c1, c2 @@ -186,7 +267,7 @@ simd_tanh_f64(const double *src, npy_intp ssrc, double *dst, npy_intp sdst, npy_ 0x0ull, 0x0ull, 0x0ull, 0x0ull, 0x0ull, 0x0ull, }; -#else + static const npy_uint64 NPY_DECL_ALIGNED(NPY_SIMD_WIDTH) lut16x18[] = { // 0 0x0ull, 0x3fcc000000000000ull, 0x3fd4000000000000ull, 0x3fdc000000000000ull, @@ -279,132 +360,130 @@ simd_tanh_f64(const double *src, npy_intp ssrc, double *dst, npy_intp sdst, npy_ 0xbe567e924bf5ff6eull, 0x3de3f7f7de6b0eb6ull, 0x3d69ed18bae3ebbcull, 0xbcf7534c4f3dfa71ull, 0xbc730b73f1eaff20ull, 
0xbbba2cff8135d462ull, 0xbab5a71b5f7d9035ull, 0x0ull }; -#endif // defined(TANH_TRANSPOSED_LUT) - const int nlanes = npyv_nlanes_f64; - const npyv_f64 qnan = npyv_setall_f64(NPY_NAN); + const int nlanes = hn::Lanes(f64); + const vec_f64 qnan = hn::Set(f64, NPY_NAN); for (; len > 0; len -= nlanes, src += ssrc*nlanes, dst += sdst*nlanes) { - npyv_f64 x; - if (ssrc == 1) { - x = npyv_load_tillz_f64(src, len); - } else { - x = npyv_loadn_tillz_f64(src, ssrc, len); - } - npyv_s64 ndnan = npyv_and_s64(npyv_reinterpret_s64_f64(x), npyv_setall_s64(0x7ff8000000000000ll)); + vec_f64 x = load_vector(src, ssrc, len); + + vec_s64 ndnan = hn::And(hn::BitCast(s64, x), hn::Set(s64, 0x7ff8000000000000ll)); // |x| > HUGE_THRESHOLD, INF and NaNs. - npyv_b64 special_m = npyv_cmple_s64(ndnan, npyv_setall_s64(0x7fe0000000000000ll)); - npyv_b64 nnan_m = npyv_notnan_f64(x); - npyv_s64 idxs = npyv_sub_s64(ndnan, npyv_setall_s64(0x3fc0000000000000ll)); + auto special_m = hn::Le(ndnan, hn::Set(s64, 0x7fe0000000000000ll)); + auto nan_m = hn::IsNaN(x); + vec_s64 idxs = hn::Sub(ndnan, hn::Set(s64, 0x3fc0000000000000ll)); // no native 64-bit for max/min and its fine to use 32-bit max/min // since we're not crossing 32-bit edge - npyv_s32 idxl = npyv_max_s32(npyv_reinterpret_s32_s64(idxs), npyv_zero_s32()); - idxl = npyv_min_s32(idxl, npyv_setall_s32(0x780000)); - npyv_u64 idx = npyv_shri_u64(npyv_reinterpret_u64_s32(idxl), 51); - -#if defined(TANH_TRANSPOSED_LUT) - npyv_f64 e0e1[npyv_nlanes_f64]; - npyv_lanetype_u64 index[npyv_nlanes_f64]; - npyv_store_u64(index, idx); - - /**begin repeat - * #off= 0, 2, 4, 6, 8, 10, 12, 14, 16# - * #e0 = b, c1, c3, c5, c7, c9, c11, c13, c15# - * #e1 = c0,c2, c4, c6, c8, c10,c12, c14, c16# - */ - /**begin repeat1 - * #lane = 0, 1# - */ - e0e1[@lane@] = npyv_reinterpret_f64_u64(npyv_load_u64(lut18x16 + index[@lane@] * 18 + @off@)); - /**end repeat1**/ - npyv_f64 @e0@ = npyv_combinel_f64(e0e1[0], e0e1[1]); - npyv_f64 @e1@ = npyv_combineh_f64(e0e1[0], e0e1[1]); - 
/**end repeat**/ -#else - npyv_f64 b = npyv_lut16_f64((const double*)lut16x18 + 16*0, idx); - npyv_f64 c0 = npyv_lut16_f64((const double*)lut16x18 + 1*16, idx); - npyv_f64 c1 = npyv_lut16_f64((const double*)lut16x18 + 2*16, idx); - npyv_f64 c2 = npyv_lut16_f64((const double*)lut16x18 + 3*16, idx); - npyv_f64 c3 = npyv_lut16_f64((const double*)lut16x18 + 4*16, idx); - npyv_f64 c4 = npyv_lut16_f64((const double*)lut16x18 + 5*16, idx); - npyv_f64 c5 = npyv_lut16_f64((const double*)lut16x18 + 6*16, idx); - npyv_f64 c6 = npyv_lut16_f64((const double*)lut16x18 + 7*16, idx); - npyv_f64 c7 = npyv_lut16_f64((const double*)lut16x18 + 8*16, idx); - npyv_f64 c8 = npyv_lut16_f64((const double*)lut16x18 + 9*16, idx); - npyv_f64 c9 = npyv_lut16_f64((const double*)lut16x18 + 10*16, idx); - npyv_f64 c10 = npyv_lut16_f64((const double*)lut16x18 + 11*16, idx); - npyv_f64 c11 = npyv_lut16_f64((const double*)lut16x18 + 12*16, idx); - npyv_f64 c12 = npyv_lut16_f64((const double*)lut16x18 + 13*16, idx); - npyv_f64 c13 = npyv_lut16_f64((const double*)lut16x18 + 14*16, idx); - npyv_f64 c14 = npyv_lut16_f64((const double*)lut16x18 + 15*16, idx); - npyv_f64 c15 = npyv_lut16_f64((const double*)lut16x18 + 16*16, idx); - npyv_f64 c16 = npyv_lut16_f64((const double*)lut16x18 + 17*16, idx); -#endif // defined(TANH_TRANSPOSED_LUT) + vec_s32 idxl = hn::Max(hn::BitCast(s32, idxs), hn::Zero(s32)); + idxl = hn::Min(idxl, hn::Set(s32, 0x780000)); + vec_u64 idx = hn::ShiftRightSame(hn::BitCast(u64, idxl), 51); + + // For architectures without efficient gather / scatter instructions, it is + // better to use a transposed LUT where we can load all coefficients for an + // index linearly. In order to keep the same vertical calculation, we + // transpose the coef. into lanes. 2 lane transpose is all that's + // implemented so we require `npyv_nlanes_f64` == 2. 
+ vec_f64 b, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16; + if constexpr(hn::MaxLanes(f64) == 2){ + vec_f64 e0e1_0, e0e1_1; + uint64_t index[hn::MaxLanes(f64)]; + hn::StoreU(idx, u64, index); + + /**begin repeat + * #off = 0, 2, 4, 6, 8, 10, 12, 14, 16# + * #e0 = b, c1, c3, c5, c7, c9, c11,c13,c15# + * #e1 = c0, c2, c4, c6, c8, c10,c12,c14,c16# + */ + e0e1_0 = hn::LoadU(f64, (const double*)lut18x16 + index[0] * 18 + @off@); + e0e1_1 = hn::LoadU(f64, (const double*)lut18x16 + index[1] * 18 + @off@); + @e0@ = hn::ConcatLowerLower(f64, e0e1_1, e0e1_0); + @e1@ = hn::ConcatUpperUpper(f64, e0e1_1, e0e1_0); + /**end repeat**/ + } else { + b = lut_16_f64((const double*)lut16x18 + 16*0, idx); + c0 = lut_16_f64((const double*)lut16x18 + 1*16, idx); + c1 = lut_16_f64((const double*)lut16x18 + 2*16, idx); + c2 = lut_16_f64((const double*)lut16x18 + 3*16, idx); + c3 = lut_16_f64((const double*)lut16x18 + 4*16, idx); + c4 = lut_16_f64((const double*)lut16x18 + 5*16, idx); + c5 = lut_16_f64((const double*)lut16x18 + 6*16, idx); + c6 = lut_16_f64((const double*)lut16x18 + 7*16, idx); + c7 = lut_16_f64((const double*)lut16x18 + 8*16, idx); + c8 = lut_16_f64((const double*)lut16x18 + 9*16, idx); + c9 = lut_16_f64((const double*)lut16x18 + 10*16, idx); + c10 = lut_16_f64((const double*)lut16x18 + 11*16, idx); + c11 = lut_16_f64((const double*)lut16x18 + 12*16, idx); + c12 = lut_16_f64((const double*)lut16x18 + 13*16, idx); + c13 = lut_16_f64((const double*)lut16x18 + 14*16, idx); + c14 = lut_16_f64((const double*)lut16x18 + 15*16, idx); + c15 = lut_16_f64((const double*)lut16x18 + 16*16, idx); + c16 = lut_16_f64((const double*)lut16x18 + 17*16, idx); + } // no need to zerofy nans or avoid FP exceptions by NO_EXC like SVML does // since we're clearing the FP status anyway. 
- npyv_f64 sign = npyv_and_f64(x, npyv_reinterpret_f64_s64(npyv_setall_s64(0x8000000000000000ull))); - npyv_f64 y = npyv_sub_f64(npyv_abs_f64(x), b); - npyv_f64 r = npyv_muladd_f64(c16, y, c15); - r = npyv_muladd_f64(r, y, c14); - r = npyv_muladd_f64(r, y, c13); - r = npyv_muladd_f64(r, y, c12); - r = npyv_muladd_f64(r, y, c11); - r = npyv_muladd_f64(r, y, c10); - r = npyv_muladd_f64(r, y, c9); - r = npyv_muladd_f64(r, y, c8); - r = npyv_muladd_f64(r, y, c7); - r = npyv_muladd_f64(r, y, c6); - r = npyv_muladd_f64(r, y, c5); - r = npyv_muladd_f64(r, y, c4); - r = npyv_muladd_f64(r, y, c3); - r = npyv_muladd_f64(r, y, c2); - r = npyv_muladd_f64(r, y, c1); - r = npyv_muladd_f64(r, y, c0); + vec_f64 sign = hn::And(x, hn::BitCast(f64, hn::Set(u64, 0x8000000000000000ull))); + vec_f64 y = hn::Sub(hn::Abs(x), b); + vec_f64 r = hn::MulAdd(c16, y, c15); + r = hn::MulAdd(r, y, c14); + r = hn::MulAdd(r, y, c13); + r = hn::MulAdd(r, y, c12); + r = hn::MulAdd(r, y, c11); + r = hn::MulAdd(r, y, c10); + r = hn::MulAdd(r, y, c9); + r = hn::MulAdd(r, y, c8); + r = hn::MulAdd(r, y, c7); + r = hn::MulAdd(r, y, c6); + r = hn::MulAdd(r, y, c5); + r = hn::MulAdd(r, y, c4); + r = hn::MulAdd(r, y, c3); + r = hn::MulAdd(r, y, c2); + r = hn::MulAdd(r, y, c1); + r = hn::MulAdd(r, y, c0); // 1.0 if |x| > HUGE_THRESHOLD || INF - r = npyv_select_f64(special_m, r, npyv_setall_f64(1.0)); - r = npyv_or_f64(r, sign); + r = hn::IfThenElse(hn::RebindMask(f64, special_m), r, hn::Set(f64, 1.0)); + r = hn::Or(r, sign); // qnan if nan - r = npyv_select_f64(nnan_m, r, qnan); - if (sdst == 1) { - npyv_store_till_f64(dst, len, r); - } else { - npyv_storen_till_f64(dst, sdst, len, r); - } + r = hn::IfThenElse(hn::RebindMask(f64, nan_m), qnan, r); + + store_vector(r, dst, sdst, len); } } -#undef TANH_TRANSPOSED_LUT - #endif // NPY_SIMD_F64 #if NPY_SIMD_F32 +HWY_ATTR NPY_FINLINE void zip_f32_lanes(vec_f32 a, vec_f32 b, vec_f32& lower, vec_f32& upper) { + lower = hn::InterleaveLower(f32, a, b); + upper = 
hn::InterleaveUpper(f32, a, b); +} + +[[maybe_unused]] HWY_ATTR NPY_FINLINE vec_f32 lut_32_f32(const float * lut, vec_u32 idx){ + if constexpr(hn::MaxLanes(f32) == 16){ + const vec_f32 lut0 = hn::Load(f32, lut); + const vec_f32 lut1 = hn::Load(f32, lut + 16); + return hn::TwoTablesLookupLanes(f32, lut0, lut1, hn::IndicesFromVec(f32, idx)); + }else if constexpr (hn::MaxLanes(f32) == 8){ + const vec_f32 lut0 = hn::Load(f32, lut); + const vec_f32 lut1 = hn::Load(f32, lut + 8); + const vec_f32 lut2 = hn::Load(f32, lut + 16); + const vec_f32 lut3 = hn::Load(f32, lut + 24); + + const auto high_mask = hn::Ne(hn::ShiftRight<4>(idx), hn::Zero(u32)); + const auto load_mask = hn::And(idx, hn::Set(u32, 0b1111)); + + const vec_f32 lut_low = hn::TwoTablesLookupLanes(f32, lut0, lut1, hn::IndicesFromVec(f32, load_mask)); + const vec_f32 lut_high = hn::TwoTablesLookupLanes(f32, lut2, lut3, hn::IndicesFromVec(f32, load_mask)); + + return hn::IfThenElse(hn::RebindMask(f32, high_mask), lut_high, lut_low); + }else{ + return hn::GatherIndex(f32, lut, hn::BitCast(s32, idx)); + } +} - // For architectures without efficient gather / scatter instructions, it is - // better to use a transposed LUT where we can load all coefficients for an - // index linearly. In order to keep the same vertical calculation, we - // transpose the coef. into lanes. A 4x4 transpose is all that's - // supported so we require `npyv_nlanes_f32` == 4. 
- #if npyv_nlanes_f32 == 4 - #define TANHF_TRANSPOSED_LUT - // Define missing universal intrinsics used below - #if !defined(npyv_get_lane_u32) - #if defined(NPY_HAVE_ASIMD) - #define UNDEF_npyv_get_lane_u32 - #define npyv_get_lane_u32 vgetq_lane_u32 - #elif defined(NPY_HAVE_SSE41) - #define UNDEF_npyv_get_lane_u32 - #define npyv_get_lane_u32 _mm_extract_epi32 - #else - #undef TANHF_TRANSPOSED_LUT - #endif - #endif // !defined(npyv_get_lane_u32) - #endif // npyv_nlanes_f32 == 4 - -static void +HWY_ATTR static void simd_tanh_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, npy_intp len) { -#if defined(TANHF_TRANSPOSED_LUT) static const npy_uint32 NPY_DECL_ALIGNED(NPY_SIMD_WIDTH) lut8x32[] = { // c6 c5 c4 c3 c2 c1 c0 b 0xbc0e2f66, 0x3e0910e9, 0xb76dd6b9, 0xbeaaaaa5, 0xb0343c7b, 0x3f800000, 0x0, 0x0, @@ -447,7 +526,7 @@ simd_tanh_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, npy_in 0xb15a1f04, 0x322487b0, 0xb2ab78ac, 0x332b3cb6, 0xb383012c, 0x338306c6, 0x3f7fffff, 0x41100000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3f800000, 0x0, }; -#else + static const npy_uint32 NPY_DECL_ALIGNED(NPY_SIMD_WIDTH) lut32x8[] = { // 0 0x0, 0x3d700000, 0x3d900000, 0x3db00000, 0x3dd00000, 0x3df00000, 0x3e100000, 0x3e300000, @@ -490,116 +569,114 @@ simd_tanh_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, npy_in 0x3c1d7bfb, 0x3c722cd1, 0x3c973f1c, 0x3c33a31b, 0x3b862ef4, 0x3a27b3d0, 0xba3b5907, 0xba0efc22, 0xb97f9f0f, 0xb8c8af50, 0xb7bdddfb, 0xb64f2950, 0xb4e085b1, 0xb3731dfa, 0xb15a1f04, 0x0 }; -#endif // defined(TANHF_TRANSPOSED_LUT) - const int nlanes = npyv_nlanes_f32; - const npyv_f32 qnan = npyv_setall_f32(NPY_NANF); + const int nlanes = hn::Lanes(f32);//npyv_nlanes_f32; + const vec_f32 qnan = hn::Set(f32, NPY_NAN); for (; len > 0; len -= nlanes, src += ssrc*nlanes, dst += sdst*nlanes) { - npyv_f32 x; - if (ssrc == 1) { - x = npyv_load_tillz_f32(src, len); + vec_f32 x = load_vector(src, ssrc, len); + + vec_s32 ndnan = hn::And(hn::BitCast(s32, 
x), hn::Set(s32, 0x7fe00000)); + // check |x| > HUGE_THRESHOLD, INF and NaNs. + auto special_m = hn::Le(ndnan, hn::Set(s32, 0x7f000000)); + auto nan_m = hn::IsNaN(x); + vec_s32 idxs = hn::Sub(ndnan, hn::Set(s32, 0x3d400000)); + idxs = hn::Max(idxs, hn::Zero(s32)); + idxs = hn::Min(idxs, hn::Set(s32, 0x3e00000)); + vec_u32 idx = hn::ShiftRightSame(hn::BitCast(u32, idxs), 21); + + // For architectures without efficient gather / scatter instructions, it is + // better to use a transposed LUT where we can load all coefficients for an + // index linearly. In order to keep the same vertical calculation, we + // transpose the coef. into lanes. A 4x4 transpose is all that's + // supported so we require `npyv_nlanes_f32` == 4. + vec_f32 b, c0, c1, c2, c3, c4, c5, c6; + if constexpr(hn::MaxLanes(f32) == 4 && HWY_TARGET >= HWY_SSE4){ + vec_f32 c6543_0, c6543_1, c6543_2, c6543_3; + vec_f32 c210b_0, c210b_1, c210b_2, c210b_3; + npyv_lanetype_u32 index[npyv_nlanes_f32]; + + /**begin repeat + * #lane = 0, 1, 2, 3# + */ + index[@lane@] = hn::ExtractLane(idx, @lane@); + c6543_@lane@ = hn::LoadU(f32, (const float*)lut8x32 + index[@lane@] * 8); + c210b_@lane@ = hn::LoadU(f32, (const float*)lut8x32 + index[@lane@] * 8 + 4); + /**end repeat**/ + + // lane0: {c6, c5, c4, c3}, {c2, c1, c0, b} + // lane1: {c6, c5, c4, c3}, {c2, c1, c0, b} + // lane2: {c6, c5, c4, c3}, {c2, c1, c0, b} + // lane3: {c6, c5, c4, c3}, {c2, c1, c0, b} + // + // transposed: + // c6: {lane0, lane1, lane2, lane3} + // c5: {lane0, lane1, lane2, lane3} + // c4: {lane0, lane1, lane2, lane3} + // c3: {lane0, lane1, lane2, lane3} + // c2: {lane0, lane1, lane2, lane3} + // c1: {lane0, lane1, lane2, lane3} + // c0: {lane0, lane1, lane2, lane3} + // b : {lane0, lane1, lane2, lane3} + + vec_f32 c6543_l01_low, c6543_l01_high; + vec_f32 c6543_l23_low, c6543_l23_high; + zip_f32_lanes(c6543_0, c6543_1, c6543_l01_low, c6543_l01_high); + zip_f32_lanes(c6543_2, c6543_3, c6543_l23_low, c6543_l23_high); + + c6 = 
hn::ConcatLowerLower(f32, c6543_l23_low, c6543_l01_low); + c5 = hn::ConcatUpperUpper(f32, c6543_l23_low, c6543_l01_low); + c4 = hn::ConcatLowerLower(f32, c6543_l23_high, c6543_l01_high); + c3 = hn::ConcatUpperUpper(f32, c6543_l23_high, c6543_l01_high); + + vec_f32 c210b_l01_low, c210b_l01_high; + vec_f32 c210b_l23_low, c210b_l23_high; + zip_f32_lanes(c210b_0, c210b_1, c210b_l01_low, c210b_l01_high); + zip_f32_lanes(c210b_2, c210b_3, c210b_l23_low, c210b_l23_high); + + c2 = hn::ConcatLowerLower(f32, c210b_l23_low, c210b_l01_low); + c1 = hn::ConcatUpperUpper(f32, c210b_l23_low, c210b_l01_low); + c0 = hn::ConcatLowerLower(f32, c210b_l23_high, c210b_l01_high); + b = hn::ConcatUpperUpper(f32, c210b_l23_high, c210b_l01_high); } else { - x = npyv_loadn_tillz_f32(src, ssrc, len); + b = lut_32_f32((const float*)lut32x8 + 32*0, idx); + c0 = lut_32_f32((const float*)lut32x8 + 32*1, idx); + c1 = lut_32_f32((const float*)lut32x8 + 32*2, idx); + c2 = lut_32_f32((const float*)lut32x8 + 32*3, idx); + c3 = lut_32_f32((const float*)lut32x8 + 32*4, idx); + c4 = lut_32_f32((const float*)lut32x8 + 32*5, idx); + c5 = lut_32_f32((const float*)lut32x8 + 32*6, idx); + c6 = lut_32_f32((const float*)lut32x8 + 32*7, idx); } - npyv_s32 ndnan = npyv_and_s32(npyv_reinterpret_s32_f32(x), npyv_setall_s32(0x7fe00000)); - // check |x| > HUGE_THRESHOLD, INF and NaNs. 
- npyv_b32 special_m = npyv_cmple_s32(ndnan, npyv_setall_s32(0x7f000000)); - npyv_b32 nnan_m = npyv_notnan_f32(x); - npyv_s32 idxs = npyv_sub_s32(ndnan, npyv_setall_s32(0x3d400000)); - idxs = npyv_max_s32(idxs, npyv_zero_s32()); - idxs = npyv_min_s32(idxs, npyv_setall_s32(0x3e00000)); - npyv_u32 idx = npyv_shri_u32(npyv_reinterpret_u32_s32(idxs), 21); - -#if defined(TANHF_TRANSPOSED_LUT) - npyv_f32 c6543[npyv_nlanes_f32]; - npyv_f32 c210b[npyv_nlanes_f32]; - npyv_lanetype_u32 index[npyv_nlanes_f32]; - - /**begin repeat - * #lane = 0, 1, 2, 3# - */ - index[@lane@] = npyv_get_lane_u32(idx, @lane@); - c6543[@lane@] = npyv_reinterpret_f32_u32(npyv_load_u32(lut8x32 + index[@lane@] * 8)); - c210b[@lane@] = npyv_reinterpret_f32_u32(npyv_load_u32(lut8x32 + index[@lane@] * 8 + 4)); - /**end repeat**/ - - // lane0: {c6, c5, c4, c3}, {c2, c1, c0, b} - // lane1: {c6, c5, c4, c3}, {c2, c1, c0, b} - // lane2: {c6, c5, c4, c3}, {c2, c1, c0, b} - // lane3: {c6, c5, c4, c3}, {c2, c1, c0, b} - // - // transposed: - // c6: {lane0, lane1, lane2, lane3} - // c5: {lane0, lane1, lane2, lane3} - // c4: {lane0, lane1, lane2, lane3} - // c3: {lane0, lane1, lane2, lane3} - // c2: {lane0, lane1, lane2, lane3} - // c1: {lane0, lane1, lane2, lane3} - // c0: {lane0, lane1, lane2, lane3} - // b : {lane0, lane1, lane2, lane3} - - npyv_f32x2 c6543_l01 = npyv_zip_f32(c6543[0], c6543[1]); - npyv_f32x2 c6543_l23 = npyv_zip_f32(c6543[2], c6543[3]); - npyv_f32 c6 = npyv_combinel_f32(c6543_l01.val[0], c6543_l23.val[0]); - npyv_f32 c5 = npyv_combineh_f32(c6543_l01.val[0], c6543_l23.val[0]); - npyv_f32 c4 = npyv_combinel_f32(c6543_l01.val[1], c6543_l23.val[1]); - npyv_f32 c3 = npyv_combineh_f32(c6543_l01.val[1], c6543_l23.val[1]); - - npyv_f32x2 c210b_l01 = npyv_zip_f32(c210b[0], c210b[1]); - npyv_f32x2 c210b_l23 = npyv_zip_f32(c210b[2], c210b[3]); - npyv_f32 c2 = npyv_combinel_f32(c210b_l01.val[0], c210b_l23.val[0]); - npyv_f32 c1 = npyv_combineh_f32(c210b_l01.val[0], c210b_l23.val[0]); - npyv_f32 c0 = 
npyv_combinel_f32(c210b_l01.val[1], c210b_l23.val[1]); - npyv_f32 b = npyv_combineh_f32(c210b_l01.val[1], c210b_l23.val[1]); -#else - npyv_f32 b = npyv_lut32_f32((const float*)lut32x8 + 32*0, idx); - npyv_f32 c0 = npyv_lut32_f32((const float*)lut32x8 + 32*1, idx); - npyv_f32 c1 = npyv_lut32_f32((const float*)lut32x8 + 32*2, idx); - npyv_f32 c2 = npyv_lut32_f32((const float*)lut32x8 + 32*3, idx); - npyv_f32 c3 = npyv_lut32_f32((const float*)lut32x8 + 32*4, idx); - npyv_f32 c4 = npyv_lut32_f32((const float*)lut32x8 + 32*5, idx); - npyv_f32 c5 = npyv_lut32_f32((const float*)lut32x8 + 32*6, idx); - npyv_f32 c6 = npyv_lut32_f32((const float*)lut32x8 + 32*7, idx); -#endif // defined(TANHF_TRANSPOSED_LUT) // no need to zerofy nans or avoid FP exceptions by NO_EXC like SVML does // since we're clearing the FP status anyway. - npyv_f32 sign = npyv_and_f32(x, npyv_reinterpret_f32_u32(npyv_setall_u32(0x80000000))); - npyv_f32 y = npyv_sub_f32(npyv_abs_f32(x), b); - npyv_f32 r = npyv_muladd_f32(c6, y, c5); - r = npyv_muladd_f32(r, y, c4); - r = npyv_muladd_f32(r, y, c3); - r = npyv_muladd_f32(r, y, c2); - r = npyv_muladd_f32(r, y, c1); - r = npyv_muladd_f32(r, y, c0); + vec_f32 sign = hn::And(x, hn::BitCast(f32, hn::Set(s32, 0x80000000))); + vec_f32 y = hn::Sub(hn::Abs(x), b); + vec_f32 r = hn::MulAdd(c6, y, c5); + r = hn::MulAdd(r, y, c4); + r = hn::MulAdd(r, y, c3); + r = hn::MulAdd(r, y, c2); + r = hn::MulAdd(r, y, c1); + r = hn::MulAdd(r, y, c0); // 1.0 if |x| > HUGE_THRESHOLD || INF - r = npyv_select_f32(special_m, r, npyv_setall_f32(1.0f)); - r = npyv_or_f32(r, sign); + r = hn::IfThenElse(hn::RebindMask(f32, special_m), r, hn::Set(f32, 1.0f)); + r = hn::Or(r, sign); // qnan if nan - r = npyv_select_f32(nnan_m, r, qnan); - if (sdst == 1) { - npyv_store_till_f32(dst, len, r); - } else { - npyv_storen_till_f32(dst, sdst, len, r); - } + r = hn::IfThenElse(hn::RebindMask(f32, nan_m), qnan, r); + + store_vector(r, dst, sdst, len); } } -#undef TANHF_TRANSPOSED_LUT -#if 
defined(UNDEF_npyv_get_lane_u32) -#undef UNDEF_npyv_get_lane_u32 -#undef npyv_get_lane_u32 -#endif - #endif // NPY_SIMD_F32 -#endif // NPY_SIMD_FMA3 +#endif // HWY_NATIVE_FMA /**begin repeat * #TYPE = FLOAT, DOUBLE# * #type = float, double# * #sfx = f32, f64# * #ssfx = f, # - * #simd = NPY_SIMD_FMA3 && NPY_SIMD_F32, NPY_SIMD_FMA3 && NPY_SIMD_F64# + * #simd = HWY_NATIVE_FMA && NPY_SIMD_F32, HWY_NATIVE_FMA && NPY_SIMD_F64# */ /**begin repeat1 * #func = tanh# @@ -608,32 +685,29 @@ simd_tanh_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, npy_in NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) { - const @type@ *src = (@type@*)args[0]; - @type@ *dst = (@type@*)args[1]; - - const int lsize = sizeof(src[0]); - const npy_intp ssrc = steps[0] / lsize; - const npy_intp sdst = steps[1] / lsize; - npy_intp len = dimensions[0]; - assert(len <= 1 || (steps[0] % lsize == 0 && steps[1] % lsize == 0)); #if @simd@ - if (is_mem_overlap(src, steps[0], dst, steps[1], len) || - !npyv_loadable_stride_@sfx@(ssrc) || !npyv_storable_stride_@sfx@(sdst) + npy_intp len = dimensions[0]; + + if (is_mem_overlap(args[0], steps[0], args[1], steps[1], len) || + !npyv_loadable_stride_@sfx@(steps[0]) || + !npyv_storable_stride_@sfx@(steps[1]) ) { - for (; len > 0; --len, src += ssrc, dst += sdst) { - simd_@func@_@sfx@(src, 1, dst, 1, 1); + UNARY_LOOP { + simd_@func@_@sfx@((@type@ *)ip1, 1, (@type@ *)op1, 1, 1); } } else { - simd_@func@_@sfx@(src, ssrc, dst, sdst, len); + npy_intp ssrc = steps[0] / sizeof(@type@); + npy_intp sdst = steps[1] / sizeof(@type@); + simd_@func@_@sfx@((@type@ *)args[0], ssrc, (@type@ *)args[1], sdst, len); } npyv_cleanup(); #if @simd_req_clear@ npy_clear_floatstatus_barrier((char*)dimensions); #endif #else - for (; len > 0; --len, src += ssrc, dst += sdst) { - const @type@ src0 = *src; - *dst = npy_@func@@ssfx@(src0); + UNARY_LOOP { + const @type@ in1 = *(@type@ 
*)ip1; + *(@type@ *)op1 = npy_@func@@ssfx@(in1); } #endif } diff --git a/numpy/_core/src/umath/loops_logical.dispatch.c.src b/numpy/_core/src/umath/loops_logical.dispatch.c.src deleted file mode 100644 index c07525be402a..000000000000 --- a/numpy/_core/src/umath/loops_logical.dispatch.c.src +++ /dev/null @@ -1,377 +0,0 @@ -/*@targets - ** $maxopt baseline - ** neon asimd - ** sse2 avx2 avx512_skx - ** vsx2 - ** vx - **/ -#define _UMATHMODULE -#define _MULTIARRAYMODULE -#define NPY_NO_DEPRECATED_API NPY_API_VERSION - -#include "simd/simd.h" -#include "loops_utils.h" -#include "loops.h" -#include "lowlevel_strided_loops.h" -// Provides the various *_LOOP macros -#include "fast_loop_macros.h" - -/******************************************************************************* - ** Defining the SIMD kernels - ******************************************************************************/ - -#if NPY_SIMD -/* - * convert any bit set to boolean true so vectorized and normal operations are - * consistent, should not be required if bool is used correctly everywhere but - * you never know - */ -NPY_FINLINE npyv_u8 byte_to_true(npyv_u8 v) -{ - const npyv_u8 zero = npyv_zero_u8(); - const npyv_u8 truemask = npyv_setall_u8(1 == 1); - // cmpeq(v, 0) turns 0x00 -> 0xff and non-zero -> 0x00 - npyv_u8 tmp = npyv_cvt_u8_b8(npyv_cmpeq_u8(v, zero)); - // tmp is filled with 0xff/0x00, negate and mask to boolean true - return npyv_andc_u8(truemask, tmp); -} -/* - * convert mask vector (0xff/0x00) to boolean true. similar to byte_to_true(), - * but we've already got a mask and can skip negation. - */ -NPY_FINLINE npyv_u8 mask_to_true(npyv_b8 v) -{ - const npyv_u8 truemask = npyv_setall_u8(1 == 1); - return npyv_and_u8(truemask, npyv_cvt_u8_b8(v)); -} -/* - * For logical_and, we have to be careful to handle non-bool inputs where - * bits of each operand might not overlap. Example: a = 0x01, b = 0x80 - * Both evaluate to boolean true, however, a & b is false. 
Return value - * should be consistent with byte_to_true(). - */ -NPY_FINLINE npyv_u8 simd_logical_and_u8(npyv_u8 a, npyv_u8 b) -{ - const npyv_u8 zero = npyv_zero_u8(); - const npyv_u8 truemask = npyv_setall_u8(1 == 1); - npyv_b8 ma = npyv_cmpeq_u8(a, zero); - npyv_b8 mb = npyv_cmpeq_u8(b, zero); - npyv_u8 r = npyv_cvt_u8_b8(npyv_or_b8(ma, mb)); - return npyv_andc_u8(truemask, r); -} -/* - * We don't really need the following, but it simplifies the templating code - * below since it is paired with simd_logical_and_u8() above. - */ -NPY_FINLINE npyv_u8 simd_logical_or_u8(npyv_u8 a, npyv_u8 b) -{ - npyv_u8 r = npyv_or_u8(a, b); - return byte_to_true(r); -} - - -/**begin repeat - * #kind = logical_and, logical_or# - * #and = 1, 0# - * #scalar_op = &&, ||# - * #intrin = and, or# - * #reduce = min, max# - * #scalar_cmp = ==, !=# - * #anyall = all, any# - */ -static void -simd_binary_@kind@_BOOL(npy_bool * op, npy_bool * ip1, npy_bool * ip2, npy_intp len) -{ - #define UNROLL 16 - - const int vstep = npyv_nlanes_u8; - const int wstep = vstep * UNROLL; - - // Unrolled vectors loop - for (; len >= wstep; len -= wstep, ip1 += wstep, ip2 += wstep, op += wstep) { - /**begin repeat1 - * #unroll = 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15# - */ - #if UNROLL > @unroll@ - npyv_u8 a@unroll@ = npyv_load_u8(ip1 + vstep * @unroll@); - npyv_u8 b@unroll@ = npyv_load_u8(ip2 + vstep * @unroll@); - npyv_u8 r@unroll@ = simd_logical_@intrin@_u8(a@unroll@, b@unroll@); - npyv_store_u8(op + vstep * @unroll@, r@unroll@); - #endif - /**end repeat1**/ - } - #undef UNROLL - - // Single vectors loop - for (; len >= vstep; len -= vstep, ip1 += vstep, ip2 += vstep, op += vstep) { - npyv_u8 a = npyv_load_u8(ip1); - npyv_u8 b = npyv_load_u8(ip2); - npyv_u8 r = simd_logical_@intrin@_u8(a, b); - npyv_store_u8(op, r); - } - - // Scalar loop to finish off - for (; len > 0; len--, ip1++, ip2++, op++) { - *op = *ip1 @scalar_op@ *ip2; - } -} - -static void -simd_reduce_@kind@_BOOL(npy_bool * op, 
npy_bool * ip, npy_intp len) -{ - #define UNROLL 8 - - const int vstep = npyv_nlanes_u8; - const int wstep = vstep * UNROLL; - - // Unrolled vectors loop - for (; len >= wstep; len -= wstep, ip += wstep) { - #if defined(NPY_HAVE_SSE2) - NPY_PREFETCH(ip + wstep, 0, 3); - #endif - npyv_u8 v0 = npyv_load_u8(ip + vstep * 0); - npyv_u8 v1 = npyv_load_u8(ip + vstep * 1); - npyv_u8 v2 = npyv_load_u8(ip + vstep * 2); - npyv_u8 v3 = npyv_load_u8(ip + vstep * 3); - npyv_u8 v4 = npyv_load_u8(ip + vstep * 4); - npyv_u8 v5 = npyv_load_u8(ip + vstep * 5); - npyv_u8 v6 = npyv_load_u8(ip + vstep * 6); - npyv_u8 v7 = npyv_load_u8(ip + vstep * 7); - - npyv_u8 m01 = npyv_@reduce@_u8(v0, v1); - npyv_u8 m23 = npyv_@reduce@_u8(v2, v3); - npyv_u8 m45 = npyv_@reduce@_u8(v4, v5); - npyv_u8 m67 = npyv_@reduce@_u8(v6, v7); - - npyv_u8 m0123 = npyv_@reduce@_u8(m01, m23); - npyv_u8 m4567 = npyv_@reduce@_u8(m45, m67); - - npyv_u8 mv = npyv_@reduce@_u8(m0123, m4567); - - if(npyv_@anyall@_u8(mv) @scalar_cmp@ 0){ - *op = !@and@; - return; - } - } - - // Single vectors loop - for (; len >= vstep; len -= vstep, ip += vstep) { - npyv_u8 v0 = npyv_load_u8(ip); - if(npyv_@anyall@_u8(v0) @scalar_cmp@ 0){ - *op = !@and@; - return; - } - } - - // Scalar loop to finish off - for (; len > 0; --len, ++ip) { - *op = *op @scalar_op@ *ip; - if (*op @scalar_cmp@ 0) { - return; - } - } -#undef UNROLL -} -/**end repeat**/ - -/**begin repeat - * #kind = logical_not, absolute# - * #op = ==, !=# - * #not = 1, 0# - */ -static void -simd_@kind@_BOOL(npy_bool * op, npy_bool * ip, npy_intp len) -{ - #define UNROLL 16 - - const int vstep = npyv_nlanes_u8; - const int wstep = vstep * UNROLL; - - #if @not@ - const npyv_u8 zero = npyv_zero_u8(); - #endif - - // Unrolled vectors loop - for (; len >= wstep; len -= wstep, ip += wstep, op += wstep) { - /**begin repeat1 - * #unroll = 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15# - */ - #if UNROLL > @unroll@ - npyv_u8 v@unroll@ = npyv_load_u8(ip + vstep * @unroll@); -#if 
@not@ - npyv_u8 r@unroll@ = mask_to_true(npyv_cmpeq_u8(v@unroll@, zero)); -#else - npyv_u8 r@unroll@ = byte_to_true(v@unroll@); -#endif - npyv_store_u8(op + vstep * @unroll@, r@unroll@); - #endif - /**end repeat1**/ - } - #undef UNROLL - - // Single vectors loop - for (; len >= vstep; len -= vstep, ip += vstep, op += vstep) { - npyv_u8 v = npyv_load_u8(ip); -#if @not@ - npyv_u8 r = mask_to_true(npyv_cmpeq_u8(v, zero)); -#else - npyv_u8 r = byte_to_true(v); -#endif - npyv_store_u8(op, r); - } - - // Scalar loop to finish off - for (; len > 0; --len, ++ip, ++op) { - *op = (*ip @op@ 0); - } -} -/**end repeat**/ - -#endif // NPY_SIMD - -/******************************************************************************* - ** Defining ufunc inner functions - ******************************************************************************/ - -/**begin repeat - * # kind = logical_or, logical_and# - */ -static NPY_INLINE int -run_binary_simd_@kind@_BOOL(char **args, npy_intp const *dimensions, npy_intp const *steps) -{ -#if NPY_SIMD - if (sizeof(npy_bool) == 1 && - IS_BLOCKABLE_BINARY(sizeof(npy_bool), NPY_SIMD_WIDTH)) { - simd_binary_@kind@_BOOL((npy_bool*)args[2], (npy_bool*)args[0], - (npy_bool*)args[1], dimensions[0]); - return 1; - } -#endif - return 0; -} - - -static NPY_INLINE int -run_reduce_simd_@kind@_BOOL(char **args, npy_intp const *dimensions, npy_intp const *steps) -{ -#if NPY_SIMD - if (sizeof(npy_bool) == 1 && - IS_BLOCKABLE_REDUCE(sizeof(npy_bool), NPY_SIMD_WIDTH)) { - simd_reduce_@kind@_BOOL((npy_bool*)args[0], (npy_bool*)args[1], - dimensions[0]); - return 1; - } -#endif - return 0; -} -/**end repeat**/ - -/**begin repeat - * #kind = logical_not, absolute# - */ -static NPY_INLINE int -run_unary_simd_@kind@_BOOL(char **args, npy_intp const *dimensions, npy_intp const *steps) -{ -#if NPY_SIMD - if (sizeof(npy_bool) == 1 && - IS_BLOCKABLE_UNARY(sizeof(npy_bool), NPY_SIMD_WIDTH)) { - simd_@kind@_BOOL((npy_bool*)args[1], (npy_bool*)args[0], dimensions[0]); - return 
1; - } -#endif - return 0; -} -/**end repeat**/ - - -/**begin repeat - * #kind = logical_and, logical_or# - * #OP = &&, ||# - * #SC = ==, !=# - * #and = 1, 0# - */ -NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(BOOL_@kind@) -(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) -{ - if(IS_BINARY_REDUCE) { -#if NPY_SIMD - /* - * stick with our variant for more reliable performance, only known - * platform which outperforms it by ~20% is an i7 with glibc 2.17 - */ - if (run_reduce_simd_@kind@_BOOL(args, dimensions, steps)) { - return; - } -#else - /* for now only use libc on 32-bit/non-x86 */ - if (steps[1] == 1) { - npy_bool * op = (npy_bool *)args[0]; -#if @and@ - /* np.all(), search for a zero (false) */ - if (*op) { - *op = memchr(args[1], 0, dimensions[0]) == NULL; - } -#else - /* - * np.any(), search for a non-zero (true) via comparing against - * zero blocks, memcmp is faster than memchr on SSE4 machines - * with glibc >= 2.12 and memchr can only check for equal 1 - */ - static const npy_bool zero[4096]; /* zero by C standard */ - npy_uintp i, n = dimensions[0]; - - for (i = 0; !*op && i < n - (n % sizeof(zero)); i += sizeof(zero)) { - *op = memcmp(&args[1][i], zero, sizeof(zero)) != 0; - } - if (!*op && n - i > 0) { - *op = memcmp(&args[1][i], zero, n - i) != 0; - } -#endif - return; - } -#endif - else { - BINARY_REDUCE_LOOP(npy_bool) { - const npy_bool in2 = *(npy_bool *)ip2; - io1 = io1 @OP@ in2; - if (io1 @SC@ 0) { - break; - } - } - *((npy_bool *)iop1) = io1; - } - } - else { - if (run_binary_simd_@kind@_BOOL(args, dimensions, steps)) { - return; - } - else { - BINARY_LOOP { - const npy_bool in1 = *(npy_bool *)ip1; - const npy_bool in2 = *(npy_bool *)ip2; - *((npy_bool *)op1) = in1 @OP@ in2; - } - } - } -} -/**end repeat**/ - -/**begin repeat - * #kind = logical_not, absolute# - * #OP = ==, !=# - **/ -NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(BOOL_@kind@) -(char **args, npy_intp const *dimensions, npy_intp const *steps, void 
*NPY_UNUSED(func)) -{ - if (run_unary_simd_@kind@_BOOL(args, dimensions, steps)) { - return; - } - else { - UNARY_LOOP { - npy_bool in1 = *(npy_bool *)ip1; - *((npy_bool *)op1) = in1 @OP@ 0; - } - } -} -/**end repeat**/ - diff --git a/numpy/_core/src/umath/loops_logical.dispatch.cpp b/numpy/_core/src/umath/loops_logical.dispatch.cpp new file mode 100644 index 000000000000..5c1834cc29e2 --- /dev/null +++ b/numpy/_core/src/umath/loops_logical.dispatch.cpp @@ -0,0 +1,418 @@ +#include "simd/simd.h" +#include "loops_utils.h" +#include "loops.h" +#include "lowlevel_strided_loops.h" +#include "fast_loop_macros.h" +#include +#include "simd/simd.hpp" +#include + +struct logical_and_t {}; +struct logical_or_t {}; +struct absolute_t {}; +struct logical_not_t {}; + +namespace { +using namespace np::simd; + +/******************************************************************************* + ** Defining the SIMD kernels + ******************************************************************************/ +/* + * convert any bit set to boolean true so vectorized and normal operations are + * consistent, should not be required if bool is used correctly everywhere but + * you never know + */ +#if NPY_HWY +HWY_INLINE HWY_ATTR Vec byte_to_true(Vec v) +{ + return hn::IfThenZeroElse(hn::Eq(v, Zero()), Set(uint8_t(1))); +} + +/* + * convert mask vector (0xff/0x00) to boolean true. similar to byte_to_true(), + * but we've already got a mask and can skip negation. + */ +HWY_INLINE HWY_ATTR Vec mask_to_true(Vec v) +{ + return hn::IfThenElseZero(hn::Ne(v, Zero()), Set(uint8_t(1))); +} + +/* + * For logical_and, we have to be careful to handle non-bool inputs where + * bits of each operand might not overlap. Example: a = 0x01, b = 0x80 + * Both evaluate to boolean true, however, a & b is false. Return value + * should be consistent with byte_to_true(). 
+ */ +HWY_INLINE HWY_ATTR Vec simd_logical_and_u8(Vec a, Vec b) +{ + return hn::IfThenZeroElse( + hn::Eq(Zero(), hn::Min(a, b)), + Set(uint8_t(1)) + ); +} +/* + * We don't really need the following, but it simplifies the templating code + * below since it is paired with simd_logical_and_u8() above. + */ +HWY_INLINE HWY_ATTR Vec simd_logical_or_u8(Vec a, Vec b) +{ + auto r = hn::Or(a, b); + return byte_to_true(r); +} + +HWY_INLINE HWY_ATTR bool simd_any_u8(Vec v) +{ + return hn::ReduceMax(_Tag(), v) != 0; +} + +HWY_INLINE HWY_ATTR bool simd_all_u8(Vec v) +{ + return hn::ReduceMin(_Tag(), v) != 0; +} +#endif + +template +struct BinaryLogicalTraits; + +template<> +struct BinaryLogicalTraits { + static constexpr bool is_and = false; + static constexpr auto scalar_op = std::logical_or{}; + static constexpr auto scalar_cmp = std::not_equal_to{}; +#if NPY_HWY + static constexpr auto anyall = simd_any_u8; + + static HWY_INLINE HWY_ATTR Vec simd_op(Vec a, Vec b) { + return simd_logical_or_u8(a, b); + } +#endif +}; + +template<> +struct BinaryLogicalTraits { + static constexpr bool is_and = true; + static constexpr auto scalar_op = std::logical_and{}; + static constexpr auto scalar_cmp = std::equal_to{}; +#if NPY_HWY + static constexpr auto anyall = simd_all_u8; + + static HWY_INLINE HWY_ATTR Vec simd_op(Vec a, Vec b) { + return simd_logical_and_u8(a, b); + } +#endif +}; + +template +struct UnaryLogicalTraits; + +template<> +struct UnaryLogicalTraits { + static constexpr auto scalar_op = std::equal_to{}; + +#if NPY_HWY + static HWY_INLINE HWY_ATTR Vec simd_op(Vec v) { + const auto zero = Zero(); + return mask_to_true(hn::VecFromMask(_Tag(), hn::Eq(v, zero))); + } +#endif +}; + +template<> +struct UnaryLogicalTraits { + static constexpr auto scalar_op = std::not_equal_to{}; + +#if NPY_HWY + static HWY_INLINE HWY_ATTR Vec simd_op(Vec v) { + return byte_to_true(v); + } +#endif +}; + +#if NPY_HWY +template +HWY_ATTR SIMD_MSVC_NOINLINE +static void 
simd_binary_logical_BOOL(npy_bool* op, npy_bool* ip1, npy_bool* ip2, npy_intp len) { + using Traits = BinaryLogicalTraits; + constexpr int UNROLL = 16; + HWY_LANES_CONSTEXPR int vstep = Lanes(); + const int wstep = vstep * UNROLL; + + // Unrolled vectors loop + for (; len >= wstep; len -= wstep, ip1 += wstep, ip2 += wstep, op += wstep) { + for(int i = 0; i < UNROLL; i++) { + auto a = LoadU(ip1 + vstep * i); + auto b = LoadU(ip2 + vstep * i); + auto r = Traits::simd_op(a, b); + StoreU(r, op + vstep * i); + } + } + + // Single vectors loop + for (; len >= vstep; len -= vstep, ip1 += vstep, ip2 += vstep, op += vstep) { + auto a = LoadU(ip1); + auto b = LoadU(ip2); + auto r = Traits::simd_op(a, b); + StoreU(r, op); + } + + // Scalar loop to finish off + for (; len > 0; len--, ip1++, ip2++, op++) { + *op = Traits::scalar_op(*ip1, *ip2); + } +} + +template +HWY_ATTR SIMD_MSVC_NOINLINE +static void simd_reduce_logical_BOOL(npy_bool* op, npy_bool* ip, npy_intp len) { + using Traits = BinaryLogicalTraits; + constexpr int UNROLL = 8; + HWY_LANES_CONSTEXPR int vstep = Lanes(); + const int wstep = vstep * UNROLL; + + // Unrolled vectors loop + for (; len >= wstep; len -= wstep, ip += wstep) { + #if defined(NPY_HAVE_SSE2) + NPY_PREFETCH(reinterpret_cast(ip + wstep), 0, 3); + #endif + auto v0 = LoadU(ip); + auto v1 = LoadU(ip + vstep); + auto v2 = LoadU(ip + vstep * 2); + auto v3 = LoadU(ip + vstep * 3); + auto v4 = LoadU(ip + vstep * 4); + auto v5 = LoadU(ip + vstep * 5); + auto v6 = LoadU(ip + vstep * 6); + auto v7 = LoadU(ip + vstep * 7); + + auto m01 = Traits::simd_op(v0, v1); + auto m23 = Traits::simd_op(v2, v3); + auto m45 = Traits::simd_op(v4, v5); + auto m67 = Traits::simd_op(v6, v7); + + auto m0123 = Traits::simd_op(m01, m23); + auto m4567 = Traits::simd_op(m45, m67); + + auto mv = Traits::simd_op(m0123, m4567); + + if(Traits::anyall(mv) == !Traits::is_and) { + *op = !Traits::is_and; + return; + } + } + + // Single vectors loop + for (; len >= vstep; len -= vstep, ip += 
vstep) { + auto v = LoadU(ip); + if(Traits::anyall(v) == !Traits::is_and) { + *op = !Traits::is_and; + return; + } + } + + // Scalar loop to finish off + for (; len > 0; --len, ++ip) { + *op = Traits::scalar_op(*op, *ip); + if (Traits::scalar_cmp(*op, 0)) { + return; + } + } +} + +template +HWY_ATTR SIMD_MSVC_NOINLINE +static void simd_unary_logical_BOOL(npy_bool* op, npy_bool* ip, npy_intp len) { + using Traits = UnaryLogicalTraits; + constexpr int UNROLL = 16; + HWY_LANES_CONSTEXPR int vstep = Lanes(); + const int wstep = vstep * UNROLL; + + // Unrolled vectors loop + for (; len >= wstep; len -= wstep, ip += wstep, op += wstep) { + for(int i = 0; i < UNROLL; i++) { + auto v = LoadU(ip + vstep * i); + auto r = Traits::simd_op(v); + StoreU(r, op + vstep * i); + } + } + + // Single vectors loop + for (; len >= vstep; len -= vstep, ip += vstep, op += vstep) { + auto v = LoadU(ip); + auto r = Traits::simd_op(v); + StoreU(r, op); + } + + // Scalar loop to finish off + for (; len > 0; --len, ++ip, ++op) { + *op = Traits::scalar_op(*ip, 0); + } +} + +#endif //NPY_HWY +} // namespace anonymous + +/******************************************************************************* + ** Defining ufunc inner functions + ******************************************************************************/ +template +static NPY_INLINE int run_binary_simd_logical_BOOL( + char** args, npy_intp const* dimensions, npy_intp const* steps) +{ +#if NPY_HWY + if (sizeof(npy_bool) == 1 && IS_BLOCKABLE_BINARY(sizeof(npy_bool), kMaxLanes)) { + simd_binary_logical_BOOL((npy_bool*)args[2], (npy_bool*)args[0], (npy_bool*)args[1], dimensions[0]); + return 1; + } +#endif + return 0; +} + +template +static NPY_INLINE int run_reduce_simd_logical_BOOL( + char** args, npy_intp const* dimensions, npy_intp const* steps) +{ +#if NPY_HWY + if (sizeof(npy_bool) == 1 && IS_BLOCKABLE_REDUCE(sizeof(npy_bool), kMaxLanes)) { + simd_reduce_logical_BOOL((npy_bool*)args[0], (npy_bool*)args[1], dimensions[0]); + return 
1; + } +#endif + return 0; +} + +template +static NPY_INLINE int run_unary_simd_logical_BOOL( + char** args, npy_intp const* dimensions, npy_intp const* steps) +{ +#if NPY_HWY + if (sizeof(npy_bool) == 1 && IS_BLOCKABLE_UNARY(sizeof(npy_bool), kMaxLanes)) { + simd_unary_logical_BOOL((npy_bool*)args[1], (npy_bool*)args[0], dimensions[0]); + return 1; + } +#endif + return 0; +} + +template +void BOOL_binary_func_wrapper(char** args, npy_intp const* dimensions, npy_intp const* steps) { + char *ip1 = args[0], *ip2 = args[1], *op1 = args[2]; + npy_intp is1 = steps[0], is2 = steps[1], os1 = steps[2]; + npy_intp n = dimensions[0]; + using Traits = BinaryLogicalTraits; + +#if NPY_HWY + if (run_binary_simd_logical_BOOL(args, dimensions, steps)) { + return; + } +#endif + + for(npy_intp i = 0; i < n; i++, ip1 += is1, ip2 += is2, op1 += os1) { + const npy_bool in1 = *(npy_bool*)ip1; + const npy_bool in2 = *(npy_bool*)ip2; + *((npy_bool*)op1) = Traits::scalar_op(in1, in2); + } +} + +template +void BOOL_binary_reduce_wrapper(char** args, npy_intp const* dimensions, npy_intp const* steps) { + char *iop1 = args[0]; + npy_bool io1 = *(npy_bool *)iop1; + char *ip2 = args[1]; + npy_intp is2 = steps[1]; + npy_intp n = dimensions[0]; + npy_intp i; + using Traits = BinaryLogicalTraits; +#if NPY_HWY + if (run_reduce_simd_logical_BOOL(args, dimensions, steps)) { + return; + } +#else + /* for now only use libc on 32-bit/non-x86 */ + if (steps[1] == 1) { + npy_bool * op = (npy_bool *)args[0]; + if constexpr (Traits::is_and) { + + /* np.all(), search for a zero (false) */ + if (*op) { + *op = memchr(args[1], 0, dimensions[0]) == NULL; + } + } + else { + /* + * np.any(), search for a non-zero (true) via comparing against + * zero blocks, memcmp is faster than memchr on SSE4 machines + * with glibc >= 2.12 and memchr can only check for equal 1 + */ + static const npy_bool zero[4096]={0}; /* zero by C standard */ + + for (i = 0; !*op && i < n - (n % sizeof(zero)); i += sizeof(zero)) { + *op = 
memcmp(&args[1][i], zero, sizeof(zero)) != 0; + } + if (!*op && n - i > 0) { + *op = memcmp(&args[1][i], zero, n - i) != 0; + } + } + return; + } +#endif + + for(i = 0; i < n; i++, ip2 += is2) { + const npy_bool in2 = *(npy_bool*)ip2; + io1 = Traits::scalar_op(io1, in2); + if ((Traits::is_and && !io1) || (!Traits::is_and && io1)) + break; + } + *((npy_bool*)iop1) = io1; +} + +template +void BOOL_logical_op_wrapper(char** args, npy_intp const* dimensions, npy_intp const* steps) { + if (IS_BINARY_REDUCE) { + BOOL_binary_reduce_wrapper(args, dimensions, steps); + } + else { + BOOL_binary_func_wrapper(args, dimensions, steps); + } +} + +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(BOOL_logical_and)( + char** args, npy_intp const* dimensions, npy_intp const* steps, void* NPY_UNUSED(func)) +{ + BOOL_logical_op_wrapper(args, dimensions, steps); +} + +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(BOOL_logical_or)( + char** args, npy_intp const* dimensions, npy_intp const* steps, void* NPY_UNUSED(func)) +{ + BOOL_logical_op_wrapper(args, dimensions, steps); +} + +template +void BOOL_func_wrapper(char** args, npy_intp const* dimensions, npy_intp const* steps) +{ + char *ip1 = args[0], *op1 = args[1]; + npy_intp is1 = steps[0], os1 = steps[1]; + npy_intp n = dimensions[0]; + using Traits = UnaryLogicalTraits; + + if (run_unary_simd_logical_BOOL(args, dimensions, steps)) { + return; + } + + for(npy_intp i = 0; i < n; i++, ip1 += is1, op1 += os1) { + npy_bool in1 = *(npy_bool*)ip1; + *((npy_bool*)op1) = Traits::scalar_op(in1, 0); + } +} + +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(BOOL_logical_not)( + char** args, npy_intp const* dimensions, npy_intp const* steps, void* NPY_UNUSED(func)) +{ + BOOL_func_wrapper(args, dimensions, steps); +} + +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(BOOL_absolute)( + char** args, npy_intp const* dimensions, npy_intp const* steps, void* NPY_UNUSED(func)) +{ + BOOL_func_wrapper(args, dimensions, steps); +} diff --git 
a/numpy/_core/src/umath/loops_minmax.dispatch.c.src b/numpy/_core/src/umath/loops_minmax.dispatch.c.src index 319072c01fbe..a33297ca83d5 100644 --- a/numpy/_core/src/umath/loops_minmax.dispatch.c.src +++ b/numpy/_core/src/umath/loops_minmax.dispatch.c.src @@ -1,10 +1,3 @@ -/*@targets - ** $maxopt baseline - ** neon asimd - ** sse2 avx2 avx512_skx - ** vsx2 - ** vx vxe - **/ #define _UMATHMODULE #define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION @@ -130,7 +123,7 @@ simd_reduce_c_@intrin@_@sfx@(const npyv_lanetype_@sfx@ *ip, npyv_lanetype_@sfx@ npyv_@sfx@ acc = npyv_setall_@sfx@(op1[0]); for (; len >= wstep; len -= wstep, ip += wstep) { #ifdef NPY_HAVE_SSE2 - NPY_PREFETCH(ip + wstep, 0, 3); + NPY_PREFETCH((const char*)(ip + wstep), 0, 3); #endif npyv_@sfx@ v0 = npyv_load_@sfx@(ip + vstep * 0); npyv_@sfx@ v1 = npyv_load_@sfx@(ip + vstep * 1); @@ -352,9 +345,9 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) } // unroll scalars faster than non-contiguous vector load/store on Arm #if !defined(NPY_HAVE_NEON) && @is_fp@ - if (TO_SIMD_SFX(npyv_loadable_stride)(is1/sizeof(STYPE)) && - TO_SIMD_SFX(npyv_loadable_stride)(is2/sizeof(STYPE)) && - TO_SIMD_SFX(npyv_storable_stride)(os1/sizeof(STYPE)) + if (TO_SIMD_SFX(npyv_loadable_stride)(is1) && + TO_SIMD_SFX(npyv_loadable_stride)(is2) && + TO_SIMD_SFX(npyv_storable_stride)(os1) ) { TO_SIMD_SFX(simd_binary_@intrin@)( (STYPE*)ip1, is1/sizeof(STYPE), diff --git a/numpy/_core/src/umath/loops_modulo.dispatch.c.src b/numpy/_core/src/umath/loops_modulo.dispatch.c.src index 25edffb1e2c1..4645fe14a487 100644 --- a/numpy/_core/src/umath/loops_modulo.dispatch.c.src +++ b/numpy/_core/src/umath/loops_modulo.dispatch.c.src @@ -1,6 +1,3 @@ -/*@targets - ** baseline vsx4 - **/ #define _UMATHMODULE #define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION @@ -493,12 +490,16 @@ vsx4_simd_@func@_by_scalar_contig_@sfx@(char **args, npy_intp len) #else /* fmod and remainder */ for (; len > 0; --len, 
++src1, ++dst1) { const npyv_lanetype_@sfx@ a = *src1; - *dst1 = a % scalar; + if (NPY_UNLIKELY(a == NPY_MIN_INT@len@ && scalar == -1)) { + *dst1 = 0; + } else { + *dst1 = a % scalar; #if @id@ == 1 /* remainder */ - if (!((a > 0) == (scalar > 0) || *dst1 == 0)) { - *dst1 += scalar; - } + if (!((a > 0) == (scalar > 0) || *dst1 == 0)) { + *dst1 += scalar; + } #endif + } } #endif npyv_cleanup(); diff --git a/numpy/_core/src/umath/loops_trigonometric.dispatch.c.src b/numpy/_core/src/umath/loops_trigonometric.dispatch.c.src deleted file mode 100644 index 31de906098e3..000000000000 --- a/numpy/_core/src/umath/loops_trigonometric.dispatch.c.src +++ /dev/null @@ -1,457 +0,0 @@ -/*@targets - ** $maxopt baseline - ** (avx2 fma3) avx512f - ** vsx2 vsx3 vsx4 - ** neon_vfpv4 - ** vxe vxe2 - **/ -#include "numpy/npy_math.h" -#include "simd/simd.h" -#include "loops_utils.h" -#include "loops.h" -#include "fast_loop_macros.h" -/* - * TODO: - * - use vectorized version of Payne-Hanek style reduction for large elements or - * when there's no native FUSED support instead of fallback to libc - */ -#if NPY_SIMD_FMA3 // native support -/**begin repeat - * #check = F64, F32# - * #sfx = f64, f32# - * #enable = 0, 1# - */ -#if NPY_SIMD_@check@ && @enable@ -/* - * Vectorized Cody-Waite range reduction technique - * Performs the reduction step x* = x - y*C in three steps: - * 1) x* = x - y*c1 - * 2) x* = x - y*c2 - * 3) x* = x - y*c3 - * c1, c2 are exact floating points, c3 = C - c1 - c2 simulates higher precision - */ -NPY_FINLINE npyv_@sfx@ -simd_range_reduction_@sfx@(npyv_@sfx@ x, npyv_@sfx@ y, npyv_@sfx@ c1, npyv_@sfx@ c2, npyv_@sfx@ c3) -{ - npyv_@sfx@ reduced_x = npyv_muladd_@sfx@(y, c1, x); - reduced_x = npyv_muladd_@sfx@(y, c2, reduced_x); - reduced_x = npyv_muladd_@sfx@(y, c3, reduced_x); - return reduced_x; -} -#endif -/**end repeat**/ -/* Disable SIMD code and revert to libm: see - * 
https://mail.python.org/archives/list/numpy-discussion@python.org/thread/C6EYZZSR4EWGVKHAZXLE7IBILRMNVK7L/ - * for detailed discussion on this*/ -#if 0 // NPY_SIMD_F64 -/**begin repeat - * #op = cos, sin# - */ -#if defined(NPY_OS_WIN32) || defined(NPY_OS_CYGWIN) -NPY_FINLINE npyv_f64 -#else -NPY_NOINLINE npyv_f64 -#endif -simd_@op@_scalar_f64(npyv_f64 out, npy_uint64 cmp_bits) -{ - // MSVC doesn't compile with direct vector access, so we copy it here - // as we have no npyv_get_lane/npyv_set_lane intrinsics - npy_double NPY_DECL_ALIGNED(NPY_SIMD_WIDTH) out_copy[npyv_nlanes_f64]; - npyv_storea_f64(out_copy, out); - - for (unsigned i = 0; i < npyv_nlanes_f64; ++i) { - if (cmp_bits & (1 << i)) { - out_copy[i] = npy_@op@(out_copy[i]); - } - } - - return npyv_loada_f64(out_copy); -} -/**end repeat**/ - -/* - * Approximate sine algorithm for x \in [-pi/2, pi/2] - * worst-case error is 3.5 ulp. - * abs error: 0x1.be222a58p-53 in [-pi/2, pi/2]. - */ -NPY_FINLINE npyv_f64 -simd_approx_sine_poly_f64(npyv_f64 r) -{ - const npyv_f64 poly1 = npyv_setall_f64(-0x1.9f4a9c8b21dc9p-41); - const npyv_f64 poly2 = npyv_setall_f64(0x1.60e88a10163f2p-33); - const npyv_f64 poly3 = npyv_setall_f64(-0x1.ae6361b7254e7p-26); - const npyv_f64 poly4 = npyv_setall_f64(0x1.71de382e8d62bp-19); - const npyv_f64 poly5 = npyv_setall_f64(-0x1.a01a019aeb4ffp-13); - const npyv_f64 poly6 = npyv_setall_f64(0x1.111111110b25ep-7); - const npyv_f64 poly7 = npyv_setall_f64(-0x1.55555555554c3p-3); - - npyv_f64 r2 = npyv_mul_f64(r, r); - npyv_f64 y = npyv_muladd_f64(poly1, r2, poly2); - y = npyv_muladd_f64(y, r2, poly3); - y = npyv_muladd_f64(y, r2, poly4); - y = npyv_muladd_f64(y, r2, poly5); - y = npyv_muladd_f64(y, r2, poly6); - y = npyv_muladd_f64(y, r2, poly7); - y = npyv_muladd_f64(npyv_mul_f64(y, r2), r, r); - - return y; -} - -/* r = |x| - n*pi (range reduction into -pi/2 .. pi/2). 
*/ -NPY_FINLINE npyv_f64 -simd_range_reduction_pi2(npyv_f64 r, npyv_f64 n) { - const npyv_f64 pi1 = npyv_setall_f64(-0x1.921fb54442d18p+1); - const npyv_f64 pi2 = npyv_setall_f64(-0x1.1a62633145c06p-53); - const npyv_f64 pi3 = npyv_setall_f64(-0x1.c1cd129024e09p-106); - - return simd_range_reduction_f64(r, n, pi1, pi2, pi3); -} - -NPY_FINLINE npyv_b64 simd_sin_range_check_f64(npyv_u64 ir) { - const npyv_u64 tiny_bound = npyv_setall_u64(0x202); /* top12 (asuint64 (0x1p-509)). */ - const npyv_u64 simd_thresh = npyv_setall_u64(0x214); /* top12 (asuint64 (RangeVal)) - SIMD_TINY_BOUND. */ - - return npyv_cmpge_u64(npyv_sub_u64(npyv_shri_u64(ir, 52), tiny_bound), simd_thresh); -} - -NPY_FINLINE npyv_b64 simd_cos_range_check_f64(npyv_u64 ir) { - const npyv_f64 range_val = npyv_setall_f64(0x1p23); - - return npyv_cmpge_u64(ir, npyv_reinterpret_u64_f64(range_val)); -} - -NPY_FINLINE npyv_f64 -simd_cos_poly_f64(npyv_f64 r, npyv_u64 ir, npyv_u64 sign) -{ - const npyv_f64 inv_pi = npyv_setall_f64(0x1.45f306dc9c883p-2); - const npyv_f64 half_pi = npyv_setall_f64(0x1.921fb54442d18p+0); - const npyv_f64 shift = npyv_setall_f64(0x1.8p52); - - /* n = rint((|x|+pi/2)/pi) - 0.5. */ - npyv_f64 n = npyv_muladd_f64(inv_pi, npyv_add_f64(r, half_pi), shift); - npyv_u64 odd = npyv_shli_u64(npyv_reinterpret_u64_f64(n), 63); - n = npyv_sub_f64(n, shift); - n = npyv_sub_f64(n, npyv_setall_f64(0.5)); - - /* r = |x| - n*pi (range reduction into -pi/2 .. pi/2). */ - r = simd_range_reduction_pi2(r, n); - - /* sin(r) poly approx. */ - npyv_f64 y = simd_approx_sine_poly_f64(r); - - /* sign. */ - return npyv_reinterpret_f64_u64(npyv_xor_u64(npyv_reinterpret_u64_f64(y), odd)); -} - -NPY_FINLINE npyv_f64 -simd_sin_poly_f64(npyv_f64 r, npyv_u64 ir, npyv_u64 sign) -{ - const npyv_f64 inv_pi = npyv_setall_f64(0x1.45f306dc9c883p-2); - const npyv_f64 shift = npyv_setall_f64(0x1.8p52); - - /* n = rint(|x|/pi). 
*/ - npyv_f64 n = npyv_muladd_f64(inv_pi, r, shift); - npyv_u64 odd = npyv_shli_u64(npyv_reinterpret_u64_f64(n), 63); - n = npyv_sub_f64(n, shift); - - /* r = |x| - n*pi (range reduction into -pi/2 .. pi/2). */ - r = simd_range_reduction_pi2(r, n); - - /* sin(r) poly approx. */ - npyv_f64 y = simd_approx_sine_poly_f64(r); - - /* sign. */ - return npyv_reinterpret_f64_u64(npyv_xor_u64(npyv_xor_u64(npyv_reinterpret_u64_f64(y), sign), odd)); -} - -/**begin repeat - * #op = cos, sin# - */ -NPY_FINLINE void -simd_@op@_f64(const double *src, npy_intp ssrc, double *dst, npy_intp sdst, npy_intp len) -{ - const npyv_u64 abs_mask = npyv_setall_u64(0x7fffffffffffffff); - const int vstep = npyv_nlanes_f64; - - npyv_f64 out = npyv_zero_f64(); - npyv_f64 x_in; - - for (; len > 0; len -= vstep, src += ssrc*vstep, dst += sdst*vstep) { - if (ssrc == 1) { - x_in = npyv_load_tillz_f64(src, len); - } else { - x_in = npyv_loadn_tillz_f64(src, ssrc, len); - } - - npyv_u64 ir = npyv_and_u64(npyv_reinterpret_u64_f64(x_in), abs_mask); - npyv_f64 r = npyv_reinterpret_f64_u64(ir); - npyv_u64 sign = npyv_and_u64(npyv_reinterpret_u64_f64(x_in), npyv_not_u64(abs_mask)); - - npyv_b64 cmp = simd_@op@_range_check_f64(ir); - /* If fenv exceptions are to be triggered correctly, set any special lanes - to 1 (which is neutral w.r.t. fenv). These lanes will be fixed by - scalar loop later. 
*/ - r = npyv_select_f64(cmp, npyv_setall_f64(1.0), r); - - // Some in range, at least one calculation is useful - if (!npyv_all_b64(cmp)) { - out = simd_@op@_poly_f64(r, ir, sign); - } - - if (npyv_any_b64(cmp)) { - out = npyv_select_f64(cmp, x_in, out); - out = simd_@op@_scalar_f64(out, npyv_tobits_b64(cmp)); - } - - if (sdst == 1) { - npyv_store_till_f64(dst, len, out); - } else { - npyv_storen_till_f64(dst, sdst, len, out); - } - } - npyv_cleanup(); -} -/**end repeat**/ -#endif // NPY_SIMD_F64 - -#if NPY_SIMD_F32 -/* - * Approximate cosine algorithm for x \in [-PI/4, PI/4] - * Maximum ULP across all 32-bit floats = 0.875 - */ -NPY_FINLINE npyv_f32 -simd_cosine_poly_f32(npyv_f32 x2) -{ - const npyv_f32 invf8 = npyv_setall_f32(0x1.98e616p-16f); - const npyv_f32 invf6 = npyv_setall_f32(-0x1.6c06dcp-10f); - const npyv_f32 invf4 = npyv_setall_f32(0x1.55553cp-05f); - const npyv_f32 invf2 = npyv_setall_f32(-0x1.000000p-01f); - const npyv_f32 invf0 = npyv_setall_f32(0x1.000000p+00f); - - npyv_f32 r = npyv_muladd_f32(invf8, x2, invf6); - r = npyv_muladd_f32(r, x2, invf4); - r = npyv_muladd_f32(r, x2, invf2); - r = npyv_muladd_f32(r, x2, invf0); - return r; -} -/* - * Approximate sine algorithm for x \in [-PI/4, PI/4] - * Maximum ULP across all 32-bit floats = 0.647 - * Polynomial approximation based on unpublished work by T. 
Myklebust - */ -NPY_FINLINE npyv_f32 -simd_sine_poly_f32(npyv_f32 x, npyv_f32 x2) -{ - const npyv_f32 invf9 = npyv_setall_f32(0x1.7d3bbcp-19f); - const npyv_f32 invf7 = npyv_setall_f32(-0x1.a06bbap-13f); - const npyv_f32 invf5 = npyv_setall_f32(0x1.11119ap-07f); - const npyv_f32 invf3 = npyv_setall_f32(-0x1.555556p-03f); - - npyv_f32 r = npyv_muladd_f32(invf9, x2, invf7); - r = npyv_muladd_f32(r, x2, invf5); - r = npyv_muladd_f32(r, x2, invf3); - r = npyv_muladd_f32(r, x2, npyv_zero_f32()); - r = npyv_muladd_f32(r, x, x); - return r; -} -/* - * Vectorized approximate sine/cosine algorithms: The following code is a - * vectorized version of the algorithm presented here: - * https://stackoverflow.com/questions/30463616/payne-hanek-algorithm-implementation-in-c/30465751#30465751 - * (1) Load data in registers and generate mask for elements that are - * within range [-71476.0625f, 71476.0625f] for cosine and [-117435.992f, - * 117435.992f] for sine. - * (2) For elements within range, perform range reduction using Cody-Waite's - * method: x* = x - y*PI/2, where y = rint(x*2/PI). x* \in [-PI/4, PI/4]. - * (3) Map cos(x) to (+/-)sine or (+/-)cosine of x* based on the quadrant k = - * int(y). - * (4) For elements outside that range, Cody-Waite reduction performs poorly - * leading to catastrophic cancellation. We compute cosine by calling glibc in - * a scalar fashion. - * (5) Vectorized implementation has a max ULP of 1.49 and performs at least - * 5-7x(x86) - 2.5-3x(Power) - 1-2x(Arm) faster than scalar implementations - * when magnitude of all elements in the array < 71476.0625f (117435.992f for sine). - * Worst case performance is when all the elements are large leading to about 1-2% reduction in - * performance. 
- */ -typedef enum -{ - SIMD_COMPUTE_SIN, - SIMD_COMPUTE_COS -} SIMD_TRIG_OP; - -static void SIMD_MSVC_NOINLINE -simd_sincos_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, - npy_intp len, SIMD_TRIG_OP trig_op) -{ - // Load up frequently used constants - const npyv_f32 zerosf = npyv_zero_f32(); - const npyv_s32 ones = npyv_setall_s32(1); - const npyv_s32 twos = npyv_setall_s32(2); - const npyv_f32 two_over_pi = npyv_setall_f32(0x1.45f306p-1f); - const npyv_f32 codyw_pio2_highf = npyv_setall_f32(-0x1.921fb0p+00f); - const npyv_f32 codyw_pio2_medf = npyv_setall_f32(-0x1.5110b4p-22f); - const npyv_f32 codyw_pio2_lowf = npyv_setall_f32(-0x1.846988p-48f); - const npyv_f32 rint_cvt_magic = npyv_setall_f32(0x1.800000p+23f); - // Cody-Waite's range - float max_codi = 117435.992f; - if (trig_op == SIMD_COMPUTE_COS) { - max_codi = 71476.0625f; - } - const npyv_f32 max_cody = npyv_setall_f32(max_codi); - const int vstep = npyv_nlanes_f32; - - for (; len > 0; len -= vstep, src += ssrc*vstep, dst += sdst*vstep) { - npyv_f32 x_in; - if (ssrc == 1) { - x_in = npyv_load_tillz_f32(src, len); - } else { - x_in = npyv_loadn_tillz_f32(src, ssrc, len); - } - npyv_b32 nnan_mask = npyv_notnan_f32(x_in); - #if NPY_SIMD_CMPSIGNAL - // Eliminate NaN to avoid FP invalid exception - x_in = npyv_and_f32(x_in, npyv_reinterpret_f32_u32(npyv_cvt_u32_b32(nnan_mask))); - #endif - npyv_b32 simd_mask = npyv_cmple_f32(npyv_abs_f32(x_in), max_cody); - npy_uint64 simd_maski = npyv_tobits_b32(simd_mask); - /* - * For elements outside of this range, Cody-Waite's range reduction - * becomes inaccurate and we will call libc to compute cosine for - * these numbers - */ - if (simd_maski != 0) { - npyv_f32 x = npyv_select_f32(npyv_and_b32(nnan_mask, simd_mask), x_in, zerosf); - - npyv_f32 quadrant = npyv_mul_f32(x, two_over_pi); - // round to nearest, -0.0f -> +0.0f, and |a| must be <= 0x1.0p+22 - quadrant = npyv_add_f32(quadrant, rint_cvt_magic); - quadrant = npyv_sub_f32(quadrant, 
rint_cvt_magic); - - // Cody-Waite's range reduction algorithm - npyv_f32 reduced_x = simd_range_reduction_f32( - x, quadrant, codyw_pio2_highf, codyw_pio2_medf, codyw_pio2_lowf - ); - npyv_f32 reduced_x2 = npyv_square_f32(reduced_x); - - // compute cosine and sine - npyv_f32 cos = simd_cosine_poly_f32(reduced_x2); - npyv_f32 sin = simd_sine_poly_f32(reduced_x, reduced_x2); - - npyv_s32 iquadrant = npyv_round_s32_f32(quadrant); - if (trig_op == SIMD_COMPUTE_COS) { - iquadrant = npyv_add_s32(iquadrant, ones); - } - // blend sin and cos based on the quadrant - npyv_b32 sine_mask = npyv_cmpeq_s32(npyv_and_s32(iquadrant, ones), npyv_zero_s32()); - cos = npyv_select_f32(sine_mask, sin, cos); - - // multiply by -1 for appropriate elements - npyv_b32 negate_mask = npyv_cmpeq_s32(npyv_and_s32(iquadrant, twos), twos); - cos = npyv_ifsub_f32(negate_mask, zerosf, cos, cos); - cos = npyv_select_f32(nnan_mask, cos, npyv_setall_f32(NPY_NANF)); - - if (sdst == 1) { - npyv_store_till_f32(dst, len, cos); - } else { - npyv_storen_till_f32(dst, sdst, len, cos); - } - } - if (simd_maski != (npy_uint64)((1 << vstep) - 1)) { - float NPY_DECL_ALIGNED(NPY_SIMD_WIDTH) ip_fback[npyv_nlanes_f32]; - npyv_storea_f32(ip_fback, x_in); - - // process elements using libc for large elements - if (trig_op == SIMD_COMPUTE_COS) { - for (unsigned i = 0; i < npyv_nlanes_f32; ++i) { - if ((simd_maski >> i) & 1) { - continue; - } - dst[sdst*i] = npy_cosf(ip_fback[i]); - } - } - else { - for (unsigned i = 0; i < npyv_nlanes_f32; ++i) { - if ((simd_maski >> i) & 1) { - continue; - } - dst[sdst*i] = npy_sinf(ip_fback[i]); - } - } - } - } - npyv_cleanup(); -} -#endif // NPY_SIMD_FP32 -#endif // NYP_SIMD_FMA3 - -/**begin repeat - * #func = cos, sin# - */ -NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(DOUBLE_@func@) -(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) -{ - /* Disable SIMD code and revert to libm: see - * 
https://mail.python.org/archives/list/numpy-discussion@python.org/thread/C6EYZZSR4EWGVKHAZXLE7IBILRMNVK7L/ - * for detailed discussion on this*/ -//#if NPY_SIMD_F64 && NPY_SIMD_FMA3 -#if 0 - const double *src = (double*)args[0]; - double *dst = (double*)args[1]; - const int lsize = sizeof(src[0]); - const npy_intp ssrc = steps[0] / lsize; - const npy_intp sdst = steps[1] / lsize; - npy_intp len = dimensions[0]; - assert(len <= 1 || (steps[0] % lsize == 0 && steps[1] % lsize == 0)); - - if (is_mem_overlap(src, steps[0], dst, steps[1], len) || - !npyv_loadable_stride_f64(ssrc) || !npyv_storable_stride_f64(sdst) - ) { - for (; len > 0; --len, src += ssrc, dst += sdst) { - simd_@func@_f64(src, 1, dst, 1, 1); - } - } else { - simd_@func@_f64(src, ssrc, dst, sdst, len); - } -#else - UNARY_LOOP { - const npy_double in1 = *(npy_double *)ip1; - *(npy_double *)op1 = npy_@func@(in1); - } -#endif -} -/**end repeat**/ - -/**begin repeat - * #func = sin, cos# - * #enum = SIMD_COMPUTE_SIN, SIMD_COMPUTE_COS# - */ -NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(FLOAT_@func@) -(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) -{ -#if NPY_SIMD_F32 && NPY_SIMD_FMA3 - const npy_float *src = (npy_float*)args[0]; - npy_float *dst = (npy_float*)args[1]; - - const int lsize = sizeof(src[0]); - const npy_intp ssrc = steps[0] / lsize; - const npy_intp sdst = steps[1] / lsize; - npy_intp len = dimensions[0]; - assert(len <= 1 || (steps[0] % lsize == 0 && steps[1] % lsize == 0)); - if (is_mem_overlap(src, steps[0], dst, steps[1], len) || - !npyv_loadable_stride_f32(ssrc) || !npyv_storable_stride_f32(sdst) - ) { - for (; len > 0; --len, src += ssrc, dst += sdst) { - simd_sincos_f32(src, 1, dst, 1, 1, @enum@); - } - } else { - simd_sincos_f32(src, ssrc, dst, sdst, len, @enum@); - } -#else - UNARY_LOOP { - const npy_float in1 = *(npy_float *)ip1; - *(npy_float *)op1 = npy_@func@f(in1); - } -#endif -} -/**end repeat**/ diff --git 
a/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp new file mode 100644 index 000000000000..d298a8596cc4 --- /dev/null +++ b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp @@ -0,0 +1,299 @@ +#include "fast_loop_macros.h" +#include "loops.h" +#include "loops_utils.h" + +#include "simd/simd.h" +#include "simd/simd.hpp" +#include + +namespace hn = hwy::HWY_NAMESPACE; + +/* + * Vectorized approximate sine/cosine algorithms: The following code is a + * vectorized version of the algorithm presented here: + * https://stackoverflow.com/questions/30463616/payne-hanek-algorithm-implementation-in-c/30465751#30465751 + * (1) Load data in registers and generate mask for elements that are within + * range [-71476.0625f, 71476.0625f] for cosine and [-117435.992f, 117435.992f] + * for sine. + * (2) For elements within range, perform range reduction using + * Cody-Waite's method: x* = x - y*PI/2, where y = rint(x*2/PI). x* \in [-PI/4, + * PI/4]. + * (3) Map cos(x) to (+/-)sine or (+/-)cosine of x* based on the + * quadrant k = int(y). + * (4) For elements outside that range, Cody-Waite + * reduction performs poorly leading to catastrophic cancellation. We compute + * cosine by calling glibc in a scalar fashion. + * (5) Vectorized implementation + * has a max ULP of 1.49 and performs at least 5-7x(x86) - 2.5-3x(Power) - + * 1-2x(Arm) faster than scalar implementations when magnitude of all elements + * in the array < 71476.0625f (117435.992f for sine). Worst case performance + * is when all the elements are large leading to about 1-2% reduction in + * performance. 
+ * TODO: use vectorized version of Payne-Hanek style reduction for large + * elements or when there's no native FUSED support instead of fallback to libc + */ + +#if NPY_SIMD_FMA3 // native support +typedef enum { + SIMD_COMPUTE_SIN, + SIMD_COMPUTE_COS +} SIMD_TRIG_OP; + +const hn::ScalableTag f32; +const hn::ScalableTag s32; +using vec_f32 = hn::Vec; +using vec_s32 = hn::Vec; +using opmask_t = hn::Mask; + +HWY_INLINE HWY_ATTR vec_f32 +simd_range_reduction_f32(vec_f32 &x, vec_f32 &y, const vec_f32 &c1, + const vec_f32 &c2, const vec_f32 &c3) +{ + vec_f32 reduced_x = hn::MulAdd(y, c1, x); + reduced_x = hn::MulAdd(y, c2, reduced_x); + reduced_x = hn::MulAdd(y, c3, reduced_x); + return reduced_x; +} + +HWY_INLINE HWY_ATTR vec_f32 +simd_cosine_poly_f32(vec_f32 &x2) +{ + const vec_f32 invf8 = hn::Set(f32, 0x1.98e616p-16f); + const vec_f32 invf6 = hn::Set(f32, -0x1.6c06dcp-10f); + const vec_f32 invf4 = hn::Set(f32, 0x1.55553cp-05f); + const vec_f32 invf2 = hn::Set(f32, -0x1.000000p-01f); + const vec_f32 invf0 = hn::Set(f32, 0x1.000000p+00f); + + vec_f32 r = hn::MulAdd(invf8, x2, invf6); + r = hn::MulAdd(r, x2, invf4); + r = hn::MulAdd(r, x2, invf2); + r = hn::MulAdd(r, x2, invf0); + return r; +} +/* + * Approximate sine algorithm for x \in [-PI/4, PI/4] + * Maximum ULP across all 32-bit floats = 0.647 + * Polynomial approximation based on unpublished work by T. 
Myklebust + */ +HWY_INLINE HWY_ATTR vec_f32 +simd_sine_poly_f32(vec_f32 &x, vec_f32 &x2) +{ + const vec_f32 invf9 = hn::Set(f32, 0x1.7d3bbcp-19f); + const vec_f32 invf7 = hn::Set(f32, -0x1.a06bbap-13f); + const vec_f32 invf5 = hn::Set(f32, 0x1.11119ap-07f); + const vec_f32 invf3 = hn::Set(f32, -0x1.555556p-03f); + + vec_f32 r = hn::MulAdd(invf9, x2, invf7); + r = hn::MulAdd(r, x2, invf5); + r = hn::MulAdd(r, x2, invf3); + r = hn::MulAdd(r, x2, hn::Zero(f32)); + r = hn::MulAdd(r, x, x); + return r; +} + +static void HWY_ATTR SIMD_MSVC_NOINLINE +simd_sincos_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, + npy_intp len, SIMD_TRIG_OP trig_op) +{ + // Load up frequently used constants + const vec_f32 zerosf = hn::Zero(f32); + const vec_s32 ones = hn::Set(s32, 1); + const vec_s32 twos = hn::Set(s32, 2); + const vec_f32 two_over_pi = hn::Set(f32, 0x1.45f306p-1f); + const vec_f32 codyw_pio2_highf = hn::Set(f32, -0x1.921fb0p+00f); + const vec_f32 codyw_pio2_medf = hn::Set(f32, -0x1.5110b4p-22f); + const vec_f32 codyw_pio2_lowf = hn::Set(f32, -0x1.846988p-48f); + const vec_f32 rint_cvt_magic = hn::Set(f32, 0x1.800000p+23f); + // Cody-Waite's range + float max_codi = 117435.992f; + if (trig_op == SIMD_COMPUTE_COS) { + max_codi = 71476.0625f; + } + const vec_f32 max_cody = hn::Set(f32, max_codi); + + const int lanes = hn::Lanes(f32); + const vec_s32 src_index = hn::Mul(hn::Iota(s32, 0), hn::Set(s32, ssrc)); + const vec_s32 dst_index = hn::Mul(hn::Iota(s32, 0), hn::Set(s32, sdst)); + + for (; len > 0; len -= lanes, src += ssrc * lanes, dst += sdst * lanes) { + vec_f32 x_in; + if (ssrc == 1) { + x_in = hn::LoadN(f32, src, len); + } + else { + x_in = hn::GatherIndexN(f32, src, src_index, len); + } + opmask_t nnan_mask = hn::Not(hn::IsNaN(x_in)); + // Eliminate NaN to avoid FP invalid exception + x_in = hn::IfThenElse(nnan_mask, x_in, zerosf); + opmask_t simd_mask = hn::Le(hn::Abs(x_in), max_cody); + /* + * For elements outside of this range, Cody-Waite's range 
reduction + * becomes inaccurate and we will call libc to compute cosine for + * these numbers + */ + if (!hn::AllFalse(f32, simd_mask)) { + vec_f32 x = hn::IfThenElse(hn::And(nnan_mask, simd_mask), x_in, + zerosf); + + vec_f32 quadrant = hn::Mul(x, two_over_pi); + // round to nearest, -0.0f -> +0.0f, and |a| must be <= 0x1.0p+22 + quadrant = hn::Add(quadrant, rint_cvt_magic); + quadrant = hn::Sub(quadrant, rint_cvt_magic); + + // Cody-Waite's range reduction algorithm + vec_f32 reduced_x = + simd_range_reduction_f32(x, quadrant, codyw_pio2_highf, + codyw_pio2_medf, codyw_pio2_lowf); + vec_f32 reduced_x2 = hn::Mul(reduced_x, reduced_x); + + // compute cosine and sine + vec_f32 cos = simd_cosine_poly_f32(reduced_x2); + vec_f32 sin = simd_sine_poly_f32(reduced_x, reduced_x2); + + vec_s32 iquadrant = hn::NearestInt(quadrant); + if (trig_op == SIMD_COMPUTE_COS) { + iquadrant = hn::Add(iquadrant, ones); + } + // blend sin and cos based on the quadrant + opmask_t sine_mask = hn::RebindMask( + f32, hn::Eq(hn::And(iquadrant, ones), hn::Zero(s32))); + cos = hn::IfThenElse(sine_mask, sin, cos); + + // multiply by -1 for appropriate elements + opmask_t negate_mask = hn::RebindMask( + f32, hn::Eq(hn::And(iquadrant, twos), twos)); + cos = hn::MaskedSubOr(cos, negate_mask, zerosf, cos); + cos = hn::IfThenElse(nnan_mask, cos, hn::Set(f32, NPY_NANF)); + + if (sdst == 1) { + hn::StoreN(cos, f32, dst, len); + } + else { + hn::ScatterIndexN(cos, f32, dst, dst_index, len); + } + } + if (!hn::AllTrue(f32, simd_mask)) { + static_assert(hn::MaxLanes(f32) <= 64, + "The following fallback is not applicable for " + "SIMD widths larger than 2048 bits, or for scalable " + "SIMD in general."); + npy_uint64 simd_maski; + hn::StoreMaskBits(f32, simd_mask, (uint8_t *)&simd_maski); +#if HWY_IS_BIG_ENDIAN + static_assert(hn::MaxLanes(f32) <= 8, + "This conversion is not supported for SIMD widths " + "larger than 256 bits."); + simd_maski = ((uint8_t *)&simd_maski)[0]; +#endif + float 
NPY_DECL_ALIGNED(NPY_SIMD_WIDTH) ip_fback[hn::MaxLanes(f32)]; + hn::Store(x_in, f32, ip_fback); + + // process elements using libc for large elements + if (trig_op == SIMD_COMPUTE_COS) { + for (unsigned i = 0; i < hn::Lanes(f32); ++i) { + if ((simd_maski >> i) & 1) { + continue; + } + dst[sdst * i] = npy_cosf(ip_fback[i]); + } + } + else { + for (unsigned i = 0; i < hn::Lanes(f32); ++i) { + if ((simd_maski >> i) & 1) { + continue; + } + dst[sdst * i] = npy_sinf(ip_fback[i]); + } + } + } + npyv_cleanup(); + } +} +#endif // NPY_SIMD_FMA3 + +/* Disable SIMD code sin/cos f64 and revert to libm: see + * https://mail.python.org/archives/list/numpy-discussion@python.org/thread/C6EYZZSR4EWGVKHAZXLE7IBILRMNVK7L/ + * for detailed discussion on this*/ +#define DISPATCH_DOUBLE_FUNC(func) \ + NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(DOUBLE_##func)( \ + char **args, npy_intp const *dimensions, npy_intp const *steps, \ + void *NPY_UNUSED(data)) \ + { \ + UNARY_LOOP \ + { \ + const npy_double in1 = *(npy_double *)ip1; \ + *(npy_double *)op1 = npy_##func(in1); \ + } \ + } + +DISPATCH_DOUBLE_FUNC(sin) +DISPATCH_DOUBLE_FUNC(cos) + +NPY_NO_EXPORT void +NPY_CPU_DISPATCH_CURFX(FLOAT_sin)(char **args, npy_intp const *dimensions, + npy_intp const *steps, + void *NPY_UNUSED(data)) +{ +#if NPY_SIMD_FMA3 + npy_intp len = dimensions[0]; + + if (is_mem_overlap(args[0], steps[0], args[1], steps[1], len) || + !npyv_loadable_stride_f32(steps[0]) || + !npyv_storable_stride_f32(steps[1])) { + UNARY_LOOP + { + simd_sincos_f32((npy_float *)ip1, 1, (npy_float *)op1, 1, 1, + SIMD_COMPUTE_SIN); + } + } + else { + const npy_float *src = (npy_float *)args[0]; + npy_float *dst = (npy_float *)args[1]; + const npy_intp ssrc = steps[0] / sizeof(npy_float); + const npy_intp sdst = steps[1] / sizeof(npy_float); + + simd_sincos_f32(src, ssrc, dst, sdst, len, SIMD_COMPUTE_SIN); + } +#else + UNARY_LOOP + { + const npy_float in1 = *(npy_float *)ip1; + *(npy_float *)op1 = npy_sinf(in1); + } +#endif +} + 
+NPY_NO_EXPORT void +NPY_CPU_DISPATCH_CURFX(FLOAT_cos)(char **args, npy_intp const *dimensions, + npy_intp const *steps, + void *NPY_UNUSED(data)) +{ +#if NPY_SIMD_FMA3 + npy_intp len = dimensions[0]; + + if (is_mem_overlap(args[0], steps[0], args[1], steps[1], len) || + !npyv_loadable_stride_f32(steps[0]) || + !npyv_storable_stride_f32(steps[1])) { + UNARY_LOOP + { + simd_sincos_f32((npy_float *)ip1, 1, (npy_float *)op1, 1, 1, + SIMD_COMPUTE_COS); + } + } + else { + const npy_float *src = (npy_float *)args[0]; + npy_float *dst = (npy_float *)args[1]; + const npy_intp ssrc = steps[0] / sizeof(npy_float); + const npy_intp sdst = steps[1] / sizeof(npy_float); + + simd_sincos_f32(src, ssrc, dst, sdst, len, SIMD_COMPUTE_COS); + } +#else + UNARY_LOOP + { + const npy_float in1 = *(npy_float *)ip1; + *(npy_float *)op1 = npy_cosf(in1); + } +#endif +} diff --git a/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src b/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src index 74af8edaa1f5..1a6dbbb9cac3 100644 --- a/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src +++ b/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src @@ -1,6 +1,3 @@ -/*@targets - ** $maxopt baseline avx512_skx avx512_spr - */ #include "numpy/npy_math.h" #include "simd/simd.h" #include "loops_utils.h" @@ -98,93 +95,8 @@ simd_@func@_@sfx@(const npyv_lanetype_@sfx@ *src1, npy_intp ssrc1, } /**end repeat1**/ /**end repeat**/ - -typedef __m256i npyvh_f16; -#define npyv_cvt_f16_f32 _mm512_cvtph_ps -#define npyv_cvt_f32_f16 _mm512_cvtps_ph -#define npyvh_load_f16(PTR) _mm256_loadu_si256((const __m256i*)(PTR)) -#define npyvh_store_f16(PTR, data) _mm256_storeu_si256((__m256i*)PTR, data) -NPY_FINLINE npyvh_f16 npyvh_load_till_f16(const npy_half *ptr, npy_uintp nlane, npy_half fill) -{ - assert(nlane > 0); - const __m256i vfill = _mm256_set1_epi16(fill); - const __mmask16 mask = (0x0001 << nlane) - 0x0001; - return _mm256_mask_loadu_epi16(vfill, mask, ptr); -} -NPY_FINLINE void npyvh_store_till_f16(npy_half *ptr, 
npy_uintp nlane, npyvh_f16 data) -{ - assert(nlane > 0); - const __mmask16 mask = (0x0001 << nlane) - 0x0001; - _mm256_mask_storeu_epi16(ptr, mask, data); -} - -/**begin repeat - * #func = sin, cos, tan, exp, exp2, expm1, log, log2, log10, log1p, cbrt, asin, acos, atan, sinh, cosh, tanh, asinh, acosh, atanh# - * #default_val = 0, 0, 0, 0, 0, 0x3c00, 0x3c00, 0x3c00, 0x3c00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x3c00, 0# - */ -static void -avx512_@func@_f16(const npy_half *src, npy_half *dst, npy_intp len) -{ - const int num_lanes = npyv_nlanes_f32; - npyvh_f16 x, out; - npyv_f32 x_ps, out_ps; - for (; len > 0; len -= num_lanes, src += num_lanes, dst += num_lanes) { - if (len >= num_lanes) { - x = npyvh_load_f16(src); - x_ps = npyv_cvt_f16_f32(x); - out_ps = __svml_@func@f16(x_ps); - out = npyv_cvt_f32_f16(out_ps, 0); - npyvh_store_f16(dst, out); - } - else { - x = npyvh_load_till_f16(src, len, @default_val@); - x_ps = npyv_cvt_f16_f32(x); - out_ps = __svml_@func@f16(x_ps); - out = npyv_cvt_f32_f16(out_ps, 0); - npyvh_store_till_f16(dst, len, out); - } - } - npyv_cleanup(); -} -/**end repeat**/ #endif -/**begin repeat - * #func = sin, cos, tan, exp, exp2, expm1, log, log2, log10, log1p, cbrt, arcsin, arccos, arctan, sinh, cosh, tanh, arcsinh, arccosh, arctanh# - * #intrin = sin, cos, tan, exp, exp2, expm1, log, log2, log10, log1p, cbrt, asin, acos, atan, sinh, cosh, tanh, asinh, acosh, atanh# - */ -NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(HALF_@func@) -(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) -{ -#if defined(NPY_HAVE_AVX512_SPR) || defined(NPY_HAVE_AVX512_SKX) -#if NPY_SIMD && defined(NPY_CAN_LINK_SVML) - const npy_half *src = (npy_half*)args[0]; - npy_half *dst = (npy_half*)args[1]; - const int lsize = sizeof(src[0]); - const npy_intp ssrc = steps[0] / lsize; - const npy_intp sdst = steps[1] / lsize; - const npy_intp len = dimensions[0]; - if (!is_mem_overlap(src, steps[0], dst, steps[1], len) && - (ssrc == 1) && - (sdst == 
1)) { -#if defined(NPY_HAVE_AVX512_SPR) - __svml_@intrin@s32(src, dst, len); - return; -#endif -#if defined(NPY_HAVE_AVX512_SKX) - avx512_@intrin@_f16(src, dst, len); - return; -#endif - } -#endif // NPY_SIMD && NPY_CAN_LINK_SVML -#endif // SPR or SKX - UNARY_LOOP { - const npy_float in1 = npy_half_to_float(*(npy_half *)ip1); - *((npy_half *)op1) = npy_float_to_half(npy_@intrin@f(in1)); - } -} -/**end repeat**/ - /**begin repeat * #TYPE = DOUBLE, FLOAT# * #type = npy_double, npy_float# @@ -201,14 +113,15 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@) #if NPY_SIMD && defined(NPY_HAVE_AVX512_SKX) && defined(NPY_CAN_LINK_SVML) const @type@ *src = (@type@*)args[0]; @type@ *dst = (@type@*)args[1]; - const int lsize = sizeof(src[0]); - const npy_intp ssrc = steps[0] / lsize; - const npy_intp sdst = steps[1] / lsize; + const npy_intp len = dimensions[0]; - assert(len <= 1 || (steps[0] % lsize == 0 && steps[1] % lsize == 0)); + if (!is_mem_overlap(src, steps[0], dst, steps[1], len) && - npyv_loadable_stride_@sfx@(ssrc) && - npyv_storable_stride_@sfx@(sdst)) { + npyv_loadable_stride_@sfx@(steps[0]) && + npyv_storable_stride_@sfx@(steps[1])) + { + const npy_intp ssrc = steps[0] / sizeof(@type@); + const npy_intp sdst = steps[1] / sizeof(@type@); simd_@intrin@_@sfx@(src, ssrc, dst, sdst, len); return; } @@ -239,11 +152,30 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@) if (stride_zero) { BINARY_DEFS const @type@ in2 = *(@type@ *)ip2; - if (in2 == 2.0) { - BINARY_LOOP_SLIDING { - const @type@ in1 = *(@type@ *)ip1; + int fastop_found = 1; + BINARY_LOOP_SLIDING { + const @type@ in1 = *(@type@ *)ip1; + if (in2 == -1.0) { + *(@type@ *)op1 = 1.0 / in1; + } + else if (in2 == 0.0) { + *(@type@ *)op1 = 1.0; + } + else if (in2 == 0.5) { + *(@type@ *)op1 = @sqrt@(in1); + } + else if (in2 == 1.0) { + *(@type@ *)op1 = in1; + } + else if (in2 == 2.0) { *(@type@ *)op1 = in1 * in1; } + else { + fastop_found = 0; + break; + } + } + if (fastop_found) { return; } } @@ 
-251,15 +183,19 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@) const @type@ *src1 = (@type@*)args[0]; const @type@ *src2 = (@type@*)args[1]; @type@ *dst = (@type@*)args[2]; - const int lsize = sizeof(src1[0]); - const npy_intp ssrc1 = steps[0] / lsize; - const npy_intp ssrc2 = steps[1] / lsize; - const npy_intp sdst = steps[2] / lsize; + const npy_intp len = dimensions[0]; - assert(len <= 1 || (steps[0] % lsize == 0 && steps[1] % lsize == 0)); - if (!is_mem_overlap(src1, steps[0], dst, steps[2], len) && !is_mem_overlap(src2, steps[1], dst, steps[2], len) && - npyv_loadable_stride_@sfx@(ssrc1) && npyv_loadable_stride_@sfx@(ssrc2) && - npyv_storable_stride_@sfx@(sdst)) { + + if (!is_mem_overlap(src1, steps[0], dst, steps[2], len) && + !is_mem_overlap(src2, steps[1], dst, steps[2], len) && + npyv_loadable_stride_@sfx@(steps[0]) && + npyv_loadable_stride_@sfx@(steps[1]) && + npyv_storable_stride_@sfx@(steps[2]) + ) { + const npy_intp ssrc1 = steps[0] / sizeof(@type@); + const npy_intp ssrc2 = steps[1] / sizeof(@type@); + const npy_intp sdst = steps[2] / sizeof(@type@); + simd_@intrin@_@sfx@(src1, ssrc1, src2, ssrc2, dst, sdst, len); return; } @@ -283,15 +219,19 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@) const @type@ *src1 = (@type@*)args[0]; const @type@ *src2 = (@type@*)args[1]; @type@ *dst = (@type@*)args[2]; - const int lsize = sizeof(src1[0]); - const npy_intp ssrc1 = steps[0] / lsize; - const npy_intp ssrc2 = steps[1] / lsize; - const npy_intp sdst = steps[2] / lsize; + const npy_intp len = dimensions[0]; - assert(len <= 1 || (steps[0] % lsize == 0 && steps[1] % lsize == 0)); - if (!is_mem_overlap(src1, steps[0], dst, steps[2], len) && !is_mem_overlap(src2, steps[1], dst, steps[2], len) && - npyv_loadable_stride_@sfx@(ssrc1) && npyv_loadable_stride_@sfx@(ssrc2) && - npyv_storable_stride_@sfx@(sdst)) { + + if (!is_mem_overlap(src1, steps[0], dst, steps[2], len) && + !is_mem_overlap(src2, steps[1], dst, steps[2], len) && + 
npyv_loadable_stride_@sfx@(steps[0]) && + npyv_loadable_stride_@sfx@(steps[1]) && + npyv_storable_stride_@sfx@(steps[2]) + ) { + const npy_intp ssrc1 = steps[0] / sizeof(@type@); + const npy_intp ssrc2 = steps[1] / sizeof(@type@); + const npy_intp sdst = steps[2] / sizeof(@type@); + simd_@intrin@_@sfx@(src1, ssrc1, src2, ssrc2, dst, sdst, len); return; } diff --git a/numpy/_core/src/umath/loops_unary.dispatch.c.src b/numpy/_core/src/umath/loops_unary.dispatch.c.src index bfe4d892d0c9..951aa5be5240 100644 --- a/numpy/_core/src/umath/loops_unary.dispatch.c.src +++ b/numpy/_core/src/umath/loops_unary.dispatch.c.src @@ -1,11 +1,3 @@ -/*@targets - ** $maxopt baseline - ** neon asimd - ** sse2 avx2 avx512_skx - ** vsx2 - ** vx vxe - **/ - #define _UMATHMODULE #define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION @@ -298,12 +290,12 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) goto clear; } #if @supports_ncontig@ - const npy_intp istride = istep / sizeof(STYPE); - const npy_intp ostride = ostep / sizeof(STYPE); - if (TO_SIMD_SFX(npyv_loadable_stride)(istride) && - TO_SIMD_SFX(npyv_storable_stride)(ostride)) + if (TO_SIMD_SFX(npyv_loadable_stride)(istep) && + TO_SIMD_SFX(npyv_storable_stride)(ostep)) { - if (istride == 1 && ostride != 1) { + const npy_intp istride = istep / sizeof(STYPE); + const npy_intp ostride = ostep / sizeof(STYPE); + if (istride == sizeof(STYPE) && ostride != 1) { // contiguous input, non-contiguous output TO_SIMD_SFX(simd_unary_cn_@intrin@)( (STYPE*)ip, (STYPE*)op, ostride, len diff --git a/numpy/_core/src/umath/loops_unary_complex.dispatch.c.src b/numpy/_core/src/umath/loops_unary_complex.dispatch.c.src index 052ad464c7a8..4b4457e6aada 100644 --- a/numpy/_core/src/umath/loops_unary_complex.dispatch.c.src +++ b/numpy/_core/src/umath/loops_unary_complex.dispatch.c.src @@ -1,10 +1,3 @@ -/*@targets - ** $maxopt baseline - ** sse2 (avx2 fma3) avx512f - ** neon asimd - ** vsx2 vsx3 - ** vx vxe - **/ #define _UMATHMODULE 
#define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION @@ -88,14 +81,14 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_absolute) { #if @VECTOR@ npy_intp len = dimensions[0]; - npy_intp ssrc = steps[0] / sizeof(@ftype@); - npy_intp sdst = steps[1] / sizeof(@ftype@); if (!is_mem_overlap(args[0], steps[0], args[1], steps[1], len) && - npyv_loadable_stride_@sfx@(ssrc) && npyv_storable_stride_@sfx@(sdst) - && steps[0] % sizeof(@ftype@) == 0 - && steps[1] % sizeof(@ftype@) == 0 + npyv_loadable_stride_@sfx@(steps[0]) && + npyv_storable_stride_@sfx@(steps[1]) ) { + npy_intp ssrc = steps[0] / sizeof(@ftype@); + npy_intp sdst = steps[1] / sizeof(@ftype@); + const @ftype@ *src = (@ftype@*)args[0]; @ftype@ *dst = (@ftype@*)args[1]; diff --git a/numpy/_core/src/umath/loops_unary_fp.dispatch.c.src b/numpy/_core/src/umath/loops_unary_fp.dispatch.c.src index f6404f6f7d68..85f74839eba7 100644 --- a/numpy/_core/src/umath/loops_unary_fp.dispatch.c.src +++ b/numpy/_core/src/umath/loops_unary_fp.dispatch.c.src @@ -1,10 +1,3 @@ -/*@targets - ** $maxopt baseline - ** sse2 sse41 - ** vsx2 - ** neon asimd - ** vx vxe - **/ /** * Force use SSE only on x86, even if AVX2 or AVX512F are enabled * through the baseline, since scatter(AVX512F) and gather very costly @@ -212,15 +205,16 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) npy_intp len = dimensions[0]; #if @VCHK@ const int lsize = sizeof(npyv_lanetype_@sfx@); - assert(len <= 1 || (src_step % lsize == 0 && dst_step % lsize == 0)); + if (is_mem_overlap(src, src_step, dst, dst_step, len)) { goto no_unroll; } - const npy_intp ssrc = src_step / lsize; - const npy_intp sdst = dst_step / lsize; - if (!npyv_loadable_stride_@sfx@(ssrc) || !npyv_storable_stride_@sfx@(sdst)) { + if (!npyv_loadable_stride_@sfx@(src_step) || !npyv_storable_stride_@sfx@(dst_step)) { goto no_unroll; } + + const npy_intp ssrc = src_step / lsize; + const npy_intp sdst = dst_step / lsize; if (ssrc == 1 && sdst == 1) { 
simd_@TYPE@_@kind@_CONTIG_CONTIG(src, 1, dst, 1, len); } diff --git a/numpy/_core/src/umath/loops_unary_fp_le.dispatch.c.src b/numpy/_core/src/umath/loops_unary_fp_le.dispatch.c.src index ba133dc1e60f..ca02bc85608e 100644 --- a/numpy/_core/src/umath/loops_unary_fp_le.dispatch.c.src +++ b/numpy/_core/src/umath/loops_unary_fp_le.dispatch.c.src @@ -1,10 +1,3 @@ -/*@targets - ** $maxopt baseline - ** sse2 sse41 - ** vsx2 - ** neon asimd - **/ - /** * Force use SSE only on x86, even if AVX2 or AVX512F are enabled * through the baseline, since scatter(AVX512F) and gather very costly @@ -528,17 +521,14 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) const npy_intp istep = steps[0]; const npy_intp ostep = steps[1]; npy_intp len = dimensions[0]; - const int ilsize = sizeof(npyv_lanetype_@sfx@); - const int olsize = sizeof(npy_bool); - const npy_intp istride = istep / ilsize; - const npy_intp ostride = ostep / olsize; - assert(len <= 1 || ostep % olsize == 0); - - if ((istep % ilsize == 0) && - !is_mem_overlap(ip, istep, op, ostep, len) && - npyv_loadable_stride_@sfx@(istride) && - npyv_storable_stride_@sfx@(ostride)) + + if (!is_mem_overlap(ip, istep, op, ostep, len) && + npyv_loadable_stride_@sfx@(istep) && + npyv_storable_stride_@sfx@(ostep)) { + const npy_intp istride = istep / sizeof(npyv_lanetype_@sfx@); + const npy_intp ostride = ostep / sizeof(npy_bool); + if (istride == 1 && ostride == 1) { simd_unary_@kind@_@TYPE@_CONTIG_CONTIG(ip, 1, op, 1, len); } diff --git a/numpy/_core/src/umath/loops_utils.h.src b/numpy/_core/src/umath/loops_utils.h.src index 5640a1f0b646..828d16ee635c 100644 --- a/numpy/_core/src/umath/loops_utils.h.src +++ b/numpy/_core/src/umath/loops_utils.h.src @@ -16,28 +16,31 @@ #endif /* * nomemoverlap - returns false if two strided arrays have an overlapping - * region in memory. ip_size/op_size = size of the arrays which can be negative - * indicating negative steps. + * region in memory. 
*/ NPY_FINLINE npy_bool -nomemoverlap(char *ip, npy_intp ip_size, char *op, npy_intp op_size) +nomemoverlap(char *ip, npy_intp ip_step, char *op, npy_intp op_step, npy_intp len) { + // Calculate inclusive ranges for offsets of items in arrays. + // The end pointer points to address of the last item. + const npy_intp ip_offset = ip_step * (len - 1); + const npy_intp op_offset = op_step * (len - 1); char *ip_start, *ip_end, *op_start, *op_end; - if (ip_size < 0) { - ip_start = ip + ip_size; + if (ip_step < 0) { + ip_start = ip + ip_offset; ip_end = ip; } else { ip_start = ip; - ip_end = ip + ip_size; + ip_end = ip + ip_offset; } - if (op_size < 0) { - op_start = op + op_size; + if (op_step < 0) { + op_start = op + op_offset; op_end = op; } else { op_start = op; - op_end = op + op_size; + op_end = op + op_offset; } return (ip_start == op_start && op_end == ip_end) || (ip_start > op_end) || (op_start > ip_end); @@ -48,7 +51,7 @@ nomemoverlap(char *ip, npy_intp ip_size, char *op, npy_intp op_size) NPY_FINLINE npy_bool is_mem_overlap(const void *src, npy_intp src_step, const void *dst, npy_intp dst_step, npy_intp len) { - return !(nomemoverlap((char*)src, src_step*len, (char*)dst, dst_step*len)); + return !(nomemoverlap((char*)src, src_step, (char*)dst, dst_step, len)); } /* diff --git a/numpy/_core/src/umath/matmul.c.src b/numpy/_core/src/umath/matmul.c.src index 37f990f970ed..95d23995e630 100644 --- a/numpy/_core/src/umath/matmul.c.src +++ b/numpy/_core/src/umath/matmul.c.src @@ -16,6 +16,7 @@ +#include "blas_utils.h" #include "npy_cblas.h" #include "arraytypes.h" /* For TYPE_dot functions */ @@ -27,6 +28,8 @@ ***************************************************************************** */ +#define ABS(x) ((x) < 0 ? 
-(x) : (x)) + #if defined(HAVE_CBLAS) /* * -1 to be conservative, in case blas internally uses a for loop with an @@ -79,11 +82,52 @@ static const npy_cfloat oneF = 1.0f, zeroF = 0.0f; * #step1 = 1.F, 1., &oneF, &oneD# * #step0 = 0.F, 0., &zeroF, &zeroD# */ -NPY_NO_EXPORT void + +static inline void +@name@_matrix_copy(npy_bool transpose, + void *_ip, npy_intp is_m, npy_intp is_n, + void *_op, npy_intp os_m, npy_intp os_n, + npy_intp dm, npy_intp dn) +{ + + char *ip = (char *)_ip, *op = (char *)_op; + + npy_intp m, n, ib, ob; + + if (transpose) { + ib = is_m * dm, ob = os_m * dm; + + for (n = 0; n < dn; n++) { + for (m = 0; m < dm; m++) { + *(@ctype@ *)op = *(@ctype@ *)ip; + ip += is_m; + op += os_m; + } + ip += is_n - ib; + op += os_n - ob; + } + + return; + } + + ib = is_n * dn, ob = os_n * dn; + + for (m = 0; m < dm; m++) { + for (n = 0; n < dn; n++) { + *(@ctype@ *)op = *(@ctype@ *)ip; + ip += is_n; + op += os_n; + } + ip += is_m - ib; + op += os_m - ob; + } +} + +static void @name@_gemv(void *ip1, npy_intp is1_m, npy_intp is1_n, - void *ip2, npy_intp is2_n, npy_intp NPY_UNUSED(is2_p), - void *op, npy_intp op_m, npy_intp NPY_UNUSED(op_p), - npy_intp m, npy_intp n, npy_intp NPY_UNUSED(p)) + void *ip2, npy_intp is2_n, + void *op, npy_intp op_m, + npy_intp m, npy_intp n) { /* * Vector matrix multiplication -- Level 2 BLAS @@ -115,7 +159,7 @@ NPY_NO_EXPORT void is2_n / sizeof(@typ@), @step0@, op, op_m / sizeof(@typ@)); } -NPY_NO_EXPORT void +static void @name@_matmul_matrixmatrix(void *ip1, npy_intp is1_m, npy_intp is1_n, void *ip2, npy_intp is2_n, npy_intp is2_p, void *op, npy_intp os_m, npy_intp os_p, @@ -219,7 +263,7 @@ NPY_NO_EXPORT void * #IS_HALF = 0, 0, 0, 1, 0*13# */ -NPY_NO_EXPORT void +static void @TYPE@_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n, void *_ip2, npy_intp is2_n, npy_intp is2_p, void *_op, npy_intp os_m, npy_intp os_p, @@ -277,7 +321,7 @@ NPY_NO_EXPORT void } /**end repeat**/ -NPY_NO_EXPORT void +static void 
BOOL_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n, void *_ip2, npy_intp is2_n, npy_intp is2_p, void *_op, npy_intp os_m, npy_intp os_p, @@ -316,7 +360,7 @@ BOOL_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n, } } -NPY_NO_EXPORT void +static void OBJECT_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n, void *_ip2, npy_intp is2_n, npy_intp is2_p, void *_op, npy_intp os_m, npy_intp os_p, @@ -429,10 +473,43 @@ NPY_NO_EXPORT void npy_bool i2blasable = i2_c_blasable || i2_f_blasable; npy_bool o_c_blasable = is_blasable2d(os_m, os_p, dm, dp, sz); npy_bool o_f_blasable = is_blasable2d(os_p, os_m, dp, dm, sz); + npy_bool oblasable = o_c_blasable || o_f_blasable; npy_bool vector_matrix = ((dm == 1) && i2blasable && is_blasable2d(is1_n, sz, dn, 1, sz)); npy_bool matrix_vector = ((dp == 1) && i1blasable && is_blasable2d(is2_n, sz, dn, 1, sz)); + npy_bool noblas_fallback = too_big_for_blas || any_zero_dim; + npy_bool matrix_matrix = !noblas_fallback && !special_case; + npy_bool allocate_buffer = matrix_matrix && ( + !i1blasable || !i2blasable || !oblasable + ); + + uint8_t *tmp_ip12op = NULL; + void *tmp_ip1 = NULL, *tmp_ip2 = NULL, *tmp_op = NULL; + + if (allocate_buffer){ + npy_intp ip1_size = i1blasable ? 0 : sz * dm * dn, + ip2_size = i2blasable ? 0 : sz * dn * dp, + op_size = oblasable ? 
0 : sz * dm * dp, + total_size = ip1_size + ip2_size + op_size; + + tmp_ip12op = (uint8_t*)malloc(total_size); + + if (tmp_ip12op == NULL) { + PyGILState_STATE gil_state = PyGILState_Ensure(); + PyErr_SetString( + PyExc_MemoryError, "Out of memory in matmul" + ); + PyGILState_Release(gil_state); + + return; + } + + tmp_ip1 = tmp_ip12op; + tmp_ip2 = tmp_ip12op + ip1_size; + tmp_op = tmp_ip12op + ip1_size + ip2_size; + } + #endif for (iOuter = 0; iOuter < dOuter; iOuter++, @@ -440,11 +517,11 @@ NPY_NO_EXPORT void void *ip1=args[0], *ip2=args[1], *op=args[2]; #if @USEBLAS@ && defined(HAVE_CBLAS) /* - * TODO: refactor this out to a inner_loop_selector, in + * TODO: refactor this out to an inner_loop_selector, in * PyUFunc_MatmulLoopSelector. But that call does not have access to * n, m, p and strides. */ - if (too_big_for_blas || any_zero_dim) { + if (noblas_fallback) { @TYPE@_matmul_inner_noblas(ip1, is1_m, is1_n, ip2, is2_n, is2_p, op, os_m, os_p, dm, dn, dp); @@ -465,13 +542,12 @@ NPY_NO_EXPORT void op, os_m, os_p, dm, dn, dp); } else if (vector_matrix) { /* vector @ matrix, switch ip1, ip2, p and m */ - @TYPE@_gemv(ip2, is2_p, is2_n, ip1, is1_n, is1_m, - op, os_p, os_m, dp, dn, dm); + @TYPE@_gemv(ip2, is2_p, is2_n, ip1, is1_n, + op, os_p, dp, dn); } else if (matrix_vector) { /* matrix @ vector */ - @TYPE@_gemv(ip1, is1_m, is1_n, ip2, is2_n, is2_p, - - op, os_m, os_p, dm, dn, dp); + @TYPE@_gemv(ip1, is1_m, is1_n, ip2, is2_n, + op, os_m, dm, dn); } else { /* column @ row, 2d output, no blas needed or non-blas-able input */ @TYPE@_matmul_inner_noblas(ip1, is1_m, is1_n, @@ -479,30 +555,73 @@ NPY_NO_EXPORT void op, os_m, os_p, dm, dn, dp); } } else { - /* matrix @ matrix */ - if (i1blasable && i2blasable && o_c_blasable) { - @TYPE@_matmul_matrixmatrix(ip1, is1_m, is1_n, - ip2, is2_n, is2_p, - op, os_m, os_p, - dm, dn, dp); - } else if (i1blasable && i2blasable && o_f_blasable) { - /* - * Use transpose equivalence: - * matmul(a, b, o) == matmul(b.T, a.T, o.T) - */ - 
@TYPE@_matmul_matrixmatrix(ip2, is2_p, is2_n, - ip1, is1_n, is1_m, - op, os_p, os_m, - dp, dn, dm); - } else { - /* - * If parameters are castable to int and we copy the - * non-blasable (or non-ccontiguous output) - * we could still use BLAS, see gh-12365. - */ - @TYPE@_matmul_inner_noblas(ip1, is1_m, is1_n, - ip2, is2_n, is2_p, - op, os_m, os_p, dm, dn, dp); + /* matrix @ matrix + * copy if not blasable, see gh-12365 & gh-23588 */ + npy_bool i1_transpose = ABS(is1_m) < ABS(is1_n), + i2_transpose = ABS(is2_n) < ABS(is2_p), + o_transpose = ABS(os_m) < ABS(os_p); + + npy_intp tmp_is1_m = i1_transpose ? sz : sz*dn, + tmp_is1_n = i1_transpose ? sz*dm : sz, + tmp_is2_n = i2_transpose ? sz : sz*dp, + tmp_is2_p = i2_transpose ? sz*dn : sz, + tmp_os_m = o_transpose ? sz : sz*dp, + tmp_os_p = o_transpose ? sz*dm : sz; + + if (!i1blasable) { + @TYPE@_matrix_copy( + i1_transpose, ip1, is1_m, is1_n, + tmp_ip1, tmp_is1_m, tmp_is1_n, + dm, dn + ); + } + + if (!i2blasable) { + @TYPE@_matrix_copy( + i2_transpose, ip2, is2_n, is2_p, + tmp_ip2, tmp_is2_n, tmp_is2_p, + dn, dp + ); + } + + void *ip1_ = i1blasable ? ip1 : tmp_ip1, + *ip2_ = i2blasable ? ip2 : tmp_ip2, + *op_ = oblasable ? op : tmp_op; + + npy_intp is1_m_ = i1blasable ? is1_m : tmp_is1_m, + is1_n_ = i1blasable ? is1_n : tmp_is1_n, + is2_n_ = i2blasable ? is2_n : tmp_is2_n, + is2_p_ = i2blasable ? is2_p : tmp_is2_p, + os_m_ = oblasable ? os_m : tmp_os_m, + os_p_ = oblasable ? 
os_p : tmp_os_p; + + /* + * Use transpose equivalence: + * matmul(a, b, o) == matmul(b.T, a.T, o.T) + */ + if (o_transpose) { + @TYPE@_matmul_matrixmatrix( + ip2_, is2_p_, is2_n_, + ip1_, is1_n_, is1_m_, + op_, os_p_, os_m_, + dp, dn, dm + ); + } + else { + @TYPE@_matmul_matrixmatrix( + ip1_, is1_m_, is1_n_, + ip2_, is2_n_, is2_p_, + op_, os_m_, os_p_, + dm, dn, dp + ); + } + + if(!oblasable){ + @TYPE@_matrix_copy( + o_transpose, tmp_op, tmp_os_m, tmp_os_p, + op, os_m, os_p, + dm, dp + ); } } #else @@ -512,6 +631,14 @@ NPY_NO_EXPORT void #endif } +#if @USEBLAS@ && defined(HAVE_CBLAS) +#if NPY_BLAS_CHECK_FPE_SUPPORT + if (!npy_blas_supports_fpe()) { + npy_clear_floatstatus_barrier((char*)args); + } +#endif + if (allocate_buffer) free(tmp_ip12op); +#endif } /**end repeat**/ @@ -534,7 +661,7 @@ NPY_NO_EXPORT void * #prefix = c, z, 0# * #USE_BLAS = 1, 1, 0# */ -NPY_NO_EXPORT void +static void @name@_dotc(char *ip1, npy_intp is1, char *ip2, npy_intp is2, char *op, npy_intp n, void *NPY_UNUSED(ignore)) { @@ -630,6 +757,7 @@ OBJECT_dotc(char *ip1, npy_intp is1, char *ip2, npy_intp is2, char *op, npy_intp * CFLOAT, CDOUBLE, CLONGDOUBLE, OBJECT# * #DOT = dot*15, dotc*4# * #CHECK_PYERR = 0*18, 1# + * #CHECK_BLAS = 1*2, 0*13, 1*2, 0*2# */ NPY_NO_EXPORT void @TYPE@_vecdot(char **args, npy_intp const *dimensions, npy_intp const *steps, @@ -653,5 +781,191 @@ NPY_NO_EXPORT void } #endif } +#if @CHECK_BLAS@ && NPY_BLAS_CHECK_FPE_SUPPORT + if (!npy_blas_supports_fpe()) { + npy_clear_floatstatus_barrier((char*)args); + } +#endif +} +/**end repeat**/ + +#if defined(HAVE_CBLAS) +/* + * Blas complex vector-matrix product via gemm (gemv cannot conjugate the vector). 
+ */ +/**begin repeat + * + * #name = CFLOAT, CDOUBLE# + * #typ = npy_cfloat, npy_cdouble# + * #prefix = c, z# + * #step1 = &oneF, &oneD# + * #step0 = &zeroF, &zeroD# + */ +static void +@name@_vecmat_via_gemm(void *ip1, npy_intp is1_n, + void *ip2, npy_intp is2_n, npy_intp is2_m, + void *op, npy_intp os_m, + npy_intp n, npy_intp m) +{ + enum CBLAS_ORDER order = CblasRowMajor; + enum CBLAS_TRANSPOSE trans1, trans2; + CBLAS_INT N, M, lda, ldb, ldc; + assert(n <= BLAS_MAXSIZE && m <= BLAS_MAXSIZE); + N = (CBLAS_INT)n; + M = (CBLAS_INT)m; + + assert(os_m == sizeof(@typ@)); + ldc = (CBLAS_INT)m; + + assert(is_blasable2d(is1_n, sizeof(@typ@), n, 1, sizeof(@typ@))); + trans1 = CblasConjTrans; + lda = (CBLAS_INT)(is1_n / sizeof(@typ@)); + + if (is_blasable2d(is2_n, is2_m, n, m, sizeof(@typ@))) { + trans2 = CblasNoTrans; + ldb = (CBLAS_INT)(is2_n / sizeof(@typ@)); + } + else { + assert(is_blasable2d(is2_m, is2_n, m, n, sizeof(@typ@))); + trans2 = CblasTrans; + ldb = (CBLAS_INT)(is2_m / sizeof(@typ@)); + } + CBLAS_FUNC(cblas_@prefix@gemm)( + order, trans1, trans2, 1, M, N, @step1@, ip1, lda, + ip2, ldb, @step0@, op, ldc); +} +/**end repeat**/ +#endif + +/* + * matvec loops, using blas gemv if possible, and TYPE_dot implementations otherwise. 
+ * signature is (m,n),(n)->(m) + */ +/**begin repeat + * #TYPE = FLOAT, DOUBLE, LONGDOUBLE, HALF, + * CFLOAT, CDOUBLE, CLONGDOUBLE, + * UBYTE, USHORT, UINT, ULONG, ULONGLONG, + * BYTE, SHORT, INT, LONG, LONGLONG, + * BOOL, OBJECT# + * #typ = npy_float,npy_double,npy_longdouble, npy_half, + * npy_cfloat, npy_cdouble, npy_clongdouble, + * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong, + * npy_byte, npy_short, npy_int, npy_long, npy_longlong, + * npy_bool, npy_object# + * #USEBLAS = 1, 1, 0, 0, 1, 1, 0*13# + * #CHECK_PYERR = 0*18, 1# + */ +NPY_NO_EXPORT void +@TYPE@_matvec(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) +{ + npy_intp n_outer = dimensions[0]; + npy_intp s0=steps[0], s1=steps[1], s2=steps[2]; + npy_intp dm = dimensions[1], dn = dimensions[2]; + npy_intp is1_m=steps[3], is1_n=steps[4], is2_n=steps[5], os_m=steps[6]; +#if @USEBLAS@ && defined(HAVE_CBLAS) + npy_bool too_big_for_blas = (dm > BLAS_MAXSIZE || dn > BLAS_MAXSIZE); + npy_bool i1_c_blasable = is_blasable2d(is1_m, is1_n, dm, dn, sizeof(@typ@)); + npy_bool i1_f_blasable = is_blasable2d(is1_n, is1_m, dn, dm, sizeof(@typ@)); + npy_bool i2_blasable = is_blasable2d(is2_n, sizeof(@typ@), dn, 1, sizeof(@typ@)); + npy_bool blasable = ((i1_c_blasable || i1_f_blasable) && i2_blasable + && !too_big_for_blas && dn > 1 && dm > 1); +#endif + for (npy_intp i = 0; i < n_outer; i++, + args[0] += s0, args[1] += s1, args[2] += s2) { + char *ip1=args[0], *ip2=args[1], *op=args[2]; +#if @USEBLAS@ && defined(HAVE_CBLAS) + if (blasable) { + @TYPE@_gemv(ip1, is1_m, is1_n, ip2, is2_n, op, os_m, dm, dn); + continue; + } +#endif + /* + * Dot the different matrix rows with the vector to get output elements. 
+ * (no conjugation for complex, unlike vecdot and vecmat) + */ + for (npy_intp j = 0; j < dm; j++, ip1 += is1_m, op += os_m) { + @TYPE@_dot(ip1, is1_n, ip2, is2_n, op, dn, NULL); +#if @CHECK_PYERR@ + if (PyErr_Occurred()) { + return; + } +#endif + } + } +#if @USEBLAS@ && NPY_BLAS_CHECK_FPE_SUPPORT + if (!npy_blas_supports_fpe()) { + npy_clear_floatstatus_barrier((char*)args); + } +#endif +} +/**end repeat**/ + +/* + * vecmat loops, using blas gemv for float and gemm for complex if possible, + * and TYPE_dot[c] implementations otherwise. + * Note that we cannot use gemv for complex, since we need to conjugate the vector. + * signature is (n),(n,m)->(m) + */ +/**begin repeat + * #TYPE = FLOAT, DOUBLE, LONGDOUBLE, HALF, + * CFLOAT, CDOUBLE, CLONGDOUBLE, + * UBYTE, USHORT, UINT, ULONG, ULONGLONG, + * BYTE, SHORT, INT, LONG, LONGLONG, + * BOOL, OBJECT# + * #typ = npy_float,npy_double,npy_longdouble, npy_half, + * npy_cfloat, npy_cdouble, npy_clongdouble, + * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong, + * npy_byte, npy_short, npy_int, npy_long, npy_longlong, + * npy_bool, npy_object# + * #USEBLAS = 1, 1, 0, 0, 1, 1, 0*13# + * #COMPLEX = 0*4, 1*3, 0*11, 1# + * #DOT = dot*4, dotc*3, dot*11, dotc# + * #CHECK_PYERR = 0*18, 1# + */ +NPY_NO_EXPORT void +@TYPE@_vecmat(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) +{ + npy_intp n_outer = dimensions[0]; + npy_intp s0=steps[0], s1=steps[1], s2=steps[2]; + npy_intp dn = dimensions[1], dm = dimensions[2]; + npy_intp is1_n=steps[3], is2_n=steps[4], is2_m=steps[5], os_m=steps[6]; +#if @USEBLAS@ && defined(HAVE_CBLAS) + npy_bool too_big_for_blas = (dm > BLAS_MAXSIZE || dn > BLAS_MAXSIZE); + npy_bool i1_blasable = is_blasable2d(is1_n, sizeof(@typ@), dn, 1, sizeof(@typ@)); + npy_bool i2_c_blasable = is_blasable2d(is2_n, is2_m, dn, dm, sizeof(@typ@)); + npy_bool i2_f_blasable = is_blasable2d(is2_m, is2_n, dm, dn, sizeof(@typ@)); + npy_bool blasable = (i1_blasable && (i2_c_blasable 
|| i2_f_blasable) + && !too_big_for_blas && dn > 1 && dm > 1); +#endif + for (npy_intp i = 0; i < n_outer; i++, + args[0] += s0, args[1] += s1, args[2] += s2) { + char *ip1=args[0], *ip2=args[1], *op=args[2]; +#if @USEBLAS@ && defined(HAVE_CBLAS) + if (blasable) { +#if @COMPLEX@ + /* For complex, use gemm so we can conjugate the vector */ + @TYPE@_vecmat_via_gemm(ip1, is1_n, ip2, is2_n, is2_m, op, os_m, dn, dm); +#else + /* For float, use gemv (hence flipped order) */ + @TYPE@_gemv(ip2, is2_m, is2_n, ip1, is1_n, op, os_m, dm, dn); +#endif + continue; + } +#endif + /* Dot the vector with different matrix columns to get output elements. */ + for (npy_intp j = 0; j < dm; j++, ip2 += is2_m, op += os_m) { + @TYPE@_@DOT@(ip1, is1_n, ip2, is2_n, op, dn, NULL); +#if @CHECK_PYERR@ + if (PyErr_Occurred()) { + return; + } +#endif + } + } +#if @USEBLAS@ && NPY_BLAS_CHECK_FPE_SUPPORT + if (!npy_blas_supports_fpe()) { + npy_clear_floatstatus_barrier((char*)args); + } +#endif } /**end repeat**/ diff --git a/numpy/_core/src/umath/matmul.h.src b/numpy/_core/src/umath/matmul.h.src index df3f549a545a..bff3d73c8993 100644 --- a/numpy/_core/src/umath/matmul.h.src +++ b/numpy/_core/src/umath/matmul.h.src @@ -7,15 +7,10 @@ **/ NPY_NO_EXPORT void @TYPE@_matmul(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); -/**end repeat**/ - -/**begin repeat - * #TYPE = FLOAT, DOUBLE, LONGDOUBLE, HALF, - * CFLOAT, CDOUBLE, CLONGDOUBLE, - * UBYTE, USHORT, UINT, ULONG, ULONGLONG, - * BYTE, SHORT, INT, LONG, LONGLONG, - * BOOL, OBJECT# - */ NPY_NO_EXPORT void @TYPE@_vecdot(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); +NPY_NO_EXPORT void +@TYPE@_matvec(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); +NPY_NO_EXPORT void +@TYPE@_vecmat(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); /**end repeat**/ diff --git a/numpy/_core/src/umath/override.c 
b/numpy/_core/src/umath/override.c index d10b86be7b57..421359eb6203 100644 --- a/numpy/_core/src/umath/override.c +++ b/numpy/_core/src/umath/override.c @@ -4,6 +4,8 @@ #include "numpy/ndarraytypes.h" #include "numpy/ufuncobject.h" #include "npy_import.h" +#include "npy_static_data.h" +#include "multiarraymodule.h" #include "npy_pycompat.h" #include "override.h" #include "ufunc_override.h" @@ -110,29 +112,22 @@ initialize_normal_kwds(PyObject *out_args, } } } - static PyObject *out_str = NULL; - if (out_str == NULL) { - out_str = PyUnicode_InternFromString("out"); - if (out_str == NULL) { - return -1; - } - } if (out_args != NULL) { /* Replace `out` argument with the normalized version */ - int res = PyDict_SetItem(normal_kwds, out_str, out_args); + int res = PyDict_SetItem(normal_kwds, npy_interned_str.out, out_args); if (res < 0) { return -1; } } else { /* Ensure that `out` is not present. */ - int res = PyDict_Contains(normal_kwds, out_str); + int res = PyDict_Contains(normal_kwds, npy_interned_str.out); if (res < 0) { return -1; } if (res) { - return PyDict_DelItem(normal_kwds, out_str); + return PyDict_DelItem(normal_kwds, npy_interned_str.out); } } return 0; @@ -182,10 +177,8 @@ copy_positional_args_to_kwargs(const char **keywords, * This is only relevant for reduce, which is the only one with * 5 keyword arguments. */ - static PyObject *NoValue = NULL; assert(strcmp(keywords[i], "initial") == 0); - npy_cache_import("numpy", "_NoValue", &NoValue); - if (args[i] == NoValue) { + if (args[i] == npy_static_pydata._NoValue) { continue; } } @@ -349,7 +342,7 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method, } /* * Set override arguments for each call since the tuple must - * not be mutated after use in PyPy + * not be mutated after use * We increase all references since SET_ITEM steals * them and they will be DECREF'd when the tuple is deleted. 
*/ @@ -371,23 +364,23 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method, /* Check if there is a method left to call */ if (!override_obj) { /* No acceptable override found. */ - static PyObject *errmsg_formatter = NULL; PyObject *errmsg; - npy_cache_import("numpy._core._internal", - "array_ufunc_errmsg_formatter", - &errmsg_formatter); - - if (errmsg_formatter != NULL) { - /* All tuple items must be set before use */ - Py_INCREF(Py_None); - PyTuple_SET_ITEM(override_args, 0, Py_None); - errmsg = PyObject_Call(errmsg_formatter, override_args, - normal_kwds); - if (errmsg != NULL) { - PyErr_SetObject(PyExc_TypeError, errmsg); - Py_DECREF(errmsg); - } + /* All tuple items must be set before use */ + Py_INCREF(Py_None); + PyTuple_SET_ITEM(override_args, 0, Py_None); + if (npy_cache_import_runtime( + "numpy._core._internal", + "array_ufunc_errmsg_formatter", + &npy_runtime_imports.array_ufunc_errmsg_formatter) == -1) { + goto fail; + } + errmsg = PyObject_Call( + npy_runtime_imports.array_ufunc_errmsg_formatter, + override_args, normal_kwds); + if (errmsg != NULL) { + PyErr_SetObject(PyExc_TypeError, errmsg); + Py_DECREF(errmsg); } Py_DECREF(override_args); goto fail; diff --git a/numpy/_core/src/umath/reduction.c b/numpy/_core/src/umath/reduction.c index 548530e1ca3b..384ac052b226 100644 --- a/numpy/_core/src/umath/reduction.c +++ b/numpy/_core/src/umath/reduction.c @@ -218,10 +218,13 @@ PyUFunc_ReduceWrapper(PyArrayMethod_Context *context, NPY_ITER_ZEROSIZE_OK | NPY_ITER_REFS_OK | NPY_ITER_DELAY_BUFALLOC | + /* + * stride negation (if reorderable) could currently misalign the + * first-visit and initial value copy logic. 
+ */ + NPY_ITER_DONT_NEGATE_STRIDES | NPY_ITER_COPY_IF_OVERLAP; - if (!(context->method->flags & NPY_METH_IS_REORDERABLE)) { - it_flags |= NPY_ITER_DONT_NEGATE_STRIDES; - } + op_flags[0] = NPY_ITER_READWRITE | NPY_ITER_ALIGNED | NPY_ITER_ALLOCATE | @@ -336,10 +339,24 @@ PyUFunc_ReduceWrapper(PyArrayMethod_Context *context, } PyArrayMethod_StridedLoop *strided_loop; - NPY_ARRAYMETHOD_FLAGS flags = 0; + NPY_ARRAYMETHOD_FLAGS flags; + + npy_intp *strideptr = NpyIter_GetInnerStrideArray(iter); + if (wheremask != NULL) { + if (PyArrayMethod_GetMaskedStridedLoop(context, + 1, strideptr, &strided_loop, &auxdata, &flags) < 0) { + goto fail; + } + } + else { + if (context->method->get_strided_loop(context, + 1, 0, strideptr, &strided_loop, &auxdata, &flags) < 0) { + goto fail; + } + } + flags = PyArrayMethod_COMBINED_FLAGS(flags, NpyIter_GetTransferFlags(iter)); int needs_api = (flags & NPY_METH_REQUIRES_PYAPI) != 0; - needs_api |= NpyIter_IterationNeedsAPI(iter); if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { /* Start with the floating-point exception flags cleared */ npy_clear_floatstatus_barrier((char*)&iter); @@ -355,7 +372,7 @@ PyUFunc_ReduceWrapper(PyArrayMethod_Context *context, PyArray_NDIM(result), PyArray_DIMS(result), PyArray_DESCR(result), PyArray_BYTES(result), PyArray_STRIDES(result), - op_dtypes[0], initial_buf); + op_dtypes[0], initial_buf, NPY_UNSAFE_CASTING); if (ret < 0) { goto fail; } @@ -386,29 +403,9 @@ PyUFunc_ReduceWrapper(PyArrayMethod_Context *context, goto fail; } - /* - * Note that we need to ensure that the iterator is reset before getting - * the fixed strides. (The buffer information is uninitialized before.) 
- */ - npy_intp fixed_strides[3]; - NpyIter_GetInnerFixedStrideArray(iter, fixed_strides); - if (wheremask != NULL) { - if (PyArrayMethod_GetMaskedStridedLoop(context, - 1, fixed_strides, &strided_loop, &auxdata, &flags) < 0) { - goto fail; - } - } - else { - if (context->method->get_strided_loop(context, - 1, 0, fixed_strides, &strided_loop, &auxdata, &flags) < 0) { - goto fail; - } - } - if (!empty_iteration) { NpyIter_IterNextFunc *iternext; char **dataptr; - npy_intp *strideptr; npy_intp *countptr; iternext = NpyIter_GetIterNext(iter, NULL); @@ -416,7 +413,6 @@ PyUFunc_ReduceWrapper(PyArrayMethod_Context *context, goto fail; } dataptr = NpyIter_GetDataPtrArray(iter); - strideptr = NpyIter_GetInnerStrideArray(iter); countptr = NpyIter_GetInnerLoopSizePtr(iter); if (loop(context, strided_loop, auxdata, diff --git a/numpy/_core/src/umath/scalarmath.c.src b/numpy/_core/src/umath/scalarmath.c.src index cc8f82aca11b..e2d7c22f5deb 100644 --- a/numpy/_core/src/umath/scalarmath.c.src +++ b/numpy/_core/src/umath/scalarmath.c.src @@ -798,8 +798,9 @@ typedef enum { */ CONVERT_PYSCALAR, /* - * Other object is an unknown scalar or array-like, we (typically) use + * Other object is an unknown scalar or array-like, we also use * the generic path, which normally ends up in the ufunc machinery. + * (So it ends up identical to PROMOTION_REQUIRED.) 
*/ OTHER_IS_UNKNOWN_OBJECT, /* @@ -825,6 +826,10 @@ typedef enum { * npy_long, npy_ulong, npy_longlong, npy_ulonglong, * npy_half, npy_float, npy_double, npy_longdouble, * npy_cfloat, npy_cdouble, npy_clongdouble# + * #scalar_type = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint, + * npy_long, npy_ulong, npy_longlong, npy_ulonglong, + * npy_half, npy_float, npy_double, npy_longdouble, + * npy_float, npy_double, npy_longdouble# * #c = x*14, f, , l# */ @@ -845,10 +850,10 @@ typedef enum { *result = npy_float_to_half((float)(value)) #elif defined(IS_CFLOAT) || defined(IS_CDOUBLE) || defined(IS_CLONGDOUBLE) #define CONVERT_TO_RESULT(value) \ - npy_csetreal@c@(result, value); \ + npy_csetreal@c@(result, ((@scalar_type@)(value))); \ npy_csetimag@c@(result, 0) #else - #define CONVERT_TO_RESULT(value) *result = value + #define CONVERT_TO_RESULT(value) *result = ((@type@)(value)) #endif @@ -936,7 +941,7 @@ convert_to_@name@(PyObject *value, @type@ *result, npy_bool *may_need_deferring) if (PyArray_IsScalar(value, @Name@)) { *result = PyArrayScalar_VAL(value, @Name@); /* - * In principle special, assyemetric, handling could be possible for + * In principle special, asymmetric, handling could be possible for * explicit subclasses. * In practice, we just check the normal deferring logic. */ @@ -953,20 +958,8 @@ convert_to_@name@(PyObject *value, @type@ *result, npy_bool *may_need_deferring) return CONVERSION_SUCCESS; } - if (PyFloat_Check(value)) { - if (!PyFloat_CheckExact(value)) { - /* A NumPy double is a float subclass, but special. 
*/ - if (PyArray_IsScalar(value, Double)) { - descr = PyArray_DescrFromType(NPY_DOUBLE); - goto numpy_scalar; - } - *may_need_deferring = NPY_TRUE; - } + if (PyFloat_CheckExact(value)) { if (!IS_SAFE(NPY_DOUBLE, NPY_@TYPE@)) { - if (get_npy_promotion_state() != NPY_USE_WEAK_PROMOTION) { - /* Legacy promotion and weak-and-warn not handled here */ - return PROMOTION_REQUIRED; - } /* Weak promotion is used when self is float or complex: */ if (!PyTypeNum_ISFLOAT(NPY_@TYPE@) && !PyTypeNum_ISCOMPLEX(NPY_@TYPE@)) { return PROMOTION_REQUIRED; @@ -977,28 +970,18 @@ convert_to_@name@(PyObject *value, @type@ *result, npy_bool *may_need_deferring) return CONVERSION_SUCCESS; } - if (PyLong_Check(value)) { - if (!PyLong_CheckExact(value)) { - *may_need_deferring = NPY_TRUE; - } + if (PyLong_CheckExact(value)) { if (!IS_SAFE(NPY_LONG, NPY_@TYPE@)) { /* * long -> (c)longdouble is safe, so `OTHER_IS_UNKNOWN_OBJECT` will * be returned below for huge integers. */ - if (get_npy_promotion_state() != NPY_USE_WEAK_PROMOTION) { - /* Legacy promotion and weak-and-warn not handled here */ - return PROMOTION_REQUIRED; - } return CONVERT_PYSCALAR; } int overflow; long val = PyLong_AsLongAndOverflow(value, &overflow); if (overflow) { /* handle as if "unsafe" */ - if (get_npy_promotion_state() != NPY_USE_WEAK_PROMOTION) { - return OTHER_IS_UNKNOWN_OBJECT; - } return CONVERT_PYSCALAR; } if (error_converting(val)) { @@ -1008,20 +991,8 @@ convert_to_@name@(PyObject *value, @type@ *result, npy_bool *may_need_deferring) return CONVERSION_SUCCESS; } - if (PyComplex_Check(value)) { - if (!PyComplex_CheckExact(value)) { - /* A NumPy complex double is a float subclass, but special. 
*/ - if (PyArray_IsScalar(value, CDouble)) { - descr = PyArray_DescrFromType(NPY_CDOUBLE); - goto numpy_scalar; - } - *may_need_deferring = NPY_TRUE; - } + if (PyComplex_CheckExact(value)) { if (!IS_SAFE(NPY_CDOUBLE, NPY_@TYPE@)) { - if (get_npy_promotion_state() != NPY_USE_WEAK_PROMOTION) { - /* Legacy promotion and weak-and-warn not handled here */ - return PROMOTION_REQUIRED; - } /* Weak promotion is used when self is float or complex: */ if (!PyTypeNum_ISCOMPLEX(NPY_@TYPE@)) { return PROMOTION_REQUIRED; @@ -1078,7 +1049,6 @@ convert_to_@name@(PyObject *value, @type@ *result, npy_bool *may_need_deferring) return OTHER_IS_UNKNOWN_OBJECT; } - numpy_scalar: if (descr->typeobj != Py_TYPE(value)) { /* * This is a subclass of a builtin type, we may continue normally, @@ -1262,12 +1232,9 @@ static PyObject * * also integers that are too large to convert to `long`), or * even a subclass of a NumPy scalar (currently). * - * Generally, we try dropping through to the array path here, - * but this can lead to infinite recursions for (c)longdouble. + * We drop through to the generic path here which checks for the + * infinite recursion problem (gh-18548, gh-26767). 
*/ -#if defined(IS_longdouble) || defined(IS_clongdouble) - Py_RETURN_NOTIMPLEMENTED; -#endif case PROMOTION_REQUIRED: /* * Python scalar that is larger than the current one, or two @@ -1279,7 +1246,7 @@ static PyObject * */ return PyGenericArrType_Type.tp_as_number->nb_@oper@(a,b); case CONVERT_PYSCALAR: - if (@NAME@_setitem(other, (char *)&other_val, NULL) < 0) { + if (@NAME@_setitem(NULL, other, (char *)&other_val) < 0) { return NULL; } break; @@ -1391,7 +1358,7 @@ static PyObject * */ PyObject *ret; npy_float64 arg1, arg2, other_val; - @type@ other_val_conv; + @type@ other_val_conv = 0; int is_forward; if (Py_TYPE(a) == &Py@Name@ArrType_Type) { @@ -1411,7 +1378,8 @@ static PyObject * npy_bool may_need_deferring; conversion_result res = convert_to_@name@( other, &other_val_conv, &may_need_deferring); - other_val = other_val_conv; /* Need a float value */ + /* Actual float cast `other_val` is set below on success. */ + if (res == CONVERSION_ERROR) { return NULL; /* an error occurred (should never happen) */ } @@ -1422,13 +1390,14 @@ static PyObject * case DEFER_TO_OTHER_KNOWN_SCALAR: Py_RETURN_NOTIMPLEMENTED; case CONVERSION_SUCCESS: + other_val = (double)other_val_conv; /* Need a float value */ break; /* successfully extracted value we can proceed */ case OTHER_IS_UNKNOWN_OBJECT: case PROMOTION_REQUIRED: return PyGenericArrType_Type.tp_as_number->nb_true_divide(a,b); case CONVERT_PYSCALAR: /* This is the special behavior, convert to float64 directly */ - if (DOUBLE_setitem(other, (char *)&other_val, NULL) < 0) { + if (DOUBLE_setitem(NULL, other, (char *)&other_val) < 0) { return NULL; } break; @@ -1440,12 +1409,12 @@ static PyObject * npy_clear_floatstatus_barrier((char*)&arg1); if (is_forward) { - arg1 = PyArrayScalar_VAL(a, @Name@); + arg1 = (double)PyArrayScalar_VAL(a, @Name@); arg2 = other_val; } else { arg1 = other_val; - arg2 = PyArrayScalar_VAL(b, @Name@); + arg2 = (double)PyArrayScalar_VAL(b, @Name@); } /* Note that arguments are already float64, so we 
can just divide */ @@ -1544,13 +1513,10 @@ static PyObject * case CONVERSION_SUCCESS: break; /* successfully extracted value we can proceed */ case OTHER_IS_UNKNOWN_OBJECT: -#if defined(IS_longdouble) || defined(IS_clongdouble) - Py_RETURN_NOTIMPLEMENTED; -#endif case PROMOTION_REQUIRED: return PyGenericArrType_Type.tp_as_number->nb_power(a, b, modulo); case CONVERT_PYSCALAR: - if (@NAME@_setitem(other, (char *)&other_val, NULL) < 0) { + if (@NAME@_setitem(NULL, other, (char *)&other_val) < 0) { return NULL; } break; @@ -1788,12 +1754,7 @@ static int static int emit_complexwarning(void) { - static PyObject *cls = NULL; - npy_cache_import("numpy.exceptions", "ComplexWarning", &cls); - if (cls == NULL) { - return -1; - } - return PyErr_WarnEx(cls, + return PyErr_WarnEx(npy_static_pydata.ComplexWarning, "Casting complex values to real discards the imaginary part", 1); } @@ -1874,7 +1835,7 @@ static PyObject * } return @func@(@to_ctype@(npy_creal@n@(PyArrayScalar_VAL(obj, @Name@)))); #else - return @func@(@to_ctype@(PyArrayScalar_VAL(obj, @Name@))); + return @func@((double)(@to_ctype@(PyArrayScalar_VAL(obj, @Name@)))); #endif } /**end repeat**/ @@ -1960,13 +1921,10 @@ static PyObject* case CONVERSION_SUCCESS: break; /* successfully extracted value we can proceed */ case OTHER_IS_UNKNOWN_OBJECT: -#if defined(IS_longdouble) || defined(IS_clongdouble) - Py_RETURN_NOTIMPLEMENTED; -#endif case PROMOTION_REQUIRED: return PyGenericArrType_Type.tp_richcompare(self, other, cmp_op); case CONVERT_PYSCALAR: - if (@NAME@_setitem(other, (char *)&arg2, NULL) < 0) { + if (@NAME@_setitem(NULL, other, (char *)&arg2) < 0) { return NULL; } break; diff --git a/numpy/_core/src/umath/special_integer_comparisons.cpp b/numpy/_core/src/umath/special_integer_comparisons.cpp index 05026be96e67..06babeeda0a8 100644 --- a/numpy/_core/src/umath/special_integer_comparisons.cpp +++ b/numpy/_core/src/umath/special_integer_comparisons.cpp @@ -293,7 +293,7 @@ get_loop(PyArrayMethod_Context *context, /* - 
* Machinery to add the python integer to NumPy intger comparsisons as well + * Machinery to add the python integer to NumPy integer comparsisons as well * as a special promotion to special case Python int with Python int * comparisons. */ diff --git a/numpy/_core/src/umath/string_buffer.h b/numpy/_core/src/umath/string_buffer.h index c5fc8949f994..cb674484a582 100644 --- a/numpy/_core/src/umath/string_buffer.h +++ b/numpy/_core/src/umath/string_buffer.h @@ -14,9 +14,12 @@ #include "string_fastsearch.h" #include "gil_utils.h" -#define CHECK_OVERFLOW(index) if (buf + (index) >= after) return 0 -#define MSB(val) ((val) >> 7 & 1) - +#ifdef _MSC_VER +// MSVC sometimes complains (C4715: "not all control paths return a value") +// on switch statements over enum classes, even though all enum values are covered. +// This warning is suppressed here to avoid invasive changes. +# pragma warning(disable:4715) +#endif enum class ENCODING { ASCII, UTF32, UTF8 @@ -218,7 +221,7 @@ codepoint_isupper(npy_ucs4 code) template inline bool -codepoint_istitle(npy_ucs4); +codepoint_istitle(npy_ucs4 code); template<> inline bool @@ -297,6 +300,18 @@ struct Buffer { return num_codepoints; } + inline size_t + buffer_width() + { + switch (enc) { + case ENCODING::ASCII: + case ENCODING::UTF8: + return after - buf; + case ENCODING::UTF32: + return (after - buf) / sizeof(npy_ucs4); + } + } + inline Buffer& operator+=(npy_int64 rhs) { @@ -387,19 +402,19 @@ struct Buffer { } inline void - buffer_memcpy(Buffer out, size_t n_chars) + buffer_memcpy(Buffer other, size_t len) { - if (n_chars == 0) { + if (len == 0) { return; } switch (enc) { case ENCODING::ASCII: case ENCODING::UTF8: // for UTF8 we treat n_chars as number of bytes - memcpy(out.buf, buf, n_chars); + memcpy(other.buf, buf, len); break; case ENCODING::UTF32: - memcpy(out.buf, buf, n_chars * sizeof(npy_ucs4)); + memcpy(other.buf, buf, len * sizeof(npy_ucs4)); break; } } @@ -460,7 +475,7 @@ struct Buffer { } inline size_t - 
num_bytes_next_character(void) { + num_bytes_next_character() { switch (enc) { case ENCODING::ASCII: return 1; @@ -503,6 +518,18 @@ struct Buffer { return unary_loop(); } + inline bool + isdecimal() + { + return unary_loop(); + } + + inline bool + isdigit() + { + return unary_loop(); + } + inline bool first_character_isspace() { @@ -521,12 +548,6 @@ struct Buffer { return unary_loop(); } - inline bool - isdigit() - { - return unary_loop(); - } - inline bool isalnum() { @@ -542,7 +563,7 @@ struct Buffer { } Buffer tmp = *this; - bool cased = 0; + bool cased = false; for (size_t i = 0; i < len; i++) { if (codepoint_isupper(*tmp) || codepoint_istitle(*tmp)) { return false; @@ -564,7 +585,7 @@ struct Buffer { } Buffer tmp = *this; - bool cased = 0; + bool cased = false; for (size_t i = 0; i < len; i++) { if (codepoint_islower(*tmp) || codepoint_istitle(*tmp)) { return false; @@ -616,12 +637,6 @@ struct Buffer { return unary_loop(); } - inline bool - isdecimal() - { - return unary_loop(); - } - inline Buffer rstrip() { @@ -866,7 +881,7 @@ string_find(Buffer buf1, Buffer buf2, npy_int64 start, npy_int64 end) { char ch = *buf2; CheckedIndexer ind(start_loc, end_loc - start_loc); - result = (npy_intp) findchar(ind, end_loc - start_loc, ch); + result = (npy_intp) find_char(ind, end_loc - start_loc, ch); if (enc == ENCODING::UTF8 && result > 0) { result = utf8_character_index( start_loc, start_loc - buf1.buf, start, result, @@ -878,7 +893,7 @@ string_find(Buffer buf1, Buffer buf2, npy_int64 start, npy_int64 end) { npy_ucs4 ch = *buf2; CheckedIndexer ind((npy_ucs4 *)(buf1 + start).buf, end-start); - result = (npy_intp) findchar(ind, end - start, ch); + result = (npy_intp) find_char(ind, end - start, ch); break; } } @@ -895,10 +910,12 @@ string_find(Buffer buf1, Buffer buf2, npy_int64 start, npy_int64 end) npy_intp pos; switch(enc) { case ENCODING::UTF8: - pos = fastsearch(start_loc, end_loc - start_loc, buf2.buf, buf2.after - buf2.buf, -1, FAST_SEARCH); + pos = 
fastsearch(start_loc, end_loc - start_loc, buf2.buf, + buf2.after - buf2.buf, -1, FAST_SEARCH); // pos is the byte index, but we need the character index if (pos > 0) { - pos = utf8_character_index(start_loc, start_loc - buf1.buf, start, pos, buf1.after - start_loc); + pos = utf8_character_index(start_loc, start_loc - buf1.buf, + start, pos, buf1.after - start_loc); } break; case ENCODING::ASCII: @@ -970,7 +987,7 @@ string_rfind(Buffer buf1, Buffer buf2, npy_int64 start, npy_int64 end) { char ch = *buf2; CheckedIndexer ind(start_loc, end_loc - start_loc); - result = (npy_intp) rfindchar(ind, end_loc - start_loc, ch); + result = (npy_intp) rfind_char(ind, end_loc - start_loc, ch); if (enc == ENCODING::UTF8 && result > 0) { result = utf8_character_index( start_loc, start_loc - buf1.buf, start, result, @@ -982,7 +999,7 @@ string_rfind(Buffer buf1, Buffer buf2, npy_int64 start, npy_int64 end) { npy_ucs4 ch = *buf2; CheckedIndexer ind((npy_ucs4 *)(buf1 + start).buf, end - start); - result = (npy_intp) rfindchar(ind, end - start, ch); + result = (npy_intp) rfind_char(ind, end - start, ch); break; } } @@ -999,10 +1016,12 @@ string_rfind(Buffer buf1, Buffer buf2, npy_int64 start, npy_int64 end) npy_intp pos; switch (enc) { case ENCODING::UTF8: - pos = fastsearch(start_loc, end_loc - start_loc, buf2.buf, buf2.after - buf2.buf, -1, FAST_RSEARCH); + pos = fastsearch(start_loc, end_loc - start_loc, + buf2.buf, buf2.after - buf2.buf, -1, FAST_RSEARCH); // pos is the byte index, but we need the character index if (pos > 0) { - pos = utf8_character_index(start_loc, start_loc - buf1.buf, start, pos, buf1.after - start_loc); + pos = utf8_character_index(start_loc, start_loc - buf1.buf, + start, pos, buf1.after - start_loc); } break; case ENCODING::ASCII: @@ -1064,7 +1083,7 @@ string_count(Buffer buf1, Buffer buf2, npy_int64 start, npy_int64 end) start_loc = (buf1 + start).buf; end_loc = (buf1 + end).buf; } - npy_intp count; + npy_intp count = 0; switch (enc) { case ENCODING::UTF8: 
count = fastsearch(start_loc, end_loc - start_loc, buf2.buf, @@ -1139,7 +1158,7 @@ enum class STRIPTYPE { template static inline size_t -string_lrstrip_whitespace(Buffer buf, Buffer out, STRIPTYPE striptype) +string_lrstrip_whitespace(Buffer buf, Buffer out, STRIPTYPE strip_type) { size_t len = buf.num_codepoints(); if (len == 0) { @@ -1149,55 +1168,60 @@ string_lrstrip_whitespace(Buffer buf, Buffer out, STRIPTYPE striptype) return 0; } - size_t i = 0; + size_t new_start = 0; size_t num_bytes = (buf.after - buf.buf); Buffer traverse_buf = Buffer(buf.buf, num_bytes); - if (striptype != STRIPTYPE::RIGHTSTRIP) { - while (i < len) { + if (strip_type != STRIPTYPE::RIGHTSTRIP) { + while (new_start < len) { if (!traverse_buf.first_character_isspace()) { break; } num_bytes -= traverse_buf.num_bytes_next_character(); - traverse_buf++; - i++; + new_start++; + traverse_buf++; // may go one beyond buffer } } - npy_intp j = len - 1; // Could also turn negative if we're stripping the whole string + size_t new_stop = len; // New stop is a range (beyond last char) if (enc == ENCODING::UTF8) { traverse_buf = Buffer(buf.after, 0) - 1; } else { - traverse_buf = buf + j; + traverse_buf = buf + (new_stop - 1); } - if (striptype != STRIPTYPE::LEFTSTRIP) { - while (j >= static_cast(i)) { + if (strip_type != STRIPTYPE::LEFTSTRIP) { + while (new_stop > new_start) { if (*traverse_buf != 0 && !traverse_buf.first_character_isspace()) { break; } + num_bytes -= traverse_buf.num_bytes_next_character(); - traverse_buf--; - j--; + new_stop--; + + // Do not step to character -1: can't find it's start for utf-8. 
+ if (new_stop > 0) { + traverse_buf--; + } } } - Buffer offset_buf = buf + i; + Buffer offset_buf = buf + new_start; if (enc == ENCODING::UTF8) { offset_buf.buffer_memcpy(out, num_bytes); return num_bytes; } - offset_buf.buffer_memcpy(out, j - i + 1); - out.buffer_fill_with_zeros_after_index(j - i + 1); - return j - i + 1; + offset_buf.buffer_memcpy(out, new_stop - new_start); + out.buffer_fill_with_zeros_after_index(new_stop - new_start); + return new_stop - new_start; } template static inline size_t -string_lrstrip_chars(Buffer buf1, Buffer buf2, Buffer out, STRIPTYPE striptype) +string_lrstrip_chars(Buffer buf1, Buffer buf2, Buffer out, STRIPTYPE strip_type) { size_t len1 = buf1.num_codepoints(); if (len1 == 0) { @@ -1218,26 +1242,38 @@ string_lrstrip_chars(Buffer buf1, Buffer buf2, Buffer out, STRIPT return len1; } - size_t i = 0; + size_t new_start = 0; size_t num_bytes = (buf1.after - buf1.buf); Buffer traverse_buf = Buffer(buf1.buf, num_bytes); - if (striptype != STRIPTYPE::RIGHTSTRIP) { - while (i < len1) { - Py_ssize_t res; + if (strip_type != STRIPTYPE::RIGHTSTRIP) { + for (; new_start < len1; traverse_buf++) { + Py_ssize_t res = 0; + size_t current_point_bytes = traverse_buf.num_bytes_next_character(); switch (enc) { case ENCODING::ASCII: - case ENCODING::UTF8: { CheckedIndexer ind(buf2.buf, len2); - res = findchar(ind, len2, *traverse_buf); + res = find_char(ind, len2, *traverse_buf); + break; + } + case ENCODING::UTF8: + { + if (current_point_bytes == 1) { + CheckedIndexer ind(buf2.buf, len2); + res = find_char(ind, len2, *traverse_buf); + } else { + res = fastsearch(buf2.buf, buf2.after - buf2.buf, + traverse_buf.buf, current_point_bytes, + -1, FAST_SEARCH); + } break; } case ENCODING::UTF32: { CheckedIndexer ind((npy_ucs4 *)buf2.buf, len2); - res = findchar(ind, len2, *traverse_buf); + res = find_char(ind, len2, *traverse_buf); break; } } @@ -1245,56 +1281,68 @@ string_lrstrip_chars(Buffer buf1, Buffer buf2, Buffer out, STRIPT break; } num_bytes -= 
traverse_buf.num_bytes_next_character(); - traverse_buf++; - i++; + new_start++; } } - npy_intp j = len1 - 1; + size_t new_stop = len1; // New stop is a range (beyond last char) if (enc == ENCODING::UTF8) { traverse_buf = Buffer(buf1.after, 0) - 1; } else { - traverse_buf = buf1 + j; + traverse_buf = buf1 + (new_stop - 1); } - if (striptype != STRIPTYPE::LEFTSTRIP) { - while (j >= static_cast(i)) { - Py_ssize_t res; + if (strip_type != STRIPTYPE::LEFTSTRIP) { + while (new_stop > new_start) { + size_t current_point_bytes = traverse_buf.num_bytes_next_character(); + Py_ssize_t res = 0; switch (enc) { case ENCODING::ASCII: - case ENCODING::UTF8: { CheckedIndexer ind(buf2.buf, len2); - res = findchar(ind, len2, *traverse_buf); + res = find_char(ind, len2, *traverse_buf); + break; + } + case ENCODING::UTF8: + { + if (current_point_bytes == 1) { + CheckedIndexer ind(buf2.buf, len2); + res = find_char(ind, len2, *traverse_buf); + } else { + res = fastsearch(buf2.buf, buf2.after - buf2.buf, + traverse_buf.buf, current_point_bytes, + -1, FAST_RSEARCH); + } break; } case ENCODING::UTF32: { CheckedIndexer ind((npy_ucs4 *)buf2.buf, len2); - res = findchar(ind, len2, *traverse_buf); + res = find_char(ind, len2, *traverse_buf); break; } } if (res < 0) { break; } - num_bytes -= traverse_buf.num_bytes_next_character(); - j--; - if (j > 0) { + num_bytes -= current_point_bytes;; + new_stop--; + // Do not step to character -1: can't find it's start for utf-8. 
+ if (new_stop > 0) { traverse_buf--; } } } - Buffer offset_buf = buf1 + i; + Buffer offset_buf = buf1 + new_start; if (enc == ENCODING::UTF8) { offset_buf.buffer_memcpy(out, num_bytes); return num_bytes; } - offset_buf.buffer_memcpy(out, j - i + 1); - out.buffer_fill_with_zeros_after_index(j - i + 1); - return j - i + 1; + offset_buf.buffer_memcpy(out, new_stop - new_start); + out.buffer_fill_with_zeros_after_index(new_stop - new_start); + return new_stop - new_start; } template @@ -1306,9 +1354,10 @@ findslice_for_replace(CheckedIndexer buf1, npy_intp len1, return 0; } if (len2 == 1) { - return (npy_intp) findchar(buf1, len1, *buf2); + return (npy_intp) find_char(buf1, len1, *buf2); } - return (npy_intp) fastsearch(buf1.buffer, len1, buf2.buffer, len2, -1, FAST_SEARCH); + return (npy_intp) fastsearch(buf1.buffer, len1, buf2.buffer, len2, + -1, FAST_SEARCH); } @@ -1462,7 +1511,7 @@ string_expandtabs_length(Buffer buf, npy_int64 tabsize) line_pos = 0; } } - if (new_len == PY_SSIZE_T_MAX || new_len < 0) { + if (new_len > INT_MAX || new_len < 0) { npy_gil_error(PyExc_OverflowError, "new string is too long"); return -1; } @@ -1513,8 +1562,8 @@ template static inline npy_intp string_pad(Buffer buf, npy_int64 width, npy_ucs4 fill, JUSTPOSITION pos, Buffer out) { - size_t finalwidth = width > 0 ? width : 0; - if (finalwidth > PY_SSIZE_T_MAX) { + size_t final_width = width > 0 ? 
width : 0; + if (final_width > PY_SSIZE_T_MAX) { npy_gil_error(PyExc_OverflowError, "padded string is too long"); return -1; } @@ -1530,23 +1579,23 @@ string_pad(Buffer buf, npy_int64 width, npy_ucs4 fill, JUSTPOSITION pos, Bu len = len_codepoints; } - if (len_codepoints >= finalwidth) { + if (len_codepoints >= final_width) { buf.buffer_memcpy(out, len); return (npy_intp) len; } size_t left, right; if (pos == JUSTPOSITION::CENTER) { - size_t pad = finalwidth - len_codepoints; - left = pad / 2 + (pad & finalwidth & 1); + size_t pad = final_width - len_codepoints; + left = pad / 2 + (pad & final_width & 1); right = pad - left; } else if (pos == JUSTPOSITION::LEFT) { left = 0; - right = finalwidth - len_codepoints; + right = final_width - len_codepoints; } else { - left = finalwidth - len_codepoints; + left = final_width - len_codepoints; right = 0; } @@ -1564,7 +1613,7 @@ string_pad(Buffer buf, npy_int64 width, npy_ucs4 fill, JUSTPOSITION pos, Bu out.advance_chars_or_bytes(out.buffer_memset(fill, right)); } - return finalwidth; + return final_width; } @@ -1572,7 +1621,7 @@ template static inline npy_intp string_zfill(Buffer buf, npy_int64 width, Buffer out) { - size_t finalwidth = width > 0 ? width : 0; + size_t final_width = width > 0 ? width : 0; npy_ucs4 fill = '0'; npy_intp new_len = string_pad(buf, width, fill, JUSTPOSITION::RIGHT, out); @@ -1580,7 +1629,7 @@ string_zfill(Buffer buf, npy_int64 width, Buffer out) return -1; } - size_t offset = finalwidth - buf.num_codepoints(); + size_t offset = final_width - buf.num_codepoints(); Buffer tmp = out + offset; npy_ucs4 c = *tmp; diff --git a/numpy/_core/src/umath/string_fastsearch.h b/numpy/_core/src/umath/string_fastsearch.h index 61abdcb5ad19..95d0ee4fb214 100644 --- a/numpy/_core/src/umath/string_fastsearch.h +++ b/numpy/_core/src/umath/string_fastsearch.h @@ -9,6 +9,7 @@ #include #include +#include #include @@ -28,13 +29,37 @@ algorithm, which has worst-case O(n) runtime and best-case O(n/k). 
Also compute a table of shifts to achieve O(n/k) in more cases, and often (data dependent) deduce larger shifts than pure C&P can - deduce. See stringlib_find_two_way_notes.txt in this folder for a - detailed explanation. */ + deduce. See https://github.com/python/cpython/blob/main/Objects/stringlib/stringlib_find_two_way_notes.txt + in the CPython repository for a detailed explanation.*/ +/** + * @internal + * @brief Mode for counting the number of occurrences of a substring + */ #define FAST_COUNT 0 + +/** + * @internal + * @brief Mode for performing a forward search for a substring + */ #define FAST_SEARCH 1 + +/** + * @internal + * @brief Mode for performing a reverse (backward) search for a substring + */ #define FAST_RSEARCH 2 +/** + * @file_internal + * @brief Defines the bloom filter width based on the size of LONG_BIT. + * + * This macro sets the value of `STRINGLIB_BLOOM_WIDTH` depending on the + * size of the system's LONG_BIT. It ensures that the bloom filter + * width is at least 32 bits. + * + * @error If LONG_BIT is smaller than 32, a compilation error will occur. + */ #if LONG_BIT >= 128 #define STRINGLIB_BLOOM_WIDTH 128 #elif LONG_BIT >= 64 @@ -45,39 +70,98 @@ #error "LONG_BIT is smaller than 32" #endif +/** + * @file_internal + * @brief Adds a character to the bloom filter mask. + * + * This macro sets the bit in the bloom filter `mask` corresponding to the + * character `ch`. It uses the `STRINGLIB_BLOOM_WIDTH` to ensure the bit is + * within range. + * + * @param mask The bloom filter mask where the character will be added. + * @param ch The character to add to the bloom filter mask. + */ #define STRINGLIB_BLOOM_ADD(mask, ch) \ ((mask |= (1UL << ((ch) & (STRINGLIB_BLOOM_WIDTH -1))))) + +/** + * @file_internal + * @brief Checks if a character is present in the bloom filter mask. + * + * This macro checks if the bit corresponding to the character `ch` is set + * in the bloom filter `mask`. + * + * @param mask The bloom filter mask to check. 
+ * @param ch The character to check in the bloom filter mask. + * @return 1 if the character is present, 0 otherwise. + */ #define STRINGLIB_BLOOM(mask, ch) \ ((mask & (1UL << ((ch) & (STRINGLIB_BLOOM_WIDTH -1))))) -#define FORWARD_DIRECTION 1 -#define BACKWARD_DIRECTION -1 +/** + * @file_internal + * @brief Threshold for using memchr or wmemchr in character search. + * + * If the search length exceeds this value, memchr/wmemchr is used. + */ #define MEMCHR_CUT_OFF 15 +/** + * @internal + * @brief A checked indexer for buffers of a specified character type. + * + * This structure provides safe indexing into a buffer with boundary checks. + * + * @internal + * + * @tparam char_type The type of characters stored in the buffer. + */ template struct CheckedIndexer { - char_type *buffer; - size_t length; + char_type *buffer; ///< Pointer to the buffer. + size_t length; ///< Length of the buffer. + /** + * @brief Default constructor that initializes the buffer to NULL and length to 0. + */ CheckedIndexer() { buffer = NULL; length = 0; } + /** + * @brief Constructor that initializes the indexer with a given buffer and length. + * + * @param buf Pointer to the character buffer. + * @param len Length of the buffer. + */ CheckedIndexer(char_type *buf, size_t len) { buffer = buf; length = len; } + /** + * @brief Dereference operator that returns the first character in the buffer. + * + * @return The first character in the buffer. + */ char_type operator*() { return *(this->buffer); } + /** + * @brief Subscript operator for safe indexing into the buffer. + * + * If the index is out of bounds, it returns 0. + * + * @param index Index to access in the buffer. + * @return The character at the specified index or 0 if out of bounds. + */ char_type operator[](size_t index) { @@ -87,6 +171,15 @@ struct CheckedIndexer { return this->buffer[index]; } + /** + * @brief Addition operator to move the indexer forward by a specified number of elements. 
+ * + * @param rhs Number of elements to move forward. + * @return A new CheckedIndexer instance with updated buffer and length. + * + * @note If the specified number of elements to move exceeds the length of the buffer, + * the indexer will be moved to the end of the buffer, and the length will be set to 0. + */ CheckedIndexer operator+(size_t rhs) { @@ -96,6 +189,15 @@ struct CheckedIndexer { return CheckedIndexer(this->buffer + rhs, this->length - rhs); } + /** + * @brief Addition assignment operator to move the indexer forward. + * + * @param rhs Number of elements to move forward. + * @return Reference to the current CheckedIndexer instance. + * + * @note If the specified number of elements to move exceeds the length of the buffer, + * the indexer will be moved to the end of the buffer, and the length will be set to 0. + */ CheckedIndexer& operator+=(size_t rhs) { @@ -107,6 +209,13 @@ struct CheckedIndexer { return *this; } + /** + * @brief Postfix increment operator. + * + * @return A CheckedIndexer instance before incrementing. + * + * @note If the indexer is at the end of the buffer, this operation has no effect. + */ CheckedIndexer operator++(int) { @@ -114,6 +223,14 @@ struct CheckedIndexer { return *this; } + /** + * @brief Subtraction assignment operator to move the indexer backward. + * + * @param rhs Number of elements to move backward. + * @return Reference to the current CheckedIndexer instance. + * + * @note If the indexer moves backward past the start of the buffer, the behavior is undefined. + */ CheckedIndexer& operator-=(size_t rhs) { @@ -122,6 +239,13 @@ struct CheckedIndexer { return *this; } + /** + * @brief Postfix decrement operator. + * + * @return A CheckedIndexer instance before decrementing. + * + * @note If the indexer moves backward past the start of the buffer, the behavior is undefined. 
+ */ CheckedIndexer operator--(int) { @@ -129,42 +253,86 @@ struct CheckedIndexer { return *this; } + /** + * @brief Subtraction operator to calculate the difference between two indexers. + * + * @param rhs Another CheckedIndexer instance to compare. + * @return The difference in pointers between the two indexers. + */ std::ptrdiff_t operator-(CheckedIndexer rhs) { return this->buffer - rhs.buffer; } + /** + * @brief Subtraction operator to move the indexer backward by a specified number of elements. + * + * @param rhs Number of elements to move backward. + * @return A new CheckedIndexer instance with updated buffer and length. + * + * @note If the indexer moves backward past the start of the buffer, the behavior is undefined. + */ CheckedIndexer operator-(size_t rhs) { return CheckedIndexer(this->buffer - rhs, this->length + rhs); } + /** + * @brief Greater-than comparison operator. + * + * @param rhs Another CheckedIndexer instance to compare. + * @return True if this indexer is greater than the right-hand side, otherwise false. + */ int operator>(CheckedIndexer rhs) { return this->buffer > rhs.buffer; } + /** + * @brief Greater-than-or-equal comparison operator. + * + * @param rhs Another CheckedIndexer instance to compare. + * @return True if this indexer is greater than or equal to the right-hand side, otherwise false. + */ int operator>=(CheckedIndexer rhs) { return this->buffer >= rhs.buffer; } + /** + * @brief Less-than comparison operator. + * + * @param rhs Another CheckedIndexer instance to compare. + * @return True if this indexer is less than the right-hand side, otherwise false. + */ int operator<(CheckedIndexer rhs) { return this->buffer < rhs.buffer; } + /** + * @brief Less-than-or-equal comparison operator. + * + * @param rhs Another CheckedIndexer instance to compare. + * @return True if this indexer is less than or equal to the right-hand side, otherwise false. 
+ */ int operator<=(CheckedIndexer rhs) { return this->buffer <= rhs.buffer; } + /** + * @brief Equality comparison operator. + * + * @param rhs Another CheckedIndexer instance to compare. + * @return True if both indexers point to the same buffer, otherwise false. + */ int operator==(CheckedIndexer rhs) { @@ -173,9 +341,27 @@ struct CheckedIndexer { }; +/** + * @internal + * @brief Finds the first occurrence of a specified character in a + * given range of a buffer. + * + * This function searches for the character `ch` in the buffer represented + * by the `CheckedIndexer`. It uses different methods depending on the size + * of the range `n`. If `n` exceeds the `MEMCHR_CUT_OFF`, it utilizes + * `memchr` or `wmemchr` for more efficient searching. + * + * @tparam char_type The type of characters in the buffer. + * @param s The `CheckedIndexer` instance representing the buffer to + * search within. + * @param n The number of characters to search through in the buffer. + * @param ch The character to search for. + * @return The index of the first occurrence of `ch` within the range, + * or -1 if the character is not found or the range is invalid. + */ template inline Py_ssize_t -findchar(CheckedIndexer s, Py_ssize_t n, char_type ch) +find_char(CheckedIndexer s, Py_ssize_t n, char_type ch) { char_type *p = s.buffer, *e = (s + n).buffer; @@ -208,9 +394,27 @@ findchar(CheckedIndexer s, Py_ssize_t n, char_type ch) return -1; } +/** + * @internal + * @brief Finds the last occurrence of a specified character in a + * given range of a buffer. + * + * This function searches for the character `ch` in the buffer represented + * by the `CheckedIndexer`. It scans the buffer from the end towards the + * beginning, returning the index of the last occurrence of the specified + * character. + * + * @tparam char_type The type of characters in the buffer. + * @param s The `CheckedIndexer` instance representing the buffer to + * search within. 
+ * @param n The number of characters to search through in the buffer. + * @param ch The character to search for. + * @return The index of the last occurrence of `ch` within the range, + * or -1 if the character is not found or the range is invalid. + */ template inline Py_ssize_t -rfindchar(CheckedIndexer s, Py_ssize_t n, char_type ch) +rfind_char(CheckedIndexer s, Py_ssize_t n, char_type ch) { CheckedIndexer p = s + n; while (p > s) { @@ -221,35 +425,67 @@ rfindchar(CheckedIndexer s, Py_ssize_t n, char_type ch) return -1; } - -/* Change to a 1 to see logging comments walk through the algorithm. */ +#undef MEMCHR_CUT_OFF + +/** + * @file_internal + * @brief Conditional logging for string fast search. + * + * Set to 1 to enable logging macros. + * + * @note These macros are used internally for debugging purposes + * and will be undefined later in the code. + */ #if 0 && STRINGLIB_SIZEOF_CHAR == 1 -# define LOG(...) printf(__VA_ARGS__) -# define LOG_STRING(s, n) printf("\"%.*s\"", (int)(n), s) -# define LOG_LINEUP() do { \ +/** Logs formatted output. */ +#define LOG(...) printf(__VA_ARGS__) + +/** Logs a string with a given length. */ +#define LOG_STRING(s, n) printf("\"%.*s\"", (int)(n), s) + +/** Logs the current state of the algorithm. */ +#define LOG_LINEUP() do { \ LOG("> "); LOG_STRING(haystack, len_haystack); LOG("\n> "); \ LOG("%*s",(int)(window_last - haystack + 1 - len_needle), ""); \ LOG_STRING(needle, len_needle); LOG("\n"); \ } while(0) #else -# define LOG(...) -# define LOG_STRING(s, n) -# define LOG_LINEUP() +#define LOG(...) +#define LOG_STRING(s, n) +#define LOG_LINEUP() #endif +/** + * @file_internal + * @brief Perform a lexicographic search for the maximal suffix in + * a given string. + * + * This function searches through the `needle` string to find the + * maximal suffix, which is essentially the largest lexicographic suffix. 
+ * Essentially this: + * - max(needle[i:] for i in range(len(needle)+1)) + * + * Additionally, it computes the period of the right half of the string. + * + * @param needle The string to search in. + * @param len_needle The length of the needle string. + * @param return_period Pointer to store the period of the found suffix. + * @param invert_alphabet Flag to invert the comparison logic. + * @return The index of the maximal suffix found in the needle string. + * + * @note If `invert_alphabet` is non-zero, character comparisons are reversed, + * treating smaller characters as larger. + * + */ template static inline Py_ssize_t -_lex_search(CheckedIndexer needle, Py_ssize_t len_needle, +lex_search(CheckedIndexer needle, Py_ssize_t len_needle, Py_ssize_t *return_period, int invert_alphabet) { - /* Do a lexicographic search. Essentially this: - >>> max(needle[i:] for i in range(len(needle)+1)) - Also find the period of the right half. */ - Py_ssize_t max_suffix = 0; - Py_ssize_t candidate = 1; - Py_ssize_t k = 0; - // The period of the right half. - Py_ssize_t period = 1; + Py_ssize_t max_suffix = 0; // Index of the current maximal suffix found. + Py_ssize_t candidate = 1; // Candidate index for potential maximal suffix. + Py_ssize_t k = 0; // Offset for comparing characters. + Py_ssize_t period = 1; // Period of the right half. while (candidate + k < len_needle) { // each loop increases candidate + k + max_suffix @@ -286,51 +522,54 @@ _lex_search(CheckedIndexer needle, Py_ssize_t len_needle, period = 1; } } + *return_period = period; return max_suffix; } +/** + * @file_internal + * @brief Perform a critical factorization on a string. + * + * This function splits the input string into two parts where the local + * period is maximal. 
+ * + * The function divides the input string as follows: + * - needle = (left := needle[:cut]) + (right := needle[cut:]) + * + * The local period is the minimal length of a string `w` such that: + * - left ends with `w` or `w` ends with left. + * - right starts with `w` or `w` starts with right. + * + * According to the Critical Factorization Theorem, this maximal local + * period is the global period of the string. The algorithm finds the + * cut using lexicographical order and its reverse to compute the maximal + * period, as shown by Crochemore and Perrin (1991). + * + * Example: + * For the string "GCAGAGAG", the split position (cut) is at 2, resulting in: + * - left = "GC" + * - right = "AGAGAG" + * The period of the right half is 2, and the repeated substring + * pattern "AG" verifies that this is the correct factorization. + * + * @param needle The input string as a CheckedIndexer. + * @param len_needle Length of the input string. + * @param return_period Pointer to store the computed period of the right half. + * @return The cut position where the string is factorized. + */ template static inline Py_ssize_t -_factorize(CheckedIndexer needle, +factorize(CheckedIndexer needle, Py_ssize_t len_needle, Py_ssize_t *return_period) { - /* Do a "critical factorization", making it so that: - >>> needle = (left := needle[:cut]) + (right := needle[cut:]) - where the "local period" of the cut is maximal. - - The local period of the cut is the minimal length of a string w - such that (left endswith w or w endswith left) - and (right startswith w or w startswith left). - - The Critical Factorization Theorem says that this maximal local - period is the global period of the string. - - Crochemore and Perrin (1991) show that this cut can be computed - as the later of two cuts: one that gives a lexicographically - maximal right half, and one that gives the same with the - with respect to a reversed alphabet-ordering. 
- - This is what we want to happen: - >>> x = "GCAGAGAG" - >>> cut, period = factorize(x) - >>> x[:cut], (right := x[cut:]) - ('GC', 'AGAGAG') - >>> period # right half period - 2 - >>> right[period:] == right[:-period] - True - - This is how the local period lines up in the above example: - GC | AGAGAG - AGAGAGC = AGAGAGC - The length of this minimal repetition is 7, which is indeed the - period of the original string. */ - Py_ssize_t cut1, period1, cut2, period2, cut, period; - cut1 = _lex_search(needle, len_needle, &period1, 0); - cut2 = _lex_search(needle, len_needle, &period2, 1); + + // Perform lexicographical search to find the first cut (normal order) + cut1 = lex_search(needle, len_needle, &period1, 0); + // Perform lexicographical search to find the second cut (reversed alphabet order) + cut2 = lex_search(needle, len_needle, &period2, 1); // Take the later cut. if (cut1 > cut2) { @@ -351,42 +590,91 @@ _factorize(CheckedIndexer needle, } +/** + * @file_internal + * @brief Internal macro to define the shift type used in the table. + */ #define SHIFT_TYPE uint8_t + +/** + * @file_internal + * @brief Internal macro to define the maximum shift value. + */ #define MAX_SHIFT UINT8_MAX + +/** + * @file_internal + * @brief Internal macro to define the number of bits for the table size. + */ #define TABLE_SIZE_BITS 6u + +/** + * @file_internal + * @brief Internal macro to define the table size based on TABLE_SIZE_BITS. + */ #define TABLE_SIZE (1U << TABLE_SIZE_BITS) + +/** + * @file_internal + * @brief Internal macro to define the table mask used for bitwise operations. + */ #define TABLE_MASK (TABLE_SIZE - 1U) +/** + * @file_internal + * @brief Struct to store precomputed data for string search algorithms. + * + * This structure holds all the necessary precomputed values needed + * to perform efficient string search operations on the given `needle` string. + * + * @tparam char_type Type of the characters in the string. 
+ */ template struct prework { - CheckedIndexer needle; - Py_ssize_t len_needle; - Py_ssize_t cut; - Py_ssize_t period; - Py_ssize_t gap; - int is_periodic; - SHIFT_TYPE table[TABLE_SIZE]; + CheckedIndexer needle; ///< Indexer for the needle (substring). + Py_ssize_t len_needle; ///< Length of the needle. + Py_ssize_t cut; ///< Critical factorization cut point. + Py_ssize_t period; ///< Period of the right half of the needle. + Py_ssize_t gap; ///< Gap value for skipping during search. + int is_periodic; ///< Non-zero if the needle is periodic. + SHIFT_TYPE table[TABLE_SIZE]; ///< Shift table for optimizing search. }; +/** + * @file_internal + * @brief Preprocesses the needle (substring) for optimized string search. + * + * This function performs preprocessing on the given needle (substring) + * to prepare auxiliary data that will be used to optimize the string + * search algorithm. The preprocessing involves factorization of the + * substring, periodicity detection, gap computation, and the generation + * of a Boyer-Moore "Bad Character" shift table. + * + * @tparam char_type The character type of the string. + * @param needle The substring to be searched. + * @param len_needle The length of the substring. + * @param p A pointer to the search_prep_data structure where the preprocessing + * results will be stored. + */ template static void -_preprocess(CheckedIndexer needle, Py_ssize_t len_needle, +preprocess(CheckedIndexer needle, Py_ssize_t len_needle, prework *p) { + // Store the needle and its length, find the cut point and period. 
p->needle = needle; p->len_needle = len_needle; - p->cut = _factorize(needle, len_needle, &(p->period)); + p->cut = factorize(needle, len_needle, &(p->period)); assert(p->period + p->cut <= len_needle); - int cmp; - if (std::is_same::value) { - cmp = memcmp(needle.buffer, needle.buffer + (p->period * sizeof(npy_ucs4)), (size_t) p->cut); - } - else { - cmp = memcmp(needle.buffer, needle.buffer + p->period, (size_t) p->cut); - } + + // Compare parts of the needle to check for periodicity. + int cmp = memcmp(needle.buffer, needle.buffer + p->period, + (size_t) p->cut); p->is_periodic = (0 == cmp); + + // If periodic, gap is unused; otherwise, calculate period and gap. if (p->is_periodic) { assert(p->cut <= len_needle/2); assert(p->cut < p->period); @@ -407,6 +695,7 @@ _preprocess(CheckedIndexer needle, Py_ssize_t len_needle, } } } + // Fill up a compressed Boyer-Moore "Bad Character" table Py_ssize_t not_found_shift = Py_MIN(len_needle, MAX_SHIFT); for (Py_ssize_t i = 0; i < (Py_ssize_t)TABLE_SIZE; i++) { @@ -420,13 +709,36 @@ _preprocess(CheckedIndexer needle, Py_ssize_t len_needle, } } +/** + * @file_internal + * @brief Searches for a needle (substring) within a haystack (string) + * using the Two-Way string matching algorithm. + * + * This function efficiently searches for a needle within a haystack using + * preprocessed data. It handles both periodic and non-periodic needles + * and optimizes the search process with a bad character shift table. The + * function iterates through the haystack in windows, skipping over sections + * that do not match, improving performance and reducing comparisons. + * + * For more details, refer to the following resources: + * - Crochemore and Perrin's (1991) Two-Way algorithm: + * [Two-Way Algorithm](http://www-igm.univ-mlv.fr/~lecroq/string/node26.html#SECTION00260). + * + * @tparam char_type The type of the characters in the needle and haystack + * (e.g., npy_ucs4). 
+ * @param haystack The string to search within, wrapped in CheckedIndexer. + * @param len_haystack The length of the haystack. + * @param p A pointer to the search_prep_data structure containing + * preprocessed data for the needle. + * @return The starting index of the first occurrence of the needle + * within the haystack, or -1 if the needle is not found. + */ template static Py_ssize_t -_two_way(CheckedIndexer haystack, Py_ssize_t len_haystack, +two_way(CheckedIndexer haystack, Py_ssize_t len_haystack, prework *p) { - // Crochemore and Perrin's (1991) Two-Way algorithm. - // See http://www-igm.univ-mlv.fr/~lecroq/string/node26.html#SECTION00260 + // Initialize key variables for search. const Py_ssize_t len_needle = p->len_needle; const Py_ssize_t cut = p->cut; Py_ssize_t period = p->period; @@ -438,10 +750,13 @@ _two_way(CheckedIndexer haystack, Py_ssize_t len_haystack, LOG("===== Two-way: \"%s\" in \"%s\". =====\n", needle, haystack); if (p->is_periodic) { + // Handle the case where the needle is periodic. + // Memory optimization is used to skip over already checked segments. LOG("Needle is periodic.\n"); Py_ssize_t memory = 0; periodicwindowloop: while (window_last < haystack_end) { + // Bad-character shift loop to skip parts of the haystack. assert(memory == 0); for (;;) { LOG_LINEUP(); @@ -459,6 +774,7 @@ _two_way(CheckedIndexer haystack, Py_ssize_t len_haystack, window = window_last - len_needle + 1; assert((window[len_needle - 1] & TABLE_MASK) == (needle[len_needle - 1] & TABLE_MASK)); + // Check if the right half of the pattern matches the haystack. Py_ssize_t i = Py_MAX(cut, memory); for (; i < len_needle; i++) { if (needle[i] != window[i]) { @@ -468,6 +784,7 @@ _two_way(CheckedIndexer haystack, Py_ssize_t len_haystack, goto periodicwindowloop; } } + // Check if the left half of the pattern matches the haystack. 
for (i = memory; i < cut; i++) { if (needle[i] != window[i]) { LOG("Left half does not match.\n"); @@ -476,6 +793,7 @@ _two_way(CheckedIndexer haystack, Py_ssize_t len_haystack, if (window_last >= haystack_end) { return -1; } + // Apply memory adjustments and shifts if mismatches occur. Py_ssize_t shift = table[window_last[0] & TABLE_MASK]; if (shift) { // A mismatch has been identified to the right @@ -496,12 +814,15 @@ _two_way(CheckedIndexer haystack, Py_ssize_t len_haystack, } } else { + // Handle the case where the needle is non-periodic. + // General shift logic based on a gap is used to improve performance. Py_ssize_t gap = p->gap; period = Py_MAX(gap, period); LOG("Needle is not periodic.\n"); Py_ssize_t gap_jump_end = Py_MIN(len_needle, cut + gap); windowloop: while (window_last < haystack_end) { + // Bad-character shift loop for non-periodic patterns. for (;;) { LOG_LINEUP(); Py_ssize_t shift = table[window_last[0] & TABLE_MASK]; @@ -517,6 +838,7 @@ _two_way(CheckedIndexer haystack, Py_ssize_t len_haystack, window = window_last - len_needle + 1; assert((window[len_needle - 1] & TABLE_MASK) == (needle[len_needle - 1] & TABLE_MASK)); + // Check the right half of the pattern for a match. for (Py_ssize_t i = cut; i < gap_jump_end; i++) { if (needle[i] != window[i]) { LOG("Early right half mismatch: jump by gap.\n"); @@ -525,6 +847,7 @@ _two_way(CheckedIndexer haystack, Py_ssize_t len_haystack, goto windowloop; } } + // Continue checking the remaining right half of the pattern. for (Py_ssize_t i = gap_jump_end; i < len_needle; i++) { if (needle[i] != window[i]) { LOG("Late right half mismatch.\n"); @@ -533,6 +856,7 @@ _two_way(CheckedIndexer haystack, Py_ssize_t len_haystack, goto windowloop; } } + // Check the left half of the pattern for a match. 
for (Py_ssize_t i = 0; i < cut; i++) { if (needle[i] != window[i]) { LOG("Left half does not match.\n"); @@ -549,38 +873,70 @@ _two_way(CheckedIndexer haystack, Py_ssize_t len_haystack, } +/** + * @file_internal + * @brief Finds the first occurrence of a needle (substring) within a haystack (string). + * + * This function applies the two-way string matching algorithm to efficiently + * search for a needle (substring) within a haystack (main string). + * + * @tparam char_type The character type of the strings. + * @param haystack The string in which to search for the needle. + * @param len_haystack The length of the haystack string. + * @param needle The substring to search for in the haystack. + * @param len_needle The length of the needle substring. + * @return The position of the first occurrence of the needle in the haystack, + * or -1 if the needle is not found. + */ template static inline Py_ssize_t -_two_way_find(CheckedIndexer haystack, Py_ssize_t len_haystack, +two_way_find(CheckedIndexer haystack, Py_ssize_t len_haystack, CheckedIndexer needle, Py_ssize_t len_needle) { LOG("###### Finding \"%s\" in \"%s\".\n", needle, haystack); prework p; - _preprocess(needle, len_needle, &p); - return _two_way(haystack, len_haystack, &p); + preprocess(needle, len_needle, &p); + return two_way(haystack, len_haystack, &p); } +/** + * @file_internal + * @brief Counts the occurrences of a needle (substring) within a haystack (string). + * + * This function applies the two-way string matching algorithm to count how many + * times a needle (substring) appears within a haystack (main string). It stops + * counting when the maximum number of occurrences (`max_count`) is reached. + * + * @tparam char_type The character type of the strings. + * @param haystack The string in which to search for occurrences of the needle. + * @param len_haystack The length of the haystack string. + * @param needle The substring to search for in the haystack. 
+ * @param len_needle The length of the needle substring. + * @param max_count The maximum number of occurrences to count before returning. + * @return The number of occurrences of the needle in the haystack. + * If the maximum count is reached, it returns `max_count`. + */ template static inline Py_ssize_t -_two_way_count(CheckedIndexer haystack, Py_ssize_t len_haystack, +two_way_count(CheckedIndexer haystack, Py_ssize_t len_haystack, CheckedIndexer needle, Py_ssize_t len_needle, - Py_ssize_t maxcount) + Py_ssize_t max_count) { LOG("###### Counting \"%s\" in \"%s\".\n", needle, haystack); prework p; - _preprocess(needle, len_needle, &p); + preprocess(needle, len_needle, &p); Py_ssize_t index = 0, count = 0; while (1) { Py_ssize_t result; - result = _two_way(haystack + index, - len_haystack - index, &p); + result = two_way(haystack + index, + len_haystack - index, &p); if (result == -1) { return count; } count++; - if (count == maxcount) { - return maxcount; + if (count == max_count) { + return max_count; } index += result + len_needle; } @@ -588,8 +944,8 @@ _two_way_count(CheckedIndexer haystack, Py_ssize_t len_haystack, } #undef SHIFT_TYPE -#undef NOT_FOUND -#undef SHIFT_OVERFLOW +#undef MAX_SHIFT + #undef TABLE_SIZE_BITS #undef TABLE_SIZE #undef TABLE_MASK @@ -598,11 +954,35 @@ _two_way_count(CheckedIndexer haystack, Py_ssize_t len_haystack, #undef LOG_STRING #undef LOG_LINEUP +/** + * @internal + * @brief A function that searches for a substring `p` in the + * string `s` using a bloom filter to optimize character matching. + * + * This function searches for occurrences of a pattern `p` in + * the given string `s`. It uses a bloom filter for fast rejection + * of non-matching characters and performs character-by-character + * comparison for potential matches. The algorithm is based on the + * Boyer-Moore string search technique. + * + * @tparam char_type The type of characters in the strings. + * @param s The haystack (string) to search in. 
+ * @param n The length of the haystack string `s`. + * @param p The needle (substring) to search for. + * @param m The length of the needle substring `p`. + * @param max_count The maximum number of matches to return. + * @param mode The search mode. + * If mode is `FAST_COUNT`, the function counts occurrences of the + * pattern, otherwise it returns the index of the first match. + * @return If mode is not `FAST_COUNT`, returns the index of the first + * occurrence, or `-1` if no match is found. If `FAST_COUNT`, + * returns the number of occurrences found up to `max_count`. + */ template static inline Py_ssize_t default_find(CheckedIndexer s, Py_ssize_t n, CheckedIndexer p, Py_ssize_t m, - Py_ssize_t maxcount, int mode) + Py_ssize_t max_count, int mode) { const Py_ssize_t w = n - m; Py_ssize_t mlast = m - 1, count = 0; @@ -610,6 +990,7 @@ default_find(CheckedIndexer s, Py_ssize_t n, const char_type last = p[mlast]; CheckedIndexer ss = s + mlast; + // Add pattern to bloom filter and calculate the gap. unsigned long mask = 0; for (Py_ssize_t i = 0; i < mlast; i++) { STRINGLIB_BLOOM_ADD(mask, p[i]); @@ -634,8 +1015,8 @@ default_find(CheckedIndexer s, Py_ssize_t n, return i; } count++; - if (count == maxcount) { - return maxcount; + if (count == max_count) { + return max_count; } i = i + mlast; continue; @@ -659,11 +1040,26 @@ default_find(CheckedIndexer s, Py_ssize_t n, } +/** + * @internal + * @brief Performs an adaptive string search using a bloom filter and fallback + * to two-way search for large data. + * + * @tparam char_type The type of characters in the string. + * @param s The haystack to search in. + * @param n Length of the haystack. + * @param p The needle to search for. + * @param m Length of the needle. + * @param max_count Maximum number of matches to count. + * @param mode Search mode. + * @return The index of the first occurrence of the needle, or -1 if not found. + * If in FAST_COUNT mode, returns the number of matches found up to max_count. 
+ */ template static Py_ssize_t adaptive_find(CheckedIndexer s, Py_ssize_t n, CheckedIndexer p, Py_ssize_t m, - Py_ssize_t maxcount, int mode) + Py_ssize_t max_count, int mode) { const Py_ssize_t w = n - m; Py_ssize_t mlast = m - 1, count = 0; @@ -696,8 +1092,8 @@ adaptive_find(CheckedIndexer s, Py_ssize_t n, return i; } count++; - if (count == maxcount) { - return maxcount; + if (count == max_count) { + return max_count; } i = i + mlast; continue; @@ -705,11 +1101,11 @@ adaptive_find(CheckedIndexer s, Py_ssize_t n, hits += j + 1; if (hits > m / 4 && w - i > 2000) { if (mode == FAST_SEARCH) { - res = _two_way_find(s + i, n - i, p, m); + res = two_way_find(s + i, n - i, p, m); return res == -1 ? -1 : res + i; } else { - res = _two_way_count(s + i, n - i, p, m, maxcount - count); + res = two_way_count(s + i, n - i, p, m, max_count - count); return res + count; } } @@ -732,11 +1128,28 @@ adaptive_find(CheckedIndexer s, Py_ssize_t n, } +/** + * @internal + * @brief Performs a reverse Boyer-Moore string search. + * + * This function searches for the last occurrence of a pattern in a string, + * utilizing the Boyer-Moore algorithm with a bloom filter for fast skipping + * of mismatches. + * + * @tparam char_type The type of characters in the string (e.g., char, wchar_t). + * @param s The haystack to search in. + * @param n Length of the haystack. + * @param p The needle (pattern) to search for. + * @param m Length of the needle (pattern). + * @param max_count Maximum number of matches to count (not used in this version). + * @param mode Search mode (not used, only support right find mode). + * @return The index of the last occurrence of the needle, or -1 if not found. 
+ */ template static Py_ssize_t default_rfind(CheckedIndexer s, Py_ssize_t n, CheckedIndexer p, Py_ssize_t m, - Py_ssize_t maxcount, int mode) + Py_ssize_t max_count, int mode) { /* create compressed boyer-moore delta 1 table */ unsigned long mask = 0; @@ -783,17 +1196,32 @@ default_rfind(CheckedIndexer s, Py_ssize_t n, } +/** + * @internal + * @brief Counts occurrences of a specified character in a given string. + * + * This function iterates through the string `s` and counts how many times + * the character `p0` appears, stopping when the count reaches `max_count`. + * + * @tparam char_type The type of characters in the string. + * @param s The string in which to count occurrences of the character. + * @param n The length of the string `s`. + * @param p0 The character to count in the string. + * @param max_count The maximum number of occurrences to count before stopping. + * @return The total count of occurrences of `p0` in `s`, or `max_count` + * if that many occurrences were found. + */ template static inline Py_ssize_t countchar(CheckedIndexer s, Py_ssize_t n, - const char_type p0, Py_ssize_t maxcount) + const char_type p0, Py_ssize_t max_count) { Py_ssize_t i, count = 0; for (i = 0; i < n; i++) { if (s[i] == p0) { count++; - if (count == maxcount) { - return maxcount; + if (count == max_count) { + return max_count; } } } @@ -801,16 +1229,40 @@ countchar(CheckedIndexer s, Py_ssize_t n, } +/** + * @internal + * @brief Searches for occurrences of a substring `p` in the string `s` + * using various optimized search algorithms. + * + * This function determines the most appropriate searching method based on + * the lengths of the input string `s` and the pattern `p`, as well as the + * specified search mode. It handles special cases for patterns of length 0 or 1 + * and selects between default, two-way, adaptive, or reverse search algorithms. + * + * @tparam char_type The type of characters in the strings. + * @param s The haystack (string) to search in. 
+ * @param n The length of the haystack string `s`. + * @param p The needle (substring) to search for. + * @param m The length of the needle substring `p`. + * @param max_count The maximum number of matches to return. + * @param mode The search mode, which can be: + * - `FAST_SEARCH`: Searches for the first occurrence. + * - `FAST_RSEARCH`: Searches for the last occurrence. + * - `FAST_COUNT`: Counts occurrences of the pattern. + * @return If `mode` is not `FAST_COUNT`, returns the index of the first occurrence + * of `p` in `s`, or `-1` if no match is found. If `FAST_COUNT`, returns + * the number of occurrences found up to `max_count`. + */ template inline Py_ssize_t fastsearch(char_type* s, Py_ssize_t n, char_type* p, Py_ssize_t m, - Py_ssize_t maxcount, int mode) + Py_ssize_t max_count, int mode) { CheckedIndexer s_(s, n); CheckedIndexer p_(p, m); - if (n < m || (mode == FAST_COUNT && maxcount == 0)) { + if (n < m || (mode == FAST_COUNT && max_count == 0)) { return -1; } @@ -821,17 +1273,17 @@ fastsearch(char_type* s, Py_ssize_t n, } /* use special case for 1-character strings */ if (mode == FAST_SEARCH) - return findchar(s_, n, p_[0]); + return find_char(s_, n, p_[0]); else if (mode == FAST_RSEARCH) - return rfindchar(s_, n, p_[0]); + return rfind_char(s_, n, p_[0]); else { - return countchar(s_, n, p_[0], maxcount); + return countchar(s_, n, p_[0], max_count); } } if (mode != FAST_RSEARCH) { if (n < 2500 || (m < 100 && n < 30000) || m < 6) { - return default_find(s_, n, p_, m, maxcount, mode); + return default_find(s_, n, p_, m, max_count, mode); } else if ((m >> 2) * 3 < (n >> 2)) { /* 33% threshold, but don't overflow. */ @@ -840,24 +1292,25 @@ fastsearch(char_type* s, Py_ssize_t n, expensive O(m) startup cost of the two-way algorithm will surely pay off. 
*/ if (mode == FAST_SEARCH) { - return _two_way_find(s_, n, p_, m); + return two_way_find(s_, n, p_, m); } else { - return _two_way_count(s_, n, p_, m, maxcount); + return two_way_count(s_, n, p_, m, max_count); } } else { + // ReSharper restore CppRedundantElseKeyword /* To ensure that we have good worst-case behavior, here's an adaptive version of the algorithm, where if we match O(m) characters without any matches of the entire needle, then we predict that the startup cost of the two-way algorithm will probably be worth it. */ - return adaptive_find(s_, n, p_, m, maxcount, mode); + return adaptive_find(s_, n, p_, m, max_count, mode); } } else { /* FAST_RSEARCH */ - return default_rfind(s_, n, p_, m, maxcount, mode); + return default_rfind(s_, n, p_, m, max_count, mode); } } diff --git a/numpy/_core/src/umath/string_ufuncs.cpp b/numpy/_core/src/umath/string_ufuncs.cpp index 2bc4ce20acd6..18ed4534ea04 100644 --- a/numpy/_core/src/umath/string_ufuncs.cpp +++ b/numpy/_core/src/umath/string_ufuncs.cpp @@ -15,6 +15,7 @@ #include "dtypemeta.h" #include "convert_datatype.h" #include "gil_utils.h" +#include "templ_common.h" /* for npy_mul_size_with_overflow_size_t */ #include "string_ufuncs.h" #include "string_fastsearch.h" @@ -166,26 +167,48 @@ string_add(Buffer buf1, Buffer buf2, Buffer out) template -static inline void +static inline int string_multiply(Buffer buf1, npy_int64 reps, Buffer out) { size_t len1 = buf1.num_codepoints(); if (reps < 1 || len1 == 0) { out.buffer_fill_with_zeros_after_index(0); - return; + return 0; } + size_t width = out.buffer_width(); + // we know this is positive + size_t reps_ = (size_t)reps; + if (len1 == 1) { - out.buffer_memset(*buf1, reps); - out.buffer_fill_with_zeros_after_index(reps); + size_t end_index = reps_ > width ? 
width : reps_; + out.buffer_memset(*buf1, end_index); + out.buffer_fill_with_zeros_after_index(end_index); + return 0; } - else { - for (npy_int64 i = 0; i < reps; i++) { - buf1.buffer_memcpy(out, len1); - out += len1; - } - out.buffer_fill_with_zeros_after_index(0); + + size_t newlen; + if (NPY_UNLIKELY(npy_mul_with_overflow_size_t(&newlen, reps, len1) != 0) || newlen > PY_SSIZE_T_MAX) { + return -1; + } + + size_t pad = 0; + if (width < newlen) { + reps = width / len1; + pad = width % len1; + } + + for (npy_int64 i = 0; i < reps; i++) { + buf1.buffer_memcpy(out, len1); + out += len1; } + + buf1.buffer_memcpy(out, pad); + out += pad; + + out.buffer_fill_with_zeros_after_index(0); + + return 0; } @@ -238,7 +261,9 @@ string_multiply_strint_loop(PyArrayMethod_Context *context, while (N--) { Buffer buf(in1, elsize); Buffer outbuf(out, outsize); - string_multiply(buf, *(npy_int64 *)in2, outbuf); + if (NPY_UNLIKELY(string_multiply(buf, *(npy_int64 *)in2, outbuf) < 0)) { + npy_gil_error(PyExc_OverflowError, "Overflow detected in string multiply"); + } in1 += strides[0]; in2 += strides[1]; @@ -267,7 +292,9 @@ string_multiply_intstr_loop(PyArrayMethod_Context *context, while (N--) { Buffer buf(in2, elsize); Buffer outbuf(out, outsize); - string_multiply(buf, *(npy_int64 *)in1, outbuf); + if (NPY_UNLIKELY(string_multiply(buf, *(npy_int64 *)in1, outbuf) < 0)) { + npy_gil_error(PyExc_OverflowError, "Overflow detected in string multiply"); + } in1 += strides[0]; in2 += strides[1]; @@ -633,6 +660,67 @@ string_partition_index_loop(PyArrayMethod_Context *context, } +template +static int +string_slice_loop(PyArrayMethod_Context *context, + char *const data[], npy_intp const dimensions[], + npy_intp const strides[], NpyAuxData *NPY_UNUSED(auxdata)) +{ + int insize = context->descriptors[0]->elsize; + int outsize = context->descriptors[4]->elsize; + + char *in_ptr = data[0]; + char *start_ptr = data[1]; + char *stop_ptr = data[2]; + char *step_ptr = data[3]; + char *out_ptr = 
data[4]; + + npy_intp N = dimensions[0]; + + while (N--) { + Buffer inbuf(in_ptr, insize); + Buffer outbuf(out_ptr, outsize); + + // get the slice + npy_intp start = *(npy_intp*)start_ptr; + npy_intp stop = *(npy_intp*)stop_ptr; + npy_intp step = *(npy_intp*)step_ptr; + + // adjust slice to string length in codepoints + // and handle negative indices + size_t num_codepoints = inbuf.num_codepoints(); + npy_intp slice_length = PySlice_AdjustIndices(num_codepoints, &start, &stop, step); + + // iterate over slice and copy each character of the string + inbuf.advance_chars_or_bytes(start); + for (npy_intp i = 0; i < slice_length; i++) { + // copy one codepoint + inbuf.buffer_memcpy(outbuf, 1); + + // Move in inbuf by step. + inbuf += step; + + // Move in outbuf by the number of chars or bytes written + outbuf.advance_chars_or_bytes(1); + } + + // fill remaining outbuf with zero bytes + for (char *tmp = outbuf.buf; tmp < outbuf.after; tmp++) { + *tmp = 0; + } + + // Go to the next array element + in_ptr += strides[0]; + start_ptr += strides[1]; + stop_ptr += strides[2]; + step_ptr += strides[3]; + out_ptr += strides[4]; + } + + return 0; +} + + /* Resolve descriptors & promoter functions */ static NPY_CASTING @@ -643,6 +731,20 @@ string_addition_resolve_descriptors( PyArray_Descr *loop_descrs[3], npy_intp *NPY_UNUSED(view_offset)) { + npy_intp result_itemsize = given_descrs[0]->elsize + given_descrs[1]->elsize; + + /* NOTE: elsize can fit more than MAX_INT, but some code may still use ints */ + if (result_itemsize > NPY_MAX_INT || result_itemsize < 0) { + npy_intp length = result_itemsize; + if (given_descrs[0]->type == NPY_UNICODE) { + length /= 4; + } + PyErr_Format(PyExc_TypeError, + "addition result string of length %zd is too large to store inside array.", + length); + return _NPY_ERROR_OCCURRED_IN_CAST; + } + loop_descrs[0] = NPY_DT_CALL_ensure_canonical(given_descrs[0]); if (loop_descrs[0] == NULL) { return _NPY_ERROR_OCCURRED_IN_CAST; @@ -650,11 +752,14 @@ 
string_addition_resolve_descriptors( loop_descrs[1] = NPY_DT_CALL_ensure_canonical(given_descrs[1]); if (loop_descrs[1] == NULL) { + Py_DECREF(loop_descrs[0]); return _NPY_ERROR_OCCURRED_IN_CAST; } loop_descrs[2] = PyArray_DescrNew(loop_descrs[0]); if (loop_descrs[2] == NULL) { + Py_DECREF(loop_descrs[0]); + Py_DECREF(loop_descrs[1]); return _NPY_ERROR_OCCURRED_IN_CAST; } loop_descrs[2]->elsize += loop_descrs[1]->elsize; @@ -674,10 +779,11 @@ string_multiply_resolve_descriptors( if (given_descrs[2] == NULL) { PyErr_SetString( PyExc_TypeError, - "The 'out' kwarg is necessary. Use numpy.strings.multiply without it."); + "The 'out' kwarg is necessary when using the string multiply ufunc " + "directly. Use numpy.strings.multiply to multiply strings without " + "specifying 'out'."); return _NPY_ERROR_OCCURRED_IN_CAST; } - loop_descrs[0] = NPY_DT_CALL_ensure_canonical(given_descrs[0]); if (loop_descrs[0] == NULL) { return _NPY_ERROR_OCCURRED_IN_CAST; @@ -839,7 +945,7 @@ string_expandtabs_length_promoter(PyObject *NPY_UNUSED(ufunc), PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { - Py_INCREF(op_dtypes[0]); + Py_XINCREF(op_dtypes[0]); new_op_dtypes[0] = op_dtypes[0]; new_op_dtypes[1] = NPY_DT_NewRef(&PyArray_Int64DType); new_op_dtypes[2] = PyArray_DTypeFromTypeNum(NPY_DEFAULT_INT); @@ -1023,9 +1129,9 @@ string_partition_promoter(PyObject *NPY_UNUSED(ufunc), static NPY_CASTING string_partition_resolve_descriptors( PyArrayMethodObject *self, - PyArray_DTypeMeta *const NPY_UNUSED(dtypes[3]), - PyArray_Descr *const given_descrs[3], - PyArray_Descr *loop_descrs[3], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[6]), + PyArray_Descr *const given_descrs[6], + PyArray_Descr *loop_descrs[6], npy_intp *NPY_UNUSED(view_offset)) { if (!given_descrs[3] || !given_descrs[4] || !given_descrs[5]) { @@ -1047,6 +1153,53 @@ string_partition_resolve_descriptors( } +static int +string_slice_promoter(PyObject *NPY_UNUSED(ufunc), + 
PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[], + PyArray_DTypeMeta *new_op_dtypes[]) +{ + Py_INCREF(op_dtypes[0]); + new_op_dtypes[0] = op_dtypes[0]; + new_op_dtypes[1] = NPY_DT_NewRef(&PyArray_IntpDType); + new_op_dtypes[2] = NPY_DT_NewRef(&PyArray_IntpDType); + new_op_dtypes[3] = NPY_DT_NewRef(&PyArray_IntpDType); + Py_INCREF(op_dtypes[0]); + new_op_dtypes[4] = op_dtypes[0]; + return 0; +} + +static NPY_CASTING +string_slice_resolve_descriptors( + PyArrayMethodObject *self, + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[5]), + PyArray_Descr *const given_descrs[5], + PyArray_Descr *loop_descrs[5], + npy_intp *NPY_UNUSED(view_offset)) +{ + if (given_descrs[4]) { + PyErr_Format(PyExc_TypeError, + "The '%s' ufunc does not " + "currently support the 'out' keyword", + self->name); + return _NPY_ERROR_OCCURRED_IN_CAST; + } + + for (int i = 0; i < 4; i++) { + loop_descrs[i] = NPY_DT_CALL_ensure_canonical(given_descrs[i]); + if (loop_descrs[i] == NULL) { + return _NPY_ERROR_OCCURRED_IN_CAST; + } + } + + loop_descrs[4] = PyArray_DescrNew(loop_descrs[0]); + if (loop_descrs[4] == NULL) { + return _NPY_ERROR_OCCURRED_IN_CAST; + } + loop_descrs[4]->elsize = loop_descrs[0]->elsize; + + return NPY_NO_CASTING; +} + /* * Machinery to add the string loops to the existing ufuncs. 
*/ @@ -1396,7 +1549,7 @@ init_string_ufuncs(PyObject *umath) dtypes[0] = NPY_OBJECT; dtypes[1] = NPY_BOOL; - const char *unary_buffer_method_names[] = { + const char *const unary_buffer_method_names[] = { "isalpha", "isalnum", "isdigit", "isspace", "islower", "isupper", "istitle", "isdecimal", "isnumeric", }; @@ -1510,7 +1663,7 @@ init_string_ufuncs(PyObject *umath) dtypes[2] = dtypes[3] = NPY_INT64; dtypes[4] = NPY_BOOL; - const char *startswith_endswith_names[] = { + const char *const startswith_endswith_names[] = { "startswith", "endswith" }; @@ -1539,7 +1692,7 @@ init_string_ufuncs(PyObject *umath) dtypes[0] = dtypes[1] = NPY_OBJECT; - const char *strip_whitespace_names[] = { + const char *const strip_whitespace_names[] = { "_lstrip_whitespace", "_rstrip_whitespace", "_strip_whitespace" }; @@ -1566,7 +1719,7 @@ init_string_ufuncs(PyObject *umath) dtypes[0] = dtypes[1] = dtypes[2] = NPY_OBJECT; - const char *strip_chars_names[] = { + const char *const strip_chars_names[] = { "_lstrip_chars", "_rstrip_chars", "_strip_chars" }; @@ -1625,7 +1778,7 @@ init_string_ufuncs(PyObject *umath) dtypes[1] = NPY_INT64; - const char *center_ljust_rjust_names[] = { + const char *const center_ljust_rjust_names[] = { "_center", "_ljust", "_rjust" }; @@ -1702,7 +1855,7 @@ init_string_ufuncs(PyObject *umath) dtypes[0] = dtypes[1] = dtypes[3] = dtypes[4] = dtypes[5] = NPY_OBJECT; dtypes[2] = NPY_INT64; - const char *partition_names[] = {"_partition_index", "_rpartition_index"}; + const char *const partition_names[] = {"_partition_index", "_rpartition_index"}; static STARTPOSITION partition_startpositions[] = { STARTPOSITION::FRONT, STARTPOSITION::BACK @@ -1727,6 +1880,28 @@ init_string_ufuncs(PyObject *umath) } } + dtypes[0] = NPY_OBJECT; + dtypes[1] = NPY_INTP; + dtypes[2] = NPY_INTP; + dtypes[3] = NPY_INTP; + dtypes[4] = NPY_OBJECT; + if (init_ufunc( + umath, "_slice", 4, 1, dtypes, ENCODING::ASCII, + string_slice_loop, + string_slice_resolve_descriptors, NULL) < 0) { + return -1; 
+ } + if (init_ufunc( + umath, "_slice", 4, 1, dtypes, ENCODING::UTF32, + string_slice_loop, + string_slice_resolve_descriptors, NULL) < 0) { + return -1; + } + if (init_promoter(umath, "_slice", 4, 1, + string_slice_promoter) < 0) { + return -1; + } + return 0; } diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index 3135f9cbf9c0..8e9c3ddbe40c 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -26,6 +26,8 @@ #include "stringdtype/dtype.h" #include "stringdtype/utf8_utils.h" +#include + #define LOAD_TWO_INPUT_STRINGS(CONTEXT) \ const npy_packed_static_string *ps1 = (npy_packed_static_string *)in1; \ npy_static_string s1 = {0, NULL}; \ @@ -135,9 +137,9 @@ static int multiply_loop_core( size_t newsize; int overflowed = npy_mul_with_overflow_size_t( &newsize, cursize, factor); - if (overflowed) { - npy_gil_error(PyExc_MemoryError, - "Failed to allocate string in string multiply"); + if (overflowed || newsize > PY_SSIZE_T_MAX) { + npy_gil_error(PyExc_OverflowError, + "Overflow encountered in string multiply"); goto fail; } @@ -1028,6 +1030,25 @@ all_strings_promoter(PyObject *NPY_UNUSED(ufunc), PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { + if ((op_dtypes[0] != &PyArray_StringDType && + op_dtypes[1] != &PyArray_StringDType && + op_dtypes[2] != &PyArray_StringDType)) { + /* + * This promoter was triggered with only unicode arguments, so use + * unicode. This can happen due to `dtype=` support which sets the + * output DType/signature. 
+ */ + new_op_dtypes[0] = NPY_DT_NewRef(&PyArray_UnicodeDType); + new_op_dtypes[1] = NPY_DT_NewRef(&PyArray_UnicodeDType); + new_op_dtypes[2] = NPY_DT_NewRef(&PyArray_UnicodeDType); + return 0; + } + if ((signature[0] == &PyArray_UnicodeDType && + signature[1] == &PyArray_UnicodeDType && + signature[2] == &PyArray_UnicodeDType)) { + /* Unicode forced, but didn't override a string input: invalid */ + return -1; + } new_op_dtypes[0] = NPY_DT_NewRef(&PyArray_StringDType); new_op_dtypes[1] = NPY_DT_NewRef(&PyArray_StringDType); new_op_dtypes[2] = NPY_DT_NewRef(&PyArray_StringDType); @@ -1579,6 +1600,20 @@ string_expandtabs_strided_loop(PyArrayMethod_Context *context, return -1; } +static int +string_center_ljust_rjust_promoter( + PyObject *NPY_UNUSED(ufunc), + PyArray_DTypeMeta *const op_dtypes[], + PyArray_DTypeMeta *const signature[], + PyArray_DTypeMeta *new_op_dtypes[]) +{ + new_op_dtypes[0] = NPY_DT_NewRef(&PyArray_StringDType); + new_op_dtypes[1] = NPY_DT_NewRef(&PyArray_Int64DType); + new_op_dtypes[2] = NPY_DT_NewRef(&PyArray_StringDType); + new_op_dtypes[3] = NPY_DT_NewRef(&PyArray_StringDType); + return 0; +} + static NPY_CASTING center_ljust_rjust_resolve_descriptors( struct PyArrayMethodObject_tag *NPY_UNUSED(method), @@ -1701,7 +1736,7 @@ center_ljust_rjust_strided_loop(PyArrayMethod_Context *context, size_t num_codepoints = inbuf.num_codepoints(); npy_intp width = (npy_intp)*(npy_int64*)in2; - if (num_codepoints > (size_t)width) { + if ((npy_intp)num_codepoints > width) { width = num_codepoints; } @@ -1713,9 +1748,9 @@ center_ljust_rjust_strided_loop(PyArrayMethod_Context *context, width - num_codepoints); newsize += s1.size; - if (overflowed) { - npy_gil_error(PyExc_MemoryError, - "Failed to allocate string in %s", ufunc_name); + if (overflowed || newsize > PY_SSIZE_T_MAX) { + npy_gil_error(PyExc_OverflowError, + "Overflow encountered in %s", ufunc_name); goto fail; } @@ -1831,8 +1866,8 @@ zfill_strided_loop(PyArrayMethod_Context *context, { Buffer 
inbuf((char *)is.buf, is.size); size_t in_codepoints = inbuf.num_codepoints(); - size_t width = (size_t)*(npy_int64 *)in2; - if (in_codepoints > width) { + npy_intp width = (npy_intp)*(npy_int64*)in2; + if ((npy_intp)in_codepoints > width) { width = in_codepoints; } // number of leading one-byte characters plus the size of the @@ -1893,9 +1928,9 @@ zfill_strided_loop(PyArrayMethod_Context *context, static NPY_CASTING string_partition_resolve_descriptors( PyArrayMethodObject *self, - PyArray_DTypeMeta *const NPY_UNUSED(dtypes[3]), - PyArray_Descr *const given_descrs[3], - PyArray_Descr *loop_descrs[3], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[5]), + PyArray_Descr *const given_descrs[5], + PyArray_Descr *loop_descrs[5], npy_intp *NPY_UNUSED(view_offset)) { if (given_descrs[2] || given_descrs[3] || given_descrs[4]) { @@ -2109,6 +2144,184 @@ string_inputs_promoter( return 0; } +static int +slice_promoter(PyObject *NPY_UNUSED(ufunc), + PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[], + PyArray_DTypeMeta *new_op_dtypes[]) +{ + Py_INCREF(op_dtypes[0]); + new_op_dtypes[0] = op_dtypes[0]; + new_op_dtypes[1] = NPY_DT_NewRef(&PyArray_IntpDType); + new_op_dtypes[2] = NPY_DT_NewRef(&PyArray_IntpDType); + new_op_dtypes[3] = NPY_DT_NewRef(&PyArray_IntpDType); + Py_INCREF(op_dtypes[0]); + new_op_dtypes[4] = op_dtypes[0]; + return 0; +} + +static NPY_CASTING +slice_resolve_descriptors(PyArrayMethodObject *self, + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[5]), + PyArray_Descr *const given_descrs[5], + PyArray_Descr *loop_descrs[5], + npy_intp *NPY_UNUSED(view_offset)) +{ + if (given_descrs[4]) { + PyErr_Format(PyExc_TypeError, + "The StringDType '%s' ufunc does not " + "currently support the 'out' keyword", + self->name); + return _NPY_ERROR_OCCURRED_IN_CAST; + } + + for (int i = 0; i < 4; i++) { + Py_INCREF(given_descrs[i]); + loop_descrs[i] = given_descrs[i]; + } + + PyArray_StringDTypeObject *in_descr = + (PyArray_StringDTypeObject 
*)loop_descrs[0]; + int out_coerce = in_descr->coerce; + PyObject *out_na_object = in_descr->na_object; + loop_descrs[4] = (PyArray_Descr *)new_stringdtype_instance(out_na_object, + out_coerce); + if (loop_descrs[4] == NULL) { + return _NPY_ERROR_OCCURRED_IN_CAST; + } + + return NPY_NO_CASTING; +} + +static int +slice_strided_loop(PyArrayMethod_Context *context, char *const data[], + npy_intp const dimensions[], npy_intp const strides[], + NpyAuxData *NPY_UNUSED(auxdata)) +{ + char *iptr = data[0]; + char *start_ptr = data[1]; + char *stop_ptr = data[2]; + char *step_ptr = data[3]; + char *optr = data[4]; + + npy_intp N = dimensions[0]; + + npy_string_allocator *allocators[5] = {}; + NpyString_acquire_allocators(5, context->descriptors, allocators); + npy_string_allocator *iallocator = allocators[0]; + npy_string_allocator *oallocator = allocators[4]; + + // Build up an index mapping codepoint indices to locations in the encoded + // string. + std::vector codepoint_offsets; + + while (N--) { + // get the slice + npy_intp start = *(npy_intp *)start_ptr; + npy_intp stop = *(npy_intp *)stop_ptr; + npy_intp step = *(npy_intp *)step_ptr; + + npy_static_string is = {0, NULL}; + const npy_packed_static_string *ips = (npy_packed_static_string *)iptr; + npy_static_string os = {0, NULL}; + npy_packed_static_string *ops = (npy_packed_static_string *)optr; + int is_isnull = NpyString_load(iallocator, ips, &is); + if (is_isnull == -1) { + npy_gil_error(PyExc_MemoryError, "Failed to load string in slice"); + goto fail; + } + else if (is_isnull) { + npy_gil_error(PyExc_TypeError, "Cannot slice null string"); + goto fail; + } + + // number of codepoints in string + size_t num_codepoints = 0; + // leaves capacity the same as in previous loop iterations to avoid + // heap thrashing + codepoint_offsets.clear(); + { + const char *inbuf_ptr = is.buf; + const char *inbuf_ptr_end = is.buf + is.size; + + // ignore trailing nulls + while (inbuf_ptr < inbuf_ptr_end && *(inbuf_ptr_end - 1) 
== 0) { + inbuf_ptr_end--; + } + + while (inbuf_ptr < inbuf_ptr_end) { + num_codepoints++; + int num_bytes = num_bytes_for_utf8_character( + ((unsigned char *)inbuf_ptr)); + codepoint_offsets.push_back((unsigned char *)inbuf_ptr); + inbuf_ptr += num_bytes; + } + } + + // adjust slice to string length in codepoints + // and handle negative indices + npy_intp slice_length = + PySlice_AdjustIndices(num_codepoints, &start, &stop, step); + + if (step == 1) { + // step == 1 is the easy case, we can just use memcpy + unsigned char *start_bounded = ((size_t)start < num_codepoints + ? codepoint_offsets[start] + : (unsigned char *)is.buf + is.size); + unsigned char *stop_bounded = ((size_t)stop < num_codepoints + ? codepoint_offsets[stop] + : (unsigned char *)is.buf + is.size); + npy_intp outsize = stop_bounded - start_bounded; + outsize = outsize < 0 ? 0 : outsize; + + if (load_new_string(ops, &os, outsize, oallocator, "slice") < 0) { + goto fail; + } + + /* explicitly discard const; initializing new buffer */ + char *buf = (char *)os.buf; + + memcpy(buf, start_bounded, outsize); + } + else { + // handle step != 1 + // compute outsize + npy_intp outsize = 0; + for (int i = start; step > 0 ? 
i < stop : i > stop; i += step) { + outsize += num_bytes_for_utf8_character(codepoint_offsets[i]); + } + + if (outsize > 0) { + if (load_new_string(ops, &os, outsize, oallocator, "slice") < 0) { + goto fail; + } + + /* explicitly discard const; initializing new buffer */ + char *buf = (char *)os.buf; + + for (npy_intp i_idx = start, o_idx = 0; o_idx < slice_length; o_idx++, i_idx += step) { + int num_bytes = num_bytes_for_utf8_character(codepoint_offsets[i_idx]); + memcpy(buf, codepoint_offsets[i_idx], num_bytes); + buf += num_bytes; + } + } + } + + // move to next step + iptr += strides[0]; + start_ptr += strides[1]; + stop_ptr += strides[2]; + step_ptr += strides[3]; + optr += strides[4]; + } + + NpyString_release_allocators(5, allocators); + return 0; + +fail: + NpyString_release_allocators(5, allocators); + return -1; +} + static int string_object_bool_output_promoter( PyObject *ufunc, PyArray_DTypeMeta *const op_dtypes[], @@ -2396,7 +2609,7 @@ add_object_and_unicode_promoters(PyObject *umath, const char* ufunc_name, NPY_NO_EXPORT int init_stringdtype_ufuncs(PyObject *umath) { - static const char *comparison_ufunc_names[6] = { + static const char *const comparison_ufunc_names[6] = { "equal", "not_equal", "less", "less_equal", "greater_equal", "greater", }; @@ -2445,7 +2658,7 @@ init_stringdtype_ufuncs(PyObject *umath) return -1; } - const char *unary_loop_names[] = { + const char *const unary_loop_names[] = { "isalpha", "isdecimal", "isdigit", "isnumeric", "isspace", "isalnum", "istitle", "isupper", "islower", }; @@ -2532,6 +2745,17 @@ init_stringdtype_ufuncs(PyObject *umath) return -1; } + PyArray_DTypeMeta *out_strings_promoter_dtypes[] = { + &PyArray_UnicodeDType, + &PyArray_UnicodeDType, + &PyArray_StringDType, + }; + + if (add_promoter(umath, "add", out_strings_promoter_dtypes, 3, + all_strings_promoter) < 0) { + return -1; + } + INIT_MULTIPLY(Int64, int64); INIT_MULTIPLY(UInt64, uint64); @@ -2565,10 +2789,17 @@ init_stringdtype_ufuncs(PyObject *umath) 
"find", "rfind", "index", "rindex", "count", }; - PyArray_DTypeMeta *findlike_promoter_dtypes[] = { - &PyArray_StringDType, &PyArray_UnicodeDType, - &PyArray_IntAbstractDType, &PyArray_IntAbstractDType, - &PyArray_DefaultIntDType, + PyArray_DTypeMeta *findlike_promoter_dtypes[2][5] = { + { + &PyArray_StringDType, &PyArray_UnicodeDType, + &PyArray_IntAbstractDType, &PyArray_IntAbstractDType, + &PyArray_IntAbstractDType, + }, + { + &PyArray_UnicodeDType, &PyArray_StringDType, + &PyArray_IntAbstractDType, &PyArray_IntAbstractDType, + &PyArray_IntAbstractDType, + }, }; find_like_function *findlike_functions[] = { @@ -2588,11 +2819,12 @@ init_stringdtype_ufuncs(PyObject *umath) return -1; } - - if (add_promoter(umath, findlike_names[i], - findlike_promoter_dtypes, - 5, string_findlike_promoter) < 0) { - return -1; + for (int j=0; j<2; j++) { + if (add_promoter(umath, findlike_names[i], + findlike_promoter_dtypes[j], + 5, string_findlike_promoter) < 0) { + return -1; + } } } @@ -2606,10 +2838,17 @@ init_stringdtype_ufuncs(PyObject *umath) "startswith", "endswith", }; - PyArray_DTypeMeta *startswith_endswith_promoter_dtypes[] = { - &PyArray_StringDType, &PyArray_UnicodeDType, - &PyArray_IntAbstractDType, &PyArray_IntAbstractDType, - &PyArray_BoolDType, + PyArray_DTypeMeta *startswith_endswith_promoter_dtypes[2][5] = { + { + &PyArray_StringDType, &PyArray_UnicodeDType, + &PyArray_IntAbstractDType, &PyArray_IntAbstractDType, + &PyArray_BoolDType, + }, + { + &PyArray_UnicodeDType, &PyArray_StringDType, + &PyArray_IntAbstractDType, &PyArray_IntAbstractDType, + &PyArray_BoolDType, + }, }; static STARTPOSITION startswith_endswith_startposition[] = { @@ -2626,11 +2865,12 @@ init_stringdtype_ufuncs(PyObject *umath) return -1; } - - if (add_promoter(umath, startswith_endswith_names[i], - startswith_endswith_promoter_dtypes, - 5, string_startswith_endswith_promoter) < 0) { - return -1; + for (int j=0; j<2; j++) { + if (add_promoter(umath, startswith_endswith_names[i], + 
startswith_endswith_promoter_dtypes[j], + 5, string_startswith_endswith_promoter) < 0) { + return -1; + } } } @@ -2638,7 +2878,7 @@ init_stringdtype_ufuncs(PyObject *umath) &PyArray_StringDType, &PyArray_StringDType }; - const char *strip_whitespace_names[] = { + const char *const strip_whitespace_names[] = { "_lstrip_whitespace", "_rstrip_whitespace", "_strip_whitespace", }; @@ -2662,7 +2902,7 @@ init_stringdtype_ufuncs(PyObject *umath) &PyArray_StringDType, &PyArray_StringDType, &PyArray_StringDType }; - const char *strip_chars_names[] = { + const char *const strip_chars_names[] = { "_lstrip_chars", "_rstrip_chars", "_strip_chars", }; @@ -2702,24 +2942,38 @@ init_stringdtype_ufuncs(PyObject *umath) return -1; } - PyArray_DTypeMeta *replace_promoter_pyint_dtypes[] = { - &PyArray_StringDType, &PyArray_UnicodeDType, &PyArray_UnicodeDType, - &PyArray_IntAbstractDType, &PyArray_StringDType, - }; - - if (add_promoter(umath, "_replace", replace_promoter_pyint_dtypes, 5, - string_replace_promoter) < 0) { - return -1; - } - - PyArray_DTypeMeta *replace_promoter_int64_dtypes[] = { - &PyArray_StringDType, &PyArray_UnicodeDType, &PyArray_UnicodeDType, - &PyArray_Int64DType, &PyArray_StringDType, + PyArray_DTypeMeta *replace_promoter_unicode_dtypes[6][5] = { + { + &PyArray_StringDType, &PyArray_UnicodeDType, &PyArray_UnicodeDType, + &PyArray_IntAbstractDType, &PyArray_StringDType, + }, + { + &PyArray_UnicodeDType, &PyArray_StringDType, &PyArray_UnicodeDType, + &PyArray_IntAbstractDType, &PyArray_StringDType, + }, + { + &PyArray_UnicodeDType, &PyArray_UnicodeDType, &PyArray_StringDType, + &PyArray_IntAbstractDType, &PyArray_StringDType, + }, + { + &PyArray_StringDType, &PyArray_StringDType, &PyArray_UnicodeDType, + &PyArray_IntAbstractDType, &PyArray_StringDType, + }, + { + &PyArray_StringDType, &PyArray_UnicodeDType, &PyArray_StringDType, + &PyArray_IntAbstractDType, &PyArray_StringDType, + }, + { + &PyArray_UnicodeDType, &PyArray_StringDType, &PyArray_StringDType, + 
&PyArray_IntAbstractDType, &PyArray_StringDType, + }, }; - if (add_promoter(umath, "_replace", replace_promoter_int64_dtypes, 5, - string_replace_promoter) < 0) { - return -1; + for (int j=0; j<6; j++) { + if (add_promoter(umath, "_replace", replace_promoter_unicode_dtypes[j], 5, + string_replace_promoter) < 0) { + return -1; + } } PyArray_DTypeMeta *expandtabs_dtypes[] = { @@ -2737,9 +2991,9 @@ init_stringdtype_ufuncs(PyObject *umath) } PyArray_DTypeMeta *expandtabs_promoter_dtypes[] = { - &PyArray_StringDType, - (PyArray_DTypeMeta *)Py_None, - &PyArray_StringDType + &PyArray_StringDType, + &PyArray_IntAbstractDType, + &PyArray_StringDType }; if (add_promoter(umath, "_expandtabs", expandtabs_promoter_dtypes, @@ -2771,30 +3025,33 @@ init_stringdtype_ufuncs(PyObject *umath) return -1; } - PyArray_DTypeMeta *int_promoter_dtypes[] = { - &PyArray_StringDType, - (PyArray_DTypeMeta *)Py_None, - &PyArray_StringDType, - &PyArray_StringDType, - }; - - if (add_promoter(umath, center_ljust_rjust_names[i], - int_promoter_dtypes, 4, - string_multiply_promoter) < 0) { - return -1; - } - - PyArray_DTypeMeta *unicode_promoter_dtypes[] = { - &PyArray_StringDType, - (PyArray_DTypeMeta *)Py_None, - &PyArray_UnicodeDType, - &PyArray_StringDType, + PyArray_DTypeMeta *promoter_dtypes[3][4] = { + { + &PyArray_StringDType, + &PyArray_IntAbstractDType, + &PyArray_StringDType, + &PyArray_StringDType, + }, + { + &PyArray_StringDType, + &PyArray_IntAbstractDType, + &PyArray_UnicodeDType, + &PyArray_StringDType, + }, + { + &PyArray_UnicodeDType, + &PyArray_IntAbstractDType, + &PyArray_StringDType, + &PyArray_StringDType, + }, }; - if (add_promoter(umath, center_ljust_rjust_names[i], - unicode_promoter_dtypes, 4, - string_multiply_promoter) < 0) { - return -1; + for (int j=0; j<3; j++) { + if (add_promoter(umath, center_ljust_rjust_names[i], + promoter_dtypes[j], 4, + string_center_ljust_rjust_promoter) < 0) { + return -1; + } } } @@ -2810,13 +3067,13 @@ init_stringdtype_ufuncs(PyObject *umath) 
return -1; } - PyArray_DTypeMeta *int_promoter_dtypes[] = { + PyArray_DTypeMeta *zfill_promoter_dtypes[] = { &PyArray_StringDType, - (PyArray_DTypeMeta *)Py_None, + &PyArray_IntAbstractDType, &PyArray_StringDType, }; - if (add_promoter(umath, "_zfill", int_promoter_dtypes, 3, + if (add_promoter(umath, "_zfill", zfill_promoter_dtypes, 3, string_multiply_promoter) < 0) { return -1; } @@ -2829,7 +3086,7 @@ init_stringdtype_ufuncs(PyObject *umath) &PyArray_StringDType }; - const char *partition_names[] = {"_partition", "_rpartition"}; + const char *const partition_names[] = {"_partition", "_rpartition"}; static STARTPOSITION partition_startpositions[] = { STARTPOSITION::FRONT, STARTPOSITION::BACK @@ -2844,5 +3101,32 @@ init_stringdtype_ufuncs(PyObject *umath) } } + PyArray_DTypeMeta *slice_dtypes[] = { + &PyArray_StringDType, + &PyArray_IntpDType, + &PyArray_IntpDType, + &PyArray_IntpDType, + &PyArray_StringDType, + }; + + if (init_ufunc(umath, "_slice", slice_dtypes, slice_resolve_descriptors, + slice_strided_loop, 4, 1, NPY_NO_CASTING, + (NPY_ARRAYMETHOD_FLAGS) 0, NULL) < 0) { + return -1; + } + + PyArray_DTypeMeta *slice_promoter_dtypes[] = { + &PyArray_StringDType, + &PyArray_IntAbstractDType, + &PyArray_IntAbstractDType, + &PyArray_IntAbstractDType, + &PyArray_StringDType, + }; + + if (add_promoter(umath, "_slice", slice_promoter_dtypes, 5, + slice_promoter) < 0) { + return -1; + } + return 0; } diff --git a/numpy/_core/src/umath/svml b/numpy/_core/src/umath/svml index 32bf2a984207..3a713b130183 160000 --- a/numpy/_core/src/umath/svml +++ b/numpy/_core/src/umath/svml @@ -1 +1 @@ -Subproject commit 32bf2a98420762a63ab418aaa0a7d6e17eb9627a +Subproject commit 3a713b13018325451c1b939d3914ceff5ec68e19 diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index efad2a7be2b4..c4c5907e4cda 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -51,6 +51,7 @@ #include "npy_import.h" #include 
"extobj.h" +#include "alloc.h" #include "arrayobject.h" #include "arraywrap.h" #include "common.h" @@ -62,10 +63,10 @@ #include "legacy_array_method.h" #include "abstractdtypes.h" #include "mapping.h" - -/* TODO: Only for `NpyIter_GetTransferFlags` until it is public */ -#define NPY_ITERATOR_IMPLEMENTATION_CODE -#include "nditer_impl.h" +#include "npy_static_data.h" +#include "multiarraymodule.h" +#include "number.h" +#include "scalartypes.h" // for is_anyscalar_exact and scalar_value /********** PRINTF DEBUG TRACING **************/ #define NPY_UF_DBG_TRACING 0 @@ -99,8 +100,8 @@ static int resolve_descriptors(int nop, PyUFuncObject *ufunc, PyArrayMethodObject *ufuncimpl, PyArrayObject *operands[], PyArray_Descr *dtypes[], - PyArray_DTypeMeta *signature[], PyObject *inputs_tup, - NPY_CASTING casting); + PyArray_DTypeMeta *signature[], PyArray_DTypeMeta *original_DTypes[], + PyObject *inputs_tup, NPY_CASTING casting); /*UFUNC_API*/ @@ -126,6 +127,9 @@ PyUFunc_clearfperr() } +/* This many operands we optimize for on the stack. 
*/ +#define UFUNC_STACK_NARGS 5 + #define NPY_UFUNC_DEFAULT_INPUT_FLAGS \ NPY_ITER_READONLY | \ NPY_ITER_ALIGNED | \ @@ -519,6 +523,11 @@ _set_out_array(PyObject *obj, PyArrayObject **store) return 0; } + if (obj == Py_Ellipsis) { + PyErr_SetString(PyExc_TypeError, + "must use `...` as `out=...` and not per-operand/in a tuple"); + return -1; + } PyErr_SetString(PyExc_TypeError, "return arrays must be of ArrayType"); return -1; @@ -604,7 +613,7 @@ static int convert_ufunc_arguments(PyUFuncObject *ufunc, ufunc_full_args full_args, PyArrayObject *out_op[], PyArray_DTypeMeta *out_op_DTypes[], - npy_bool *force_legacy_promotion, npy_bool *allow_legacy_promotion, + npy_bool *force_legacy_promotion, npy_bool *promoting_pyscalars, PyObject *order_obj, NPY_ORDER *out_order, PyObject *casting_obj, NPY_CASTING *out_casting, @@ -620,7 +629,6 @@ convert_ufunc_arguments(PyUFuncObject *ufunc, /* Convert and fill in input arguments */ npy_bool all_scalar = NPY_TRUE; npy_bool any_scalar = NPY_FALSE; - *allow_legacy_promotion = NPY_TRUE; *force_legacy_promotion = NPY_FALSE; *promoting_pyscalars = NPY_FALSE; for (int i = 0; i < nin; i++) { @@ -655,11 +663,6 @@ convert_ufunc_arguments(PyUFuncObject *ufunc, break; } - if (!NPY_DT_is_legacy(out_op_DTypes[i])) { - *allow_legacy_promotion = NPY_FALSE; - // TODO: A subclass of int, float, complex could reach here and - // it should not be flagged as "weak" if it does. - } if (PyArray_NDIM(out_op[i]) == 0) { any_scalar = NPY_TRUE; } @@ -668,12 +671,6 @@ convert_ufunc_arguments(PyUFuncObject *ufunc, continue; } - // TODO: Is this equivalent/better by removing the logic which enforces - // that we always use weak promotion in the core? - if (get_npy_promotion_state() == NPY_USE_LEGACY_PROMOTION) { - continue; /* Skip use of special dtypes */ - } - /* * Handle the "weak" Python scalars/literals. We use a special DType * for these. 
@@ -698,23 +695,14 @@ convert_ufunc_arguments(PyUFuncObject *ufunc, * TODO: Just like the general dual NEP 50/legacy promotion * support this is meant as a temporary hack for NumPy 1.25. */ - static PyArrayObject *zero_arr = NULL; - if (NPY_UNLIKELY(zero_arr == NULL)) { - zero_arr = (PyArrayObject *)PyArray_ZEROS( - 0, NULL, NPY_LONG, NPY_FALSE); - if (zero_arr == NULL) { - goto fail; - } - ((PyArrayObject_fields *)zero_arr)->flags |= ( - NPY_ARRAY_WAS_PYTHON_INT|NPY_ARRAY_WAS_INT_AND_REPLACED); - } - Py_INCREF(zero_arr); - Py_SETREF(out_op[i], zero_arr); + Py_INCREF(npy_static_pydata.zero_pyint_like_arr); + Py_SETREF(out_op[i], + (PyArrayObject *)npy_static_pydata.zero_pyint_like_arr); } *promoting_pyscalars = NPY_TRUE; } } - if (*allow_legacy_promotion && (!all_scalar && any_scalar)) { + if ((!all_scalar && any_scalar)) { *force_legacy_promotion = should_use_min_scalar(nin, out_op, 0, NULL); } @@ -1127,7 +1115,7 @@ execute_ufunc_loop(PyArrayMethod_Context *context, int masked, * based on the fixed strides. */ PyArrayMethod_StridedLoop *strided_loop; - NpyAuxData *auxdata; + NpyAuxData *auxdata = NULL; npy_intp fixed_strides[NPY_MAXARGS]; NpyIter_GetInnerFixedStrideArray(iter, fixed_strides); @@ -1341,8 +1329,6 @@ _check_keepdims_support(PyUFuncObject *ufunc) { static int _parse_axes_arg(PyUFuncObject *ufunc, int op_core_num_dims[], PyObject *axes, PyArrayObject **op, int broadcast_ndim, int **remap_axis) { - static PyObject *AxisError_cls = NULL; - int nin = ufunc->nin; int nop = ufunc->nargs; int iop, list_size; @@ -1384,16 +1370,11 @@ _parse_axes_arg(PyUFuncObject *ufunc, int op_core_num_dims[], PyObject *axes, * Get axes tuple for operand. If not a tuple already, make it one if * there is only one axis (its content is checked later). 
*/ - op_axes_tuple = PyList_GET_ITEM(axes, iop); + op_axes_tuple = PyList_GET_ITEM(axes, iop); // noqa: borrowed-ref - manual fix needed if (PyTuple_Check(op_axes_tuple)) { if (PyTuple_Size(op_axes_tuple) != op_ncore) { /* must have been a tuple with too many entries. */ - npy_cache_import( - "numpy.exceptions", "AxisError", &AxisError_cls); - if (AxisError_cls == NULL) { - return -1; - } - PyErr_Format(AxisError_cls, + PyErr_Format(npy_static_pydata.AxisError, "%s: operand %d has %d core dimensions, " "but %zd dimensions are specified by axes tuple.", ufunc_get_name_cstr(ufunc), iop, op_ncore, @@ -1417,11 +1398,7 @@ _parse_axes_arg(PyUFuncObject *ufunc, int op_core_num_dims[], PyObject *axes, return -1; } /* If it is a single integer, inform user that more are needed */ - npy_cache_import("numpy.exceptions", "AxisError", &AxisError_cls); - if (AxisError_cls == NULL) { - return -1; - } - PyErr_Format(AxisError_cls, + PyErr_Format(npy_static_pydata.AxisError, "%s: operand %d has %d core dimensions, " "but the axes item is a single integer.", ufunc_get_name_cstr(ufunc), iop, op_ncore); @@ -1614,6 +1591,13 @@ _get_coredim_sizes(PyUFuncObject *ufunc, PyArrayObject **op, } } + if (ufunc->process_core_dims_func != NULL) { + int status = ufunc->process_core_dims_func(ufunc, core_dim_sizes); + if (status != 0) { + return -1; + } + } + /* * Make sure no core dimension is unspecified. 
*/ @@ -1718,7 +1702,6 @@ PyUFunc_GeneralizedFunctionInternal(PyUFuncObject *ufunc, int i, j, idim, nop; const char *ufunc_name; int retval; - int needs_api = 0; /* Use remapped axes for generalized ufunc */ int broadcast_ndim, iter_ndim; @@ -2103,11 +2086,10 @@ PyUFunc_GeneralizedFunctionInternal(PyUFuncObject *ufunc, NPY_SIZEOF_INTP * nop); /* Final preparation of the arraymethod call */ - PyArrayMethod_Context context = { - .caller = (PyObject *)ufunc, - .method = ufuncimpl, - .descriptors = operation_descrs, - }; + PyArrayMethod_Context context; + NPY_context_init(&context, operation_descrs); + context.caller = (PyObject *)ufunc; + context.method = ufuncimpl; PyArrayMethod_StridedLoop *strided_loop; NPY_ARRAYMETHOD_FLAGS flags = 0; @@ -2115,8 +2097,9 @@ PyUFunc_GeneralizedFunctionInternal(PyUFuncObject *ufunc, &strided_loop, &auxdata, &flags) < 0) { goto fail; } - needs_api = (flags & NPY_METH_REQUIRES_PYAPI) != 0; - needs_api |= NpyIter_IterationNeedsAPI(iter); + flags = PyArrayMethod_COMBINED_FLAGS(flags, NpyIter_GetTransferFlags(iter)); + int needs_api = (flags & NPY_METH_REQUIRES_PYAPI) != 0; + if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { /* Start with the floating-point exception flags cleared */ npy_clear_floatstatus_barrier((char*)&iter); @@ -2149,7 +2132,7 @@ PyUFunc_GeneralizedFunctionInternal(PyUFuncObject *ufunc, dataptr, inner_dimensions, inner_strides, auxdata); } while (retval == 0 && iternext(iter)); - if (!needs_api && !NpyIter_IterationNeedsAPI(iter)) { + if (!needs_api) { NPY_END_THREADS; } } @@ -2221,11 +2204,10 @@ PyUFunc_GenericFunctionInternal(PyUFuncObject *ufunc, } /* Final preparation of the arraymethod call */ - PyArrayMethod_Context context = { - .caller = (PyObject *)ufunc, - .method = ufuncimpl, - .descriptors = operation_descrs, - }; + PyArrayMethod_Context context; + NPY_context_init(&context, operation_descrs); + context.caller = (PyObject *)ufunc; + context.method = ufuncimpl; /* Do the ufunc loop */ if (wheremask != NULL) 
{ @@ -2336,20 +2318,6 @@ reducelike_promote_and_resolve(PyUFuncObject *ufunc, */ PyArrayObject *ops[3] = {out ? out : arr, arr, out}; - /* - * TODO: This is a dangerous hack, that works by relying on the GIL, it is - * terrible, terrifying, and trusts that nobody does crazy stuff - * in their type-resolvers. - * By mutating the `out` dimension, we ensure that reduce-likes - * live in a future without value-based promotion even when legacy - * promotion has to be used. - */ - npy_bool evil_ndim_mutating_hack = NPY_FALSE; - if (out != NULL && PyArray_NDIM(out) == 0 && PyArray_NDIM(arr) != 0) { - evil_ndim_mutating_hack = NPY_TRUE; - ((PyArrayObject_fields *)out)->nd = 1; - } - /* * TODO: If `out` is not provided, arguably `initial` could define * the first DType (and maybe also the out one), that way @@ -2369,16 +2337,13 @@ reducelike_promote_and_resolve(PyUFuncObject *ufunc, } PyArrayMethodObject *ufuncimpl = promote_and_get_ufuncimpl(ufunc, - ops, signature, operation_DTypes, NPY_FALSE, NPY_TRUE, - NPY_FALSE, NPY_TRUE); - if (evil_ndim_mutating_hack) { - ((PyArrayObject_fields *)out)->nd = 0; - } - /* DTypes may currently get filled in fallbacks and XDECREF for error: */ - Py_XDECREF(operation_DTypes[0]); - Py_XDECREF(operation_DTypes[1]); - Py_XDECREF(operation_DTypes[2]); + ops, signature, operation_DTypes, NPY_FALSE, NPY_FALSE, NPY_TRUE); + if (ufuncimpl == NULL) { + /* DTypes may currently get filled in fallbacks and XDECREF for error: */ + Py_XDECREF(operation_DTypes[0]); + Py_XDECREF(operation_DTypes[1]); + Py_XDECREF(operation_DTypes[2]); return NULL; } @@ -2389,8 +2354,13 @@ reducelike_promote_and_resolve(PyUFuncObject *ufunc, * casting safety could in principle be set to the default same-kind. 
* (although this should possibly happen through a deprecation) */ - if (resolve_descriptors(3, ufunc, ufuncimpl, - ops, out_descrs, signature, NULL, casting) < 0) { + int res = resolve_descriptors(3, ufunc, ufuncimpl, + ops, out_descrs, signature, operation_DTypes, NULL, casting); + + Py_XDECREF(operation_DTypes[0]); + Py_XDECREF(operation_DTypes[1]); + Py_XDECREF(operation_DTypes[2]); + if (res < 0) { return NULL; } @@ -2583,11 +2553,10 @@ PyUFunc_Reduce(PyUFuncObject *ufunc, return NULL; } - PyArrayMethod_Context context = { - .caller = (PyObject *)ufunc, - .method = ufuncimpl, - .descriptors = descrs, - }; + PyArrayMethod_Context context; + NPY_context_init(&context, descrs); + context.caller = (PyObject *)ufunc; + context.method = ufuncimpl; PyArrayObject *result = PyUFunc_ReduceWrapper(&context, arr, out, wheremask, axis_flags, keepdims, @@ -2609,8 +2578,12 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, int *op_axes[2] = {op_axes_arrays[0], op_axes_arrays[1]}; npy_uint32 op_flags[2]; int idim, ndim; - int needs_api, need_outer_iterator; + int need_outer_iterator; int res = 0; + + NPY_cast_info copy_info; + NPY_cast_info_init(©_info); + #if NPY_UF_DBG_TRACING const char *ufunc_name = ufunc_get_name_cstr(ufunc); #endif @@ -2655,20 +2628,10 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, assert(PyArray_EquivTypes(descrs[0], descrs[1]) && PyArray_EquivTypes(descrs[0], descrs[2])); - if (PyDataType_REFCHK(descrs[2]) && descrs[2]->type_num != NPY_OBJECT) { - /* This can be removed, but the initial element copy needs fixing */ - PyErr_SetString(PyExc_TypeError, - "accumulation currently only supports `object` dtype with " - "references"); - goto fail; - } - - PyArrayMethod_Context context = { - .caller = (PyObject *)ufunc, - .method = ufuncimpl, - .descriptors = descrs, - }; - + PyArrayMethod_Context context; + NPY_context_init(&context, descrs); + context.caller = (PyObject *)ufunc, + 
context.method = ufuncimpl, ndim = PyArray_NDIM(arr); #if NPY_UF_DBG_TRACING @@ -2758,10 +2721,10 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, else { PyArray_Descr *dtype = descrs[0]; Py_INCREF(dtype); - op[0] = out = (PyArrayObject *)PyArray_NewFromDescr( + op[0] = out = (PyArrayObject *)PyArray_NewFromDescr_int( &PyArray_Type, dtype, ndim, PyArray_DIMS(op[1]), NULL, NULL, - 0, NULL); + 0, NULL, NULL, _NPY_ARRAY_ENSURE_DTYPE_IDENTITY); if (out == NULL) { goto fail; } @@ -2775,8 +2738,9 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, else { fixed_strides[0] = PyArray_STRIDES(op[0])[axis]; fixed_strides[1] = PyArray_STRIDES(op[1])[axis]; - fixed_strides[2] = fixed_strides[0]; } + // First argument is also passed as output (e.g. see dataptr below). + fixed_strides[2] = fixed_strides[0]; NPY_ARRAYMETHOD_FLAGS flags = 0; @@ -2784,7 +2748,23 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, 1, 0, fixed_strides, &strided_loop, &auxdata, &flags) < 0) { goto fail; } - needs_api = (flags & NPY_METH_REQUIRES_PYAPI) != 0; + /* Set up function to copy the first element if it has references */ + if (PyDataType_REFCHK(descrs[2])) { + NPY_ARRAYMETHOD_FLAGS copy_flags; + /* Setup guarantees aligned here. 
*/ + if (PyArray_GetDTypeTransferFunction( + 1, 0, 0, descrs[1], descrs[2], 0, ©_info, + ©_flags) == NPY_FAIL) { + goto fail; + } + flags = PyArrayMethod_COMBINED_FLAGS(flags, copy_flags); + } + + if (iter != NULL) { + flags = PyArrayMethod_COMBINED_FLAGS(flags, NpyIter_GetTransferFlags(iter)); + } + + int needs_api = (flags & NPY_METH_REQUIRES_PYAPI) != 0; if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { /* Start with the floating-point exception flags cleared */ npy_clear_floatstatus_barrier((char*)&iter); @@ -2818,7 +2798,6 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, goto fail; } dataptr = NpyIter_GetDataPtrArray(iter); - needs_api |= NpyIter_IterationNeedsAPI(iter); /* Execute the loop with just the outer iterator */ count_m1 = PyArray_DIM(op[1], axis)-1; @@ -2847,18 +2826,17 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, * Output (dataptr[0]) and input (dataptr[1]) may point to * the same memory, e.g. np.add.accumulate(a, out=a). */ - if (descrs[2]->type_num == NPY_OBJECT) { - /* - * Incref before decref to avoid the possibility of the - * reference count being zero temporarily. - */ - Py_XINCREF(*(PyObject **)dataptr_copy[1]); - Py_XDECREF(*(PyObject **)dataptr_copy[0]); - *(PyObject **)dataptr_copy[0] = - *(PyObject **)dataptr_copy[1]; + if (copy_info.func) { + const npy_intp one = 1; + if (copy_info.func( + ©_info.context, &dataptr_copy[1], &one, + &stride_copy[1], copy_info.auxdata) < 0) { + NPY_END_THREADS; + goto fail; + } } else { - memmove(dataptr_copy[0], dataptr_copy[1], itemsize); + memmove(dataptr_copy[2], dataptr_copy[1], itemsize); } if (count_m1 > 0) { @@ -2907,18 +2885,17 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, * Output (dataptr[0]) and input (dataptr[1]) may point to the * same memory, e.g. np.add.accumulate(a, out=a). 
*/ - if (descrs[2]->type_num == NPY_OBJECT) { - /* - * Incref before decref to avoid the possibility of the - * reference count being zero temporarily. - */ - Py_XINCREF(*(PyObject **)dataptr_copy[1]); - Py_XDECREF(*(PyObject **)dataptr_copy[0]); - *(PyObject **)dataptr_copy[0] = - *(PyObject **)dataptr_copy[1]; + if (copy_info.func) { + const npy_intp one = 1; + const npy_intp strides[2] = {itemsize, itemsize}; + if (copy_info.func( + ©_info.context, &dataptr_copy[1], &one, + strides, copy_info.auxdata) < 0) { + goto fail; + } } else { - memmove(dataptr_copy[0], dataptr_copy[1], itemsize); + memmove(dataptr_copy[2], dataptr_copy[1], itemsize); } if (count > 1) { @@ -2928,8 +2905,6 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, NPY_UF_DBG_PRINT1("iterator loop count %d\n", (int)count); - needs_api = PyDataType_REFCHK(descrs[0]); - if (!needs_api) { NPY_BEGIN_THREADS_THRESHOLDED(count); } @@ -2943,6 +2918,7 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, finish: NPY_AUXDATA_FREE(auxdata); + NPY_cast_info_xfree(©_info); Py_DECREF(descrs[0]); Py_DECREF(descrs[1]); Py_DECREF(descrs[2]); @@ -2967,6 +2943,8 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, Py_XDECREF(out); NPY_AUXDATA_FREE(auxdata); + NPY_cast_info_xfree(©_info); + Py_XDECREF(descrs[0]); Py_XDECREF(descrs[1]); Py_XDECREF(descrs[2]); @@ -3007,7 +2985,7 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, op_axes_arrays[2]}; npy_uint32 op_flags[3]; int idim, ndim; - int needs_api, need_outer_iterator = 0; + int need_outer_iterator = 0; int res = 0; @@ -3023,7 +3001,7 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, const char *ufunc_name = ufunc_get_name_cstr(ufunc); char *opname = "reduceat"; - /* These parameters comefrom a TLS global */ + /* These parameters come from a TLS global */ int buffersize = 0, errormask = 0; 
NPY_BEGIN_THREADS_DEF; @@ -3081,12 +3059,10 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, goto fail; } - PyArrayMethod_Context context = { - .caller = (PyObject *)ufunc, - .method = ufuncimpl, - .descriptors = descrs, - }; - + PyArrayMethod_Context context; + NPY_context_init(&context, descrs); + context.caller = (PyObject *)ufunc, + context.method = ufuncimpl, ndim = PyArray_NDIM(arr); #if NPY_UF_DBG_TRACING @@ -3206,7 +3182,11 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, 1, 0, fixed_strides, &strided_loop, &auxdata, &flags) < 0) { goto fail; } - needs_api = (flags & NPY_METH_REQUIRES_PYAPI) != 0; + if (iter != NULL) { + flags = PyArrayMethod_COMBINED_FLAGS(flags, NpyIter_GetTransferFlags(iter)); + } + + int needs_api = (flags & NPY_METH_REQUIRES_PYAPI) != 0; if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { /* Start with the floating-point exception flags cleared */ npy_clear_floatstatus_barrier((char*)&iter); @@ -3230,7 +3210,6 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, npy_intp stride0_ind = PyArray_STRIDE(op[0], axis); int itemsize = descrs[0]->elsize; - needs_api |= NpyIter_IterationNeedsAPI(iter); /* Get the variables needed for the loop */ iternext = NpyIter_GetIterNext(iter, NULL); @@ -3509,6 +3488,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, */ PyObject *otype_obj = NULL, *out_obj = NULL, *indices_obj = NULL; PyObject *keepdims_obj = NULL, *wheremask_obj = NULL; + npy_bool return_scalar = NPY_TRUE; /* scalar return is disabled for out=... */ if (operation == UFUNC_REDUCEAT) { NPY_PREPARE_ARGPARSER; @@ -3571,6 +3551,11 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, /* Normalize output for PyUFunc_CheckOverride and conversion. */ if (out_is_passed_by_position) { /* in this branch, out is always wrapped in a tuple. */ + if (out_obj == Py_Ellipsis) { + PyErr_SetString(PyExc_TypeError, + "out=... 
is only allowed as a keyword argument."); + goto fail; + } if (out_obj != Py_None) { full_args.out = PyTuple_Pack(1, out_obj); if (full_args.out == NULL) { @@ -3579,7 +3564,11 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, } } else if (out_obj) { - if (_set_full_args_out(1, out_obj, &full_args) < 0) { + if (out_obj == Py_Ellipsis) { + out_obj = NULL; + return_scalar = NPY_FALSE; + } + else if (_set_full_args_out(1, out_obj, &full_args) < 0) { goto fail; } /* Ensure that out_obj is the array, not the tuple: */ @@ -3618,7 +3607,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, goto fail; } } - if (out_obj && !PyArray_OutputConverter(out_obj, &out)) { + if (out_obj && _set_out_array(out_obj, &out) < 0) { goto fail; } if (keepdims_obj && !PyArray_PythonPyIntFromInt(keepdims_obj, &keepdims)) { @@ -3735,6 +3724,8 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, goto fail; } + Py_XDECREF(out); + Py_DECREF(signature[0]); Py_DECREF(signature[1]); Py_DECREF(signature[2]); @@ -3753,13 +3744,15 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, /* TODO: Data is mutated, so force_wrap like a normal ufunc call does */ PyObject *wrapped_result = npy_apply_wrap( (PyObject *)ret, out_obj, wrap, wrap_type, NULL, - PyArray_NDIM(ret) == 0, NPY_FALSE); + PyArray_NDIM(ret) == 0 && return_scalar, NPY_FALSE); Py_DECREF(ret); Py_DECREF(wrap); Py_DECREF(wrap_type); return wrapped_result; fail: + Py_XDECREF(out); + Py_XDECREF(signature[0]); Py_XDECREF(signature[1]); Py_XDECREF(signature[2]); @@ -3925,14 +3918,9 @@ _get_fixed_signature(PyUFuncObject *ufunc, "a single item type tuple cannot contain None."); return -1; } - if (DEPRECATE("The use of a length 1 tuple for the ufunc " - "`signature` is deprecated. 
Use `dtype` or fill the" - "tuple with `None`s.") < 0) { - return -1; - } - /* Use the same logic as for `dtype=` */ - return _get_fixed_signature(ufunc, - PyTuple_GET_ITEM(signature_obj, 0), NULL, signature); + PyErr_SetString(PyExc_TypeError, + "Use `dtype` or fill the tuple with more than one 'None'."); + return -1; } if (n != nop) { PyErr_Format(PyExc_ValueError, @@ -3997,13 +3985,9 @@ _get_fixed_signature(PyUFuncObject *ufunc, } if (length == 1 && nin+nout != 1) { Py_DECREF(str_object); - if (DEPRECATE("The use of a length 1 string for the ufunc " - "`signature` is deprecated. Use `dtype` attribute or " - "pass a tuple with `None`s.") < 0) { - return -1; - } - /* `signature="l"` is the same as `dtype="l"` */ - return _get_fixed_signature(ufunc, str_object, NULL, signature); + PyErr_SetString(PyExc_TypeError, + "Use `dtype` or fill the tuple with more than one 'None'."); + return -1; } else { for (int i = 0; i < nin+nout; ++i) { @@ -4041,12 +4025,13 @@ static int resolve_descriptors(int nop, PyUFuncObject *ufunc, PyArrayMethodObject *ufuncimpl, PyArrayObject *operands[], PyArray_Descr *dtypes[], - PyArray_DTypeMeta *signature[], PyObject *inputs_tup, - NPY_CASTING casting) + PyArray_DTypeMeta *signature[], PyArray_DTypeMeta *original_DTypes[], + PyObject *inputs_tup, NPY_CASTING casting) { int retval = -1; NPY_CASTING safety; - PyArray_Descr *original_dtypes[NPY_MAXARGS]; + int n_cleanup = 0; /* number of original_descrs filled (to XDECREF) */ + PyArray_Descr *original_descrs[NPY_MAXARGS]; NPY_UF_DBG_PRINT("Resolving the descriptors\n"); @@ -4061,12 +4046,12 @@ resolve_descriptors(int nop, PyObject *input_scalars[NPY_MAXARGS]; for (int i = 0; i < nop; i++) { if (operands[i] == NULL) { - original_dtypes[i] = NULL; + original_descrs[i] = NULL; } else { /* For abstract DTypes, we might want to change what this is */ - original_dtypes[i] = PyArray_DTYPE(operands[i]); - Py_INCREF(original_dtypes[i]); + original_descrs[i] = PyArray_DTYPE(operands[i]); + 
Py_INCREF(original_descrs[i]); } /* * Check whether something is a scalar of the given type. @@ -4082,31 +4067,74 @@ resolve_descriptors(int nop, input_scalars[i] = NULL; } } + n_cleanup = nop; npy_intp view_offset = NPY_MIN_INTP; /* currently ignored */ safety = ufuncimpl->resolve_descriptors_with_scalars( - ufuncimpl, signature, original_dtypes, input_scalars, + ufuncimpl, signature, original_descrs, input_scalars, dtypes, &view_offset ); + + /* For scalars, replace the operand if needed (scalars can't be out) */ + for (int i = 0; i < nin; i++) { + if ((PyArray_FLAGS(operands[i]) & NPY_ARRAY_WAS_PYTHON_LITERAL)) { + /* `resolve_descriptors_with_scalars` decides the descr */ + if (npy_update_operand_for_scalar( + &operands[i], input_scalars[i], dtypes[i], + /* ignore cast safety for this op (resolvers job) */ + NPY_SAFE_CASTING) < 0) { + goto finish; + } + } + } goto check_safety; } for (int i = 0; i < nop; ++i) { if (operands[i] == NULL) { - original_dtypes[i] = NULL; + original_descrs[i] = NULL; + continue; } - else { - /* - * The dtype may mismatch the signature, in which case we need - * to make it fit before calling the resolution. - */ - PyArray_Descr *descr = PyArray_DTYPE(operands[i]); - original_dtypes[i] = PyArray_CastDescrToDType(descr, signature[i]); - if (original_dtypes[i] == NULL) { - nop = i; /* only this much is initialized */ + PyArray_Descr *descr = PyArray_DTYPE(operands[i]); + + /* + * If we are working with Python literals/scalars, deal with them. + * If needed, we create new array with the right descriptor. 
+ */ + if ((PyArray_FLAGS(operands[i]) & NPY_ARRAY_WAS_PYTHON_LITERAL)) { + PyObject *input; + if (inputs_tup == NULL) { + input = NULL; + } + else { + input = PyTuple_GET_ITEM(inputs_tup, i); + } + + PyArray_Descr *new_descr = npy_find_descr_for_scalar( + input, descr, original_DTypes[i], signature[i]); + if (new_descr == NULL) { + goto finish; + } + int res = npy_update_operand_for_scalar( + &operands[i], input, new_descr, casting); + Py_DECREF(new_descr); + if (res < 0) { goto finish; } + + /* Descriptor may have been modified along the way */ + descr = PyArray_DESCR(operands[i]); } + + /* + * The dtype may mismatch the signature, in which case we need + * to make it fit before calling the resolution. + */ + original_descrs[i] = PyArray_CastDescrToDType(descr, signature[i]); + if (original_descrs[i] == NULL) { + goto finish; + } + n_cleanup += 1; } if (ufuncimpl->resolve_descriptors != &wrapped_legacy_resolve_descriptors) { @@ -4114,7 +4142,7 @@ resolve_descriptors(int nop, npy_intp view_offset = NPY_MIN_INTP; /* currently ignored */ safety = ufuncimpl->resolve_descriptors(ufuncimpl, - signature, original_dtypes, dtypes, &view_offset); + signature, original_descrs, dtypes, &view_offset); goto check_safety; } else { @@ -4141,8 +4169,8 @@ resolve_descriptors(int nop, retval = 0; finish: - for (int i = 0; i < nop; i++) { - Py_XDECREF(original_dtypes[i]); + for (int i = 0; i < n_cleanup; i++) { + Py_XDECREF(original_descrs[i]); } return retval; } @@ -4167,15 +4195,16 @@ resolve_descriptors(int nop, * None --- array-object passed in don't call PyArray_Return * method --- the __array_wrap__ method to call. * - * @param ufunc + * @param ufunc The universal function to be wrapped * @param full_args Original inputs and outputs * @param subok Whether subclasses are allowed * @param result_arrays The ufunc result(s). REFERENCES ARE STOLEN! + * @param return_scalar Set to NPY_FALSE (out=...) to ensure array return. 
*/ static PyObject * replace_with_wrapped_result_and_return(PyUFuncObject *ufunc, ufunc_full_args full_args, npy_bool subok, - PyArrayObject *result_arrays[]) + PyArrayObject *result_arrays[], npy_bool return_scalar) { PyObject *result = NULL; PyObject *wrap, *wrap_type; @@ -4215,7 +4244,7 @@ replace_with_wrapped_result_and_return(PyUFuncObject *ufunc, PyObject *ret_i = npy_apply_wrap( (PyObject *)result_arrays[out_i], original_out, wrap, wrap_type, /* Always try to return a scalar right now: */ - &context, PyArray_NDIM(result_arrays[out_i]) == 0, NPY_TRUE); + &context, PyArray_NDIM(result_arrays[out_i]) == 0 && return_scalar, NPY_TRUE); Py_CLEAR(result_arrays[out_i]); if (ret_i == NULL) { goto fail; @@ -4243,6 +4272,139 @@ replace_with_wrapped_result_and_return(PyUFuncObject *ufunc, return NULL; } +/* + * Check whether the input object is a known scalar and whether the ufunc has + * a suitable inner loop for it, which takes and returns the data type of the + * input (this function is not called if output or any other argument was given). + * If a loop was found, call it and store the result. + * + * Returns -2 if a short-cut is not possible, 0 on success and -1 on error. + */ +static int +try_trivial_scalar_call( + PyUFuncObject *ufunc, PyObject *const obj, PyObject **result) +{ + assert(ufunc->nin == 1 && ufunc->nout == 1 && !ufunc->core_enabled); + npy_clongdouble cin, cout; // aligned storage, using longest type. + char *in = (char *)&cin, *out = (char *)&cout; + char *data[] = {in, out}; + int ret = -2; + PyArray_Descr *dt; + /* + * For supported input, get input pointer and descriptor. Otherwise, bail. 
+ */ + if (obj == Py_False || obj == Py_True) { + *(npy_bool *)in = (obj == Py_True); + dt = PyArray_DescrFromType(NPY_BOOL); + } + else if (PyFloat_CheckExact(obj)) { + *(double *)in = PyFloat_AS_DOUBLE(obj); + dt = PyArray_DescrFromType(NPY_FLOAT64); + } + else if (PyLong_CheckExact(obj)) { + int overflow; + npy_intp val = PyLong_AsLongAndOverflow(obj, &overflow); + if (overflow) { + return -2; // bail, main code perhaps deals with this. + } + if (error_converting(val)) { + return -1; // should never happen; pass on it if does. + } + *(npy_intp *)in = val; + dt = PyArray_DescrFromType(NPY_INTP); + } + else if (PyComplex_CheckExact(obj)) { + Py_complex oop = PyComplex_AsCComplex(obj); + if (error_converting(oop.real)) { + return -1; // should never happen; pass on it if does. + } + *(double *)in = oop.real; + *(double *)(in+sizeof(double)) = oop.imag; + dt = PyArray_DescrFromType(NPY_COMPLEX128); + } + else if (is_anyscalar_exact(obj)) { + dt = PyArray_DescrFromScalar(obj); + if (!PyDataType_ISNUMBER(dt)) { + goto bail; + } + data[0] = scalar_value(obj, dt); + } + else { + return -2; + } + /* + * Check the ufunc supports our descriptor, bailing (return -2) if not. + */ + // Try getting info from the (private) cache. Fall back if not found, + // so that the the dtype gets registered and things will work next time. + PyArray_DTypeMeta *op_dtypes[2] = {NPY_DTYPE(dt), NULL}; + PyObject *info = PyArrayIdentityHash_GetItem( // borrowed reference. + (PyArrayIdentityHash *)ufunc->_dispatch_cache, + (PyObject **)op_dtypes); + if (info == NULL) { + goto bail; + } + // Check actual dtype is correct (can be wrong with promotion). + PyObject *all_dtypes = PyTuple_GET_ITEM(info, 0); + if ((PyTuple_GET_ITEM(all_dtypes, 0) != (PyObject *)NPY_DTYPE(dt)) || + (PyTuple_GET_ITEM(all_dtypes, 1) != (PyObject *)NPY_DTYPE(dt))) { + goto bail; + } + // Get method, bailing if not an arraymethod (e.g., a promotor). 
+ PyArrayMethodObject *method = (PyArrayMethodObject *)PyTuple_GET_ITEM(info, 1); + if (!PyObject_TypeCheck(method, &PyArrayMethod_Type)) { + goto bail; + } + // Get loop, requiring that the output and input dtype are the same. + PyArrayMethod_Context context; + PyArray_Descr *descrs[2] = {dt, dt}; + NPY_context_init(&context, descrs); + context.caller = (PyObject *)ufunc; + context.method = method; + npy_intp strides[2] = {0, 0}; // 0 ensures scalar math, not SIMD for half. + PyArrayMethod_StridedLoop *strided_loop; + NpyAuxData *auxdata = NULL; + NPY_ARRAYMETHOD_FLAGS flags = 0; + if (method->get_strided_loop(&context, 1, 0, strides, + &strided_loop, &auxdata, &flags) < 0) { + ret = -1; // Should not happen, so raise error if it does anyway. + goto bail; + } + /* + * Call loop with single element, checking floating point errors. + */ + if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { + npy_clear_floatstatus(); + } + npy_intp n = 1; + ret = strided_loop(&context, data, &n, strides, auxdata); + NPY_AUXDATA_FREE(auxdata); + if (ret == 0) { + if (PyErr_Occurred()) { + ret = -1; + goto bail; + } + if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { + // Check for any unmasked floating point errors (note: faster + // than _check_ufunc_fperr as one doesn't need mask up front). + int fpe_errors = npy_get_floatstatus(); + if (fpe_errors) { + if (PyUFunc_GiveFloatingpointErrors( + ufunc_get_name_cstr(ufunc), fpe_errors) < 0) { + ret = -1; // Real error, falling back would not help. + goto bail; + } + } + } + *result = PyArray_Scalar(out, dt, NULL); + if (*result == NULL) { + ret = -1; // Real error (should never happen). + } + } + bail: + Py_DECREF(dt); + return ret; +} /* * Main ufunc call implementation. 
@@ -4261,19 +4423,32 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, int errval; int nin = ufunc->nin, nout = ufunc->nout, nop = ufunc->nargs; + if (len_args == 1 && kwnames == NULL && !PyArray_Check(args[0]) + && nin == 1 && nout == 1 && !ufunc->core_enabled) { + // Possibly scalar input, try the fast path, falling back on failure. + PyObject *result = NULL; + if (try_trivial_scalar_call(ufunc, args[0], &result) != -2) { + return result; + } + } /* All following variables are cleared in the `fail` error path */ - ufunc_full_args full_args; + ufunc_full_args full_args = {NULL, NULL}; PyArrayObject *wheremask = NULL; - PyArray_DTypeMeta *signature[NPY_MAXARGS]; - PyArrayObject *operands[NPY_MAXARGS]; - PyArray_DTypeMeta *operand_DTypes[NPY_MAXARGS]; - PyArray_Descr *operation_descrs[NPY_MAXARGS]; - /* Initialize all arrays (we usually only need a small part) */ - memset(signature, 0, nop * sizeof(*signature)); - memset(operands, 0, nop * sizeof(*operands)); - memset(operand_DTypes, 0, nop * sizeof(*operation_descrs)); - memset(operation_descrs, 0, nop * sizeof(*operation_descrs)); + /* + * Scratch space for operands, dtypes, etc. Note that operands and + * operation_descrs may hold an entry for the wheremask. + */ + NPY_ALLOC_WORKSPACE(scratch_objs, void *, UFUNC_STACK_NARGS * 4 + 2, nop * 4 + 2); + if (scratch_objs == NULL) { + return NULL; + } + memset(scratch_objs, 0, sizeof(void *) * (nop * 4 + 2)); + + PyArray_DTypeMeta **signature = (PyArray_DTypeMeta **)scratch_objs; + PyArrayObject **operands = (PyArrayObject **)(signature + nop); + PyArray_DTypeMeta **operand_DTypes = (PyArray_DTypeMeta **)(operands + nop + 1); + PyArray_Descr **operation_descrs = (PyArray_Descr **)(operand_DTypes + nop); /* * Note that the input (and possibly output) arguments are passed in as @@ -4285,17 +4460,18 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, /* Check number of arguments */ if (NPY_UNLIKELY((len_args < nin) || (len_args > nop))) { + const char *verb = (len_args == 1) ? 
"was" : "were"; PyErr_Format(PyExc_TypeError, - "%s() takes from %d to %d positional arguments but " - "%zd were given", - ufunc_get_name_cstr(ufunc) , nin, nop, len_args); - return NULL; + "%s() takes from %d to %d positional arguments but " + "%zd %s given", + ufunc_get_name_cstr(ufunc), nin, nop, len_args, verb); + goto fail; } /* Fetch input arguments. */ full_args.in = PyArray_TupleFromItems(ufunc->nin, args, 0); if (full_args.in == NULL) { - return NULL; + goto fail; } /* @@ -4314,6 +4490,11 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, PyObject *tmp; if (i < (int)len_args) { tmp = args[i]; + if (tmp == Py_Ellipsis) { + PyErr_SetString(PyExc_TypeError, + "out=... is only allowed as a keyword argument."); + goto fail; + } if (tmp != Py_None) { all_none = NPY_FALSE; } @@ -4324,6 +4505,21 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, Py_INCREF(tmp); PyTuple_SET_ITEM(full_args.out, i-nin, tmp); } + + /* Extra positional args but no keywords */ + /* DEPRECATED NumPy 2.4, 2025-08 */ + if ((PyObject *)ufunc == n_ops.maximum || (PyObject *)ufunc == n_ops.minimum) { + + if (DEPRECATE( + "Passing more than 2 positional arguments to np.maximum and np.minimum " + "is deprecated. If you meant to use the third argument as an output, " + "use the `out` keyword argument instead. 
If you hoped to work with " + "more than 2 inputs, combine them into a single array and get the extrema " + "for the relevant axis.") < 0) { + goto fail; + } + } + if (all_none) { Py_SETREF(full_args.out, NULL); } @@ -4341,6 +4537,8 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, PyObject *keepdims_obj = NULL, *casting_obj = NULL, *order_obj = NULL; PyObject *subok_obj = NULL, *signature_obj = NULL, *sig_obj = NULL; PyObject *dtype_obj = NULL; + /* Typically, NumPy defaults to returnin scalars for 0-D results */ + npy_bool return_scalar = NPY_TRUE; /* Skip parsing if there are no keyword arguments, nothing left to do */ if (kwnames != NULL) { @@ -4392,7 +4590,10 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, "positional and keyword argument"); goto fail; } - if (_set_full_args_out(nout, out_obj, &full_args) < 0) { + if (out_obj == Py_Ellipsis) { + return_scalar = NPY_FALSE; + } + else if (_set_full_args_out(nout, out_obj, &full_args) < 0) { goto fail; } } @@ -4428,6 +4629,15 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, return override; } + /* Warn if "where" is used without "out", issue 29561 */ + if ((where_obj != NULL) && (full_args.out == NULL) && (out_obj == NULL)) { + if (PyErr_WarnEx(PyExc_UserWarning, + "'where' used without 'out', expect unitialized memory in output. 
" + "If this is intentional, use out=None.", 1) < 0) { + goto fail; + } + } + if (outer) { /* Outer uses special preparation of inputs (expand dims) */ PyObject *new_in = prepare_input_arguments_for_outer(full_args.in, ufunc); @@ -4451,13 +4661,12 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, npy_bool subok = NPY_TRUE; int keepdims = -1; /* We need to know if it was passed */ npy_bool force_legacy_promotion; - npy_bool allow_legacy_promotion; npy_bool promoting_pyscalars; if (convert_ufunc_arguments(ufunc, /* extract operand related information: */ full_args, operands, operand_DTypes, - &force_legacy_promotion, &allow_legacy_promotion, + &force_legacy_promotion, &promoting_pyscalars, /* extract general information: */ order_obj, &order, @@ -4478,7 +4687,7 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, */ PyArrayMethodObject *ufuncimpl = promote_and_get_ufuncimpl(ufunc, operands, signature, - operand_DTypes, force_legacy_promotion, allow_legacy_promotion, + operand_DTypes, force_legacy_promotion, promoting_pyscalars, NPY_FALSE); if (ufuncimpl == NULL) { goto fail; @@ -4486,49 +4695,11 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, /* Find the correct descriptors for the operation */ if (resolve_descriptors(nop, ufunc, ufuncimpl, - operands, operation_descrs, signature, full_args.in, casting) < 0) { + operands, operation_descrs, signature, operand_DTypes, + full_args.in, casting) < 0) { goto fail; } - if (promoting_pyscalars) { - /* - * Python integers need to be cast specially. For other python - * scalars it does not hurt either. It would be nice to never create - * the array in this case, but that is difficult until value-based - * promotion rules are gone. (After that, we may get away with using - * dummy arrays rather than real arrays for the legacy resolvers.) 
- */ - for (int i = 0; i < nin; i++) { - int orig_flags = PyArray_FLAGS(operands[i]); - if (!(orig_flags & NPY_ARRAY_WAS_PYTHON_LITERAL)) { - continue; - } - /* - * If descriptor matches, no need to convert, but integers may - * have been too large. - */ - if (!(orig_flags & NPY_ARRAY_WAS_INT_AND_REPLACED) - && PyArray_EquivTypes( - PyArray_DESCR(operands[i]), operation_descrs[i])) { - continue; - } - /* Otherwise, replace the operand with a new array */ - PyArray_Descr *descr = operation_descrs[i]; - Py_INCREF(descr); - PyArrayObject *new = (PyArrayObject *)PyArray_NewFromDescr( - &PyArray_Type, descr, 0, NULL, NULL, NULL, 0, NULL); - Py_SETREF(operands[i], new); - if (operands[i] == NULL) { - goto fail; - } - - PyObject *value = PyTuple_GET_ITEM(full_args.in, i); - if (PyArray_SETITEM(new, PyArray_BYTES(operands[i]), value) < 0) { - goto fail; - } - } - } - /* * Do the final preparations and call the inner-loop. */ @@ -4561,10 +4732,11 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, } /* The following steals the references to the outputs: */ PyObject *result = replace_with_wrapped_result_and_return(ufunc, - full_args, subok, operands+nin); + full_args, subok, operands+nin, return_scalar); Py_XDECREF(full_args.in); Py_XDECREF(full_args.out); + npy_free_workspace(scratch_objs); return result; fail: @@ -4577,6 +4749,7 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, Py_XDECREF(operand_DTypes[i]); Py_XDECREF(operation_descrs[i]); } + npy_free_workspace(scratch_objs); return NULL; } @@ -4700,6 +4873,7 @@ PyUFunc_FromFuncAndDataAndSignatureAndIdentity(PyUFuncGenericFunction *func, voi ufunc->core_signature = NULL; ufunc->core_enabled = 0; ufunc->obj = NULL; + ufunc->dict = NULL; ufunc->core_num_dims = NULL; ufunc->core_num_dim_ix = 0; ufunc->core_offsets = NULL; @@ -4715,6 +4889,8 @@ PyUFunc_FromFuncAndDataAndSignatureAndIdentity(PyUFuncGenericFunction *func, voi /* Type resolution and inner loop selection functions */ ufunc->type_resolver = 
&PyUFunc_DefaultTypeResolver; + ufunc->process_core_dims_func = NULL; + ufunc->op_flags = NULL; ufunc->_loops = NULL; if (nin + nout != 0) { @@ -4782,6 +4958,11 @@ PyUFunc_FromFuncAndDataAndSignatureAndIdentity(PyUFuncGenericFunction *func, voi return NULL; } } + ufunc->dict = PyDict_New(); + if (ufunc->dict == NULL) { + Py_DECREF(ufunc); + return NULL; + } /* * TODO: I tried adding a default promoter here (either all object for * some special cases, or all homogeneous). Those are reasonable @@ -4924,7 +5105,7 @@ PyUFunc_RegisterLoopForDescr(PyUFuncObject *ufunc, function, arg_typenums, data); if (result == 0) { - cobj = PyDict_GetItemWithError(ufunc->userloops, key); + cobj = PyDict_GetItemWithError(ufunc->userloops, key); // noqa: borrowed-ref OK if (cobj == NULL && PyErr_Occurred()) { result = -1; } @@ -5055,7 +5236,7 @@ PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc, */ int add_new_loop = 1; for (Py_ssize_t j = 0; j < PyList_GET_SIZE(ufunc->_loops); j++) { - PyObject *item = PyList_GET_ITEM(ufunc->_loops, j); + PyObject *item = PyList_GET_ITEM(ufunc->_loops, j); // noqa: borrowed-ref OK PyObject *existing_tuple = PyTuple_GET_ITEM(item, 0); int cmp = PyObject_RichCompareBool(existing_tuple, signature_tuple, Py_EQ); @@ -5097,7 +5278,7 @@ PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc, funcdata->nargs = 0; /* Get entry for this user-defined type*/ - cobj = PyDict_GetItemWithError(ufunc->userloops, key); + cobj = PyDict_GetItemWithError(ufunc->userloops, key); // noqa: borrowed-ref OK if (cobj == NULL && PyErr_Occurred()) { goto fail; } @@ -5188,6 +5369,7 @@ ufunc_dealloc(PyUFuncObject *ufunc) Py_DECREF(ufunc->identity_value); } Py_XDECREF(ufunc->obj); + Py_XDECREF(ufunc->dict); Py_XDECREF(ufunc->_loops); if (ufunc->_dispatch_cache != NULL) { PyArrayIdentityHash_Dealloc(ufunc->_dispatch_cache); @@ -5208,6 +5390,7 @@ ufunc_traverse(PyUFuncObject *self, visitproc visit, void *arg) if (self->identity == PyUFunc_IdentityValue) { Py_VISIT(self->identity_value); } + 
Py_VISIT(self->dict); return 0; } @@ -5254,25 +5437,20 @@ prepare_input_arguments_for_outer(PyObject *args, PyUFuncObject *ufunc) { PyArrayObject *ap1 = NULL; PyObject *tmp; - static PyObject *_numpy_matrix; - npy_cache_import("numpy", "matrix", &_numpy_matrix); + npy_cache_import_runtime("numpy", "matrix", + &npy_runtime_imports.numpy_matrix); const char *matrix_deprecation_msg = ( "%s.outer() was passed a numpy matrix as %s argument. " - "Special handling of matrix is deprecated and will result in an " - "error in most cases. Please convert the matrix to a NumPy " - "array to retain the old behaviour. You can use `matrix.A` " - "to achieve this."); + "Special handling of matrix is removed. Convert to a " + "ndarray via 'matrix.A' "); tmp = PyTuple_GET_ITEM(args, 0); - if (PyObject_IsInstance(tmp, _numpy_matrix)) { - /* DEPRECATED 2020-05-13, NumPy 1.20 */ - if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, - matrix_deprecation_msg, ufunc->name, "first") < 0) { - return NULL; - } - ap1 = (PyArrayObject *) PyArray_FromObject(tmp, NPY_NOTYPE, 0, 0); + if (PyObject_IsInstance(tmp, npy_runtime_imports.numpy_matrix)) { + PyErr_Format(PyExc_TypeError, + matrix_deprecation_msg, ufunc->name, "first"); + return NULL; } else { ap1 = (PyArrayObject *) PyArray_FROM_O(tmp); @@ -5283,14 +5461,10 @@ prepare_input_arguments_for_outer(PyObject *args, PyUFuncObject *ufunc) PyArrayObject *ap2 = NULL; tmp = PyTuple_GET_ITEM(args, 1); - if (PyObject_IsInstance(tmp, _numpy_matrix)) { - /* DEPRECATED 2020-05-13, NumPy 1.20 */ - if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, - matrix_deprecation_msg, ufunc->name, "second") < 0) { - Py_DECREF(ap1); - return NULL; - } - ap2 = (PyArrayObject *) PyArray_FromObject(tmp, NPY_NOTYPE, 0, 0); + if (PyObject_IsInstance(tmp, npy_runtime_imports.numpy_matrix)) { + PyErr_Format(PyExc_TypeError, + matrix_deprecation_msg, ufunc->name, "second"); + return NULL; } else { ap2 = (PyArrayObject *) PyArray_FROM_O(tmp); @@ -5614,9 +5788,9 @@ 
ufunc_at__slow_iter(PyUFuncObject *ufunc, NPY_ARRAYMETHOD_FLAGS flags, } return -1; } + flags = PyArrayMethod_COMBINED_FLAGS(flags, NpyIter_GetTransferFlags(iter_buffer)); int needs_api = (flags & NPY_METH_REQUIRES_PYAPI) != 0; - needs_api |= NpyIter_IterationNeedsAPI(iter_buffer); if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { /* Start with the floating-point exception flags cleared */ npy_clear_floatstatus_barrier((char*)&iter); @@ -5809,22 +5983,20 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) operand_DTypes[0] = NPY_DTYPE(PyArray_DESCR(op1_array)); Py_INCREF(operand_DTypes[0]); int force_legacy_promotion = 0; - int allow_legacy_promotion = NPY_DT_is_legacy(operand_DTypes[0]); if (op2_array != NULL) { tmp_operands[1] = op2_array; operand_DTypes[1] = NPY_DTYPE(PyArray_DESCR(op2_array)); Py_INCREF(operand_DTypes[1]); - allow_legacy_promotion &= NPY_DT_is_legacy(operand_DTypes[1]); tmp_operands[2] = tmp_operands[0]; operand_DTypes[2] = operand_DTypes[0]; Py_INCREF(operand_DTypes[2]); - if (allow_legacy_promotion && ((PyArray_NDIM(op1_array) == 0) - != (PyArray_NDIM(op2_array) == 0))) { - /* both are legacy and only one is 0-D: force legacy */ - force_legacy_promotion = should_use_min_scalar(2, tmp_operands, 0, NULL); - } + if ((PyArray_NDIM(op1_array) == 0) + != (PyArray_NDIM(op2_array) == 0)) { + /* both are legacy and only one is 0-D: force legacy */ + force_legacy_promotion = should_use_min_scalar(2, tmp_operands, 0, NULL); + } } else { tmp_operands[1] = tmp_operands[0]; @@ -5835,7 +6007,7 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) ufuncimpl = promote_and_get_ufuncimpl(ufunc, tmp_operands, signature, operand_DTypes, force_legacy_promotion, - allow_legacy_promotion, NPY_FALSE, NPY_FALSE); + NPY_FALSE, NPY_FALSE); if (ufuncimpl == NULL) { for (int i = 0; i < 3; i++) { Py_XDECREF(signature[i]); @@ -5846,7 +6018,7 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) /* Find the correct operation_descrs for the operation */ int resolve_result = 
resolve_descriptors(nop, ufunc, ufuncimpl, - tmp_operands, operation_descrs, signature, NULL, NPY_UNSAFE_CASTING); + tmp_operands, operation_descrs, signature, operand_DTypes, NULL, NPY_UNSAFE_CASTING); for (int i = 0; i < 3; i++) { Py_XDECREF(signature[i]); Py_XDECREF(operand_DTypes[i]); @@ -5891,11 +6063,10 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) } } - PyArrayMethod_Context context = { - .caller = (PyObject *)ufunc, - .method = ufuncimpl, - .descriptors = operation_descrs, - }; + PyArrayMethod_Context context; + NPY_context_init(&context, operation_descrs); + context.caller = (PyObject *)ufunc; + context.method = ufuncimpl; /* Use contiguous strides; if there is such a loop it may be faster */ npy_intp strides[3] = { @@ -5961,7 +6132,6 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) NPY_AUXDATA_FREE(auxdata); Py_XDECREF(op2_array); - Py_XDECREF(iter); Py_XDECREF(iter2); for (int i = 0; i < nop; i++) { Py_XDECREF(operation_descrs[i]); @@ -5977,9 +6147,13 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) if (PyArray_FLAGS(op1_array) & NPY_ARRAY_WRITEBACKIFCOPY) { PyArray_DiscardWritebackIfCopy(op1_array); } + // iter might own the last reference to op1_array, + // so it must be decref'd second + Py_XDECREF(iter); return NULL; } else { + Py_XDECREF(iter); Py_RETURN_NONE; } } @@ -6072,12 +6246,7 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, PyArray_DTypeMeta *signature[NPY_MAXARGS] = {NULL}; PyArray_Descr *operation_descrs[NPY_MAXARGS] = {NULL}; - /* This entry-point to promotion lives in the NEP 50 future: */ - int original_promotion_state = get_npy_promotion_state(); - set_npy_promotion_state(NPY_USE_WEAK_PROMOTION); - npy_bool promoting_pyscalars = NPY_FALSE; - npy_bool allow_legacy_promotion = NPY_TRUE; if (_get_fixed_signature(ufunc, NULL, signature_obj, signature) < 0) { goto finish; @@ -6110,9 +6279,6 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, } DTypes[i] = NPY_DTYPE(descr); 
Py_INCREF(DTypes[i]); - if (!NPY_DT_is_legacy(DTypes[i])) { - allow_legacy_promotion = NPY_FALSE; - } } /* Explicitly allow int, float, and complex for the "weak" types. */ else if (descr_obj == (PyObject *)&PyLong_Type) { @@ -6168,14 +6334,14 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, if (!reduction) { ufuncimpl = promote_and_get_ufuncimpl(ufunc, dummy_arrays, signature, DTypes, NPY_FALSE, - allow_legacy_promotion, promoting_pyscalars, NPY_FALSE); + promoting_pyscalars, NPY_FALSE); if (ufuncimpl == NULL) { goto finish; } /* Find the correct descriptors for the operation */ if (resolve_descriptors(ufunc->nargs, ufunc, ufuncimpl, - dummy_arrays, operation_descrs, signature, + dummy_arrays, operation_descrs, signature, DTypes, NULL, casting) < 0) { goto finish; } @@ -6261,8 +6427,6 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, Py_DECREF(capsule); finish: - set_npy_promotion_state(original_promotion_state); - Py_XDECREF(result_dtype_tuple); for (int i = 0; i < ufunc->nargs; i++) { Py_XDECREF(signature[i]); @@ -6412,8 +6576,8 @@ static struct PyMethodDef ufunc_methods[] = { }; -/****************************************************************************** - *** UFUNC GETSET *** +/***************************************************************************** + *** UFUNC GETSET *** *****************************************************************************/ @@ -6432,15 +6596,20 @@ _typecharfromnum(int num) { static PyObject * ufunc_get_doc(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored)) { - static PyObject *_sig_formatter; PyObject *doc; - npy_cache_import( - "numpy._core._internal", - "_ufunc_doc_signature_formatter", - &_sig_formatter); + // If there is a __doc__ in the instance __dict__, use it. 
+ int result = PyDict_GetItemRef(ufunc->dict, npy_interned_str.__doc__, &doc); + if (result == -1) { + return NULL; + } + else if (result == 1) { + return doc; + } - if (_sig_formatter == NULL) { + if (npy_cache_import_runtime( + "numpy._core._internal", "_ufunc_doc_signature_formatter", + &npy_runtime_imports._ufunc_doc_signature_formatter) == -1) { return NULL; } @@ -6449,8 +6618,9 @@ ufunc_get_doc(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored)) * introspection on name and nin + nout to automate the first part * of it the doc string shouldn't need the calling convention */ - doc = PyObject_CallFunctionObjArgs(_sig_formatter, - (PyObject *)ufunc, NULL); + doc = PyObject_CallFunctionObjArgs( + npy_runtime_imports._ufunc_doc_signature_formatter, + (PyObject *)ufunc, NULL); if (doc == NULL) { return NULL; } @@ -6460,6 +6630,15 @@ ufunc_get_doc(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored)) return doc; } +static int +ufunc_set_doc(PyUFuncObject *ufunc, PyObject *doc, void *NPY_UNUSED(ignored)) +{ + if (doc == NULL) { + return PyDict_DelItem(ufunc->dict, npy_interned_str.__doc__); + } else { + return PyDict_SetItem(ufunc->dict, npy_interned_str.__doc__, doc); + } +} static PyObject * ufunc_get_nin(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored)) @@ -6543,13 +6722,12 @@ ufunc_get_signature(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored)) #undef _typecharfromnum -/* - * Docstring is now set from python - * static char *Ufunctype__doc__ = NULL; - */ static PyGetSetDef ufunc_getset[] = { {"__doc__", - (getter)ufunc_get_doc, + (getter)ufunc_get_doc, (setter)ufunc_set_doc, + NULL, NULL}, + {"__name__", + (getter)ufunc_get_name, NULL, NULL, NULL}, {"nin", (getter)ufunc_get_nin, @@ -6566,19 +6744,28 @@ static PyGetSetDef ufunc_getset[] = { {"types", (getter)ufunc_get_types, NULL, NULL, NULL}, - {"__name__", - (getter)ufunc_get_name, - NULL, NULL, NULL}, {"identity", (getter)ufunc_get_identity, NULL, NULL, NULL}, {"signature", (getter)ufunc_get_signature, NULL, NULL, NULL}, 
+ // __signature__ stored in `__dict__`, see `_globals._SignatureDescriptor` {NULL, NULL, NULL, NULL, NULL}, /* Sentinel */ }; +/****************************************************************************** + *** UFUNC MEMBERS *** + *****************************************************************************/ + +static PyMemberDef ufunc_members[] = { + {"__dict__", T_OBJECT, offsetof(PyUFuncObject, dict), + READONLY}, + {NULL}, +}; + + /****************************************************************************** *** UFUNC TYPE OBJECT *** *****************************************************************************/ @@ -6598,6 +6785,12 @@ NPY_NO_EXPORT PyTypeObject PyUFunc_Type = { .tp_traverse = (traverseproc)ufunc_traverse, .tp_methods = ufunc_methods, .tp_getset = ufunc_getset, + .tp_getattro = PyObject_GenericGetAttr, + .tp_setattro = PyObject_GenericSetAttr, + // TODO when Python 3.12 is the minimum supported version, + // use Py_TPFLAGS_MANAGED_DICT + .tp_members = ufunc_members, + .tp_dictoffset = offsetof(PyUFuncObject, dict), }; /* End of code for ufunc objects */ diff --git a/numpy/_core/src/umath/ufunc_object.h b/numpy/_core/src/umath/ufunc_object.h index 645023f66aa5..dc55a561fba5 100644 --- a/numpy/_core/src/umath/ufunc_object.h +++ b/numpy/_core/src/umath/ufunc_object.h @@ -3,6 +3,9 @@ #include +#ifdef __cplusplus +extern "C" { +#endif NPY_NO_EXPORT const char* ufunc_get_name_cstr(PyUFuncObject *ufunc); @@ -10,9 +13,8 @@ ufunc_get_name_cstr(PyUFuncObject *ufunc); NPY_NO_EXPORT PyObject * PyUFunc_GetDefaultIdentity(PyUFuncObject *ufunc, npy_bool *reorderable); -/* strings from umathmodule.c that are interned on umath import */ -NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_array_ufunc; -NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_array_wrap; -NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_pyvals_name; +#ifdef __cplusplus +} +#endif #endif diff --git a/numpy/_core/src/umath/ufunc_type_resolution.c 
b/numpy/_core/src/umath/ufunc_type_resolution.c index f6f231223f63..eaea560e9b98 100644 --- a/numpy/_core/src/umath/ufunc_type_resolution.c +++ b/numpy/_core/src/umath/ufunc_type_resolution.c @@ -35,10 +35,9 @@ #include "npy_config.h" #include "numpy/npy_common.h" -#include "npy_import.h" - #include "numpy/ndarraytypes.h" #include "numpy/ufuncobject.h" +#include "npy_import.h" #include "ufunc_type_resolution.h" #include "ufunc_object.h" #include "common.h" @@ -78,16 +77,8 @@ npy_casting_to_py_object(NPY_CASTING casting) */ static int raise_binary_type_reso_error(PyUFuncObject *ufunc, PyArrayObject **operands) { - static PyObject *exc_type = NULL; PyObject *exc_value; - npy_cache_import( - "numpy._core._exceptions", "_UFuncBinaryResolutionError", - &exc_type); - if (exc_type == NULL) { - return -1; - } - /* produce an error object */ exc_value = Py_BuildValue( "O(OO)", ufunc, @@ -97,7 +88,8 @@ raise_binary_type_reso_error(PyUFuncObject *ufunc, PyArrayObject **operands) { if (exc_value == NULL){ return -1; } - PyErr_SetObject(exc_type, exc_value); + PyErr_SetObject( + npy_static_pydata._UFuncBinaryResolutionError, exc_value); Py_DECREF(exc_value); return -1; @@ -110,15 +102,6 @@ NPY_NO_EXPORT int raise_no_loop_found_error( PyUFuncObject *ufunc, PyObject **dtypes) { - static PyObject *exc_type = NULL; - - npy_cache_import( - "numpy._core._exceptions", "_UFuncNoLoopError", - &exc_type); - if (exc_type == NULL) { - return -1; - } - PyObject *dtypes_tup = PyArray_TupleFromItems(ufunc->nargs, dtypes, 1); if (dtypes_tup == NULL) { return -1; @@ -129,7 +112,7 @@ raise_no_loop_found_error( if (exc_value == NULL) { return -1; } - PyErr_SetObject(exc_type, exc_value); + PyErr_SetObject(npy_static_pydata._UFuncNoLoopError, exc_value); Py_DECREF(exc_value); return -1; @@ -181,15 +164,8 @@ raise_input_casting_error( PyArray_Descr *to, npy_intp i) { - static PyObject *exc_type = NULL; - npy_cache_import( - "numpy._core._exceptions", "_UFuncInputCastingError", - &exc_type); - if 
(exc_type == NULL) { - return -1; - } - - return raise_casting_error(exc_type, ufunc, casting, from, to, i); + return raise_casting_error(npy_static_pydata._UFuncInputCastingError, + ufunc, casting, from, to, i); } @@ -204,15 +180,8 @@ raise_output_casting_error( PyArray_Descr *to, npy_intp i) { - static PyObject *exc_type = NULL; - npy_cache_import( - "numpy._core._exceptions", "_UFuncOutputCastingError", - &exc_type); - if (exc_type == NULL) { - return -1; - } - - return raise_casting_error(exc_type, ufunc, casting, from, to, i); + return raise_casting_error(npy_static_pydata._UFuncOutputCastingError, + ufunc, casting, from, to, i); } @@ -460,6 +429,34 @@ PyUFunc_NegativeTypeResolver(PyUFuncObject *ufunc, return ret; } +/* + * This function applies special type resolution rules for the 'sign' ufunc. + * 'sign' converts timedelta64 to float64, so isn't covered by the simple + * unary type resolution. + * + * Returns 0 on success, -1 on error. + */ +NPY_NO_EXPORT int +PyUFunc_SignTypeResolver(PyUFuncObject *ufunc, + NPY_CASTING casting, + PyArrayObject **operands, + PyObject *type_tup, + PyArray_Descr **out_dtypes) +{ + if (PyArray_DESCR(operands[0])->type_num == NPY_TIMEDELTA) { + out_dtypes[0] = NPY_DT_CALL_ensure_canonical(PyArray_DESCR(operands[0])); + if (out_dtypes[0] == NULL) { + return -1; + } + out_dtypes[1] = PyArray_DescrFromType(NPY_DOUBLE); + return 0; + } + else { + return PyUFunc_SimpleUniformOperationTypeResolver(ufunc, casting, + operands, type_tup, out_dtypes); + } +} + /* * The ones_like function shouldn't really be a ufunc, but while it @@ -615,6 +612,9 @@ PyUFunc_SimpleUniformOperationTypeResolver( descr = PyArray_DESCR(operands[0]); } out_dtypes[0] = NPY_DT_CALL_ensure_canonical(descr); + if (out_dtypes[0] == NULL) { + return -1; + } } /* All types are the same - copy the first one to the rest */ @@ -681,6 +681,9 @@ PyUFunc_IsNaTTypeResolver(PyUFuncObject *ufunc, } out_dtypes[0] = NPY_DT_CALL_ensure_canonical(PyArray_DESCR(operands[0])); + if 
(out_dtypes[0] == NULL) { + return -1; + } out_dtypes[1] = PyArray_DescrFromType(NPY_BOOL); return 0; @@ -700,6 +703,9 @@ PyUFunc_IsFiniteTypeResolver(PyUFuncObject *ufunc, } out_dtypes[0] = NPY_DT_CALL_ensure_canonical(PyArray_DESCR(operands[0])); + if (out_dtypes[0] == NULL) { + return -1; + } out_dtypes[1] = PyArray_DescrFromType(NPY_BOOL); return 0; @@ -1283,9 +1289,10 @@ PyUFunc_DivisionTypeResolver(PyUFuncObject *ufunc, type_num2 = PyArray_DESCR(operands[1])->type_num; /* Use the default when datetime and timedelta are not involved */ - if (!PyTypeNum_ISDATETIME(type_num1) && !PyTypeNum_ISDATETIME(type_num2)) { - return PyUFunc_DefaultTypeResolver(ufunc, casting, operands, - type_tup, out_dtypes); + if ((!PyTypeNum_ISDATETIME(type_num1) && !PyTypeNum_ISDATETIME(type_num2)) || + (PyTypeNum_ISOBJECT(type_num1) || PyTypeNum_ISOBJECT(type_num2))) { + return PyUFunc_DefaultTypeResolver(ufunc, casting, operands, type_tup, + out_dtypes); } if (type_num1 == NPY_TIMEDELTA) { @@ -1441,22 +1448,6 @@ PyUFunc_TrueDivisionTypeResolver(PyUFuncObject *ufunc, PyArray_Descr **out_dtypes) { int type_num1, type_num2; - static PyObject *default_type_tup = NULL; - - /* Set default type for integer inputs to NPY_DOUBLE */ - if (default_type_tup == NULL) { - PyArray_Descr *tmp = PyArray_DescrFromType(NPY_DOUBLE); - - if (tmp == NULL) { - return -1; - } - default_type_tup = PyTuple_Pack(3, tmp, tmp, tmp); - if (default_type_tup == NULL) { - Py_DECREF(tmp); - return -1; - } - Py_DECREF(tmp); - } type_num1 = PyArray_DESCR(operands[0])->type_num; type_num2 = PyArray_DESCR(operands[1])->type_num; @@ -1464,8 +1455,9 @@ PyUFunc_TrueDivisionTypeResolver(PyUFuncObject *ufunc, if (type_tup == NULL && (PyTypeNum_ISINTEGER(type_num1) || PyTypeNum_ISBOOL(type_num1)) && (PyTypeNum_ISINTEGER(type_num2) || PyTypeNum_ISBOOL(type_num2))) { - return PyUFunc_DefaultTypeResolver(ufunc, casting, operands, - default_type_tup, out_dtypes); + return PyUFunc_DefaultTypeResolver( + ufunc, casting, operands, + 
npy_static_pydata.default_truediv_type_tup, out_dtypes); } return PyUFunc_DivisionTypeResolver(ufunc, casting, operands, type_tup, out_dtypes); @@ -1501,7 +1493,7 @@ find_userloop(PyUFuncObject *ufunc, if (key == NULL) { return -1; } - obj = PyDict_GetItemWithError(ufunc->userloops, key); + obj = PyDict_GetItemWithError(ufunc->userloops, key); // noqa: borrowed-ref - manual fix needed Py_DECREF(key); if (obj == NULL && PyErr_Occurred()){ return -1; @@ -1788,7 +1780,7 @@ linear_search_userloop_type_resolver(PyUFuncObject *self, if (key == NULL) { return -1; } - obj = PyDict_GetItemWithError(self->userloops, key); + obj = PyDict_GetItemWithError(self->userloops, key); // noqa: borrowed-ref - manual fix needed Py_DECREF(key); if (obj == NULL && PyErr_Occurred()){ return -1; @@ -1859,7 +1851,7 @@ type_tuple_userloop_type_resolver(PyUFuncObject *self, if (key == NULL) { return -1; } - obj = PyDict_GetItemWithError(self->userloops, key); + obj = PyDict_GetItemWithError(self->userloops, key); // noqa: borrowed-ref - manual fix needed Py_DECREF(key); if (obj == NULL && PyErr_Occurred()){ return -1; @@ -1965,17 +1957,7 @@ linear_search_type_resolver(PyUFuncObject *self, ufunc_name = ufunc_get_name_cstr(self); - int promotion_state = get_npy_promotion_state(); - - assert(promotion_state != NPY_USE_WEAK_PROMOTION_AND_WARN); - /* Always "use" with new promotion in case of Python int/float/complex */ - int use_min_scalar; - if (promotion_state == NPY_USE_LEGACY_PROMOTION) { - use_min_scalar = should_use_min_scalar(nin, op, 0, NULL); - } - else { - use_min_scalar = should_use_min_scalar_weak_literals(nin, op); - } + int use_min_scalar = should_use_min_scalar_weak_literals(nin, op); /* If the ufunc has userloops, search for them. 
*/ if (self->userloops) { @@ -2169,17 +2151,7 @@ type_tuple_type_resolver(PyUFuncObject *self, ufunc_name = ufunc_get_name_cstr(self); - int promotion_state = get_npy_promotion_state(); - - assert(promotion_state != NPY_USE_WEAK_PROMOTION_AND_WARN); - /* Always "use" with new promotion in case of Python int/float/complex */ - int use_min_scalar; - if (promotion_state == NPY_USE_LEGACY_PROMOTION) { - use_min_scalar = should_use_min_scalar(nin, op, 0, NULL); - } - else { - use_min_scalar = should_use_min_scalar_weak_literals(nin, op); - } + int use_min_scalar = should_use_min_scalar_weak_literals(nin, op); /* Fill in specified_types from the tuple or string */ const char *bad_type_tup_msg = ( @@ -2294,19 +2266,17 @@ PyUFunc_DivmodTypeResolver(PyUFuncObject *ufunc, return PyUFunc_DefaultTypeResolver(ufunc, casting, operands, type_tup, out_dtypes); } - if (type_num1 == NPY_TIMEDELTA) { - if (type_num2 == NPY_TIMEDELTA) { - out_dtypes[0] = PyArray_PromoteTypes(PyArray_DESCR(operands[0]), - PyArray_DESCR(operands[1])); - out_dtypes[1] = out_dtypes[0]; - Py_INCREF(out_dtypes[1]); - out_dtypes[2] = PyArray_DescrFromType(NPY_LONGLONG); - out_dtypes[3] = out_dtypes[0]; - Py_INCREF(out_dtypes[3]); - } - else { - return raise_binary_type_reso_error(ufunc, operands); + if (type_num1 == NPY_TIMEDELTA && type_num2 == NPY_TIMEDELTA) { + out_dtypes[0] = PyArray_PromoteTypes(PyArray_DESCR(operands[0]), + PyArray_DESCR(operands[1])); + if (out_dtypes[0] == NULL) { + return -1; } + out_dtypes[1] = out_dtypes[0]; + Py_INCREF(out_dtypes[1]); + out_dtypes[2] = PyArray_DescrFromType(NPY_LONGLONG); + out_dtypes[3] = out_dtypes[0]; + Py_INCREF(out_dtypes[3]); } else { return raise_binary_type_reso_error(ufunc, operands); diff --git a/numpy/_core/src/umath/ufunc_type_resolution.h b/numpy/_core/src/umath/ufunc_type_resolution.h index 3f8e7505ea39..531e9267afa2 100644 --- a/numpy/_core/src/umath/ufunc_type_resolution.h +++ b/numpy/_core/src/umath/ufunc_type_resolution.h @@ -1,6 +1,10 @@ 
#ifndef _NPY_PRIVATE__UFUNC_TYPE_RESOLUTION_H_ #define _NPY_PRIVATE__UFUNC_TYPE_RESOLUTION_H_ +#ifdef __cplusplus +extern "C" { +#endif + NPY_NO_EXPORT int PyUFunc_SimpleBinaryComparisonTypeResolver(PyUFuncObject *ufunc, NPY_CASTING casting, @@ -15,6 +19,13 @@ PyUFunc_NegativeTypeResolver(PyUFuncObject *ufunc, PyObject *type_tup, PyArray_Descr **out_dtypes); +NPY_NO_EXPORT int +PyUFunc_SignTypeResolver(PyUFuncObject *ufunc, + NPY_CASTING casting, + PyArrayObject **operands, + PyObject *type_tup, + PyArray_Descr **out_dtypes); + NPY_NO_EXPORT int PyUFunc_OnesLikeTypeResolver(PyUFuncObject *ufunc, NPY_CASTING casting, @@ -142,4 +153,8 @@ PyUFunc_DefaultLegacyInnerLoopSelector(PyUFuncObject *ufunc, NPY_NO_EXPORT int raise_no_loop_found_error(PyUFuncObject *ufunc, PyObject **dtypes); +#ifdef __cplusplus +} +#endif + #endif diff --git a/numpy/_core/src/umath/umathmodule.c b/numpy/_core/src/umath/umathmodule.c index 7c774f9fffc3..eac1283b95ff 100644 --- a/numpy/_core/src/umath/umathmodule.c +++ b/numpy/_core/src/umath/umathmodule.c @@ -22,6 +22,7 @@ #include "numpy/ufuncobject.h" #include "numpy/npy_3kcompat.h" #include "npy_pycompat.h" +#include "npy_argparse.h" #include "abstract.h" #include "numpy/npy_math.h" @@ -31,6 +32,7 @@ #include "stringdtype_ufuncs.h" #include "special_integer_comparisons.h" #include "extobj.h" /* for _extobject_contextvar exposure */ +#include "ufunc_type_resolution.h" /* Automatically generated code to define all ufuncs: */ #include "funcs.inc" @@ -161,46 +163,6 @@ ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) { return (PyObject *)self; } -/* docstring in numpy.add_newdocs.py */ -PyObject * -add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args) -{ - PyUFuncObject *ufunc; - PyObject *str; - if (!PyArg_ParseTuple(args, "O!O!:_add_newdoc_ufunc", &PyUFunc_Type, &ufunc, - &PyUnicode_Type, &str)) { - return NULL; - } - if (ufunc->doc != NULL) { - PyErr_SetString(PyExc_ValueError, - "Cannot change docstring of 
ufunc with non-NULL docstring"); - return NULL; - } - - PyObject *tmp = PyUnicode_AsUTF8String(str); - if (tmp == NULL) { - return NULL; - } - char *docstr = PyBytes_AS_STRING(tmp); - - /* - * This introduces a memory leak, as the memory allocated for the doc - * will not be freed even if the ufunc itself is deleted. In practice - * this should not be a problem since the user would have to - * repeatedly create, document, and throw away ufuncs. - */ - char *newdocstr = malloc(strlen(docstr) + 1); - if (!newdocstr) { - Py_DECREF(tmp); - return PyErr_NoMemory(); - } - strcpy(newdocstr, docstr); - ufunc->doc = newdocstr; - - Py_DECREF(tmp); - Py_RETURN_NONE; -} - /* ***************************************************************************** @@ -208,29 +170,6 @@ add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args) ***************************************************************************** */ -NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_array_ufunc = NULL; -NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_array_wrap = NULL; -NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_pyvals_name = NULL; - -/* intern some strings used in ufuncs, returns 0 on success */ -static int -intern_strings(void) -{ - npy_um_str_array_ufunc = PyUnicode_InternFromString("__array_ufunc__"); - if (npy_um_str_array_ufunc == NULL) { - return -1; - } - npy_um_str_array_wrap = PyUnicode_InternFromString("__array_wrap__"); - if (npy_um_str_array_wrap == NULL) { - return -1; - } - npy_um_str_pyvals_name = PyUnicode_InternFromString(UFUNC_PYVALS_NAME); - if (npy_um_str_pyvals_name == NULL) { - return -1; - } - return 0; -} - /* Setup the umath part of the module */ int initumath(PyObject *m) @@ -272,8 +211,8 @@ int initumath(PyObject *m) #undef ADDSCONST PyModule_AddIntConstant(m, "UFUNC_BUFSIZE_DEFAULT", (long)NPY_BUFSIZE); - Py_INCREF(npy_extobj_contextvar); - PyModule_AddObject(m, "_extobj_contextvar", npy_extobj_contextvar); + Py_INCREF(npy_static_pydata.npy_extobj_contextvar); + 
PyModule_AddObject(m, "_extobj_contextvar", npy_static_pydata.npy_extobj_contextvar); PyModule_AddObject(m, "PINF", PyFloat_FromDouble(NPY_INFINITY)); PyModule_AddObject(m, "NINF", PyFloat_FromDouble(-NPY_INFINITY)); @@ -281,24 +220,21 @@ int initumath(PyObject *m) PyModule_AddObject(m, "NZERO", PyFloat_FromDouble(NPY_NZERO)); PyModule_AddObject(m, "NAN", PyFloat_FromDouble(NPY_NAN)); - s = PyDict_GetItemString(d, "divide"); + s = PyDict_GetItemString(d, "divide"); // noqa: borrowed-ref OK PyDict_SetItemString(d, "true_divide", s); - s = PyDict_GetItemString(d, "conjugate"); - s2 = PyDict_GetItemString(d, "remainder"); + s = PyDict_GetItemString(d, "conjugate"); // noqa: borrowed-ref OK + s2 = PyDict_GetItemString(d, "remainder"); // noqa: borrowed-ref OK + /* Setup the array object's numerical structures with appropriate ufuncs in d*/ - _PyArray_SetNumericOps(d); + if (_PyArray_SetNumericOps(d) < 0) { + return -1; + } PyDict_SetItemString(d, "conj", s); PyDict_SetItemString(d, "mod", s2); - if (intern_strings() < 0) { - PyErr_SetString(PyExc_RuntimeError, - "cannot intern umath strings while initializing _multiarray_umath."); - return -1; - } - /* * Set up promoters for logical functions * TODO: This should probably be done at a better place, or even in the @@ -346,5 +282,9 @@ int initumath(PyObject *m) return -1; } + if (init_argparse_mutex() < 0) { + return -1; + } + return 0; } diff --git a/numpy/_core/src/umath/wrapping_array_method.c b/numpy/_core/src/umath/wrapping_array_method.c index 9b3970561f3f..924bac9524e9 100644 --- a/numpy/_core/src/umath/wrapping_array_method.c +++ b/numpy/_core/src/umath/wrapping_array_method.c @@ -83,8 +83,8 @@ typedef struct { #define WRAPPING_AUXDATA_FREELIST_SIZE 5 -static int wrapping_auxdata_freenum = 0; -static wrapping_auxdata *wrapping_auxdata_freelist[WRAPPING_AUXDATA_FREELIST_SIZE] = {NULL}; +static NPY_TLS int wrapping_auxdata_freenum = 0; +static NPY_TLS wrapping_auxdata 
*wrapping_auxdata_freelist[WRAPPING_AUXDATA_FREELIST_SIZE] = {NULL}; static void @@ -114,7 +114,7 @@ get_wrapping_auxdata(void) } else { res = PyMem_Calloc(1, sizeof(wrapping_auxdata)); - if (res < 0) { + if (res == NULL) { PyErr_NoMemory(); return NULL; } diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index 83034705f525..db72d6819ba1 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -3,51 +3,73 @@ operations. """ +import functools import sys + import numpy as np from numpy import ( - equal, not_equal, less, less_equal, greater, greater_equal, - add, multiply as _multiply_ufunc, + add, + equal, + greater, + greater_equal, + less, + less_equal, + multiply as _multiply_ufunc, + not_equal, ) from numpy._core.multiarray import _vec_string +from numpy._core.overrides import array_function_dispatch, set_module from numpy._core.umath import ( + _center, + _expandtabs, + _expandtabs_length, + _ljust, + _lstrip_chars, + _lstrip_whitespace, + _partition, + _partition_index, + _replace, + _rjust, + _rpartition, + _rpartition_index, + _rstrip_chars, + _rstrip_whitespace, + _slice, + _strip_chars, + _strip_whitespace, + _zfill, + count as _count_ufunc, + endswith as _endswith_ufunc, + find as _find_ufunc, + index as _index_ufunc, + isalnum, isalpha, + isdecimal, isdigit, - isspace, - isalnum, islower, - isupper, - istitle, - isdecimal, isnumeric, - str_len, - find as _find_ufunc, + isspace, + istitle, + isupper, rfind as _rfind_ufunc, - index as _index_ufunc, rindex as _rindex_ufunc, - count as _count_ufunc, startswith as _startswith_ufunc, - endswith as _endswith_ufunc, - _lstrip_whitespace, - _lstrip_chars, - _rstrip_whitespace, - _rstrip_chars, - _strip_whitespace, - _strip_chars, - _replace, - _expandtabs_length, - _expandtabs, - _center, - _ljust, - _rjust, - _zfill, - _partition, - _partition_index, - _rpartition, - _rpartition_index, + str_len, ) +def _override___module__(): + for ufunc in [ + isalnum, isalpha, isdecimal, isdigit, islower, 
isnumeric, isspace, + istitle, isupper, str_len, + ]: + ufunc.__module__ = "numpy.strings" + ufunc.__qualname__ = ufunc.__name__ + + +_override___module__() + + __all__ = [ # UFuncs "equal", "not_equal", "less", "less_equal", "greater", "greater_equal", @@ -55,7 +77,7 @@ "isupper", "istitle", "isdecimal", "isnumeric", "str_len", "find", "rfind", "index", "rindex", "count", "startswith", "endswith", "lstrip", "rstrip", "strip", "replace", "expandtabs", "center", "ljust", "rjust", - "zfill", "partition", "rpartition", + "zfill", "partition", "rpartition", "slice", # _vec_string - Will gradually become ufuncs as well "upper", "lower", "swapcase", "capitalize", "title", @@ -63,13 +85,16 @@ # _vec_string - Will probably not become ufuncs "mod", "decode", "encode", "translate", - # Removed from namespace until behavior has been crystalized + # Removed from namespace until behavior has been crystallized # "join", "split", "rsplit", "splitlines", ] MAX = np.iinfo(np.int64).max +array_function_dispatch = functools.partial( + array_function_dispatch, module='numpy.strings') + def _get_num_chars(a): """ @@ -116,6 +141,12 @@ def _clean_args(*args): return newargs +def _multiply_dispatcher(a, i): + return (a,) + + +@set_module("numpy.strings") +@array_function_dispatch(_multiply_dispatcher) def multiply(a, i): """ Return (a * i), that is string multiple concatenation, @@ -138,6 +169,7 @@ def multiply(a, i): Examples -------- + >>> import numpy as np >>> a = np.array(["a", "b", "c"]) >>> np.strings.multiply(a, 3) array(['aaa', 'bbb', 'ccc'], dtype=' sys.maxsize / np.maximum(i, 1)): - raise MemoryError("repeated string is too long") + raise OverflowError("Overflow encountered in string multiply") buffersizes = a_len * i out_dtype = f"{a.dtype.char}{buffersizes.max()}" @@ -178,6 +210,12 @@ def multiply(a, i): return _multiply_ufunc(a, i, out=out) +def _mod_dispatcher(a, values): + return (a, values) + + +@set_module("numpy.strings") +@array_function_dispatch(_mod_dispatcher) def 
mod(a, values): """ Return (a % i), that is pre-Python 2.6 string formatting @@ -186,7 +224,7 @@ def mod(a, values): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array_like, with ``bytes_`` or ``str_`` dtype values : array_like of values These values will be element-wise interpolated into the string. @@ -196,12 +234,25 @@ def mod(a, values): out : ndarray Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, depending on input types - + + Examples + -------- + >>> import numpy as np + >>> a = np.array(["NumPy is a %s library"]) + >>> np.strings.mod(a, values=["Python"]) + array(['NumPy is a Python library'], dtype='>> a = np.array([b'%d bytes', b'%d bits']) + >>> values = np.array([8, 64]) + >>> np.strings.mod(a, values) + array([b'8 bytes', b'64 bits'], dtype='|S7') + """ return _to_bytes_or_str_array( _vec_string(a, np.object_, '__mod__', (values,)), a) +@set_module("numpy.strings") def find(a, sub, start=0, end=None): """ For each element, return the lowest index in the string where @@ -212,7 +263,7 @@ def find(a, sub, start=0, end=None): ---------- a : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype - sub : array_like, with `np.bytes_` or `np.str_` dtype + sub : array_like, with ``bytes_`` or ``str_`` dtype The substring to search for. 
start, end : array_like, with any integer dtype @@ -229,6 +280,7 @@ def find(a, sub, start=0, end=None): Examples -------- + >>> import numpy as np >>> a = np.array(["NumPy is a Python library"]) >>> np.strings.find(a, "Python") array([11]) @@ -238,6 +290,7 @@ def find(a, sub, start=0, end=None): return _find_ufunc(a, sub, start, end) +@set_module("numpy.strings") def rfind(a, sub, start=0, end=None): """ For each element, return the highest index in the string where @@ -263,11 +316,24 @@ def rfind(a, sub, start=0, end=None): -------- str.rfind + Examples + -------- + >>> import numpy as np + >>> a = np.array(["Computer Science"]) + >>> np.strings.rfind(a, "Science", start=0, end=None) + array([9]) + >>> np.strings.rfind(a, "Science", start=0, end=8) + array([-1]) + >>> b = np.array(["Computer Science", "Science"]) + >>> np.strings.rfind(b, "Science", start=0, end=None) + array([9, 0]) + """ end = end if end is not None else MAX return _rfind_ufunc(a, sub, start, end) +@set_module("numpy.strings") def index(a, sub, start=0, end=None): """ Like `find`, but raises :exc:`ValueError` when the substring is not found. 
@@ -291,6 +357,7 @@ def index(a, sub, start=0, end=None): Examples -------- + >>> import numpy as np >>> a = np.array(["Computer Science"]) >>> np.strings.index(a, "Science", start=0, end=None) array([9]) @@ -300,6 +367,7 @@ def index(a, sub, start=0, end=None): return _index_ufunc(a, sub, start, end) +@set_module("numpy.strings") def rindex(a, sub, start=0, end=None): """ Like `rfind`, but raises :exc:`ValueError` when the substring `sub` is @@ -307,9 +375,9 @@ def rindex(a, sub, start=0, end=None): Parameters ---------- - a : array-like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``bytes_`` or ``str_`` dtype - sub : array-like, with `np.bytes_` or `np.str_` dtype + sub : array-like, with ``bytes_`` or ``str_`` dtype start, end : array-like, with any integer dtype, optional @@ -327,12 +395,13 @@ def rindex(a, sub, start=0, end=None): >>> a = np.array(["Computer Science"]) >>> np.strings.rindex(a, "Science", start=0, end=None) array([9]) - + """ end = end if end is not None else MAX return _rindex_ufunc(a, sub, start, end) +@set_module("numpy.strings") def count(a, sub, start=0, end=None): """ Returns an array with the number of non-overlapping occurrences of @@ -359,6 +428,7 @@ def count(a, sub, start=0, end=None): Examples -------- + >>> import numpy as np >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> c array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> import numpy as np + >>> s = np.array(['foo', 'bar']) + >>> s + array(['foo', 'bar'], dtype='>> np.strings.startswith(s, 'fo') + array([True, False]) + >>> np.strings.startswith(s, 'o', start=1, end=2) + array([True, False]) + """ end = end if end is not None else MAX return _startswith_ufunc(a, prefix, start, end) +@set_module("numpy.strings") def endswith(a, suffix, start=0, end=None): """ Returns a boolean array which is `True` where the string element @@ -431,6 +514,7 @@ def endswith(a, suffix, start=0, end=None): Examples -------- + >>> import numpy as np >>> s = np.array(['foo', 'bar']) >>> 
s array(['foo', 'bar'], dtype='>> import numpy as np >>> c = np.array([b'\x81\xc1\x81\xc1\x81\xc1', b'@@\x81\xc1@@', ... b'\x81\x82\xc2\xc1\xc2\x82\x81']) >>> c @@ -490,6 +581,8 @@ def decode(a, encoding=None, errors=None): np.str_('')) +@set_module("numpy.strings") +@array_function_dispatch(_code_dispatcher) def encode(a, encoding=None, errors=None): """ Calls :meth:`str.encode` element-wise. @@ -522,17 +615,24 @@ def encode(a, encoding=None, errors=None): Examples -------- + >>> import numpy as np >>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> np.strings.encode(a, encoding='cp037') array([b'\x81\xc1\x81\xc1\x81\xc1', b'@@\x81\xc1@@', b'\x81\x82\xc2\xc1\xc2\x82\x81'], dtype='|S7') - + """ return _to_bytes_or_str_array( _vec_string(a, np.object_, 'encode', _clean_args(encoding, errors)), np.bytes_(b'')) +def _expandtabs_dispatcher(a, tabsize=None): + return (a,) + + +@set_module("numpy.strings") +@array_function_dispatch(_expandtabs_dispatcher) def expandtabs(a, tabsize=8): """ Return a copy of each string element where all tab characters are @@ -566,6 +666,7 @@ def expandtabs(a, tabsize=8): Examples -------- + >>> import numpy as np >>> a = np.array(['\t\tHello\tworld']) >>> np.strings.expandtabs(a, tabsize=4) # doctest: +SKIP array([' Hello world'], dtype='>> import numpy as np >>> c = np.array(['a1b2','1b2a','b2a1','2a1b']); c array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype='>> np.strings.center(c, width=9) @@ -625,23 +733,32 @@ def center(a, width, fillchar=' '): array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype='>> import numpy as np >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> np.strings.ljust(c, width=3) array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> import numpy as np >>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> np.strings.rjust(a, width=3) array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> import numpy as np >>> np.strings.zfill(['1', '-1', '+1'], 3) array(['001', '-01', '+01'], dtype='>> import numpy as np >>> c = np.array(['aAaAaA', ' aA ', 
'abBABba']) >>> c array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> import numpy as np >>> c = np.array(['aAaAaA', 'abBABba']) >>> c array(['aAaAaA', 'abBABba'], dtype='>> import numpy as np >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> c array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> import numpy as np >>> c = np.array(['a1b c', '1bca', 'bca1']); c array(['a1b c', '1bca', 'bca1'], dtype='>> np.strings.upper(c) @@ -962,6 +1119,8 @@ def upper(a): return _vec_string(a_arr, a_arr.dtype, 'upper') +@set_module("numpy.strings") +@array_function_dispatch(_unary_op_dispatcher) def lower(a): """ Return an array with the elements converted to lowercase. @@ -987,6 +1146,7 @@ def lower(a): Examples -------- + >>> import numpy as np >>> c = np.array(['A1B C', '1BCA', 'BCA1']); c array(['A1B C', '1BCA', 'BCA1'], dtype='>> np.strings.lower(c) @@ -997,6 +1157,8 @@ def lower(a): return _vec_string(a_arr, a_arr.dtype, 'lower') +@set_module("numpy.strings") +@array_function_dispatch(_unary_op_dispatcher) def swapcase(a): """ Return element-wise a copy of the string with @@ -1023,6 +1185,7 @@ def swapcase(a): Examples -------- + >>> import numpy as np >>> c=np.array(['a1B c','1b Ca','b Ca1','cA1b'],'S5'); c array(['a1B c', '1b Ca', 'b Ca1', 'cA1b'], dtype='|S5') @@ -1035,6 +1198,8 @@ def swapcase(a): return _vec_string(a_arr, a_arr.dtype, 'swapcase') +@set_module("numpy.strings") +@array_function_dispatch(_unary_op_dispatcher) def capitalize(a): """ Return a copy of ``a`` with only the first character of each element @@ -1061,6 +1226,7 @@ def capitalize(a): Examples -------- + >>> import numpy as np >>> c = np.array(['a1b2','1b2a','b2a1','2a1b'],'S4'); c array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype='|S4') @@ -1073,6 +1239,8 @@ def capitalize(a): return _vec_string(a_arr, a_arr.dtype, 'capitalize') +@set_module("numpy.strings") +@array_function_dispatch(_unary_op_dispatcher) def title(a): """ Return element-wise title cased version of string or unicode. 
@@ -1101,6 +1269,7 @@ def title(a): Examples -------- + >>> import numpy as np >>> c=np.array(['a1b c','1b ca','b ca1','ca1b'],'S5'); c array(['a1b c', '1b ca', 'b ca1', 'ca1b'], dtype='|S5') @@ -1113,6 +1282,12 @@ def title(a): return _vec_string(a_arr, a_arr.dtype, 'title') +def _replace_dispatcher(a, old, new, count=None): + return (a,) + + +@set_module("numpy.strings") +@array_function_dispatch(_replace_dispatcher) def replace(a, old, new, count=-1): """ For each element in ``a``, return a copy of the string with @@ -1137,9 +1312,10 @@ def replace(a, old, new, count=-1): See Also -------- str.replace - + Examples -------- + >>> import numpy as np >>> a = np.array(["That is a mango", "Monkeys eat mangos"]) >>> np.strings.replace(a, 'mango', 'banana') array(['That is a banana', 'Monkeys eat bananas'], dtype='>> a = np.array(["The dish is fresh", "This is it"]) >>> np.strings.replace(a, 'is', 'was') array(['The dwash was fresh', 'Thwas was it'], dtype='>> import numpy as np >>> np.strings.join('-', 'osd') # doctest: +SKIP array('o-s-d', dtype='>> import numpy as np >>> x = np.array("Numpy is nice!") >>> np.strings.split(x, " ") # doctest: +SKIP array(list(['Numpy', 'is', 'nice!']), dtype=object) # doctest: +SKIP @@ -1246,6 +1441,7 @@ def _split(a, sep=None, maxsplit=None): a, np.object_, 'split', [sep] + _clean_args(maxsplit)) +@array_function_dispatch(_split_dispatcher) def _rsplit(a, sep=None, maxsplit=None): """ For each element in `a`, return a list of the words in the @@ -1278,11 +1474,12 @@ def _rsplit(a, sep=None, maxsplit=None): Examples -------- + >>> import numpy as np >>> a = np.array(['aAaAaA', 'abBABba']) >>> np.strings.rsplit(a, 'A') # doctest: +SKIP array([list(['a', 'a', 'a', '']), # doctest: +SKIP list(['abB', 'Bba'])], dtype=object) # doctest: +SKIP - + """ # This will return an array of lists of different sizes, so we # leave it as an object array @@ -1290,6 +1487,11 @@ def _rsplit(a, sep=None, maxsplit=None): a, np.object_, 'rsplit', [sep] + 
_clean_args(maxsplit)) +def _splitlines_dispatcher(a, keepends=None): + return (a,) + + +@array_function_dispatch(_splitlines_dispatcher) def _splitlines(a, keepends=None): """ For each element in `a`, return a list of the lines in the @@ -1314,11 +1516,25 @@ def _splitlines(a, keepends=None): -------- str.splitlines + Examples + -------- + >>> np.char.splitlines("first line\\nsecond line") + array(list(['first line', 'second line']), dtype=object) + >>> a = np.array(["first\\nsecond", "third\\nfourth"]) + >>> np.char.splitlines(a) + array([list(['first', 'second']), list(['third', 'fourth'])], dtype=object) + """ return _vec_string( a, np.object_, 'splitlines', _clean_args(keepends)) +def _partition_dispatcher(a, sep): + return (a,) + + +@set_module("numpy.strings") +@array_function_dispatch(_partition_dispatcher) def partition(a, sep): """ Partition each element in ``a`` around ``sep``. @@ -1353,6 +1569,7 @@ def partition(a, sep): Examples -------- + >>> import numpy as np >>> x = np.array(["Numpy is nice!"]) >>> np.strings.partition(x, " ") (array(['Numpy'], dtype='>> import numpy as np >>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> np.strings.rpartition(a, 'A') (array(['aAaAa', ' a', 'abB'], dtype='>> import numpy as np >>> a = np.array(['a1b c', '1bca', 'bca1']) >>> table = a[0].maketrans('abc', '123') >>> deletechars = ' ' >>> np.char.translate(a, table, deletechars) array(['112 3', '1231', '2311'], dtype='>> import numpy as np + >>> a = np.array(['hello', 'world']) + >>> np.strings.slice(a, 2) + array(['he', 'wo'], dtype='>> np.strings.slice(a, 2, None) + array(['llo', 'rld'], dtype='>> np.strings.slice(a, 1, 5, 2) + array(['el', 'ol'], dtype='>> np.strings.slice(a, np.array([1, 2]), np.array([4, 5])) + array(['ell', 'rld'], dtype='>> b = np.array(['hello world', 'ÎŗÎĩΚι ΃ÎŋĪ… ÎēΌ΃ÎŧÎĩ', 'äŊ åĨŊä¸–į•Œ', '👋 🌍'], + ... 
dtype=np.dtypes.StringDType()) + >>> np.strings.slice(b, -2) + array(['hello wor', 'ÎŗÎĩΚι ΃ÎŋĪ… ÎēΌ΃', 'äŊ åĨŊ', '👋'], dtype=StringDType()) + + >>> np.strings.slice(b, -2, None) + array(['ld', 'ÎŧÎĩ', 'ä¸–į•Œ', ' 🌍'], dtype=StringDType()) + + >>> np.strings.slice(b, [3, -10, 2, -3], [-1, -2, -1, 3]) + array(['lo worl', ' ΃ÎŋĪ… ÎēΌ΃', '世', '👋 🌍'], dtype=StringDType()) + + >>> np.strings.slice(b, None, None, -1) + array(['dlrow olleh', 'ÎĩÎŧ΃ΌÎē Ī…Îŋ΃ ιΚÎĩÎŗ', 'į•Œä¸–åĨŊäŊ ', '🌍 👋'], + dtype=StringDType()) + + """ + # Just like in the construction of a regular slice object, if only start + # is specified then start will become stop, see logic in slice_new. + if stop is np._NoValue: + stop = start + start = None + + # adjust start, stop, step to be integers, see logic in PySlice_Unpack + if step is None: + step = 1 + step = np.asanyarray(step) + if not np.issubdtype(step.dtype, np.integer): + raise TypeError(f"unsupported type {step.dtype} for operand 'step'") + if np.any(step == 0): + raise ValueError("slice step cannot be zero") + + if start is None: + start = np.where(step < 0, np.iinfo(np.intp).max, 0) + + if stop is None: + stop = np.where(step < 0, np.iinfo(np.intp).min, np.iinfo(np.intp).max) + + return _slice(a, start, stop, step) diff --git a/numpy/_core/strings.pyi b/numpy/_core/strings.pyi index 5e335c6f7d4a..475da159f783 100644 --- a/numpy/_core/strings.pyi +++ b/numpy/_core/strings.pyi @@ -1,331 +1,536 @@ -from typing import Any, overload +from typing import overload import numpy as np +from numpy._globals import _NoValueType from numpy._typing import ( NDArray, - _ArrayLikeStr_co as U_co, + _AnyShape, + _ArrayLikeAnyString_co as UST_co, _ArrayLikeBytes_co as S_co, _ArrayLikeInt_co as i_co, - _ArrayLikeBool_co as b_co, + _ArrayLikeStr_co as U_co, + _ArrayLikeString_co as T_co, + _SupportsArray, ) +__all__ = [ + "add", + "capitalize", + "center", + "count", + "decode", + "encode", + "endswith", + "equal", + "expandtabs", + "find", + "greater", + 
"greater_equal", + "index", + "isalnum", + "isalpha", + "isdecimal", + "isdigit", + "islower", + "isnumeric", + "isspace", + "istitle", + "isupper", + "less", + "less_equal", + "ljust", + "lower", + "lstrip", + "mod", + "multiply", + "not_equal", + "partition", + "replace", + "rfind", + "rindex", + "rjust", + "rpartition", + "rstrip", + "startswith", + "str_len", + "strip", + "swapcase", + "title", + "translate", + "upper", + "zfill", + "slice", +] + +type _StringDTypeArray = np.ndarray[_AnyShape, np.dtypes.StringDType] +type _StringDTypeSupportsArray = _SupportsArray[np.dtypes.StringDType] +type _StringDTypeOrUnicodeArray = NDArray[np.str_] | _StringDTypeArray + @overload def equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... @overload def equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... @overload def not_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... @overload def not_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def not_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... @overload def greater_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... @overload def greater_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def greater_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... @overload def less_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... @overload def less_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def less_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... @overload def greater(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... @overload def greater(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def greater(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... @overload def less(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... @overload def less(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def less(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... @overload def add(x1: U_co, x2: U_co) -> NDArray[np.str_]: ... 
@overload def add(x1: S_co, x2: S_co) -> NDArray[np.bytes_]: ... +@overload +def add(x1: _StringDTypeSupportsArray, x2: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def add(x1: T_co, x2: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def multiply(a: U_co, i: i_co) -> NDArray[np.str_]: ... @overload def multiply(a: S_co, i: i_co) -> NDArray[np.bytes_]: ... +@overload +def multiply(a: _StringDTypeSupportsArray, i: i_co) -> _StringDTypeArray: ... +@overload +def multiply(a: T_co, i: i_co) -> _StringDTypeOrUnicodeArray: ... @overload -def mod(a: U_co, value: Any) -> NDArray[np.str_]: ... +def mod(a: U_co, value: object) -> NDArray[np.str_]: ... @overload -def mod(a: S_co, value: Any) -> NDArray[np.bytes_]: ... +def mod(a: S_co, value: object) -> NDArray[np.bytes_]: ... +@overload +def mod(a: _StringDTypeSupportsArray, value: object) -> _StringDTypeArray: ... +@overload +def mod(a: T_co, value: object) -> _StringDTypeOrUnicodeArray: ... -def isalpha(x: U_co | S_co) -> NDArray[np.bool]: ... -def isalnum(a: U_co | S_co) -> NDArray[np.bool]: ... -def isdigit(x: U_co | S_co) -> NDArray[np.bool]: ... -def isspace(x: U_co | S_co) -> NDArray[np.bool]: ... -def isdecimal(x: U_co) -> NDArray[np.bool]: ... -def isnumeric(x: U_co) -> NDArray[np.bool]: ... -def islower(a: U_co | S_co) -> NDArray[np.bool]: ... -def istitle(a: U_co | S_co) -> NDArray[np.bool]: ... -def isupper(a: U_co | S_co) -> NDArray[np.bool]: ... +def isalpha(x: UST_co) -> NDArray[np.bool]: ... +def isalnum(a: UST_co) -> NDArray[np.bool]: ... +def isdigit(x: UST_co) -> NDArray[np.bool]: ... +def isspace(x: UST_co) -> NDArray[np.bool]: ... +def isdecimal(x: U_co | T_co) -> NDArray[np.bool]: ... +def isnumeric(x: U_co | T_co) -> NDArray[np.bool]: ... +def islower(a: UST_co) -> NDArray[np.bool]: ... +def istitle(a: UST_co) -> NDArray[np.bool]: ... +def isupper(a: UST_co) -> NDArray[np.bool]: ... -def str_len(x: U_co | S_co) -> NDArray[np.int_]: ... 
+def str_len(x: UST_co) -> NDArray[np.int_]: ... @overload def find( a: U_co, sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def find( a: S_co, sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.int_]: ... +@overload +def find( + a: T_co, + sub: T_co, + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def rfind( a: U_co, sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def rfind( a: S_co, sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.int_]: ... +@overload +def rfind( + a: T_co, + sub: T_co, + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def index( a: U_co, sub: U_co, - start: i_co = ..., - end: None | i_co = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def index( a: S_co, sub: S_co, - start: i_co = ..., - end: None | i_co = ..., + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.int_]: ... +@overload +def index( + a: T_co, + sub: T_co, + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def rindex( a: U_co, sub: U_co, - start: i_co = ..., - end: None | i_co = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def rindex( a: S_co, sub: S_co, - start: i_co = ..., - end: None | i_co = ..., + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.int_]: ... +@overload +def rindex( + a: T_co, + sub: T_co, + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def count( a: U_co, sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... 
@overload def count( a: S_co, sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.int_]: ... +@overload +def count( + a: T_co, + sub: T_co, + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def startswith( a: U_co, prefix: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def startswith( a: S_co, prefix: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.bool]: ... +@overload +def startswith( + a: T_co, + prefix: T_co, + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def endswith( a: U_co, suffix: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def endswith( a: S_co, suffix: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.bool]: ... +@overload +def endswith( + a: T_co, + suffix: T_co, + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... def decode( a: S_co, - encoding: None | str = ..., - errors: None | str = ..., + encoding: str | None = None, + errors: str | None = None, ) -> NDArray[np.str_]: ... - def encode( - a: U_co, - encoding: None | str = ..., - errors: None | str = ..., + a: U_co | T_co, + encoding: str | None = None, + errors: str | None = None, ) -> NDArray[np.bytes_]: ... @overload -def expandtabs(a: U_co, tabsize: i_co = ...) -> NDArray[np.str_]: ... +def expandtabs(a: U_co, tabsize: i_co = 8) -> NDArray[np.str_]: ... +@overload +def expandtabs(a: S_co, tabsize: i_co = 8) -> NDArray[np.bytes_]: ... +@overload +def expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = 8) -> _StringDTypeArray: ... @overload -def expandtabs(a: S_co, tabsize: i_co = ...) -> NDArray[np.bytes_]: ... 
+def expandtabs(a: T_co, tabsize: i_co = 8) -> _StringDTypeOrUnicodeArray: ... @overload -def center(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[np.str_]: ... +def center(a: U_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.str_]: ... @overload -def center(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[np.bytes_]: ... +def center(a: S_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.bytes_]: ... +@overload +def center(a: _StringDTypeSupportsArray, width: i_co, fillchar: UST_co = " ") -> _StringDTypeArray: ... +@overload +def center(a: T_co, width: i_co, fillchar: UST_co = " ") -> _StringDTypeOrUnicodeArray: ... @overload -def ljust(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[np.str_]: ... +def ljust(a: U_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.str_]: ... +@overload +def ljust(a: S_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.bytes_]: ... @overload -def ljust(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[np.bytes_]: ... +def ljust(a: _StringDTypeSupportsArray, width: i_co, fillchar: UST_co = " ") -> _StringDTypeArray: ... +@overload +def ljust(a: T_co, width: i_co, fillchar: UST_co = " ") -> _StringDTypeOrUnicodeArray: ... @overload -def rjust( - a: U_co, - width: i_co, - fillchar: U_co = ..., -) -> NDArray[np.str_]: ... +def rjust(a: U_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.str_]: ... @overload -def rjust( - a: S_co, - width: i_co, - fillchar: S_co = ..., -) -> NDArray[np.bytes_]: ... +def rjust(a: S_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.bytes_]: ... +@overload +def rjust(a: _StringDTypeSupportsArray, width: i_co, fillchar: UST_co = " ") -> _StringDTypeArray: ... +@overload +def rjust(a: T_co, width: i_co, fillchar: UST_co = " ") -> _StringDTypeOrUnicodeArray: ... @overload -def lstrip(a: U_co, chars: None | U_co = ...) -> NDArray[np.str_]: ... +def lstrip(a: U_co, chars: U_co | None = None) -> NDArray[np.str_]: ... 
+@overload +def lstrip(a: S_co, chars: S_co | None = None) -> NDArray[np.bytes_]: ... @overload -def lstrip(a: S_co, chars: None | S_co = ...) -> NDArray[np.bytes_]: ... +def lstrip(a: _StringDTypeSupportsArray, chars: T_co | None = None) -> _StringDTypeArray: ... +@overload +def lstrip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... @overload -def rstrip(a: U_co, char: None | U_co = ...) -> NDArray[np.str_]: ... +def rstrip(a: U_co, chars: U_co | None = None) -> NDArray[np.str_]: ... +@overload +def rstrip(a: S_co, chars: S_co | None = None) -> NDArray[np.bytes_]: ... @overload -def rstrip(a: S_co, char: None | S_co = ...) -> NDArray[np.bytes_]: ... +def rstrip(a: _StringDTypeSupportsArray, chars: T_co | None = None) -> _StringDTypeArray: ... +@overload +def rstrip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... @overload -def strip(a: U_co, chars: None | U_co = ...) -> NDArray[np.str_]: ... +def strip(a: U_co, chars: U_co | None = None) -> NDArray[np.str_]: ... +@overload +def strip(a: S_co, chars: S_co | None = None) -> NDArray[np.bytes_]: ... +@overload +def strip(a: _StringDTypeSupportsArray, chars: T_co | None = None) -> _StringDTypeArray: ... @overload -def strip(a: S_co, chars: None | S_co = ...) -> NDArray[np.bytes_]: ... +def strip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... @overload def zfill(a: U_co, width: i_co) -> NDArray[np.str_]: ... @overload def zfill(a: S_co, width: i_co) -> NDArray[np.bytes_]: ... +@overload +def zfill(a: _StringDTypeSupportsArray, width: i_co) -> _StringDTypeArray: ... +@overload +def zfill(a: T_co, width: i_co) -> _StringDTypeOrUnicodeArray: ... @overload def upper(a: U_co) -> NDArray[np.str_]: ... @overload def upper(a: S_co) -> NDArray[np.bytes_]: ... +@overload +def upper(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def upper(a: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def lower(a: U_co) -> NDArray[np.str_]: ... 
@overload def lower(a: S_co) -> NDArray[np.bytes_]: ... +@overload +def lower(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def lower(a: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def swapcase(a: U_co) -> NDArray[np.str_]: ... @overload def swapcase(a: S_co) -> NDArray[np.bytes_]: ... +@overload +def swapcase(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def swapcase(a: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def capitalize(a: U_co) -> NDArray[np.str_]: ... @overload def capitalize(a: S_co) -> NDArray[np.bytes_]: ... +@overload +def capitalize(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def capitalize(a: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def title(a: U_co) -> NDArray[np.str_]: ... @overload def title(a: S_co) -> NDArray[np.bytes_]: ... +@overload +def title(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def title(a: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def replace( a: U_co, old: U_co, new: U_co, - count: i_co = ..., + count: i_co = -1, ) -> NDArray[np.str_]: ... @overload def replace( a: S_co, old: S_co, new: S_co, - count: i_co = ..., + count: i_co = -1, ) -> NDArray[np.bytes_]: ... - @overload -def join(sep: U_co, seq: U_co) -> NDArray[np.str_]: ... +def replace( + a: _StringDTypeSupportsArray, + old: _StringDTypeSupportsArray, + new: _StringDTypeSupportsArray, + count: i_co = -1, +) -> _StringDTypeArray: ... @overload -def join(sep: S_co, seq: S_co) -> NDArray[np.bytes_]: ... +def replace( + a: T_co, + old: T_co, + new: T_co, + count: i_co = -1, +) -> _StringDTypeOrUnicodeArray: ... @overload -def split( - a: U_co, - sep: None | U_co = ..., - maxsplit: None | i_co = ..., -) -> NDArray[np.object_]: ... +def partition(a: U_co, sep: U_co) -> NDArray[np.str_]: ... @overload -def split( - a: S_co, - sep: None | S_co = ..., - maxsplit: None | i_co = ..., -) -> NDArray[np.object_]: ... 
- +def partition(a: S_co, sep: S_co) -> NDArray[np.bytes_]: ... @overload -def rsplit( - a: U_co, - sep: None | U_co = ..., - maxsplit: None | i_co = ..., -) -> NDArray[np.object_]: ... +def partition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _StringDTypeArray: ... @overload -def rsplit( - a: S_co, - sep: None | S_co = ..., - maxsplit: None | i_co = ..., -) -> NDArray[np.object_]: ... +def partition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ... @overload -def splitlines(a: U_co, keepends: None | b_co = ...) -> NDArray[np.object_]: ... +def rpartition(a: U_co, sep: U_co) -> NDArray[np.str_]: ... @overload -def splitlines(a: S_co, keepends: None | b_co = ...) -> NDArray[np.object_]: ... - +def rpartition(a: S_co, sep: S_co) -> NDArray[np.bytes_]: ... @overload -def partition(a: U_co, sep: U_co) -> NDArray[np.str_]: ... +def rpartition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _StringDTypeArray: ... @overload -def partition(a: S_co, sep: S_co) -> NDArray[np.bytes_]: ... +def rpartition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ... @overload -def rpartition(a: U_co, sep: U_co) -> NDArray[np.str_]: ... +def translate( + a: U_co, + table: str, + deletechars: str | None = None, +) -> NDArray[np.str_]: ... @overload -def rpartition(a: S_co, sep: S_co) -> NDArray[np.bytes_]: ... - +def translate( + a: S_co, + table: str, + deletechars: str | None = None, +) -> NDArray[np.bytes_]: ... @overload def translate( + a: _StringDTypeSupportsArray, + table: str, + deletechars: str | None = None, +) -> _StringDTypeArray: ... +@overload +def translate( + a: T_co, + table: str, + deletechars: str | None = None, +) -> _StringDTypeOrUnicodeArray: ... + +# +@overload +def slice( a: U_co, - table: U_co, - deletechars: None | U_co = ..., + start: i_co | None = None, + stop: i_co | _NoValueType | None = ..., # = np._NoValue + step: i_co | None = None, + /, ) -> NDArray[np.str_]: ... 
@overload -def translate( +def slice( a: S_co, - table: S_co, - deletechars: None | S_co = ..., + start: i_co | None = None, + stop: i_co | _NoValueType | None = ..., # = np._NoValue + step: i_co | None = None, + /, ) -> NDArray[np.bytes_]: ... +@overload +def slice( + a: _StringDTypeSupportsArray, + start: i_co | None = None, + stop: i_co | _NoValueType | None = ..., # = np._NoValue + step: i_co | None = None, + /, +) -> _StringDTypeArray: ... +@overload +def slice( + a: T_co, + start: i_co | None = None, + stop: i_co | _NoValueType | None = ..., # = np._NoValue + step: i_co | None = None, + /, +) -> _StringDTypeOrUnicodeArray: ... diff --git a/numpy/_core/tests/_locales.py b/numpy/_core/tests/_locales.py index b1dc55a9b2dc..debda9639c03 100644 --- a/numpy/_core/tests/_locales.py +++ b/numpy/_core/tests/_locales.py @@ -1,8 +1,8 @@ """Provide class for testing in French locale """ -import sys import locale +import sys import pytest @@ -52,8 +52,6 @@ class CommaDecimalPointLocale: to the initial locale. It also serves as context manager with the same effect. If no such locale is available, the test is skipped. - .. 
versionadded:: 1.15.0 - """ (cur_locale, tst_locale) = find_comma_decimal_point_locale() diff --git a/numpy/_core/tests/_natype.py b/numpy/_core/tests/_natype.py index e529e548cf1e..539dfd2b36e1 100644 --- a/numpy/_core/tests/_natype.py +++ b/numpy/_core/tests/_natype.py @@ -8,16 +8,15 @@ import numpy as np + def _create_binary_propagating_op(name, is_divmod=False): is_cmp = name.strip("_") in ["eq", "ne", "le", "lt", "ge", "gt"] def method(self, other): if ( other is pd_NA - or isinstance(other, (str, bytes)) - or isinstance(other, (numbers.Number, np.bool)) - or isinstance(other, np.ndarray) - and not other.shape + or isinstance(other, (str, bytes, numbers.Number, np.bool)) + or (isinstance(other, np.ndarray) and not other.shape) ): # Need the other.shape clause to handle NumPy scalars, # since we do a setitem on `out` below, which @@ -75,8 +74,7 @@ def __bool__(self): raise TypeError("boolean value of NA is ambiguous") def __hash__(self): - exponent = 31 if is_32bit else 61 - return 2**exponent - 1 + return 2**61 - 1 def __reduce__(self): return "pd_NA" @@ -115,33 +113,6 @@ def __reduce__(self): __abs__ = _create_unary_propagating_op("__abs__") __invert__ = _create_unary_propagating_op("__invert__") - # pow has special - def __pow__(self, other): - if other is pd_NA: - return pd_NA - elif isinstance(other, (numbers.Number, np.bool)): - if other == 0: - # returning positive is correct for +/- 0. 
- return type(other)(1) - else: - return pd_NA - elif util.is_array(other): - return np.where(other == 0, other.dtype.type(1), pd_NA) - - return NotImplemented - - def __rpow__(self, other): - if other is pd_NA: - return pd_NA - elif isinstance(other, (numbers.Number, np.bool)): - if other == 1: - return other - else: - return pd_NA - elif util.is_array(other): - return np.where(other == 1, other, pd_NA) - return NotImplemented - # Logical ops using Kleene logic def __and__(self, other): @@ -169,30 +140,5 @@ def __xor__(self, other): __rxor__ = __xor__ - __array_priority__ = 1000 - _HANDLED_TYPES = (np.ndarray, numbers.Number, str, np.bool) - - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - types = self._HANDLED_TYPES + (NAType,) - for x in inputs: - if not isinstance(x, types): - return NotImplemented - - if method != "__call__": - raise ValueError(f"ufunc method '{method}' not supported for NA") - result = maybe_dispatch_ufunc_to_dunder_op( - self, ufunc, method, *inputs, **kwargs - ) - if result is NotImplemented: - # For a NumPy ufunc that's not a binop, like np.logaddexp - index = [i for i, x in enumerate(inputs) if x is pd_NA][0] - result = np.broadcast_arrays(*inputs)[index] - if result.ndim == 0: - result = result.item() - if ufunc.nout > 1: - result = (pd_NA,) * ufunc.nout - - return result - pd_NA = NAType() diff --git a/numpy/_core/tests/examples/cython/checks.pyx b/numpy/_core/tests/examples/cython/checks.pyx index b51ab128053f..6dcce1c2606d 100644 --- a/numpy/_core/tests/examples/cython/checks.pyx +++ b/numpy/_core/tests/examples/cython/checks.pyx @@ -129,6 +129,10 @@ def get_default_integer(): return cnp.dtype("intp") return None +def get_ravel_axis(): + return cnp.NPY_RAVEL_AXIS + + def conv_intp(cnp.intp_t val): return val @@ -138,7 +142,7 @@ def get_dtype_flags(cnp.dtype dtype): cdef cnp.NpyIter* npyiter_from_nditer_obj(object it): - """A function to create a NpyIter struct from a nditer object. 
+ """A function to create a NpyIter struct from an nditer object. This function is only meant for testing purposes and only extracts the necessary info from nditer to test the functionality of NpyIter methods @@ -238,6 +242,16 @@ def npyiter_has_multi_index(it: "nditer"): return result +def test_get_multi_index_iter_next(it: "nditer", cnp.ndarray[cnp.float64_t, ndim=2] arr): + cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it) + cdef cnp.NpyIter_GetMultiIndexFunc _get_multi_index = \ + cnp.NpyIter_GetGetMultiIndex(cit, NULL) + cdef cnp.NpyIter_IterNextFunc _iternext = \ + cnp.NpyIter_GetIterNext(cit, NULL) + cnp.NpyIter_Deallocate(cit) + return 1 + + def npyiter_has_finished(it: "nditer"): cdef cnp.NpyIter* cit try: @@ -262,3 +276,99 @@ def inc2_cfloat_struct(cnp.ndarray[cnp.cfloat_t] arr): # This works in both modes arr[1].real = arr[1].real + 1 arr[1].imag = arr[1].imag + 1 + + +def npystring_pack(arr): + cdef char *string = "Hello world" + cdef size_t size = 11 + + allocator = cnp.NpyString_acquire_allocator( + cnp.PyArray_DESCR(arr) + ) + + # copy string->packed_string, the pointer to the underlying array buffer + ret = cnp.NpyString_pack( + allocator, cnp.PyArray_DATA(arr), string, size, + ) + + cnp.NpyString_release_allocator(allocator) + return ret + + +def npystring_load(arr): + allocator = cnp.NpyString_acquire_allocator( + cnp.PyArray_DESCR(arr) + ) + + cdef cnp.npy_static_string sdata + sdata.size = 0 + sdata.buf = NULL + + cdef cnp.npy_packed_static_string *packed_string = cnp.PyArray_DATA(arr) + cdef int is_null = cnp.NpyString_load(allocator, packed_string, &sdata) + cnp.NpyString_release_allocator(allocator) + if is_null == -1: + raise ValueError("String unpacking failed.") + elif is_null == 1: + # String in the array buffer is the null string + return "" + else: + # Cython syntax for copying a c string to python bytestring: + # slice the char * by the length of the string + return sdata.buf[:sdata.size].decode('utf-8') + + +def 
npystring_pack_multiple(arr1, arr2): + cdef cnp.npy_string_allocator *allocators[2] + cdef cnp.PyArray_Descr *descrs[2] + descrs[0] = cnp.PyArray_DESCR(arr1) + descrs[1] = cnp.PyArray_DESCR(arr2) + + cnp.NpyString_acquire_allocators(2, descrs, allocators) + + # Write into the first element of each array + cdef int ret1 = cnp.NpyString_pack( + allocators[0], cnp.PyArray_DATA(arr1), "Hello world", 11, + ) + cdef int ret2 = cnp.NpyString_pack( + allocators[1], cnp.PyArray_DATA(arr2), "test this", 9, + ) + + # Write a null string into the last element + cdef cnp.npy_intp elsize = cnp.PyArray_ITEMSIZE(arr1) + cdef int ret3 = cnp.NpyString_pack_null( + allocators[0], + (cnp.PyArray_DATA(arr1) + 2*elsize), + ) + + cnp.NpyString_release_allocators(2, allocators) + if ret1 == -1 or ret2 == -1 or ret3 == -1: + return -1 + + return 0 + + +def npystring_allocators_other_types(arr1, arr2): + cdef cnp.npy_string_allocator *allocators[2] + cdef cnp.PyArray_Descr *descrs[2] + descrs[0] = cnp.PyArray_DESCR(arr1) + descrs[1] = cnp.PyArray_DESCR(arr2) + + cnp.NpyString_acquire_allocators(2, descrs, allocators) + + # None of the dtypes here are StringDType, so every allocator + # should be NULL upon acquisition. + cdef int ret = 0 + for allocator in allocators: + if allocator != NULL: + ret = -1 + break + + cnp.NpyString_release_allocators(2, allocators) + return ret + + +def check_npy_uintp_type_enum(): + # Regression test for gh-27890: cnp.NPY_UINTP was not defined. + # Cython would fail to compile this before gh-27890 was fixed. 
+ return cnp.NPY_UINTP > 0 diff --git a/numpy/_core/tests/examples/cython/meson.build b/numpy/_core/tests/examples/cython/meson.build index c0ee5f89168c..8362c339ae73 100644 --- a/numpy/_core/tests/examples/cython/meson.build +++ b/numpy/_core/tests/examples/cython/meson.build @@ -10,6 +10,11 @@ if not cy.version().version_compare('>=3.0.6') error('tests requires Cython >= 3.0.6') endif +cython_args = [] +if cy.version().version_compare('>=3.1.0') + cython_args += ['-Xfreethreading_compatible=True'] +endif + npy_include_path = run_command(py, [ '-c', 'import os; os.chdir(".."); import numpy; print(os.path.abspath(numpy.get_include()))' @@ -34,4 +39,5 @@ py.extension_module( '-DNPY_TARGET_VERSION=NPY_2_0_API_VERSION', ], include_directories: [npy_include_path], + cython_args: cython_args, ) diff --git a/numpy/_core/tests/examples/cython/setup.py b/numpy/_core/tests/examples/cython/setup.py index 97b7b4317ffa..ba0639ebcf1c 100644 --- a/numpy/_core/tests/examples/cython/setup.py +++ b/numpy/_core/tests/examples/cython/setup.py @@ -3,11 +3,15 @@ for testing. 
""" -import numpy as np -from distutils.core import setup +import os + +import Cython from Cython.Build import cythonize +from distutils.core import setup from setuptools.extension import Extension -import os + +import numpy as np +from numpy._utils import _pep440 macros = [ ("NPY_NO_DEPRECATED_API", 0), @@ -24,6 +28,12 @@ extensions = [checks] +compiler_directives = {} +if _pep440.parse(Cython.__version__) >= _pep440.parse("3.1.0a0"): + compiler_directives['freethreading_compatible'] = True + setup( - ext_modules=cythonize(extensions) + ext_modules=cythonize( + extensions, + compiler_directives=compiler_directives) ) diff --git a/numpy/_core/tests/examples/limited_api/limited_api1.c b/numpy/_core/tests/examples/limited_api/limited_api1.c index 3dbf5698f1d4..115a3f3a6835 100644 --- a/numpy/_core/tests/examples/limited_api/limited_api1.c +++ b/numpy/_core/tests/examples/limited_api/limited_api1.c @@ -1,5 +1,3 @@ -#define Py_LIMITED_API 0x03060000 - #include #include #include diff --git a/numpy/_core/tests/examples/limited_api/limited_api_latest.c b/numpy/_core/tests/examples/limited_api/limited_api_latest.c new file mode 100644 index 000000000000..92d83ea977a1 --- /dev/null +++ b/numpy/_core/tests/examples/limited_api/limited_api_latest.c @@ -0,0 +1,19 @@ +#include +#include +#include + +#if Py_LIMITED_API != PY_VERSION_HEX & 0xffff0000 + # error "Py_LIMITED_API not defined to Python major+minor version" +#endif + +static PyModuleDef moduledef = { + .m_base = PyModuleDef_HEAD_INIT, + .m_name = "limited_api_latest" +}; + +PyMODINIT_FUNC PyInit_limited_api_latest(void) +{ + import_array(); + import_umath(); + return PyModule_Create(&moduledef); +} diff --git a/numpy/_core/tests/examples/limited_api/meson.build b/numpy/_core/tests/examples/limited_api/meson.build index a6d290304036..2348b0856d0f 100644 --- a/numpy/_core/tests/examples/limited_api/meson.build +++ b/numpy/_core/tests/examples/limited_api/meson.build @@ -1,4 +1,8 @@ -project('checks', 'c', 'cython') 
+project( + 'checks', + 'c', 'cython', + meson_version: '>=1.8.3', +) py = import('python').find_installation(pure: false) @@ -31,7 +35,17 @@ py.extension_module( '-DNPY_NO_DEPRECATED_API=NPY_1_21_API_VERSION', ], include_directories: [npy_include_path], - limited_api: '3.6', + limited_api: '3.9', +) + +py.extension_module( + 'limited_api_latest', + 'limited_api_latest.c', + c_args: [ + '-DNPY_NO_DEPRECATED_API=NPY_1_21_API_VERSION', + ], + include_directories: [npy_include_path], + limited_api: py.language_version(), ) py.extension_module( @@ -45,5 +59,5 @@ py.extension_module( '-DCYTHON_LIMITED_API=1', ], include_directories: [npy_include_path], - limited_api: '3.7', + limited_api: '3.9', ) diff --git a/numpy/_core/tests/examples/limited_api/setup.py b/numpy/_core/tests/examples/limited_api/setup.py index 18747dc80896..16adcd12327d 100644 --- a/numpy/_core/tests/examples/limited_api/setup.py +++ b/numpy/_core/tests/examples/limited_api/setup.py @@ -2,10 +2,12 @@ Build an example package using the limited Python C API. """ -import numpy as np -from setuptools import setup, Extension import os +from setuptools import Extension, setup + +import numpy as np + macros = [("NPY_NO_DEPRECATED_API", 0), ("Py_LIMITED_API", "0x03060000")] limited_api = Extension( diff --git a/numpy/_core/tests/test__exceptions.py b/numpy/_core/tests/test__exceptions.py index fe792c8e37da..35782e7a5878 100644 --- a/numpy/_core/tests/test__exceptions.py +++ b/numpy/_core/tests/test__exceptions.py @@ -5,6 +5,7 @@ import pickle import pytest + import numpy as np from numpy.exceptions import AxisError @@ -31,19 +32,19 @@ def test__size_to_string(self): assert f(1) == '1 bytes' assert f(1023) == '1023 bytes' assert f(Ki) == '1.00 KiB' - assert f(Ki+1) == '1.00 KiB' - assert f(10*Ki) == '10.0 KiB' - assert f(int(999.4*Ki)) == '999. KiB' - assert f(int(1023.4*Ki)) == '1023. 
KiB' - assert f(int(1023.5*Ki)) == '1.00 MiB' - assert f(Ki*Ki) == '1.00 MiB' + assert f(Ki + 1) == '1.00 KiB' + assert f(10 * Ki) == '10.0 KiB' + assert f(int(999.4 * Ki)) == '999. KiB' + assert f(int(1023.4 * Ki)) == '1023. KiB' + assert f(int(1023.5 * Ki)) == '1.00 MiB' + assert f(Ki * Ki) == '1.00 MiB' # 1023.9999 Mib should round to 1 GiB - assert f(int(Ki*Ki*Ki*0.9999)) == '1.00 GiB' - assert f(Ki*Ki*Ki*Ki*Ki*Ki) == '1.00 EiB' + assert f(int(Ki * Ki * Ki * 0.9999)) == '1.00 GiB' + assert f(Ki * Ki * Ki * Ki * Ki * Ki) == '1.00 EiB' # larger than sys.maxsize, adding larger prefixes isn't going to help # anyway. - assert f(Ki*Ki*Ki*Ki*Ki*Ki*123456) == '123456. EiB' + assert f(Ki * Ki * Ki * Ki * Ki * Ki * 123456) == '123456. EiB' def test__total_size(self): """ Test e._total_size """ diff --git a/numpy/_core/tests/test_abc.py b/numpy/_core/tests/test_abc.py index f7ab6b635881..aee1904f1727 100644 --- a/numpy/_core/tests/test_abc.py +++ b/numpy/_core/tests/test_abc.py @@ -1,9 +1,9 @@ -from numpy.testing import assert_ - import numbers import numpy as np from numpy._core.numerictypes import sctypes +from numpy.testing import assert_ + class TestABC: def test_abstract(self): diff --git a/numpy/_core/tests/test_api.py b/numpy/_core/tests/test_api.py index 5b9bdb60f1b3..216a2c75afb8 100644 --- a/numpy/_core/tests/test_api.py +++ b/numpy/_core/tests/test_api.py @@ -1,13 +1,18 @@ import sys +import pytest + import numpy as np import numpy._core.umath as ncu from numpy._core._rational_tests import rational -import pytest +from numpy.lib import stride_tricks from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_raises, assert_warns, - HAS_REFCOUNT - ) + HAS_REFCOUNT, + assert_, + assert_array_equal, + assert_equal, + assert_raises, +) def test_array_array(): @@ -56,7 +61,7 @@ def test_array_array(): np.ones((), dtype=np.float64)) assert_equal(np.array("1.0").dtype, U3) assert_equal(np.array("1.0", dtype=str).dtype, U3) - 
assert_equal(np.array("1.0", dtype=U2), np.array(str("1."))) + assert_equal(np.array("1.0", dtype=U2), np.array("1.")) assert_equal(np.array("1", dtype=U5), np.ones((), dtype=U5)) builtins = getattr(__builtins__, '__dict__', __builtins__) @@ -74,23 +79,23 @@ def test_array_array(): # test array interface a = np.array(100.0, dtype=np.float64) o = type("o", (object,), - dict(__array_interface__=a.__array_interface__)) + {"__array_interface__": a.__array_interface__}) assert_equal(np.array(o, dtype=np.float64), a) # test array_struct interface a = np.array([(1, 4.0, 'Hello'), (2, 6.0, 'World')], dtype=[('f0', int), ('f1', float), ('f2', str)]) o = type("o", (object,), - dict(__array_struct__=a.__array_struct__)) - ## wasn't what I expected... is np.array(o) supposed to equal a ? - ## instead we get a array([...], dtype=">V18") + {"__array_struct__": a.__array_struct__}) + # wasn't what I expected... is np.array(o) supposed to equal a ? + # instead we get an array([...], dtype=">V18") assert_equal(bytes(np.array(o).data), bytes(a.data)) - # test array + # test __array__ def custom__array__(self, dtype=None, copy=None): return np.array(100.0, dtype=dtype, copy=copy) - o = type("o", (object,), dict(__array__=custom__array__))() + o = type("o", (object,), {"__array__": custom__array__})() assert_equal(np.array(o, dtype=np.float64), np.array(100.0, np.float64)) # test recursion @@ -152,6 +157,39 @@ def custom__array__(self, dtype=None, copy=None): assert_equal(np.array([(1.0,) * 10] * 10, dtype=np.float64), np.ones((10, 10), dtype=np.float64)) + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test___array___refcount(): + class MyArray: + def __init__(self, dtype): + self.val = np.array(-1, dtype=dtype) + + def __array__(self, dtype=None, copy=None): + return self.val.__array__(dtype=dtype, copy=copy) + + # test all possible scenarios: + # dtype(none | same | different) x copy(true | false | none) + dt = np.dtype(np.int32) + old_refcount = 
sys.getrefcount(dt) + np.array(MyArray(dt)) + assert_equal(old_refcount, sys.getrefcount(dt)) + np.array(MyArray(dt), dtype=dt) + assert_equal(old_refcount, sys.getrefcount(dt)) + np.array(MyArray(dt), copy=None) + assert_equal(old_refcount, sys.getrefcount(dt)) + np.array(MyArray(dt), dtype=dt, copy=None) + assert_equal(old_refcount, sys.getrefcount(dt)) + dt2 = np.dtype(np.int16) + old_refcount2 = sys.getrefcount(dt2) + np.array(MyArray(dt), dtype=dt2) + assert_equal(old_refcount2, sys.getrefcount(dt2)) + np.array(MyArray(dt), dtype=dt2, copy=None) + assert_equal(old_refcount2, sys.getrefcount(dt2)) + with pytest.raises(ValueError): + np.array(MyArray(dt), dtype=dt2, copy=False) + assert_equal(old_refcount2, sys.getrefcount(dt2)) + + @pytest.mark.parametrize("array", [True, False]) def test_array_impossible_casts(array): # All builtin types can be forcibly cast, at least theoretically, @@ -228,21 +266,21 @@ class MyNDArray(np.ndarray): # Make sure converting from string object to fixed length string # does not truncate. 
- a = np.array([b'a'*100], dtype='O') + a = np.array([b'a' * 100], dtype='O') b = a.astype('S') assert_equal(a, b) assert_equal(b.dtype, np.dtype('S100')) - a = np.array(['a'*100], dtype='O') + a = np.array(['a' * 100], dtype='O') b = a.astype('U') assert_equal(a, b) assert_equal(b.dtype, np.dtype('U100')) # Same test as above but for strings shorter than 64 characters - a = np.array([b'a'*10], dtype='O') + a = np.array([b'a' * 10], dtype='O') b = a.astype('S') assert_equal(a, b) assert_equal(b.dtype, np.dtype('S10')) - a = np.array(['a'*10], dtype='O') + a = np.array(['a' * 10], dtype='O') b = a.astype('U') assert_equal(a, b) assert_equal(b.dtype, np.dtype('U10')) @@ -302,14 +340,14 @@ def test_object_array_astype_to_void(): assert arr.dtype == "V8" @pytest.mark.parametrize("t", - np._core.sctypes['uint'] + - np._core.sctypes['int'] + + np._core.sctypes['uint'] + + np._core.sctypes['int'] + np._core.sctypes['float'] ) def test_array_astype_warning(t): # test ComplexWarning when casting from complex to float or int a = np.array(10, dtype=np.complex128) - assert_warns(np.exceptions.ComplexWarning, a.astype, t) + pytest.warns(np.exceptions.ComplexWarning, a.astype, t) @pytest.mark.parametrize(["dtype", "out_dtype"], [(np.bytes_, np.bool), @@ -335,12 +373,12 @@ def test_string_to_boolean_cast(dtype, out_dtype): [np.complex64, np.complex128, np.clongdouble]) def test_string_to_complex_cast(str_type, scalar_type): value = scalar_type(b"1+3j") - assert scalar_type(value) == 1+3j - assert np.array([value], dtype=object).astype(scalar_type)[()] == 1+3j - assert np.array(value).astype(scalar_type)[()] == 1+3j + assert scalar_type(value) == 1 + 3j + assert np.array([value], dtype=object).astype(scalar_type)[()] == 1 + 3j + assert np.array(value).astype(scalar_type)[()] == 1 + 3j arr = np.zeros(1, dtype=scalar_type) arr[0] = value - assert arr[0] == 1+3j + assert arr[0] == 1 + 3j @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) def test_none_to_nan_cast(dtype): @@ 
-406,12 +444,43 @@ def test_copyto(): # 'dst' must be an array assert_raises(TypeError, np.copyto, [1, 2, 3], [2, 3, 4]) + +def test_copyto_cast_safety(): + with pytest.raises(TypeError): + np.copyto(np.arange(3), 3., casting="safe") + + # Can put integer and float scalars safely (and equiv): + np.copyto(np.arange(3), 3, casting="equiv") + np.copyto(np.arange(3.), 3., casting="equiv") + # And also with less precision safely: + np.copyto(np.arange(3, dtype="uint8"), 3, casting="safe") + np.copyto(np.arange(3., dtype="float32"), 3., casting="safe") + + # But not equiv: + with pytest.raises(TypeError): + np.copyto(np.arange(3, dtype="uint8"), 3, casting="equiv") + + with pytest.raises(TypeError): + np.copyto(np.arange(3., dtype="float32"), 3., casting="equiv") + + # As a special thing, object is equiv currently: + np.copyto(np.arange(3, dtype=object), 3, casting="equiv") + + # The following raises an overflow error/gives a warning but not + # type error (due to casting), though: + with pytest.raises(OverflowError): + np.copyto(np.arange(3), 2**80, casting="safe") + + with pytest.warns(RuntimeWarning): + np.copyto(np.arange(3, dtype=np.float32), 2e300, casting="safe") + + def test_copyto_permut(): # test explicit overflow case pad = 500 l = [True] * pad + [True, True, True, True] - r = np.zeros(len(l)-pad) - d = np.ones(len(l)-pad) + r = np.zeros(len(l) - pad) + d = np.ones(len(l) - pad) mask = np.array(l)[pad:] np.copyto(r, d, where=mask[::-1]) @@ -521,8 +590,8 @@ def check_copy_result(x, y, ccontig, fcontig, strides=False): check_copy_result(res, c, ccontig=False, fcontig=False, strides=True) def test_contiguous_flags(): - a = np.ones((4, 4, 1))[::2,:,:] - a.strides = a.strides[:2] + (-123,) + a = np.ones((4, 4, 1))[::2, :, :] + a = stride_tricks.as_strided(a, strides=a.strides[:2] + (-123,)) b = np.ones((2, 2, 1, 2, 2)).swapaxes(3, 4) def check_contig(a, ccontig, fcontig): @@ -554,11 +623,12 @@ def check_contig(a, ccontig, fcontig): def test_broadcast_arrays(): # 
Test user defined dtypes - a = np.array([(1, 2, 3)], dtype='u4,u4,u4') - b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4') + dtype = 'u4,u4,u4' + a = np.array([(1, 2, 3)], dtype=dtype) + b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype=dtype) result = np.broadcast_arrays(a, b) - assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype='u4,u4,u4')) - assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4')) + assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype=dtype)) + assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype=dtype)) @pytest.mark.parametrize(["shape", "fill_value", "expected_output"], [((2, 2), [5.0, 6.0], np.array([[5.0, 6.0], [5.0, 6.0]])), diff --git a/numpy/_core/tests/test_argparse.py b/numpy/_core/tests/test_argparse.py index cddee72ea04c..ededced3b9fe 100644 --- a/numpy/_core/tests/test_argparse.py +++ b/numpy/_core/tests/test_argparse.py @@ -11,10 +11,29 @@ def func(arg1, /, arg2, *, arg3): return None """ +import threading + import pytest import numpy as np -from numpy._core._multiarray_tests import argparse_example_function as func +from numpy._core._multiarray_tests import ( + argparse_example_function as func, + threaded_argparse_example_function as thread_func, +) +from numpy.testing import IS_WASM + + +@pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for threads") +def test_thread_safe_argparse_cache(): + b = threading.Barrier(8) + + def call_thread_func(): + b.wait() + thread_func(arg1=3, arg2=None) + + tasks = [threading.Thread(target=call_thread_func) for _ in range(8)] + [t.start() for t in tasks] + [t.join() for t in tasks] def test_invalid_integers(): diff --git a/numpy/_core/tests/test_array_api_info.py b/numpy/_core/tests/test_array_api_info.py index 154b3837325d..4842dbfa9486 100644 --- a/numpy/_core/tests/test_array_api_info.py +++ b/numpy/_core/tests/test_array_api_info.py @@ -1,13 +1,14 @@ -import numpy as 
np import pytest +import numpy as np + info = np.__array_namespace_info__() def test_capabilities(): caps = info.capabilities() - assert caps["boolean indexing"] == True - assert caps["data-dependent shapes"] == True + assert caps["boolean indexing"] is True + assert caps["data-dependent shapes"] is True # This will be added in the 2024.12 release of the array API standard. diff --git a/numpy/_core/tests/test_array_coercion.py b/numpy/_core/tests/test_array_coercion.py index a88873fb7fc5..96bbb679d6c9 100644 --- a/numpy/_core/tests/test_array_coercion.py +++ b/numpy/_core/tests/test_array_coercion.py @@ -12,9 +12,7 @@ import numpy as np import numpy._core._multiarray_umath as ncu from numpy._core._rational_tests import rational - -from numpy.testing import ( - assert_array_equal, assert_warns, IS_PYPY) +from numpy.testing import IS_64BIT, assert_array_equal def arraylikes(): @@ -38,14 +36,14 @@ def subclass(a): yield subclass - class _SequenceLike(): + class _SequenceLike: # Older NumPy versions, sometimes cared whether a protocol array was # also _SequenceLike. This shouldn't matter, but keep it for now # for __array__ and not the others. 
def __len__(self): raise TypeError - def __getitem__(self): + def __getitem__(self, _, /): raise TypeError # Array-interface @@ -90,10 +88,10 @@ def scalar_instances(times=True, extended_precision=True, user_dtype=True): yield param(np.sqrt(np.longdouble(5)), id="longdouble") # Complex: - yield param(np.sqrt(np.complex64(2+3j)), id="complex64") - yield param(np.sqrt(np.complex128(2+3j)), id="complex128") + yield param(np.sqrt(np.complex64(2 + 3j)), id="complex64") + yield param(np.sqrt(np.complex128(2 + 3j)), id="complex128") if extended_precision: - yield param(np.sqrt(np.clongdouble(2+3j)), id="clongdouble") + yield param(np.sqrt(np.clongdouble(2 + 3j)), id="clongdouble") # Bool: # XFAIL: Bool should be added, but has some bad properties when it @@ -268,11 +266,6 @@ def test_scalar_coercion(self, scalar): # Ensure we have a full-precision number if available scalar = type(scalar)((scalar * 2)**0.5) - if type(scalar) is rational: - # Rational generally fails due to a missing cast. In the future - # object casts should automatically be defined based on `setitem`. - pytest.xfail("Rational to object cast is undefined currently.") - # Use casting from object: arr = np.array(scalar, dtype=object).astype(scalar.dtype) @@ -289,7 +282,6 @@ def test_scalar_coercion(self, scalar): assert_array_equal(arr, arr3) assert_array_equal(arr, arr4) - @pytest.mark.xfail(IS_PYPY, reason="`int(np.complex128(3))` fails on PyPy") @pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning") @pytest.mark.parametrize("cast_to", scalar_instances()) def test_scalar_coercion_same_as_cast_and_assignment(self, cast_to): @@ -307,7 +299,7 @@ def test_scalar_coercion_same_as_cast_and_assignment(self, cast_to): scalar = scalar.values[0] if dtype.type == np.void: - if scalar.dtype.fields is not None and dtype.fields is None: + if scalar.dtype.fields is not None and dtype.fields is None: # Here, coercion to "V6" works, but the cast fails. 
# Since the types are identical, SETITEM takes care of # this, but has different rules than the cast. @@ -324,18 +316,18 @@ def test_scalar_coercion_same_as_cast_and_assignment(self, cast_to): cast = np.array(scalar).astype(dtype) except (TypeError, ValueError, RuntimeError): # coercion should also raise (error type may change) - with pytest.raises(Exception): + with pytest.raises(Exception): # noqa: B017 np.array(scalar, dtype=dtype) if (isinstance(scalar, rational) and np.issubdtype(dtype, np.signedinteger)): return - with pytest.raises(Exception): + with pytest.raises(Exception): # noqa: B017 np.array([scalar], dtype=dtype) # assignment should also raise res = np.zeros((), dtype=dtype) - with pytest.raises(Exception): + with pytest.raises(Exception): # noqa: B017 res[()] = scalar return @@ -469,7 +461,6 @@ def test_coercion_assignment_datetime(self, val, unit, dtype): # the explicit cast fails: np.array(scalar).astype(dtype) - @pytest.mark.parametrize(["val", "unit"], [param(123, "s", id="[s]"), param(123, "D", id="[D]")]) def test_coercion_assignment_timedelta(self, val, unit): @@ -598,6 +589,7 @@ class TestBadSequences: def test_growing_list(self): # List to coerce, `mylist` will append to it during coercion obj = [] + class mylist(list): def __len__(self): obj.append([1, 2]) @@ -615,6 +607,7 @@ def __len__(self): def test_mutated_list(self): # List to coerce, `mylist` will mutate the first element obj = [] + class mylist(list): def __len__(self): obj[0] = [2, 3] # replace with a different list. @@ -628,12 +621,13 @@ def __len__(self): def test_replace_0d_array(self): # List to coerce, `mylist` will mutate the first element obj = [] + class baditem: def __len__(self): obj[0][0] = 2 # replace with a different list. 
raise ValueError("not actually a sequence!") - def __getitem__(self): + def __getitem__(self, _, /): pass # Runs into a corner case in the new code, the `array(2)` is cached @@ -716,8 +710,8 @@ def __array__(self, dtype=None, copy=None): arr = np.array([ArrayLike]) assert arr[0] is ArrayLike - @pytest.mark.skipif( - np.dtype(np.intp).itemsize < 8, reason="Needs 64bit platform") + @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform") + @pytest.mark.thread_unsafe(reason="large slow test in parallel") def test_too_large_array_error_paths(self): """Test the error paths, including for memory leaks""" arr = np.array(0, dtype="uint8") @@ -755,13 +749,25 @@ def test_bad_array_like_bad_length(self, error): class BadSequence: def __len__(self): raise error - def __getitem__(self): + + def __getitem__(self, _, /): # must have getitem to be a Sequence return 1 with pytest.raises(error): np.array(BadSequence()) + def test_array_interface_descr_optional(self): + # The descr should be optional regression test for gh-27249 + arr = np.ones(10, dtype="V10") + iface = arr.__array_interface__ + iface.pop("descr") + + class MyClass: + __array_interface__ = iface + + assert_array_equal(np.asarray(MyClass), arr) + class TestAsArray: """Test expected behaviors of ``asarray``.""" @@ -898,3 +904,24 @@ def test_empty_string(): assert_array_equal(res, b"") assert res.shape == (2, 10) assert res.dtype == "S1" + + +@pytest.mark.parametrize("dtype", ["S", "U", object]) +@pytest.mark.parametrize("res_dt,hug_val", + [("float16", "1e30"), ("float32", "1e200")]) +def test_string_to_float_coercion_errors(dtype, res_dt, hug_val): + # This test primarly tests setitem + val = np.array(["3M"], dtype=dtype)[0] # use the scalar + + with pytest.raises(ValueError): + np.array(val, dtype=res_dt) + + val = np.array([hug_val], dtype=dtype)[0] # use the scalar + + with np.errstate(all="warn"): + with pytest.warns(RuntimeWarning): + np.array(val, dtype=res_dt) + + with np.errstate(all="raise"): + with 
pytest.raises(FloatingPointError): + np.array(val, dtype=res_dt) diff --git a/numpy/_core/tests/test_array_interface.py b/numpy/_core/tests/test_array_interface.py index f049eea55d8a..afb19f4e280f 100644 --- a/numpy/_core/tests/test_array_interface.py +++ b/numpy/_core/tests/test_array_interface.py @@ -1,7 +1,10 @@ import sys +import sysconfig + import pytest + import numpy as np -from numpy.testing import extbuild, IS_WASM, IS_EDITABLE +from numpy.testing import IS_EDITABLE, IS_WASM, extbuild @pytest.fixture @@ -9,8 +12,7 @@ def get_module(tmp_path): """ Some codes to generate data and manage temporary buffers use when sharing with numpy via the array interface protocol. """ - - if not sys.platform.startswith('linux'): + if sys.platform.startswith('cygwin'): pytest.skip('link fails on cygwin') if IS_WASM: pytest.skip("Can't build module inside Wasm") @@ -124,6 +126,8 @@ def get_module(tmp_path): pass # if it does not exist, build and load it + if sysconfig.get_platform() == "win-arm64": + pytest.skip("Meson unable to find MSVC linker on win-arm64") return extbuild.build_and_import_extension('array_interface_testing', functions, prologue=prologue, @@ -168,9 +172,8 @@ def __array_struct__(self): # share the data stderr.write(' ---- share data via the array interface protocol ---- \n') arr = np.array(buf, copy=False) - stderr.write('arr.__array_interface___ = %s\n' % ( - str(arr.__array_interface__))) - stderr.write('arr.base = %s\n' % (str(arr.base))) + stderr.write(f'arr.__array_interface___ = {str(arr.__array_interface__)}\n') + stderr.write(f'arr.base = {str(arr.base)}\n') stderr.write(' ---- OK!\n\n') # release the source of the shared data. 
this will not release the data @@ -189,7 +192,7 @@ def __array_struct__(self): # called then reading the values here may cause a SEGV and will be reported # as invalid reads by valgrind stderr.write(' ---- read shared data ---- \n') - stderr.write('arr = %s\n' % (str(arr))) + stderr.write(f'arr = {str(arr)}\n') stderr.write(' ---- OK!\n\n') # write to the shared buffer. If the shared data was prematurely deleted @@ -197,15 +200,14 @@ def __array_struct__(self): stderr.write(' ---- modify shared data ---- \n') arr *= multiplier expected_value *= multiplier - stderr.write('arr.__array_interface___ = %s\n' % ( - str(arr.__array_interface__))) - stderr.write('arr.base = %s\n' % (str(arr.base))) + stderr.write(f'arr.__array_interface___ = {str(arr.__array_interface__)}\n') + stderr.write(f'arr.base = {str(arr.base)}\n') stderr.write(' ---- OK!\n\n') # read the data. If the shared data was prematurely deleted this # will may cause a SEGV and valgrind will report invalid reads stderr.write(' ---- read modified shared data ---- \n') - stderr.write('arr = %s\n' % (str(arr))) + stderr.write(f'arr = {str(arr)}\n') stderr.write(' ---- OK!\n\n') # check that we got the expected data. If the PyCapsule destructor we diff --git a/numpy/_core/tests/test_arraymethod.py b/numpy/_core/tests/test_arraymethod.py index f10d9b984987..4400fccf32e8 100644 --- a/numpy/_core/tests/test_arraymethod.py +++ b/numpy/_core/tests/test_arraymethod.py @@ -3,9 +3,6 @@ this is private API, but when added, public API may be added here. 
""" -from __future__ import annotations - -import sys import types from typing import Any @@ -14,6 +11,9 @@ import numpy as np from numpy._core._multiarray_umath import _get_castingimpl as get_castingimpl +# accessing `numpy.char.charray` will issue a deprecation warning +from numpy._core.defchararray import chararray + class TestResolveDescriptors: # Test mainly error paths of the resolve_descriptors function, @@ -54,8 +54,8 @@ class TestSimpleStridedCall: ValueError), # not 1-D (((np.ones(3, dtype="d"), np.ones(4, dtype="f")),), ValueError), # different length - (((np.frombuffer(b"\0x00"*3*2, dtype="d"), - np.frombuffer(b"\0x00"*3, dtype="f")),), + (((np.frombuffer(b"\0x00" * 3 * 2, dtype="d"), + np.frombuffer(b"\0x00" * 3, dtype="f")),), ValueError), # output not writeable ]) def test_invalid_arguments(self, args, error): @@ -65,9 +65,7 @@ def test_invalid_arguments(self, args, error): @pytest.mark.parametrize( - "cls", [ - np.ndarray, np.recarray, np.char.chararray, np.matrix, np.memmap - ] + "cls", [np.ndarray, np.recarray, chararray, np.matrix, np.memmap] ) class TestClassGetItem: def test_class_getitem(self, cls: type[np.ndarray]) -> None: diff --git a/numpy/_core/tests/test_arrayobject.py b/numpy/_core/tests/test_arrayobject.py index ccab929b2a09..f4e268b377b3 100644 --- a/numpy/_core/tests/test_arrayobject.py +++ b/numpy/_core/tests/test_arrayobject.py @@ -1,7 +1,9 @@ +import sys + import pytest import numpy as np -from numpy.testing import assert_array_equal +from numpy.testing import HAS_REFCOUNT, assert_array_equal def test_matrix_transpose_raises_error_for_1d(): @@ -31,3 +33,63 @@ def test_matrix_transpose_equals_swapaxes(shape): tgt = np.swapaxes(arr, num_of_axes - 2, num_of_axes - 1) mT = arr.mT assert_array_equal(tgt, mT) + + +class MyArr(np.ndarray): + def __array_wrap__(self, arr, context=None, return_scalar=None): + return super().__array_wrap__(arr, context, return_scalar) + + +class MyArrNoWrap(np.ndarray): + pass + + 
+@pytest.mark.parametrize("subclass_self", [np.ndarray, MyArr, MyArrNoWrap]) +@pytest.mark.parametrize("subclass_arr", [np.ndarray, MyArr, MyArrNoWrap]) +def test_array_wrap(subclass_self, subclass_arr): + # NumPy should allow `__array_wrap__` to be called on arrays, it's logic + # is designed in a way that: + # + # * Subclasses never return scalars by default (to preserve their + # information). They can choose to if they wish. + # * NumPy returns scalars, if `return_scalar` is passed as True to allow + # manual calls to `arr.__array_wrap__` to do the right thing. + # * The type of the input should be ignored (it should be a base-class + # array, but I am not sure this is guaranteed). + + arr = np.arange(3).view(subclass_self) + + arr0d = np.array(3, dtype=np.int8).view(subclass_arr) + # With third argument True, ndarray allows "decay" to scalar. + # (I don't think NumPy would pass `None`, but it seems clear to support) + if subclass_self is np.ndarray: + assert type(arr.__array_wrap__(arr0d, None, True)) is np.int8 + else: + assert type(arr.__array_wrap__(arr0d, None, True)) is type(arr) + + # Otherwise, result should be viewed as the subclass + assert type(arr.__array_wrap__(arr0d)) is type(arr) + assert type(arr.__array_wrap__(arr0d, None, None)) is type(arr) + assert type(arr.__array_wrap__(arr0d, None, False)) is type(arr) + + # Non 0-D array can't be converted to scalar, so we ignore that + arr1d = np.array([3], dtype=np.int8).view(subclass_arr) + assert type(arr.__array_wrap__(arr1d, None, True)) is type(arr) + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_cleanup_with_refs_non_contig(): + # Regression test, leaked the dtype (but also good for rest) + dtype = np.dtype("O,i") + obj = object() + expected_ref_dtype = sys.getrefcount(dtype) + expected_ref_obj = sys.getrefcount(obj) + proto = np.full((3, 4, 5, 6, 7), np.array((obj, 2), dtype=dtype)) + # Give array a non-trivial order to exercise more cleanup paths. 
+ arr = proto.transpose((2, 0, 3, 1, 4)).copy("K") + del proto, arr + + actual_ref_dtype = sys.getrefcount(dtype) + actual_ref_obj = sys.getrefcount(obj) + assert actual_ref_dtype == expected_ref_dtype + assert actual_ref_obj == actual_ref_dtype diff --git a/numpy/_core/tests/test_arrayprint.py b/numpy/_core/tests/test_arrayprint.py index d9caced3c1bc..e6cbb6f72229 100644 --- a/numpy/_core/tests/test_arrayprint.py +++ b/numpy/_core/tests/test_arrayprint.py @@ -1,16 +1,23 @@ -import sys import gc +import sys +import textwrap + +import pytest from hypothesis import given from hypothesis.extra import numpy as hynp -import pytest import numpy as np +from numpy._core.arrayprint import _typelessdata from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_warns, HAS_REFCOUNT, + HAS_REFCOUNT, + IS_WASM, + assert_, + assert_equal, + assert_raises, assert_raises_regex, - ) -from numpy._core.arrayprint import _typelessdata -import textwrap +) +from numpy.testing._private.utils import run_threaded + class TestArrayRepr: def test_nan_inf(self): @@ -18,7 +25,8 @@ def test_nan_inf(self): assert_equal(repr(x), 'array([nan, inf])') def test_subclass(self): - class sub(np.ndarray): pass + class sub(np.ndarray): + pass # one dimensional x1d = np.array([1, 2]).view(sub) @@ -31,7 +39,7 @@ class sub(np.ndarray): pass ' [3, 4]])') # two dimensional with flexible dtype - xstruct = np.ones((2,2), dtype=[('a', 'f4')]) assert str(scalar) == "(1.0, 2.0)" + +@pytest.mark.skipif(IS_WASM, reason="wasm doesn't support asyncio") +def test_printoptions_asyncio_safe(): + asyncio = pytest.importorskip("asyncio") + + b = asyncio.Barrier(2) + + async def legacy_113(): + np.set_printoptions(legacy='1.13', precision=12) + await b.wait() + po = np.get_printoptions() + assert po['legacy'] == '1.13' + assert po['precision'] == 12 + orig_linewidth = po['linewidth'] + with np.printoptions(linewidth=34, legacy='1.21'): + po = np.get_printoptions() + assert po['legacy'] == '1.21' + assert 
po['precision'] == 12 + assert po['linewidth'] == 34 + po = np.get_printoptions() + assert po['linewidth'] == orig_linewidth + assert po['legacy'] == '1.13' + assert po['precision'] == 12 + + async def legacy_125(): + np.set_printoptions(legacy='1.25', precision=7) + await b.wait() + po = np.get_printoptions() + assert po['legacy'] == '1.25' + assert po['precision'] == 7 + orig_linewidth = po['linewidth'] + with np.printoptions(linewidth=6, legacy='1.13'): + po = np.get_printoptions() + assert po['legacy'] == '1.13' + assert po['precision'] == 7 + assert po['linewidth'] == 6 + po = np.get_printoptions() + assert po['linewidth'] == orig_linewidth + assert po['legacy'] == '1.25' + assert po['precision'] == 7 + + async def main(): + await asyncio.gather(legacy_125(), legacy_125()) + + loop = asyncio.new_event_loop() + asyncio.run(main()) + loop.close() + +@pytest.mark.skipif(IS_WASM, reason="wasm doesn't support threads") +@pytest.mark.thread_unsafe(reason="test is already explicitly multi-threaded") +def test_multithreaded_array_printing(): + # the dragon4 implementation uses a static scratch space for performance + # reasons this test makes sure it is set up in a thread-safe manner + + run_threaded(TestPrintOptions().test_floatmode, 500) diff --git a/numpy/_core/tests/test_casting_floatingpoint_errors.py b/numpy/_core/tests/test_casting_floatingpoint_errors.py index d448b94d9798..2f9c01f907c4 100644 --- a/numpy/_core/tests/test_casting_floatingpoint_errors.py +++ b/numpy/_core/tests/test_casting_floatingpoint_errors.py @@ -1,7 +1,8 @@ import pytest from pytest import param -from numpy.testing import IS_WASM + import numpy as np +from numpy.testing import IS_WASM def values_and_dtypes(): @@ -36,7 +37,7 @@ def values_and_dtypes(): # Cast to complex32: yield param(2e300, "complex64", id="float-to-c8") - yield param(2e300+0j, "complex64", id="complex-to-c8") + yield param(2e300 + 0j, "complex64", id="complex-to-c8") yield param(2e300j, "complex64", id="complex-to-c8") 
yield param(np.longdouble(2e300), "complex64", id="longdouble-to-c8") @@ -151,4 +152,3 @@ def test_floatingpoint_errors_casting(dtype, value): with np.errstate(all="raise"): with pytest.raises(FloatingPointError, match=match): operation() - diff --git a/numpy/_core/tests/test_casting_unittests.py b/numpy/_core/tests/test_casting_unittests.py index 087d12a0af53..f090524d6e78 100644 --- a/numpy/_core/tests/test_casting_unittests.py +++ b/numpy/_core/tests/test_casting_unittests.py @@ -6,18 +6,18 @@ than integration tests. """ -import pytest -import textwrap +import ctypes import enum import random -import ctypes +import textwrap +import warnings -import numpy as np -from numpy.lib.stride_tricks import as_strided +import pytest -from numpy.testing import assert_array_equal +import numpy as np from numpy._core._multiarray_umath import _get_castingimpl as get_castingimpl - +from numpy.lib.stride_tricks import as_strided +from numpy.testing import assert_array_equal, assert_equal # Simple skips object, parametric and long double (unsupported by struct) simple_dtypes = "?bhilqBHILQefdFD" @@ -77,7 +77,10 @@ class Casting(enum.IntEnum): safe = 2 same_kind = 3 unsafe = 4 + same_value = 64 + +same_value_dtypes = tuple(type(np.dtype(c)) for c in "?bhilqBHILQefdgFDG") def _get_cancast_table(): table = textwrap.dedent(""" @@ -118,9 +121,13 @@ def _get_cancast_table(): cancast[from_dt] = {} for to_dt, c in zip(dtypes, row[2::2]): cancast[from_dt][to_dt] = convert_cast[c] + # Of the types checked, numeric cast support same-value + if from_dt in same_value_dtypes and to_dt in same_value_dtypes: + cancast[from_dt][to_dt] |= Casting.same_value return cancast + CAST_TABLE = _get_cancast_table() @@ -267,14 +274,16 @@ def test_simple_cancast(self, from_Dt): for to_dt in [to_Dt(), to_Dt().newbyteorder()]: casting, (from_res, to_res), view_off = ( cast._resolve_descriptors((from_dt, to_dt))) - assert(type(from_res) == from_Dt) - assert(type(to_res) == to_Dt) + assert type(from_res) is 
from_Dt + assert type(to_res) is to_Dt if view_off is not None: # If a view is acceptable, this is "no" casting # and byte order must be matching. - assert casting == Casting.no - # The above table lists this as "equivalent" - assert Casting.equiv == CAST_TABLE[from_Dt][to_Dt] + assert casting == Casting.no | Casting.same_value + # The above table lists this as "equivalent", perhaps + # with "same_value" + v = CAST_TABLE[from_Dt][to_Dt] & ~Casting.same_value + assert Casting.equiv == v # Note that to_res may not be the same as from_dt assert from_res.isnative == to_res.isnative else: @@ -284,8 +293,8 @@ def test_simple_cancast(self, from_Dt): assert casting == CAST_TABLE[from_Dt][to_Dt] if from_Dt is to_Dt: - assert(from_dt is from_res) - assert(to_dt is to_res) + assert from_dt is from_res + assert to_dt is to_res @pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning") @pytest.mark.parametrize("from_dt", simple_dtype_instances()) @@ -304,6 +313,7 @@ def test_simple_direct_casts(self, from_dt): to_dt = to_dt.values[0] cast = get_castingimpl(type(from_dt), type(to_dt)) + # print("from_dt", from_dt, "to_dt", to_dt) casting, (from_res, to_res), view_off = cast._resolve_descriptors( (from_dt, to_dt)) @@ -317,7 +327,9 @@ def test_simple_direct_casts(self, from_dt): arr1, arr2, values = self.get_data(from_dt, to_dt) + # print("2", arr1, arr2, cast) cast._simple_strided_call((arr1, arr2)) + # print("3") # Check via python list assert arr2.tolist() == values @@ -433,7 +445,7 @@ def test_time_to_time(self, from_dt, to_dt, to_dt = np.dtype(to_dt) # Test a few values for casting (results generated with NumPy 1.19) - values = np.array([-2**63, 1, 2**63-1, 10000, -10000, 2**32]) + values = np.array([-2**63, 1, 2**63 - 1, 10000, -10000, 2**32]) values = values.astype(np.dtype("int64").newbyteorder(from_dt.byteorder)) assert values.dtype.byteorder == from_dt.byteorder assert np.isnat(values.view(from_dt)[0]) @@ -752,11 +764,11 @@ def 
test_structured_field_offsets(self, to_dt, expected_off): # field cases (field to field is tested explicitly also): # Not considered viewable, because a negative offset would allow # may structured dtype to indirectly access invalid memory. - ("i", dict(names=["a"], formats=["i"], offsets=[2]), None), - (dict(names=["a"], formats=["i"], offsets=[2]), "i", 2), + ("i", {"names": ["a"], "formats": ["i"], "offsets": [2]}, None), + ({"names": ["a"], "formats": ["i"], "offsets": [2]}, "i", 2), # Currently considered not viewable, due to multiple fields # even though they overlap (maybe we should not allow that?) - ("i", dict(names=["a", "b"], formats=["i", "i"], offsets=[2, 2]), + ("i", {"names": ["a", "b"], "formats": ["i", "i"], "offsets": [2, 2]}, None), # different number of fields can't work, should probably just fail # so it never reports "viewable": @@ -776,7 +788,7 @@ def test_structured_field_offsets(self, to_dt, expected_off): # completely invalid/impossible cast: ("i,i", "i,i,i", None), ]) - def test_structured_view_offsets_paramteric( + def test_structured_view_offsets_parametric( self, from_dt, to_dt, expected_off): # TODO: While this test is fairly thorough, right now, it does not # really test some paths that may have nonzero offsets (they don't @@ -801,7 +813,7 @@ def test_object_casts_NULL_None_equivalence(self, dtype): expected = arr_normal.astype(dtype) except TypeError: with pytest.raises(TypeError): - arr_NULLs.astype(dtype), + arr_NULLs.astype(dtype) else: assert_array_equal(expected, arr_NULLs.astype(dtype)) @@ -816,3 +828,128 @@ def test_nonstandard_bool_to_other(self, dtype): expected = [0, 1, 1] assert_array_equal(res, expected) + @pytest.mark.parametrize("to_dtype", + np.typecodes["AllInteger"] + np.typecodes["AllFloat"]) + @pytest.mark.parametrize("from_dtype", + np.typecodes["AllInteger"] + np.typecodes["AllFloat"]) + @pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning") + def test_same_value_overflow(self, from_dtype, 
to_dtype): + if from_dtype == to_dtype: + return + top1 = 0 + top2 = 0 + try: + top1 = np.iinfo(from_dtype).max + except ValueError: + top1 = np.finfo(from_dtype).max + try: + top2 = np.iinfo(to_dtype).max + except ValueError: + top2 = np.finfo(to_dtype).max + # No need to test if top2 > top1, since the test will also do the + # reverse dtype matching. Catch then warning if the comparison warns, + # i.e. np.int16(65535) < np.float16(6.55e4) + with warnings.catch_warnings(record=True): + warnings.simplefilter("always", RuntimeWarning) + if top2 >= top1: + # will be tested when the dtypes are reversed + return + # Happy path + arr1 = np.array([0] * 10, dtype=from_dtype) + arr2 = np.array([0] * 10, dtype=to_dtype) + arr1_astype = arr1.astype(to_dtype, casting='same_value') + assert_equal(arr1_astype, arr2, strict=True) + # Make it overflow, both aligned and unaligned + arr1[0] = top1 + aligned = np.empty(arr1.itemsize * arr1.size + 1, 'uint8') + unaligned = aligned[1:].view(arr1.dtype) + unaligned[:] = arr1 + with pytest.raises(ValueError): + # Casting float to float with overflow should raise + # RuntimeWarning (fperror) + # Casting float to int with overflow sometimes raises + # RuntimeWarning (fperror) + # Casting with overflow and 'same_value', should raise ValueError + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always", RuntimeWarning) + arr1.astype(to_dtype, casting='same_value') + assert len(w) < 2 + with pytest.raises(ValueError): + # again, unaligned + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always", RuntimeWarning) + unaligned.astype(to_dtype, casting='same_value') + assert len(w) < 2 + + @pytest.mark.parametrize("to_dtype", + np.typecodes["AllInteger"]) + @pytest.mark.parametrize("from_dtype", + np.typecodes["AllFloat"]) + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_same_value_float_to_int(self, from_dtype, to_dtype): + # Should not raise, since the values can round trip + 
arr1 = np.arange(10, dtype=from_dtype) + aligned = np.empty(arr1.itemsize * arr1.size + 1, 'uint8') + unaligned = aligned[1:].view(arr1.dtype) + unaligned[:] = arr1 + arr2 = np.arange(10, dtype=to_dtype) + assert_array_equal(arr1.astype(to_dtype, casting='same_value'), arr2) + assert_array_equal(unaligned.astype(to_dtype, casting='same_value'), arr2) + + # Should raise, since values cannot round trip. Might warn too about + # FPE errors + arr1_66 = arr1 + 0.666 + unaligned_66 = unaligned + 0.66 + with pytest.raises(ValueError): + arr1_66.astype(to_dtype, casting='same_value') + with pytest.raises(ValueError): + unaligned_66.astype(to_dtype, casting='same_value') + + @pytest.mark.parametrize("to_dtype", + np.typecodes["AllInteger"]) + @pytest.mark.parametrize("from_dtype", + np.typecodes["AllFloat"]) + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_same_value_float_to_int_scalar(self, from_dtype, to_dtype): + # Should not raise, since the values can round trip + s1 = np.array(10, dtype=from_dtype) + assert s1.astype(to_dtype, casting='same_value') == 10 + + # Should raise, since values cannot round trip + s1_66 = s1 + 0.666 + with pytest.raises(ValueError): + s1_66.astype(to_dtype, casting='same_value') + + @pytest.mark.parametrize("value", [np.nan, np.inf, -np.inf]) + @pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning") + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_same_value_naninf(self, value): + # These work, but may trigger FPE warnings on macOS + np.array([value], dtype=np.half).astype(np.cdouble, casting='same_value') + np.array([value], dtype=np.half).astype(np.double, casting='same_value') + np.array([value], dtype=np.float32).astype(np.cdouble, casting='same_value') + np.array([value], dtype=np.float32).astype(np.double, casting='same_value') + np.array([value], dtype=np.float32).astype(np.half, casting='same_value') + np.array([value], dtype=np.complex64).astype(np.half, casting='same_value') + # 
These fail + with pytest.raises(ValueError): + np.array([value], dtype=np.half).astype(np.int64, casting='same_value') + with pytest.raises(ValueError): + np.array([value], dtype=np.complex64).astype(np.int64, casting='same_value') + with pytest.raises(ValueError): + np.array([value], dtype=np.float32).astype(np.int64, casting='same_value') + + @pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning") + def test_same_value_complex(self): + arr = np.array([complex(1, 1)], dtype=np.cdouble) + # This works + arr.astype(np.complex64, casting='same_value') + # Casting with a non-zero imag part fails + with pytest.raises(ValueError): + arr.astype(np.float32, casting='same_value') + + def test_same_value_scalar(self): + i = np.array(123, dtype=np.int64) + f = np.array(123, dtype=np.float64) + assert i.astype(np.float64, casting='same_value') == f + assert f.astype(np.int64, casting='same_value') == f diff --git a/numpy/_core/tests/test_conversion_utils.py b/numpy/_core/tests/test_conversion_utils.py index 51676320fa0d..067c2973c592 100644 --- a/numpy/_core/tests/test_conversion_utils.py +++ b/numpy/_core/tests/test_conversion_utils.py @@ -2,14 +2,12 @@ Tests for numpy/_core/src/multiarray/conversion_utils.c """ import re -import sys import pytest -import numpy as np import numpy._core._multiarray_tests as mt -from numpy._core.multiarray import CLIP, WRAP, RAISE -from numpy.testing import assert_warns, IS_PYPY +from numpy._core.multiarray import CLIP, RAISE, WRAP +from numpy.testing import assert_raises class StringConverterTestCase: @@ -19,13 +17,13 @@ class StringConverterTestCase: warn = True def _check_value_error(self, val): - pattern = r'\(got {}\)'.format(re.escape(repr(val))) + pattern = fr'\(got {re.escape(repr(val))}\)' with pytest.raises(ValueError, match=pattern) as exc: self.conv(val) def _check_conv_assert_warn(self, val, expected): if self.warn: - with assert_warns(DeprecationWarning) as exc: + with assert_raises(ValueError) as exc: assert 
self.conv(val) == expected else: assert self.conv(val) == expected @@ -123,6 +121,7 @@ def test_valid(self): class TestSearchsideConverter(StringConverterTestCase): """ Tests of PyArray_SearchsideConverter """ conv = mt.run_searchside_converter + def test_valid(self): self._check('left', 'NPY_SEARCHLEFT') self._check('right', 'NPY_SEARCHRIGHT') @@ -151,6 +150,7 @@ def test_flatten_invalid_order(self): class TestClipmodeConverter(StringConverterTestCase): """ Tests of PyArray_ClipmodeConverter """ conv = mt.run_clipmode_converter + def test_valid(self): self._check('clip', 'NPY_CLIP') self._check('wrap', 'NPY_WRAP') @@ -172,9 +172,12 @@ def test_valid(self): self._check("no", "NPY_NO_CASTING") self._check("equiv", "NPY_EQUIV_CASTING") self._check("safe", "NPY_SAFE_CASTING") - self._check("same_kind", "NPY_SAME_KIND_CASTING") self._check("unsafe", "NPY_UNSAFE_CASTING") + self._check("same_kind", "NPY_SAME_KIND_CASTING") + def test_invalid(self): + # Currently, 'same_value' is supported only in ndarray.astype + self._check_value_error("same_value") class TestIntpConverter: """ Tests of PyArray_IntpConverter """ @@ -187,12 +190,9 @@ def test_basic(self): assert self.conv(()) == () def test_none(self): - # once the warning expires, this will raise TypeError - with pytest.warns(DeprecationWarning): + with pytest.raises(TypeError): assert self.conv(None) == () - @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_float(self): with pytest.raises(TypeError): self.conv(1.0) @@ -204,6 +204,6 @@ def test_too_large(self): self.conv(2**64) def test_too_many_dims(self): - assert self.conv([1]*64) == (1,)*64 + assert self.conv([1] * 64) == (1,) * 64 with pytest.raises(ValueError): - self.conv([1]*65) + self.conv([1] * 65) diff --git a/numpy/_core/tests/test_cpu_dispatcher.py b/numpy/_core/tests/test_cpu_dispatcher.py index 959725ea7bc8..04acf13c228d 100644 --- a/numpy/_core/tests/test_cpu_dispatcher.py +++ 
b/numpy/_core/tests/test_cpu_dispatcher.py @@ -1,25 +1,28 @@ +from numpy._core import _umath_tests from numpy._core._multiarray_umath import ( - __cpu_features__, __cpu_baseline__, __cpu_dispatch__ + __cpu_baseline__, + __cpu_dispatch__, + __cpu_features__, ) -from numpy._core import _umath_tests from numpy.testing import assert_equal + def test_dispatcher(): """ Testing the utilities of the CPU dispatcher """ targets = ( - "SSE2", "SSE41", "AVX2", + "X86_V2", "X86_V3", "VSX", "VSX2", "VSX3", "NEON", "ASIMD", "ASIMDHP", - "VX", "VXE" + "VX", "VXE", "LSX", "RVV" ) - highest_sfx = "" # no suffix for the baseline + highest_sfx = "" # no suffix for the baseline all_sfx = [] for feature in reversed(targets): - # skip baseline features, by the default `CCompilerOpt` do not generate separated objects - # for the baseline, just one object combined all of them via 'baseline' option - # within the configuration statements. + # skip baseline features, by the default `CCompilerOpt` do not generate + # separated objects for the baseline, just one object combined all of them + # via 'baseline' option within the configuration statements. 
if feature in __cpu_baseline__: continue # check compiler and running machine support @@ -32,14 +35,14 @@ def test_dispatcher(): test = _umath_tests.test_dispatch() assert_equal(test["func"], "func" + highest_sfx) - assert_equal(test["var"], "var" + highest_sfx) + assert_equal(test["var"], "var" + highest_sfx) if highest_sfx: assert_equal(test["func_xb"], "func" + highest_sfx) - assert_equal(test["var_xb"], "var" + highest_sfx) + assert_equal(test["var_xb"], "var" + highest_sfx) else: assert_equal(test["func_xb"], "nobase") assert_equal(test["var_xb"], "nobase") - all_sfx.append("func") # add the baseline + all_sfx.append("func") # add the baseline assert_equal(test["all"], all_sfx) diff --git a/numpy/_core/tests/test_cpu_features.py b/numpy/_core/tests/test_cpu_features.py index f4a85c54ca6a..c95886752949 100644 --- a/numpy/_core/tests/test_cpu_features.py +++ b/numpy/_core/tests/test_cpu_features.py @@ -1,14 +1,18 @@ -import sys, platform, re, pytest +import os +import pathlib +import platform +import re +import subprocess +import sys + +import pytest + from numpy._core._multiarray_umath import ( - __cpu_features__, __cpu_baseline__, __cpu_dispatch__, + __cpu_features__, ) -import numpy as np -import subprocess -import pathlib -import os -import re + def assert_features_equal(actual, desired, fname): __tracebackhide__ = True # Hide traceback for py.test @@ -24,30 +28,30 @@ def assert_features_equal(actual, desired, fname): try: import subprocess - auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV="1")) + auxv = subprocess.check_output(['/bin/true'], env={"LD_SHOW_AUXV": "1"}) auxv = auxv.decode() except Exception as err: auxv = str(err) import textwrap error_report = textwrap.indent( -""" +f""" ########################################### ### Extra debugging information ########################################### ------------------------------------------- --- NumPy Detections ------------------------------------------- -%s +{detected} 
------------------------------------------- --- SYS / CPUINFO ------------------------------------------- -%s.... +{cpuinfo}.... ------------------------------------------- --- SYS / AUXV ------------------------------------------- -%s -""" % (detected, cpuinfo, auxv), prefix='\r') +{auxv} +""", prefix='\r') raise AssertionError(( "Failure Detection\n" @@ -70,11 +74,16 @@ class AbstractTest: def load_flags(self): # a hook pass + def test_features(self): self.load_flags() for gname, features in self.features_groups.items(): test_features = [self.cpu_have(f) for f in features] - assert_features_equal(__cpu_features__.get(gname), all(test_features), gname) + assert_features_equal( + __cpu_features__.get(gname), + all(test_features), + gname, + ) for feature_name in self.features: cpu_have = self.cpu_have(feature_name) @@ -85,10 +94,7 @@ def cpu_have(self, feature_name): map_names = self.features_map.get(feature_name, feature_name) if isinstance(map_names, str): return map_names in self.features_flags - for f in map_names: - if f in self.features_flags: - return True - return False + return any(f in self.features_flags for f in map_names) def load_flags_cpuinfo(self, magic_key): self.features_flags = self.get_cpuinfo_item(magic_key) @@ -105,7 +111,7 @@ def get_cpuinfo_item(self, magic_key): return values def load_flags_auxv(self): - auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV="1")) + auxv = subprocess.check_output(['/bin/true'], env={"LD_SHOW_AUXV": "1"}) for at in auxv.split(b'\n'): if not at.startswith(b"AT_HWCAP"): continue @@ -117,17 +123,20 @@ def load_flags_auxv(self): @pytest.mark.skipif( sys.platform == 'emscripten', - reason= ( + reason=( "The subprocess module is not available on WASM platforms and" " therefore this test class cannot be properly executed." 
), ) +@pytest.mark.thread_unsafe( + reason="setup & tmp_path_factory threads-unsafe, modifies environment variables", +) class TestEnvPrivation: cwd = pathlib.Path(__file__).parent.resolve() env = os.environ.copy() _enable = os.environ.pop('NPY_ENABLE_CPU_FEATURES', None) _disable = os.environ.pop('NPY_DISABLE_CPU_FEATURES', None) - SUBPROCESS_ARGS = dict(cwd=cwd, capture_output=True, text=True, check=True) + SUBPROCESS_ARGS = {"cwd": cwd, "capture_output": True, "text": True, "check": True} unavailable_feats = [ feat for feat in __cpu_dispatch__ if not __cpu_features__[feat] ] @@ -139,7 +148,7 @@ class TestEnvPrivation: SCRIPT = """ def main(): from numpy._core._multiarray_umath import ( - __cpu_features__, + __cpu_features__, __cpu_dispatch__ ) @@ -156,7 +165,6 @@ def setup_class(self, tmp_path_factory): file /= "_runtime_detect.py" file.write_text(self.SCRIPT) self.file = file - return def _run(self): return subprocess.run( @@ -189,7 +197,6 @@ def _expect_error( def setup_method(self): """Ensure that the environment is reset""" self.env = os.environ.copy() - return def test_runtime_feature_selection(self): """ @@ -228,7 +235,6 @@ def test_runtime_feature_selection(self): # Ensure that both features are enabled, and they are exactly the ones # specified by `NPY_ENABLE_CPU_FEATURES` assert set(enabled_features) == set(non_baseline_features) - return @pytest.mark.parametrize("enabled, disabled", [ @@ -330,56 +336,64 @@ def test_impossible_feature_enable(self): ) self._expect_error(msg, err_type) + is_linux = sys.platform.startswith('linux') is_cygwin = sys.platform.startswith('cygwin') -machine = platform.machine() -is_x86 = re.match("^(amd64|x86|i386|i686)", machine, re.IGNORECASE) +machine = platform.machine() +is_x86 = re.match(r"^(amd64|x86|i386|i686)", machine, re.IGNORECASE) @pytest.mark.skipif( - not (is_linux or is_cygwin) or not is_x86, reason="Only for Linux and x86" + not (is_linux or is_cygwin) or not is_x86, + reason="Only for Linux and x86", ) class 
Test_X86_Features(AbstractTest): - features = [ - "MMX", "SSE", "SSE2", "SSE3", "SSSE3", "SSE41", "POPCNT", "SSE42", - "AVX", "F16C", "XOP", "FMA4", "FMA3", "AVX2", "AVX512F", "AVX512CD", - "AVX512ER", "AVX512PF", "AVX5124FMAPS", "AVX5124VNNIW", "AVX512VPOPCNTDQ", - "AVX512VL", "AVX512BW", "AVX512DQ", "AVX512VNNI", "AVX512IFMA", - "AVX512VBMI", "AVX512VBMI2", "AVX512BITALG", "AVX512FP16", + features = [] + + features_groups = { + "X86_V2": [ + "SSE", "SSE2", "SSE3", "SSSE3", "SSE41", "SSE42", + "POPCNT", "LAHF", "CX16" + ], + } + features_groups["X86_V3"] = features_groups["X86_V2"] + [ + "AVX", "AVX2", "FMA3", "BMI", "BMI2", + "LZCNT", "F16C", "MOVBE" ] - features_groups = dict( - AVX512_KNL = ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF"], - AVX512_KNM = ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF", "AVX5124FMAPS", - "AVX5124VNNIW", "AVX512VPOPCNTDQ"], - AVX512_SKX = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL"], - AVX512_CLX = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512VNNI"], - AVX512_CNL = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA", - "AVX512VBMI"], - AVX512_ICL = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA", - "AVX512VBMI", "AVX512VNNI", "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ"], - AVX512_SPR = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", - "AVX512VL", "AVX512IFMA", "AVX512VBMI", "AVX512VNNI", - "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ", - "AVX512FP16"], - ) - features_map = dict( - SSE3="PNI", SSE41="SSE4_1", SSE42="SSE4_2", FMA3="FMA", - AVX512VNNI="AVX512_VNNI", AVX512BITALG="AVX512_BITALG", AVX512VBMI2="AVX512_VBMI2", - AVX5124FMAPS="AVX512_4FMAPS", AVX5124VNNIW="AVX512_4VNNIW", AVX512VPOPCNTDQ="AVX512_VPOPCNTDQ", - AVX512FP16="AVX512_FP16", + features_groups["X86_V4"] = features_groups["X86_V3"] + [ + "AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL" + ] + features_groups["AVX512_ICL"] = features_groups["X86_V4"] + [ + 
"AVX512IFMA", "AVX512VBMI", "AVX512VNNI", + "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ", + "VAES", "VPCLMULQDQ", "GFNI" + ] + features_groups["AVX512_SPR"] = ( + features_groups["AVX512_ICL"] + ["AVX512FP16", "AVX512BF16"] ) + + features_map = { + "SSE3": "PNI", "SSE41": "SSE4_1", "SSE42": "SSE4_2", "FMA3": "FMA", + "BMI": "BMI1", "LZCNT": "ABM", "LAHF": "LAHF_LM", + "AVX512VNNI": "AVX512_VNNI", "AVX512BITALG": "AVX512_BITALG", + "AVX512VBMI2": "AVX512_VBMI2", "AVX5124FMAPS": "AVX512_4FMAPS", + "AVX5124VNNIW": "AVX512_4VNNIW", "AVX512VPOPCNTDQ": "AVX512_VPOPCNTDQ", + "AVX512FP16": "AVX512_FP16", "AVX512BF16": "AVX512_BF16" + } + def load_flags(self): self.load_flags_cpuinfo("flags") -is_power = re.match("^(powerpc|ppc)64", machine, re.IGNORECASE) + +is_power = re.match(r"^(powerpc|ppc)64", machine, re.IGNORECASE) @pytest.mark.skipif(not is_linux or not is_power, reason="Only for Linux and Power") class Test_POWER_Features(AbstractTest): features = ["VSX", "VSX2", "VSX3", "VSX4"] - features_map = dict(VSX2="ARCH_2_07", VSX3="ARCH_3_00", VSX4="ARCH_3_1") + features_map = {"VSX2": "ARCH_2_07", "VSX3": "ARCH_3_00", "VSX4": "ARCH_3_1"} def load_flags(self): self.load_flags_auxv() -is_zarch = re.match("^(s390x)", machine, re.IGNORECASE) +is_zarch = re.match(r"^(s390x)", machine, re.IGNORECASE) @pytest.mark.skipif(not is_linux or not is_zarch, reason="Only for Linux and IBM Z") class Test_ZARCH_Features(AbstractTest): @@ -389,29 +403,61 @@ def load_flags(self): self.load_flags_auxv() -is_arm = re.match("^(arm|aarch64)", machine, re.IGNORECASE) +is_arm = re.match(r"^(arm|aarch64)", machine, re.IGNORECASE) @pytest.mark.skipif(not is_linux or not is_arm, reason="Only for Linux and ARM") class Test_ARM_Features(AbstractTest): features = [ "SVE", "NEON", "ASIMD", "FPHP", "ASIMDHP", "ASIMDDP", "ASIMDFHM" ] - features_groups = dict( - NEON_FP16 = ["NEON", "HALF"], - NEON_VFPV4 = ["NEON", "VFPV4"], - ) + features_groups = { + "NEON_FP16": ["NEON", "HALF"], + "NEON_VFPV4": 
["NEON", "VFPV4"], + } + def load_flags(self): self.load_flags_cpuinfo("Features") arch = self.get_cpuinfo_item("CPU architecture") - # in case of mounting virtual filesystem of aarch64 kernel - is_rootfs_v8 = int('0'+next(iter(arch))) > 7 if arch else 0 - if re.match("^(aarch64|AARCH64)", machine) or is_rootfs_v8: - self.features_map = dict( - NEON="ASIMD", HALF="ASIMD", VFPV4="ASIMD" - ) + # in case of mounting virtual filesystem of aarch64 kernel without linux32 + is_rootfs_v8 = ( + not re.match(r"^armv[0-9]+l$", machine) and + (int('0' + next(iter(arch))) > 7 if arch else 0) + ) + if re.match(r"^(aarch64|AARCH64)", machine) or is_rootfs_v8: + self.features_map = { + "NEON": "ASIMD", "HALF": "ASIMD", "VFPV4": "ASIMD" + } else: - self.features_map = dict( + self.features_map = { # ELF auxiliary vector and /proc/cpuinfo on Linux kernel(armv8 aarch32) - # doesn't provide information about ASIMD, so we assume that ASIMD is supported + # doesn't provide information about ASIMD + # so we assume that ASIMD is supported # if the kernel reports any one of the following ARM8 features. - ASIMD=("AES", "SHA1", "SHA2", "PMULL", "CRC32") - ) + "ASIMD": ("AES", "SHA1", "SHA2", "PMULL", "CRC32") + } + + +is_loongarch = re.match(r"^(loongarch)", machine, re.IGNORECASE) +@pytest.mark.skipif( + not is_linux or not is_loongarch, + reason="Only for Linux and LoongArch", +) +class Test_LOONGARCH_Features(AbstractTest): + features = ["LSX"] + + def load_flags(self): + self.load_flags_cpuinfo("Features") + + +is_riscv = re.match(r"^(riscv)", machine, re.IGNORECASE) +@pytest.mark.skipif(not is_linux or not is_riscv, reason="Only for Linux and RISC-V") +class Test_RISCV_Features(AbstractTest): + features = ["RVV"] + + def load_flags(self): + self.load_flags_auxv() + if not self.features_flags: + # Let the test fail and dump if we cannot read HWCAP. 
+ return + hwcap = int(next(iter(self.features_flags)), 16) + if hwcap & (1 << 21): # HWCAP_RISCV_V + self.features_flags.add("RVV") diff --git a/numpy/_core/tests/test_custom_dtypes.py b/numpy/_core/tests/test_custom_dtypes.py index e8acb450516b..2acb4adf4c7c 100644 --- a/numpy/_core/tests/test_custom_dtypes.py +++ b/numpy/_core/tests/test_custom_dtypes.py @@ -1,13 +1,13 @@ -import sys from tempfile import NamedTemporaryFile import pytest import numpy as np -from numpy.testing import assert_array_equal from numpy._core._multiarray_umath import ( - _discover_array_parameters as discover_array_params, _get_sfloat_dtype) - + _discover_array_parameters as discover_array_params, + _get_sfloat_dtype, +) +from numpy.testing import assert_array_equal SF = _get_sfloat_dtype() @@ -15,13 +15,13 @@ class TestSFloat: def _get_array(self, scaling, aligned=True): if not aligned: - a = np.empty(3*8 + 1, dtype=np.uint8)[1:] + a = np.empty(3 * 8 + 1, dtype=np.uint8)[1:] a = a.view(np.float64) a[:] = [1., 2., 3.] else: a = np.array([1., 2., 3.]) - a *= 1./scaling # the casting code also uses the reciprocal. + a *= 1. / scaling # the casting code also uses the reciprocal. return a.view(SF(scaling)) def test_sfloat_rescaled(self): @@ -48,6 +48,9 @@ def test_repr(self): # Check the repr, mainly to cover the code paths: assert repr(SF(scaling=1.)) == "_ScaledFloatTestDType(scaling=1.0)" + def test_dtype_str(self): + assert SF(1.).str == "_ScaledFloatTestDType(scaling=1.0)" + def test_dtype_name(self): assert SF(1.).name == "_ScaledFloatTestDType64" @@ -117,7 +120,7 @@ def test_possible_and_impossible_reduce(self): # For reductions to work, the first and last operand must have the # same dtype. For this parametric DType that is not necessarily true. a = self._get_array(2.) - # Addition reductin works (as of writing requires to pass initial + # Addition reduction works (as of writing requires to pass initial # because setting a scaled-float from the default `0` fails). 
res = np.add.reduce(a, initial=0.) assert res == a.astype(np.float64).sum() @@ -228,6 +231,78 @@ def test_wrapped_and_wrapped_reductions(self): expected = np.hypot.reduce(float_equiv, keepdims=True) assert res.view(np.float64) * 2 == expected + def test_sort(self): + a = self._get_array(1.) + a = a[::-1] # reverse it + + a.sort() + assert_array_equal(a.view(np.float64), [1., 2., 3.]) + + a = self._get_array(1.) + a = a[::-1] # reverse it + + sorted_a = np.sort(a) + assert_array_equal(sorted_a.view(np.float64), [1., 2., 3.]) + # original is unchanged + assert_array_equal(a.view(np.float64), [3., 2., 1.]) + + a = self._get_array(0.5) # different factor + a = a[::2][::-1] # non-contiguous + sorted_a = np.sort(a) + assert_array_equal(sorted_a.view(np.float64), [2., 6.]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 2.]) + + a = self._get_array(0.5, aligned=False) + a = a[::-1] # reverse it + sorted_a = np.sort(a) + assert_array_equal(sorted_a.view(np.float64), [2., 4., 6.]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + + sorted_a = np.sort(a, stable=True) + assert_array_equal(sorted_a.view(np.float64), [2., 4., 6.]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + + sorted_a = np.sort(a, stable=False) + assert_array_equal(sorted_a.view(np.float64), [2., 4., 6.]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + + def test_argsort(self): + a = self._get_array(1.) 
+ a = a[::-1] # reverse it + + indices = np.argsort(a) + assert_array_equal(indices, [2, 1, 0]) + # original is unchanged + assert_array_equal(a.view(np.float64), [3., 2., 1.]) + + a = self._get_array(0.5) + a = a[::2][::-1] # reverse it + indices = np.argsort(a) + assert_array_equal(indices, [1, 0]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 2.]) + + a = self._get_array(0.5, aligned=False) + a = a[::-1] # reverse it + indices = np.argsort(a) + assert_array_equal(indices, [2, 1, 0]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + + sorted_indices = np.argsort(a, stable=True) + assert_array_equal(sorted_indices, [2, 1, 0]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + + sorted_indices = np.argsort(a, stable=False) + assert_array_equal(sorted_indices, [2, 1, 0]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + def test_astype_class(self): # Very simple test that we accept `.astype()` also on the class. # ScaledFloat always returns the default descriptor, but it does @@ -252,6 +327,9 @@ def test_creation_class(self): assert np.zeros(3, dtype=SF).dtype == SF(1.) assert np.zeros_like(arr1, dtype=SF).dtype == SF(1.) 
+ @pytest.mark.thread_unsafe( + reason="_ScaledFloatTestDType setup is thread-unsafe (gh-29850)" + ) def test_np_save_load(self): # this monkeypatch is needed because pickle # uses the repr of a type to reconstruct it @@ -295,6 +373,9 @@ def test_flatiter_index(self, index): np.testing.assert_array_equal( arr.view(np.float64), arr2.view(np.float64)) +@pytest.mark.thread_unsafe( + reason="_ScaledFloatTestDType setup is thread-unsafe (gh-29850)" +) def test_type_pickle(): # can't actually unpickle, but we can pickle (if in namespace) import pickle diff --git a/numpy/_core/tests/test_cython.py b/numpy/_core/tests/test_cython.py index 26a1fafa0066..c405a59e535e 100644 --- a/numpy/_core/tests/test_cython.py +++ b/numpy/_core/tests/test_cython.py @@ -1,13 +1,13 @@ -from datetime import datetime import os -import shutil import subprocess import sys -import time +import sysconfig +from datetime import datetime + import pytest import numpy as np -from numpy.testing import assert_array_equal, IS_WASM, IS_EDITABLE +from numpy.testing import IS_EDITABLE, IS_WASM, assert_array_equal # This import is copied from random.tests.test_extending try: @@ -48,12 +48,15 @@ def install_temp(tmpdir_factory): native_file = str(build_dir / 'interpreter-native-file.ini') with open(native_file, 'w') as f: f.write("[binaries]\n") - f.write(f"python = '{sys.executable}'") + f.write(f"python = '{sys.executable}'\n") + f.write(f"python3 = '{sys.executable}'") try: subprocess.check_call(["meson", "--version"]) except FileNotFoundError: pytest.skip("No usable 'meson' found") + if sysconfig.get_platform() == "win-arm64": + pytest.skip("Meson unable to find MSVC linker on win-arm64") if sys.platform == "win32": subprocess.check_call(["meson", "setup", "--buildtype=release", @@ -72,7 +75,7 @@ def install_temp(tmpdir_factory): print("----------------") print("meson build failed when doing") print(f"'meson setup --native-file {native_file} {srcdir}'") - print(f"'meson compile -vv'") + print("'meson 
compile -vv'") print(f"in {build_dir}") print("----------------") raise @@ -153,6 +156,13 @@ def test_default_int(install_temp): assert checks.get_default_integer() is np.dtype(int) + +def test_ravel_axis(install_temp): + import checks + + assert checks.get_ravel_axis() == np.iinfo("intc").min + + def test_convert_datetime64_to_datetimestruct(install_temp): # GH#21199 import checks @@ -210,10 +220,8 @@ def test_multiiter_fields(install_temp, arrays): assert bcast.shape == checks.get_multiiter_shape(bcast) assert bcast.index == checks.get_multiiter_current_index(bcast) assert all( - [ - x.base is y.base - for x, y in zip(bcast.iters, checks.get_multiiter_iters(bcast)) - ] + x.base is y.base + for x, y in zip(bcast.iters, checks.get_multiiter_iters(bcast)) ) @@ -263,6 +271,7 @@ def test_npyiter_api(install_temp): assert checks.get_npyiter_size(it) == it.itersize == np.prod(arr.shape) assert checks.npyiter_has_multi_index(it) == it.has_multi_index == True assert checks.get_npyiter_ndim(it) == it.ndim == 2 + assert checks.test_get_multi_index_iter_next(it, arr) arr2 = np.random.rand(2, 1, 2) it = np.nditer([arr, arr2]) @@ -273,10 +282,8 @@ def test_npyiter_api(install_temp): x is y for x, y in zip(checks.get_npyiter_operands(it), it.operands) ) assert all( - [ - np.allclose(x, y) - for x, y in zip(checks.get_npyiter_itviews(it), it.itviews) - ] + np.allclose(x, y) + for x, y in zip(checks.get_npyiter_itviews(it), it.itviews) ) @@ -289,7 +296,57 @@ def test_fillwithbytes(install_temp): def test_complex(install_temp): from checks import inc2_cfloat_struct - - arr = np.array([0, 10+10j], dtype="F") + + arr = np.array([0, 10 + 10j], dtype="F") inc2_cfloat_struct(arr) assert arr[1] == (12 + 12j) + + +def test_npystring_pack(install_temp): + """Check that the cython API can write to a vstring array.""" + import checks + + arr = np.array(['a', 'b', 'c'], dtype='T') + assert checks.npystring_pack(arr) == 0 + + # checks.npystring_pack writes to the beginning of the array + 
assert arr[0] == "Hello world" + +def test_npystring_load(install_temp): + """Check that the cython API can load strings from a vstring array.""" + import checks + + arr = np.array(['abcd', 'b', 'c'], dtype='T') + result = checks.npystring_load(arr) + assert result == 'abcd' + + +def test_npystring_multiple_allocators(install_temp): + """Check that the cython API can acquire/release multiple vstring allocators.""" + import checks + + dt = np.dtypes.StringDType(na_object=None) + arr1 = np.array(['abcd', 'b', 'c'], dtype=dt) + arr2 = np.array(['a', 'b', 'c'], dtype=dt) + + assert checks.npystring_pack_multiple(arr1, arr2) == 0 + assert arr1[0] == "Hello world" + assert arr1[-1] is None + assert arr2[0] == "test this" + + +def test_npystring_allocators_other_dtype(install_temp): + """Check that allocators for non-StringDType arrays is NULL.""" + import checks + + arr1 = np.array([1, 2, 3], dtype='i') + arr2 = np.array([4, 5, 6], dtype='i') + + assert checks.npystring_allocators_other_types(arr1, arr2) == 0 + + +@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', + reason='no checks module on win-arm64') +def test_npy_uintp_type_enum(install_temp): + import checks + assert checks.check_npy_uintp_type_enum() diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index 70a294796a0d..da964923d2c6 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -1,5 +1,7 @@ import datetime import pickle +import warnings +from zoneinfo import ZoneInfo, ZoneInfoNotFoundError import pytest @@ -7,22 +9,29 @@ import numpy as np from numpy.testing import ( IS_WASM, - assert_, assert_equal, assert_raises, assert_warns, suppress_warnings, - assert_raises_regex, assert_array_equal, - ) - -# Use pytz to test out various time zones if available -try: - from pytz import timezone as tz - _has_pytz = True -except ImportError: - _has_pytz = False + assert_, + assert_array_equal, + assert_equal, + assert_raises, + 
assert_raises_regex, +) try: RecursionError except NameError: RecursionError = RuntimeError # python < 3.5 +try: + ZoneInfo("US/Central") + _has_tz = True +except ZoneInfoNotFoundError: + _has_tz = False + +def _assert_equal_hash(v1, v2): + assert v1 == v2 + assert hash(v1) == hash(v2) + assert v2 in {v1} + class TestDateTime: @@ -45,10 +54,10 @@ def test_datetime_dtype_creation(self): 'h', 'm', 's', 'ms', 'us', 'Îŧs', # alias for us 'ns', 'ps', 'fs', 'as']: - dt1 = np.dtype('M8[750%s]' % unit) - assert_(dt1 == np.dtype('datetime64[750%s]' % unit)) - dt2 = np.dtype('m8[%s]' % unit) - assert_(dt2 == np.dtype('timedelta64[%s]' % unit)) + dt1 = np.dtype(f'M8[750{unit}]') + assert_(dt1 == np.dtype(f'datetime64[750{unit}]')) + dt2 = np.dtype(f'm8[{unit}]') + assert_(dt2 == np.dtype(f'timedelta64[{unit}]')) # Generic units shouldn't add [] to the end assert_equal(str(np.dtype("M8")), "datetime64") @@ -255,10 +264,12 @@ def test_datetime_scalar_construction(self): # Some basic strings and repr assert_equal(str(np.datetime64('NaT')), 'NaT') assert_equal(repr(np.datetime64('NaT')), - "np.datetime64('NaT')") + "np.datetime64('NaT','generic')") assert_equal(str(np.datetime64('2011-02')), '2011-02') assert_equal(repr(np.datetime64('2011-02')), "np.datetime64('2011-02')") + assert_equal(repr(np.datetime64('NaT').astype(np.dtype("datetime64[ns]"))), + "np.datetime64('NaT','ns')") # None gets constructed as NaT assert_equal(np.datetime64(None), np.datetime64('NaT')) @@ -371,7 +382,7 @@ def test_datetime_array_find_type(self): # "generic" to select generic unit ("Y"), ("M"), ("W"), ("D"), ("h"), ("m"), ("s"), ("ms"), ("us"), ("ns"), ("ps"), - ("fs"), ("as"), ("generic") ]) + ("fs"), ("as"), ("generic")]) def test_timedelta_np_int_construction(self, unit): # regression test for gh-7617 if unit != "generic": @@ -488,7 +499,7 @@ def test_timedelta_0_dim_object_array_conversion(self): def test_timedelta_nat_format(self): # gh-17552 - assert_equal('NaT', 
'{0}'.format(np.timedelta64('nat'))) + assert_equal('NaT', f'{np.timedelta64("nat")}') def test_timedelta_scalar_construction_units(self): # String construction detecting units @@ -624,42 +635,42 @@ def test_datetime_nat_casting(self): def test_days_creation(self): assert_equal(np.array('1599', dtype='M8[D]').astype('i8'), - (1600-1970)*365 - (1972-1600)/4 + 3 - 365) + (1600 - 1970) * 365 - (1972 - 1600) / 4 + 3 - 365) assert_equal(np.array('1600', dtype='M8[D]').astype('i8'), - (1600-1970)*365 - (1972-1600)/4 + 3) + (1600 - 1970) * 365 - (1972 - 1600) / 4 + 3) assert_equal(np.array('1601', dtype='M8[D]').astype('i8'), - (1600-1970)*365 - (1972-1600)/4 + 3 + 366) + (1600 - 1970) * 365 - (1972 - 1600) / 4 + 3 + 366) assert_equal(np.array('1900', dtype='M8[D]').astype('i8'), - (1900-1970)*365 - (1970-1900)//4) + (1900 - 1970) * 365 - (1970 - 1900) // 4) assert_equal(np.array('1901', dtype='M8[D]').astype('i8'), - (1900-1970)*365 - (1970-1900)//4 + 365) - assert_equal(np.array('1967', dtype='M8[D]').astype('i8'), -3*365 - 1) - assert_equal(np.array('1968', dtype='M8[D]').astype('i8'), -2*365 - 1) - assert_equal(np.array('1969', dtype='M8[D]').astype('i8'), -1*365) - assert_equal(np.array('1970', dtype='M8[D]').astype('i8'), 0*365) - assert_equal(np.array('1971', dtype='M8[D]').astype('i8'), 1*365) - assert_equal(np.array('1972', dtype='M8[D]').astype('i8'), 2*365) - assert_equal(np.array('1973', dtype='M8[D]').astype('i8'), 3*365 + 1) - assert_equal(np.array('1974', dtype='M8[D]').astype('i8'), 4*365 + 1) + (1900 - 1970) * 365 - (1970 - 1900) // 4 + 365) + assert_equal(np.array('1967', dtype='M8[D]').astype('i8'), -3 * 365 - 1) + assert_equal(np.array('1968', dtype='M8[D]').astype('i8'), -2 * 365 - 1) + assert_equal(np.array('1969', dtype='M8[D]').astype('i8'), -1 * 365) + assert_equal(np.array('1970', dtype='M8[D]').astype('i8'), 0 * 365) + assert_equal(np.array('1971', dtype='M8[D]').astype('i8'), 1 * 365) + assert_equal(np.array('1972', dtype='M8[D]').astype('i8'), 
2 * 365) + assert_equal(np.array('1973', dtype='M8[D]').astype('i8'), 3 * 365 + 1) + assert_equal(np.array('1974', dtype='M8[D]').astype('i8'), 4 * 365 + 1) assert_equal(np.array('2000', dtype='M8[D]').astype('i8'), - (2000 - 1970)*365 + (2000 - 1972)//4) + (2000 - 1970) * 365 + (2000 - 1972) // 4) assert_equal(np.array('2001', dtype='M8[D]').astype('i8'), - (2000 - 1970)*365 + (2000 - 1972)//4 + 366) + (2000 - 1970) * 365 + (2000 - 1972) // 4 + 366) assert_equal(np.array('2400', dtype='M8[D]').astype('i8'), - (2400 - 1970)*365 + (2400 - 1972)//4 - 3) + (2400 - 1970) * 365 + (2400 - 1972) // 4 - 3) assert_equal(np.array('2401', dtype='M8[D]').astype('i8'), - (2400 - 1970)*365 + (2400 - 1972)//4 - 3 + 366) + (2400 - 1970) * 365 + (2400 - 1972) // 4 - 3 + 366) assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('i8'), - (1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 28) + (1600 - 1970) * 365 - (1972 - 1600) // 4 + 3 + 31 + 28) assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('i8'), - (1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 29) + (1600 - 1970) * 365 - (1972 - 1600) // 4 + 3 + 31 + 29) assert_equal(np.array('2000-02-29', dtype='M8[D]').astype('i8'), - (2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 28) + (2000 - 1970) * 365 + (2000 - 1972) // 4 + 31 + 28) assert_equal(np.array('2000-03-01', dtype='M8[D]').astype('i8'), - (2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 29) + (2000 - 1970) * 365 + (2000 - 1972) // 4 + 31 + 29) assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('i8'), - (2000 - 1970)*365 + (2000 - 1972)//4 + 366 + 31 + 28 + 21) + (2000 - 1970) * 365 + (2000 - 1972) // 4 + 366 + 31 + 28 + 21) def test_days_to_pydate(self): assert_equal(np.array('1599', dtype='M8[D]').astype('O'), @@ -809,7 +820,7 @@ def test_datetime_array_str(self): a = np.array(['2011-03-16T13:55', '1920-01-01T03:12'], dtype='M') assert_equal(np.array2string(a, separator=', ', formatter={'datetime': lambda x: - "'%s'" % np.datetime_as_string(x, timezone='UTC')}), + 
f"'{np.datetime_as_string(x, timezone='UTC')}'"}), "['2011-03-16T13:55Z', '1920-01-01T03:12Z']") # Check that one NaT doesn't corrupt subsequent entries @@ -833,6 +844,21 @@ def test_timedelta_array_str(self): a = np.array([-1, 'NaT', 1234567], dtype=''\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n" + \ - b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb." - assert_equal(pickle.loads(pkl), np.dtype('>M8[us]')) + with pytest.warns(np.exceptions.VisibleDeprecationWarning, + match=r".*align should be passed"): + pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n"\ + b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'D'\np6\n"\ + b"I7\nI1\nI1\ntp7\ntp8\ntp9\nb." + assert_equal(pickle.loads(pkl), np.dtype(''\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n"\ + b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb." + assert_equal(pickle.loads(pkl), np.dtype('>M8[us]')) + + def test_gh_29555(self): + # check that dtype metadata round-trips when none + dt = np.dtype('>M8[us]') + assert dt.metadata is None + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + res = pickle.loads(pickle.dumps(dt, protocol=proto)) + assert_equal(res, dt) + assert res.metadata is None def test_setstate(self): "Verify that datetime dtype __setstate__ can handle bad arguments" dt = np.dtype('>M8[us]') - assert_raises(ValueError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, 1)) + assert_raises(ValueError, dt.__setstate__, + (4, '>', None, None, None, -1, -1, 0, 1)) assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2]) - assert_raises(TypeError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx'))) + assert_raises(TypeError, dt.__setstate__, + (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx'))) assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2]) def test_dtype_promotion(self): @@ -874,23 +913,23 @@ def test_dtype_promotion(self): # timedelta timedelta computes the metadata gcd for mM in ['m', 'M']: assert_equal( - np.promote_types(np.dtype(mM+'8[2Y]'), np.dtype(mM+'8[2Y]')), - 
np.dtype(mM+'8[2Y]')) + np.promote_types(np.dtype(mM + '8[2Y]'), np.dtype(mM + '8[2Y]')), + np.dtype(mM + '8[2Y]')) assert_equal( - np.promote_types(np.dtype(mM+'8[12Y]'), np.dtype(mM+'8[15Y]')), - np.dtype(mM+'8[3Y]')) + np.promote_types(np.dtype(mM + '8[12Y]'), np.dtype(mM + '8[15Y]')), + np.dtype(mM + '8[3Y]')) assert_equal( - np.promote_types(np.dtype(mM+'8[62M]'), np.dtype(mM+'8[24M]')), - np.dtype(mM+'8[2M]')) + np.promote_types(np.dtype(mM + '8[62M]'), np.dtype(mM + '8[24M]')), + np.dtype(mM + '8[2M]')) assert_equal( - np.promote_types(np.dtype(mM+'8[1W]'), np.dtype(mM+'8[2D]')), - np.dtype(mM+'8[1D]')) + np.promote_types(np.dtype(mM + '8[1W]'), np.dtype(mM + '8[2D]')), + np.dtype(mM + '8[1D]')) assert_equal( - np.promote_types(np.dtype(mM+'8[W]'), np.dtype(mM+'8[13s]')), - np.dtype(mM+'8[s]')) + np.promote_types(np.dtype(mM + '8[W]'), np.dtype(mM + '8[13s]')), + np.dtype(mM + '8[s]')) assert_equal( - np.promote_types(np.dtype(mM+'8[13W]'), np.dtype(mM+'8[49s]')), - np.dtype(mM+'8[7s]')) + np.promote_types(np.dtype(mM + '8[13W]'), np.dtype(mM + '8[49s]')), + np.dtype(mM + '8[7s]')) # timedelta timedelta raises when there is no reasonable gcd assert_raises(TypeError, np.promote_types, np.dtype('m8[Y]'), np.dtype('m8[D]')) @@ -937,7 +976,7 @@ def test_pyobject_roundtrip(self): b[8] = 'NaT' assert_equal(b.astype(object).astype(unit), b, - "Error roundtripping unit %s" % unit) + f"Error roundtripping unit {unit}") # With time units for unit in ['M8[as]', 'M8[16fs]', 'M8[ps]', 'M8[us]', 'M8[300as]', 'M8[20us]']: @@ -953,7 +992,7 @@ def test_pyobject_roundtrip(self): b[8] = 'NaT' assert_equal(b.astype(object).astype(unit), b, - "Error roundtripping unit %s" % unit) + f"Error roundtripping unit {unit}") def test_month_truncation(self): # Make sure that months are truncating correctly @@ -971,9 +1010,9 @@ def test_month_truncation(self): def test_different_unit_comparison(self): # Check some years with date units for unit1 in ['Y', 'M', 'D']: - dt1 = 
np.dtype('M8[%s]' % unit1) + dt1 = np.dtype(f'M8[{unit1}]') for unit2 in ['Y', 'M', 'D']: - dt2 = np.dtype('M8[%s]' % unit2) + dt2 = np.dtype(f'M8[{unit2}]') assert_equal(np.array('1945', dtype=dt1), np.array('1945', dtype=dt2)) assert_equal(np.array('1970', dtype=dt1), @@ -992,9 +1031,9 @@ def test_different_unit_comparison(self): np.datetime64('10000-01-01', unit2)) # Check some datetimes with time units for unit1 in ['6h', 'h', 'm', 's', '10ms', 'ms', 'us']: - dt1 = np.dtype('M8[%s]' % unit1) + dt1 = np.dtype(f'M8[{unit1}]') for unit2 in ['h', 'm', 's', 'ms', 'us']: - dt2 = np.dtype('M8[%s]' % unit2) + dt2 = np.dtype(f'M8[{unit2}]') assert_equal(np.array('1945-03-12T18', dtype=dt1), np.array('1945-03-12T18', dtype=dt2)) assert_equal(np.array('1970-03-12T18', dtype=dt1), @@ -1013,9 +1052,9 @@ def test_different_unit_comparison(self): np.datetime64('10000-01-01T00', unit2)) # Check some days with units that won't overflow for unit1 in ['D', '12h', 'h', 'm', 's', '4s', 'ms', 'us']: - dt1 = np.dtype('M8[%s]' % unit1) + dt1 = np.dtype(f'M8[{unit1}]') for unit2 in ['D', 'h', 'm', 's', 'ms', 'us']: - dt2 = np.dtype('M8[%s]' % unit2) + dt2 = np.dtype(f'M8[{unit2}]') assert_(np.equal(np.array('1932-02-17', dtype='M').astype(dt1), np.array('1932-02-17T00:00:00', dtype='M').astype(dt2), casting='unsafe')) @@ -1041,21 +1080,18 @@ def test_datetime_like(self): assert_equal(np.zeros_like(b).dtype, b.dtype) assert_equal(np.empty_like(b).dtype, b.dtype) - def test_datetime_unary(self): - for tda, tdb, tdzero, tdone, tdmone in \ + def test_timedelta64_unary(self): + for tda, tdb, tdzero in \ [ # One-dimensional arrays (np.array([3], dtype='m8[D]'), np.array([-3], dtype='m8[D]'), - np.array([0], dtype='m8[D]'), - np.array([1], dtype='m8[D]'), - np.array([-1], dtype='m8[D]')), + np.array([0], dtype='m8[D]')), # NumPy scalars (np.timedelta64(3, '[D]'), np.timedelta64(-3, '[D]'), - np.timedelta64(0, '[D]'), - np.timedelta64(1, '[D]'), - np.timedelta64(-1, '[D]'))]: + 
np.timedelta64(0, '[D]')), + ]: # negative ufunc assert_equal(-tdb, tda) assert_equal((-tdb).dtype, tda.dtype) @@ -1073,13 +1109,24 @@ def test_datetime_unary(self): assert_equal(np.absolute(tdb).dtype, tda.dtype) # sign ufunc - assert_equal(np.sign(tda), tdone) - assert_equal(np.sign(tdb), tdmone) - assert_equal(np.sign(tdzero), tdzero) - assert_equal(np.sign(tda).dtype, tda.dtype) - - # The ufuncs always produce native-endian results - assert_ + assert_equal(np.sign(tda), np.ones_like(tda, dtype=np.float64), + strict=True) + assert_equal(np.sign(tdb), -np.ones_like(tdb, dtype=np.float64), + strict=True) + assert_equal(np.sign(tdzero), np.zeros_like(tdzero, dtype=np.float64), + strict=True) + + def test_timedelta64_sign_nat(self): + x = np.array([np.timedelta64(-123, 's'), + np.timedelta64(0, 's'), + np.timedelta64(88, 's'), + np.timedelta64('NaT', 's')]) + s = np.sign(x) + assert_equal(s, np.array([-1.0, 0.0, 1.0, np.nan]), strict=True) + + def test_timedelta64_sign_nat_scalar(self): + nat = np.timedelta64('nat', 'm') + assert_equal(np.sign(nat), np.nan) def test_datetime_add(self): for dta, dtb, dtc, dtnat, tda, tdb, tdc in \ @@ -1091,7 +1138,7 @@ def test_datetime_add(self): np.array(['NaT'], dtype='M8[D]'), np.array([3], dtype='m8[D]'), np.array([11], dtype='m8[h]'), - np.array([3*24 + 11], dtype='m8[h]')), + np.array([3 * 24 + 11], dtype='m8[h]')), # NumPy scalars (np.datetime64('2012-12-21', '[D]'), np.datetime64('2012-12-24', '[D]'), @@ -1099,7 +1146,7 @@ def test_datetime_add(self): np.datetime64('NaT', '[D]'), np.timedelta64(3, '[D]'), np.timedelta64(11, '[h]'), - np.timedelta64(3*24 + 11, '[h]'))]: + np.timedelta64(3 * 24 + 11, '[h]'))]: # m8 + m8 assert_equal(tda + tdb, tdc) assert_equal((tda + tdb).dtype, np.dtype('m8[h]')) @@ -1107,14 +1154,14 @@ def test_datetime_add(self): assert_equal(tdb + True, tdb + 1) assert_equal((tdb + True).dtype, np.dtype('m8[h]')) # m8 + int - assert_equal(tdb + 3*24, tdc) - assert_equal((tdb + 3*24).dtype, 
np.dtype('m8[h]')) + assert_equal(tdb + 3 * 24, tdc) + assert_equal((tdb + 3 * 24).dtype, np.dtype('m8[h]')) # bool + m8 assert_equal(False + tdb, tdb) assert_equal((False + tdb).dtype, np.dtype('m8[h]')) # int + m8 - assert_equal(3*24 + tdb, tdc) - assert_equal((3*24 + tdb).dtype, np.dtype('m8[h]')) + assert_equal(3 * 24 + tdb, tdc) + assert_equal((3 * 24 + tdb).dtype, np.dtype('m8[h]')) # M8 + bool assert_equal(dta + True, dta + 1) assert_equal(dtnat + True, dtnat) @@ -1163,7 +1210,7 @@ def test_datetime_subtract(self): np.array(['NaT'], dtype='M8[D]'), np.array([3], dtype='m8[D]'), np.array([11], dtype='m8[h]'), - np.array([3*24 - 11], dtype='m8[h]')), + np.array([3 * 24 - 11], dtype='m8[h]')), # NumPy scalars (np.datetime64('2012-12-21', '[D]'), np.datetime64('2012-12-24', '[D]'), @@ -1173,7 +1220,7 @@ def test_datetime_subtract(self): np.datetime64('NaT', '[D]'), np.timedelta64(3, '[D]'), np.timedelta64(11, '[h]'), - np.timedelta64(3*24 - 11, '[h]'))]: + np.timedelta64(3 * 24 - 11, '[h]'))]: # m8 - m8 assert_equal(tda - tdb, tdc) assert_equal((tda - tdb).dtype, np.dtype('m8[h]')) @@ -1183,14 +1230,14 @@ def test_datetime_subtract(self): assert_equal(tdc - True, tdc - 1) assert_equal((tdc - True).dtype, np.dtype('m8[h]')) # m8 - int - assert_equal(tdc - 3*24, -tdb) - assert_equal((tdc - 3*24).dtype, np.dtype('m8[h]')) + assert_equal(tdc - 3 * 24, -tdb) + assert_equal((tdc - 3 * 24).dtype, np.dtype('m8[h]')) # int - m8 assert_equal(False - tdb, -tdb) assert_equal((False - tdb).dtype, np.dtype('m8[h]')) # int - m8 - assert_equal(3*24 - tdb, tdc) - assert_equal((3*24 - tdb).dtype, np.dtype('m8[h]')) + assert_equal(3 * 24 - tdb, tdc) + assert_equal((3 * 24 - tdb).dtype, np.dtype('m8[h]')) # M8 - bool assert_equal(dtb - True, dtb - 1) assert_equal(dtnat - True, dtnat) @@ -1268,9 +1315,11 @@ def test_datetime_multiply(self): assert_raises(TypeError, np.multiply, 1.5, dta) # NaTs - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value 
encountered in multiply") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "invalid value encountered in multiply", RuntimeWarning) nat = np.timedelta64('NaT') + def check(a, b, res): assert_equal(a * b, res) assert_equal(b * a, res) @@ -1329,7 +1378,7 @@ def test_timedelta_floor_divide(self, op1, op2, exp): np.timedelta64(-1)), ]) def test_timedelta_floor_div_warnings(self, op1, op2): - with assert_warns(RuntimeWarning): + with pytest.warns(RuntimeWarning): actual = op1 // op2 assert_equal(actual, 0) assert_equal(actual.dtype, np.int64) @@ -1390,6 +1439,14 @@ def test_timedelta_divmod(self, op1, op2): expected = (op1 // op2, op1 % op2) assert_equal(divmod(op1, op2), expected) + @pytest.mark.parametrize("op1, op2", [ + # Y and M are incompatible with all units except Y and M + (np.timedelta64(1, 'Y'), np.timedelta64(1, 's')), + (np.timedelta64(1, 'D'), np.timedelta64(1, 'M')), + ]) + def test_timedelta_divmod_typeerror(self, op1, op2): + assert_raises(TypeError, np.divmod, op1, op2) + @pytest.mark.skipif(IS_WASM, reason="does not work in wasm") @pytest.mark.parametrize("op1, op2", [ # reuse cases from floordiv @@ -1405,9 +1462,9 @@ def test_timedelta_divmod(self, op1, op2): np.timedelta64(-1)), ]) def test_timedelta_divmod_warnings(self, op1, op2): - with assert_warns(RuntimeWarning): + with pytest.warns(RuntimeWarning): expected = (op1 // op2, op1 % op2) - with assert_warns(RuntimeWarning): + with pytest.warns(RuntimeWarning): actual = divmod(op1, op2) assert_equal(actual, expected) @@ -1459,8 +1516,9 @@ def test_datetime_divide(self): assert_raises(TypeError, np.divide, 1.5, dta) # NaTs - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, r".*encountered in divide") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', r".*encountered in divide", RuntimeWarning) nat = np.timedelta64('NaT') for tp in (int, float): assert_equal(np.timedelta64(1) / tp(0), nat) @@ -1547,7 +1605,7 @@ def test_datetime_minmax(self): 
# Also do timedelta a = np.array(3, dtype='m8[h]') - b = np.array(3*3600 - 3, dtype='m8[s]') + b = np.array(3 * 3600 - 3, dtype='m8[s]') assert_equal(np.minimum(a, b), b) assert_equal(np.minimum(a, b).dtype, np.dtype('m8[s]')) assert_equal(np.fmin(a, b), b) @@ -1577,7 +1635,7 @@ def test_datetime_minmax(self): def test_hours(self): t = np.ones(3, dtype='M8[s]') - t[0] = 60*60*24 + 60*60*10 + t[0] = 60 * 60 * 24 + 60 * 60 * 10 assert_(t[0].item().hour == 10) def test_divisor_conversion_year(self): @@ -1747,10 +1805,10 @@ def test_creation_overflow(self): timesteps = np.array([date], dtype='datetime64[s]')[0].astype(np.int64) for unit in ['ms', 'us', 'ns']: timesteps *= 1000 - x = np.array([date], dtype='datetime64[%s]' % unit) + x = np.array([date], dtype=f'datetime64[{unit}]') assert_equal(timesteps, x[0].astype(np.int64), - err_msg='Datetime conversion error for unit %s' % unit) + err_msg=f'Datetime conversion error for unit {unit}') assert_equal(x[0].astype(np.int64), 322689600000000000) @@ -1820,6 +1878,10 @@ def test_datetime_as_string(self): '2032-07-18') assert_equal(np.datetime_as_string(a, unit='D', casting='unsafe'), '2032-07-18') + + with pytest.raises(ValueError): + np.datetime_as_string(a, unit='Y', casting='same_value') + assert_equal(np.datetime_as_string(a, unit='h'), '2032-07-18T12') assert_equal(np.datetime_as_string(a, unit='m'), '2032-07-18T12:23') @@ -1866,7 +1928,7 @@ def test_datetime_as_string(self): np.datetime64('2032-01-01T00:00:00', 'us'), unit='auto'), '2032-01-01') - @pytest.mark.skipif(not _has_pytz, reason="The pytz module is not available.") + @pytest.mark.skipif(not _has_tz, reason="The tzdata module is not available.") def test_datetime_as_string_timezone(self): # timezone='local' vs 'UTC' a = np.datetime64('2010-03-15T06:30', 'm') @@ -1881,29 +1943,29 @@ def test_datetime_as_string_timezone(self): b = np.datetime64('2010-02-15T06:30', 'm') - assert_equal(np.datetime_as_string(a, timezone=tz('US/Central')), + 
assert_equal(np.datetime_as_string(a, timezone=ZoneInfo('US/Central')), '2010-03-15T01:30-0500') - assert_equal(np.datetime_as_string(a, timezone=tz('US/Eastern')), + assert_equal(np.datetime_as_string(a, timezone=ZoneInfo('US/Eastern')), '2010-03-15T02:30-0400') - assert_equal(np.datetime_as_string(a, timezone=tz('US/Pacific')), + assert_equal(np.datetime_as_string(a, timezone=ZoneInfo('US/Pacific')), '2010-03-14T23:30-0700') - assert_equal(np.datetime_as_string(b, timezone=tz('US/Central')), + assert_equal(np.datetime_as_string(b, timezone=ZoneInfo('US/Central')), '2010-02-15T00:30-0600') - assert_equal(np.datetime_as_string(b, timezone=tz('US/Eastern')), + assert_equal(np.datetime_as_string(b, timezone=ZoneInfo('US/Eastern')), '2010-02-15T01:30-0500') - assert_equal(np.datetime_as_string(b, timezone=tz('US/Pacific')), + assert_equal(np.datetime_as_string(b, timezone=ZoneInfo('US/Pacific')), '2010-02-14T22:30-0800') # Dates to strings with a timezone attached is disabled by default assert_raises(TypeError, np.datetime_as_string, a, unit='D', - timezone=tz('US/Pacific')) + timezone=ZoneInfo('US/Pacific')) # Check that we can print out the date in the specified time zone assert_equal(np.datetime_as_string(a, unit='D', - timezone=tz('US/Pacific'), casting='unsafe'), + timezone=ZoneInfo('US/Pacific'), casting='unsafe'), '2010-03-14') assert_equal(np.datetime_as_string(b, unit='D', - timezone=tz('US/Central'), casting='unsafe'), + timezone=ZoneInfo('US/Central'), casting='unsafe'), '2010-02-15') def test_datetime_arange(self): @@ -2022,7 +2084,7 @@ def test_timedelta_modulus_error(self, val1, val2): @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") def test_timedelta_modulus_div_by_zero(self): - with assert_warns(RuntimeWarning): + with pytest.warns(RuntimeWarning): actual = np.timedelta64(10, 's') % np.timedelta64(0, 's') assert_equal(actual, np.timedelta64('NaT')) @@ -2158,6 +2220,11 @@ def test_datetime_busdaycalendar(self): bdd = 
np.busdaycalendar(weekmask="0011001") assert_equal(bdd.weekmask, np.array([0, 0, 1, 1, 0, 0, 1], dtype='?')) + # Check length 7 bool array. + mask = np.array([False, True, True, True, True, False, False]) + bdd = np.busdaycalendar(weekmask=mask) + assert_equal(bdd.weekmask, mask, strict=True) + # Check length 7 string weekmask. bdd = np.busdaycalendar(weekmask="Mon Tue") assert_equal(bdd.weekmask, np.array([1, 1, 0, 0, 0, 0, 0], dtype='?')) @@ -2368,7 +2435,7 @@ def test_datetime_busday_holidays_count(self): assert_equal(np.busday_count(dates, '2011-12-31', busdaycal=bdd), expected) # Returns negative value when reversed - expected = -np.arange(366)+1 + expected = -np.arange(366) + 1 expected[0] = 0 assert_equal(np.busday_count('2011-12-31', dates, busdaycal=bdd), expected) @@ -2394,7 +2461,6 @@ def test_datetime_busday_holidays_count(self): assert_equal(np.busday_count(friday, saturday), 1) assert_equal(np.busday_count(saturday, friday), 0) - def test_datetime_is_busday(self): holidays = ['2011-01-01', '2011-10-10', '2011-11-11', '2011-11-24', '2011-12-25', '2011-05-30', '2011-02-21', '2011-01-17', @@ -2446,13 +2512,13 @@ def test_isnat(self): for unit in ['Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', 'ps', 'fs', 'as']: - arr = np.array([123, -321, "NaT"], dtype='datetime64[{unit}]') assert_equal(np.isnat(arr), res) - arr = np.array([123, -321, "NaT"], dtype='timedelta64[{unit}]') assert_equal(np.isnat(arr), res) def test_isnat_error(self): @@ -2478,10 +2544,10 @@ def test_isfinite_isinf_isnan_units(self, unit, dstr): '''check isfinite, isinf, isnan for all units of M, m dtypes ''' arr_val = [123, -321, "NaT"] - arr = np.array(arr_val, dtype= dstr % unit) - pos = np.array([True, True, False]) - neg = np.array([False, False, True]) - false = np.array([False, False, False]) + arr = np.array(arr_val, dtype=(dstr % unit)) + pos = np.array([True, True, False]) + neg = np.array([False, False, True]) + false = np.array([False, False, False]) 
assert_equal(np.isfinite(arr), pos) assert_equal(np.isinf(arr), false) assert_equal(np.isnan(arr), neg) @@ -2552,6 +2618,160 @@ def test_limit_str_roundtrip(self, time_unit, sign): limit_via_str = np.datetime64(str(limit), time_unit) assert limit_via_str == limit + def test_datetime_hash_nat(self): + nat1 = np.datetime64() + nat2 = np.datetime64() + assert nat1 is not nat2 + assert nat1 != nat2 + assert hash(nat1) != hash(nat2) + + @pytest.mark.parametrize('unit', ('Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us')) + def test_datetime_hash_weeks(self, unit): + dt = np.datetime64(2348, 'W') # 2015-01-01 + dt2 = np.datetime64(dt, unit) + _assert_equal_hash(dt, dt2) + + dt3 = np.datetime64(int(dt2.astype(int)) + 1, unit) + assert hash(dt) != hash(dt3) # doesn't collide + + @pytest.mark.parametrize('unit', ('h', 'm', 's', 'ms', 'us')) + def test_datetime_hash_weeks_vs_pydatetime(self, unit): + dt = np.datetime64(2348, 'W') # 2015-01-01 + dt2 = np.datetime64(dt, unit) + pydt = dt2.astype(datetime.datetime) + assert isinstance(pydt, datetime.datetime) + _assert_equal_hash(pydt, dt2) + + @pytest.mark.parametrize('unit', ('Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us')) + def test_datetime_hash_big_negative(self, unit): + dt = np.datetime64(-102894, 'W') # -002-01-01 + dt2 = np.datetime64(dt, unit) + _assert_equal_hash(dt, dt2) + + # can only go down to "fs" before integer overflow + @pytest.mark.parametrize('unit', ('m', 's', 'ms', 'us', 'ns', 'ps', 'fs')) + def test_datetime_hash_minutes(self, unit): + dt = np.datetime64(3, 'm') + dt2 = np.datetime64(dt, unit) + _assert_equal_hash(dt, dt2) + + @pytest.mark.parametrize('unit', ('ns', 'ps', 'fs', 'as')) + def test_datetime_hash_ns(self, unit): + dt = np.datetime64(3, 'ns') + dt2 = np.datetime64(dt, unit) + _assert_equal_hash(dt, dt2) + + dt3 = np.datetime64(int(dt2.astype(int)) + 1, unit) + assert hash(dt) != hash(dt3) # doesn't collide + + @pytest.mark.parametrize('wk', range(500000, 500010)) # 11552-09-04 + 
@pytest.mark.parametrize('unit', ('W', 'D', 'h', 'm', 's', 'ms', 'us')) + def test_datetime_hash_big_positive(self, wk, unit): + dt = np.datetime64(wk, 'W') + dt2 = np.datetime64(dt, unit) + _assert_equal_hash(dt, dt2) + + def test_timedelta_hash_generic(self): + assert_raises(ValueError, hash, np.timedelta64(123)) # generic + + @pytest.mark.parametrize('unit', ('Y', 'M')) + def test_timedelta_hash_year_month(self, unit): + td = np.timedelta64(45, 'Y') + td2 = np.timedelta64(td, unit) + _assert_equal_hash(td, td2) + + @pytest.mark.parametrize('unit', ('W', 'D', 'h', 'm', 's', 'ms', 'us')) + def test_timedelta_hash_weeks(self, unit): + td = np.timedelta64(10, 'W') + td2 = np.timedelta64(td, unit) + _assert_equal_hash(td, td2) + + td3 = np.timedelta64(int(td2.astype(int)) + 1, unit) + assert hash(td) != hash(td3) # doesn't collide + + @pytest.mark.parametrize('unit', ('W', 'D', 'h', 'm', 's', 'ms', 'us')) + def test_timedelta_hash_weeks_vs_pydelta(self, unit): + td = np.timedelta64(10, 'W') + td2 = np.timedelta64(td, unit) + pytd = td2.astype(datetime.timedelta) + assert isinstance(pytd, datetime.timedelta) + _assert_equal_hash(pytd, td2) + + @pytest.mark.parametrize('unit', ('ms', 'us', 'ns', 'ps', 'fs', 'as')) + def test_timedelta_hash_ms(self, unit): + td = np.timedelta64(3, 'ms') + td2 = np.timedelta64(td, unit) + _assert_equal_hash(td, td2) + + td3 = np.timedelta64(int(td2.astype(int)) + 1, unit) + assert hash(td) != hash(td3) # doesn't collide + + @pytest.mark.parametrize('wk', range(500000, 500010)) + @pytest.mark.parametrize('unit', ('W', 'D', 'h', 'm', 's', 'ms', 'us')) + def test_timedelta_hash_big_positive(self, wk, unit): + td = np.timedelta64(wk, 'W') + td2 = np.timedelta64(td, unit) + _assert_equal_hash(td, td2) + + @pytest.mark.parametrize( + "inputs, divisor, expected", + [ + ( + np.array( + [datetime.timedelta(seconds=20), datetime.timedelta(days=2)], + dtype="object", + ), + np.int64(2), + np.array( + [datetime.timedelta(seconds=10), 
datetime.timedelta(days=1)], + dtype="object", + ), + ), + ( + np.array( + [datetime.timedelta(seconds=20), datetime.timedelta(days=2)], + dtype="object", + ), + np.timedelta64(2, "s"), + np.array( + [10.0, 24.0 * 60.0 * 60.0], + dtype="object", + ), + ), + ( + datetime.timedelta(seconds=2), + np.array( + [datetime.timedelta(seconds=20), datetime.timedelta(days=2)], + dtype="object", + ), + np.array( + [1.0 / 10.0, 1.0 / (24.0 * 60.0 * 60.0)], + dtype="object", + ), + ), + ], + ) + def test_true_divide_object_by_timedelta( + self, + inputs: np.ndarray | type[np.generic], + divisor: np.ndarray | type[np.generic], + expected: np.ndarray, + ): + # gh-30025 + results = inputs / divisor + assert_array_equal(results, expected) + + @pytest.mark.parametrize( + "atol", [np.timedelta64(1, "s"), np.timedelta64(1, "ms")] + ) + def test_assert_all_close_with_timedelta_atol( + self, atol: np.timedelta64 | datetime.timedelta + ): + # gh-30382 + a = np.array([1, 2], dtype="m8[s]") + b = np.array([3, 4], dtype="m8[s]") + with pytest.raises(AssertionError): + np.testing.assert_allclose(a, b, atol=atol) class TestDateTimeData: diff --git a/numpy/_core/tests/test_defchararray.py b/numpy/_core/tests/test_defchararray.py index 6b688ab443a4..33db8747e0e6 100644 --- a/numpy/_core/tests/test_defchararray.py +++ b/numpy/_core/tests/test_defchararray.py @@ -3,13 +3,22 @@ import numpy as np from numpy._core.multiarray import _vec_string from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_raises, - assert_raises_regex - ) + assert_, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) kw_unicode_true = {'unicode': True} # make 2to3 work properly kw_unicode_false = {'unicode': False} +ignore_charray_deprecation = pytest.mark.filterwarnings( + r"ignore:\w+ (chararray|array|asarray) \w+:DeprecationWarning" +) + + +@ignore_charray_deprecation class TestBasic: def test_from_object_array(self): A = np.array([['abc', 2], @@ -131,123 +140,130 @@ 
def fail(): assert_raises(ValueError, fail) - +@ignore_charray_deprecation class TestWhitespace: - def setup_method(self): - self.A = np.array([['abc ', '123 '], - ['789 ', 'xyz ']]).view(np.char.chararray) - self.B = np.array([['abc', '123'], - ['789', 'xyz']]).view(np.char.chararray) - def test1(self): - assert_(np.all(self.A == self.B)) - assert_(np.all(self.A >= self.B)) - assert_(np.all(self.A <= self.B)) - assert_(not np.any(self.A > self.B)) - assert_(not np.any(self.A < self.B)) - assert_(not np.any(self.A != self.B)) - + A = np.array([['abc ', '123 '], + ['789 ', 'xyz ']]).view(np.char.chararray) + B = np.array([['abc', '123'], + ['789', 'xyz']]).view(np.char.chararray) + assert_(np.all(A == B)) + assert_(np.all(A >= B)) + assert_(np.all(A <= B)) + assert_(not np.any(A > B)) + assert_(not np.any(A < B)) + assert_(not np.any(A != B)) + +@ignore_charray_deprecation class TestChar: - def setup_method(self): - self.A = np.array('abc1', dtype='c').view(np.char.chararray) - def test_it(self): - assert_equal(self.A.shape, (4,)) - assert_equal(self.A.upper()[:2].tobytes(), b'AB') + A = np.array('abc1', dtype='c').view(np.char.chararray) + assert_equal(A.shape, (4,)) + assert_equal(A.upper()[:2].tobytes(), b'AB') +@ignore_charray_deprecation class TestComparisons: - def setup_method(self): - self.A = np.array([['abc', 'abcc', '123'], - ['789', 'abc', 'xyz']]).view(np.char.chararray) - self.B = np.array([['efg', 'efg', '123 '], - ['051', 'efgg', 'tuv']]).view(np.char.chararray) + def A(self): + return np.array([['abc', 'abcc', '123'], + ['789', 'abc', 'xyz']]).view(np.char.chararray) + + def B(self): + return np.array([['efg', 'efg', '123 '], + ['051', 'efgg', 'tuv']]).view(np.char.chararray) def test_not_equal(self): - assert_array_equal((self.A != self.B), + A, B = self.A(), self.B() + assert_array_equal((A != B), [[True, True, False], [True, True, True]]) def test_equal(self): - assert_array_equal((self.A == self.B), + A, B = self.A(), self.B() + 
assert_array_equal((A == B), [[False, False, True], [False, False, False]]) def test_greater_equal(self): - assert_array_equal((self.A >= self.B), + A, B = self.A(), self.B() + assert_array_equal((A >= B), [[False, False, True], [True, False, True]]) def test_less_equal(self): - assert_array_equal((self.A <= self.B), + A, B = self.A(), self.B() + assert_array_equal((A <= B), [[True, True, True], [False, True, False]]) def test_greater(self): - assert_array_equal((self.A > self.B), + A, B = self.A(), self.B() + assert_array_equal((A > B), [[False, False, False], [True, False, True]]) def test_less(self): - assert_array_equal((self.A < self.B), + A, B = self.A(), self.B() + assert_array_equal((A < B), [[True, True, False], [False, True, False]]) def test_type(self): - out1 = np.char.equal(self.A, self.B) + A, B = self.A(), self.B() + out1 = np.char.equal(A, B) out2 = np.char.equal('a', 'a') assert_(isinstance(out1, np.ndarray)) assert_(isinstance(out2, np.ndarray)) +@ignore_charray_deprecation class TestComparisonsMixed1(TestComparisons): """Ticket #1276""" - def setup_method(self): - TestComparisons.setup_method(self) - self.B = np.array( + def B(self): + return np.array( [['efg', 'efg', '123 '], ['051', 'efgg', 'tuv']], np.str_).view(np.char.chararray) +@ignore_charray_deprecation class TestComparisonsMixed2(TestComparisons): """Ticket #1276""" - def setup_method(self): - TestComparisons.setup_method(self) - self.A = np.array( + def A(self): + return np.array( [['abc', 'abcc', '123'], ['789', 'abc', 'xyz']], np.str_).view(np.char.chararray) +@ignore_charray_deprecation class TestInformation: - def setup_method(self): - self.A = np.array([[' abc ', ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']]) \ - .view(np.char.chararray) - self.B = np.array([[' \u03a3 ', ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']]) \ - .view(np.char.chararray) - # Array with longer strings, > MEMCHR_CUT_OFF in code. 
- self.C = (np.array(['ABCDEFGHIJKLMNOPQRSTUVWXYZ', - '01234567890123456789012345']) - .view(np.char.chararray)) + def A(self): + return np.array([[' abc ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']]) \ + .view(np.char.chararray) + + def B(self): + return np.array([[' \u03a3 ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']]) \ + .view(np.char.chararray) def test_len(self): - assert_(issubclass(np.char.str_len(self.A).dtype.type, np.integer)) - assert_array_equal(np.char.str_len(self.A), [[5, 0], [5, 9], [12, 5]]) - assert_array_equal(np.char.str_len(self.B), [[3, 0], [5, 9], [12, 5]]) + A, B = self.A(), self.B() + assert_(issubclass(np.char.str_len(A).dtype.type, np.integer)) + assert_array_equal(np.char.str_len(A), [[5, 0], [5, 9], [12, 5]]) + assert_array_equal(np.char.str_len(B), [[3, 0], [5, 9], [12, 5]]) def test_count(self): - assert_(issubclass(self.A.count('').dtype.type, np.integer)) - assert_array_equal(self.A.count('a'), [[1, 0], [0, 1], [0, 0]]) - assert_array_equal(self.A.count('123'), [[0, 0], [1, 0], [1, 0]]) + A, B = self.A(), self.B() + assert_(issubclass(A.count('').dtype.type, np.integer)) + assert_array_equal(A.count('a'), [[1, 0], [0, 1], [0, 0]]) + assert_array_equal(A.count('123'), [[0, 0], [1, 0], [1, 0]]) # Python doesn't seem to like counting NULL characters - # assert_array_equal(self.A.count('\0'), [[0, 0], [0, 0], [1, 0]]) - assert_array_equal(self.A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]]) - assert_array_equal(self.B.count('a'), [[0, 0], [0, 1], [0, 0]]) - assert_array_equal(self.B.count('123'), [[0, 0], [1, 0], [1, 0]]) - # assert_array_equal(self.B.count('\0'), [[0, 0], [0, 0], [1, 0]]) + assert_array_equal(A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]]) + assert_array_equal(B.count('a'), [[0, 0], [0, 1], [0, 0]]) + assert_array_equal(B.count('123'), [[0, 0], [1, 0], [1, 0]]) def test_endswith(self): - assert_(issubclass(self.A.endswith('').dtype.type, np.bool)) - 
assert_array_equal(self.A.endswith(' '), [[1, 0], [0, 0], [1, 0]]) - assert_array_equal(self.A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]]) + A = self.A() + assert_(issubclass(A.endswith('').dtype.type, np.bool)) + assert_array_equal(A.endswith(' '), [[1, 0], [0, 0], [1, 0]]) + assert_array_equal(A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]]) def fail(): - self.A.endswith('3', 'fdjk') + A.endswith('3', 'fdjk') assert_raises(TypeError, fail) @@ -257,7 +273,7 @@ def fail(): ("S", lambda x: x.encode('ascii')), ]) def test_find(self, dtype, encode): - A = self.A.astype(dtype) + A = self.A().astype(dtype) assert_(issubclass(A.find(encode('a')).dtype.type, np.integer)) assert_array_equal(A.find(encode('a')), [[1, -1], [-1, 6], [-1, -1]]) @@ -267,103 +283,126 @@ def test_find(self, dtype, encode): [[1, -1], [-1, -1], [-1, -1]]) assert_array_equal(A.find([encode('1'), encode('P')]), [[-1, -1], [0, -1], [0, 1]]) - C = self.C.astype(dtype) + C = (np.array(['ABCDEFGHIJKLMNOPQRSTUVWXYZ', + '01234567890123456789012345']) + .view(np.char.chararray)).astype(dtype) assert_array_equal(C.find(encode('M')), [12, -1]) def test_index(self): + A = self.A() def fail(): - self.A.index('a') + A.index('a') assert_raises(ValueError, fail) assert_(np.char.index('abcba', 'b') == 1) assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer)) def test_isalnum(self): - assert_(issubclass(self.A.isalnum().dtype.type, np.bool)) - assert_array_equal(self.A.isalnum(), [[False, False], [True, True], [False, True]]) + A = self.A() + assert_(issubclass(A.isalnum().dtype.type, np.bool)) + assert_array_equal(A.isalnum(), [[False, False], [True, True], [False, True]]) def test_isalpha(self): - assert_(issubclass(self.A.isalpha().dtype.type, np.bool)) - assert_array_equal(self.A.isalpha(), [[False, False], [False, True], [False, True]]) + A = self.A() + assert_(issubclass(A.isalpha().dtype.type, np.bool)) + assert_array_equal(A.isalpha(), [[False, False], [False, True], [False, True]]) def 
test_isdigit(self): - assert_(issubclass(self.A.isdigit().dtype.type, np.bool)) - assert_array_equal(self.A.isdigit(), [[False, False], [True, False], [False, False]]) + A = self.A() + assert_(issubclass(A.isdigit().dtype.type, np.bool)) + assert_array_equal(A.isdigit(), [[False, False], [True, False], [False, False]]) def test_islower(self): - assert_(issubclass(self.A.islower().dtype.type, np.bool)) - assert_array_equal(self.A.islower(), [[True, False], [False, False], [False, False]]) + A = self.A() + assert_(issubclass(A.islower().dtype.type, np.bool)) + assert_array_equal(A.islower(), [[True, False], [False, False], [False, False]]) def test_isspace(self): - assert_(issubclass(self.A.isspace().dtype.type, np.bool)) - assert_array_equal(self.A.isspace(), [[False, False], [False, False], [False, False]]) + A = self.A() + assert_(issubclass(A.isspace().dtype.type, np.bool)) + assert_array_equal( + A.isspace(), + [[False, False], [False, False], [False, False]], + ) def test_istitle(self): - assert_(issubclass(self.A.istitle().dtype.type, np.bool)) - assert_array_equal(self.A.istitle(), [[False, False], [False, False], [False, False]]) + A = self.A() + assert_(issubclass(A.istitle().dtype.type, np.bool)) + assert_array_equal( + A.istitle(), + [[False, False], [False, False], [False, False]], + ) def test_isupper(self): - assert_(issubclass(self.A.isupper().dtype.type, np.bool)) - assert_array_equal(self.A.isupper(), [[False, False], [False, False], [False, True]]) + A = self.A() + assert_(issubclass(A.isupper().dtype.type, np.bool)) + assert_array_equal(A.isupper(), [[False, False], [False, False], [False, True]]) def test_rfind(self): - assert_(issubclass(self.A.rfind('a').dtype.type, np.integer)) - assert_array_equal(self.A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]]) - assert_array_equal(self.A.rfind('3'), [[-1, -1], [2, -1], [6, -1]]) - assert_array_equal(self.A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]]) - assert_array_equal(self.A.rfind(['1', 'P']), [[-1, 
-1], [0, -1], [0, 2]]) + A = self.A() + assert_(issubclass(A.rfind('a').dtype.type, np.integer)) + assert_array_equal(A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]]) + assert_array_equal(A.rfind('3'), [[-1, -1], [2, -1], [6, -1]]) + assert_array_equal(A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]]) + assert_array_equal(A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]]) def test_rindex(self): + A = self.A() def fail(): - self.A.rindex('a') + A.rindex('a') assert_raises(ValueError, fail) assert_(np.char.rindex('abcba', 'b') == 3) assert_(issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer)) def test_startswith(self): - assert_(issubclass(self.A.startswith('').dtype.type, np.bool)) - assert_array_equal(self.A.startswith(' '), [[1, 0], [0, 0], [0, 0]]) - assert_array_equal(self.A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]]) + A = self.A() + assert_(issubclass(A.startswith('').dtype.type, np.bool)) + assert_array_equal(A.startswith(' '), [[1, 0], [0, 0], [0, 0]]) + assert_array_equal(A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]]) def fail(): - self.A.startswith('3', 'fdjk') + A.startswith('3', 'fdjk') assert_raises(TypeError, fail) - +@ignore_charray_deprecation class TestMethods: - def setup_method(self): - self.A = np.array([[' abc ', ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']], - dtype='S').view(np.char.chararray) - self.B = np.array([[' \u03a3 ', ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']]).view( - np.char.chararray) + def A(self): + return np.array([[' abc ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']], + dtype='S').view(np.char.chararray) + + def B(self): + return np.array([[' \u03a3 ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']]) \ + .view(np.char.chararray) def test_capitalize(self): + A, B = self.A(), self.B() tgt = [[b' abc ', b''], [b'12345', b'Mixedcase'], [b'123 \t 345 \0 ', b'Upper']] - assert_(issubclass(self.A.capitalize().dtype.type, np.bytes_)) - 
assert_array_equal(self.A.capitalize(), tgt) + assert_(issubclass(A.capitalize().dtype.type, np.bytes_)) + assert_array_equal(A.capitalize(), tgt) tgt = [[' \u03c3 ', ''], ['12345', 'Mixedcase'], ['123 \t 345 \0 ', 'Upper']] - assert_(issubclass(self.B.capitalize().dtype.type, np.str_)) - assert_array_equal(self.B.capitalize(), tgt) + assert_(issubclass(B.capitalize().dtype.type, np.str_)) + assert_array_equal(B.capitalize(), tgt) def test_center(self): - assert_(issubclass(self.A.center(10).dtype.type, np.bytes_)) - C = self.A.center([10, 20]) + A = self.A() + assert_(issubclass(A.center(10).dtype.type, np.bytes_)) + C = A.center([10, 20]) assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) - C = self.A.center(20, b'#') + C = A.center(20, b'#') assert_(np.all(C.startswith(b'#'))) assert_(np.all(C.endswith(b'#'))) @@ -378,17 +417,17 @@ def test_decode(self): assert_(A.decode('unicode-escape')[0] == '\u03a3') def test_encode(self): - B = self.B.encode('unicode_escape') - assert_(B[0][0] == str(' \\u03a3 ').encode('latin1')) + B = self.B().encode('unicode_escape') + assert_(B[0][0] == ' \\u03a3 '.encode('latin1')) def test_expandtabs(self): - T = self.A.expandtabs() + T = self.A().expandtabs() assert_(T[2, 0] == b'123 345 \0') def test_join(self): # NOTE: list(b'123') == [49, 50, 51] # so that b','.join(b'123') results to an error on Py3 - A0 = self.A.decode('ascii') + A0 = self.A().decode('ascii') A = np.char.join([',', '#'], A0) assert_(issubclass(A.dtype.type, np.str_)) @@ -398,12 +437,13 @@ def test_join(self): assert_array_equal(np.char.join([',', '#'], A0), tgt) def test_ljust(self): - assert_(issubclass(self.A.ljust(10).dtype.type, np.bytes_)) + A = self.A() + assert_(issubclass(A.ljust(10).dtype.type, np.bytes_)) - C = self.A.ljust([10, 20]) + C = A.ljust([10, 20]) assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) - C = self.A.ljust(20, b'#') + C = A.ljust(20, b'#') assert_array_equal(C.startswith(b'#'), [ [False, 
True], [False, False], [False, False]]) assert_(np.all(C.endswith(b'#'))) @@ -415,38 +455,41 @@ def test_ljust(self): assert_array_equal(C, tgt) def test_lower(self): + A, B = self.A(), self.B() tgt = [[b' abc ', b''], [b'12345', b'mixedcase'], [b'123 \t 345 \0 ', b'upper']] - assert_(issubclass(self.A.lower().dtype.type, np.bytes_)) - assert_array_equal(self.A.lower(), tgt) + assert_(issubclass(A.lower().dtype.type, np.bytes_)) + assert_array_equal(A.lower(), tgt) tgt = [[' \u03c3 ', ''], ['12345', 'mixedcase'], ['123 \t 345 \0 ', 'upper']] - assert_(issubclass(self.B.lower().dtype.type, np.str_)) - assert_array_equal(self.B.lower(), tgt) + assert_(issubclass(B.lower().dtype.type, np.str_)) + assert_array_equal(B.lower(), tgt) def test_lstrip(self): + A, B = self.A(), self.B() tgt = [[b'abc ', b''], [b'12345', b'MixedCase'], [b'123 \t 345 \0 ', b'UPPER']] - assert_(issubclass(self.A.lstrip().dtype.type, np.bytes_)) - assert_array_equal(self.A.lstrip(), tgt) + assert_(issubclass(A.lstrip().dtype.type, np.bytes_)) + assert_array_equal(A.lstrip(), tgt) tgt = [[b' abc', b''], [b'2345', b'ixedCase'], [b'23 \t 345 \x00', b'UPPER']] - assert_array_equal(self.A.lstrip([b'1', b'M']), tgt) + assert_array_equal(A.lstrip([b'1', b'M']), tgt) tgt = [['\u03a3 ', ''], ['12345', 'MixedCase'], ['123 \t 345 \0 ', 'UPPER']] - assert_(issubclass(self.B.lstrip().dtype.type, np.str_)) - assert_array_equal(self.B.lstrip(), tgt) + assert_(issubclass(B.lstrip().dtype.type, np.str_)) + assert_array_equal(B.lstrip(), tgt) def test_partition(self): - P = self.A.partition([b'3', b'M']) + A = self.A() + P = A.partition([b'3', b'M']) tgt = [[(b' abc ', b'', b''), (b'', b'', b'')], [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')], [(b'12', b'3', b' \t 345 \0 '), (b'UPPER', b'', b'')]] @@ -454,7 +497,8 @@ def test_partition(self): assert_array_equal(P, tgt) def test_replace(self): - R = self.A.replace([b'3', b'a'], + A = self.A() + R = A.replace([b'3', b'a'], [b'##########', b'@']) tgt = [[b' abc 
', b''], [b'12##########45', b'MixedC@se'], @@ -463,34 +507,34 @@ def test_replace(self): assert_array_equal(R, tgt) # Test special cases that should just return the input array, # since replacements are not possible or do nothing. - S1 = self.A.replace(b'A very long byte string, longer than A', b'') - assert_array_equal(S1, self.A) - S2 = self.A.replace(b'', b'') - assert_array_equal(S2, self.A) - S3 = self.A.replace(b'3', b'3') - assert_array_equal(S3, self.A) - S4 = self.A.replace(b'3', b'', count=0) - assert_array_equal(S4, self.A) + S1 = A.replace(b'A very long byte string, longer than A', b'') + assert_array_equal(S1, A) + S2 = A.replace(b'', b'') + assert_array_equal(S2, A) + S3 = A.replace(b'3', b'3') + assert_array_equal(S3, A) + S4 = A.replace(b'3', b'', count=0) + assert_array_equal(S4, A) def test_replace_count_and_size(self): a = np.array(['0123456789' * i for i in range(4)] ).view(np.char.chararray) r1 = a.replace('5', 'ABCDE') - assert r1.dtype.itemsize == (3*10 + 3*4) * 4 + assert r1.dtype.itemsize == (3 * 10 + 3 * 4) * 4 assert_array_equal(r1, np.array(['01234ABCDE6789' * i for i in range(4)])) r2 = a.replace('5', 'ABCDE', count=1) - assert r2.dtype.itemsize == (3*10 + 4) * 4 + assert r2.dtype.itemsize == (3 * 10 + 4) * 4 r3 = a.replace('5', 'ABCDE', count=0) assert r3.dtype.itemsize == a.dtype.itemsize assert_array_equal(r3, a) # Negative values mean to replace all. r4 = a.replace('5', 'ABCDE', count=-1) - assert r4.dtype.itemsize == (3*10 + 3*4) * 4 + assert r4.dtype.itemsize == (3 * 10 + 3 * 4) * 4 assert_array_equal(r4, r1) # We can do count on an element-by-element basis. 
r5 = a.replace('5', 'ABCDE', count=[-1, -1, -1, 1]) - assert r5.dtype.itemsize == (3*10 + 4) * 4 + assert r5.dtype.itemsize == (3 * 10 + 4) * 4 assert_array_equal(r5, np.array( ['01234ABCDE6789' * i for i in range(3)] + ['01234ABCDE6789' + '0123456789' * 2])) @@ -507,12 +551,13 @@ def test_replace_broadcasting(self): assert_array_equal(r3, np.array(['X,X,X', 'X,0', 'X'])) def test_rjust(self): - assert_(issubclass(self.A.rjust(10).dtype.type, np.bytes_)) + A = self.A() + assert_(issubclass(A.rjust(10).dtype.type, np.bytes_)) - C = self.A.rjust([10, 20]) + C = A.rjust([10, 20]) assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) - C = self.A.rjust(20, b'#') + C = A.rjust(20, b'#') assert_(np.all(C.startswith(b'#'))) assert_array_equal(C.endswith(b'#'), [[False, True], [False, False], [False, False]]) @@ -524,7 +569,8 @@ def test_rjust(self): assert_array_equal(C, tgt) def test_rpartition(self): - P = self.A.rpartition([b'3', b'M']) + A = self.A() + P = A.rpartition([b'3', b'M']) tgt = [[(b'', b'', b' abc '), (b'', b'', b'')], [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')], [(b'123 \t ', b'3', b'45 \0 '), (b'', b'', b'UPPER')]] @@ -532,7 +578,7 @@ def test_rpartition(self): assert_array_equal(P, tgt) def test_rsplit(self): - A = self.A.rsplit(b'3') + A = self.A().rsplit(b'3') tgt = [[[b' abc '], [b'']], [[b'12', b'45'], [b'MixedCase']], [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]] @@ -540,45 +586,47 @@ def test_rsplit(self): assert_equal(A.tolist(), tgt) def test_rstrip(self): - assert_(issubclass(self.A.rstrip().dtype.type, np.bytes_)) + A, B = self.A(), self.B() + assert_(issubclass(A.rstrip().dtype.type, np.bytes_)) tgt = [[b' abc', b''], [b'12345', b'MixedCase'], [b'123 \t 345', b'UPPER']] - assert_array_equal(self.A.rstrip(), tgt) + assert_array_equal(A.rstrip(), tgt) tgt = [[b' abc ', b''], [b'1234', b'MixedCase'], [b'123 \t 345 \x00', b'UPP'] ] - assert_array_equal(self.A.rstrip([b'5', b'ER']), tgt) + assert_array_equal(A.rstrip([b'5', 
b'ER']), tgt) tgt = [[' \u03a3', ''], ['12345', 'MixedCase'], ['123 \t 345', 'UPPER']] - assert_(issubclass(self.B.rstrip().dtype.type, np.str_)) - assert_array_equal(self.B.rstrip(), tgt) + assert_(issubclass(B.rstrip().dtype.type, np.str_)) + assert_array_equal(B.rstrip(), tgt) def test_strip(self): + A, B = self.A(), self.B() tgt = [[b'abc', b''], [b'12345', b'MixedCase'], [b'123 \t 345', b'UPPER']] - assert_(issubclass(self.A.strip().dtype.type, np.bytes_)) - assert_array_equal(self.A.strip(), tgt) + assert_(issubclass(A.strip().dtype.type, np.bytes_)) + assert_array_equal(A.strip(), tgt) tgt = [[b' abc ', b''], [b'234', b'ixedCas'], [b'23 \t 345 \x00', b'UPP']] - assert_array_equal(self.A.strip([b'15', b'EReM']), tgt) + assert_array_equal(A.strip([b'15', b'EReM']), tgt) tgt = [['\u03a3', ''], ['12345', 'MixedCase'], ['123 \t 345', 'UPPER']] - assert_(issubclass(self.B.strip().dtype.type, np.str_)) - assert_array_equal(self.B.strip(), tgt) + assert_(issubclass(B.strip().dtype.type, np.str_)) + assert_array_equal(B.strip(), tgt) def test_split(self): - A = self.A.split(b'3') + A = self.A().split(b'3') tgt = [ [[b' abc '], [b'']], [[b'12', b'45'], [b'MixedCase']], @@ -593,102 +641,116 @@ def test_splitlines(self): assert_(len(A[0]) == 3) def test_swapcase(self): + A, B = self.A(), self.B() tgt = [[b' ABC ', b''], [b'12345', b'mIXEDcASE'], [b'123 \t 345 \0 ', b'upper']] - assert_(issubclass(self.A.swapcase().dtype.type, np.bytes_)) - assert_array_equal(self.A.swapcase(), tgt) + assert_(issubclass(A.swapcase().dtype.type, np.bytes_)) + assert_array_equal(A.swapcase(), tgt) tgt = [[' \u03c3 ', ''], ['12345', 'mIXEDcASE'], ['123 \t 345 \0 ', 'upper']] - assert_(issubclass(self.B.swapcase().dtype.type, np.str_)) - assert_array_equal(self.B.swapcase(), tgt) + assert_(issubclass(B.swapcase().dtype.type, np.str_)) + assert_array_equal(B.swapcase(), tgt) def test_title(self): + A, B = self.A(), self.B() tgt = [[b' Abc ', b''], [b'12345', b'Mixedcase'], [b'123 \t 345 \0 ', 
b'Upper']] - assert_(issubclass(self.A.title().dtype.type, np.bytes_)) - assert_array_equal(self.A.title(), tgt) + assert_(issubclass(A.title().dtype.type, np.bytes_)) + assert_array_equal(A.title(), tgt) tgt = [[' \u03a3 ', ''], ['12345', 'Mixedcase'], ['123 \t 345 \0 ', 'Upper']] - assert_(issubclass(self.B.title().dtype.type, np.str_)) - assert_array_equal(self.B.title(), tgt) + assert_(issubclass(B.title().dtype.type, np.str_)) + assert_array_equal(B.title(), tgt) def test_upper(self): + A, B = self.A(), self.B() tgt = [[b' ABC ', b''], [b'12345', b'MIXEDCASE'], [b'123 \t 345 \0 ', b'UPPER']] - assert_(issubclass(self.A.upper().dtype.type, np.bytes_)) - assert_array_equal(self.A.upper(), tgt) + assert_(issubclass(A.upper().dtype.type, np.bytes_)) + assert_array_equal(A.upper(), tgt) tgt = [[' \u03a3 ', ''], ['12345', 'MIXEDCASE'], ['123 \t 345 \0 ', 'UPPER']] - assert_(issubclass(self.B.upper().dtype.type, np.str_)) - assert_array_equal(self.B.upper(), tgt) + assert_(issubclass(B.upper().dtype.type, np.str_)) + assert_array_equal(B.upper(), tgt) def test_isnumeric(self): + A, B = self.A(), self.B() def fail(): - self.A.isnumeric() + A.isnumeric() assert_raises(TypeError, fail) - assert_(issubclass(self.B.isnumeric().dtype.type, np.bool)) - assert_array_equal(self.B.isnumeric(), [ + assert_(issubclass(B.isnumeric().dtype.type, np.bool)) + assert_array_equal(B.isnumeric(), [ [False, False], [True, False], [False, False]]) def test_isdecimal(self): + A, B = self.A(), self.B() def fail(): - self.A.isdecimal() + A.isdecimal() assert_raises(TypeError, fail) - assert_(issubclass(self.B.isdecimal().dtype.type, np.bool)) - assert_array_equal(self.B.isdecimal(), [ + assert_(issubclass(B.isdecimal().dtype.type, np.bool)) + assert_array_equal(B.isdecimal(), [ [False, False], [True, False], [False, False]]) - +@ignore_charray_deprecation class TestOperations: - def setup_method(self): - self.A = np.array([['abc', '123'], - ['789', 'xyz']]).view(np.char.chararray) - self.B = 
np.array([['efg', '456'], - ['051', 'tuv']]).view(np.char.chararray) + def A(self): + return np.array([['abc', '123'], + ['789', 'xyz']]).view(np.char.chararray) + + def B(self): + return np.array([['efg', '456'], + ['051', 'tuv']]).view(np.char.chararray) + + def test_argsort(self): + arr = np.array(['abc'] * 4).view(np.char.chararray) + actual = arr.argsort(stable=True) + assert_array_equal(actual, [0, 1, 2, 3]) def test_add(self): + A, B = self.A(), self.B() AB = np.array([['abcefg', '123456'], ['789051', 'xyztuv']]).view(np.char.chararray) - assert_array_equal(AB, (self.A + self.B)) - assert_(len((self.A + self.B)[0][0]) == 6) + assert_array_equal(AB, (A + B)) + assert_(len((A + B)[0][0]) == 6) def test_radd(self): + A = self.A() QA = np.array([['qabc', 'q123'], ['q789', 'qxyz']]).view(np.char.chararray) - assert_array_equal(QA, ('q' + self.A)) + assert_array_equal(QA, ('q' + A)) def test_mul(self): - A = self.A + A = self.A() for r in (2, 3, 5, 7, 197): - Ar = np.array([[A[0, 0]*r, A[0, 1]*r], - [A[1, 0]*r, A[1, 1]*r]]).view(np.char.chararray) + Ar = np.array([[A[0, 0] * r, A[0, 1] * r], + [A[1, 0] * r, A[1, 1] * r]]).view(np.char.chararray) - assert_array_equal(Ar, (self.A * r)) + assert_array_equal(Ar, (A * r)) for ob in [object(), 'qrs']: with assert_raises_regex(ValueError, 'Can only multiply by integers'): - A*ob + A * ob def test_rmul(self): - A = self.A + A = self.A() for r in (2, 3, 5, 7, 197): - Ar = np.array([[A[0, 0]*r, A[0, 1]*r], - [A[1, 0]*r, A[1, 1]*r]]).view(np.char.chararray) - assert_array_equal(Ar, (r * self.A)) + Ar = np.array([[A[0, 0] * r, A[0, 1] * r], + [A[1, 0] * r, A[1, 1] * r]]).view(np.char.chararray) + assert_array_equal(Ar, (r * A)) for ob in [object(), 'qrs']: with assert_raises_regex(ValueError, @@ -713,13 +775,14 @@ def test_mod(self): assert_array_equal(A2, (A % [[1, 2], [3, 4]])) def test_rmod(self): - assert_(("%s" % self.A) == str(self.A)) - assert_(("%r" % self.A) == repr(self.A)) + A = self.A() + assert_(f"{A}" == str(A)) 
+ assert_(f"{A!r}" == repr(A)) for ob in [42, object()]: with assert_raises_regex( TypeError, "unsupported operand type.* and 'chararray'"): - ob % self.A + ob % A def test_slice(self): """Regression test for https://github.com/numpy/numpy/issues/5982""" @@ -748,27 +811,21 @@ def test_getitem_length_zero_item(self, data): # or does not have length 0. assert_equal(a[1], a.dtype.type()) - class TestMethodsEmptyArray: - def setup_method(self): - self.U = np.array([], dtype='U') - self.S = np.array([], dtype='S') - def test_encode(self): - res = np.char.encode(self.U) + res = np.char.encode(np.array([], dtype='U')) assert_array_equal(res, []) assert_(res.dtype.char == 'S') def test_decode(self): - res = np.char.decode(self.S) + res = np.char.decode(np.array([], dtype='S')) assert_array_equal(res, []) assert_(res.dtype.char == 'U') def test_decode_with_reshape(self): - res = np.char.decode(self.S.reshape((1, 0, 1))) + res = np.char.decode(np.array([], dtype='S').reshape((1, 0, 1))) assert_(res.shape == (1, 0, 1)) - class TestMethodsScalarValues: def test_mod(self): A = np.array([[' abc ', ''], @@ -813,7 +870,7 @@ def test_replace(self): assert_equal(np.char.replace('Python is good', 'good', 'great'), 'Python is great') - +@ignore_charray_deprecation def test_empty_indexing(): """Regression test for ticket 1948.""" # Check that indexing a chararray with an empty list/array returns an diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 648a1d22ea99..b7a2444fbbc0 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -3,27 +3,14 @@ to document how deprecations should eventually be turned into errors. 
""" -import datetime -import operator +import contextlib import warnings + import pytest -import tempfile -import re -import sys import numpy as np -from numpy.testing import ( - assert_raises, assert_warns, assert_, assert_array_equal, SkipTest, - KnownFailureException, break_cycles, temppath - ) - -from numpy._core._multiarray_tests import fromstring_null_term_c_api - -try: - import pytz - _has_pytz = True -except ImportError: - _has_pytz = False +from numpy._core._multiarray_tests import fromstring_null_term_c_api # noqa: F401 +from numpy.testing import assert_raises class _DeprecationTestCase: @@ -32,22 +19,20 @@ class _DeprecationTestCase: message = '' warning_cls = DeprecationWarning - def setup_method(self): - self.warn_ctx = warnings.catch_warnings(record=True) - self.log = self.warn_ctx.__enter__() - - # Do *not* ignore other DeprecationWarnings. Ignoring warnings - # can give very confusing results because of - # https://bugs.python.org/issue4180 and it is probably simplest to - # try to keep the tests cleanly giving only the right warning type. - # (While checking them set to "error" those are ignored anyway) - # We still have them show up, because otherwise they would be raised - warnings.filterwarnings("always", category=self.warning_cls) - warnings.filterwarnings("always", message=self.message, - category=self.warning_cls) - - def teardown_method(self): - self.warn_ctx.__exit__() + @contextlib.contextmanager + def filter_warnings(self): + with warnings.catch_warnings(record=True) as w: + # Do *not* ignore other DeprecationWarnings. Ignoring warnings + # can give very confusing results because of + # https://bugs.python.org/issue4180 and it is probably simplest to + # try to keep the tests cleanly giving only the right warning type. 
+ # (While checking them set to "error" those are ignored anyway) + # We still have them show up, because otherwise they would be raised + warnings.filterwarnings("always", category=self.warning_cls) + warnings.filterwarnings("always", message=self.message, + category=self.warning_cls) + yield w + return def assert_deprecated(self, function, num=1, ignore_others=False, function_fails=False, @@ -84,20 +69,20 @@ def assert_deprecated(self, function, num=1, ignore_others=False, """ __tracebackhide__ = True # Hide traceback for py.test - # reset the log - self.log[:] = [] - if exceptions is np._NoValue: exceptions = (self.warning_cls,) - try: - function(*args, **kwargs) - except (Exception if function_fails else tuple()): - pass + if function_fails: + context_manager = contextlib.suppress(Exception) + else: + context_manager = contextlib.nullcontext() + with context_manager: + with self.filter_warnings() as w_context: + function(*args, **kwargs) # just in case, clear the registry num_found = 0 - for warning in self.log: + for warning in w_context: if warning.category is self.warning_cls: num_found += 1 elif not ignore_others: @@ -105,8 +90,8 @@ def assert_deprecated(self, function, num=1, ignore_others=False, "expected %s but got: %s" % (self.warning_cls.__name__, warning.category)) if num is not None and num_found != num: - msg = "%i warnings found but %i expected." % (len(self.log), num) - lst = [str(w) for w in self.log] + msg = f"{len(w_context)} warnings found but {num} expected." 
+ lst = [str(w) for w in w_context] raise AssertionError("\n".join([msg] + lst)) with warnings.catch_warnings(): @@ -114,11 +99,11 @@ def assert_deprecated(self, function, num=1, ignore_others=False, category=self.warning_cls) try: function(*args, **kwargs) - if exceptions != tuple(): + if exceptions != (): raise AssertionError( "No error raised during function call") except exceptions: - if exceptions == tuple(): + if exceptions == (): raise AssertionError( "Error raised during function call") @@ -131,34 +116,16 @@ def assert_not_deprecated(self, function, args=(), kwargs={}): exceptions=tuple(), args=args, kwargs=kwargs) """ self.assert_deprecated(function, num=0, ignore_others=True, - exceptions=tuple(), args=args, kwargs=kwargs) + exceptions=(), args=args, kwargs=kwargs) class _VisibleDeprecationTestCase(_DeprecationTestCase): warning_cls = np.exceptions.VisibleDeprecationWarning -class TestDTypeAttributeIsDTypeDeprecation(_DeprecationTestCase): - # Deprecated 2021-01-05, NumPy 1.21 - message = r".*`.dtype` attribute" - - def test_deprecation_dtype_attribute_is_dtype(self): - class dt: - dtype = "f8" - - class vdt(np.void): - dtype = "f,f" - - self.assert_deprecated(lambda: np.dtype(dt)) - self.assert_deprecated(lambda: np.dtype(dt())) - self.assert_deprecated(lambda: np.dtype(vdt)) - self.assert_deprecated(lambda: np.dtype(vdt(1))) - - class TestTestDeprecated: def test_assert_deprecated(self): test_case_instance = _DeprecationTestCase() - test_case_instance.setup_method() assert_raises(AssertionError, test_case_instance.assert_deprecated, lambda: None) @@ -167,406 +134,16 @@ def foo(): warnings.warn("foo", category=DeprecationWarning, stacklevel=2) test_case_instance.assert_deprecated(foo) - test_case_instance.teardown_method() - - -class TestNonNumericConjugate(_DeprecationTestCase): - """ - Deprecate no-op behavior of ndarray.conjugate on non-numeric dtypes, - which conflicts with the error behavior of np.conjugate. 
- """ - def test_conjugate(self): - for a in np.array(5), np.array(5j): - self.assert_not_deprecated(a.conjugate) - for a in (np.array('s'), np.array('2016', 'M'), - np.array((1, 2), [('a', int), ('b', int)])): - self.assert_deprecated(a.conjugate) - - -class TestDatetimeEvent(_DeprecationTestCase): - # 2017-08-11, 1.14.0 - def test_3_tuple(self): - for cls in (np.datetime64, np.timedelta64): - # two valid uses - (unit, num) and (unit, num, den, None) - self.assert_not_deprecated(cls, args=(1, ('ms', 2))) - self.assert_not_deprecated(cls, args=(1, ('ms', 2, 1, None))) - - # trying to use the event argument, removed in 1.7.0, is deprecated - # it used to be a uint8 - self.assert_deprecated(cls, args=(1, ('ms', 2, 'event'))) - self.assert_deprecated(cls, args=(1, ('ms', 2, 63))) - self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 'event'))) - self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 63))) - - -class TestTruthTestingEmptyArrays(_DeprecationTestCase): - # 2017-09-25, 1.14.0 - message = '.*truth value of an empty array is ambiguous.*' - - def test_1d(self): - self.assert_deprecated(bool, args=(np.array([]),)) - - def test_2d(self): - self.assert_deprecated(bool, args=(np.zeros((1, 0)),)) - self.assert_deprecated(bool, args=(np.zeros((0, 1)),)) - self.assert_deprecated(bool, args=(np.zeros((0, 0)),)) - - -class TestBincount(_DeprecationTestCase): - # 2017-06-01, 1.14.0 - def test_bincount_minlength(self): - self.assert_deprecated(lambda: np.bincount([1, 2, 3], minlength=None)) - - - -class TestGeneratorSum(_DeprecationTestCase): - # 2018-02-25, 1.15.0 - def test_generator_sum(self): - self.assert_deprecated(np.sum, args=((i for i in range(5)),)) - - -class TestFromstring(_DeprecationTestCase): - # 2017-10-19, 1.14 - def test_fromstring(self): - self.assert_deprecated(np.fromstring, args=('\x00'*80,)) - - -class TestFromStringAndFileInvalidData(_DeprecationTestCase): - # 2019-06-08, 1.17.0 - # Tests should be moved to real tests when deprecation is done. 
- message = "string or file could not be read to its end" - - @pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"]) - def test_deprecate_unparsable_data_file(self, invalid_str): - x = np.array([1.51, 2, 3.51, 4], dtype=float) - - with tempfile.TemporaryFile(mode="w") as f: - x.tofile(f, sep=',', format='%.2f') - f.write(invalid_str) - - f.seek(0) - self.assert_deprecated(lambda: np.fromfile(f, sep=",")) - f.seek(0) - self.assert_deprecated(lambda: np.fromfile(f, sep=",", count=5)) - # Should not raise: - with warnings.catch_warnings(): - warnings.simplefilter("error", DeprecationWarning) - f.seek(0) - res = np.fromfile(f, sep=",", count=4) - assert_array_equal(res, x) - - @pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"]) - def test_deprecate_unparsable_string(self, invalid_str): - x = np.array([1.51, 2, 3.51, 4], dtype=float) - x_str = "1.51,2,3.51,4{}".format(invalid_str) - - self.assert_deprecated(lambda: np.fromstring(x_str, sep=",")) - self.assert_deprecated(lambda: np.fromstring(x_str, sep=",", count=5)) - - # The C-level API can use not fixed size, but 0 terminated strings, - # so test that as well: - bytestr = x_str.encode("ascii") - self.assert_deprecated(lambda: fromstring_null_term_c_api(bytestr)) - - with assert_warns(DeprecationWarning): - # this is slightly strange, in that fromstring leaves data - # potentially uninitialized (would be good to error when all is - # read, but count is larger then actual data maybe). - res = np.fromstring(x_str, sep=",", count=5) - assert_array_equal(res[:-1], x) - - with warnings.catch_warnings(): - warnings.simplefilter("error", DeprecationWarning) - - # Should not raise: - res = np.fromstring(x_str, sep=",", count=4) - assert_array_equal(res, x) - - -class TestToString(_DeprecationTestCase): - # 2020-03-06 1.19.0 - message = re.escape("tostring() is deprecated. 
Use tobytes() instead.") - - def test_tostring(self): - arr = np.array(list(b"test\xFF"), dtype=np.uint8) - self.assert_deprecated(arr.tostring) - - def test_tostring_matches_tobytes(self): - arr = np.array(list(b"test\xFF"), dtype=np.uint8) - b = arr.tobytes() - with assert_warns(DeprecationWarning): - s = arr.tostring() - assert s == b - - -class TestDTypeCoercion(_DeprecationTestCase): - # 2020-02-06 1.19.0 - message = "Converting .* to a dtype .*is deprecated" - deprecated_types = [ - # The builtin scalar super types: - np.generic, np.flexible, np.number, - np.inexact, np.floating, np.complexfloating, - np.integer, np.unsignedinteger, np.signedinteger, - # character is a deprecated S1 special case: - np.character, - ] - - def test_dtype_coercion(self): - for scalar_type in self.deprecated_types: - self.assert_deprecated(np.dtype, args=(scalar_type,)) - - def test_array_construction(self): - for scalar_type in self.deprecated_types: - self.assert_deprecated(np.array, args=([], scalar_type,)) - - def test_not_deprecated(self): - # All specific types are not deprecated: - for group in np._core.sctypes.values(): - for scalar_type in group: - self.assert_not_deprecated(np.dtype, args=(scalar_type,)) - - for scalar_type in [type, dict, list, tuple]: - # Typical python types are coerced to object currently: - self.assert_not_deprecated(np.dtype, args=(scalar_type,)) - - -class BuiltInRoundComplexDType(_DeprecationTestCase): - # 2020-03-31 1.19.0 - deprecated_types = [np.csingle, np.cdouble, np.clongdouble] - not_deprecated_types = [ - np.int8, np.int16, np.int32, np.int64, - np.uint8, np.uint16, np.uint32, np.uint64, - np.float16, np.float32, np.float64, - ] - - def test_deprecated(self): - for scalar_type in self.deprecated_types: - scalar = scalar_type(0) - self.assert_deprecated(round, args=(scalar,)) - self.assert_deprecated(round, args=(scalar, 0)) - self.assert_deprecated(round, args=(scalar,), kwargs={'ndigits': 0}) - - def test_not_deprecated(self): - for 
scalar_type in self.not_deprecated_types: - scalar = scalar_type(0) - self.assert_not_deprecated(round, args=(scalar,)) - self.assert_not_deprecated(round, args=(scalar, 0)) - self.assert_not_deprecated(round, args=(scalar,), kwargs={'ndigits': 0}) - - -class TestIncorrectAdvancedIndexWithEmptyResult(_DeprecationTestCase): - # 2020-05-27, NumPy 1.20.0 - message = "Out of bound index found. This was previously ignored.*" - - @pytest.mark.parametrize("index", [([3, 0],), ([0, 0], [3, 0])]) - def test_empty_subspace(self, index): - # Test for both a single and two/multiple advanced indices. These - # This will raise an IndexError in the future. - arr = np.ones((2, 2, 0)) - self.assert_deprecated(arr.__getitem__, args=(index,)) - self.assert_deprecated(arr.__setitem__, args=(index, 0.)) - - # for this array, the subspace is only empty after applying the slice - arr2 = np.ones((2, 2, 1)) - index2 = (slice(0, 0),) + index - self.assert_deprecated(arr2.__getitem__, args=(index2,)) - self.assert_deprecated(arr2.__setitem__, args=(index2, 0.)) - - def test_empty_index_broadcast_not_deprecated(self): - arr = np.ones((2, 2, 2)) - - index = ([[3], [2]], []) # broadcast to an empty result. 
- self.assert_not_deprecated(arr.__getitem__, args=(index,)) - self.assert_not_deprecated(arr.__setitem__, - args=(index, np.empty((2, 0, 2)))) - - -class TestNonExactMatchDeprecation(_DeprecationTestCase): - # 2020-04-22 - def test_non_exact_match(self): - arr = np.array([[3, 6, 6], [4, 5, 1]]) - # misspelt mode check - self.assert_deprecated(lambda: np.ravel_multi_index(arr, (7, 6), mode='Cilp')) - # using completely different word with first character as R - self.assert_deprecated(lambda: np.searchsorted(arr[0], 4, side='Random')) - - -class TestMatrixInOuter(_DeprecationTestCase): - # 2020-05-13 NumPy 1.20.0 - message = (r"add.outer\(\) was passed a numpy matrix as " - r"(first|second) argument.") - - def test_deprecated(self): - arr = np.array([1, 2, 3]) - m = np.array([1, 2, 3]).view(np.matrix) - self.assert_deprecated(np.add.outer, args=(m, m), num=2) - self.assert_deprecated(np.add.outer, args=(arr, m)) - self.assert_deprecated(np.add.outer, args=(m, arr)) - self.assert_not_deprecated(np.add.outer, args=(arr, arr)) - - -class FlatteningConcatenateUnsafeCast(_DeprecationTestCase): - # NumPy 1.20, 2020-09-03 - message = "concatenate with `axis=None` will use same-kind casting" - - def test_deprecated(self): - self.assert_deprecated(np.concatenate, - args=(([0.], [1.]),), - kwargs=dict(axis=None, out=np.empty(2, dtype=np.int64))) - - def test_not_deprecated(self): - self.assert_not_deprecated(np.concatenate, - args=(([0.], [1.]),), - kwargs={'axis': None, 'out': np.empty(2, dtype=np.int64), - 'casting': "unsafe"}) - - with assert_raises(TypeError): - # Tests should notice if the deprecation warning is given first... - np.concatenate(([0.], [1.]), out=np.empty(2, dtype=np.int64), - casting="same_kind") - - -class TestDeprecatedUnpickleObjectScalar(_DeprecationTestCase): - # Deprecated 2020-11-24, NumPy 1.20 - """ - Technically, it should be impossible to create numpy object scalars, - but there was an unpickle path that would in theory allow it. 
That - path is invalid and must lead to the warning. - """ - message = "Unpickling a scalar with object dtype is deprecated." - - def test_deprecated(self): - ctor = np._core.multiarray.scalar - self.assert_deprecated(lambda: ctor(np.dtype("O"), 1)) - - -class TestSingleElementSignature(_DeprecationTestCase): - # Deprecated 2021-04-01, NumPy 1.21 - message = r"The use of a length 1" - - def test_deprecated(self): - self.assert_deprecated(lambda: np.add(1, 2, signature="d")) - self.assert_deprecated(lambda: np.add(1, 2, sig=(np.dtype("l"),))) class TestCtypesGetter(_DeprecationTestCase): - # Deprecated 2021-05-18, Numpy 1.21.0 - warning_cls = DeprecationWarning ctypes = np.array([1]).ctypes - @pytest.mark.parametrize( - "name", ["get_data", "get_shape", "get_strides", "get_as_parameter"] - ) - def test_deprecated(self, name: str) -> None: - func = getattr(self.ctypes, name) - self.assert_deprecated(lambda: func()) - - @pytest.mark.parametrize( - "name", ["data", "shape", "strides", "_as_parameter_"] - ) + @pytest.mark.parametrize("name", ["data", "shape", "strides", "_as_parameter_"]) def test_not_deprecated(self, name: str) -> None: self.assert_not_deprecated(lambda: getattr(self.ctypes, name)) -PARTITION_DICT = { - "partition method": np.arange(10).partition, - "argpartition method": np.arange(10).argpartition, - "partition function": lambda kth: np.partition(np.arange(10), kth), - "argpartition function": lambda kth: np.argpartition(np.arange(10), kth), -} - - -@pytest.mark.parametrize("func", PARTITION_DICT.values(), ids=PARTITION_DICT) -class TestPartitionBoolIndex(_DeprecationTestCase): - # Deprecated 2021-09-29, NumPy 1.22 - warning_cls = DeprecationWarning - message = "Passing booleans as partition index is deprecated" - - def test_deprecated(self, func): - self.assert_deprecated(lambda: func(True)) - self.assert_deprecated(lambda: func([False, True])) - - def test_not_deprecated(self, func): - self.assert_not_deprecated(lambda: func(1)) - 
self.assert_not_deprecated(lambda: func([0, 1])) - - -class TestMachAr(_DeprecationTestCase): - # Deprecated 2022-11-22, NumPy 1.25 - warning_cls = DeprecationWarning - - def test_deprecated_module(self): - self.assert_deprecated(lambda: getattr(np._core, "MachAr")) - - -class TestQuantileInterpolationDeprecation(_DeprecationTestCase): - # Deprecated 2021-11-08, NumPy 1.22 - @pytest.mark.parametrize("func", - [np.percentile, np.quantile, np.nanpercentile, np.nanquantile]) - def test_deprecated(self, func): - self.assert_deprecated( - lambda: func([0., 1.], 0., interpolation="linear")) - self.assert_deprecated( - lambda: func([0., 1.], 0., interpolation="nearest")) - - @pytest.mark.parametrize("func", - [np.percentile, np.quantile, np.nanpercentile, np.nanquantile]) - def test_both_passed(self, func): - with warnings.catch_warnings(): - # catch the DeprecationWarning so that it does not raise: - warnings.simplefilter("always", DeprecationWarning) - with pytest.raises(TypeError): - func([0., 1.], 0., interpolation="nearest", method="nearest") - - -class TestArrayFinalizeNone(_DeprecationTestCase): - message = "Setting __array_finalize__ = None" - - def test_use_none_is_deprecated(self): - # Deprecated way that ndarray itself showed nothing needs finalizing. - class NoFinalize(np.ndarray): - __array_finalize__ = None - - self.assert_deprecated(lambda: np.array(1).view(NoFinalize)) - - -class TestLoadtxtParseIntsViaFloat(_DeprecationTestCase): - # Deprecated 2022-07-03, NumPy 1.23 - # This test can be removed without replacement after the deprecation. - # The tests: - # * numpy/lib/tests/test_loadtxt.py::test_integer_signs - # * lib/tests/test_loadtxt.py::test_implicit_cast_float_to_int_fails - # Have a warning filter that needs to be removed. 
- message = r"loadtxt\(\): Parsing an integer via a float is deprecated.*" - - @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) - def test_deprecated_warning(self, dtype): - with pytest.warns(DeprecationWarning, match=self.message): - np.loadtxt(["10.5"], dtype=dtype) - - @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) - def test_deprecated_raised(self, dtype): - # The DeprecationWarning is chained when raised, so test manually: - with warnings.catch_warnings(): - warnings.simplefilter("error", DeprecationWarning) - try: - np.loadtxt(["10.5"], dtype=dtype) - except ValueError as e: - assert isinstance(e.__cause__, DeprecationWarning) - - -class TestScalarConversion(_DeprecationTestCase): - # 2023-01-02, 1.25.0 - def test_float_conversion(self): - self.assert_deprecated(float, args=(np.array([3.14]),)) - - def test_behaviour(self): - b = np.array([[3.14]]) - c = np.zeros(5) - with pytest.warns(DeprecationWarning): - c[0] = b - - class TestPyIntConversion(_DeprecationTestCase): message = r".*stop allowing conversion of out-of-bound.*" @@ -629,63 +206,17 @@ def test_attributeerror_includes_info(self, name): getattr(np, name) -class TestDeprecatedFinfo(_DeprecationTestCase): - # Deprecated in NumPy 1.25, 2023-01-16 - def test_deprecated_none(self): - self.assert_deprecated(np.finfo, args=(None,)) - - -class TestMathAlias(_DeprecationTestCase): - def test_deprecated_np_lib_math(self): - self.assert_deprecated(lambda: np.lib.math) - - -class TestLibImports(_DeprecationTestCase): - # Deprecated in Numpy 1.26.0, 2023-09 - def test_lib_functions_deprecation_call(self): - from numpy.lib._utils_impl import safe_eval - from numpy.lib._npyio_impl import recfromcsv, recfromtxt - from numpy.lib._function_base_impl import disp - from numpy.lib._shape_base_impl import get_array_wrap - from numpy._core.numerictypes import maximum_sctype - from numpy.lib.tests.test_io import TextIO - from numpy import in1d, row_stack, trapz - - 
self.assert_deprecated(lambda: safe_eval("None")) - - data_gen = lambda: TextIO('A,B\n0,1\n2,3') - kwargs = dict(delimiter=",", missing_values="N/A", names=True) - self.assert_deprecated(lambda: recfromcsv(data_gen())) - self.assert_deprecated(lambda: recfromtxt(data_gen(), **kwargs)) - - self.assert_deprecated(lambda: disp("test")) - self.assert_deprecated(lambda: get_array_wrap()) - self.assert_deprecated(lambda: maximum_sctype(int)) - - self.assert_deprecated(lambda: in1d([1], [1])) - self.assert_deprecated(lambda: row_stack([[]])) - self.assert_deprecated(lambda: trapz([1], [1])) - self.assert_deprecated(lambda: np.chararray) +class TestCharArray(_DeprecationTestCase): + def test_deprecated_chararray(self): + self.assert_deprecated(lambda: np.char.chararray) class TestDeprecatedDTypeAliases(_DeprecationTestCase): - - def _check_for_warning(self, func): - with warnings.catch_warnings(record=True) as caught_warnings: - func() - assert len(caught_warnings) == 1 - w = caught_warnings[0] - assert w.category is DeprecationWarning - assert "alias 'a' was deprecated in NumPy 2.0" in str(w.message) - - def test_a_dtype_alias(self): - for dtype in ["a", "a10"]: - f = lambda: np.dtype(dtype) - self._check_for_warning(f) - self.assert_deprecated(f) - f = lambda: np.array(["hello", "world"]).astype("a10") - self._check_for_warning(f) - self.assert_deprecated(f) + @pytest.mark.parametrize("dtype_code", ["a", "a10"]) + def test_a_dtype_alias(self, dtype_code: str): + # Deprecated in 2.0, removed in 2.5, 2025-12 + with pytest.raises(TypeError): + np.dtype(dtype_code) class TestDeprecatedArrayWrap(_DeprecationTestCase): @@ -712,7 +243,16 @@ def __array_wrap__(self, arr): self.assert_deprecated(lambda: np.negative(test2)) assert test2.called +class TestDeprecatedArrayAttributeSetting(_DeprecationTestCase): + message = "Setting the .*on a NumPy array has been deprecated.*" + + def test_deprecated_strides_set(self): + x = np.eye(2) + self.assert_deprecated(setattr, args=(x, 
'strides', x.strides)) + def test_deprecated_shape_set(self): + x = np.eye(2) + self.assert_deprecated(setattr, args=(x, "shape", (4, 1))) class TestDeprecatedDTypeParenthesizedRepeatCount(_DeprecationTestCase): message = "Passing in a parenthesized single number" @@ -722,24 +262,106 @@ def test_parenthesized_repeat_count(self, string): self.assert_deprecated(np.dtype, args=(string,)) -class TestDeprecatedSaveFixImports(_DeprecationTestCase): - # Deprecated in Numpy 2.1, 2024-05 - message = "The 'fix_imports' flag is deprecated and has no effect." - +class TestDTypeAlignBool(_VisibleDeprecationTestCase): + # Deprecated in Numpy 2.4, 2025-07 + # NOTE: As you can see, finalizing this deprecation breaks some (very) old + # pickle files. This may be fine, but needs to be done with some care since + # it breaks all of them and not just some. + # (Maybe it should be a 3.0 or only after warning more explicitly around pickles.) + message = r"dtype\(\): align should be passed as Python or NumPy boolean but got " + def test_deprecated(self): - with temppath(suffix='.npy') as path: - sample_args = (path, np.array(np.zeros((1024, 10)))) - self.assert_not_deprecated(np.save, args=sample_args) - self.assert_deprecated(np.save, args=sample_args, - kwargs={'fix_imports': True}) - self.assert_deprecated(np.save, args=sample_args, - kwargs={'fix_imports': False}) - for allow_pickle in [True, False]: - self.assert_not_deprecated(np.save, args=sample_args, - kwargs={'allow_pickle': allow_pickle}) - self.assert_deprecated(np.save, args=sample_args, - kwargs={'allow_pickle': allow_pickle, - 'fix_imports': True}) - self.assert_deprecated(np.save, args=sample_args, - kwargs={'allow_pickle': allow_pickle, - 'fix_imports': False}) + # in particular integers should be rejected because one may think they mean + # alignment, or pass them accidentally as a subarray shape (meaning to pass + # a tuple). 
+ self.assert_deprecated(lambda: np.dtype("f8", align=3)) + + @pytest.mark.parametrize("align", [True, False, np.True_, np.False_]) + def test_not_deprecated(self, align): + # if the user passes a bool, it is accepted. + self.assert_not_deprecated(lambda: np.dtype("f8", align=align)) + + +class TestFlatiterIndexing0dBoolIndex(_DeprecationTestCase): + # Deprecated in Numpy 2.4, 2025-07 + message = r"Indexing flat iterators with a 0-dimensional boolean index" + + def test_0d_boolean_index_deprecated(self): + arr = np.arange(3) + # 0d boolean indices on flat iterators are deprecated + self.assert_deprecated(lambda: arr.flat[True]) + + def test_0d_boolean_assign_index_deprecated(self): + arr = np.arange(3) + + def assign_to_index(): + arr.flat[True] = 10 + + self.assert_deprecated(assign_to_index) + + +class TestFlatiterIndexingFloatIndex(_DeprecationTestCase): + # Deprecated in NumPy 2.4, 2025-07 + message = r"Invalid non-array indices for iterator objects" + + def test_float_index_deprecated(self): + arr = np.arange(3) + # float indices on flat iterators are deprecated + self.assert_deprecated(lambda: arr.flat[[1.]]) + + def test_float_assign_index_deprecated(self): + arr = np.arange(3) + + def assign_to_index(): + arr.flat[[1.]] = 10 + + self.assert_deprecated(assign_to_index) + + +@pytest.mark.thread_unsafe( + reason="warning control utilities are deprecated due to being thread-unsafe" +) +class TestWarningUtilityDeprecations(_DeprecationTestCase): + # Deprecation in NumPy 2.4, 2025-08 + message = r"NumPy warning suppression and assertion utilities are deprecated." 
+ + def test_assert_warns_deprecated(self): + def use_assert_warns(): + with np.testing.assert_warns(RuntimeWarning): + warnings.warn("foo", RuntimeWarning, stacklevel=1) + + self.assert_deprecated(use_assert_warns) + + def test_suppress_warnings_deprecated(self): + def use_suppress_warnings(): + with np.testing.suppress_warnings() as sup: + sup.filter(RuntimeWarning, 'invalid value encountered in divide') + + self.assert_deprecated(use_suppress_warnings) + + +class TestTooManyArgsExtremum(_DeprecationTestCase): + # Deprecated in Numpy 2.4, 2025-08, gh-27639 + message = "Passing more than 2 positional arguments to np.maximum and np.minimum " + + @pytest.mark.parametrize("ufunc", [np.minimum, np.maximum]) + def test_extremem_3_args(self, ufunc): + self.assert_deprecated(ufunc, args=(np.ones(1), np.zeros(1), np.empty(1))) + + +class TestTypenameDeprecation(_DeprecationTestCase): + # Deprecation in Numpy 2.5, 2026-02 + + def test_typename_emits_deprecation_warning(self): + self.assert_deprecated(lambda: np.typename("S1")) + self.assert_deprecated(lambda: np.typename("h")) + +class TestRoundDeprecation(_DeprecationTestCase): + # Deprecation in NumPy 2.5, 2026-02 + + def test_round_emits_deprecation_warning_array(self): + a = np.array([1.5, 2.7, -1.5, -2.7]) + self.assert_deprecated(lambda: np.ma.round_(a)) + + def test_round_emits_deprecation_warning_scalar(self): + self.assert_deprecated(lambda: np.ma.round_(3.14)) diff --git a/numpy/_core/tests/test_dlpack.py b/numpy/_core/tests/test_dlpack.py index d9205912124e..239f34559cef 100644 --- a/numpy/_core/tests/test_dlpack.py +++ b/numpy/_core/tests/test_dlpack.py @@ -1,8 +1,9 @@ import sys + import pytest import numpy as np -from numpy.testing import assert_array_equal, IS_PYPY +from numpy.testing import assert_array_equal def new_and_old_dlpack(): @@ -17,14 +18,13 @@ def __dlpack__(self, stream=None): class TestDLPack: - @pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.") 
@pytest.mark.parametrize("max_version", [(0, 0), None, (1, 0), (100, 3)]) def test_dunder_dlpack_refcount(self, max_version): x = np.arange(5) y = x.__dlpack__(max_version=max_version) - assert sys.getrefcount(x) == 3 + startcount = sys.getrefcount(x) del y - assert sys.getrefcount(x) == 2 + assert startcount - sys.getrefcount(x) == 1 def test_dunder_dlpack_stream(self): x = np.arange(5) @@ -53,14 +53,13 @@ def test_strides_not_multiple_of_itemsize(self): with pytest.raises(BufferError): np.from_dlpack(z) - @pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.") @pytest.mark.parametrize("arr", new_and_old_dlpack()) def test_from_dlpack_refcount(self, arr): arr = arr.copy() y = np.from_dlpack(arr) - assert sys.getrefcount(arr) == 3 + startcount = sys.getrefcount(arr) del y - assert sys.getrefcount(arr) == 2 + assert startcount - sys.getrefcount(arr) == 1 @pytest.mark.parametrize("dtype", [ np.bool, @@ -144,6 +143,17 @@ def test_readonly(self): y = np.from_dlpack(x) assert not y.flags.writeable + def test_writeable(self): + x_new, x_old = new_and_old_dlpack() + + # new dlpacks respect writeability + y = np.from_dlpack(x_new) + assert y.flags.writeable + + # old dlpacks are not writeable for backwards compatibility + y = np.from_dlpack(x_old) + assert not y.flags.writeable + def test_ndim0(self): x = np.array(1.0) y = np.from_dlpack(x) @@ -172,7 +182,7 @@ def test_device(self): np.from_dlpack(x, device="cpu") np.from_dlpack(x, device=None) - with pytest.raises(ValueError): + with pytest.raises(BufferError): x.__dlpack__(dl_device=(10, 0)) with pytest.raises(ValueError): np.from_dlpack(x, device="gpu") diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index eb4f915ee452..7b5966d0a56b 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -1,25 +1,31 @@ -import sys -import operator -import pytest +import contextlib import ctypes -import gc +import inspect +import operator +import os +import pickle 
+import sys import types +from itertools import permutations from typing import Any -import pickle + +import hypothesis +import pytest +from hypothesis.extra import numpy as hynp import numpy as np import numpy.dtypes -from numpy._core._rational_tests import rational from numpy._core._multiarray_tests import create_custom_field_dtype +from numpy._core._rational_tests import rational from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT, - IS_PYSTON, _OLD_PROMOTION) -from itertools import permutations -import random - -import hypothesis -from hypothesis.extra import numpy as hynp - + HAS_REFCOUNT, + IS_64BIT, + assert_, + assert_array_equal, + assert_equal, + assert_raises, +) +from numpy.testing._private.utils import requires_deep_recursion def assert_dtype_equal(a, b): @@ -181,21 +187,21 @@ def test_dtype_from_bytes(self): def test_bad_param(self): # Can't give a size that's too small assert_raises(ValueError, np.dtype, - {'names':['f0', 'f1'], - 'formats':['i4', 'i1'], - 'offsets':[0, 4], - 'itemsize':4}) + {'names': ['f0', 'f1'], + 'formats': ['i4', 'i1'], + 'offsets': [0, 4], + 'itemsize': 4}) # If alignment is enabled, the alignment (4) must divide the itemsize assert_raises(ValueError, np.dtype, - {'names':['f0', 'f1'], - 'formats':['i4', 'i1'], - 'offsets':[0, 4], - 'itemsize':9}, align=True) + {'names': ['f0', 'f1'], + 'formats': ['i4', 'i1'], + 'offsets': [0, 4], + 'itemsize': 9}, align=True) # If alignment is enabled, the individual fields must be aligned assert_raises(ValueError, np.dtype, - {'names':['f0', 'f1'], - 'formats':['i1', 'f4'], - 'offsets':[0, 2]}, align=True) + {'names': ['f0', 'f1'], + 'formats': ['i1', 'f4'], + 'offsets': [0, 2]}, align=True) def test_field_order_equality(self): x = np.dtype({'names': ['A', 'B'], @@ -205,7 +211,7 @@ def test_field_order_equality(self): 'formats': ['i4', 'f4'], 'offsets': [4, 0]}) assert_equal(x == y, False) - # This is an safe cast (not equiv) due to the 
different names: + # This is a safe cast (not equiv) due to the different names: assert np.can_cast(x, y, casting="safe") @pytest.mark.parametrize( @@ -218,7 +224,7 @@ def test_create_string_dtypes_directly( dtype = dtype_class(8) assert dtype.type is scalar_type - assert dtype.itemsize == 8*char_size + assert dtype.itemsize == 8 * char_size def test_create_invalid_string_errors(self): one_too_big = np.iinfo(np.intc).max + 1 @@ -236,6 +242,22 @@ def test_create_invalid_string_errors(self): with pytest.raises(ValueError): type(np.dtype("U"))(-1) + # OverflowError on 32 bit + with pytest.raises((TypeError, OverflowError)): + # see gh-26556 + type(np.dtype("S"))(2**61) + + with pytest.raises(TypeError): + np.dtype("S1234hello") + + def test_leading_zero_parsing(self): + dt1 = np.dtype('S010') + dt2 = np.dtype('S10') + + assert dt1 == dt2 + assert repr(dt1) == "dtype('S10')" + assert dt1.itemsize == 10 + class TestRecord: def test_equivalent_record(self): @@ -266,7 +288,7 @@ def test_refcount_dictionary_setting(self): formats = ["f8"] titles = ["t1"] offsets = [0] - d = dict(names=names, formats=formats, titles=titles, offsets=offsets) + d = {"names": names, "formats": formats, "titles": titles, "offsets": offsets} refcounts = {k: sys.getrefcount(i) for k, i in d.items()} np.dtype(d) refcounts_new = {k: sys.getrefcount(i) for k, i in d.items()} @@ -310,9 +332,9 @@ def test_not_lists(self): the dtype constructor. 
""" assert_raises(TypeError, np.dtype, - dict(names={'A', 'B'}, formats=['f8', 'i4'])) + {"names": {'A', 'B'}, "formats": ['f8', 'i4']}) assert_raises(TypeError, np.dtype, - dict(names=['A', 'B'], formats={'f8', 'i4'})) + {"names": ['A', 'B'], "formats": {'f8', 'i4'}}) def test_aligned_size(self): # Check that structured dtypes get padded to an aligned size @@ -320,22 +342,22 @@ def test_aligned_size(self): assert_equal(dt.itemsize, 8) dt = np.dtype([('f0', 'i4'), ('f1', 'i1')], align=True) assert_equal(dt.itemsize, 8) - dt = np.dtype({'names':['f0', 'f1'], - 'formats':['i4', 'u1'], - 'offsets':[0, 4]}, align=True) + dt = np.dtype({'names': ['f0', 'f1'], + 'formats': ['i4', 'u1'], + 'offsets': [0, 4]}, align=True) assert_equal(dt.itemsize, 8) - dt = np.dtype({'f0': ('i4', 0), 'f1':('u1', 4)}, align=True) + dt = np.dtype({'f0': ('i4', 0), 'f1': ('u1', 4)}, align=True) assert_equal(dt.itemsize, 8) # Nesting should preserve that alignment dt1 = np.dtype([('f0', 'i4'), ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]), ('f2', 'i1')], align=True) assert_equal(dt1.itemsize, 20) - dt2 = np.dtype({'names':['f0', 'f1', 'f2'], - 'formats':['i4', + dt2 = np.dtype({'names': ['f0', 'f1', 'f2'], + 'formats': ['i4', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 'i1'], - 'offsets':[0, 4, 16]}, align=True) + 'offsets': [0, 4, 16]}, align=True) assert_equal(dt2.itemsize, 20) dt3 = np.dtype({'f0': ('i4', 0), 'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4), @@ -348,11 +370,11 @@ def test_aligned_size(self): ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]), ('f2', 'i1')], align=False) assert_equal(dt1.itemsize, 11) - dt2 = np.dtype({'names':['f0', 'f1', 'f2'], - 'formats':['i4', + dt2 = np.dtype({'names': ['f0', 'f1', 'f2'], + 'formats': ['i4', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 'i1'], - 'offsets':[0, 4, 10]}, align=False) + 'offsets': [0, 4, 10]}, align=False) assert_equal(dt2.itemsize, 11) dt3 = np.dtype({'f0': ('i4', 0), 'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 
'i1')], 4), @@ -385,23 +407,23 @@ def test_empty_struct_alignment(self): def test_union_struct(self): # Should be able to create union dtypes - dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['= (3, 12), - reason="Python 3.12 has immortal refcounts, this test will no longer " - "work. See gh-23986" -) -@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") -class TestStructuredObjectRefcounting: - """These tests cover various uses of complicated structured types which - include objects and thus require reference counting. - """ - @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'], - iter_struct_object_dtypes()) - @pytest.mark.parametrize(["creation_func", "creation_obj"], [ - pytest.param(np.empty, None, - # None is probably used for too many things - marks=pytest.mark.skip("unreliable due to python's behaviour")), - (np.ones, 1), - (np.zeros, 0)]) - def test_structured_object_create_delete(self, dt, pat, count, singleton, - creation_func, creation_obj): - """Structured object reference counting in creation and deletion""" - # The test assumes that 0, 1, and None are singletons. - gc.collect() - before = sys.getrefcount(creation_obj) - arr = creation_func(3, dt) - - now = sys.getrefcount(creation_obj) - assert now - before == count * 3 - del arr - now = sys.getrefcount(creation_obj) - assert now == before - - @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'], - iter_struct_object_dtypes()) - def test_structured_object_item_setting(self, dt, pat, count, singleton): - """Structured object reference counting for simple item setting""" - one = 1 - - gc.collect() - before = sys.getrefcount(singleton) - arr = np.array([pat] * 3, dt) - assert sys.getrefcount(singleton) - before == count * 3 - # Fill with `1` and check that it was replaced correctly: - before2 = sys.getrefcount(one) - arr[...] 
= one - after2 = sys.getrefcount(one) - assert after2 - before2 == count * 3 - del arr - gc.collect() - assert sys.getrefcount(one) == before2 - assert sys.getrefcount(singleton) == before - - @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'], - iter_struct_object_dtypes()) - @pytest.mark.parametrize( - ['shape', 'index', 'items_changed'], - [((3,), ([0, 2],), 2), - ((3, 2), ([0, 2], slice(None)), 4), - ((3, 2), ([0, 2], [1]), 2), - ((3,), ([True, False, True]), 2)]) - def test_structured_object_indexing(self, shape, index, items_changed, - dt, pat, count, singleton): - """Structured object reference counting for advanced indexing.""" - # Use two small negative values (should be singletons, but less likely - # to run into race-conditions). This failed in some threaded envs - # When using 0 and 1. If it fails again, should remove all explicit - # checks, and rely on `pytest-leaks` reference count checker only. - val0 = -4 - val1 = -5 - - arr = np.full(shape, val0, dt) - - gc.collect() - before_val0 = sys.getrefcount(val0) - before_val1 = sys.getrefcount(val1) - # Test item getting: - part = arr[index] - after_val0 = sys.getrefcount(val0) - assert after_val0 - before_val0 == count * items_changed - del part - # Test item setting: - arr[index] = val1 - gc.collect() - after_val0 = sys.getrefcount(val0) - after_val1 = sys.getrefcount(val1) - assert before_val0 - after_val0 == count * items_changed - assert after_val1 - before_val1 == count * items_changed - - @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'], - iter_struct_object_dtypes()) - def test_structured_object_take_and_repeat(self, dt, pat, count, singleton): - """Structured object reference counting for specialized functions. - The older functions such as take and repeat use different code paths - then item setting (when writing this). 
- """ - indices = [0, 1] - - arr = np.array([pat] * 3, dt) - gc.collect() - before = sys.getrefcount(singleton) - res = arr.take(indices) - after = sys.getrefcount(singleton) - assert after - before == count * 2 - new = res.repeat(10) - gc.collect() - after_repeat = sys.getrefcount(singleton) - assert after_repeat - after == count * 2 * 10 - - class TestStructuredDtypeSparseFields: """Tests subarray fields which contain sparse dtypes so that not all memory is used by the dtype work. Such dtype's should leave the underlying memory unchanged. """ - dtype = np.dtype([('a', {'names':['aa', 'ab'], 'formats':['f', 'f'], - 'offsets':[0, 4]}, (2, 3))]) - sparse_dtype = np.dtype([('a', {'names':['ab'], 'formats':['f'], - 'offsets':[4]}, (2, 3))]) + dtype = np.dtype([('a', {'names': ['aa', 'ab'], 'formats': ['f', 'f'], + 'offsets': [0, 4]}, (2, 3))]) + sparse_dtype = np.dtype([('a', {'names': ['ab'], 'formats': ['f'], + 'offsets': [4]}, (2, 3))]) def test_sparse_field_assignment(self): arr = np.zeros(3, self.dtype) @@ -954,24 +867,26 @@ def test1(self): ('yi', np.dtype((a, (3, 2))))]) assert_dtype_equal(c, d) - @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") + @requires_deep_recursion def test_list_recursion(self): - l = list() + l = [] l.append(('f', l)) with pytest.raises(RecursionError): np.dtype(l) - @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") + @requires_deep_recursion def test_tuple_recursion(self): d = np.int32 for i in range(100000): d = (d, (1,)) - with pytest.raises(RecursionError): + # depending on OS and Python version, this might succeed + # see gh-30370 and cpython issue #142253 + with contextlib.suppress(RecursionError): np.dtype(d) - @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") + @requires_deep_recursion def test_dict_recursion(self): - d = dict(names=['self'], formats=[None], offsets=[0]) + d = {"names": ['self'], "formats": [None], "offsets": [0]} d['formats'][0] = d 
with pytest.raises(RecursionError): np.dtype(d) @@ -1156,6 +1071,10 @@ def test_void_subclass_fields(self): assert_equal(str(dt), "(numpy.record, [('a', 'i4,j', [a, a], casting='same_value') + + def test_einsum_sorting_behavior(self): + # Case 1: 26 dimensions (all lowercase indices) + n1 = 26 + x1 = np.random.random((1,) * n1) + path1 = np.einsum_path(x1, range(n1))[1] # Get einsum path details + output_indices1 = path1.split("->")[-1].strip() # Extract output indices + # Assert indices are only uppercase letters and sorted correctly + assert all(c.isupper() for c in output_indices1), ( + "Output indices for n=26 should use uppercase letters only: " + f"{output_indices1}" + ) + assert_equal( + output_indices1, + ''.join(sorted(output_indices1)), + err_msg=( + "Output indices for n=26 are not lexicographically sorted: " + f"{output_indices1}" + ) + ) + + # Case 2: 27 dimensions (includes uppercase indices) + n2 = 27 + x2 = np.random.random((1,) * n2) + path2 = np.einsum_path(x2, range(n2))[1] + output_indices2 = path2.split("->")[-1].strip() + # Assert indices include both uppercase and lowercase letters + assert any(c.islower() for c in output_indices2), ( + "Output indices for n=27 should include uppercase letters: " + f"{output_indices2}" + ) + # Assert output indices are sorted uppercase before lowercase + assert_equal( + output_indices2, + ''.join(sorted(output_indices2)), + err_msg=( + "Output indices for n=27 are not lexicographically sorted: " + f"{output_indices2}" + ) + ) + + # Additional Check: Ensure dimensions correspond correctly to indices + # Generate expected mapping of dimensions to indices + expected_indices = [ + chr(i + ord('A')) if i < 26 else chr(i - 26 + ord('a')) + for i in range(n2) + ] + assert_equal( + output_indices2, + ''.join(expected_indices), + err_msg=( + "Output indices do not map to the correct dimensions. 
Expected: " + f"{''.join(expected_indices)}, Got: {output_indices2}" + ) + ) + @pytest.mark.parametrize("do_opt", [True, False]) def test_einsum_specific_errors(self, do_opt): # out parameter must be an array @@ -154,7 +218,7 @@ def __rmul__(self, other): assert_raises(CustomException, np.einsum, "ij->i", a) # raised from unbuffered_loop_nop1_ndim3 - b = np.array([DestructoBox(i, 100) for i in range(0, 27)], + b = np.array([DestructoBox(i, 100) for i in range(27)], dtype='object').reshape(3, 3, 3) assert_raises(CustomException, np.einsum, "i...k->...", b) @@ -172,21 +236,20 @@ def __rmul__(self, other): def test_einsum_views(self): # pass-through for do_opt in [True, False]: - a = np.arange(6) - a.shape = (2, 3) + a = np.arange(6).reshape((2, 3)) b = np.einsum("...", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) b = np.einsum(a, [Ellipsis], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) b = np.einsum("ij", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, a) b = np.einsum(a, [0, 1], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, a) # output is writeable whenever input is writeable @@ -197,118 +260,112 @@ def test_einsum_views(self): assert_(not b.flags['WRITEABLE']) # transpose - a = np.arange(6) - a.shape = (2, 3) + a = np.arange(6).reshape((2, 3)) b = np.einsum("ji", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, a.T) b = np.einsum(a, [1, 0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, a.T) # diagonal - a = np.arange(9) - a.shape = (3, 3) + a = np.arange(9).reshape((3, 3)) b = np.einsum("ii->i", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[i, i] for i in range(3)]) b = np.einsum(a, [0, 0], [0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[i, i] for i in range(3)]) # diagonal with 
various ways of broadcasting an additional dimension - a = np.arange(27) - a.shape = (3, 3, 3) + a = np.arange(27).reshape((3, 3, 3)) b = np.einsum("...ii->...i", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) b = np.einsum(a, [Ellipsis, 0, 0], [Ellipsis, 0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) b = np.einsum("ii...->...i", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(2, 0, 1)]) b = np.einsum(a, [0, 0, Ellipsis], [Ellipsis, 0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(2, 0, 1)]) b = np.einsum("...ii->i...", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum(a, [Ellipsis, 0, 0], [0, Ellipsis], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum("jii->ij", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum(a, [1, 0, 0], [0, 1], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum("ii...->i...", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) b = np.einsum(a, [0, 0, Ellipsis], [0, Ellipsis], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) b = np.einsum("i...i->i...", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) b = np.einsum(a, [0, Ellipsis, 
0], [0, Ellipsis], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) b = np.einsum("i...i->...i", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(1, 0, 2)]) b = np.einsum(a, [0, Ellipsis, 0], [Ellipsis, 0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(1, 0, 2)]) # triple diagonal - a = np.arange(27) - a.shape = (3, 3, 3) + a = np.arange(27).reshape((3, 3, 3)) b = np.einsum("iii->i", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[i, i, i] for i in range(3)]) b = np.einsum(a, [0, 0, 0], [0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[i, i, i] for i in range(3)]) # swap axes - a = np.arange(24) - a.shape = (2, 3, 4) + a = np.arange(24).reshape((2, 3, 4)) b = np.einsum("ijk->jik", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, a.swapaxes(0, 1)) b = np.einsum(a, [0, 1, 2], [1, 0, 2], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, a.swapaxes(0, 1)) - @np._no_nep50_warning() def check_einsum_sums(self, dtype, do_opt=False): dtype = np.dtype(dtype) # Check various sums. Does many sizes to exercise unrolled loops. 
@@ -323,7 +380,7 @@ def check_einsum_sums(self, dtype, do_opt=False): assert_equal(np.einsum(a, [0], [], optimize=do_opt), b) for n in range(1, 17): - a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n) + a = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) b = np.sum(a, axis=-1) if hasattr(b, 'astype'): b = b.astype(dtype) @@ -332,7 +389,7 @@ def check_einsum_sums(self, dtype, do_opt=False): # sum(a, axis=0) for n in range(1, 17): - a = np.arange(2*n, dtype=dtype).reshape(2, n) + a = np.arange(2 * n, dtype=dtype).reshape(2, n) b = np.sum(a, axis=0) if hasattr(b, 'astype'): b = b.astype(dtype) @@ -340,7 +397,7 @@ def check_einsum_sums(self, dtype, do_opt=False): assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt), b) for n in range(1, 17): - a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n) + a = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) b = np.sum(a, axis=0) if hasattr(b, 'astype'): b = b.astype(dtype) @@ -349,7 +406,7 @@ def check_einsum_sums(self, dtype, do_opt=False): # trace(a) for n in range(1, 17): - a = np.arange(n*n, dtype=dtype).reshape(n, n) + a = np.arange(n * n, dtype=dtype).reshape(n, n) b = np.trace(a) if hasattr(b, 'astype'): b = b.astype(dtype) @@ -389,20 +446,20 @@ def check_einsum_sums(self, dtype, do_opt=False): # outer(a,b) for n in range(1, 17): - a = np.arange(3, dtype=dtype)+1 - b = np.arange(n, dtype=dtype)+1 + a = np.arange(3, dtype=dtype) + 1 + b = np.arange(n, dtype=dtype) + 1 assert_equal(np.einsum("i,j", a, b, optimize=do_opt), np.outer(a, b)) assert_equal(np.einsum(a, [0], b, [1], optimize=do_opt), np.outer(a, b)) # Suppress the complex warnings for the 'as f8' tests - with suppress_warnings() as sup: - sup.filter(np.exceptions.ComplexWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', np.exceptions.ComplexWarning) # matvec(a,b) / a.dot(b) where a is matrix, b is vector for n in range(1, 17): - a = np.arange(4*n, dtype=dtype).reshape(4, n) + a = np.arange(4 * n, 
dtype=dtype).reshape(4, n) b = np.arange(n, dtype=dtype) assert_equal(np.einsum("ij, j", a, b, optimize=do_opt), np.dot(a, b)) @@ -423,7 +480,7 @@ def check_einsum_sums(self, dtype, do_opt=False): b.astype('f8')).astype(dtype)) for n in range(1, 17): - a = np.arange(4*n, dtype=dtype).reshape(4, n) + a = np.arange(4 * n, dtype=dtype).reshape(4, n) b = np.arange(n, dtype=dtype) assert_equal(np.einsum("ji,j", a.T, b.T, optimize=do_opt), np.dot(b.T, a.T)) @@ -446,16 +503,16 @@ def check_einsum_sums(self, dtype, do_opt=False): # matmat(a,b) / a.dot(b) where a is matrix, b is matrix for n in range(1, 17): if n < 8 or dtype != 'f2': - a = np.arange(4*n, dtype=dtype).reshape(4, n) - b = np.arange(n*6, dtype=dtype).reshape(n, 6) + a = np.arange(4 * n, dtype=dtype).reshape(4, n) + b = np.arange(n * 6, dtype=dtype).reshape(n, 6) assert_equal(np.einsum("ij,jk", a, b, optimize=do_opt), np.dot(a, b)) assert_equal(np.einsum(a, [0, 1], b, [1, 2], optimize=do_opt), np.dot(a, b)) for n in range(1, 17): - a = np.arange(4*n, dtype=dtype).reshape(4, n) - b = np.arange(n*6, dtype=dtype).reshape(n, 6) + a = np.arange(4 * n, dtype=dtype).reshape(4, n) + b = np.arange(n * 6, dtype=dtype).reshape(n, 6) c = np.arange(24, dtype=dtype).reshape(4, 6) np.einsum("ij,jk", a, b, out=c, dtype='f8', casting='unsafe', optimize=do_opt) @@ -528,10 +585,10 @@ def check_einsum_sums(self, dtype, do_opt=False): np.logical_and(np.logical_and(a != 0, b != 0), c != 0)) a = np.arange(9, dtype=dtype) - assert_equal(np.einsum(",i->", 3, a), 3*np.sum(a)) - assert_equal(np.einsum(3, [], a, [0], []), 3*np.sum(a)) - assert_equal(np.einsum("i,->", a, 3), 3*np.sum(a)) - assert_equal(np.einsum(a, [0], 3, [], []), 3*np.sum(a)) + assert_equal(np.einsum(",i->", 3, a), 3 * np.sum(a)) + assert_equal(np.einsum(3, [], a, [0], []), 3 * np.sum(a)) + assert_equal(np.einsum("i,->", a, 3), 3 * np.sum(a)) + assert_equal(np.einsum(a, [0], 3, [], []), 3 * np.sum(a)) # Various stride0, contiguous, and SSE aligned variants for n in 
range(1, 25): @@ -540,21 +597,21 @@ def check_einsum_sums(self, dtype, do_opt=False): assert_equal(np.einsum("...,...", a, a, optimize=do_opt), np.multiply(a, a)) assert_equal(np.einsum("i,i", a, a, optimize=do_opt), np.dot(a, a)) - assert_equal(np.einsum("i,->i", a, 2, optimize=do_opt), 2*a) - assert_equal(np.einsum(",i->i", 2, a, optimize=do_opt), 2*a) - assert_equal(np.einsum("i,->", a, 2, optimize=do_opt), 2*np.sum(a)) - assert_equal(np.einsum(",i->", 2, a, optimize=do_opt), 2*np.sum(a)) + assert_equal(np.einsum("i,->i", a, 2, optimize=do_opt), 2 * a) + assert_equal(np.einsum(",i->i", 2, a, optimize=do_opt), 2 * a) + assert_equal(np.einsum("i,->", a, 2, optimize=do_opt), 2 * np.sum(a)) + assert_equal(np.einsum(",i->", 2, a, optimize=do_opt), 2 * np.sum(a)) assert_equal(np.einsum("...,...", a[1:], a[:-1], optimize=do_opt), np.multiply(a[1:], a[:-1])) assert_equal(np.einsum("i,i", a[1:], a[:-1], optimize=do_opt), np.dot(a[1:], a[:-1])) - assert_equal(np.einsum("i,->i", a[1:], 2, optimize=do_opt), 2*a[1:]) - assert_equal(np.einsum(",i->i", 2, a[1:], optimize=do_opt), 2*a[1:]) + assert_equal(np.einsum("i,->i", a[1:], 2, optimize=do_opt), 2 * a[1:]) + assert_equal(np.einsum(",i->i", 2, a[1:], optimize=do_opt), 2 * a[1:]) assert_equal(np.einsum("i,->", a[1:], 2, optimize=do_opt), - 2*np.sum(a[1:])) + 2 * np.sum(a[1:])) assert_equal(np.einsum(",i->", 2, a[1:], optimize=do_opt), - 2*np.sum(a[1:])) + 2 * np.sum(a[1:])) # An object array, summed as the data type a = np.arange(9, dtype=object) @@ -578,8 +635,8 @@ def check_einsum_sums(self, dtype, do_opt=False): assert_equal(np.einsum('z,mz,zm->', p, q, r), 253) # singleton dimensions broadcast (gh-10343) - p = np.ones((10,2)) - q = np.ones((1,2)) + p = np.ones((10, 2)) + q = np.ones((1, 2)) assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True), np.einsum('ij,ij->j', p, q, optimize=False)) assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True), @@ -712,7 +769,7 @@ def __mul__(self, other): return 42 objMult 
= np.array([Mult()]) - objNULL = np.ndarray(buffer = b'\0' * np.intp(0).itemsize, shape=1, dtype=object) + objNULL = np.ndarray(buffer=b'\0' * np.intp(0).itemsize, shape=1, dtype=object) with pytest.raises(TypeError): np.einsum("i,j", [1], objNULL) @@ -721,15 +778,21 @@ def __mul__(self, other): assert np.einsum("i,j", objMult, objMult) == 42 def test_subscript_range(self): - # Issue #7741, make sure that all letters of Latin alphabet (both uppercase & lowercase) can be used - # when creating a subscript from arrays + # Issue #7741, make sure that all letters of Latin alphabet + # (both uppercase & lowercase) can be used when creating a subscript from arrays a = np.ones((2, 3)) b = np.ones((3, 4)) np.einsum(a, [0, 20], b, [20, 2], [0, 2], optimize=False) np.einsum(a, [0, 27], b, [27, 2], [0, 2], optimize=False) np.einsum(a, [0, 51], b, [51, 2], [0, 2], optimize=False) - assert_raises(ValueError, lambda: np.einsum(a, [0, 52], b, [52, 2], [0, 2], optimize=False)) - assert_raises(ValueError, lambda: np.einsum(a, [-1, 5], b, [5, 2], [-1, 2], optimize=False)) + assert_raises( + ValueError, + lambda: np.einsum(a, [0, 52], b, [52, 2], [0, 2], optimize=False), + ) + assert_raises( + ValueError, + lambda: np.einsum(a, [-1, 5], b, [5, 2], [-1, 2], optimize=False), + ) def test_einsum_broadcast(self): # Issue #2455 change in handling ellipsis @@ -745,7 +808,8 @@ def test_einsum_broadcast(self): for opt in [True, False]: assert_equal(np.einsum('ij...,j...->ij...', A, B, optimize=opt), ref) assert_equal(np.einsum('ij...,...j->ij...', A, B, optimize=opt), ref) - assert_equal(np.einsum('ij...,j->ij...', A, B, optimize=opt), ref) # used to raise error + # used to raise error + assert_equal(np.einsum('ij...,j->ij...', A, B, optimize=opt), ref) A = np.arange(12).reshape((4, 3)) B = np.arange(6).reshape((3, 2)) @@ -753,8 +817,9 @@ def test_einsum_broadcast(self): for opt in [True, False]: assert_equal(np.einsum('ik...,k...->i...', A, B, optimize=opt), ref) 
assert_equal(np.einsum('ik...,...kj->i...j', A, B, optimize=opt), ref) - assert_equal(np.einsum('...k,kj', A, B, optimize=opt), ref) # used to raise error - assert_equal(np.einsum('ik,k...->i...', A, B, optimize=opt), ref) # used to raise error + # used to raise error + assert_equal(np.einsum('...k,kj', A, B, optimize=opt), ref) + assert_equal(np.einsum('ik,k...->i...', A, B, optimize=opt), ref) dims = [2, 3, 4, 5] a = np.arange(np.prod(dims)).reshape(dims) @@ -762,16 +827,17 @@ def test_einsum_broadcast(self): ref = np.einsum('ijkl,k->ijl', a, v, optimize=False) for opt in [True, False]: assert_equal(np.einsum('ijkl,k', a, v, optimize=opt), ref) - assert_equal(np.einsum('...kl,k', a, v, optimize=opt), ref) # used to raise error assert_equal(np.einsum('...kl,k...', a, v, optimize=opt), ref) + # used to raise error + assert_equal(np.einsum('...kl,k', a, v, optimize=opt), ref) J, K, M = 160, 160, 120 A = np.arange(J * K * M).reshape(1, 1, 1, J, K, M) B = np.arange(J * K * M * 3).reshape(J, K, M, 3) ref = np.einsum('...lmn,...lmno->...o', A, B, optimize=False) for opt in [True, False]: - assert_equal(np.einsum('...lmn,lmno->...o', A, B, - optimize=opt), ref) # used to raise error + # used to raise error + assert_equal(np.einsum("...lmn,lmno->...o", A, B, optimize=opt), ref) def test_einsum_fixedstridebug(self): # Issue #4485 obscure einsum bug @@ -1028,7 +1094,7 @@ def test_broadcasting_dot_cases(self): def test_output_order(self): # Ensure output order is respected for optimize cases, the below - # conraction should yield a reshaped tensor view + # contraction should yield a reshaped tensor view # gh-16415 a = np.ones((2, 3, 5), order='F') @@ -1062,6 +1128,41 @@ def test_output_order(self): tmp = np.einsum('...ft,mf->...mt', d, c, order='a', optimize=opt) assert_(tmp.flags.c_contiguous) + def test_singleton_broadcasting(self): + eq = "ijp,ipq,ikq->ijk" + shapes = ((3, 1, 1), (3, 1, 3), (1, 3, 3)) + arrays = [np.random.rand(*shape) for shape in shapes] + 
self.optimize_compare(eq, operands=arrays) + + eq = "jhcabhijaci,dfijejgh->fgje" + shapes = ( + (1, 1, 1, 1, 3, 1, 1, 1, 1, 1, 1), + (3, 1, 3, 1, 1, 1, 1, 2), + ) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + eq = "baegffahgc,hdggeff->dhg" + shapes = ((2, 1, 4, 1, 1, 1, 1, 2, 1, 1), (1, 1, 1, 1, 4, 1, 1)) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + eq = "cehgbaifff,fhhdegih->cdghbi" + shapes = ((1, 1, 1, 1, 1, 1, 1, 1, 1, 1), (2, 1, 1, 2, 4, 1, 1, 1)) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + eq = "gah,cdbcghefg->ef" + shapes = ((2, 3, 1), (1, 3, 1, 1, 1, 2, 1, 4, 1)) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + eq = "cacc,bcb->" + shapes = ((1, 1, 1, 1), (1, 4, 1)) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + class TestEinsumPath: def build_operands(self, string, size_dict=global_size_dict): @@ -1188,7 +1289,7 @@ def test_path_type_input(self): assert_almost_equal(noopt, opt) def test_path_type_input_internal_trace(self): - #gh-20962 + # gh-20962 path_test = self.build_operands('cab,cdd->ab') exp_path = ['einsum_path', (1,), (0, 1)] @@ -1214,7 +1315,7 @@ def test_path_type_input_invalid(self): RuntimeError, np.einsum_path, *path_test, optimize=exp_path) def test_spaces(self): - #gh-10794 + # gh-10794 arr = np.array([[1]]) for sp in itertools.product(['', ' '], repeat=4): # no error for any spacing @@ -1227,6 +1328,33 @@ def test_overlap(): # sanity check c = np.einsum('ij,jk->ik', a, b) assert_equal(c, d) - #gh-10080, out overlaps one of the operands + # gh-10080, out overlaps one of the operands c = np.einsum('ij,jk->ik', a, b, out=b) assert_equal(c, d) + +def test_einsum_chunking_precision(): + """Most einsum operations are reductions and until NumPy 2.3 reductions 
+ never (or almost never?) used the `GROWINNER` mechanism to increase the + inner loop size when no buffers are needed. + Because einsum reductions work roughly: + + def inner(*inputs, out): + accumulate = 0 + for vals in zip(*inputs): + accumulate += prod(vals) + out[0] += accumulate + + Calling the inner-loop more often actually improves accuracy slightly + (same effect as pairwise summation but much less). + Without adding pairwise summation to the inner-loop it seems best to just + not use GROWINNER, a quick tests suggest that is maybe 1% slowdown for + the simplest `einsum("i,i->i", x, x)` case. + + (It is not clear that we should guarantee precision to this extend.) + """ + num = 1_000_000 + value = 1. + np.finfo(np.float64).eps * 8196 + res = np.einsum("i->", np.broadcast_to(np.array(value), num)) / num + + # At with GROWINNER 11 decimals succeed (larger will be less) + assert_almost_equal(res, value, decimal=15) diff --git a/numpy/_core/tests/test_errstate.py b/numpy/_core/tests/test_errstate.py index bd6b8b8caec3..f0735a045a4d 100644 --- a/numpy/_core/tests/test_errstate.py +++ b/numpy/_core/tests/test_errstate.py @@ -1,8 +1,9 @@ -import pytest import sysconfig +import pytest + import numpy as np -from numpy.testing import assert_, assert_raises, IS_WASM +from numpy.testing import IS_WASM, assert_raises # The floating point emulation on ARM EABI systems lacking a hardware FPU is # known to be buggy. This is an attempt to identify these hosts. 
It may not @@ -46,6 +47,7 @@ def test_divide(self): reason='platform/cpu issue with FPU (gh-15562)') def test_errcall(self): count = 0 + def foo(*args): nonlocal count count += 1 @@ -68,7 +70,7 @@ def test_errstate_decorator(self): def foo(): a = -np.arange(3) a // 0 - + foo() def test_errstate_enter_once(self): @@ -85,7 +87,7 @@ def test_errstate_enter_once(self): @pytest.mark.skipif(IS_WASM, reason="wasm doesn't support asyncio") def test_asyncio_safe(self): - # asyncio may not always work, lets assume its fine if missing + # asyncio may not always work, let's assume its fine if missing # Pyodide/wasm doesn't support it. If this test makes problems, # it should just be skipped liberally (or run differently). asyncio = pytest.importorskip("asyncio") diff --git a/numpy/_core/tests/test_extint128.py b/numpy/_core/tests/test_extint128.py index bd97cc20c016..6e4d74b81d39 100644 --- a/numpy/_core/tests/test_extint128.py +++ b/numpy/_core/tests/test_extint128.py @@ -1,13 +1,12 @@ -import itertools import contextlib +import itertools import operator + import pytest import numpy as np import numpy._core._multiarray_tests as mt - -from numpy.testing import assert_raises, assert_equal - +from numpy.testing import assert_equal, assert_raises INT64_MAX = np.iinfo(np.int64).max INT64_MIN = np.iinfo(np.int64).min @@ -22,8 +21,8 @@ [INT64_MIN + j for j in range(20)] + [INT64_MAX - j for j in range(20)] + [INT64_MID + j for j in range(-20, 20)] + - [2*INT64_MID + j for j in range(-20, 20)] + - [INT64_MID//2 + j for j in range(-20, 20)] + + [2 * INT64_MID + j for j in range(-20, 20)] + + [INT64_MID // 2 + j for j in range(-20, 20)] + list(range(-70, 70)) ) @@ -31,8 +30,8 @@ [INT128_MIN + j for j in range(20)] + [INT128_MAX - j for j in range(20)] + [INT128_MID + j for j in range(-20, 20)] + - [2*INT128_MID + j for j in range(-20, 20)] + - [INT128_MID//2 + j for j in range(-20, 20)] + + [2 * INT128_MID + j for j in range(-20, 20)] + + [INT128_MID // 2 + j for j in range(-20, 20)] + 
list(range(-70, 70)) + [False] # negative zero ) @@ -58,8 +57,7 @@ def iterate(): yield iterate() except Exception: import traceback - msg = "At: %r\n%s" % (repr(value[0]), - traceback.format_exc()) + msg = f"At: {repr(value[0])!r}\n{traceback.format_exc()}" raise AssertionError(msg) @@ -151,9 +149,9 @@ def test_shl_128(): with exc_iter(INT128_VALUES) as it: for a, in it: if a < 0: - b = -(((-a) << 1) & (2**128-1)) + b = -(((-a) << 1) & (2**128 - 1)) else: - b = (a << 1) & (2**128-1) + b = (a << 1) & (2**128 - 1) c = mt.extint_shl_128(a) if b != c: assert_equal(c, b) @@ -193,10 +191,10 @@ def test_divmod_128_64(): d, dr = mt.extint_divmod_128_64(a, b) - if c != d or d != dr or b*d + dr != a: + if c != d or d != dr or b * d + dr != a: assert_equal(d, c) assert_equal(dr, cr) - assert_equal(b*d + dr, a) + assert_equal(b * d + dr, a) def test_floordiv_128_64(): diff --git a/numpy/_core/tests/test_finfo.py b/numpy/_core/tests/test_finfo.py new file mode 100644 index 000000000000..5703b8d6a765 --- /dev/null +++ b/numpy/_core/tests/test_finfo.py @@ -0,0 +1,86 @@ +import pytest + +import numpy as np +from numpy import exp2, log10 +from numpy._core import numerictypes as ntypes + + +class MachArLike: + """Minimal class to simulate machine arithmetic parameters.""" + def __init__(self, dtype, machep, negep, minexp, maxexp, nmant, iexp): + self.dtype = dtype + self.machep = machep + self.negep = negep + self.minexp = minexp + self.maxexp = maxexp + self.nmant = nmant + self.iexp = iexp + self.eps = exp2(dtype(-nmant)) + self.epsneg = exp2(dtype(negep)) + self.precision = int(-log10(self.eps)) + self.resolution = dtype(10) ** (-self.precision) + + +@pytest.fixture +def float16_ma(): + """Machine arithmetic parameters for float16.""" + f16 = ntypes.float16 + return MachArLike(f16, + machep=-10, + negep=-11, + minexp=-14, + maxexp=16, + nmant=10, + iexp=5) + + +@pytest.fixture +def float32_ma(): + """Machine arithmetic parameters for float32.""" + f32 = ntypes.float32 + return 
MachArLike(f32, + machep=-23, + negep=-24, + minexp=-126, + maxexp=128, + nmant=23, + iexp=8) + + +@pytest.fixture +def float64_ma(): + """Machine arithmetic parameters for float64.""" + f64 = ntypes.float64 + return MachArLike(f64, + machep=-52, + negep=-53, + minexp=-1022, + maxexp=1024, + nmant=52, + iexp=11) + + +@pytest.mark.parametrize("dtype,ma_fixture", [ + (np.half, "float16_ma"), + (np.float32, "float32_ma"), + (np.float64, "float64_ma"), +]) +@pytest.mark.parametrize("prop", [ + 'machep', 'negep', 'minexp', 'maxexp', 'nmant', 'iexp', + 'eps', 'epsneg', 'precision', 'resolution' +]) +@pytest.mark.thread_unsafe( + reason="complex fixture setup is thread-unsafe (pytest-dev/pytest#13768.)" +) +def test_finfo_properties(dtype, ma_fixture, prop, request): + """Test that finfo properties match expected machine arithmetic values.""" + ma = request.getfixturevalue(ma_fixture) + finfo = np.finfo(dtype) + + actual = getattr(finfo, prop) + expected = getattr(ma, prop) + + assert actual == expected, ( + f"finfo({dtype}) property '{prop}' mismatch: " + f"expected {expected}, got {actual}" + ) diff --git a/numpy/_core/tests/test_function_base.py b/numpy/_core/tests/test_function_base.py index bebc7c52e9df..b78c79a6f032 100644 --- a/numpy/_core/tests/test_function_base.py +++ b/numpy/_core/tests/test_function_base.py @@ -1,20 +1,39 @@ +import platform import sys import pytest import numpy as np from numpy import ( - logspace, linspace, geomspace, dtype, array, arange, isnan, - ndarray, sqrt, nextafter, stack, errstate - ) + arange, + array, + dtype, + errstate, + geomspace, + isnan, + linspace, + logspace, + ndarray, + nextafter, + sqrt, + stack, +) from numpy._core import sctypes from numpy._core.function_base import add_newdoc from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_array_equal, assert_allclose, - IS_PYPY - ) + assert_, + assert_allclose, + assert_array_equal, + assert_equal, + assert_raises, +) +def _is_armhf(): + # Check if the 
current platform is ARMHF (32-bit ARM architecture) + architecture = platform.architecture() + return platform.machine().startswith('arm') and architecture[0] == '32bit' + class PhysicalQuantity(float): def __new__(cls, value): return float.__new__(cls, value) @@ -36,10 +55,10 @@ def __mul__(self, x): return PhysicalQuantity(float(x) * float(self)) __rmul__ = __mul__ - def __div__(self, x): + def __truediv__(self, x): return PhysicalQuantity(float(self) / float(x)) - def __rdiv__(self, x): + def __rtruediv__(self, x): return PhysicalQuantity(float(x) / float(self)) @@ -192,29 +211,29 @@ def test_complex(self): assert_allclose(y, [-4j, -12j, -36j, -108j, -324j]) assert_array_equal(y.real, 0) - y = geomspace(1+1j, 1000+1000j, num=4) - assert_allclose(y, [1+1j, 10+10j, 100+100j, 1000+1000j]) + y = geomspace(1 + 1j, 1000 + 1000j, num=4) + assert_allclose(y, [1 + 1j, 10 + 10j, 100 + 100j, 1000 + 1000j]) - y = geomspace(-1+1j, -1000+1000j, num=4) - assert_allclose(y, [-1+1j, -10+10j, -100+100j, -1000+1000j]) + y = geomspace(-1 + 1j, -1000 + 1000j, num=4) + assert_allclose(y, [-1 + 1j, -10 + 10j, -100 + 100j, -1000 + 1000j]) # Logarithmic spirals y = geomspace(-1, 1, num=3, dtype=complex) assert_allclose(y, [-1, 1j, +1]) - y = geomspace(0+3j, -3+0j, 3) - assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j]) - y = geomspace(0+3j, 3+0j, 3) - assert_allclose(y, [0+3j, 3/sqrt(2)+3j/sqrt(2), 3+0j]) - y = geomspace(-3+0j, 0-3j, 3) - assert_allclose(y, [-3+0j, -3/sqrt(2)-3j/sqrt(2), 0-3j]) - y = geomspace(0+3j, -3+0j, 3) - assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j]) - y = geomspace(-2-3j, 5+7j, 7) - assert_allclose(y, [-2-3j, -0.29058977-4.15771027j, - 2.08885354-4.34146838j, 4.58345529-3.16355218j, - 6.41401745-0.55233457j, 6.75707386+3.11795092j, - 5+7j]) + y = geomspace(0 + 3j, -3 + 0j, 3) + assert_allclose(y, [0 + 3j, -3 / sqrt(2) + 3j / sqrt(2), -3 + 0j]) + y = geomspace(0 + 3j, 3 + 0j, 3) + assert_allclose(y, [0 + 3j, 3 / sqrt(2) + 3j / sqrt(2), 3 + 0j]) + 
y = geomspace(-3 + 0j, 0 - 3j, 3) + assert_allclose(y, [-3 + 0j, -3 / sqrt(2) - 3j / sqrt(2), 0 - 3j]) + y = geomspace(0 + 3j, -3 + 0j, 3) + assert_allclose(y, [0 + 3j, -3 / sqrt(2) + 3j / sqrt(2), -3 + 0j]) + y = geomspace(-2 - 3j, 5 + 7j, 7) + assert_allclose(y, [-2 - 3j, -0.29058977 - 4.15771027j, + 2.08885354 - 4.34146838j, 4.58345529 - 3.16355218j, + 6.41401745 - 0.55233457j, 6.75707386 + 3.11795092j, + 5 + 7j]) # Type promotion should prevent the -5 from becoming a NaN y = geomspace(3j, -5, 2) @@ -223,16 +242,15 @@ def test_complex(self): assert_allclose(y, [-5, 3j]) def test_complex_shortest_path(self): - # test the shortest logorithmic spiral is used, see gh-25644 + # test the shortest logarithmic spiral is used, see gh-25644 x = 1.2 + 3.4j - y = np.exp(1j*(np.pi-.1)) * x + y = np.exp(1j * (np.pi - .1)) * x z = np.geomspace(x, y, 5) expected = np.array([1.2 + 3.4j, -1.47384 + 3.2905616j, -3.33577588 + 1.36842949j, -3.36011056 - 1.30753855j, -1.53343861 - 3.26321406j]) np.testing.assert_array_almost_equal(z, expected) - def test_dtype(self): y = geomspace(1, 1e6, dtype='float32') assert_equal(y.dtype, dtype('float32')) @@ -265,8 +283,8 @@ def test_start_stop_array_scalar(self): def test_start_stop_array(self): # Try to use all special cases. 
- start = array([1.e0, 32., 1j, -4j, 1+1j, -1]) - stop = array([1.e4, 2., 16j, -324j, 10000+10000j, 1]) + start = array([1.e0, 32., 1j, -4j, 1 + 1j, -1]) + stop = array([1.e4, 2., 16j, -324j, 10000 + 10000j, 1]) t1 = geomspace(start, stop, 5) t2 = stack([geomspace(_start, _stop, 5) for _start, _stop in zip(start, stop)], axis=1) @@ -360,9 +378,9 @@ def test_start_stop_array(self): def test_complex(self): lim1 = linspace(1 + 2j, 3 + 4j, 5) - t1 = array([1.0+2.j, 1.5+2.5j, 2.0+3j, 2.5+3.5j, 3.0+4j]) + t1 = array([1.0 + 2.j, 1.5 + 2.5j, 2.0 + 3j, 2.5 + 3.5j, 3.0 + 4j]) lim2 = linspace(1j, 10, 5) - t2 = array([0.0+1.j, 2.5+0.75j, 5.0+0.5j, 7.5+0.25j, 10.0+0j]) + t2 = array([0.0 + 1.j, 2.5 + 0.75j, 5.0 + 0.5j, 7.5 + 0.25j, 10.0 + 0j]) assert_equal(lim1, t1) assert_equal(lim2, t2) @@ -415,6 +433,9 @@ def __mul__(self, other): assert_equal(linspace(one, five), linspace(1, 5)) + # even when not explicitly enabled via FPSCR register + @pytest.mark.xfail(_is_armhf(), + reason="ARMHF/AArch32 platforms seem to FTZ subnormals") def test_denormal_numbers(self): # Regression test for gh-5437. 
Will probably fail when compiled # with ICC, which flushes denormals to zero @@ -424,8 +445,8 @@ def test_denormal_numbers(self): def test_equivalent_to_arange(self): for j in range(1000): - assert_equal(linspace(0, j, j+1, dtype=int), - arange(j+1, dtype=int)) + assert_equal(linspace(0, j, j + 1, dtype=int), + arange(j + 1, dtype=int)) def test_retstep(self): for num in [0, 1, 2]: @@ -448,7 +469,7 @@ def test_object(self): stop = array(2, dtype='O') y = linspace(start, stop, 3) assert_array_equal(y, array([1., 1.5, 2.])) - + def test_round_negative(self): y = linspace(-1, 3, num=8, dtype=int) t = array([-1, -1, 0, 0, 1, 1, 2, 3], dtype=int) @@ -460,17 +481,16 @@ def test_any_step_zero_and_not_mult_inplace(self): stop = array([2.0, 1.0]) y = linspace(start, stop, 3) assert_array_equal(y, array([[0.0, 1.0], [1.0, 1.0], [2.0, 1.0]])) - + class TestAdd_newdoc: @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") - @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") def test_add_doc(self): # test that np.add_newdoc did attach a docstring successfully: tgt = "Current flat index into the array." assert_equal(np._core.flatiter.index.__doc__[:len(tgt)], tgt) - assert_(len(np._core.ufunc.identity.__doc__) > 300) + assert_(len(np._core.ufunc.identity.__doc__) > 250) assert_(len(np.lib._index_tricks_impl.mgrid.__doc__) > 300) @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") diff --git a/numpy/_core/tests/test_getlimits.py b/numpy/_core/tests/test_getlimits.py index 8378bad19391..786fd1d494e4 100644 --- a/numpy/_core/tests/test_getlimits.py +++ b/numpy/_core/tests/test_getlimits.py @@ -1,13 +1,15 @@ """ Test functions for limits module. 
""" +import types import warnings -import numpy as np + import pytest + +import numpy as np +from numpy import double, half, longdouble, single from numpy._core import finfo, iinfo -from numpy import half, single, double, longdouble -from numpy.testing import assert_equal, assert_, assert_raises -from numpy._core.getlimits import _discovered_machar, _float_ma +from numpy.testing import assert_, assert_equal, assert_raises ################################################## @@ -77,10 +79,13 @@ def test_regression_gh23867(self): class NonHashableWithDtype: __hash__ = None dtype = np.dtype('float32') - + x = NonHashableWithDtype() assert np.finfo(x) == np.finfo(x.dtype) - + + def test_no_none_sense(self): + assert_raises(TypeError, finfo, None) + class TestIinfo: def test_basic(self): @@ -106,7 +111,7 @@ def test_iinfo_repr(self): assert_equal(repr(np.iinfo(np.int16)), expected) def test_finfo_repr(self): - expected = "finfo(resolution=1e-06, min=-3.4028235e+38," + \ + expected = "finfo(resolution=1e-06, min=-3.4028235e+38,"\ " max=3.4028235e+38, dtype=float32)" assert_equal(repr(np.finfo(np.float32)), expected) @@ -136,53 +141,20 @@ def test_instances(): finfo(np.int64(1)) -def assert_ma_equal(discovered, ma_like): - # Check MachAr-like objects same as calculated MachAr instances - for key, value in discovered.__dict__.items(): - assert_equal(value, getattr(ma_like, key)) - if hasattr(value, 'shape'): - assert_equal(value.shape, getattr(ma_like, key).shape) - assert_equal(value.dtype, getattr(ma_like, key).dtype) - - -def test_known_types(): - # Test we are correctly compiling parameters for known types - for ftype, ma_like in ((np.float16, _float_ma[16]), - (np.float32, _float_ma[32]), - (np.float64, _float_ma[64])): - assert_ma_equal(_discovered_machar(ftype), ma_like) - # Suppress warning for broken discovery of double double on PPC - with np.errstate(all='ignore'): - ld_ma = _discovered_machar(np.longdouble) - bytes = np.dtype(np.longdouble).itemsize - if 
(ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16): - # 80-bit extended precision - assert_ma_equal(ld_ma, _float_ma[80]) - elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16: - # IEE 754 128-bit - assert_ma_equal(ld_ma, _float_ma[128]) - - def test_subnormal_warning(): """Test that the subnormal is zero warning is not being raised.""" - with np.errstate(all='ignore'): - ld_ma = _discovered_machar(np.longdouble) - bytes = np.dtype(np.longdouble).itemsize with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') - if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16): - # 80-bit extended precision - ld_ma.smallest_subnormal - assert len(w) == 0 - elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16: - # IEE 754 128-bit - ld_ma.smallest_subnormal - assert len(w) == 0 - else: - # Double double - ld_ma.smallest_subnormal - # This test may fail on some platforms - assert len(w) == 0 + # Test for common float types + for dtype in [np.float16, np.float32, np.float64]: + f = finfo(dtype) + _ = f.smallest_subnormal + # Also test longdouble + with np.errstate(all='ignore'): + fld = finfo(np.longdouble) + _ = fld.smallest_subnormal + # Check no warnings were raised + assert len(w) == 0 def test_plausible_finfo(): @@ -192,3 +164,11 @@ def test_plausible_finfo(): assert_(info.nmant > 1) assert_(info.minexp < -1) assert_(info.maxexp > 1) + + +class TestRuntimeSubscriptable: + def test_finfo_generic(self): + assert isinstance(np.finfo[np.float64], types.GenericAlias) + + def test_iinfo_generic(self): + assert isinstance(np.iinfo[np.int_], types.GenericAlias) diff --git a/numpy/_core/tests/test_half.py b/numpy/_core/tests/test_half.py index fbc1bf6a0a6d..3ced5b466a44 100644 --- a/numpy/_core/tests/test_half.py +++ b/numpy/_core/tests/test_half.py @@ -1,9 +1,10 @@ import platform + import pytest import numpy as np -from numpy import uint16, float16, float32, float64 -from numpy.testing import assert_, assert_equal, 
_OLD_PROMOTION, IS_WASM +from numpy import float16, float32, float64, uint16 +from numpy.testing import IS_WASM, assert_, assert_equal def assert_raises_fpe(strmatch, callable, *args, **kwargs): @@ -11,62 +12,69 @@ def assert_raises_fpe(strmatch, callable, *args, **kwargs): callable(*args, **kwargs) except FloatingPointError as exc: assert_(str(exc).find(strmatch) >= 0, - "Did not raise floating point %s error" % strmatch) + f"Did not raise floating point {strmatch} error") else: assert_(False, - "Did not raise floating point %s error" % strmatch) + f"Did not raise floating point {strmatch} error") class TestHalf: - def setup_method(self): + def _create_arrays_all(self): # An array of all possible float16 values - self.all_f16 = np.arange(0x10000, dtype=uint16) - self.all_f16.dtype = float16 + all_f16 = np.arange(0x10000, dtype=uint16) + all_f16 = all_f16.view(float16) - # NaN value can cause an invalid FP exception if HW is been used + # NaN value can cause an invalid FP exception if HW is being used with np.errstate(invalid='ignore'): - self.all_f32 = np.array(self.all_f16, dtype=float32) - self.all_f64 = np.array(self.all_f16, dtype=float64) + all_f32 = np.array(all_f16, dtype=float32) + all_f64 = np.array(all_f16, dtype=float64) + return all_f16, all_f32, all_f64 + def _create_arrays_nonan(self): # An array of all non-NaN float16 values, in sorted order - self.nonan_f16 = np.concatenate( + nonan_f16 = np.concatenate( (np.arange(0xfc00, 0x7fff, -1, dtype=uint16), np.arange(0x0000, 0x7c01, 1, dtype=uint16))) - self.nonan_f16.dtype = float16 - self.nonan_f32 = np.array(self.nonan_f16, dtype=float32) - self.nonan_f64 = np.array(self.nonan_f16, dtype=float64) - - # An array of all finite float16 values, in sorted order - self.finite_f16 = self.nonan_f16[1:-1] - self.finite_f32 = self.nonan_f32[1:-1] - self.finite_f64 = self.nonan_f64[1:-1] + nonan_f16 = nonan_f16.view(float16) + nonan_f32 = np.array(nonan_f16, dtype=float32) + nonan_f64 = np.array(nonan_f16, 
dtype=float64) + return nonan_f16, nonan_f32, nonan_f64 + + def _create_arrays_finite(self): + nonan_f16, nonan_f32, nonan_f64 = self._create_arrays_nonan() + finite_f16 = nonan_f16[1:-1] + finite_f32 = nonan_f32[1:-1] + finite_f64 = nonan_f64[1:-1] + return finite_f16, finite_f32, finite_f64 def test_half_conversions(self): """Checks that all 16-bit values survive conversion to/from 32-bit and 64-bit float""" # Because the underlying routines preserve the NaN bits, every # value is preserved when converting to/from other floats. + all_f16, all_f32, all_f64 = self._create_arrays_all() + nonan_f16, _, _ = self._create_arrays_nonan() # Convert from float32 back to float16 with np.errstate(invalid='ignore'): - b = np.array(self.all_f32, dtype=float16) - # avoid testing NaNs due to differ bits wither Q/SNaNs + b = np.array(all_f32, dtype=float16) + # avoid testing NaNs due to differing bit patterns in Q/S NaNs b_nn = b == b - assert_equal(self.all_f16[b_nn].view(dtype=uint16), + assert_equal(all_f16[b_nn].view(dtype=uint16), b[b_nn].view(dtype=uint16)) # Convert from float64 back to float16 with np.errstate(invalid='ignore'): - b = np.array(self.all_f64, dtype=float16) + b = np.array(all_f64, dtype=float16) b_nn = b == b - assert_equal(self.all_f16[b_nn].view(dtype=uint16), + assert_equal(all_f16[b_nn].view(dtype=uint16), b[b_nn].view(dtype=uint16)) # Convert float16 to longdouble and back # This doesn't necessarily preserve the extra NaN bits, # so exclude NaNs. 
- a_ld = np.array(self.nonan_f16, dtype=np.longdouble) + a_ld = np.array(nonan_f16, dtype=np.longdouble) b = np.array(a_ld, dtype=float16) - assert_equal(self.nonan_f16.view(dtype=uint16), + assert_equal(nonan_f16.view(dtype=uint16), b.view(dtype=uint16)) # Check the range for which all integers can be represented @@ -85,6 +93,21 @@ def test_half_conversion_to_string(self, string_dt): arr = np.ones(3, dtype=np.float16).astype(string_dt) assert arr.dtype == expected_dt + @pytest.mark.parametrize("dtype", ["S", "U", object]) + def test_to_half_cast_error(self, dtype): + arr = np.array(["3M"], dtype=dtype) + with pytest.raises(ValueError): + arr.astype(np.float16) + + arr = np.array(["23490349034"], dtype=dtype) + with np.errstate(all="warn"): + with pytest.warns(RuntimeWarning): + arr.astype(np.float16) + + with np.errstate(all="raise"): + with pytest.raises(FloatingPointError): + arr.astype(np.float16) + @pytest.mark.parametrize("string_dt", ["S", "U"]) def test_half_conversion_from_string(self, string_dt): string = np.array("3.1416", dtype=string_dt) @@ -93,14 +116,13 @@ def test_half_conversion_from_string(self, string_dt): @pytest.mark.parametrize("offset", [None, "up", "down"]) @pytest.mark.parametrize("shift", [None, "up", "down"]) @pytest.mark.parametrize("float_t", [np.float32, np.float64]) - @np._no_nep50_warning() def test_half_conversion_rounding(self, float_t, shift, offset): # Assumes that round to even is used during casting. 
max_pattern = np.float16(np.finfo(np.float16).max).view(np.uint16) # Test all (positive) finite numbers, denormals are most interesting # however: - f16s_patterns = np.arange(0, max_pattern+1, dtype=np.uint16) + f16s_patterns = np.arange(0, max_pattern + 1, dtype=np.uint16) f16s_float = f16s_patterns.view(np.float16).astype(float_t) # Shift the values by half a bit up or a down (or do not shift), @@ -120,8 +142,8 @@ def test_half_conversion_rounding(self, float_t, shift, offset): # Convert back to float16 and its bit pattern: res_patterns = f16s_float.astype(np.float16).view(np.uint16) - # The above calculations tries the original values, or the exact - # mid points between the float16 values. It then further offsets them + # The above calculation tries the original values, or the exact + # midpoints between the float16 values. It then further offsets them # by as little as possible. If no offset occurs, "round to even" # logic will be necessary, an arbitrarily small offset should cause # normal up/down rounding always. 
@@ -171,34 +193,35 @@ def test_half_conversion_denormal_round_even(self, float_t, uint_t, bits): assert larger_value.astype(np.float16) == smallest_value def test_nans_infs(self): + all_f16, all_f32, _ = self._create_arrays_all() with np.errstate(all='ignore'): # Check some of the ufuncs - assert_equal(np.isnan(self.all_f16), np.isnan(self.all_f32)) - assert_equal(np.isinf(self.all_f16), np.isinf(self.all_f32)) - assert_equal(np.isfinite(self.all_f16), np.isfinite(self.all_f32)) - assert_equal(np.signbit(self.all_f16), np.signbit(self.all_f32)) + assert_equal(np.isnan(all_f16), np.isnan(all_f32)) + assert_equal(np.isinf(all_f16), np.isinf(all_f32)) + assert_equal(np.isfinite(all_f16), np.isfinite(all_f32)) + assert_equal(np.signbit(all_f16), np.signbit(all_f32)) assert_equal(np.spacing(float16(65504)), np.inf) # Check comparisons of all values with NaN nan = float16(np.nan) - assert_(not (self.all_f16 == nan).any()) - assert_(not (nan == self.all_f16).any()) + assert_(not (all_f16 == nan).any()) + assert_(not (nan == all_f16).any()) - assert_((self.all_f16 != nan).all()) - assert_((nan != self.all_f16).all()) + assert_((all_f16 != nan).all()) + assert_((nan != all_f16).all()) - assert_(not (self.all_f16 < nan).any()) - assert_(not (nan < self.all_f16).any()) + assert_(not (all_f16 < nan).any()) + assert_(not (nan < all_f16).any()) - assert_(not (self.all_f16 <= nan).any()) - assert_(not (nan <= self.all_f16).any()) + assert_(not (all_f16 <= nan).any()) + assert_(not (nan <= all_f16).any()) - assert_(not (self.all_f16 > nan).any()) - assert_(not (nan > self.all_f16).any()) + assert_(not (all_f16 > nan).any()) + assert_(not (nan > all_f16).any()) - assert_(not (self.all_f16 >= nan).any()) - assert_(not (nan >= self.all_f16).any()) + assert_(not (all_f16 >= nan).any()) + assert_(not (nan >= all_f16).any()) def test_half_values(self): """Confirms a small number of known half values""" @@ -208,7 +231,7 @@ def test_half_values(self): 65504, -65504, # Maximum magnitude 
2.0**(-14), -2.0**(-14), # Minimum normal 2.0**(-24), -2.0**(-24), # Minimum subnormal - 0, -1/1e1000, # Signed zeros + 0, -1 / 1e1000, # Signed zeros np.inf, -np.inf]) b = np.array([0x3c00, 0xbc00, 0x4000, 0xc000, @@ -218,7 +241,7 @@ def test_half_values(self): 0x0001, 0x8001, 0x0000, 0x8000, 0x7c00, 0xfc00], dtype=uint16) - b.dtype = float16 + b = b.view(dtype=float16) assert_equal(a, b) def test_half_rounding(self): @@ -226,16 +249,16 @@ def test_half_rounding(self): a = np.array([2.0**-25 + 2.0**-35, # Rounds to minimum subnormal 2.0**-25, # Underflows to zero (nearest even mode) 2.0**-26, # Underflows to zero - 1.0+2.0**-11 + 2.0**-16, # rounds to 1.0+2**(-10) - 1.0+2.0**-11, # rounds to 1.0 (nearest even mode) - 1.0+2.0**-12, # rounds to 1.0 + 1.0 + 2.0**-11 + 2.0**-16, # rounds to 1.0+2**(-10) + 1.0 + 2.0**-11, # rounds to 1.0 (nearest even mode) + 1.0 + 2.0**-12, # rounds to 1.0 65519, # rounds to 65504 65520], # rounds to inf dtype=float64) rounded = [2.0**-24, 0.0, 0.0, - 1.0+2.0**(-10), + 1.0 + 2.0**(-10), 1.0, 1.0, 65504, @@ -255,9 +278,10 @@ def test_half_rounding(self): def test_half_correctness(self): """Take every finite float16, and check the casting functions with a manual conversion.""" + finite_f16, finite_f32, finite_f64 = self._create_arrays_finite() # Create an array of all finite float16s - a_bits = self.finite_f16.view(dtype=uint16) + a_bits = finite_f16.view(dtype=uint16) # Convert to 64-bit float manually a_sgn = (-1.0)**((a_bits & 0x8000) >> 15) @@ -270,29 +294,30 @@ def test_half_correctness(self): a_manual = a_sgn * a_man * 2.0**a_exp - a32_fail = np.nonzero(self.finite_f32 != a_manual)[0] + a32_fail = np.nonzero(finite_f32 != a_manual)[0] if len(a32_fail) != 0: bad_index = a32_fail[0] - assert_equal(self.finite_f32, a_manual, + assert_equal(finite_f32, a_manual, "First non-equal is half value 0x%x -> %g != %g" % (a_bits[bad_index], - self.finite_f32[bad_index], + finite_f32[bad_index], a_manual[bad_index])) - a64_fail = 
np.nonzero(self.finite_f64 != a_manual)[0] + a64_fail = np.nonzero(finite_f64 != a_manual)[0] if len(a64_fail) != 0: bad_index = a64_fail[0] - assert_equal(self.finite_f64, a_manual, + assert_equal(finite_f64, a_manual, "First non-equal is half value 0x%x -> %g != %g" % (a_bits[bad_index], - self.finite_f64[bad_index], + finite_f64[bad_index], a_manual[bad_index])) def test_half_ordering(self): """Make sure comparisons are working right""" + nonan_f16, _, _ = self._create_arrays_nonan() # All non-NaN float16 values in reverse order - a = self.nonan_f16[::-1].copy() + a = nonan_f16[::-1].copy() # 32-bit float copy b = np.array(a, dtype=float32) @@ -308,8 +333,8 @@ def test_half_ordering(self): assert_((a[1:] >= a[:-1]).all()) assert_(not (a[1:] < a[:-1]).any()) # All != except for +/-0 - assert_equal(np.nonzero(a[:-1] < a[1:])[0].size, a.size-2) - assert_equal(np.nonzero(a[1:] > a[:-1])[0].size, a.size-2) + assert_equal(np.nonzero(a[:-1] < a[1:])[0].size, a.size - 2) + assert_equal(np.nonzero(a[1:] > a[:-1])[0].size, a.size - 2) def test_half_funcs(self): """Test the various ArrFuncs""" @@ -324,7 +349,7 @@ def test_half_funcs(self): assert_equal(a, np.ones((5,), dtype=float16)) # nonzero and copyswap - a = np.array([0, 0, -1, -1/1e20, 0, 2.0**-24, 7.629e-6], dtype=float16) + a = np.array([0, 0, -1, -1 / 1e20, 0, 2.0**-24, 7.629e-6], dtype=float16) assert_equal(a.nonzero()[0], [2, 5, 6]) a = a.byteswap() @@ -359,7 +384,7 @@ def test_spacing_nextafter(self): hnan = np.array((np.nan,), dtype=float16) a_f16 = a.view(dtype=float16) - assert_equal(np.spacing(a_f16[:-1]), a_f16[1:]-a_f16[:-1]) + assert_equal(np.spacing(a_f16[:-1]), a_f16[1:] - a_f16[:-1]) assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:]) assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1]) @@ -384,7 +409,7 @@ def test_spacing_nextafter(self): a |= 0x8000 assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1])) - assert_equal(np.spacing(a_f16[1:]), a_f16[:-1]-a_f16[1:]) + 
assert_equal(np.spacing(a_f16[1:]), a_f16[:-1] - a_f16[1:]) assert_equal(np.nextafter(a_f16[0], hinf), -a_f16[1]) assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1]) @@ -460,8 +485,7 @@ def test_half_ufuncs(self): assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2])) assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12]) - @np._no_nep50_warning() - def test_half_coercion(self, weak_promotion): + def test_half_coercion(self): """Test that half gets coerced properly with the other types""" a16 = np.array((1,), dtype=float16) a32 = np.array((1,), dtype=float32) @@ -471,14 +495,12 @@ def test_half_coercion(self, weak_promotion): assert np.power(a16, 2).dtype == float16 assert np.power(a16, 2.0).dtype == float16 assert np.power(a16, b16).dtype == float16 - expected_dt = float32 if weak_promotion else float16 - assert np.power(a16, b32).dtype == expected_dt + assert np.power(a16, b32).dtype == float32 assert np.power(a16, a16).dtype == float16 assert np.power(a16, a32).dtype == float32 - expected_dt = float16 if weak_promotion else float64 - assert np.power(b16, 2).dtype == expected_dt - assert np.power(b16, 2.0).dtype == expected_dt + assert np.power(b16, 2).dtype == float16 + assert np.power(b16, 2.0).dtype == float16 assert np.power(b16, b16).dtype, float16 assert np.power(b16, b32).dtype, float32 assert np.power(b16, a16).dtype, float16 @@ -486,8 +508,7 @@ def test_half_coercion(self, weak_promotion): assert np.power(a32, a16).dtype == float32 assert np.power(a32, b16).dtype == float32 - expected_dt = float32 if weak_promotion else float16 - assert np.power(b32, a16).dtype == expected_dt + assert np.power(b32, a16).dtype == float32 assert np.power(b32, b16).dtype == float32 @pytest.mark.skipif(platform.machine() == "armv5tel", @@ -502,40 +523,40 @@ def test_half_fpe(self): by16 = float16(1e4) # Underflow errors - assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sx16) - assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sy16) 
- assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sx16) - assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sy16) - assert_raises_fpe('underflow', lambda a, b:a/b, sx16, bx16) - assert_raises_fpe('underflow', lambda a, b:a/b, sx16, by16) - assert_raises_fpe('underflow', lambda a, b:a/b, sy16, bx16) - assert_raises_fpe('underflow', lambda a, b:a/b, sy16, by16) - assert_raises_fpe('underflow', lambda a, b:a/b, + assert_raises_fpe('underflow', lambda a, b: a * b, sx16, sx16) + assert_raises_fpe('underflow', lambda a, b: a * b, sx16, sy16) + assert_raises_fpe('underflow', lambda a, b: a * b, sy16, sx16) + assert_raises_fpe('underflow', lambda a, b: a * b, sy16, sy16) + assert_raises_fpe('underflow', lambda a, b: a / b, sx16, bx16) + assert_raises_fpe('underflow', lambda a, b: a / b, sx16, by16) + assert_raises_fpe('underflow', lambda a, b: a / b, sy16, bx16) + assert_raises_fpe('underflow', lambda a, b: a / b, sy16, by16) + assert_raises_fpe('underflow', lambda a, b: a / b, float16(2.**-14), float16(2**11)) - assert_raises_fpe('underflow', lambda a, b:a/b, + assert_raises_fpe('underflow', lambda a, b: a / b, float16(-2.**-14), float16(2**11)) - assert_raises_fpe('underflow', lambda a, b:a/b, - float16(2.**-14+2**-24), float16(2)) - assert_raises_fpe('underflow', lambda a, b:a/b, - float16(-2.**-14-2**-24), float16(2)) - assert_raises_fpe('underflow', lambda a, b:a/b, - float16(2.**-14+2**-23), float16(4)) + assert_raises_fpe('underflow', lambda a, b: a / b, + float16(2.**-14 + 2**-24), float16(2)) + assert_raises_fpe('underflow', lambda a, b: a / b, + float16(-2.**-14 - 2**-24), float16(2)) + assert_raises_fpe('underflow', lambda a, b: a / b, + float16(2.**-14 + 2**-23), float16(4)) # Overflow errors - assert_raises_fpe('overflow', lambda a, b:a*b, bx16, bx16) - assert_raises_fpe('overflow', lambda a, b:a*b, bx16, by16) - assert_raises_fpe('overflow', lambda a, b:a*b, by16, bx16) - assert_raises_fpe('overflow', lambda a, b:a*b, by16, by16) - 
assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sx16) - assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sy16) - assert_raises_fpe('overflow', lambda a, b:a/b, by16, sx16) - assert_raises_fpe('overflow', lambda a, b:a/b, by16, sy16) - assert_raises_fpe('overflow', lambda a, b:a+b, + assert_raises_fpe('overflow', lambda a, b: a * b, bx16, bx16) + assert_raises_fpe('overflow', lambda a, b: a * b, bx16, by16) + assert_raises_fpe('overflow', lambda a, b: a * b, by16, bx16) + assert_raises_fpe('overflow', lambda a, b: a * b, by16, by16) + assert_raises_fpe('overflow', lambda a, b: a / b, bx16, sx16) + assert_raises_fpe('overflow', lambda a, b: a / b, bx16, sy16) + assert_raises_fpe('overflow', lambda a, b: a / b, by16, sx16) + assert_raises_fpe('overflow', lambda a, b: a / b, by16, sy16) + assert_raises_fpe('overflow', lambda a, b: a + b, float16(65504), float16(17)) - assert_raises_fpe('overflow', lambda a, b:a-b, + assert_raises_fpe('overflow', lambda a, b: a - b, float16(-65504), float16(17)) assert_raises_fpe('overflow', np.nextafter, float16(65504), float16(np.inf)) - assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf)) + assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf)) # noqa: E501 assert_raises_fpe('overflow', np.spacing, float16(65504)) # Invalid value errors @@ -544,9 +565,9 @@ def test_half_fpe(self): assert_raises_fpe('invalid', np.spacing, float16(np.nan)) # These should not raise - float16(65472)+float16(32) - float16(2**-13)/float16(2) - float16(2**-14)/float16(2**10) + float16(65472) + float16(32) + float16(2**-13) / float16(2) + float16(2**-14) / float16(2**10) np.spacing(float16(-65504)) np.nextafter(float16(65504), float16(-np.inf)) np.nextafter(float16(-65504), float16(np.inf)) @@ -554,10 +575,10 @@ def test_half_fpe(self): np.nextafter(float16(-np.inf), float16(0)) np.nextafter(float16(0), float16(np.nan)) np.nextafter(float16(np.nan), float16(0)) - float16(2**-14)/float16(2**10) - 
float16(-2**-14)/float16(2**10) - float16(2**-14+2**-23)/float16(2) - float16(-2**-14-2**-23)/float16(2) + float16(2**-14) / float16(2**10) + float16(-2**-14) / float16(2**10) + float16(2**-14 + 2**-23) / float16(2) + float16(-2**-14 - 2**-23) / float16(2) def test_half_array_interface(self): """Test that half is compatible with __array_interface__""" diff --git a/numpy/_core/tests/test_hashtable.py b/numpy/_core/tests/test_hashtable.py index e75cfceea412..6c9631b8ebfb 100644 --- a/numpy/_core/tests/test_hashtable.py +++ b/numpy/_core/tests/test_hashtable.py @@ -1,30 +1,156 @@ +import random +from collections import defaultdict +from concurrent.futures import ThreadPoolExecutor + import pytest -import random -from numpy._core._multiarray_tests import identityhash_tester +from numpy._core._multiarray_tests import ( + create_identity_hash, + identity_hash_get_item, + identity_hash_set_item_default, +) +from numpy.testing import IS_WASM @pytest.mark.parametrize("key_length", [1, 3, 6]) @pytest.mark.parametrize("length", [1, 16, 2000]) -def test_identity_hashtable(key_length, length): - # use a 30 object pool for everything (duplicates will happen) - pool = [object() for i in range(20)] +def test_identity_hashtable_get_set(key_length, length): + # no collisions expected keys_vals = [] for i in range(length): - keys = tuple(random.choices(pool, k=key_length)) - keys_vals.append((keys, random.choice(pool))) + keys = tuple(object() for _ in range(key_length)) + keys_vals.append((keys, object())) + + ht = create_identity_hash(key_length) + + for i in range(length): + key, value = keys_vals[i] + assert identity_hash_set_item_default(ht, key, value) is value + + for key, value in keys_vals: + got = identity_hash_get_item(ht, key) + assert got is value + +@pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for threads") +@pytest.mark.parametrize("key_length", [1, 3, 6]) +def test_identity_hashtable_default_thread_safety(key_length): + ht = 
create_identity_hash(key_length) + + key = tuple(object() for _ in range(key_length)) + val1 = object() + val2 = object() + + got1 = identity_hash_set_item_default(ht, key, val1) + assert got1 is val1 + + def thread_func(val): + return identity_hash_set_item_default(ht, key, val) + + with ThreadPoolExecutor(max_workers=8) as executor: + futures = [executor.submit(thread_func, val2) for _ in range(8)] + results = [f.result() for f in futures] + + assert all(r is val1 for r in results) + + +@pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for threads") +@pytest.mark.parametrize("key_length", [1, 3, 6]) +def test_identity_hashtable_set_thread_safety(key_length): + ht = create_identity_hash(key_length) + + key = tuple(object() for _ in range(key_length)) + val1 = object() + + def thread_func(val): + return identity_hash_set_item_default(ht, key, val) + + with ThreadPoolExecutor(max_workers=8) as executor: + futures = [executor.submit(thread_func, val1) for _ in range(100)] + results = [f.result() for f in futures] + + assert all(r is val1 for r in results) + +@pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for threads") +@pytest.mark.parametrize("key_length", [1, 3, 6]) +def test_identity_hashtable_get_thread_safety(key_length): + ht = create_identity_hash(key_length) + key = tuple(object() for _ in range(key_length)) + value = object() + identity_hash_set_item_default(ht, key, value) + + def thread_func(): + return identity_hash_get_item(ht, key) + + with ThreadPoolExecutor(max_workers=8) as executor: + futures = [executor.submit(thread_func) for _ in range(100)] + results = [f.result() for f in futures] + + assert all(r is value for r in results) + +@pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for threads") +@pytest.mark.parametrize("key_length", [1, 3, 6]) +@pytest.mark.parametrize("length", [1 << 4, 1 << 8, 1 << 12]) +def test_identity_hashtable_get_set_concurrent(key_length, length): + ht = 
create_identity_hash(key_length) + keys_vals = [] + for i in range(length): + keys = tuple(object() for _ in range(key_length)) + keys_vals.append((keys, object())) + + def set_item(kv): + key, value = kv + got = identity_hash_set_item_default(ht, key, value) + assert got is value + + def get_item(kv): + key, value = kv + got = identity_hash_get_item(ht, key) + assert got is None or got is value + + with ThreadPoolExecutor(max_workers=8) as executor: + futures = [] + for kv in keys_vals: + futures.append(executor.submit(set_item, kv)) + futures.append(executor.submit(get_item, kv)) + for future in futures: + future.result() + +@pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for threads") +@pytest.mark.parametrize("key_length", [3, 6, 10]) +@pytest.mark.parametrize("length", [1 << 4, 1 << 8, 1 << 12]) +def test_identity_hashtable_get_set_concurrent_collisions(key_length, length): + ht = create_identity_hash(key_length) + base_key = tuple(object() for _ in range(key_length - 1)) + keys_vals = defaultdict(list) + for i in range(length): + keys = base_key + (random.choice(base_key), ) + keys_vals[keys].append(object()) + + set_item_results = defaultdict(set) + + def set_item(kv): + key, values = kv + value = random.choice(values) + got = identity_hash_set_item_default(ht, key, value) + set_item_results[key].add(got) - dictionary = dict(keys_vals) + get_item_results = defaultdict(set) - # add a random item at the end: - keys_vals.append(random.choice(keys_vals)) - # the expected one could be different with duplicates: - expected = dictionary[keys_vals[-1][0]] + def get_item(kv): + key, values = kv + got = identity_hash_get_item(ht, key) + get_item_results[key].add(got) - res = identityhash_tester(key_length, keys_vals, replace=True) - assert res is expected + with ThreadPoolExecutor(max_workers=8) as executor: + futures = [] + for keys, values in keys_vals.items(): + futures.append(executor.submit(set_item, (keys, values))) + 
futures.append(executor.submit(get_item, (keys, values))) + for future in futures: + future.result() - # check that ensuring one duplicate definitely raises: - keys_vals.insert(0, keys_vals[-2]) - with pytest.raises(RuntimeError): - identityhash_tester(key_length, keys_vals) + for key in keys_vals.keys(): + assert len(set_item_results[key]) == 1 + set_item_value = set_item_results[key].pop() + for r in get_item_results[key]: + assert r is None or r is set_item_value diff --git a/numpy/_core/tests/test_indexerrors.py b/numpy/_core/tests/test_indexerrors.py index c1faa9555813..70e97dd6428e 100644 --- a/numpy/_core/tests/test_indexerrors.py +++ b/numpy/_core/tests/test_indexerrors.py @@ -1,7 +1,5 @@ import numpy as np -from numpy.testing import ( - assert_raises, assert_raises_regex, - ) +from numpy.testing import assert_raises, assert_raises_regex class TestIndexErrors: diff --git a/numpy/_core/tests/test_indexing.py b/numpy/_core/tests/test_indexing.py index bea1c1017fb2..7a8cd42c59aa 100644 --- a/numpy/_core/tests/test_indexing.py +++ b/numpy/_core/tests/test_indexing.py @@ -1,18 +1,23 @@ -import sys -import warnings import functools +import inspect import operator +import sys +import warnings +from itertools import product import pytest import numpy as np from numpy._core._multiarray_tests import array_indexing -from itertools import product from numpy.exceptions import ComplexWarning, VisibleDeprecationWarning from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_raises_regex, - assert_array_equal, assert_warns, HAS_REFCOUNT, IS_WASM - ) + HAS_REFCOUNT, + assert_, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) class TestIndexing: @@ -22,25 +27,25 @@ def test_index_no_floats(self): assert_raises(IndexError, lambda: a[0.0]) assert_raises(IndexError, lambda: a[0, 0.0]) assert_raises(IndexError, lambda: a[0.0, 0]) - assert_raises(IndexError, lambda: a[0.0,:]) + assert_raises(IndexError, lambda: a[0.0, :]) 
assert_raises(IndexError, lambda: a[:, 0.0]) - assert_raises(IndexError, lambda: a[:, 0.0,:]) - assert_raises(IndexError, lambda: a[0.0,:,:]) + assert_raises(IndexError, lambda: a[:, 0.0, :]) + assert_raises(IndexError, lambda: a[0.0, :, :]) assert_raises(IndexError, lambda: a[0, 0, 0.0]) assert_raises(IndexError, lambda: a[0.0, 0, 0]) assert_raises(IndexError, lambda: a[0, 0.0, 0]) assert_raises(IndexError, lambda: a[-1.4]) assert_raises(IndexError, lambda: a[0, -1.4]) assert_raises(IndexError, lambda: a[-1.4, 0]) - assert_raises(IndexError, lambda: a[-1.4,:]) + assert_raises(IndexError, lambda: a[-1.4, :]) assert_raises(IndexError, lambda: a[:, -1.4]) - assert_raises(IndexError, lambda: a[:, -1.4,:]) - assert_raises(IndexError, lambda: a[-1.4,:,:]) + assert_raises(IndexError, lambda: a[:, -1.4, :]) + assert_raises(IndexError, lambda: a[-1.4, :, :]) assert_raises(IndexError, lambda: a[0, 0, -1.4]) assert_raises(IndexError, lambda: a[-1.4, 0, 0]) assert_raises(IndexError, lambda: a[0, -1.4, 0]) assert_raises(IndexError, lambda: a[0.0:, 0.0]) - assert_raises(IndexError, lambda: a[0.0:, 0.0,:]) + assert_raises(IndexError, lambda: a[0.0:, 0.0, :]) def test_slicing_no_floats(self): a = np.array([[5]]) @@ -49,26 +54,26 @@ def test_slicing_no_floats(self): assert_raises(TypeError, lambda: a[0.0:]) assert_raises(TypeError, lambda: a[0:, 0.0:2]) assert_raises(TypeError, lambda: a[0.0::2, :0]) - assert_raises(TypeError, lambda: a[0.0:1:2,:]) + assert_raises(TypeError, lambda: a[0.0:1:2, :]) assert_raises(TypeError, lambda: a[:, 0.0:]) # stop as float. assert_raises(TypeError, lambda: a[:0.0]) assert_raises(TypeError, lambda: a[:0, 1:2.0]) assert_raises(TypeError, lambda: a[:0.0:2, :0]) - assert_raises(TypeError, lambda: a[:0.0,:]) + assert_raises(TypeError, lambda: a[:0.0, :]) assert_raises(TypeError, lambda: a[:, 0:4.0:2]) # step as float. 
assert_raises(TypeError, lambda: a[::1.0]) assert_raises(TypeError, lambda: a[0:, :2:2.0]) assert_raises(TypeError, lambda: a[1::4.0, :0]) - assert_raises(TypeError, lambda: a[::5.0,:]) + assert_raises(TypeError, lambda: a[::5.0, :]) assert_raises(TypeError, lambda: a[:, 0:4:2.0]) # mixed. assert_raises(TypeError, lambda: a[1.0:2:2.0]) assert_raises(TypeError, lambda: a[1.0::2.0]) assert_raises(TypeError, lambda: a[0:, :2.0:2.0]) assert_raises(TypeError, lambda: a[1.0:1:4.0, :0]) - assert_raises(TypeError, lambda: a[1.0:5.0:5.0,:]) + assert_raises(TypeError, lambda: a[1.0:5.0:5.0, :]) assert_raises(TypeError, lambda: a[:, 0.4:4.0:2.0]) # should still get the DeprecationWarning if step = 0. assert_raises(TypeError, lambda: a[::0.0]) @@ -113,8 +118,8 @@ def test_same_kind_index_casting(self): arr = np.arange(10).reshape(5, 2) assert_array_equal(arr[index], arr[u_index]) - arr[u_index] = np.arange(5)[:,None] - assert_array_equal(arr, np.arange(5)[:,None].repeat(2, axis=1)) + arr[u_index] = np.arange(5)[:, None] + assert_array_equal(arr, np.arange(5)[:, None].repeat(2, axis=1)) arr = np.arange(25).reshape(5, 5) assert_array_equal(arr[u_index, u_index], arr[index, index]) @@ -133,6 +138,42 @@ def test_empty_fancy_index(self): b = np.array([]) assert_raises(IndexError, a.__getitem__, b) + def test_gh_26542(self): + a = np.array([0, 1, 2]) + idx = np.array([2, 1, 0]) + a[idx] = a + expected = np.array([2, 1, 0]) + assert_equal(a, expected) + + def test_gh_26542_2d(self): + a = np.array([[0, 1, 2]]) + idx_row = np.zeros(3, dtype=int) + idx_col = np.array([2, 1, 0]) + a[idx_row, idx_col] = a + expected = np.array([[2, 1, 0]]) + assert_equal(a, expected) + + def test_gh_26542_index_overlap(self): + arr = np.arange(100) + expected_vals = np.copy(arr[:-10]) + arr[10:] = arr[:-10] + actual_vals = arr[10:] + assert_equal(actual_vals, expected_vals) + + def test_gh_26844(self): + expected = [0, 1, 3, 3, 3] + a = np.arange(5) + a[2:][a[:-2]] = 3 + assert_equal(a, expected) + + def 
test_gh_26844_segfault(self): + # check for absence of segfault for: + # https://github.com/numpy/numpy/pull/26958/files#r1854589178 + a = np.arange(5) + expected = [0, 1, 3, 3, 3] + a[2:][None, a[:-2]] = 3 + assert_equal(a, expected) + def test_ellipsis_index(self): a = np.array([[1, 2, 3], [4, 5, 6], @@ -145,7 +186,7 @@ def test_ellipsis_index(self): # Slicing with ellipsis can skip an # arbitrary number of dimensions assert_equal(a[0, ...], a[0]) - assert_equal(a[0, ...], a[0,:]) + assert_equal(a[0, ...], a[0, :]) assert_equal(a[..., 0], a[:, 0]) # Slicing with ellipsis always results @@ -197,8 +238,8 @@ def test_boolean_shape_mismatch(self): def test_boolean_indexing_onedim(self): # Indexing a 2-dimensional array with # boolean array of length one - a = np.array([[ 0., 0., 0.]]) - b = np.array([ True], dtype=bool) + a = np.array([[0., 0., 0.]]) + b = np.array([True], dtype=bool) assert_equal(a[b], a) # boolean assignment a[b] = 1. @@ -236,9 +277,9 @@ def test_boolean_indexing_twodim(self): a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) - b = np.array([[ True, False, True], - [False, True, False], - [ True, False, True]]) + b = np.array([[ True, False, True], + [False, True, False], + [ True, False, True]]) assert_equal(a[b], [1, 3, 5, 7, 9]) assert_equal(a[b[1]], [[4, 5, 6]]) assert_equal(a[b[0]], a[b[2]]) @@ -345,7 +386,7 @@ def test_trivial_fancy_not_possible(self): assert_array_equal(a[idx], idx) # this case must not go into the fast path, note that idx is - # a non-contiuguous none 1D array here. + # a non-contiguous none 1D array here. a[idx] = -1 res = np.arange(6) res[0] = -1 @@ -387,21 +428,25 @@ def test_array_like_values(self): a[...] 
= memoryview(s) assert_array_equal(a, s) - def test_subclass_writeable(self): + @pytest.mark.parametrize("writeable", [True, False]) + def test_subclass_writeable(self, writeable): d = np.rec.array([('NGC1001', 11), ('NGC1002', 1.), ('NGC1003', 1.)], dtype=[('target', 'S20'), ('V_mag', '>f4')]) - ind = np.array([False, True, True], dtype=bool) - assert_(d[ind].flags.writeable) + d.flags.writeable = writeable + # Advanced indexing results are always writeable: + ind = np.array([False, True, True], dtype=bool) + assert d[ind].flags.writeable ind = np.array([0, 1]) - assert_(d[ind].flags.writeable) - assert_(d[...].flags.writeable) - assert_(d[0].flags.writeable) + assert d[ind].flags.writeable + # Views should be writeable if the original array is: + assert d[...].flags.writeable == writeable + assert d[0].flags.writeable == writeable def test_memory_order(self): # This is not necessary to preserve. Memory layouts for # more complex indices are not as simple. a = np.arange(10) - b = np.arange(10).reshape(5,2).T + b = np.arange(10).reshape(5, 2).T assert_(a[b].flags.f_contiguous) # Takes a different implementation branch: @@ -466,7 +511,7 @@ def test_unaligned(self): x = x.view(np.dtype("S8")) x[...] = np.array("b" * 8, dtype="S") b = np.arange(d.size) - #trivial + # trivial assert_equal(d[b], d) d[b] = x # nontrivial @@ -564,31 +609,6 @@ def test_too_many_advanced_indices(self, index, num, original_ndim): with pytest.raises(IndexError): arr[(index,) * num] = 1. - @pytest.mark.skipif(IS_WASM, reason="no threading") - def test_structured_advanced_indexing(self): - # Test that copyswap(n) used by integer array indexing is threadsafe - # for structured datatypes, see gh-15387. This test can behave randomly. 
- from concurrent.futures import ThreadPoolExecutor - - # Create a deeply nested dtype to make a failure more likely: - dt = np.dtype([("", "f8")]) - dt = np.dtype([("", dt)] * 2) - dt = np.dtype([("", dt)] * 2) - # The array should be large enough to likely run into threading issues - arr = np.random.uniform(size=(6000, 8)).view(dt)[:, 0] - - rng = np.random.default_rng() - def func(arr): - indx = rng.integers(0, len(arr), size=6000, dtype=np.intp) - arr[indx] - - tpe = ThreadPoolExecutor(max_workers=8) - futures = [tpe.submit(func, arr) for _ in range(10)] - for f in futures: - f.result() - - assert arr.dtype is dt - def test_nontuple_ndindex(self): a = np.arange(25).reshape((5, 5)) assert_equal(a[[0, 1]], np.array([a[0], a[1]])) @@ -600,7 +620,7 @@ class TestFieldIndexing: def test_scalar_return_type(self): # Field access on an array should return an array, even if it # is 0-d. - a = np.zeros((), [('a','f8')]) + a = np.zeros((), [('a', 'f8')]) assert_(isinstance(a['a'], np.ndarray)) assert_(isinstance(a[['a']], np.ndarray)) @@ -626,9 +646,9 @@ def test_prepend_not_one(self): a = np.zeros(5) # Too large and not only ones. 
- assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1))) + assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1))) assert_raises(ValueError, assign, a, s_[[1, 2, 3],], np.ones((2, 1))) - assert_raises(ValueError, assign, a, s_[[[1], [2]],], np.ones((2,2,1))) + assert_raises(ValueError, assign, a, s_[[[1], [2]],], np.ones((2, 2, 1))) def test_simple_broadcasting_errors(self): assign = self.assign @@ -646,12 +666,12 @@ def test_simple_broadcasting_errors(self): ([0, 1], ..., 0), (..., [1, 2], [1, 2])]) def test_broadcast_error_reports_correct_shape(self, index): - values = np.zeros((100, 100)) # will never broadcast below + values = np.zeros((100, 100)) # will never broadcast below arr = np.zeros((3, 4, 5, 6, 7)) # We currently report without any spaces (could be changed) shape_str = str(arr[index].shape).replace(" ", "") - + with pytest.raises(ValueError) as e: arr[index] = values @@ -666,7 +686,7 @@ def test_index_is_larger(self): def test_broadcast_subspace(self): a = np.zeros((100, 100)) - v = np.arange(100)[:,None] + v = np.arange(100)[:, None] b = np.arange(100)[::-1] a[b] = v assert_((a[::-1] == v).all()) @@ -714,7 +734,6 @@ class SubClass(np.ndarray): s_fancy = s[[0, 1, 2]] assert_(s_fancy.flags.writeable) - def test_finalize_gets_full_info(self): # Array finalize should be called on the filled array. class SubClass(np.ndarray): @@ -727,7 +746,7 @@ def __array_finalize__(self, old): assert_array_equal(new_s.finalize_status, new_s) assert_array_equal(new_s.old, s) - new_s = s[[0,1,2,3]] + new_s = s[[0, 1, 2, 3]] assert_array_equal(new_s.finalize_status, new_s) assert_array_equal(new_s.old, s) @@ -749,35 +768,36 @@ def test_boolean_index_cast_assign(self): assert_equal(zero_array[0, 1], 1) # Fancy indexing works, although we get a cast warning. 
- assert_warns(ComplexWarning, + pytest.warns(ComplexWarning, zero_array.__setitem__, ([0], [1]), np.array([2 + 1j])) assert_equal(zero_array[0, 1], 2) # No complex part # Cast complex to float, throwing away the imaginary portion. - assert_warns(ComplexWarning, + pytest.warns(ComplexWarning, zero_array.__setitem__, bool_index, np.array([1j])) assert_equal(zero_array[0, 1], 0) + class TestFancyIndexingEquivalence: def test_object_assign(self): # Check that the field and object special case using copyto is active. # The right hand side cannot be converted to an array here. a = np.arange(5, dtype=object) b = a.copy() - a[:3] = [1, (1,2), 3] - b[[0, 1, 2]] = [1, (1,2), 3] + a[:3] = [1, (1, 2), 3] + b[[0, 1, 2]] = [1, (1, 2), 3] assert_array_equal(a, b) # test same for subspace fancy indexing b = np.arange(5, dtype=object)[None, :] - b[[0], :3] = [[1, (1,2), 3]] + b[[0], :3] = [[1, (1, 2), 3]] assert_array_equal(a, b[0]) # Check that swapping of axes works. # There was a bug that made the later assignment throw a ValueError # do to an incorrectly transposed temporary right hand side (gh-5714) b = b.T - b[:3, [0]] = [[1], [(1,2)], [3]] + b[:3, [0]] = [[1], [(1, 2)], [3]] assert_array_equal(a, b[:, 0]) # Another test for the memory order of the subspace @@ -828,10 +848,11 @@ class TestMultiIndexingAutomated: """ - def setup_method(self): - self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6) - self.b = np.empty((3, 0, 5, 6)) - self.complex_indices = ['skip', Ellipsis, + def _create_array(self): + return np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6) + + def _create_complex_indices(self): + return ['skip', Ellipsis, 0, # Boolean indices, up to 3-d for some special cases of eating up # dimensions, also need to test all False @@ -849,13 +870,8 @@ def setup_method(self): np.array([[2], [0], [1]]), np.array([[0, -1], [0, 1]], dtype=np.dtype('intp').newbyteorder()), np.array([2, -1], dtype=np.int8), - np.zeros([1]*31, dtype=int), # trigger too large array. 
+ np.zeros([1] * 31, dtype=int), # trigger too large array. np.array([0., 1.])] # invalid datatype - # Some simpler indices that still cover a bit more - self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]), - 'skip'] - # Very simple ones to fill the rest: - self.fill_indices = [slice(None, None), 0] def _get_multi_index(self, arr, indices): """Mimic multi dimensional indexing. @@ -929,7 +945,7 @@ def _get_multi_index(self, arr, indices): except ValueError: raise IndexError in_indices[i] = indx - elif indx.dtype.kind != 'b' and indx.dtype.kind != 'i': + elif indx.dtype.kind not in 'bi': raise IndexError('arrays used as indices must be of ' 'integer (or boolean) type') if indx.ndim != 0: @@ -949,7 +965,7 @@ def _get_multi_index(self, arr, indices): return arr.copy(), no_copy if ellipsis_pos is not None: - in_indices[ellipsis_pos:ellipsis_pos+1] = ([slice(None, None)] * + in_indices[ellipsis_pos:ellipsis_pos + 1] = ([slice(None, None)] * (arr.ndim - ndim)) for ax, indx in enumerate(in_indices): @@ -961,24 +977,24 @@ def _get_multi_index(self, arr, indices): elif indx is None: # this is like taking a slice with one element from a new axis: indices.append(['n', np.array([0], dtype=np.intp)]) - arr = arr.reshape((arr.shape[:ax] + (1,) + arr.shape[ax:])) + arr = arr.reshape(arr.shape[:ax] + (1,) + arr.shape[ax:]) continue if isinstance(indx, np.ndarray) and indx.dtype == bool: - if indx.shape != arr.shape[ax:ax+indx.ndim]: + if indx.shape != arr.shape[ax:ax + indx.ndim]: raise IndexError try: flat_indx = np.ravel_multi_index(np.nonzero(indx), - arr.shape[ax:ax+indx.ndim], mode='raise') + arr.shape[ax:ax + indx.ndim], mode='raise') except Exception: error_unless_broadcast_to_empty = True # fill with 0s instead, and raise error later - flat_indx = np.array([0]*indx.sum(), dtype=np.intp) + flat_indx = np.array([0] * indx.sum(), dtype=np.intp) # concatenate axis into a single one: if indx.ndim != 0: - arr = arr.reshape((arr.shape[:ax] - + 
(np.prod(arr.shape[ax:ax+indx.ndim]),) - + arr.shape[ax+indx.ndim:])) + arr = arr.reshape(arr.shape[:ax] + + (np.prod(arr.shape[ax:ax + indx.ndim]),) + + arr.shape[ax + indx.ndim:]) indx = flat_indx else: # This could be changed, a 0-d boolean index can @@ -986,12 +1002,12 @@ def _get_multi_index(self, arr, indices): # Note that originally this is could be interpreted as # integer in the full integer special case. raise IndexError - else: - # If the index is a singleton, the bounds check is done - # before the broadcasting. This used to be different in <1.9 - if indx.ndim == 0: - if indx >= arr.shape[ax] or indx < -arr.shape[ax]: - raise IndexError + # If the index is a singleton, the bounds check is done + # before the broadcasting. This used to be different in <1.9 + elif indx.ndim == 0 and not ( + -arr.shape[ax] <= indx < arr.shape[ax] + ): + raise IndexError if indx.ndim == 0: # The index is a scalar. This used to be two fold, but if # fancy indexing was active, the check was done later, @@ -1045,9 +1061,9 @@ def _get_multi_index(self, arr, indices): # First of all, reshape arr to combine fancy axes into one: orig_shape = arr.shape orig_slice = orig_shape[ax:ax + len(indx[1:])] - arr = arr.reshape((arr.shape[:ax] + arr = arr.reshape(arr.shape[:ax] + (np.prod(orig_slice).astype(int),) - + arr.shape[ax + len(indx[1:]):])) + + arr.shape[ax + len(indx[1:]):]) # Check if broadcasting works res = np.broadcast(*indx[1:]) @@ -1061,7 +1077,7 @@ def _get_multi_index(self, arr, indices): if _indx.size == 0: continue if np.any(_indx >= _size) or np.any(_indx < -_size): - raise IndexError + raise IndexError if len(indx[1:]) == len(orig_slice): if np.prod(orig_slice) == 0: # Work around for a crash or IndexError with 'wrap' @@ -1081,9 +1097,9 @@ def _get_multi_index(self, arr, indices): raise ValueError arr = arr.take(mi.ravel(), axis=ax) try: - arr = arr.reshape((arr.shape[:ax] + arr = arr.reshape(arr.shape[:ax] + mi.shape - + arr.shape[ax+1:])) + + arr.shape[ax + 1:]) 
except ValueError: # too many dimensions, probably raise IndexError @@ -1148,6 +1164,8 @@ def _compare_index_result(self, arr, index, mimic_get, no_copy): """Compare mimicked result to indexing result. """ arr = arr.copy() + if HAS_REFCOUNT: + startcount = sys.getrefcount(arr) indexed_arr = arr[index] assert_array_equal(indexed_arr, mimic_get) # Check if we got a view, unless its a 0-sized or 0-d array. @@ -1158,9 +1176,9 @@ def _compare_index_result(self, arr, index, mimic_get, no_copy): if HAS_REFCOUNT: if no_copy: # refcount increases by one: - assert_equal(sys.getrefcount(arr), 3) + assert_equal(sys.getrefcount(arr), startcount + 1) else: - assert_equal(sys.getrefcount(arr), 2) + assert_equal(sys.getrefcount(arr), startcount) # Test non-broadcast setitem: b = arr.copy() @@ -1186,16 +1204,23 @@ def test_boolean(self): # it is aligned to the left. This is probably correct for # consistency with arr[boolean_array,] also no broadcasting # is done at all + a = self._create_array() self._check_multi_index( - self.a, (np.zeros_like(self.a, dtype=bool),)) + a, (np.zeros_like(a, dtype=bool),)) self._check_multi_index( - self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],)) + a, (np.zeros_like(a, dtype=bool)[..., 0],)) self._check_multi_index( - self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],)) + a, (np.zeros_like(a, dtype=bool)[None, ...],)) def test_multidim(self): # Automatically test combinations with complex indexes on 2nd (or 1st) # spot and the simple ones in one other spot. + a = self._create_array() + b = np.empty((3, 0, 5, 6)) + complex_indices = self._create_complex_indices() + simple_indices = [Ellipsis, None, -1, [1], np.array([True]), 'skip'] + fill_indices = [slice(None, None), 0] + with warnings.catch_warnings(): # This is so that np.array(True) is not accepted in a full integer # index, when running the file separately. 
@@ -1206,28 +1231,30 @@ def isskip(idx): return isinstance(idx, str) and idx == "skip" for simple_pos in [0, 2, 3]: - tocheck = [self.fill_indices, self.complex_indices, - self.fill_indices, self.fill_indices] - tocheck[simple_pos] = self.simple_indices + tocheck = [fill_indices, complex_indices, + fill_indices, fill_indices] + tocheck[simple_pos] = simple_indices for index in product(*tocheck): index = tuple(i for i in index if not isskip(i)) - self._check_multi_index(self.a, index) - self._check_multi_index(self.b, index) + self._check_multi_index(a, index) + self._check_multi_index(b, index) # Check very simple item getting: - self._check_multi_index(self.a, (0, 0, 0, 0)) - self._check_multi_index(self.b, (0, 0, 0, 0)) + self._check_multi_index(a, (0, 0, 0, 0)) + self._check_multi_index(b, (0, 0, 0, 0)) # Also check (simple cases of) too many indices: - assert_raises(IndexError, self.a.__getitem__, (0, 0, 0, 0, 0)) - assert_raises(IndexError, self.a.__setitem__, (0, 0, 0, 0, 0), 0) - assert_raises(IndexError, self.a.__getitem__, (0, 0, [1], 0, 0)) - assert_raises(IndexError, self.a.__setitem__, (0, 0, [1], 0, 0), 0) + assert_raises(IndexError, a.__getitem__, (0, 0, 0, 0, 0)) + assert_raises(IndexError, a.__setitem__, (0, 0, 0, 0, 0), 0) + assert_raises(IndexError, a.__getitem__, (0, 0, [1], 0, 0)) + assert_raises(IndexError, a.__setitem__, (0, 0, [1], 0, 0), 0) def test_1d(self): a = np.arange(10) - for index in self.complex_indices: + complex_indices = self._create_complex_indices() + for index in complex_indices: self._check_single_index(a, index) + class TestFloatNonIntegerArgument: """ These test that ``TypeError`` is raised when you try to use @@ -1242,8 +1269,8 @@ def test_valid_indexing(self): a[np.array([0])] a[[0, 0]] a[:, [0, 0]] - a[:, 0,:] - a[:,:,:] + a[:, 0, :] + a[:, :, :] def test_valid_slicing(self): # These should raise no errors. 
@@ -1276,7 +1303,7 @@ def mult(a, b): mult([1], np.int_(3)) def test_reduce_axis_float_index(self): - d = np.zeros((3,3,3)) + d = np.zeros((3, 3, 3)) assert_raises(TypeError, np.min, d, 0.5) assert_raises(TypeError, np.min, d, (0.5, 1)) assert_raises(TypeError, np.min, d, (1, 2.2)) @@ -1293,7 +1320,7 @@ def test_bool_as_int_argument_errors(self): # Note that operator.index(np.array(True)) does not work, a boolean # array is thus also deprecated, but not with the same message: assert_raises(TypeError, operator.index, np.array(True)) - assert_warns(DeprecationWarning, operator.index, np.True_) + assert_raises(TypeError, operator.index, np.True_) assert_raises(TypeError, np.take, args=(a, [0], False)) def test_boolean_indexing_weirdness(self): @@ -1309,21 +1336,22 @@ def test_boolean_indexing_fast_path(self): a = np.ones((3, 3)) # This used to incorrectly work (and give an array of shape (0,)) - idx1 = np.array([[False]*9]) + idx1 = np.array([[False] * 9]) assert_raises_regex(IndexError, "boolean index did not match indexed array along axis 0; " "size of axis is 3 but size of corresponding boolean axis is 1", lambda: a[idx1]) - # This used to incorrectly give a ValueError: operands could not be broadcast together - idx2 = np.array([[False]*8 + [True]]) + # This used to incorrectly give a ValueError: operands could not be + # broadcast together + idx2 = np.array([[False] * 8 + [True]]) assert_raises_regex(IndexError, "boolean index did not match indexed array along axis 0; " "size of axis is 3 but size of corresponding boolean axis is 1", lambda: a[idx2]) # This is the same as it used to be. The above two should work like this. 
- idx3 = np.array([[False]*10]) + idx3 = np.array([[False] * 10]) assert_raises_regex(IndexError, "boolean index did not match indexed array along axis 0; " "size of axis is 3 but size of corresponding boolean axis is 1", @@ -1416,3 +1444,247 @@ def test_setitem(self): a = a.reshape(5, 2) assign(a, 4, 10) assert_array_equal(a[-1], [10, 10]) + + +class TestFlatiterIndexing: + def test_flatiter_indexing_single_integer(self): + a = np.arange(9).reshape((3, 3)) + assert_array_equal(a.flat[0], 0) + assert_array_equal(a.flat[4], 4) + assert_array_equal(a.flat[-1], 8) + + with pytest.raises(IndexError, match="index 9 is out of bounds"): + a.flat[9] + + def test_flatiter_indexing_slice(self): + a = np.arange(9).reshape((3, 3)) + assert_array_equal(a.flat[:], np.arange(9)) + assert_array_equal(a.flat[:5], np.arange(5)) + assert_array_equal(a.flat[5:10], np.arange(5, 9)) + assert_array_equal(a.flat[::2], np.arange(0, 9, 2)) + assert_array_equal(a.flat[::-1], np.arange(8, -1, -1)) + assert_array_equal(a.flat[10:5], np.array([])) + + assert_array_equal(a.flat[()], np.arange(9)) + assert_array_equal(a.flat[...], np.arange(9)) + + def test_flatiter_indexing_boolean(self): + a = np.arange(9).reshape((3, 3)) + + with pytest.warns(DeprecationWarning, match="0-dimensional boolean index"): + assert_array_equal(a.flat[True], 0) + with pytest.warns(DeprecationWarning, match="0-dimensional boolean index"): + assert_array_equal(a.flat[False], np.array([])) + + mask = np.zeros(len(a.flat), dtype=bool) + mask[::2] = True + assert_array_equal(a.flat[mask], np.arange(0, 9, 2)) + + wrong_mask = np.zeros(len(a.flat) + 1, dtype=bool) + with pytest.raises(IndexError, + match="boolean index did not match indexed flat iterator"): + a.flat[wrong_mask] + + def test_flatiter_indexing_fancy(self): + a = np.arange(9).reshape((3, 3)) + + indices = np.array([1, 3, 5]) + assert_array_equal(a.flat[indices], indices) + + assert_array_equal(a.flat[[-1, -2]], np.array([8, 7])) + + indices_2d = np.array([[1, 
2], [3, 4]]) + assert_array_equal(a.flat[indices_2d], indices_2d) + + assert_array_equal(a.flat[[True, 1]], np.array([1, 1])) + + assert_array_equal(a.flat[[]], np.array([], dtype=a.dtype)) + + with pytest.raises(IndexError, + match="boolean indices for iterators are not supported"): + a.flat[[True, True]] + + a = np.arange(3) + with pytest.raises(IndexError, + match="boolean indices for iterators are not supported"): + a.flat[[True, False, True]] + assert_array_equal(a.flat[np.asarray([True, False, True])], np.array([0, 2])) + + def test_flatiter_indexing_not_supported_newaxis_mutlidimensional_float(self): + a = np.arange(9).reshape((3, 3)) + with pytest.raises(IndexError, + match=r"only integers, slices \(`:`\), " + r"ellipsis \(`\.\.\.`\) and " + r"integer or boolean arrays are valid indices"): + a.flat[None] + + with pytest.raises(IndexError, + match=r"too many indices for flat iterator: flat iterator " + r"is 1-dimensional, but 2 were indexed"): + a.flat[1, 2] + + with pytest.warns(DeprecationWarning, + match="Invalid non-array indices for iterator objects are " + "deprecated"): + assert_array_equal(a.flat[[1.0, 2.0]], np.array([1, 2])) + + def test_flatiter_assign_single_integer(self): + a = np.arange(9).reshape((3, 3)) + + a.flat[0] = 10 + assert_array_equal(a, np.array([[10, 1, 2], [3, 4, 5], [6, 7, 8]])) + + a.flat[4] = 20 + assert_array_equal(a, np.array([[10, 1, 2], [3, 20, 5], [6, 7, 8]])) + + a.flat[-1] = 30 + assert_array_equal(a, np.array([[10, 1, 2], [3, 20, 5], [6, 7, 30]])) + + with pytest.raises(IndexError, match="index 9 is out of bounds"): + a.flat[9] = 40 + + def test_flatiter_indexing_slice_assign(self): + a = np.arange(9).reshape((3, 3)) + a.flat[:] = 10 + assert_array_equal(a, np.full((3, 3), 10)) + + a = np.arange(9).reshape((3, 3)) + a.flat[:5] = 20 + assert_array_equal(a, np.array([[20, 20, 20], [20, 20, 5], [6, 7, 8]])) + + a = np.arange(9).reshape((3, 3)) + a.flat[5:10] = 30 + assert_array_equal(a, np.array([[0, 1, 2], [3, 4, 30], [30, 
30, 30]])) + + a = np.arange(9).reshape((3, 3)) + a.flat[::2] = 40 + assert_array_equal(a, np.array([[40, 1, 40], [3, 40, 5], [40, 7, 40]])) + + a = np.arange(9).reshape((3, 3)) + a.flat[::-1] = 50 + assert_array_equal(a, np.full((3, 3), 50)) + + a = np.arange(9).reshape((3, 3)) + a.flat[10:5] = 60 + assert_array_equal(a, np.arange(9).reshape((3, 3))) + + a = np.arange(9).reshape((3, 3)) + with pytest.raises(IndexError, + match="Assigning to a flat iterator with a 0-D index"): + a.flat[()] = 70 + + a = np.arange(9).reshape((3, 3)) + a.flat[...] = 80 + assert_array_equal(a, np.full((3, 3), 80)) + + def test_flatiter_indexing_boolean_assign(self): + a = np.arange(9).reshape((3, 3)) + with pytest.warns(DeprecationWarning, match="0-dimensional boolean index"): + a.flat[True] = 10 + assert_array_equal(a, np.array([[10, 1, 2], [3, 4, 5], [6, 7, 8]])) + + a = np.arange(9).reshape((3, 3)) + with pytest.warns(DeprecationWarning, match="0-dimensional boolean index"): + a.flat[False] = 20 + assert_array_equal(a, np.arange(9).reshape((3, 3))) + + a = np.arange(9).reshape((3, 3)) + mask = np.zeros(len(a.flat), dtype=bool) + mask[::2] = True + a.flat[mask] = 30 + assert_array_equal(a, np.array([[30, 1, 30], [3, 30, 5], [30, 7, 30]])) + + wrong_mask = np.zeros(len(a.flat) + 1, dtype=bool) + with pytest.raises(IndexError, + match="boolean index did not match indexed flat iterator"): + a.flat[wrong_mask] = 40 + + def test_flatiter_indexing_fancy_assign(self): + a = np.arange(9).reshape((3, 3)) + indices = np.array([1, 3, 5]) + a.flat[indices] = 10 + assert_array_equal(a, np.array([[0, 10, 2], [10, 4, 10], [6, 7, 8]])) + + a.flat[[-1, -2]] = 20 + assert_array_equal(a, np.array([[0, 10, 2], [10, 4, 10], [6, 20, 20]])) + + a = np.arange(9).reshape((3, 3)) + indices_2d = np.array([[1, 2], [3, 4]]) + a.flat[indices_2d] = 30 + assert_array_equal(a, np.array([[0, 30, 30], [30, 30, 5], [6, 7, 8]])) + + a.flat[[True, 1]] = 40 + assert_array_equal(a, np.array([[0, 40, 30], [30, 30, 5], [6, 
7, 8]])) + + with pytest.raises(IndexError, + match="boolean indices for iterators are not supported"): + a.flat[[True, True]] = 50 + + a = np.arange(3) + with pytest.raises(IndexError, + match="boolean indices for iterators are not supported"): + a.flat[[True, False, True]] = 20 + a.flat[np.asarray([True, False, True])] = 20 + assert_array_equal(a, np.array([20, 1, 20])) + + def test_flatiter_indexing_fancy_int16_dtype(self): + a = np.arange(9).reshape((3, 3)) + indices = np.array([1, 3, 5], dtype=np.int16) + assert_array_equal(a.flat[indices], np.array([1, 3, 5])) + + a.flat[indices] = 10 + assert_array_equal(a, np.array([[0, 10, 2], [10, 4, 10], [6, 7, 8]])) + + def test_flatiter_indexing_not_supported_newaxis_mutlid_float_assign(self): + a = np.arange(9).reshape((3, 3)) + with pytest.raises(IndexError, + match=r"only integers, slices \(`:`\), " + r"ellipsis \(`\.\.\.`\) and " + r"integer or boolean arrays are valid indices"): + a.flat[None] = 10 + + a.flat[[1, 2]] = 10 + assert_array_equal(a, np.array([[0, 10, 10], [3, 4, 5], [6, 7, 8]])) + + with pytest.warns(DeprecationWarning, + match="Invalid non-array indices for iterator objects are " + "deprecated"): + a.flat[[1.0, 2.0]] = 20 + assert_array_equal(a, np.array([[0, 20, 20], [3, 4, 5], [6, 7, 8]])) + + def test_flat_index_on_flatiter(self): + a = np.arange(9).reshape((3, 3)) + b = np.array([0, 5, 6]) + assert_equal(a.flat[b.flat], np.array([0, 5, 6])) + + def test_empty_string_flat_index_on_flatiter(self): + a = np.arange(9).reshape((3, 3)) + b = np.array([], dtype="S") + # This is arguably incorrect, and should be removed (ideally with + # deprecation). But it matches the array path and comes from not + # distinguishing `arr[np.array([]).flat]` and `arr[[]]` and the latter + # must pass. 
+ assert_equal(a.flat[b.flat], np.array([])) + + def test_nonempty_string_flat_index_on_flatiter(self): + a = np.arange(9).reshape((3, 3)) + b = np.array(["a"], dtype="S") + with pytest.raises(IndexError, + match=r"only integers, slices \(`:`\), ellipsis \(`\.\.\.`\) " + r"and integer or boolean arrays are valid indices"): + a.flat[b.flat] + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.parametrize("methodname", ["__array__", "copy"]) +def test_flatiter_method_signatures(methodname: str): + method = getattr(np.flatiter, methodname) + assert callable(method) + + try: + sig = inspect.signature(method) + except ValueError as e: + pytest.fail(f"Could not get signature for np.flatiter.{methodname}: {e}") + + assert "self" in sig.parameters + assert sig.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY diff --git a/numpy/_core/tests/test_item_selection.py b/numpy/_core/tests/test_item_selection.py index 5660ef583edb..0e08b7cfd8e0 100644 --- a/numpy/_core/tests/test_item_selection.py +++ b/numpy/_core/tests/test_item_selection.py @@ -3,9 +3,7 @@ import pytest import numpy as np -from numpy.testing import ( - assert_, assert_raises, assert_array_equal, HAS_REFCOUNT - ) +from numpy.testing import HAS_REFCOUNT, assert_, assert_array_equal, assert_raises class TestTake: @@ -15,7 +13,7 @@ def test_simple(self): modes = ['raise', 'wrap', 'clip'] indices = [-1, 4] index_arrays = [np.empty(0, dtype=np.intp), - np.empty(tuple(), dtype=np.intp), + np.empty((), dtype=np.intp), np.empty((1, 1), dtype=np.intp)] real_indices = {'raise': {-1: 1, 4: IndexError}, 'wrap': {-1: 1, 4: 0}, @@ -31,8 +29,8 @@ def test_simple(self): tresult = list(ta.T.copy()) for index_array in index_arrays: if index_array.size != 0: - tresult[0].shape = (2,) + index_array.shape - tresult[1].shape = (2,) + index_array.shape + tresult[0] = tresult[0].reshape((2,) + index_array.shape) + tresult[1] = tresult[1].reshape((2,) + index_array.shape) for mode in 
modes: for index in indices: real_index = real_indices[mode][index] @@ -50,19 +48,23 @@ def test_simple(self): def test_refcounting(self): objects = [object() for i in range(10)] + if HAS_REFCOUNT: + orig_rcs = [sys.getrefcount(o) for o in objects] for mode in ('raise', 'clip', 'wrap'): a = np.array(objects) b = np.array([2, 2, 4, 5, 3, 5]) a.take(b, out=a[:6], mode=mode) del a if HAS_REFCOUNT: - assert_(all(sys.getrefcount(o) == 3 for o in objects)) + assert_(all(sys.getrefcount(o) == rc + 1 + for o, rc in zip(objects, orig_rcs))) # not contiguous, example: a = np.array(objects * 2)[::2] a.take(b, out=a[:6], mode=mode) del a if HAS_REFCOUNT: - assert_(all(sys.getrefcount(o) == 3 for o in objects)) + assert_(all(sys.getrefcount(o) == rc + 1 + for o, rc in zip(objects, orig_rcs))) def test_unicode_mode(self): d = np.arange(10) diff --git a/numpy/_core/tests/test_limited_api.py b/numpy/_core/tests/test_limited_api.py index 9b13208d81af..30ed3023cc92 100644 --- a/numpy/_core/tests/test_limited_api.py +++ b/numpy/_core/tests/test_limited_api.py @@ -1,11 +1,11 @@ import os -import shutil import subprocess import sys import sysconfig + import pytest -from numpy.testing import IS_WASM, IS_PYPY, NOGIL_BUILD, IS_EDITABLE +from numpy.testing import IS_EDITABLE, IS_WASM, NOGIL_BUILD # This import is copied from random.tests.test_extending try: @@ -41,22 +41,36 @@ def install_temp(tmpdir_factory): srcdir = os.path.join(os.path.dirname(__file__), 'examples', 'limited_api') build_dir = tmpdir_factory.mktemp("limited_api") / "build" os.makedirs(build_dir, exist_ok=True) + # Ensure we use the correct Python interpreter even when `meson` is + # installed in a different Python environment (see gh-24956) + native_file = str(build_dir / 'interpreter-native-file.ini') + with open(native_file, 'w') as f: + f.write("[binaries]\n") + f.write(f"python = '{sys.executable}'\n") + f.write(f"python3 = '{sys.executable}'") + try: subprocess.check_call(["meson", "--version"]) except 
FileNotFoundError: pytest.skip("No usable 'meson' found") + if sysconfig.get_platform() == "win-arm64": + pytest.skip("Meson unable to find MSVC linker on win-arm64") if sys.platform == "win32": subprocess.check_call(["meson", "setup", + "--werror", "--buildtype=release", - "--vsenv", str(srcdir)], + "--vsenv", "--native-file", native_file, + str(srcdir)], cwd=build_dir, ) else: - subprocess.check_call(["meson", "setup", str(srcdir)], + subprocess.check_call(["meson", "setup", "--werror", + "--native-file", native_file, str(srcdir)], cwd=build_dir ) try: - subprocess.check_call(["meson", "compile", "-vv"], cwd=build_dir) + subprocess.check_call( + ["meson", "compile", "-vv"], cwd=build_dir) except subprocess.CalledProcessError as p: print(f"{p.stdout=}") print(f"{p.stderr=}") @@ -65,7 +79,6 @@ def install_temp(tmpdir_factory): sys.path.append(str(build_dir)) - @pytest.mark.skipif(IS_WASM, reason="Can't start subprocess") @pytest.mark.xfail( sysconfig.get_config_var("Py_DEBUG"), @@ -78,11 +91,11 @@ def install_temp(tmpdir_factory): NOGIL_BUILD, reason="Py_GIL_DISABLED builds do not currently support the limited API", ) -@pytest.mark.skipif(IS_PYPY, reason="no support for limited API in PyPy") def test_limited_api(install_temp): """Test building a third-party C extension with the limited API and building a cython extension with the limited API """ - import limited_api1 - import limited_api2 + import limited_api1 # Earliest (3.6) # noqa: F401 + import limited_api2 # cython # noqa: F401 + import limited_api_latest # Latest version (current Python) # noqa: F401 diff --git a/numpy/_core/tests/test_longdouble.py b/numpy/_core/tests/test_longdouble.py index a7ad5c9e5791..a7aa9145711a 100644 --- a/numpy/_core/tests/test_longdouble.py +++ b/numpy/_core/tests/test_longdouble.py @@ -1,14 +1,18 @@ -import warnings import platform +import warnings + import pytest import numpy as np -from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_warns, 
assert_array_equal, - temppath, IS_MUSL - ) from numpy._core.tests._locales import CommaDecimalPointLocale - +from numpy.testing import ( + IS_MUSL, + assert_, + assert_array_equal, + assert_equal, + assert_raises, + temppath, +) LD_INFO = np.finfo(np.longdouble) longdouble_longer_than_double = (LD_INFO.eps < np.finfo(np.double).eps) @@ -40,7 +44,7 @@ def test_scalar_extraction(): def test_str_roundtrip(): # We will only see eps in repr if within printing precision. o = 1 + LD_INFO.eps - assert_equal(np.longdouble(str(o)), o, "str was %s" % str(o)) + assert_equal(np.longdouble(str(o)), o, f"str was {str(o)}") @pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") @@ -83,10 +87,10 @@ def test_bogus_string(): @pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") def test_fromstring(): o = 1 + LD_INFO.eps - s = (" " + str(o))*5 - a = np.array([o]*5) + s = (" " + str(o)) * 5 + a = np.array([o] * 5) assert_equal(np.fromstring(s, sep=" ", dtype=np.longdouble), a, - err_msg="reading '%s'" % s) + err_msg=f"reading '{s}'") def test_fromstring_complex(): @@ -101,48 +105,39 @@ def test_fromstring_complex(): assert_equal(np.fromstring("1+1j,2-2j, -3+3j, -4e1+4j", sep=",", dtype=ctype), np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. 
+ 4j])) # Spaces at wrong places - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1+2 j,3", dtype=ctype, sep=","), - np.array([1.])) - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1+ 2j,3", dtype=ctype, sep=","), - np.array([1.])) - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1 +2j,3", dtype=ctype, sep=","), - np.array([1.])) - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1+j", dtype=ctype, sep=","), - np.array([1.])) - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1+", dtype=ctype, sep=","), - np.array([1.])) - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1j+1", dtype=ctype, sep=","), - np.array([1j])) + with assert_raises(ValueError): + np.fromstring("1+2 j,3", dtype=ctype, sep=",") + with assert_raises(ValueError): + np.fromstring("1+ 2j,3", dtype=ctype, sep=",") + with assert_raises(ValueError): + np.fromstring("1 +2j,3", dtype=ctype, sep=",") + with assert_raises(ValueError): + np.fromstring("1+j", dtype=ctype, sep=",") + with assert_raises(ValueError): + np.fromstring("1+", dtype=ctype, sep=",") + with assert_raises(ValueError): + np.fromstring("1j+1", dtype=ctype, sep=",") def test_fromstring_bogus(): - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1. 2. 3. flop 4.", dtype=float, sep=" "), - np.array([1., 2., 3.])) + with assert_raises(ValueError): + np.fromstring("1. 2. 3. 
flop 4.", dtype=float, sep=" ") def test_fromstring_empty(): - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("xxxxx", sep="x"), - np.array([])) + with assert_raises(ValueError): + np.fromstring("xxxxx", sep="x") def test_fromstring_missing(): - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1xx3x4x5x6", sep="x"), - np.array([1])) + with assert_raises(ValueError): + np.fromstring("1xx3x4x5x6", sep="x") class TestFileBased: ldbl = 1 + LD_INFO.eps - tgt = np.array([ldbl]*5) + tgt = np.array([ldbl] * 5) out = ''.join([str(t) + '\n' for t in tgt]) def test_fromfile_bogus(self): @@ -150,9 +145,8 @@ def test_fromfile_bogus(self): with open(path, 'w') as f: f.write("1. 2. 3. flop 4.\n") - with assert_warns(DeprecationWarning): - res = np.fromfile(path, dtype=float, sep=" ") - assert_equal(res, np.array([1., 2., 3.])) + with assert_raises(ValueError): + np.fromfile(path, dtype=float, sep=" ") def test_fromfile_complex(self): for ctype in ["complex", "cdouble"]: @@ -185,56 +179,48 @@ def test_fromfile_complex(self): with open(path, 'w') as f: f.write("1+2 j,3\n") - with assert_warns(DeprecationWarning): - res = np.fromfile(path, dtype=ctype, sep=",") - assert_equal(res, np.array([1.])) + with assert_raises(ValueError): + np.fromfile(path, dtype=ctype, sep=",") # Spaces at wrong places with temppath() as path: with open(path, 'w') as f: f.write("1+ 2j,3\n") - with assert_warns(DeprecationWarning): - res = np.fromfile(path, dtype=ctype, sep=",") - assert_equal(res, np.array([1.])) + with assert_raises(ValueError): + np.fromfile(path, dtype=ctype, sep=",") # Spaces at wrong places with temppath() as path: with open(path, 'w') as f: f.write("1 +2j,3\n") - with assert_warns(DeprecationWarning): - res = np.fromfile(path, dtype=ctype, sep=",") - assert_equal(res, np.array([1.])) + with assert_raises(ValueError): + np.fromfile(path, dtype=ctype, sep=",") - # Spaces at wrong places + # Wrong sep with temppath() as path: with open(path, 'w') 
as f: f.write("1+j\n") - with assert_warns(DeprecationWarning): - res = np.fromfile(path, dtype=ctype, sep=",") - assert_equal(res, np.array([1.])) + with assert_raises(ValueError): + np.fromfile(path, dtype=ctype, sep=",") - # Spaces at wrong places + # Wrong sep with temppath() as path: with open(path, 'w') as f: f.write("1+\n") - with assert_warns(DeprecationWarning): - res = np.fromfile(path, dtype=ctype, sep=",") - assert_equal(res, np.array([1.])) + with assert_raises(ValueError): + np.fromfile(path, dtype=ctype, sep=",") - # Spaces at wrong places + # Wrong sep with temppath() as path: with open(path, 'w') as f: f.write("1j+1\n") - with assert_warns(DeprecationWarning): - res = np.fromfile(path, dtype=ctype, sep=",") - assert_equal(res, np.array([1.j])) - - + with assert_raises(ValueError): + np.fromfile(path, dtype=ctype, sep=",") @pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") @@ -284,8 +270,7 @@ def test_str_exact(): @pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") def test_format(): - o = 1 + LD_INFO.eps - assert_("{0:.40g}".format(o) != '1') + assert_(f"{1 + LD_INFO.eps:.40g}" != '1') @pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376") @@ -293,7 +278,7 @@ def test_format(): reason="Need strtold_l") def test_percent(): o = 1 + LD_INFO.eps - assert_("%.40g" % o != '1') + assert_(f"{o:.40g}" != '1') @pytest.mark.skipif(longdouble_longer_than_double, @@ -306,7 +291,8 @@ def test_array_repr(): b = np.array([1], dtype=np.longdouble) if not np.all(a != b): raise ValueError("precision loss creating arrays") - assert_(repr(a) != repr(b)) + with np.printoptions(precision=LD_INFO.precision + 1): + assert_(repr(a) != repr(b)) # # Locale tests: scalar types formatting should be independent of the locale @@ -323,16 +309,6 @@ def test_fromstring_foreign_repr(self): a = np.fromstring(repr(f), dtype=float, sep=" ") assert_equal(a[0], f) - def test_fromstring_best_effort_float(self): - with 
assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1,234", dtype=float, sep=" "), - np.array([1.])) - - def test_fromstring_best_effort(self): - with assert_warns(DeprecationWarning): - assert_equal(np.fromstring("1,234", dtype=np.longdouble, sep=" "), - np.array([1.])) - def test_fromstring_foreign(self): s = "1.234" a = np.fromstring(s, dtype=np.longdouble, sep=" ") @@ -344,9 +320,8 @@ def test_fromstring_foreign_sep(self): assert_array_equal(a, b) def test_fromstring_foreign_value(self): - with assert_warns(DeprecationWarning): - b = np.fromstring("1,234", dtype=np.longdouble, sep=" ") - assert_array_equal(b[0], 1) + with assert_raises(ValueError): + np.fromstring("1,234", dtype=np.longdouble, sep=" ") @pytest.mark.parametrize("int_val", [ diff --git a/numpy/_core/tests/test_machar.py b/numpy/_core/tests/test_machar.py deleted file mode 100644 index c7f677075dca..000000000000 --- a/numpy/_core/tests/test_machar.py +++ /dev/null @@ -1,30 +0,0 @@ -""" -Test machar. Given recent changes to hardcode type data, we might want to get -rid of both MachAr and this test at some point. - -""" -from numpy._core._machar import MachAr -import numpy._core.numerictypes as ntypes -from numpy import errstate, array - - -class TestMachAr: - def _run_machar_highprec(self): - # Instantiate MachAr instance with high enough precision to cause - # underflow - try: - hiprec = ntypes.float96 - MachAr(lambda v: array(v, hiprec)) - except AttributeError: - # Fixme, this needs to raise a 'skip' exception. - "Skipping test: no ntypes.float96 available on this platform." - - def test_underlow(self): - # Regression test for #759: - # instantiating MachAr for dtype = np.float96 raises spurious warning. - with errstate(all='raise'): - try: - self._run_machar_highprec() - except FloatingPointError as e: - msg = "Caught %s exception, should not have been raised." 
% e - raise AssertionError(msg) diff --git a/numpy/_core/tests/test_mem_overlap.py b/numpy/_core/tests/test_mem_overlap.py index 4ea70c044d51..240ea62850ee 100644 --- a/numpy/_core/tests/test_mem_overlap.py +++ b/numpy/_core/tests/test_mem_overlap.py @@ -1,14 +1,12 @@ import itertools + import pytest import numpy as np -from numpy._core._multiarray_tests import solve_diophantine, internal_overlap from numpy._core import _umath_tests +from numpy._core._multiarray_tests import internal_overlap, solve_diophantine from numpy.lib.stride_tricks import as_strided -from numpy.testing import ( - assert_, assert_raises, assert_equal, assert_array_equal - ) - +from numpy.testing import assert_, assert_array_equal, assert_equal, assert_raises ndims = 2 size = 10 @@ -63,7 +61,7 @@ def _check_assignment(srcidx, dstidx): arr[dstidx] = arr[srcidx] assert_(np.all(arr == cpy), - 'assigning arr[%s] = arr[%s]' % (dstidx, srcidx)) + f'assigning arr[{dstidx}] = arr[{srcidx}]') def test_overlapping_assignments(): @@ -72,8 +70,8 @@ def test_overlapping_assignments(): inds = _indices(ndims) for ind in inds: - srcidx = tuple([a[0] for a in ind]) - dstidx = tuple([a[1] for a in ind]) + srcidx = tuple(a[0] for a in ind) + dstidx = tuple(a[1] for a in ind) _check_assignment(srcidx, dstidx) @@ -89,7 +87,7 @@ def test_diophantine_fuzz(): feasible_count = 0 infeasible_count = 0 - min_count = 500//(ndim + 1) + min_count = 500 // (ndim + 1) while min(feasible_count, infeasible_count) < min_count: # Ensure big and small integer problems @@ -97,15 +95,15 @@ def test_diophantine_fuzz(): U_max = rng.randint(0, 11, dtype=np.intp)**6 A_max = min(max_int, A_max) - U_max = min(max_int-1, U_max) + U_max = min(max_int - 1, U_max) - A = tuple(int(rng.randint(1, A_max+1, dtype=np.intp)) + A = tuple(int(rng.randint(1, A_max + 1, dtype=np.intp)) for j in range(ndim)) - U = tuple(int(rng.randint(0, U_max+2, dtype=np.intp)) + U = tuple(int(rng.randint(0, U_max + 2, dtype=np.intp)) for j in range(ndim)) - b_ub = 
min(max_int-2, sum(a*ub for a, ub in zip(A, U))) - b = int(rng.randint(-1, b_ub+2, dtype=np.intp)) + b_ub = min(max_int - 2, sum(a * ub for a, ub in zip(A, U))) + b = int(rng.randint(-1, b_ub + 2, dtype=np.intp)) if ndim == 0 and feasible_count < min_count: b = 0 @@ -120,7 +118,7 @@ def test_diophantine_fuzz(): # Check no solution exists (provided the problem is # small enough so that brute force checking doesn't # take too long) - ranges = tuple(range(0, a*ub+1, a) for a, ub in zip(A, U)) + ranges = tuple(range(0, a * ub + 1, a) for a, ub in zip(A, U)) size = 1 for r in ranges: @@ -134,7 +132,7 @@ def test_diophantine_fuzz(): assert_(X_simplified is not None, (A, U, b, X_simplified)) # Check validity - assert_(sum(a*x for a, x in zip(A, X)) == b) + assert_(sum(a * x for a, x in zip(A, X)) == b) assert_(all(0 <= x <= ub for x, ub in zip(X, U))) feasible_count += 1 @@ -147,9 +145,9 @@ def test_diophantine_overflow(): if max_int64 <= max_intp: # Check that the algorithm works internally in 128-bit; # solving this problem requires large intermediate numbers - A = (max_int64//2, max_int64//2 - 10) - U = (max_int64//2, max_int64//2 - 10) - b = 2*(max_int64//2) - 10 + A = (max_int64 // 2, max_int64 // 2 - 10) + U = (max_int64 // 2, max_int64 // 2 - 10) + b = 2 * (max_int64 // 2) - 10 assert_equal(solve_diophantine(A, U, b), (1, 1)) @@ -167,14 +165,15 @@ def check_may_share_memory_exact(a, b): err_msg = "" if got != exact: + base_delta = a.__array_interface__['data'][0] - b.__array_interface__['data'][0] err_msg = " " + "\n ".join([ - "base_a - base_b = %r" % (a.__array_interface__['data'][0] - b.__array_interface__['data'][0],), - "shape_a = %r" % (a.shape,), - "shape_b = %r" % (b.shape,), - "strides_a = %r" % (a.strides,), - "strides_b = %r" % (b.strides,), - "size_a = %r" % (a.size,), - "size_b = %r" % (b.size,) + f"base_a - base_b = {base_delta!r}", + f"shape_a = {a.shape!r}", + f"shape_b = {b.shape!r}", + f"strides_a = {a.strides!r}", + f"strides_b = {b.strides!r}", 
+ f"size_a = {a.size!r}", + f"size_b = {b.size!r}" ]) assert_equal(got, exact, err_msg=err_msg) @@ -186,24 +185,24 @@ def test_may_share_memory_manual(): # Base arrays xs0 = [ np.zeros([13, 21, 23, 22], dtype=np.int8), - np.zeros([13, 21, 23*2, 22], dtype=np.int8)[:,:,::2,:] + np.zeros([13, 21, 23 * 2, 22], dtype=np.int8)[:, :, ::2, :] ] # Generate all negative stride combinations xs = [] for x in xs0: - for ss in itertools.product(*(([slice(None), slice(None, None, -1)],)*4)): + for ss in itertools.product(*(([slice(None), slice(None, None, -1)],) * 4)): xp = x[ss] xs.append(xp) for x in xs: # The default is a simple extent check - assert_(np.may_share_memory(x[:,0,:], x[:,1,:])) - assert_(np.may_share_memory(x[:,0,:], x[:,1,:], max_work=None)) + assert_(np.may_share_memory(x[:, 0, :], x[:, 1, :])) + assert_(np.may_share_memory(x[:, 0, :], x[:, 1, :], max_work=None)) # Exact checks - check_may_share_memory_exact(x[:,0,:], x[:,1,:]) - check_may_share_memory_exact(x[:,::7], x[:,3::3]) + check_may_share_memory_exact(x[:, 0, :], x[:, 1, :]) + check_may_share_memory_exact(x[:, ::7], x[:, 3::3]) try: xp = x.ravel() @@ -215,15 +214,15 @@ def test_may_share_memory_manual(): # 0-size arrays cannot overlap check_may_share_memory_exact(x.ravel()[6:6], - xp.reshape(13, 21, 23, 11)[:,::7]) + xp.reshape(13, 21, 23, 11)[:, ::7]) # Test itemsize is dealt with - check_may_share_memory_exact(x[:,::7], + check_may_share_memory_exact(x[:, ::7], xp.reshape(13, 21, 23, 11)) - check_may_share_memory_exact(x[:,::7], - xp.reshape(13, 21, 23, 11)[:,3::3]) + check_may_share_memory_exact(x[:, ::7], + xp.reshape(13, 21, 23, 11)[:, 3::3]) check_may_share_memory_exact(x.ravel()[6:7], - xp.reshape(13, 21, 23, 11)[:,::7]) + xp.reshape(13, 21, 23, 11)[:, ::7]) # Check unit size x = np.zeros([1], dtype=np.int8) @@ -235,21 +234,21 @@ def iter_random_view_pairs(x, same_steps=True, equal_size=False): rng = np.random.RandomState(1234) if equal_size and same_steps: - raise ValueError() + raise 
ValueError def random_slice(n, step): - start = rng.randint(0, n+1, dtype=np.intp) - stop = rng.randint(start, n+1, dtype=np.intp) + start = rng.randint(0, n + 1, dtype=np.intp) + stop = rng.randint(start, n + 1, dtype=np.intp) if rng.randint(0, 2, dtype=np.intp) == 0: stop, start = start, stop step *= -1 return slice(start, stop, step) def random_slice_fixed_size(n, step, size): - start = rng.randint(0, n+1 - size*step) - stop = start + (size-1)*step + 1 + start = rng.randint(0, n + 1 - size * step) + stop = start + (size - 1) * step + 1 if rng.randint(0, 2) == 0: - stop, start = start-1, stop-1 + stop, start = start - 1, stop - 1 if stop < 0: stop = None step *= -1 @@ -259,7 +258,7 @@ def random_slice_fixed_size(n, step, size): yield x, x for j in range(1, 7, 3): yield x[j:], x[:-j] - yield x[...,j:], x[...,:-j] + yield x[..., j:], x[..., :-j] # An array with zero stride internal overlap strides = list(x.strides) @@ -298,7 +297,7 @@ def random_slice_fixed_size(n, step, size): if a.size == 0: continue - steps2 = tuple(rng.randint(1, max(2, p//(1+pa))) + steps2 = tuple(rng.randint(1, max(2, p // (1 + pa))) if rng.randint(0, 5) == 0 else 1 for p, s, pa in zip(x.shape, s1, a.shape)) s2 = tuple(random_slice_fixed_size(p, s, pa) @@ -322,7 +321,7 @@ def random_slice_fixed_size(n, step, size): def check_may_share_memory_easy_fuzz(get_max_work, same_steps, min_count): # Check that overlap problems with common strides are solved with # little work. - x = np.zeros([17,34,71,97], dtype=np.int16) + x = np.zeros([17, 34, 71, 97], dtype=np.int16) feasible = 0 infeasible = 0 @@ -370,7 +369,7 @@ def test_may_share_memory_harder_fuzz(): # also exist but not be detected here, as the set of problems # comes from RNG. 
- check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: max(a.size, b.size)//2, + check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: max(a.size, b.size) // 2, same_steps=False, min_count=2000) @@ -381,8 +380,8 @@ def test_shares_memory_api(): assert_equal(np.shares_memory(x, x), True) assert_equal(np.shares_memory(x, x.copy()), False) - a = x[:,::2,::3] - b = x[:,::3,::2] + a = x[:, ::2, ::3] + b = x[:, ::3, ::2] assert_equal(np.shares_memory(a, b), True) assert_equal(np.shares_memory(a, b, max_work=None), True) assert_raises( @@ -404,9 +403,11 @@ def check(A, U, exists=None): exists = (X is not None) if X is not None: - assert_(sum(a*x for a, x in zip(A, X)) == sum(a*u//2 for a, u in zip(A, U))) + sum_ax = sum(a * x for a, x in zip(A, X)) + sum_au_half = sum(a * u // 2 for a, u in zip(A, U)) + assert_(sum_ax == sum_au_half) assert_(all(0 <= x <= u for x, u in zip(X, U))) - assert_(any(x != u//2 for x, u in zip(X, U))) + assert_(any(x != u // 2 for x, u in zip(X, U))) if exists: assert_(X is not None, repr(X)) @@ -414,20 +415,20 @@ def check(A, U, exists=None): assert_(X is None, repr(X)) # Smoke tests - check((3, 2), (2*2, 3*2), exists=True) - check((3*2, 2), (15*2, (3-1)*2), exists=False) + check((3, 2), (2 * 2, 3 * 2), exists=True) + check((3 * 2, 2), (15 * 2, (3 - 1) * 2), exists=False) def test_internal_overlap_slices(): # Slicing an array never generates internal overlap - x = np.zeros([17,34,71,97], dtype=np.int16) + x = np.zeros([17, 34, 71, 97], dtype=np.int16) rng = np.random.RandomState(1234) def random_slice(n, step): - start = rng.randint(0, n+1, dtype=np.intp) - stop = rng.randint(start, n+1, dtype=np.intp) + start = rng.randint(0, n + 1, dtype=np.intp) + stop = rng.randint(start, n + 1, dtype=np.intp) if rng.randint(0, 2, dtype=np.intp) == 0: stop, start = start, stop step *= -1 @@ -456,7 +457,7 @@ def check_internal_overlap(a, manual_expected=None): m = set() ranges = tuple(range(n) for n in a.shape) for v in itertools.product(*ranges): 
- offset = sum(s*w for s, w in zip(a.strides, v)) + offset = sum(s * w for s, w in zip(a.strides, v)) if offset in m: expected = True break @@ -482,8 +483,8 @@ def test_internal_overlap_manual(): # Check low-dimensional special cases - check_internal_overlap(x, False) # 1-dim - check_internal_overlap(x.reshape([]), False) # 0-dim + check_internal_overlap(x, False) # 1-dim + check_internal_overlap(x.reshape([]), False) # 0-dim a = as_strided(x, strides=(3, 4), shape=(4, 4)) check_internal_overlap(a, False) @@ -640,19 +641,18 @@ def check_unary_fuzz(self, operation, get_out_axis_size, dtype=np.int16, sl = [slice(None)] * ndim if axis is None: if outsize is None: - sl = [slice(0, 1)] + [0]*(ndim - 1) + sl = [slice(0, 1)] + [0] * (ndim - 1) else: - sl = [slice(0, outsize)] + [0]*(ndim - 1) - else: - if outsize is None: - k = b.shape[axis]//2 - if ndim == 1: - sl[axis] = slice(k, k + 1) - else: - sl[axis] = k + sl = [slice(0, outsize)] + [0] * (ndim - 1) + elif outsize is None: + k = b.shape[axis] // 2 + if ndim == 1: + sl[axis] = slice(k, k + 1) else: - assert b.shape[axis] >= outsize - sl[axis] = slice(0, outsize) + sl[axis] = k + else: + assert b.shape[axis] >= outsize + sl[axis] = slice(0, outsize) b_out = b[tuple(sl)] if scalarize: @@ -706,7 +706,7 @@ def get_out_axis_size(a, b, axis): def do_reduceat(a, out, axis): if axis is None: size = len(a) - step = size//len(out) + step = size // len(out) else: size = a.shape[axis] step = a.shape[axis] // out.shape[axis] @@ -753,19 +753,19 @@ def test_unary_gufunc_fuzz(self): # Ensure the shapes are so that euclidean_pdist is happy if b.shape[-1] > b.shape[-2]: - b = b[...,0,:] + b = b[..., 0, :] else: - b = b[...,:,0] + b = b[..., :, 0] n = a.shape[-2] p = n * (n - 1) // 2 if p <= b.shape[-1] and p > 0: - b = b[...,:p] + b = b[..., :p] else: - n = max(2, int(np.sqrt(b.shape[-1]))//2) + n = max(2, int(np.sqrt(b.shape[-1])) // 2) p = n * (n - 1) // 2 - a = a[...,:n,:] - b = b[...,:p] + a = a[..., :n, :] + b = b[..., :p] # 
Call if np.shares_memory(a, b): @@ -843,17 +843,17 @@ def check(a, b): k = 10 indices = [ np.index_exp[:n], - np.index_exp[k:k+n], - np.index_exp[n-1::-1], - np.index_exp[k+n-1:k-1:-1], - np.index_exp[:2*n:2], - np.index_exp[k:k+2*n:2], - np.index_exp[2*n-1::-2], - np.index_exp[k+2*n-1:k-1:-2], + np.index_exp[k:k + n], + np.index_exp[n - 1::-1], + np.index_exp[k + n - 1:k - 1:-1], + np.index_exp[:2 * n:2], + np.index_exp[k:k + 2 * n:2], + np.index_exp[2 * n - 1::-2], + np.index_exp[k + 2 * n - 1:k - 1:-2], ] for xi, yi in itertools.product(indices, indices): - v = np.arange(1, 1 + n*2 + k, dtype=dtype) + v = np.arange(1, 1 + n * 2 + k, dtype=dtype) x = v[xi] y = v[yi] @@ -901,14 +901,14 @@ def check(a, b, c): indices = [] for p in [1, 2]: indices.extend([ - np.index_exp[:p*n:p], - np.index_exp[k:k+p*n:p], - np.index_exp[p*n-1::-p], - np.index_exp[k+p*n-1:k-1:-p], + np.index_exp[:p * n:p], + np.index_exp[k:k + p * n:p], + np.index_exp[p * n - 1::-p], + np.index_exp[k + p * n - 1:k - 1:-p], ]) for x, y, z in itertools.product(indices, indices, indices): - v = np.arange(6*n).astype(dtype) + v = np.arange(6 * n).astype(dtype) x = v[x] y = v[y] z = v[z] diff --git a/numpy/_core/tests/test_mem_policy.py b/numpy/_core/tests/test_mem_policy.py index 9540d17d03cb..720ea1aa91b8 100644 --- a/numpy/_core/tests/test_mem_policy.py +++ b/numpy/_core/tests/test_mem_policy.py @@ -2,19 +2,14 @@ import gc import os import sys +import sysconfig import threading -import warnings import pytest import numpy as np -from numpy.testing import extbuild, assert_warns, IS_WASM, IS_EDITABLE from numpy._core.multiarray import get_handler_name - - -# FIXME: numpy.testing.extbuild uses `numpy.distutils`, so this won't work on -# Python 3.12 and up. It's an internal test utility, so for now we just skip -# these tests. 
+from numpy.testing import IS_EDITABLE, IS_WASM, extbuild @pytest.fixture @@ -226,6 +221,8 @@ def get_module(tmp_path): except ImportError: pass # if it does not exist, build and load it + if sysconfig.get_platform() == "win-arm64": + pytest.skip("Meson unable to find MSVC linker on win-arm64") return extbuild.build_and_import_extension('mem_policy', functions, prologue=prologue, @@ -234,7 +231,6 @@ def get_module(tmp_path): more_init=more_init) -@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") def test_set_policy(get_module): get_handler_name = np._core.multiarray.get_handler_name @@ -267,7 +263,6 @@ def test_set_policy(get_module): get_module.set_wrong_capsule_name_data_policy() -@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") def test_default_policy_singleton(get_module): get_handler_name = np._core.multiarray.get_handler_name @@ -289,7 +284,6 @@ def test_default_policy_singleton(get_module): assert def_policy_1 is def_policy_2 is get_module.get_default_policy() -@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") def test_policy_propagation(get_module): # The memory policy goes hand-in-hand with flags.owndata @@ -348,11 +342,7 @@ async def async_test_context_locality(get_module): assert np._core.multiarray.get_handler_name() == orig_policy_name -@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") def test_context_locality(get_module): - if (sys.implementation.name == 'pypy' - and sys.pypy_version_info[:3] < (7, 3, 6)): - pytest.skip('no context-locality support in PyPy < 7.3.6') asyncio.run(async_test_context_locality(get_module)) @@ -370,7 +360,6 @@ def concurrent_thread2(get_module, event): get_module.set_secret_data_policy() -@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") def test_thread_locality(get_module): orig_policy_name = np._core.multiarray.get_handler_name() @@ -389,7 +378,6 @@ def 
test_thread_locality(get_module): assert np._core.multiarray.get_handler_name() == orig_policy_name -@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") @pytest.mark.skip(reason="too slow, see gh-23975") def test_new_policy(get_module): a = np.arange(10) @@ -420,11 +408,8 @@ def test_new_policy(get_module): assert np._core.multiarray.get_handler_name(c) == orig_policy_name -@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") -@pytest.mark.xfail(sys.implementation.name == "pypy", - reason=("bad interaction between getenv and " - "os.environ inside pytest")) @pytest.mark.parametrize("policy", ["0", "1", None]) +@pytest.mark.thread_unsafe(reason="modifies environment variables") def test_switch_owner(get_module, policy): a = get_module.get_array() assert np._core.multiarray.get_handler_name(a) is None @@ -442,7 +427,7 @@ def test_switch_owner(get_module, policy): # The policy should be NULL, so we have to assume we can call # "free". A warning is given if the policy == "1" if policy: - with assert_warns(RuntimeWarning) as w: + with pytest.warns(RuntimeWarning) as w: del a gc.collect() else: @@ -454,7 +439,6 @@ def test_switch_owner(get_module, policy): np._core._multiarray_umath._set_numpy_warn_if_no_mem_policy(oldval) -@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") def test_owner_is_base(get_module): a = get_module.get_array_with_base() with pytest.warns(UserWarning, match='warn_on_free'): diff --git a/numpy/_core/tests/test_memmap.py b/numpy/_core/tests/test_memmap.py index 9603e8316e1d..8df78da067eb 100644 --- a/numpy/_core/tests/test_memmap.py +++ b/numpy/_core/tests/test_memmap.py @@ -1,33 +1,40 @@ -import sys -import os import mmap -import pytest +import os +import sys +import warnings from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryFile -from numpy import ( - memmap, sum, average, prod, ndarray, isscalar, add, subtract, multiply) - -from numpy import 
arange, allclose, asarray -from numpy.testing import ( - assert_, assert_equal, assert_array_equal, suppress_warnings, IS_PYPY, - break_cycles - ) +import pytest +from numpy import ( + add, + allclose, + arange, + asarray, + average, + isscalar, + memmap, + multiply, + ndarray, + prod, + subtract, + sum, +) +from numpy.testing import assert_, assert_array_equal, assert_equal + + +@pytest.mark.thread_unsafe(reason="setup & memmap is thread-unsafe (gh-29126)") class TestMemmap: def setup_method(self): self.tmpfp = NamedTemporaryFile(prefix='mmap') self.shape = (3, 4) self.dtype = 'float32' - self.data = arange(12, dtype=self.dtype) - self.data.resize(self.shape) + self.data = arange(12, dtype=self.dtype).reshape(self.shape) def teardown_method(self): self.tmpfp.close() self.data = None - if IS_PYPY: - break_cycles() - break_cycles() def test_roundtrip(self): # Write data to file @@ -151,8 +158,9 @@ def test_ufunc_return_ndarray(self): fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) fp[:] = self.data - with suppress_warnings() as sup: - sup.filter(FutureWarning, "np.average currently does not preserve") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "np.average currently does not preserve", FutureWarning) for unary_op in [sum, average, prod]: result = unary_op(fp) assert_(isscalar(result)) @@ -167,9 +175,9 @@ def test_ufunc_return_ndarray(self): assert_(binary_op(fp, fp).__class__ is ndarray) fp += 1 - assert(fp.__class__ is memmap) + assert fp.__class__ is memmap add(fp, 1, out=fp) - assert(fp.__class__ is memmap) + assert fp.__class__ is memmap def test_getitem(self): fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) @@ -187,11 +195,11 @@ class MemmapSubClass(memmap): fp[:] = self.data # We keep previous behavior for subclasses of memmap, i.e. 
the - # ufunc and __getitem__ output is never turned into a ndarray + # ufunc and __getitem__ output is never turned into an ndarray assert_(sum(fp, axis=0).__class__ is MemmapSubClass) assert_(sum(fp).__class__ is MemmapSubClass) assert_(fp[1:, :-1].__class__ is MemmapSubClass) - assert(fp[[0, 1]].__class__ is MemmapSubClass) + assert fp[[0, 1]].__class__ is MemmapSubClass def test_mmap_offset_greater_than_allocation_granularity(self): size = 5 * mmap.ALLOCATIONGRANULARITY @@ -199,23 +207,80 @@ def test_mmap_offset_greater_than_allocation_granularity(self): fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset) assert_(fp.offset == offset) + def test_empty_array_with_offset_multiple_of_allocation_granularity(self): + self.tmpfp.write(b'a' * mmap.ALLOCATIONGRANULARITY) + size = 0 + offset = mmap.ALLOCATIONGRANULARITY + fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset) + assert_equal(fp.offset, offset) + def test_no_shape(self): - self.tmpfp.write(b'a'*16) + self.tmpfp.write(b'a' * 16) mm = memmap(self.tmpfp, dtype='float64') assert_equal(mm.shape, (2,)) def test_empty_array(self): # gh-12653 with pytest.raises(ValueError, match='empty file'): - memmap(self.tmpfp, shape=(0,4), mode='w+') + memmap(self.tmpfp, shape=(0, 4), mode='r') - self.tmpfp.write(b'\0') + # gh-27723 + # empty memmap works with mode in ('w+','r+') + memmap(self.tmpfp, shape=(0, 4), mode='w+') # ok now the file is not empty - memmap(self.tmpfp, shape=(0,4), mode='w+') - + memmap(self.tmpfp, shape=(0, 4), mode='w+') + def test_shape_type(self): memmap(self.tmpfp, shape=3, mode='w+') memmap(self.tmpfp, shape=self.shape, mode='w+') memmap(self.tmpfp, shape=list(self.shape), mode='w+') memmap(self.tmpfp, shape=asarray(self.shape), mode='w+') + + +class TestPatternMatching: + """Tests for structural pattern matching support (PEP 634).""" + + def test_match_sequence_pattern_1d(self): + with NamedTemporaryFile() as f: + arr = memmap(f, dtype='int64', mode='w+', shape=(3,)) + arr[:] = 
[1, 2, 3] + match arr: + case [a, b, c]: + assert a == 1 + assert b == 2 + assert c == 3 + case _: + raise AssertionError("1D memmap did not match sequence pattern") + + def test_match_sequence_pattern_2d(self): + with NamedTemporaryFile() as f: + arr = memmap(f, dtype='int64', mode='w+', shape=(2, 2)) + arr[:] = [[1, 2], [3, 4]] + match arr: + case [row1, row2]: + assert_array_equal(row1, [1, 2]) + assert_array_equal(row2, [3, 4]) + case _: + raise AssertionError("2D memmap did not match sequence pattern") + + def test_match_sequence_pattern_3d(self): + with NamedTemporaryFile() as f: + arr = memmap(f, dtype='int64', mode='w+', shape=(2, 2, 2)) + arr[:] = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] + # outer matching + match arr: + case [plane1, plane2]: + assert_array_equal(plane1, [[1, 2], [3, 4]]) + assert_array_equal(plane2, [[5, 6], [7, 8]]) + case _: + raise AssertionError("3D memmap did not match sequence pattern") + # inner matching + match arr: + case [[row1, row2], [row3, row4]]: + assert_array_equal(row1, [1, 2]) + assert_array_equal(row2, [3, 4]) + assert_array_equal(row3, [5, 6]) + assert_array_equal(row4, [7, 8]) + case _: + raise AssertionError("3D memmap did not match sequence pattern") diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 6923accbab66..1b76c7100b7f 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -1,44 +1,63 @@ -from __future__ import annotations - +import builtins import collections.abc -import tempfile -import sys -import warnings -import operator +import ctypes +import functools +import gc +import importlib +import inspect import io import itertools -import functools -import ctypes +import mmap +import operator import os -import gc +import pathlib +import pickle import re +import sys +import tempfile +import warnings import weakref -import pytest from contextlib import contextmanager -import pickle -import pathlib -import builtins + +# Need to test 
an object that does not fully implement math interface +from datetime import datetime, timedelta from decimal import Decimal -import mmap + +import pytest import numpy as np import numpy._core._multiarray_tests as _multiarray_tests from numpy._core._rational_tests import rational -from numpy.exceptions import AxisError, ComplexWarning -from numpy.testing import ( - assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal, - assert_array_equal, assert_raises_regex, assert_array_almost_equal, - assert_allclose, IS_PYPY, IS_WASM, IS_PYSTON, HAS_REFCOUNT, - assert_array_less, runstring, temppath, suppress_warnings, break_cycles, - _SUPPORTS_SVE, assert_array_compare, - ) -from numpy.testing._private.utils import requires_memory, _no_tracing +from numpy._core.multiarray import _get_ndarray_c_version, dot from numpy._core.tests._locales import CommaDecimalPointLocale +from numpy.exceptions import AxisError, ComplexWarning +from numpy.lib import stride_tricks from numpy.lib.recfunctions import repack_fields -from numpy._core.multiarray import _get_ndarray_c_version, dot - -# Need to test an object that does not fully implement math interface -from datetime import timedelta, datetime +from numpy.testing import ( + BLAS_SUPPORTS_FPE, + HAS_REFCOUNT, + IS_64BIT, + IS_WASM, + assert_, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_compare, + assert_array_equal, + assert_array_less, + assert_equal, + assert_raises, + assert_raises_regex, + break_cycles, + check_support_sve, + runstring, + temppath, +) +from numpy.testing._private.utils import ( + _no_tracing, + requires_deep_recursion, + requires_memory, +) def assert_arg_sorted(arr, arg): @@ -72,34 +91,32 @@ def _aligned_zeros(shape, dtype=float, order="C", align=None): if not hasattr(shape, '__len__'): shape = (shape,) size = functools.reduce(operator.mul, shape) * dtype.itemsize - buf = np.empty(size + 2*align + 1, np.uint8) + buf = np.empty(size + 2 * align + 1, 
np.uint8) ptr = buf.__array_interface__['data'][0] offset = ptr % align if offset != 0: offset = align - offset - if (ptr % (2*align)) == 0: + if (ptr % (2 * align)) == 0: offset += align # Note: slices producing 0-size arrays do not necessarily change # data pointer --- so we use and allocate size+1 - buf = buf[offset:offset+size+1][:-1] + buf = buf[offset:offset + size + 1][:-1] buf.fill(0) data = np.ndarray(shape, dtype, buf, order=order) return data class TestFlags: - def setup_method(self): - self.a = np.arange(10) - def test_writeable(self): + arr = np.arange(10) mydict = locals() - self.a.flags.writeable = False - assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict) - self.a.flags.writeable = True - self.a[0] = 5 - self.a[0] = 0 + arr.flags.writeable = False + assert_raises(ValueError, runstring, 'arr[0] = 3', mydict) + arr.flags.writeable = True + arr[0] = 5 + arr[0] = 0 def test_writeable_any_base(self): # Ensure that any base being writeable is sufficient to change flag; @@ -140,7 +157,7 @@ def test_writeable_from_readonly(self): data = b'\x00' * 100 vals = np.frombuffer(data, 'B') assert_raises(ValueError, vals.setflags, write=True) - types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] ) + types = np.dtype([('vals', 'u1'), ('res3', 'S4')]) values = np._core.records.fromstring(data, types) vals = values['vals'] assert_raises(ValueError, vals.setflags, write=True) @@ -153,7 +170,7 @@ def test_writeable_from_buffer(self): assert_(vals.flags.writeable is False) vals.setflags(write=True) assert_(vals.flags.writeable) - types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] ) + types = np.dtype([('vals', 'u1'), ('res3', 'S4')]) values = np._core.records.fromstring(data, types) vals = values['vals'] assert_(vals.flags.writeable) @@ -162,7 +179,6 @@ def test_writeable_from_buffer(self): vals.setflags(write=True) assert_(vals.flags.writeable) - @pytest.mark.skipif(IS_PYPY, reason="PyPy always copies") def test_writeable_pickle(self): import pickle # Small 
arrays will be copied without setting base. @@ -206,12 +222,7 @@ def test_writeable_from_c_data(self): with assert_raises(ValueError): view.flags.writeable = True - with warnings.catch_warnings(): - warnings.simplefilter("error", DeprecationWarning) - with assert_raises(DeprecationWarning): - arr.flags.writeable = True - - with assert_warns(DeprecationWarning): + with assert_raises(ValueError): arr.flags.writeable = True def test_warnonwrite(self): @@ -234,7 +245,7 @@ def test_readonly_flag_protocols(self, flag, flag_value, writeable): a = np.arange(10) setattr(a.flags, flag, flag_value) - class MyArr(): + class MyArr: __array_struct__ = a.__array_struct__ assert memoryview(a).readonly is not writeable @@ -242,18 +253,19 @@ class MyArr(): assert np.asarray(MyArr()).flags.writeable is writeable def test_otherflags(self): - assert_equal(self.a.flags.carray, True) - assert_equal(self.a.flags['C'], True) - assert_equal(self.a.flags.farray, False) - assert_equal(self.a.flags.behaved, True) - assert_equal(self.a.flags.fnc, False) - assert_equal(self.a.flags.forc, True) - assert_equal(self.a.flags.owndata, True) - assert_equal(self.a.flags.writeable, True) - assert_equal(self.a.flags.aligned, True) - assert_equal(self.a.flags.writebackifcopy, False) - assert_equal(self.a.flags['X'], False) - assert_equal(self.a.flags['WRITEBACKIFCOPY'], False) + arr = np.arange(10) + assert_equal(arr.flags.carray, True) + assert_equal(arr.flags['C'], True) + assert_equal(arr.flags.farray, False) + assert_equal(arr.flags.behaved, True) + assert_equal(arr.flags.fnc, False) + assert_equal(arr.flags.forc, True) + assert_equal(arr.flags.owndata, True) + assert_equal(arr.flags.writeable, True) + assert_equal(arr.flags.aligned, True) + assert_equal(arr.flags.writebackifcopy, False) + assert_equal(arr.flags['X'], False) + assert_equal(arr.flags['WRITEBACKIFCOPY'], False) def test_string_align(self): a = np.zeros(4, dtype=np.dtype('|S4')) @@ -266,6 +278,17 @@ def test_void_align(self): a = 
np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")])) assert_(a.flags.aligned) + @pytest.mark.parametrize("row_size", [5, 1 << 16]) + @pytest.mark.parametrize("row_count", [1, 5]) + @pytest.mark.parametrize("ndmin", [0, 1, 2]) + def test_xcontiguous_load_txt(self, row_size, row_count, ndmin): + s = io.StringIO('\n'.join(['1.0 ' * row_size] * row_count)) + a = np.loadtxt(s, ndmin=ndmin) + + assert a.flags.c_contiguous + x = [i for i in a.shape if i != 1] + assert a.flags.f_contiguous == (len(x) <= 1) + class TestHash: # see #3793 @@ -290,41 +313,48 @@ def test_int(self): class TestAttributes: - def setup_method(self): - self.one = np.arange(10) - self.two = np.arange(20).reshape(4, 5) - self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6) + def _create_arrays(self): + one = np.arange(10) + two = np.arange(20).reshape(4, 5) + three = np.arange(60, dtype=np.float64).reshape(2, 5, 6) + return one, two, three def test_attributes(self): - assert_equal(self.one.shape, (10,)) - assert_equal(self.two.shape, (4, 5)) - assert_equal(self.three.shape, (2, 5, 6)) - self.three.shape = (10, 3, 2) - assert_equal(self.three.shape, (10, 3, 2)) - self.three.shape = (2, 5, 6) - assert_equal(self.one.strides, (self.one.itemsize,)) - num = self.two.itemsize - assert_equal(self.two.strides, (5*num, num)) - num = self.three.itemsize - assert_equal(self.three.strides, (30*num, 6*num, num)) - assert_equal(self.one.ndim, 1) - assert_equal(self.two.ndim, 2) - assert_equal(self.three.ndim, 3) - num = self.two.itemsize - assert_equal(self.two.size, 20) - assert_equal(self.two.nbytes, 20*num) - assert_equal(self.two.itemsize, self.two.dtype.itemsize) - assert_equal(self.two.base, np.arange(20)) + one, two, three = self._create_arrays() + assert_equal(one.shape, (10,)) + assert_equal(two.shape, (4, 5)) + assert_equal(three.shape, (2, 5, 6)) + with warnings.catch_warnings(): # gh-28901 + warnings.filterwarnings('ignore', category=DeprecationWarning) + three.shape = (10, 3, 2) + 
assert_equal(three.shape, (10, 3, 2)) + with warnings.catch_warnings(): # gh-28901 + warnings.filterwarnings('ignore', category=DeprecationWarning) + three.shape = (2, 5, 6) + assert_equal(one.strides, (one.itemsize,)) + num = two.itemsize + assert_equal(two.strides, (5 * num, num)) + num = three.itemsize + assert_equal(three.strides, (30 * num, 6 * num, num)) + assert_equal(one.ndim, 1) + assert_equal(two.ndim, 2) + assert_equal(three.ndim, 3) + num = two.itemsize + assert_equal(two.size, 20) + assert_equal(two.nbytes, 20 * num) + assert_equal(two.itemsize, two.dtype.itemsize) + assert_equal(two.base, np.arange(20)) def test_dtypeattr(self): - assert_equal(self.one.dtype, np.dtype(np.int_)) - assert_equal(self.three.dtype, np.dtype(np.float64)) - assert_equal(self.one.dtype.char, np.dtype(int).char) - assert self.one.dtype.char in "lq" - assert_equal(self.three.dtype.char, 'd') - assert_(self.three.dtype.str[0] in '<>') - assert_equal(self.one.dtype.str[1], 'i') - assert_equal(self.three.dtype.str[1], 'f') + one, _, three = self._create_arrays() + assert_equal(one.dtype, np.dtype(np.int_)) + assert_equal(three.dtype, np.dtype(np.float64)) + assert_equal(one.dtype.char, np.dtype(int).char) + assert one.dtype.char in "lq" + assert_equal(three.dtype.char, 'd') + assert_(three.dtype.str[0] in '<>') + assert_equal(one.dtype.str[1], 'i') + assert_equal(three.dtype.str[1], 'f') def test_int_subclassing(self): # Regression test for https://github.com/numpy/numpy/pull/3526 @@ -335,32 +365,33 @@ def test_int_subclassing(self): assert_(not isinstance(numpy_int, int)) def test_stridesattr(self): - x = self.one + x, _, _ = self._create_arrays() def make_array(size, offset, strides): return np.ndarray(size, buffer=x, dtype=int, - offset=offset*x.itemsize, - strides=strides*x.itemsize) + offset=offset * x.itemsize, + strides=strides * x.itemsize) assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) assert_raises(ValueError, make_array, 4, 4, -2) assert_raises(ValueError, 
make_array, 4, 2, -1) assert_raises(ValueError, make_array, 8, 3, 1) - assert_equal(make_array(8, 3, 0), np.array([3]*8)) + assert_equal(make_array(8, 3, 0), np.array([3] * 8)) # Check behavior reported in gh-2503: assert_raises(ValueError, make_array, (2, 3), 5, np.array([-2, -3])) make_array(0, 0, 10) def test_set_stridesattr(self): - x = self.one + x, _, _ = self._create_arrays() def make_array(size, offset, strides): try: r = np.ndarray([size], dtype=int, buffer=x, - offset=offset*x.itemsize) + offset=offset * x.itemsize) except Exception as e: raise RuntimeError(e) - r.strides = strides = strides*x.itemsize + with pytest.warns(DeprecationWarning): + r.strides = strides * x.itemsize return r assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) @@ -370,24 +401,28 @@ def make_array(size, offset, strides): assert_raises(RuntimeError, make_array, 8, 3, 1) # Check that the true extent of the array is used. # Test relies on as_strided base not exposing a buffer. - x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0)) + x = stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0)) def set_strides(arr, strides): - arr.strides = strides + with pytest.warns(DeprecationWarning): + arr.strides = strides - assert_raises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize)) + assert_raises(ValueError, set_strides, x, (10 * x.itemsize, x.itemsize)) # Test for offset calculations: - x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1], + x = stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1], shape=(10,), strides=(-1,)) assert_raises(ValueError, set_strides, x[::-1], -1) a = x[::-1] - a.strides = 1 - a[::2].strides = 2 + with pytest.warns(DeprecationWarning): + a.strides = 1 + with pytest.warns(DeprecationWarning): + a[::2].strides = 2 # test 0d arr_0d = np.array(0) - arr_0d.strides = () + with pytest.warns(DeprecationWarning): + arr_0d.strides = () assert_raises(TypeError, set_strides, arr_0d, None) def test_fill(self): @@ -425,6 
+460,18 @@ def test_fill_readonly(self): with pytest.raises(ValueError, match=".*read-only"): a.fill(0) + def test_fill_subarrays(self): + # NOTE: + # This is also a regression test for a crash with PYTHONMALLOC=debug + + dtype = np.dtype("2= 3 + + arg0 = "object" if func is np.array else "a" + assert arg0 in sig.parameters + assert sig.parameters[arg0].default is inspect.Parameter.empty + assert sig.parameters[arg0].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + + assert "dtype" in sig.parameters + assert sig.parameters["dtype"].default is None + assert sig.parameters["dtype"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + + assert "like" in sig.parameters + assert sig.parameters["like"].default is None + assert sig.parameters["like"].kind is inspect.Parameter.KEYWORD_ONLY + class TestAssignment: def test_assignment_broadcasting(self): @@ -594,16 +666,16 @@ def assign(v): ) def test_unicode_assignment(self): # gh-5049 - from numpy._core.arrayprint import set_string_function + from numpy._core.arrayprint import set_printoptions @contextmanager def inject_str(s): """ replace ndarray.__str__ temporarily """ - set_string_function(lambda x: s, repr=False) + set_printoptions(formatter={"all": lambda x: s}) try: yield finally: - set_string_function(None, repr=False) + set_printoptions() a1d = np.array(['test']) a0d = np.array('done') @@ -620,7 +692,7 @@ def test_stringlike_empty_list(self): b = np.array([b'done']) class bad_sequence: - def __getitem__(self): pass + def __getitem__(self, _, /): pass def __len__(self): raise RuntimeError assert_raises(ValueError, operator.setitem, u, 0, []) @@ -663,8 +735,9 @@ def test_longdouble_assignment(self): def test_cast_to_string(self): # cast to str should do "str(scalar)", not "str(scalar.item())" - # Example: In python2, str(float) is truncated, so we want to avoid - # str(np.float64(...).item()) as this would incorrectly truncate. 
+ # When converting a float to a string via array assignment, we + # want to ensure that the conversion uses str(scalar) to preserve + # the expected precision. a = np.zeros(1, dtype='S20') a[:] = np.array(['1.12345678901234567890'], dtype='f8') assert_equal(a[0], b"1.1234567890123457") @@ -693,46 +766,46 @@ def test_structured_non_void(self): class TestZeroRank: - def setup_method(self): - self.d = np.array(0), np.array('x', object) + def _create_arrays(self): + return np.array(0), np.array('x', object) def test_ellipsis_subscript(self): - a, b = self.d + a, b = self._create_arrays() assert_equal(a[...], 0) assert_equal(b[...], 'x') assert_(a[...].base is a) # `a[...] is a` in numpy <1.9. assert_(b[...].base is b) # `b[...] is b` in numpy <1.9. def test_empty_subscript(self): - a, b = self.d + a, b = self._create_arrays() assert_equal(a[()], 0) assert_equal(b[()], 'x') assert_(type(a[()]) is a.dtype.type) assert_(type(b[()]) is str) def test_invalid_subscript(self): - a, b = self.d + a, b = self._create_arrays() assert_raises(IndexError, lambda x: x[0], a) assert_raises(IndexError, lambda x: x[0], b) assert_raises(IndexError, lambda x: x[np.array([], int)], a) assert_raises(IndexError, lambda x: x[np.array([], int)], b) def test_ellipsis_subscript_assignment(self): - a, b = self.d + a, b = self._create_arrays() a[...] = 42 assert_equal(a, 42) b[...] 
= '' assert_equal(b.item(), '') def test_empty_subscript_assignment(self): - a, b = self.d + a, b = self._create_arrays() a[()] = 42 assert_equal(a, 42) b[()] = '' assert_equal(b.item(), '') def test_invalid_subscript_assignment(self): - a, b = self.d + a, b = self._create_arrays() def assign(x, i, v): x[i] = v @@ -742,7 +815,7 @@ def assign(x, i, v): assert_raises(ValueError, assign, a, (), '') def test_newaxis(self): - a, b = self.d + a, _ = self._create_arrays() assert_equal(a[np.newaxis].shape, (1,)) assert_equal(a[..., np.newaxis].shape, (1,)) assert_equal(a[np.newaxis, ...].shape, (1,)) @@ -750,16 +823,16 @@ def test_newaxis(self): assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1)) assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1)) assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1)) - assert_equal(a[(np.newaxis,)*10].shape, (1,)*10) + assert_equal(a[(np.newaxis,) * 10].shape, (1,) * 10) def test_invalid_newaxis(self): - a, b = self.d + a, _ = self._create_arrays() def subscript(x, i): x[i] assert_raises(IndexError, subscript, a, (np.newaxis, 0)) - assert_raises(IndexError, subscript, a, (np.newaxis,)*70) + assert_raises(IndexError, subscript, a, (np.newaxis,) * 70) def test_constructor(self): x = np.ndarray(()) @@ -797,26 +870,26 @@ def test_real_imag(self): class TestScalarIndexing: - def setup_method(self): - self.d = np.array([0, 1])[0] + def _create_array(self): + return np.array([0, 1])[0] def test_ellipsis_subscript(self): - a = self.d + a = self._create_array() assert_equal(a[...], 0) assert_equal(a[...].shape, ()) def test_empty_subscript(self): - a = self.d + a = self._create_array() assert_equal(a[()], 0) assert_equal(a[()].shape, ()) def test_invalid_subscript(self): - a = self.d + a = self._create_array() assert_raises(IndexError, lambda x: x[0], a) assert_raises(IndexError, lambda x: x[np.array([], int)], a) def test_invalid_subscript_assignment(self): - a = self.d + a = self._create_array() def assign(x, i, v): x[i] = v @@ 
-824,7 +897,7 @@ def assign(x, i, v): assert_raises(TypeError, assign, a, 0, 42) def test_newaxis(self): - a = self.d + a = self._create_array() assert_equal(a[np.newaxis].shape, (1,)) assert_equal(a[..., np.newaxis].shape, (1,)) assert_equal(a[np.newaxis, ...].shape, (1,)) @@ -832,16 +905,16 @@ def test_newaxis(self): assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1)) assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1)) assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1)) - assert_equal(a[(np.newaxis,)*10].shape, (1,)*10) + assert_equal(a[(np.newaxis,) * 10].shape, (1,) * 10) def test_invalid_newaxis(self): - a = self.d + a = self._create_array() def subscript(x, i): x[i] assert_raises(IndexError, subscript, a, (np.newaxis, 0)) - assert_raises(IndexError, subscript, a, (np.newaxis,)*70) + assert_raises(IndexError, subscript, a, (np.newaxis,) * 70) def test_overlapping_assignment(self): # With positive strides @@ -859,7 +932,7 @@ def test_overlapping_assignment(self): assert_equal(a, [3, 2, 1, 0]) a = np.arange(6).reshape(2, 3) - a[::-1,:] = a[:, ::-1] + a[::-1, :] = a[:, ::-1] assert_equal(a, [[5, 4, 3], [2, 1, 0]]) a = np.arange(6).reshape(2, 3) @@ -908,7 +981,7 @@ def test_from_string(self): nstr = ['123', '123'] result = np.array([123, 123], dtype=int) for type in types: - msg = 'String conversion for %s' % type + msg = f'String conversion for {type}' assert_equal(np.array(nstr, dtype=type), result, err_msg=msg) def test_void(self): @@ -944,7 +1017,6 @@ def test_structured_void_promotion(self, idx): [np.array(1, dtype="i,i")[idx], np.array(2, dtype='i,i,i')[idx]], dtype="V") - def test_too_big_error(self): # 45341 is the smallest integer greater than sqrt(2**31 - 1). # 3037000500 is the smallest integer greater than sqrt(2**63 - 1). 
@@ -960,8 +1032,9 @@ def test_too_big_error(self): assert_raises(ValueError, np.zeros, shape, dtype=np.int8) assert_raises(ValueError, np.ones, shape, dtype=np.int8) - @pytest.mark.skipif(np.dtype(np.intp).itemsize != 8, + @pytest.mark.skipif(not IS_64BIT, reason="malloc may not fail on 32 bit systems") + @pytest.mark.thread_unsafe(reason="large slow test in parallel") def test_malloc_fails(self): # This test is guaranteed to fail due to a too large allocation with assert_raises(np._core._exceptions._ArrayMemoryError): @@ -999,7 +1072,7 @@ def test_zeros_big(self): # This test can fail on 32-bit systems due to insufficient # contiguous memory. Deallocating the previous array increases the # chance of success. - del(d) + del d def test_zeros_obj(self): # test initialization from PyLong(0) @@ -1016,32 +1089,32 @@ def test_zeros_like_like_zeros(self): for c in np.typecodes['All']: if c == 'V': continue - d = np.zeros((3,3), dtype=c) + d = np.zeros((3, 3), dtype=c) assert_array_equal(np.zeros_like(d), d) assert_equal(np.zeros_like(d).dtype, d.dtype) # explicitly check some special cases - d = np.zeros((3,3), dtype='S5') + d = np.zeros((3, 3), dtype='S5') assert_array_equal(np.zeros_like(d), d) assert_equal(np.zeros_like(d).dtype, d.dtype) - d = np.zeros((3,3), dtype='U5') + d = np.zeros((3, 3), dtype='U5') assert_array_equal(np.zeros_like(d), d) assert_equal(np.zeros_like(d).dtype, d.dtype) - d = np.zeros((3,3), dtype=' ndmax validation + data = np.array([[1, 2, 3], [4, 5, 6]]) + with pytest.raises(ValueError, match="object too deep for desired array"): + np.array(data, ndmax=1, dtype=object) + class TestStructured: def test_subarray_field_access(self): @@ -1445,7 +1604,7 @@ def test_objview(self): def test_setfield(self): # https://github.com/numpy/numpy/issues/3126 struct_dt = np.dtype([('elem', 'i4', 5),]) - dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)]) + dt = np.dtype([('field', 'i4', 10), ('struct', struct_dt)]) x = np.zeros(1, dt) x[0]['field'] = 
np.ones(10, dtype='i4') x[0]['struct'] = np.ones(1, dtype=struct_dt) @@ -1507,7 +1666,7 @@ def test_zero_width_string(self): assert_equal(xx, [[b'', b''], [b'', b'']]) # check for no uninitialized memory due to viewing S0 array assert_equal(xx[:].dtype, xx.dtype) - assert_array_equal(eval(repr(xx), dict(np=np, array=np.array)), xx) + assert_array_equal(eval(repr(xx), {"np": np, "array": np.array}), xx) b = io.BytesIO() np.save(b, xx) @@ -1532,51 +1691,51 @@ def test_assignment(self): def testassign(arr, v): c = arr.copy() c[0] = v # assign using setitem - c[1:] = v # assign using "dtype_transfer" code paths + c[1:] = v # assign using "dtype_transfer" code paths return c dt = np.dtype([('foo', 'i8'), ('bar', 'i8')]) arr = np.ones(2, dt) - v1 = np.array([(2,3)], dtype=[('foo', 'i8'), ('bar', 'i8')]) - v2 = np.array([(2,3)], dtype=[('bar', 'i8'), ('foo', 'i8')]) - v3 = np.array([(2,3)], dtype=[('bar', 'i8'), ('baz', 'i8')]) + v1 = np.array([(2, 3)], dtype=[('foo', 'i8'), ('bar', 'i8')]) + v2 = np.array([(2, 3)], dtype=[('bar', 'i8'), ('foo', 'i8')]) + v3 = np.array([(2, 3)], dtype=[('bar', 'i8'), ('baz', 'i8')]) v4 = np.array([(2,)], dtype=[('bar', 'i8')]) - v5 = np.array([(2,3)], dtype=[('foo', 'f8'), ('bar', 'f8')]) + v5 = np.array([(2, 3)], dtype=[('foo', 'f8'), ('bar', 'f8')]) w = arr.view({'names': ['bar'], 'formats': ['i8'], 'offsets': [8]}) - ans = np.array([(2,3),(2,3)], dtype=dt) + ans = np.array([(2, 3), (2, 3)], dtype=dt) assert_equal(testassign(arr, v1), ans) assert_equal(testassign(arr, v2), ans) assert_equal(testassign(arr, v3), ans) assert_raises(TypeError, lambda: testassign(arr, v4)) assert_equal(testassign(arr, v5), ans) w[:] = 4 - assert_equal(arr, np.array([(1,4),(1,4)], dtype=dt)) + assert_equal(arr, np.array([(1, 4), (1, 4)], dtype=dt)) # test field-reordering, assignment by position, and self-assignment - a = np.array([(1,2,3)], + a = np.array([(1, 2, 3)], dtype=[('foo', 'i8'), ('bar', 'i8'), ('baz', 'f4')]) a[['foo', 'bar']] = a[['bar', 'foo']] 
- assert_equal(a[0].item(), (2,1,3)) + assert_equal(a[0].item(), (2, 1, 3)) # test that this works even for 'simple_unaligned' structs # (ie, that PyArray_EquivTypes cares about field order too) - a = np.array([(1,2)], dtype=[('a', 'i4'), ('b', 'i4')]) + a = np.array([(1, 2)], dtype=[('a', 'i4'), ('b', 'i4')]) a[['a', 'b']] = a[['b', 'a']] - assert_equal(a[0].item(), (2,1)) + assert_equal(a[0].item(), (2, 1)) def test_structuredscalar_indexing(self): # test gh-7262 x = np.empty(shape=1, dtype="(2,)3S,(2,)3U") - assert_equal(x[["f0","f1"]][0], x[0][["f0","f1"]]) + assert_equal(x[["f0", "f1"]][0], x[0][["f0", "f1"]]) assert_equal(x[0], x[0][()]) def test_multiindex_titles(self): a = np.zeros(4, dtype=[(('a', 'b'), 'i'), ('c', 'i'), ('d', 'i')]) - assert_raises(KeyError, lambda : a[['a','c']]) - assert_raises(KeyError, lambda : a[['a','a']]) - assert_raises(ValueError, lambda : a[['b','b']]) # field exists, but repeated - a[['b','c']] # no exception + assert_raises(KeyError, lambda: a[['a', 'c']]) + assert_raises(KeyError, lambda: a[['a', 'a']]) + assert_raises(ValueError, lambda: a[['b', 'b']]) # field exists, but repeated + a[['b', 'c']] # no exception def test_structured_cast_promotion_fieldorder(self): # gh-15494 @@ -1626,9 +1785,9 @@ def test_structured_cast_promotion_fieldorder(self): assert_equal(np.concatenate([a, a]).dtype, np.dtype([('x', 'i4')])) @pytest.mark.parametrize("dtype_dict", [ - dict(names=["a", "b"], formats=["i4", "f"], itemsize=100), - dict(names=["a", "b"], formats=["i4", "f"], - offsets=[0, 12])]) + {"names": ["a", "b"], "formats": ["i4", "f"], "itemsize": 100}, + {"names": ["a", "b"], "formats": ["i4", "f"], + "offsets": [0, 12]}]) @pytest.mark.parametrize("align", [True, False]) def test_structured_promotion_packs(self, dtype_dict, align): # Structured dtypes are packed when promoted (we consider the packed @@ -1714,10 +1873,10 @@ def test_count_nonzero_all(self): def test_count_nonzero_unaligned(self): # prevent mistakes as e.g. 
gh-4060 for o in range(7): - a = np.zeros((18,), dtype=bool)[o+1:] + a = np.zeros((18,), dtype=bool)[o + 1:] a[:o] = True assert_equal(np.count_nonzero(a), builtins.sum(a.tolist())) - a = np.ones((18,), dtype=bool)[o+1:] + a = np.ones((18,), dtype=bool)[o + 1:] a[:o] = False assert_equal(np.count_nonzero(a), builtins.sum(a.tolist())) @@ -1744,11 +1903,9 @@ def _test_cast_from_flexible(self, dtype): def test_cast_from_void(self): self._test_cast_from_flexible(np.void) - @pytest.mark.xfail(reason="See gh-9847") def test_cast_from_unicode(self): self._test_cast_from_flexible(np.str_) - @pytest.mark.xfail(reason="See gh-9847") def test_cast_from_bytes(self): self._test_cast_from_flexible(np.bytes_) @@ -1865,9 +2022,9 @@ def test_all_where(self): [True]]) for _ax in [0, None]: assert_equal(a.all(axis=_ax, where=wh_lower), - np.all(a[wh_lower[:,0],:], axis=_ax)) + np.all(a[wh_lower[:, 0], :], axis=_ax)) assert_equal(np.all(a, axis=_ax, where=wh_lower), - a[wh_lower[:,0],:].all(axis=_ax)) + a[wh_lower[:, 0], :].all(axis=_ax)) assert_equal(a.all(where=wh_full), True) assert_equal(np.all(a, where=wh_full), True) @@ -1886,9 +2043,9 @@ def test_any_where(self): [False]]) for _ax in [0, None]: assert_equal(a.any(axis=_ax, where=wh_middle), - np.any(a[wh_middle[:,0],:], axis=_ax)) + np.any(a[wh_middle[:, 0], :], axis=_ax)) assert_equal(np.any(a, axis=_ax, where=wh_middle), - a[wh_middle[:,0],:].any(axis=_ax)) + a[wh_middle[:, 0], :].any(axis=_ax)) assert_equal(a.any(where=wh_full), False) assert_equal(np.any(a, where=wh_full), False) assert_equal(a.any(where=False), False) @@ -1928,10 +2085,10 @@ def test_compress(self): assert_equal(out, 1) def test_choose(self): - x = 2*np.ones((3,), dtype=int) - y = 3*np.ones((3,), dtype=int) - x2 = 2*np.ones((2, 3), dtype=int) - y2 = 3*np.ones((2, 3), dtype=int) + x = 2 * np.ones((3,), dtype=int) + y = 3 * np.ones((3,), dtype=int) + x2 = 2 * np.ones((2, 3), dtype=int) + y2 = 3 * np.ones((2, 3), dtype=int) ind = np.array([0, 0, 1]) A = 
ind.choose((x, y)) @@ -1945,7 +2102,7 @@ def test_choose(self): oned = np.ones(1) # gh-12031, caused SEGFAULT - assert_raises(TypeError, oned.choose,np.void(0), [oned]) + assert_raises(TypeError, oned.choose, np.void(0), [oned]) out = np.array(0) ret = np.choose(np.array(1), [10, 20, 30], out=out) @@ -1954,9 +2111,15 @@ def test_choose(self): # gh-6272 check overlap on out x = np.arange(5) - y = np.choose([0,0,0], [x[:3], x[:3], x[:3]], out=x[1:4], mode='wrap') + y = np.choose([0, 0, 0], [x[:3], x[:3], x[:3]], out=x[1:4], mode='wrap') assert_equal(y, np.array([0, 1, 2])) + # gh_28206 check fail when out not writeable + x = np.arange(3) + out = np.zeros(3) + out.setflags(write=False) + assert_raises(ValueError, np.choose, [0, 1, 2], [x, x, x], out=out) + def test_prod(self): ba = [1, 2, 10, 11, 6, 5, 4] ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]] @@ -2031,6 +2194,7 @@ def check_round(arr, expected, *round_args): assert_equal(out, expected) assert out is res + check_round(np.array([1, 2, 3]), [1, 2, 3]) check_round(np.array([1.2, 1.5]), [1, 2]) check_round(np.array(1.5), 2) check_round(np.array([12.2, 15.5]), [10, 20], -1) @@ -2039,6 +2203,20 @@ def check_round(arr, expected, *round_args): check_round(np.array([4.5 + 1.5j]), [4 + 2j]) check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1) + @pytest.mark.parametrize('dt', ['uint8', int, float, complex]) + def test_round_copies(self, dt): + a = np.arange(3, dtype=dt) + assert not np.shares_memory(a.round(), a) + assert not np.shares_memory(a.round(decimals=2), a) + + out = np.empty(3, dtype=dt) + assert not np.shares_memory(a.round(out=out), a) + + a = np.arange(12).astype(dt).reshape(3, 4).T + + assert a.flags.f_contiguous + assert np.round(a).flags.f_contiguous + def test_squeeze(self): a = np.array([[[1], [2], [3]]]) assert_equal(a.squeeze(), [1, 2, 3]) @@ -2074,7 +2252,7 @@ def test_sort(self): with assert_raises_regex( ValueError, - "kind` and `stable` parameters can't be provided at the same time" + "`kind` 
and keyword parameters can't be provided at the same time" ): np.sort(a, kind="stable", stable=True) @@ -2091,7 +2269,7 @@ def test_sort_unsigned(self, dtype): a = np.arange(101, dtype=dtype) b = a[::-1].copy() for kind in self.sort_kinds: - msg = "scalar sort, kind=%s" % kind + msg = f"scalar sort, kind={kind}" c = a.copy() c.sort(kind=kind) assert_equal(c, a, msg) @@ -2106,7 +2284,7 @@ def test_sort_signed(self, dtype): a = np.arange(-50, 51, dtype=dtype) b = a[::-1].copy() for kind in self.sort_kinds: - msg = "scalar sort, kind=%s" % (kind) + msg = f"scalar sort, kind={kind}" c = a.copy() c.sort(kind=kind) assert_equal(c, a, msg) @@ -2126,12 +2304,12 @@ def test_sort_complex(self, part, dtype): }[dtype] a = np.arange(-50, 51, dtype=dtype) b = a[::-1].copy() - ai = (a * (1+1j)).astype(cdtype) - bi = (b * (1+1j)).astype(cdtype) + ai = (a * (1 + 1j)).astype(cdtype) + bi = (b * (1 + 1j)).astype(cdtype) setattr(ai, part, 1) setattr(bi, part, 1) for kind in self.sort_kinds: - msg = "complex sort, %s part == 1, kind=%s" % (part, kind) + msg = f"complex sort, {part} part == 1, kind={kind}" c = ai.copy() c.sort(kind=kind) assert_equal(c, ai, msg) @@ -2143,10 +2321,10 @@ def test_sort_complex_byte_swapping(self): # test sorting of complex arrays requiring byte-swapping, gh-5441 for endianness in '<>': for dt in np.typecodes['Complex']: - arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt) + arr = np.array([1 + 3.j, 2 + 2.j, 3 + 1.j], dtype=endianness + dt) c = arr.copy() c.sort() - msg = 'byte-swapped complex sort, dtype={0}'.format(dt) + msg = f'byte-swapped complex sort, dtype={dt}' assert_equal(c, arr, msg) @pytest.mark.parametrize('dtype', [np.bytes_, np.str_]) @@ -2155,7 +2333,7 @@ def test_sort_string(self, dtype): a = np.array(['aaaaaaaa' + chr(i) for i in range(101)], dtype=dtype) b = a[::-1].copy() for kind in self.sort_kinds: - msg = "kind=%s" % kind + msg = f"kind={kind}" c = a.copy() c.sort(kind=kind) assert_equal(c, a, msg) @@ -2169,7 +2347,7 @@ def 
test_sort_object(self): a[:] = list(range(101)) b = a[::-1] for kind in ['q', 'h', 'm']: - msg = "kind=%s" % kind + msg = f"kind={kind}" c = a.copy() c.sort(kind=kind) assert_equal(c, a, msg) @@ -2183,10 +2361,10 @@ def test_sort_object(self): @pytest.mark.parametrize("step", [1, 2]) def test_sort_structured(self, dt, step): # test record array sorts. - a = np.array([(i, i) for i in range(101*step)], dtype=dt) + a = np.array([(i, i) for i in range(101 * step)], dtype=dt) b = a[::-1] for kind in ['q', 'h', 'm']: - msg = "kind=%s" % kind + msg = f"kind={kind}" c = a.copy()[::step] indx = c.argsort(kind=kind) c.sort(kind=kind) @@ -2195,8 +2373,8 @@ def test_sort_structured(self, dt, step): c = b.copy()[::step] indx = c.argsort(kind=kind) c.sort(kind=kind) - assert_equal(c, a[step-1::step], msg) - assert_equal(b[::step][indx], a[step-1::step], msg) + assert_equal(c, a[step - 1::step], msg) + assert_equal(b[::step][indx], a[step - 1::step], msg) @pytest.mark.parametrize('dtype', ['datetime64[D]', 'timedelta64[D]']) def test_sort_time(self, dtype): @@ -2204,7 +2382,7 @@ def test_sort_time(self, dtype): a = np.arange(0, 101, dtype=dtype) b = a[::-1] for kind in ['q', 'h', 'm']: - msg = "kind=%s" % kind + msg = f"kind={kind}" c = a.copy() c.sort(kind=kind) assert_equal(c, a, msg) @@ -2230,10 +2408,9 @@ def test_sort_axis(self): def test_sort_size_0(self): # check axis handling for multidimensional empty arrays - a = np.array([]) - a.shape = (3, 2, 1, 0) + a = np.array([]).reshape((3, 2, 1, 0)) for axis in range(-a.ndim, a.ndim): - msg = 'test empty array sort with axis={0}'.format(axis) + msg = f'test empty array sort with axis={axis}' assert_equal(np.sort(a, axis=axis), a, msg) msg = 'test empty array sort with axis=None' assert_equal(np.sort(a, axis=None), a.ravel(), msg) @@ -2247,7 +2424,7 @@ def __lt__(self, other): a = np.array([Boom()] * 100, dtype=object) for kind in self.sort_kinds: - msg = "kind=%s" % kind + msg = f"kind={kind}" c = a.copy() c.sort(kind=kind) 
assert_equal(c, a, msg) @@ -2266,11 +2443,12 @@ def test_void_sort(self): arr[::-1].sort() def test_sort_raises(self): - #gh-9404 + # gh-9404 arr = np.array([0, datetime.now(), 1], dtype=object) for kind in self.sort_kinds: assert_raises(TypeError, arr.sort, kind=kind) - #gh-3879 + # gh-3879 + class Raiser: def raises_anything(*args, **kwargs): raise TypeError("SOMETHING ERRORED") @@ -2333,6 +2511,20 @@ def test__deepcopy__(self, dtype): with pytest.raises(AssertionError): assert_array_equal(a, b) + def test__deepcopy___void_scalar(self): + # see comments in gh-29643 + value = np.void('Rex', dtype=[('name', 'U10')]) + value_deepcopy = value.__deepcopy__(None) + value[0] = None + assert value_deepcopy[0] == 'Rex' + + @pytest.mark.parametrize("sctype", [np.int64, np.float32, np.float64]) + def test__deepcopy__scalar(self, sctype): + # test optimization from gh-29656 + value = sctype(1.1) + value_deepcopy = value.__deepcopy__(None) + assert value is value_deepcopy + def test__deepcopy__catches_failure(self): class MyObj: def __deepcopy__(self, *args, **kwargs): @@ -2390,30 +2582,30 @@ def test_argsort(self): a = np.arange(101, dtype=dtype) b = a[::-1].copy() for kind in self.sort_kinds: - msg = "scalar argsort, kind=%s, dtype=%s" % (kind, dtype) + msg = f"scalar argsort, kind={kind}, dtype={dtype}" assert_equal(a.copy().argsort(kind=kind), a, msg) assert_equal(b.copy().argsort(kind=kind), b, msg) # test complex argsorts. These use the same code as the scalars # but the compare function differs. 
- ai = a*1j + 1 - bi = b*1j + 1 + ai = a * 1j + 1 + bi = b * 1j + 1 for kind in self.sort_kinds: - msg = "complex argsort, kind=%s" % kind + msg = f"complex argsort, kind={kind}" assert_equal(ai.copy().argsort(kind=kind), a, msg) assert_equal(bi.copy().argsort(kind=kind), b, msg) ai = a + 1j bi = b + 1j for kind in self.sort_kinds: - msg = "complex argsort, kind=%s" % kind + msg = f"complex argsort, kind={kind}" assert_equal(ai.copy().argsort(kind=kind), a, msg) assert_equal(bi.copy().argsort(kind=kind), b, msg) # test argsort of complex arrays requiring byte-swapping, gh-5441 for endianness in '<>': for dt in np.typecodes['Complex']: - arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt) - msg = 'byte-swapped complex argsort, dtype={0}'.format(dt) + arr = np.array([1 + 3.j, 2 + 2.j, 3 + 1.j], dtype=endianness + dt) + msg = f'byte-swapped complex argsort, dtype={dt}' assert_equal(arr.argsort(), np.arange(len(arr), dtype=np.intp), msg) @@ -2424,7 +2616,7 @@ def test_argsort(self): r = np.arange(101) rr = r[::-1] for kind in self.sort_kinds: - msg = "string argsort, kind=%s" % kind + msg = f"string argsort, kind={kind}" assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) @@ -2435,7 +2627,7 @@ def test_argsort(self): r = np.arange(101) rr = r[::-1] for kind in self.sort_kinds: - msg = "unicode argsort, kind=%s" % kind + msg = f"unicode argsort, kind={kind}" assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) @@ -2446,7 +2638,7 @@ def test_argsort(self): r = np.arange(101) rr = r[::-1] for kind in self.sort_kinds: - msg = "object argsort, kind=%s" % kind + msg = f"object argsort, kind={kind}" assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) @@ -2457,7 +2649,7 @@ def test_argsort(self): r = np.arange(101) rr = r[::-1] for kind in self.sort_kinds: - msg = "structured array argsort, kind=%s" % kind + msg = 
f"structured array argsort, kind={kind}" assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) @@ -2467,7 +2659,7 @@ def test_argsort(self): r = np.arange(101) rr = r[::-1] for kind in ['q', 'h', 'm']: - msg = "datetime64 argsort, kind=%s" % kind + msg = f"datetime64 argsort, kind={kind}" assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) @@ -2477,7 +2669,7 @@ def test_argsort(self): r = np.arange(101) rr = r[::-1] for kind in ['q', 'h', 'm']: - msg = "timedelta64 argsort, kind=%s" % kind + msg = f"timedelta64 argsort, kind={kind}" assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) @@ -2491,10 +2683,9 @@ def test_argsort(self): assert_equal(a.copy().argsort(), c) # check axis handling for multidimensional empty arrays - a = np.array([]) - a.shape = (3, 2, 1, 0) + a = np.array([]).reshape((3, 2, 1, 0)) for axis in range(-a.ndim, a.ndim): - msg = 'test empty array argsort with axis={0}'.format(axis) + msg = f'test empty array argsort with axis={axis}' assert_equal(np.argsort(a, axis=axis), np.zeros_like(a, dtype=np.intp), msg) msg = 'test empty array argsort with axis=None' @@ -2518,7 +2709,7 @@ def test_argsort(self): with assert_raises_regex( ValueError, - "kind` and `stable` parameters can't be provided at the same time" + "`kind` and keyword parameters can't be provided at the same time" ): np.argsort(a, kind="stable", stable=True) @@ -2537,10 +2728,10 @@ def test_searchsorted_floats(self, a): # test for floats arrays containing nans. Explicitly test # half, single, and double precision floats to verify that # the NaN-handling is correct. 
- msg = "Test real (%s) searchsorted with nans, side='l'" % a.dtype + msg = f"Test real ({a.dtype}) searchsorted with nans, side='l'" b = a.searchsorted(a, side='left') assert_equal(b, np.arange(3), msg) - msg = "Test real (%s) searchsorted with nans, side='r'" % a.dtype + msg = f"Test real ({a.dtype}) searchsorted with nans, side='r'" b = a.searchsorted(a, side='right') assert_equal(b, np.arange(1, 4), msg) # check keyword arguments @@ -2691,7 +2882,7 @@ def test_searchsorted_with_sorter(self): k = np.linspace(0, 1, 20) assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s)) - a = np.array([0, 1, 2, 3, 5]*20) + a = np.array([0, 1, 2, 3, 5] * 20) s = a.argsort() k = [0, 1, 2, 3, 5] expected = [0, 20, 40, 60, 80] @@ -2809,10 +3000,9 @@ def test_partition_integer(self): def test_partition_empty_array(self, kth_dtype): # check axis handling for multidimensional empty arrays kth = np.array(0, dtype=kth_dtype)[()] - a = np.array([]) - a.shape = (3, 2, 1, 0) + a = np.array([]).reshape((3, 2, 1, 0)) for axis in range(-a.ndim, a.ndim): - msg = 'test empty array partition with axis={0}'.format(axis) + msg = f'test empty array partition with axis={axis}' assert_equal(np.partition(a, kth, axis=axis), a, msg) msg = 'test empty array partition with axis=None' assert_equal(np.partition(a, kth, axis=None), a.ravel(), msg) @@ -2821,10 +3011,9 @@ def test_partition_empty_array(self, kth_dtype): def test_argpartition_empty_array(self, kth_dtype): # check axis handling for multidimensional empty arrays kth = np.array(0, dtype=kth_dtype)[()] - a = np.array([]) - a.shape = (3, 2, 1, 0) + a = np.array([]).reshape((3, 2, 1, 0)) for axis in range(-a.ndim, a.ndim): - msg = 'test empty array argpartition with axis={0}'.format(axis) + msg = f'test empty array argpartition with axis={axis}' assert_equal(np.partition(a, kth, axis=axis), np.zeros_like(a, dtype=np.intp), msg) msg = 'test empty array argpartition with axis=None' @@ -3057,72 +3246,72 @@ def assert_partitioned(self, d, kth): 
prev = k + 1 def test_partition_iterative(self): - d = np.arange(17) - kth = (0, 1, 2, 429, 231) - assert_raises(ValueError, d.partition, kth) - assert_raises(ValueError, d.argpartition, kth) - d = np.arange(10).reshape((2, 5)) - assert_raises(ValueError, d.partition, kth, axis=0) - assert_raises(ValueError, d.partition, kth, axis=1) - assert_raises(ValueError, np.partition, d, kth, axis=1) - assert_raises(ValueError, np.partition, d, kth, axis=None) - - d = np.array([3, 4, 2, 1]) - p = np.partition(d, (0, 3)) - self.assert_partitioned(p, (0, 3)) - self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3)) - - assert_array_equal(p, np.partition(d, (-3, -1))) - assert_array_equal(p, d[np.argpartition(d, (-3, -1))]) - - d = np.arange(17) - np.random.shuffle(d) - d.partition(range(d.size)) - assert_array_equal(np.arange(17), d) - np.random.shuffle(d) - assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))]) - - # test unsorted kth - d = np.arange(17) - np.random.shuffle(d) - keys = np.array([1, 3, 8, -2]) - np.random.shuffle(d) - p = np.partition(d, keys) - self.assert_partitioned(p, keys) - p = d[np.argpartition(d, keys)] - self.assert_partitioned(p, keys) - np.random.shuffle(keys) - assert_array_equal(np.partition(d, keys), p) - assert_array_equal(d[np.argpartition(d, keys)], p) - - # equal kth - d = np.arange(20)[::-1] - self.assert_partitioned(np.partition(d, [5]*4), [5]) - self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]), - [5]*4 + [6, 13]) - self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5]) - self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])], - [5]*4 + [6, 13]) - - d = np.arange(12) - np.random.shuffle(d) - d1 = np.tile(np.arange(12), (4, 1)) - map(np.random.shuffle, d1) - d0 = np.transpose(d1) - - kth = (1, 6, 7, -1) - p = np.partition(d1, kth, axis=1) - pa = d1[np.arange(d1.shape[0])[:, None], - d1.argpartition(kth, axis=1)] - assert_array_equal(p, pa) - for i in range(d1.shape[0]): - 
self.assert_partitioned(p[i,:], kth) - p = np.partition(d0, kth, axis=0) - pa = d0[np.argpartition(d0, kth, axis=0), - np.arange(d0.shape[1])[None,:]] - assert_array_equal(p, pa) - for i in range(d0.shape[1]): - self.assert_partitioned(p[:, i], kth) + d = np.arange(17) + kth = (0, 1, 2, 429, 231) + assert_raises(ValueError, d.partition, kth) + assert_raises(ValueError, d.argpartition, kth) + d = np.arange(10).reshape((2, 5)) + assert_raises(ValueError, d.partition, kth, axis=0) + assert_raises(ValueError, d.partition, kth, axis=1) + assert_raises(ValueError, np.partition, d, kth, axis=1) + assert_raises(ValueError, np.partition, d, kth, axis=None) + + d = np.array([3, 4, 2, 1]) + p = np.partition(d, (0, 3)) + self.assert_partitioned(p, (0, 3)) + self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3)) + + assert_array_equal(p, np.partition(d, (-3, -1))) + assert_array_equal(p, d[np.argpartition(d, (-3, -1))]) + + d = np.arange(17) + np.random.shuffle(d) + d.partition(range(d.size)) + assert_array_equal(np.arange(17), d) + np.random.shuffle(d) + assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))]) + + # test unsorted kth + d = np.arange(17) + np.random.shuffle(d) + keys = np.array([1, 3, 8, -2]) + np.random.shuffle(d) + p = np.partition(d, keys) + self.assert_partitioned(p, keys) + p = d[np.argpartition(d, keys)] + self.assert_partitioned(p, keys) + np.random.shuffle(keys) + assert_array_equal(np.partition(d, keys), p) + assert_array_equal(d[np.argpartition(d, keys)], p) + + # equal kth + d = np.arange(20)[::-1] + self.assert_partitioned(np.partition(d, [5] * 4), [5]) + self.assert_partitioned(np.partition(d, [5] * 4 + [6, 13]), + [5] * 4 + [6, 13]) + self.assert_partitioned(d[np.argpartition(d, [5] * 4)], [5]) + self.assert_partitioned(d[np.argpartition(d, [5] * 4 + [6, 13])], + [5] * 4 + [6, 13]) + + d = np.arange(12) + np.random.shuffle(d) + d1 = np.tile(np.arange(12), (4, 1)) + map(np.random.shuffle, d1) + d0 = np.transpose(d1) + + kth = (1, 
6, 7, -1) + p = np.partition(d1, kth, axis=1) + pa = d1[np.arange(d1.shape[0])[:, None], + d1.argpartition(kth, axis=1)] + assert_array_equal(p, pa) + for i in range(d1.shape[0]): + self.assert_partitioned(p[i, :], kth) + p = np.partition(d0, kth, axis=0) + pa = d0[np.argpartition(d0, kth, axis=0), + np.arange(d0.shape[1])[None, :]] + assert_array_equal(p, pa) + for i in range(d0.shape[1]): + self.assert_partitioned(p[:, i], kth) def test_partition_cdtype(self): d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41), @@ -3166,7 +3355,7 @@ def test_partition_fuzz(self): kth = [0, idx, i, i + 1] tgt = np.sort(d)[kth] assert_array_equal(np.partition(d, kth)[kth], tgt, - err_msg="data: %r\n kth: %r" % (d, kth)) + err_msg=f"data: {d!r}\n kth: {kth!r}") @pytest.mark.parametrize("kth_dtype", np.typecodes["AllInteger"]) def test_argpartition_gh5524(self, kth_dtype): @@ -3174,7 +3363,7 @@ def test_argpartition_gh5524(self, kth_dtype): kth = np.array(1, dtype=kth_dtype)[()] d = [6, 7, 3, 2, 9, 0] p = np.argpartition(d, kth) - self.assert_partitioned(np.array(d)[p],[1]) + self.assert_partitioned(np.array(d)[p], [1]) def test_flatten(self): x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32) @@ -3190,7 +3379,6 @@ def test_flatten(self): assert_equal(x1.flatten('F'), y1f) assert_equal(x1.flatten('F'), x1.T.flatten()) - @pytest.mark.parametrize('func', (np.dot, np.matmul)) def test_arr_mult(self, func): a = np.array([[1, 0], [0, 1]]) @@ -3212,7 +3400,6 @@ def test_arr_mult(self, func): [684, 740, 796, 852, 908, 964]] ) - # gemm vs syrk optimizations for et in [np.float32, np.float64, np.complex64, np.complex128]: eaf = a.astype(et) @@ -3317,9 +3504,38 @@ def test_dot(self): a.dot(b=b, out=c) assert_equal(c, np.dot(a, b)) + @pytest.mark.parametrize("dtype", [np.half, np.double, np.longdouble]) + @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support") + def test_dot_errstate(self, dtype): + # Some dtypes use BLAS for 'dot' operation and + # not all BLAS support 
floating-point errors. + if not BLAS_SUPPORTS_FPE and dtype == np.double: + pytest.skip("BLAS does not support FPE") + + a = np.array([1, 1], dtype=dtype) + b = np.array([-np.inf, np.inf], dtype=dtype) + + with np.errstate(invalid='raise'): + # there are two paths, depending on the number of dimensions - test + # them both + with pytest.raises(FloatingPointError, + match="invalid value encountered in dot"): + np.dot(a, b) + + # test that fp exceptions are properly cleared + np.dot(a, a) + + with pytest.raises(FloatingPointError, + match="invalid value encountered in dot"): + np.dot(a[np.newaxis, np.newaxis, ...], + b[np.newaxis, ..., np.newaxis]) + + np.dot(a[np.newaxis, np.newaxis, ...], + a[np.newaxis, ..., np.newaxis]) + def test_dot_type_mismatch(self): c = 1. - A = np.array((1,1), dtype='i,i') + A = np.array((1, 1), dtype='i,i') assert_raises(TypeError, np.dot, c, A) assert_raises(TypeError, np.dot, A, c) @@ -3490,12 +3706,12 @@ def test_put(self): # test 1-d a = np.zeros(6, dtype=dt) - a.put([1, 3, 5], [True]*3) + a.put([1, 3, 5], [True] * 3) assert_equal(a, tgt) # test 2-d a = np.zeros((2, 3), dtype=dt) - a.put([1, 3, 5], [True]*3) + a.put([1, 3, 5], [True] * 3) assert_equal(a, tgt.reshape(2, 3)) # check must be writeable @@ -3509,17 +3725,16 @@ def test_put(self): bad_array = [1, 2, 3] assert_raises(TypeError, np.put, bad_array, [0, 2], 5) - # when calling np.put, make sure an - # IndexError is raised if the + # when calling np.put, make sure an + # IndexError is raised if the # array is empty - empty_array = np.asarray(list()) - with pytest.raises(IndexError, + empty_array = np.asarray([]) + with pytest.raises(IndexError, match="cannot replace elements of an empty array"): np.put(empty_array, 1, 1, mode="wrap") - with pytest.raises(IndexError, + with pytest.raises(IndexError, match="cannot replace elements of an empty array"): np.put(empty_array, 1, 1, mode="clip") - def test_ravel(self): a = np.array([[0, 1], [2, 3]]) @@ -3563,7 +3778,7 @@ def 
test_ravel(self): a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2) strides = list(a.strides) strides[1] = 123 - a.strides = strides + a = stride_tricks.as_strided(a, strides=strides) assert_(a.ravel(order='K').flags.owndata) assert_equal(a.ravel('K'), np.arange(0, 15, 2)) @@ -3572,7 +3787,7 @@ def test_ravel(self): a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2) strides = list(a.strides) strides[1] = 123 - a.strides = strides + a = stride_tricks.as_strided(a, strides=strides) assert_(np.may_share_memory(a.ravel(order='K'), a)) assert_equal(a.ravel(order='K'), np.arange(2**3)) @@ -3585,7 +3800,7 @@ def test_ravel(self): # 1-element tidy strides test: a = np.array([[1]]) - a.strides = (123, 432) + a = stride_tricks.as_strided(a, strides=(123, 432)) if np.ones(1).strides == (8,): assert_(np.may_share_memory(a.ravel('K'), a)) assert_equal(a.ravel('K').strides, (a.dtype.itemsize,)) @@ -3626,8 +3841,20 @@ class ArraySubclass(np.ndarray): assert_(isinstance(a.ravel('A'), ArraySubclass)) assert_(isinstance(a.ravel('K'), ArraySubclass)) + @pytest.mark.parametrize("shape", [(3, 224, 224), (8, 512, 512)]) + def test_tobytes_no_copy_fastpath(self, shape): + # Test correctness of non-contiguous paths for `tobytes` + rng = np.random.default_rng(0) + arr = rng.standard_normal(shape, dtype=np.float32) + noncontig = arr.transpose(1, 2, 0) + + # correctness + expected = np.ascontiguousarray(noncontig).tobytes() + got = noncontig.tobytes() + assert got == expected + def test_swapaxes(self): - a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy() + a = np.arange(1 * 2 * 3 * 4).reshape(1, 2, 3, 4).copy() idx = np.indices(a.shape) assert_(a.flags['OWNDATA']) b = a.copy() @@ -3647,8 +3874,8 @@ def test_swapaxes(self): shape[j] = src.shape[i] assert_equal(c.shape, shape, str((i, j, k))) # check array contents - i0, i1, i2, i3 = [dim-1 for dim in c.shape] - j0, j1, j2, j3 = [dim-1 for dim in src.shape] + i0, i1, i2, i3 = [dim - 1 for dim in c.shape] + j0, j1, j2, j3 = [dim - 1 for dim in src.shape] 
assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]], c[idx[i0], idx[i1], idx[i2], idx[i3]], str((i, j, k))) @@ -3659,14 +3886,14 @@ def test_swapaxes(self): b = c def test_conjugate(self): - a = np.array([1-1j, 1+1j, 23+23.0j]) + a = np.array([1 - 1j, 1 + 1j, 23 + 23.0j]) ac = a.conj() assert_equal(a.real, ac.real) assert_equal(a.imag, -ac.imag) assert_equal(ac, a.conjugate()) assert_equal(ac, np.conjugate(a)) - a = np.array([1-1j, 1+1j, 23+23.0j], 'F') + a = np.array([1 - 1j, 1 + 1j, 23 + 23.0j], 'F') ac = a.conj() assert_equal(a.real, ac.real) assert_equal(a.imag, -ac.imag) @@ -3685,25 +3912,34 @@ def test_conjugate(self): assert_equal(ac, a.conjugate()) assert_equal(ac, np.conjugate(a)) - a = np.array([1-1j, 1+1j, 1, 2.0], object) + a = np.array([1 - 1j, 1 + 1j, 1, 2.0], object) ac = a.conj() assert_equal(ac, [k.conjugate() for k in a]) assert_equal(ac, a.conjugate()) assert_equal(ac, np.conjugate(a)) - a = np.array([1-1j, 1, 2.0, 'f'], object) - assert_raises(TypeError, lambda: a.conj()) - assert_raises(TypeError, lambda: a.conjugate()) + a = np.array([1 - 1j, 1, 2.0, 'f'], object) + assert_raises(TypeError, a.conj) + assert_raises(TypeError, a.conjugate) def test_conjugate_out(self): # Minimal test for the out argument being passed on correctly # NOTE: The ability to pass `out` is currently undocumented! 
- a = np.array([1-1j, 1+1j, 23+23.0j]) + a = np.array([1 - 1j, 1 + 1j, 23 + 23.0j]) out = np.empty_like(a) res = a.conjugate(out) assert res is out assert_array_equal(out, a.conjugate()) + def test_conjugate_scalar(self): + for v in 5, 5j: + a = np.array(v) + assert a.conjugate() == v.conjugate() + for a in (np.array('s'), np.array('2016', 'M'), + np.array((1, 2), [('a', int), ('b', int)])): + with pytest.raises(TypeError): + a.conjugate() + def test__complex__(self): dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8', @@ -3711,21 +3947,10 @@ def test__complex__(self): '?', 'O'] for dt in dtypes: a = np.array(7, dtype=dt) - b = np.array([7], dtype=dt) - c = np.array([[[[[7]]]]], dtype=dt) - - msg = 'dtype: {0}'.format(dt) + msg = f'dtype: {dt}' ap = complex(a) assert_equal(ap, a, msg) - with assert_warns(DeprecationWarning): - bp = complex(b) - assert_equal(bp, b, msg) - - with assert_warns(DeprecationWarning): - cp = complex(c) - assert_equal(cp, c, msg) - def test__complex__should_not_work(self): dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8', @@ -3733,7 +3958,11 @@ def test__complex__should_not_work(self): '?', 'O'] for dt in dtypes: a = np.array([1, 2, 3], dtype=dt) + b = np.array([7], dtype=dt) + c = np.array([[[[[7]]]]], dtype=dt) assert_raises(TypeError, complex, a) + assert_raises(TypeError, complex, b) + assert_raises(TypeError, complex, c) dt = np.dtype([('a', 'f8'), ('b', 'i1')]) b = np.array((1.0, 3), dtype=dt) @@ -3746,13 +3975,12 @@ def test__complex__should_not_work(self): assert_raises(TypeError, complex, d) e = np.array(['1+1j'], 'U') - with assert_warns(DeprecationWarning): - assert_raises(TypeError, complex, e) + assert_raises(TypeError, complex, e) class TestCequenceMethods: def test_array_contains(self): - assert_(4.0 in np.arange(16.).reshape(4,4)) - assert_(20.0 not in np.arange(16.).reshape(4,4)) + assert_(4.0 in np.arange(16.).reshape(4, 4)) + assert_(20.0 not in np.arange(16.).reshape(4, 4)) class TestBinop: def 
test_inplace(self): @@ -3792,7 +4020,6 @@ def test_inplace(self): # - defer if other has __array_ufunc__ and it is None # or other is not a subclass and has higher array priority # - else, call ufunc - @pytest.mark.xfail(IS_PYPY, reason="Bug in pypy3.{9, 10}-v7.3.13, #24862") def test_ufunc_binop_interaction(self): # Python method name (without underscores) # -> (numpy ufunc, has_in_place_version, preferred_dtype) @@ -3846,9 +4073,9 @@ def make_obj(base, array_priority=False, array_ufunc=False, if array_priority is not False: class_namespace["__array_priority__"] = array_priority for op in ops: - class_namespace["__{0}__".format(op)] = op_impl - class_namespace["__r{0}__".format(op)] = rop_impl - class_namespace["__i{0}__".format(op)] = iop_impl + class_namespace[f"__{op}__"] = op_impl + class_namespace[f"__r{op}__"] = rop_impl + class_namespace[f"__i{op}__"] = iop_impl if array_ufunc is not False: class_namespace["__array_ufunc__"] = array_ufunc eval_namespace = {"base": base, @@ -3873,7 +4100,7 @@ def check(obj, binop_override_expected, ufunc_override_expected, if check_scalar: check_objs.append(check_objs[0][0]) for arr in check_objs: - arr_method = getattr(arr, "__{0}__".format(op)) + arr_method = getattr(arr, f"__{op}__") def first_out_arg(result): if op == "divmod": @@ -3888,39 +4115,37 @@ def first_out_arg(result): elif ufunc_override_expected: assert_equal(arr_method(obj)[0], "__array_ufunc__", err_msg) + elif (isinstance(obj, np.ndarray) and + (type(obj).__array_ufunc__ is + np.ndarray.__array_ufunc__)): + # __array__ gets ignored + res = first_out_arg(arr_method(obj)) + assert_(res.__class__ is obj.__class__, err_msg) else: - if (isinstance(obj, np.ndarray) and - (type(obj).__array_ufunc__ is - np.ndarray.__array_ufunc__)): - # __array__ gets ignored - res = first_out_arg(arr_method(obj)) - assert_(res.__class__ is obj.__class__, err_msg) - else: - assert_raises((TypeError, Coerced), - arr_method, obj, err_msg=err_msg) + assert_raises((TypeError, 
Coerced), + arr_method, obj, err_msg=err_msg) # obj __op__ arr - arr_rmethod = getattr(arr, "__r{0}__".format(op)) + arr_rmethod = getattr(arr, f"__r{op}__") if ufunc_override_expected: res = arr_rmethod(obj) assert_equal(res[0], "__array_ufunc__", err_msg=err_msg) assert_equal(res[1], ufunc, err_msg=err_msg) + elif (isinstance(obj, np.ndarray) and + (type(obj).__array_ufunc__ is + np.ndarray.__array_ufunc__)): + # __array__ gets ignored + res = first_out_arg(arr_rmethod(obj)) + assert_(res.__class__ is obj.__class__, err_msg) else: - if (isinstance(obj, np.ndarray) and - (type(obj).__array_ufunc__ is - np.ndarray.__array_ufunc__)): - # __array__ gets ignored - res = first_out_arg(arr_rmethod(obj)) - assert_(res.__class__ is obj.__class__, err_msg) - else: - # __array_ufunc__ = "asdf" creates a TypeError - assert_raises((TypeError, Coerced), - arr_rmethod, obj, err_msg=err_msg) + # __array_ufunc__ = "asdf" creates a TypeError + assert_raises((TypeError, Coerced), + arr_rmethod, obj, err_msg=err_msg) # arr __iop__ obj # array scalars don't have in-place operators if has_inplace and isinstance(arr, np.ndarray): - arr_imethod = getattr(arr, "__i{0}__".format(op)) + arr_imethod = getattr(arr, f"__i{op}__") if inplace_override_expected: assert_equal(arr_method(obj), NotImplemented, err_msg=err_msg) @@ -3930,16 +4155,15 @@ def first_out_arg(result): assert_equal(res[1], ufunc, err_msg) assert_(type(res[-1]["out"]) is tuple, err_msg) assert_(res[-1]["out"][0] is arr, err_msg) + elif (isinstance(obj, np.ndarray) and + (type(obj).__array_ufunc__ is + np.ndarray.__array_ufunc__)): + # __array__ gets ignored + assert_(arr_imethod(obj) is arr, err_msg) else: - if (isinstance(obj, np.ndarray) and - (type(obj).__array_ufunc__ is - np.ndarray.__array_ufunc__)): - # __array__ gets ignored - assert_(arr_imethod(obj) is arr, err_msg) - else: - assert_raises((TypeError, Coerced), - arr_imethod, obj, - err_msg=err_msg) + assert_raises((TypeError, Coerced), + arr_imethod, obj, + 
err_msg=err_msg) op_fn = getattr(operator, op, None) if op_fn is None: @@ -4002,6 +4226,18 @@ class LowPriority(np.ndarray): assert res.shape == (3,) assert res[0] == 'result' + @pytest.mark.parametrize("scalar", [ + np.longdouble(1), np.timedelta64(120, 'm')]) + @pytest.mark.parametrize("op", [operator.add, operator.xor]) + def test_scalar_binop_guarantees_ufunc(self, scalar, op): + # Test that __array_ufunc__ will always cause ufunc use even when + # we have to protect some other calls from recursing (see gh-26904). + class SomeClass: + def __array_ufunc__(self, ufunc, method, *inputs, **kw): + return "result" + + assert SomeClass() + scalar == "result" + assert scalar + SomeClass() == "result" def test_ufunc_override_normalize_signature(self): # gh-5674 @@ -4088,27 +4324,6 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kw): assert_equal(A[0], 30) assert_(isinstance(A, OutClass)) - def test_pow_override_with_errors(self): - # regression test for gh-9112 - class PowerOnly(np.ndarray): - def __array_ufunc__(self, ufunc, method, *inputs, **kw): - if ufunc is not np.power: - raise NotImplementedError - return "POWER!" - # explicit cast to float, to ensure the fast power path is taken. 
- a = np.array(5., dtype=np.float64).view(PowerOnly) - assert_equal(a ** 2.5, "POWER!") - with assert_raises(NotImplementedError): - a ** 0.5 - with assert_raises(NotImplementedError): - a ** 0 - with assert_raises(NotImplementedError): - a ** 1 - with assert_raises(NotImplementedError): - a ** -1 - with assert_raises(NotImplementedError): - a ** 2 - def test_pow_array_object_dtype(self): # test pow on arrays of object dtype class SomeClass: @@ -4119,8 +4334,8 @@ def __init__(self, num=None): def __mul__(self, other): raise AssertionError('__mul__ should not be called') - def __div__(self, other): - raise AssertionError('__div__ should not be called') + def __truediv__(self, other): + raise AssertionError('__truediv__ should not be called') def __pow__(self, exp): return SomeClass(num=self.num ** exp) @@ -4142,6 +4357,13 @@ def pow_for(exp, arr): assert_equal(obj_arr ** -1, pow_for(-1, obj_arr)) assert_equal(obj_arr ** 2, pow_for(2, obj_arr)) + def test_pow_calls_square_structured_dtype(self): + # gh-29388 + dt = np.dtype([('a', 'i4'), ('b', 'i4')]) + a = np.array([(1, 2), (3, 4)], dtype=dt) + with pytest.raises(TypeError, match="ufunc 'square' not supported"): + a ** 2 + def test_pos_array_ufunc_override(self): class A(np.ndarray): def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): @@ -4281,8 +4503,6 @@ def test_intp_sequence_converters(self, converter): @pytest.mark.parametrize("converter", [_multiarray_tests.run_scalar_intp_converter, _multiarray_tests.run_scalar_intp_from_sequence]) - @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_intp_sequence_converters_errors(self, converter): with pytest.raises(TypeError, match="expected a sequence of integers or a single integer, "): @@ -4299,6 +4519,37 @@ def test_intp_sequence_converters_errors(self, converter): # These converters currently convert overflows to a ValueError converter(2**64) + @pytest.mark.parametrize( + "entry_point", + 
[ + module + item + for item in ("sin", "strings.str_len", "fft._pocketfft_umath.ifft") + for module in ("", "numpy:") + ] + [ + "numpy.strings:str_len", + "functools:reduce", + "functools:reduce.__doc__" + ] + ) + def test_import_entry_point(self, entry_point): + modname, _, items = entry_point.rpartition(":") + if modname: + module = obj = importlib.import_module(modname) + else: + module = np + exp = functools.reduce(getattr, items.split("."), module) + got = _multiarray_tests.npy_import_entry_point(entry_point) + assert got == exp + + @pytest.mark.parametrize( + "entry_point", + ["sin.", "numpy:", "numpy:sin:__call__", "numpy.sin:__call__."] + ) + def test_import_entry_point_errors(self, entry_point): + # Don't really care about precise error. + with pytest.raises((ImportError, AttributeError)): + _multiarray_tests.npy_import_entry_point(entry_point) + class TestSubscripting: def test_test_zero_rank(self): @@ -4308,9 +4559,13 @@ def test_test_zero_rank(self): class TestPickling: - @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL >= 5, - reason=('this tests the error messages when trying to' - 'protocol 5 although it is not available')) + @pytest.mark.skipif( + pickle.HIGHEST_PROTOCOL >= 5, + reason=( + "this tests the error messages when trying toprotocol 5 " + "although it is not available" + ), + ) def test_correct_protocol5_error_message(self): array = np.arange(10) @@ -4335,8 +4590,10 @@ def test_record_array_with_object_dtype(self): assert_equal(arr_without_object.dtype, depickled_arr_without_object.dtype) - @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, - reason="requires pickle protocol 5") + @pytest.mark.skipif( + pickle.HIGHEST_PROTOCOL < 5, + reason="requires pickle protocol 5", + ) def test_f_contiguous_array(self): f_contiguous_array = np.array([[1, 2, 3], [4, 5, 6]], order='F') buffers = [] @@ -4353,6 +4610,56 @@ def test_f_contiguous_array(self): assert_equal(f_contiguous_array, depickled_f_contiguous_array) + @pytest.mark.skipif( + 
pickle.HIGHEST_PROTOCOL < 5, + reason="requires pickle protocol 5", + ) + @pytest.mark.parametrize( + "transposed_contiguous_array", + [ + np.random.default_rng(42).random((2, 3, 4)).transpose((1, 0, 2)), + np.random.default_rng(42).random((2, 3, 4, 5)).transpose((1, 3, 0, 2)), + ] + + [ + np.random.default_rng(42) + .random(np.arange(2, 7)) + .transpose(np.random.permutation(5)) + for _ in range(3) + ], + ) + def test_transposed_contiguous_array(self, transposed_contiguous_array): + buffers = [] + # When using pickle protocol 5, arrays which can be transposed to c_contiguous + # can be serialized using out-of-band buffers + bytes_string = pickle.dumps(transposed_contiguous_array, protocol=5, + buffer_callback=buffers.append) + + assert len(buffers) > 0 + + depickled_transposed_contiguous_array = pickle.loads(bytes_string, + buffers=buffers) + + assert_equal(transposed_contiguous_array, depickled_transposed_contiguous_array) + + @pytest.mark.skipif( + pickle.HIGHEST_PROTOCOL < 5, + reason="requires pickle protocol 5", + ) + def test_load_legacy_pkl_protocol5(self): + # legacy byte strs are dumped in 2.2.1 + c_contiguous_dumped = b'\x80\x05\x95\x90\x00\x00\x00\x00\x00\x00\x00\x8c\x13numpy._core.numeric\x94\x8c\x0b_frombuffer\x94\x93\x94(\x96\x18\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x94\x8c\x05numpy\x94\x8c\x05dtype\x94\x93\x94\x8c\x02u1\x94\x89\x88\x87\x94R\x94(K\x03\x8c\x01|\x94NNNJ\xff\xff\xff\xffJ\xff\xff\xff\xffK\x00t\x94bK\x03K\x04K\x02\x87\x94\x8c\x01C\x94t\x94R\x94.' 
# noqa: E501 + f_contiguous_dumped = b'\x80\x05\x95\x90\x00\x00\x00\x00\x00\x00\x00\x8c\x13numpy._core.numeric\x94\x8c\x0b_frombuffer\x94\x93\x94(\x96\x18\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x94\x8c\x05numpy\x94\x8c\x05dtype\x94\x93\x94\x8c\x02u1\x94\x89\x88\x87\x94R\x94(K\x03\x8c\x01|\x94NNNJ\xff\xff\xff\xffJ\xff\xff\xff\xffK\x00t\x94bK\x03K\x04K\x02\x87\x94\x8c\x01F\x94t\x94R\x94.' # noqa: E501 + transposed_contiguous_dumped = b'\x80\x05\x95\xa5\x00\x00\x00\x00\x00\x00\x00\x8c\x16numpy._core.multiarray\x94\x8c\x0c_reconstruct\x94\x93\x94\x8c\x05numpy\x94\x8c\x07ndarray\x94\x93\x94K\x00\x85\x94C\x01b\x94\x87\x94R\x94(K\x01K\x04K\x03K\x02\x87\x94h\x03\x8c\x05dtype\x94\x93\x94\x8c\x02u1\x94\x89\x88\x87\x94R\x94(K\x03\x8c\x01|\x94NNNJ\xff\xff\xff\xffJ\xff\xff\xff\xffK\x00t\x94b\x89C\x18\x00\x01\x08\t\x10\x11\x02\x03\n\x0b\x12\x13\x04\x05\x0c\r\x14\x15\x06\x07\x0e\x0f\x16\x17\x94t\x94b.' # noqa: E501 + no_contiguous_dumped = b'\x80\x05\x95\x91\x00\x00\x00\x00\x00\x00\x00\x8c\x16numpy._core.multiarray\x94\x8c\x0c_reconstruct\x94\x93\x94\x8c\x05numpy\x94\x8c\x07ndarray\x94\x93\x94K\x00\x85\x94C\x01b\x94\x87\x94R\x94(K\x01K\x03K\x02\x86\x94h\x03\x8c\x05dtype\x94\x93\x94\x8c\x02u1\x94\x89\x88\x87\x94R\x94(K\x03\x8c\x01|\x94NNNJ\xff\xff\xff\xffJ\xff\xff\xff\xffK\x00t\x94b\x89C\x06\x00\x01\x04\x05\x08\t\x94t\x94b.' 
# noqa: E501 + x = np.arange(24, dtype='uint8').reshape(3, 4, 2) + assert_equal(x, pickle.loads(c_contiguous_dumped)) + x = np.arange(24, dtype='uint8').reshape(3, 4, 2, order='F') + assert_equal(x, pickle.loads(f_contiguous_dumped)) + x = np.arange(24, dtype='uint8').reshape(3, 4, 2).transpose((1, 0, 2)) + assert_equal(x, pickle.loads(transposed_contiguous_dumped)) + x = np.arange(12, dtype='uint8').reshape(3, 4)[:, :2] + assert_equal(x, pickle.loads(no_contiguous_dumped)) + def test_non_contiguous_array(self): non_contiguous_array = np.arange(12).reshape(3, 4)[:, :2] assert not non_contiguous_array.flags.c_contiguous @@ -4360,12 +4667,21 @@ def test_non_contiguous_array(self): # make sure non-contiguous arrays can be pickled-depickled # using any protocol + buffers = [] for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + buffer_callback = buffers.append if proto >= 5 else None depickled_non_contiguous_array = pickle.loads( - pickle.dumps(non_contiguous_array, protocol=proto)) + pickle.dumps( + non_contiguous_array, + protocol=proto, + buffer_callback=buffer_callback, + ) + ) + assert_equal(len(buffers), 0) assert_equal(non_contiguous_array, depickled_non_contiguous_array) + @pytest.mark.thread_unsafe(reason="calls gc.collect()") def test_roundtrip(self): for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): carray = np.array([[2, 9], [7, 0], [3, 8]]) @@ -4380,7 +4696,7 @@ def test_roundtrip(self): for a in DATA: assert_equal( a, pickle.loads(pickle.dumps(a, protocol=proto)), - err_msg="%r" % a) + err_msg=f"{a!r}") del a, DATA, carray break_cycles() # check for reference leaks (gh-12793) @@ -4392,45 +4708,112 @@ def _loads(self, obj): # version 0 pickles, using protocol=2 to pickle # version 0 doesn't have a version field + @pytest.mark.filterwarnings( + "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning", + ) def test_version0_int8(self): - s = 
b"\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb." # noqa + s = ( + b"\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\n" + b"ndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\n" + b"dtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff" + b"\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb." + ) a = np.array([1, 2, 3, 4], dtype=np.int8) p = self._loads(s) assert_equal(a, p) + @pytest.mark.filterwarnings( + "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning", + ) def test_version0_float32(self): - s = b"\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01= 100).all() + assert (res[x >= 100] == x[x >= 100]).all() def test_record_array(self): rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)], @@ -5143,7 +5538,7 @@ def tst_basic(self, x, T, mask, val): def test_ip_types(self): unchecked_types = [bytes, str, np.void] - x = np.random.random(1000)*100 + x = np.random.random(1000) * 100 mask = x < 40 for val in [-100, 0, 15]: @@ -5222,8 +5617,8 @@ def tst_basic(self, x): def test_ip_types(self): unchecked_types = [bytes, str, np.void] - x = np.random.random(24)*100 - x.shape = 2, 3, 4 + x = np.random.random(24) * 100 + x = x.reshape((2, 3, 4)) for types in np._core.sctypes.values(): for T in types: if T not in unchecked_types: @@ -5233,21 +5628,21 @@ def test_ip_types(self): self.tst_basic(x.astype("S3")) def test_raise(self): - x = np.random.random(24)*100 - x.shape = 2, 3, 4 + x = np.random.random(24) * 100 + x = x.reshape((2, 3, 4)) assert_raises(IndexError, x.take, [0, 1, 2], axis=0) assert_raises(IndexError, x.take, [-3], axis=0) assert_array_equal(x.take([-1], axis=0)[0], x[1]) def test_clip(self): - x = np.random.random(24)*100 - 
x.shape = 2, 3, 4 + x = np.random.random(24) * 100 + x = x.reshape((2, 3, 4)) assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0]) assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1]) def test_wrap(self): - x = np.random.random(24)*100 - x.shape = 2, 3, 4 + x = np.random.random(24) * 100 + x = x.reshape((2, 3, 4)) assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1]) assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0]) assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1]) @@ -5281,7 +5676,7 @@ def test_ret_is_out(self, shape): class TestLexsort: - @pytest.mark.parametrize('dtype',[ + @pytest.mark.parametrize('dtype', [ np.uint8, np.uint16, np.uint32, np.uint64, np.int8, np.int16, np.int32, np.int64, np.float16, np.float32, np.float64 @@ -5303,14 +5698,14 @@ def test_mixed(self): assert_array_equal(idx, expected_idx) def test_datetime(self): - a = np.array([0,0,0], dtype='datetime64[D]') - b = np.array([2,1,0], dtype='datetime64[D]') + a = np.array([0, 0, 0], dtype='datetime64[D]') + b = np.array([2, 1, 0], dtype='datetime64[D]') idx = np.lexsort((b, a)) expected_idx = np.array([2, 1, 0]) assert_array_equal(idx, expected_idx) - a = np.array([0,0,0], dtype='timedelta64[D]') - b = np.array([2,1,0], dtype='timedelta64[D]') + a = np.array([0, 0, 0], dtype='timedelta64[D]') + b = np.array([2, 1, 0], dtype='timedelta64[D]') idx = np.lexsort((b, a)) expected_idx = np.array([2, 1, 0]) assert_array_equal(idx, expected_idx) @@ -5331,15 +5726,30 @@ def test_object(self): # gh-6312 u, v = np.array(u, dtype='object'), np.array(v, dtype='object') assert_array_equal(idx, np.lexsort((u, v))) - def test_invalid_axis(self): # gh-7528 - x = np.linspace(0., 1., 42*3).reshape(42, 3) + def test_strings(self): # gh-27984 + for dtype in "TU": + surnames = np.array(['Hertz', 'Galilei', 'Hertz'], dtype=dtype) + first_names = np.array(['Heinrich', 'Galileo', 'Gustav'], dtype=dtype) + assert_array_equal(np.lexsort((first_names, surnames)), [1, 2, 
0]) + + def test_invalid_axis(self): # gh-7528 + x = np.linspace(0., 1., 42 * 3).reshape(42, 3) assert_raises(AxisError, np.lexsort, x, axis=2) + +def normalize_filename(tmp_path, param): + # Handles two cases, where filename should + # be a string, or a path object. + path = tmp_path / "file" + if param == "string": + return str(path) + return path + + class TestIO: """Test tofile, fromfile, tobytes, and fromstring""" - @pytest.fixture() - def x(self): + def _create_data(self): shape = (2, 4, 3) rand = np.random.random x = rand(shape) + rand(shape).astype(complex) * 1j @@ -5347,14 +5757,11 @@ def x(self): return x @pytest.fixture(params=["string", "path_obj"]) - def tmp_filename(self, tmp_path, request): - # This fixture covers two cases: - # one where the filename is a string and - # another where it is a pathlib object - filename = tmp_path / "file" - if request.param == "string": - filename = str(filename) - yield filename + def param_filename(self, request): + # This fixtures returns string or path_obj + # so that every test doesn't need to have the + # parametrize marker. 
+ return request.param def test_nofile(self): # this should probably be supported as a file @@ -5385,19 +5792,23 @@ def test_fromstring_count0(self): d = np.fromstring("1,2", sep=",", dtype=np.int64, count=0) assert d.shape == (0,) - def test_empty_files_text(self, tmp_filename): + def test_empty_files_text(self, tmp_path, param_filename): + tmp_filename = normalize_filename(tmp_path, param_filename) with open(tmp_filename, 'w') as f: pass y = np.fromfile(tmp_filename) assert_(y.size == 0, "Array not empty") - def test_empty_files_binary(self, tmp_filename): + def test_empty_files_binary(self, tmp_path, param_filename): + tmp_filename = normalize_filename(tmp_path, param_filename) with open(tmp_filename, 'wb') as f: pass y = np.fromfile(tmp_filename, sep=" ") assert_(y.size == 0, "Array not empty") - def test_roundtrip_file(self, x, tmp_filename): + def test_roundtrip_file(self, tmp_path, param_filename): + tmp_filename = normalize_filename(tmp_path, param_filename) + x = self._create_data() with open(tmp_filename, 'wb') as f: x.tofile(f) # NB. 
doesn't work with flush+seek, due to use of C stdio @@ -5405,18 +5816,23 @@ def test_roundtrip_file(self, x, tmp_filename): y = np.fromfile(f, dtype=x.dtype) assert_array_equal(y, x.flat) - def test_roundtrip(self, x, tmp_filename): + def test_roundtrip(self, tmp_path, param_filename): + tmp_filename = normalize_filename(tmp_path, param_filename) + x = self._create_data() x.tofile(tmp_filename) y = np.fromfile(tmp_filename, dtype=x.dtype) assert_array_equal(y, x.flat) - def test_roundtrip_dump_pathlib(self, x, tmp_filename): + def test_roundtrip_dump_pathlib(self, tmp_path, param_filename): + tmp_filename = normalize_filename(tmp_path, param_filename) + x = self._create_data() p = pathlib.Path(tmp_filename) x.dump(p) y = np.load(p, allow_pickle=True) assert_array_equal(y, x) - def test_roundtrip_binary_str(self, x): + def test_roundtrip_binary_str(self): + x = self._create_data() s = x.tobytes() y = np.frombuffer(s, dtype=x.dtype) assert_array_equal(y, x.flat) @@ -5425,7 +5841,8 @@ def test_roundtrip_binary_str(self, x): y = np.frombuffer(s, dtype=x.dtype) assert_array_equal(y, x.flatten('F')) - def test_roundtrip_str(self, x): + def test_roundtrip_str(self): + x = self._create_data() x = x.real.ravel() s = "@".join(map(str, x)) y = np.fromstring(s, sep="@") @@ -5433,14 +5850,17 @@ def test_roundtrip_str(self, x): assert_array_equal(x[nan_mask], y[nan_mask]) assert_array_equal(x[~nan_mask], y[~nan_mask]) - def test_roundtrip_repr(self, x): + def test_roundtrip_repr(self): + x = self._create_data() x = x.real.ravel() - s = "@".join(map(lambda x: repr(x)[11:-1], x)) + s = "@".join(repr(x)[11:-1] for x in x) y = np.fromstring(s, sep="@") assert_array_equal(x, y) - def test_unseekable_fromfile(self, x, tmp_filename): + def test_unseekable_fromfile(self, tmp_path, param_filename): # gh-6246 + tmp_filename = normalize_filename(tmp_path, param_filename) + x = self._create_data() x.tofile(tmp_filename) def fail(*args, **kwargs): @@ -5451,15 +5871,18 @@ def fail(*args, 
**kwargs): f.tell = fail assert_raises(OSError, np.fromfile, f, dtype=x.dtype) - def test_io_open_unbuffered_fromfile(self, x, tmp_filename): + def test_io_open_unbuffered_fromfile(self, tmp_path, param_filename): # gh-6632 + tmp_filename = normalize_filename(tmp_path, param_filename) + x = self._create_data() x.tofile(tmp_filename) with open(tmp_filename, 'rb', buffering=0) as f: y = np.fromfile(f, dtype=x.dtype) assert_array_equal(y, x.flat) - def test_largish_file(self, tmp_filename): + def test_largish_file(self, tmp_path, param_filename): # check the fallocate path on files > 16MB + tmp_filename = normalize_filename(tmp_path, param_filename) d = np.zeros(4 * 1024 ** 2) d.tofile(tmp_filename) assert_equal(os.path.getsize(tmp_filename), d.nbytes) @@ -5478,22 +5901,25 @@ def test_largish_file(self, tmp_filename): d.tofile(f) assert_equal(os.path.getsize(tmp_filename), d.nbytes * 2) - def test_io_open_buffered_fromfile(self, x, tmp_filename): + def test_io_open_buffered_fromfile(self, tmp_path, param_filename): # gh-6632 + tmp_filename = normalize_filename(tmp_path, param_filename) + x = self._create_data() x.tofile(tmp_filename) with open(tmp_filename, 'rb', buffering=-1) as f: y = np.fromfile(f, dtype=x.dtype) assert_array_equal(y, x.flat) - def test_file_position_after_fromfile(self, tmp_filename): + def test_file_position_after_fromfile(self, tmp_path, param_filename): # gh-4118 - sizes = [io.DEFAULT_BUFFER_SIZE//8, + sizes = [io.DEFAULT_BUFFER_SIZE // 8, io.DEFAULT_BUFFER_SIZE, - io.DEFAULT_BUFFER_SIZE*8] + io.DEFAULT_BUFFER_SIZE * 8] + tmp_filename = normalize_filename(tmp_path, param_filename) for size in sizes: with open(tmp_filename, 'wb') as f: - f.seek(size-1) + f.seek(size - 1) f.write(b'\0') for mode in ['rb', 'r+b']: @@ -5505,17 +5931,18 @@ def test_file_position_after_fromfile(self, tmp_filename): pos = f.tell() assert_equal(pos, 10, err_msg=err_msg) - def test_file_position_after_tofile(self, tmp_filename): + def 
test_file_position_after_tofile(self, tmp_path, param_filename): # gh-4118 - sizes = [io.DEFAULT_BUFFER_SIZE//8, + sizes = [io.DEFAULT_BUFFER_SIZE // 8, io.DEFAULT_BUFFER_SIZE, - io.DEFAULT_BUFFER_SIZE*8] + io.DEFAULT_BUFFER_SIZE * 8] + tmp_filename = normalize_filename(tmp_path, param_filename) for size in sizes: err_msg = "%d" % (size,) with open(tmp_filename, 'wb') as f: - f.seek(size-1) + f.seek(size - 1) f.write(b'\0') f.seek(10) f.write(b'12') @@ -5530,8 +5957,9 @@ def test_file_position_after_tofile(self, tmp_filename): pos = f.tell() assert_equal(pos, 10, err_msg=err_msg) - def test_load_object_array_fromfile(self, tmp_filename): + def test_load_object_array_fromfile(self, tmp_path, param_filename): # gh-12300 + tmp_filename = normalize_filename(tmp_path, param_filename) with open(tmp_filename, 'w') as f: # Ensure we have a file with consistent contents pass @@ -5543,7 +5971,9 @@ def test_load_object_array_fromfile(self, tmp_filename): assert_raises_regex(ValueError, "Cannot read into object array", np.fromfile, tmp_filename, dtype=object) - def test_fromfile_offset(self, x, tmp_filename): + def test_fromfile_offset(self, tmp_path, param_filename): + tmp_filename = normalize_filename(tmp_path, param_filename) + x = self._create_data() with open(tmp_filename, 'wb') as f: x.tofile(f) @@ -5559,13 +5989,13 @@ def test_fromfile_offset(self, x, tmp_filename): f, dtype=x.dtype, count=count_items, offset=offset_bytes ) assert_array_equal( - y, x.flat[offset_items:offset_items+count_items] + y, x.flat[offset_items:offset_items + count_items] ) # subsequent seeks should stack offset_bytes = x.dtype.itemsize z = np.fromfile(f, dtype=x.dtype, offset=offset_bytes) - assert_array_equal(z, x.flat[offset_items+count_items+1:]) + assert_array_equal(z, x.flat[offset_items + count_items + 1:]) with open(tmp_filename, 'wb') as f: x.tofile(f, sep=",") @@ -5577,23 +6007,21 @@ def test_fromfile_offset(self, x, tmp_filename): np.fromfile, tmp_filename, dtype=x.dtype, sep=",", 
offset=1) - @pytest.mark.skipif(IS_PYPY, reason="bug in PyPy's PyNumber_AsSsize_t") - def test_fromfile_bad_dup(self, x, tmp_filename): + def test_fromfile_bad_dup(self, tmp_path, param_filename, monkeypatch): def dup_str(fd): return 'abc' def dup_bigint(fd): return 2**68 - old_dup = os.dup - try: - with open(tmp_filename, 'wb') as f: - x.tofile(f) - for dup, exc in ((dup_str, TypeError), (dup_bigint, OSError)): - os.dup = dup - assert_raises(exc, np.fromfile, f) - finally: - os.dup = old_dup + tmp_filename = normalize_filename(tmp_path, param_filename) + x = self._create_data() + + with open(tmp_filename, 'wb') as f: + x.tofile(f) + for dup, exc in ((dup_str, TypeError), (dup_bigint, OSError)): + monkeypatch.setattr(os, "dup", dup) + assert_raises(exc, np.fromfile, f) def _check_from(self, s, value, filename, **kw): if 'sep' not in kw: @@ -5635,38 +6063,44 @@ def test_decimal_comma_separator(): else: assert False, request.param - def test_nan(self, tmp_filename, decimal_sep_localization): + def test_nan(self, tmp_path, param_filename, decimal_sep_localization): + tmp_filename = normalize_filename(tmp_path, param_filename) self._check_from( b"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)", [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], tmp_filename, sep=' ') - def test_inf(self, tmp_filename, decimal_sep_localization): + def test_inf(self, tmp_path, param_filename, decimal_sep_localization): + tmp_filename = normalize_filename(tmp_path, param_filename) self._check_from( b"inf +inf -inf infinity -Infinity iNfInItY -inF", [np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf], tmp_filename, sep=' ') - def test_numbers(self, tmp_filename, decimal_sep_localization): + def test_numbers(self, tmp_path, param_filename, decimal_sep_localization): + tmp_filename = normalize_filename(tmp_path, param_filename) self._check_from( b"1.234 -1.234 .3 .3e55 -123133.1231e+133", [1.234, -1.234, .3, .3e55, -123133.1231e+133], tmp_filename, sep=' ') - def 
test_binary(self, tmp_filename): + def test_binary(self, tmp_path, param_filename): + tmp_filename = normalize_filename(tmp_path, param_filename) self._check_from( b'\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@', np.array([1, 2, 3, 4]), tmp_filename, dtype=' 0) assert_(issubclass(w[0].category, RuntimeWarning)) @@ -6306,7 +6757,8 @@ def test_empty(self): assert_equal(f(A, axis=axis), np.zeros([])) def test_mean_values(self): - for mat in [self.rmat, self.cmat, self.omat]: + rmat, cmat, omat = self._create_data() + for mat in [rmat, cmat, omat]: for axis in [0, 1]: tgt = mat.sum(axis=axis) res = _mean(mat, axis=axis) * mat.shape[axis] @@ -6364,7 +6816,8 @@ def test_mean_where(self): assert_equal(np.mean(a, where=False), np.nan) def test_var_values(self): - for mat in [self.rmat, self.cmat, self.omat]: + rmat, cmat, omat = self._create_data() + for mat in [rmat, cmat, omat]: for axis in [0, 1, None]: msqr = _mean(mat * mat.conj(), axis=axis) mean = _mean(mat, axis=axis) @@ -6378,9 +6831,10 @@ def test_var_values(self): ('clongdouble', 7), )) def test_var_complex_values(self, complex_dtype, ndec): + _, cmat, _ = self._create_data() # Test fast-paths for every builtin complex type for axis in [0, 1, None]: - mat = self.cmat.copy().astype(complex_dtype) + mat = cmat.copy().astype(complex_dtype) msqr = _mean(mat * mat.conj(), axis=axis) mean = _mean(mat, axis=axis) tgt = msqr - mean * mean.conjugate() @@ -6390,7 +6844,8 @@ def test_var_complex_values(self, complex_dtype, ndec): def test_var_dimensions(self): # _var paths for complex number introduce additions on views that # increase dimensions. 
Ensure this generalizes to higher dims - mat = np.stack([self.cmat]*3) + _, cmat, _ = self._create_data() + mat = np.stack([cmat] * 3) for axis in [0, 1, 2, -1, None]: msqr = _mean(mat * mat.conj(), axis=axis) mean = _mean(mat, axis=axis) @@ -6401,7 +6856,8 @@ def test_var_dimensions(self): def test_var_complex_byteorder(self): # Test that var fast-path does not cause failures for complex arrays # with non-native byteorder - cmat = self.cmat.copy().astype('complex128') + _, cmat, _ = self._create_data() + cmat = cmat.copy().astype('complex128') cmat_swapped = cmat.astype(cmat.dtype.newbyteorder()) assert_almost_equal(cmat.var(), cmat_swapped.var()) @@ -6442,21 +6898,22 @@ def test_var_where(self): assert_allclose(np.var(a, axis=1, where=wh_full), np.var(a[wh_full].reshape((5, 3)), axis=1)) assert_allclose(np.var(a, axis=0, where=wh_partial), - np.var(a[wh_partial[:,0]], axis=0)) + np.var(a[wh_partial[:, 0]], axis=0)) with pytest.warns(RuntimeWarning) as w: assert_equal(a.var(where=False), np.nan) with pytest.warns(RuntimeWarning) as w: assert_equal(np.var(a, where=False), np.nan) def test_std_values(self): - for mat in [self.rmat, self.cmat, self.omat]: + rmat, cmat, omat = self._create_data() + for mat in [rmat, cmat, omat]: for axis in [0, 1, None]: tgt = np.sqrt(_var(mat, axis=axis)) res = _std(mat, axis=axis) assert_almost_equal(res, tgt) def test_std_where(self): - a = np.arange(25).reshape((5,5))[::-1] + a = np.arange(25).reshape((5, 5))[::-1] whf = np.array([[False, True, False, True, True], [True, False, True, False, True], [True, True, False, True, False], @@ -6468,11 +6925,11 @@ def test_std_where(self): [True], [False]]) _cases = [ - (0, True, 7.07106781*np.ones((5))), - (1, True, 1.41421356*np.ones((5))), + (0, True, 7.07106781 * np.ones(5)), + (1, True, 1.41421356 * np.ones(5)), (0, whf, - np.array([4.0824829 , 8.16496581, 5., 7.39509973, 8.49836586])), - (0, whp, 2.5*np.ones((5))) + np.array([4.0824829, 8.16496581, 5., 7.39509973, 8.49836586])), + (0, 
whp, 2.5 * np.ones(5)) ] for _ax, _wh, _res in _cases: assert_allclose(a.std(axis=_ax, where=_wh), _res) @@ -6487,13 +6944,13 @@ def test_std_where(self): np.array(_res)) assert_allclose(a.std(axis=1, where=whf), - np.std(a[whf].reshape((5,3)), axis=1)) + np.std(a[whf].reshape((5, 3)), axis=1)) assert_allclose(np.std(a, axis=1, where=whf), - (a[whf].reshape((5,3))).std(axis=1)) + (a[whf].reshape((5, 3))).std(axis=1)) assert_allclose(a.std(axis=0, where=whp), - np.std(a[whp[:,0]], axis=0)) + np.std(a[whp[:, 0]], axis=0)) assert_allclose(np.std(a, axis=0, where=whp), - (a[whp[:,0]]).std(axis=0)) + (a[whp[:, 0]]).std(axis=0)) with pytest.warns(RuntimeWarning) as w: assert_equal(a.std(where=False), np.nan) with pytest.warns(RuntimeWarning) as w: @@ -6580,63 +7037,65 @@ def test_vdot_uncontiguous(self): class TestDot: - def setup_method(self): - np.random.seed(128) - self.A = np.random.rand(4, 2) - self.b1 = np.random.rand(2, 1) - self.b2 = np.random.rand(2) - self.b3 = np.random.rand(1, 2) - self.b4 = np.random.rand(4) - self.N = 7 + N = 7 + + def _create_data(self): + rng = np.random.RandomState(128) + A = rng.random((4, 2)) + b1 = rng.random((2, 1)) + b2 = rng.random(2) + b3 = rng.random((1, 2)) + b4 = rng.random(4) + return A, b1, b2, b3, b4 def test_dotmatmat(self): - A = self.A + A, _, _, _, _ = self._create_data() res = np.dot(A.transpose(), A) tgt = np.array([[1.45046013, 0.86323640], [0.86323640, 0.84934569]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotmatvec(self): - A, b1 = self.A, self.b1 + A, b1, _, _, _ = self._create_data() res = np.dot(A, b1) tgt = np.array([[0.32114320], [0.04889721], [0.15696029], [0.33612621]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotmatvec2(self): - A, b2 = self.A, self.b2 + A, _, b2, _, _ = self._create_data() res = np.dot(A, b2) tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecmat(self): - A, b4 = self.A, self.b4 + A, _, _, 
_, b4 = self._create_data() res = np.dot(b4, A) tgt = np.array([1.23495091, 1.12222648]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecmat2(self): - b3, A = self.b3, self.A + A, _, _, b3, _ = self._create_data() res = np.dot(b3, A.transpose()) tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecmat3(self): - A, b4 = self.A, self.b4 + A, _, _, _, b4 = self._create_data() res = np.dot(A.transpose(), b4) tgt = np.array([1.23495091, 1.12222648]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecvecouter(self): - b1, b3 = self.b1, self.b3 + _, b1, _, b3, _ = self._create_data() res = np.dot(b1, b3) tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecvecinner(self): - b1, b3 = self.b1, self.b3 + _, b1, _, b3, _ = self._create_data() res = np.dot(b3, b1) - tgt = np.array([[ 0.23129668]]) + tgt = np.array([[0.23129668]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotcolumnvect1(self): @@ -6654,19 +7113,19 @@ def test_dotcolumnvect2(self): assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecscalar(self): - np.random.seed(100) - b1 = np.random.rand(1, 1) - b2 = np.random.rand(1, 4) + rng = np.random.RandomState(100) + b1 = rng.random((1, 1)) + b2 = rng.random((1, 4)) res = np.dot(b1, b2) tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecscalar2(self): - np.random.seed(100) - b1 = np.random.rand(4, 1) - b2 = np.random.rand(1, 1) + rng = np.random.RandomState(100) + b1 = rng.random((4, 1)) + b2 = rng.random((1, 1)) res = np.dot(b1, b2) - tgt = np.array([[0.00256425],[0.00131359],[0.00200324],[ 0.00398638]]) + tgt = np.array([[0.00256425], [0.00131359], [0.00200324], [0.00398638]]) assert_almost_equal(res, tgt, decimal=self.N) def test_all(self): @@ -6703,7 +7162,7 @@ def __mul__(self, other): # with 
scalar return out def __rmul__(self, other): - return self*other + return self * other U_non_cont = np.transpose([[1., 1.], [1., 2.]]) U_cont = np.ascontiguousarray(U_non_cont) @@ -6729,10 +7188,12 @@ def test_dot_3args(self): v = np.random.random_sample((16, 32)) r = np.empty((1024, 32)) + if HAS_REFCOUNT: + orig_refcount = sys.getrefcount(r) for i in range(12): dot(f, v, r) if HAS_REFCOUNT: - assert_equal(sys.getrefcount(r), 2) + assert_equal(sys.getrefcount(r), orig_refcount) r2 = dot(f, v, out=None) assert_array_equal(r2, r) assert_(r is dot(f, v, out=r)) @@ -6808,7 +7269,7 @@ def aligned_array(shape, align, dtype, order='C'): for offset in range(align): if (address + offset) % align == 0: break - tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype) + tmp = tmp[offset:offset + N * d.nbytes].view(dtype=dtype) return tmp.reshape(shape, order=order) def as_aligned(arr, align, dtype, order='C'): @@ -6865,16 +7326,17 @@ def assert_dot_close(A, X, desired): @pytest.mark.slow @pytest.mark.parametrize("dtype", [np.float64, np.complex128]) @requires_memory(free_bytes=18e9) # complex case needs 18GiB+ + @pytest.mark.thread_unsafe(reason="crashes with low memory") def test_huge_vectordot(self, dtype): # Large vector multiplications are chunked with 32bit BLAS # Test that the chunking does the right thing, see also gh-22262 - data = np.ones(2**30+100, dtype=dtype) + data = np.ones(2**30 + 100, dtype=dtype) res = np.dot(data, data) - assert res == 2**30+100 + assert res == 2**30 + 100 def test_dtype_discovery_fails(self): # See gh-14247, error checking was missing for failed dtype discovery - class BadObject(object): + class BadObject: def __array__(self, dtype=None, copy=None): raise TypeError("just this tiny mint leaf") @@ -6932,7 +7394,7 @@ def test_shapes(self): assert_(np.array(c).shape == ()) def test_result_types(self): - mat = np.ones((1,1)) + mat = np.ones((1, 1)) vec = np.ones((1,)) for dt in self.types: m = mat.astype(dt) @@ -6985,9 +7447,9 @@ def 
test_vector_vector_values(self): def test_vector_matrix_values(self): vec = np.array([1, 2]) mat1 = np.array([[1, 2], [3, 4]]) - mat2 = np.stack([mat1]*2, axis=0) + mat2 = np.stack([mat1] * 2, axis=0) tgt1 = np.array([7, 10]) - tgt2 = np.stack([tgt1]*2, axis=0) + tgt2 = np.stack([tgt1] * 2, axis=0) for dt in self.types[1:]: v = vec.astype(dt) m1 = mat1.astype(dt) @@ -7000,9 +7462,9 @@ def test_vector_matrix_values(self): # boolean type vec = np.array([True, False]) mat1 = np.array([[True, False], [False, True]]) - mat2 = np.stack([mat1]*2, axis=0) + mat2 = np.stack([mat1] * 2, axis=0) tgt1 = np.array([True, False]) - tgt2 = np.stack([tgt1]*2, axis=0) + tgt2 = np.stack([tgt1] * 2, axis=0) res = self.matmul(vec, mat1) assert_equal(res, tgt1) @@ -7012,9 +7474,9 @@ def test_vector_matrix_values(self): def test_matrix_vector_values(self): vec = np.array([1, 2]) mat1 = np.array([[1, 2], [3, 4]]) - mat2 = np.stack([mat1]*2, axis=0) + mat2 = np.stack([mat1] * 2, axis=0) tgt1 = np.array([5, 11]) - tgt2 = np.stack([tgt1]*2, axis=0) + tgt2 = np.stack([tgt1] * 2, axis=0) for dt in self.types[1:]: v = vec.astype(dt) m1 = mat1.astype(dt) @@ -7027,9 +7489,9 @@ def test_matrix_vector_values(self): # boolean type vec = np.array([True, False]) mat1 = np.array([[True, False], [False, True]]) - mat2 = np.stack([mat1]*2, axis=0) + mat2 = np.stack([mat1] * 2, axis=0) tgt1 = np.array([True, False]) - tgt2 = np.stack([tgt1]*2, axis=0) + tgt2 = np.stack([tgt1] * 2, axis=0) res = self.matmul(vec, mat1) assert_equal(res, tgt1) @@ -7131,8 +7593,8 @@ def test_out_arg(self): out = np.zeros((5, 2), dtype=np.complex128) c = self.matmul(a, b, out=out) assert_(c is out) - with suppress_warnings() as sup: - sup.filter(ComplexWarning, '') + with warnings.catch_warnings(): + warnings.simplefilter('ignore', ComplexWarning) c = c.astype(tgt.dtype) assert_array_equal(c, tgt) @@ -7179,6 +7641,7 @@ def test_out_contiguous(self): vc = np.arange(10.) vr = np.arange(6.) 
m0 = np.zeros((3, 0)) + @pytest.mark.parametrize('args', ( # matrix-matrix (m1, m2), (m2.T, m1.T), (m2.T.copy(), m1.T), (m2.T, m1.T.copy()), @@ -7206,10 +7669,39 @@ def test_dot_equivalent(self, args): r3 = np.matmul(args[0].copy(), args[1].copy()) assert_equal(r1, r3) + # issue 29164 with extra checks + @pytest.mark.parametrize('dtype', ( + np.float32, np.float64, np.complex64, np.complex128 + )) + def test_dot_equivalent_matrix_matrix_blastypes(self, dtype): + modes = list(itertools.product(['C', 'F'], [True, False])) + + def apply_mode(m, mode): + order, is_contiguous = mode + if is_contiguous: + return m.copy() if order == 'C' else m.T.copy().T + + retval = np.zeros( + (m.shape[0] * 2, m.shape[1] * 2), dtype=m.dtype, order=order + )[::2, ::2] + retval[...] = m + return retval + + is_complex = np.issubdtype(dtype, np.complexfloating) + m1 = self.m1.astype(dtype) + (1j if is_complex else 0) + m2 = self.m2.astype(dtype) + (1j if is_complex else 0) + dot_res = np.dot(m1, m2) + mo = np.zeros_like(dot_res) + + for mode in itertools.product(*[modes] * 3): + m1_, m2_, mo_ = [apply_mode(*x) for x in zip([m1, m2, mo], mode)] + assert_equal(np.matmul(m1_, m2_, out=mo_), dot_res) + def test_matmul_object(self): import fractions f = np.vectorize(fractions.Fraction) + def random_ints(): return np.random.randint(1, 1000, size=(10, 3, 3)) M1 = f(random_ints(), random_ints()) @@ -7223,7 +7715,7 @@ def random_ints(): def test_matmul_object_type_scalar(self): from fractions import Fraction as F - v = np.array([F(2,3), F(5,7)]) + v = np.array([F(2, 3), F(5, 7)]) res = self.matmul(v, v) assert_(type(res) is F) @@ -7235,32 +7727,32 @@ def test_matmul_empty(self): def test_matmul_exception_multiply(self): # test that matmul fails if `__mul__` is missing - class add_not_multiply(): + class add_not_multiply: def __add__(self, other): return self - a = np.full((3,3), add_not_multiply()) + a = np.full((3, 3), add_not_multiply()) with assert_raises(TypeError): b = np.matmul(a, a) def 
test_matmul_exception_add(self): # test that matmul fails if `__add__` is missing - class multiply_not_add(): + class multiply_not_add: def __mul__(self, other): return self - a = np.full((3,3), multiply_not_add()) + a = np.full((3, 3), multiply_not_add()) with assert_raises(TypeError): b = np.matmul(a, a) def test_matmul_bool(self): # gh-14439 - a = np.array([[1, 0],[1, 1]], dtype=bool) + a = np.array([[1, 0], [1, 1]], dtype=bool) assert np.max(a.view(np.uint8)) == 1 b = np.matmul(a, a) # matmul with boolean output should always be 0, 1 assert np.max(b.view(np.uint8)) == 1 rg = np.random.default_rng(np.random.PCG64(43)) - d = rg.integers(2, size=4*5, dtype=np.int8) + d = rg.integers(2, size=4 * 5, dtype=np.int8) d = d.reshape(4, 5) > 0 out1 = np.matmul(d, d.reshape(5, 4)) out2 = np.dot(d, d.reshape(5, 4)) @@ -7360,7 +7852,7 @@ def test_shapes(self, a_shape: tuple[int, ...], b_shape: tuple[int, ...]): def test_matmul_axes(): - a = np.arange(3*4*5).reshape(3, 4, 5) + a = np.arange(3 * 4 * 5).reshape(3, 4, 5) c = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (1, 2)]) assert c.shape == (3, 4, 4) d = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (0, 1)]) @@ -7375,7 +7867,7 @@ class TestInner: def test_inner_type_mismatch(self): c = 1. 
- A = np.array((1,1), dtype='i,i') + A = np.array((1, 1), dtype='i,i') assert_raises(TypeError, np.inner, c, A) assert_raises(TypeError, np.inner, A, c) @@ -7423,8 +7915,8 @@ def test_inner_product_with_various_contiguities(self): def test_3d_tensor(self): for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': - a = np.arange(24).reshape(2,3,4).astype(dt) - b = np.arange(24, 48).reshape(2,3,4).astype(dt) + a = np.arange(24).reshape(2, 3, 4).astype(dt) + b = np.arange(24, 48).reshape(2, 3, 4).astype(dt) desired = np.array( [[[[ 158, 182, 206], [ 230, 254, 278]], @@ -7445,27 +7937,31 @@ def test_3d_tensor(self): [3230, 3574, 3918]]]] ).astype(dt) assert_equal(np.inner(a, b), desired) - assert_equal(np.inner(b, a).transpose(2,3,0,1), desired) + assert_equal(np.inner(b, a).transpose(2, 3, 0, 1), desired) class TestChoose: - def setup_method(self): - self.x = 2*np.ones((3,), dtype=int) - self.y = 3*np.ones((3,), dtype=int) - self.x2 = 2*np.ones((2, 3), dtype=int) - self.y2 = 3*np.ones((2, 3), dtype=int) - self.ind = [0, 0, 1] + def _create_data(self): + x = 2 * np.ones((3,), dtype=int) + y = 3 * np.ones((3,), dtype=int) + x2 = 2 * np.ones((2, 3), dtype=int) + y2 = 3 * np.ones((2, 3), dtype=int) + ind = [0, 0, 1] + return x, y, x2, y2, ind def test_basic(self): - A = np.choose(self.ind, (self.x, self.y)) + x, y, _, _, ind = self._create_data() + A = np.choose(ind, (x, y)) assert_equal(A, [2, 2, 3]) def test_broadcast1(self): - A = np.choose(self.ind, (self.x2, self.y2)) + _, _, x2, y2, ind = self._create_data() + A = np.choose(ind, (x2, y2)) assert_equal(A, [[2, 2, 3], [2, 2, 3]]) def test_broadcast2(self): - A = np.choose(self.ind, (self.x, self.y2)) + x, _, _, y2, ind = self._create_data() + A = np.choose(ind, (x, y2)) assert_equal(A, [[2, 2, 3], [2, 2, 3]]) @pytest.mark.parametrize("ops", @@ -7475,7 +7971,7 @@ def test_broadcast2(self): (1., np.array([3], dtype=np.float32))],) def test_output_dtype(self, ops): expected_dt = np.result_type(*ops) - 
assert(np.choose([0], ops).dtype == expected_dt) + assert np.choose([0], ops).dtype == expected_dt def test_dimension_and_args_limit(self): # Maxdims for the legacy iterator is 32, but the maximum number @@ -7495,38 +7991,43 @@ def test_dimension_and_args_limit(self): class TestRepeat: - def setup_method(self): - self.m = np.array([1, 2, 3, 4, 5, 6]) - self.m_rect = self.m.reshape((2, 3)) + def _create_data(self): + m = np.array([1, 2, 3, 4, 5, 6]) + m_rect = m.reshape((2, 3)) + return m, m_rect def test_basic(self): - A = np.repeat(self.m, [1, 3, 2, 1, 1, 2]) + m, _ = self._create_data() + A = np.repeat(m, [1, 3, 2, 1, 1, 2]) assert_equal(A, [1, 2, 2, 2, 3, 3, 4, 5, 6, 6]) def test_broadcast1(self): - A = np.repeat(self.m, 2) + m, _ = self._create_data() + A = np.repeat(m, 2) assert_equal(A, [1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6]) def test_axis_spec(self): - A = np.repeat(self.m_rect, [2, 1], axis=0) + _, m_rect = self._create_data() + A = np.repeat(m_rect, [2, 1], axis=0) assert_equal(A, [[1, 2, 3], [1, 2, 3], [4, 5, 6]]) - A = np.repeat(self.m_rect, [1, 3, 2], axis=1) + A = np.repeat(m_rect, [1, 3, 2], axis=1) assert_equal(A, [[1, 2, 2, 2, 3, 3], [4, 5, 5, 5, 6, 6]]) def test_broadcast2(self): - A = np.repeat(self.m_rect, 2, axis=0) + _, m_rect = self._create_data() + A = np.repeat(m_rect, 2, axis=0) assert_equal(A, [[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6]]) - A = np.repeat(self.m_rect, 2, axis=1) + A = np.repeat(m_rect, 2, axis=1) assert_equal(A, [[1, 1, 2, 2, 3, 3], [4, 4, 5, 5, 6, 6]]) @@ -7776,7 +8277,7 @@ class TestWarnings: def test_complex_warning(self): x = np.array([1, 2]) - y = np.array([1-2j, 1+2j]) + y = np.array([1 - 2j, 1 + 2j]) with warnings.catch_warnings(): warnings.simplefilter("error", ComplexWarning) @@ -7787,22 +8288,22 @@ def test_complex_warning(self): class TestMinScalarType: def test_usigned_shortshort(self): - dt = np.min_scalar_type(2**8-1) + dt = np.min_scalar_type(2**8 - 1) wanted = np.dtype('uint8') assert_equal(wanted, dt) def 
test_usigned_short(self): - dt = np.min_scalar_type(2**16-1) + dt = np.min_scalar_type(2**16 - 1) wanted = np.dtype('uint16') assert_equal(wanted, dt) def test_usigned_int(self): - dt = np.min_scalar_type(2**32-1) + dt = np.min_scalar_type(2**32 - 1) wanted = np.dtype('uint32') assert_equal(wanted, dt) def test_usigned_longlong(self): - dt = np.min_scalar_type(2**63-1) + dt = np.min_scalar_type(2**63 - 1) wanted = np.dtype('uint64') assert_equal(wanted, dt) @@ -7820,7 +8321,7 @@ def _check(self, spec, wanted): dt = np.dtype(wanted) actual = _dtype_from_pep3118(spec) assert_equal(actual, dt, - err_msg="spec %r != dtype %r" % (spec, wanted)) + err_msg=f"spec {spec!r} != dtype {wanted!r}") def test_native_padding(self): align = np.dtype('i').alignment @@ -7829,10 +8330,10 @@ def test_native_padding(self): s = 'bi' else: s = 'b%dxi' % j - self._check('@'+s, {'f0': ('i1', 0), - 'f1': ('i', align*(1 + j//align))}) - self._check('='+s, {'f0': ('i1', 0), - 'f1': ('i', 1+j)}) + self._check('@' + s, {'f0': ('i1', 0), + 'f1': ('i', align * (1 + j // align))}) + self._check('=' + s, {'f0': ('i1', 0), + 'f1': ('i', 1 + j)}) def test_native_padding_2(self): # Native padding should work also for structs and sub-arrays @@ -7846,9 +8347,9 @@ def test_trailing_padding(self): size = np.dtype('i').itemsize def aligned(n): - return align*(1 + (n-1)//align) + return align * (1 + (n - 1) // align) - base = dict(formats=['i'], names=['f0']) + base = {"formats": ['i'], "names": ['f0']} self._check('ix', dict(itemsize=aligned(size + 1), **base)) self._check('ixx', dict(itemsize=aligned(size + 2), **base)) @@ -7893,14 +8394,14 @@ def test_intra_padding(self): size = np.dtype('i').itemsize def aligned(n): - return (align*(1 + (n-1)//align)) + return (align * (1 + (n - 1) // align)) - self._check('(3)T{ix}', (dict( - names=['f0'], - formats=['i'], - offsets=[0], - itemsize=aligned(size + 1) - ), (3,))) + self._check('(3)T{ix}', ({ + "names": ['f0'], + "formats": ['i'], + "offsets": [0], + 
"itemsize": aligned(size + 1) + }, (3,))) def test_char_vs_string(self): dt = np.dtype('c') @@ -7948,7 +8449,7 @@ def test_roundtrip(self): x = np.array([[1, 2], [3, 4]], dtype=np.float64) self._check_roundtrip(x) - x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:] + x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0, :] self._check_roundtrip(x) dt = [('a', 'b'), @@ -8086,7 +8587,7 @@ def test_export_simple_nd(self): assert_equal(y.itemsize, 8) def test_export_discontiguous(self): - x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:] + x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0, :] y = memoryview(x) assert_equal(y.format, 'f') assert_equal(y.shape, (3, 3)) @@ -8127,11 +8628,11 @@ def test_export_record(self): assert_equal(y.ndim, 1) assert_equal(y.suboffsets, ()) - sz = sum([np.dtype(b).itemsize for a, b in dt]) + sz = sum(np.dtype(b).itemsize for a, b in dt) if np.dtype('l').itemsize == 4: - assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') + assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') # noqa: E501 else: - assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') + assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') # noqa: E501 assert_equal(y.strides, (sz,)) assert_equal(y.itemsize, sz) @@ -8181,11 +8682,18 @@ def test_export_and_pickle_user_dtype(self, obj, error): res = pickle.loads(pickle_obj) assert_array_equal(res, obj) + def test_repr_user_dtype(self): + dt = np.dtype(rational) + assert_equal(repr(dt), 'dtype(rational)') + def test_padding(self): for j in range(8): x = np.array([(1,), (2,)], dtype={'f0': (int, j)}) self._check_roundtrip(x) + @pytest.mark.thread_unsafe( + reason="test result depends on the reference count of a global 
object", + ) def test_reference_leak(self): if HAS_REFCOUNT: count_1 = sys.getrefcount(np._core._internal) @@ -8195,7 +8703,6 @@ def test_reference_leak(self): if HAS_REFCOUNT: count_2 = sys.getrefcount(np._core._internal) assert_equal(count_1, count_2) - del c # avoid pyflakes unused variable warning. def test_padded_struct_array(self): dt1 = np.dtype( @@ -8217,10 +8724,13 @@ def test_padded_struct_array(self): self._check_roundtrip(x3) @pytest.mark.valgrind_error(reason="leaks buffer info cache temporarily.") - def test_relaxed_strides(self, c=np.ones((1, 10, 10), dtype='i8')): + def test_relaxed_strides(self, c=stride_tricks.as_strided( # noqa: B008 + np.ones((1, 10, 10), dtype='i8'), # noqa: B008 + strides=(-1, 80, 8) + ) + ): # Note: c defined as parameter so that it is persistent and leak # checks will notice gh-16934 (buffer info cache leak). - c.strides = (-1, 80, 8) # strides need to be fixed at export assert_(memoryview(c).strides == (800, 80, 8)) @@ -8242,12 +8752,12 @@ def test_relaxed_strides(self, c=np.ones((1, 10, 10), dtype='i8')): assert_(strides[-1] == 8) def test_out_of_order_fields(self): - dt = np.dtype(dict( - formats=[' np.array(0, dtype=dt1), "type %s failed" % (dt1,)) - assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,)) + assert_(1 > np.array(0, dtype=dt1), f"type {dt1} failed") + assert_(not 1 < np.array(0, dtype=dt1), f"type {dt1} failed") for dt2 in np.typecodes['AllInteger']: assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) + f"type {dt1} and {dt2} failed") assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) + f"type {dt1} and {dt2} failed") # Unsigned integers for dt1 in 'BHILQP': - assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,)) - assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,)) - assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,)) + assert_(-1 < np.array(1, dtype=dt1), f"type 
{dt1} failed") + assert_(not -1 > np.array(1, dtype=dt1), f"type {dt1} failed") + assert_(-1 != np.array(1, dtype=dt1), f"type {dt1} failed") # Unsigned vs signed for dt2 in 'bhilqp': assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) + f"type {dt1} and {dt2} failed") assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) + f"type {dt1} and {dt2} failed") assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) + f"type {dt1} and {dt2} failed") # Signed integers and floats for dt1 in 'bhlqp' + np.typecodes['Float']: - assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) - assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) - assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) + assert_(1 > np.array(-1, dtype=dt1), f"type {dt1} failed") + assert_(not 1 < np.array(-1, dtype=dt1), f"type {dt1} failed") + assert_(-1 == np.array(-1, dtype=dt1), f"type {dt1} failed") for dt2 in 'bhlqp' + np.typecodes['Float']: assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) + f"type {dt1} and {dt2} failed") assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) + f"type {dt1} and {dt2} failed") assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2), - "type %s and %s failed" % (dt1, dt2)) + f"type {dt1} and {dt2} failed") def test_to_bool_scalar(self): assert_equal(bool(np.array([False])), False) assert_equal(bool(np.array([True])), True) assert_equal(bool(np.array([[42]])), True) - assert_raises(ValueError, bool, np.array([1, 2])) + + @requires_deep_recursion + def test_to_bool_scalar_not_convertible(self): class NotConvertible: def __bool__(self): @@ -8865,9 +9426,6 @@ def __bool__(self): assert_raises(NotImplementedError, bool, np.array(NotConvertible())) assert_raises(NotImplementedError, bool, 
np.array([NotConvertible()])) - if IS_PYSTON: - pytest.skip("Pyston disables recursion checking") - self_containing = np.array([None]) self_containing[0] = self_containing @@ -8876,15 +9434,23 @@ def __bool__(self): assert_raises(Error, bool, self_containing) # previously stack overflow self_containing[0] = None # resolve circular reference + def test_to_bool_scalar_size_errors(self): + with pytest.raises(ValueError, match=".*one element is ambiguous"): + bool(np.array([1, 2])) + + with pytest.raises(ValueError, match=".*empty array is ambiguous"): + bool(np.empty((3, 0))) + + with pytest.raises(ValueError, match=".*empty array is ambiguous"): + bool(np.empty((0,))) + def test_to_int_scalar(self): # gh-9972 means that these aren't always the same int_funcs = (int, lambda x: x.__int__()) for int_func in int_funcs: assert_equal(int_func(np.array(0)), 0) - with assert_warns(DeprecationWarning): - assert_equal(int_func(np.array([1])), 1) - with assert_warns(DeprecationWarning): - assert_equal(int_func(np.array([[42]])), 42) + assert_raises(TypeError, int_func, np.array([1])) + assert_raises(TypeError, int_func, np.array([[42]])) assert_raises(TypeError, int_func, np.array([1, 2])) # gh-9972 @@ -8892,26 +9458,29 @@ def test_to_int_scalar(self): assert_equal(5, int_func(np.bytes_(b'5'))) assert_equal(6, int_func(np.str_('6'))) - # The delegation of int() to __trunc__ was deprecated in - # Python 3.11. 
- if sys.version_info < (3, 11): - class HasTrunc: - def __trunc__(self): - return 3 - assert_equal(3, int_func(np.array(HasTrunc()))) - with assert_warns(DeprecationWarning): - assert_equal(3, int_func(np.array([HasTrunc()]))) - else: - pass - class NotConvertible: def __int__(self): raise NotImplementedError assert_raises(NotImplementedError, int_func, np.array(NotConvertible())) - with assert_warns(DeprecationWarning): - assert_raises(NotImplementedError, - int_func, np.array([NotConvertible()])) + assert_raises(TypeError, + int_func, np.array([NotConvertible()])) + + def test_to_float_scalar(self): + float_funcs = (float, lambda x: x.__float__()) + for float_func in float_funcs: + assert_equal(float_func(np.array(0)), 0.0) + assert_equal(float_func(np.array(1.0, np.float64)), 1.0) + assert_raises(TypeError, float_func, np.array([2])) + assert_raises(TypeError, float_func, np.array([3.14])) + assert_raises(TypeError, float_func, np.array([[4.0]])) + + assert_equal(5.0, float_func(np.array('5'))) + assert_equal(5.1, float_func(np.array('5.1'))) + assert_equal(6.0, float_func(np.bytes_(b'6'))) + assert_equal(6.1, float_func(np.bytes_(b'6.1'))) + assert_equal(7.0, float_func(np.str_('7'))) + assert_equal(7.1, float_func(np.str_('7.1'))) class TestWhere: @@ -8980,7 +9549,7 @@ def test_exotic(self): e = float('-Infinity') assert_equal(np.where(True, d, e).dtype, np.float32) # With NEP 50 adopted, the float will overflow here: - e = float(1e150) + e = 1e150 with pytest.warns(RuntimeWarning, match="overflow"): res = np.where(True, d, e) assert res.dtype == np.float32 @@ -8989,15 +9558,15 @@ def test_ndim(self): c = [True, False] a = np.zeros((2, 25)) b = np.ones((2, 25)) - r = np.where(np.array(c)[:,np.newaxis], a, b) + r = np.where(np.array(c)[:, np.newaxis], a, b) assert_array_equal(r[0], a[0]) assert_array_equal(r[1], b[0]) a = a.T b = b.T r = np.where(c, a, b) - assert_array_equal(r[:,0], a[:,0]) - assert_array_equal(r[:,1], b[:,0]) + assert_array_equal(r[:, 0], 
a[:, 0]) + assert_array_equal(r[:, 1], b[:, 0]) def test_dtype_mix(self): c = np.array([False, True, False, False, False, False, True, False, @@ -9068,7 +9637,7 @@ def test_empty_result(self): x = np.zeros((1, 1)) ibad = np.vstack(np.where(x == 99.)) assert_array_equal(ibad, - np.atleast_2d(np.array([[],[]], dtype=np.intp))) + np.atleast_2d(np.array([[], []], dtype=np.intp))) def test_largedim(self): # invalid read regression gh-9304 @@ -9087,53 +9656,57 @@ def test_kwargs(self): np.where(a, x=a, y=a) -if not IS_PYPY: - # sys.getsizeof() is not valid on PyPy - class TestSizeOf: +class TestSizeOf: - def test_empty_array(self): - x = np.array([]) - assert_(sys.getsizeof(x) > 0) + def test_empty_array(self): + x = np.array([]) + assert_(sys.getsizeof(x) > 0) - def check_array(self, dtype): - elem_size = dtype(0).itemsize + def check_array(self, dtype): + elem_size = dtype(0).itemsize - for length in [10, 50, 100, 500]: - x = np.arange(length, dtype=dtype) - assert_(sys.getsizeof(x) > length * elem_size) + for length in [10, 50, 100, 500]: + x = np.arange(length, dtype=dtype) + assert_(sys.getsizeof(x) > length * elem_size) - def test_array_int32(self): - self.check_array(np.int32) + def test_array_int32(self): + self.check_array(np.int32) - def test_array_int64(self): - self.check_array(np.int64) + def test_array_int64(self): + self.check_array(np.int64) - def test_array_float32(self): - self.check_array(np.float32) + def test_array_float32(self): + self.check_array(np.float32) - def test_array_float64(self): - self.check_array(np.float64) + def test_array_float64(self): + self.check_array(np.float64) - def test_view(self): - d = np.ones(100) - assert_(sys.getsizeof(d[...]) < sys.getsizeof(d)) + def test_view(self): + d = np.ones(100) + assert_(sys.getsizeof(d[...]) < sys.getsizeof(d)) - def test_reshape(self): - d = np.ones(100) - assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy())) + def test_reshape(self): + d = np.ones(100) + 
assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy())) - @_no_tracing - def test_resize(self): - d = np.ones(100) - old = sys.getsizeof(d) - d.resize(50) - assert_(old > sys.getsizeof(d)) - d.resize(150) - assert_(old < sys.getsizeof(d)) + @_no_tracing + def test_resize(self): + d = np.ones(100) + old = sys.getsizeof(d) + d.resize(50) + assert_(old > sys.getsizeof(d)) + d.resize(150) + assert_(old < sys.getsizeof(d)) + + @pytest.mark.parametrize("dtype", ["u4,f4", "u4,O"]) + def test_resize_structured(self, dtype): + a = np.array([(0, 0.0) for i in range(5)], dtype=dtype) + a.resize(1000) + assert_array_equal(a, np.zeros(1000, dtype=dtype)) - def test_error(self): - d = np.ones(100) - assert_raises(TypeError, d.__sizeof__, "a") + def test_error(self): + d = np.ones(100) + assert_raises(TypeError, d.__sizeof__, "a") class TestHashing: @@ -9179,7 +9752,6 @@ def _all(self, other): __sub__ = __rsub__ = _all __mul__ = __rmul__ = _all __pow__ = __rpow__ = _all - __div__ = __rdiv__ = _all __mod__ = __rmod__ = _all __truediv__ = __rtruediv__ = _all __floordiv__ = __rfloordiv__ = _all @@ -9302,12 +9874,12 @@ class TestFormat: def test_0d(self): a = np.array(np.pi) - assert_equal('{:0.3g}'.format(a), '3.14') - assert_equal('{:0.3g}'.format(a[()]), '3.14') + assert_equal(f'{a:0.3g}', '3.14') + assert_equal(f'{a[()]:0.3g}', '3.14') def test_1d_no_format(self): a = np.array([np.pi]) - assert_equal('{}'.format(a), str(a)) + assert_equal(f'{a}', str(a)) def test_1d_format(self): # until gh-5543, ensure that the behaviour matches what it used to be @@ -9315,9 +9887,6 @@ def test_1d_format(self): assert_raises(TypeError, '{:30}'.format, a) -from numpy.testing import IS_PYPY - - class TestCTypes: def test_ctypes_is_available(self): @@ -9326,6 +9895,7 @@ def test_ctypes_is_available(self): assert_equal(ctypes, test_arr.ctypes._ctypes) assert_equal(tuple(test_arr.ctypes.shape), (2, 3)) + @pytest.mark.thread_unsafe(reason="modifies global module state") def 
test_ctypes_is_not_available(self): from numpy._core import _internal _internal.ctypes = None @@ -9342,17 +9912,17 @@ def _make_readonly(x): x.flags.writeable = False return x + @pytest.mark.thread_unsafe(reason="calls gc.collect()") @pytest.mark.parametrize('arr', [ np.array([1, 2, 3]), np.array([['one', 'two'], ['three', 'four']]), np.array((1, 2), dtype='i4,i4'), - np.zeros((2,), dtype= - np.dtype(dict( - formats=['2, [44, 55]) + np.place(a, a > 2, [44, 55]) assert_equal(a, np.array([[0, 44], [1, 55], [2, 44]])) # hit one of the failing paths - assert_raises(ValueError, np.place, a, a>20, []) + assert_raises(ValueError, np.place, a, a > 20, []) def test_put_noncontiguous(self): a = np.arange(6).reshape(2, 3).T # force non-c-contiguous @@ -9440,7 +10010,7 @@ def test_put_noncontiguous(self): def test_putmask_noncontiguous(self): a = np.arange(6).reshape(2, 3).T # force non-c-contiguous # uses arr_putmask - np.putmask(a, a>2, a**2) + np.putmask(a, a > 2, a**2) assert_equal(a, np.array([[0, 9], [1, 16], [2, 25]])) def test_take_mode_raise(self): @@ -9451,7 +10021,7 @@ def test_take_mode_raise(self): def test_choose_mod_raise(self): a = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]]) - out = np.empty((3,3), dtype='int') + out = np.empty((3, 3), dtype='int') choices = [-10, 10] np.choose(a, choices, out=out, mode='raise') assert_equal(out, np.array([[ 10, -10, 10], @@ -9473,7 +10043,8 @@ def test_dot_out(self): def test_view_assign(self): from numpy._core._multiarray_tests import ( - npy_create_writebackifcopy, npy_resolve + npy_create_writebackifcopy, + npy_resolve, ) arr = np.arange(9).reshape(3, 3).T @@ -9494,16 +10065,15 @@ def test_view_assign(self): @pytest.mark.leaks_references( reason="increments self in dealloc; ignore since deprecated path.") def test_dealloc_warning(self): - with suppress_warnings() as sup: - sup.record(RuntimeWarning) - arr = np.arange(9).reshape(3, 3) - v = arr.T + arr = np.arange(9).reshape(3, 3) + v = arr.T + with 
pytest.warns(RuntimeWarning): _multiarray_tests.npy_abuse_writebackifcopy(v) - assert len(sup.log) == 1 def test_view_discard_refcount(self): from numpy._core._multiarray_tests import ( - npy_create_writebackifcopy, npy_discard + npy_create_writebackifcopy, + npy_discard, ) arr = np.arange(9).reshape(3, 3).T @@ -9619,6 +10189,63 @@ def test_error_paths_and_promotion(self, which): # Fails discovering start dtype np.arange(*args) + def test_dtype_attribute_ignored(self): + # Until 2.3 this would raise a DeprecationWarning + class dt: + dtype = "f8" + + class vdt(np.void): + dtype = "f,f" + + assert_raises(ValueError, np.dtype, dt) + assert_raises(ValueError, np.dtype, dt()) + assert_raises(ValueError, np.dtype, vdt) + assert_raises(ValueError, np.dtype, vdt(1)) + + +class TestDTypeCoercionForbidden: + forbidden_types = [ + # The builtin scalar super types: + np.generic, np.flexible, np.number, + np.inexact, np.floating, np.complexfloating, + np.integer, np.unsignedinteger, np.signedinteger, + # character is a deprecated S1 special case: + np.character, + ] + + def test_dtype_coercion(self): + for scalar_type in self.forbidden_types: + assert_raises(TypeError, np.dtype, args=(scalar_type,)) + + def test_array_construction(self): + for scalar_type in self.forbidden_types: + assert_raises(TypeError, np.array, args=([], scalar_type,)) + + def test_not_deprecated(self): + # All specific types work + for group in np._core.sctypes.values(): + for scalar_type in group: + np.dtype(scalar_type) + + for scalar_type in [type, dict, list, tuple]: + # Typical python types are coerced to object currently: + np.dtype(scalar_type) + + +class TestDateTimeCreationTuple: + @pytest.mark.parametrize("cls", [np.datetime64, np.timedelta64]) + def test_dt_tuple(self, cls): + # two valid uses - (unit, num) and (unit, num, den, None) + cls(1, ('ms', 2)) + cls(1, ('ms', 2, 1, None)) + + # trying to use the event argument, removed in 1.7.0 + # it used to be a uint8 + assert_raises(TypeError, 
cls, args=(1, ('ms', 2, 'event'))) + assert_raises(TypeError, cls, args=(1, ('ms', 2, 63))) + assert_raises(TypeError, cls, args=(1, ('ms', 2, 1, 'event'))) + assert_raises(TypeError, cls, args=(1, ('ms', 2, 1, 63))) + class TestArrayFinalize: """ Tests __array_finalize__ """ @@ -9657,6 +10284,7 @@ def __array_finalize__(self, obj): with pytest.raises(RuntimeError, match="boohoo!"): np.arange(10).view(BadAttributeArray) + @pytest.mark.thread_unsafe(reason="calls gc.collect()") def test_lifetime_on_error(self): # gh-11237 class RaisesInFinalize(np.ndarray): @@ -9665,7 +10293,8 @@ def __array_finalize__(self, obj): raise Exception(self) # a plain object can't be weakref'd - class Dummy: pass + class Dummy: + pass # get a weak reference to an object within an array obj_arr = np.array(Dummy()) @@ -9746,7 +10375,7 @@ class MyArr(np.ndarray): def __array_wrap__(self, new, context=None, return_scalar=False): type(self).called_wrap += 1 - return super().__array_wrap__(new) + return super().__array_wrap__(new, context, return_scalar) numpy_arr = np.zeros(5, dtype=dt1) my_arr = np.zeros(5, dtype=dt2).view(MyArr) @@ -9811,12 +10440,11 @@ def __array__(self, dtype=None, copy=None): def test_richcompare_scalar_boolean_singleton_return(): - # These are currently guaranteed to be the boolean singletons, but maybe - # returning NumPy booleans would also be OK: - assert (np.array(0) == "a") is False - assert (np.array(0) != "a") is True - assert (np.int16(0) == "a") is False - assert (np.int16(0) != "a") is True + # These are currently guaranteed to be the boolean numpy singletons + assert (np.array(0) == "a") is np.bool_(False) + assert (np.array(0) != "a") is np.bool_(True) + assert (np.int16(0) == "a") is np.bool_(False) + assert (np.int16(0) != "a") is np.bool_(True) @pytest.mark.parametrize("op", [ @@ -9855,7 +10483,12 @@ def test_npymath_complex(fun, npfun, x, y, test_dtype): def test_npymath_real(): # Smoketest npymath functions from numpy._core._multiarray_tests import ( - 
npy_log10, npy_cosh, npy_sinh, npy_tan, npy_tanh) + npy_cosh, + npy_log10, + npy_sinh, + npy_tan, + npy_tanh, + ) funcs = {npy_log10: np.log10, npy_cosh: np.cosh, @@ -9896,18 +10529,18 @@ def test_uintalignment_and_alignment(): # check that C struct matches numpy struct size s = _multiarray_tests.get_struct_alignments() - for d, (alignment, size) in zip([d1,d2,d3], s): + for d, (alignment, size) in zip([d1, d2, d3], s): assert_equal(d.alignment, alignment) assert_equal(d.itemsize, size) # check that ufuncs don't complain in debug mode # (this is probably OK if the aligned flag is true above) - src = np.zeros((2,2), dtype=d1)['f1'] # 4-byte aligned, often + src = np.zeros((2, 2), dtype=d1)['f1'] # 4-byte aligned, often np.exp(src) # assert fails? # check that copy code doesn't complain in debug mode - dst = np.zeros((2,2), dtype='c8') - dst[:,1] = src[:,1] # assert in lowlevel_strided_loops fails? + dst = np.zeros((2, 2), dtype='c8') + dst[:, 1] = src[:, 1] # assert in lowlevel_strided_loops fails? 
class TestAlignment: # adapted from scipy._lib.tests.test__util.test__aligned_zeros @@ -9932,7 +10565,7 @@ def check(self, shape, dtype, order, align): elif order is None: assert_(x.flags.c_contiguous, err_msg) else: - raise ValueError() + raise ValueError def test_various_alignments(self): for align in [1, 2, 3, 4, 8, 12, 16, 32, 64, None]: @@ -9956,8 +10589,12 @@ def test_strided_loop_alignments(self): xf128 = _aligned_zeros(3, np.longdouble, align=align) # test casting, both to and from misaligned - with suppress_warnings() as sup: - sup.filter(ComplexWarning, "Casting complex values") + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + "Casting complex values", + ComplexWarning, + ) xc64.astype('f8') xf64.astype(np.complex64) test = xc64 + xf64 @@ -9994,7 +10631,6 @@ def test_getfield(): pytest.raises(ValueError, a.getfield, 'uint8', 16) pytest.raises(ValueError, a.getfield, 'uint64', 0) - class TestViewDtype: """ Verify that making a view of a non-contiguous array works as expected. @@ -10058,7 +10694,7 @@ def test_non_c_contiguous(self): assert_array_equal(x.view(' 10: + arrs.pop(0) + elif len(arrs) <= 10: + arrs.extend([np.array([1, 2, 3]) for _ in range(1000)]) + + def replace_list_items(b): + b.wait() + rng = np.random.RandomState() + rng.seed(0x4d3d3d3) + while done < 4: + data = rng.randint(0, 1000, size=4) + arrs[data[0]] = data[1:] + + for mutation_func in (replace_list_items, contract_and_expand_list): + b = threading.Barrier(5) + try: + with concurrent.futures.ThreadPoolExecutor(max_workers=5) as tpe: + tasks = [tpe.submit(read_arrs, b) for _ in range(4)] + tasks.append(tpe.submit(mutation_func, b)) + for t in tasks: + t.result() + except RuntimeError as e: + if outcome == "success": + raise + assert "Inconsistent object during array creation?" 
in str(e) + msg = "replace_list_items should not raise errors" + assert mutation_func is contract_and_expand_list, msg + finally: + if len(tasks) < 5: + b.abort() + +def test_array__buffer__thread_safety(): + import inspect + arr = np.arange(1000) + flags = [inspect.BufferFlags.STRIDED, inspect.BufferFlags.READ] + + def func(b): + b.wait() + for i in range(100): + arr.__buffer__(flags[i % 2]) + + run_threaded(func, max_workers=8, pass_barrier=True) + +def test_void_dtype__buffer__thread_safety(): + import inspect + dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + x = np.array(('ndarray_scalar', (1.2, 3.0)), dtype=dt)[()] + assert isinstance(x, np.void) + flags = [inspect.BufferFlags.STRIDES, inspect.BufferFlags.READ] + + def func(b): + b.wait() + for i in range(100): + x.__buffer__(flags[i % 2]) + + run_threaded(func, max_workers=8, pass_barrier=True) diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py index 517e21a92cf8..520e638b2edb 100644 --- a/numpy/_core/tests/test_nditer.py +++ b/numpy/_core/tests/test_nditer.py @@ -1,17 +1,26 @@ +import inspect +import subprocess import sys -import pytest - import textwrap -import subprocess +import warnings + +import pytest import numpy as np -import numpy._core.umath as ncu import numpy._core._multiarray_tests as _multiarray_tests -from numpy import array, arange, nditer, all +import numpy._core.umath as ncu +from numpy import all, arange, array, nditer from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_raises, - IS_WASM, HAS_REFCOUNT, suppress_warnings, break_cycles, - ) + HAS_REFCOUNT, + IS_64BIT, + IS_WASM, + assert_, + assert_array_equal, + assert_equal, + assert_raises, +) +from numpy.testing._private.utils import requires_memory + def iter_multi_index(i): ret = [] @@ -77,8 +86,6 @@ def test_iter_refcount(): assert_equal(sys.getrefcount(a), rc_a) assert_equal(sys.getrefcount(dt), rc_dt) - del it2 # avoid pyflakes unused variable warning - 
def test_iter_best_order(): # The iterator should always find the iteration order # with increasing memory addresses @@ -88,7 +95,7 @@ def test_iter_best_order(): a = arange(np.prod(shape)) # Test each combination of positive and negative strides for dirs in range(2**len(shape)): - dirs_index = [slice(None)]*len(shape) + dirs_index = [slice(None)] * len(shape) for bit in range(len(shape)): if ((2**bit) & dirs): dirs_index[bit] = slice(None, None, -1) @@ -97,14 +104,14 @@ def test_iter_best_order(): aview = a.reshape(shape)[dirs_index] # C-order i = nditer(aview, [], [['readonly']]) - assert_equal([x for x in i], a) + assert_equal(list(i), a) # Fortran-order i = nditer(aview.T, [], [['readonly']]) - assert_equal([x for x in i], a) + assert_equal(list(i), a) # Other order if len(shape) > 2: i = nditer(aview.swapaxes(0, 1), [], [['readonly']]) - assert_equal([x for x in i], a) + assert_equal(list(i), a) def test_iter_c_order(): # Test forcing C order @@ -114,7 +121,7 @@ def test_iter_c_order(): a = arange(np.prod(shape)) # Test each combination of positive and negative strides for dirs in range(2**len(shape)): - dirs_index = [slice(None)]*len(shape) + dirs_index = [slice(None)] * len(shape) for bit in range(len(shape)): if ((2**bit) & dirs): dirs_index[bit] = slice(None, None, -1) @@ -123,14 +130,14 @@ def test_iter_c_order(): aview = a.reshape(shape)[dirs_index] # C-order i = nditer(aview, order='C') - assert_equal([x for x in i], aview.ravel(order='C')) + assert_equal(list(i), aview.ravel(order='C')) # Fortran-order i = nditer(aview.T, order='C') - assert_equal([x for x in i], aview.T.ravel(order='C')) + assert_equal(list(i), aview.T.ravel(order='C')) # Other order if len(shape) > 2: i = nditer(aview.swapaxes(0, 1), order='C') - assert_equal([x for x in i], + assert_equal(list(i), aview.swapaxes(0, 1).ravel(order='C')) def test_iter_f_order(): @@ -141,7 +148,7 @@ def test_iter_f_order(): a = arange(np.prod(shape)) # Test each combination of positive and negative 
strides for dirs in range(2**len(shape)): - dirs_index = [slice(None)]*len(shape) + dirs_index = [slice(None)] * len(shape) for bit in range(len(shape)): if ((2**bit) & dirs): dirs_index[bit] = slice(None, None, -1) @@ -150,14 +157,14 @@ def test_iter_f_order(): aview = a.reshape(shape)[dirs_index] # C-order i = nditer(aview, order='F') - assert_equal([x for x in i], aview.ravel(order='F')) + assert_equal(list(i), aview.ravel(order='F')) # Fortran-order i = nditer(aview.T, order='F') - assert_equal([x for x in i], aview.T.ravel(order='F')) + assert_equal(list(i), aview.T.ravel(order='F')) # Other order if len(shape) > 2: i = nditer(aview.swapaxes(0, 1), order='F') - assert_equal([x for x in i], + assert_equal(list(i), aview.swapaxes(0, 1).ravel(order='F')) def test_iter_c_or_f_order(): @@ -168,7 +175,7 @@ def test_iter_c_or_f_order(): a = arange(np.prod(shape)) # Test each combination of positive and negative strides for dirs in range(2**len(shape)): - dirs_index = [slice(None)]*len(shape) + dirs_index = [slice(None)] * len(shape) for bit in range(len(shape)): if ((2**bit) & dirs): dirs_index[bit] = slice(None, None, -1) @@ -177,14 +184,14 @@ def test_iter_c_or_f_order(): aview = a.reshape(shape)[dirs_index] # C-order i = nditer(aview, order='A') - assert_equal([x for x in i], aview.ravel(order='A')) + assert_equal(list(i), aview.ravel(order='A')) # Fortran-order i = nditer(aview.T, order='A') - assert_equal([x for x in i], aview.T.ravel(order='A')) + assert_equal(list(i), aview.T.ravel(order='A')) # Other order if len(shape) > 2: i = nditer(aview.swapaxes(0, 1), order='A') - assert_equal([x for x in i], + assert_equal(list(i), aview.swapaxes(0, 1).ravel(order='A')) def test_nditer_multi_index_set(): @@ -195,7 +202,7 @@ def test_nditer_multi_index_set(): # Removes the iteration on two first elements of a[0] it.multi_index = (0, 2,) - assert_equal([i for i in it], [2, 3, 4, 5]) + assert_equal(list(it), [2, 3, 4, 5]) @pytest.mark.skipif(not HAS_REFCOUNT, 
reason="Python lacks refcounts") def test_nditer_multi_index_set_refcount(): @@ -254,43 +261,164 @@ def test_iter_best_order_multi_index_3d(): a = arange(12) # 3D C-order i = nditer(a.reshape(2, 3, 2), ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1), - (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1)]) + assert_equal( + iter_multi_index(i), + [ + (0, 0, 0), + (0, 0, 1), + (0, 1, 0), + (0, 1, 1), + (0, 2, 0), + (0, 2, 1), + (1, 0, 0), + (1, 0, 1), + (1, 1, 0), + (1, 1, 1), + (1, 2, 0), + (1, 2, 1), + ], + ) # 3D Fortran-order i = nditer(a.reshape(2, 3, 2).copy(order='F'), ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0), - (0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1)]) + assert_equal( + iter_multi_index(i), + [ + (0, 0, 0), + (1, 0, 0), + (0, 1, 0), + (1, 1, 0), + (0, 2, 0), + (1, 2, 0), + (0, 0, 1), + (1, 0, 1), + (0, 1, 1), + (1, 1, 1), + (0, 2, 1), + (1, 2, 1), + ], + ) # 3D reversed C-order i = nditer(a.reshape(2, 3, 2)[::-1], ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1), - (0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1)]) + assert_equal( + iter_multi_index(i), + [ + (1, 0, 0), + (1, 0, 1), + (1, 1, 0), + (1, 1, 1), + (1, 2, 0), + (1, 2, 1), + (0, 0, 0), + (0, 0, 1), + (0, 1, 0), + (0, 1, 1), + (0, 2, 0), + (0, 2, 1), + ], + ) i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 2, 0), (0, 2, 1), (0, 1, 0), (0, 1, 1), (0, 0, 0), (0, 0, 1), - (1, 2, 0), (1, 2, 1), (1, 1, 0), (1, 1, 1), (1, 0, 0), (1, 0, 1)]) - i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 0, 1), (0, 0, 0), (0, 1, 1), (0, 1, 0), (0, 2, 1), (0, 2, 
0), - (1, 0, 1), (1, 0, 0), (1, 1, 1), (1, 1, 0), (1, 2, 1), (1, 2, 0)]) + assert_equal( + iter_multi_index(i), + [ + (0, 2, 0), + (0, 2, 1), + (0, 1, 0), + (0, 1, 1), + (0, 0, 0), + (0, 0, 1), + (1, 2, 0), + (1, 2, 1), + (1, 1, 0), + (1, 1, 1), + (1, 0, 0), + (1, 0, 1), + ], + ) + i = nditer(a.reshape(2, 3, 2)[:, :, ::-1], ['multi_index'], [['readonly']]) + assert_equal( + iter_multi_index(i), + [ + (0, 0, 1), + (0, 0, 0), + (0, 1, 1), + (0, 1, 0), + (0, 2, 1), + (0, 2, 0), + (1, 0, 1), + (1, 0, 0), + (1, 1, 1), + (1, 1, 0), + (1, 2, 1), + (1, 2, 0), + ], + ) # 3D reversed Fortran-order - i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], - ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(1, 0, 0), (0, 0, 0), (1, 1, 0), (0, 1, 0), (1, 2, 0), (0, 2, 0), - (1, 0, 1), (0, 0, 1), (1, 1, 1), (0, 1, 1), (1, 2, 1), (0, 2, 1)]) - i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], - ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 2, 0), (1, 2, 0), (0, 1, 0), (1, 1, 0), (0, 0, 0), (1, 0, 0), - (0, 2, 1), (1, 2, 1), (0, 1, 1), (1, 1, 1), (0, 0, 1), (1, 0, 1)]) - i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], - ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1), - (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0)]) + i = nditer( + a.reshape(2, 3, 2).copy(order='F')[::-1], + ['multi_index'], + [['readonly']], + ) + assert_equal( + iter_multi_index(i), + [ + (1, 0, 0), + (0, 0, 0), + (1, 1, 0), + (0, 1, 0), + (1, 2, 0), + (0, 2, 0), + (1, 0, 1), + (0, 0, 1), + (1, 1, 1), + (0, 1, 1), + (1, 2, 1), + (0, 2, 1), + ], + ) + i = nditer( + a.reshape(2, 3, 2).copy(order="F")[:, ::-1], + ["multi_index"], + [["readonly"]], + ) + assert_equal( + iter_multi_index(i), + [ + (0, 2, 0), + (1, 2, 0), + (0, 1, 0), + (1, 1, 0), + (0, 0, 0), + (1, 0, 0), + (0, 2, 1), + (1, 2, 1), + (0, 1, 1), + (1, 1, 1), + (0, 0, 1), + (1, 0, 
1), + ], + ) + i = nditer( + a.reshape(2, 3, 2).copy(order="F")[:, :, ::-1], + ["multi_index"], + [["readonly"]], + ) + assert_equal( + iter_multi_index(i), + [ + (0, 0, 1), + (1, 0, 1), + (0, 1, 1), + (1, 1, 1), + (0, 2, 1), + (1, 2, 1), + (0, 0, 0), + (1, 0, 0), + (0, 1, 0), + (1, 1, 0), + (0, 2, 0), + (1, 2, 0), + ], + ) def test_iter_best_order_c_index_1d(): # The C index should be correct with any reordering @@ -352,7 +480,7 @@ def test_iter_best_order_c_index_3d(): i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7]) - i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['c_index'], [['readonly']]) + i = nditer(a.reshape(2, 3, 2)[:, :, ::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10]) # 3D reversed Fortran-order @@ -364,7 +492,7 @@ def test_iter_best_order_c_index_3d(): ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7]) - i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, :, ::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10]) @@ -429,7 +557,7 @@ def test_iter_best_order_f_index_3d(): i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7]) - i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['f_index'], [['readonly']]) + i = nditer(a.reshape(2, 3, 2)[:, :, ::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5]) # 3D reversed Fortran-order @@ -441,7 +569,7 @@ def test_iter_best_order_f_index_3d(): ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7]) - i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, :, ::-1], ['f_index'], [['readonly']]) 
assert_equal(iter_indices(i), [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5]) @@ -454,7 +582,7 @@ def test_iter_no_inner_full_coalesce(): a = arange(size) # Test each combination of forward and backwards indexing for dirs in range(2**len(shape)): - dirs_index = [slice(None)]*len(shape) + dirs_index = [slice(None)] * len(shape) for bit in range(len(shape)): if ((2**bit) & dirs): dirs_index[bit] = slice(None, None, -1) @@ -481,15 +609,15 @@ def test_iter_no_inner_dim_coalescing(): # Skipping the last element in a dimension prevents coalescing # with the next-bigger dimension - a = arange(24).reshape(2, 3, 4)[:,:, :-1] + a = arange(24).reshape(2, 3, 4)[:, :, :-1] i = nditer(a, ['external_loop'], [['readonly']]) assert_equal(i.ndim, 2) assert_equal(i[0].shape, (3,)) - a = arange(24).reshape(2, 3, 4)[:, :-1,:] + a = arange(24).reshape(2, 3, 4)[:, :-1, :] i = nditer(a, ['external_loop'], [['readonly']]) assert_equal(i.ndim, 2) assert_equal(i[0].shape, (8,)) - a = arange(24).reshape(2, 3, 4)[:-1,:,:] + a = arange(24).reshape(2, 3, 4)[:-1, :, :] i = nditer(a, ['external_loop'], [['readonly']]) assert_equal(i.ndim, 1) assert_equal(i[0].shape, (12,)) @@ -540,69 +668,69 @@ def test_iter_broadcasting(): # Standard NumPy broadcasting rules # 1D with scalar - i = nditer([arange(6), np.int32(2)], ['multi_index'], [['readonly']]*2) + i = nditer([arange(6), np.int32(2)], ['multi_index'], [['readonly']] * 2) assert_equal(i.itersize, 6) assert_equal(i.shape, (6,)) # 2D with scalar i = nditer([arange(6).reshape(2, 3), np.int32(2)], - ['multi_index'], [['readonly']]*2) + ['multi_index'], [['readonly']] * 2) assert_equal(i.itersize, 6) assert_equal(i.shape, (2, 3)) # 2D with 1D i = nditer([arange(6).reshape(2, 3), arange(3)], - ['multi_index'], [['readonly']]*2) + ['multi_index'], [['readonly']] * 2) assert_equal(i.itersize, 6) assert_equal(i.shape, (2, 3)) i = nditer([arange(2).reshape(2, 1), arange(3)], - ['multi_index'], [['readonly']]*2) + ['multi_index'], [['readonly']] * 2) 
assert_equal(i.itersize, 6) assert_equal(i.shape, (2, 3)) # 2D with 2D i = nditer([arange(2).reshape(2, 1), arange(3).reshape(1, 3)], - ['multi_index'], [['readonly']]*2) + ['multi_index'], [['readonly']] * 2) assert_equal(i.itersize, 6) assert_equal(i.shape, (2, 3)) # 3D with scalar i = nditer([np.int32(2), arange(24).reshape(4, 2, 3)], - ['multi_index'], [['readonly']]*2) + ['multi_index'], [['readonly']] * 2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) # 3D with 1D i = nditer([arange(3), arange(24).reshape(4, 2, 3)], - ['multi_index'], [['readonly']]*2) + ['multi_index'], [['readonly']] * 2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) i = nditer([arange(3), arange(8).reshape(4, 2, 1)], - ['multi_index'], [['readonly']]*2) + ['multi_index'], [['readonly']] * 2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) # 3D with 2D i = nditer([arange(6).reshape(2, 3), arange(24).reshape(4, 2, 3)], - ['multi_index'], [['readonly']]*2) + ['multi_index'], [['readonly']] * 2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) i = nditer([arange(2).reshape(2, 1), arange(24).reshape(4, 2, 3)], - ['multi_index'], [['readonly']]*2) + ['multi_index'], [['readonly']] * 2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) i = nditer([arange(3).reshape(1, 3), arange(8).reshape(4, 2, 1)], - ['multi_index'], [['readonly']]*2) + ['multi_index'], [['readonly']] * 2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) # 3D with 3D i = nditer([arange(2).reshape(1, 2, 1), arange(3).reshape(1, 1, 3), arange(4).reshape(4, 1, 1)], - ['multi_index'], [['readonly']]*3) + ['multi_index'], [['readonly']] * 3) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) i = nditer([arange(6).reshape(1, 2, 3), arange(4).reshape(4, 1, 1)], - ['multi_index'], [['readonly']]*2) + ['multi_index'], [['readonly']] * 2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) i = nditer([arange(24).reshape(4, 2, 3), 
arange(12).reshape(4, 1, 3)], - ['multi_index'], [['readonly']]*2) + ['multi_index'], [['readonly']] * 2) assert_equal(i.itersize, 24) assert_equal(i.shape, (4, 2, 3)) @@ -642,25 +770,25 @@ def test_iter_broadcasting_errors(): # 1D with 1D assert_raises(ValueError, nditer, [arange(2), arange(3)], - [], [['readonly']]*2) + [], [['readonly']] * 2) # 2D with 1D assert_raises(ValueError, nditer, [arange(6).reshape(2, 3), arange(2)], - [], [['readonly']]*2) + [], [['readonly']] * 2) # 2D with 2D assert_raises(ValueError, nditer, [arange(6).reshape(2, 3), arange(9).reshape(3, 3)], - [], [['readonly']]*2) + [], [['readonly']] * 2) assert_raises(ValueError, nditer, [arange(6).reshape(2, 3), arange(4).reshape(2, 2)], - [], [['readonly']]*2) + [], [['readonly']] * 2) # 3D with 3D assert_raises(ValueError, nditer, [arange(36).reshape(3, 3, 4), arange(24).reshape(2, 3, 4)], - [], [['readonly']]*2) + [], [['readonly']] * 2) assert_raises(ValueError, nditer, [arange(8).reshape(2, 4, 1), arange(24).reshape(2, 3, 4)], - [], [['readonly']]*2) + [], [['readonly']] * 2) # Verify that the error message mentions the right shapes try: @@ -674,10 +802,10 @@ def test_iter_broadcasting_errors(): msg = str(e) # The message should contain the shape of the 3rd operand assert_(msg.find('(2,3)') >= 0, - 'Message "%s" doesn\'t contain operand shape (2,3)' % msg) + f'Message "{msg}" doesn\'t contain operand shape (2,3)') # The message should contain the broadcast shape assert_(msg.find('(1,2,3)') >= 0, - 'Message "%s" doesn\'t contain broadcast shape (1,2,3)' % msg) + f'Message "{msg}" doesn\'t contain broadcast shape (1,2,3)') try: nditer([arange(6).reshape(2, 3), arange(2)], @@ -690,13 +818,13 @@ def test_iter_broadcasting_errors(): msg = str(e) # The message should contain "shape->remappedshape" for each operand assert_(msg.find('(2,3)->(2,3)') >= 0, - 'Message "%s" doesn\'t contain operand shape (2,3)->(2,3)' % msg) + f'Message "{msg}" doesn\'t contain operand shape (2,3)->(2,3)') 
assert_(msg.find('(2,)->(2,newaxis)') >= 0, - ('Message "%s" doesn\'t contain remapped operand shape' + + ('Message "%s" doesn\'t contain remapped operand shape' '(2,)->(2,newaxis)') % msg) # The message should contain the itershape parameter assert_(msg.find('(4,3)') >= 0, - 'Message "%s" doesn\'t contain itershape parameter (4,3)' % msg) + f'Message "{msg}" doesn\'t contain itershape parameter (4,3)') try: nditer([np.zeros((2, 1, 1)), np.zeros((2,))], @@ -707,10 +835,10 @@ def test_iter_broadcasting_errors(): msg = str(e) # The message should contain the shape of the bad operand assert_(msg.find('(2,1,1)') >= 0, - 'Message "%s" doesn\'t contain operand shape (2,1,1)' % msg) + f'Message "{msg}" doesn\'t contain operand shape (2,1,1)') # The message should contain the broadcast shape assert_(msg.find('(2,1,2)') >= 0, - 'Message "%s" doesn\'t contain the broadcast shape (2,1,2)' % msg) + f'Message "{msg}" doesn\'t contain the broadcast shape (2,1,2)') def test_iter_flags_errors(): # Check that bad combinations of flags produce errors @@ -719,8 +847,6 @@ def test_iter_flags_errors(): # Not enough operands assert_raises(ValueError, nditer, [], [], []) - # Too many operands - assert_raises(ValueError, nditer, [a]*100, [], [['readonly']]*100) # Bad global flag assert_raises(ValueError, nditer, [a], ['bad flag'], [['readonly']]) # Bad op flag @@ -730,7 +856,7 @@ def test_iter_flags_errors(): # Bad casting parameter assert_raises(ValueError, nditer, [a], [], [['readonly']], casting='noon') # op_flags must match ops - assert_raises(ValueError, nditer, [a]*3, [], [['readonly']]*2) + assert_raises(ValueError, nditer, [a] * 3, [], [['readonly']] * 2) # Cannot track both a C and an F index assert_raises(ValueError, nditer, a, ['c_index', 'f_index'], [['readonly']]) @@ -761,9 +887,9 @@ def test_iter_flags_errors(): a.flags.writeable = True # Multi-indices available only with the multi_index flag i = nditer(arange(6), [], [['readonly']]) - assert_raises(ValueError, lambda 
i:i.multi_index, i) + assert_raises(ValueError, lambda i: i.multi_index, i) # Index available only with an index flag - assert_raises(ValueError, lambda i:i.index, i) + assert_raises(ValueError, lambda i: i.index, i) # GotoCoords and GotoIndex incompatible with buffering or no_inner def assign_multi_index(i): @@ -836,7 +962,7 @@ def test_iter_nbo_align_contig(): assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder) assert_equal(i.operands[0], a) i.operands[0][:] = 2 - assert_equal(au, [2]*6) + assert_equal(au, [2] * 6) del i # should not raise a warning # Byte order change by requesting NBO a = np.arange(6, dtype='f4') @@ -851,11 +977,11 @@ def test_iter_nbo_align_contig(): assert_equal(i.operands[0], a) i.operands[0][:] = 12345 i.operands[0][:] = 2 - assert_equal(au, [2]*6) + assert_equal(au, [2] * 6) # Unaligned input - a = np.zeros((6*4+1,), dtype='i1')[1:] - a.dtype = 'f4' + a = np.zeros((6 * 4 + 1,), dtype='i1')[1:] + a = a.view('f4') a[:] = np.arange(6, dtype='f4') assert_(not a.flags.aligned) # Without 'aligned', shouldn't copy @@ -868,7 +994,7 @@ def test_iter_nbo_align_contig(): # context manager triggers UPDATEIFCOPY on i at exit assert_equal(i.operands[0], a) i.operands[0][:] = 3 - assert_equal(a, [3]*6) + assert_equal(a, [3] * 6) # Discontiguous input a = arange(12) @@ -911,7 +1037,7 @@ def test_iter_array_cast(): # The memory layout of the temporary should match a (a is (48,4,16)) # except negative strides get flipped to positive strides. 
assert_equal(i.operands[0].strides, (96, 8, 32)) - a = a[::-1,:, ::-1] + a = a[::-1, :, ::-1] i = nditer(a, [], [['readonly', 'copy']], casting='safe', op_dtypes=[np.dtype('f8')]) @@ -1049,7 +1175,7 @@ def test_iter_scalar_cast_errors(): def test_iter_object_arrays_basic(): # Check that object arrays work - obj = {'a':3,'b':'d'} + obj = {'a': 3, 'b': 'd'} a = np.array([[1, 2, 3], None, obj, None], dtype='O') if HAS_REFCOUNT: rc = sys.getrefcount(obj) @@ -1062,7 +1188,7 @@ def test_iter_object_arrays_basic(): i = nditer(a, ['refs_ok'], ['readonly']) vals = [x_[()] for x_ in i] assert_equal(np.array(vals, dtype='O'), a) - vals, i, x = [None]*3 + vals, i, x = [None] * 3 if HAS_REFCOUNT: assert_equal(sys.getrefcount(obj), rc) @@ -1071,7 +1197,7 @@ def test_iter_object_arrays_basic(): assert_(i.iterationneedsapi) vals = [x_[()] for x_ in i] assert_equal(np.array(vals, dtype='O'), a.reshape(2, 2).ravel(order='F')) - vals, i, x = [None]*3 + vals, i, x = [None] * 3 if HAS_REFCOUNT: assert_equal(sys.getrefcount(obj), rc) @@ -1080,10 +1206,10 @@ def test_iter_object_arrays_basic(): with i: for x in i: x[...] = None - vals, i, x = [None]*3 + vals, i, x = [None] * 3 if HAS_REFCOUNT: - assert_(sys.getrefcount(obj) == rc-1) - assert_equal(a, np.array([None]*4, dtype='O')) + assert_(sys.getrefcount(obj) == rc - 1) + assert_equal(a, np.array([None] * 4, dtype='O')) def test_iter_object_arrays_conversions(): # Conversions to/from objects @@ -1093,7 +1219,7 @@ def test_iter_object_arrays_conversions(): with i: for x in i: x[...] += 1 - assert_equal(a, np.arange(6)+1) + assert_equal(a, np.arange(6) + 1) a = np.arange(6, dtype='i4') i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], @@ -1101,7 +1227,7 @@ def test_iter_object_arrays_conversions(): with i: for x in i: x[...] 
+= 1 - assert_equal(a, np.arange(6)+1) + assert_equal(a, np.arange(6) + 1) # Non-contiguous object array a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'O')]) @@ -1112,9 +1238,9 @@ def test_iter_object_arrays_conversions(): with i: for x in i: x[...] += 1 - assert_equal(a, np.arange(6)+1) + assert_equal(a, np.arange(6) + 1) - #Non-contiguous value array + # Non-contiguous value array a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'i4')]) a = a['a'] a[:] = np.arange(6) + 98172488 @@ -1126,9 +1252,10 @@ def test_iter_object_arrays_conversions(): rc = sys.getrefcount(ob) for x in i: x[...] += 1 - if HAS_REFCOUNT: - assert_(sys.getrefcount(ob) == rc-1) - assert_equal(a, np.arange(6)+98172489) + if HAS_REFCOUNT: + newrc = sys.getrefcount(ob) + assert_(newrc == rc - 1) + assert_equal(a, np.arange(6) + 98172489) def test_iter_common_dtype(): # Check that the iterator finds a common data type correctly @@ -1136,38 +1263,38 @@ def test_iter_common_dtype(): i = nditer([array([3], dtype='f4'), array([0], dtype='f8')], ['common_dtype'], - [['readonly', 'copy']]*2, + [['readonly', 'copy']] * 2, casting='safe') assert_equal(i.dtypes[0], np.dtype('f8')) assert_equal(i.dtypes[1], np.dtype('f8')) i = nditer([array([3], dtype='i4'), array([0], dtype='f4')], ['common_dtype'], - [['readonly', 'copy']]*2, + [['readonly', 'copy']] * 2, casting='safe') assert_equal(i.dtypes[0], np.dtype('f8')) assert_equal(i.dtypes[1], np.dtype('f8')) i = nditer([array([3], dtype='f4'), array(0, dtype='f8')], ['common_dtype'], - [['readonly', 'copy']]*2, + [['readonly', 'copy']] * 2, casting='same_kind') assert_equal(i.dtypes[0], np.dtype('f8')) assert_equal(i.dtypes[1], np.dtype('f8')) i = nditer([array([3], dtype='u4'), array(0, dtype='i4')], ['common_dtype'], - [['readonly', 'copy']]*2, + [['readonly', 'copy']] * 2, casting='safe') assert_equal(i.dtypes[0], np.dtype('i8')) assert_equal(i.dtypes[1], np.dtype('i8')) i = nditer([array([3], dtype='u4'), array(-12, dtype='i4')], ['common_dtype'], - 
[['readonly', 'copy']]*2, + [['readonly', 'copy']] * 2, casting='safe') assert_equal(i.dtypes[0], np.dtype('i8')) assert_equal(i.dtypes[1], np.dtype('i8')) i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'), array([2j], dtype='c8'), array([9], dtype='f8')], ['common_dtype'], - [['readonly', 'copy']]*4, + [['readonly', 'copy']] * 4, casting='safe') assert_equal(i.dtypes[0], np.dtype('c16')) assert_equal(i.dtypes[1], np.dtype('c16')) @@ -1216,8 +1343,14 @@ def test_iter_copy_if_overlap(): x = arange(10) a = x b = x - i = nditer([a, b], ['copy_if_overlap'], [['readonly', 'overlap_assume_elementwise'], - ['readwrite', 'overlap_assume_elementwise']]) + i = nditer( + [a, b], + ["copy_if_overlap"], + [ + ["readonly", "overlap_assume_elementwise"], + ["readwrite", "overlap_assume_elementwise"], + ], + ) with i: assert_(i.operands[0] is a and i.operands[1] is b) with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i: @@ -1290,36 +1423,36 @@ def test_iter_op_axes(): # Reverse the axes a = arange(6).reshape(2, 3) - i = nditer([a, a.T], [], [['readonly']]*2, op_axes=[[0, 1], [1, 0]]) + i = nditer([a, a.T], [], [['readonly']] * 2, op_axes=[[0, 1], [1, 0]]) assert_(all([x == y for (x, y) in i])) a = arange(24).reshape(2, 3, 4) - i = nditer([a.T, a], [], [['readonly']]*2, op_axes=[[2, 1, 0], None]) + i = nditer([a.T, a], [], [['readonly']] * 2, op_axes=[[2, 1, 0], None]) assert_(all([x == y for (x, y) in i])) # Broadcast 1D to any dimension a = arange(1, 31).reshape(2, 3, 5) b = arange(1, 3) - i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [0, -1, -1]]) - assert_equal([x*y for (x, y) in i], (a*b.reshape(2, 1, 1)).ravel()) + i = nditer([a, b], [], [['readonly']] * 2, op_axes=[None, [0, -1, -1]]) + assert_equal([x * y for (x, y) in i], (a * b.reshape(2, 1, 1)).ravel()) b = arange(1, 4) - i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [-1, 0, -1]]) - assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 3, 1)).ravel()) + i = nditer([a, 
b], [], [['readonly']] * 2, op_axes=[None, [-1, 0, -1]]) + assert_equal([x * y for (x, y) in i], (a * b.reshape(1, 3, 1)).ravel()) b = arange(1, 6) - i = nditer([a, b], [], [['readonly']]*2, + i = nditer([a, b], [], [['readonly']] * 2, op_axes=[None, [np.newaxis, np.newaxis, 0]]) - assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 1, 5)).ravel()) + assert_equal([x * y for (x, y) in i], (a * b.reshape(1, 1, 5)).ravel()) # Inner product-style broadcasting a = arange(24).reshape(2, 3, 4) b = arange(40).reshape(5, 2, 4) - i = nditer([a, b], ['multi_index'], [['readonly']]*2, + i = nditer([a, b], ['multi_index'], [['readonly']] * 2, op_axes=[[0, 1, -1, -1], [-1, -1, 0, 1]]) assert_equal(i.shape, (2, 3, 5, 2)) # Matrix product-style broadcasting a = arange(12).reshape(3, 4) b = arange(20).reshape(4, 5) - i = nditer([a, b], ['multi_index'], [['readonly']]*2, + i = nditer([a, b], ['multi_index'], [['readonly']] * 2, op_axes=[[0, -1], [-1, 1]]) assert_equal(i.shape, (3, 5)) @@ -1328,25 +1461,25 @@ def test_iter_op_axes_errors(): # Wrong number of items in op_axes a = arange(6).reshape(2, 3) - assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + assert_raises(ValueError, nditer, [a, a], [], [['readonly']] * 2, op_axes=[[0], [1], [0]]) # Out of bounds items in op_axes - assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + assert_raises(ValueError, nditer, [a, a], [], [['readonly']] * 2, op_axes=[[2, 1], [0, 1]]) - assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + assert_raises(ValueError, nditer, [a, a], [], [['readonly']] * 2, op_axes=[[0, 1], [2, -1]]) # Duplicate items in op_axes - assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + assert_raises(ValueError, nditer, [a, a], [], [['readonly']] * 2, op_axes=[[0, 0], [0, 1]]) - assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + assert_raises(ValueError, nditer, [a, a], [], [['readonly']] * 2, op_axes=[[0, 1], [1, 1]]) # Different sized arrays in op_axes 
- assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + assert_raises(ValueError, nditer, [a, a], [], [['readonly']] * 2, op_axes=[[0, 1], [0, 1, 0]]) # Non-broadcastable dimensions in the result - assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + assert_raises(ValueError, nditer, [a, a], [], [['readonly']] * 2, op_axes=[[0, 1], [1, 0]]) def test_iter_copy(): @@ -1482,7 +1615,7 @@ def test_iter_copy_casts_structured2(): # Array of two structured scalars: for res in res1, res2: # Cast to tuple by getitem, which may be weird and changeable?: - assert type(res["a"][0]) == tuple + assert isinstance(res["a"][0], tuple) assert res["a"][0] == (1, 1) for res in res1, res2: @@ -1515,7 +1648,7 @@ def test_iter_allocate_output_buffered_readwrite(): i.reset() for x in i: x[1][...] += x[0][...] - assert_equal(i.operands[1], a+1) + assert_equal(i.operands[1], a + 1) def test_iter_allocate_output_itorder(): # The allocated output should match the iteration order @@ -1560,19 +1693,19 @@ def test_iter_allocate_output_types_promotion(): # before NEP 50...) 
i = nditer([array([3], dtype='f4'), array([0], dtype='f8'), None], [], - [['readonly']]*2+[['writeonly', 'allocate']]) + [['readonly']] * 2 + [['writeonly', 'allocate']]) assert_equal(i.dtypes[2], np.dtype('f8')) i = nditer([array([3], dtype='i4'), array([0], dtype='f4'), None], [], - [['readonly']]*2+[['writeonly', 'allocate']]) + [['readonly']] * 2 + [['writeonly', 'allocate']]) assert_equal(i.dtypes[2], np.dtype('f8')) i = nditer([array([3], dtype='f4'), array(0, dtype='f8'), None], [], - [['readonly']]*2+[['writeonly', 'allocate']]) + [['readonly']] * 2 + [['writeonly', 'allocate']]) assert_equal(i.dtypes[2], np.dtype('f8')) i = nditer([array([3], dtype='u4'), array(0, dtype='i4'), None], [], - [['readonly']]*2+[['writeonly', 'allocate']]) + [['readonly']] * 2 + [['writeonly', 'allocate']]) assert_equal(i.dtypes[2], np.dtype('i8')) i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'), None], [], - [['readonly']]*2+[['writeonly', 'allocate']]) + [['readonly']] * 2 + [['writeonly', 'allocate']]) assert_equal(i.dtypes[2], np.dtype('i8')) def test_iter_allocate_output_types_byte_order(): @@ -1594,7 +1727,7 @@ def test_iter_allocate_output_types_scalar(): # If the inputs are all scalars, the output should be a scalar i = nditer([None, 1, 2.3, np.float32(12), np.complex128(3)], [], - [['writeonly', 'allocate']] + [['readonly']]*4) + [['writeonly', 'allocate']] + [['readonly']] * 4) assert_equal(i.operands[0].dtype, np.dtype('complex128')) assert_equal(i.operands[0].ndim, 0) @@ -1677,12 +1810,12 @@ def test_iter_remove_axis(): i = nditer(a, ['multi_index']) i.remove_axis(1) - assert_equal([x for x in i], a[:, 0,:].ravel()) + assert_equal(list(i), a[:, 0, :].ravel()) - a = a[::-1,:,:] + a = a[::-1, :, :] i = nditer(a, ['multi_index']) i.remove_axis(0) - assert_equal([x for x in i], a[0,:,:].ravel()) + assert_equal(list(i), a[0, :, :].ravel()) def test_iter_remove_multi_index_inner_loop(): # Check that removing multi-index support works @@ -1695,19 +1828,19 @@ def 
test_iter_remove_multi_index_inner_loop(): assert_equal(i.itviews[0].shape, (2, 3, 4)) # Removing the multi-index tracking causes all dimensions to coalesce - before = [x for x in i] + before = list(i) i.remove_multi_index() - after = [x for x in i] + after = list(i) assert_equal(before, after) assert_equal(i.ndim, 1) - assert_raises(ValueError, lambda i:i.shape, i) + assert_raises(ValueError, lambda i: i.shape, i) assert_equal(i.itviews[0].shape, (24,)) # Removing the inner loop means there's just one iteration i.reset() assert_equal(i.itersize, 24) - assert_equal(i[0].shape, tuple()) + assert_equal(i[0].shape, ()) i.enable_external_loop() assert_equal(i.itersize, 24) assert_equal(i[0].shape, (24,)) @@ -1798,8 +1931,8 @@ def test_iter_buffering(): # Contiguous 1-dimensional array arrays.append(np.arange(10, dtype='f4')) # Unaligned array - a = np.zeros((4*16+1,), dtype='i1')[1:] - a.dtype = 'i4' + a = np.zeros((4 * 16 + 1,), dtype='i1')[1:] + a = a.view('i4') a[:] = np.arange(16, dtype='i4') arrays.append(a) # 4-D F-order array @@ -1847,9 +1980,9 @@ def test_iter_buffering_delayed_alloc(): casting='unsafe', op_dtypes='f4') assert_(i.has_delayed_bufalloc) - assert_raises(ValueError, lambda i:i.multi_index, i) - assert_raises(ValueError, lambda i:i[0], i) - assert_raises(ValueError, lambda i:i[0:2], i) + assert_raises(ValueError, lambda i: i.multi_index, i) + assert_raises(ValueError, lambda i: i[0], i) + assert_raises(ValueError, lambda i: i[0:2], i) def assign_iter(i): i[0] = 0 @@ -1862,7 +1995,7 @@ def assign_iter(i): assert_equal(i[0], 0) i[1] = 1 assert_equal(i[0:2], [0, 1]) - assert_equal([[x[0][()], x[1][()]] for x in i], list(zip(range(6), [1]*6))) + assert_equal([[x[0][()], x[1][()]] for x in i], list(zip(range(6), [1] * 6))) def test_iter_buffered_cast_simple(): # Test that buffering can handle a simple cast @@ -1877,7 +2010,7 @@ def test_iter_buffered_cast_simple(): for v in i: v[...] 
*= 2 - assert_equal(a, 2*np.arange(10, dtype='f4')) + assert_equal(a, 2 * np.arange(10, dtype='f4')) def test_iter_buffered_cast_byteswapped(): # Test that buffering can handle a cast which requires swap->cast->swap @@ -1893,10 +2026,10 @@ def test_iter_buffered_cast_byteswapped(): for v in i: v[...] *= 2 - assert_equal(a, 2*np.arange(10, dtype='f4')) + assert_equal(a, 2 * np.arange(10, dtype='f4')) - with suppress_warnings() as sup: - sup.filter(np.exceptions.ComplexWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', np.exceptions.ComplexWarning) a = np.arange(10, dtype='f8') a = a.view(a.dtype.newbyteorder()).byteswap() @@ -1909,7 +2042,7 @@ def test_iter_buffered_cast_byteswapped(): for v in i: v[...] *= 2 - assert_equal(a, 2*np.arange(10, dtype='f8')) + assert_equal(a, 2 * np.arange(10, dtype='f8')) def test_iter_buffered_cast_byteswapped_complex(): # Test that buffering can handle a cast which requires swap->cast->copy @@ -1925,7 +2058,7 @@ def test_iter_buffered_cast_byteswapped_complex(): with i: for v in i: v[...] *= 2 - assert_equal(a, 2*np.arange(10, dtype='c8') + 4j) + assert_equal(a, 2 * np.arange(10, dtype='c8') + 4j) a = np.arange(10, dtype='c8') a += 2j @@ -1937,7 +2070,7 @@ def test_iter_buffered_cast_byteswapped_complex(): with i: for v in i: v[...] *= 2 - assert_equal(a, 2*np.arange(10, dtype='c8') + 4j) + assert_equal(a, 2 * np.arange(10, dtype='c8') + 4j) a = np.arange(10, dtype=np.clongdouble) a = a.view(a.dtype.newbyteorder()).byteswap() @@ -1950,7 +2083,7 @@ def test_iter_buffered_cast_byteswapped_complex(): with i: for v in i: v[...] *= 2 - assert_equal(a, 2*np.arange(10, dtype=np.clongdouble) + 4j) + assert_equal(a, 2 * np.arange(10, dtype=np.clongdouble) + 4j) a = np.arange(10, dtype=np.longdouble) a = a.view(a.dtype.newbyteorder()).byteswap() @@ -1962,7 +2095,7 @@ def test_iter_buffered_cast_byteswapped_complex(): with i: for v in i: v[...] 
*= 2 - assert_equal(a, 2*np.arange(10, dtype=np.longdouble)) + assert_equal(a, 2 * np.arange(10, dtype=np.longdouble)) def test_iter_buffered_cast_structured_type(): # Tests buffering of structured types @@ -1976,11 +2109,11 @@ def test_iter_buffered_cast_structured_type(): vals = [np.array(x) for x in i] assert_equal(vals[0]['a'], 0.5) assert_equal(vals[0]['b'], 0) - assert_equal(vals[0]['c'], [[(0.5)]*3]*2) + assert_equal(vals[0]['c'], [[(0.5)] * 3] * 2) assert_equal(vals[0]['d'], 0.5) assert_equal(vals[1]['a'], 1.5) assert_equal(vals[1]['b'], 1) - assert_equal(vals[1]['c'], [[(1.5)]*3]*2) + assert_equal(vals[1]['c'], [[(1.5)] * 3] * 2) assert_equal(vals[1]['d'], 1.5) assert_equal(vals[0].dtype, np.dtype(sdt)) @@ -1998,14 +2131,14 @@ def test_iter_buffered_cast_structured_type(): vals = [x.copy() for x in i] assert_equal(vals[0]['a'], 0.5) assert_equal(vals[0]['b'], 0) - assert_equal(vals[0]['c'], [[(0.5)]*3]*2) + assert_equal(vals[0]['c'], [[(0.5)] * 3] * 2) assert_equal(vals[0]['d'], 0.5) assert_equal(vals[1]['a'], 1.5) assert_equal(vals[1]['b'], 1) - assert_equal(vals[1]['c'], [[(1.5)]*3]*2) + assert_equal(vals[1]['c'], [[(1.5)] * 3] * 2) assert_equal(vals[1]['d'], 1.5) assert_equal(vals[0].dtype, np.dtype(sdt)) - vals, i, x = [None]*3 + vals, i, x = [None] * 3 if HAS_REFCOUNT: assert_equal(sys.getrefcount(a[0]), rc) @@ -2072,7 +2205,6 @@ def test_buffered_cast_error_paths(): buf[...] = "a" # cannot be converted to int. @pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") -@pytest.mark.skipif(not HAS_REFCOUNT, reason="PyPy seems to not hit this.") def test_buffered_cast_error_paths_unraisable(): # The following gives an unraisable error. Pytest sometimes captures that # (depending python and/or pytest version). 
So with Python>=3.8 this can @@ -2122,7 +2254,7 @@ def test_iter_buffered_cast_subarray(): assert_(np.all(x['a'] == count)) x['a'][0] += 2 count += 1 - assert_equal(a['a'], np.arange(6).reshape(6, 1, 1)+2) + assert_equal(a['a'], np.arange(6).reshape(6, 1, 1) + 2) # many -> one element -> back (copies just element 0) sdt1 = [('a', 'O', (3, 2, 2))] @@ -2139,7 +2271,7 @@ def test_iter_buffered_cast_subarray(): assert_equal(x['a'], count) x['a'] += 2 count += 1 - assert_equal(a['a'], np.arange(6).reshape(6, 1, 1, 1)*np.ones((1, 3, 2, 2))+2) + assert_equal(a['a'], np.arange(6).reshape(6, 1, 1, 1) * np.ones((1, 3, 2, 2)) + 2) # many -> one element -> back (copies just element 0) sdt1 = [('a', 'f8', (3, 2, 2))] @@ -2173,7 +2305,7 @@ def test_iter_buffered_cast_subarray(): sdt1 = [('a', 'O', (3, 2, 2))] sdt2 = [('a', 'f4', (3, 2, 2))] a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*3*2*2).reshape(6, 3, 2, 2) + a['a'] = np.arange(6 * 3 * 2 * 2).reshape(6, 3, 2, 2) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) @@ -2187,7 +2319,7 @@ def test_iter_buffered_cast_subarray(): sdt1 = [('a', 'f8', (6,))] sdt2 = [('a', 'f4', (2,))] a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*6).reshape(6, 6) + a['a'] = np.arange(6 * 6).reshape(6, 6) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) @@ -2201,7 +2333,7 @@ def test_iter_buffered_cast_subarray(): sdt1 = [('a', 'f8', (2,))] sdt2 = [('a', 'f4', (6,))] a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*2).reshape(6, 2) + a['a'] = np.arange(6 * 2).reshape(6, 2) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) @@ -2216,7 +2348,7 @@ def test_iter_buffered_cast_subarray(): sdt1 = [('a', 'f8', (2,))] sdt2 = [('a', 'f4', (2, 2))] a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*2).reshape(6, 2) + a['a'] = np.arange(6 * 2).reshape(6, 2) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', 
op_dtypes=sdt2) @@ -2231,7 +2363,7 @@ def test_iter_buffered_cast_subarray(): sdt1 = [('a', 'f8', (2, 1))] sdt2 = [('a', 'f4', (3, 2))] a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*2).reshape(6, 2, 1) + a['a'] = np.arange(6 * 2).reshape(6, 2, 1) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) @@ -2240,14 +2372,14 @@ def test_iter_buffered_cast_subarray(): for x in i: assert_equal(x['a'][:2, 0], a[count]['a'][:, 0]) assert_equal(x['a'][:2, 1], a[count]['a'][:, 0]) - assert_equal(x['a'][2,:], [0, 0]) + assert_equal(x['a'][2, :], [0, 0]) count += 1 # matrix -> matrix (truncates and zero-pads) sdt1 = [('a', 'f8', (2, 3))] sdt2 = [('a', 'f4', (3, 2))] a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*2*3).reshape(6, 2, 3) + a['a'] = np.arange(6 * 2 * 3).reshape(6, 2, 3) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) @@ -2256,7 +2388,7 @@ def test_iter_buffered_cast_subarray(): for x in i: assert_equal(x['a'][:2, 0], a[count]['a'][:, 0]) assert_equal(x['a'][:2, 1], a[count]['a'][:, 1]) - assert_equal(x['a'][2,:], [0, 0]) + assert_equal(x['a'][2, :], [0, 0]) count += 1 def test_iter_buffering_badwriteback(): @@ -2320,10 +2452,82 @@ def test_iter_buffering_growinner(): assert_equal(i[0].size, a.size) +@pytest.mark.parametrize("read_or_readwrite", ["readonly", "readwrite"]) +def test_iter_contig_flag_reduce_error(read_or_readwrite): + # Test that a non-contiguous operand is rejected without buffering. + # NOTE: This is true even for a reduction, where we return a 0-stride + # below! 
+ with pytest.raises(TypeError, match="Iterator operand required buffering"): + it = np.nditer( + (np.zeros(()),), flags=["external_loop", "reduce_ok"], + op_flags=[(read_or_readwrite, "contig"),], itershape=(10,)) + + +@pytest.mark.parametrize("arr", [ + lambda: np.zeros(()), + lambda: np.zeros((20, 1))[::20], + lambda: np.zeros((1, 20))[:, ::20] + ]) +def test_iter_contig_flag_single_operand_strides(arr): + """ + Tests the strides with the contig flag for both broadcast and non-broadcast + operands in 3 cases where the logic is needed: + 1. When everything has a zero stride, the broadcast op needs to repeated + 2. When the reduce axis is the last axis (first to iterate). + 3. When the reduce axis is the first axis (last to iterate). + + NOTE: The semantics of the cast flag are not clearly defined when + it comes to reduction. It is unclear that there are any users. + """ + first_op = np.ones((10, 10)) + broadcast_op = arr() + red_op = arr() + # Add a first operand to ensure no axis-reordering and the result shape. 
+ iterator = np.nditer( + (first_op, broadcast_op, red_op), + flags=["external_loop", "reduce_ok", "buffered", "delay_bufalloc"], + op_flags=[("readonly", "contig")] * 2 + [("readwrite", "contig")]) + + with iterator: + iterator.reset() + for f, b, r in iterator: + # The first operand is contigouos, we should have a view + assert np.shares_memory(f, first_op) + # Although broadcast, the second op always has a contiguous stride + assert b.strides[0] == 8 + assert not np.shares_memory(b, broadcast_op) + # The reduction has a contiguous stride or a 0 stride + if red_op.ndim == 0 or red_op.shape[-1] == 1: + assert r.strides[0] == 0 + else: + # The stride is 8, although it was not originally: + assert r.strides[0] == 8 + # If the reduce stride is 0, buffering makes no difference, but we + # do it anyway right now: + assert not np.shares_memory(r, red_op) + + +@pytest.mark.xfail(reason="The contig flag was always buggy.") +def test_iter_contig_flag_incorrect(): + # This case does the wrong thing... + iterator = np.nditer( + (np.ones((10, 10)).T, np.ones((1, 10))), + flags=["external_loop", "reduce_ok", "buffered", "delay_bufalloc"], + op_flags=[("readonly", "contig")] * 2) + + with iterator: + iterator.reset() + for a, b in iterator: + # Remove a and b from locals (pytest may want to format them) + a, b = a.strides, b.strides + assert a == 8 + assert b == 8 # should be 8 but is 0 due to axis reorder + + @pytest.mark.slow def test_iter_buffered_reduce_reuse(): # large enough array for all views, including negative strides. 
- a = np.arange(2*3**5)[3**5:3**5+1] + a = np.arange(2 * 3**5)[3**5:3**5 + 1] flags = ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok', 'refs_ok'] op_flags = [('readonly',), ('readwrite', 'allocate')] op_axes_list = [[(0, 1, 2), (0, 1, -1)], [(0, 1, 2), (0, -1, -1)]] @@ -2356,7 +2560,7 @@ def get_params(): comp_res = nditer2.operands[-1] - for bufsize in range(0, 3**3): + for bufsize in range(3**3): nditer1 = np.nditer([arr, None], op_axes=op_axes, flags=flags, op_flags=op_flags, buffersize=bufsize, op_dtypes=op_dtypes) @@ -2372,6 +2576,30 @@ def get_params(): assert_array_equal(res, comp_res) +def test_iter_buffered_reduce_reuse_core(): + # NumPy re-uses buffers for broadcast operands (as of writing when reading). + # Test this even if the offset is manually set at some point during + # the iteration. (not a particularly tricky path) + arr = np.empty((1, 6, 4, 1)).reshape(1, 6, 4, 1)[:, ::3, ::2, :] + arr[...] = np.arange(arr.size).reshape(arr.shape) + # First and last dimension are broadcast dimensions. 
+ arr = np.broadcast_to(arr, (100, 2, 2, 2)) + + flags = ['buffered', 'reduce_ok', 'refs_ok', 'multi_index'] + op_flags = [('readonly',)] + + buffersize = 100 # small enough to not fit the whole array + it = np.nditer(arr, flags=flags, op_flags=op_flags, buffersize=100) + + # Iterate a bit (this will cause buffering internally) + expected = [next(it) for i in range(11)] + # Now, manually advance to inside the core (the +1) + it.iterindex = 10 * (2 * 2 * 2) + 1 + result = [next(it) for i in range(10)] + + assert expected[1:] == result + + def test_iter_no_broadcast(): # Test that the no_broadcast flag works a = np.arange(24).reshape(2, 3, 4) @@ -2543,13 +2771,16 @@ def test_0d(self): i, j = np.nested_iters(a, [[1, 0, 2], []]) vals = [list(j) for _ in i] - assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]]) + assert_equal( + vals, + [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]], + ) i, j, k = np.nested_iters(a, [[2, 0], [], [1]]) vals = [] for x in i: for y in j: - vals.append([z for z in k]) + vals.append(list(k)) assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) def test_iter_nested_iters_dtype_buffered(self): @@ -2689,11 +2920,11 @@ def test_iter_buffering_reduction(): assert_equal(it[0], [1, 2, 1, 2]) # Iterator inner loop should take argument contiguity into account - x = np.ones((7, 13, 8), np.int8)[4:6,1:11:6,1:5].transpose(1, 2, 0) + x = np.ones((7, 13, 8), np.int8)[4:6, 1:11:6, 1:5].transpose(1, 2, 0) x[...] 
= np.arange(x.size).reshape(x.shape) - y_base = np.arange(4*4, dtype=np.int8).reshape(4, 4) + y_base = np.arange(4 * 4, dtype=np.int8).reshape(4, 4) y_base_copy = y_base.copy() - y = y_base[::2,:,None] + y = y_base[::2, :, None] it = np.nditer([y, x], ['buffered', 'external_loop', 'reduce_ok'], @@ -2795,19 +3026,20 @@ def _is_buffered(iterator): return True return False -@pytest.mark.parametrize("a", +@pytest.mark.parametrize("arrs", [np.zeros((3,), dtype='f8'), - np.zeros((9876, 3*5), dtype='f8')[::2, :], + np.zeros((9876, 3 * 5), dtype='f8')[::2, :], np.zeros((4, 312, 124, 3), dtype='f8')[::2, :, ::2, :], # Also test with the last dimension strided (so it does not fit if # there is repeated access) np.zeros((9,), dtype='f8')[::3], - np.zeros((9876, 3*10), dtype='f8')[::2, ::5], + np.zeros((9876, 3 * 10), dtype='f8')[::2, ::5], np.zeros((4, 312, 124, 3), dtype='f8')[::2, :, ::2, ::-1]]) -def test_iter_writemasked(a): +def test_iter_writemasked(arrs): # Note, the slicing above is to ensure that nditer cannot combine multiple # axes into one. The repetition is just to make things a bit more # interesting. 
+ a = arrs.copy() shape = a.shape reps = shape[-1] // 3 msk = np.empty(shape, dtype=bool) @@ -2934,7 +3166,7 @@ def test_iter_non_writable_attribute_deletion(): def test_iter_writable_attribute_deletion(): it = np.nditer(np.ones(2)) - attr = [ "multi_index", "index", "iterrange", "iterindex"] + attr = ["multi_index", "index", "iterrange", "iterindex"] for s in attr: assert_raises(AttributeError, delattr, it, s) @@ -2978,7 +3210,7 @@ def test_iter_allocated_array_dtypes(): def test_0d_iter(): # Basic test for iteration of 0-d arrays: - i = nditer([2, 3], ['multi_index'], [['readonly']]*2) + i = nditer([2, 3], ['multi_index'], [['readonly']] * 2) assert_equal(i.ndim, 0) assert_equal(next(i), (2, 3)) assert_equal(i.multi_index, ()) @@ -3011,7 +3243,7 @@ def test_0d_iter(): vals = next(i) assert_equal(vals['a'], 0.5) assert_equal(vals['b'], 0) - assert_equal(vals['c'], [[(0.5)]*3]*2) + assert_equal(vals['c'], [[(0.5)] * 3] * 2) assert_equal(vals['d'], 0.5) def test_object_iter_cleanup(): @@ -3097,10 +3329,17 @@ def test_iter_too_large_with_multiindex(): for i in range(num): for mode in range(6): # an axis with size 1024 is removed: - _multiarray_tests.test_nditer_too_large(arrays, i*2, mode) + _multiarray_tests.test_nditer_too_large(arrays, i * 2, mode) # an axis with size 1 is removed: with assert_raises(ValueError): - _multiarray_tests.test_nditer_too_large(arrays, i*2 + 1, mode) + _multiarray_tests.test_nditer_too_large(arrays, i * 2 + 1, mode) + + +def test_invalid_call_of_enable_external_loop(): + with pytest.raises(ValueError, + match='Iterator flag EXTERNAL_LOOP cannot be used'): + np.nditer(([[1], [2]], [3, 4]), ['multi_index']).enable_external_loop() + def test_writebacks(): a = np.arange(6, dtype='f4') @@ -3120,7 +3359,7 @@ def test_writebacks(): assert_equal(au.flags.writeable, False) it.operands[0][:] = 0 raise ValueError('exit context manager on exception') - except: + except Exception: pass assert_equal(au, 0) assert_equal(au.flags.writeable, True) @@ 
-3135,8 +3374,8 @@ def test_writebacks(): assert_(x.flags.writebackifcopy) assert_equal(au, 6) assert_(not x.flags.writebackifcopy) - x[:] = 123 # x.data still valid - assert_equal(au, 6) # but not connected to au + x[:] = 123 # x.data still valid + assert_equal(au, 6) # but not connected to au it = nditer(au, [], [['readwrite', 'updateifcopy']], @@ -3174,7 +3413,7 @@ def test_close_equivalent(): def add_close(x, y, out=None): addop = np.add it = np.nditer([x, y, out], [], - [['readonly'], ['readonly'], ['writeonly','allocate']]) + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) for (a, b, c) in it: addop(a, b, out=c) ret = it.operands[2] @@ -3184,7 +3423,7 @@ def add_close(x, y, out=None): def add_context(x, y, out=None): addop = np.add it = np.nditer([x, y, out], [], - [['readonly'], ['readonly'], ['writeonly','allocate']]) + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) with it: for (a, b, c) in it: addop(a, b, out=c) @@ -3196,7 +3435,7 @@ def add_context(x, y, out=None): def test_close_raises(): it = np.nditer(np.arange(3)) - assert_equal (next(it), 0) + assert_equal(next(it), 0) it.close() assert_raises(StopIteration, next, it) assert_raises(ValueError, getattr, it, 'operands') @@ -3210,16 +3449,11 @@ def test_warn_noclose(): a = np.arange(6, dtype='f4') au = a.byteswap() au = au.view(au.dtype.newbyteorder()) - with suppress_warnings() as sup: - sup.record(RuntimeWarning) + with pytest.warns(RuntimeWarning): it = np.nditer(au, [], [['readwrite', 'updateifcopy']], - casting='equiv', op_dtypes=[np.dtype('f4')]) + casting='equiv', op_dtypes=[np.dtype('f4')]) del it - assert len(sup.log) == 1 - -@pytest.mark.skipif(sys.version_info[:2] == (3, 9) and sys.platform == "win32", - reason="Errors with Python 3.9 on Windows") @pytest.mark.parametrize(["in_dtype", "buf_dtype"], [("i", "O"), ("O", "i"), # most simple cases ("i,O", "O,O"), # structured partially only copying O @@ -3285,6 +3519,43 @@ def test_partial_iteration_error(in_dtype, buf_dtype): 
assert count == sys.getrefcount(value) +def test_arbitrary_number_of_ops(): + # 2*16 + 1 is still just a few kiB, so should be fast and easy to deal with + # but larger than any small custom integer. + ops = [np.arange(10) for a in range(2**16 + 1)] + + it = np.nditer(ops) + for i, vals in enumerate(it): + assert all(v == i for v in vals) + + +def test_arbitrary_number_of_ops_nested(): + # 2*16 + 1 is still just a few kiB, so should be fast and easy to deal with + # but larger than any small custom integer. + ops = [np.arange(10) for a in range(2**16 + 1)] + + it = np.nested_iters(ops, [[0], []]) + for i, vals in enumerate(it): + assert all(v == i for v in vals) + + +@pytest.mark.slow +@pytest.mark.skipif(not IS_64BIT, reason="test requires 64-bit system") +@requires_memory(9 * np.iinfo(np.intc).max) +@pytest.mark.thread_unsafe(reason="crashes with low memory") +def test_arbitrary_number_of_ops_error(): + # A different error may happen for more than integer operands, but that + # is too large to test nicely. + a = np.ones(1) + args = [a] * (np.iinfo(np.intc).max + 1) + with pytest.raises(ValueError, match="Too many operands to nditer"): + np.nditer(args) + + with pytest.raises(ValueError, match="Too many operands to nditer"): + np.nested_iters(args, [[0], []]) + + +@pytest.mark.thread_unsafe(reason="capfd is thread-unsafe") def test_debug_print(capfd): """ Matches the expected output of a debug print with the actual output. 
@@ -3298,7 +3569,7 @@ def test_debug_print(capfd): expected = """ ------ BEGIN ITERATOR DUMP ------ | Iterator Address: - | ItFlags: BUFFER REDUCE REUSE_REDUCE_LOOPS + | ItFlags: BUFFER REDUCE | NDim: 2 | NOp: 2 | IterSize: 50 @@ -3314,21 +3585,23 @@ def test_debug_print(capfd): | DTypes: dtype('float64') dtype('int32') | InitDataPtrs: | BaseOffsets: 0 0 + | Ptrs: + | User/buffer ptrs: | Operands: | Operand DTypes: dtype('int64') dtype('float64') | OpItFlags: - | Flags[0]: READ CAST ALIGNED - | Flags[1]: READ WRITE CAST ALIGNED REDUCE + | Flags[0]: READ CAST + | Flags[1]: READ WRITE CAST REDUCE | | BufferData: | BufferSize: 50 | Size: 5 | BufIterEnd: 5 + | BUFFER CoreSize: 5 | REDUCE Pos: 0 | REDUCE OuterSize: 10 | REDUCE OuterDim: 1 | Strides: 8 4 - | Ptrs: | REDUCE Outer Strides: 40 0 | REDUCE Outer Ptrs: | ReadTransferFn: @@ -3341,12 +3614,10 @@ def test_debug_print(capfd): | Shape: 5 | Index: 0 | Strides: 16 8 - | Ptrs: | AxisData[1]: | Shape: 10 | Index: 0 | Strides: 80 0 - | Ptrs: ------- END ITERATOR DUMP ------- """.strip().splitlines() @@ -3364,3 +3635,25 @@ def test_debug_print(capfd): # The actual output may have additional pointers listed that are # stripped from the example output: assert res_line.startswith(expected_line.strip()) + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +def test_signature_constructor(): + sig = inspect.signature(np.nditer) + + assert sig.parameters + assert "self" not in sig.parameters + assert "args" not in sig.parameters + assert "kwargs" not in sig.parameters + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.parametrize( + "method", + [fn for name, fn in vars(np.nditer).items() if callable(fn) and name[0] != "_"], +) +def test_signature_methods(method): + sig = inspect.signature(method) + + assert "self" in sig.parameters + assert sig.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY diff --git a/numpy/_core/tests/test_nep50_promotions.py 
b/numpy/_core/tests/test_nep50_promotions.py index e603254f6fec..72f854c7b001 100644 --- a/numpy/_core/tests/test_nep50_promotions.py +++ b/numpy/_core/tests/test_nep50_promotions.py @@ -5,77 +5,49 @@ """ import operator -import threading -import warnings -import numpy as np - -import pytest import hypothesis +import pytest from hypothesis import strategies -from numpy.testing import assert_array_equal, IS_WASM - - -@pytest.fixture(scope="module", autouse=True) -def _weak_promotion_enabled(): - state = np._get_promotion_state() - np._set_promotion_state("weak_and_warn") - yield - np._set_promotion_state(state) +import numpy as np +from numpy.testing import IS_WASM, assert_array_equal @pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for fp errors") def test_nep50_examples(): - with pytest.warns(UserWarning, match="result dtype changed"): - res = np.uint8(1) + 2 + res = np.uint8(1) + 2 assert res.dtype == np.uint8 - with pytest.warns(UserWarning, match="result dtype changed"): - res = np.array([1], np.uint8) + np.int64(1) + res = np.array([1], np.uint8) + np.int64(1) assert res.dtype == np.int64 - with pytest.warns(UserWarning, match="result dtype changed"): - res = np.array([1], np.uint8) + np.array(1, dtype=np.int64) + res = np.array([1], np.uint8) + np.array(1, dtype=np.int64) assert res.dtype == np.int64 - with pytest.warns(UserWarning, match="result dtype changed"): - # Note: For "weak_and_warn" promotion state the overflow warning is - # unfortunately not given (because we use the full array path). 
- with np.errstate(over="raise"): - res = np.uint8(100) + 200 + with pytest.warns(RuntimeWarning, match="overflow"): + res = np.uint8(100) + 200 assert res.dtype == np.uint8 - with pytest.warns(Warning) as recwarn: + with pytest.warns(RuntimeWarning, match="overflow"): res = np.float32(1) + 3e100 - # Check that both warnings were given in the one call: - warning = str(recwarn.pop(UserWarning).message) - assert warning.startswith("result dtype changed") - warning = str(recwarn.pop(RuntimeWarning).message) - assert warning.startswith("overflow") - assert len(recwarn) == 0 # no further warnings assert np.isinf(res) assert res.dtype == np.float32 - # Changes, but we don't warn for it (too noisy) res = np.array([0.1], np.float32) == np.float64(0.1) assert res[0] == False - # Additional test, since the above silences the warning: - with pytest.warns(UserWarning, match="result dtype changed"): - res = np.array([0.1], np.float32) + np.float64(0.1) + res = np.array([0.1], np.float32) + np.float64(0.1) assert res.dtype == np.float64 - with pytest.warns(UserWarning, match="result dtype changed"): - res = np.array([1.], np.float32) + np.int64(3) + res = np.array([1.], np.float32) + np.int64(3) assert res.dtype == np.float64 @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) def test_nep50_weak_integers(dtype): # Avoids warning (different code path for scalars) - np._set_promotion_state("weak") scalar_type = np.dtype(dtype).type maxint = int(np.iinfo(dtype).max) @@ -94,7 +66,6 @@ def test_nep50_weak_integers(dtype): @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) def test_nep50_weak_integers_with_inexact(dtype): # Avoids warning (different code path for scalars) - np._set_promotion_state("weak") scalar_type = np.dtype(dtype).type too_big_int = int(np.finfo(dtype).max) * 2 @@ -137,7 +108,6 @@ def test_nep50_weak_integers_with_inexact(dtype): @pytest.mark.parametrize("op", [operator.add, operator.pow]) def test_weak_promotion_scalar_path(op): # Some 
additional paths exercising the weak scalars. - np._set_promotion_state("weak") # Integer path: res = op(np.uint8(3), 5) @@ -154,17 +124,13 @@ def test_weak_promotion_scalar_path(op): def test_nep50_complex_promotion(): - np._set_promotion_state("weak") - with pytest.warns(RuntimeWarning, match=".*overflow"): res = np.complex64(3) + complex(2**300) - assert type(res) == np.complex64 + assert type(res) is np.complex64 def test_nep50_integer_conversion_errors(): - # Do not worry about warnings here (auto-fixture will reset). - np._set_promotion_state("weak") # Implementation for error paths is mostly missing (as of writing) with pytest.raises(OverflowError, match=".*uint8"): np.array([1], np.uint8) + 300 @@ -178,51 +144,24 @@ def test_nep50_integer_conversion_errors(): np.uint8(1) + -1 -def test_nep50_integer_regression(): - # Test the old integer promotion rules. When the integer is too large, - # we need to keep using the old-style promotion. - np._set_promotion_state("legacy") - arr = np.array(1) - assert (arr + 2**63).dtype == np.float64 - assert (arr[()] + 2**63).dtype == np.float64 - - def test_nep50_with_axisconcatenator(): - # I promised that this will be an error in the future in the 1.25 - # release notes; test this (NEP 50 opt-in makes the deprecation an error). - np._set_promotion_state("weak") - + # Concatenate/r_ does not promote, so this has to error: with pytest.raises(OverflowError): np.r_[np.arange(5, dtype=np.int8), 255] @pytest.mark.parametrize("ufunc", [np.add, np.power]) -@pytest.mark.parametrize("state", ["weak", "weak_and_warn"]) -def test_nep50_huge_integers(ufunc, state): +def test_nep50_huge_integers(ufunc): # Very large integers are complicated, because they go to uint64 or - # object dtype. This tests covers a few possible paths (some of which - # cannot give the NEP 50 warnings). - np._set_promotion_state(state) - + # object dtype. This tests covers a few possible paths. 
with pytest.raises(OverflowError): ufunc(np.int64(0), 2**63) # 2**63 too large for int64 - if state == "weak_and_warn": - with pytest.warns(UserWarning, - match="result dtype changed.*float64.*uint64"): - with pytest.raises(OverflowError): - ufunc(np.uint64(0), 2**64) - else: - with pytest.raises(OverflowError): - ufunc(np.uint64(0), 2**64) # 2**64 cannot be represented by uint64 + with pytest.raises(OverflowError): + ufunc(np.uint64(0), 2**64) # 2**64 cannot be represented by uint64 # However, 2**63 can be represented by the uint64 (and that is used): - if state == "weak_and_warn": - with pytest.warns(UserWarning, - match="result dtype changed.*float64.*uint64"): - res = ufunc(np.uint64(1), 2**63) - else: - res = ufunc(np.uint64(1), 2**63) + res = ufunc(np.uint64(1), 2**63) assert res.dtype == np.uint64 assert res == ufunc(1, 2**63, dtype=object) @@ -240,14 +179,10 @@ def test_nep50_huge_integers(ufunc, state): def test_nep50_in_concat_and_choose(): - np._set_promotion_state("weak_and_warn") - - with pytest.warns(UserWarning, match="result dtype changed"): - res = np.concatenate([np.float32(1), 1.], axis=None) + res = np.concatenate([np.float32(1), 1.], axis=None) assert res.dtype == "float32" - with pytest.warns(UserWarning, match="result dtype changed"): - res = np.choose(1, [np.float32(1), 1.]) + res = np.choose(1, [np.float32(1), 1.]) assert res.dtype == "float32" @@ -263,8 +198,6 @@ def test_nep50_in_concat_and_choose(): ]) @hypothesis.given(data=strategies.data()) def test_expected_promotion(expected, dtypes, optional_dtypes, data): - np._set_promotion_state("weak") - # Sample randomly while ensuring "dtypes" is always present: optional = data.draw(strategies.lists( strategies.sampled_from(dtypes + optional_dtypes))) @@ -279,13 +212,11 @@ def test_expected_promotion(expected, dtypes, optional_dtypes, data): [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64]) @pytest.mark.parametrize("other_val", - [-2*100, -1, 0, 9, 10, 11, 
2**63, 2*100]) + [-2 * 100, -1, 0, 9, 10, 11, 2**63, 2 * 100]) @pytest.mark.parametrize("comp", [operator.eq, operator.ne, operator.le, operator.lt, operator.ge, operator.gt]) def test_integer_comparison(sctype, other_val, comp): - np._set_promotion_state("weak") - # Test that comparisons with integers (especially out-of-bound) ones # works correctly. val_obj = 10 @@ -303,12 +234,24 @@ def test_integer_comparison(sctype, other_val, comp): assert_array_equal(comp(other_val, val_obj), comp(other_val, val)) +@pytest.mark.parametrize("arr", [ + np.ones((100, 100), dtype=np.uint8)[::2], # not trivially iterable + np.ones(20000, dtype=">u4"), # cast and >buffersize + np.ones(100, dtype=">u4"), # fast path compatible with cast +]) +def test_integer_comparison_with_cast(arr): + # Similar to above, but mainly test a few cases that cover the slow path + # the test is limited to unsigned ints and -1 for simplicity. + res = arr >= -1 + assert_array_equal(res, np.ones_like(arr, dtype=bool)) + res = arr < -1 + assert_array_equal(res, np.zeros_like(arr, dtype=bool)) + + @pytest.mark.parametrize("comp", [np.equal, np.not_equal, np.less_equal, np.less, np.greater_equal, np.greater]) def test_integer_integer_comparison(comp): - np._set_promotion_state("weak") - # Test that the NumPy comparison ufuncs work with large Python integers assert comp(2**200, -2**200) == comp(2**200, -2**200, dtype=object) @@ -342,33 +285,3 @@ def test_oob_creation(sctype, create): assert create(sctype, iinfo.min) == iinfo.min assert create(sctype, iinfo.max) == iinfo.max - - -@pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for threads") -def test_thread_local_promotion_state(): - b = threading.Barrier(2) - - def legacy_no_warn(): - np._set_promotion_state("legacy") - b.wait() - assert np._get_promotion_state() == "legacy" - # turn warnings into errors, this should not warn with - # legacy promotion state - with warnings.catch_warnings(): - warnings.simplefilter("error") - np.float16(1) + 
131008 - - def weak_warn(): - np._set_promotion_state("weak") - b.wait() - assert np._get_promotion_state() == "weak" - with pytest.raises(RuntimeWarning): - np.float16(1) + 131008 - - task1 = threading.Thread(target=legacy_no_warn) - task2 = threading.Thread(target=weak_warn) - - task1.start() - task2.start() - task1.join() - task2.join() diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 72f5b74107cb..d32052bda176 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -1,27 +1,34 @@ -import sys -import warnings +import inspect import itertools -import platform -import pytest import math +import platform +import sys +import warnings from decimal import Decimal +import pytest +from hypothesis import given, strategies as st +from hypothesis.extra import numpy as hynp + import numpy as np -from numpy._core import umath, sctypes +from numpy import ma +from numpy._core import sctypes +from numpy._core._rational_tests import rational from numpy._core.numerictypes import obj2sctype -from numpy._core.arrayprint import set_string_function from numpy.exceptions import AxisError from numpy.random import rand, randint, randn from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_raises_regex, - assert_array_equal, assert_almost_equal, assert_array_almost_equal, - assert_warns, assert_array_max_ulp, HAS_REFCOUNT, IS_WASM - ) -from numpy._core._rational_tests import rational -from numpy import ma - -from hypothesis import given, strategies as st -from hypothesis.extra import numpy as hynp + HAS_REFCOUNT, + IS_WASM, + assert_, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_array_max_ulp, + assert_equal, + assert_raises, + assert_raises_regex, +) class TestResize: @@ -72,6 +79,13 @@ def test_negative_resize(self): with pytest.raises(ValueError, match=r"negative"): np.resize(A, new_shape=new_shape) + def test_unsigned_resize(self): + # ensure unsigned 
integer sizes don't lead to underflows + for dt_pair in [(np.int32, np.uint32), (np.int64, np.uint64)]: + arr = np.array([[23, 95], [66, 37]]) + assert_array_equal(np.resize(arr, dt_pair[0](1)), + np.resize(arr, dt_pair[1](1))) + def test_subclass(self): class MyArray(np.ndarray): __array_priority__ = 1. @@ -170,12 +184,6 @@ def test_reshape_shape_arg(self): shape = (3, 4) expected = arr.reshape(shape) - with pytest.raises( - TypeError, - match="You cannot specify 'newshape' and 'shape' " - "arguments at the same time." - ): - np.reshape(arr, shape=shape, newshape=shape) with pytest.raises( TypeError, match=r"reshape\(\) missing 1 required positional " @@ -185,11 +193,9 @@ def test_reshape_shape_arg(self): assert_equal(np.reshape(arr, shape), expected) assert_equal(np.reshape(arr, shape, order="C"), expected) + assert_equal(np.reshape(arr, shape, "C"), expected) assert_equal(np.reshape(arr, shape=shape), expected) assert_equal(np.reshape(arr, shape=shape, order="C"), expected) - with pytest.warns(DeprecationWarning): - actual = np.reshape(arr, newshape=shape) - assert_equal(actual, expected) def test_reshape_copy_arg(self): arr = np.arange(24).reshape(2, 3, 4) @@ -243,7 +249,7 @@ def test_dunder_round(self, dtype): pytest.param(2**31 - 1, -1, marks=pytest.mark.skip(reason="Out of range of int32") ), - (2**31 - 1, 1-math.ceil(math.log10(2**31 - 1))), + (2**31 - 1, 1 - math.ceil(math.log10(2**31 - 1))), (2**31 - 1, -math.ceil(math.log10(2**31 - 1))) ]) def test_dunder_round_edgecases(self, val, ndigits): @@ -277,6 +283,10 @@ def test_size(self): assert_(np.size(A) == 6) assert_(np.size(A, 0) == 2) assert_(np.size(A, 1) == 3) + assert_(np.size(A, ()) == 1) + assert_(np.size(A, (0,)) == 2) + assert_(np.size(A, (1,)) == 3) + assert_(np.size(A, (0, 1)) == 6) def test_squeeze(self): A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]] @@ -334,7 +344,7 @@ def test_take(self): tgt = np.array([1, 3, 3, 4], dtype=array_type) out = np.take(x, ind) assert_equal(out, tgt) - 
assert_equal(out.dtype, tgt.dtype) + assert_equal(out.dtype, tgt.dtype) def test_trace(self): c = [[1, 2], [3, 4], [5, 6]] @@ -344,6 +354,7 @@ def test_transpose(self): arr = [[1, 2], [3, 4], [5, 6]] tgt = [[1, 3, 5], [2, 4, 6]] assert_equal(np.transpose(arr, (1, 0)), tgt) + assert_equal(np.transpose(arr, (-1, -2)), tgt) assert_equal(np.matrix_transpose(arr), tgt) def test_var(self): @@ -728,27 +739,29 @@ def test_bitwise_xor(self): class TestBoolArray: - def setup_method(self): + def _create_bool_arrays(self): # offset for simd tests - self.t = np.array([True] * 41, dtype=bool)[1::] - self.f = np.array([False] * 41, dtype=bool)[1::] - self.o = np.array([False] * 42, dtype=bool)[2::] - self.nm = self.f.copy() - self.im = self.t.copy() - self.nm[3] = True - self.nm[-2] = True - self.im[3] = False - self.im[-2] = False + t = np.array([True] * 41, dtype=bool)[1::] + f = np.array([False] * 41, dtype=bool)[1::] + o = np.array([False] * 42, dtype=bool)[2::] + nm = f.copy() + im = t.copy() + nm[3] = True + nm[-2] = True + im[3] = False + im[-2] = False + return t, f, o, nm, im def test_all_any(self): - assert_(self.t.all()) - assert_(self.t.any()) - assert_(not self.f.all()) - assert_(not self.f.any()) - assert_(self.nm.any()) - assert_(self.im.any()) - assert_(not self.nm.all()) - assert_(not self.im.all()) + t, f, _, nm, im = self._create_bool_arrays() + assert_(t.all()) + assert_(t.any()) + assert_(not f.all()) + assert_(not f.any()) + assert_(nm.any()) + assert_(im.any()) + assert_(not nm.all()) + assert_(not im.all()) # check bad element in all positions for i in range(256 - 7): d = np.array([False] * 256, dtype=bool)[7::] @@ -762,124 +775,109 @@ def test_all_any(self): for i in list(range(9, 6000, 507)) + [7764, 90021, -10]: d = np.array([False] * 100043, dtype=bool) d[i] = True - assert_(np.any(d), msg="%r" % i) + assert_(np.any(d), msg=f"{i!r}") e = np.array([True] * 100043, dtype=bool) e[i] = False - assert_(not np.all(e), msg="%r" % i) + assert_(not np.all(e), 
msg=f"{i!r}") def test_logical_not_abs(self): - assert_array_equal(~self.t, self.f) - assert_array_equal(np.abs(~self.t), self.f) - assert_array_equal(np.abs(~self.f), self.t) - assert_array_equal(np.abs(self.f), self.f) - assert_array_equal(~np.abs(self.f), self.t) - assert_array_equal(~np.abs(self.t), self.f) - assert_array_equal(np.abs(~self.nm), self.im) - np.logical_not(self.t, out=self.o) - assert_array_equal(self.o, self.f) - np.abs(self.t, out=self.o) - assert_array_equal(self.o, self.t) + t, f, o, nm, im = self._create_bool_arrays() + assert_array_equal(~t, f) + assert_array_equal(np.abs(~t), f) + assert_array_equal(np.abs(~f), t) + assert_array_equal(np.abs(f), f) + assert_array_equal(~np.abs(f), t) + assert_array_equal(~np.abs(t), f) + assert_array_equal(np.abs(~nm), im) + np.logical_not(t, out=o) + assert_array_equal(o, f) + np.abs(t, out=o) + assert_array_equal(o, t) def test_logical_and_or_xor(self): - assert_array_equal(self.t | self.t, self.t) - assert_array_equal(self.f | self.f, self.f) - assert_array_equal(self.t | self.f, self.t) - assert_array_equal(self.f | self.t, self.t) - np.logical_or(self.t, self.t, out=self.o) - assert_array_equal(self.o, self.t) - assert_array_equal(self.t & self.t, self.t) - assert_array_equal(self.f & self.f, self.f) - assert_array_equal(self.t & self.f, self.f) - assert_array_equal(self.f & self.t, self.f) - np.logical_and(self.t, self.t, out=self.o) - assert_array_equal(self.o, self.t) - assert_array_equal(self.t ^ self.t, self.f) - assert_array_equal(self.f ^ self.f, self.f) - assert_array_equal(self.t ^ self.f, self.t) - assert_array_equal(self.f ^ self.t, self.t) - np.logical_xor(self.t, self.t, out=self.o) - assert_array_equal(self.o, self.f) - - assert_array_equal(self.nm & self.t, self.nm) - assert_array_equal(self.im & self.f, False) - assert_array_equal(self.nm & True, self.nm) - assert_array_equal(self.im & False, self.f) - assert_array_equal(self.nm | self.t, self.t) - assert_array_equal(self.im | self.f, 
self.im) - assert_array_equal(self.nm | True, self.t) - assert_array_equal(self.im | False, self.im) - assert_array_equal(self.nm ^ self.t, self.im) - assert_array_equal(self.im ^ self.f, self.im) - assert_array_equal(self.nm ^ True, self.im) - assert_array_equal(self.im ^ False, self.im) + t, f, o, nm, im = self._create_bool_arrays() + assert_array_equal(t | t, t) + assert_array_equal(f | f, f) + assert_array_equal(t | f, t) + assert_array_equal(f | t, t) + np.logical_or(t, t, out=o) + assert_array_equal(o, t) + assert_array_equal(t & t, t) + assert_array_equal(f & f, f) + assert_array_equal(t & f, f) + assert_array_equal(f & t, f) + np.logical_and(t, t, out=o) + assert_array_equal(o, t) + assert_array_equal(t ^ t, f) + assert_array_equal(f ^ f, f) + assert_array_equal(t ^ f, t) + assert_array_equal(f ^ t, t) + np.logical_xor(t, t, out=o) + assert_array_equal(o, f) + + assert_array_equal(nm & t, nm) + assert_array_equal(im & f, False) + assert_array_equal(nm & True, nm) + assert_array_equal(im & False, f) + assert_array_equal(nm | t, t) + assert_array_equal(im | f, im) + assert_array_equal(nm | True, t) + assert_array_equal(im | False, im) + assert_array_equal(nm ^ t, im) + assert_array_equal(im ^ f, im) + assert_array_equal(nm ^ True, im) + assert_array_equal(im ^ False, im) class TestBoolCmp: - def setup_method(self): - self.f = np.ones(256, dtype=np.float32) - self.ef = np.ones(self.f.size, dtype=bool) - self.d = np.ones(128, dtype=np.float64) - self.ed = np.ones(self.d.size, dtype=bool) + def _create_data(self, dtype, size): + # generate data using given dtype and num for size of array + a = np.ones(size, dtype=dtype) + e = np.ones(a.size, dtype=bool) # generate values for all permutation of 256bit simd vectors s = 0 - for i in range(32): - self.f[s:s+8] = [i & 2**x for x in range(8)] - self.ef[s:s+8] = [(i & 2**x) != 0 for x in range(8)] - s += 8 - s = 0 - for i in range(16): - self.d[s:s+4] = [i & 2**x for x in range(4)] - self.ed[s:s+4] = [(i & 2**x) != 0 
for x in range(4)] - s += 4 - - self.nf = self.f.copy() - self.nd = self.d.copy() - self.nf[self.ef] = np.nan - self.nd[self.ed] = np.nan - - self.inff = self.f.copy() - self.infd = self.d.copy() - self.inff[::3][self.ef[::3]] = np.inf - self.infd[::3][self.ed[::3]] = np.inf - self.inff[1::3][self.ef[1::3]] = -np.inf - self.infd[1::3][self.ed[1::3]] = -np.inf - self.inff[2::3][self.ef[2::3]] = np.nan - self.infd[2::3][self.ed[2::3]] = np.nan - self.efnonan = self.ef.copy() - self.efnonan[2::3] = False - self.ednonan = self.ed.copy() - self.ednonan[2::3] = False - - self.signf = self.f.copy() - self.signd = self.d.copy() - self.signf[self.ef] *= -1. - self.signd[self.ed] *= -1. - self.signf[1::6][self.ef[1::6]] = -np.inf - self.signd[1::6][self.ed[1::6]] = -np.inf + r = int(size / 32) + for i in range(int(size / 8)): + a[s:s + r] = [i & 2**x for x in range(r)] + e[s:s + r] = [(i & 2**x) != 0 for x in range(r)] + s += r + n = a.copy() + n[e] = np.nan + + inf = a.copy() + inf[::3][e[::3]] = np.inf + inf[1::3][e[1::3]] = -np.inf + inf[2::3][e[2::3]] = np.nan + enonan = e.copy() + enonan[2::3] = False + + sign = a.copy() + sign[e] *= -1. + sign[1::6][e[1::6]] = -np.inf # On RISC-V, many operations that produce NaNs, such as converting # a -NaN from f64 to f32, return a canonical NaN. The canonical # NaNs are always positive. See section 11.3 NaN Generation and # Propagation of the RISC-V Unprivileged ISA for more details. # We disable the float32 sign test on riscv64 for -np.nan as the sign # of the NaN will be lost when it's converted to a float32. - if platform.machine() != 'riscv64': - self.signf[3::6][self.ef[3::6]] = -np.nan - self.signd[3::6][self.ed[3::6]] = -np.nan - self.signf[4::6][self.ef[4::6]] = -0. - self.signd[4::6][self.ed[4::6]] = -0. + if not (dtype == np.float32 and platform.machine() == 'riscv64'): + sign[3::6][e[3::6]] = -np.nan + sign[4::6][e[4::6]] = -0. 
+ return a, e, n, inf, enonan, sign def test_float(self): # offset for alignment test + f, ef, nf, inff, efnonan, signf = self._create_data(np.float32, 256) for i in range(4): - assert_array_equal(self.f[i:] > 0, self.ef[i:]) - assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:]) - assert_array_equal(self.f[i:] == 0, ~self.ef[i:]) - assert_array_equal(-self.f[i:] < 0, self.ef[i:]) - assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:]) - r = self.f[i:] != 0 - assert_array_equal(r, self.ef[i:]) - r2 = self.f[i:] != np.zeros_like(self.f[i:]) - r3 = 0 != self.f[i:] + assert_array_equal(f[i:] > 0, ef[i:]) + assert_array_equal(f[i:] - 1 >= 0, ef[i:]) + assert_array_equal(f[i:] == 0, ~ef[i:]) + assert_array_equal(-f[i:] < 0, ef[i:]) + assert_array_equal(-f[i:] + 1 <= 0, ef[i:]) + r = f[i:] != 0 + assert_array_equal(r, ef[i:]) + r2 = f[i:] != np.zeros_like(f[i:]) + r3 = 0 != f[i:] assert_array_equal(r, r2) assert_array_equal(r, r3) # check bool == 0x1 @@ -888,24 +886,25 @@ def test_float(self): assert_array_equal(r3.view(np.int8), r3.astype(np.int8)) # isnan on amd64 takes the same code path - assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:]) - assert_array_equal(np.isfinite(self.nf[i:]), ~self.ef[i:]) - assert_array_equal(np.isfinite(self.inff[i:]), ~self.ef[i:]) - assert_array_equal(np.isinf(self.inff[i:]), self.efnonan[i:]) - assert_array_equal(np.signbit(self.signf[i:]), self.ef[i:]) + assert_array_equal(np.isnan(nf[i:]), ef[i:]) + assert_array_equal(np.isfinite(nf[i:]), ~ef[i:]) + assert_array_equal(np.isfinite(inff[i:]), ~ef[i:]) + assert_array_equal(np.isinf(inff[i:]), efnonan[i:]) + assert_array_equal(np.signbit(signf[i:]), ef[i:]) def test_double(self): # offset for alignment test + d, ed, nd, infd, ednonan, signd = self._create_data(np.float64, 128) for i in range(2): - assert_array_equal(self.d[i:] > 0, self.ed[i:]) - assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:]) - assert_array_equal(self.d[i:] == 0, ~self.ed[i:]) - assert_array_equal(-self.d[i:] < 
0, self.ed[i:]) - assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:]) - r = self.d[i:] != 0 - assert_array_equal(r, self.ed[i:]) - r2 = self.d[i:] != np.zeros_like(self.d[i:]) - r3 = 0 != self.d[i:] + assert_array_equal(d[i:] > 0, ed[i:]) + assert_array_equal(d[i:] - 1 >= 0, ed[i:]) + assert_array_equal(d[i:] == 0, ~ed[i:]) + assert_array_equal(-d[i:] < 0, ed[i:]) + assert_array_equal(-d[i:] + 1 <= 0, ed[i:]) + r = d[i:] != 0 + assert_array_equal(r, ed[i:]) + r2 = d[i:] != np.zeros_like(d[i:]) + r3 = 0 != d[i:] assert_array_equal(r, r2) assert_array_equal(r, r3) # check bool == 0x1 @@ -914,21 +913,21 @@ def test_double(self): assert_array_equal(r3.view(np.int8), r3.astype(np.int8)) # isnan on amd64 takes the same code path - assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:]) - assert_array_equal(np.isfinite(self.nd[i:]), ~self.ed[i:]) - assert_array_equal(np.isfinite(self.infd[i:]), ~self.ed[i:]) - assert_array_equal(np.isinf(self.infd[i:]), self.ednonan[i:]) - assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:]) + assert_array_equal(np.isnan(nd[i:]), ed[i:]) + assert_array_equal(np.isfinite(nd[i:]), ~ed[i:]) + assert_array_equal(np.isfinite(infd[i:]), ~ed[i:]) + assert_array_equal(np.isinf(infd[i:]), ednonan[i:]) + assert_array_equal(np.signbit(signd[i:]), ed[i:]) class TestSeterr: def test_default(self): err = np.geterr() assert_equal(err, - dict(divide='warn', - invalid='warn', - over='warn', - under='ignore') + {'divide': 'warn', + 'invalid': 'warn', + 'over': 'warn', + 'under': 'ignore'} ) def test_set(self): @@ -961,10 +960,10 @@ def assert_raises_fpe(self, fpeerr, flop, x, y): try: flop(x, y) assert_(False, - "Type %s did not raise fpe error '%s'." % (ftype, fpeerr)) + f"Type {ftype} did not raise fpe error '{fpeerr}'.") except FloatingPointError as exc: assert_(str(exc).find(fpeerr) >= 0, - "Type %s raised wrong fpe error '%s'." 
% (ftype, exc)) + f"Type {ftype} raised wrong fpe error '{exc}'.") def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2): # Check that fpe exception is raised. @@ -993,7 +992,7 @@ def test_floating_exceptions(self, typecode): if np.dtype(ftype).kind == 'f': # Get some extreme values for the type fi = np.finfo(ftype) - ft_tiny = fi._machar.tiny + ft_tiny = fi.tiny ft_max = fi.max ft_eps = fi.eps underflow = 'underflow' @@ -1002,7 +1001,7 @@ def test_floating_exceptions(self, typecode): # 'c', complex, corresponding real dtype rtype = type(ftype(0).real) fi = np.finfo(rtype) - ft_tiny = ftype(fi._machar.tiny) + ft_tiny = ftype(fi.tiny) ft_max = ftype(fi.max) ft_eps = ftype(fi.eps) # The complex types raise different exceptions @@ -1015,34 +1014,37 @@ def test_floating_exceptions(self, typecode): # pass the assert if not np.isnan(ft_tiny): self.assert_raises_fpe(underflow, - lambda a, b: a/b, ft_tiny, ft_max) + lambda a, b: a / b, ft_tiny, ft_max) self.assert_raises_fpe(underflow, - lambda a, b: a*b, ft_tiny, ft_tiny) - self.assert_raises_fpe(overflow, - lambda a, b: a*b, ft_max, ftype(2)) + lambda a, b: a * b, ft_tiny, ft_tiny) self.assert_raises_fpe(overflow, - lambda a, b: a/b, ft_max, ftype(0.5)) + lambda a, b: a * b, ft_max, ftype(2)) self.assert_raises_fpe(overflow, - lambda a, b: a+b, ft_max, ft_max*ft_eps) + lambda a, b: a / b, ft_max, ftype(0.5)) self.assert_raises_fpe(overflow, - lambda a, b: a-b, -ft_max, ft_max*ft_eps) + lambda a, b: a + b, ft_max, ft_max * ft_eps) self.assert_raises_fpe(overflow, - np.power, ftype(2), ftype(2**fi.nexp)) + lambda a, b: a - b, -ft_max, ft_max * ft_eps) + # On AIX, pow() with double does not raise the overflow exception, + # it returns inf. Long double is the same as double. 
+ if sys.platform != 'aix' or typecode not in 'dDgG': + self.assert_raises_fpe(overflow, + np.power, ftype(2), ftype(2**fi.nexp)) self.assert_raises_fpe(divbyzero, - lambda a, b: a/b, ftype(1), ftype(0)) + lambda a, b: a / b, ftype(1), ftype(0)) self.assert_raises_fpe( - invalid, lambda a, b: a/b, ftype(np.inf), ftype(np.inf) + invalid, lambda a, b: a / b, ftype(np.inf), ftype(np.inf) ) self.assert_raises_fpe(invalid, - lambda a, b: a/b, ftype(0), ftype(0)) + lambda a, b: a / b, ftype(0), ftype(0)) self.assert_raises_fpe( - invalid, lambda a, b: a-b, ftype(np.inf), ftype(np.inf) + invalid, lambda a, b: a - b, ftype(np.inf), ftype(np.inf) ) self.assert_raises_fpe( - invalid, lambda a, b: a+b, ftype(np.inf), ftype(-np.inf) + invalid, lambda a, b: a + b, ftype(np.inf), ftype(-np.inf) ) self.assert_raises_fpe(invalid, - lambda a, b: a*b, ftype(0), ftype(np.inf)) + lambda a, b: a * b, ftype(0), ftype(np.inf)) @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support") def test_warnings(self): @@ -1134,7 +1136,6 @@ def check_promotion_cases(self, promote_func): assert_equal(promote_func(np.array([i8]), f64), np.dtype(np.float64)) assert_equal(promote_func(np.array([u16]), f64), np.dtype(np.float64)) - def test_coercion(self): def res_type(a, b): return np.add(a, b).dtype @@ -1145,26 +1146,26 @@ def res_type(a, b): # shouldn't narrow the float/complex type for a in [np.array([True, False]), np.array([-3, 12], dtype=np.int8)]: b = 1.234 * a - assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype) + assert_equal(b.dtype, np.dtype('f8'), f"array type {a.dtype}") b = np.longdouble(1.234) * a assert_equal(b.dtype, np.dtype(np.longdouble), - "array type %s" % a.dtype) + f"array type {a.dtype}") b = np.float64(1.234) * a - assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype) + assert_equal(b.dtype, np.dtype('f8'), f"array type {a.dtype}") b = np.float32(1.234) * a - assert_equal(b.dtype, np.dtype('f4'), "array type %s" % a.dtype) + 
assert_equal(b.dtype, np.dtype('f4'), f"array type {a.dtype}") b = np.float16(1.234) * a - assert_equal(b.dtype, np.dtype('f2'), "array type %s" % a.dtype) + assert_equal(b.dtype, np.dtype('f2'), f"array type {a.dtype}") b = 1.234j * a - assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype) + assert_equal(b.dtype, np.dtype('c16'), f"array type {a.dtype}") b = np.clongdouble(1.234j) * a assert_equal(b.dtype, np.dtype(np.clongdouble), - "array type %s" % a.dtype) + f"array type {a.dtype}") b = np.complex128(1.234j) * a - assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype) + assert_equal(b.dtype, np.dtype('c16'), f"array type {a.dtype}") b = np.complex64(1.234j) * a - assert_equal(b.dtype, np.dtype('c8'), "array type %s" % a.dtype) + assert_equal(b.dtype, np.dtype('c8'), f"array type {a.dtype}") # The following use-case is problematic, and to resolve its # tricky side-effects requires more changes. @@ -1250,31 +1251,31 @@ def test_promote_types_strings(self, swap, string_dtype): S = string_dtype # Promote numeric with unsized string: - assert_equal(promote_types('bool', S), np.dtype(S+'5')) - assert_equal(promote_types('b', S), np.dtype(S+'4')) - assert_equal(promote_types('u1', S), np.dtype(S+'3')) - assert_equal(promote_types('u2', S), np.dtype(S+'5')) - assert_equal(promote_types('u4', S), np.dtype(S+'10')) - assert_equal(promote_types('u8', S), np.dtype(S+'20')) - assert_equal(promote_types('i1', S), np.dtype(S+'4')) - assert_equal(promote_types('i2', S), np.dtype(S+'6')) - assert_equal(promote_types('i4', S), np.dtype(S+'11')) - assert_equal(promote_types('i8', S), np.dtype(S+'21')) + assert_equal(promote_types('bool', S), np.dtype(S + '5')) + assert_equal(promote_types('b', S), np.dtype(S + '4')) + assert_equal(promote_types('u1', S), np.dtype(S + '3')) + assert_equal(promote_types('u2', S), np.dtype(S + '5')) + assert_equal(promote_types('u4', S), np.dtype(S + '10')) + assert_equal(promote_types('u8', S), np.dtype(S + '20')) + 
assert_equal(promote_types('i1', S), np.dtype(S + '4')) + assert_equal(promote_types('i2', S), np.dtype(S + '6')) + assert_equal(promote_types('i4', S), np.dtype(S + '11')) + assert_equal(promote_types('i8', S), np.dtype(S + '21')) # Promote numeric with sized string: - assert_equal(promote_types('bool', S+'1'), np.dtype(S+'5')) - assert_equal(promote_types('bool', S+'30'), np.dtype(S+'30')) - assert_equal(promote_types('b', S+'1'), np.dtype(S+'4')) - assert_equal(promote_types('b', S+'30'), np.dtype(S+'30')) - assert_equal(promote_types('u1', S+'1'), np.dtype(S+'3')) - assert_equal(promote_types('u1', S+'30'), np.dtype(S+'30')) - assert_equal(promote_types('u2', S+'1'), np.dtype(S+'5')) - assert_equal(promote_types('u2', S+'30'), np.dtype(S+'30')) - assert_equal(promote_types('u4', S+'1'), np.dtype(S+'10')) - assert_equal(promote_types('u4', S+'30'), np.dtype(S+'30')) - assert_equal(promote_types('u8', S+'1'), np.dtype(S+'20')) - assert_equal(promote_types('u8', S+'30'), np.dtype(S+'30')) + assert_equal(promote_types('bool', S + '1'), np.dtype(S + '5')) + assert_equal(promote_types('bool', S + '30'), np.dtype(S + '30')) + assert_equal(promote_types('b', S + '1'), np.dtype(S + '4')) + assert_equal(promote_types('b', S + '30'), np.dtype(S + '30')) + assert_equal(promote_types('u1', S + '1'), np.dtype(S + '3')) + assert_equal(promote_types('u1', S + '30'), np.dtype(S + '30')) + assert_equal(promote_types('u2', S + '1'), np.dtype(S + '5')) + assert_equal(promote_types('u2', S + '30'), np.dtype(S + '30')) + assert_equal(promote_types('u4', S + '1'), np.dtype(S + '10')) + assert_equal(promote_types('u4', S + '30'), np.dtype(S + '30')) + assert_equal(promote_types('u8', S + '1'), np.dtype(S + '20')) + assert_equal(promote_types('u8', S + '30'), np.dtype(S + '30')) # Promote with object: - assert_equal(promote_types('O', S+'30'), np.dtype('O')) + assert_equal(promote_types('O', S + '30'), np.dtype('O')) @pytest.mark.parametrize(["dtype1", "dtype2"], [[np.dtype("V6"), 
np.dtype("V10")], # mismatch shape @@ -1488,21 +1489,21 @@ def test_can_cast_structured_to_simple(self): assert_(not np.can_cast([('f0', ('i4,i4'), (2,))], 'i4', casting='unsafe')) - @pytest.mark.xfail(np._get_promotion_state() != "legacy", - reason="NEP 50: no python int/float/complex support (yet)") def test_can_cast_values(self): - # gh-5917 - for dt in sctypes['int'] + sctypes['uint']: - ii = np.iinfo(dt) - assert_(np.can_cast(ii.min, dt)) - assert_(np.can_cast(ii.max, dt)) - assert_(not np.can_cast(ii.min - 1, dt)) - assert_(not np.can_cast(ii.max + 1, dt)) - - for dt in sctypes['float']: - fi = np.finfo(dt) - assert_(np.can_cast(fi.min, dt)) - assert_(np.can_cast(fi.max, dt)) + # With NumPy 2 and NEP 50, can_cast errors on Python scalars. We could + # define this as (usually safe) at some point, and already do so + # in `copyto` and ufuncs (but there an error is raised if the integer + # is out of bounds and a warning for out-of-bound floats). + # Raises even for unsafe, previously checked within range (for floats + # that was approximately whether it would overflow to inf). + with pytest.raises(TypeError): + np.can_cast(4, "int8", casting="unsafe") + + with pytest.raises(TypeError): + np.can_cast(4.0, "float64", casting="unsafe") + + with pytest.raises(TypeError): + np.can_cast(4j, "complex128", casting="unsafe") @pytest.mark.parametrize("dtype", list("?bhilqBHILQefdgFDG") + [rational]) @@ -1554,7 +1555,7 @@ def load_data(self, n, eindex): # Raise an exception at the desired index in the iterator. 
for e in range(n): if e == eindex: - raise NIterError('error at index %s' % eindex) + raise NIterError(f'error at index {eindex}') yield e @pytest.mark.parametrize("dtype", [int, object]) @@ -1628,6 +1629,7 @@ def test_failed_itemsetting(self): with pytest.raises(ValueError): np.fromiter(iterable, dtype=np.dtype((int, 2))) + class TestNonzero: def test_nonzero_trivial(self): assert_equal(np.count_nonzero(np.array([])), 0) @@ -1657,8 +1659,10 @@ def test_nonzero_onedim(self): # x = np.array([(1, 2), (0, 0), (1, 1), (-1, 3), (0, 7)], # dtype=[('a', 'i4'), ('b', 'i2')]) - x = np.array([(1, 2, -5, -3), (0, 0, 2, 7), (1, 1, 0, 1), (-1, 3, 1, 0), (0, 7, 0, 4)], - dtype=[('a', 'i4'), ('b', 'i2'), ('c', 'i1'), ('d', 'i8')]) + x = np.array( + [(1, 2, -5, -3), (0, 0, 2, 7), (1, 1, 0, 1), (-1, 3, 1, 0), (0, 7, 0, 4)], + dtype=[('a', 'i4'), ('b', 'i2'), ('c', 'i1'), ('d', 'i8')] + ) assert_equal(np.count_nonzero(x['a']), 3) assert_equal(np.count_nonzero(x['b']), 4) assert_equal(np.count_nonzero(x['c']), 3) @@ -1704,9 +1708,26 @@ def test_sparse(self): c = np.zeros(400, dtype=bool) c[10 + i:20 + i] = True - c[20 + i*2] = True + c[20 + i * 2] = True assert_equal(np.nonzero(c)[0], - np.concatenate((np.arange(10 + i, 20 + i), [20 + i*2]))) + np.concatenate((np.arange(10 + i, 20 + i), [20 + i * 2]))) + + @pytest.mark.parametrize('dtype', [np.float32, np.float64]) + def test_nonzero_float_dtypes(self, dtype): + rng = np.random.default_rng(seed=10) + x = ((2**33) * rng.normal(size=100)).astype(dtype) + x[rng.choice(50, size=100)] = 0 + idxs = np.nonzero(x)[0] + assert_equal(np.array_equal(np.where(x != 0)[0], idxs), True) + + @pytest.mark.parametrize('dtype', [bool, np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64]) + def test_nonzero_integer_dtypes(self, dtype): + rng = np.random.default_rng(seed=10) + x = rng.integers(0, 255, size=100).astype(dtype) + x[rng.choice(50, size=100)] = 0 + idxs = np.nonzero(x)[0] + assert_equal(np.array_equal(np.where(x 
!= 0)[0], idxs), True) def test_return_type(self): class C(np.ndarray): @@ -1714,7 +1735,7 @@ class C(np.ndarray): for view in (C, np.ndarray): for nd in range(1, 4): - shape = tuple(range(2, 2+nd)) + shape = tuple(range(2, 2 + nd)) x = np.arange(np.prod(shape)).reshape(shape).view(view) for nzx in (np.nonzero(x), x.nonzero()): for nzx_i in nzx: @@ -1864,6 +1885,7 @@ def test_nonzero_sideeffect_safety(self): # gh-13631 class FalseThenTrue: _val = False + def __bool__(self): try: return self._val @@ -1872,6 +1894,7 @@ def __bool__(self): class TrueThenFalse: _val = True + def __bool__(self): try: return self._val @@ -1926,38 +1949,44 @@ def __bool__(self): """ # assert that an exception in first pass is handled correctly - a = np.array([ThrowsAfter(5)]*10) + a = np.array([ThrowsAfter(5)] * 10) assert_raises(ValueError, np.nonzero, a) # raise exception in second pass for 1-dimensional loop - a = np.array([ThrowsAfter(15)]*10) + a = np.array([ThrowsAfter(15)] * 10) assert_raises(ValueError, np.nonzero, a) # raise exception in second pass for n-dimensional loop - a = np.array([[ThrowsAfter(15)]]*10) + a = np.array([[ThrowsAfter(15)]] * 10) assert_raises(ValueError, np.nonzero, a) - @pytest.mark.skipif(IS_WASM, reason="wasm doesn't have threads") - def test_structured_threadsafety(self): - # Nonzero (and some other functions) should be threadsafe for - # structured datatypes, see gh-15387. This test can behave randomly. 
- from concurrent.futures import ThreadPoolExecutor + def test_nonzero_byteorder(self): + values = [0., -0., 1, float('nan'), 0, 1, + np.float16(0), np.float16(12.3)] + expected_values = [0, 0, 1, 1, 0, 1, 0, 1] + + for value, expected in zip(values, expected_values): + A = np.array([value]) + A_byteswapped = (A.view(A.dtype.newbyteorder()).byteswap()).copy() - # Create a deeply nested dtype to make a failure more likely: - dt = np.dtype([("", "f8")]) - dt = np.dtype([("", dt)]) - dt = np.dtype([("", dt)] * 2) - # The array should be large enough to likely run into threading issues - arr = np.random.uniform(size=(5000, 4)).view(dt)[:, 0] - def func(arr): - arr.nonzero() + assert np.count_nonzero(A) == expected + assert np.count_nonzero(A_byteswapped) == expected - tpe = ThreadPoolExecutor(max_workers=8) - futures = [tpe.submit(func, arr) for _ in range(10)] - for f in futures: - f.result() + def test_count_nonzero_non_aligned_array(self): + # gh-27523 + b = np.zeros(64 + 1, dtype=np.int8)[1:] + b = b.view(int) + b[:] = np.arange(b.size) + b[::2] = 0 + assert b.flags.aligned is False + assert np.count_nonzero(b) == b.size / 2 - assert arr.dtype is dt + b = np.zeros(64 + 1, dtype=np.float16)[1:] + b = b.view(float) + b[:] = np.arange(b.size) + b[::2] = 0 + assert b.flags.aligned is False + assert np.count_nonzero(b) == b.size / 2 class TestIndex: @@ -1967,7 +1996,9 @@ def test_boolean(self): g1 = randint(0, 5, size=15) g2 = randint(0, 8, size=15) V[g1, g2] = -V[g1, g2] - assert_((np.array([a[0][V > 0], a[1][V > 0], a[2][V > 0]]) == a[:, V > 0]).all()) + assert_( + (np.array([a[0][V > 0], a[1][V > 0], a[2][V > 0]]) == a[:, V > 0]).all() + ) def test_boolean_edgecase(self): a = np.array([], dtype='int32') @@ -2016,7 +2047,7 @@ def test_neg_width_boundaries(self): def test_large_neg_int64(self): # See gh-14289. 
assert_equal(np.binary_repr(np.int64(-2**62), width=64), - '11' + '0'*62) + '11' + '0' * 62) class TestBaseRepr: @@ -2064,7 +2095,7 @@ def _test_array_equal_parametrizations(): yield (e1, e1.copy(), False, True) yield (e1, e1.copy(), True, True) - # Non-nanable – those cannot hold nans + # Non-nanable - those cannot hold nans a12 = np.array([1, 2]) a12b = a12.copy() a123 = np.array([1, 2, 3]) @@ -2177,9 +2208,9 @@ class TestArrayComparisons: ) def test_array_equal_equal_nan(self, bx, by, equal_nan, expected): """ - This test array_equal for a few combinaison: + This test array_equal for a few combinations: - - are the two inputs the same object or not (same object many not + - are the two inputs the same object or not (same object may not be equal if contains NaNs) - Whether we should consider or not, NaNs, being equal. @@ -2191,14 +2222,21 @@ def test_array_equal_equal_nan(self, bx, by, equal_nan, expected): assert_(res is expected) assert_(type(res) is bool) + def test_array_equal_different_scalar_types(self): + # https://github.com/numpy/numpy/issues/27271 + a = np.array("foo") + b = np.array(1) + assert not np.array_equal(a, b) + assert not np.array_equiv(a, b) + def test_none_compares_elementwise(self): a = np.array([None, 1, None], dtype=object) - assert_equal(a == None, [True, False, True]) - assert_equal(a != None, [False, True, False]) + assert_equal(a == None, [True, False, True]) # noqa: E711 + assert_equal(a != None, [False, True, False]) # noqa: E711 a = np.ones(3) - assert_equal(a == None, [False, False, False]) - assert_equal(a != None, [True, True, True]) + assert_equal(a == None, [False, False, False]) # noqa: E711 + assert_equal(a != None, [True, True, True]) # noqa: E711 def test_array_equiv(self): res = np.array_equiv(np.array([1, 2]), np.array([1, 2])) @@ -2226,7 +2264,10 @@ def test_array_equiv(self): res = np.array_equiv(np.array([1, 2]), np.array([[1], [2]])) assert_(not res) assert_(type(res) is bool) - res = np.array_equiv(np.array([1, 
2]), np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])) + res = np.array_equiv( + np.array([1, 2]), + np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), + ) assert_(not res) assert_(type(res) is bool) @@ -2265,16 +2306,15 @@ def assert_array_strict_equal(x, y): class TestClip: - def setup_method(self): - self.nr = 5 - self.nc = 3 + nr = 5 + nc = 3 def fastclip(self, a, m, M, out=None, **kwargs): return a.clip(m, M, out=out, **kwargs) def clip(self, a, m, M, out=None): # use a.choose to verify fastclip result - selector = np.less(a, m) + 2*np.greater(a, M) + selector = np.less(a, m) + 2 * np.greater(a, M) return selector.choose((a, m, M), out=out) # Handy functions @@ -2741,9 +2781,9 @@ def test_object_clip(self): assert actual.tolist() == expected.tolist() def test_clip_all_none(self): - a = np.arange(10, dtype=object) - with assert_raises_regex(ValueError, 'max or min'): - np.clip(a, None, None) + arr = np.arange(10, dtype=object) + assert_equal(np.clip(arr, None, None), arr) + assert_equal(np.clip(arr), arr) def test_clip_invalid_casting(self): a = np.arange(10, dtype=object) @@ -2771,8 +2811,8 @@ def test_clip_value_min_max_flip(self, amin, amax): # case produced by hypothesis (np.zeros(10, dtype=object), 0, - -2**64+1, - np.full(10, -2**64+1, dtype=object)), + -2**64 + 1, + np.full(10, -2**64 + 1, dtype=object)), # for bugs in NPY_TIMEDELTA_MAX, based on a case # produced by hypothesis (np.zeros(10, dtype='m8') - 1, @@ -2797,16 +2837,12 @@ def test_clip_scalar_nan_propagation(self, arr, amin, amax): actual = np.clip(arr, amin, amax) assert_equal(actual, expected) - @pytest.mark.xfail(reason="propagation doesn't match spec") @pytest.mark.parametrize("arr, amin, amax", [ (np.array([1] * 10, dtype='m8'), np.timedelta64('NaT'), np.zeros(10, dtype=np.int32)), ]) - @pytest.mark.filterwarnings("ignore::DeprecationWarning") def test_NaT_propagation(self, arr, amin, amax): - # NOTE: the expected function spec doesn't - # propagate NaT, but clip() now does expected = 
np.minimum(np.maximum(arr, amin), amax) actual = np.clip(arr, amin, amax) assert_equal(actual, expected) @@ -2860,6 +2896,46 @@ def test_clip_property(self, data, arr): assert result.dtype == t assert_array_equal(result, expected) + def test_clip_min_max_args(self): + arr = np.arange(5) + + assert_array_equal(np.clip(arr), arr) + assert_array_equal(np.clip(arr, min=2, max=3), np.clip(arr, 2, 3)) + assert_array_equal(np.clip(arr, min=None, max=2), + np.clip(arr, None, 2)) + + with assert_raises_regex(TypeError, "missing 1 required positional " + "argument: 'a_max'"): + np.clip(arr, 2) + with assert_raises_regex(TypeError, "missing 1 required positional " + "argument: 'a_min'"): + np.clip(arr, a_max=2) + msg = ("Passing `min` or `max` keyword argument when `a_min` and " + "`a_max` are provided is forbidden.") + with assert_raises_regex(ValueError, msg): + np.clip(arr, 2, 3, max=3) + with assert_raises_regex(ValueError, msg): + np.clip(arr, 2, 3, min=2) + + @pytest.mark.parametrize("dtype,min,max", [ + ("int32", -2**32 - 1, 2**32), + ("int32", -2**320, None), + ("int32", None, 2**300), + ("int32", -1000, 2**32), + ("int32", -2**32 - 1, 1000), + ("uint8", -1, 129), + ]) + def test_out_of_bound_pyints(self, dtype, min, max): + a = np.arange(10000).astype(dtype) + # Check min only + c = np.clip(a, min=min, max=max) + assert not np.may_share_memory(a, c) + assert c.dtype == a.dtype + if min is not None: + assert (c >= min).all() + if max is not None: + assert (c <= max).all() + class TestAllclose: rtol = 1e-5 @@ -2872,10 +2948,10 @@ def teardown_method(self): np.seterr(**self.olderr) def tst_allclose(self, x, y): - assert_(np.allclose(x, y), "%s and %s not close" % (x, y)) + assert_(np.allclose(x, y), f"{x} and {y} not close") def tst_not_allclose(self, x, y): - assert_(not np.allclose(x, y), "%s and %s shouldn't be close" % (x, y)) + assert_(not np.allclose(x, y), f"{x} and {y} shouldn't be close") def test_ip_allclose(self): # Parametric test factory. 
@@ -2887,10 +2963,10 @@ def test_ip_allclose(self): data = [([1, 0], [1, 0]), ([atol], [0]), - ([1], [1+rtol+atol]), - (arr, arr + arr*rtol), - (arr, arr + arr*rtol + atol*2), - (aran, aran + aran*rtol), + ([1], [1 + rtol + atol]), + (arr, arr + arr * rtol), + (arr, arr + arr * rtol + atol * 2), + (aran, aran + aran * rtol), (np.inf, np.inf), (np.inf, [np.inf])] @@ -2910,9 +2986,9 @@ def test_ip_not_allclose(self): ([np.inf, np.inf], [1, 0]), ([-np.inf, 0], [np.inf, 0]), ([np.nan, 0], [np.nan, 0]), - ([atol*2], [0]), - ([1], [1+rtol+atol*2]), - (aran, aran + aran*atol + atol*2), + ([atol * 2], [0]), + ([1], [1 + rtol + atol * 2]), + (aran, aran + aran * atol + atol * 2), (np.array([np.inf, 1]), np.array([0, np.inf]))] for (x, y) in data: @@ -2960,9 +3036,9 @@ def _setup(self): ([1, 0], [1, 0]), ([atol], [0]), ([1], [1 + rtol + atol]), - (arr, arr + arr*rtol), - (arr, arr + arr*rtol + atol), - (aran, aran + aran*rtol), + (arr, arr + arr * rtol), + (arr, arr + arr * rtol + atol), + (aran, aran + aran * rtol), (np.inf, np.inf), (np.inf, [np.inf]), ([np.inf, -np.inf], [np.inf, -np.inf]), @@ -2973,14 +3049,14 @@ def _setup(self): ([np.inf, np.inf], [1, -np.inf]), ([np.inf, np.inf], [1, 0]), ([np.nan, 0], [np.nan, -np.inf]), - ([atol*2], [0]), - ([1], [1 + rtol + atol*2]), - (aran, aran + rtol*1.1*aran + atol*1.1), + ([atol * 2], [0]), + ([1], [1 + rtol + atol * 2]), + (aran, aran + rtol * 1.1 * aran + atol * 1.1), (np.array([np.inf, 1]), np.array([0, np.inf])), ] self.some_close_tests = [ - ([np.inf, 0], [np.inf, atol*2]), - ([atol, 1, 1e6*(1 + 2*rtol) + atol], [0, np.nan, 1e6]), + ([np.inf, 0], [np.inf, atol * 2]), + ([atol, 1, 1e6 * (1 + 2 * rtol) + atol], [0, np.nan, 1e6]), (np.arange(3), [0, 1, 2.1]), (np.nan, [np.nan, np.nan, np.nan]), ([0], [atol, np.inf, -np.inf, np.nan]), @@ -3019,7 +3095,7 @@ def test_ip_isclose(self): np.isclose(x, y, rtol=rtol) def test_nep50_isclose(self): - below_one = float(1.-np.finfo('f8').eps) + below_one = float(1. 
- np.finfo('f8').eps) f32 = np.array(below_one, 'f4') # This is just 1 at float32 precision assert f32 > np.array(below_one) # NEP 50 broadcasting of python scalars @@ -3028,13 +3104,13 @@ def test_nep50_isclose(self): # one uses a numpy float64). assert np.isclose(f32, below_one, atol=0, rtol=0) assert np.isclose(f32, np.float32(0), atol=below_one) - assert np.isclose(f32, 2, atol=0, rtol=below_one/2) + assert np.isclose(f32, 2, atol=0, rtol=below_one / 2) assert not np.isclose(f32, np.float64(below_one), atol=0, rtol=0) assert not np.isclose(f32, np.float32(0), atol=np.float64(below_one)) - assert not np.isclose(f32, 2, atol=0, rtol=np.float64(below_one/2)) + assert not np.isclose(f32, 2, atol=0, rtol=np.float64(below_one / 2)) def tst_all_isclose(self, x, y): - assert_(np.all(np.isclose(x, y)), "%s and %s not close" % (x, y)) + assert_(np.all(np.isclose(x, y)), f"{x} and {y} not close") def tst_none_isclose(self, x, y): msg = "%s and %s shouldn't be close" @@ -3046,7 +3122,9 @@ def tst_isclose_allclose(self, x, y): if np.isscalar(x) and np.isscalar(y): assert_(np.isclose(x, y) == np.allclose(x, y), msg=msg2 % (x, y)) else: - assert_array_equal(np.isclose(x, y).all(), np.allclose(x, y), msg % (x, y)) + assert_array_equal( + np.isclose(x, y).all(), np.allclose(x, y), msg % (x, y) + ) def test_ip_all_isclose(self): self._setup() @@ -3134,47 +3212,72 @@ def test_timedelta(self): assert np.allclose(a, a, atol=0, equal_nan=True) assert np.allclose(a, a, atol=np.timedelta64(1, "ns"), equal_nan=True) + def test_tol_warnings(self): + a = np.array([1, 2, 3]) + b = np.array([np.inf, np.nan, 1]) + + for i in b: + for j in b: + # Making sure that i and j are not both numbers, + # because that won't create a warning + if (i == 1) and (j == 1): + continue + + with warnings.catch_warnings(record=True) as w: + + warnings.simplefilter("always") + c = np.isclose(a, a, atol=i, rtol=j) + assert len(w) == 1 + assert issubclass(w[-1].category, RuntimeWarning) + expected = f"One of 
rtol or atol is not valid, atol: {i}, rtol: {j}" + assert expected in str(w[-1].message) + class TestStdVar: - def setup_method(self): - self.A = np.array([1, -1, 1, -1]) - self.real_var = 1 + def _create_data(self): + A = np.array([1, -1, 1, -1]) + real_var = 1 + return A, real_var def test_basic(self): - assert_almost_equal(np.var(self.A), self.real_var) - assert_almost_equal(np.std(self.A)**2, self.real_var) + A, real_var = self._create_data() + assert_almost_equal(np.var(A), real_var) + assert_almost_equal(np.std(A)**2, real_var) def test_scalars(self): assert_equal(np.var(1), 0) assert_equal(np.std(1), 0) def test_ddof1(self): - assert_almost_equal(np.var(self.A, ddof=1), - self.real_var * len(self.A) / (len(self.A) - 1)) - assert_almost_equal(np.std(self.A, ddof=1)**2, - self.real_var*len(self.A) / (len(self.A) - 1)) + A, real_var = self._create_data() + assert_almost_equal(np.var(A, ddof=1), + real_var * len(A) / (len(A) - 1)) + assert_almost_equal(np.std(A, ddof=1)**2, + real_var * len(A) / (len(A) - 1)) def test_ddof2(self): - assert_almost_equal(np.var(self.A, ddof=2), - self.real_var * len(self.A) / (len(self.A) - 2)) - assert_almost_equal(np.std(self.A, ddof=2)**2, - self.real_var * len(self.A) / (len(self.A) - 2)) + A, real_var = self._create_data() + assert_almost_equal(np.var(A, ddof=2), + real_var * len(A) / (len(A) - 2)) + assert_almost_equal(np.std(A, ddof=2)**2, + real_var * len(A) / (len(A) - 2)) def test_correction(self): + A, _ = self._create_data() assert_almost_equal( - np.var(self.A, correction=1), np.var(self.A, ddof=1) + np.var(A, correction=1), np.var(A, ddof=1) ) assert_almost_equal( - np.std(self.A, correction=1), np.std(self.A, ddof=1) + np.std(A, correction=1), np.std(A, ddof=1) ) err_msg = "ddof and correction can't be provided simultaneously." 
with assert_raises_regex(ValueError, err_msg): - np.var(self.A, ddof=1, correction=0) + np.var(A, ddof=1, correction=0) with assert_raises_regex(ValueError, err_msg): - np.std(self.A, ddof=1, correction=1) + np.std(A, ddof=1, correction=1) def test_out_scalar(self): d = np.arange(10) @@ -3203,26 +3306,22 @@ def test_scalars(self): class TestCreationFuncs: - # Test ones, zeros, empty and full. - - def setup_method(self): - dtypes = {np.dtype(tp) for tp in itertools.chain(*sctypes.values())} - # void, bytes, str - variable_sized = {tp for tp in dtypes if tp.str.endswith('0')} + def check_function(self, func, fill_value=None): + dtypes_info = {np.dtype(tp) for tp in itertools.chain(*sctypes.values())} keyfunc = lambda dtype: dtype.str - self.dtypes = sorted(dtypes - variable_sized | + variable_sized = {tp for tp in dtypes_info if tp.str.endswith('0')} + dtypes = sorted(dtypes_info - variable_sized | {np.dtype(tp.str.replace("0", str(i))) for tp in variable_sized for i in range(1, 10)}, key=keyfunc) - self.dtypes += [type(dt) for dt in sorted(dtypes, key=keyfunc)] - self.orders = {'C': 'c_contiguous', 'F': 'f_contiguous'} - self.ndims = 10 + dtypes += [type(dt) for dt in sorted(dtypes_info, key=keyfunc)] + orders = {'C': 'c_contiguous', 'F': 'f_contiguous'} + ndims = 10 - def check_function(self, func, fill_value=None): par = ((0, 1, 2), - range(self.ndims), - self.orders, - self.dtypes) + range(ndims), + orders, + dtypes) fill_kwarg = {} if fill_value is not None: fill_kwarg = {'fill_value': fill_value} @@ -3248,7 +3347,7 @@ def check_function(self, func, fill_value=None): assert_equal(arr.dtype, np.dtype(dtype_str)) else: assert_equal(arr.dtype, np.dtype(dtype.type)) - assert_(getattr(arr.flags, self.orders[order])) + assert_(getattr(arr.flags, orders[order])) if fill_value is not None: if arr.dtype.str.startswith('|S'): @@ -3275,45 +3374,47 @@ def test_for_reference_leak(self): # Make sure we have an object for reference dim = 1 beg = sys.getrefcount(dim) - 
np.zeros([dim]*10) + np.zeros([dim] * 10) assert_(sys.getrefcount(dim) == beg) - np.ones([dim]*10) + np.ones([dim] * 10) assert_(sys.getrefcount(dim) == beg) - np.empty([dim]*10) + np.empty([dim] * 10) assert_(sys.getrefcount(dim) == beg) - np.full([dim]*10, 0) + np.full([dim] * 10, 0) assert_(sys.getrefcount(dim) == beg) + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + @pytest.mark.parametrize("func", [np.empty, np.zeros, np.ones, np.full]) + def test_signatures(self, func): + sig = inspect.signature(func) + params = sig.parameters + + assert len(params) in {5, 6} + + assert 'shape' in params + assert params["shape"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert params["shape"].default is inspect.Parameter.empty + + assert 'dtype' in params + assert params["dtype"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert params["dtype"].default is None + + assert 'order' in params + assert params["order"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert params["order"].default == "C" + + assert 'device' in params + assert params["device"].kind is inspect.Parameter.KEYWORD_ONLY + assert params["device"].default is None + + assert 'like' in params + assert params["like"].kind is inspect.Parameter.KEYWORD_ONLY + assert params["like"].default is None + class TestLikeFuncs: '''Test ones_like, zeros_like, empty_like and full_like''' - def setup_method(self): - self.data = [ - # Array scalars - (np.array(3.), None), - (np.array(3), 'f8'), - # 1D arrays - (np.arange(6, dtype='f4'), None), - (np.arange(6), 'c16'), - # 2D C-layout arrays - (np.arange(6).reshape(2, 3), None), - (np.arange(6).reshape(3, 2), 'i1'), - # 2D F-layout arrays - (np.arange(6).reshape((2, 3), order='F'), None), - (np.arange(6).reshape((3, 2), order='F'), 'i1'), - # 3D C-layout arrays - (np.arange(24).reshape(2, 3, 4), None), - (np.arange(24).reshape(4, 3, 2), 'f4'), - # 3D F-layout arrays - (np.arange(24).reshape((2, 3, 4), order='F'), None), - 
(np.arange(24).reshape((4, 3, 2), order='F'), 'f4'), - # 3D non-C/F-layout arrays - (np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None), - (np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'), - ] - self.shapes = [(), (5,), (5,6,), (5,6,7,)] - def compare_array_value(self, dz, value, fill_value): if value is not None: if fill_value: @@ -3326,16 +3427,41 @@ def compare_array_value(self, dz, value, fill_value): assert_(np.all(dz == value)) def check_like_function(self, like_function, value, fill_value=False): + data = [ + # Array scalars + (np.array(3.), None), + (np.array(3), 'f8'), + # 1D arrays + (np.arange(6, dtype='f4'), None), + (np.arange(6), 'c16'), + # 2D C-layout arrays + (np.arange(6).reshape(2, 3), None), + (np.arange(6).reshape(3, 2), 'i1'), + # 2D F-layout arrays + (np.arange(6).reshape((2, 3), order='F'), None), + (np.arange(6).reshape((3, 2), order='F'), 'i1'), + # 3D C-layout arrays + (np.arange(24).reshape(2, 3, 4), None), + (np.arange(24).reshape(4, 3, 2), 'f4'), + # 3D F-layout arrays + (np.arange(24).reshape((2, 3, 4), order='F'), None), + (np.arange(24).reshape((4, 3, 2), order='F'), 'f4'), + # 3D non-C/F-layout arrays + (np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None), + (np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'), + ] + shapes = [(), (5,), (5, 6,), (5, 6, 7,)] + if fill_value: fill_kwarg = {'fill_value': value} else: fill_kwarg = {} - for d, dtype in self.data: + for d, dtype in data: # default (K) order, dtype dz = like_function(d, dtype=dtype, **fill_kwarg) assert_equal(dz.shape, d.shape) - assert_equal(np.array(dz.strides)*d.dtype.itemsize, - np.array(d.strides)*dz.dtype.itemsize) + assert_equal(np.array(dz.strides) * d.dtype.itemsize, + np.array(d.strides) * dz.dtype.itemsize) assert_equal(d.flags.c_contiguous, dz.flags.c_contiguous) assert_equal(d.flags.f_contiguous, dz.flags.f_contiguous) if dtype is None: @@ -3378,7 +3504,7 @@ def check_like_function(self, like_function, value, fill_value=False): 
self.compare_array_value(dz, value, fill_value) # Test the 'shape' parameter - for s in self.shapes: + for s in shapes: for o in 'CFA': sz = like_function(d, dtype=dtype, shape=s, order=o, **fill_kwarg) @@ -3434,7 +3560,11 @@ def test_empty_like(self): def test_filled_like(self): self.check_like_function(np.full_like, 0, True) self.check_like_function(np.full_like, 1, True) - self.check_like_function(np.full_like, 1000, True) + # Large integers may overflow, but using int64 is OK (casts) + # see also gh-27075 + with pytest.raises(OverflowError): + np.full_like(np.ones(3, dtype=np.int8), 1000) + self.check_like_function(np.full_like, np.int64(1000), True) self.check_like_function(np.full_like, 123.456, True) # Inf to integer casts cause invalid-value errors: ignore them. with np.errstate(invalid="ignore"): @@ -3449,7 +3579,7 @@ def test_dtype_str_bytes(self, likefunc, dtype): b = a[:, ::2] # Ensure b is not contiguous. kwargs = {'fill_value': ''} if likefunc == np.full_like else {} result = likefunc(b, dtype=dtype, **kwargs) - if dtype == str: + if dtype is str: assert result.strides == (16, 4) else: # dtype is bytes @@ -3499,9 +3629,9 @@ def test_no_overwrite(self): assert_array_equal(k, np.ones(3)) def test_complex(self): - x = np.array([1, 2, 3, 4+1j], dtype=complex) - y = np.array([-1, -2j, 3+1j], dtype=complex) - r_z = np.array([3-1j, 6, 8+1j, 11+5j, -5+8j, -4-1j], dtype=complex) + x = np.array([1, 2, 3, 4 + 1j], dtype=complex) + y = np.array([-1, -2j, 3 + 1j], dtype=complex) + r_z = np.array([3 - 1j, 6, 8 + 1j, 11 + 5j, -5 + 8j, -4 - 1j], dtype=complex) r_z = r_z[::-1].conjugate() z = np.correlate(y, x, mode='full') assert_array_almost_equal(z, r_z) @@ -3516,13 +3646,12 @@ def test_mode(self): d = np.ones(100) k = np.ones(3) default_mode = np.correlate(d, k, mode='valid') - with assert_warns(DeprecationWarning): - valid_mode = np.correlate(d, k, mode='v') - assert_array_equal(valid_mode, default_mode) + with assert_raises(ValueError): + np.correlate(d, k, 
mode='v') # integer mode with assert_raises(ValueError): np.correlate(d, k, mode=-1) - assert_array_equal(np.correlate(d, k, mode=0), valid_mode) + # assert_array_equal(np.correlate(d, k, mode=), default_mode) # illegal arguments with assert_raises(TypeError): np.correlate(d, k, mode=None) @@ -3545,24 +3674,33 @@ def test_mode(self): d = np.ones(100) k = np.ones(3) default_mode = np.convolve(d, k, mode='full') - with assert_warns(DeprecationWarning): - full_mode = np.convolve(d, k, mode='f') - assert_array_equal(full_mode, default_mode) + with assert_raises(ValueError): + np.convolve(d, k, mode='f') # integer mode with assert_raises(ValueError): np.convolve(d, k, mode=-1) - assert_array_equal(np.convolve(d, k, mode=2), full_mode) + assert_array_equal(np.convolve(d, k, mode=2), default_mode) # illegal arguments with assert_raises(TypeError): np.convolve(d, k, mode=None) + def test_convolve_empty_input_error_message(self): + """ + Test that convolve raises the correct error message when inputs are empty. + Regression test for gh-30272 (variable swapping bug). + """ + with pytest.raises(ValueError, match="a cannot be empty"): + np.convolve(np.array([]), np.array([1, 2])) + + with pytest.raises(ValueError, match="v cannot be empty"): + np.convolve(np.array([1, 2]), np.array([])) class TestArgwhere: @pytest.mark.parametrize('nd', [0, 1, 2]) def test_nd(self, nd): # get an nd array with multiple elements in every dimension - x = np.empty((2,)*nd, bool) + x = np.empty((2,) * nd, bool) # none x[...] 
= False @@ -3594,24 +3732,6 @@ def test_list(self): assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]]) -@pytest.mark.filterwarnings( - "ignore:.*set_string_function.*:DeprecationWarning" -) -class TestStringFunction: - - def test_set_string_function(self): - a = np.array([1]) - set_string_function(lambda x: "FOO", repr=True) - assert_equal(repr(a), "FOO") - set_string_function(None, repr=True) - assert_equal(repr(a), "array([1])") - - set_string_function(lambda x: "FOO", repr=False) - assert_equal(str(a), "FOO") - set_string_function(None, repr=False) - assert_equal(str(a), "[1]") - - class TestRoll: def test_roll1d(self): x = np.arange(10) @@ -3669,6 +3789,18 @@ def test_roll_empty(self): x = np.array([]) assert_equal(np.roll(x, 1), np.array([])) + def test_roll_unsigned_shift(self): + x = np.arange(4) + shift = np.uint16(2) + assert_equal(np.roll(x, shift), np.roll(x, 2)) + + shift = np.uint64(2**63 + 2) + assert_equal(np.roll(x, shift), np.roll(x, 2)) + + def test_roll_big_int(self): + x = np.arange(4) + assert_equal(np.roll(x, 2**100), x) + class TestRollaxis: @@ -3688,14 +3820,14 @@ class TestRollaxis: (3, 4): (1, 2, 3, 4)} def test_exceptions(self): - a = np.arange(1*2*3*4).reshape(1, 2, 3, 4) + a = np.arange(1 * 2 * 3 * 4).reshape(1, 2, 3, 4) assert_raises(AxisError, np.rollaxis, a, -5, 0) assert_raises(AxisError, np.rollaxis, a, 0, -5) assert_raises(AxisError, np.rollaxis, a, 4, 0) assert_raises(AxisError, np.rollaxis, a, 0, 5) def test_results(self): - a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy() + a = np.arange(1 * 2 * 3 * 4).reshape(1, 2, 3, 4).copy() aind = np.indices(a.shape) assert_(a.flags['OWNDATA']) for (i, j) in self.tgtshape: @@ -3703,7 +3835,7 @@ def test_results(self): res = np.rollaxis(a, axis=i, start=j) i0, i1, i2, i3 = aind[np.array(res.shape) - 1] assert_(np.all(res[i0, i1, i2, i3] == a)) - assert_(res.shape == self.tgtshape[(i, j)], str((i,j))) + assert_(res.shape == self.tgtshape[(i, j)], str((i, j))) assert_(not 
res.flags['OWNDATA']) # negative axis, positive start @@ -3807,29 +3939,16 @@ def test_array_likes(self): class TestCross: - @pytest.mark.filterwarnings( - "ignore:.*2-dimensional vectors.*:DeprecationWarning" - ) def test_2x2(self): u = [1, 2] v = [3, 4] - z = -2 - cp = np.cross(u, v) - assert_equal(cp, z) - cp = np.cross(v, u) - assert_equal(cp, -z) + assert_raises(ValueError, np.cross, u, v) - @pytest.mark.filterwarnings( - "ignore:.*2-dimensional vectors.*:DeprecationWarning" - ) def test_2x3(self): u = [1, 2] v = [3, 4, 5] - z = np.array([10, -5, -2]) - cp = np.cross(u, v) - assert_equal(cp, z) - cp = np.cross(v, u) - assert_equal(cp, -z) + assert_raises(ValueError, np.cross, u, v) + assert_raises(ValueError, np.cross, v, u) def test_3x3(self): u = [1, 2, 3] @@ -3840,32 +3959,7 @@ def test_3x3(self): cp = np.cross(v, u) assert_equal(cp, -z) - @pytest.mark.filterwarnings( - "ignore:.*2-dimensional vectors.*:DeprecationWarning" - ) def test_broadcasting(self): - # Ticket #2624 (Trac #2032) - u = np.tile([1, 2], (11, 1)) - v = np.tile([3, 4], (11, 1)) - z = -2 - assert_equal(np.cross(u, v), z) - assert_equal(np.cross(v, u), -z) - assert_equal(np.cross(u, u), 0) - - u = np.tile([1, 2], (11, 1)).T - v = np.tile([3, 4, 5], (11, 1)) - z = np.tile([10, -5, -2], (11, 1)) - assert_equal(np.cross(u, v, axisa=0), z) - assert_equal(np.cross(v, u.T), -z) - assert_equal(np.cross(v, v), 0) - - u = np.tile([1, 2, 3], (11, 1)).T - v = np.tile([3, 4], (11, 1)).T - z = np.tile([-12, 9, -2], (11, 1)) - assert_equal(np.cross(u, v, axisa=0, axisb=0), z) - assert_equal(np.cross(v.T, u.T), -z) - assert_equal(np.cross(u.T, u.T), 0) - u = np.tile([1, 2, 3], (5, 1)) v = np.tile([4, 5, 6], (5, 1)).T z = np.tile([-3, 6, -3], (5, 1)) @@ -3873,27 +3967,20 @@ def test_broadcasting(self): assert_equal(np.cross(v.T, u), -z) assert_equal(np.cross(u, u), 0) - @pytest.mark.filterwarnings( - "ignore:.*2-dimensional vectors.*:DeprecationWarning" - ) def test_broadcasting_shapes(self): u = 
np.ones((2, 1, 3)) v = np.ones((5, 3)) assert_equal(np.cross(u, v).shape, (2, 5, 3)) u = np.ones((10, 3, 5)) - v = np.ones((2, 5)) + v = np.ones((3, 5)) assert_equal(np.cross(u, v, axisa=1, axisb=0).shape, (10, 5, 3)) assert_raises(AxisError, np.cross, u, v, axisa=1, axisb=2) assert_raises(AxisError, np.cross, u, v, axisa=3, axisb=0) u = np.ones((10, 3, 5, 7)) - v = np.ones((5, 7, 2)) + v = np.ones((5, 7, 3)) assert_equal(np.cross(u, v, axisa=1, axisc=2).shape, (10, 5, 3, 7)) assert_raises(AxisError, np.cross, u, v, axisa=-5, axisb=2) assert_raises(AxisError, np.cross, u, v, axisa=1, axisb=-4) - # gh-5885 - u = np.ones((3, 4, 2)) - for axisc in range(-2, 2): - assert_equal(np.cross(u, u, axisc=axisc).shape, (3, 4)) def test_uint8_int32_mixed_dtypes(self): # regression test for gh-19138 @@ -3914,7 +4001,7 @@ def test_outer_out_param(): arr1 = np.ones((5,)) arr2 = np.ones((2,)) arr3 = np.linspace(-2, 2, 5) - out1 = np.ndarray(shape=(5,5)) + out1 = np.ndarray(shape=(5, 5)) out2 = np.ndarray(shape=(2, 5)) res1 = np.outer(arr1, arr3, out1) assert_equal(res1, out1) @@ -3948,7 +4035,7 @@ def test_scalar_input(self): assert_array_equal([[]], np.indices((0,), sparse=True)) def test_sparse(self): - [x, y] = np.indices((4,3), sparse=True) + [x, y] = np.indices((4, 3), sparse=True) assert_array_equal(x, np.array([[0], [1], [2], [3]])) assert_array_equal(y, np.array([[0, 1, 2]])) @@ -4075,23 +4162,35 @@ def test_number_of_arguments(self): assert_equal(mit.numiter, j) def test_broadcast_error_kwargs(self): - #gh-13455 + # gh-13455 arrs = [np.empty((5, 6, 7))] - mit = np.broadcast(*arrs) - mit2 = np.broadcast(*arrs, **{}) + mit = np.broadcast(*arrs) + mit2 = np.broadcast(*arrs, **{}) # noqa: PIE804 assert_equal(mit.shape, mit2.shape) assert_equal(mit.ndim, mit2.ndim) assert_equal(mit.nd, mit2.nd) assert_equal(mit.numiter, mit2.numiter) assert_(mit.iters[0].base is mit2.iters[0].base) - assert_raises(ValueError, np.broadcast, 1, **{'x': 1}) + assert_raises(ValueError, 
np.broadcast, 1, x=1) def test_shape_mismatch_error_message(self): with pytest.raises(ValueError, match=r"arg 0 with shape \(1, 3\) and " r"arg 2 with shape \(2,\)"): np.broadcast([[1, 2, 3]], [[4], [5]], [6, 7]) + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + def test_signatures(self): + sig_new = inspect.signature(np.broadcast) + assert len(sig_new.parameters) == 1 + assert "arrays" in sig_new.parameters + assert sig_new.parameters["arrays"].kind == inspect.Parameter.VAR_POSITIONAL + + sig_reset = inspect.signature(np.broadcast.reset) + assert len(sig_reset.parameters) == 1 + assert "self" in sig_reset.parameters + assert sig_reset.parameters["self"].kind == inspect.Parameter.POSITIONAL_ONLY + class TestKeepdims: @@ -4107,10 +4206,16 @@ def test_raise(self): class TestTensordot: + def test_rejects_duplicate_axes(self): + a = np.ones((2, 3, 3)) + b = np.ones((3, 3, 4)) + with pytest.raises(ValueError): + np.tensordot(a, b, axes=([1, 1], [0, 0])) + def test_zero_dimension(self): # Test resolution to issue #5663 - a = np.ndarray((3,0)) - b = np.ndarray((0,4)) + a = np.ndarray((3, 0)) + b = np.ndarray((0, 4)) td = np.tensordot(a, b, (1, 0)) assert_array_equal(td, np.dot(a, b)) assert_array_equal(td, np.einsum('ij,jk', a, b)) @@ -4118,7 +4223,8 @@ def test_zero_dimension(self): def test_zero_dimensional(self): # gh-12130 arr_0d = np.array(1) - ret = np.tensordot(arr_0d, arr_0d, ([], [])) # contracting no axes is well defined + # contracting no axes is well defined + ret = np.tensordot(arr_0d, arr_0d, ([], [])) assert_array_equal(ret, arr_0d) @@ -4138,5 +4244,10 @@ def test_astype(self): actual, np.astype(actual, actual.dtype, copy=False) ) + actual = np.astype(np.int64(10), np.float64) + expected = np.float64(10) + assert_equal(actual, expected) + assert_equal(actual.dtype, expected.dtype) + with pytest.raises(TypeError, match="Input should be a NumPy array"): np.astype(data, np.float64) diff --git a/numpy/_core/tests/test_numerictypes.py 
b/numpy/_core/tests/test_numerictypes.py index f09622e422a1..3fd26c32a500 100644 --- a/numpy/_core/tests/test_numerictypes.py +++ b/numpy/_core/tests/test_numerictypes.py @@ -1,15 +1,12 @@ -import sys import itertools +import sys import pytest + import numpy as np import numpy._core.numerictypes as nt -from numpy._core.numerictypes import ( - issctype, sctype2char, maximum_sctype, sctypes -) -from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_raises_regex, IS_PYPY -) +from numpy._core.numerictypes import issctype, sctype2char, sctypes +from numpy.testing import assert_, assert_equal, assert_raises, assert_raises_regex # This is the structure of the table used for plain objects: # @@ -63,9 +60,9 @@ ('z', 'u1')] NbufferT = [ - # x Info color info y z - # value y2 Info2 name z2 Name Value - # name value y3 z3 + # depth1: x Info color info y z + # depth2: value y2 Info2 name z2 Name Value + # depth3: name value y3 z3 ([3, 2], (6j, 6., (b'nn', [6j, 4j], [6., 4.], [1, 2]), b'NN', True), b'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8), ([4, 3], (7j, 7., (b'oo', [7j, 5j], [7., 5.], [2, 1]), b'OO', False), @@ -73,7 +70,7 @@ ] -byteorder = {'little':'<', 'big':'>'}[sys.byteorder] +byteorder = {'little': '<', 'big': '>'}[sys.byteorder] def normalize_descr(descr): "Normalize a description adding the platform byteorder." 
@@ -97,8 +94,7 @@ def normalize_descr(descr): l = normalize_descr(dtype) out.append((item[0], l)) else: - raise ValueError("Expected a str or list and got %s" % - (type(item))) + raise ValueError(f"Expected a str or list and got {type(item)}") return out @@ -345,17 +341,16 @@ def test_assign(self): class TestMultipleFields: - def setup_method(self): - self.ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8') - def _bad_call(self): - return self.ary['f0', 'f1'] + ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8') + return ary['f0', 'f1'] def test_no_tuple(self): assert_raises(IndexError, self._bad_call) def test_return(self): - res = self.ary[['f0', 'f2']].tolist() + ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8') + res = ary[['f0', 'f2']].tolist() assert_(res == [(1, 3), (5, 7)]) @@ -476,7 +471,7 @@ def test_isdtype_invalid_args(self): np.isdtype(np.int64, "int64") def test_sctypes_complete(self): - # issue 26439: int32/intc were masking eachother on 32-bit builds + # issue 26439: int32/intc were masking each other on 32-bit builds assert np.int32 in sctypes['int'] assert np.intc in sctypes['int'] assert np.int64 in sctypes['int'] @@ -495,38 +490,6 @@ def test_ulong(self): assert np.dtype(np.ulong).itemsize == np.dtype(np.long).itemsize -@pytest.mark.filterwarnings("ignore:.*maximum_sctype.*:DeprecationWarning") -class TestMaximumSctype: - - # note that parametrizing with sctype['int'] and similar would skip types - # with the same size (gh-11923) - - @pytest.mark.parametrize( - 't', [np.byte, np.short, np.intc, np.long, np.longlong] - ) - def test_int(self, t): - assert_equal(maximum_sctype(t), np._core.sctypes['int'][-1]) - - @pytest.mark.parametrize( - 't', [np.ubyte, np.ushort, np.uintc, np.ulong, np.ulonglong] - ) - def test_uint(self, t): - assert_equal(maximum_sctype(t), np._core.sctypes['uint'][-1]) - - @pytest.mark.parametrize('t', [np.half, np.single, np.double, np.longdouble]) - def test_float(self, t): - 
assert_equal(maximum_sctype(t), np._core.sctypes['float'][-1]) - - @pytest.mark.parametrize('t', [np.csingle, np.cdouble, np.clongdouble]) - def test_complex(self, t): - assert_equal(maximum_sctype(t), np._core.sctypes['complex'][-1]) - - @pytest.mark.parametrize('t', [np.bool, np.object_, np.str_, np.bytes_, - np.void]) - def test_other(self, t): - assert_equal(maximum_sctype(t), t) - - class Test_sctype2char: # This function is old enough that we're really just documenting the quirks # at this point. @@ -570,13 +533,14 @@ def test_issctype(rep, expected): # ensure proper identification of scalar # data-types by issctype() actual = issctype(rep) + assert type(actual) is bool assert_equal(actual, expected) -@pytest.mark.skipif(sys.flags.optimize > 1, - reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1") -@pytest.mark.xfail(IS_PYPY, - reason="PyPy cannot modify tp_doc after PyType_Ready") +@pytest.mark.skipif( + sys.flags.optimize > 1, + reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1", +) class TestDocStrings: def test_platform_dependent_aliases(self): if np.int64 is np.int_: @@ -614,6 +578,35 @@ def test_names_are_undersood_by_dtype(self, t): assert np.dtype(t.__name__).type is t +class TestScalarTypeOrder: + @pytest.mark.parametrize(('a', 'b'), [ + # signedinteger + (np.byte, np.short), + (np.short, np.intc), + (np.intc, np.long), + (np.long, np.longlong), + # unsignedinteger + (np.ubyte, np.ushort), + (np.ushort, np.uintc), + (np.uintc, np.ulong), + (np.ulong, np.ulonglong), + # floating + (np.half, np.single), + (np.single, np.double), + (np.double, np.longdouble), + # complexfloating + (np.csingle, np.cdouble), + (np.cdouble, np.clongdouble), + # flexible + (np.bytes_, np.str_), + (np.str_, np.void), + # bouncy castles + (np.datetime64, np.timedelta64), + ]) + def test_stable_ordering(self, a: type[np.generic], b: type[np.generic]): + assert np.ScalarType.index(a) <= np.ScalarType.index(b) + + 
class TestBoolDefinition: def test_bool_definition(self): assert nt.bool is np.bool diff --git a/numpy/_core/tests/test_overrides.py b/numpy/_core/tests/test_overrides.py index 025cd001ff0a..ebcf2f0ce112 100644 --- a/numpy/_core/tests/test_overrides.py +++ b/numpy/_core/tests/test_overrides.py @@ -1,19 +1,21 @@ import inspect -import sys import os +import pickle +import sys import tempfile from io import StringIO from unittest import mock -import pickle import pytest import numpy as np -from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_raises_regex) from numpy._core.overrides import ( - _get_implementing_args, array_function_dispatch, - verify_matching_signatures) + _get_implementing_args, + array_function_dispatch, + verify_matching_signatures, +) +from numpy.testing import assert_, assert_equal, assert_raises, assert_raises_regex +from numpy.testing.overrides import get_overridable_numpy_array_functions def _return_not_implemented(self, *args, **kwargs): @@ -133,7 +135,7 @@ class D: assert_equal(_get_implementing_args([a, c, b]), [c, b, a]) def test_too_many_duck_arrays(self): - namespace = dict(__array_function__=_return_not_implemented) + namespace = {'__array_function__': _return_not_implemented} types = [type('A' + str(i), (object,), namespace) for i in range(65)] relevant_args = [t() for t in types] @@ -194,14 +196,22 @@ class OverrideSub(np.ndarray): assert_equal(result, expected.view(OverrideSub)) def test_no_wrapper(self): - # This shouldn't happen unless a user intentionally calls - # __array_function__ with invalid arguments, but check that we raise - # an appropriate error all the same. + # Regular numpy functions have wrappers, but do not presume + # all functions do (array creation ones do not): check that + # we just call the function in that case. 
array = np.array(1) - func = lambda x: x - with assert_raises_regex(AttributeError, '_implementation'): - array.__array_function__(func=func, types=(np.ndarray,), - args=(array,), kwargs={}) + func = lambda x: x * 2 + result = array.__array_function__(func=func, types=(np.ndarray,), + args=(array,), kwargs={}) + assert_equal(result, array * 2) + + def test_wrong_arguments(self): + # Check our implementation guards against wrong arguments. + a = np.array([1, 2]) + with pytest.raises(TypeError, match="args must be a tuple"): + a.__array_function__(np.reshape, (np.ndarray,), a, (2, 1)) + with pytest.raises(TypeError, match="kwargs must be a dict"): + a.__array_function__(np.reshape, (np.ndarray,), (a,), (2, 1)) class TestArrayFunctionDispatch: @@ -466,7 +476,6 @@ def func(*args): func(*objs) - class TestNDArrayMethods: def test_repr(self): @@ -510,8 +519,10 @@ def test_sum_on_mock_array(self): class ArrayProxy: def __init__(self, value): self.value = value + def __array_function__(self, *args, **kwargs): return self.value.__array_function__(*args, **kwargs) + def __array__(self, *args, **kwargs): return self.value.__array__(*args, **kwargs) @@ -539,8 +550,8 @@ def __array_function__(self, func, types, args, kwargs): class TestArrayLike: - def setup_method(self): - class MyArray(): + def _create_MyArray(self): + class MyArray: def __init__(self, function=None): self.function = function @@ -552,13 +563,22 @@ def __array_function__(self, func, types, args, kwargs): return NotImplemented return my_func(*args, **kwargs) - self.MyArray = MyArray + return MyArray - class MyNoArrayFunctionArray(): + def _create_MyNoArrayFunctionArray(self): + class MyNoArrayFunctionArray: def __init__(self, function=None): self.function = function - self.MyNoArrayFunctionArray = MyNoArrayFunctionArray + return MyNoArrayFunctionArray + + def _create_MySubclass(self): + class MySubclass(np.ndarray): + def __array_function__(self, func, types, args, kwargs): + result = 
super().__array_function__(func, types, args, kwargs) + return result.view(self.__class__) + + return MySubclass def add_method(self, name, arr_class, enable_value_error=False): def _definition(*args, **kwargs): @@ -575,9 +595,10 @@ def func_args(*args, **kwargs): return args, kwargs def test_array_like_not_implemented(self): - self.add_method('array', self.MyArray) + MyArray = self._create_MyArray() + self.add_method('array', MyArray) - ref = self.MyArray.array() + ref = MyArray.array() with assert_raises_regex(TypeError, 'no implementation found'): array_like = np.asarray(1, like=ref) @@ -604,18 +625,40 @@ def test_array_like_not_implemented(self): delimiter=',')), ] + def test_nep35_functions_as_array_functions(self,): + all_array_functions = get_overridable_numpy_array_functions() + like_array_functions_subset = { + getattr(np, func_name) for func_name, *_ in self.__class__._array_tests + } + assert like_array_functions_subset.issubset(all_array_functions) + + nep35_python_functions = { + np.eye, np.fromfunction, np.full, np.genfromtxt, + np.identity, np.loadtxt, np.ones, np.require, np.tri, + } + assert nep35_python_functions.issubset(all_array_functions) + + nep35_C_functions = { + np.arange, np.array, np.asanyarray, np.asarray, + np.ascontiguousarray, np.asfortranarray, np.empty, + np.frombuffer, np.fromfile, np.fromiter, np.fromstring, + np.zeros, + } + assert nep35_C_functions.issubset(all_array_functions) + @pytest.mark.parametrize('function, args, kwargs', _array_tests) @pytest.mark.parametrize('numpy_ref', [True, False]) def test_array_like(self, function, args, kwargs, numpy_ref): - self.add_method('array', self.MyArray) - self.add_method(function, self.MyArray) + MyArray = self._create_MyArray() + self.add_method('array', MyArray) + self.add_method(function, MyArray) np_func = getattr(np, function) - my_func = getattr(self.MyArray, function) + my_func = getattr(MyArray, function) if numpy_ref is True: ref = np.array(1) else: - ref = 
self.MyArray.array() + ref = MyArray.array() like_args = tuple(a() if callable(a) else a for a in args) array_like = np_func(*like_args, **kwargs, like=ref) @@ -633,19 +676,20 @@ def test_array_like(self, function, args, kwargs, numpy_ref): assert_equal(array_like, np_arr) else: - assert type(array_like) is self.MyArray + assert type(array_like) is MyArray assert array_like.function is my_func @pytest.mark.parametrize('function, args, kwargs', _array_tests) @pytest.mark.parametrize('ref', [1, [1], "MyNoArrayFunctionArray"]) def test_no_array_function_like(self, function, args, kwargs, ref): - self.add_method('array', self.MyNoArrayFunctionArray) - self.add_method(function, self.MyNoArrayFunctionArray) + MyNoArrayFunctionArray = self._create_MyNoArrayFunctionArray() + self.add_method('array', MyNoArrayFunctionArray) + self.add_method(function, MyNoArrayFunctionArray) np_func = getattr(np, function) # Instantiate ref if it's the MyNoArrayFunctionArray class if ref == "MyNoArrayFunctionArray": - ref = self.MyNoArrayFunctionArray.array() + ref = MyNoArrayFunctionArray.array() like_args = tuple(a() if callable(a) else a for a in args) @@ -653,15 +697,30 @@ def test_no_array_function_like(self, function, args, kwargs, ref): 'The `like` argument must be an array-like that implements'): np_func(*like_args, **kwargs, like=ref) + @pytest.mark.parametrize('function, args, kwargs', _array_tests) + def test_subclass(self, function, args, kwargs): + MySubclass = self._create_MySubclass() + ref = np.array(1).view(MySubclass) + np_func = getattr(np, function) + like_args = tuple(a() if callable(a) else a for a in args) + array_like = np_func(*like_args, **kwargs, like=ref) + assert type(array_like) is MySubclass + if np_func is np.empty: + return + np_args = tuple(a() if callable(a) else a for a in args) + np_arr = np_func(*np_args, **kwargs) + assert_equal(array_like.view(np.ndarray), np_arr) + @pytest.mark.parametrize('numpy_ref', [True, False]) def 
test_array_like_fromfile(self, numpy_ref): - self.add_method('array', self.MyArray) - self.add_method("fromfile", self.MyArray) + MyArray = self._create_MyArray() + self.add_method('array', MyArray) + self.add_method("fromfile", MyArray) if numpy_ref is True: ref = np.array(1) else: - ref = self.MyArray.array() + ref = MyArray.array() data = np.random.random(5) @@ -676,13 +735,14 @@ def test_array_like_fromfile(self, numpy_ref): assert_equal(np_res, data) assert_equal(array_like, np_res) else: - assert type(array_like) is self.MyArray - assert array_like.function is self.MyArray.fromfile + assert type(array_like) is MyArray + assert array_like.function is MyArray.fromfile def test_exception_handling(self): - self.add_method('array', self.MyArray, enable_value_error=True) + MyArray = self._create_MyArray() + self.add_method('array', MyArray, enable_value_error=True) - ref = self.MyArray.array() + ref = MyArray.array() with assert_raises(TypeError): # Raises the error about `value_error` being invalid first @@ -690,8 +750,9 @@ def test_exception_handling(self): @pytest.mark.parametrize('function, args, kwargs', _array_tests) def test_like_as_none(self, function, args, kwargs): - self.add_method('array', self.MyArray) - self.add_method(function, self.MyArray) + MyArray = self._create_MyArray() + self.add_method('array', MyArray) + self.add_method(function, MyArray) np_func = getattr(np, function) like_args = tuple(a() if callable(a) else a for a in args) @@ -709,7 +770,7 @@ def test_like_as_none(self, function, args, kwargs): def test_function_like(): # We provide a `__get__` implementation, make sure it works - assert type(np.mean) is np._core._multiarray_umath._ArrayFunctionDispatcher + assert type(np.mean) is np._core._multiarray_umath._ArrayFunctionDispatcher class MyClass: def __array__(self, dtype=None, copy=None): diff --git a/numpy/_core/tests/test_print.py b/numpy/_core/tests/test_print.py index 7f16449704a1..95a177b57a7d 100644 --- 
a/numpy/_core/tests/test_print.py +++ b/numpy/_core/tests/test_print.py @@ -1,13 +1,11 @@ import sys +from io import StringIO import pytest import numpy as np -from numpy.testing import assert_, assert_equal, IS_MUSL from numpy._core.tests._locales import CommaDecimalPointLocale - - -from io import StringIO +from numpy.testing import IS_MUSL, assert_, assert_equal _REF = {np.inf: 'inf', -np.inf: '-inf', np.nan: 'nan'} @@ -23,15 +21,15 @@ def test_float_types(tp): """ for x in [0, 1, -1, 1e20]: assert_equal(str(tp(x)), str(float(x)), - err_msg='Failed str formatting for type %s' % tp) + err_msg=f'Failed str formatting for type {tp}') if tp(1e16).itemsize > 4: assert_equal(str(tp(1e16)), str(float('1e16')), - err_msg='Failed str formatting for type %s' % tp) + err_msg=f'Failed str formatting for type {tp}') else: ref = '1e+16' assert_equal(str(tp(1e16)), ref, - err_msg='Failed str formatting for type %s' % tp) + err_msg=f'Failed str formatting for type {tp}') @pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble]) @@ -45,7 +43,7 @@ def test_nan_inf_float(tp): """ for x in [np.inf, -np.inf, np.nan]: assert_equal(str(tp(x)), _REF[x], - err_msg='Failed str formatting for type %s' % tp) + err_msg=f'Failed str formatting for type {tp}') @pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble]) @@ -59,19 +57,19 @@ def test_complex_types(tp): """ for x in [0, 1, -1, 1e20]: assert_equal(str(tp(x)), str(complex(x)), - err_msg='Failed str formatting for type %s' % tp) - assert_equal(str(tp(x*1j)), str(complex(x*1j)), - err_msg='Failed str formatting for type %s' % tp) - assert_equal(str(tp(x + x*1j)), str(complex(x + x*1j)), - err_msg='Failed str formatting for type %s' % tp) + err_msg=f'Failed str formatting for type {tp}') + assert_equal(str(tp(x * 1j)), str(complex(x * 1j)), + err_msg=f'Failed str formatting for type {tp}') + assert_equal(str(tp(x + x * 1j)), str(complex(x + x * 1j)), + err_msg=f'Failed str formatting for type {tp}') if 
tp(1e16).itemsize > 8: assert_equal(str(tp(1e16)), str(complex(1e16)), - err_msg='Failed str formatting for type %s' % tp) + err_msg=f'Failed str formatting for type {tp}') else: ref = '(1e+16+0j)' assert_equal(str(tp(1e16)), ref, - err_msg='Failed str formatting for type %s' % tp) + err_msg=f'Failed str formatting for type {tp}') @pytest.mark.parametrize('dtype', [np.complex64, np.cdouble, np.clongdouble]) @@ -116,9 +114,10 @@ def _test_redirected_print(x, tp, ref=None): sys.stdout = stdout assert_equal(file.getvalue(), file_tp.getvalue(), - err_msg='print failed for type%s' % tp) + err_msg=f'print failed for type{tp}') +@pytest.mark.thread_unsafe(reason="sys.stdout not thread-safe") @pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble]) def test_float_type_print(tp): """Check formatting when using print """ @@ -129,12 +128,13 @@ def test_float_type_print(tp): _test_redirected_print(float(x), tp, _REF[x]) if tp(1e16).itemsize > 4: - _test_redirected_print(float(1e16), tp) + _test_redirected_print(1e16, tp) else: ref = '1e+16' - _test_redirected_print(float(1e16), tp, ref) + _test_redirected_print(1e16, tp, ref) +@pytest.mark.thread_unsafe(reason="sys.stdout not thread-safe") @pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble]) def test_complex_type_print(tp): """Check formatting when using print """ @@ -170,14 +170,14 @@ def test_scalar_format(): ('{0:g}', 1.5, np.float32), ('{0:g}', 1.5, np.float64), ('{0:g}', 1.5, np.longdouble), - ('{0:g}', 1.5+0.5j, np.complex64), - ('{0:g}', 1.5+0.5j, np.complex128), - ('{0:g}', 1.5+0.5j, np.clongdouble)] + ('{0:g}', 1.5 + 0.5j, np.complex64), + ('{0:g}', 1.5 + 0.5j, np.complex128), + ('{0:g}', 1.5 + 0.5j, np.clongdouble)] for (fmat, val, valtype) in tests: try: assert_equal(fmat.format(val), fmat.format(valtype(val)), - "failed with val %s, type %s" % (val, valtype)) + f"failed with val {val}, type {valtype}") except ValueError as e: assert_(False, "format raised exception (fmt='%s', 
val=%s, type=%s, exc='%s')" % @@ -191,12 +191,12 @@ def test_scalar_format(): class TestCommaDecimalPointLocale(CommaDecimalPointLocale): def test_locale_single(self): - assert_equal(str(np.float32(1.2)), str(float(1.2))) + assert_equal(str(np.float32(1.2)), str(1.2)) def test_locale_double(self): - assert_equal(str(np.double(1.2)), str(float(1.2))) + assert_equal(str(np.double(1.2)), str(1.2)) @pytest.mark.skipif(IS_MUSL, reason="test flaky on musllinux") def test_locale_longdouble(self): - assert_equal(str(np.longdouble('1.2')), str(float(1.2))) + assert_equal(str(np.longdouble('1.2')), str(1.2)) diff --git a/numpy/_core/tests/test_protocols.py b/numpy/_core/tests/test_protocols.py index 1709629fa89b..96bb600843dc 100644 --- a/numpy/_core/tests/test_protocols.py +++ b/numpy/_core/tests/test_protocols.py @@ -1,5 +1,7 @@ -import pytest import warnings + +import pytest + import numpy as np @@ -24,7 +26,7 @@ def __getattr__(self, name): return getattr(self.array, name) def __repr__(self): - return "".format(self=self) + return f"" array = Wrapper(np.arange(10)) with pytest.raises(UserWarning, match="object got converted"): @@ -38,7 +40,6 @@ class Wrapper: def __array__(self, dtype=None, copy=None): return np.array([self.val], dtype=dtype, copy=copy) - wrapped = Wrapper() arr = np.array(wrapped, dtype=str) assert arr.dtype == 'U100' diff --git a/numpy/_core/tests/test_records.py b/numpy/_core/tests/test_records.py index 975bb322f87c..80f76a865eda 100644 --- a/numpy/_core/tests/test_records.py +++ b/numpy/_core/tests/test_records.py @@ -1,17 +1,21 @@ import collections.abc +import pickle import textwrap from io import BytesIO from os import path from pathlib import Path -import pickle import pytest import numpy as np from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_array_almost_equal, - assert_raises, temppath, - ) + assert_, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + temppath, +) class 
TestFromrecords: @@ -114,9 +118,9 @@ def test_recarray_from_obj(self): mine = np.rec.fromarrays([a, b, c], names='date,data1,data2') for i in range(len(a)): - assert_((mine.date[i] == list(range(1, 10)))) - assert_((mine.data1[i] == 0.0)) - assert_((mine.data2[i] == 0.0)) + assert_(mine.date[i] == list(range(1, 10))) + assert_(mine.data1[i] == 0.0) + assert_(mine.data2[i] == 0.0) def test_recarray_repr(self): a = np.array([(1, 0.1), (2, 0.2)], @@ -146,7 +150,7 @@ def test_0d_recarray_repr(self): dtype=[('f0', 'data contains non-zero floats x = np.array([123456789e199], dtype=np.float64) - if IS_PYPY: - x.resize((m, 0), refcheck=False) - else: - x.resize((m, 0)) + x.resize((m, 0)) y = np.array([123456789e199], dtype=np.float64) - if IS_PYPY: - y.resize((0, n), refcheck=False) - else: - y.resize((0, n)) + y.resize((0, n)) # `dot` should just return zero (m, n) matrix z = np.dot(x, y) @@ -1337,8 +1350,8 @@ def test_array_from_sequence_scalar_array2(self): def test_array_too_big(self): # Ticket #1080. - assert_raises(ValueError, np.zeros, [975]*7, np.int8) - assert_raises(ValueError, np.zeros, [26244]*5, np.int8) + assert_raises(ValueError, np.zeros, [975] * 7, np.int8) + assert_raises(ValueError, np.zeros, [26244] * 5, np.int8) def test_dtype_keyerrors_(self): # Ticket #1106. @@ -1424,8 +1437,8 @@ def test_misaligned_dot_product_objects(self): def test_byteswap_complex_scalar(self): # Ticket #1259 and gh-441 - for dtype in [np.dtype('<'+t) for t in np.typecodes['Complex']]: - z = np.array([2.2-1.1j], dtype) + for dtype in [np.dtype('<' + t) for t in np.typecodes['Complex']]: + z = np.array([2.2 - 1.1j], dtype) x = z[0] # always native-endian y = x.byteswap() if x.dtype.byteorder == z.dtype.byteorder: @@ -1446,22 +1459,6 @@ def test_structured_arrays_with_objects1(self): x[x.nonzero()] = x.ravel()[:1] assert_(x[0, 1] == x[0, 0]) - @pytest.mark.skipif( - sys.version_info >= (3, 12), - reason="Python 3.12 has immortal refcounts, this test no longer works." 
- ) - @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") - def test_structured_arrays_with_objects2(self): - # Ticket #1299 second test - stra = 'aaaa' - strb = 'bbbb' - numb = sys.getrefcount(strb) - numa = sys.getrefcount(stra) - x = np.array([[(0, stra), (1, strb)]], 'i8,O') - x[x.nonzero()] = x.ravel()[:1] - assert_(sys.getrefcount(strb) == numb) - assert_(sys.getrefcount(stra) == numa + 2) - def test_duplicate_title_and_name(self): # Ticket #1254 dtspec = [(('a', 'a'), 'i'), ('b', 'i')] @@ -1501,8 +1498,7 @@ def test_fromiter_comparison(self): assert_(np.all(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) def test_fromstring_crash(self): - # Ticket #1345: the following should not cause a crash - with assert_warns(DeprecationWarning): + with assert_raises(ValueError): np.fromstring(b'aa, aa, 1.0', sep=',') def test_ticket_1539(self): @@ -1524,11 +1520,11 @@ def test_ticket_1539(self): if d != 0: failures.append((x, y)) if failures: - raise AssertionError("Failures: %r" % failures) + raise AssertionError(f"Failures: {failures!r}") def test_ticket_1538(self): x = np.finfo(np.float32) - for name in 'eps epsneg max min resolution tiny'.split(): + for name in ('eps', 'epsneg', 'max', 'min', 'resolution', 'tiny'): assert_equal(type(getattr(x, name)), np.float32, err_msg=name) @@ -1570,8 +1566,7 @@ class Subclass(np.ndarray): @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") def test_take_refcount(self): # ticket #939 - a = np.arange(16, dtype=float) - a.shape = (4, 4) + a = np.arange(16, dtype=float).reshape((4, 4)) lut = np.ones((5 + 3, 4), float) rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype) c1 = sys.getrefcount(rgba) @@ -1583,49 +1578,45 @@ def test_take_refcount(self): assert_equal(c1, c2) def test_fromfile_tofile_seeks(self): - # On Python 3, tofile/fromfile used to get (#1610) the Python - # file handle out of sync - f0 = tempfile.NamedTemporaryFile() - f = f0.file - f.write(np.arange(255, dtype='u1').tobytes()) 
+ # tofile/fromfile used to get (#1610) the Python file handle out of sync + with tempfile.NamedTemporaryFile() as f: + f.write(np.arange(255, dtype='u1').tobytes()) - f.seek(20) - ret = np.fromfile(f, count=4, dtype='u1') - assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1')) - assert_equal(f.tell(), 24) + f.seek(20) + ret = np.fromfile(f, count=4, dtype='u1') + assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1')) + assert_equal(f.tell(), 24) - f.seek(40) - np.array([1, 2, 3], dtype='u1').tofile(f) - assert_equal(f.tell(), 43) + f.seek(40) + np.array([1, 2, 3], dtype='u1').tofile(f) + assert_equal(f.tell(), 43) - f.seek(40) - data = f.read(3) - assert_equal(data, b"\x01\x02\x03") + f.seek(40) + data = f.read(3) + assert_equal(data, b"\x01\x02\x03") - f.seek(80) - f.read(4) - data = np.fromfile(f, dtype='u1', count=4) - assert_equal(data, np.array([84, 85, 86, 87], dtype='u1')) - - f.close() + f.seek(80) + f.read(4) + data = np.fromfile(f, dtype='u1', count=4) + assert_equal(data, np.array([84, 85, 86, 87], dtype='u1')) def test_complex_scalar_warning(self): for tp in [np.csingle, np.cdouble, np.clongdouble]: - x = tp(1+2j) - assert_warns(ComplexWarning, float, x) - with suppress_warnings() as sup: - sup.filter(ComplexWarning) + x = tp(1 + 2j) + pytest.warns(ComplexWarning, float, x) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', ComplexWarning) assert_equal(float(x), float(x.real)) def test_complex_scalar_complex_cast(self): for tp in [np.csingle, np.cdouble, np.clongdouble]: - x = tp(1+2j) - assert_equal(complex(x), 1+2j) + x = tp(1 + 2j) + assert_equal(complex(x), 1 + 2j) def test_complex_boolean_cast(self): # Ticket #2218 for tp in [np.csingle, np.cdouble, np.clongdouble]: - x = np.array([0, 0+0.5j, 0.5+0j], dtype=tp) + x = np.array([0, 0 + 0.5j, 0.5 + 0j], dtype=tp) assert_equal(x.astype(bool), np.array([0, 1, 1], dtype=bool)) assert_(np.any(x)) assert_(np.all(x[1:])) @@ -1635,7 +1626,7 @@ def test_uint_int_conversion(self): 
assert_equal(int(np.uint64(x)), x) def test_duplicate_field_names_assign(self): - ra = np.fromiter(((i*3, i*2) for i in range(10)), dtype='i8,f8') + ra = np.fromiter(((i * 3, i * 2) for i in range(10)), dtype='i8,f8') ra.dtype.names = ('f1', 'f2') repr(ra) # should not cause a segmentation fault assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1')) @@ -1649,7 +1640,7 @@ def test_eq_string_and_object_array(self): def test_nonzero_byteswap(self): a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32) - a.dtype = np.float32 + a = a.view(np.float32) assert_equal(a.nonzero()[0], [1]) a = a.byteswap() a = a.view(a.dtype.newbyteorder()) @@ -1725,17 +1716,17 @@ def __new__(cls, def squeeze(self): return super().squeeze() - oldsqueeze = OldSqueeze(np.array([[1],[2],[3]])) + oldsqueeze = OldSqueeze(np.array([[1], [2], [3]])) # if no axis argument is specified the old API # expectation should give the correct result assert_equal(np.squeeze(oldsqueeze), - np.array([1,2,3])) + np.array([1, 2, 3])) # likewise, axis=None should work perfectly well # with the old API expectation assert_equal(np.squeeze(oldsqueeze, axis=None), - np.array([1,2,3])) + np.array([1, 2, 3])) # however, specification of any particular axis # should raise a TypeError in the context of the @@ -1761,7 +1752,7 @@ def squeeze(self): # attempting to squeeze an axis that is not # of length 1 with assert_raises(ValueError): - np.squeeze(np.array([[1],[2],[3]]), axis=0) + np.squeeze(np.array([[1], [2], [3]]), axis=0) def test_reduce_contiguous(self): # GitHub issue #387 @@ -1771,7 +1762,7 @@ def test_reduce_contiguous(self): assert_(a.flags.f_contiguous) assert_(b.flags.c_contiguous) - @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") + @requires_deep_recursion def test_object_array_self_reference(self): # Object arrays with references to themselves can cause problems a = np.array(0, dtype=object) @@ -1780,7 +1771,7 @@ def test_object_array_self_reference(self): 
assert_raises(RecursionError, float, a) a[()] = None - @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") + @requires_deep_recursion def test_object_array_circular_reference(self): # Test the same for a circular reference. a = np.array(0, dtype=object) @@ -1802,7 +1793,7 @@ def test_object_array_nested(self): a = np.array(0, dtype=object) b = np.array(0, dtype=object) a[()] = b - assert_equal(int(a), int(0)) + assert_equal(int(a), int(0)) # noqa: UP018 assert_equal(float(a), float(0)) def test_object_array_self_copy(self): @@ -1853,15 +1844,15 @@ def test_string_astype(self): def test_ticket_1756(self): # Ticket #1756 s = b'0123456789abcdef' - a = np.array([s]*5) + a = np.array([s] * 5) for i in range(1, 17): a1 = np.array(a, "|S%d" % i) - a2 = np.array([s[:i]]*5) + a2 = np.array([s[:i]] * 5) assert_equal(a1, a2) def test_fields_strides(self): "gh-2355" - r = np.frombuffer(b'abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2') + r = np.frombuffer(b'abcdefghijklmnop' * 4 * 3, dtype='i4,(2,3)u2') assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2]) assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1']) assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()]) @@ -1871,7 +1862,8 @@ def test_alignment_update(self): # Check that alignment flag is updated on stride setting a = np.arange(10) assert_(a.flags.aligned) - a.strides = 3 + with pytest.warns(DeprecationWarning): + a.strides = 3 assert_(not a.flags.aligned) def test_ticket_1770(self): @@ -1900,10 +1892,10 @@ def test_pickle_string_overwrite(self): data = pickle.loads(blob) # Check that loads does not clobber interned strings - s = re.sub("a(.)", "\x01\\1", "a_") + s = re.sub(r"a(.)", "\x01\\1", "a_") assert_equal(s[0], "\x01") data[0] = 0x6a - s = re.sub("a(.)", "\x01\\1", "a_") + s = re.sub(r"a(.)", "\x01\\1", "a_") assert_equal(s[0], "\x01") def test_pickle_bytes_overwrite(self): @@ -1914,18 +1906,30 @@ def test_pickle_bytes_overwrite(self): bytestring = "\x01 ".encode('ascii') assert_equal(bytestring[0:1], 
'\x01'.encode('ascii')) + @pytest.mark.filterwarnings( + "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning", + ) + @pytest.mark.xfail("LSAN_OPTIONS" in os.environ, reason="known leak", run=False) def test_pickle_py2_array_latin1_hack(self): # Check that unpickling hacks in Py3 that support # encoding='latin1' work correctly. # Python2 output for pickle.dumps(numpy.array([129], dtype='b')) - data = b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'i1'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nNNNI-1\nI-1\nI0\ntp12\nbI00\nS'\\x81'\np13\ntp14\nb." # noqa + data = ( + b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\n" + b"ndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\n" + b"cnumpy\ndtype\np7\n(S'i1'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'" + b"\np11\nNNNI-1\nI-1\nI0\ntp12\nbI00\nS'\\x81'\np13\ntp14\nb." + ) # This should work: result = pickle.loads(data, encoding='latin1') assert_array_equal(result, np.array([129]).astype('b')) # Should not segfault: assert_raises(Exception, pickle.loads, data, encoding='koi8-r') + @pytest.mark.filterwarnings( + "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning", + ) def test_pickle_py2_scalar_latin1_hack(self): # Check that scalar unpickling hack in Py3 that supports # encoding='latin1' work correctly. 
@@ -1934,16 +1938,16 @@ def test_pickle_py2_scalar_latin1_hack(self): datas = [ # (original, python2_pickle, koi8r_validity) (np.str_('\u6bd2'), - b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\nI0\ntp6\nbS'\\xd2k\\x00\\x00'\np7\ntp8\nRp9\n.", # noqa + b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\nI0\ntp6\nbS'\\xd2k\\x00\\x00'\np7\ntp8\nRp9\n.", 'invalid'), (np.float64(9e123), - b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'f8'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI-1\nI-1\nI0\ntp6\nbS'O\\x81\\xb7Z\\xaa:\\xabY'\np7\ntp8\nRp9\n.", # noqa + b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'f8'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI-1\nI-1\nI0\ntp6\nbS'O\\x81\\xb7Z\\xaa:\\xabY'\np7\ntp8\nRp9\n.", 'invalid'), # different 8-bit code point in KOI8-R vs latin1 (np.bytes_(b'\x9c'), - b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'S1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'|'\np5\nNNNI1\nI1\nI0\ntp6\nbS'\\x9c'\np7\ntp8\nRp9\n.", # noqa + b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'S1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'|'\np5\nNNNI1\nI1\nI0\ntp6\nbS'\\x9c'\np7\ntp8\nRp9\n.", 'different'), ] for original, data, koi8r_validity in datas: @@ -1999,7 +2003,6 @@ def test_assign_obj_listoflists(self): a[...] 
= [[1, 2]] assert_equal(a, [[1, 2], [1, 2]]) - @pytest.mark.slow_pypy def test_memoryleak(self): # Ticket #1917 - ensure that array data doesn't leak for i in range(1000): @@ -2045,9 +2048,9 @@ def test_string_truncation_ucs2(self): def test_unique_stable(self): # Ticket #2063 must always choose stable sort for argsort to # get consistent results - v = np.array(([0]*5 + [1]*6 + [2]*6)*4) + v = np.array(([0] * 5 + [1] * 6 + [2] * 6) * 4) res = np.unique(v, return_index=True) - tgt = (np.array([0, 1, 2]), np.array([ 0, 5, 11])) + tgt = (np.array([0, 1, 2]), np.array([0, 5, 11])) assert_equal(res, tgt) def test_unicode_alloc_dealloc_match(self): @@ -2124,7 +2127,7 @@ def test_format_on_flex_array_element(self): # Ticket #4369. dt = np.dtype([('date', ' 2 ** 31 c_arr = np.ctypeslib.as_ctypes(arr) @@ -2468,21 +2461,22 @@ def test_complex_conversion_error(self): def test__array_interface__descr(self): # gh-17068 - dt = np.dtype(dict(names=['a', 'b'], - offsets=[0, 0], - formats=[np.int64, np.int64])) + dt = np.dtype({'names': ['a', 'b'], + 'offsets': [0, 0], + 'formats': [np.int64, np.int64]}) descr = np.array((1, 1), dtype=dt).__array_interface__['descr'] assert descr == [('', '|V8')] # instead of [(b'', '|V8')] @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python') @requires_memory(free_bytes=9e9) + @pytest.mark.thread_unsafe(reason="crashes with low memory") def test_dot_big_stride(self): # gh-17111 # blas stride = stride//itemsize > int32 max int32_max = np.iinfo(np.int32).max n = int32_max + 3 a = np.empty([n], dtype=np.float32) - b = a[::n-1] + b = a[::n - 1] b[...] 
= 1 assert b.strides[0] > int32_max * b.dtype.itemsize assert np.dot(b, b) == 2.0 @@ -2540,7 +2534,6 @@ def test_nonbool_logical(self): expected = np.ones(size, dtype=np.bool) assert_array_equal(np.logical_and(a, b), expected) - @pytest.mark.skipif(IS_PYPY, reason="PyPy issue 2742") def test_gh_23737(self): with pytest.raises(TypeError, match="not an acceptable base type"): class Y(np.flexible): @@ -2554,7 +2547,10 @@ def test_load_ufunc_pickle(self): # ufuncs are pickled with a semi-private path in # numpy.core._multiarray_umath and must be loadable without warning # despite np.core being deprecated. - test_data = b'\x80\x04\x95(\x00\x00\x00\x00\x00\x00\x00\x8c\x1cnumpy.core._multiarray_umath\x94\x8c\x03add\x94\x93\x94.' # noqa + test_data = ( + b'\x80\x04\x95(\x00\x00\x00\x00\x00\x00\x00\x8c\x1cnumpy.core.' + b'_multiarray_umath\x94\x8c\x03add\x94\x93\x94.' + ) result = pickle.loads(test_data, encoding='bytes') assert result is np.add @@ -2567,21 +2563,25 @@ def test__array_namespace__(self): assert xp is np xp = arr.__array_namespace__(api_version="2022.12") assert xp is np + xp = arr.__array_namespace__(api_version="2023.12") + assert xp is np + xp = arr.__array_namespace__(api_version="2024.12") + assert xp is np xp = arr.__array_namespace__(api_version=None) assert xp is np with pytest.raises( ValueError, - match="Version \"2023.12\" of the Array API Standard " + match="Version \"2025.12\" of the Array API Standard " "is not supported." 
): - arr.__array_namespace__(api_version="2023.12") + arr.__array_namespace__(api_version="2025.12") with pytest.raises( ValueError, match="Only None and strings are allowed as the Array API version" ): - arr.__array_namespace__(api_version=2023) + arr.__array_namespace__(api_version=2024) def test_isin_refcnt_bug(self): # gh-25295 @@ -2620,3 +2620,38 @@ def test_vectorize_fixed_width_string(self): f = str.casefold res = np.vectorize(f, otypes=[arr.dtype])(arr) assert res.dtype == "U30" + + def test_repeated_square_consistency(self): + # gh-26940 + buf = np.array([-5.171866611150749e-07 + 2.5618634555957426e-07j, + 0, 0, 0, 0, 0]) + # Test buffer with regular and reverse strides + for in_vec in [buf[:3], buf[:3][::-1]]: + expected_res = np.square(in_vec) + # Output vector immediately follows input vector + # to reproduce off-by-one in nomemoverlap check. + for res in [buf[3:], buf[3:][::-1]]: + res = buf[3:] + np.square(in_vec, out=res) + assert_equal(res, expected_res) + + def test_sort_unique_crash(self): + # gh-27037 + for _ in range(4): + vals = np.linspace(0, 1, num=128) + data = np.broadcast_to(vals, (128, 128, 128)) + data = data.transpose(0, 2, 1).copy() + np.unique(data) + + def test_sort_overlap(self): + # gh-27273 + size = 100 + inp = np.linspace(0, size, num=size, dtype=np.intc) + out = np.sort(inp) + assert_equal(inp, out) + + def test_searchsorted_structured(self): + # gh-28190 + x = np.array([(0, 1.)], dtype=[('time', ' None: + def test_abc(self, cls: type[np.number]) -> None: alias = cls[Any] assert isinstance(alias, types.GenericAlias) assert alias.__origin__ is cls @@ -164,15 +166,19 @@ def test_abc_complexfloating_subscript_tuple(self, arg_len: int) -> None: np.complexfloating[arg_tup] @pytest.mark.parametrize("cls", [np.generic, np.flexible, np.character]) - def test_abc_non_numeric(self, cls: Type[np.generic]) -> None: + def test_abc_non_numeric(self, cls: type[np.generic]) -> None: with pytest.raises(TypeError): cls[Any] 
@pytest.mark.parametrize("code", np.typecodes["All"]) def test_concrete(self, code: str) -> None: cls = np.dtype(code).type - with pytest.raises(TypeError): - cls[Any] + if cls in {np.bool, np.datetime64}: + # these are intentionally subscriptable + assert cls[Any] + else: + with pytest.raises(TypeError): + cls[Any] @pytest.mark.parametrize("arg_len", range(4)) def test_subscript_tuple(self, arg_len: int) -> None: @@ -186,15 +192,19 @@ def test_subscript_tuple(self, arg_len: int) -> None: def test_subscript_scalar(self) -> None: assert np.number[Any] + @pytest.mark.parametrize("subscript", [Literal[True], Literal[False]]) + def test_subscript_bool(self, subscript: Literal[True, False]) -> None: + assert isinstance(np.bool[subscript], types.GenericAlias) + class TestBitCount: # derived in part from the cpython test "test_bit_count" - @pytest.mark.parametrize("itype", sctypes['int']+sctypes['uint']) + @pytest.mark.parametrize("itype", sctypes['int'] + sctypes['uint']) def test_small(self, itype): for a in range(max(np.iinfo(itype).min, 0), 128): msg = f"Smoke test for {itype}({a}).bit_count()" - assert itype(a).bit_count() == bin(a).count("1"), msg + assert itype(a).bit_count() == a.bit_count(), msg def test_bit_count(self): for exp in [10, 17, 63]: @@ -203,3 +213,115 @@ def test_bit_count(self): assert np.uint64(a - 1).bit_count() == exp assert np.uint64(a ^ 63).bit_count() == 7 assert np.uint64((a - 1) ^ 510).bit_count() == exp - 8 + + +class TestDevice: + """ + Test scalar.device attribute and scalar.to_device() method. 
+ """ + scalars = [np.bool(True), np.int64(1), np.uint64(1), np.float64(1.0), + np.complex128(1 + 1j)] + + @pytest.mark.parametrize("scalar", scalars) + def test_device(self, scalar): + assert scalar.device == "cpu" + + @pytest.mark.parametrize("scalar", scalars) + def test_to_device(self, scalar): + assert scalar.to_device("cpu") is scalar + + @pytest.mark.parametrize("scalar", scalars) + def test___array_namespace__(self, scalar): + assert scalar.__array_namespace__() is np + + +@pytest.mark.parametrize("scalar", [np.bool(True), np.int8(1), np.float64(1)]) +def test_array_wrap(scalar): + # Test scalars array wrap as long as it exists. NumPy itself should + # probably not use it, so it may not be necessary to keep it around. + + arr0d = np.array(3, dtype=np.int8) + # Third argument not passed, None, or True "decays" to scalar. + # (I don't think NumPy would pass `None`, but it seems clear to support) + assert type(scalar.__array_wrap__(arr0d)) is np.int8 + assert type(scalar.__array_wrap__(arr0d, None, None)) is np.int8 + assert type(scalar.__array_wrap__(arr0d, None, True)) is np.int8 + + # Otherwise, result should be the input + assert scalar.__array_wrap__(arr0d, None, False) is arr0d + + # An old bug. 
A non 0-d array cannot be converted to scalar: + arr1d = np.array([3], dtype=np.int8) + assert scalar.__array_wrap__(arr1d) is arr1d + assert scalar.__array_wrap__(arr1d, None, True) is arr1d + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +class TestSignature: + # test that scalar types have a valid __text_signature__ or __signature__ set + @pytest.mark.parametrize( + "sctype", + [ + *sctypes["int"], + *sctypes["uint"], + *sctypes["float"], + *sctypes["complex"], + *sctypes["others"], + np.datetime64, + np.timedelta64, + ], + ) + def test_constructor_signatures(self, sctype: type[np.generic]): + try: + sig = inspect.signature(sctype) + except ValueError: + pytest.fail(f"missing signature: {sctype}") + + assert sig.parameters + + @pytest.mark.parametrize( + "sctype", + [np.integer, *sctypes["int"], *sctypes["uint"], *sctypes["float"]], + ) + def test_method_signatures_is_integer(self, sctype: type[np.integer | np.floating]): + try: + sig = inspect.signature(sctype.is_integer) + except ValueError: + pytest.fail(f"missing signature: {sctype.__name__}.is_integer") + + assert len(sig.parameters) == 1 + assert sig.parameters["self"].kind == inspect.Parameter.POSITIONAL_ONLY + + @pytest.mark.parametrize("sctype", sctypes["float"]) + def test_method_signatures_as_integer_ratio(self, sctype: type[np.floating]): + try: + sig = inspect.signature(sctype.as_integer_ratio) + except ValueError: + pytest.fail(f"missing signature: {sctype.__name__}.as_integer_ratio") + + assert len(sig.parameters) == 1 + assert sig.parameters["self"].kind == inspect.Parameter.POSITIONAL_ONLY + + @pytest.mark.parametrize( + "method_name", + [ + "__array_namespace__", "__copy__", "__deepcopy__", "all", "any", "argmax", + "argmin", "argsort", "astype", "byteswap", "choose", "clip", "compress", + "conj", "conjugate", "copy", "cumprod", "cumsum", "diagonal", "dump", + "dumps", "fill", "flatten", "getfield", "item", "max", "mean", "min", + "nonzero", "prod", "put", 
"ravel", "repeat", "reshape", "resize", "round", + "searchsorted", "setfield", "setflags", "sort", "squeeze", "std", "sum", + "swapaxes", "take", "to_device", "tobytes", "tofile", "tolist", "trace", + "transpose", "var", "view", + ], + ) + def test_array_scalar_method_signatures(self, method_name: str): + # methods shared by np.generic and np.ndarray should have the same signature + fn_generic = getattr(np.generic, method_name) + sig_generic = inspect.signature(fn_generic) + assert "self" in sig_generic.parameters + assert sig_generic.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY + + fn_ndarray = getattr(np.ndarray, method_name) + sig_ndarray = inspect.signature(fn_ndarray) + assert sig_generic == sig_ndarray diff --git a/numpy/_core/tests/test_scalarbuffer.py b/numpy/_core/tests/test_scalarbuffer.py index 26cf39530f65..4d2744b85e53 100644 --- a/numpy/_core/tests/test_scalarbuffer.py +++ b/numpy/_core/tests/test_scalarbuffer.py @@ -1,11 +1,11 @@ """ Test scalar buffer interface adheres to PEP 3118 """ -import numpy as np -from numpy._core._rational_tests import rational -from numpy._core._multiarray_tests import get_buffer_info import pytest +import numpy as np +from numpy._core._multiarray_tests import get_buffer_info +from numpy._core._rational_tests import rational from numpy.testing import assert_, assert_equal, assert_raises # PEP3118 format strings for native (standard alignment and byteorder) types @@ -55,8 +55,8 @@ def test_scalar_dim(self, scalar): @pytest.mark.parametrize('scalar, code', scalars_and_codes, ids=codes_only) def test_scalar_code_and_properties(self, scalar, code): x = scalar() - expected = dict(strides=(), itemsize=x.dtype.itemsize, ndim=0, - shape=(), format=code, readonly=True) + expected = {'strides': (), 'itemsize': x.dtype.itemsize, 'ndim': 0, + 'shape': (), 'format': code, 'readonly': True} mv_x = memoryview(x) assert self._as_dict(mv_x) == expected @@ -93,8 +93,8 @@ def test_void_scalar_structured_data(self): 
get_buffer_info(x, ["WRITABLE"]) def _as_dict(self, m): - return dict(strides=m.strides, shape=m.shape, itemsize=m.itemsize, - ndim=m.ndim, format=m.format, readonly=m.readonly) + return {'strides': m.strides, 'shape': m.shape, 'itemsize': m.itemsize, + 'ndim': m.ndim, 'format': m.format, 'readonly': m.readonly} def test_datetime_memoryview(self): # gh-11656 @@ -102,8 +102,8 @@ def test_datetime_memoryview(self): dt1 = np.datetime64('2016-01-01') dt2 = np.datetime64('2017-01-01') - expected = dict(strides=(1,), itemsize=1, ndim=1, shape=(8,), - format='B', readonly=True) + expected = {'strides': (1,), 'itemsize': 1, 'ndim': 1, 'shape': (8,), + 'format': 'B', 'readonly': True} v = memoryview(dt1) assert self._as_dict(v) == expected @@ -128,8 +128,8 @@ def test_str_ucs4(self, s): s = np.str_(s) # only our subclass implements the buffer protocol # all the same, characters always encode as ucs4 - expected = dict(strides=(), itemsize=8, ndim=0, shape=(), format='2w', - readonly=True) + expected = {'strides': (), 'itemsize': 8, 'ndim': 0, 'shape': (), + 'format': '2w', 'readonly': True} v = memoryview(s) assert self._as_dict(v) == expected diff --git a/numpy/_core/tests/test_scalarinherit.py b/numpy/_core/tests/test_scalarinherit.py index f9c574d5798e..746a1574782a 100644 --- a/numpy/_core/tests/test_scalarinherit.py +++ b/numpy/_core/tests/test_scalarinherit.py @@ -54,6 +54,13 @@ def test_gh_15395(self): with pytest.raises(TypeError): B1(1.0, 2.0) + def test_int_repr(self): + # Test that integer repr works correctly for subclasses (gh-27106) + class my_int16(np.int16): + pass + + s = repr(my_int16(3)) + assert s == "my_int16(3)" class TestCharacter: def test_char_radd(self): @@ -86,7 +93,7 @@ class MyBytes(bytes, np.generic): pass ret = s + MyBytes(b'abc') - assert(type(ret) is type(s)) + assert type(ret) is type(s) assert ret == b"defabc" def test_char_repeat(self): diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py index 
3517023cb5f0..2f01a1dea16c 100644 --- a/numpy/_core/tests/test_scalarmath.py +++ b/numpy/_core/tests/test_scalarmath.py @@ -1,22 +1,28 @@ import contextlib -import sys -import warnings import itertools import operator import platform -from numpy._utils import _pep440 +import sys +import warnings + import pytest from hypothesis import given, settings -from hypothesis.strategies import sampled_from from hypothesis.extra import numpy as hynp +from hypothesis.strategies import sampled_from import numpy as np +from numpy._core._rational_tests import rational +from numpy._utils import _pep440 from numpy.exceptions import ComplexWarning from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_almost_equal, - assert_array_equal, IS_PYPY, suppress_warnings, _gen_alignment_data, - assert_warns, _SUPPORTS_SVE, - ) + _gen_alignment_data, + assert_, + assert_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + check_support_sve, +) types = [np.bool, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc, np.int_, np.uint, np.longlong, np.ulonglong, @@ -26,12 +32,15 @@ floating_types = np.floating.__subclasses__() complex_floating_types = np.complexfloating.__subclasses__() -objecty_things = [object(), None] +objecty_things = [object(), None, np.array(None, dtype=object)] -reasonable_operators_for_scalars = [ +binary_operators_for_scalars = [ operator.lt, operator.le, operator.eq, operator.ne, operator.ge, operator.gt, operator.add, operator.floordiv, operator.mod, - operator.mul, operator.pow, operator.sub, operator.truediv, + operator.mul, operator.pow, operator.sub, operator.truediv +] +binary_operators_for_scalar_ints = binary_operators_for_scalars + [ + operator.xor, operator.or_, operator.and_ ] @@ -41,7 +50,7 @@ class TestTypes: def test_types(self): for atype in types: a = atype(1) - assert_(a == 1, "error with %r: got %r" % (atype, a)) + assert_(a == 1, f"error with {atype!r}: got {a!r}") def test_type_add(self): # list of types @@ 
-64,7 +73,7 @@ def test_type_add(self): (k, np.dtype(atype).char, l, np.dtype(btype).char)) def test_type_create(self): - for k, atype in enumerate(types): + for atype in types: a = np.array([1, 2, 3], atype) b = atype([1, 2, 3]) assert_equal(a, b) @@ -108,7 +117,7 @@ def check_ufunc_scalar_equivalence(op, arr1, arr2): @pytest.mark.slow @settings(max_examples=10000, deadline=2000) -@given(sampled_from(reasonable_operators_for_scalars), +@given(sampled_from(binary_operators_for_scalars), hynp.arrays(dtype=hynp.scalar_dtypes(), shape=()), hynp.arrays(dtype=hynp.scalar_dtypes(), shape=())) def test_array_scalar_ufunc_equivalence(op, arr1, arr2): @@ -121,7 +130,7 @@ def test_array_scalar_ufunc_equivalence(op, arr1, arr2): @pytest.mark.slow -@given(sampled_from(reasonable_operators_for_scalars), +@given(sampled_from(binary_operators_for_scalars), hynp.scalar_dtypes(), hynp.scalar_dtypes()) def test_array_scalar_ufunc_dtypes(op, dt1, dt2): # Same as above, but don't worry about sampling weird values so that we @@ -147,7 +156,7 @@ def test_int_float_promotion_truediv(fscalar): class TestBaseMath: - @pytest.mark.xfail(_SUPPORTS_SVE, reason="gh-22982") + @pytest.mark.xfail(check_support_sve(), reason="gh-22982") def test_blocked(self): # test alignments offsets for simd instructions # alignments for vz + 2 * (vs - 1) + 1 @@ -167,11 +176,11 @@ def test_blocked(self): inp2[...] += np.arange(inp2.size, dtype=dt) + 1 assert_almost_equal(np.square(inp2), - np.multiply(inp2, inp2), err_msg=msg) + np.multiply(inp2, inp2), err_msg=msg) # skip true divide for ints if dt != np.int32: assert_almost_equal(np.reciprocal(inp2), - np.divide(1, inp2), err_msg=msg) + np.divide(1, inp2), err_msg=msg) inp1[...] 
= np.ones_like(inp1) np.add(inp1, 2, out=out) @@ -198,13 +207,13 @@ def test_small_types(self): for t in [np.int8, np.int16, np.float16]: a = t(3) b = a ** 4 - assert_(b == 81, "error with %r: got %r" % (t, b)) + assert_(b == 81, f"error with {t!r}: got {b!r}") def test_large_types(self): for t in [np.int32, np.int64, np.float32, np.float64, np.longdouble]: a = t(51) b = a ** 4 - msg = "error with %r: got %r" % (t, b) + msg = f"error with {t!r}: got {b!r}" if np.issubdtype(t, np.integer): assert_(b == 6765201, msg) else: @@ -255,8 +264,7 @@ def test_mixed_types(self): a = t1(3) b = t2(2) result = a**b - msg = ("error with %r and %r:" - "got %r, expected %r") % (t1, t2, result, 9) + msg = f"error with {t1!r} and {t2!r}:got {result!r}, expected {9!r}" if np.issubdtype(np.dtype(result), np.integer): assert_(result == 9, msg) else: @@ -267,7 +275,7 @@ def test_modular_power(self): a = 5 b = 4 c = 10 - expected = pow(a, b, c) # noqa: F841 + expected = pow(a, b, c) for t in (np.int32, np.float32, np.complex64): # note that 3-operand power only dispatches on the first argument assert_raises(TypeError, operator.pow, t(a), b, c) @@ -294,10 +302,10 @@ def test_modulus_basic(self): for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)): fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s' msg = fmt % (op.__name__, dt1, dt2, sg1, sg2) - a = np.array(sg1*71, dtype=dt1)[()] - b = np.array(sg2*19, dtype=dt2)[()] + a = np.array(sg1 * 71, dtype=dt1)[()] + b = np.array(sg2 * 19, dtype=dt2)[()] div, rem = op(a, b) - assert_equal(div*b + rem, a, err_msg=msg) + assert_equal(div * b + rem, a, err_msg=msg) if sg2 == -1: assert_(b < rem <= 0, msg) else: @@ -311,7 +319,7 @@ def test_float_modulus_exact(self): dividend = nlst + [0] + plst divisor = nlst + plst arg = list(itertools.product(dividend, divisor)) - tgt = list(divmod(*t) for t in arg) + tgt = [divmod(*t) for t in arg] a, b = np.array(arg, dtype=int).T # convert exact integer results from Python to float so that @@ -322,11 
+330,11 @@ def test_float_modulus_exact(self): for op in [floordiv_and_mod, divmod]: for dt in np.typecodes['Float']: - msg = 'op: %s, dtype: %s' % (op.__name__, dt) + msg = f'op: {op.__name__}, dtype: {dt}' fa = a.astype(dt) fb = b.astype(dt) # use list comprehension so a_ and b_ are scalars - div, rem = zip(*[op(a_, b_) for a_, b_ in zip(fa, fb)]) + div, rem = zip(*[op(a_, b_) for a_, b_ in zip(fa, fb)]) assert_equal(div, tgtdiv, err_msg=msg) assert_equal(rem, tgtrem, err_msg=msg) @@ -338,11 +346,11 @@ def test_float_modulus_roundoff(self): for sg1, sg2 in itertools.product((+1, -1), (+1, -1)): fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s' msg = fmt % (op.__name__, dt1, dt2, sg1, sg2) - a = np.array(sg1*78*6e-8, dtype=dt1)[()] - b = np.array(sg2*6e-8, dtype=dt2)[()] + a = np.array(sg1 * 78 * 6e-8, dtype=dt1)[()] + b = np.array(sg2 * 6e-8, dtype=dt2)[()] div, rem = op(a, b) # Equal assertion should hold when fmod is used - assert_equal(div*b + rem, a, err_msg=msg) + assert_equal(div * b + rem, a, err_msg=msg) if sg2 == -1: assert_(b < rem <= 0, msg) else: @@ -354,31 +362,26 @@ def test_float_modulus_corner_cases(self): b = np.array(1.0, dtype=dt) a = np.nextafter(np.array(0.0, dtype=dt), -b) rem = operator.mod(a, b) - assert_(rem <= b, 'dt: %s' % dt) + assert_(rem <= b, f'dt: {dt}') rem = operator.mod(-a, -b) - assert_(rem >= -b, 'dt: %s' % dt) + assert_(rem >= -b, f'dt: {dt}') # Check nans, inf - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in remainder") - sup.filter(RuntimeWarning, "divide by zero encountered in remainder") - sup.filter(RuntimeWarning, "divide by zero encountered in floor_divide") - sup.filter(RuntimeWarning, "divide by zero encountered in divmod") - sup.filter(RuntimeWarning, "invalid value encountered in divmod") + with warnings.catch_warnings(), np.errstate(all='ignore'): for dt in np.typecodes['Float']: fone = np.array(1.0, dtype=dt) fzer = np.array(0.0, dtype=dt) finf = np.array(np.inf, 
dtype=dt) fnan = np.array(np.nan, dtype=dt) rem = operator.mod(fone, fzer) - assert_(np.isnan(rem), 'dt: %s' % dt) + assert_(np.isnan(rem), f'dt: {dt}') # MSVC 2008 returns NaN here, so disable the check. #rem = operator.mod(fone, finf) #assert_(rem == fone, 'dt: %s' % dt) rem = operator.mod(fone, fnan) - assert_(np.isnan(rem), 'dt: %s' % dt) + assert_(np.isnan(rem), f'dt: {dt}') rem = operator.mod(finf, fone) - assert_(np.isnan(rem), 'dt: %s' % dt) + assert_(np.isnan(rem), f'dt: {dt}') for op in [floordiv_and_mod, divmod]: div, mod = op(fone, fzer) assert_(np.isinf(div)) and assert_(np.isnan(mod)) @@ -393,6 +396,15 @@ def test_inplace_floordiv_handling(self): match=r"Cannot cast ufunc 'floor_divide' output from"): a //= b +class TestComparison: + def test_comparision_different_types(self): + x = np.array(1) + y = np.array('s') + eq = x == y + neq = x != y + assert eq is np.bool_(False) + assert neq is np.bool_(True) + class TestComplexDivision: def test_zero_division(self): @@ -400,17 +412,17 @@ def test_zero_division(self): for t in [np.complex64, np.complex128]: a = t(0.0) b = t(1.0) - assert_(np.isinf(b/a)) + assert_(np.isinf(b / a)) b = t(complex(np.inf, np.inf)) - assert_(np.isinf(b/a)) + assert_(np.isinf(b / a)) b = t(complex(np.inf, np.nan)) - assert_(np.isinf(b/a)) + assert_(np.isinf(b / a)) b = t(complex(np.nan, np.inf)) - assert_(np.isinf(b/a)) + assert_(np.isinf(b / a)) b = t(complex(np.nan, np.nan)) - assert_(np.isnan(b/a)) + assert_(np.isnan(b / a)) b = t(0.) 
- assert_(np.isnan(b/a)) + assert_(np.isnan(b / a)) def test_signed_zeros(self): with np.errstate(all="ignore"): @@ -418,14 +430,14 @@ def test_signed_zeros(self): # tupled (numerator, denominator, expected) # for testing as expected == numerator/denominator data = ( - (( 0.0,-1.0), ( 0.0, 1.0), (-1.0,-0.0)), - (( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)), - (( 0.0,-1.0), (-0.0,-1.0), ( 1.0, 0.0)), - (( 0.0,-1.0), (-0.0, 1.0), (-1.0, 0.0)), - (( 0.0, 1.0), ( 0.0,-1.0), (-1.0, 0.0)), - (( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)), - ((-0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)), - ((-0.0, 1.0), ( 0.0,-1.0), (-1.0,-0.0)) + (( 0.0, -1.0), ( 0.0, 1.0), (-1.0, -0.0)), + (( 0.0, -1.0), ( 0.0, -1.0), ( 1.0, -0.0)), + (( 0.0, -1.0), (-0.0, -1.0), ( 1.0, 0.0)), + (( 0.0, -1.0), (-0.0, 1.0), (-1.0, 0.0)), + (( 0.0, 1.0), ( 0.0, -1.0), (-1.0, 0.0)), + (( 0.0, -1.0), ( 0.0, -1.0), ( 1.0, -0.0)), + ((-0.0, -1.0), ( 0.0, -1.0), ( 1.0, -0.0)), + ((-0.0, 1.0), ( 0.0, -1.0), (-1.0, -0.0)) ) for cases in data: n = cases[0] @@ -442,7 +454,7 @@ def test_branches(self): for t in [np.complex64, np.complex128]: # tupled (numerator, denominator, expected) # for testing as expected == numerator/denominator - data = list() + data = [] # trigger branch: real(fabs(denom)) > imag(fabs(denom)) # followed by else condition as neither are == 0 @@ -453,7 +465,7 @@ def test_branches(self): # is performed in test_zero_division(), so this is skipped # trigger else if branch: real(fabs(denom)) < imag(fabs(denom)) - data.append((( 1.0, 2.0), ( 1.0, 2.0), (1.0, 0.0))) + data.append(((1.0, 2.0), (1.0, 2.0), (1.0, 0.0))) for cases in data: n = cases[0] @@ -503,21 +515,9 @@ def test_int_from_infinite_longdouble(self): # gh-627 x = np.longdouble(np.inf) assert_raises(OverflowError, int, x) - with suppress_warnings() as sup: - sup.record(ComplexWarning) + with pytest.warns(ComplexWarning): x = np.clongdouble(np.inf) assert_raises(OverflowError, int, x) - assert_equal(len(sup.log), 1) - - @pytest.mark.skipif(not IS_PYPY, 
reason="Test is PyPy only (gh-9972)") - def test_int_from_infinite_longdouble___int__(self): - x = np.longdouble(np.inf) - assert_raises(OverflowError, x.__int__) - with suppress_warnings() as sup: - sup.record(ComplexWarning) - x = np.clongdouble(np.inf) - assert_raises(OverflowError, x.__int__) - assert_equal(len(sup.log), 1) @pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble), reason="long double is same as double") @@ -541,58 +541,58 @@ def test_int_from_longdouble(self): def test_numpy_scalar_relational_operators(self): # All integer for dt1 in np.typecodes['AllInteger']: - assert_(1 > np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,)) - assert_(not 1 < np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,)) + assert_(1 > np.array(0, dtype=dt1)[()], f"type {dt1} failed") + assert_(not 1 < np.array(0, dtype=dt1)[()], f"type {dt1} failed") for dt2 in np.typecodes['AllInteger']: assert_(np.array(1, dtype=dt1)[()] > np.array(0, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) + f"type {dt1} and {dt2} failed") assert_(not np.array(1, dtype=dt1)[()] < np.array(0, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) + f"type {dt1} and {dt2} failed") - #Unsigned integers + # Unsigned integers for dt1 in 'BHILQP': - assert_(-1 < np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,)) - assert_(not -1 > np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,)) - assert_(-1 != np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,)) + assert_(-1 < np.array(1, dtype=dt1)[()], f"type {dt1} failed") + assert_(not -1 > np.array(1, dtype=dt1)[()], f"type {dt1} failed") + assert_(-1 != np.array(1, dtype=dt1)[()], f"type {dt1} failed") - #unsigned vs signed + # unsigned vs signed for dt2 in 'bhilqp': assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) + f"type {dt1} and {dt2} failed") assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) + f"type 
{dt1} and {dt2} failed") assert_(np.array(1, dtype=dt1)[()] != np.array(-1, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) + f"type {dt1} and {dt2} failed") - #Signed integers and floats + # Signed integers and floats for dt1 in 'bhlqp' + np.typecodes['Float']: - assert_(1 > np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,)) - assert_(not 1 < np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,)) - assert_(-1 == np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,)) + assert_(1 > np.array(-1, dtype=dt1)[()], f"type {dt1} failed") + assert_(not 1 < np.array(-1, dtype=dt1)[()], f"type {dt1} failed") + assert_(-1 == np.array(-1, dtype=dt1)[()], f"type {dt1} failed") for dt2 in 'bhlqp' + np.typecodes['Float']: assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) + f"type {dt1} and {dt2} failed") assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) + f"type {dt1} and {dt2} failed") assert_(np.array(-1, dtype=dt1)[()] == np.array(-1, dtype=dt2)[()], - "type %s and %s failed" % (dt1, dt2)) + f"type {dt1} and {dt2} failed") def test_scalar_comparison_to_none(self): # Scalars should just return False and not give a warnings. # The comparisons are flagged by pep8, ignore that. 
with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', FutureWarning) - assert_(not np.float32(1) == None) - assert_(not np.str_('test') == None) + assert_(not np.float32(1) == None) # noqa: E711 + assert_(not np.str_('test') == None) # noqa: E711 # This is dubious (see below): - assert_(not np.datetime64('NaT') == None) + assert_(not np.datetime64('NaT') == None) # noqa: E711 - assert_(np.float32(1) != None) - assert_(np.str_('test') != None) + assert_(np.float32(1) != None) # noqa: E711 + assert_(np.str_('test') != None) # noqa: E711 # This is dubious (see below): - assert_(np.datetime64('NaT') != None) + assert_(np.datetime64('NaT') != None) # noqa: E711 assert_(len(w) == 0) # For documentation purposes, this is why the datetime is dubious. @@ -615,18 +615,18 @@ def _test_type_repr(self, t): finfo = np.finfo(t) last_fraction_bit_idx = finfo.nexp + finfo.nmant last_exponent_bit_idx = finfo.nexp - storage_bytes = np.dtype(t).itemsize*8 + storage_bytes = np.dtype(t).itemsize * 8 # could add some more types to the list below for which in ['small denorm', 'small norm']: # Values from https://en.wikipedia.org/wiki/IEEE_754 - constr = np.array([0x00]*storage_bytes, dtype=np.uint8) + constr = np.array([0x00] * storage_bytes, dtype=np.uint8) if which == 'small denorm': byte = last_fraction_bit_idx // 8 - bytebit = 7-(last_fraction_bit_idx % 8) + bytebit = 7 - (last_fraction_bit_idx % 8) constr[byte] = 1 << bytebit elif which == 'small norm': byte = last_exponent_bit_idx // 8 - bytebit = 7-(last_exponent_bit_idx % 8) + bytebit = 7 - (last_exponent_bit_idx % 8) constr[byte] = 1 << bytebit else: raise ValueError('hmm') @@ -643,18 +643,16 @@ def test_float_repr(self): self._test_type_repr(t) -if not IS_PYPY: - # sys.getsizeof() is not valid on PyPy - class TestSizeOf: +class TestSizeOf: - def test_equal_nbytes(self): - for type in types: - x = type(0) - assert_(sys.getsizeof(x) > x.nbytes) + def test_equal_nbytes(self): + for type in types: + x 
= type(0) + assert_(sys.getsizeof(x) > x.nbytes) - def test_error(self): - d = np.float32() - assert_raises(TypeError, d.__sizeof__, "a") + def test_error(self): + d = np.float32() + assert_raises(TypeError, d.__sizeof__, "a") class TestMultiply: @@ -678,12 +676,8 @@ def test_seq_repeat(self): for numpy_type in deprecated_types: i = np.dtype(numpy_type).type() - assert_equal( - assert_warns(DeprecationWarning, operator.mul, seq, i), - seq * int(i)) - assert_equal( - assert_warns(DeprecationWarning, operator.mul, i, seq), - int(i) * seq) + with assert_raises(TypeError): + operator.mul(seq, i) for numpy_type in forbidden_types: i = np.dtype(numpy_type).type() @@ -716,8 +710,8 @@ def test_exceptions(self): def test_result(self): types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) for dt in types: a = np.ones((), dtype=dt)[()] if dt in np.typecodes['UnsignedInteger']: @@ -734,8 +728,8 @@ def test_exceptions(self): def test_result(self): types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) for dt in types: a = np.ones((), dtype=dt)[()] assert_equal(operator.sub(a, a), 0) @@ -756,8 +750,8 @@ def _test_abs_func(self, absfunc, test_dtype): x = test_dtype(np.finfo(test_dtype).max) assert_equal(absfunc(x), x.real) - with suppress_warnings() as sup: - sup.filter(UserWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) x = test_dtype(np.finfo(test_dtype).tiny) assert_equal(absfunc(x), x.real) @@ -814,8 +808,8 @@ def test_shift_all_bits(self, type_code, op): assert_equal(res_scl, 0) # Result on scalars should be the same as on arrays - val_arr = np.array([val_scl]*32, dtype=dt) - shift_arr = np.array([shift]*32, dtype=dt) + val_arr 
= np.array([val_scl] * 32, dtype=dt) + shift_arr = np.array([shift] * 32, dtype=dt) res_arr = op(val_arr, shift_arr) assert_equal(res_arr, res_scl) @@ -848,7 +842,7 @@ def test_float_and_complex_hashes(self, type_code): def test_complex_hashes(self, type_code): # Test some complex valued hashes specifically: scalar = np.dtype(type_code).type - for val in [np.pi+1j, np.inf-3j, 3j, 6.+1j]: + for val in [np.pi + 1j, np.inf - 3j, 3j, 6. + 1j]: numpy_val = scalar(val) assert hash(complex(numpy_val)) == hash(numpy_val) @@ -864,8 +858,9 @@ def recursionlimit(n): @given(sampled_from(objecty_things), - sampled_from(reasonable_operators_for_scalars), - sampled_from(types)) + sampled_from(binary_operators_for_scalar_ints), + sampled_from(types + [rational])) +@pytest.mark.thread_unsafe(reason="sets recursion limit globally") def test_operator_object_left(o, op, type_): try: with recursionlimit(200): @@ -875,8 +870,9 @@ def test_operator_object_left(o, op, type_): @given(sampled_from(objecty_things), - sampled_from(reasonable_operators_for_scalars), - sampled_from(types)) + sampled_from(binary_operators_for_scalar_ints), + sampled_from(types + [rational])) +@pytest.mark.thread_unsafe(reason="sets recursion limit globally") def test_operator_object_right(o, op, type_): try: with recursionlimit(200): @@ -885,7 +881,7 @@ def test_operator_object_right(o, op, type_): pass -@given(sampled_from(reasonable_operators_for_scalars), +@given(sampled_from(binary_operators_for_scalars), sampled_from(types), sampled_from(types)) def test_operator_scalars(op, type1, type2): @@ -895,7 +891,7 @@ def test_operator_scalars(op, type1, type2): pass -@pytest.mark.parametrize("op", reasonable_operators_for_scalars) +@pytest.mark.parametrize("op", binary_operators_for_scalars) @pytest.mark.parametrize("sctype", [np.longdouble, np.clongdouble]) def test_longdouble_operators_with_obj(sctype, op): # This is/used to be tricky, because NumPy generally falls back to @@ -908,6 +904,9 @@ def 
test_longdouble_operators_with_obj(sctype, op): # # That would recurse infinitely. Other scalars return the python object # on cast, so this type of things works OK. + # + # As of NumPy 2.1, this has been consolidated into the np.generic binops + # and now checks `.item()`. That also allows the below path to work now. try: op(sctype(3), None) except TypeError: @@ -918,7 +917,16 @@ def test_longdouble_operators_with_obj(sctype, op): pass -@pytest.mark.parametrize("op", reasonable_operators_for_scalars) +@pytest.mark.parametrize("op", [operator.add, operator.pow, operator.sub]) +@pytest.mark.parametrize("sctype", [np.longdouble, np.clongdouble]) +def test_longdouble_with_arrlike(sctype, op): + # As of NumPy 2.1, longdouble behaves like other types and can coerce + # e.g. lists. (Not necessarily better, but consistent.) + assert_array_equal(op(sctype(3), [1, 2]), op(3, np.array([1, 2]))) + assert_array_equal(op([1, 2], sctype(3)), op(np.array([1, 2]), 3)) + + +@pytest.mark.parametrize("op", binary_operators_for_scalars) @pytest.mark.parametrize("sctype", [np.longdouble, np.clongdouble]) @np.errstate(all="ignore") def test_longdouble_operators_with_large_int(sctype, op): @@ -1036,7 +1044,7 @@ def rop_func(self, other): # inheritance has to override, or this is correctly lost: res = op(myf_simple1(1), myf_simple2(2)) - assert type(res) == sctype or type(res) == np.bool + assert type(res) is sctype or type(res) is np.bool assert op(myf_simple1(1), myf_simple2(2)) == op(1, 2) # inherited # Two independent subclasses do not really define an order. This could @@ -1049,14 +1057,16 @@ def test_longdouble_complex(): # Simple test to check longdouble and complex combinations, since these # need to go through promotion, which longdouble needs to be careful about. 
x = np.longdouble(1) - assert x + 1j == 1+1j - assert 1j + x == 1+1j + assert x + 1j == 1 + 1j + assert 1j + x == 1 + 1j @pytest.mark.parametrize(["__op__", "__rop__", "op", "cmp"], ops_with_names) @pytest.mark.parametrize("subtype", [float, int, complex, np.float16]) -@np._no_nep50_warning() def test_pyscalar_subclasses(subtype, __op__, __rop__, op, cmp): + # This tests that python scalar subclasses behave like a float64 (if they + # don't override it). + # In an earlier version of NEP 50, they behaved like the Python buildins. def op_func(self, other): return __op__ @@ -1071,7 +1081,7 @@ def rop_func(self, other): assert op(myt(1), np.float64(2)) == __op__ assert op(np.float64(1), myt(2)) == __rop__ - if op in {operator.mod, operator.floordiv} and subtype == complex: + if op in {operator.mod, operator.floordiv} and subtype is complex: return # module is not support for complex. Do not test. if __rop__ == __op__: @@ -1079,25 +1089,28 @@ def rop_func(self, other): # When no deferring is indicated, subclasses are handled normally. myt = type("myt", (subtype,), {__rop__: rop_func}) + behaves_like = lambda x: np.array(subtype(x))[()] # Check for float32, as a float subclass float64 may behave differently res = op(myt(1), np.float16(2)) - expected = op(subtype(1), np.float16(2)) + expected = op(behaves_like(1), np.float16(2)) assert res == expected - assert type(res) == type(expected) + assert type(res) is type(expected) res = op(np.float32(2), myt(1)) - expected = op(np.float32(2), subtype(1)) + expected = op(np.float32(2), behaves_like(1)) assert res == expected - assert type(res) == type(expected) - - # Same check for longdouble: + assert type(res) is type(expected) + # Same check for longdouble (compare via dtype to accept float64 when + # longdouble has the identical size), which is currently not perfectly + # consistent. 
res = op(myt(1), np.longdouble(2)) - expected = op(subtype(1), np.longdouble(2)) + expected = op(behaves_like(1), np.longdouble(2)) assert res == expected - assert type(res) == type(expected) + assert np.dtype(type(res)) == np.dtype(type(expected)) res = op(np.float32(2), myt(1)) - expected = op(np.longdouble(2), subtype(1)) + expected = op(np.float32(2), behaves_like(1)) assert res == expected + assert np.dtype(type(res)) == np.dtype(type(expected)) def test_truediv_int(): @@ -1108,7 +1121,7 @@ def test_truediv_int(): @pytest.mark.slow @pytest.mark.parametrize("op", # TODO: Power is a bit special, but here mostly bools seem to behave oddly - [op for op in reasonable_operators_for_scalars if op is not operator.pow]) + [op for op in binary_operators_for_scalars if op is not operator.pow]) @pytest.mark.parametrize("sctype", types) @pytest.mark.parametrize("other_type", [float, int, complex]) @pytest.mark.parametrize("rop", [True, False]) @@ -1137,7 +1150,7 @@ def test_scalar_matches_array_op_with_pyscalar(op, sctype, other_type, rop): assert res == expected if isinstance(val1, float) and other_type is complex and rop: # Python complex accepts float subclasses, so we don't get a chance - # and the result may be a Python complelx (thus, the `np.array()``) + # and the result may be a Python complex (thus, the `np.array()``) assert np.array(res).dtype == expected.dtype else: assert res.dtype == expected.dtype diff --git a/numpy/_core/tests/test_scalarprint.py b/numpy/_core/tests/test_scalarprint.py index f47542ef779c..38ed7780f2e6 100644 --- a/numpy/_core/tests/test_scalarprint.py +++ b/numpy/_core/tests/test_scalarprint.py @@ -1,31 +1,30 @@ """ Test printing of scalar types. 
""" -import code import platform + import pytest -import sys -from tempfile import TemporaryFile import numpy as np -from numpy.testing import assert_, assert_equal, assert_raises, IS_MUSL +from numpy.testing import IS_MUSL, assert_, assert_equal, assert_raises + class TestRealScalars: def test_str(self): svals = [0.0, -0.0, 1, -1, np.inf, -np.inf, np.nan] styps = [np.float16, np.float32, np.float64, np.longdouble] wanted = [ - ['0.0', '0.0', '0.0', '0.0' ], + ['0.0', '0.0', '0.0', '0.0' ], # noqa: E202 ['-0.0', '-0.0', '-0.0', '-0.0'], - ['1.0', '1.0', '1.0', '1.0' ], + ['1.0', '1.0', '1.0', '1.0' ], # noqa: E202 ['-1.0', '-1.0', '-1.0', '-1.0'], - ['inf', 'inf', 'inf', 'inf' ], + ['inf', 'inf', 'inf', 'inf' ], # noqa: E202 ['-inf', '-inf', '-inf', '-inf'], - ['nan', 'nan', 'nan', 'nan']] + ['nan', 'nan', 'nan', 'nan' ]] # noqa: E202 for wants, val in zip(wanted, svals): for want, styp in zip(wants, styps): - msg = 'for str({}({}))'.format(np.dtype(styp).name, repr(val)) + msg = f'for str({np.dtype(styp).name}({val!r}))' assert_equal(str(styp(val)), want, err_msg=msg) def test_scalar_cutoffs(self): @@ -47,49 +46,33 @@ def check(v): check(1e15) check(1e16) - def test_py2_float_print(self): - # gh-10753 - # In python2, the python float type implements an obsolete method - # tp_print, which overrides tp_repr and tp_str when using "print" to - # output to a "real file" (ie, not a StringIO). Make sure we don't - # inherit it. - x = np.double(0.1999999999999) - with TemporaryFile('r+t') as f: - print(x, file=f) - f.seek(0) - output = f.read() - assert_equal(output, str(x) + '\n') - # In python2 the value float('0.1999999999999') prints with reduced - # precision as '0.2', but we want numpy's np.double('0.1999999999999') - # to print the unique value, '0.1999999999999'. 
- - # gh-11031 - # Only in the python2 interactive shell and when stdout is a "real" - # file, the output of the last command is printed to stdout without - # Py_PRINT_RAW (unlike the print statement) so `>>> x` and `>>> print - # x` are potentially different. Make sure they are the same. The only - # way I found to get prompt-like output is using an actual prompt from - # the 'code' module. Again, must use tempfile to get a "real" file. - - # dummy user-input which enters one line and then ctrl-Ds. - def userinput(): - yield 'np.sqrt(2)' - raise EOFError - gen = userinput() - input_func = lambda prompt="": next(gen) - - with TemporaryFile('r+t') as fo, TemporaryFile('r+t') as fe: - orig_stdout, orig_stderr = sys.stdout, sys.stderr - sys.stdout, sys.stderr = fo, fe - - code.interact(local={'np': np}, readfunc=input_func, banner='') - - sys.stdout, sys.stderr = orig_stdout, orig_stderr - - fo.seek(0) - capture = fo.read().strip() - - assert_equal(capture, repr(np.sqrt(2))) + test_cases_gh_28679 = [ + (np.half, -0.000099, "-9.9e-05"), + (np.half, 0.0001, "0.0001"), + (np.half, 999, "999.0"), + (np.half, -1000, "-1e+03"), + (np.single, 0.000099, "9.9e-05"), + (np.single, -0.000100001, "-0.000100001"), + (np.single, 999999, "999999.0"), + (np.single, -1000000, "-1e+06") + ] + + @pytest.mark.parametrize("dtype, input_val, expected_str", test_cases_gh_28679) + def test_gh_28679(self, dtype, input_val, expected_str): + # test cutoff to exponent notation for half and single + assert_equal(str(dtype(input_val)), expected_str) + + test_cases_legacy_2_2 = [ + (np.half(65504), "65500.0"), + (np.single(1.e15), "1000000000000000.0"), + (np.single(1.e16), "1e+16"), + ] + + @pytest.mark.parametrize("input_val, expected_str", test_cases_legacy_2_2) + def test_legacy_2_2_mode(self, input_val, expected_str): + # test legacy cutoff to exponent notation for half and single + with np.printoptions(legacy='2.2'): + assert_equal(str(input_val), expected_str) def test_dragon4(self): # these 
tests are adapted from Ryan Juckett's dragon4 implementation, @@ -124,7 +107,6 @@ def test_dragon4(self): assert_equal(fsci64('9.9999999999999694e-311', **preckwd(16)), '9.9999999999999694e-311') - # test rounding # 3.1415927410 is closest float32 to np.pi assert_equal(fpos32('3.14159265358979323846', **preckwd(10)), @@ -147,7 +129,6 @@ def test_dragon4(self): "3.14159265358979311599796346854418516159057617187500") assert_equal(fpos64('3.14159265358979323846'), "3.141592653589793") - # smallest numbers assert_equal(fpos32(0.5**(126 + 23), unique=False, precision=149), "0.00000000000000000000000000000000000000000000140129846432" @@ -260,53 +241,93 @@ def test_dragon4(self): assert_equal(fpos64('324', unique=False, precision=5, fractional=False), "324.00") - def test_dragon4_interface(self): - tps = [np.float16, np.float32, np.float64] + available_float_dtypes = [np.float16, np.float32, np.float64, np.float128]\ + if hasattr(np, 'float128') else [np.float16, np.float32, np.float64] + + @pytest.mark.parametrize("tp", available_float_dtypes) + def test_dragon4_positional_interface(self, tp): + # test is flaky for musllinux on np.float128 + if IS_MUSL and tp == np.float128: + pytest.skip("Skipping flaky test of float128 on musllinux") + + fpos = np.format_float_positional + + # test padding + assert_equal(fpos(tp('1.0'), pad_left=4, pad_right=4), " 1. ") + assert_equal(fpos(tp('-1.0'), pad_left=4, pad_right=4), " -1. 
") + assert_equal(fpos(tp('-10.2'), + pad_left=4, pad_right=4), " -10.2 ") + + # test fixed (non-unique) mode + assert_equal(fpos(tp('1.0'), unique=False, precision=4), "1.0000") + + @pytest.mark.parametrize("tp", available_float_dtypes) + def test_dragon4_positional_interface_trim(self, tp): # test is flaky for musllinux on np.float128 - if hasattr(np, 'float128') and not IS_MUSL: - tps.append(np.float128) + if IS_MUSL and tp == np.float128: + pytest.skip("Skipping flaky test of float128 on musllinux") fpos = np.format_float_positional + # test trimming + # trim of 'k' or '.' only affects non-unique mode, since unique + # mode will not output trailing 0s. + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='k'), + "1.0000") + + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='.'), + "1.") + assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='.'), + "1.2" if tp != np.float16 else "1.2002") + + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='0'), + "1.0") + assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='0'), + "1.2" if tp != np.float16 else "1.2002") + assert_equal(fpos(tp('1.'), trim='0'), "1.0") + + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='-'), + "1") + assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='-'), + "1.2" if tp != np.float16 else "1.2002") + assert_equal(fpos(tp('1.'), trim='-'), "1") + assert_equal(fpos(tp('1.001'), precision=1, trim='-'), "1") + + @pytest.mark.parametrize("tp", available_float_dtypes) + @pytest.mark.parametrize("pad_val", [10**5, np.iinfo("int32").max]) + def test_dragon4_positional_interface_overflow(self, tp, pad_val): + # test is flaky for musllinux on np.float128 + if IS_MUSL and tp == np.float128: + pytest.skip("Skipping flaky test of float128 on musllinux") + + fpos = np.format_float_positional + + # gh-28068 + with pytest.raises(RuntimeError, + match="Float formatting result too large"): + fpos(tp('1.047'), unique=False, 
precision=pad_val) + + with pytest.raises(RuntimeError, + match="Float formatting result too large"): + fpos(tp('1.047'), precision=2, pad_left=pad_val) + + with pytest.raises(RuntimeError, + match="Float formatting result too large"): + fpos(tp('1.047'), precision=2, pad_right=pad_val) + + @pytest.mark.parametrize("tp", available_float_dtypes) + def test_dragon4_scientific_interface(self, tp): + # test is flaky for musllinux on np.float128 + if IS_MUSL and tp == np.float128: + pytest.skip("Skipping flaky test of float128 on musllinux") + fsci = np.format_float_scientific - for tp in tps: - # test padding - assert_equal(fpos(tp('1.0'), pad_left=4, pad_right=4), " 1. ") - assert_equal(fpos(tp('-1.0'), pad_left=4, pad_right=4), " -1. ") - assert_equal(fpos(tp('-10.2'), - pad_left=4, pad_right=4), " -10.2 ") - - # test exp_digits - assert_equal(fsci(tp('1.23e1'), exp_digits=5), "1.23e+00001") - - # test fixed (non-unique) mode - assert_equal(fpos(tp('1.0'), unique=False, precision=4), "1.0000") - assert_equal(fsci(tp('1.0'), unique=False, precision=4), - "1.0000e+00") - - # test trimming - # trim of 'k' or '.' only affects non-unique mode, since unique - # mode will not output trailing 0s. 
- assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='k'), - "1.0000") - - assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='.'), - "1.") - assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='.'), - "1.2" if tp != np.float16 else "1.2002") - - assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='0'), - "1.0") - assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='0'), - "1.2" if tp != np.float16 else "1.2002") - assert_equal(fpos(tp('1.'), trim='0'), "1.0") - - assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='-'), - "1") - assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='-'), - "1.2" if tp != np.float16 else "1.2002") - assert_equal(fpos(tp('1.'), trim='-'), "1") - assert_equal(fpos(tp('1.001'), precision=1, trim='-'), "1") + # test exp_digits + assert_equal(fsci(tp('1.23e1'), exp_digits=5), "1.23e+00001") + + # test fixed (non-unique) mode + assert_equal(fsci(tp('1.0'), unique=False, precision=4), + "1.0000e+00") @pytest.mark.skipif(not platform.machine().startswith("ppc64"), reason="only applies to ppc float128 values") @@ -316,7 +337,7 @@ def test_ppc64_ibm_double_double128(self): # which happens when the first double is normal and the second is # subnormal. x = np.float128('2.123123123123123123123123123123123e-286') - got = [str(x/np.float128('2e' + str(i))) for i in range(0,40)] + got = [str(x / np.float128('2e' + str(i))) for i in range(40)] expected = [ "1.06156156156156156156156156156157e-286", "1.06156156156156156156156156156158e-287", @@ -363,7 +384,7 @@ def test_ppc64_ibm_double_double128(self): # Note: we follow glibc behavior, but it (or gcc) might not be right. 
# In particular we can get two values that print the same but are not # equal: - a = np.float128('2')/np.float128('3') + a = np.float128('2') / np.float128('3') b = np.float128(str(a)) assert_equal(str(a), str(b)) assert_(a != b) diff --git a/numpy/_core/tests/test_shape_base.py b/numpy/_core/tests/test_shape_base.py index 5b9de37f5f60..e8a842ba5589 100644 --- a/numpy/_core/tests/test_shape_base.py +++ b/numpy/_core/tests/test_shape_base.py @@ -1,16 +1,36 @@ +import sys + import pytest + import numpy as np from numpy._core import ( - array, arange, atleast_1d, atleast_2d, atleast_3d, block, vstack, hstack, - newaxis, concatenate, stack - ) + arange, + array, + atleast_1d, + atleast_2d, + atleast_3d, + block, + concatenate, + hstack, + newaxis, + stack, + vstack, +) +from numpy._core.shape_base import ( + _block_concatenate, + _block_dispatcher, + _block_setup, + _block_slicing, +) from numpy.exceptions import AxisError -from numpy._core.shape_base import (_block_dispatcher, _block_setup, - _block_concatenate, _block_slicing) from numpy.testing import ( - assert_, assert_raises, assert_array_equal, assert_equal, - assert_raises_regex, assert_warns, IS_PYPY - ) + assert_, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) +from numpy.testing._private.utils import requires_memory class TestAtleast1d: @@ -111,7 +131,7 @@ def test_2D_array(self): a = array([[1, 2], [1, 2]]) b = array([[2, 3], [2, 3]]) res = [atleast_3d(a), atleast_3d(b)] - desired = [a[:,:, newaxis], b[:,:, newaxis]] + desired = [a[:, :, newaxis], b[:, :, newaxis]] assert_array_equal(res, desired) def test_3D_array(self): @@ -154,9 +174,9 @@ def test_2D_array(self): def test_generator(self): with pytest.raises(TypeError, match="arrays to stack must be"): - hstack((np.arange(3) for _ in range(2))) + hstack(np.arange(3) for _ in range(2)) with pytest.raises(TypeError, match="arrays to stack must be"): - hstack(map(lambda x: x, np.ones((3, 2)))) + hstack(x for x in np.ones((3, 
2))) def test_casting_and_dtype(self): a = np.array([1, 2, 3]) @@ -164,7 +184,7 @@ def test_casting_and_dtype(self): res = np.hstack((a, b), casting="unsafe", dtype=np.int64) expected_res = np.array([1, 2, 3, 2, 3, 4]) assert_array_equal(res, expected_res) - + def test_casting_and_dtype_type_error(self): a = np.array([1, 2, 3]) b = np.array([2.5, 3.5, 4.5]) @@ -209,7 +229,7 @@ def test_2D_array2(self): def test_generator(self): with pytest.raises(TypeError, match="arrays to stack must be"): - vstack((np.arange(3) for _ in range(2))) + vstack(np.arange(3) for _ in range(2)) def test_casting_and_dtype(self): a = np.array([1, 2, 3]) @@ -217,13 +237,12 @@ def test_casting_and_dtype(self): res = np.vstack((a, b), casting="unsafe", dtype=np.int64) expected_res = np.array([[1, 2, 3], [2, 3, 4]]) assert_array_equal(res, expected_res) - + def test_casting_and_dtype_type_error(self): a = np.array([1, 2, 3]) b = np.array([2.5, 3.5, 4.5]) with pytest.raises(TypeError): vstack((a, b), casting="safe", dtype=np.int64) - class TestConcatenate: @@ -236,7 +255,7 @@ def test_returns_copy(self): def test_exceptions(self): # test axis must be in bounds for ndim in [1, 2, 3]: - a = np.ones((1,)*ndim) + a = np.ones((1,) * ndim) np.concatenate((a, a), axis=0) # OK assert_raises(AxisError, np.concatenate, (a, a), axis=ndim) assert_raises(AxisError, np.concatenate, (a, a), axis=-(ndim + 1)) @@ -262,9 +281,8 @@ def test_exceptions(self): assert_raises_regex( ValueError, "all the input array dimensions except for the concatenation axis " - "must match exactly, but along dimension {}, the array at " - "index 0 has size 1 and the array at index 1 has size 2" - .format(i), + f"must match exactly, but along dimension {i}, the array at " + "index 0 has size 1 and the array at index 1 has size 2", np.concatenate, (a, b), axis=axis[1]) assert_raises(ValueError, np.concatenate, (a, b), axis=axis[2]) a = np.moveaxis(a, -1, 0) @@ -274,6 +292,22 @@ def test_exceptions(self): # No arrays to concatenate 
raises ValueError assert_raises(ValueError, concatenate, ()) + @pytest.mark.slow + @pytest.mark.skipif( + sys.maxsize < 2**32, + reason="only problematic on 64bit platforms" + ) + @requires_memory(2 * np.iinfo(np.intc).max) + @pytest.mark.thread_unsafe(reason="crashes with low memory") + def test_huge_list_error(self): + a = np.array([1]) + max_int = np.iinfo(np.intc).max + arrs = (a,) * (max_int + 1) + msg = (fr"concatenate\(\) only supports up to {max_int} arrays" + f" but got {max_int + 1}.") + with pytest.raises(ValueError, match=msg): + np.concatenate(arrs) + def test_concatenate_axis_None(self): a = np.arange(4, dtype=np.float64).reshape((2, 2)) b = list(range(3)) @@ -349,12 +383,16 @@ def test_concatenate(self): assert_(out is rout) assert_equal(res, rout) - @pytest.mark.skipif(IS_PYPY, reason="PYPY handles sq_concat, nb_add differently than cpython") + def test_concatenate_same_value(self): + r4 = list(range(4)) + with pytest.raises(ValueError, match="^casting must be one of"): + concatenate([r4, r4], casting="same_value") + def test_operator_concat(self): import operator a = array([1, 2]) b = array([3, 4]) - n = [1,2] + n = [1, 2] res = array([1, 2, 3, 4]) assert_raises(TypeError, operator.concat, a, b) assert_raises(TypeError, operator.concat, a, n) @@ -367,8 +405,8 @@ def test_bad_out_shape(self): b = array([3, 4]) assert_raises(ValueError, concatenate, (a, b), out=np.empty(5)) - assert_raises(ValueError, concatenate, (a, b), out=np.empty((4,1))) - assert_raises(ValueError, concatenate, (a, b), out=np.empty((1,4))) + assert_raises(ValueError, concatenate, (a, b), out=np.empty((4, 1))) + assert_raises(ValueError, concatenate, (a, b), out=np.empty((1, 4))) concatenate((a, b), out=np.empty(4)) @pytest.mark.parametrize("axis", [None, 0]) @@ -438,7 +476,7 @@ def test_stack(): assert_array_equal(np.stack((a, b)), r1) assert_array_equal(np.stack((a, b), axis=1), r1.T) # all input types - assert_array_equal(np.stack(list([a, b])), r1) + 
assert_array_equal(np.stack([a, b]), r1) assert_array_equal(np.stack(array([a, b])), r1) # all shapes for 1d input arrays = [np.random.randn(3) for _ in range(10)] @@ -477,19 +515,52 @@ def test_stack(): # do not accept generators with pytest.raises(TypeError, match="arrays to stack must be"): - stack((x for x in range(3))) + stack(x for x in range(3)) - #casting and dtype test + # casting and dtype test a = np.array([1, 2, 3]) b = np.array([2.5, 3.5, 4.5]) res = np.stack((a, b), axis=1, casting="unsafe", dtype=np.int64) expected_res = np.array([[1, 2], [2, 3], [3, 4]]) assert_array_equal(res, expected_res) - #casting and dtype with TypeError + # casting and dtype with TypeError with assert_raises(TypeError): stack((a, b), dtype=np.int64, axis=1, casting="safe") +def test_unstack(): + a = np.arange(24).reshape((2, 3, 4)) + + for stacks in [np.unstack(a), + np.unstack(a, axis=0), + np.unstack(a, axis=-3)]: + assert isinstance(stacks, tuple) + assert len(stacks) == 2 + assert_array_equal(stacks[0], a[0]) + assert_array_equal(stacks[1], a[1]) + + for stacks in [np.unstack(a, axis=1), + np.unstack(a, axis=-2)]: + assert isinstance(stacks, tuple) + assert len(stacks) == 3 + assert_array_equal(stacks[0], a[:, 0]) + assert_array_equal(stacks[1], a[:, 1]) + assert_array_equal(stacks[2], a[:, 2]) + + for stacks in [np.unstack(a, axis=2), + np.unstack(a, axis=-1)]: + assert isinstance(stacks, tuple) + assert len(stacks) == 4 + assert_array_equal(stacks[0], a[:, :, 0]) + assert_array_equal(stacks[1], a[:, :, 1]) + assert_array_equal(stacks[2], a[:, :, 2]) + assert_array_equal(stacks[3], a[:, :, 3]) + + assert_raises(ValueError, np.unstack, a, axis=3) + assert_raises(ValueError, np.unstack, a, axis=-4) + assert_raises(ValueError, np.unstack, np.array(0), axis=0) + + @pytest.mark.parametrize("axis", [0]) @pytest.mark.parametrize("out_dtype", ["c8", "f4", "f8", ">f8", "i8"]) @pytest.mark.parametrize("casting", @@ -732,9 +803,10 @@ def test_block_with_mismatched_shape(self, 
block): assert_raises(ValueError, block, [a, b]) assert_raises(ValueError, block, [b, a]) - to_block = [[np.ones((2,3)), np.ones((2,2))], - [np.ones((2,2)), np.ones((2,2))]] + to_block = [[np.ones((2, 3)), np.ones((2, 2))], + [np.ones((2, 2)), np.ones((2, 2))]] assert_raises(ValueError, block, to_block) + def test_no_lists(self, block): assert_equal(block(1), np.array(1)) assert_equal(block(np.eye(3)), np.eye(3)) @@ -784,8 +856,8 @@ def test_different_ndims_depths(self, block): def test_block_memory_order(self, block): # 3D - arr_c = np.zeros((3,)*3, order='C') - arr_f = np.zeros((3,)*3, order='F') + arr_c = np.zeros((3,) * 3, order='C') + arr_f = np.zeros((3,) * 3, order='F') b_c = [[[arr_c, arr_c], [arr_c, arr_c]], diff --git a/numpy/_core/tests/test_simd.py b/numpy/_core/tests/test_simd.py index 9d472555edc4..335abc98c84e 100644 --- a/numpy/_core/tests/test_simd.py +++ b/numpy/_core/tests/test_simd.py @@ -1,10 +1,15 @@ # NOTE: Please avoid the use of numpy.testing since NPYV intrinsics # may be involved in their functionality. -import pytest, math, re import itertools +import math import operator -from numpy._core._simd import targets, clear_floatstatus, get_floatstatus +import re + +import pytest + from numpy._core._multiarray_umath import __cpu_baseline__ +from numpy._core._simd import clear_floatstatus, get_floatstatus, targets + def check_floatstatus(divbyzero=False, overflow=False, underflow=False, invalid=False, @@ -24,7 +29,7 @@ class _Test_Utility: # submodule of the desired SIMD extension, e.g. targets["AVX512F"] npyv = None # the current data type suffix e.g. 
's8' - sfx = None + sfx = None # target name can be 'baseline' or one or more of CPU features target_name = None @@ -116,7 +121,7 @@ def _cpu_features(self): if target == "baseline": target = __cpu_baseline__ else: - target = target.split('__') # multi-target separator + target = target.split('__') # multi-target separator return ' '.join(target) class _SIMD_BOOL(_Test_Utility): @@ -160,7 +165,7 @@ def test_operators_logical(self): assert vor == data_or data_xor = [a ^ b for a, b in zip(data_a, data_b)] - vxor = getattr(self, "xor")(vdata_a, vdata_b) + vxor = self.xor(vdata_a, vdata_b) assert vxor == data_xor vnot = getattr(self, "not")(vdata_a) @@ -171,19 +176,19 @@ def test_operators_logical(self): return data_andc = [(a & ~b) & 0xFF for a, b in zip(data_a, data_b)] - vandc = getattr(self, "andc")(vdata_a, vdata_b) + vandc = self.andc(vdata_a, vdata_b) assert data_andc == vandc data_orc = [(a | ~b) & 0xFF for a, b in zip(data_a, data_b)] - vorc = getattr(self, "orc")(vdata_a, vdata_b) + vorc = self.orc(vdata_a, vdata_b) assert data_orc == vorc data_xnor = [~(a ^ b) & 0xFF for a, b in zip(data_a, data_b)] - vxnor = getattr(self, "xnor")(vdata_a, vdata_b) + vxnor = self.xnor(vdata_a, vdata_b) assert data_xnor == vxnor def test_tobits(self): - data2bits = lambda data: sum([int(x != 0) << i for i, x in enumerate(data, 0)]) + data2bits = lambda data: sum(int(x != 0) << i for i, x in enumerate(data, 0)) for data in (self._data(), self._data(reverse=True)): vdata = self._load_b(data) data_bits = data2bits(data) @@ -214,10 +219,10 @@ def test_pack(self): spack = [(i & 0xFF) for i in (list(rdata) + list(data))] vpack = pack_simd(vrdata, vdata) elif self.sfx == "b32": - spack = [(i & 0xFF) for i in (2*list(rdata) + 2*list(data))] + spack = [(i & 0xFF) for i in (2 * list(rdata) + 2 * list(data))] vpack = pack_simd(vrdata, vrdata, vdata, vdata) elif self.sfx == "b64": - spack = [(i & 0xFF) for i in (4*list(rdata) + 4*list(data))] + spack = [(i & 0xFF) for i in (4 * 
list(rdata) + 4 * list(data))] vpack = pack_simd(vrdata, vrdata, vrdata, vrdata, vdata, vdata, vdata, vdata) assert vpack == spack @@ -266,7 +271,8 @@ def test_operators_shift(self): shr = self.shr(vdata_a, count) assert shr == data_shr_a - # shift by zero or max or out-range immediate constant is not applicable and illogical + # shift by zero or max or out-range immediate constant is not + # applicable and illogical for count in range(1, self._scalar_size()): # load to cast data_shl_a = self.load([a << count for a in data_a]) @@ -365,7 +371,7 @@ class _SIMD_FP(_Test_Utility): To test all float vector types at once """ def test_arithmetic_fused(self): - vdata_a, vdata_b, vdata_c = [self.load(self._data())]*3 + vdata_a, vdata_b, vdata_c = [self.load(self._data())] * 3 vdata_cx2 = self.add(vdata_c, vdata_c) # multiply and add, a*b + c data_fma = self.load([a * b + c for a, b, c in zip(vdata_a, vdata_b, vdata_c)]) @@ -396,7 +402,7 @@ def test_abs(self): abs_cases = ((-0, 0), (ninf, pinf), (pinf, pinf), (nan, nan)) for case, desired in abs_cases: - data_abs = [desired]*self.nlanes + data_abs = [desired] * self.nlanes vabs = self.abs(self.setall(case)) assert vabs == pytest.approx(data_abs, nan_ok=True) @@ -410,11 +416,12 @@ def test_sqrt(self): sqrt_cases = ((-0.0, -0.0), (0.0, 0.0), (-1.0, nan), (ninf, nan), (pinf, pinf)) for case, desired in sqrt_cases: - data_sqrt = [desired]*self.nlanes - sqrt = self.sqrt(self.setall(case)) + data_sqrt = [desired] * self.nlanes + sqrt = self.sqrt(self.setall(case)) assert sqrt == pytest.approx(data_sqrt, nan_ok=True) - data_sqrt = self.load([math.sqrt(x) for x in data]) # load to truncate precision + # load to truncate precision + data_sqrt = self.load([math.sqrt(x) for x in data]) sqrt = self.sqrt(vdata) assert sqrt == data_sqrt @@ -425,11 +432,11 @@ def test_square(self): # square square_cases = ((nan, nan), (pinf, pinf), (ninf, pinf)) for case, desired in square_cases: - data_square = [desired]*self.nlanes - square = 
self.square(self.setall(case)) + data_square = [desired] * self.nlanes + square = self.square(self.setall(case)) assert square == pytest.approx(data_square, nan_ok=True) - data_square = [x*x for x in data] + data_square = [x * x for x in data] square = self.square(vdata) assert square == data_square @@ -449,13 +456,13 @@ def test_rounding(self, intrin, func): # special cases round_cases = ((nan, nan), (pinf, pinf), (ninf, ninf)) for case, desired in round_cases: - data_round = [desired]*self.nlanes + data_round = [desired] * self.nlanes _round = intrin(self.setall(case)) assert _round == pytest.approx(data_round, nan_ok=True) for x in range(0, 2**20, 256**2): for w in (-1.05, -1.10, -1.15, 1.05, 1.10, 1.15): - data = self.load([(x+a)*w for a in range(self.nlanes)]) + data = self.load([(x + a) * w for a in range(self.nlanes)]) data_round = [func(x) for x in data] _round = intrin(data) assert _round == data_round @@ -505,7 +512,7 @@ def test_max_min(self, intrin): func = eval(intrin[:3]) reduce_intrin = getattr(self, "reduce_" + intrin) intrin = getattr(self, intrin) - hf_nlanes = self.nlanes//2 + hf_nlanes = self.nlanes // 2 cases = ( ([0.0, -0.0], [-0.0, 0.0]), @@ -516,8 +523,8 @@ def test_max_min(self, intrin): ([-10, 10], [-10, 10]) ) for op1, op2 in cases: - vdata_a = self.load(op1*hf_nlanes) - vdata_b = self.load(op2*hf_nlanes) + vdata_a = self.load(op1 * hf_nlanes) + vdata_b = self.load(op2 * hf_nlanes) data = func(vdata_a, vdata_b) simd = intrin(vdata_a, vdata_b) assert simd == data @@ -543,7 +550,7 @@ def test_max_min(self, intrin): (nan, nan) ) for op1, op2 in cases: - vdata_ab = self.load([op1, op2]*hf_nlanes) + vdata_ab = self.load([op1, op2] * hf_nlanes) data = test_nan(op1, op2) simd = reduce_intrin(vdata_ab) assert simd == pytest.approx(data, nan_ok=True) @@ -560,11 +567,11 @@ def test_reciprocal(self): recip_cases = ((nan, nan), (pinf, 0.0), (ninf, -0.0), (0.0, pinf), (-0.0, ninf)) for case, desired in recip_cases: - data_recip = [desired]*self.nlanes 
+ data_recip = [desired] * self.nlanes recip = self.recip(self.setall(case)) assert recip == pytest.approx(data_recip, nan_ok=True) - data_recip = self.load([1/x for x in data]) # load to truncate precision + data_recip = self.load([1 / x for x in data]) # load to truncate precision recip = self.recip(vdata) assert recip == data_recip @@ -574,7 +581,7 @@ def test_special_cases(self): npyv_notnan_##SFX """ nnan = self.notnan(self.setall(self._nan())) - assert nnan == [0]*self.nlanes + assert nnan == [0] * self.nlanes @pytest.mark.parametrize("intrin_name", [ "rint", "trunc", "ceil", "floor" @@ -585,7 +592,7 @@ def test_unary_invalid_fpexception(self, intrin_name): v = self.setall(d) clear_floatstatus() intrin(v) - assert check_floatstatus(invalid=True) == False + assert check_floatstatus(invalid=True) is False @pytest.mark.parametrize('py_comp,np_comp', [ (operator.lt, "cmplt"), @@ -606,8 +613,8 @@ def to_bool(vector): cmp_cases = ((0, nan), (nan, 0), (nan, nan), (pinf, nan), (ninf, nan), (-0.0, +0.0)) for case_operand1, case_operand2 in cmp_cases: - data_a = [case_operand1]*self.nlanes - data_b = [case_operand2]*self.nlanes + data_a = [case_operand1] * self.nlanes + data_b = [case_operand2] * self.nlanes vdata_a = self.setall(case_operand1) vdata_b = self.setall(case_operand2) vcmp = to_bool(intrin(vdata_a, vdata_b)) @@ -655,10 +662,10 @@ def test_memory_load(self): assert loads_data == data # load lower part loadl = self.loadl(data) - loadl_half = list(loadl)[:self.nlanes//2] - data_half = data[:self.nlanes//2] + loadl_half = list(loadl)[:self.nlanes // 2] + data_half = data[:self.nlanes // 2] assert loadl_half == data_half - assert loadl != data # detect overflow + assert loadl != data # detect overflow def test_memory_store(self): data = self._data() @@ -678,12 +685,12 @@ def test_memory_store(self): # store lower part store_l = [0] * self.nlanes self.storel(store_l, vdata) - assert store_l[:self.nlanes//2] == data[:self.nlanes//2] - assert store_l != vdata # 
detect overflow + assert store_l[:self.nlanes // 2] == data[:self.nlanes // 2] + assert store_l != vdata # detect overflow # store higher part store_h = [0] * self.nlanes self.storeh(store_h, vdata) - assert store_h[:self.nlanes//2] == data[self.nlanes//2:] + assert store_h[:self.nlanes // 2] == data[self.nlanes // 2:] assert store_h != vdata # detect overflow @pytest.mark.parametrize("intrin, elsizes, scale, fill", [ @@ -696,14 +703,14 @@ def test_memory_partial_load(self, intrin, elsizes, scale, fill): npyv_load_tillz, npyv_load_till = eval(intrin) data = self._data() lanes = list(range(1, self.nlanes + 1)) - lanes += [self.nlanes**2, self.nlanes**4] # test out of range + lanes += [self.nlanes**2, self.nlanes**4] # test out of range for n in lanes: load_till = npyv_load_till(data, n, *fill) load_tillz = npyv_load_tillz(data, n) n *= scale - data_till = data[:n] + fill * ((self.nlanes-n) // scale) + data_till = data[:n] + fill * ((self.nlanes - n) // scale) assert load_till == data_till - data_tillz = data[:n] + [0] * (self.nlanes-n) + data_tillz = data[:n] + [0] * (self.nlanes - n) assert load_tillz == data_tillz @pytest.mark.parametrize("intrin, elsizes, scale", [ @@ -721,7 +728,7 @@ def test_memory_partial_store(self, intrin, elsizes, scale): lanes += [self.nlanes**2, self.nlanes**4] for n in lanes: data_till = data_rev.copy() - data_till[:n*scale] = data[:n*scale] + data_till[:n * scale] = data[:n * scale] store_till = self._data(reverse=True) npyv_store_till(store_till, n, vdata) assert store_till == data_till @@ -736,15 +743,15 @@ def test_memory_noncont_load(self, intrin, elsizes, scale): npyv_loadn = eval(intrin) for stride in range(-64, 64): if stride < 0: - data = self._data(stride, -stride*self.nlanes) + data = self._data(stride, -stride * self.nlanes) data_stride = list(itertools.chain( *zip(*[data[-i::stride] for i in range(scale, 0, -1)]) )) elif stride == 0: data = self._data() - data_stride = data[0:scale] * (self.nlanes//scale) + data_stride = 
data[0:scale] * (self.nlanes // scale) else: - data = self._data(count=stride*self.nlanes) + data = self._data(count=stride * self.nlanes) data_stride = list(itertools.chain( *zip(*[data[i::stride] for i in range(scale)])) ) @@ -764,15 +771,15 @@ def test_memory_noncont_partial_load(self, intrin, elsizes, scale, fill): lanes += [self.nlanes**2, self.nlanes**4] for stride in range(-64, 64): if stride < 0: - data = self._data(stride, -stride*self.nlanes) + data = self._data(stride, -stride * self.nlanes) data_stride = list(itertools.chain( *zip(*[data[-i::stride] for i in range(scale, 0, -1)]) )) elif stride == 0: data = self._data() - data_stride = data[0:scale] * (self.nlanes//scale) + data_stride = data[0:scale] * (self.nlanes // scale) else: - data = self._data(count=stride*self.nlanes) + data = self._data(count=stride * self.nlanes) data_stride = list(itertools.chain( *zip(*[data[i::stride] for i in range(scale)]) )) @@ -781,7 +788,7 @@ def test_memory_noncont_partial_load(self, intrin, elsizes, scale, fill): nscale = n * scale llanes = self.nlanes - nscale data_stride_till = ( - data_stride[:nscale] + fill * (llanes//scale) + data_stride[:nscale] + fill * (llanes // scale) ) loadn_till = npyv_loadn_till(data, stride, n, *fill) assert loadn_till == data_stride_till @@ -802,25 +809,25 @@ def test_memory_noncont_store(self, intrin, elsizes, scale): hlanes = self.nlanes // scale for stride in range(1, 64): data_storen = [0xff] * stride * self.nlanes - for s in range(0, hlanes*stride, stride): - i = (s//stride)*scale - data_storen[s:s+scale] = data[i:i+scale] + for s in range(0, hlanes * stride, stride): + i = (s // stride) * scale + data_storen[s:s + scale] = data[i:i + scale] storen = [0xff] * stride * self.nlanes - storen += [0x7f]*64 + storen += [0x7f] * 64 npyv_storen(storen, stride, vdata) assert storen[:-64] == data_storen - assert storen[-64:] == [0x7f]*64 # detect overflow + assert storen[-64:] == [0x7f] * 64 # detect overflow for stride in range(-64, 0): 
data_storen = [0xff] * -stride * self.nlanes - for s in range(0, hlanes*stride, stride): - i = (s//stride)*scale - data_storen[s-scale:s or None] = data[i:i+scale] - storen = [0x7f]*64 + for s in range(0, hlanes * stride, stride): + i = (s // stride) * scale + data_storen[s - scale:s or None] = data[i:i + scale] + storen = [0x7f] * 64 storen += [0xff] * -stride * self.nlanes npyv_storen(storen, stride, vdata) assert storen[64:] == data_storen - assert storen[:64] == [0x7f]*64 # detect overflow + assert storen[:64] == [0x7f] * 64 # detect overflow # stride 0 data_storen = [0x7f] * self.nlanes storen = data_storen.copy() @@ -844,34 +851,34 @@ def test_memory_noncont_partial_store(self, intrin, elsizes, scale): for stride in range(1, 64): for n in lanes: data_till = [0xff] * stride * self.nlanes - tdata = data[:n*scale] + [0xff] * (self.nlanes-n*scale) - for s in range(0, hlanes*stride, stride)[:n]: - i = (s//stride)*scale - data_till[s:s+scale] = tdata[i:i+scale] + tdata = data[:n * scale] + [0xff] * (self.nlanes - n * scale) + for s in range(0, hlanes * stride, stride)[:n]: + i = (s // stride) * scale + data_till[s:s + scale] = tdata[i:i + scale] storen_till = [0xff] * stride * self.nlanes - storen_till += [0x7f]*64 + storen_till += [0x7f] * 64 npyv_storen_till(storen_till, stride, n, vdata) assert storen_till[:-64] == data_till - assert storen_till[-64:] == [0x7f]*64 # detect overflow + assert storen_till[-64:] == [0x7f] * 64 # detect overflow for stride in range(-64, 0): for n in lanes: data_till = [0xff] * -stride * self.nlanes - tdata = data[:n*scale] + [0xff] * (self.nlanes-n*scale) - for s in range(0, hlanes*stride, stride)[:n]: - i = (s//stride)*scale - data_till[s-scale:s or None] = tdata[i:i+scale] - storen_till = [0x7f]*64 + tdata = data[:n * scale] + [0xff] * (self.nlanes - n * scale) + for s in range(0, hlanes * stride, stride)[:n]: + i = (s // stride) * scale + data_till[s - scale:s or None] = tdata[i:i + scale] + storen_till = [0x7f] * 64 storen_till 
+= [0xff] * -stride * self.nlanes npyv_storen_till(storen_till, stride, n, vdata) assert storen_till[64:] == data_till - assert storen_till[:64] == [0x7f]*64 # detect overflow + assert storen_till[:64] == [0x7f] * 64 # detect overflow # stride 0 for n in lanes: data_till = [0x7f] * self.nlanes storen_till = data_till.copy() - data_till[0:scale] = data[:n*scale][-scale:] + data_till[0:scale] = data[:n * scale][-scale:] npyv_storen_till(storen_till, 0, n, vdata) assert storen_till == data_till @@ -889,7 +896,7 @@ def test_lut(self, intrin, table_size, elsize): return intrin = eval(intrin) idx_itrin = getattr(self.npyv, f"setall_u{elsize}") - table = range(0, table_size) + table = range(table_size) for i in table: broadi = self.setall(i) idx = idx_itrin(i) @@ -942,14 +949,14 @@ def test_misc(self): self.npyv.cleanup() def test_reorder(self): - data_a, data_b = self._data(), self._data(reverse=True) + data_a, data_b = self._data(), self._data(reverse=True) vdata_a, vdata_b = self.load(data_a), self.load(data_b) # lower half part - data_a_lo = data_a[:self.nlanes//2] - data_b_lo = data_b[:self.nlanes//2] + data_a_lo = data_a[:self.nlanes // 2] + data_b_lo = data_b[:self.nlanes // 2] # higher half part - data_a_hi = data_a[self.nlanes//2:] - data_b_hi = data_b[self.nlanes//2:] + data_a_hi = data_a[self.nlanes // 2:] + data_b_hi = data_b[self.nlanes // 2:] # combine two lower parts combinel = self.combinel(vdata_a, vdata_b) assert combinel == data_a_lo + data_b_lo @@ -969,7 +976,7 @@ def test_reorder(self): ]) vzip = self.zip(vdata_a, vdata_b) assert vzip == (data_zipl, data_ziph) - vzip = [0]*self.nlanes*2 + vzip = [0] * self.nlanes * 2 self._x2("store")(vzip, (vdata_a, vdata_b)) assert vzip == list(data_zipl) + list(data_ziph) @@ -985,8 +992,8 @@ def test_reorder_rev64(self): if ssize == 64: return data_rev64 = [ - y for x in range(0, self.nlanes, 64//ssize) - for y in reversed(range(x, x + 64//ssize)) + y for x in range(0, self.nlanes, 64 // ssize) + for y in 
reversed(range(x, x + 64 // ssize)) ] rev64 = self.rev64(self.load(range(self.nlanes))) assert rev64 == data_rev64 @@ -1000,16 +1007,16 @@ def test_reorder_permi128(self): if ssize < 32: return data = self.load(self._data()) - permn = 128//ssize - permd = permn-1 - nlane128 = self.nlanes//permn + permn = 128 // ssize + permd = permn - 1 + nlane128 = self.nlanes // permn shfl = [0, 1] if ssize == 64 else [0, 2, 4, 6] for i in range(permn): indices = [(i >> shf) & permd for shf in shfl] vperm = self.permi128(data, *indices) data_vperm = [ data[j + (e & -permn)] - for e, j in enumerate(indices*nlane128) + for e, j in enumerate(indices * nlane128) ] assert vperm == data_vperm @@ -1030,6 +1037,7 @@ def test_operators_comparison(self, func, intrin): intrin = getattr(self, intrin) mask_true = self._true_mask() + def to_bool(vector): return [lane == mask_true for lane in vector] @@ -1057,8 +1065,8 @@ def test_operators_logical(self): vxor = cast(self.xor(vdata_a, vdata_b)) assert vxor == data_xor - data_or = cast_data([a | b for a, b in zip(data_cast_a, data_cast_b)]) - vor = cast(getattr(self, "or")(vdata_a, vdata_b)) + data_or = cast_data([a | b for a, b in zip(data_cast_a, data_cast_b)]) + vor = cast(getattr(self, "or")(vdata_a, vdata_b)) assert vor == data_or data_and = cast_data([a & b for a, b in zip(data_cast_a, data_cast_b)]) @@ -1072,7 +1080,7 @@ def test_operators_logical(self): if self.sfx not in ("u8"): return data_andc = [a & ~b for a, b in zip(data_cast_a, data_cast_b)] - vandc = cast(getattr(self, "andc")(vdata_a, vdata_b)) + vandc = cast(self.andc(vdata_a, vdata_b)) assert vandc == data_andc @pytest.mark.parametrize("intrin", ["any", "all"]) @@ -1101,11 +1109,11 @@ def test_operators_crosstest(self, intrin, data): def test_conversion_boolean(self): bsfx = "b" + self.sfx[1:] - to_boolean = getattr(self.npyv, "cvt_%s_%s" % (bsfx, self.sfx)) - from_boolean = getattr(self.npyv, "cvt_%s_%s" % (self.sfx, bsfx)) + to_boolean = getattr(self.npyv, 
f"cvt_{bsfx}_{self.sfx}") + from_boolean = getattr(self.npyv, f"cvt_{self.sfx}_{bsfx}") false_vb = to_boolean(self.setall(0)) - true_vb = self.cmpeq(self.setall(0), self.setall(0)) + true_vb = self.cmpeq(self.setall(0), self.setall(0)) assert false_vb != true_vb false_vsfx = from_boolean(false_vb) @@ -1120,16 +1128,16 @@ def test_conversion_expand(self): """ if self.sfx not in ("u8", "u16"): return - totype = self.sfx[0]+str(int(self.sfx[1:])*2) + totype = self.sfx[0] + str(int(self.sfx[1:]) * 2) expand = getattr(self.npyv, f"expand_{totype}_{self.sfx}") # close enough from the edge to detect any deviation - data = self._data(self._int_max() - self.nlanes) + data = self._data(self._int_max() - self.nlanes) vdata = self.load(data) edata = expand(vdata) # lower half part - data_lo = data[:self.nlanes//2] + data_lo = data[:self.nlanes // 2] # higher half part - data_hi = data[self.nlanes//2:] + data_hi = data[self.nlanes // 2:] assert edata == (data_lo, data_hi) def test_arithmetic_subadd(self): @@ -1141,11 +1149,11 @@ def test_arithmetic_subadd(self): vdata_a, vdata_b = self.load(data_a), self.load(data_b) # non-saturated - data_add = self.load([a + b for a, b in zip(data_a, data_b)]) # load to cast - add = self.add(vdata_a, vdata_b) + data_add = self.load([a + b for a, b in zip(data_a, data_b)]) # load to cast + add = self.add(vdata_a, vdata_b) assert add == data_add - data_sub = self.load([a - b for a, b in zip(data_a, data_b)]) - sub = self.sub(vdata_a, vdata_b) + data_sub = self.load([a - b for a, b in zip(data_a, data_b)]) + sub = self.sub(vdata_a, vdata_b) assert sub == data_sub def test_arithmetic_mul(self): @@ -1185,6 +1193,7 @@ def test_arithmetic_intdiv(self): return int_min = self._int_min() + def trunc_div(a, d): """ Divide towards zero works with large integers > 2^53, @@ -1199,17 +1208,17 @@ def trunc_div(a, d): data = [1, -int_min] # to test overflow data += range(0, 2**8, 2**5) - data += range(0, 2**8, 2**5-1) + data += range(0, 2**8, 2**5 - 1) bsize 
= self._scalar_size() if bsize > 8: data += range(2**8, 2**16, 2**13) - data += range(2**8, 2**16, 2**13-1) + data += range(2**8, 2**16, 2**13 - 1) if bsize > 16: data += range(2**16, 2**32, 2**29) - data += range(2**16, 2**32, 2**29-1) + data += range(2**16, 2**32, 2**29 - 1) if bsize > 32: data += range(2**32, 2**64, 2**61) - data += range(2**32, 2**64, 2**61-1) + data += range(2**32, 2**64, 2**61 - 1) # negate data += [-x for x in data] for dividend, divisor in itertools.product(data, data): @@ -1244,7 +1253,7 @@ def test_arithmetic_reduce_sumup(self): """ if self.sfx not in ("u8", "u16"): return - rdata = (0, self.nlanes, self._int_min(), self._int_max()-self.nlanes) + rdata = (0, self.nlanes, self._int_min(), self._int_max() - self.nlanes) for r in rdata: data = self._data(r) vdata = self.load(data) @@ -1260,7 +1269,7 @@ def test_mask_conditional(self): """ vdata_a = self.load(self._data()) vdata_b = self.load(self._data(reverse=True)) - true_mask = self.cmpeq(self.zero(), self.zero()) + true_mask = self.cmpeq(self.zero(), self.zero()) false_mask = self.cmpneq(self.zero(), self.zero()) data_sub = self.sub(vdata_b, vdata_a) @@ -1287,21 +1296,22 @@ def test_mask_conditional(self): ifdivz = self.ifdivz(false_mask, vdata_a, vdata_b) assert ifdivz == self.zero() + bool_sfx = ("b8", "b16", "b32", "b64") int_sfx = ("u8", "s8", "u16", "s16", "u32", "s32", "u64", "s64") -fp_sfx = ("f32", "f64") +fp_sfx = ("f32", "f64") all_sfx = int_sfx + fp_sfx tests_registry = { bool_sfx: _SIMD_BOOL, - int_sfx : _SIMD_INT, - fp_sfx : _SIMD_FP, + int_sfx: _SIMD_INT, + fp_sfx: _SIMD_FP, ("f32",): _SIMD_FP32, ("f64",): _SIMD_FP64, - all_sfx : _SIMD_ALL + all_sfx: _SIMD_ALL } for target_name, npyv in targets.items(): simd_width = npyv.simd if npyv else '' - pretty_name = target_name.split('__') # multi-target separator + pretty_name = target_name.split('__') # multi-target separator if len(pretty_name) > 1: # multi-target pretty_name = f"({' '.join(pretty_name)})" @@ -1309,7 +1319,7 @@ 
def test_mask_conditional(self): pretty_name = pretty_name[0] skip = "" - skip_sfx = dict() + skip_sfx = {} if not npyv: skip = f"target '{pretty_name}' isn't supported by current machine" elif not npyv.simd: @@ -1326,8 +1336,10 @@ def test_mask_conditional(self): for sfx in sfxes: skip_m = skip_sfx.get(sfx, skip) inhr = (cls,) - attr = dict(npyv=targets[target_name], sfx=sfx, target_name=target_name) - tcls = type(f"Test{cls.__name__}_{simd_width}_{target_name}_{sfx}", inhr, attr) + attr = {"npyv": targets[target_name], "sfx": sfx, + "target_name": target_name} + type_name = f"Test{cls.__name__}_{simd_width}_{target_name}_{sfx}" + tcls = type(type_name, inhr, attr) if skip_m: pytest.mark.skip(reason=skip_m)(tcls) globals()[tcls.__name__] = tcls diff --git a/numpy/_core/tests/test_simd_module.py b/numpy/_core/tests/test_simd_module.py index 6bd68c22e193..3de1596aa10a 100644 --- a/numpy/_core/tests/test_simd_module.py +++ b/numpy/_core/tests/test_simd_module.py @@ -1,5 +1,7 @@ import pytest + from numpy._core._simd import targets + """ This testing unit only for checking the sanity of common functionality, therefore all we need is just to take one submodule that represents any @@ -21,7 +23,8 @@ int_sfx = unsigned_sfx + signed_sfx all_sfx = unsigned_sfx + int_sfx -@pytest.mark.skipif(not npyv, reason="could not find any SIMD extension with NPYV support") +@pytest.mark.skipif(not npyv, + reason="could not find any SIMD extension with NPYV support") class Test_SIMD_MODULE: @pytest.mark.parametrize('sfx', all_sfx) @@ -36,7 +39,7 @@ def test_type_name(self, sfx): assert vector.__name__ == "npyv_" + sfx def test_raises(self): - a, b = [npyv.setall_u32(1)]*2 + a, b = [npyv.setall_u32(1)] * 2 for sfx in all_sfx: vcb = lambda intrin: getattr(npyv, f"{intrin}_{sfx}") pytest.raises(TypeError, vcb("add"), a) @@ -45,7 +48,8 @@ def test_raises(self): pytest.raises(TypeError, vcb("setall"), [1]) pytest.raises(TypeError, vcb("load"), 1) pytest.raises(ValueError, vcb("load"), [1]) - 
pytest.raises(ValueError, vcb("store"), [1], getattr(npyv, f"reinterpret_{sfx}_u32")(a)) + value = getattr(npyv, f"reinterpret_{sfx}_u32")(a) + pytest.raises(ValueError, vcb("store"), [1], value) @pytest.mark.skipif(not npyv2, reason=( "could not find a second SIMD extension with NPYV support" @@ -93,7 +97,7 @@ def test_truncate_f32(self): assert round(f32, 1) == 0.1 def test_compare(self): - data_range = range(0, npyv.nlanes_u32) + data_range = range(npyv.nlanes_u32) vdata = npyv.load_u32(data_range) assert vdata == list(data_range) assert vdata == tuple(data_range) diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 228b5e949cfd..281aaf41893e 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -1,4 +1,4 @@ -import concurrent.futures +import copy import itertools import os import pickle @@ -6,29 +6,34 @@ import sys import tempfile -import numpy as np import pytest -from numpy.dtypes import StringDType +import numpy as np from numpy._core.tests._natype import pd_NA -from numpy.testing import assert_array_equal, IS_WASM - - -@pytest.fixture -def string_list(): - return ["abc", "def", "ghi" * 10, "AÂĸ☃â‚Ŧ 😊" * 100, "Abc" * 1000, "DEF"] +from numpy.dtypes import StringDType +from numpy.testing import assert_array_equal -@pytest.fixture -def random_string_list(): +def random_unicode_string_list(): + """Returns an array of 10 100-character strings containing random text""" chars = list(string.ascii_letters + string.digits) chars = np.array(chars, dtype="U1") ret = np.random.choice(chars, size=100 * 10, replace=True) return ret.view("U100") +def get_dtype(na_object, coerce=True): + """Helper to work around pd_NA boolean behavior""" + # explicit is check for pd_NA because != with pd_NA returns pd_NA + if na_object is pd_NA or na_object != "unset": + return np.dtypes.StringDType(na_object=na_object, coerce=coerce) + else: + return np.dtypes.StringDType(coerce=coerce) + + 
@pytest.fixture(params=[True, False]) def coerce(request): + """Coerce input to strings or raise an error for non-string input""" return request.param @@ -37,25 +42,24 @@ def coerce(request): ids=["unset", "None", "pandas.NA", "np.nan", "float('nan')", "string nan"], ) def na_object(request): + """Possible values for the missing data sentinel""" return request.param -def get_dtype(na_object, coerce=True): - # explicit is check for pd_NA because != with pd_NA returns pd_NA - if na_object is pd_NA or na_object != "unset": - return StringDType(na_object=na_object, coerce=coerce) - else: - return StringDType(coerce=coerce) - - @pytest.fixture() def dtype(na_object, coerce): + """Cartesian project of missing data sentinel and string coercion options""" return get_dtype(na_object, coerce) +@pytest.fixture +def string_list(): + """Mix of short and long strings, some with unicode, some without""" + return ["abc", "def", "ghi" * 10, "AÂĸ☃â‚Ŧ 😊" * 100, "Abc" * 1000, "DEF"] + -# second copy for cast tests to do a cartesian product over dtypes @pytest.fixture(params=[True, False]) def coerce2(request): + """Second copy of the coerce fixture for tests that need two instances""" return request.param @@ -64,11 +68,13 @@ def coerce2(request): ids=["unset", "None", "pandas.NA", "np.nan", "float('nan')", "string nan"], ) def na_object2(request): + """Second copy of the na_object fixture for tests that need two instances""" return request.param @pytest.fixture() def dtype2(na_object2, coerce2): + """Second copy of the dtype fixture for tests that need two instances""" # explicit is check for pd_NA because != with pd_NA returns pd_NA if na_object2 is pd_NA or na_object2 != "unset": return StringDType(na_object=na_object2, coerce=coerce2) @@ -116,13 +122,13 @@ def test_dtype_repr(dtype): if not hasattr(dtype, "na_object") and dtype.coerce: assert repr(dtype) == "StringDType()" elif dtype.coerce: - assert repr(dtype) == f"StringDType(na_object={repr(dtype.na_object)})" + assert 
repr(dtype) == f"StringDType(na_object={dtype.na_object!r})" elif not hasattr(dtype, "na_object"): assert repr(dtype) == "StringDType(coerce=False)" else: assert ( repr(dtype) - == f"StringDType(na_object={repr(dtype.na_object)}, coerce=False)" + == f"StringDType(na_object={dtype.na_object!r}, coerce=False)" ) @@ -145,12 +151,12 @@ def test_set_replace_na(i): s_long = "-=+" * 100 strings = [s_medium, s_empty, s_short, s_medium, s_long] a = np.array(strings, StringDType(na_object=np.nan)) - for s in [a[i], s_medium+s_short, s_short, s_empty, s_long]: + for s in [a[i], s_medium + s_short, s_short, s_empty, s_long]: a[i] = np.nan assert np.isnan(a[i]) a[i] = s assert a[i] == s - assert_array_equal(a, strings[:i] + [s] + strings[i+1:]) + assert_array_equal(a, strings[:i] + [s] + strings[i + 1:]) def test_null_roundtripping(): @@ -162,8 +168,8 @@ def test_null_roundtripping(): def test_string_too_large_error(): arr = np.array(["a", "b", "c"], dtype=StringDType()) - with pytest.raises(MemoryError): - arr * (2**63 - 2) + with pytest.raises(OverflowError): + arr * (sys.maxsize + 1) @pytest.mark.parametrize( @@ -190,10 +196,14 @@ def test_array_creation_utf8(dtype, data): ], ) def test_scalars_string_conversion(data, dtype): + try: + str_vals = [str(d.decode('utf-8')) for d in data] + except AttributeError: + str_vals = [str(d) for d in data] if dtype.coerce: assert_array_equal( np.array(data, dtype=dtype), - np.array([str(d) for d in data], dtype=dtype), + np.array(str_vals, dtype=dtype), ) else: with pytest.raises(ValueError): @@ -232,12 +242,12 @@ def test_self_casts(dtype, dtype2, strings): if hasattr(dtype, "na_object") and hasattr(dtype2, "na_object"): na1 = dtype.na_object na2 = dtype2.na_object - if ((na1 is not na2 and + if (na1 is not na2 and # check for pd_NA first because bool(pd_NA) is an error ((na1 is pd_NA or na2 is pd_NA) or # the second check is a NaN check, spelled this way # to avoid errors from math.isnan and np.isnan - (na1 != na2 and not (na1 != na1 
and na2 != na2))))): + (na1 != na2 and not (na1 != na1 and na2 != na2)))): with pytest.raises(TypeError): arr[:-1] == newarr[:-1] return @@ -271,7 +281,7 @@ def test_unicode_casts(self, dtype, strings): def test_void_casts(self, dtype, strings): sarr = np.array(strings, dtype=dtype) utf8_bytes = [s.encode("utf-8") for s in strings] - void_dtype = f"V{max([len(s) for s in utf8_bytes])}" + void_dtype = f"V{max(len(s) for s in utf8_bytes)}" varr = np.array(utf8_bytes, dtype=void_dtype) assert_array_equal(varr, sarr.astype(void_dtype)) assert_array_equal(varr.astype(dtype), sarr) @@ -280,21 +290,30 @@ def test_bytes_casts(self, dtype, strings): sarr = np.array(strings, dtype=dtype) try: utf8_bytes = [s.encode("ascii") for s in strings] - bytes_dtype = f"S{max([len(s) for s in utf8_bytes])}" + bytes_dtype = f"S{max(len(s) for s in utf8_bytes)}" barr = np.array(utf8_bytes, dtype=bytes_dtype) assert_array_equal(barr, sarr.astype(bytes_dtype)) assert_array_equal(barr.astype(dtype), sarr) + if dtype.coerce: + barr = np.array(utf8_bytes, dtype=dtype) + assert_array_equal(barr, sarr) + barr = np.array(utf8_bytes, dtype="O") + assert_array_equal(barr.astype(dtype), sarr) + else: + with pytest.raises(ValueError): + np.array(utf8_bytes, dtype=dtype) except UnicodeEncodeError: with pytest.raises(UnicodeEncodeError): sarr.astype("S20") -def test_additional_unicode_cast(random_string_list, dtype): - arr = np.array(random_string_list, dtype=dtype) +def test_additional_unicode_cast(dtype): + string_list = random_unicode_string_list() + arr = np.array(string_list, dtype=dtype) # test that this short-circuits correctly assert_array_equal(arr, arr.astype(arr.dtype)) # tests the casts via the comparison promoter - assert_array_equal(arr, arr.astype(random_string_list.dtype)) + assert_array_equal(arr, arr.astype(string_list.dtype)) def test_insert_scalar(dtype, string_list): @@ -393,6 +412,13 @@ def test_pickle(dtype, string_list): os.remove(f.name) +def test_stdlib_copy(dtype, 
string_list): + arr = np.array(string_list, dtype=dtype) + + assert_array_equal(copy.copy(arr), arr) + assert_array_equal(copy.deepcopy(arr), arr) + + @pytest.mark.parametrize( "strings", [ @@ -415,8 +441,19 @@ def test_sort(dtype, strings): def test_sort(strings, arr_sorted): arr = np.array(strings, dtype=dtype) - np.random.default_rng().shuffle(arr) na_object = getattr(arr.dtype, "na_object", "") + if na_object is None and None in strings: + with pytest.raises( + ValueError, + match="Cannot compare null that is not a nan-like value", + ): + np.argsort(arr) + argsorted = None + elif na_object is pd_NA or na_object != '': + argsorted = None + else: + argsorted = np.argsort(arr) + np.random.default_rng().shuffle(arr) if na_object is None and None in strings: with pytest.raises( ValueError, @@ -426,6 +463,8 @@ def test_sort(strings, arr_sorted): else: arr.sort() assert np.array_equal(arr, arr_sorted, equal_nan=True) + if argsorted is not None: + assert np.array_equal(argsorted, np.argsort(strings)) # make a copy so we don't mutate the lists in the fixture strings = strings.copy() @@ -495,6 +534,50 @@ def test_fancy_indexing(string_list): sarr = np.array(string_list, dtype="T") assert_array_equal(sarr, sarr[np.arange(sarr.shape[0])]) + inds = [ + [True, True], + [0, 1], + ..., + np.array([0, 1], dtype='uint8'), + ] + + lops = [ + ['a' * 25, 'b' * 25], + ['', ''], + ['hello', 'world'], + ['hello', 'world' * 25], + ] + + # see gh-27003 and gh-27053 + for ind in inds: + for lop in lops: + a = np.array(lop, dtype="T") + assert_array_equal(a[ind], a) + rop = ['d' * 25, 'e' * 25] + for b in [rop, np.array(rop, dtype="T")]: + a[ind] = b + assert_array_equal(a, b) + assert a[0] == 'd' * 25 + + # see gh-29279 + data = [ + ["AAAAAAAAAAAAAAAAA"], + ["BBBBBBBBBBBBBBBBBBBBBBBBBBBBB"], + ["CCCCCCCCCCCCCCCCC"], + ["DDDDDDDDDDDDDDDDD"], + ] + sarr = np.array(data, dtype=np.dtypes.StringDType()) + uarr = np.array(data, dtype="U30") + for ind in [[0], [1], [2], [3], [[0, 0]], [[1, 1, 
3]], [[1, 1]]]: + assert_array_equal(sarr[ind], uarr[ind]) + + +def test_flatiter_indexing(): + # see gh-29659 + arr = np.array(['hello', 'world'], dtype='T') + arr.flat[:] = 9223372036854775 + assert_array_equal(arr, np.array([9223372036854775] * 2, dtype='T')) + def test_creation_functions(): assert_array_equal(np.zeros(3, dtype="T"), ["", "", ""]) @@ -511,6 +594,12 @@ def test_concatenate(string_list): assert_array_equal(np.concatenate([sarr], axis=0), sarr) +def test_resize_method(string_list): + sarr = np.array(string_list, dtype="T") + sarr.resize(len(string_list) + 3) + assert_array_equal(sarr, np.array(string_list + [''] * 3, dtype="T")) + + def test_create_with_copy_none(string_list): arr = np.array(string_list, dtype=StringDType()) # create another stringdtype array with an arena that has a different @@ -688,6 +777,21 @@ def test_float_casts(typename): assert_array_equal(eres, res) +def test_float_nan_cast_na_object(): + # gh-28157 + dt = np.dtypes.StringDType(na_object=np.nan) + arr1 = np.full((1,), fill_value=np.nan, dtype=dt) + arr2 = np.full_like(arr1, fill_value=np.nan) + + assert arr1.item() is np.nan + assert arr2.item() is np.nan + + inp = [1.2, 2.3, np.nan] + arr = np.array(inp).astype(dt) + assert arr[2] is np.nan + assert arr[0] == '1.2' + + @pytest.mark.parametrize( "typename", [ @@ -828,6 +932,31 @@ def test_add_promoter(string_list): assert_array_equal(op + arr, lresult) assert_array_equal(arr + op, rresult) + # The promoter should be able to handle things if users pass `dtype=` + res = np.add("hello", string_list, dtype=StringDType) + assert res.dtype == StringDType() + + # The promoter should not kick in if users override the input, + # which means arr is cast, this fails because of the unknown length. 
+ with pytest.raises(TypeError, match="cannot cast dtype"): + np.add(arr, "add", signature=("U", "U", None), casting="unsafe") + + # But it must simply reject the following: + with pytest.raises(TypeError, match=".*did not contain a loop"): + np.add(arr, "add", signature=(None, "U", None)) + + with pytest.raises(TypeError, match=".*did not contain a loop"): + np.add("a", "b", signature=("U", "U", StringDType)) + + +def test_add_no_legacy_promote_with_signature(): + # Possibly misplaced, but useful to test with string DType. We check that + # if there is clearly no loop found, a stray `dtype=` doesn't break things + # Regression test for the bad error in gh-26735 + # (If legacy promotion is gone, this can be deleted...) + with pytest.raises(TypeError, match=".*did not contain a loop"): + np.add("3", 6, dtype=StringDType) + def test_add_promoter_reduce(): # Exact TypeError could change, but ensure StringDtype doesn't match @@ -952,6 +1081,62 @@ def test_ufunc_multiply(dtype, string_list, other, other_dtype, use_out): other * arr +def test_findlike_promoters(): + r = "Wally" + l = "Where's Wally?" + s = np.int32(3) + e = np.int8(13) + for dtypes in [("T", "U"), ("U", "T")]: + for function, answer in [ + (np.strings.index, 8), + (np.strings.endswith, True), + ]: + assert answer == function( + np.array(l, dtype=dtypes[0]), np.array(r, dtype=dtypes[1]), s, e + ) + + +def test_strip_promoter(): + arg = ["Hello!!!!", "Hello??!!"] + strip_char = "!" 
+ answer = ["Hello", "Hello??"] + for dtypes in [("T", "U"), ("U", "T")]: + result = np.strings.strip( + np.array(arg, dtype=dtypes[0]), + np.array(strip_char, dtype=dtypes[1]) + ) + assert_array_equal(result, answer) + assert result.dtype.char == "T" + + +def test_replace_promoter(): + arg = ["Hello, planet!", "planet, Hello!"] + old = "planet" + new = "world" + answer = ["Hello, world!", "world, Hello!"] + for dtypes in itertools.product("TU", repeat=3): + if dtypes == ("U", "U", "U"): + continue + answer_arr = np.strings.replace( + np.array(arg, dtype=dtypes[0]), + np.array(old, dtype=dtypes[1]), + np.array(new, dtype=dtypes[2]), + ) + assert_array_equal(answer_arr, answer) + assert answer_arr.dtype.char == "T" + + +def test_center_promoter(): + arg = ["Hello", "planet!"] + fillchar = "/" + for dtypes in [("T", "U"), ("U", "T")]: + answer = np.strings.center( + np.array(arg, dtype=dtypes[0]), 9, np.array(fillchar, dtype=dtypes[1]) + ) + assert_array_equal(answer, ["//Hello//", "/planet!/"]) + assert answer.dtype.char == "T" + + DATETIME_INPUT = [ np.datetime64("1923-04-14T12:43:12"), np.datetime64("1994-06-21T14:43:15"), @@ -1037,7 +1222,7 @@ def test_nat_casts(): for arr in [dt_array, td_array]: assert_array_equal( arr.astype(dtype), - np.array([output_object]*arr.size, dtype=dtype)) + np.array([output_object] * arr.size, dtype=dtype)) def test_nat_conversion(): @@ -1065,38 +1250,22 @@ def test_growing_strings(dtype): assert_array_equal(arr, uarr) -@pytest.mark.skipif(IS_WASM, reason="no threading support in wasm") -def test_threaded_access_and_mutation(dtype, random_string_list): - # this test uses an RNG and may crash or cause deadlocks if there is a - # threading bug - rng = np.random.default_rng(0x4D3D3D3) - - def func(arr): - rnd = rng.random() - # either write to random locations in the array, compute a ufunc, or - # re-initialize the array - if rnd < 0.25: - num = np.random.randint(0, arr.size) - arr[num] = arr[num] + "hello" - elif rnd < 0.5: - if rnd < 
0.375: - np.add(arr, arr) - else: - np.add(arr, arr, out=arr) - elif rnd < 0.75: - if rnd < 0.875: - np.multiply(arr, np.int64(2)) - else: - np.multiply(arr, np.int64(2), out=arr) - else: - arr[:] = random_string_list - - with concurrent.futures.ThreadPoolExecutor(max_workers=8) as tpe: - arr = np.array(random_string_list, dtype=dtype) - futures = [tpe.submit(func, arr) for _ in range(500)] +def test_assign_medium_strings(): + # see gh-29261 + N = 9 + src = np.array( + ( + ['0' * 256] * 3 + ['0' * 255] + ['0' * 256] + ['0' * 255] + + ['0' * 256] * 2 + ['0' * 255] + ), dtype='T') + dst = np.array( + ( + ['0' * 255] + ['0' * 256] * 2 + ['0' * 255] + ['0' * 256] + + ['0' * 255] + [''] * 5 + ), dtype='T') - for f in futures: - f.result() + dst[1:N + 1] = src + assert_array_equal(dst[1:N + 1], src) UFUNC_TEST_DATA = [ @@ -1212,11 +1381,10 @@ def test_unary(string_array, unicode_array, function_name): # to avoid these errors we'd need to add NA support to _vec_string with pytest.raises((ValueError, TypeError)): func(na_arr) + elif function_name == "splitlines": + assert func(na_arr)[0] == func(dtype.na_object)[()] else: - if function_name == "splitlines": - assert func(na_arr)[0] == func(dtype.na_object)[()] - else: - assert func(na_arr)[0] == func(dtype.na_object) + assert func(na_arr)[0] == func(dtype.na_object) return if function_name == "str_len" and not is_str: # str_len always errors for any non-string null, even NA ones because @@ -1286,7 +1454,7 @@ def test_unary(string_array, unicode_array, function_name): "strip", "lstrip", "rstrip", - "replace" + "replace", "zfill", ] @@ -1459,6 +1627,40 @@ def test_unset_na_coercion(): arr == op +def test_repeat(string_array): + res = string_array.repeat(1000) + # Create an empty array with expanded dimension, and fill it. Then, + # reshape it to the expected result. + expected = np.empty_like(string_array, shape=string_array.shape + (1000,)) + expected[...] 
= string_array[:, np.newaxis] + expected = expected.reshape(-1) + + assert_array_equal(res, expected, strict=True) + + +@pytest.mark.parametrize("tile", [1, 6, (2, 5)]) +def test_accumulation(string_array, tile): + """Accumulation is odd for StringDType but tests dtypes with references. + """ + # Fill with mostly empty strings to not create absurdly big strings + arr = np.zeros_like(string_array, shape=(100,)) + arr[:len(string_array)] = string_array + arr[-len(string_array):] = string_array + + # Bloat size a bit (get above thresholds and test >1 ndim). + arr = np.tile(string_array, tile) + + res = np.add.accumulate(arr, axis=0) + res_obj = np.add.accumulate(arr.astype(object), axis=0) + assert_array_equal(res, res_obj.astype(arr.dtype), strict=True) + + if arr.ndim > 1: + res = np.add.accumulate(arr, axis=-1) + res_obj = np.add.accumulate(arr.astype(object), axis=-1) + + assert_array_equal(res, res_obj.astype(arr.dtype), strict=True) + + class TestImplementation: """Check that strings are stored in the arena when possible. @@ -1467,17 +1669,17 @@ class TestImplementation: """ @classmethod - def setup_class(self): - self.MISSING = 0x80 - self.INITIALIZED = 0x40 - self.OUTSIDE_ARENA = 0x20 - self.LONG = 0x10 - self.dtype = StringDType(na_object=np.nan) - self.sizeofstr = self.dtype.itemsize - sp = self.dtype.itemsize // 2 # pointer size = sizeof(size_t) + def setup_class(cls): + cls.MISSING = 0x80 + cls.INITIALIZED = 0x40 + cls.OUTSIDE_ARENA = 0x20 + cls.LONG = 0x10 + cls.dtype = StringDType(na_object=np.nan) + cls.sizeofstr = cls.dtype.itemsize + sp = cls.dtype.itemsize // 2 # pointer size = sizeof(size_t) # Below, size is not strictly correct, since it really uses # 7 (or 3) bytes, but good enough for the tests here. 
- self.view_dtype = np.dtype([ + cls.view_dtype = np.dtype([ ('offset', f'u{sp}'), ('size', f'u{sp // 2}'), ('xsiz', f'V{sp // 2 - 1}'), @@ -1488,13 +1690,13 @@ def setup_class(self): ('size', f'u{sp // 2}'), ('offset', f'u{sp}'), ]) - self.s_empty = "" - self.s_short = "01234" - self.s_medium = "abcdefghijklmnopqrstuvwxyz" - self.s_long = "-=+" * 100 - self.a = np.array( - [self.s_empty, self.s_short, self.s_medium, self.s_long], - self.dtype) + cls.s_empty = "" + cls.s_short = "01234" + cls.s_medium = "abcdefghijklmnopqrstuvwxyz" + cls.s_long = "-=+" * 100 + cls.a = np.array( + [cls.s_empty, cls.s_short, cls.s_medium, cls.s_long], + cls.dtype) def get_view(self, a): # Cannot view a StringDType as anything else directly, since @@ -1553,12 +1755,12 @@ def test_zeros(self): assert_array_equal(z, "") def test_copy(self): - c = self.a.copy() - assert_array_equal(self.get_flags(c), self.get_flags(self.a)) - assert_array_equal(c, self.a) - offsets = self.get_view(c)['offset'] - assert offsets[2] == 1 - assert offsets[3] == 1 + len(self.s_medium) + self.sizeofstr // 2 + for c in [self.a.copy(), copy.copy(self.a), copy.deepcopy(self.a)]: + assert_array_equal(self.get_flags(c), self.get_flags(self.a)) + assert_array_equal(c, self.a) + offsets = self.get_view(c)['offset'] + assert offsets[2] == 1 + assert offsets[3] == 1 + len(self.s_medium) + self.sizeofstr // 2 def test_arena_use_with_setting(self): c = np.zeros_like(self.a) diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index 64cf42e05adb..5a4b0a6a7f32 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -1,11 +1,12 @@ +import operator import sys + import pytest -import operator import numpy as np - -from numpy.testing import assert_array_equal, assert_raises, IS_PYPY - +from numpy._core._exceptions import _UFuncNoLoopError +from numpy.testing import assert_array_equal, assert_raises +from numpy.testing._private.utils import requires_memory 
COMPARISONS = [ (operator.eq, np.equal, "=="), @@ -18,8 +19,6 @@ MAX = np.iinfo(np.int64).max -IS_PYPY_LT_7_3_16 = IS_PYPY and sys.implementation.version < (7, 3, 16) - @pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS) def test_mixed_string_comparison_ufuncs_fail(op, ufunc, sym): arr_string = np.array(["a", "b"], dtype="S") @@ -109,6 +108,98 @@ def test_float_to_string_cast(str_dt, float_dt): assert_array_equal(res, np.array(expected, dtype=str_dt)) +@pytest.mark.parametrize("str_dt", "US") +@pytest.mark.parametrize("size", [-1, np.iinfo(np.intc).max]) +def test_string_size_dtype_errors(str_dt, size): + if size > 0: + size = size // np.dtype(f"{str_dt}1").itemsize + 1 + + with pytest.raises(ValueError): + np.dtype((str_dt, size)) + with pytest.raises(TypeError): + np.dtype(f"{str_dt}{size}") + + +@pytest.mark.parametrize("str_dt", "US") +def test_string_size_dtype_large_repr(str_dt): + size = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize + size_str = str(size) + + dtype = np.dtype((str_dt, size)) + assert size_str in dtype.str + assert size_str in str(dtype) + assert size_str in repr(dtype) + + +@pytest.mark.slow +@requires_memory(2 * np.iinfo(np.intc).max) +@pytest.mark.parametrize("str_dt", "US") +@pytest.mark.thread_unsafe(reason="crashes with low memory") +def test_large_string_coercion_error(str_dt): + very_large = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize + try: + large_string = "A" * (very_large + 1) + except Exception: + # We may not be able to create this Python string on 32bit. + pytest.skip("python failed to create huge string") + + class MyStr: + def __str__(self): + return large_string + + try: + # TypeError from NumPy, or OverflowError from 32bit Python. + with pytest.raises((TypeError, OverflowError)): + np.array([large_string], dtype=str_dt) + + # Same as above, but input has to be converted to a string. 
+ with pytest.raises((TypeError, OverflowError)): + np.array([MyStr()], dtype=str_dt) + except MemoryError: + # Catch memory errors, because `requires_memory` would do so. + raise AssertionError("Ops should raise before any large allocation.") + +@pytest.mark.slow +@requires_memory(2 * np.iinfo(np.intc).max) +@pytest.mark.parametrize("str_dt", "US") +@pytest.mark.thread_unsafe(reason="crashes with low memory") +def test_large_string_addition_error(str_dt): + very_large = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize + + a = np.array(["A" * very_large], dtype=str_dt) + b = np.array("B", dtype=str_dt) + try: + with pytest.raises(TypeError): + np.add(a, b) + with pytest.raises(TypeError): + np.add(a, a) + except MemoryError: + # Catch memory errors, because `requires_memory` would do so. + raise AssertionError("Ops should raise before any large allocation.") + + +def test_large_string_cast(): + very_large = np.iinfo(np.intc).max // 4 + # Could be nice to test very large path, but it makes too many huge + # allocations right now (need non-legacy cast loops for this). + # a = np.array([], dtype=np.dtype(("S", very_large))) + # assert a.astype("U").dtype.itemsize == very_large * 4 + + a = np.array([], dtype=np.dtype(("S", very_large + 1))) + # It is not perfect but OK if this raises a MemoryError during setup + # (this happens due clunky code and/or buffer setup.) 
+ with pytest.raises((TypeError, MemoryError)): + a.astype("U") + + +@pytest.mark.parametrize("dt", ["S1", "U1"]) +def test_in_place_mutiply_no_overflow(dt): + # see gh-30495 + a = np.array("a", dtype=dt) + a *= 20 + assert_array_equal(a, np.array("a", dtype=dt)) + + @pytest.mark.parametrize("dt", ["S", "U", "T"]) class TestMethods: @@ -142,9 +233,20 @@ def test_multiply_raises(self, dt): with pytest.raises(TypeError, match="unsupported type"): np.strings.multiply(np.array("abc", dtype=dt), 3.14) - with pytest.raises(MemoryError): + with pytest.raises(OverflowError): np.strings.multiply(np.array("abc", dtype=dt), sys.maxsize) + def test_inplace_multiply(self, dt): + arr = np.array(['foo ', 'bar'], dtype=dt) + arr *= 2 + if dt != "T": + assert_array_equal(arr, np.array(['foo ', 'barb'], dtype=dt)) + else: + assert_array_equal(arr, ['foo foo ', 'barbar']) + + with pytest.raises(OverflowError): + arr *= sys.maxsize + @pytest.mark.parametrize("i_dt", [np.int8, np.int16, np.int32, np.int64, np.int_]) def test_multiply_integer_dtypes(self, i_dt, dt): @@ -281,24 +383,26 @@ def test_str_len(self, in_, out, dt): ("", "xx", 0, None, -1), ("", "xx", 1, 1, -1), ("", "xx", MAX, 0, -1), - pytest.param(99*"a" + "b", "b", 0, None, 99, + pytest.param(99 * "a" + "b", "b", 0, None, 99, id="99*a+b-b-0-None-99"), - pytest.param(98*"a" + "ba", "ba", 0, None, 98, + pytest.param(98 * "a" + "ba", "ba", 0, None, 98, id="98*a+ba-ba-0-None-98"), - pytest.param(100*"a", "b", 0, None, -1, + pytest.param(100 * "a", "b", 0, None, -1, id="100*a-b-0-None--1"), - pytest.param(30000*"a" + 100*"b", 100*"b", 0, None, 30000, + pytest.param(30000 * "a" + 100 * "b", 100 * "b", 0, None, 30000, id="30000*a+100*b-100*b-0-None-30000"), - pytest.param(30000*"a", 100*"b", 0, None, -1, + pytest.param(30000 * "a", 100 * "b", 0, None, -1, id="30000*a-100*b-0-None--1"), - pytest.param(15000*"a" + 15000*"b", 15000*"b", 0, None, 15000, + pytest.param(15000 * "a" + 15000 * "b", 15000 * "b", 0, None, 15000, 
id="15000*a+15000*b-15000*b-0-None-15000"), - pytest.param(15000*"a" + 15000*"b", 15000*"c", 0, None, -1, + pytest.param(15000 * "a" + 15000 * "b", 15000 * "c", 0, None, -1, id="15000*a+15000*b-15000*c-0-None--1"), (["abcdefghiabc", "rrarrrrrrrrra"], ["def", "arr"], [0, 3], None, [3, -1]), ("AeÂĸ☃â‚Ŧ 😊" * 2, "😊", 0, None, 6), ("AeÂĸ☃â‚Ŧ 😊" * 2, "😊", 7, None, 13), + pytest.param("A" * (2 ** 17), r"[\w]+\Z", 0, None, -1, + id=r"A*2**17-[\w]+\Z-0-None--1"), ]) def test_find(self, a, sub, start, end, out, dt): if "😊" in a and dt == "S": @@ -347,17 +451,17 @@ def test_rfind(self, a, sub, start, end, out, dt): ("aaa", "", -1, None, 2), ("aaa", "", -10, None, 4), ("aaa", "aaaa", 0, None, 0), - pytest.param(98*"a" + "ba", "ba", 0, None, 1, + pytest.param(98 * "a" + "ba", "ba", 0, None, 1, id="98*a+ba-ba-0-None-1"), - pytest.param(30000*"a" + 100*"b", 100*"b", 0, None, 1, + pytest.param(30000 * "a" + 100 * "b", 100 * "b", 0, None, 1, id="30000*a+100*b-100*b-0-None-1"), - pytest.param(30000*"a", 100*"b", 0, None, 0, + pytest.param(30000 * "a", 100 * "b", 0, None, 0, id="30000*a-100*b-0-None-0"), - pytest.param(30000*"a" + 100*"ab", "ab", 0, None, 100, + pytest.param(30000 * "a" + 100 * "ab", "ab", 0, None, 100, id="30000*a+100*ab-ab-0-None-100"), - pytest.param(15000*"a" + 15000*"b", 15000*"b", 0, None, 1, + pytest.param(15000 * "a" + 15000 * "b", 15000 * "b", 0, None, 1, id="15000*a+15000*b-15000*b-0-None-1"), - pytest.param(15000*"a" + 15000*"b", 15000*"c", 0, None, 0, + pytest.param(15000 * "a" + 15000 * "b", 15000 * "c", 0, None, 0, id="15000*a+15000*b-15000*c-0-None-0"), ("", "", 0, None, 1), ("", "", 1, 1, 0), @@ -463,13 +567,16 @@ def test_endswith(self, a, suffix, start, end, out, dt): ("xyxzx", "x", "yxzx"), (["xyzzyhelloxyzzy", "hello"], ["xyz", "xyz"], ["helloxyzzy", "hello"]), + (["ba", "ac", "baa", "bba"], "b", ["a", "ac", "aa", "a"]), ]) def test_lstrip(self, a, chars, out, dt): a = np.array(a, dtype=dt) + out = np.array(out, dtype=dt) if chars is not None: 
chars = np.array(chars, dtype=dt) - out = np.array(out, dtype=dt) - assert_array_equal(np.strings.lstrip(a, chars), out) + assert_array_equal(np.strings.lstrip(a, chars), out) + else: + assert_array_equal(np.strings.lstrip(a), out) @pytest.mark.parametrize("a,chars,out", [ ("", None, ""), @@ -485,16 +592,20 @@ def test_lstrip(self, a, chars, out, dt): ("xyzzyhelloxyzzy", "xyz", "xyzzyhello"), ("hello", "xyz", "hello"), ("xyxz", "xyxz", ""), + (" ", None, ""), ("xyxzx", "x", "xyxz"), (["xyzzyhelloxyzzy", "hello"], ["xyz", "xyz"], ["xyzzyhello", "hello"]), + (["ab", "ac", "aab", "abb"], "b", ["a", "ac", "aa", "a"]), ]) def test_rstrip(self, a, chars, out, dt): a = np.array(a, dtype=dt) + out = np.array(out, dtype=dt) if chars is not None: chars = np.array(chars, dtype=dt) - out = np.array(out, dtype=dt) - assert_array_equal(np.strings.rstrip(a, chars), out) + assert_array_equal(np.strings.rstrip(a, chars), out) + else: + assert_array_equal(np.strings.rstrip(a), out) @pytest.mark.parametrize("a,chars,out", [ ("", None, ""), @@ -511,6 +622,7 @@ def test_rstrip(self, a, chars, out, dt): ("xyxzx", "x", "yxz"), (["xyzzyhelloxyzzy", "hello"], ["xyz", "xyz"], ["hello", "hello"]), + (["bab", "ac", "baab", "bbabb"], "b", ["a", "ac", "aa", "a"]), ]) def test_strip(self, a, chars, out, dt): a = np.array(a, dtype=dt) @@ -558,7 +670,7 @@ def test_strip(self, a, chars, out, dt): ("ABCADAA", "A", "", -1, "BCD"), ("BCD", "A", "", -1, "BCD"), ("*************", "A", "", -1, "*************"), - ("^"+"A"*1000+"^", "A", "", 999, "^A^"), + ("^" + "A" * 1000 + "^", "A", "", 999, "^A^"), ("the", "the", "", -1, ""), ("theater", "the", "", -1, "ater"), ("thethe", "the", "", -1, ""), @@ -716,6 +828,21 @@ def test_expandtabs(self, buf, tabsize, res, dt): def test_expandtabs_raises_overflow(self, dt): with pytest.raises(OverflowError, match="new string is too long"): np.strings.expandtabs(np.array("\ta\n\tb", dtype=dt), sys.maxsize) + np.strings.expandtabs(np.array("\ta\n\tb", dtype=dt), 2**61) 
+ + def test_expandtabs_length_not_cause_segfault(self, dt): + # see gh-28829 + with pytest.raises( + _UFuncNoLoopError, + match="did not contain a loop with signature matching types", + ): + np._core.strings._expandtabs_length.reduce(np.zeros(200)) + + with pytest.raises( + _UFuncNoLoopError, + match="did not contain a loop with signature matching types", + ): + np.strings.expandtabs(np.zeros(200)) FILL_ERROR = "The fill character must be exactly one character long" @@ -742,6 +869,7 @@ def test_rjust_raises_multiple_character_fill(self, dt): ('abc', 6, ' ', ' abc '), ('abc', 3, ' ', 'abc'), ('abc', 2, ' ', 'abc'), + ('abc', -2, ' ', 'abc'), ('abc', 10, '*', '***abc****'), ]) def test_center(self, buf, width, fillchar, res, dt): @@ -755,6 +883,7 @@ def test_center(self, buf, width, fillchar, res, dt): ('abc', 6, ' ', 'abc '), ('abc', 3, ' ', 'abc'), ('abc', 2, ' ', 'abc'), + ('abc', -2, ' ', 'abc'), ('abc', 10, '*', 'abc*******'), ]) def test_ljust(self, buf, width, fillchar, res, dt): @@ -768,6 +897,7 @@ def test_ljust(self, buf, width, fillchar, res, dt): ('abc', 6, ' ', ' abc'), ('abc', 3, ' ', 'abc'), ('abc', 2, ' ', 'abc'), + ('abc', -2, ' ', 'abc'), ('abc', 10, '*', '*******abc'), ]) def test_rjust(self, buf, width, fillchar, res, dt): @@ -789,6 +919,7 @@ def test_rjust(self, buf, width, fillchar, res, dt): ('-0123', 5, '-0123'), ('000', 3, '000'), ('34', 1, '34'), + ('34', -1, '34'), ('0034', 4, '0034'), ]) def test_zfill(self, buf, width, res, dt): @@ -850,6 +981,101 @@ def test_rpartition(self, buf, sep, res1, res2, res3, dt): assert_array_equal(act3, res3) assert_array_equal(act1 + act2 + act3, buf) + @pytest.mark.parametrize("args", [ + (None,), + (None, None), + (None, None, -1), + (0,), + (0, None), + (0, None, -1), + (1,), + (1, None), + (1, None, -1), + (3,), + (3, None), + (5,), + (5, None), + (5, 5), + (5, 5, -1), + (6,), # test index past the end + (6, None), + (6, None, -1), + (6, 7), # test start and stop index past the end + (4, 3), # test 
start > stop index + (-1,), + (-1, None), + (-1, None, -1), + (-3,), + (-3, None), + ([3, 4],), + ([3, 4], None), + ([2, 4],), + ([-3, 5],), + ([-3, 5], None), + ([-3, 5], None, -1), + ([0, -5],), + ([0, -5], None), + ([0, -5], None, -1), + (1, 4), + (-3, 5), + (None, -1), + (0, [4, 2]), + ([1, 2], [-1, -2]), + (1, 5, 2), + (None, None, -1), + ([0, 6], [-1, 0], [2, -1]), + ]) + @pytest.mark.parametrize("buf", [ + ["hello", "world"], + ['hello world', 'ÎŗÎĩΚι ΃ÎŋĪ… ÎēΌ΃ÎŧÎĩ', 'äŊ åĨŊä¸–į•Œ', '👋 🌍'], + ]) + def test_slice(self, args, buf, dt): + if dt == "S" and "äŊ åĨŊä¸–į•Œ" in buf: + pytest.skip("Bytes dtype does not support non-ascii input") + if len(buf) == 4: + args = tuple(s * 2 if isinstance(s, list) else s for s in args) + buf = np.array(buf, dtype=dt) + act = np.strings.slice(buf, *args) + bcast_args = tuple(np.broadcast_to(arg, buf.shape) for arg in args) + res = np.array([s[slice(*arg)] + for s, arg in zip(buf, zip(*bcast_args))], + dtype=dt) + assert_array_equal(act, res) + + def test_slice_unsupported(self, dt): + with pytest.raises(TypeError, match="did not contain a loop"): + np.strings.slice(np.array([1, 2, 3]), 4) + + regexp = (r"Cannot cast ufunc '_slice' input .* " + r"from .* to dtype\('int(64|32)'\)") + with pytest.raises(TypeError, match=regexp): + np.strings.slice(np.array(['foo', 'bar'], dtype=dt), + np.array(['foo', 'bar'], dtype=dt)) + + @pytest.mark.parametrize("int_dt", [np.int8, np.int16, np.int32, + np.int64, np.uint8, np.uint16, + np.uint32, np.uint64]) + def test_slice_int_type_promotion(self, int_dt, dt): + buf = np.array(["hello", "world"], dtype=dt) + np_slice = np.strings.slice + assert_array_equal(np_slice(buf, int_dt(4)), + np.array(["hell", "worl"], dtype=dt)) + assert_array_equal(np_slice(buf, np.array([4, 4], dtype=int_dt)), + np.array(["hell", "worl"], dtype=dt)) + + assert_array_equal(np_slice(buf, int_dt(2), int_dt(4)), + np.array(["ll", "rl"], dtype=dt)) + assert_array_equal(np_slice(buf, np.array([2, 2], dtype=int_dt), + 
np.array([4, 4], dtype=int_dt)), + np.array(["ll", "rl"], dtype=dt)) + + assert_array_equal(np_slice(buf, int_dt(0), int_dt(4), int_dt(2)), + np.array(["hl", "wr"], dtype=dt)) + assert_array_equal(np_slice(buf, + np.array([0, 0], dtype=int_dt), + np.array([4, 4], dtype=int_dt), + np.array([2, 2], dtype=int_dt)), + np.array(["hl", "wr"], dtype=dt)) @pytest.mark.parametrize("dt", ["U", "T"]) class TestMethodsWithUnicode: @@ -903,10 +1129,7 @@ def test_replace_unicode(self, buf, old, new, count, res, dt): '\U0001D7F6', '\U00011066', '\U000104A0', - pytest.param('\U0001F107', marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY_LT_7_3_16, - reason="PYPY bug in Py_UNICODE_ISALNUM", - strict=True)), + '\U0001F107', ]) def test_isalnum_unicode(self, in_, dt): in_ = np.array(in_, dtype=dt) @@ -920,10 +1143,7 @@ def test_isalnum_unicode(self, in_, dt): ('\U0001F40D', False), ('\U0001F46F', False), ('\u2177', True), - pytest.param('\U00010429', True, marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY_LT_7_3_16, - reason="PYPY bug in Py_UNICODE_ISLOWER", - strict=True)), + ('\U00010429', True), ('\U0001044E', True), ]) def test_islower_unicode(self, in_, out, dt): @@ -938,10 +1158,7 @@ def test_islower_unicode(self, in_, out, dt): ('\U0001F40D', False), ('\U0001F46F', False), ('\u2177', False), - pytest.param('\U00010429', False, marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY_LT_7_3_16, - reason="PYPY bug in Py_UNICODE_ISUPPER", - strict=True)), + ('\U00010429', False), ('\U0001044E', False), ]) def test_isupper_unicode(self, in_, out, dt): @@ -951,15 +1168,9 @@ def test_isupper_unicode(self, in_, out, dt): @pytest.mark.parametrize("in_,out", [ ('\u1FFc', True), ('Greek \u1FFcitlecases ...', True), - pytest.param('\U00010401\U00010429', True, marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY_LT_7_3_16, - reason="PYPY bug in Py_UNICODE_ISISTITLE", - strict=True)), + ('\U00010401\U00010429', True), ('\U00010427\U0001044E', 
True), - pytest.param('\U00010429', False, marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY_LT_7_3_16, - reason="PYPY bug in Py_UNICODE_ISISTITLE", - strict=True)), + ('\U00010429', False), ('\U0001044E', False), ('\U0001F40D', False), ('\U0001F46F', False), @@ -1064,6 +1275,60 @@ def test_rpartition(self, buf, sep, res1, res2, res3, dt): assert_array_equal(act3, res3) assert_array_equal(act1 + act2 + act3, buf) + @pytest.mark.parametrize("method", ["strip", "lstrip", "rstrip"]) + @pytest.mark.parametrize( + "source,strip", + [ + ("ÎģÎŧ", "Îŧ"), + ("ÎģÎŧ", "Îģ"), + ("Îģ" * 5 + "Îŧ" * 2, "Îŧ"), + ("Îģ" * 5 + "Îŧ" * 2, "Îģ"), + ("Îģ" * 5 + "A" + "Îŧ" * 2, "ÎŧÎģ"), + ("ÎģÎŧ" * 5, "Îŧ"), + ("ÎģÎŧ" * 5, "Îģ"), + ]) + def test_strip_functions_unicode(self, source, strip, method, dt): + src_array = np.array([source], dtype=dt) + + npy_func = getattr(np.strings, method) + py_func = getattr(str, method) + + expected = np.array([py_func(source, strip)], dtype=dt) + actual = npy_func(src_array, strip) + + assert_array_equal(actual, expected) + + @pytest.mark.parametrize("args", [ + (None,), + (0,), + (1,), + (5,), + (15,), + (22,), + (-1,), + (-3,), + ([3, 4],), + ([-5, 5],), + ([0, -8],), + (1, 12), + (-12, 15), + (None, -1), + (0, [17, 6]), + ([1, 2], [-1, -2]), + (1, 11, 2), + (None, None, -1), + ([0, 10], [-1, 0], [2, -1]), + ]) + def test_slice(self, args, dt): + buf = np.array(["ĐŸŅ€Đ¸Đ˛Đĩˁ҂ ā¤¨ā¤Žā¤¸āĨā¤¤āĨ‡ שָׁלוֹם", "đŸ˜€đŸ˜ƒđŸ˜„đŸ˜đŸ˜†đŸ˜…đŸ¤ŖđŸ˜‚đŸ™‚đŸ™ƒ"], + dtype=dt) + act = np.strings.slice(buf, *args) + bcast_args = tuple(np.broadcast_to(arg, buf.shape) for arg in args) + res = np.array([s[slice(*arg)] + for s, arg in zip(buf, zip(*bcast_args))], + dtype=dt) + assert_array_equal(act, res) + class TestMixedTypeMethods: def test_center(self): @@ -1141,21 +1406,21 @@ class TestReplaceOnArrays: def test_replace_count_and_size(self, dt): a = np.array(["0123456789" * i for i in range(4)], dtype=dt) r1 = np.strings.replace(a, "5", "ABCDE") - assert 
r1.dtype.itemsize == check_itemsize(3*10 + 3*4, dt) + assert r1.dtype.itemsize == check_itemsize(3 * 10 + 3 * 4, dt) r1_res = np.array(["01234ABCDE6789" * i for i in range(4)], dtype=dt) assert_array_equal(r1, r1_res) r2 = np.strings.replace(a, "5", "ABCDE", 1) - assert r2.dtype.itemsize == check_itemsize(3*10 + 4, dt) + assert r2.dtype.itemsize == check_itemsize(3 * 10 + 4, dt) r3 = np.strings.replace(a, "5", "ABCDE", 0) assert r3.dtype.itemsize == a.dtype.itemsize assert_array_equal(r3, a) # Negative values mean to replace all. r4 = np.strings.replace(a, "5", "ABCDE", -1) - assert r4.dtype.itemsize == check_itemsize(3*10 + 3*4, dt) + assert r4.dtype.itemsize == check_itemsize(3 * 10 + 3 * 4, dt) assert_array_equal(r4, r1) # We can do count on an element-by-element basis. r5 = np.strings.replace(a, "5", "ABCDE", [-1, -1, -1, 1]) - assert r5.dtype.itemsize == check_itemsize(3*10 + 4, dt) + assert r5.dtype.itemsize == check_itemsize(3 * 10 + 4, dt) assert_array_equal(r5, np.array( ["01234ABCDE6789" * i for i in range(3)] + ["01234ABCDE6789" + "0123456789" * 2], dtype=dt)) @@ -1171,3 +1436,71 @@ def test_replace_broadcasting(self, dt): dtype=dt)) r3 = np.strings.replace(a, ["0", "0,0", "0,0,0"], "X") assert_array_equal(r3, np.array(["X,X,X", "X,0", "X"], dtype=dt)) + + +class TestOverride: + @classmethod + def setup_class(cls): + class Override: + + def __array_function__(self, *args, **kwargs): + return "function" + + def __array_ufunc__(self, *args, **kwargs): + return "ufunc" + + cls.override = Override() + + @pytest.mark.parametrize("func, kwargs", [ + (np.strings.center, dict(width=10)), + (np.strings.capitalize, {}), + (np.strings.decode, {}), + (np.strings.encode, {}), + (np.strings.expandtabs, {}), + (np.strings.ljust, dict(width=10)), + (np.strings.lower, {}), + (np.strings.mod, dict(values=2)), + (np.strings.multiply, dict(i=2)), + (np.strings.partition, dict(sep="foo")), + (np.strings.rjust, dict(width=10)), + (np.strings.rpartition, dict(sep="foo")), + 
(np.strings.swapcase, {}), + (np.strings.title, {}), + (np.strings.translate, dict(table=None)), + (np.strings.upper, {}), + (np.strings.zfill, dict(width=10)), + ]) + def test_override_function(self, func, kwargs): + assert func(self.override, **kwargs) == "function" + + @pytest.mark.parametrize("func, args, kwargs", [ + (np.strings.add, (None, ), {}), + (np.strings.lstrip, (), {}), + (np.strings.rstrip, (), {}), + (np.strings.strip, (), {}), + (np.strings.equal, (None, ), {}), + (np.strings.not_equal, (None, ), {}), + (np.strings.greater_equal, (None, ), {}), + (np.strings.less_equal, (None, ), {}), + (np.strings.greater, (None, ), {}), + (np.strings.less, (None, ), {}), + (np.strings.count, ("foo", ), {}), + (np.strings.endswith, ("foo", ), {}), + (np.strings.find, ("foo", ), {}), + (np.strings.index, ("foo", ), {}), + (np.strings.isalnum, (), {}), + (np.strings.isalpha, (), {}), + (np.strings.isdecimal, (), {}), + (np.strings.isdigit, (), {}), + (np.strings.islower, (), {}), + (np.strings.isnumeric, (), {}), + (np.strings.isspace, (), {}), + (np.strings.istitle, (), {}), + (np.strings.isupper, (), {}), + (np.strings.rfind, ("foo", ), {}), + (np.strings.rindex, ("foo", ), {}), + (np.strings.startswith, ("foo", ), {}), + (np.strings.str_len, (), {}), + ]) + def test_override_ufunc(self, func, args, kwargs): + assert func(self.override, *args, **kwargs) == "ufunc" diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index dfe20bc577a9..d8ca3f2364f4 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -1,33 +1,40 @@ -import warnings -import itertools -import sys import ctypes as ct +import inspect +import itertools import pickle +import sys +import warnings import pytest from pytest import param import numpy as np -import numpy._core.umath as ncu -import numpy._core._umath_tests as umt -import numpy.linalg._umath_linalg as uml import numpy._core._operand_flag_tests as opflag_tests import 
numpy._core._rational_tests as _rational_tests +import numpy._core._umath_tests as umt +import numpy._core.umath as ncu +import numpy.linalg._umath_linalg as uml from numpy.exceptions import AxisError from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_array_equal, - assert_almost_equal, assert_array_almost_equal, assert_no_warnings, - assert_allclose, HAS_REFCOUNT, suppress_warnings, IS_WASM, IS_PYPY, - ) + HAS_REFCOUNT, + IS_WASM, + assert_, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_no_warnings, + assert_raises, +) from numpy.testing._private.utils import requires_memory - UNARY_UFUNCS = [obj for obj in np._core.umath.__dict__.values() if isinstance(obj, np.ufunc)] UNARY_OBJECT_UFUNCS = [uf for uf in UNARY_UFUNCS if "O->O" in uf.types] # Remove functions that do not support `floats` -UNARY_OBJECT_UFUNCS.remove(getattr(np, 'bitwise_count')) +UNARY_OBJECT_UFUNCS.remove(np.bitwise_count) class TestUfuncKwargs: @@ -153,16 +160,16 @@ def test_binary_PyUFunc_On_Om_method(self, foo=foo): def test_python_complex_conjugate(self): # The conjugate ufunc should fall back to calling the method: - arr = np.array([1+2j, 3-4j], dtype="O") + arr = np.array([1 + 2j, 3 - 4j], dtype="O") assert isinstance(arr[0], complex) res = np.conjugate(arr) assert res.dtype == np.dtype("O") - assert_array_equal(res, np.array([1-2j, 3+4j], dtype="O")) + assert_array_equal(res, np.array([1 - 2j, 3 + 4j], dtype="O")) @pytest.mark.parametrize("ufunc", UNARY_OBJECT_UFUNCS) def test_unary_PyUFunc_O_O_method_full(self, ufunc): """Compare the result of the object loop with non-object one""" - val = np.float64(np.pi/4) + val = np.float64(np.pi / 4) class MyFloat(np.float64): def __getattr__(self, attr): @@ -207,7 +214,6 @@ def test_pickle_withstring(self): b"(S'numpy._core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.") assert_(pickle.loads(astring) is np.cos) - @pytest.mark.skipif(IS_PYPY, reason="'is' check does 
not work on PyPy") def test_pickle_name_is_qualname(self): # This tests that a simplification of our ufunc pickle code will # lead to allowing qualnames as names. Future ufuncs should @@ -310,6 +316,7 @@ def test_all_ufunc(self): # from include/numpy/ufuncobject.h size_inferred = 2 can_ignore = 4 + def test_signature0(self): # the arguments to test_signature are: nin, nout, core_signature enabled, num_dims, ixs, flags, sizes = umt.test_signature( @@ -337,7 +344,7 @@ def test_signature2(self): assert_equal(enabled, 1) assert_equal(num_dims, (2, 1, 1)) assert_equal(ixs, (0, 1, 2, 3)) - assert_equal(flags, (self.size_inferred,)*4) + assert_equal(flags, (self.size_inferred,) * 4) assert_equal(sizes, (-1, -1, -1, -1)) def test_signature3(self): @@ -346,7 +353,7 @@ def test_signature3(self): assert_equal(enabled, 1) assert_equal(num_dims, (2, 1, 2)) assert_equal(ixs, (0, 1, 2, 1, 3)) - assert_equal(flags, (self.size_inferred,)*4) + assert_equal(flags, (self.size_inferred,) * 4) assert_equal(sizes, (-1, -1, -1, -1)) def test_signature4(self): @@ -356,7 +363,7 @@ def test_signature4(self): assert_equal(enabled, 1) assert_equal(num_dims, (2, 2, 2)) assert_equal(ixs, (0, 1, 1, 2, 0, 2)) - assert_equal(flags, (self.size_inferred,)*3) + assert_equal(flags, (self.size_inferred,) * 3) assert_equal(sizes, (-1, -1, -1)) def test_signature5(self): @@ -436,14 +443,13 @@ def test_get_signature(self): assert_equal(np.vecdot.signature, "(n),(n)->()") def test_forced_sig(self): - a = 0.5*np.arange(3, dtype='f8') + a = 0.5 * np.arange(3, dtype='f8') assert_equal(np.add(a, 0.5), [0.5, 1, 1.5]) - with pytest.warns(DeprecationWarning): - assert_equal(np.add(a, 0.5, sig='i', casting='unsafe'), [0, 0, 1]) + with assert_raises(TypeError): + np.add(a, 0.5, sig='i', casting='unsafe') assert_equal(np.add(a, 0.5, sig='ii->i', casting='unsafe'), [0, 0, 1]) - with pytest.warns(DeprecationWarning): - assert_equal(np.add(a, 0.5, sig=('i4',), casting='unsafe'), - [0, 0, 1]) + with 
assert_raises(TypeError): + np.add(a, 0.5, sig=('i4',), casting='unsafe') assert_equal(np.add(a, 0.5, sig=('i4', 'i4', 'i4'), casting='unsafe'), [0, 0, 1]) @@ -451,17 +457,15 @@ def test_forced_sig(self): np.add(a, 0.5, out=b) assert_equal(b, [0.5, 1, 1.5]) b[:] = 0 - with pytest.warns(DeprecationWarning): + with assert_raises(TypeError): np.add(a, 0.5, sig='i', out=b, casting='unsafe') - assert_equal(b, [0, 0, 1]) - b[:] = 0 + assert_equal(b, [0, 0, 0]) np.add(a, 0.5, sig='ii->i', out=b, casting='unsafe') assert_equal(b, [0, 0, 1]) b[:] = 0 - with pytest.warns(DeprecationWarning): + with assert_raises(TypeError): np.add(a, 0.5, sig=('i4',), out=b, casting='unsafe') - assert_equal(b, [0, 0, 1]) - b[:] = 0 + assert_equal(b, [0, 0, 0]) np.add(a, 0.5, sig=('i4', 'i4', 'i4'), out=b, casting='unsafe') assert_equal(b, [0, 0, 1]) @@ -486,8 +490,8 @@ def test_signature_dtype_type(self): np.add(3, 4, signature=(float_dtype, float_dtype, None)) @pytest.mark.parametrize("get_kwarg", [ - lambda dt: dict(dtype=x), - lambda dt: dict(signature=(x, None, None))]) + param(lambda dt: {"dtype": dt}, id="dtype"), + param(lambda dt: {"signature": (dt, None, None)}, id="signature")]) def test_signature_dtype_instances_allowed(self, get_kwarg): # We allow certain dtype instances when there is a clear singleton # and the given one is equivalent; mainly for backcompat. 
@@ -497,13 +501,9 @@ def test_signature_dtype_instances_allowed(self, get_kwarg): assert int64 is not int64_2 assert np.add(1, 2, **get_kwarg(int64_2)).dtype == int64 - td = np.timedelta(2, "s") + td = np.timedelta64(2, "s") assert np.add(td, td, **get_kwarg("m8")).dtype == "m8[s]" - @pytest.mark.parametrize("get_kwarg", [ - param(lambda x: dict(dtype=x), id="dtype"), - param(lambda x: dict(signature=(x, None, None)), id="signature")]) - def test_signature_dtype_instances_allowed(self, get_kwarg): msg = "The `dtype` and `signature` arguments to ufuncs" with pytest.raises(TypeError, match=msg): @@ -537,9 +537,6 @@ def test_partial_signature_mismatch_with_cache(self): with pytest.raises(TypeError): np.add(np.float16(1), np.uint64(2), sig=("e", "d", None)) - @pytest.mark.xfail(np._get_promotion_state() != "legacy", - reason="NEP 50 impl breaks casting checks when `dtype=` is used " - "together with python scalars.") def test_use_output_signature_for_all_arguments(self): # Test that providing only `dtype=` or `signature=(None, None, dtype)` # is sufficient if falling back to a homogeneous signature works. @@ -616,6 +613,31 @@ def call_ufunc(arr, **kwargs): expected = call_ufunc(arr.astype(np.float64)) # upcast assert_array_equal(expected, res) + @pytest.mark.parametrize("ufunc", [np.add, np.equal]) + def test_cast_safety_scalar(self, ufunc): + # We test add and equal, because equal has special scalar handling + # Note that the "equiv" casting behavior should maybe be considered + # a current implementation detail. + with pytest.raises(TypeError): + # this picks an integer loop, which is not safe + ufunc(3., 4., dtype=int, casting="safe") + + with pytest.raises(TypeError): + # We accept python float as float64 but not float32 for equiv. 
+ ufunc(3., 4., dtype="float32", casting="equiv") + + # Special case for object and equal (note that equiv implies safe) + ufunc(3, 4, dtype=object, casting="equiv") + # Picks a double loop for both, first is equiv, second safe: + ufunc(np.array([3.]), 3., casting="equiv") + ufunc(np.array([3.]), 3, casting="safe") + ufunc(np.array([3]), 3, casting="equiv") + + def test_cast_safety_scalar_special(self): + # We allow this (and it succeeds) via object, although the equiv + # part may not be important. + np.equal(np.array([3]), 2**300, casting="equiv") + def test_true_divide(self): a = np.array(10) b = np.array(20) @@ -631,9 +653,9 @@ def test_true_divide(self): # Check with no output type specified if tc in 'FDG': - tgt = complex(x)/complex(y) + tgt = complex(x) / complex(y) else: - tgt = float(x)/float(y) + tgt = float(x) / float(y) res = np.true_divide(x, y) rtol = max(np.finfo(res).resolution, 1e-15) @@ -642,7 +664,7 @@ def test_true_divide(self): if tc in 'bhilqBHILQ': assert_(res.dtype.name == 'float64') else: - assert_(res.dtype.name == dt.name ) + assert_(res.dtype.name == dt.name) # Check with output type specified. 
This also checks for the # incorrect casts in issue gh-3484 because the unary '-' does @@ -659,11 +681,11 @@ def test_true_divide(self): # Casting complex to float is not allowed assert_raises(TypeError, np.true_divide, x, y, dtype=dtout) else: - tgt = float(x)/float(y) + tgt = float(x) / float(y) rtol = max(np.finfo(dtout).resolution, 1e-15) # The value of tiny for double double is NaN - with suppress_warnings() as sup: - sup.filter(UserWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) if not np.isnan(np.finfo(dtout).tiny): atol = max(np.finfo(dtout).tiny, 3e-308) else: @@ -679,11 +701,11 @@ def test_true_divide(self): for tcout in 'FDG': dtout = np.dtype(tcout) - tgt = complex(x)/complex(y) + tgt = complex(x) / complex(y) rtol = max(np.finfo(dtout).resolution, 1e-15) # The value of tiny for double double is NaN - with suppress_warnings() as sup: - sup.filter(UserWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) if not np.isnan(np.finfo(dtout).tiny): atol = max(np.finfo(dtout).tiny, 3e-308) else: @@ -802,20 +824,77 @@ def test_vecdot(self): actual3 = np.vecdot(arr1.astype("object"), arr2) assert_array_equal(actual3, expected.astype("object")) - def test_vecdot_complex(self): - arr1 = np.array([1, 2j, 3]) - arr2 = np.array([1, 2, 3]) + def test_matvec(self): + arr1 = np.arange(6).reshape((2, 3)) + arr2 = np.arange(3).reshape((1, 3)) + + actual = np.matvec(arr1, arr2) + expected = np.array([[5, 14]]) - actual = np.vecdot(arr1, arr2) - expected = np.array([10-4j]) assert_array_equal(actual, expected) - actual2 = np.vecdot(arr2, arr1) - assert_array_equal(actual2, expected.conj()) + actual2 = np.matvec(arr1.T, arr2.T, axes=[(-1, -2), -2, -1]) + assert_array_equal(actual2, expected) - actual3 = np.vecdot(arr1.astype("object"), arr2.astype("object")) + actual3 = np.matvec(arr1.astype("object"), arr2) assert_array_equal(actual3, expected.astype("object")) + @pytest.mark.parametrize("vec", [ + 
np.array([[1., 2., 3.], [4., 5., 6.]]), + np.array([[1., 2j, 3.], [4., 5., 6j]]), + np.array([[1., 2., 3.], [4., 5., 6.]], dtype=object), + np.array([[1., 2j, 3.], [4., 5., 6j]], dtype=object)]) + @pytest.mark.parametrize("matrix", [ + None, + np.array([[1. + 1j, 0.5, -0.5j], + [0.25, 2j, 0.], + [4., 0., -1j]])]) + def test_vecmatvec_identity(self, matrix, vec): + """Check that (x†A)x equals x†(Ax).""" + mat = matrix if matrix is not None else np.eye(3) + matvec = np.matvec(mat, vec) # Ax + vecmat = np.vecmat(vec, mat) # x†A + if matrix is None: + assert_array_equal(matvec, vec) + assert_array_equal(vecmat.conj(), vec) + assert_array_equal(matvec, (mat @ vec[..., np.newaxis]).squeeze(-1)) + assert_array_equal(vecmat, (vec[..., np.newaxis].mT.conj() + @ mat).squeeze(-2)) + expected = np.einsum('...i,ij,...j', vec.conj(), mat, vec) + vec_matvec = (vec.conj() * matvec).sum(-1) + vecmat_vec = (vecmat * vec).sum(-1) + assert_array_equal(vec_matvec, expected) + assert_array_equal(vecmat_vec, expected) + + @pytest.mark.parametrize("ufunc, shape1, shape2, conj", [ + (np.vecdot, (3,), (3,), True), + (np.vecmat, (3,), (3, 1), True), + (np.matvec, (1, 3), (3,), False), + (np.matmul, (1, 3), (3, 1), False), + ]) + def test_vecdot_matvec_vecmat_complex(self, ufunc, shape1, shape2, conj): + arr1 = np.array([1, 2j, 3]) + arr2 = np.array([1, 2, 3]) + + actual1 = ufunc(arr1.reshape(shape1), arr2.reshape(shape2)) + expected1 = np.array(((arr1.conj() if conj else arr1) * arr2).sum(), + ndmin=min(len(shape1), len(shape2))) + assert_array_equal(actual1, expected1) + # This would fail for conj=True, since matmul omits the conjugate. 
+ if not conj: + assert_array_equal(arr1.reshape(shape1) @ arr2.reshape(shape2), + expected1) + + actual2 = ufunc(arr2.reshape(shape1), arr1.reshape(shape2)) + expected2 = np.array(((arr2.conj() if conj else arr2) * arr1).sum(), + ndmin=min(len(shape1), len(shape2))) + assert_array_equal(actual2, expected2) + + actual3 = ufunc(arr1.reshape(shape1).astype("object"), + arr2.reshape(shape2).astype("object")) + expected3 = expected1.astype(object) + assert_array_equal(actual3, expected3) + def test_vecdot_subclass(self): class MySubclass(np.ndarray): pass @@ -844,10 +923,10 @@ def test_broadcast(self): msg = "broadcast" a = np.arange(4).reshape((2, 1, 2)) b = np.arange(4).reshape((1, 2, 2)) - assert_array_equal(np.vecdot(a, b), np.sum(a*b, axis=-1), err_msg=msg) + assert_array_equal(np.vecdot(a, b), np.sum(a * b, axis=-1), err_msg=msg) msg = "extend & broadcast loop dimensions" b = np.arange(4).reshape((2, 2)) - assert_array_equal(np.vecdot(a, b), np.sum(a*b, axis=-1), err_msg=msg) + assert_array_equal(np.vecdot(a, b), np.sum(a * b, axis=-1), err_msg=msg) # Broadcast in core dimensions should fail a = np.arange(8).reshape((4, 2)) b = np.arange(4).reshape((4, 1)) @@ -915,31 +994,31 @@ def test_out_broadcast_errors(self, arr, out): def test_type_cast(self): msg = "type cast" a = np.arange(6, dtype='short').reshape((2, 3)) - assert_array_equal(np.vecdot(a, a), np.sum(a*a, axis=-1), + assert_array_equal(np.vecdot(a, a), np.sum(a * a, axis=-1), err_msg=msg) msg = "type cast on one argument" a = np.arange(6).reshape((2, 3)) b = a + 0.1 - assert_array_almost_equal(np.vecdot(a, b), np.sum(a*b, axis=-1), + assert_array_almost_equal(np.vecdot(a, b), np.sum(a * b, axis=-1), err_msg=msg) def test_endian(self): msg = "big endian" a = np.arange(6, dtype='>i4').reshape((2, 3)) - assert_array_equal(np.vecdot(a, a), np.sum(a*a, axis=-1), + assert_array_equal(np.vecdot(a, a), np.sum(a * a, axis=-1), err_msg=msg) msg = "little endian" a = np.arange(6, dtype='()' @@ -1218,18 +1338,18 @@ 
def test_innerwt(self): a = np.arange(6).reshape((2, 3)) b = np.arange(10, 16).reshape((2, 3)) w = np.arange(20, 26).reshape((2, 3)) - assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1)) + assert_array_equal(umt.innerwt(a, b, w), np.sum(a * b * w, axis=-1)) a = np.arange(100, 124).reshape((2, 3, 4)) b = np.arange(200, 224).reshape((2, 3, 4)) w = np.arange(300, 324).reshape((2, 3, 4)) - assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1)) + assert_array_equal(umt.innerwt(a, b, w), np.sum(a * b * w, axis=-1)) def test_innerwt_empty(self): """Test generalized ufunc with zero-sized operands""" a = np.array([], dtype='f8') b = np.array([], dtype='f8') w = np.array([], dtype='f8') - assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1)) + assert_array_equal(umt.innerwt(a, b, w), np.sum(a * b * w, axis=-1)) def test_cross1d(self): """Test with fixed-sized signature.""" @@ -1330,18 +1450,18 @@ def test_matrix_multiply_umath_empty(self): def compare_matrix_multiply_results(self, tp): d1 = np.array(np.random.rand(2, 3, 4), dtype=tp) d2 = np.array(np.random.rand(2, 3, 4), dtype=tp) - msg = "matrix multiply on type %s" % d1.dtype.name + msg = f"matrix multiply on type {d1.dtype.name}" def permute_n(n): if n == 1: return ([0],) ret = () - base = permute_n(n-1) + base = permute_n(n - 1) for perm in base: for i in range(n): - new = perm + [n-1] - new[n-1] = new[i] - new[i] = n-1 + new = perm + [n - 1] + new[n - 1] = new[i] + new[i] = n - 1 ret += (new,) return ret @@ -1349,17 +1469,17 @@ def slice_n(n): if n == 0: return ((),) ret = () - base = slice_n(n-1) + base = slice_n(n - 1) for sl in base: - ret += (sl+(slice(None),),) - ret += (sl+(slice(0, 1),),) + ret += (sl + (slice(None),),) + ret += (sl + (slice(0, 1),),) return ret def broadcastable(s1, s2): - return s1 == s2 or s1 == 1 or s2 == 1 + return s1 == s2 or 1 in {s1, s2} permute_3 = permute_n(3) - slice_3 = slice_n(3) + ((slice(None, None, -1),)*3,) + slice_3 = slice_n(3) + ((slice(None, 
None, -1),) * 3,) ref = True for p1 in permute_3: @@ -1375,9 +1495,8 @@ def broadcastable(s1, s2): assert_array_almost_equal( umt.matrix_multiply(a1, a2), np.sum(a2[..., np.newaxis].swapaxes(-3, -1) * - a1[..., np.newaxis,:], axis=-1), - err_msg=msg + ' %s %s' % (str(a1.shape), - str(a2.shape))) + a1[..., np.newaxis, :], axis=-1), + err_msg=msg + f' {str(a1.shape)} {str(a2.shape)}') assert_equal(ref, True, err_msg="reference check") @@ -1433,7 +1552,8 @@ def __eq__(self, other): arr1d = np.array([HasComparisons()]) assert_equal(arr1d == arr1d, np.array([True])) - assert_equal(np.equal(arr1d, arr1d), np.array([True])) # normal behavior is a cast + # normal behavior is a cast + assert_equal(np.equal(arr1d, arr1d), np.array([True])) assert_equal(np.equal(arr1d, arr1d, dtype=object), np.array(['=='])) def test_object_array_reduction(self): @@ -1463,7 +1583,7 @@ def test_object_array_accumulate_inplace(self): np.add.accumulate(arr, out=arr) np.add.accumulate(arr, out=arr) assert_array_equal(arr, - np.array([[1]*i for i in [1, 3, 6, 10]], dtype=object), + np.array([[1] * i for i in [1, 3, 6, 10]], dtype=object), ) # And the same if the axis argument is used @@ -1472,7 +1592,7 @@ def test_object_array_accumulate_inplace(self): np.add.accumulate(arr, out=arr, axis=-1) np.add.accumulate(arr, out=arr, axis=-1) assert_array_equal(arr[0, :], - np.array([[2]*i for i in [1, 3, 6, 10]], dtype=object), + np.array([[2] * i for i in [1, 3, 6, 10]], dtype=object), ) def test_object_array_accumulate_failure(self): @@ -1589,9 +1709,6 @@ def test_where_param(self): assert_equal(a, [[0, 27], [14, 5]]) def test_where_param_buffer_output(self): - # This test is temporarily skipped because it requires - # adding masking features to the nditer to work properly - # With casting on output a = np.ones(10, np.int64) b = np.ones(10, np.int64) @@ -1603,12 +1720,12 @@ def test_where_param_alloc(self): # With casting and allocated output a = np.array([1], dtype=np.int64) m = np.array([True], 
dtype=bool) - assert_equal(np.sqrt(a, where=m), [1]) + assert_equal(np.sqrt(a, where=m, out=None), [1]) # No casting and allocated output a = np.array([1], dtype=np.float64) m = np.array([True], dtype=bool) - assert_equal(np.sqrt(a, where=m), [1]) + assert_equal(np.sqrt(a, where=m, out=None), [1]) def test_where_with_broadcasting(self): # See gh-17198 @@ -1622,55 +1739,63 @@ def test_where_with_broadcasting(self): assert_array_equal((a[where] < b_where), out[where].astype(bool)) assert not out[~where].any() # outside mask, out remains all 0 - def check_identityless_reduction(self, a): - # np.minimum.reduce is an identityless reduction + def test_where_warns(self): + a = np.arange(7) + mask = a % 2 == 0 + with pytest.warns(UserWarning, match="'where' used without 'out'"): + result1 = np.add(a, a, where=mask) + # Does not warn + result2 = np.add(a, a, where=mask, out=None) + # Sanity check + assert np.all(result1[::2] == [0, 4, 8, 12]) + assert np.all(result2[::2] == [0, 4, 8, 12]) + + @staticmethod + def identityless_reduce_arrs(): + yield np.empty((2, 3, 4), order='C') + yield np.empty((2, 3, 4), order='F') + # Mixed order (reduce order differs outer) + yield np.empty((2, 4, 3), order='C').swapaxes(1, 2) + # Reversed order + yield np.empty((2, 3, 4), order='C')[::-1, ::-1, ::-1] + # Not contiguous + yield np.empty((3, 5, 4), order='C').swapaxes(1, 2)[1:, 1:, 1:] + # Not contiguous and not aligned + a = np.empty((3 * 4 * 5 * 8 + 1,), dtype='i1') + a = a[1:].view(dtype='f8') + a = a.reshape((3, 4, 5)) + a = a[1:, 1:, 1:] + yield a - # Verify that it sees the zero at various positions + @pytest.mark.parametrize("arrs", identityless_reduce_arrs()) + @pytest.mark.parametrize("pos", [(1, 0, 0), (0, 1, 0), (0, 0, 1)]) + def test_identityless_reduction(self, arrs, pos): + # np.minimum.reduce is an identityless reduction + a = arrs.copy() a[...] 
= 1 - a[1, 0, 0] = 0 - assert_equal(np.minimum.reduce(a, axis=None), 0) - assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1]) - assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1]) - assert_equal(np.minimum.reduce(a, axis=(1, 2)), [1, 0]) - assert_equal(np.minimum.reduce(a, axis=0), - [[0, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=1), - [[1, 1, 1, 1], [0, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=2), - [[1, 1, 1], [0, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=()), a) + a[pos] = 0 - a[...] = 1 - a[0, 1, 0] = 0 - assert_equal(np.minimum.reduce(a, axis=None), 0) - assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1]) - assert_equal(np.minimum.reduce(a, axis=(0, 2)), [1, 0, 1]) - assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1]) - assert_equal(np.minimum.reduce(a, axis=0), - [[1, 1, 1, 1], [0, 1, 1, 1], [1, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=1), - [[0, 1, 1, 1], [1, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=2), - [[1, 0, 1], [1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=()), a) + for axis in [None, (0, 1), (0, 2), (1, 2), 0, 1, 2, ()]: + if axis is None: + axes = np.array([], dtype=np.intp) + else: + axes = np.delete(np.arange(a.ndim), axis) - a[...] 
= 1 - a[0, 0, 1] = 0 - assert_equal(np.minimum.reduce(a, axis=None), 0) - assert_equal(np.minimum.reduce(a, axis=(0, 1)), [1, 0, 1, 1]) - assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1]) - assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1]) - assert_equal(np.minimum.reduce(a, axis=0), - [[1, 0, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=1), - [[1, 0, 1, 1], [1, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=2), - [[0, 1, 1], [1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=()), a) + expected_pos = tuple(np.array(pos)[axes]) + expected = np.ones(np.array(a.shape)[axes]) + expected[expected_pos] = 0 + + res = np.minimum.reduce(a, axis=axis) + assert_equal(res, expected, strict=True) + + res = np.full_like(res, np.nan) + np.minimum.reduce(a, axis=axis, out=res) + assert_equal(res, expected, strict=True) @requires_memory(6 * 1024**3) @pytest.mark.skipif(sys.maxsize < 2**32, reason="test array too large for 32bit platform") + @pytest.mark.thread_unsafe(reason="crashes with low memory") def test_identityless_reduction_huge_array(self): # Regression test for gh-20921 (copying identity incorrectly failed) arr = np.zeros((2, 2**31), 'uint8') @@ -1681,30 +1806,6 @@ def test_identityless_reduction_huge_array(self): assert res[0] == 3 assert res[-1] == 4 - def test_identityless_reduction_corder(self): - a = np.empty((2, 3, 4), order='C') - self.check_identityless_reduction(a) - - def test_identityless_reduction_forder(self): - a = np.empty((2, 3, 4), order='F') - self.check_identityless_reduction(a) - - def test_identityless_reduction_otherorder(self): - a = np.empty((2, 4, 3), order='C').swapaxes(1, 2) - self.check_identityless_reduction(a) - - def test_identityless_reduction_noncontig(self): - a = np.empty((3, 5, 4), order='C').swapaxes(1, 2) - a = a[1:, 1:, 1:] - self.check_identityless_reduction(a) - - def test_identityless_reduction_noncontig_unaligned(self): - a = np.empty((3*4*5*8 + 1,), dtype='i1') - a = 
a[1:].view(dtype='f8') - a.shape = (3, 4, 5) - a = a[1:, 1:, 1:] - self.check_identityless_reduction(a) - def test_reduce_identity_depends_on_loop(self): """ The type of the result should always depend on the selected loop, not @@ -1820,7 +1921,7 @@ def test_identityless_reduction_nonreorderable(self): assert_raises(ValueError, np.divide.reduce, a, axis=(0, 1)) def test_reduce_zero_axis(self): - # If we have a n x m array and do a reduction with axis=1, then we are + # If we have an n x m array and do a reduction with axis=1, then we are # doing n reductions, and each reduction takes an m-element array. For # a reduction operation without an identity, then: # n > 0, m > 0: fine @@ -2007,10 +2108,40 @@ def __rmul__(self, other): MyThing.rmul_count += 1 return self - np.float64(5)*MyThing((3, 3)) + np.float64(5) * MyThing((3, 3)) assert_(MyThing.rmul_count == 1, MyThing.rmul_count) assert_(MyThing.getitem_count <= 2, MyThing.getitem_count) + def test_array_wrap_array_priority(self): + class ArrayPriorityBase(np.ndarray): + @classmethod + def __array_wrap__(cls, array, context=None, return_scalar=False): + return cls + + class ArrayPriorityMinus0(ArrayPriorityBase): + __array_priority__ = 0 + + class ArrayPriorityMinus1000(ArrayPriorityBase): + __array_priority__ = -1000 + + class ArrayPriorityMinus1000b(ArrayPriorityBase): + __array_priority__ = -1000 + + class ArrayPriorityMinus2000(ArrayPriorityBase): + __array_priority__ = -2000 + + x = np.ones(2).view(ArrayPriorityMinus1000) + xb = np.ones(2).view(ArrayPriorityMinus1000b) + y = np.ones(2).view(ArrayPriorityMinus2000) + + assert np.add(x, y) is ArrayPriorityMinus1000 + assert np.add(y, x) is ArrayPriorityMinus1000 + assert np.add(x, xb) is ArrayPriorityMinus1000 + assert np.add(xb, x) is ArrayPriorityMinus1000b + y_minus0 = np.zeros(2).view(ArrayPriorityMinus0) + assert np.add(np.zeros(2), y_minus0) is ArrayPriorityMinus0 + assert type(np.add(xb, x, np.zeros(2))) is np.ndarray + @pytest.mark.parametrize("a", ( 
np.arange(10, dtype=int), np.arange(10, dtype=_rational_tests.rational), @@ -2069,7 +2200,7 @@ def test_ufunc_at_inner_loops(self, typecode, ufunc): for i, v in zip(indx, vals): # Make sure all the work happens inside the ufunc # in order to duplicate error/warning handling - ufunc(atag[i], v, out=atag[i:i+1], casting="unsafe") + ufunc(atag[i], v, out=atag[i:i + 1], casting="unsafe") assert_equal(atag, a) # If w_loop warned, make sure w_at warned as well if len(w_loop) > 0: @@ -2128,14 +2259,14 @@ def test_cast_index_fastpath(self): np.add.at(arr, index, values) assert arr[0] == len(values) - @pytest.mark.parametrize("value", [ - np.ones(1), np.ones(()), np.float64(1.), 1.]) - def test_ufunc_at_scalar_value_fastpath(self, value): - arr = np.zeros(1000) - # index must be cast, which may be buffered in chunks: - index = np.repeat(np.arange(1000), 2) - np.add.at(arr, index, value) - assert_array_equal(arr, np.full_like(arr, 2 * value)) + def test_ufunc_at_scalar_value_fastpath(self): + values = [np.ones(1), np.ones(()), np.float64(1.), 1.] 
+ for value in values: + arr = np.zeros(1000) + # index must be cast, which may be buffered in chunks: + index = np.repeat(np.arange(1000), 2) + np.add.at(arr, index, value) + assert_array_equal(arr, np.full_like(arr, 2 * value)) def test_ufunc_at_multiD(self): a = np.arange(9).reshape(3, 3) @@ -2311,10 +2442,9 @@ def test_at_broadcast_failure(self): with pytest.raises(ValueError): np.add.at(arr, [0, 1], [1, 2, 3]) - def test_reduce_arguments(self): f = np.add.reduce - d = np.ones((5,2), dtype=int) + d = np.ones((5, 2), dtype=int) o = np.ones((2,), dtype=d.dtype) r = o * 5 assert_equal(f(d), r) @@ -2383,11 +2513,11 @@ class MyA(np.ndarray): def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): return getattr(ufunc, method)(*(input.view(np.ndarray) for input in inputs), **kwargs) - a = np.arange(12.).reshape(4,3) + a = np.arange(12.).reshape(4, 3) ra = a.view(dtype=('f8,f8,f8')).squeeze() mra = ra.view(MyA) - target = np.array([ True, False, False, False], dtype=bool) + target = np.array([True, False, False, False], dtype=bool) assert_equal(np.all(target == (mra == ra[0])), True) def test_scalar_equal(self): @@ -2512,7 +2642,7 @@ def test_reducelike_out_promotes(self): # For legacy dtypes, the signature currently has to be forced if `out=` # is passed. The two paths below should differ, without `dtype=` the # expected result should be: `np.prod(arr.astype("f8")).astype("f4")`! - arr = np.full(5, 2**25-1, dtype=np.int64) + arr = np.full(5, 2**25 - 1, dtype=np.int64) # float32 and int64 promote to float64: res = np.zeros((), dtype=np.float32) @@ -2522,8 +2652,8 @@ def test_reducelike_out_promotes(self): assert single_res != res def test_reducelike_output_needs_identical_cast(self): - # Checks the case where the we have a simple byte-swap works, maily - # tests that this is not rejected directly. + # Checks the case where a simple byte-swap works, mainly tests that + # this is not rejected directly. 
# (interesting because we require descriptor identity in reducelikes). arr = np.ones(20, dtype="f8") out = np.empty((), dtype=arr.dtype.newbyteorder()) @@ -2547,10 +2677,10 @@ def test_reduce_noncontig_output(self): # # gh-8036 - x = np.arange(7*13*8, dtype=np.int16).reshape(7, 13, 8) - x = x[4:6,1:11:6,1:5].transpose(1, 2, 0) - y_base = np.arange(4*4, dtype=np.int16).reshape(4, 4) - y = y_base[::2,:] + x = np.arange(7 * 13 * 8, dtype=np.int16).reshape(7, 13, 8) + x = x[4:6, 1:11:6, 1:5].transpose(1, 2, 0) + y_base = np.arange(4 * 4, dtype=np.int16).reshape(4, 4) + y = y_base[::2, :] y_base_copy = y_base.copy() @@ -2559,8 +2689,8 @@ def test_reduce_noncontig_output(self): # The results should match, and y_base shouldn't get clobbered assert_equal(r0, r1) - assert_equal(y_base[1,:], y_base_copy[1,:]) - assert_equal(y_base[3,:], y_base_copy[3,:]) + assert_equal(y_base[1, :], y_base_copy[1, :]) + assert_equal(y_base[3, :], y_base_copy[3, :]) @pytest.mark.parametrize("with_cast", [True, False]) def test_reduceat_and_accumulate_out_shape_mismatch(self, with_cast): @@ -2652,8 +2782,51 @@ def test_nat_is_not_inf(self, nat): pass # ok, just not implemented +class TestGUFuncProcessCoreDims: + + def test_conv1d_full_without_out(self): + x = np.arange(5.0) + y = np.arange(13.0) + w = umt.conv1d_full(x, y) + assert_equal(w, np.convolve(x, y, mode='full')) + + def test_conv1d_full_with_out(self): + x = np.arange(5.0) + y = np.arange(13.0) + out = np.zeros(len(x) + len(y) - 1) + umt.conv1d_full(x, y, out=out) + assert_equal(out, np.convolve(x, y, mode='full')) + + def test_conv1d_full_basic_broadcast(self): + # x.shape is (3, 6) + x = np.array([[1, 3, 0, -10, 2, 2], + [0, -1, 2, 2, 10, 4], + [8, 9, 10, 2, 23, 3]]) + # y.shape is (2, 1, 7) + y = np.array([[[3, 4, 5, 20, 30, 40, 29]], + [[5, 6, 7, 10, 11, 12, -5]]]) + # result should have shape (2, 3, 12) + result = umt.conv1d_full(x, y) + assert result.shape == (2, 3, 12) + for i in range(2): + for j in range(3): + 
assert_equal(result[i, j], np.convolve(x[j], y[i, 0])) + + def test_bad_out_shape(self): + x = np.ones((1, 2)) + y = np.ones((2, 3)) + out = np.zeros((2, 3)) # Not the correct shape. + with pytest.raises(ValueError, match=r'does not equal m \+ n - 1'): + umt.conv1d_full(x, y, out=out) + + def test_bad_input_both_inputs_length_zero(self): + with pytest.raises(ValueError, + match='both inputs have core dimension 0'): + umt.conv1d_full([], []) + + @pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np) - if isinstance(getattr(np, x), np.ufunc)]) + if isinstance(getattr(np, x), np.ufunc)]) def test_ufunc_types(ufunc): ''' Check all ufuncs that the correct type is returned. Avoid @@ -2681,7 +2854,6 @@ def test_ufunc_types(ufunc): @pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np) if isinstance(getattr(np, x), np.ufunc)]) -@np._no_nep50_warning() def test_ufunc_noncontiguous(ufunc): ''' Check that contiguous and non-contiguous calls to ufuncs @@ -2693,30 +2865,42 @@ def test_ufunc_noncontiguous(ufunc): # bool, object, datetime are too irregular for this simple test continue inp, out = typ.split('->') - args_c = [np.empty(6, t) for t in inp] - args_n = [np.empty(18, t)[::3] for t in inp] - for a in args_c: - a.flat = range(1,7) - for a in args_n: - a.flat = range(1,7) + args_c = [np.empty((6, 6), t) for t in inp] + # non contiguous (2, 3 step on the two dimensions) + args_n = [np.empty((12, 18), t)[::2, ::3] for t in inp] + # alignment != itemsize is possible. So create an array with such + # an odd step manually. 
+ args_o = [] + for t in inp: + orig_dt = np.dtype(t) + off_dt = f"S{orig_dt.alignment}" # offset by alignment + dtype = np.dtype([("_", off_dt), ("t", orig_dt)], align=False) + args_o.append(np.empty((6, 6), dtype=dtype)["t"]) + for a in args_c + args_n + args_o: + a.flat = range(1, 37) + with warnings.catch_warnings(record=True): warnings.filterwarnings("always") res_c = ufunc(*args_c) res_n = ufunc(*args_n) + res_o = ufunc(*args_o) if len(out) == 1: res_c = (res_c,) res_n = (res_n,) - for c_ar, n_ar in zip(res_c, res_n): + res_o = (res_o,) + for c_ar, n_ar, o_ar in zip(res_c, res_n, res_o): dt = c_ar.dtype if np.issubdtype(dt, np.floating): # for floating point results allow a small fuss in comparisons # since different algorithms (libm vs. intrinsics) can be used # for different input strides res_eps = np.finfo(dt).eps - tol = 2*res_eps + tol = 3 * res_eps assert_allclose(res_c, res_n, atol=tol, rtol=tol) + assert_allclose(res_c, res_o, atol=tol, rtol=tol) else: assert_equal(c_ar, n_ar) + assert_equal(c_ar, o_ar) @pytest.mark.parametrize('ufunc', [np.sign, np.equal]) @@ -2791,6 +2975,20 @@ def test_ufunc_input_floatingpoint_error(bad_offset): np.add(arr, arr, dtype=np.intp, casting="unsafe") +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.parametrize( + "methodname", + ["__call__", "accumulate", "at", "outer", "reduce", "reduceat", "resolve_dtypes"], +) +def test_ufunc_method_signatures(methodname: str): + method = getattr(np.ufunc, methodname) + + try: + _ = inspect.signature(method) + except ValueError as e: + pytest.fail(e.args[0]) + + def test_trivial_loop_invalid_cast(): # This tests the fast-path "invalid cast", see gh-19904. 
with pytest.raises(TypeError, @@ -2801,7 +2999,7 @@ def test_trivial_loop_invalid_cast(): @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") @pytest.mark.parametrize("offset", - [0, ncu.BUFSIZE//2, int(1.5*ncu.BUFSIZE)]) + [0, ncu.BUFSIZE // 2, int(1.5 * ncu.BUFSIZE)]) def test_reduce_casterrors(offset): # Test reporting of casting errors in reductions, we test various # offsets to where the casting error will occur, since these may occur @@ -2828,6 +3026,45 @@ def test_reduce_casterrors(offset): assert out[()] < value * offset +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_reduction_no_reference_leak(): + # Test that the generic reduction does not leak references. + # gh-29358 + arr = np.array([1, 2, 3], dtype=np.int32) + count = sys.getrefcount(arr) + + np.add.reduce(arr, dtype=np.int32, initial=0) + assert count == sys.getrefcount(arr) + + np.add.accumulate(arr, dtype=np.int32) + assert count == sys.getrefcount(arr) + + np.add.reduceat(arr, [0, 1], dtype=np.int32) + assert count == sys.getrefcount(arr) + + # with `out=` the reference count is not changed + out = np.empty((), dtype=np.int32) + out_count = sys.getrefcount(out) + + np.add.reduce(arr, dtype=np.int32, out=out, initial=0) + assert count == sys.getrefcount(arr) + assert out_count == sys.getrefcount(out) + + out = np.empty(arr.shape, dtype=np.int32) + out_count = sys.getrefcount(out) + + np.add.accumulate(arr, dtype=np.int32, out=out) + assert count == sys.getrefcount(arr) + assert out_count == sys.getrefcount(out) + + out = np.empty((2,), dtype=np.int32) + out_count = sys.getrefcount(out) + + np.add.reduceat(arr, [0, 1], dtype=np.int32, out=out) + assert count == sys.getrefcount(arr) + assert out_count == sys.getrefcount(out) + + def test_object_reduce_cleanup_on_failure(): # Test cleanup, including of the initial value (manually provided or not) with pytest.raises(TypeError): @@ -2898,7 +3135,7 @@ def test_addition_reduce_negative_zero(dtype, 
use_initial): # Test various length, in case SIMD paths or chunking play a role. # 150 extends beyond the pairwise blocksize; probably not important. - for i in range(0, 150): + for i in range(150): arr = np.array([neg_zero] * i, dtype=dtype) res = np.sum(arr, **kwargs) if i > 0 or use_initial: @@ -2925,7 +3162,7 @@ def test_addition_unicode_inverse_byte_order(order1, order2): arr1 = np.array([element], dtype=f"{order1}U4") arr2 = np.array([element], dtype=f"{order2}U4") result = arr1 + arr2 - assert result == 2*element + assert result == 2 * element @pytest.mark.parametrize("dtype", [np.int8, np.int16, np.int32, np.int64]) @@ -3024,6 +3261,7 @@ def test_resolve_dtypes_reduction_errors(self): @pytest.mark.skipif(not hasattr(ct, "pythonapi"), reason="`ctypes.pythonapi` required for capsule unpacking.") + @pytest.mark.thread_unsafe(reason="modifies global object in the ctypes API") def test_loop_access(self): # This is a basic test for the full strided loop access data_t = ct.c_char_p * 2 @@ -3095,3 +3333,70 @@ def test_long_arrays(self): t[28][414] = 1 tc = np.cos(t) assert_equal(tc[0][0], tc[28][414]) + + +class TestUFuncInspectSignature: + PARAMS_COMMON = { + "casting": "same_kind", + "order": "K", + "dtype": None, + "subok": True, + "signature": None, + } + + PARAMS_UFUNC = { + "where": True, + } | PARAMS_COMMON + + PARAMS_GUFUNC = { + "axes": np._NoValue, + "axis": np._NoValue, + "keepdims": False, + } | PARAMS_COMMON + + @pytest.mark.parametrize("ufunc", [np.log, np.gcd, np.frexp, np.divmod, np.matvec]) + def test_dunder_signature_attr(self, ufunc: np.ufunc): + assert hasattr(ufunc, "__signature__") + assert isinstance(ufunc.__signature__, inspect.Signature) + assert inspect.signature(ufunc) == ufunc.__signature__ + + @pytest.mark.parametrize("ufunc", [np.exp, np.mod, np.frexp, np.divmod, np.vecmat]) + def test_params_common_positional(self, ufunc: np.ufunc): + sig = inspect.signature(ufunc) + + # check positional-only parameters + posonly_params = {name: 
param.default + for name, param in sig.parameters.items() + if param.kind is param.POSITIONAL_ONLY} + assert len(posonly_params) == ufunc.nin + assert all(default is inspect.Parameter.empty + for default in posonly_params.values()) + + # check 'out' parameter + out_param = sig.parameters.get("out") + assert out_param is not None + assert out_param.kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + + @pytest.mark.parametrize("ufunc", [np.sin, np.add, np.frexp, np.divmod]) + def test_params_common_ufunc(self, ufunc: np.ufunc): + assert ufunc.signature is None # sanity check + + sig = inspect.signature(ufunc) + + # check keyword-only parameters + keyword_params = {name: param.default + for name, param in sig.parameters.items() + if param.kind is param.KEYWORD_ONLY} + assert keyword_params == self.PARAMS_UFUNC + + @pytest.mark.parametrize("gufunc", [np.matmul, np.matvec, np.vecdot, np.vecmat]) + def test_params_common_gufunc(self, gufunc: np.ufunc): + assert gufunc.signature is not None # sanity check + + sig = inspect.signature(gufunc) + + # check keyword-only parameters + keyword_params = {name: param.default + for name, param in sig.parameters.items() + if param.kind is param.KEYWORD_ONLY} + assert keyword_params == self.PARAMS_GUFUNC diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index df8ec07dc3f5..8c5af69af9a7 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -1,25 +1,36 @@ -import platform -import warnings import fnmatch +import inspect import itertools -import pytest -import sys -import os import operator +import platform +import sys +import warnings +from collections import namedtuple from fractions import Fraction from functools import reduce -from collections import namedtuple +import pytest + +import numpy as np import numpy._core.umath as ncu from numpy._core import _umath_tests as ncu_tests, sctypes -import numpy as np from numpy.testing import ( - assert_, assert_equal, assert_raises, 
assert_raises_regex, - assert_array_equal, assert_almost_equal, assert_array_almost_equal, - assert_array_max_ulp, assert_allclose, assert_no_warnings, suppress_warnings, - _gen_alignment_data, assert_array_almost_equal_nulp, IS_WASM, IS_MUSL, - IS_PYPY, HAS_REFCOUNT - ) + HAS_REFCOUNT, + IS_MUSL, + IS_WASM, + _gen_alignment_data, + assert_, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_almost_equal_nulp, + assert_array_equal, + assert_array_max_ulp, + assert_equal, + assert_no_warnings, + assert_raises, + assert_raises_regex, +) from numpy.testing._private.utils import _glibc_older_than UFUNCS = [obj for obj in np._core.umath.__dict__.values() @@ -205,25 +216,25 @@ def __array_wrap__(self, arr, context=None, return_scalar=False): if subok: assert_(isinstance(r, ArrayWrap)) else: - assert_(type(r) == np.ndarray) + assert_(type(r) is np.ndarray) r = np.add(a, 2, None, subok=subok) if subok: assert_(isinstance(r, ArrayWrap)) else: - assert_(type(r) == np.ndarray) + assert_(type(r) is np.ndarray) r = np.add(a, 2, out=None, subok=subok) if subok: assert_(isinstance(r, ArrayWrap)) else: - assert_(type(r) == np.ndarray) + assert_(type(r) is np.ndarray) r = np.add(a, 2, out=(None,), subok=subok) if subok: assert_(isinstance(r, ArrayWrap)) else: - assert_(type(r) == np.ndarray) + assert_(type(r) is np.ndarray) d = ArrayWrap([5.7]) o1 = np.empty((1,)) @@ -233,31 +244,31 @@ def __array_wrap__(self, arr, context=None, return_scalar=False): if subok: assert_(isinstance(r2, ArrayWrap)) else: - assert_(type(r2) == np.ndarray) + assert_(type(r2) is np.ndarray) r1, r2 = np.frexp(d, o1, None, subok=subok) if subok: assert_(isinstance(r2, ArrayWrap)) else: - assert_(type(r2) == np.ndarray) + assert_(type(r2) is np.ndarray) r1, r2 = np.frexp(d, None, o2, subok=subok) if subok: assert_(isinstance(r1, ArrayWrap)) else: - assert_(type(r1) == np.ndarray) + assert_(type(r1) is np.ndarray) r1, r2 = np.frexp(d, out=(o1, None), subok=subok) if subok: 
assert_(isinstance(r2, ArrayWrap)) else: - assert_(type(r2) == np.ndarray) + assert_(type(r2) is np.ndarray) r1, r2 = np.frexp(d, out=(None, o2), subok=subok) if subok: assert_(isinstance(r1, ArrayWrap)) else: - assert_(type(r1) == np.ndarray) + assert_(type(r1) is np.ndarray) with assert_raises(TypeError): # Out argument must be tuple, since there are multiple outputs. @@ -270,9 +281,9 @@ class ArrSubclass(np.ndarray): pass arr = np.arange(10).view(ArrSubclass) - + orig_refcount = sys.getrefcount(arr) arr *= 1 - assert sys.getrefcount(arr) == 2 + assert sys.getrefcount(arr) == orig_refcount class TestComparisons: @@ -390,13 +401,13 @@ def test_object_nonbool_dtype_error(self): (operator.eq, np.equal), (operator.ne, np.not_equal) ]) - @pytest.mark.parametrize("vals", [(2**60, 2**60+1), (2**60+1, 2**60)]) + @pytest.mark.parametrize("vals", [(2**60, 2**60 + 1), (2**60 + 1, 2**60)]) def test_large_integer_direct_comparison( self, dtypes, py_comp, np_comp, vals): # Note that float(2**60) + 1 == float(2**60). 
a1 = np.array([2**60], dtype=dtypes[0]) a2 = np.array([2**60 + 1], dtype=dtypes[1]) - expected = py_comp(2**60, 2**60+1) + expected = py_comp(2**60, 2**60 + 1) assert py_comp(a1, a2) == expected assert np_comp(a1, a2) == expected @@ -502,7 +513,7 @@ def test_division_int_boundary(self, dtype, ex_val): c_div = lambda n, d: ( 0 if d == 0 else ( - fo.min if (n and n == fo.min and d == -1) else n//d + fo.min if (n and n == fo.min and d == -1) else n // d ) ) with np.errstate(divide='ignore'): @@ -564,7 +575,7 @@ def test_division_int_reduce(self, dtype, ex_val): a = eval(ex_val) lst = a.tolist() c_div = lambda n, d: ( - 0 if d == 0 or (n and n == fo.min and d == -1) else n//d + 0 if d == 0 or (n and n == fo.min and d == -1) else n // d ) with np.errstate(divide='ignore'): @@ -586,19 +597,19 @@ def test_division_int_reduce(self, dtype, ex_val): @pytest.mark.parametrize( "dividend,divisor,quotient", - [(np.timedelta64(2,'Y'), np.timedelta64(2,'M'), 12), - (np.timedelta64(2,'Y'), np.timedelta64(-2,'M'), -12), - (np.timedelta64(-2,'Y'), np.timedelta64(2,'M'), -12), - (np.timedelta64(-2,'Y'), np.timedelta64(-2,'M'), 12), - (np.timedelta64(2,'M'), np.timedelta64(-2,'Y'), -1), - (np.timedelta64(2,'Y'), np.timedelta64(0,'M'), 0), - (np.timedelta64(2,'Y'), 2, np.timedelta64(1,'Y')), - (np.timedelta64(2,'Y'), -2, np.timedelta64(-1,'Y')), - (np.timedelta64(-2,'Y'), 2, np.timedelta64(-1,'Y')), - (np.timedelta64(-2,'Y'), -2, np.timedelta64(1,'Y')), - (np.timedelta64(-2,'Y'), -2, np.timedelta64(1,'Y')), - (np.timedelta64(-2,'Y'), -3, np.timedelta64(0,'Y')), - (np.timedelta64(-2,'Y'), 0, np.timedelta64('Nat','Y')), + [(np.timedelta64(2, 'Y'), np.timedelta64(2, 'M'), 12), + (np.timedelta64(2, 'Y'), np.timedelta64(-2, 'M'), -12), + (np.timedelta64(-2, 'Y'), np.timedelta64(2, 'M'), -12), + (np.timedelta64(-2, 'Y'), np.timedelta64(-2, 'M'), 12), + (np.timedelta64(2, 'M'), np.timedelta64(-2, 'Y'), -1), + (np.timedelta64(2, 'Y'), np.timedelta64(0, 'M'), 0), + (np.timedelta64(2, 'Y'), 2, 
np.timedelta64(1, 'Y')), + (np.timedelta64(2, 'Y'), -2, np.timedelta64(-1, 'Y')), + (np.timedelta64(-2, 'Y'), 2, np.timedelta64(-1, 'Y')), + (np.timedelta64(-2, 'Y'), -2, np.timedelta64(1, 'Y')), + (np.timedelta64(-2, 'Y'), -2, np.timedelta64(1, 'Y')), + (np.timedelta64(-2, 'Y'), -3, np.timedelta64(0, 'Y')), + (np.timedelta64(-2, 'Y'), 0, np.timedelta64('Nat', 'Y')), ]) def test_division_int_timedelta(self, dividend, divisor, quotient): # If either divisor is 0 or quotient is Nat, check for division by 0 @@ -608,8 +619,8 @@ def test_division_int_timedelta(self, dividend, divisor, quotient): # Test for arrays as well msg = "Timedelta arrays floor division check" - dividend_array = np.array([dividend]*5) - quotient_array = np.array([quotient]*5) + dividend_array = np.array([dividend] * 5) + quotient_array = np.array([quotient] * 5) assert all(dividend_array // divisor == quotient_array), msg else: if IS_WASM: @@ -621,31 +632,34 @@ def test_division_int_timedelta(self, dividend, divisor, quotient): def test_division_complex(self): # check that implementation is correct msg = "Complex division implementation check" - x = np.array([1. + 1.*1j, 1. + .5*1j, 1. + 2.*1j], dtype=np.complex128) - assert_almost_equal(x**2/x, x, err_msg=msg) + x = np.array([1. + 1. * 1j, 1. + .5 * 1j, 1. + 2. 
* 1j], dtype=np.complex128) + assert_almost_equal(x**2 / x, x, err_msg=msg) # check overflow, underflow msg = "Complex division overflow/underflow check" x = np.array([1.e+110, 1.e-110], dtype=np.complex128) - y = x**2/x - assert_almost_equal(y/x, [1, 1], err_msg=msg) + y = x**2 / x + assert_almost_equal(y / x, [1, 1], err_msg=msg) def test_zero_division_complex(self): with np.errstate(invalid="ignore", divide="ignore"): x = np.array([0.0], dtype=np.complex128) - y = 1.0/x + y = 1.0 / x assert_(np.isinf(y)[0]) - y = complex(np.inf, np.nan)/x + y = complex(np.inf, np.nan) / x assert_(np.isinf(y)[0]) - y = complex(np.nan, np.inf)/x + y = complex(np.nan, np.inf) / x assert_(np.isinf(y)[0]) - y = complex(np.inf, np.inf)/x + y = complex(np.inf, np.inf) / x assert_(np.isinf(y)[0]) - y = 0.0/x + y = 0.0 / x assert_(np.isnan(y)[0]) def test_floor_division_complex(self): # check that floor division, divmod and remainder raises type errors - x = np.array([.9 + 1j, -.1 + 1j, .9 + .5*1j, .9 + 2.*1j], dtype=np.complex128) + x = np.array( + [.9 + 1j, -.1 + 1j, .9 + .5 * 1j, .9 + 2. * 1j], + dtype=np.complex128, + ) with pytest.raises(TypeError): x // 7 with pytest.raises(TypeError): @@ -657,8 +671,8 @@ def test_floor_division_signed_zero(self): # Check that the sign bit is correctly set when dividing positive and # negative zero by one. 
x = np.zeros(10) - assert_equal(np.signbit(x//1), 0) - assert_equal(np.signbit((-x)//1), 1) + assert_equal(np.signbit(x // 1), 0) + assert_equal(np.signbit((-x) // 1), 1) @pytest.mark.skipif(hasattr(np.__config__, "blas_ssl2_info"), reason="gh-22982") @@ -691,14 +705,18 @@ def test_floor_division_corner_cases(self, dtype): fone = np.array(1.0, dtype=dtype) fzer = np.array(0.0, dtype=dtype) finf = np.array(np.inf, dtype=dtype) - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in floor_divide") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', + "invalid value encountered in floor_divide", + RuntimeWarning, + ) div = np.floor_divide(fnan, fone) - assert(np.isnan(div)), "dt: %s, div: %s" % (dt, div) + assert np.isnan(div), f"div: {div}" div = np.floor_divide(fone, fnan) - assert(np.isnan(div)), "dt: %s, div: %s" % (dt, div) + assert np.isnan(div), f"div: {div}" div = np.floor_divide(fnan, fzer) - assert(np.isnan(div)), "dt: %s, div: %s" % (dt, div) + assert np.isnan(div), f"div: {div}" # verify 1.0//0.0 computations return inf with np.errstate(divide='ignore'): z = np.floor_divide(y, x) @@ -724,10 +742,10 @@ def test_remainder_basic(self): for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)): fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s' msg = fmt % (op.__name__, dt1, dt2, sg1, sg2) - a = np.array(sg1*71, dtype=dt1) - b = np.array(sg2*19, dtype=dt2) + a = np.array(sg1 * 71, dtype=dt1) + b = np.array(sg2 * 19, dtype=dt2) div, rem = op(a, b) - assert_equal(div*b + rem, a, err_msg=msg) + assert_equal(div * b + rem, a, err_msg=msg) if sg2 == -1: assert_(b < rem <= 0, msg) else: @@ -741,7 +759,7 @@ def test_float_remainder_exact(self): dividend = nlst + [0] + plst divisor = nlst + plst arg = list(itertools.product(dividend, divisor)) - tgt = list(divmod(*t) for t in arg) + tgt = [divmod(*t) for t in arg] a, b = np.array(arg, dtype=int).T # convert exact integer results from Python to float so that 
@@ -752,7 +770,7 @@ def test_float_remainder_exact(self): for op in [floor_divide_and_remainder, np.divmod]: for dt in np.typecodes['Float']: - msg = 'op: %s, dtype: %s' % (op.__name__, dt) + msg = f'op: {op.__name__}, dtype: {dt}' fa = a.astype(dt) fb = b.astype(dt) div, rem = op(fa, fb) @@ -767,11 +785,11 @@ def test_float_remainder_roundoff(self): for sg1, sg2 in itertools.product((+1, -1), (+1, -1)): fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s' msg = fmt % (op.__name__, dt1, dt2, sg1, sg2) - a = np.array(sg1*78*6e-8, dtype=dt1) - b = np.array(sg2*6e-8, dtype=dt2) + a = np.array(sg1 * 78 * 6e-8, dtype=dt1) + b = np.array(sg2 * 6e-8, dtype=dt2) div, rem = op(a, b) # Equal assertion should hold when fmod is used - assert_equal(div*b + rem, a, err_msg=msg) + assert_equal(div * b + rem, a, err_msg=msg) if sg2 == -1: assert_(b < rem <= 0, msg) else: @@ -847,30 +865,38 @@ def test_float_divmod_corner_cases(self): fone = np.array(1.0, dtype=dt) fzer = np.array(0.0, dtype=dt) finf = np.array(np.inf, dtype=dt) - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in divmod") - sup.filter(RuntimeWarning, "divide by zero encountered in divmod") + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + "invalid value encountered in divmod", + RuntimeWarning, + ) + warnings.filterwarnings( + "ignore", + "divide by zero encountered in divmod", + RuntimeWarning, + ) div, rem = np.divmod(fone, fzer) - assert(np.isinf(div)), 'dt: %s, div: %s' % (dt, rem) - assert(np.isnan(rem)), 'dt: %s, rem: %s' % (dt, rem) + assert np.isinf(div), f'dt: {dt}, div: {rem}' + assert np.isnan(rem), f'dt: {dt}, rem: {rem}' div, rem = np.divmod(fzer, fzer) - assert(np.isnan(rem)), 'dt: %s, rem: %s' % (dt, rem) - assert_(np.isnan(div)), 'dt: %s, rem: %s' % (dt, rem) + assert np.isnan(rem), f'dt: {dt}, rem: {rem}' + assert_(np.isnan(div)), f'dt: {dt}, rem: {rem}' div, rem = np.divmod(finf, finf) - assert(np.isnan(div)), 'dt: %s, rem: %s' % 
(dt, rem) - assert(np.isnan(rem)), 'dt: %s, rem: %s' % (dt, rem) + assert np.isnan(div), f'dt: {dt}, rem: {rem}' + assert np.isnan(rem), f'dt: {dt}, rem: {rem}' div, rem = np.divmod(finf, fzer) - assert(np.isinf(div)), 'dt: %s, rem: %s' % (dt, rem) - assert(np.isnan(rem)), 'dt: %s, rem: %s' % (dt, rem) + assert np.isinf(div), f'dt: {dt}, rem: {rem}' + assert np.isnan(rem), f'dt: {dt}, rem: {rem}' div, rem = np.divmod(fnan, fone) - assert(np.isnan(rem)), "dt: %s, rem: %s" % (dt, rem) - assert(np.isnan(div)), "dt: %s, rem: %s" % (dt, rem) + assert np.isnan(rem), f"dt: {dt}, rem: {rem}" + assert np.isnan(div), f"dt: {dt}, rem: {rem}" div, rem = np.divmod(fone, fnan) - assert(np.isnan(rem)), "dt: %s, rem: %s" % (dt, rem) - assert(np.isnan(div)), "dt: %s, rem: %s" % (dt, rem) + assert np.isnan(rem), f"dt: {dt}, rem: {rem}" + assert np.isnan(div), f"dt: {dt}, rem: {rem}" div, rem = np.divmod(fnan, fzer) - assert(np.isnan(rem)), "dt: %s, rem: %s" % (dt, rem) - assert(np.isnan(div)), "dt: %s, rem: %s" % (dt, rem) + assert np.isnan(rem), f"dt: {dt}, rem: {rem}" + assert np.isnan(div), f"dt: {dt}, rem: {rem}" def test_float_remainder_corner_cases(self): # Check remainder magnitude. 
@@ -881,48 +907,56 @@ def test_float_remainder_corner_cases(self): b = np.array(1.0, dtype=dt) a = np.nextafter(np.array(0.0, dtype=dt), -b) rem = np.remainder(a, b) - assert_(rem <= b, 'dt: %s' % dt) + assert_(rem <= b, f'dt: {dt}') rem = np.remainder(-a, -b) - assert_(rem >= -b, 'dt: %s' % dt) + assert_(rem >= -b, f'dt: {dt}') # Check nans, inf - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in remainder") - sup.filter(RuntimeWarning, "invalid value encountered in fmod") + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + "invalid value encountered in remainder", + RuntimeWarning, + ) + warnings.filterwarnings( + "ignore", + "invalid value encountered in fmod", + RuntimeWarning, + ) for dt in np.typecodes['Float']: fone = np.array(1.0, dtype=dt) fzer = np.array(0.0, dtype=dt) finf = np.array(np.inf, dtype=dt) fnan = np.array(np.nan, dtype=dt) rem = np.remainder(fone, fzer) - assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) + assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}') # MSVC 2008 returns NaN here, so disable the check. 
#rem = np.remainder(fone, finf) #assert_(rem == fone, 'dt: %s, rem: %s' % (dt, rem)) rem = np.remainder(finf, fone) fmod = np.fmod(finf, fone) - assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod)) - assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) + assert_(np.isnan(fmod), f'dt: {dt}, fmod: {fmod}') + assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}') rem = np.remainder(finf, finf) fmod = np.fmod(finf, fone) - assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) - assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod)) + assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}') + assert_(np.isnan(fmod), f'dt: {dt}, fmod: {fmod}') rem = np.remainder(finf, fzer) fmod = np.fmod(finf, fzer) - assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) - assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod)) + assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}') + assert_(np.isnan(fmod), f'dt: {dt}, fmod: {fmod}') rem = np.remainder(fone, fnan) fmod = np.fmod(fone, fnan) - assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) - assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod)) + assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}') + assert_(np.isnan(fmod), f'dt: {dt}, fmod: {fmod}') rem = np.remainder(fnan, fzer) fmod = np.fmod(fnan, fzer) - assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) - assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, rem)) + assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}') + assert_(np.isnan(fmod), f'dt: {dt}, fmod: {fmod}') rem = np.remainder(fnan, fone) fmod = np.fmod(fnan, fone) - assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) - assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, rem)) + assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}') + assert_(np.isnan(fmod), f'dt: {dt}, fmod: {fmod}') class TestDivisionIntegerOverflowsAndDivideByZero: @@ -1009,7 +1043,7 @@ def test_overflows(self, dividend_dtype, divisor_dtype, operation): # that is a multiple of the register's size. We resort to the # default implementation for the leftover elements. 
# We try to cover all paths here. - arrays = [np.array([np.iinfo(dividend_dtype).min]*i, + arrays = [np.array([np.iinfo(dividend_dtype).min] * i, dtype=dividend_dtype) for i in range(1, 129)] divisor = np.array([-1], dtype=divisor_dtype) # If dividend is a larger type than the divisor (`else` case), @@ -1039,7 +1073,7 @@ def test_overflows(self, dividend_dtype, divisor_dtype, operation): result = np.array(operation(a, divisor)).flatten('f') expected_array = np.array( [self.overflow_results[operation].nocast( - dividend_dtype)]*len(a)).flatten() + dividend_dtype)] * len(a)).flatten() assert_array_equal(result, expected_array) else: # Scalars @@ -1056,7 +1090,7 @@ def test_overflows(self, dividend_dtype, divisor_dtype, operation): result = np.array(operation(a, divisor)).flatten('f') expected_array = np.array( [self.overflow_results[operation].casted( - dividend_dtype)]*len(a)).flatten() + dividend_dtype)] * len(a)).flatten() assert_array_equal(result, expected_array) @@ -1082,7 +1116,7 @@ def test_power_float(self): y = x.copy() y **= 2 assert_equal(y, [1., 4., 9.]) - assert_almost_equal(x**(-1), [1., 0.5, 1./3]) + assert_almost_equal(x**(-1), [1., 0.5, 1. 
/ 3]) assert_almost_equal(x**(0.5), [1., ncu.sqrt(2), ncu.sqrt(3)]) for out, inp, msg in _gen_alignment_data(dtype=np.float32, @@ -1102,22 +1136,43 @@ def test_power_float(self): assert_equal(out, exp, err_msg=msg) def test_power_complex(self): - x = np.array([1+2j, 2+3j, 3+4j]) + x = np.array([1 + 2j, 2 + 3j, 3 + 4j]) assert_equal(x**0, [1., 1., 1.]) assert_equal(x**1, x) - assert_almost_equal(x**2, [-3+4j, -5+12j, -7+24j]) - assert_almost_equal(x**3, [(1+2j)**3, (2+3j)**3, (3+4j)**3]) - assert_almost_equal(x**4, [(1+2j)**4, (2+3j)**4, (3+4j)**4]) - assert_almost_equal(x**(-1), [1/(1+2j), 1/(2+3j), 1/(3+4j)]) - assert_almost_equal(x**(-2), [1/(1+2j)**2, 1/(2+3j)**2, 1/(3+4j)**2]) - assert_almost_equal(x**(-3), [(-11+2j)/125, (-46-9j)/2197, - (-117-44j)/15625]) - assert_almost_equal(x**(0.5), [ncu.sqrt(1+2j), ncu.sqrt(2+3j), - ncu.sqrt(3+4j)]) - norm = 1./((x**14)[0]) - assert_almost_equal(x**14 * norm, - [i * norm for i in [-76443+16124j, 23161315+58317492j, - 5583548873 + 2465133864j]]) + assert_almost_equal(x**2, [-3 + 4j, -5 + 12j, -7 + 24j]) + assert_almost_equal(x**3, [(1 + 2j)**3, (2 + 3j)**3, (3 + 4j)**3]) + assert_almost_equal(x**4, [(1 + 2j)**4, (2 + 3j)**4, (3 + 4j)**4]) + assert_almost_equal(x**(-1), [1 / (1 + 2j), 1 / (2 + 3j), 1 / (3 + 4j)]) + assert_almost_equal( + x**(-2), + [1 / (1 + 2j)**2, + 1 / (2 + 3j)**2, + 1 / (3 + 4j)**2], + ) + assert_almost_equal( + x**(-3), + [(-11 + 2j) / 125, + (-46 - 9j) / 2197, + (-117 - 44j) / 15625], + ) + assert_almost_equal( + x**(0.5), + [ncu.sqrt(1 + 2j), + ncu.sqrt(2 + 3j), + ncu.sqrt(3 + 4j)], + ) + norm = 1. 
/ ((x**14)[0]) + assert_almost_equal( + x**14 * norm, + [ + i * norm + for i in [ + -76443 + 16124j, + 23161315 + 58317492j, + 5583548873 + 2465133864j, + ] + ], + ) # Ticket #836 def assert_complex_equal(x, y): @@ -1128,13 +1183,13 @@ def assert_complex_equal(x, y): z = np.array([z], dtype=np.complex128) with np.errstate(invalid="ignore"): assert_complex_equal(z**1, z) - assert_complex_equal(z**2, z*z) - assert_complex_equal(z**3, z*z*z) + assert_complex_equal(z**2, z * z) + assert_complex_equal(z**3, z * z * z) def test_power_zero(self): # ticket #1271 zero = np.array([0j]) - one = np.array([1+0j]) + one = np.array([1 + 0j]) cnan = np.array([complex(np.nan, np.nan)]) # FIXME cinf not tested. #cinf = np.array([complex(np.inf, 0)]) @@ -1151,38 +1206,38 @@ def assert_complex_equal(x, y): # zero power assert_complex_equal(np.power(zero, 0), one) with np.errstate(invalid="ignore"): - assert_complex_equal(np.power(zero, 0+1j), cnan) + assert_complex_equal(np.power(zero, 0 + 1j), cnan) # negative power for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]: assert_complex_equal(np.power(zero, -p), cnan) - assert_complex_equal(np.power(zero, -1+0.2j), cnan) + assert_complex_equal(np.power(zero, -1 + 0.2j), cnan) @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") def test_zero_power_nonzero(self): # Testing 0^{Non-zero} issue 18378 - zero = np.array([0.0+0.0j]) + zero = np.array([0.0 + 0.0j]) cnan = np.array([complex(np.nan, np.nan)]) def assert_complex_equal(x, y): assert_array_equal(x.real, y.real) assert_array_equal(x.imag, y.imag) - #Complex powers with positive real part will not generate a warning - assert_complex_equal(np.power(zero, 1+4j), zero) - assert_complex_equal(np.power(zero, 2-3j), zero) - #Testing zero values when real part is greater than zero - assert_complex_equal(np.power(zero, 1+1j), zero) - assert_complex_equal(np.power(zero, 1+0j), zero) - assert_complex_equal(np.power(zero, 1-1j), zero) - #Complex powers will negative real part or 0 
(provided imaginary + # Complex powers with positive real part will not generate a warning + assert_complex_equal(np.power(zero, 1 + 4j), zero) + assert_complex_equal(np.power(zero, 2 - 3j), zero) + # Testing zero values when real part is greater than zero + assert_complex_equal(np.power(zero, 1 + 1j), zero) + assert_complex_equal(np.power(zero, 1 + 0j), zero) + assert_complex_equal(np.power(zero, 1 - 1j), zero) + # Complex powers will negative real part or 0 (provided imaginary # part is not zero) will generate a NAN and hence a RUNTIME warning with pytest.warns(expected_warning=RuntimeWarning) as r: - assert_complex_equal(np.power(zero, -1+1j), cnan) - assert_complex_equal(np.power(zero, -2-3j), cnan) - assert_complex_equal(np.power(zero, -7+0j), cnan) - assert_complex_equal(np.power(zero, 0+1j), cnan) - assert_complex_equal(np.power(zero, 0-1j), cnan) + assert_complex_equal(np.power(zero, -1 + 1j), cnan) + assert_complex_equal(np.power(zero, -2 - 3j), cnan) + assert_complex_equal(np.power(zero, -7 + 0j), cnan) + assert_complex_equal(np.power(zero, 0 + 1j), cnan) + assert_complex_equal(np.power(zero, 0 - 1j), cnan) assert len(r) == 5 def test_fast_power(self): @@ -1265,7 +1320,7 @@ def test_type_conversion(self): arg_type = '?bhilBHILefdgFDG' res_type = 'ddddddddddddgDDG' for dtin, dtout in zip(arg_type, res_type): - msg = "dtin: %s, dtout: %s" % (dtin, dtout) + msg = f"dtin: {dtin}, dtout: {dtout}" arg = np.ones(1, dtype=dtin) res = np.float_power(arg, arg) assert_(res.dtype.name == np.dtype(dtout).name, msg) @@ -1337,8 +1392,8 @@ def test_logaddexp2_range(self): def test_inf(self): inf = np.inf - x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] - y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] + x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] # noqa: E221 + y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] # noqa: E221 z = [inf, inf, inf, -inf, inf, inf, 1, 1] with np.errstate(invalid='raise'): for dt in ['f', 'd', 'g']: @@ -1368,7 +1423,7 @@ def test_log_values(self): for dt in 
['f', 'd', 'g']: log2_ = 0.69314718055994530943 xf = np.array(x, dtype=dt) - yf = np.array(y, dtype=dt)*log2_ + yf = np.array(y, dtype=dt) * log2_ assert_almost_equal(np.log(xf), yf) # test aliasing(issue #17761) @@ -1392,17 +1447,25 @@ def test_log_values_maxofdtype(self): def test_log_strides(self): np.random.seed(42) - strides = np.array([-4,-3,-2,-1,1,2,3,4]) - sizes = np.arange(2,100) + strides = np.array([-4, -3, -2, -1, 1, 2, 3, 4]) + sizes = np.arange(2, 100) for ii in sizes: - x_f64 = np.float64(np.random.uniform(low=0.01, high=100.0,size=ii)) + x_f64 = np.float64(np.random.uniform(low=0.01, high=100.0, size=ii)) x_special = x_f64.copy() x_special[3:-1:4] = 1.0 y_true = np.log(x_f64) y_special = np.log(x_special) for jj in strides: - assert_array_almost_equal_nulp(np.log(x_f64[::jj]), y_true[::jj], nulp=2) - assert_array_almost_equal_nulp(np.log(x_special[::jj]), y_special[::jj], nulp=2) + assert_array_almost_equal_nulp( + np.log(x_f64[::jj]), + y_true[::jj], + nulp=2, + ) + assert_array_almost_equal_nulp( + np.log(x_special[::jj]), + y_special[::jj], + nulp=2, + ) # Reference values were computed with mpmath, with mp.dps = 200. @pytest.mark.parametrize( @@ -1424,10 +1487,10 @@ def test_log_precision_float64(self, z, wref): # Reference values were computed with mpmath, with mp.dps = 200. 
@pytest.mark.parametrize( 'z, wref', - [(np.complex64(1.0 + 3e-6j), np.complex64(4.5e-12+3e-06j)), + [(np.complex64(1.0 + 3e-6j), np.complex64(4.5e-12 + 3e-06j)), (np.complex64(1.0 - 2e-5j), np.complex64(1.9999999e-10 - 2e-5j)), (np.complex64(0.9999999 + 1e-06j), - np.complex64(-1.192088e-07+1.0000001e-06j))], + np.complex64(-1.192088e-07 + 1.0000001e-06j))], ) def test_log_precision_float32(self, z, wref): w = np.log(z) @@ -1441,18 +1504,22 @@ def test_exp_values(self): for dt in ['f', 'd', 'g']: log2_ = 0.69314718055994530943 xf = np.array(x, dtype=dt) - yf = np.array(y, dtype=dt)*log2_ + yf = np.array(y, dtype=dt) * log2_ assert_almost_equal(np.exp(yf), xf) def test_exp_strides(self): np.random.seed(42) - strides = np.array([-4,-3,-2,-1,1,2,3,4]) - sizes = np.arange(2,100) + strides = np.array([-4, -3, -2, -1, 1, 2, 3, 4]) + sizes = np.arange(2, 100) for ii in sizes: - x_f64 = np.float64(np.random.uniform(low=0.01, high=709.1,size=ii)) + x_f64 = np.float64(np.random.uniform(low=0.01, high=709.1, size=ii)) y_true = np.exp(x_f64) for jj in strides: - assert_array_almost_equal_nulp(np.exp(x_f64[::jj]), y_true[::jj], nulp=2) + assert_array_almost_equal_nulp( + np.exp(x_f64[::jj]), + y_true[::jj], + nulp=2, + ) class TestSpecialFloats: def test_exp_values(self): @@ -1824,41 +1891,41 @@ def test_expm1(self): @pytest.mark.parametrize("dtype", ('e', 'f', 'd')) @pytest.mark.parametrize("data, escape", ( ([0.03], LTONE_INVALID_ERR), - ([0.03]*32, LTONE_INVALID_ERR), + ([0.03] * 32, LTONE_INVALID_ERR), # neg ([-1.0], NEG_INVALID_ERR), - ([-1.0]*32, NEG_INVALID_ERR), + ([-1.0] * 32, NEG_INVALID_ERR), # flat ([1.0], ONE_INVALID_ERR), - ([1.0]*32, ONE_INVALID_ERR), + ([1.0] * 32, ONE_INVALID_ERR), # zero ([0.0], BYZERO_ERR), - ([0.0]*32, BYZERO_ERR), + ([0.0] * 32, BYZERO_ERR), ([-0.0], BYZERO_ERR), - ([-0.0]*32, BYZERO_ERR), + ([-0.0] * 32, BYZERO_ERR), # nan ([0.5, 0.5, 0.5, np.nan], LTONE_INVALID_ERR), - ([0.5, 0.5, 0.5, np.nan]*32, LTONE_INVALID_ERR), + ([0.5, 0.5, 0.5, 
np.nan] * 32, LTONE_INVALID_ERR), ([np.nan, 1.0, 1.0, 1.0], ONE_INVALID_ERR), - ([np.nan, 1.0, 1.0, 1.0]*32, ONE_INVALID_ERR), + ([np.nan, 1.0, 1.0, 1.0] * 32, ONE_INVALID_ERR), ([np.nan], []), - ([np.nan]*32, []), + ([np.nan] * 32, []), # inf ([0.5, 0.5, 0.5, np.inf], INF_INVALID_ERR + LTONE_INVALID_ERR), - ([0.5, 0.5, 0.5, np.inf]*32, INF_INVALID_ERR + LTONE_INVALID_ERR), + ([0.5, 0.5, 0.5, np.inf] * 32, INF_INVALID_ERR + LTONE_INVALID_ERR), ([np.inf, 1.0, 1.0, 1.0], INF_INVALID_ERR), - ([np.inf, 1.0, 1.0, 1.0]*32, INF_INVALID_ERR), + ([np.inf, 1.0, 1.0, 1.0] * 32, INF_INVALID_ERR), ([np.inf], INF_INVALID_ERR), - ([np.inf]*32, INF_INVALID_ERR), + ([np.inf] * 32, INF_INVALID_ERR), # ninf ([0.5, 0.5, 0.5, -np.inf], NEG_INVALID_ERR + INF_INVALID_ERR + LTONE_INVALID_ERR), - ([0.5, 0.5, 0.5, -np.inf]*32, + ([0.5, 0.5, 0.5, -np.inf] * 32, NEG_INVALID_ERR + INF_INVALID_ERR + LTONE_INVALID_ERR), ([-np.inf, 1.0, 1.0, 1.0], NEG_INVALID_ERR + INF_INVALID_ERR), - ([-np.inf, 1.0, 1.0, 1.0]*32, NEG_INVALID_ERR + INF_INVALID_ERR), + ([-np.inf, 1.0, 1.0, 1.0] * 32, NEG_INVALID_ERR + INF_INVALID_ERR), ([-np.inf], NEG_INVALID_ERR + INF_INVALID_ERR), - ([-np.inf]*32, NEG_INVALID_ERR + INF_INVALID_ERR), + ([-np.inf] * 32, NEG_INVALID_ERR + INF_INVALID_ERR), )) def test_unary_spurious_fpexception(self, ufunc, dtype, data, escape): if escape and ufunc in escape: @@ -1866,8 +1933,15 @@ def test_unary_spurious_fpexception(self, ufunc, dtype, data, escape): # FIXME: NAN raises FP invalid exception: # - ceil/float16 on MSVC:32-bit # - spacing/float16 on almost all platforms + # - spacing/float32,float64 on Windows MSVC with VS2022 if ufunc in (np.spacing, np.ceil) and dtype == 'e': return + # Skip spacing tests with NaN on Windows MSVC (all dtypes) + import platform + if (ufunc == np.spacing and + platform.system() == 'Windows' and + any(np.isnan(d) if isinstance(d, (int, float)) else False for d in data)): + pytest.skip("spacing with NaN generates warnings on Windows/VS2022") array = 
np.array(data, dtype=dtype) with assert_no_warnings(): ufunc(array) @@ -1885,15 +1959,53 @@ def test_divide_spurious_fpexception(self, dtype): np.zeros(128 + 1, dtype=dt) / subnorm class TestFPClass: - @pytest.mark.parametrize("stride", [-5, -4, -3, -2, -1, 1, - 2, 4, 5, 6, 7, 8, 9, 10]) + @pytest.mark.parametrize( + "stride", + [-5, -4, -3, -2, -1, 1, 2, 4, 5, 6, 7, 8, 9, 10], + ) def test_fpclass(self, stride): - arr_f64 = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, 1.0, -0.0, 0.0, 2.2251e-308, -2.2251e-308], dtype='d') - arr_f32 = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, 1.0, -0.0, 0.0, 1.4013e-045, -1.4013e-045], dtype='f') - nan = np.array([True, True, False, False, False, False, False, False, False, False]) - inf = np.array([False, False, True, True, False, False, False, False, False, False]) - sign = np.array([False, True, False, True, True, False, True, False, False, True]) - finite = np.array([False, False, False, False, True, True, True, True, True, True]) + arr_f64 = np.array( + [ + np.nan, + -np.nan, + np.inf, + -np.inf, + -1.0, + 1.0, + -0.0, + 0.0, + 2.2251e-308, + -2.2251e-308, + ], + dtype="d", + ) + arr_f32 = np.array( + [ + np.nan, + -np.nan, + np.inf, + -np.inf, + -1.0, + 1.0, + -0.0, + 0.0, + 1.4013e-045, + -1.4013e-045, + ], + dtype="f", + ) + nan = np.array( + [True, True, False, False, False, False, False, False, False, False], + ) + inf = np.array( + [False, False, True, True, False, False, False, False, False, False], + ) + sign = np.array( + [False, True, False, True, True, False, True, False, False, True], + ) + finite = np.array( + [False, False, False, False, True, True, True, True, True, True], + ) assert_equal(np.isnan(arr_f32[::stride]), nan[::stride]) assert_equal(np.isnan(arr_f64[::stride]), nan[::stride]) assert_equal(np.isinf(arr_f32[::stride]), inf[::stride]) @@ -1982,53 +2094,65 @@ def test_fp_noncontiguous(self, dtype): assert_equal(np.isfinite(data_split), finite_split) class TestLDExp: - 
@pytest.mark.parametrize("stride", [-4,-2,-1,1,2,4]) + @pytest.mark.parametrize("stride", [-4, -2, -1, 1, 2, 4]) @pytest.mark.parametrize("dtype", ['f', 'd']) def test_ldexp(self, dtype, stride): mant = np.array([0.125, 0.25, 0.5, 1., 1., 2., 4., 8.], dtype=dtype) - exp = np.array([3, 2, 1, 0, 0, -1, -2, -3], dtype='i') - out = np.zeros(8, dtype=dtype) - assert_equal(np.ldexp(mant[::stride], exp[::stride], out=out[::stride]), np.ones(8, dtype=dtype)[::stride]) + exp = np.array([3, 2, 1, 0, 0, -1, -2, -3], dtype='i') + out = np.zeros(8, dtype=dtype) + assert_equal( + np.ldexp(mant[::stride], exp[::stride], out=out[::stride]), + np.ones(8, dtype=dtype)[::stride], + ) assert_equal(out[::stride], np.ones(8, dtype=dtype)[::stride]) class TestFRExp: - @pytest.mark.parametrize("stride", [-4,-2,-1,1,2,4]) + @pytest.mark.parametrize("stride", [-4, -2, -1, 1, 2, 4]) @pytest.mark.parametrize("dtype", ['f', 'd']) - @pytest.mark.skipif(not sys.platform.startswith('linux'), - reason="np.frexp gives different answers for NAN/INF on windows and linux") + @pytest.mark.skipif( + not sys.platform.startswith('linux'), + reason="np.frexp gives different answers for NAN/INF on windows and linux", + ) @pytest.mark.xfail(IS_MUSL, reason="gh23049") def test_frexp(self, dtype, stride): - arr = np.array([np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 1.0, -1.0], dtype=dtype) - mant_true = np.array([np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 0.5, -0.5], dtype=dtype) - exp_true = np.array([0, 0, 0, 0, 0, 0, 1, 1], dtype='i') - out_mant = np.ones(8, dtype=dtype) - out_exp = 2*np.ones(8, dtype='i') + arr = np.array( + [np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 1.0, -1.0], + dtype=dtype, + ) + mant_true = np.array( + [np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 0.5, -0.5], + dtype=dtype, + ) + exp_true = np.array([0, 0, 0, 0, 0, 0, 1, 1], dtype='i') + out_mant = np.ones(8, dtype=dtype) + out_exp = 2 * np.ones(8, dtype='i') mant, exp = np.frexp(arr[::stride], out=(out_mant[::stride], 
out_exp[::stride])) assert_equal(mant_true[::stride], mant) assert_equal(exp_true[::stride], exp) assert_equal(out_mant[::stride], mant_true[::stride]) assert_equal(out_exp[::stride], exp_true[::stride]) + # func : [maxulperror, low, high] -avx_ufuncs = {'sqrt' :[1, 0., 100.], - 'absolute' :[0, -100., 100.], - 'reciprocal' :[1, 1., 100.], - 'square' :[1, -100., 100.], - 'rint' :[0, -100., 100.], - 'floor' :[0, -100., 100.], - 'ceil' :[0, -100., 100.], - 'trunc' :[0, -100., 100.]} +avx_ufuncs = {'sqrt' : [1, 0., 100.], # noqa: E203 + 'absolute' : [0, -100., 100.], # noqa: E203 + 'reciprocal' : [1, 1., 100.], # noqa: E203 + 'square' : [1, -100., 100.], # noqa: E203 + 'rint' : [0, -100., 100.], # noqa: E203 + 'floor' : [0, -100., 100.], # noqa: E203 + 'ceil' : [0, -100., 100.], # noqa: E203 + 'trunc' : [0, -100., 100.]} # noqa: E203 class TestAVXUfuncs: def test_avx_based_ufunc(self): - strides = np.array([-4,-3,-2,-1,1,2,3,4]) + strides = np.array([-4, -3, -2, -1, 1, 2, 3, 4]) np.random.seed(42) for func, prop in avx_ufuncs.items(): maxulperr = prop[0] minval = prop[1] maxval = prop[2] # various array sizes to ensure masking in AVX is tested - for size in range(1,32): + for size in range(1, 32): myfunc = getattr(np, func) x_f32 = np.random.uniform(low=minval, high=maxval, size=size).astype(np.float32) @@ -2056,40 +2180,48 @@ def test_avx_based_ufunc(self): class TestAVXFloat32Transcendental: def test_exp_float32(self): np.random.seed(42) - x_f32 = np.float32(np.random.uniform(low=0.0,high=88.1,size=1000000)) + x_f32 = np.float32(np.random.uniform(low=0.0, high=88.1, size=1000000)) x_f64 = np.float64(x_f32) assert_array_max_ulp(np.exp(x_f32), np.float32(np.exp(x_f64)), maxulp=3) def test_log_float32(self): np.random.seed(42) - x_f32 = np.float32(np.random.uniform(low=0.0,high=1000,size=1000000)) + x_f32 = np.float32(np.random.uniform(low=0.0, high=1000, size=1000000)) x_f64 = np.float64(x_f32) assert_array_max_ulp(np.log(x_f32), np.float32(np.log(x_f64)), maxulp=4) 
def test_sincos_float32(self): np.random.seed(42) N = 1000000 - M = np.int_(N/20) + M = np.int_(N / 20) index = np.random.randint(low=0, high=N, size=M) - x_f32 = np.float32(np.random.uniform(low=-100.,high=100.,size=N)) + x_f32 = np.float32(np.random.uniform(low=-100., high=100., size=N)) if not _glibc_older_than("2.17"): # test coverage for elements > 117435.992f for which glibc is used # this is known to be problematic on old glibc, so skip it there - x_f32[index] = np.float32(10E+10*np.random.rand(M)) + x_f32[index] = np.float32(10E+10 * np.random.rand(M)) x_f64 = np.float64(x_f32) assert_array_max_ulp(np.sin(x_f32), np.float32(np.sin(x_f64)), maxulp=2) assert_array_max_ulp(np.cos(x_f32), np.float32(np.cos(x_f64)), maxulp=2) # test aliasing(issue #17761) tx_f32 = x_f32.copy() - assert_array_max_ulp(np.sin(x_f32, out=x_f32), np.float32(np.sin(x_f64)), maxulp=2) - assert_array_max_ulp(np.cos(tx_f32, out=tx_f32), np.float32(np.cos(x_f64)), maxulp=2) + assert_array_max_ulp( + np.sin(x_f32, out=x_f32), + np.float32(np.sin(x_f64)), + maxulp=2, + ) + assert_array_max_ulp( + np.cos(tx_f32, out=tx_f32), + np.float32(np.cos(x_f64)), + maxulp=2, + ) def test_strided_float32(self): np.random.seed(42) - strides = np.array([-4,-3,-2,-1,1,2,3,4]) - sizes = np.arange(2,100) + strides = np.array([-4, -3, -2, -1, 1, 2, 3, 4]) + sizes = np.arange(2, 100) for ii in sizes: - x_f32 = np.float32(np.random.uniform(low=0.01,high=88.1,size=ii)) + x_f32 = np.float32(np.random.uniform(low=0.01, high=88.1, size=ii)) x_f32_large = x_f32.copy() x_f32_large[3:-1:4] = 120000.0 exp_true = np.exp(x_f32) @@ -2097,10 +2229,12 @@ def test_strided_float32(self): sin_true = np.sin(x_f32_large) cos_true = np.cos(x_f32_large) for jj in strides: - assert_array_almost_equal_nulp(np.exp(x_f32[::jj]), exp_true[::jj], nulp=2) - assert_array_almost_equal_nulp(np.log(x_f32[::jj]), log_true[::jj], nulp=2) - assert_array_almost_equal_nulp(np.sin(x_f32_large[::jj]), sin_true[::jj], nulp=2) - 
assert_array_almost_equal_nulp(np.cos(x_f32_large[::jj]), cos_true[::jj], nulp=2) + x_slice = x_f32[::jj] + x_large = x_f32_large[::jj] + assert_array_almost_equal_nulp(np.exp(x_slice), exp_true[::jj], nulp=2) + assert_array_almost_equal_nulp(np.log(x_slice), log_true[::jj], nulp=2) + assert_array_almost_equal_nulp(np.sin(x_large), sin_true[::jj], nulp=2) + assert_array_almost_equal_nulp(np.cos(x_large), cos_true[::jj], nulp=2) class TestLogAddExp(_FilterInvalids): def test_logaddexp_values(self): @@ -2125,8 +2259,8 @@ def test_logaddexp_range(self): def test_inf(self): inf = np.inf - x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] - y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] + x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] # noqa: E221 + y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] # noqa: E221 z = [inf, inf, inf, -inf, inf, inf, 1, 1] with np.errstate(invalid='raise'): for dt in ['f', 'd', 'g']: @@ -2150,7 +2284,7 @@ def test_reduce(self): class TestLog1p: def test_log1p(self): assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2)) - assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6)) + assert_almost_equal(ncu.log1p(1e-6), ncu.log(1 + 1e-6)) def test_special(self): with np.errstate(invalid="ignore", divide="ignore"): @@ -2163,8 +2297,8 @@ def test_special(self): class TestExpm1: def test_expm1(self): - assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2)-1) - assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6)-1) + assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2) - 1) + assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6) - 1) def test_special(self): assert_equal(ncu.expm1(np.inf), np.inf) @@ -2195,13 +2329,13 @@ def test_reduce(self): def assert_hypot_isnan(x, y): with np.errstate(invalid='ignore'): assert_(np.isnan(ncu.hypot(x, y)), - "hypot(%s, %s) is %s, not nan" % (x, y, ncu.hypot(x, y))) + f"hypot({x}, {y}) is {ncu.hypot(x, y)}, not nan") def assert_hypot_isinf(x, y): with np.errstate(invalid='ignore'): assert_(np.isinf(ncu.hypot(x, y)), - "hypot(%s, %s) is %s, not inf" % 
(x, y, ncu.hypot(x, y))) + f"hypot({x}, {y}) is {ncu.hypot(x, y)}, not inf") class TestHypotSpecialValues: @@ -2222,24 +2356,38 @@ def test_no_fpe(self): def assert_arctan2_isnan(x, y): - assert_(np.isnan(ncu.arctan2(x, y)), "arctan(%s, %s) is %s, not nan" % (x, y, ncu.arctan2(x, y))) + assert_( + np.isnan(ncu.arctan2(x, y)), + f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not nan", + ) def assert_arctan2_ispinf(x, y): - assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0), "arctan(%s, %s) is %s, not +inf" % (x, y, ncu.arctan2(x, y))) + assert_( + (np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0), + f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not +inf", + ) def assert_arctan2_isninf(x, y): - assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0), "arctan(%s, %s) is %s, not -inf" % (x, y, ncu.arctan2(x, y))) + assert_( + (np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0), + f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not -inf", + ) def assert_arctan2_ispzero(x, y): - assert_((ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not +0" % (x, y, ncu.arctan2(x, y))) + assert_( + (ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))), + f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not +0", + ) def assert_arctan2_isnzero(x, y): - assert_((ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not -0" % (x, y, ncu.arctan2(x, y))) - + assert_( + (ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), + f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not -0", + ) class TestArctan2SpecialValues: def test_one_one(self): @@ -2362,7 +2510,7 @@ def test_reduce(self): def test_reduce_complex(self): assert_equal(np.maximum.reduce([1, 2j]), 1) - assert_equal(np.maximum.reduce([1+3j, 2j]), 1+3j) + assert_equal(np.maximum.reduce([1 + 3j, 2j]), 1 + 3j) def test_float_nans(self): nan = np.nan @@ -2396,16 +2544,22 @@ def test_object_array(self): assert_equal(np.maximum(arg1, arg2), arg2) def 
test_strided_array(self): - arr1 = np.array([-4.0, 1.0, 10.0, 0.0, np.nan, -np.nan, np.inf, -np.inf]) - arr2 = np.array([-2.0,-1.0, np.nan, 1.0, 0.0, np.nan, 1.0, -3.0]) - maxtrue = np.array([-2.0, 1.0, np.nan, 1.0, np.nan, np.nan, np.inf, -3.0]) + arr1 = np.array([-4.0, 1.0, 10.0, 0.0, np.nan, -np.nan, np.inf, -np.inf]) + arr2 = np.array([-2.0, -1.0, np.nan, 1.0, 0.0, np.nan, 1.0, -3.0]) # noqa: E221 + maxtrue = np.array([-2.0, 1.0, np.nan, 1.0, np.nan, np.nan, np.inf, -3.0]) out = np.ones(8) out_maxtrue = np.array([-2.0, 1.0, 1.0, 10.0, 1.0, 1.0, np.nan, 1.0]) - assert_equal(np.maximum(arr1,arr2), maxtrue) - assert_equal(np.maximum(arr1[::2],arr2[::2]), maxtrue[::2]) - assert_equal(np.maximum(arr1[:4:], arr2[::2]), np.array([-2.0, np.nan, 10.0, 1.0])) + assert_equal(np.maximum(arr1, arr2), maxtrue) + assert_equal(np.maximum(arr1[::2], arr2[::2]), maxtrue[::2]) + assert_equal( + np.maximum(arr1[:4:], arr2[::2]), + np.array([-2.0, np.nan, 10.0, 1.0]), + ) assert_equal(np.maximum(arr1[::3], arr2[:3:]), np.array([-2.0, 0.0, np.nan])) - assert_equal(np.maximum(arr1[:6:2], arr2[::3], out=out[::3]), np.array([-2.0, 10., np.nan])) + assert_equal( + np.maximum(arr1[:6:2], arr2[::3], out=out[::3]), + np.array([-2.0, 10.0, np.nan]), + ) assert_equal(out, out_maxtrue) def test_precision(self): @@ -2454,7 +2608,7 @@ def test_reduce(self): def test_reduce_complex(self): assert_equal(np.minimum.reduce([1, 2j]), 2j) - assert_equal(np.minimum.reduce([1+3j, 2j]), 2j) + assert_equal(np.minimum.reduce([1 + 3j, 2j]), 2j) def test_float_nans(self): nan = np.nan @@ -2489,15 +2643,21 @@ def test_object_array(self): def test_strided_array(self): arr1 = np.array([-4.0, 1.0, 10.0, 0.0, np.nan, -np.nan, np.inf, -np.inf]) - arr2 = np.array([-2.0,-1.0, np.nan, 1.0, 0.0, np.nan, 1.0, -3.0]) - mintrue = np.array([-4.0, -1.0, np.nan, 0.0, np.nan, np.nan, 1.0, -np.inf]) + arr2 = np.array([-2.0, -1.0, np.nan, 1.0, 0.0, np.nan, 1.0, -3.0]) + mintrue = np.array([-4.0, -1.0, np.nan, 0.0, np.nan, 
np.nan, 1.0, -np.inf]) out = np.ones(8) out_mintrue = np.array([-4.0, 1.0, 1.0, 1.0, 1.0, 1.0, np.nan, 1.0]) - assert_equal(np.minimum(arr1,arr2), mintrue) - assert_equal(np.minimum(arr1[::2],arr2[::2]), mintrue[::2]) - assert_equal(np.minimum(arr1[:4:], arr2[::2]), np.array([-4.0, np.nan, 0.0, 0.0])) + assert_equal(np.minimum(arr1, arr2), mintrue) + assert_equal(np.minimum(arr1[::2], arr2[::2]), mintrue[::2]) + assert_equal( + np.minimum(arr1[:4:], arr2[::2]), + np.array([-4.0, np.nan, 0.0, 0.0]), + ) assert_equal(np.minimum(arr1[::3], arr2[:3:]), np.array([-4.0, -1.0, np.nan])) - assert_equal(np.minimum(arr1[:6:2], arr2[::3], out=out[::3]), np.array([-4.0, 1.0, np.nan])) + assert_equal( + np.minimum(arr1[:6:2], arr2[::3], out=out[::3]), + np.array([-4.0, 1.0, np.nan]), + ) assert_equal(out, out_mintrue) def test_precision(self): @@ -2546,7 +2706,7 @@ def test_reduce(self): def test_reduce_complex(self): assert_equal(np.fmax.reduce([1, 2j]), 1) - assert_equal(np.fmax.reduce([1+3j, 2j]), 1+3j) + assert_equal(np.fmax.reduce([1 + 3j, 2j]), 1 + 3j) def test_float_nans(self): nan = np.nan @@ -2609,7 +2769,7 @@ def test_reduce(self): def test_reduce_complex(self): assert_equal(np.fmin.reduce([1, 2j]), 2j) - assert_equal(np.fmin.reduce([1+3j, 2j]), 2j) + assert_equal(np.fmin.reduce([1 + 3j, 2j]), 2j) def test_float_nans(self): nan = np.nan @@ -2727,7 +2887,7 @@ def test_values(self): for dt in self.bitwise_types: zeros = np.array([0], dtype=dt) ones = np.array([-1]).astype(dt) - msg = "dt = '%s'" % dt.char + msg = f"dt = '{dt.char}'" assert_equal(np.bitwise_not(zeros), ones, err_msg=msg) assert_equal(np.bitwise_not(ones), zeros, err_msg=msg) @@ -2751,7 +2911,7 @@ def test_types(self): for dt in self.bitwise_types: zeros = np.array([0], dtype=dt) ones = np.array([-1]).astype(dt) - msg = "dt = '%s'" % dt.char + msg = f"dt = '{dt.char}'" assert_(np.bitwise_not(zeros).dtype == dt, msg) assert_(np.bitwise_or(zeros, zeros).dtype == dt, msg) @@ -2770,7 +2930,7 @@ def 
test_reduction(self): zeros = np.array([0], dtype=dt) ones = np.array([-1]).astype(dt) for f in binary_funcs: - msg = "dt: '%s', f: '%s'" % (dt, f) + msg = f"dt: '{dt}', f: '{f}'" assert_equal(f.reduce(zeros), zeros, err_msg=msg) assert_equal(f.reduce(ones), ones, err_msg=msg) @@ -2779,7 +2939,7 @@ def test_reduction(self): # No object array types empty = np.array([], dtype=dt) for f in binary_funcs: - msg = "dt: '%s', f: '%s'" % (dt, f) + msg = f"dt: '{dt}', f: '{f}'" tgt = np.array(f.identity).astype(dt) res = f.reduce(empty) assert_equal(res, tgt, err_msg=msg) @@ -2790,7 +2950,7 @@ def test_reduction(self): # function and is not the same as the type returned by the identity # method. for f in binary_funcs: - msg = "dt: '%s'" % (f,) + msg = f"dt: '{f}'" empty = np.array([], dtype=object) tgt = f.identity res = f.reduce(empty) @@ -2798,7 +2958,7 @@ def test_reduction(self): # Non-empty object arrays do not use the identity for f in binary_funcs: - msg = "dt: '%s'" % (f,) + msg = f"dt: '{f}'" btype = np.array([True], dtype=object) assert_(type(f.reduce(btype)) is bool, msg) @@ -2807,10 +2967,6 @@ def test_reduction(self): def test_bitwise_count(self, input_dtype_obj, bitsize): input_dtype = input_dtype_obj.type - # bitwise_count is only in-built in 3.10+ - if sys.version_info < (3, 10) and input_dtype == np.object_: - pytest.skip("Required Python >=3.10") - for i in range(1, bitsize): num = 2**i - 1 msg = f"bitwise_count for {num}" @@ -2819,7 +2975,7 @@ def test_bitwise_count(self, input_dtype_obj, bitsize): input_dtype, np.signedinteger) or input_dtype == np.object_: assert i == np.bitwise_count(input_dtype(-num)), msg - a = np.array([2**i-1 for i in range(1, bitsize)], dtype=input_dtype) + a = np.array([2**i - 1 for i in range(1, bitsize)], dtype=input_dtype) bitwise_count_a = np.bitwise_count(a) expected = np.arange(1, bitsize, dtype=input_dtype) @@ -2846,13 +3002,13 @@ def test_floating_point(self): class TestDegrees: def test_degrees(self): 
assert_almost_equal(ncu.degrees(np.pi), 180.0) - assert_almost_equal(ncu.degrees(-0.5*np.pi), -90.0) + assert_almost_equal(ncu.degrees(-0.5 * np.pi), -90.0) class TestRadians: def test_radians(self): assert_almost_equal(ncu.radians(180.0), np.pi) - assert_almost_equal(ncu.radians(-90.0), -0.5*np.pi) + assert_almost_equal(ncu.radians(-90.0), -0.5 * np.pi) class TestHeavside: @@ -2896,14 +3052,14 @@ def test_sign_complex(self): complex(np.inf, np.inf), complex(np.inf, -np.inf), # nan np.nan, complex(0, np.nan), complex(np.nan, np.nan), # nan 0.0, # 0. - 3.0, -3.0, -2j, 3.0+4.0j, -8.0+6.0j + 3.0, -3.0, -2j, 3.0 + 4.0j, -8.0 + 6.0j ]) out = np.zeros(a.shape, a.dtype) tgt = np.array([ 1., -1., 1j, -1j, ] + [complex(np.nan, np.nan)] * 5 + [ 0.0, - 1.0, -1.0, -1j, 0.6+0.8j, -0.8+0.6j]) + 1.0, -1.0, -1j, 0.6 + 0.8j, -0.8 + 0.6j]) with np.errstate(invalid='ignore'): res = ncu.sign(a) @@ -2940,10 +3096,12 @@ def test_minmax_blocked(self): for i in range(inp.size): inp[:] = np.arange(inp.size, dtype=dt) inp[i] = np.nan - emsg = lambda: '%r\n%s' % (inp, msg) - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, - "invalid value encountered in reduce") + emsg = lambda: f'{inp!r}\n{msg}' + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', + "invalid value encountered in reduce", + RuntimeWarning) assert_(np.isnan(inp.max()), msg=emsg) assert_(np.isnan(inp.min()), msg=emsg) @@ -2961,7 +3119,7 @@ def test_lower_align(self): def test_reduce_reorder(self): # gh 10370, 11029 Some compilers reorder the call to npy_getfloatstatus - # and put it before the call to an intrisic function that causes + # and put it before the call to an intrinsic function that causes # invalid status to be set. 
Also make sure warnings are not emitted for n in (2, 4, 8, 16, 32): for dt in (np.float32, np.float16, np.complex64): @@ -2984,7 +3142,7 @@ def test_abs_neg_blocked(self): assert_equal(out, tgt, err_msg=msg) assert_((out >= 0).all()) - tgt = [-1*(i) for i in inp] + tgt = [-1 * (i) for i in inp] np.negative(inp, out=out) assert_equal(out, tgt, err_msg=msg) @@ -2998,7 +3156,7 @@ def test_abs_neg_blocked(self): np.abs(inp, out=out) assert_array_equal(out, d, err_msg=msg) - assert_array_equal(-inp, -1*inp, err_msg=msg) + assert_array_equal(-inp, -1 * inp, err_msg=msg) d = -1 * inp np.negative(inp, out=out) assert_array_equal(out, d, err_msg=msg) @@ -3123,23 +3281,24 @@ def do_test(f_call, f_expected): # assert_equal produces truly useless error messages raise AssertionError("\n".join([ "Bad arguments passed in ufunc call", - " expected: {}".format(expected), - " __array_wrap__ got: {}".format(w) + f" expected: {expected}", + f" __array_wrap__ got: {w}" ])) # method not on the out argument - do_test(lambda a: np.add(a, 0), lambda a: (a, 0)) - do_test(lambda a: np.add(a, 0, None), lambda a: (a, 0)) - do_test(lambda a: np.add(a, 0, out=None), lambda a: (a, 0)) + do_test(lambda a: np.add(a, 0), lambda a: (a, 0)) + do_test(lambda a: np.add(a, 0, None), lambda a: (a, 0)) + do_test(lambda a: np.add(a, 0, out=None), lambda a: (a, 0)) do_test(lambda a: np.add(a, 0, out=(None,)), lambda a: (a, 0)) # method on the out argument - do_test(lambda a: np.add(0, 0, a), lambda a: (0, 0, a)) - do_test(lambda a: np.add(0, 0, out=a), lambda a: (0, 0, a)) - do_test(lambda a: np.add(0, 0, out=(a,)), lambda a: (0, 0, a)) + do_test(lambda a: np.add(0, 0, a), lambda a: (0, 0, a)) + do_test(lambda a: np.add(0, 0, out=a), lambda a: (0, 0, a)) + do_test(lambda a: np.add(0, 0, out=(a,)), lambda a: (0, 0, a)) # Also check the where mask handling: - do_test(lambda a: np.add(a, 0, where=False), lambda a: (a, 0)) + out = np.zeros([1], dtype=float) + do_test(lambda a: np.add(a, 0, where=False, 
out=None), lambda a: (a, 0)) do_test(lambda a: np.add(0, 0, a, where=False), lambda a: (0, 0, a)) def test_wrap_with_iterable(self): @@ -3169,7 +3328,7 @@ def __new__(cls): return np.asarray(1.0, 'float64').view(cls).copy() a = A() - x = np.float64(1)*a + x = np.float64(1) * a assert_(isinstance(x, A)) assert_array_equal(x, np.array(1)) @@ -3475,7 +3634,7 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): assert_equal(res[1], np.multiply) assert_equal(res[2], 'reduce') assert_equal(res[3], (a,)) - assert_equal(res[4], {'dtype':'dtype0', + assert_equal(res[4], {'dtype': 'dtype0', 'out': ('out0',), 'keepdims': 'keep0', 'axis': 'axis0'}) @@ -3488,7 +3647,7 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): assert_equal(res[1], np.multiply) assert_equal(res[2], 'reduce') assert_equal(res[3], (a,)) - assert_equal(res[4], {'dtype':'dtype0', + assert_equal(res[4], {'dtype': 'dtype0', 'out': ('out0',), 'keepdims': 'keep0', 'axis': 'axis0', @@ -3527,7 +3686,7 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): assert_equal(res[1], np.multiply) assert_equal(res[2], 'accumulate') assert_equal(res[3], (a,)) - assert_equal(res[4], {'dtype':'dtype0', + assert_equal(res[4], {'dtype': 'dtype0', 'out': ('out0',), 'axis': 'axis0'}) @@ -3538,7 +3697,7 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): assert_equal(res[1], np.multiply) assert_equal(res[2], 'accumulate') assert_equal(res[3], (a,)) - assert_equal(res[4], {'dtype':'dtype0', + assert_equal(res[4], {'dtype': 'dtype0', 'out': ('out0',), 'axis': 'axis0'}) @@ -3563,7 +3722,7 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): assert_equal(res[1], np.multiply) assert_equal(res[2], 'reduceat') assert_equal(res[3], (a, [4, 2])) - assert_equal(res[4], {'dtype':'dtype0', + assert_equal(res[4], {'dtype': 'dtype0', 'out': ('out0',), 'axis': 'axis0'}) @@ -3574,7 +3733,7 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): assert_equal(res[1], np.multiply) 
assert_equal(res[2], 'reduceat') assert_equal(res[3], (a, [4, 2])) - assert_equal(res[4], {'dtype':'dtype0', + assert_equal(res[4], {'dtype': 'dtype0', 'out': ('out0',), 'axis': 'axis0'}) @@ -3683,7 +3842,7 @@ def _unwrap(self, objs): for obj in objs: if isinstance(obj, cls): obj = np.array(obj) - elif type(obj) != np.ndarray: + elif type(obj) is not np.ndarray: return NotImplemented result.append(obj) return result @@ -3696,7 +3855,7 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): kwargs = kwargs.copy() if "out" in kwargs: - kwargs["out"] = self._unwrap(kwargs["out"]) + kwargs["out"] = self._unwrap(kwargs["out"])[0] if kwargs["out"] is NotImplemented: return NotImplemented @@ -3727,21 +3886,28 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): array = np.array([1, 2, 3]) where = np.array([True, False, True]) - expected = ufunc(array, where=where) + out = np.zeros(3, dtype=array.dtype) + expected = ufunc(array, where=where, out=out) with pytest.raises(TypeError): - ufunc(array, where=where.view(OverriddenArrayOld)) + ufunc( + array, + where=where.view(OverriddenArrayOld), + out=out, + ) result_1 = ufunc( array, - where=where.view(OverriddenArrayNew) + where=where.view(OverriddenArrayNew), + out=out, ) assert isinstance(result_1, OverriddenArrayNew) assert np.all(np.array(result_1) == expected, where=where) result_2 = ufunc( array.view(OverriddenArrayNew), - where=where.view(OverriddenArrayNew) + where=where.view(OverriddenArrayNew), + out=out.view(OverriddenArrayNew), ) assert isinstance(result_2, OverriddenArrayNew) assert np.all(np.array(result_2) == expected, where=where) @@ -4021,6 +4187,31 @@ def test_array_ufunc_direct_call(self): res = a.__array_ufunc__(np.add, "__call__", a, a) assert_array_equal(res, a + a) + @pytest.mark.thread_unsafe(reason="modifies global module") + def test_ufunc_docstring(self): + original_doc = np.add.__doc__ + new_doc = "new docs" + expected_dict = {"__module__": "numpy", "__qualname__": "add"} + 
expected_dict["__signature__"] = inspect.signature(np.add) + + np.add.__doc__ = new_doc + assert np.add.__doc__ == new_doc + assert np.add.__dict__["__doc__"] == new_doc + + del np.add.__doc__ + assert np.add.__doc__ == original_doc + assert np.add.__dict__ == expected_dict + + np.add.__dict__["other"] = 1 + np.add.__dict__["__doc__"] = new_doc + assert np.add.__doc__ == new_doc + + del np.add.__dict__["__doc__"] + assert np.add.__doc__ == original_doc + del np.add.__dict__["other"] + assert np.add.__dict__ == expected_dict + + class TestChoose: def test_mixed(self): c = np.array([True, True]) @@ -4053,7 +4244,7 @@ def _test_lcm_inner(self, dtype): # negatives are ignored a = np.array([12, -12, 12, -12], dtype=dtype) b = np.array([20, 20, -20, -20], dtype=dtype) - assert_equal(np.lcm(a, b), [60]*4) + assert_equal(np.lcm(a, b), [60] * 4) # reduce a = np.array([3, 12, 20], dtype=dtype) @@ -4074,7 +4265,7 @@ def _test_gcd_inner(self, dtype): # negatives are ignored a = np.array([12, -12, 12, -12], dtype=dtype) b = np.array([20, 20, -20, -20], dtype=dtype) - assert_equal(np.gcd(a, b), [4]*4) + assert_equal(np.gcd(a, b), [4] * 4) # reduce a = np.array([15, 25, 35], dtype=dtype) @@ -4088,9 +4279,9 @@ def _test_gcd_inner(self, dtype): def test_lcm_overflow(self): # verify that we don't overflow when a*b does overflow big = np.int32(np.iinfo(np.int32).max // 11) - a = 2*big - b = 5*big - assert_equal(np.lcm(a, b), 10*big) + a = 2 * big + b = 5 * big + assert_equal(np.lcm(a, b), 10 * big) def test_gcd_overflow(self): for dtype in (np.int32, np.int64): @@ -4098,16 +4289,16 @@ def test_gcd_overflow(self): # not relevant for lcm, where the result is unrepresentable anyway a = dtype(np.iinfo(dtype).min) # negative power of two q = -(a // 4) - assert_equal(np.gcd(a, q*3), q) - assert_equal(np.gcd(a, -q*3), q) + assert_equal(np.gcd(a, q * 3), q) + assert_equal(np.gcd(a, -q * 3), q) def test_decimal(self): from decimal import Decimal a = np.array([1, 1, -1, -1]) * Decimal('0.20') 
b = np.array([1, -1, 1, -1]) * Decimal('0.12') - assert_equal(np.gcd(a, b), 4*[Decimal('0.04')]) - assert_equal(np.lcm(a, b), 4*[Decimal('0.60')]) + assert_equal(np.gcd(a, b), 4 * [Decimal('0.04')]) + assert_equal(np.lcm(a, b), 4 * [Decimal('0.60')]) def test_float(self): # not well-defined on float due to rounding errors @@ -4138,6 +4329,13 @@ def test_huge_integers(self): assert_equal(np.gcd(a, b), [2**100, 2**50 * 3**5]) assert_equal(np.lcm(a, b), [2**100 * 3**5 * 5**7, 2**100 * 3**10]) + def test_inf_and_nan(self): + inf = np.array([np.inf], dtype=np.object_) + assert_raises(ValueError, np.gcd, inf, 1) + assert_raises(ValueError, np.gcd, 1, inf) + assert_raises(ValueError, np.gcd, np.nan, inf) + assert_raises(TypeError, np.gcd, 4, float(np.inf)) + class TestRoundingFunctions: @@ -4146,8 +4344,10 @@ def test_object_direct(self): class C: def __floor__(self): return 1 + def __ceil__(self): return 2 + def __trunc__(self): return 3 @@ -4174,6 +4374,15 @@ def test_fraction(self): assert_equal(np.ceil(f), -1) assert_equal(np.trunc(f), -1) + @pytest.mark.parametrize('func', [np.floor, np.ceil, np.trunc]) + @pytest.mark.parametrize('dtype', [np.bool, np.float64, np.float32, + np.int64, np.uint32]) + def test_output_dtype(self, func, dtype): + arr = np.array([-2, 0, 4, 8]).astype(dtype) + result = func(arr) + assert_equal(arr, result) + assert result.dtype == dtype + class TestComplexFunctions: funcs = [np.arcsin, np.arccos, np.arctan, np.arcsinh, np.arccosh, @@ -4189,8 +4398,8 @@ def test_it(self): x = .5 fr = f(x) fz = f(complex(x)) - assert_almost_equal(fz.real, fr, err_msg='real part %s' % f) - assert_almost_equal(fz.imag, 0., err_msg='imag part %s' % f) + assert_almost_equal(fz.real, fr, err_msg=f'real part {f}') + assert_almost_equal(fz.imag, 0., err_msg=f'imag part {f}') @pytest.mark.xfail(IS_WASM, reason="doesn't work") def test_precisions_consistent(self): @@ -4199,68 +4408,100 @@ def test_precisions_consistent(self): fcf = f(np.csingle(z)) fcd = 
f(np.cdouble(z)) fcl = f(np.clongdouble(z)) - assert_almost_equal(fcf, fcd, decimal=6, err_msg='fch-fcd %s' % f) - assert_almost_equal(fcl, fcd, decimal=15, err_msg='fch-fcl %s' % f) + assert_almost_equal(fcf, fcd, decimal=6, err_msg=f'fch-fcd {f}') + assert_almost_equal(fcl, fcd, decimal=15, err_msg=f'fch-fcl {f}') @pytest.mark.xfail(IS_WASM, reason="doesn't work") def test_branch_cuts(self): # check branch cuts and continuity on them - _check_branch_cut(np.log, -0.5, 1j, 1, -1, True) - _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True) + _check_branch_cut(np.log, -0.5, 1j, 1, -1, True) # noqa: E221 + _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True) # noqa: E221 _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True) _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True) - _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True) + _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True) # noqa: E221 _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True) _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True) - _check_branch_cut(np.arctan, [0-2j, 2j], [1, 1], -1, 1, True) + _check_branch_cut(np.arctan, [0 - 2j, 2j], [1, 1], -1, 1, True) - _check_branch_cut(np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True) + _check_branch_cut(np.arcsinh, [0 - 2j, 2j], [1, 1], -1, 1, True) _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True) _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True) # check against bogus branch cuts: assert continuity between quadrants - _check_branch_cut(np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1) - _check_branch_cut(np.arccos, [0-2j, 2j], [ 1, 1], 1, 1) + _check_branch_cut(np.arcsin, [0 - 2j, 2j], [ 1, 1], 1, 1) + _check_branch_cut(np.arccos, [0 - 2j, 2j], [ 1, 1], 1, 1) _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1) _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1) - _check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1) - _check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1) + _check_branch_cut(np.arccosh, [0 - 2j, 
2j, 2], [1, 1, 1j], 1, 1) + _check_branch_cut(np.arctanh, [0 - 2j, 2j, 0], [1, 1, 1j], 1, 1) @pytest.mark.xfail(IS_WASM, reason="doesn't work") def test_branch_cuts_complex64(self): # check branch cuts and continuity on them - _check_branch_cut(np.log, -0.5, 1j, 1, -1, True, np.complex64) - _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True, np.complex64) + _check_branch_cut(np.log, -0.5, 1j, 1, -1, True, np.complex64) # noqa: E221 + _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True, np.complex64) # noqa: E221 _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True, np.complex64) _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True, np.complex64) - _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True, np.complex64) + _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True, np.complex64) # noqa: E221 _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) - _check_branch_cut(np.arctan, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64) - - _check_branch_cut(np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64) + _check_branch_cut(np.arctan, [0 - 2j, 2j], [1, 1], -1, 1, True, np.complex64) + + _check_branch_cut( + np.arcsinh, + [0 - 2j, 2j], + [1, 1], + -1, + 1, + True, + np.complex64, + ) _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True, np.complex64) _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) # check against bogus branch cuts: assert continuity between quadrants - _check_branch_cut(np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64) - _check_branch_cut(np.arccos, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64) + _check_branch_cut(np.arcsin, [0 - 2j, 2j], [ 1, 1], 1, 1, False, np.complex64) + _check_branch_cut(np.arccos, [0 - 2j, 2j], [ 1, 1], 1, 1, False, np.complex64) _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1, False, np.complex64) - _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1, False, np.complex64) - 
_check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1, False, np.complex64) - _check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1, False, np.complex64) + _check_branch_cut( + np.arcsinh, + [-2, 2, 0], + [1j, 1j, 1], + 1, + 1, + False, + np.complex64, + ) + _check_branch_cut( + np.arccosh, + [0 - 2j, 2j, 2], + [1, 1, 1j], + 1, + 1, + False, + np.complex64, + ) + _check_branch_cut( + np.arctanh, + [0 - 2j, 2j, 0], + [1, 1, 1j], + 1, + 1, + False, + np.complex64, + ) def test_against_cmath(self): import cmath - points = [-1-1j, -1+1j, +1-1j, +1+1j] + points = [-1 - 1j, -1 + 1j, +1 - 1j, +1 + 1j] name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan', 'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'} - atol = 4*np.finfo(complex).eps + atol = 4 * np.finfo(complex).eps for func in self.funcs: fname = func.__name__.split('.')[-1] cname = name_map.get(fname, fname) @@ -4273,7 +4514,7 @@ def test_against_cmath(self): b = cfunc(p) assert_( abs(a - b) < atol, - "%s %s: %s; cmath: %s" % (fname, p, a, b) + f"{fname} {p}: {a}; cmath: {b}" ) @pytest.mark.xfail( @@ -4301,22 +4542,22 @@ def check(x, rtol): x = x.astype(real_dtype) z = x.astype(dtype) - d = np.absolute(np.arcsinh(x)/np.arcsinh(z).real - 1) + d = np.absolute(np.arcsinh(x) / np.arcsinh(z).real - 1) assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), 'arcsinh')) - z = (1j*x).astype(dtype) - d = np.absolute(np.arcsinh(x)/np.arcsin(z).imag - 1) + z = (1j * x).astype(dtype) + d = np.absolute(np.arcsinh(x) / np.arcsin(z).imag - 1) assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), 'arcsin')) z = x.astype(dtype) - d = np.absolute(np.arctanh(x)/np.arctanh(z).real - 1) + d = np.absolute(np.arctanh(x) / np.arctanh(z).real - 1) assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), 'arctanh')) - z = (1j*x).astype(dtype) - d = np.absolute(np.arctanh(x)/np.arctan(z).imag - 1) + z = (1j * x).astype(dtype) + d = np.absolute(np.arctanh(x) / 
np.arctan(z).imag - 1) assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), 'arctan')) @@ -4334,28 +4575,28 @@ def check(x, rtol): # It's not guaranteed that the system-provided arc functions # are accurate down to a few epsilons. (Eg. on Linux 64-bit) # So, give more leeway for long complex tests here: - check(x_series, 50.0*eps) + check(x_series, 50.0 * eps) else: - check(x_series, 2.1*eps) - check(x_basic, 2.0*eps/1e-3) + check(x_series, 2.1 * eps) + check(x_basic, 2.0 * eps / 1e-3) # Check a few points - z = np.array([1e-5*(1+1j)], dtype=dtype) + z = np.array([1e-5 * (1 + 1j)], dtype=dtype) p = 9.999999999333333333e-6 + 1.000000000066666666e-5j - d = np.absolute(1-np.arctanh(z)/p) + d = np.absolute(1 - np.arctanh(z) / p) assert_(np.all(d < 1e-15)) p = 1.0000000000333333333e-5 + 9.999999999666666667e-6j - d = np.absolute(1-np.arcsinh(z)/p) + d = np.absolute(1 - np.arcsinh(z) / p) assert_(np.all(d < 1e-15)) p = 9.999999999333333333e-6j + 1.000000000066666666e-5 - d = np.absolute(1-np.arctan(z)/p) + d = np.absolute(1 - np.arctan(z) / p) assert_(np.all(d < 1e-15)) p = 1.0000000000333333333e-5j + 9.999999999666666667e-6 - d = np.absolute(1-np.arcsin(z)/p) + d = np.absolute(1 - np.arcsin(z) / p) assert_(np.all(d < 1e-15)) # Check continuity across switchover points @@ -4367,15 +4608,15 @@ def check(func, z0, d=1): assert_(np.all(zp != zm), (zp, zm)) # NB: the cancellation error at the switchover is at least eps - good = (abs(func(zp) - func(zm)) < 2*eps) + good = (abs(func(zp) - func(zm)) < 2 * eps) assert_(np.all(good), (func, z0[~good])) for func in (np.arcsinh, np.arcsinh, np.arcsin, np.arctanh, np.arctan): - pts = [rp+1j*ip for rp in (-1e-3, 0, 1e-3) for ip in(-1e-3, 0, 1e-3) + pts = [rp + 1j * ip for rp in (-1e-3, 0, 1e-3) for ip in (-1e-3, 0, 1e-3) if rp != 0 or ip != 0] check(func, pts, 1) check(func, pts, 1j) - check(func, pts, 1+1j) + check(func, pts, 1 + 1j) @np.errstate(all="ignore") def test_promotion_corner_cases(self): @@ -4410,13 +4651,13 
@@ class TestSubclass: def test_subclass_op(self): class simple(np.ndarray): - def __new__(subtype, shape): - self = np.ndarray.__new__(subtype, shape, dtype=object) + def __new__(cls, shape): + self = np.ndarray.__new__(cls, shape, dtype=object) self.fill(0) return self a = simple((3, 4)) - assert_equal(a+a, a) + assert_equal(a + a, a) class TestFrompyfunc: @@ -4440,7 +4681,10 @@ def mul(a, b): # with no identity (not reorderable) mul_ufunc = np.frompyfunc(mul, nin=2, nout=1) assert_equal(mul_ufunc.reduce([2, 3, 4]), 24) - assert_raises(ValueError, lambda: mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1))) + assert_raises( + ValueError, + lambda: mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)), + ) assert_raises(ValueError, lambda: mul_ufunc.reduce([])) @@ -4479,13 +4723,13 @@ def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False, atol = 1e-4 y0 = f(x0) - yp = f(x0 + dx*scale*np.absolute(x0)/np.absolute(dx)) - ym = f(x0 - dx*scale*np.absolute(x0)/np.absolute(dx)) + yp = f(x0 + dx * scale * np.absolute(x0) / np.absolute(dx)) + ym = f(x0 - dx * scale * np.absolute(x0) / np.absolute(dx)) assert_(np.all(np.absolute(y0.real - yp.real) < atol), (y0, yp)) assert_(np.all(np.absolute(y0.imag - yp.imag) < atol), (y0, yp)) - assert_(np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym)) - assert_(np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym)) + assert_(np.all(np.absolute(y0.real - ym.real * re_sign) < atol), (y0, ym)) + assert_(np.all(np.absolute(y0.imag - ym.imag * im_sign) < atol), (y0, ym)) if sig_zero_ok: # check that signed zeros also work as a displacement @@ -4495,15 +4739,28 @@ def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False, x = x0[jr] x.real = ncu.NZERO ym = f(x) - assert_(np.all(np.absolute(y0[jr].real - ym.real*re_sign) < atol), (y0[jr], ym)) - assert_(np.all(np.absolute(y0[jr].imag - ym.imag*im_sign) < atol), (y0[jr], ym)) + assert_( + np.all(np.absolute(y0[jr].real - ym.real * re_sign) < 
atol), + (y0[jr], ym), + ) + assert_( + np.all(np.absolute(y0[jr].imag - ym.imag * im_sign) < atol), + (y0[jr], ym), + ) if np.any(ji): x = x0[ji] x.imag = ncu.NZERO ym = f(x) - assert_(np.all(np.absolute(y0[ji].real - ym.real*re_sign) < atol), (y0[ji], ym)) - assert_(np.all(np.absolute(y0[ji].imag - ym.imag*im_sign) < atol), (y0[ji], ym)) + assert_( + np.all(np.absolute(y0[ji].real - ym.real * re_sign) < atol), + (y0[ji], ym), + ) + assert_( + np.all(np.absolute(y0[ji].imag - ym.imag * im_sign) < atol), + (y0[ji], ym), + ) + def test_copysign(): assert_(np.copysign(1, -1) == -1) @@ -4544,8 +4801,8 @@ def test_nextafter_0(): for t, direction in itertools.product(np._core.sctypes['float'], (1, -1)): # The value of tiny for double double is NaN, so we need to pass the # assert - with suppress_warnings() as sup: - sup.filter(UserWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) if not np.isnan(np.finfo(t).tiny): tiny = np.finfo(t).tiny assert_( @@ -4648,6 +4905,18 @@ def test_reduceat(): np.setbufsize(ncu.UFUNC_BUFSIZE_DEFAULT) assert_array_almost_equal(h1, h2) +def test_negative_value_raises(): + with pytest.raises(ValueError, match="buffer size must be non-negative"): + np.setbufsize(-5) + + old = np.getbufsize() + try: + prev = np.setbufsize(4096) + assert prev == old + assert np.getbufsize() == 4096 + finally: + np.setbufsize(old) + def test_reduceat_empty(): """Reduceat should work with empty arrays""" indices = np.array([], 'i4') @@ -4678,11 +4947,11 @@ def test_complex_nan_comparisons(): if np.isfinite(x) and np.isfinite(y): continue - assert_equal(x < y, False, err_msg="%r < %r" % (x, y)) - assert_equal(x > y, False, err_msg="%r > %r" % (x, y)) - assert_equal(x <= y, False, err_msg="%r <= %r" % (x, y)) - assert_equal(x >= y, False, err_msg="%r >= %r" % (x, y)) - assert_equal(x == y, False, err_msg="%r == %r" % (x, y)) + assert_equal(x < y, False, err_msg=f"{x!r} < {y!r}") + assert_equal(x > y, False, err_msg=f"{x!r} > 
{y!r}") + assert_equal(x <= y, False, err_msg=f"{x!r} <= {y!r}") + assert_equal(x >= y, False, err_msg=f"{x!r} >= {y!r}") + assert_equal(x == y, False, err_msg=f"{x!r} == {y!r}") def test_rint_big_int(): @@ -4714,7 +4983,7 @@ def test_memoverlap_accumulate_cmp(ufunc, dtype): if ufunc.signature: pytest.skip('For generic signatures only') for size in (2, 8, 32, 64, 128, 256): - arr = np.array([0, 1, 1]*size, dtype=dtype) + arr = np.array([0, 1, 1] * size, dtype=dtype) acc = ufunc.accumulate(arr, dtype='?') acc_u8 = acc.view(np.uint8) exp = np.array(list(itertools.accumulate(arr, ufunc)), dtype=np.uint8) @@ -4731,7 +5000,7 @@ def test_memoverlap_accumulate_symmetric(ufunc, dtype): pytest.skip('For generic signatures only') with np.errstate(all='ignore'): for size in (2, 8, 32, 64, 128, 256): - arr = np.array([0, 1, 2]*size).astype(dtype) + arr = np.array([0, 1, 2] * size).astype(dtype) acc = ufunc.accumulate(arr, dtype=dtype) exp = np.array(list(itertools.accumulate(arr, ufunc)), dtype=dtype) assert_equal(exp, acc) @@ -4748,7 +5017,8 @@ def test_signaling_nan_exceptions(): ]) def test_outer_subclass_preserve(arr): # for gh-8661 - class foo(np.ndarray): pass + class foo(np.ndarray): + pass actual = np.multiply.outer(arr.view(foo), arr.view(foo)) assert actual.__class__.__name__ == 'foo' @@ -4757,18 +5027,18 @@ class BadArr1(np.ndarray): def __array_finalize__(self, obj): # The outer call reshapes to 3 dims, try to do a bad reshape. if self.ndim == 3: - self.shape = self.shape + (1,) + self._set_shape(self.shape + (1,)) class BadArr2(np.ndarray): def __array_finalize__(self, obj): if isinstance(obj, BadArr2): # outer inserts 1-sized dims. In that case disturb them. 
if self.shape[-1] == 1: - self.shape = self.shape[::-1] + self._set_shape(self.shape[::-1]) for cls in [BadArr1, BadArr2]: arr = np.ones((2, 3)).view(cls) - with assert_raises(TypeError) as a: + with pytest.raises(TypeError): # The first array gets reshaped (not the second one) np.add.outer(arr, [1, 2]) @@ -4811,6 +5081,15 @@ def test_bad_legacy_ufunc_silent_errors(): ncu_tests.always_error.at(arr, [0, 1, 2], arr) +def test_bad_legacy_unary_ufunc_silent_errors(): + # Unary has a special scalar path right now, so test it explicitly. + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error_unary(np.arange(3).astype(np.float64)) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error_unary(1.5) + + @pytest.mark.parametrize('x1', [np.arange(3.0), [0.0, 1.0, 2.0]]) def test_bad_legacy_gufunc_silent_errors(x1): # Verify that an exception raised in a gufunc loop propagates correctly. @@ -4821,7 +5100,6 @@ def test_bad_legacy_gufunc_silent_errors(x1): class TestAddDocstring: @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") - @pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") def test_add_same_docstring(self): # test for attributes (which are C-level defined) ncu.add_docstring(np.ndarray.flat, np.ndarray.flat.__doc__) @@ -4848,10 +5126,11 @@ def func(): ncu.add_docstring(func, "different docstring") -class TestAdd_newdoc_ufunc: - def test_ufunc_arg(self): - assert_raises(TypeError, ncu._add_newdoc_ufunc, 2, "blah") - assert_raises(ValueError, ncu._add_newdoc_ufunc, np.add, "blah") +class TestHypotErrorMessages: + def test_hypot_error_message_single_arg(self): + with pytest.raises(TypeError, match="hypot\\(\\) takes .* but 1 was given"): + np.hypot(5) - def test_string_arg(self): - assert_raises(TypeError, ncu._add_newdoc_ufunc, np.add, 3) + def test_hypot_error_message_multiple_args(self): + with pytest.raises(TypeError, match="hypot\\(\\) takes .* but 4 were 
given"): + np.hypot(1, 2, 3, 4) diff --git a/numpy/_core/tests/test_umath_accuracy.py b/numpy/_core/tests/test_umath_accuracy.py index 493c7d6f2d03..3ca2f508672e 100644 --- a/numpy/_core/tests/test_umath_accuracy.py +++ b/numpy/_core/tests/test_umath_accuracy.py @@ -1,20 +1,22 @@ -import numpy as np import os -from os import path import sys +from ctypes import POINTER, c_double, c_float, c_int, c_longlong, cast, pointer +from os import path + import pytest -from ctypes import c_longlong, c_double, c_float, c_int, cast, pointer, POINTER + +import numpy as np +from numpy._core._multiarray_umath import __cpu_features__ from numpy.testing import assert_array_max_ulp from numpy.testing._private.utils import _glibc_older_than -from numpy._core._multiarray_umath import __cpu_features__ UNARY_UFUNCS = [obj for obj in np._core.umath.__dict__.values() if isinstance(obj, np.ufunc)] UNARY_OBJECT_UFUNCS = [uf for uf in UNARY_UFUNCS if "O->O" in uf.types] # Remove functions that do not support `floats` -UNARY_OBJECT_UFUNCS.remove(getattr(np, 'invert')) -UNARY_OBJECT_UFUNCS.remove(getattr(np, 'bitwise_count')) +UNARY_OBJECT_UFUNCS.remove(np.invert) +UNARY_OBJECT_UFUNCS.remove(np.bitwise_count) IS_AVX = __cpu_features__.get('AVX512F', False) or \ (__cpu_features__.get('FMA3', False) and __cpu_features__.get('AVX2', False)) @@ -41,6 +43,7 @@ def convert(s, datatype="np.float32"): return fp.contents.value # dereference the pointer, get the float + str_to_float = np.vectorize(convert) class TestAccuracy: @@ -53,18 +56,28 @@ def test_validate_transcendentals(self): for filename in files: filepath = path.join(data_dir, filename) with open(filepath) as fid: - file_without_comments = (r for r in fid if not r[0] in ('$', '#')) + file_without_comments = ( + r for r in fid if r[0] not in ('$', '#') + ) data = np.genfromtxt(file_without_comments, - dtype=('|S39','|S39','|S39',int), - names=('type','input','output','ulperr'), + dtype=('|S39', '|S39', '|S39', int), + names=('type', 'input', 
'output', 'ulperr'), delimiter=',', skip_header=1) npname = path.splitext(filename)[0].split('-')[3] npfunc = getattr(np, npname) for datatype in np.unique(data['type']): data_subset = data[data['type'] == datatype] - inval = np.array(str_to_float(data_subset['input'].astype(str), data_subset['type'].astype(str)), dtype=eval(datatype)) - outval = np.array(str_to_float(data_subset['output'].astype(str), data_subset['type'].astype(str)), dtype=eval(datatype)) + data_input_str = data_subset['input'].astype(str) + data_output_str = data_subset['output'].astype(str) + data_type_str = data_subset['type'].astype(str) + + inval = np.array(str_to_float(data_input_str, + data_type_str), + dtype=eval(datatype)) + outval = np.array(str_to_float(data_output_str, + data_type_str), + dtype=eval(datatype)) perm = np.random.permutation(len(inval)) inval = inval[perm] outval = outval[perm] @@ -72,7 +85,7 @@ def test_validate_transcendentals(self): assert_array_max_ulp(npfunc(inval), outval, maxulperr) @pytest.mark.skipif(IS_AVX512FP16, - reason = "SVML FP16 have slightly higher ULP errors") + reason="SVML FP16 have slightly higher ULP errors") @pytest.mark.parametrize("ufunc", UNARY_OBJECT_UFUNCS) def test_validate_fp16_transcendentals(self, ufunc): with np.errstate(all='ignore'): diff --git a/numpy/_core/tests/test_umath_complex.py b/numpy/_core/tests/test_umath_complex.py index cc54c16da2e3..24f74bd34569 100644 --- a/numpy/_core/tests/test_umath_complex.py +++ b/numpy/_core/tests/test_umath_complex.py @@ -1,13 +1,19 @@ -import sys import platform +import sys + import pytest import numpy as np + # import the c-extension module directly since _arg is not exported via umath import numpy._core._multiarray_umath as ncu from numpy.testing import ( - assert_raises, assert_equal, assert_array_equal, assert_almost_equal, assert_array_max_ulp - ) + assert_almost_equal, + assert_array_equal, + assert_array_max_ulp, + assert_equal, + assert_raises, +) # TODO: branch cuts (use Pauli code) # 
TODO: conj 'symmetry' @@ -16,7 +22,7 @@ # At least on Windows the results of many complex functions are not conforming # to the C99 standard. See ticket 1574. # Ditto for Solaris (ticket 1642) and OS X on PowerPC. -#FIXME: this will probably change when we require full C99 compatibility +# FIXME: this will probably change when we require full C99 compatibility with np.errstate(all='ignore'): functions_seem_flaky = ((np.exp(complex(np.inf, 0)).imag != 0) or (np.log(complex(ncu.NZERO, 0)).imag != np.pi)) @@ -28,7 +34,6 @@ reason="Inadequate C99 complex support") - class TestCexp: def test_simple(self): check = check_complex_value @@ -61,8 +66,8 @@ def test_special_values(self): check(f, np.inf, 0, np.inf, 0) # cexp(-inf + yi) is +0 * (cos(y) + i sin(y)) for finite y - check(f, -np.inf, 1, ncu.PZERO, ncu.PZERO) - check(f, -np.inf, 0.75 * np.pi, ncu.NZERO, ncu.PZERO) + check(f, -np.inf, 1, ncu.PZERO, ncu.PZERO) + check(f, -np.inf, 0.75 * np.pi, ncu.NZERO, ncu.PZERO) # cexp(inf + yi) is +inf * (cos(y) + i sin(y)) for finite y check(f, np.inf, 1, np.inf, np.inf) @@ -131,7 +136,7 @@ def test_special_values2(self): class TestClog: def test_simple(self): - x = np.array([1+0j, 1+2j]) + x = np.array([1 + 0j, 1 + 2j]) y_r = np.log(np.abs(x)) + 1j * np.angle(x) y = np.log(x) assert_almost_equal(y, y_r) @@ -280,7 +285,7 @@ def test_simple(self): check_complex_value(np.sqrt, 1, 0, 1, 0) # sqrt(1i) - rres = 0.5*np.sqrt(2) + rres = 0.5 * np.sqrt(2) ires = rres check_complex_value(np.sqrt, 0, 1, rres, ires, False) @@ -315,9 +320,9 @@ def test_special_values(self): check(f, ncu.PZERO, np.inf, np.inf, np.inf) check(f, ncu.NZERO, np.inf, np.inf, np.inf) - check(f, np.inf, np.inf, np.inf, np.inf) - check(f, -np.inf, np.inf, np.inf, np.inf) - check(f, -np.nan, np.inf, np.inf, np.inf) + check(f, np.inf, np.inf, np.inf, np.inf) + check(f, -np.inf, np.inf, np.inf, np.inf) # noqa: E221 + check(f, -np.nan, np.inf, np.inf, np.inf) # noqa: E221 # csqrt(x + nani) is nan + nani for any finite x 
check(f, 1, np.nan, np.nan, np.nan) @@ -334,7 +339,7 @@ def test_special_values(self): def _check_ninf_nan(dummy): msgform = "csqrt(-inf, nan) is (%f, %f), expected (nan, +-inf)" z = np.sqrt(np.array(complex(-np.inf, np.nan))) - #Fixme: ugly workaround for isinf bug. + # FIXME: ugly workaround for isinf bug. with np.errstate(invalid='ignore'): if not (np.isnan(z.real) and np.isinf(z.imag)): raise AssertionError(msgform % (z.real, z.imag)) @@ -361,23 +366,23 @@ def teardown_method(self): np.seterr(**self.olderr) def test_simple(self): - x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan]) + x = np.array([1 + 1j, 0 + 2j, 1 + 2j, np.inf, np.nan]) y_r = x ** 2 y = np.power(x, 2) assert_almost_equal(y, y_r) def test_scalar(self): - x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan]) - y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3]) + x = np.array([1, 1j, 2, 2.5 + .37j, np.inf, np.nan]) + y = np.array([1, 1j, -0.5 + 1.5j, -0.5 + 1.5j, 2, 3]) lx = list(range(len(x))) # Hardcode the expected `builtins.complex` values, # as complex exponentiation is broken as of bpo-44698 p_r = [ - 1+0j, - 0.20787957635076193+0j, - 0.35812203996480685+0.6097119028618724j, - 0.12659112128185032+0.48847676699581527j, + 1 + 0j, + 0.20787957635076193 + 0j, + 0.35812203996480685 + 0.6097119028618724j, + 0.12659112128185032 + 0.48847676699581527j, complex(np.inf, np.nan), complex(np.nan, np.nan), ] @@ -387,17 +392,17 @@ def test_scalar(self): assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) def test_array(self): - x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan]) - y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3]) + x = np.array([1, 1j, 2, 2.5 + .37j, np.inf, np.nan]) + y = np.array([1, 1j, -0.5 + 1.5j, -0.5 + 1.5j, 2, 3]) lx = list(range(len(x))) # Hardcode the expected `builtins.complex` values, # as complex exponentiation is broken as of bpo-44698 p_r = [ - 1+0j, - 0.20787957635076193+0j, - 0.35812203996480685+0.6097119028618724j, - 0.12659112128185032+0.48847676699581527j, + 1 + 0j, 
+ 0.20787957635076193 + 0j, + 0.35812203996480685 + 0.6097119028618724j, + 0.12659112128185032 + 0.48847676699581527j, complex(np.inf, np.nan), complex(np.nan, np.nan), ] @@ -414,14 +419,14 @@ def teardown_method(self): np.seterr(**self.olderr) def test_simple(self): - x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan]) + x = np.array([1 + 1j, 0 + 2j, 1 + 2j, np.inf, np.nan]) y_r = np.array([np.sqrt(2.), 2, np.sqrt(5), np.inf, np.nan]) y = np.abs(x) assert_almost_equal(y, y_r) def test_fabs(self): # Test that np.abs(x +- 0j) == np.abs(x) (as mandated by C99 for cabs) - x = np.array([1+0j], dtype=complex) + x = np.array([1 + 0j], dtype=complex) assert_array_equal(np.abs(x), np.real(x)) x = np.array([complex(1, ncu.NZERO)], dtype=complex) @@ -471,9 +476,9 @@ def g(a, b): class TestCarg: def test_simple(self): check_real_value(ncu._arg, 1, 0, 0, False) - check_real_value(ncu._arg, 0, 1, 0.5*np.pi, False) + check_real_value(ncu._arg, 0, 1, 0.5 * np.pi, False) - check_real_value(ncu._arg, 1, 1, 0.25*np.pi, False) + check_real_value(ncu._arg, 1, 1, 0.25 * np.pi, False) check_real_value(ncu._arg, ncu.PZERO, ncu.PZERO, ncu.PZERO) # TODO This can be xfail when the generator functions are got rid of. @@ -554,35 +559,39 @@ def check_complex_value(f, x1, y1, x2, y2, exact=True): assert_almost_equal(f(z1), z2) class TestSpecialComplexAVX: - @pytest.mark.parametrize("stride", [-4,-2,-1,1,2,4]) + @pytest.mark.parametrize("stride", [-4, -2, -1, 1, 2, 4]) @pytest.mark.parametrize("astype", [np.complex64, np.complex128]) def test_array(self, stride, astype): - arr = np.array([complex(np.nan , np.nan), - complex(np.nan , np.inf), - complex(np.inf , np.nan), - complex(np.inf , np.inf), - complex(0. , np.inf), - complex(np.inf , 0.), - complex(0. , 0.), - complex(0. 
, np.nan), - complex(np.nan , 0.)], dtype=astype) - abs_true = np.array([np.nan, np.inf, np.inf, np.inf, np.inf, np.inf, 0., np.nan, np.nan], dtype=arr.real.dtype) - sq_true = np.array([complex(np.nan, np.nan), - complex(np.nan, np.nan), - complex(np.nan, np.nan), - complex(np.nan, np.inf), - complex(-np.inf, np.nan), - complex(np.inf, np.nan), - complex(0., 0.), - complex(np.nan, np.nan), - complex(np.nan, np.nan)], dtype=astype) + nan = np.nan + inf = np.inf + arr = np.array([complex(nan, nan), + complex(nan, inf), + complex(inf, nan), + complex(inf, inf), + complex(0., inf), + complex(inf, 0.), + complex(0., 0.), + complex(0., nan), + complex(nan, 0.)], dtype=astype) + abs_true = np.array([nan, inf, inf, inf, inf, inf, 0., nan, nan], + dtype=arr.real.dtype) + sq_true = np.array([complex(nan, nan), + complex(nan, nan), + complex(nan, nan), + complex(nan, inf), + complex(-inf, nan), + complex(inf, nan), + complex(0., 0.), + complex(nan, nan), + complex(nan, nan)], dtype=astype) with np.errstate(invalid='ignore'): assert_equal(np.abs(arr[::stride]), abs_true[::stride]) assert_equal(np.square(arr[::stride]), sq_true[::stride]) class TestComplexAbsoluteAVX: - @pytest.mark.parametrize("arraysize", [1,2,3,4,5,6,7,8,9,10,11,13,15,17,18,19]) - @pytest.mark.parametrize("stride", [-4,-3,-2,-1,1,2,3,4]) + @pytest.mark.parametrize("arraysize", + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 18, 19]) + @pytest.mark.parametrize("stride", [-4, -3, -2, -1, 1, 2, 3, 4]) @pytest.mark.parametrize("astype", [np.complex64, np.complex128]) # test to ensure masking and strides work as intended in the AVX implementation def test_array(self, arraysize, stride, astype): @@ -592,25 +601,25 @@ def test_array(self, arraysize, stride, astype): # Testcase taken as is from https://github.com/numpy/numpy/issues/16660 class TestComplexAbsoluteMixedDTypes: - @pytest.mark.parametrize("stride", [-4,-3,-2,-1,1,2,3,4]) + @pytest.mark.parametrize("stride", [-4, -3, -2, -1, 1, 2, 3, 4]) 
@pytest.mark.parametrize("astype", [np.complex64, np.complex128]) @pytest.mark.parametrize("func", ['abs', 'square', 'conjugate']) - def test_array(self, stride, astype, func): - dtype = [('template_id', ' _ExtOjbDict: ... +def _make_extobj(*, all: _ErrKind = ..., **kwargs: Unpack[_ExtOjbDict]) -> CapsuleType: ... diff --git a/numpy/_distributor_init.py b/numpy/_distributor_init.py index 25b0eed79fca..f608036a2405 100644 --- a/numpy/_distributor_init.py +++ b/numpy/_distributor_init.py @@ -10,6 +10,6 @@ """ try: - from . import _distributor_init_local + from . import _distributor_init_local # noqa: F401 except ImportError: pass diff --git a/numpy/_distributor_init.pyi b/numpy/_distributor_init.pyi new file mode 100644 index 000000000000..94456aba2bcf --- /dev/null +++ b/numpy/_distributor_init.pyi @@ -0,0 +1 @@ +# intentionally left blank diff --git a/numpy/_expired_attrs_2_0.py b/numpy/_expired_attrs_2_0.py index 1dad38c5a60f..be9a84a3a310 100644 --- a/numpy/_expired_attrs_2_0.py +++ b/numpy/_expired_attrs_2_0.py @@ -11,7 +11,7 @@ "lookfor": "Search NumPy's documentation directly.", "who": "Use an IDE variable explorer or `locals()` instead.", "fastCopyAndTranspose": "Use `arr.T.copy()` instead.", - "set_numeric_ops": + "set_numeric_ops": "For the general case, use `PyUFunc_ReplaceLoopBySignature`. 
" "For ndarray subclasses, define the ``__array_ufunc__`` method " "and override the relevant ufunc.", @@ -19,13 +19,12 @@ "PINF": "Use `np.inf` instead.", "NZERO": "Use `-0.0` instead.", "PZERO": "Use `0.0` instead.", - "add_newdoc": + "add_newdoc": "It's still available as `np.lib.add_newdoc`.", - "add_docstring": + "add_docstring": "It's still available as `np.lib.add_docstring`.", - "add_newdoc_ufunc": + "add_newdoc_ufunc": "It's an internal function and doesn't have a replacement.", - "compat": "There's no replacement, as Python 2 is no longer supported.", "safe_eval": "Use `ast.literal_eval` instead.", "float_": "Use `np.float64` instead.", "complex_": "Use `np.complex128` instead.", @@ -49,7 +48,7 @@ "sctype2char": "Use `np.dtype(obj).char` instead.", "sctypes": "Access dtypes explicitly instead.", "issubsctype": "Use `np.issubdtype` instead.", - "set_string_function": + "set_string_function": "Use `np.set_printoptions` instead with a formatter for " "custom printing of NumPy objects.", "asfarray": "Use `np.asarray` with a proper dtype instead.", @@ -59,20 +58,21 @@ "recfromcsv": "Use `np.genfromtxt` with comma delimiter instead.", "recfromtxt": "Use `np.genfromtxt` instead.", "deprecate": "Emit `DeprecationWarning` with `warnings.warn` directly, " - "or use `typing.deprecated`.", + "or use `warnings.deprecated`.", "deprecate_with_doc": "Emit `DeprecationWarning` with `warnings.warn` " - "directly, or use `typing.deprecated`.", - "disp": "Use your own printing function instead.", - "find_common_type": + "directly, or use `warnings.deprecated`.", + "find_common_type": "Use `numpy.promote_types` or `numpy.result_type` instead. 
" "To achieve semantics for the `scalar_types` argument, use " "`numpy.result_type` and pass the Python values `0`, `0.0`, or `0j`.", "round_": "Use `np.round` instead.", "get_array_wrap": "", - "DataSource": "It's still available as `np.lib.npyio.DataSource`.", - "nbytes": "Use `np.dtype().itemsize` instead.", + "DataSource": "It's still available as `np.lib.npyio.DataSource`.", + "nbytes": "Use `np.dtype().itemsize` instead.", "byte_bounds": "Now it's available under `np.lib.array_utils.byte_bounds`", - "compare_chararrays": + "compare_chararrays": "It's still available as `np.char.compare_chararrays`.", "format_parser": "It's still available as `np.rec.format_parser`.", + "alltrue": "Use `np.all` instead.", + "sometrue": "Use `np.any` instead.", } diff --git a/numpy/_expired_attrs_2_0.pyi b/numpy/_expired_attrs_2_0.pyi new file mode 100644 index 000000000000..de6c2d10f9b0 --- /dev/null +++ b/numpy/_expired_attrs_2_0.pyi @@ -0,0 +1,61 @@ +from typing import Final, TypedDict, final, type_check_only + +@final +@type_check_only +class _ExpiredAttributesType(TypedDict): + geterrobj: str + seterrobj: str + cast: str + source: str + lookfor: str + who: str + fastCopyAndTranspose: str + set_numeric_ops: str + NINF: str + PINF: str + NZERO: str + PZERO: str + add_newdoc: str + add_docstring: str + add_newdoc_ufunc: str + safe_eval: str + float_: str + complex_: str + longfloat: str + singlecomplex: str + cfloat: str + longcomplex: str + clongfloat: str + string_: str + unicode_: str + Inf: str + Infinity: str + NaN: str + infty: str + issctype: str + maximum_sctype: str + obj2sctype: str + sctype2char: str + sctypes: str + issubsctype: str + set_string_function: str + asfarray: str + issubclass_: str + tracemalloc_domain: str + mat: str + recfromcsv: str + recfromtxt: str + deprecate: str + deprecate_with_doc: str + find_common_type: str + round_: str + get_array_wrap: str + DataSource: str + nbytes: str + byte_bounds: str + compare_chararrays: str + format_parser: str + 
alltrue: str + sometrue: str + +__expired_attributes__: Final[_ExpiredAttributesType] = ... diff --git a/numpy/_globals.py b/numpy/_globals.py index a1474177fef8..ada8d5c41af0 100644 --- a/numpy/_globals.py +++ b/numpy/_globals.py @@ -49,6 +49,7 @@ class _NoValueType: """ __instance = None + def __new__(cls): # ensure that only one instance exists if not cls.__instance: @@ -93,3 +94,28 @@ def __bool__(self): return False raise ValueError(f"{self} is neither True nor False.") + + +class _SignatureDescriptor: + # A descriptor to store on the ufunc __dict__ that avoids definig a + # signature for the ufunc class/type but allows the instance to have one. + # This is needed because inspect.signature() chokes on normal properties + # (as of 3.14 at least). + # We could also set __signature__ on the instance but this allows deferred + # computation of the signature. + def __get__(self, obj, objtype=None): + # Delay import, not a critical path but need to avoid circular import. + from numpy._core._internal import _ufunc_inspect_signature_builder + + if obj is None: + # could also return None, which is accepted as "not set" by + # inspect.signature(). + raise AttributeError( + "type object 'numpy.ufunc' has no attribute '__signature__'") + + # Store on the instance, after this the descriptor won't be used. + obj.__signature__ = _ufunc_inspect_signature_builder(obj) + return obj.__signature__ + + +_signature_descriptor = _SignatureDescriptor() diff --git a/numpy/_globals.pyi b/numpy/_globals.pyi new file mode 100644 index 000000000000..b2231a9636b0 --- /dev/null +++ b/numpy/_globals.pyi @@ -0,0 +1,17 @@ +__all__ = ["_CopyMode", "_NoValue"] + +import enum +from typing import Final, final + +@final +class _CopyMode(enum.Enum): + ALWAYS = True + NEVER = False + IF_NEEDED = 2 + + def __bool__(self, /) -> bool: ... + +@final +class _NoValueType: ... + +_NoValue: Final[_NoValueType] = ... 
diff --git a/numpy/compat/tests/__init__.py b/numpy/_pyinstaller/__init__.pyi similarity index 100% rename from numpy/compat/tests/__init__.py rename to numpy/_pyinstaller/__init__.pyi diff --git a/numpy/_pyinstaller/hook-numpy.py b/numpy/_pyinstaller/hook-numpy.py index 0b3b46f2598a..b6f15e6edde8 100644 --- a/numpy/_pyinstaller/hook-numpy.py +++ b/numpy/_pyinstaller/hook-numpy.py @@ -5,8 +5,8 @@ https://pyinstaller.readthedocs.io/en/stable/hooks.html """ -from PyInstaller.compat import is_conda, is_pure_conda -from PyInstaller.utils.hooks import collect_dynamic_libs, is_module_satisfies +from PyInstaller.compat import is_pure_conda +from PyInstaller.utils.hooks import collect_dynamic_libs # Collect all DLLs inside numpy's installation folder, dump them into built # app's root. @@ -31,7 +31,5 @@ "pytest", "f2py", "setuptools", - "numpy.f2py", "distutils", - "numpy.distutils", ] diff --git a/numpy/_pyinstaller/hook-numpy.pyi b/numpy/_pyinstaller/hook-numpy.pyi new file mode 100644 index 000000000000..6da4914d7e5a --- /dev/null +++ b/numpy/_pyinstaller/hook-numpy.pyi @@ -0,0 +1,6 @@ +from typing import Final + +binaries: Final[list[tuple[str, str]]] = ... + +hiddenimports: Final[list[str]] = ... +excludedimports: Final[list[str]] = ... diff --git a/numpy/_pyinstaller/tests/__init__.py b/numpy/_pyinstaller/tests/__init__.py index f7c033bcf503..4ed8fdd53f8c 100644 --- a/numpy/_pyinstaller/tests/__init__.py +++ b/numpy/_pyinstaller/tests/__init__.py @@ -1,6 +1,6 @@ -from numpy.testing import IS_WASM, IS_EDITABLE import pytest +from numpy.testing import IS_EDITABLE, IS_WASM if IS_WASM: pytest.skip( diff --git a/numpy/_pytesttester.py b/numpy/_pytesttester.py index 4548fc6877ec..55583561a19d 100644 --- a/numpy/_pytesttester.py +++ b/numpy/_pytesttester.py @@ -28,8 +28,8 @@ simplify circular import issues. For the same reason, it contains no numpy imports at module scope, instead importing numpy within function calls. 
""" -import sys import os +import sys __all__ = ['PytestTester'] @@ -37,9 +37,9 @@ def _show_numpy_info(): import numpy as np - print("NumPy version %s" % np.__version__) + print(f"NumPy version {np.__version__}") info = np.lib._utils_impl._opt_info() - print("NumPy CPU features: ", (info if info else 'nothing enabled')) + print("NumPy CPU features: ", (info or 'nothing enabled')) class PytestTester: @@ -74,6 +74,7 @@ class PytestTester: """ def __init__(self, module_name): self.module_name = module_name + self.__module__ = module_name def __call__(self, label='fast', verbose=1, extra_argv=None, doctests=False, coverage=False, durations=-1, tests=None): @@ -123,7 +124,6 @@ def __call__(self, label='fast', verbose=1, extra_argv=None, """ import pytest - import warnings module = sys.modules[self.module_name] module_path = os.path.abspath(module.__path__[0]) @@ -134,14 +134,6 @@ def __call__(self, label='fast', verbose=1, extra_argv=None, # offset verbosity. The "-q" cancels a "-v". pytest_args += ["-q"] - if sys.version_info < (3, 12): - with warnings.catch_warnings(): - warnings.simplefilter("always") - # Filter out distutils cpu warnings (could be localized to - # distutils tests). ASV has problems with top level import, - # so fetch module for suppression here. - from numpy.distutils import cpuinfo - # Filter out annoying import messages. Want these in both develop and # release mode. 
pytest_args += [ @@ -164,24 +156,19 @@ def __call__(self, label='fast', verbose=1, extra_argv=None, pytest_args += list(extra_argv) if verbose > 1: - pytest_args += ["-" + "v"*(verbose - 1)] + pytest_args += ["-" + "v" * (verbose - 1)] if coverage: pytest_args += ["--cov=" + module_path] if label == "fast": - # not importing at the top level to avoid circular import of module - from numpy.testing import IS_PYPY - if IS_PYPY: - pytest_args += ["-m", "not slow and not slow_pypy"] - else: - pytest_args += ["-m", "not slow"] + pytest_args += ["-m", "not slow"] elif label != "full": pytest_args += ["-m", label] if durations >= 0: - pytest_args += ["--durations=%s" % durations] + pytest_args += [f"--durations={durations}"] if tests is None: tests = [self.module_name] diff --git a/numpy/_pytesttester.pyi b/numpy/_pytesttester.pyi index 67ac87b33de1..bd71239314b4 100644 --- a/numpy/_pytesttester.pyi +++ b/numpy/_pytesttester.pyi @@ -1,18 +1,18 @@ from collections.abc import Iterable from typing import Literal as L -__all__: list[str] +__all__ = ["PytestTester"] class PytestTester: module_name: str def __init__(self, module_name: str) -> None: ... def __call__( self, - label: L["fast", "full"] = ..., - verbose: int = ..., - extra_argv: None | Iterable[str] = ..., - doctests: L[False] = ..., - coverage: bool = ..., - durations: int = ..., - tests: None | Iterable[str] = ..., + label: L["fast", "full"] = "fast", + verbose: int = 1, + extra_argv: Iterable[str] | None = None, + doctests: L[False] = False, + coverage: bool = False, + durations: int = -1, + tests: Iterable[str] | None = None, ) -> bool: ... diff --git a/numpy/_typing/__init__.py b/numpy/_typing/__init__.py index 01c5a7c4cf78..f69368a95e10 100644 --- a/numpy/_typing/__init__.py +++ b/numpy/_typing/__init__.py @@ -1,224 +1,147 @@ """Private counterpart of ``numpy.typing``.""" -from __future__ import annotations - -from .. 
import ufunc -from .._utils import set_module -from typing import TYPE_CHECKING, final - - -@final # Disallow the creation of arbitrary `NBitBase` subclasses -@set_module("numpy.typing") -class NBitBase: - """ - A type representing `numpy.number` precision during static type checking. - - Used exclusively for the purpose static type checking, `NBitBase` - represents the base of a hierarchical set of subclasses. - Each subsequent subclass is herein used for representing a lower level - of precision, *e.g.* ``64Bit > 32Bit > 16Bit``. - - .. versionadded:: 1.20 - - Examples - -------- - Below is a typical usage example: `NBitBase` is herein used for annotating - a function that takes a float and integer of arbitrary precision - as arguments and returns a new float of whichever precision is largest - (*e.g.* ``np.float16 + np.int64 -> np.float64``). - - .. code-block:: python - - >>> from __future__ import annotations - >>> from typing import TypeVar, TYPE_CHECKING - >>> import numpy as np - >>> import numpy.typing as npt - - >>> T1 = TypeVar("T1", bound=npt.NBitBase) - >>> T2 = TypeVar("T2", bound=npt.NBitBase) - - >>> def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]: - ... return a + b - - >>> a = np.float16() - >>> b = np.int64() - >>> out = add(a, b) - - >>> if TYPE_CHECKING: - ... reveal_locals() - ... # note: Revealed local types are: - ... # note: a: numpy.floating[numpy.typing._16Bit*] - ... # note: b: numpy.signedinteger[numpy.typing._64Bit*] - ... 
# note: out: numpy.floating[numpy.typing._64Bit*] - - """ - - def __init_subclass__(cls) -> None: - allowed_names = { - "NBitBase", "_256Bit", "_128Bit", "_96Bit", "_80Bit", - "_64Bit", "_32Bit", "_16Bit", "_8Bit", - } - if cls.__name__ not in allowed_names: - raise TypeError('cannot inherit from final class "NBitBase"') - super().__init_subclass__() - - -# Silence errors about subclassing a `@final`-decorated class -class _256Bit(NBitBase): # type: ignore[misc] - pass - -class _128Bit(_256Bit): # type: ignore[misc] - pass - -class _96Bit(_128Bit): # type: ignore[misc] - pass - -class _80Bit(_96Bit): # type: ignore[misc] - pass - -class _64Bit(_80Bit): # type: ignore[misc] - pass - -class _32Bit(_64Bit): # type: ignore[misc] - pass - -class _16Bit(_32Bit): # type: ignore[misc] - pass - -class _8Bit(_16Bit): # type: ignore[misc] - pass - - -from ._nested_sequence import ( - _NestedSequence as _NestedSequence, -) -from ._nbit import ( - _NBitByte as _NBitByte, - _NBitShort as _NBitShort, - _NBitIntC as _NBitIntC, - _NBitIntP as _NBitIntP, - _NBitInt as _NBitInt, - _NBitLong as _NBitLong, - _NBitLongLong as _NBitLongLong, - _NBitHalf as _NBitHalf, - _NBitSingle as _NBitSingle, - _NBitDouble as _NBitDouble, - _NBitLongDouble as _NBitLongDouble, +from ._array_like import ( + ArrayLike as ArrayLike, + NDArray as NDArray, + _ArrayLike as _ArrayLike, + _ArrayLikeAnyString_co as _ArrayLikeAnyString_co, + _ArrayLikeBool_co as _ArrayLikeBool_co, + _ArrayLikeBytes_co as _ArrayLikeBytes_co, + _ArrayLikeComplex128_co as _ArrayLikeComplex128_co, + _ArrayLikeComplex_co as _ArrayLikeComplex_co, + _ArrayLikeDT64_co as _ArrayLikeDT64_co, + _ArrayLikeFloat64_co as _ArrayLikeFloat64_co, + _ArrayLikeFloat_co as _ArrayLikeFloat_co, + _ArrayLikeInt as _ArrayLikeInt, + _ArrayLikeInt_co as _ArrayLikeInt_co, + _ArrayLikeNumber_co as _ArrayLikeNumber_co, + _ArrayLikeObject_co as _ArrayLikeObject_co, + _ArrayLikeStr_co as _ArrayLikeStr_co, + _ArrayLikeString_co as _ArrayLikeString_co, + 
_ArrayLikeTD64_co as _ArrayLikeTD64_co, + _ArrayLikeUInt_co as _ArrayLikeUInt_co, + _ArrayLikeVoid_co as _ArrayLikeVoid_co, + _SupportsArray as _SupportsArray, + _SupportsArrayFunc as _SupportsArrayFunc, ) + +# from ._char_codes import ( _BoolCodes as _BoolCodes, - _UInt8Codes as _UInt8Codes, - _UInt16Codes as _UInt16Codes, - _UInt32Codes as _UInt32Codes, - _UInt64Codes as _UInt64Codes, + _BytesCodes as _BytesCodes, + _CharacterCodes as _CharacterCodes, + _CLongDoubleCodes as _CLongDoubleCodes, + _Complex64Codes as _Complex64Codes, + _Complex128Codes as _Complex128Codes, + _ComplexFloatingCodes as _ComplexFloatingCodes, + _DT64Codes as _DT64Codes, + _FlexibleCodes as _FlexibleCodes, + _Float16Codes as _Float16Codes, + _Float32Codes as _Float32Codes, + _Float64Codes as _Float64Codes, + _FloatingCodes as _FloatingCodes, + _GenericCodes as _GenericCodes, + _InexactCodes as _InexactCodes, _Int8Codes as _Int8Codes, _Int16Codes as _Int16Codes, _Int32Codes as _Int32Codes, _Int64Codes as _Int64Codes, - _Float16Codes as _Float16Codes, - _Float32Codes as _Float32Codes, - _Float64Codes as _Float64Codes, - _Complex64Codes as _Complex64Codes, - _Complex128Codes as _Complex128Codes, - _ByteCodes as _ByteCodes, - _ShortCodes as _ShortCodes, _IntCCodes as _IntCCodes, + _IntegerCodes as _IntegerCodes, _IntPCodes as _IntPCodes, - _IntCodes as _IntCodes, _LongCodes as _LongCodes, + _LongDoubleCodes as _LongDoubleCodes, _LongLongCodes as _LongLongCodes, - _UByteCodes as _UByteCodes, - _UShortCodes as _UShortCodes, + _NumberCodes as _NumberCodes, + _ObjectCodes as _ObjectCodes, + _SignedIntegerCodes as _SignedIntegerCodes, + _StrCodes as _StrCodes, + _StringCodes as _StringCodes, + _TD64Codes as _TD64Codes, + _UInt8Codes as _UInt8Codes, + _UInt16Codes as _UInt16Codes, + _UInt32Codes as _UInt32Codes, + _UInt64Codes as _UInt64Codes, _UIntCCodes as _UIntCCodes, _UIntPCodes as _UIntPCodes, - _UIntCodes as _UIntCodes, _ULongCodes as _ULongCodes, _ULongLongCodes as _ULongLongCodes, - 
_HalfCodes as _HalfCodes, - _SingleCodes as _SingleCodes, - _DoubleCodes as _DoubleCodes, - _LongDoubleCodes as _LongDoubleCodes, - _CSingleCodes as _CSingleCodes, - _CDoubleCodes as _CDoubleCodes, - _CLongDoubleCodes as _CLongDoubleCodes, - _DT64Codes as _DT64Codes, - _TD64Codes as _TD64Codes, - _StrCodes as _StrCodes, - _BytesCodes as _BytesCodes, + _UnsignedIntegerCodes as _UnsignedIntegerCodes, _VoidCodes as _VoidCodes, - _ObjectCodes as _ObjectCodes, -) -from ._scalars import ( - _CharLike_co as _CharLike_co, - _BoolLike_co as _BoolLike_co, - _UIntLike_co as _UIntLike_co, - _IntLike_co as _IntLike_co, - _FloatLike_co as _FloatLike_co, - _ComplexLike_co as _ComplexLike_co, - _TD64Like_co as _TD64Like_co, - _NumberLike_co as _NumberLike_co, - _ScalarLike_co as _ScalarLike_co, - _VoidLike_co as _VoidLike_co, -) -from ._shape import ( - _Shape as _Shape, - _ShapeLike as _ShapeLike, ) + +# from ._dtype_like import ( DTypeLike as DTypeLike, _DTypeLike as _DTypeLike, - _SupportsDType as _SupportsDType, - _VoidDTypeLike as _VoidDTypeLike, _DTypeLikeBool as _DTypeLikeBool, - _DTypeLikeUInt as _DTypeLikeUInt, - _DTypeLikeInt as _DTypeLikeInt, - _DTypeLikeFloat as _DTypeLikeFloat, + _DTypeLikeBytes as _DTypeLikeBytes, _DTypeLikeComplex as _DTypeLikeComplex, - _DTypeLikeTD64 as _DTypeLikeTD64, + _DTypeLikeComplex_co as _DTypeLikeComplex_co, _DTypeLikeDT64 as _DTypeLikeDT64, + _DTypeLikeFloat as _DTypeLikeFloat, + _DTypeLikeInt as _DTypeLikeInt, _DTypeLikeObject as _DTypeLikeObject, - _DTypeLikeVoid as _DTypeLikeVoid, _DTypeLikeStr as _DTypeLikeStr, - _DTypeLikeBytes as _DTypeLikeBytes, - _DTypeLikeComplex_co as _DTypeLikeComplex_co, + _DTypeLikeTD64 as _DTypeLikeTD64, + _DTypeLikeUInt as _DTypeLikeUInt, + _DTypeLikeVoid as _DTypeLikeVoid, + _HasDType as _HasDType, + _SupportsDType as _SupportsDType, + _VoidDTypeLike as _VoidDTypeLike, ) -from ._array_like import ( - NDArray as NDArray, - ArrayLike as ArrayLike, - _ArrayLike as _ArrayLike, - _FiniteNestedSequence as 
_FiniteNestedSequence, - _SupportsArray as _SupportsArray, - _SupportsArrayFunc as _SupportsArrayFunc, - _ArrayLikeInt as _ArrayLikeInt, - _ArrayLikeBool_co as _ArrayLikeBool_co, - _ArrayLikeUInt_co as _ArrayLikeUInt_co, - _ArrayLikeInt_co as _ArrayLikeInt_co, - _ArrayLikeFloat_co as _ArrayLikeFloat_co, - _ArrayLikeComplex_co as _ArrayLikeComplex_co, - _ArrayLikeNumber_co as _ArrayLikeNumber_co, - _ArrayLikeTD64_co as _ArrayLikeTD64_co, - _ArrayLikeDT64_co as _ArrayLikeDT64_co, - _ArrayLikeObject_co as _ArrayLikeObject_co, - _ArrayLikeVoid_co as _ArrayLikeVoid_co, - _ArrayLikeStr_co as _ArrayLikeStr_co, - _ArrayLikeBytes_co as _ArrayLikeBytes_co, - _ArrayLikeUnknown as _ArrayLikeUnknown, - _UnknownType as _UnknownType, + +# +from ._nbit import ( + _NBitByte as _NBitByte, + _NBitDouble as _NBitDouble, + _NBitHalf as _NBitHalf, + _NBitInt as _NBitInt, + _NBitIntC as _NBitIntC, + _NBitIntP as _NBitIntP, + _NBitLong as _NBitLong, + _NBitLongDouble as _NBitLongDouble, + _NBitLongLong as _NBitLongLong, + _NBitShort as _NBitShort, + _NBitSingle as _NBitSingle, ) -if TYPE_CHECKING: - from ._ufunc import ( - _UFunc_Nin1_Nout1 as _UFunc_Nin1_Nout1, - _UFunc_Nin2_Nout1 as _UFunc_Nin2_Nout1, - _UFunc_Nin1_Nout2 as _UFunc_Nin1_Nout2, - _UFunc_Nin2_Nout2 as _UFunc_Nin2_Nout2, - _GUFunc_Nin2_Nout1 as _GUFunc_Nin2_Nout1, - ) -else: - # Declare the (type-check-only) ufunc subclasses as ufunc aliases during - # runtime; this helps autocompletion tools such as Jedi (numpy/numpy#19834) - _UFunc_Nin1_Nout1 = ufunc - _UFunc_Nin2_Nout1 = ufunc - _UFunc_Nin1_Nout2 = ufunc - _UFunc_Nin2_Nout2 = ufunc - _GUFunc_Nin2_Nout1 = ufunc +# +from ._nbit_base import ( # type: ignore[deprecated] + NBitBase as NBitBase, # pyright: ignore[reportDeprecated] + _8Bit as _8Bit, + _16Bit as _16Bit, + _32Bit as _32Bit, + _64Bit as _64Bit, + _96Bit as _96Bit, + _128Bit as _128Bit, +) + +# +from ._nested_sequence import _NestedSequence as _NestedSequence + +# +from ._scalars import ( + _BoolLike_co as 
_BoolLike_co, + _CharLike_co as _CharLike_co, + _ComplexLike_co as _ComplexLike_co, + _FloatLike_co as _FloatLike_co, + _IntLike_co as _IntLike_co, + _NumberLike_co as _NumberLike_co, + _ScalarLike_co as _ScalarLike_co, + _TD64Like_co as _TD64Like_co, + _UIntLike_co as _UIntLike_co, + _VoidLike_co as _VoidLike_co, +) + +# +from ._shape import _AnyShape as _AnyShape, _Shape as _Shape, _ShapeLike as _ShapeLike + +# +from ._ufunc import ( + _GUFunc_Nin2_Nout1 as _GUFunc_Nin2_Nout1, + _UFunc_Nin1_Nout1 as _UFunc_Nin1_Nout1, + _UFunc_Nin1_Nout2 as _UFunc_Nin1_Nout2, + _UFunc_Nin2_Nout1 as _UFunc_Nin2_Nout1, + _UFunc_Nin2_Nout2 as _UFunc_Nin2_Nout2, +) diff --git a/numpy/_typing/_add_docstring.py b/numpy/_typing/_add_docstring.py index 722d713a7076..883b890a1a16 100644 --- a/numpy/_typing/_add_docstring.py +++ b/numpy/_typing/_add_docstring.py @@ -120,8 +120,9 @@ def _parse_docstrings() -> str: add_newdoc('NDArray', repr(NDArray), """ - A `np.ndarray[Any, np.dtype[+ScalarType]] ` type alias - :term:`generic ` w.r.t. its `dtype.type `. + A `np.ndarray[tuple[Any, ...], np.dtype[ScalarT]] ` + type alias :term:`generic ` w.r.t. its + `dtype.type `. Can be used during runtime for typing arrays with a given dtype and unspecified shape. 
@@ -136,10 +137,10 @@ def _parse_docstrings() -> str: >>> import numpy.typing as npt >>> print(npt.NDArray) - numpy.ndarray[typing.Any, numpy.dtype[+ScalarType]] + NDArray >>> print(npt.NDArray[np.float64]) - numpy.ndarray[typing.Any, numpy.dtype[numpy.float64]] + NDArray[numpy.float64] >>> NDArrayInt = npt.NDArray[np.int_] >>> a: NDArrayInt = np.arange(10) diff --git a/numpy/_typing/_array_like.py b/numpy/_typing/_array_like.py index 33255693806e..5c249775f810 100644 --- a/numpy/_typing/_array_like.py +++ b/numpy/_typing/_array_like.py @@ -1,35 +1,18 @@ -from __future__ import annotations - -import sys -from collections.abc import Collection, Callable, Sequence -from typing import Any, Protocol, Union, TypeVar, runtime_checkable +from collections.abc import Buffer, Callable, Collection +from typing import TYPE_CHECKING, Any, Protocol, runtime_checkable import numpy as np -from numpy import ( - ndarray, - dtype, - generic, - unsignedinteger, - integer, - floating, - complexfloating, - number, - timedelta64, - datetime64, - object_, - void, - str_, - bytes_, -) -from ._nested_sequence import _NestedSequence -_T = TypeVar("_T") -_ScalarType = TypeVar("_ScalarType", bound=generic) -_ScalarType_co = TypeVar("_ScalarType_co", bound=generic, covariant=True) -_DType = TypeVar("_DType", bound=dtype[Any]) -_DType_co = TypeVar("_DType_co", covariant=True, bound=dtype[Any]) +if TYPE_CHECKING: + from numpy.dtypes import StringDType +else: + from numpy._core.multiarray import StringDType + +from ._nbit_base import _32Bit, _64Bit +from ._nested_sequence import _NestedSequence +from ._shape import _AnyShape -NDArray = ndarray[Any, dtype[_ScalarType_co]] +type NDArray[ScalarT: np.generic] = np.ndarray[_AnyShape, np.dtype[ScalarT]] # The `_SupportsArray` protocol only cares about the default dtype # (i.e. 
`dtype=None` or no `dtype` parameter at all) of the to-be returned @@ -37,8 +20,8 @@ # Concrete implementations of the protocol are responsible for adding # any and all remaining overloads @runtime_checkable -class _SupportsArray(Protocol[_DType_co]): - def __array__(self) -> ndarray[Any, _DType_co]: ... +class _SupportsArray[DTypeT: np.dtype](Protocol): + def __array__(self) -> np.ndarray[Any, DTypeT]: ... @runtime_checkable @@ -53,115 +36,57 @@ def __array_function__( ) -> object: ... -# TODO: Wait until mypy supports recursive objects in combination with typevars -_FiniteNestedSequence = Union[ - _T, - Sequence[_T], - Sequence[Sequence[_T]], - Sequence[Sequence[Sequence[_T]]], - Sequence[Sequence[Sequence[Sequence[_T]]]], -] - # A subset of `npt.ArrayLike` that can be parametrized w.r.t. `np.generic` -_ArrayLike = Union[ - _SupportsArray[dtype[_ScalarType]], - _NestedSequence[_SupportsArray[dtype[_ScalarType]]], -] +type _ArrayLike[ScalarT: np.generic] = ( + _SupportsArray[np.dtype[ScalarT]] + | _NestedSequence[_SupportsArray[np.dtype[ScalarT]]] +) # A union representing array-like objects; consists of two typevars: # One representing types that can be parametrized w.r.t. 
`np.dtype` # and another one for the rest -_DualArrayLike = Union[ - _SupportsArray[_DType], - _NestedSequence[_SupportsArray[_DType]], - _T, - _NestedSequence[_T], -] - -if sys.version_info >= (3, 12): - from collections.abc import Buffer +type _DualArrayLike[DTypeT: np.dtype, BuiltinT] = ( + _SupportsArray[DTypeT] + | _NestedSequence[_SupportsArray[DTypeT]] + | BuiltinT + | _NestedSequence[BuiltinT] +) - ArrayLike = Buffer | _DualArrayLike[ - dtype[Any], - Union[bool, int, float, complex, str, bytes], - ] -else: - ArrayLike = _DualArrayLike[ - dtype[Any], - Union[bool, int, float, complex, str, bytes], - ] +type ArrayLike = Buffer | _DualArrayLike[np.dtype, complex | bytes | str] # `ArrayLike_co`: array-like objects that can be coerced into `X` # given the casting rules `same_kind` -_ArrayLikeBool_co = _DualArrayLike[ - dtype[np.bool], - bool, -] -_ArrayLikeUInt_co = _DualArrayLike[ - dtype[Union[np.bool, unsignedinteger[Any]]], - bool, -] -_ArrayLikeInt_co = _DualArrayLike[ - dtype[Union[np.bool, integer[Any]]], - Union[bool, int], -] -_ArrayLikeFloat_co = _DualArrayLike[ - dtype[Union[np.bool, integer[Any], floating[Any]]], - Union[bool, int, float], -] -_ArrayLikeComplex_co = _DualArrayLike[ - dtype[Union[ - np.bool, - integer[Any], - floating[Any], - complexfloating[Any, Any], - ]], - Union[bool, int, float, complex], -] -_ArrayLikeNumber_co = _DualArrayLike[ - dtype[Union[np.bool, number[Any]]], - Union[bool, int, float, complex], -] -_ArrayLikeTD64_co = _DualArrayLike[ - dtype[Union[np.bool, integer[Any], timedelta64]], - Union[bool, int], -] -_ArrayLikeDT64_co = Union[ - _SupportsArray[dtype[datetime64]], - _NestedSequence[_SupportsArray[dtype[datetime64]]], -] -_ArrayLikeObject_co = Union[ - _SupportsArray[dtype[object_]], - _NestedSequence[_SupportsArray[dtype[object_]]], -] - -_ArrayLikeVoid_co = Union[ - _SupportsArray[dtype[void]], - _NestedSequence[_SupportsArray[dtype[void]]], -] -_ArrayLikeStr_co = _DualArrayLike[ - dtype[str_], - str, -] 
-_ArrayLikeBytes_co = _DualArrayLike[ - dtype[bytes_], - bytes, -] - -_ArrayLikeInt = _DualArrayLike[ - dtype[integer[Any]], +type _ArrayLikeBool_co = _DualArrayLike[np.dtype[np.bool], bool] +type _ArrayLikeUInt_co = _DualArrayLike[np.dtype[np.bool | np.unsignedinteger], bool] +type _ArrayLikeInt_co = _DualArrayLike[np.dtype[np.bool | np.integer], int] +type _ArrayLikeFloat_co = _DualArrayLike[ + np.dtype[np.bool | np.integer | np.floating], + float, +] +type _ArrayLikeComplex_co = _DualArrayLike[np.dtype[np.bool | np.number], complex] +type _ArrayLikeNumber_co = _ArrayLikeComplex_co +type _ArrayLikeTD64_co = _DualArrayLike[ + np.dtype[np.bool | np.integer | np.timedelta64], int, ] +type _ArrayLikeDT64_co = _ArrayLike[np.datetime64] +type _ArrayLikeObject_co = _ArrayLike[np.object_] -# Extra ArrayLike type so that pyright can deal with NDArray[Any] -# Used as the first overload, should only match NDArray[Any], -# not any actual types. -# https://github.com/numpy/numpy/pull/22193 -class _UnknownType: - ... +type _ArrayLikeVoid_co = _ArrayLike[np.void] +type _ArrayLikeBytes_co = _DualArrayLike[np.dtype[np.bytes_], bytes] +type _ArrayLikeStr_co = _DualArrayLike[np.dtype[np.str_], str] +type _ArrayLikeString_co = _DualArrayLike[StringDType, str] +type _ArrayLikeAnyString_co = _DualArrayLike[ + np.dtype[np.character] | StringDType, + bytes | str, +] +type __Float64_co = np.floating[_64Bit] | np.float32 | np.float16 | np.integer | np.bool +type __Complex128_co = ( + np.number[_64Bit] | np.number[_32Bit] | np.float16 | np.integer | np.bool +) +type _ArrayLikeFloat64_co = _DualArrayLike[np.dtype[__Float64_co], float] +type _ArrayLikeComplex128_co = _DualArrayLike[np.dtype[__Complex128_co], complex] -_ArrayLikeUnknown = _DualArrayLike[ - dtype[_UnknownType], - _UnknownType, -] +# NOTE: This includes `builtins.bool`, but not `numpy.bool`. 
+type _ArrayLikeInt = _DualArrayLike[np.dtype[np.integer], int] diff --git a/numpy/_typing/_callable.pyi b/numpy/_typing/_callable.pyi deleted file mode 100644 index 843a0d07c2fb..000000000000 --- a/numpy/_typing/_callable.pyi +++ /dev/null @@ -1,336 +0,0 @@ -""" -A module with various ``typing.Protocol`` subclasses that implement -the ``__call__`` magic method. - -See the `Mypy documentation`_ on protocols for more details. - -.. _`Mypy documentation`: https://mypy.readthedocs.io/en/stable/protocols.html#callback-protocols - -""" - -from __future__ import annotations - -from typing import ( - TypeVar, - overload, - Any, - NoReturn, - Protocol, -) - -import numpy as np -from numpy import ( - generic, - timedelta64, - number, - integer, - unsignedinteger, - signedinteger, - int8, - int_, - floating, - float64, - complexfloating, - complex128, -) -from ._nbit import _NBitInt, _NBitDouble -from ._scalars import ( - _BoolLike_co, - _IntLike_co, - _FloatLike_co, - _NumberLike_co, -) -from . import NBitBase -from ._array_like import NDArray -from ._nested_sequence import _NestedSequence - -_T1 = TypeVar("_T1") -_T2 = TypeVar("_T2") -_T1_contra = TypeVar("_T1_contra", contravariant=True) -_T2_contra = TypeVar("_T2_contra", contravariant=True) -_2Tuple = tuple[_T1, _T1] - -_NBit1 = TypeVar("_NBit1", bound=NBitBase) -_NBit2 = TypeVar("_NBit2", bound=NBitBase) - -_IntType = TypeVar("_IntType", bound=integer) -_FloatType = TypeVar("_FloatType", bound=floating) -_NumberType = TypeVar("_NumberType", bound=number) -_NumberType_co = TypeVar("_NumberType_co", covariant=True, bound=number) -_GenericType_co = TypeVar("_GenericType_co", covariant=True, bound=generic) - -class _BoolOp(Protocol[_GenericType_co]): - @overload - def __call__(self, other: _BoolLike_co, /) -> _GenericType_co: ... - @overload # platform dependent - def __call__(self, other: int, /) -> int_: ... - @overload - def __call__(self, other: float, /) -> float64: ... 
- @overload - def __call__(self, other: complex, /) -> complex128: ... - @overload - def __call__(self, other: _NumberType, /) -> _NumberType: ... - -class _BoolBitOp(Protocol[_GenericType_co]): - @overload - def __call__(self, other: _BoolLike_co, /) -> _GenericType_co: ... - @overload # platform dependent - def __call__(self, other: int, /) -> int_: ... - @overload - def __call__(self, other: _IntType, /) -> _IntType: ... - -class _BoolSub(Protocol): - # Note that `other: bool` is absent here - @overload - def __call__(self, other: bool, /) -> NoReturn: ... - @overload # platform dependent - def __call__(self, other: int, /) -> int_: ... - @overload - def __call__(self, other: float, /) -> float64: ... - @overload - def __call__(self, other: complex, /) -> complex128: ... - @overload - def __call__(self, other: _NumberType, /) -> _NumberType: ... - -class _BoolTrueDiv(Protocol): - @overload - def __call__(self, other: float | _IntLike_co, /) -> float64: ... - @overload - def __call__(self, other: complex, /) -> complex128: ... - @overload - def __call__(self, other: _NumberType, /) -> _NumberType: ... - -class _BoolMod(Protocol): - @overload - def __call__(self, other: _BoolLike_co, /) -> int8: ... - @overload # platform dependent - def __call__(self, other: int, /) -> int_: ... - @overload - def __call__(self, other: float, /) -> float64: ... - @overload - def __call__(self, other: _IntType, /) -> _IntType: ... - @overload - def __call__(self, other: _FloatType, /) -> _FloatType: ... - -class _BoolDivMod(Protocol): - @overload - def __call__(self, other: _BoolLike_co, /) -> _2Tuple[int8]: ... - @overload # platform dependent - def __call__(self, other: int, /) -> _2Tuple[int_]: ... - @overload - def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ... - @overload - def __call__(self, other: _IntType, /) -> _2Tuple[_IntType]: ... - @overload - def __call__(self, other: _FloatType, /) -> _2Tuple[_FloatType]: ... 
- -class _TD64Div(Protocol[_NumberType_co]): - @overload - def __call__(self, other: timedelta64, /) -> _NumberType_co: ... - @overload - def __call__(self, other: _BoolLike_co, /) -> NoReturn: ... - @overload - def __call__(self, other: _FloatLike_co, /) -> timedelta64: ... - -class _IntTrueDiv(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> floating[_NBit1]: ... - @overload - def __call__(self, other: int, /) -> floating[_NBit1 | _NBitInt]: ... - @overload - def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ... - @overload - def __call__( - self, other: complex, /, - ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ... - @overload - def __call__(self, other: integer[_NBit2], /) -> floating[_NBit1 | _NBit2]: ... - -class _UnsignedIntOp(Protocol[_NBit1]): - # NOTE: `uint64 + signedinteger -> float64` - @overload - def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ... - @overload - def __call__( - self, other: int | signedinteger[Any], / - ) -> Any: ... - @overload - def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ... - @overload - def __call__( - self, other: complex, /, - ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ... - @overload - def __call__( - self, other: unsignedinteger[_NBit2], / - ) -> unsignedinteger[_NBit1 | _NBit2]: ... - -class _UnsignedIntBitOp(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ... - @overload - def __call__(self, other: int, /) -> signedinteger[Any]: ... - @overload - def __call__(self, other: signedinteger[Any], /) -> signedinteger[Any]: ... - @overload - def __call__( - self, other: unsignedinteger[_NBit2], / - ) -> unsignedinteger[_NBit1 | _NBit2]: ... - -class _UnsignedIntMod(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ... - @overload - def __call__( - self, other: int | signedinteger[Any], / - ) -> Any: ... 
- @overload - def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ... - @overload - def __call__( - self, other: unsignedinteger[_NBit2], / - ) -> unsignedinteger[_NBit1 | _NBit2]: ... - -class _UnsignedIntDivMod(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> _2Tuple[signedinteger[_NBit1]]: ... - @overload - def __call__( - self, other: int | signedinteger[Any], / - ) -> _2Tuple[Any]: ... - @overload - def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ... - @overload - def __call__( - self, other: unsignedinteger[_NBit2], / - ) -> _2Tuple[unsignedinteger[_NBit1 | _NBit2]]: ... - -class _SignedIntOp(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ... - @overload - def __call__(self, other: int, /) -> signedinteger[_NBit1 | _NBitInt]: ... - @overload - def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ... - @overload - def __call__( - self, other: complex, /, - ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ... - @overload - def __call__( - self, other: signedinteger[_NBit2], /, - ) -> signedinteger[_NBit1 | _NBit2]: ... - -class _SignedIntBitOp(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ... - @overload - def __call__(self, other: int, /) -> signedinteger[_NBit1 | _NBitInt]: ... - @overload - def __call__( - self, other: signedinteger[_NBit2], /, - ) -> signedinteger[_NBit1 | _NBit2]: ... - -class _SignedIntMod(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ... - @overload - def __call__(self, other: int, /) -> signedinteger[_NBit1 | _NBitInt]: ... - @overload - def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ... - @overload - def __call__( - self, other: signedinteger[_NBit2], /, - ) -> signedinteger[_NBit1 | _NBit2]: ... 
- -class _SignedIntDivMod(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> _2Tuple[signedinteger[_NBit1]]: ... - @overload - def __call__(self, other: int, /) -> _2Tuple[signedinteger[_NBit1 | _NBitInt]]: ... - @overload - def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ... - @overload - def __call__( - self, other: signedinteger[_NBit2], /, - ) -> _2Tuple[signedinteger[_NBit1 | _NBit2]]: ... - -class _FloatOp(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> floating[_NBit1]: ... - @overload - def __call__(self, other: int, /) -> floating[_NBit1 | _NBitInt]: ... - @overload - def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ... - @overload - def __call__( - self, other: complex, /, - ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ... - @overload - def __call__( - self, other: integer[_NBit2] | floating[_NBit2], / - ) -> floating[_NBit1 | _NBit2]: ... - -class _FloatMod(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> floating[_NBit1]: ... - @overload - def __call__(self, other: int, /) -> floating[_NBit1 | _NBitInt]: ... - @overload - def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ... - @overload - def __call__( - self, other: integer[_NBit2] | floating[_NBit2], / - ) -> floating[_NBit1 | _NBit2]: ... - -class _FloatDivMod(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> _2Tuple[floating[_NBit1]]: ... - @overload - def __call__(self, other: int, /) -> _2Tuple[floating[_NBit1 | _NBitInt]]: ... - @overload - def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ... - @overload - def __call__( - self, other: integer[_NBit2] | floating[_NBit2], / - ) -> _2Tuple[floating[_NBit1 | _NBit2]]: ... - -class _ComplexOp(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> complexfloating[_NBit1, _NBit1]: ... 
- @overload - def __call__(self, other: int, /) -> complexfloating[_NBit1 | _NBitInt, _NBit1 | _NBitInt]: ... - @overload - def __call__( - self, other: complex, /, - ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ... - @overload - def __call__( - self, - other: ( - integer[_NBit2] - | floating[_NBit2] - | complexfloating[_NBit2, _NBit2] - ), /, - ) -> complexfloating[_NBit1 | _NBit2, _NBit1 | _NBit2]: ... - -class _NumberOp(Protocol): - def __call__(self, other: _NumberLike_co, /) -> Any: ... - -class _SupportsLT(Protocol): - def __lt__(self, other: Any, /) -> object: ... - -class _SupportsGT(Protocol): - def __gt__(self, other: Any, /) -> object: ... - -class _ComparisonOp(Protocol[_T1_contra, _T2_contra]): - @overload - def __call__(self, other: _T1_contra, /) -> np.bool: ... - @overload - def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ... - @overload - def __call__( - self, - other: _SupportsLT | _SupportsGT | _NestedSequence[_SupportsLT | _SupportsGT], - /, - ) -> Any: ... 
diff --git a/numpy/_typing/_char_codes.py b/numpy/_typing/_char_codes.py index e5c4fa5d1bd2..518f9b473e4a 100644 --- a/numpy/_typing/_char_codes.py +++ b/numpy/_typing/_char_codes.py @@ -1,113 +1,156 @@ from typing import Literal -_BoolCodes = Literal["?", "=?", "?", "bool", "bool_"] - -_UInt8Codes = Literal["uint8", "u1", "=u1", "u1"] -_UInt16Codes = Literal["uint16", "u2", "=u2", "u2"] -_UInt32Codes = Literal["uint32", "u4", "=u4", "u4"] -_UInt64Codes = Literal["uint64", "u8", "=u8", "u8"] - -_Int8Codes = Literal["int8", "i1", "=i1", "i1"] -_Int16Codes = Literal["int16", "i2", "=i2", "i2"] -_Int32Codes = Literal["int32", "i4", "=i4", "i4"] -_Int64Codes = Literal["int64", "i8", "=i8", "i8"] - -_Float16Codes = Literal["float16", "f2", "=f2", "f2"] -_Float32Codes = Literal["float32", "f4", "=f4", "f4"] -_Float64Codes = Literal["float64", "f8", "=f8", "f8"] - -_Complex64Codes = Literal["complex64", "c8", "=c8", "c8"] -_Complex128Codes = Literal["complex128", "c16", "=c16", "c16"] - -_ByteCodes = Literal["byte", "b", "=b", "b"] -_ShortCodes = Literal["short", "h", "=h", "h"] -_IntCCodes = Literal["intc", "i", "=i", "i"] -_IntPCodes = Literal["intp", "int", "int_", "n", "=n", "n"] -_LongCodes = Literal["long", "l", "=l", "l"] -_IntCodes = _IntPCodes -_LongLongCodes = Literal["longlong", "q", "=q", "q"] - -_UByteCodes = Literal["ubyte", "B", "=B", "B"] -_UShortCodes = Literal["ushort", "H", "=H", "H"] -_UIntCCodes = Literal["uintc", "I", "=I", "I"] -_UIntPCodes = Literal["uintp", "uint", "N", "=N", "N"] -_ULongCodes = Literal["ulong", "L", "=L", "L"] -_UIntCodes = _UIntPCodes -_ULongLongCodes = Literal["ulonglong", "Q", "=Q", "Q"] - -_HalfCodes = Literal["half", "e", "=e", "e"] -_SingleCodes = Literal["single", "f", "=f", "f"] -_DoubleCodes = Literal["double", "float", "d", "=d", "d"] -_LongDoubleCodes = Literal["longdouble", "g", "=g", "g"] - -_CSingleCodes = Literal["csingle", "F", "=F", "F"] -_CDoubleCodes = Literal["cdouble", "complex", "D", "=D", "D"] 
-_CLongDoubleCodes = Literal["clongdouble", "G", "=G", "G"] - -_StrCodes = Literal["str", "str_", "unicode", "U", "=U", "U"] -_BytesCodes = Literal["bytes", "bytes_", "S", "=S", "S"] -_VoidCodes = Literal["void", "V", "=V", "V"] -_ObjectCodes = Literal["object", "object_", "O", "=O", "O"] - -_DT64Codes = Literal[ - "datetime64", "=datetime64", "datetime64", - "datetime64[Y]", "=datetime64[Y]", "datetime64[Y]", - "datetime64[M]", "=datetime64[M]", "datetime64[M]", - "datetime64[W]", "=datetime64[W]", "datetime64[W]", - "datetime64[D]", "=datetime64[D]", "datetime64[D]", - "datetime64[h]", "=datetime64[h]", "datetime64[h]", - "datetime64[m]", "=datetime64[m]", "datetime64[m]", - "datetime64[s]", "=datetime64[s]", "datetime64[s]", - "datetime64[ms]", "=datetime64[ms]", "datetime64[ms]", - "datetime64[us]", "=datetime64[us]", "datetime64[us]", - "datetime64[ns]", "=datetime64[ns]", "datetime64[ns]", - "datetime64[ps]", "=datetime64[ps]", "datetime64[ps]", - "datetime64[fs]", "=datetime64[fs]", "datetime64[fs]", - "datetime64[as]", "=datetime64[as]", "datetime64[as]", - "M", "=M", "M", - "M8", "=M8", "M8", - "M8[Y]", "=M8[Y]", "M8[Y]", - "M8[M]", "=M8[M]", "M8[M]", - "M8[W]", "=M8[W]", "M8[W]", - "M8[D]", "=M8[D]", "M8[D]", - "M8[h]", "=M8[h]", "M8[h]", - "M8[m]", "=M8[m]", "M8[m]", - "M8[s]", "=M8[s]", "M8[s]", - "M8[ms]", "=M8[ms]", "M8[ms]", - "M8[us]", "=M8[us]", "M8[us]", - "M8[ns]", "=M8[ns]", "M8[ns]", - "M8[ps]", "=M8[ps]", "M8[ps]", - "M8[fs]", "=M8[fs]", "M8[fs]", - "M8[as]", "=M8[as]", "M8[as]", +type _BoolCodes = Literal["bool", "bool_", "?", "b1", "|b1", "=b1", "b1"] + +type _Int8Codes = Literal["int8", "byte", "b", "i1", "|i1", "=i1", "i1"] +type _Int16Codes = Literal["int16", "short", "h", "i2", "|i2", "=i2", "i2"] +type _Int32Codes = Literal["int32", "i4", "|i4", "=i4", "i4"] +type _Int64Codes = Literal["int64", "i8", "|i8", "=i8", "i8"] + +type _UInt8Codes = Literal["uint8", "ubyte", "B", "u1", "|u1", "=u1", "u1"] +type _UInt16Codes = Literal["uint16", 
"ushort", "H", "u2", "|u2", "=u2", "u2"] +type _UInt32Codes = Literal["uint32", "u4", "|u4", "=u4", "u4"] +type _UInt64Codes = Literal["uint64", "u8", "|u8", "=u8", "u8"] + +type _IntCCodes = Literal["intc", "i", "|i", "=i", "i"] +type _LongCodes = Literal["long", "l", "|l", "=l", "l"] +type _LongLongCodes = Literal["longlong", "q", "|q", "=q", "q"] +type _IntPCodes = Literal["intp", "int", "int_", "n", "|n", "=n", "n"] + +type _UIntCCodes = Literal["uintc", "I", "|I", "=I", "I"] +type _ULongCodes = Literal["ulong", "L", "|L", "=L", "L"] +type _ULongLongCodes = Literal["ulonglong", "Q", "|Q", "=Q", "Q"] +type _UIntPCodes = Literal["uintp", "uint", "N", "|N", "=N", "N"] + +type _Float16Codes = Literal["float16", "half", "e", "f2", "|f2", "=f2", "f2"] +type _Float32Codes = Literal["float32", "single", "f", "f4", "|f4", "=f4", "f4"] +type _Float64Codes = Literal[ + "float64", "float", "double", "d", "f8", "|f8", "=f8", "f8" +] + +type _LongDoubleCodes = Literal["longdouble", "g", "|g", "=g", "g"] + +type _Complex64Codes = Literal[ + "complex64", "csingle", "F", "c8", "|c8", "=c8", "c8" +] + +type _Complex128Codes = Literal[ + "complex128", "complex", "cdouble", "D", "c16", "|c16", "=c16", "c16" +] + +type _CLongDoubleCodes = Literal["clongdouble", "G", "|G", "=G", "G"] + +type _StrCodes = Literal["str", "str_", "unicode", "U", "|U", "=U", "U"] +type _BytesCodes = Literal["bytes", "bytes_", "S", "|S", "=S", "S"] +type _VoidCodes = Literal["void", "V", "|V", "=V", "V"] +type _ObjectCodes = Literal["object", "object_", "O", "|O", "=O", "O"] + +# datetime64 +type _DT64Codes_any = Literal["datetime64", "M", "M8", "|M8", "=M8", "M8"] +type _DT64Codes_date = Literal[ + "datetime64[Y]", "M8[Y]", "|M8[Y]", "=M8[Y]", "M8[Y]", + "datetime64[M]", "M8[M]", "|M8[M]", "=M8[M]", "M8[M]", + "datetime64[W]", "M8[W]", "|M8[W]", "=M8[W]", "M8[W]", + "datetime64[D]", "M8[D]", "|M8[D]", "=M8[D]", "M8[D]", +] # fmt: skip +type _DT64Codes_datetime = Literal[ + "datetime64[h]", "M8[h]", 
"|M8[h]", "=M8[h]", "M8[h]", + "datetime64[m]", "M8[m]", "|M8[m]", "=M8[m]", "M8[m]", + "datetime64[s]", "M8[s]", "|M8[s]", "=M8[s]", "M8[s]", + "datetime64[ms]", "M8[ms]", "|M8[ms]", "=M8[ms]", "M8[ms]", + "datetime64[us]", "M8[us]", "|M8[us]", "=M8[us]", "M8[us]", + "datetime64[μs]", "M8[μs]", "|M8[μs]", "=M8[μs]", "M8[μs]", +] # fmt: skip +type _DT64Codes_int = Literal[ + "datetime64[ns]", "M8[ns]", "|M8[ns]", "=M8[ns]", "M8[ns]", + "datetime64[ps]", "M8[ps]", "|M8[ps]", "=M8[ps]", "M8[ps]", + "datetime64[fs]", "M8[fs]", "|M8[fs]", "=M8[fs]", "M8[fs]", + "datetime64[as]", "M8[as]", "|M8[as]", "=M8[as]", "M8[as]", +] # fmt: skip +type _DT64Codes = Literal[ + _DT64Codes_any, + _DT64Codes_date, + _DT64Codes_datetime, + _DT64Codes_int, +] + +# timedelta64 +type _TD64Codes_any = Literal["timedelta64", "m", "m8", "|m8", "=m8", "m8"] +type _TD64Codes_int = Literal[ + "timedelta64[Y]", "m8[Y]", "|m8[Y]", "=m8[Y]", "m8[Y]", + "timedelta64[M]", "m8[M]", "|m8[M]", "=m8[M]", "m8[M]", + "timedelta64[ns]", "m8[ns]", "|m8[ns]", "=m8[ns]", "m8[ns]", + "timedelta64[ps]", "m8[ps]", "|m8[ps]", "=m8[ps]", "m8[ps]", + "timedelta64[fs]", "m8[fs]", "|m8[fs]", "=m8[fs]", "m8[fs]", + "timedelta64[as]", "m8[as]", "|m8[as]", "=m8[as]", "m8[as]", +] # fmt: skip +type _TD64Codes_timedelta = Literal[ + "timedelta64[W]", "m8[W]", "|m8[W]", "=m8[W]", "m8[W]", + "timedelta64[D]", "m8[D]", "|m8[D]", "=m8[D]", "m8[D]", + "timedelta64[h]", "m8[h]", "|m8[h]", "=m8[h]", "m8[h]", + "timedelta64[m]", "m8[m]", "|m8[m]", "=m8[m]", "m8[m]", + "timedelta64[s]", "m8[s]", "|m8[s]", "=m8[s]", "m8[s]", + "timedelta64[ms]", "m8[ms]", "|m8[ms]", "=m8[ms]", "m8[ms]", + "timedelta64[us]", "m8[us]", "|m8[us]", "=m8[us]", "m8[us]", + "timedelta64[μs]", "m8[μs]", "|m8[μs]", "=m8[μs]", "m8[μs]", +] # fmt: skip +type _TD64Codes = Literal[_TD64Codes_any, _TD64Codes_int, _TD64Codes_timedelta] + +# NOTE: `StringDType` has no scalar type, and therefore has no name that can +# be passed to the `dtype` constructor
+type _StringCodes = Literal["T", "|T", "=T", "T"] + +# NOTE: Nested literals get flattened and de-duplicated at runtime, which isn't +# the case for a `Union` of `Literal`s. +# So even though they're equivalent when type-checking, they differ at runtime. +# Another advantage of nesting, is that they always have a "flat" +# `Literal.__args__`, which is a tuple of *literally* all its literal values. + +type _SignedIntegerCodes = Literal[ + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _IntCCodes, + _LongCodes, + _LongLongCodes, + _IntPCodes, +] +type _UnsignedIntegerCodes = Literal[ + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _UIntCCodes, + _ULongCodes, + _ULongLongCodes, + _UIntPCodes, +] +type _FloatingCodes = Literal[ + _Float16Codes, + _Float32Codes, + _Float64Codes, + _LongDoubleCodes, ] -_TD64Codes = Literal[ - "timedelta64", "=timedelta64", "timedelta64", - "timedelta64[Y]", "=timedelta64[Y]", "timedelta64[Y]", - "timedelta64[M]", "=timedelta64[M]", "timedelta64[M]", - "timedelta64[W]", "=timedelta64[W]", "timedelta64[W]", - "timedelta64[D]", "=timedelta64[D]", "timedelta64[D]", - "timedelta64[h]", "=timedelta64[h]", "timedelta64[h]", - "timedelta64[m]", "=timedelta64[m]", "timedelta64[m]", - "timedelta64[s]", "=timedelta64[s]", "timedelta64[s]", - "timedelta64[ms]", "=timedelta64[ms]", "timedelta64[ms]", - "timedelta64[us]", "=timedelta64[us]", "timedelta64[us]", - "timedelta64[ns]", "=timedelta64[ns]", "timedelta64[ns]", - "timedelta64[ps]", "=timedelta64[ps]", "timedelta64[ps]", - "timedelta64[fs]", "=timedelta64[fs]", "timedelta64[fs]", - "timedelta64[as]", "=timedelta64[as]", "timedelta64[as]", - "m", "=m", "m", - "m8", "=m8", "m8", - "m8[Y]", "=m8[Y]", "m8[Y]", - "m8[M]", "=m8[M]", "m8[M]", - "m8[W]", "=m8[W]", "m8[W]", - "m8[D]", "=m8[D]", "m8[D]", - "m8[h]", "=m8[h]", "m8[h]", - "m8[m]", "=m8[m]", "m8[m]", - "m8[s]", "=m8[s]", "m8[s]", - "m8[ms]", "=m8[ms]", "m8[ms]", - "m8[us]", "=m8[us]", "m8[us]", - "m8[ns]", 
"=m8[ns]", "m8[ns]", - "m8[ps]", "=m8[ps]", "m8[ps]", - "m8[fs]", "=m8[fs]", "m8[fs]", - "m8[as]", "=m8[as]", "m8[as]", +type _ComplexFloatingCodes = Literal[ + _Complex64Codes, + _Complex128Codes, + _CLongDoubleCodes, +] +type _IntegerCodes = Literal[_UnsignedIntegerCodes, _SignedIntegerCodes] +type _InexactCodes = Literal[_FloatingCodes, _ComplexFloatingCodes] +type _NumberCodes = Literal[_IntegerCodes, _InexactCodes] + +type _CharacterCodes = Literal[_BytesCodes, _StrCodes] +type _FlexibleCodes = Literal[_CharacterCodes, _VoidCodes] + +type _GenericCodes = Literal[ + _BoolCodes, + _NumberCodes, + _FlexibleCodes, + _DT64Codes, + _TD64Codes, + _ObjectCodes, + # TODO: add `_StringCodes` once it has a scalar type + # _StringCodes, ] diff --git a/numpy/_typing/_dtype_like.py b/numpy/_typing/_dtype_like.py index 73a5f7d7b5a7..09ed1a0084de 100644 --- a/numpy/_typing/_dtype_like.py +++ b/numpy/_typing/_dtype_like.py @@ -1,250 +1,107 @@ from collections.abc import Sequence -from typing import ( - Any, - Sequence, - Union, - TypeVar, - Protocol, - TypedDict, - runtime_checkable, -) +from typing import Any, NotRequired, Protocol, TypedDict, runtime_checkable import numpy as np -from ._shape import _ShapeLike - from ._char_codes import ( _BoolCodes, - _UInt8Codes, - _UInt16Codes, - _UInt32Codes, - _UInt64Codes, - _Int8Codes, - _Int16Codes, - _Int32Codes, - _Int64Codes, - _Float16Codes, - _Float32Codes, - _Float64Codes, - _Complex64Codes, - _Complex128Codes, - _ByteCodes, - _ShortCodes, - _IntCCodes, - _LongCodes, - _LongLongCodes, - _IntPCodes, - _IntCodes, - _UByteCodes, - _UShortCodes, - _UIntCCodes, - _ULongCodes, - _ULongLongCodes, - _UIntPCodes, - _UIntCodes, - _HalfCodes, - _SingleCodes, - _DoubleCodes, - _LongDoubleCodes, - _CSingleCodes, - _CDoubleCodes, - _CLongDoubleCodes, + _BytesCodes, + _ComplexFloatingCodes, _DT64Codes, - _TD64Codes, + _FloatingCodes, + _NumberCodes, + _ObjectCodes, + _SignedIntegerCodes, _StrCodes, - _BytesCodes, + _TD64Codes, + 
_UnsignedIntegerCodes, _VoidCodes, - _ObjectCodes, ) -_SCT = TypeVar("_SCT", bound=np.generic) -_DType_co = TypeVar("_DType_co", covariant=True, bound=np.dtype[Any]) +type _DTypeLikeNested = Any # TODO: wait for support for recursive types -_DTypeLikeNested = Any # TODO: wait for support for recursive types - -# Mandatory keys -class _DTypeDictBase(TypedDict): +class _DTypeDict(TypedDict): names: Sequence[str] formats: Sequence[_DTypeLikeNested] - - -# Mandatory + optional keys -class _DTypeDict(_DTypeDictBase, total=False): # Only `str` elements are usable as indexing aliases, # but `titles` can in principle accept any object - offsets: Sequence[int] - titles: Sequence[Any] - itemsize: int - aligned: bool + offsets: NotRequired[Sequence[int]] + titles: NotRequired[Sequence[Any]] + itemsize: NotRequired[int] + aligned: NotRequired[bool] # A protocol for anything with the dtype attribute @runtime_checkable -class _SupportsDType(Protocol[_DType_co]): +class _HasDType[DTypeT: np.dtype](Protocol): @property - def dtype(self) -> _DType_co: ... + def dtype(self) -> DTypeT: ... + + +class _HasNumPyDType[DTypeT: np.dtype](Protocol): + @property + def __numpy_dtype__(self, /) -> DTypeT: ... + + +type _SupportsDType[DTypeT: np.dtype] = _HasDType[DTypeT] | _HasNumPyDType[DTypeT] # A subset of `npt.DTypeLike` that can be parametrized w.r.t. 
`np.generic` -_DTypeLike = Union[ - np.dtype[_SCT], - type[_SCT], - _SupportsDType[np.dtype[_SCT]], -] +type _DTypeLike[ScalarT: np.generic] = ( + type[ScalarT] | np.dtype[ScalarT] | _SupportsDType[np.dtype[ScalarT]] +) # Would create a dtype[np.void] -_VoidDTypeLike = Union[ - # (flexible_dtype, itemsize) - tuple[_DTypeLikeNested, int], - # (fixed_dtype, shape) - tuple[_DTypeLikeNested, _ShapeLike], +type _VoidDTypeLike = ( + # If a tuple, then it can be either: + # - (flexible_dtype, itemsize) + # - (fixed_dtype, shape) + # - (base_dtype, new_dtype) + # But because `_DTypeLikeNested = Any`, the first two cases are redundant + + # tuple[_DTypeLikeNested, int] | tuple[_DTypeLikeNested, _ShapeLike] | + tuple[_DTypeLikeNested, _DTypeLikeNested] + # [(field_name, field_dtype, field_shape), ...] - # # The type here is quite broad because NumPy accepts quite a wide - # range of inputs inside the list; see the tests for some - # examples. - list[Any], - # {'names': ..., 'formats': ..., 'offsets': ..., 'titles': ..., - # 'itemsize': ...} - _DTypeDict, - # (base_dtype, new_dtype) - tuple[_DTypeLikeNested, _DTypeLikeNested], -] + # range of inputs inside the list; see the tests for some examples. + | list[Any] + + # {'names': ..., 'formats': ..., 'offsets': ..., 'titles': ..., 'itemsize': ...} + | _DTypeDict +) + +# Aliases for commonly used dtype-like objects. +# Note that the precision of `np.number` subclasses is ignored herein. 
+type _DTypeLikeBool = type[bool] | _DTypeLike[np.bool] | _BoolCodes +type _DTypeLikeInt = type[int] | _DTypeLike[np.signedinteger] | _SignedIntegerCodes +type _DTypeLikeUInt = _DTypeLike[np.unsignedinteger] | _UnsignedIntegerCodes +type _DTypeLikeFloat = type[float] | _DTypeLike[np.floating] | _FloatingCodes +type _DTypeLikeComplex = ( + type[complex] | _DTypeLike[np.complexfloating] | _ComplexFloatingCodes +) +type _DTypeLikeComplex_co = ( + type[complex] | _DTypeLike[np.bool | np.number] | _BoolCodes | _NumberCodes +) +type _DTypeLikeDT64 = _DTypeLike[np.timedelta64] | _TD64Codes +type _DTypeLikeTD64 = _DTypeLike[np.datetime64] | _DT64Codes +type _DTypeLikeBytes = type[bytes] | _DTypeLike[np.bytes_] | _BytesCodes +type _DTypeLikeStr = type[str] | _DTypeLike[np.str_] | _StrCodes +type _DTypeLikeVoid = ( + type[memoryview] | _DTypeLike[np.void] | _VoidDTypeLike | _VoidCodes +) +type _DTypeLikeObject = type[object] | _DTypeLike[np.object_] | _ObjectCodes + # Anything that can be coerced into numpy.dtype. # Reference: https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html -DTypeLike = Union[ - np.dtype[Any], - # default data type (float64) - None, - # array-scalar types and generic types - type[Any], # NOTE: We're stuck with `type[Any]` due to object dtypes - # anything with a dtype attribute - _SupportsDType[np.dtype[Any]], - # character codes, type strings or comma-separated fields, e.g., 'float64' - str, - _VoidDTypeLike, -] +type DTypeLike = type | str | np.dtype | _SupportsDType[np.dtype] | _VoidDTypeLike # NOTE: while it is possible to provide the dtype as a dict of # dtype-like objects (e.g. `{'field1': ..., 'field2': ..., ...}`), -# this syntax is officially discourged and -# therefore not included in the Union defining `DTypeLike`. +# this syntax is officially discouraged and +# therefore not included in the type-union defining `DTypeLike`. # # See https://github.com/numpy/numpy/issues/16891 for more details. 
- -# Aliases for commonly used dtype-like objects. -# Note that the precision of `np.number` subclasses is ignored herein. -_DTypeLikeBool = Union[ - type[bool], - type[np.bool], - np.dtype[np.bool], - _SupportsDType[np.dtype[np.bool]], - _BoolCodes, -] -_DTypeLikeUInt = Union[ - type[np.unsignedinteger], - np.dtype[np.unsignedinteger], - _SupportsDType[np.dtype[np.unsignedinteger]], - _UInt8Codes, - _UInt16Codes, - _UInt32Codes, - _UInt64Codes, - _UByteCodes, - _UShortCodes, - _UIntCCodes, - _LongCodes, - _ULongLongCodes, - _UIntPCodes, - _UIntCodes, -] -_DTypeLikeInt = Union[ - type[int], - type[np.signedinteger], - np.dtype[np.signedinteger], - _SupportsDType[np.dtype[np.signedinteger]], - _Int8Codes, - _Int16Codes, - _Int32Codes, - _Int64Codes, - _ByteCodes, - _ShortCodes, - _IntCCodes, - _LongCodes, - _LongLongCodes, - _IntPCodes, - _IntCodes, -] -_DTypeLikeFloat = Union[ - type[float], - type[np.floating], - np.dtype[np.floating], - _SupportsDType[np.dtype[np.floating]], - _Float16Codes, - _Float32Codes, - _Float64Codes, - _HalfCodes, - _SingleCodes, - _DoubleCodes, - _LongDoubleCodes, -] -_DTypeLikeComplex = Union[ - type[complex], - type[np.complexfloating], - np.dtype[np.complexfloating], - _SupportsDType[np.dtype[np.complexfloating]], - _Complex64Codes, - _Complex128Codes, - _CSingleCodes, - _CDoubleCodes, - _CLongDoubleCodes, -] -_DTypeLikeDT64 = Union[ - type[np.timedelta64], - np.dtype[np.timedelta64], - _SupportsDType[np.dtype[np.timedelta64]], - _TD64Codes, -] -_DTypeLikeTD64 = Union[ - type[np.datetime64], - np.dtype[np.datetime64], - _SupportsDType[np.dtype[np.datetime64]], - _DT64Codes, -] -_DTypeLikeStr = Union[ - type[str], - type[np.str_], - np.dtype[np.str_], - _SupportsDType[np.dtype[np.str_]], - _StrCodes, -] -_DTypeLikeBytes = Union[ - type[bytes], - type[np.bytes_], - np.dtype[np.bytes_], - _SupportsDType[np.dtype[np.bytes_]], - _BytesCodes, -] -_DTypeLikeVoid = Union[ - type[np.void], - np.dtype[np.void], - 
_SupportsDType[np.dtype[np.void]], - _VoidCodes, - _VoidDTypeLike, -] -_DTypeLikeObject = Union[ - type, - np.dtype[np.object_], - _SupportsDType[np.dtype[np.object_]], - _ObjectCodes, -] - -_DTypeLikeComplex_co = Union[ - _DTypeLikeBool, - _DTypeLikeUInt, - _DTypeLikeInt, - _DTypeLikeFloat, - _DTypeLikeComplex, -] diff --git a/numpy/_typing/_extended_precision.py b/numpy/_typing/_extended_precision.py index 7246b47d0ee1..c707e726af7e 100644 --- a/numpy/_typing/_extended_precision.py +++ b/numpy/_typing/_extended_precision.py @@ -6,22 +6,10 @@ """ import numpy as np -from . import ( - _80Bit, - _96Bit, - _128Bit, - _256Bit, -) -uint128 = np.unsignedinteger[_128Bit] -uint256 = np.unsignedinteger[_256Bit] -int128 = np.signedinteger[_128Bit] -int256 = np.signedinteger[_256Bit] -float80 = np.floating[_80Bit] +from . import _96Bit, _128Bit + float96 = np.floating[_96Bit] float128 = np.floating[_128Bit] -float256 = np.floating[_256Bit] -complex160 = np.complexfloating[_80Bit, _80Bit] complex192 = np.complexfloating[_96Bit, _96Bit] complex256 = np.complexfloating[_128Bit, _128Bit] -complex512 = np.complexfloating[_256Bit, _256Bit] diff --git a/numpy/_typing/_nbit.py b/numpy/_typing/_nbit.py index 7a4ca8837a2c..1ad5f017eeb9 100644 --- a/numpy/_typing/_nbit.py +++ b/numpy/_typing/_nbit.py @@ -1,17 +1,17 @@ """A module with the precisions of platform-specific `~numpy.number`s.""" -from typing import Any +from ._nbit_base import _8Bit, _16Bit, _32Bit, _64Bit, _96Bit, _128Bit # To-be replaced with a `npt.NBitBase` subclass by numpy's mypy plugin -_NBitByte = Any -_NBitShort = Any -_NBitIntC = Any -_NBitIntP = Any -_NBitInt = Any -_NBitLong = Any -_NBitLongLong = Any +type _NBitByte = _8Bit +type _NBitShort = _16Bit +type _NBitIntC = _32Bit +type _NBitIntP = _32Bit | _64Bit +type _NBitInt = _NBitIntP +type _NBitLong = _32Bit | _64Bit +type _NBitLongLong = _64Bit -_NBitHalf = Any -_NBitSingle = Any -_NBitDouble = Any -_NBitLongDouble = Any +type _NBitHalf = _16Bit +type 
_NBitSingle = _32Bit +type _NBitDouble = _64Bit +type _NBitLongDouble = _64Bit | _96Bit | _128Bit diff --git a/numpy/_typing/_nbit_base.py b/numpy/_typing/_nbit_base.py new file mode 100644 index 000000000000..28a60ecbe00f --- /dev/null +++ b/numpy/_typing/_nbit_base.py @@ -0,0 +1,93 @@ +"""A module with the precisions of generic `~numpy.number` types.""" +from typing import final + +from numpy._utils import set_module + + +@final # Disallow the creation of arbitrary `NBitBase` subclasses +@set_module("numpy.typing") +class NBitBase: + """ + A type representing `numpy.number` precision during static type checking. + + Used exclusively for the purpose of static type checking, `NBitBase` + represents the base of a hierarchical set of subclasses. + Each subsequent subclass is herein used for representing a lower level + of precision, *e.g.* ``64Bit > 32Bit > 16Bit``. + + .. versionadded:: 1.20 + + .. deprecated:: 2.3 + Use ``@typing.overload`` or a ``TypeVar`` with a scalar-type as upper + bound, instead. + + Examples + -------- + Below is a typical usage example: `NBitBase` is herein used for annotating + a function that takes a float and integer of arbitrary precision + as arguments and returns a new float of whichever precision is largest + (*e.g.* ``np.float16 + np.int64 -> np.float64``). + + .. code-block:: python + + >>> from typing import TYPE_CHECKING + >>> import numpy as np + >>> import numpy.typing as npt + + >>> def add[S: npt.NBitBase, T: npt.NBitBase]( + ... a: np.floating[S], b: np.integer[T] + ... ) -> np.floating[S | T]: + ... return a + b + + >>> a = np.float16() + >>> b = np.int64() + >>> out = add(a, b) + + >>> if TYPE_CHECKING: + ... reveal_locals() + ... # note: Revealed local types are: + ... # note: a: numpy.floating[numpy.typing._16Bit*] + ... # note: b: numpy.signedinteger[numpy.typing._64Bit*] + ... 
# note: out: numpy.floating[numpy.typing._64Bit*] + + """ + # Deprecated in NumPy 2.3, 2025-05-01 + + def __init_subclass__(cls) -> None: + allowed_names = { + "NBitBase", "_128Bit", "_96Bit", "_64Bit", "_32Bit", "_16Bit", "_8Bit" + } + if cls.__name__ not in allowed_names: + raise TypeError('cannot inherit from final class "NBitBase"') + super().__init_subclass__() + +@final +@set_module("numpy._typing") +# Silence errors about subclassing a `@final`-decorated class +class _128Bit(NBitBase): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] + pass + +@final +@set_module("numpy._typing") +class _96Bit(_128Bit): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] + pass + +@final +@set_module("numpy._typing") +class _64Bit(_96Bit): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] + pass + +@final +@set_module("numpy._typing") +class _32Bit(_64Bit): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] + pass + +@final +@set_module("numpy._typing") +class _16Bit(_32Bit): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] + pass + +@final +@set_module("numpy._typing") +class _8Bit(_16Bit): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] + pass diff --git a/numpy/_typing/_nbit_base.pyi b/numpy/_typing/_nbit_base.pyi new file mode 100644 index 000000000000..bd317c896094 --- /dev/null +++ b/numpy/_typing/_nbit_base.pyi @@ -0,0 +1,39 @@ +# pyright: reportDeprecated=false +# pyright: reportGeneralTypeIssues=false +# mypy: disable-error-code=misc + +from typing import final +from typing_extensions import deprecated + +# Deprecated in NumPy 2.3, 2025-05-01 +@deprecated( + "`NBitBase` is deprecated and will be removed from numpy.typing in the " + "future. Use `@typing.overload` or a type parameter with a scalar-type as upper " + "bound, instead. (deprecated in NumPy 2.3)", +) +@final +class NBitBase: ... + +@final +class _256Bit(NBitBase): ... 
# type: ignore[deprecated] + +@final +class _128Bit(_256Bit): ... + +@final +class _96Bit(_128Bit): ... + +@final +class _80Bit(_96Bit): ... + +@final +class _64Bit(_80Bit): ... + +@final +class _32Bit(_64Bit): ... + +@final +class _16Bit(_32Bit): ... + +@final +class _8Bit(_16Bit): ... diff --git a/numpy/_typing/_nested_sequence.py b/numpy/_typing/_nested_sequence.py index 3d0d25ae5b48..13711be397e9 100644 --- a/numpy/_typing/_nested_sequence.py +++ b/numpy/_typing/_nested_sequence.py @@ -1,22 +1,15 @@ """A module containing the `_NestedSequence` protocol.""" -from __future__ import annotations +from typing import TYPE_CHECKING, Any, Protocol, runtime_checkable -from collections.abc import Iterator -from typing import ( - Any, - TypeVar, - Protocol, - runtime_checkable, -) +if TYPE_CHECKING: + from collections.abc import Iterator __all__ = ["_NestedSequence"] -_T_co = TypeVar("_T_co", covariant=True) - @runtime_checkable -class _NestedSequence(Protocol[_T_co]): +class _NestedSequence[T](Protocol): """A protocol for representing nested sequences. Warning @@ -33,8 +26,6 @@ class _NestedSequence(Protocol[_T_co]): -------- .. 
code-block:: python - >>> from __future__ import annotations - >>> from typing import TYPE_CHECKING >>> import numpy as np >>> from numpy._typing import _NestedSequence @@ -61,7 +52,7 @@ def __len__(self, /) -> int: """Implement ``len(self)``.""" raise NotImplementedError - def __getitem__(self, index: int, /) -> _T_co | _NestedSequence[_T_co]: + def __getitem__(self, index: int, /) -> "T | _NestedSequence[T]": """Implement ``self[x]``.""" raise NotImplementedError @@ -69,11 +60,11 @@ def __contains__(self, x: object, /) -> bool: """Implement ``x in self``.""" raise NotImplementedError - def __iter__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]: + def __iter__(self, /) -> "Iterator[T | _NestedSequence[T]]": """Implement ``iter(self)``.""" raise NotImplementedError - def __reversed__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]: + def __reversed__(self, /) -> "Iterator[T | _NestedSequence[T]]": """Implement ``reversed(self)``.""" raise NotImplementedError diff --git a/numpy/_typing/_scalars.py b/numpy/_typing/_scalars.py index b9274e867c83..2d36c4961c42 100644 --- a/numpy/_typing/_scalars.py +++ b/numpy/_typing/_scalars.py @@ -1,30 +1,20 @@ -from typing import Union, Any +from typing import Any import numpy as np # NOTE: `_StrLike_co` and `_BytesLike_co` are pointless, as `np.str_` and # `np.bytes_` are already subclasses of their builtin counterpart +type _CharLike_co = str | bytes -_CharLike_co = Union[str, bytes] - -# The 6 `Like_co` type-aliases below represent all scalars that can be +# The `Like_co` type-aliases below represent all scalars that can be # coerced into `` (with the casting rule `same_kind`) -_BoolLike_co = Union[bool, np.bool] -_UIntLike_co = Union[_BoolLike_co, np.unsignedinteger[Any]] -_IntLike_co = Union[_BoolLike_co, int, np.integer[Any]] -_FloatLike_co = Union[_IntLike_co, float, np.floating[Any]] -_ComplexLike_co = Union[_FloatLike_co, complex, np.complexfloating[Any, Any]] -_TD64Like_co = Union[_IntLike_co, np.timedelta64] 
- -_NumberLike_co = Union[int, float, complex, np.number[Any], np.bool] -_ScalarLike_co = Union[ - int, - float, - complex, - str, - bytes, - np.generic, -] - +type _BoolLike_co = bool | np.bool +type _UIntLike_co = bool | np.unsignedinteger | np.bool +type _IntLike_co = int | np.integer | np.bool +type _FloatLike_co = float | np.floating | np.integer | np.bool +type _ComplexLike_co = complex | np.number | np.bool +type _NumberLike_co = _ComplexLike_co +type _TD64Like_co = int | np.timedelta64 | np.integer | np.bool # `_VoidLike_co` is technically not a scalar, but it's close enough -_VoidLike_co = Union[tuple[Any, ...], np.void] +type _VoidLike_co = tuple[Any, ...] | np.void +type _ScalarLike_co = complex | str | bytes | np.generic diff --git a/numpy/_typing/_shape.py b/numpy/_typing/_shape.py index 4f1204e47c6a..132943b283c8 100644 --- a/numpy/_typing/_shape.py +++ b/numpy/_typing/_shape.py @@ -1,7 +1,8 @@ from collections.abc import Sequence -from typing import Union, SupportsIndex +from typing import Any, SupportsIndex -_Shape = tuple[int, ...] +type _Shape = tuple[int, ...] +type _AnyShape = tuple[Any, ...] # Anything that can be coerced to a shape tuple -_ShapeLike = Union[SupportsIndex, Sequence[SupportsIndex]] +type _ShapeLike = SupportsIndex | Sequence[SupportsIndex] diff --git a/numpy/_typing/_ufunc.py b/numpy/_typing/_ufunc.py new file mode 100644 index 000000000000..db52a1fdb318 --- /dev/null +++ b/numpy/_typing/_ufunc.py @@ -0,0 +1,7 @@ +from numpy import ufunc + +_UFunc_Nin1_Nout1 = ufunc +_UFunc_Nin2_Nout1 = ufunc +_UFunc_Nin1_Nout2 = ufunc +_UFunc_Nin2_Nout2 = ufunc +_GUFunc_Nin2_Nout1 = ufunc diff --git a/numpy/_typing/_ufunc.pyi b/numpy/_typing/_ufunc.pyi index b6e4db4b5e13..5ace7d90d00a 100644 --- a/numpy/_typing/_ufunc.pyi +++ b/numpy/_typing/_ufunc.pyi @@ -4,38 +4,42 @@ The signatures of the ufuncs are too varied to reasonably type with a single class. 
So instead, `ufunc` has been expanded into four private subclasses, one for each combination of `~ufunc.nin` and `~ufunc.nout`. - """ +from _typeshed import Incomplete +from types import EllipsisType from typing import ( Any, - Generic, - overload, - TypeVar, Literal, - SupportsIndex, + LiteralString, + Never, + NoReturn, Protocol, + SupportsIndex, + TypedDict, + Unpack, + overload, + override, + type_check_only, ) -from numpy import ufunc, _CastingKind, _OrderKACF -from numpy.typing import NDArray +import numpy as np +from numpy import _CastingKind, _OrderKACF, ufunc -from ._shape import _ShapeLike -from ._scalars import _ScalarLike_co -from ._array_like import ArrayLike, _ArrayLikeBool_co, _ArrayLikeInt_co +from ._array_like import ArrayLike, NDArray, _ArrayLikeBool_co, _ArrayLikeInt_co from ._dtype_like import DTypeLike +from ._scalars import _ScalarLike_co +from ._shape import _ShapeLike -_T = TypeVar("_T") -_2Tuple = tuple[_T, _T] -_3Tuple = tuple[_T, _T, _T] -_4Tuple = tuple[_T, _T, _T, _T] - -_NTypes = TypeVar("_NTypes", bound=int) -_IDType = TypeVar("_IDType", bound=Any) -_NameType = TypeVar("_NameType", bound=str) -_Signature = TypeVar("_Signature", bound=str) +type _2Tuple[T] = tuple[T, T] +type _3Tuple[T] = tuple[T, T, T] +type _4Tuple[T] = tuple[T, T, T, T] +type _2PTuple[T] = tuple[T, T, *tuple[T, ...]] +type _3PTuple[T] = tuple[T, T, T, *tuple[T, ...]] +type _4PTuple[T] = tuple[T, T, T, T, *tuple[T, ...]] +@type_check_only class _SupportsArrayUFunc(Protocol): def __array_ufunc__( self, @@ -45,24 +49,41 @@ class _SupportsArrayUFunc(Protocol): **kwargs: Any, ) -> Any: ... 
+@type_check_only +class _UFunc3Kwargs(TypedDict, total=False): + where: _ArrayLikeBool_co | None + casting: _CastingKind + order: _OrderKACF + subok: bool + signature: _3Tuple[str | None] | str | None + +@type_check_only +class _ReduceKwargs(TypedDict, total=False): + initial: Incomplete # = + where: _ArrayLikeBool_co | None # = True # NOTE: `reduce`, `accumulate`, `reduceat` and `outer` raise a ValueError for # ufuncs that don't accept two input arguments and return one output argument. -# In such cases the respective methods are simply typed as `None`. +# In such cases the respective methods return `NoReturn` # NOTE: Similarly, `at` won't be defined for ufuncs that return -# multiple outputs; in such cases `at` is typed as `None` +# multiple outputs; in such cases `at` is typed to return `NoReturn` # NOTE: If 2 output types are returned then `out` must be a # 2-tuple of arrays. Otherwise `None` or a plain array are also acceptable -class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] +# pyright: reportIncompatibleMethodOverride=false + +@type_check_only +class _UFunc_Nin1_Nout1[NameT: LiteralString, NTypesT: int, IdentT](ufunc): # type: ignore[misc] + @property + def __name__(self) -> NameT: ... @property - def __name__(self) -> _NameType: ... + def __qualname__(self) -> NameT: ... # pyright: ignore[reportIncompatibleVariableOverride] @property - def ntypes(self) -> _NTypes: ... + def ntypes(self) -> NTypesT: ... @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property def nin(self) -> Literal[1]: ... @property @@ -71,69 +92,67 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i def nargs(self) -> Literal[2]: ... @property def signature(self) -> None: ... - @property - def reduce(self) -> None: ... - @property - def accumulate(self) -> None: ... - @property - def reduceat(self) -> None: ... - @property - def outer(self) -> None: ... 
@overload def __call__( self, - __x1: _ScalarLike_co, - out: None = ..., + x1: _ScalarLike_co, + /, + out: None = None, *, - where: None | _ArrayLikeBool_co = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., subok: bool = ..., - signature: str | _2Tuple[None | str] = ..., - ) -> Any: ... + signature: str | _2Tuple[str | None] = ..., + ) -> Incomplete: ... @overload def __call__( self, - __x1: ArrayLike, - out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., + x1: ArrayLike, + /, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, *, - where: None | _ArrayLikeBool_co = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., subok: bool = ..., - signature: str | _2Tuple[None | str] = ..., - ) -> NDArray[Any]: ... + signature: str | _2Tuple[str | None] = ..., + ) -> NDArray[Incomplete]: ... @overload def __call__( self, - __x1: _SupportsArrayUFunc, - out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., + x1: _SupportsArrayUFunc, + /, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, *, - where: None | _ArrayLikeBool_co = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., subok: bool = ..., - signature: str | _2Tuple[None | str] = ..., - ) -> Any: ... + signature: str | _2Tuple[str | None] = ..., + ) -> Incomplete: ... - def at( - self, - a: _SupportsArrayUFunc, - indices: _ArrayLikeInt_co, - /, - ) -> None: ... + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... 
# type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + + def at(self, a: np.ndarray | _SupportsArrayUFunc, indices: _ArrayLikeInt_co, /) -> None: ... # type: ignore[override] -class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] +@type_check_only +class _UFunc_Nin2_Nout1[NameT: LiteralString, NTypesT: int, IdentT](ufunc): # type: ignore[misc] @property - def __name__(self) -> _NameType: ... + def __name__(self) -> NameT: ... @property - def ntypes(self) -> _NTypes: ... + def __qualname__(self) -> NameT: ... # pyright: ignore[reportIncompatibleVariableOverride] @property - def identity(self) -> _IDType: ... + def ntypes(self) -> NTypesT: ... + @property + def identity(self) -> IdentT: ... @property def nin(self) -> Literal[2]: ... @property @@ -143,108 +162,196 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @property def signature(self) -> None: ... - @overload + @overload # (scalar, scalar) -> scalar def __call__( self, - __x1: _ScalarLike_co, - __x2: _ScalarLike_co, - out: None = ..., + x1: _ScalarLike_co, + x2: _ScalarLike_co, + /, + out: EllipsisType | None = None, *, - where: None | _ArrayLikeBool_co = ..., - casting: _CastingKind = ..., - order: _OrderKACF = ..., - dtype: DTypeLike = ..., - subok: bool = ..., - signature: str | _3Tuple[None | str] = ..., - ) -> Any: ... - @overload + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> Incomplete: ... 
+ @overload # (array-like, array) -> array def __call__( self, - __x1: ArrayLike, - __x2: ArrayLike, - out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., + x1: ArrayLike, + x2: np.ndarray, + /, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, *, - where: None | _ArrayLikeBool_co = ..., - casting: _CastingKind = ..., - order: _OrderKACF = ..., - dtype: DTypeLike = ..., - subok: bool = ..., - signature: str | _3Tuple[None | str] = ..., - ) -> NDArray[Any]: ... + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Incomplete]: ... + @overload # (array, array-like) -> array + def __call__( + self, + x1: np.ndarray, + x2: ArrayLike, + /, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, + *, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Incomplete]: ... + @overload # (array-like, array-like, out=array) -> array + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + /, + out: np.ndarray | tuple[np.ndarray], + *, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Incomplete]: ... + @overload # (array-like, array-like) -> array | scalar + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + /, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, + *, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Incomplete] | Incomplete: ... - def at( + def accumulate( self, - a: NDArray[Any], - indices: _ArrayLikeInt_co, - b: ArrayLike, + array: ArrayLike, /, - ) -> None: ... + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: np.ndarray | EllipsisType | None = None, + ) -> NDArray[Incomplete]: ... 
+ @override # type: ignore[override] + @overload # out=None (default), keepdims=False (default) + def reduce( # pyrefly: ignore[bad-override] + self, + array: ArrayLike, + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: Literal[False] = False, + **kwargs: Unpack[_ReduceKwargs], + ) -> Incomplete: ... + @overload # out=ndarray or out=... def reduce( self, array: ArrayLike, - axis: None | _ShapeLike = ..., - dtype: DTypeLike = ..., - out: None | NDArray[Any] = ..., - keepdims: bool = ..., - initial: Any = ..., - where: _ArrayLikeBool_co = ..., - ) -> Any: ... - - def accumulate( + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + *, + out: np.ndarray | EllipsisType, + keepdims: bool = False, + **kwargs: Unpack[_ReduceKwargs], + ) -> NDArray[Incomplete]: ... + @overload # keepdims=True + def reduce( self, array: ArrayLike, - axis: SupportsIndex = ..., - dtype: DTypeLike = ..., - out: None | NDArray[Any] = ..., - ) -> NDArray[Any]: ... + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + out: np.ndarray | EllipsisType | None = None, + *, + keepdims: Literal[True], + **kwargs: Unpack[_ReduceKwargs], + ) -> NDArray[Incomplete]: ... + @override def reduceat( self, array: ArrayLike, + /, indices: _ArrayLikeInt_co, - axis: SupportsIndex = ..., - dtype: DTypeLike = ..., - out: None | NDArray[Any] = ..., - ) -> NDArray[Any]: ... + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: np.ndarray | EllipsisType | None = None, + ) -> NDArray[Incomplete]: ... 
- # Expand `**kwargs` into explicit keyword-only arguments + @override # type: ignore[override] @overload - def outer( + def outer( # (scalar, scalar) -> scalar self, A: _ScalarLike_co, B: _ScalarLike_co, - /, *, - out: None = ..., - where: None | _ArrayLikeBool_co = ..., - casting: _CastingKind = ..., - order: _OrderKACF = ..., - dtype: DTypeLike = ..., - subok: bool = ..., - signature: str | _3Tuple[None | str] = ..., - ) -> Any: ... - @overload - def outer( # type: ignore[misc] + /, + *, + out: None = None, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> Incomplete: ... + @overload # (array-like, array) -> array + def outer( self, A: ArrayLike, + B: np.ndarray, + /, + *, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Incomplete]: ... + @overload # (array, array-like) -> array + def outer( + self, + A: np.ndarray, B: ArrayLike, - /, *, - out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., - where: None | _ArrayLikeBool_co = ..., - casting: _CastingKind = ..., - order: _OrderKACF = ..., - dtype: DTypeLike = ..., - subok: bool = ..., - signature: str | _3Tuple[None | str] = ..., - ) -> NDArray[Any]: ... + /, + *, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Incomplete]: ... + @overload # (array-like, array-like, out=array) -> array + def outer( + self, + A: ArrayLike, + B: ArrayLike, + /, + *, + out: np.ndarray | tuple[np.ndarray] | EllipsisType, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Incomplete]: ... + @overload # (array-like, array-like) -> array | scalar + def outer( + self, + A: ArrayLike, + B: ArrayLike, + /, + *, + out: None = None, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Incomplete] | Incomplete: ... 
+ + @override + def at( # type: ignore[override] + self, + a: np.ndarray | _SupportsArrayUFunc, + indices: _ArrayLikeInt_co, + b: ArrayLike, + /, + ) -> None: ... -class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] +@type_check_only +class _UFunc_Nin1_Nout2[NameT: LiteralString, NTypesT: int, IdentT](ufunc): # type: ignore[misc] + @property + def __name__(self) -> NameT: ... @property - def __name__(self) -> _NameType: ... + def __qualname__(self) -> NameT: ... # pyright: ignore[reportIncompatibleVariableOverride] @property - def ntypes(self) -> _NTypes: ... + def ntypes(self) -> NTypesT: ... @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property def nin(self) -> Literal[1]: ... @property @@ -253,69 +360,72 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i def nargs(self) -> Literal[3]: ... @property def signature(self) -> None: ... - @property - def at(self) -> None: ... - @property - def reduce(self) -> None: ... - @property - def accumulate(self) -> None: ... - @property - def reduceat(self) -> None: ... - @property - def outer(self) -> None: ... @overload def __call__( self, - __x1: _ScalarLike_co, - __out1: None = ..., - __out2: None = ..., + x1: _ScalarLike_co, + out1: EllipsisType | None = ..., + out2: None = None, + /, *, - where: None | _ArrayLikeBool_co = ..., + out: EllipsisType | None = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., subok: bool = ..., - signature: str | _3Tuple[None | str] = ..., - ) -> _2Tuple[Any]: ... + signature: str | _3Tuple[str | None] = ..., + ) -> _2Tuple[Incomplete]: ... 
@overload def __call__( self, - __x1: ArrayLike, - __out1: None | NDArray[Any] = ..., - __out2: None | NDArray[Any] = ..., + x1: ArrayLike, + out1: np.ndarray | EllipsisType | None = ..., + out2: np.ndarray | None = ..., + /, *, - out: _2Tuple[NDArray[Any]] = ..., - where: None | _ArrayLikeBool_co = ..., + out: _2Tuple[np.ndarray] | EllipsisType = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., subok: bool = ..., - signature: str | _3Tuple[None | str] = ..., - ) -> _2Tuple[NDArray[Any]]: ... + signature: str | _3Tuple[str | None] = ..., + ) -> _2Tuple[NDArray[Incomplete]]: ... @overload def __call__( self, - __x1: _SupportsArrayUFunc, - __out1: None | NDArray[Any] = ..., - __out2: None | NDArray[Any] = ..., + x1: _SupportsArrayUFunc, + out1: np.ndarray | EllipsisType | None = ..., + out2: np.ndarray | None = ..., + /, *, - out: _2Tuple[NDArray[Any]] = ..., - where: None | _ArrayLikeBool_co = ..., + out: _2Tuple[np.ndarray] | EllipsisType = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., subok: bool = ..., - signature: str | _3Tuple[None | str] = ..., - ) -> _2Tuple[Any]: ... + signature: str | _3Tuple[str | None] = ..., + ) -> _2Tuple[Incomplete]: ... + + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + def at(self, a: Never, indices: Never, /) -> NoReturn: ... 
# type: ignore[override] -class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] +@type_check_only +class _UFunc_Nin2_Nout2[NameT: LiteralString, NTypesT: int, IdentT](ufunc): # type: ignore[misc] @property - def __name__(self) -> _NameType: ... + def __name__(self) -> NameT: ... @property - def ntypes(self) -> _NTypes: ... + def __qualname__(self) -> NameT: ... # pyright: ignore[reportIncompatibleVariableOverride] @property - def identity(self) -> _IDType: ... + def ntypes(self) -> NTypesT: ... + @property + def identity(self) -> IdentT: ... @property def nin(self) -> Literal[2]: ... @property @@ -324,56 +434,58 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i def nargs(self) -> Literal[4]: ... @property def signature(self) -> None: ... - @property - def at(self) -> None: ... - @property - def reduce(self) -> None: ... - @property - def accumulate(self) -> None: ... - @property - def reduceat(self) -> None: ... - @property - def outer(self) -> None: ... @overload def __call__( self, - __x1: _ScalarLike_co, - __x2: _ScalarLike_co, - __out1: None = ..., - __out2: None = ..., + x1: _ScalarLike_co, + x2: _ScalarLike_co, + out1: EllipsisType | None = ..., + out2: None = None, + /, *, - where: None | _ArrayLikeBool_co = ..., + out: EllipsisType | None = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., subok: bool = ..., - signature: str | _4Tuple[None | str] = ..., - ) -> _2Tuple[Any]: ... + signature: str | _4Tuple[str | None] = ..., + ) -> _2Tuple[Incomplete]: ... 
@overload def __call__( self, - __x1: ArrayLike, - __x2: ArrayLike, - __out1: None | NDArray[Any] = ..., - __out2: None | NDArray[Any] = ..., + x1: ArrayLike, + x2: ArrayLike, + out1: np.ndarray | EllipsisType | None = ..., + out2: np.ndarray | None = ..., + /, *, - out: _2Tuple[NDArray[Any]] = ..., - where: None | _ArrayLikeBool_co = ..., + out: _2Tuple[np.ndarray] | EllipsisType = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., subok: bool = ..., - signature: str | _4Tuple[None | str] = ..., - ) -> _2Tuple[NDArray[Any]]: ... + signature: str | _4Tuple[str | None] = ..., + ) -> _2Tuple[NDArray[Incomplete]]: ... -class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature]): # type: ignore[misc] + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + def at(self, a: Never, indices: Never, b: Never, /) -> NoReturn: ... # type: ignore[override] + +@type_check_only +class _GUFunc_Nin2_Nout1[NameT: LiteralString, NTypesT: int, IdentT, SignatureT: LiteralString](ufunc): # type: ignore[misc] + @property + def __name__(self) -> NameT: ... @property - def __name__(self) -> _NameType: ... + def __qualname__(self) -> NameT: ... # pyright: ignore[reportIncompatibleVariableOverride] @property - def ntypes(self) -> _NTypes: ... + def ntypes(self) -> NTypesT: ... @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property def nin(self) -> Literal[2]: ... @property @@ -381,44 +493,478 @@ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature] @property def nargs(self) -> Literal[3]: ... 
@property - def signature(self) -> _Signature: ... - @property - def reduce(self) -> None: ... - @property - def accumulate(self) -> None: ... - @property - def reduceat(self) -> None: ... - @property - def outer(self) -> None: ... - @property - def at(self) -> None: ... + def signature(self) -> SignatureT: ... # Scalar for 1D array-likes; ndarray otherwise @overload def __call__( self, - __x1: ArrayLike, - __x2: ArrayLike, - out: None = ..., + x1: ArrayLike, + x2: ArrayLike, + /, + out: EllipsisType | None = None, *, + dtype: DTypeLike | None = None, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., subok: bool = ..., - signature: str | _3Tuple[None | str] = ..., + signature: str | _3Tuple[str | None] = ..., axes: list[_2Tuple[SupportsIndex]] = ..., - ) -> Any: ... + ) -> Incomplete: ... @overload def __call__( self, - __x1: ArrayLike, - __x2: ArrayLike, - out: NDArray[Any] | tuple[NDArray[Any]], + x1: ArrayLike, + x2: ArrayLike, + /, + out: np.ndarray | tuple[np.ndarray] | EllipsisType, *, + dtype: DTypeLike | None = None, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., subok: bool = ..., - signature: str | _3Tuple[None | str] = ..., + signature: str | _3Tuple[str | None] = ..., axes: list[_2Tuple[SupportsIndex]] = ..., - ) -> NDArray[Any]: ... + ) -> NDArray[Incomplete]: ... + + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + def at(self, a: Never, indices: Never, b: Never, /) -> NoReturn: ... 
# type: ignore[override] + +@type_check_only +class _PyFunc_Kwargs_Nargs2(TypedDict, total=False): + where: _ArrayLikeBool_co | None + casting: _CastingKind + order: _OrderKACF + dtype: DTypeLike + subok: bool + signature: str | tuple[DTypeLike, DTypeLike] + +@type_check_only +class _PyFunc_Kwargs_Nargs3(TypedDict, total=False): + where: _ArrayLikeBool_co | None + casting: _CastingKind + order: _OrderKACF + dtype: DTypeLike + subok: bool + signature: str | tuple[DTypeLike, DTypeLike, DTypeLike] + +@type_check_only +class _PyFunc_Kwargs_Nargs3P(TypedDict, total=False): + where: _ArrayLikeBool_co | None + casting: _CastingKind + order: _OrderKACF + dtype: DTypeLike + subok: bool + signature: str | _3PTuple[DTypeLike] + +@type_check_only +class _PyFunc_Kwargs_Nargs4P(TypedDict, total=False): + where: _ArrayLikeBool_co | None + casting: _CastingKind + order: _OrderKACF + dtype: DTypeLike + subok: bool + signature: str | _4PTuple[DTypeLike] + +@type_check_only +class _PyFunc_Nin1_Nout1[ReturnT, IdentT](ufunc): # type: ignore[misc] + @property + def identity(self) -> IdentT: ... + @property + def nin(self) -> Literal[1]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[2]: ... + @property + def ntypes(self) -> Literal[1]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + x1: _ScalarLike_co, + /, + out: EllipsisType | None = None, + **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], + ) -> ReturnT: ... + @overload + def __call__( + self, + x1: ArrayLike, + /, + out: EllipsisType | None = None, + **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], + ) -> ReturnT | NDArray[np.object_]: ... + @overload + def __call__[OutT: np.ndarray]( + self, + x1: ArrayLike, + /, + out: OutT | tuple[OutT], + **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], + ) -> OutT: ... 
+ @overload + def __call__( + self, + x1: _SupportsArrayUFunc, + /, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, + **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], + ) -> Incomplete: ... + + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + + def at(self, a: np.ndarray | _SupportsArrayUFunc, indices: _ArrayLikeInt_co, /) -> None: ... # type: ignore[override] + +@type_check_only +class _PyFunc_Nin2_Nout1[ReturnT, IdentT](ufunc): # type: ignore[misc] + @property + def identity(self) -> IdentT: ... + @property + def nin(self) -> Literal[2]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[3]: ... + @property + def ntypes(self) -> Literal[1]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + x1: _ScalarLike_co, + x2: _ScalarLike_co, + /, + out: EllipsisType | None = None, + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> ReturnT: ... + @overload + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + /, + out: EllipsisType | None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> ReturnT | NDArray[np.object_]: ... + @overload + def __call__[OutT: np.ndarray]( + self, + x1: ArrayLike, + x2: ArrayLike, + /, + out: OutT | tuple[OutT], + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> OutT: ... + @overload + def __call__( + self, + x1: _SupportsArrayUFunc, + x2: _SupportsArrayUFunc | ArrayLike, + /, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> Incomplete: ... 
+ @overload + def __call__( + self, + x1: ArrayLike, + x2: _SupportsArrayUFunc, + /, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def accumulate( # pyrefly: ignore[bad-override] + self, + array: ArrayLike, + /, + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: EllipsisType | None = None, + ) -> NDArray[np.object_]: ... + @overload + def accumulate[OutT: np.ndarray]( + self, + array: ArrayLike, + /, + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + *, + out: OutT, + ) -> OutT: ... + + @override # type: ignore[override] + @overload # out=array + def reduce[OutT: np.ndarray]( # pyrefly: ignore[bad-override] + self, + array: ArrayLike, + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + *, + out: OutT | tuple[OutT], + keepdims: bool = False, + **kwargs: Unpack[_ReduceKwargs], + ) -> OutT: ... + @overload # out=... + def reduce( + self, + array: ArrayLike, + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + *, + out: EllipsisType, + keepdims: bool = False, + **kwargs: Unpack[_ReduceKwargs], + ) -> NDArray[np.object_]: ... + @overload # keepdims=True + def reduce( + self, + array: ArrayLike, + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + out: EllipsisType | None = None, + *, + keepdims: Literal[True], + **kwargs: Unpack[_ReduceKwargs], + ) -> NDArray[np.object_]: ... + @overload + def reduce( + self, + array: ArrayLike, + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + out: EllipsisType | None = None, + keepdims: bool = False, + **kwargs: Unpack[_ReduceKwargs], + ) -> ReturnT | NDArray[np.object_]: ... 
+ + @override # type: ignore[override] + @overload + def reduceat[OutT: np.ndarray]( # pyrefly: ignore[bad-override] + self, + array: ArrayLike, + /, + indices: _ArrayLikeInt_co, + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + *, + out: OutT | tuple[OutT], + ) -> OutT: ... + @overload + def reduceat( + self, + array: ArrayLike, + /, + indices: _ArrayLikeInt_co, + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: EllipsisType | None = None, + ) -> NDArray[np.object_]: ... + @overload + def reduceat( + self, + array: _SupportsArrayUFunc, + /, + indices: _ArrayLikeInt_co, + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, + ) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def outer( # pyrefly: ignore[bad-override] + self, + A: _ScalarLike_co, + B: _ScalarLike_co, + /, + *, + out: EllipsisType | None = None, + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> ReturnT: ... + @overload + def outer( + self, + A: ArrayLike, + B: ArrayLike, + /, + *, + out: EllipsisType | None = None, + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> ReturnT | NDArray[np.object_]: ... + @overload + def outer[OutT: np.ndarray]( + self, + A: ArrayLike, + B: ArrayLike, + /, + *, + out: OutT, + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> OutT: ... + @overload + def outer( + self, + A: _SupportsArrayUFunc, + B: _SupportsArrayUFunc | ArrayLike, + /, + *, + out: EllipsisType | None = None, + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> Incomplete: ... + @overload + def outer( + self, + A: _ScalarLike_co, + B: _SupportsArrayUFunc | ArrayLike, + /, + *, + out: EllipsisType | None = None, + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> Incomplete: ... + + @override + def at( # type: ignore[override] + self, + a: np.ndarray | _SupportsArrayUFunc, + indices: _ArrayLikeInt_co, + b: ArrayLike, + /, + ) -> None: ... 
+ +@type_check_only +class _PyFunc_Nin3P_Nout1[ReturnT, IdentT, NInT: int](ufunc): # type: ignore[misc] + @property + def identity(self) -> IdentT: ... + @property + def nin(self) -> NInT: ... + @property + def nout(self) -> Literal[1]: ... + @property + def ntypes(self) -> Literal[1]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + x1: _ScalarLike_co, + x2: _ScalarLike_co, + x3: _ScalarLike_co, + /, + *xs: _ScalarLike_co, + out: EllipsisType | None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], + ) -> ReturnT: ... + @overload + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + x3: ArrayLike, + /, + *xs: ArrayLike, + out: EllipsisType | None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], + ) -> ReturnT | NDArray[np.object_]: ... + @overload + def __call__[OutT: np.ndarray]( + self, + x1: ArrayLike, + x2: ArrayLike, + x3: ArrayLike, + /, + *xs: ArrayLike, + out: OutT | tuple[OutT], + **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], + ) -> OutT: ... + @overload + def __call__( + self, + x1: _SupportsArrayUFunc | ArrayLike, + x2: _SupportsArrayUFunc | ArrayLike, + x3: _SupportsArrayUFunc | ArrayLike, + /, + *xs: _SupportsArrayUFunc | ArrayLike, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], + ) -> Incomplete: ... + + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + def at(self, a: Never, indices: Never, /, *args: Never) -> NoReturn: ... # type: ignore[override] + +@type_check_only +class _PyFunc_Nin1P_Nout2P[ReturnT, IdentT, NInT: int, NOutT: int](ufunc): # type: ignore[misc] + @property + def identity(self) -> IdentT: ... + @property + def nin(self) -> NInT: ... 
+ @property + def nout(self) -> NOutT: ... + @property + def ntypes(self) -> Literal[1]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + x1: _ScalarLike_co, + /, + *xs: _ScalarLike_co, + out: EllipsisType | None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], + ) -> _2PTuple[ReturnT]: ... + @overload + def __call__( + self, + x1: ArrayLike, + /, + *xs: ArrayLike, + out: EllipsisType | None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], + ) -> _2PTuple[ReturnT | NDArray[np.object_]]: ... + @overload + def __call__[OutT: np.ndarray]( + self, + x1: ArrayLike, + /, + *xs: ArrayLike, + out: _2PTuple[OutT], + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], + ) -> _2PTuple[OutT]: ... + @overload + def __call__( + self, + x1: _SupportsArrayUFunc | ArrayLike, + /, + *xs: _SupportsArrayUFunc | ArrayLike, + out: _2PTuple[np.ndarray] | EllipsisType | None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], + ) -> Incomplete: ... + + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + def at(self, a: Never, indices: Never, /, *args: Never) -> NoReturn: ... 
# type: ignore[override] diff --git a/numpy/_utils/__init__.py b/numpy/_utils/__init__.py index 9794c4e0c4a1..84ee99db1be8 100644 --- a/numpy/_utils/__init__.py +++ b/numpy/_utils/__init__.py @@ -10,7 +10,8 @@ import functools import warnings -from ._convertions import asunicode, asbytes + +from ._convertions import asbytes, asunicode def set_module(module): @@ -26,6 +27,12 @@ def example(): """ def decorator(func): if module is not None: + if isinstance(func, type): + try: + func._module_source = func.__module__ + except (AttributeError): + pass + func.__module__ = module return func return decorator @@ -66,6 +73,7 @@ def _rename_parameter(old_names, new_names, dep_version=None): def decorator(fun): @functools.wraps(fun) def wrapper(*args, **kwargs): + __tracebackhide__ = True # Hide traceback for py.test for old_name, new_name in zip(old_names, new_names): if old_name in kwargs: if dep_version: diff --git a/numpy/_utils/__init__.pyi b/numpy/_utils/__init__.pyi new file mode 100644 index 000000000000..7a78cabe60f3 --- /dev/null +++ b/numpy/_utils/__init__.pyi @@ -0,0 +1,25 @@ +from _typeshed import IdentityFunction +from collections.abc import Callable, Iterable +from typing import Protocol, overload, type_check_only + +from ._convertions import asbytes as asbytes, asunicode as asunicode + +### + +@type_check_only +class _HasModule(Protocol): + __module__: str + +### + +@overload +def set_module(module: None) -> IdentityFunction: ... +@overload +def set_module[ModuleT: _HasModule](module: str) -> Callable[[ModuleT], ModuleT]: ... + +# +def _rename_parameter[T]( + old_names: Iterable[str], + new_names: Iterable[str], + dep_version: str | None = None, +) -> Callable[[Callable[..., T]], Callable[..., T]]: ... 
diff --git a/numpy/_utils/_convertions.pyi b/numpy/_utils/_convertions.pyi new file mode 100644 index 000000000000..6cc599acc94f --- /dev/null +++ b/numpy/_utils/_convertions.pyi @@ -0,0 +1,4 @@ +__all__ = ["asbytes", "asunicode"] + +def asunicode(s: bytes | str) -> str: ... +def asbytes(s: bytes | str) -> str: ... diff --git a/numpy/_utils/_inspect.py b/numpy/_utils/_inspect.py index 9a874a71dd0a..b499f5837b08 100644 --- a/numpy/_utils/_inspect.py +++ b/numpy/_utils/_inspect.py @@ -54,10 +54,11 @@ def iscode(object): co_nlocals number of local variables co_stacksize virtual machine stack space required co_varnames tuple of names of arguments and local variables - + """ return isinstance(object, types.CodeType) + # ------------------------------------------------ argument list extraction # These constants are from Python's compile.h. CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 1, 2, 4, 8 @@ -117,7 +118,7 @@ def getargvalues(frame): 'args' is a list of the argument names (it may contain nested lists). 'varargs' and 'varkw' are the names of the * and ** arguments or None. 'locals' is the locals dictionary of the given frame. - + """ args, varargs, varkw = getargs(frame.f_code) return args, varargs, varkw, frame.f_locals diff --git a/numpy/_utils/_inspect.pyi b/numpy/_utils/_inspect.pyi new file mode 100644 index 000000000000..dd738025b728 --- /dev/null +++ b/numpy/_utils/_inspect.pyi @@ -0,0 +1,67 @@ +import types +from _typeshed import SupportsLenAndGetItem +from collections.abc import Callable, Mapping +from typing import Any, Final, overload +from typing_extensions import TypeIs + +__all__ = ["formatargspec", "getargspec"] + +### + +type _StrSeq = SupportsLenAndGetItem[str] +type _NestedSeq[T] = list[T | _NestedSeq[T]] | tuple[T | _NestedSeq[T], ...] 
+ +type _JoinFunc[T] = Callable[[list[T]], T] +type _FormatFunc[T] = Callable[[T], str] + +### + +CO_OPTIMIZED: Final = 1 +CO_NEWLOCALS: Final = 2 +CO_VARARGS: Final = 4 +CO_VARKEYWORDS: Final = 8 + +### + +def ismethod(object: object) -> TypeIs[types.MethodType]: ... +def isfunction(object: object) -> TypeIs[types.FunctionType]: ... +def iscode(object: object) -> TypeIs[types.CodeType]: ... + +### + +def getargs(co: types.CodeType) -> tuple[list[str], str | None, str | None]: ... +def getargspec(func: types.MethodType | types.FunctionType) -> tuple[list[str], str | None, str | None, tuple[Any, ...]]: ... +def getargvalues(frame: types.FrameType) -> tuple[list[str], str | None, str | None, dict[str, Any]]: ... + +# +def joinseq(seq: _StrSeq) -> str: ... + +# +@overload +def strseq(object: _NestedSeq[str], convert: Callable[[Any], Any], join: _JoinFunc[str] = ...) -> str: ... +@overload +def strseq[VT, RT](object: _NestedSeq[VT], convert: Callable[[VT], RT], join: _JoinFunc[RT]) -> RT: ... + +# +def formatargspec( + args: _StrSeq, + varargs: str | None = None, + varkw: str | None = None, + defaults: SupportsLenAndGetItem[object] | None = None, + formatarg: _FormatFunc[str] = ..., # str + formatvarargs: _FormatFunc[str] = ..., # "*{}".format + formatvarkw: _FormatFunc[str] = ..., # "**{}".format + formatvalue: _FormatFunc[object] = ..., # "={!r}".format + join: _JoinFunc[str] = ..., # joinseq +) -> str: ... +def formatargvalues( + args: _StrSeq, + varargs: str | None, + varkw: str | None, + locals: Mapping[str, object] | None, + formatarg: _FormatFunc[str] = ..., # str + formatvarargs: _FormatFunc[str] = ..., # "*{}".format + formatvarkw: _FormatFunc[str] = ..., # "**{}".format + formatvalue: _FormatFunc[object] = ..., # "={!r}".format + join: _JoinFunc[str] = ..., # joinseq +) -> str: ... 
diff --git a/numpy/_utils/_pep440.py b/numpy/_utils/_pep440.py index 73d0afb5e95f..035a0695e5ee 100644 --- a/numpy/_utils/_pep440.py +++ b/numpy/_utils/_pep440.py @@ -33,7 +33,6 @@ import itertools import re - __all__ = [ "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN", ] @@ -172,7 +171,7 @@ def __str__(self): return self._version def __repr__(self): - return "".format(repr(str(self))) + return f"" @property def public(self): @@ -293,7 +292,7 @@ def __init__(self, version): # Validate the version and parse it into pieces match = self._regex.search(version) if not match: - raise InvalidVersion("Invalid version: '{0}'".format(version)) + raise InvalidVersion(f"Invalid version: '{version}'") # Store the parsed out pieces of the version self._version = _Version( @@ -325,14 +324,14 @@ def __init__(self, version): ) def __repr__(self): - return "".format(repr(str(self))) + return f"" def __str__(self): parts = [] # Epoch if self._version.epoch != 0: - parts.append("{0}!".format(self._version.epoch)) + parts.append(f"{self._version.epoch}!") # Release segment parts.append(".".join(str(x) for x in self._version.release)) @@ -343,16 +342,16 @@ def __str__(self): # Post-release if self._version.post is not None: - parts.append(".post{0}".format(self._version.post[1])) + parts.append(f".post{self._version.post[1]}") # Development release if self._version.dev is not None: - parts.append(".dev{0}".format(self._version.dev[1])) + parts.append(f".dev{self._version.dev[1]}") # Local version segment if self._version.local is not None: parts.append( - "+{0}".format(".".join(str(x) for x in self._version.local)) + f"+{'.'.join(str(x) for x in self._version.local)}" ) return "".join(parts) @@ -367,7 +366,7 @@ def base_version(self): # Epoch if self._version.epoch != 0: - parts.append("{0}!".format(self._version.epoch)) + parts.append(f"{self._version.epoch}!") # Release segment parts.append(".".join(str(x) for x in self._version.release)) diff --git 
a/numpy/_utils/_pep440.pyi b/numpy/_utils/_pep440.pyi new file mode 100644 index 000000000000..593960274814 --- /dev/null +++ b/numpy/_utils/_pep440.pyi @@ -0,0 +1,121 @@ +import re +from collections.abc import Callable +from typing import ( + Any, + ClassVar, + Final, + Generic, + Literal as L, + NamedTuple, + final, + type_check_only, +) +from typing_extensions import TypeIs, TypeVar + +__all__ = ["VERSION_PATTERN", "InvalidVersion", "LegacyVersion", "Version", "parse"] + +### + +_CmpKeyT_co = TypeVar("_CmpKeyT_co", bound=tuple[object, ...], default=tuple[Any, ...], covariant=True) + +### + +VERSION_PATTERN: Final[str] = ... + +class InvalidVersion(ValueError): ... + +@type_check_only +@final +class _InfinityType: + def __hash__(self) -> int: ... + def __eq__(self, other: object, /) -> TypeIs[_InfinityType]: ... + def __ne__(self, other: object, /) -> bool: ... + def __lt__(self, other: object, /) -> L[False]: ... + def __le__(self, other: object, /) -> L[False]: ... + def __gt__(self, other: object, /) -> L[True]: ... + def __ge__(self, other: object, /) -> L[True]: ... + def __neg__(self) -> _NegativeInfinityType: ... + +Infinity: Final[_InfinityType] = ... + +@type_check_only +@final +class _NegativeInfinityType: + def __hash__(self) -> int: ... + def __eq__(self, other: object, /) -> TypeIs[_NegativeInfinityType]: ... + def __ne__(self, other: object, /) -> bool: ... + def __lt__(self, other: object, /) -> L[True]: ... + def __le__(self, other: object, /) -> L[True]: ... + def __gt__(self, other: object, /) -> L[False]: ... + def __ge__(self, other: object, /) -> L[False]: ... + def __neg__(self) -> _InfinityType: ... + +NegativeInfinity: Final[_NegativeInfinityType] = ... + +class _Version(NamedTuple): + epoch: int + release: tuple[int, ...] + dev: tuple[str, int] | None + pre: tuple[str, int] | None + post: tuple[str, int] | None + local: tuple[str | int, ...] 
| None + +class _BaseVersion(Generic[_CmpKeyT_co]): + _key: _CmpKeyT_co + def __hash__(self) -> int: ... + def __eq__(self, other: _BaseVersion, /) -> bool: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __ne__(self, other: _BaseVersion, /) -> bool: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __lt__(self, other: _BaseVersion, /) -> bool: ... + def __le__(self, other: _BaseVersion, /) -> bool: ... + def __ge__(self, other: _BaseVersion, /) -> bool: ... + def __gt__(self, other: _BaseVersion, /) -> bool: ... + def _compare[CmpKeyT: tuple[object, ...]]( + self, + /, + other: _BaseVersion[CmpKeyT], + method: Callable[[_CmpKeyT_co, CmpKeyT], bool], + ) -> bool: ... + +class LegacyVersion(_BaseVersion[tuple[L[-1], tuple[str, ...]]]): + _version: Final[str] + def __init__(self, /, version: str) -> None: ... + @property + def public(self) -> str: ... + @property + def base_version(self) -> str: ... + @property + def local(self) -> None: ... + @property + def is_prerelease(self) -> L[False]: ... + @property + def is_postrelease(self) -> L[False]: ... + +class Version( + _BaseVersion[ + tuple[ + int, # epoch + tuple[int, ...], # release + tuple[str, int] | _InfinityType | _NegativeInfinityType, # pre + tuple[str, int] | _NegativeInfinityType, # post + tuple[str, int] | _InfinityType, # dev + tuple[tuple[int, L[""]] | tuple[_NegativeInfinityType, str], ...] | _NegativeInfinityType, # local + ], + ], +): + _regex: ClassVar[re.Pattern[str]] = ... + _version: Final[str] + + def __init__(self, /, version: str) -> None: ... + @property + def public(self) -> str: ... + @property + def base_version(self) -> str: ... + @property + def local(self) -> str | None: ... + @property + def is_prerelease(self) -> bool: ... + @property + def is_postrelease(self) -> bool: ... + +# +def parse(version: str) -> Version | LegacyVersion: ... 
diff --git a/numpy/char/__init__.py b/numpy/char/__init__.py index 9eb66c180f59..a757fcee58ac 100644 --- a/numpy/char/__init__.py +++ b/numpy/char/__init__.py @@ -1,2 +1,31 @@ from numpy._core.defchararray import __all__, __doc__ -from numpy._core.defchararray import * + +__DEPRECATED = frozenset({"chararray", "array", "asarray"}) + + +def __getattr__(name: str): + if name in __DEPRECATED: + # Deprecated in NumPy 2.5, 2026-01-07 + import warnings + + warnings.warn( + ( + "The chararray class is deprecated and will be removed in a future " + "release. Use an ndarray with a string or bytes dtype instead." + ), + DeprecationWarning, + stacklevel=2, + ) + + import numpy._core.defchararray as char + + if (export := getattr(char, name, None)) is not None: + return export + + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") + + +def __dir__() -> list[str]: + import numpy._core.defchararray as char + + return dir(char) diff --git a/numpy/char/__init__.pyi b/numpy/char/__init__.pyi index 3a98cbb42ecc..f53ff7e483b0 100644 --- a/numpy/char/__init__.pyi +++ b/numpy/char/__init__.pyi @@ -1,57 +1,111 @@ -from numpy._core.defchararray import ( - equal as equal, - not_equal as not_equal, - greater_equal as greater_equal, - less_equal as less_equal, - greater as greater, - less as less, - str_len as str_len, - add as add, - multiply as multiply, - mod as mod, - capitalize as capitalize, - center as center, - count as count, - decode as decode, - encode as encode, - endswith as endswith, - expandtabs as expandtabs, - find as find, - index as index, - isalnum as isalnum, - isalpha as isalpha, - isdigit as isdigit, - islower as islower, - isspace as isspace, - istitle as istitle, - isupper as isupper, - join as join, - ljust as ljust, - lower as lower, - lstrip as lstrip, - partition as partition, - replace as replace, - rfind as rfind, - rindex as rindex, - rjust as rjust, - rpartition as rpartition, - rsplit as rsplit, - rstrip as rstrip, - split as split, - 
splitlines as splitlines, - startswith as startswith, - strip as strip, - swapcase as swapcase, - title as title, - translate as translate, - upper as upper, - zfill as zfill, - isnumeric as isnumeric, - isdecimal as isdecimal, - array as array, - asarray as asarray, - compare_chararrays as compare_chararrays, - chararray as chararray +from numpy._core.defchararray import ( # type: ignore[deprecated] + add, + array, + asarray, + capitalize, + center, + chararray, # pyrefly: ignore[deprecated] + compare_chararrays, + count, + decode, + encode, + endswith, + equal, + expandtabs, + find, + greater, + greater_equal, + index, + isalnum, + isalpha, + isdecimal, + isdigit, + islower, + isnumeric, + isspace, + istitle, + isupper, + join, + less, + less_equal, + ljust, + lower, + lstrip, + mod, + multiply, + not_equal, + partition, + replace, + rfind, + rindex, + rjust, + rpartition, + rsplit, + rstrip, + split, + splitlines, + startswith, + str_len, + strip, + swapcase, + title, + translate, + upper, + zfill, ) -__all__: list[str] +__all__ = [ + "equal", + "not_equal", + "greater_equal", + "less_equal", + "greater", + "less", + "str_len", + "add", + "multiply", + "mod", + "capitalize", + "center", + "count", + "decode", + "encode", + "endswith", + "expandtabs", + "find", + "index", + "isalnum", + "isalpha", + "isdigit", + "islower", + "isspace", + "istitle", + "isupper", + "join", + "ljust", + "lower", + "lstrip", + "partition", + "replace", + "rfind", + "rindex", + "rjust", + "rpartition", + "rsplit", + "rstrip", + "split", + "splitlines", + "startswith", + "strip", + "swapcase", + "title", + "translate", + "upper", + "zfill", + "isnumeric", + "isdecimal", + "array", + "asarray", + "compare_chararrays", + "chararray", +] diff --git a/numpy/compat/__init__.py b/numpy/compat/__init__.py deleted file mode 100644 index 729265aa9c27..000000000000 --- a/numpy/compat/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -""" -Compatibility module. 
- -This module contains duplicated code from Python itself or 3rd party -extensions, which may be included for the following reasons: - - * compatibility - * we may only need a small subset of the copied library/module - -This module is deprecated since 1.26.0 and will be removed in future versions. - -""" - -import warnings - -from .._utils import _inspect -from .._utils._inspect import getargspec, formatargspec -from . import py3k -from .py3k import * - -warnings.warn( - "`np.compat`, which was used during the Python 2 to 3 transition," - " is deprecated since 1.26.0, and will be removed", - DeprecationWarning, stacklevel=2 -) - -__all__ = [] -__all__.extend(_inspect.__all__) -__all__.extend(py3k.__all__) diff --git a/numpy/compat/py3k.py b/numpy/compat/py3k.py deleted file mode 100644 index d02c9f8fe341..000000000000 --- a/numpy/compat/py3k.py +++ /dev/null @@ -1,145 +0,0 @@ -""" -Python 3.X compatibility tools. - -While this file was originally intended for Python 2 -> 3 transition, -it is now used to create a compatibility layer between different -minor versions of Python 3. - -While the active version of numpy may not support a given version of python, we -allow downstream libraries to continue to use these shims for forward -compatibility with numpy while they transition their code to newer versions of -Python. 
-""" -__all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar', - 'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested', - 'asstr', 'open_latin1', 'long', 'basestring', 'sixu', - 'integer_types', 'is_pathlib_path', 'npy_load_module', 'Path', - 'pickle', 'contextlib_nullcontext', 'os_fspath', 'os_PathLike'] - -import sys -import os -from pathlib import Path -import io -try: - import pickle5 as pickle -except ImportError: - import pickle - -long = int -integer_types = (int,) -basestring = str -unicode = str -bytes = bytes - -def asunicode(s): - if isinstance(s, bytes): - return s.decode('latin1') - return str(s) - -def asbytes(s): - if isinstance(s, bytes): - return s - return str(s).encode('latin1') - -def asstr(s): - if isinstance(s, bytes): - return s.decode('latin1') - return str(s) - -def isfileobj(f): - if not isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter)): - return False - try: - # BufferedReader/Writer may raise OSError when - # fetching `fileno()` (e.g. when wrapping BytesIO). - f.fileno() - return True - except OSError: - return False - -def open_latin1(filename, mode='r'): - return open(filename, mode=mode, encoding='iso-8859-1') - -def sixu(s): - return s - -strchar = 'U' - -def getexception(): - return sys.exc_info()[1] - -def asbytes_nested(x): - if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)): - return [asbytes_nested(y) for y in x] - else: - return asbytes(x) - -def asunicode_nested(x): - if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)): - return [asunicode_nested(y) for y in x] - else: - return asunicode(x) - -def is_pathlib_path(obj): - """ - Check whether obj is a `pathlib.Path` object. - - Prefer using ``isinstance(obj, os.PathLike)`` instead of this function. - """ - return isinstance(obj, Path) - -# from Python 3.7 -class contextlib_nullcontext: - """Context manager that does no additional processing. 
- - Used as a stand-in for a normal context manager, when a particular - block of code is only sometimes used with a normal context manager: - - cm = optional_cm if condition else nullcontext() - with cm: - # Perform operation, using optional_cm if condition is True - - .. note:: - Prefer using `contextlib.nullcontext` instead of this context manager. - """ - - def __init__(self, enter_result=None): - self.enter_result = enter_result - - def __enter__(self): - return self.enter_result - - def __exit__(self, *excinfo): - pass - - -def npy_load_module(name, fn, info=None): - """ - Load a module. Uses ``load_module`` which will be deprecated in python - 3.12. An alternative that uses ``exec_module`` is in - numpy.distutils.misc_util.exec_mod_from_location - - .. versionadded:: 1.11.2 - - Parameters - ---------- - name : str - Full module name. - fn : str - Path to module file. - info : tuple, optional - Only here for backward compatibility with Python 2.*. - - Returns - ------- - mod : module - - """ - # Explicitly lazy import this to avoid paying the cost - # of importing importlib at startup - from importlib.machinery import SourceFileLoader - return SourceFileLoader(name, fn).load_module() - - -os_fspath = os.fspath -os_PathLike = os.PathLike diff --git a/numpy/conftest.py b/numpy/conftest.py index a6c329790e16..c8f810aeda64 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -2,14 +2,30 @@ Pytest configuration and fixtures for the Numpy test suite. 
""" import os +import sys import tempfile +import warnings +from contextlib import contextmanager +from pathlib import Path import hypothesis import pytest -import numpy +import numpy from numpy._core._multiarray_tests import get_fpu_mode +from numpy.testing._private.utils import NOGIL_BUILD + +try: + from scipy_doctest.conftest import dt_config + HAVE_SCPDT = True +except ModuleNotFoundError: + HAVE_SCPDT = False +try: + import pytest_run_parallel # noqa: F401 + PARALLEL_RUN_AVALIABLE = True +except ModuleNotFoundError: + PARALLEL_RUN_AVALIABLE = False _old_fpu_mode = None _collect_results = {} @@ -22,7 +38,7 @@ # We register two custom profiles for Numpy - for details see # https://hypothesis.readthedocs.io/en/latest/settings.html -# The first is designed for our own CI runs; the latter also +# The first is designed for our own CI runs; the latter also # forces determinism and is designed for use via np.test() hypothesis.settings.register_profile( name="numpy-profile", deadline=None, print_blob=True, @@ -32,8 +48,8 @@ deadline=None, print_blob=True, database=None, derandomize=True, suppress_health_check=list(hypothesis.HealthCheck), ) -# Note that the default profile is chosen based on the presence -# of pytest.ini, but can be overridden by passing the +# Note that the default profile is chosen based on the presence +# of pytest.ini, but can be overridden by passing the # --hypothesis-profile=NAME argument to pytest. 
_pytest_ini = os.path.join(os.path.dirname(__file__), "..", "pytest.ini") hypothesis.settings.load_profile( @@ -50,8 +66,17 @@ def pytest_configure(config): "leaks_references: Tests that are known to leak references.") config.addinivalue_line("markers", "slow: Tests that are very slow.") - config.addinivalue_line("markers", - "slow_pypy: Tests that are very slow on pypy.") + if not PARALLEL_RUN_AVALIABLE: + config.addinivalue_line("markers", + "parallel_threads(n): run the given test function in parallel " + "using `n` threads.", + ) + config.addinivalue_line("markers", + "iterations(n): run the given test function `n` times in each thread", + ) + config.addinivalue_line("markers", + "thread_unsafe: mark the test function as single-threaded", + ) def pytest_addoption(parser): @@ -64,14 +89,33 @@ def pytest_addoption(parser): "automatically.")) +gil_enabled_at_start = True +if NOGIL_BUILD: + gil_enabled_at_start = sys._is_gil_enabled() + + def pytest_sessionstart(session): available_mem = session.config.getoption('available_memory') if available_mem is not None: os.environ['NPY_AVAILABLE_MEM'] = available_mem -#FIXME when yield tests are gone. -@pytest.hookimpl() +def pytest_terminal_summary(terminalreporter, exitstatus, config): + if NOGIL_BUILD and not gil_enabled_at_start and sys._is_gil_enabled(): + tr = terminalreporter + tr.ensure_newline() + tr.section("GIL re-enabled", sep="=", red=True, bold=True) + tr.line("The GIL was re-enabled at runtime during the tests.") + tr.line("This can happen with no test failures if the RuntimeWarning") + tr.line("raised by Python when this happens is filtered by a test.") + tr.line("") + tr.line("Please ensure all new C modules declare support for running") + tr.line("without the GIL. Any new tests that intentionally imports ") + tr.line("code that re-enables the GIL should do so in a subprocess.") + pytest.exit("GIL re-enabled during tests", returncode=1) + +# FIXME when yield tests are gone. 
+@pytest.hookimpl(tryfirst=True) def pytest_itemcollected(item): """ Check FPU precision mode was not changed during test collection. @@ -90,6 +134,11 @@ def pytest_itemcollected(item): _collect_results[item] = (_old_fpu_mode, mode) _old_fpu_mode = mode + # mark f2py tests as thread unsafe + if Path(item.fspath).parent == Path(__file__).parent / 'f2py' / 'tests': + item.add_marker(pytest.mark.thread_unsafe( + reason="f2py tests are thread-unsafe")) + @pytest.fixture(scope="function", autouse=True) def check_fpu_mode(request): @@ -101,38 +150,100 @@ def check_fpu_mode(request): new_mode = get_fpu_mode() if old_mode != new_mode: - raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}" - " during the test".format(old_mode, new_mode)) + raise AssertionError(f"FPU precision mode changed from {old_mode:#x} to " + f"{new_mode:#x} during the test") collect_result = _collect_results.get(request.node) if collect_result is not None: old_mode, new_mode = collect_result - raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}" - " when collecting the test".format(old_mode, - new_mode)) + raise AssertionError(f"FPU precision mode changed from {old_mode:#x} to " + f"{new_mode:#x} when collecting the test") @pytest.fixture(autouse=True) def add_np(doctest_namespace): doctest_namespace['np'] = numpy -@pytest.fixture(autouse=True) -def env_setup(monkeypatch): - monkeypatch.setenv('PYTHONHASHSEED', '0') - -@pytest.fixture(params=[True, False]) -def weak_promotion(request): - """ - Fixture to ensure "legacy" promotion state or change it to use the new - weak promotion (plus warning). `old_promotion` should be used as a - parameter in the function. 
- """ - state = numpy._get_promotion_state() - if request.param: - numpy._set_promotion_state("weak_and_warn") - else: - numpy._set_promotion_state("legacy") - - yield request.param - numpy._set_promotion_state(state) +if HAVE_SCPDT: + + @contextmanager + def warnings_errors_and_rng(test=None): + """Filter out the wall of DeprecationWarnings. + """ + msgs = ["The numpy.linalg.linalg", + "The numpy.fft.helper", + "dep_util", + "pkg_resources", + "numpy.core.umath", + "msvccompiler", + "Deprecated call", + "numpy.core", + "Importing from numpy.matlib", + "This function is deprecated.", # random_integers + "Arrays of 2-dimensional vectors", # matlib.cross + "NumPy warning suppression and assertion utilities are deprecated.", + "numpy.fix is deprecated", # fix -> trunc + "The chararray class is deprecated", # char.chararray + "numpy.typename is deprecated", # typename -> dtype.name + "numpy.ma.round_ is deprecated", # ma.round_ -> ma.round + ] + msg = "|".join(msgs) + + msgs_r = [ + "invalid value encountered", + "divide by zero encountered" + ] + msg_r = "|".join(msgs_r) + + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', category=DeprecationWarning, message=msg + ) + warnings.filterwarnings( + 'ignore', category=RuntimeWarning, message=msg_r + ) + yield + + # find and check doctests under this context manager + dt_config.user_context_mgr = warnings_errors_and_rng + + # numpy specific tweaks from refguide-check + dt_config.rndm_markers.add('#uninitialized') + dt_config.rndm_markers.add('# uninitialized') + + # make the checker pick on mismatched dtypes + dt_config.strict_check = True + + import doctest + dt_config.optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS + + # recognize the StringDType repr + dt_config.check_namespace['StringDType'] = numpy.dtypes.StringDType + + # temporary skips + dt_config.skiplist = { + 'numpy.savez', # unclosed file + 'numpy.matlib.savez', + 'numpy.__array_namespace_info__', + 
'numpy.matlib.__array_namespace_info__', + } + + # xfail problematic tutorials + dt_config.pytest_extra_xfail = { + 'how-to-verify-bug.rst': '', + 'c-info.ufunc-tutorial.rst': '', + 'basics.interoperability.rst': 'needs pandas', + 'basics.dispatch.rst': 'errors out in /testing/overrides.py', + 'basics.subclassing.rst': '.. testcode:: admonitions not understood', + 'misc.rst': 'manipulates warnings', + } + + # ignores are for things fail doctest collection (optionals etc) + dt_config.pytest_extra_ignore = [ + 'numpy/distutils', + 'numpy/_core/cversions.py', + 'numpy/_pyinstaller', + 'numpy/random/_examples', + 'numpy/f2py/_backends/_distutils.py', + ] diff --git a/numpy/core/__init__.py b/numpy/core/__init__.py index e7d3c678b429..cfd96ede6895 100644 --- a/numpy/core/__init__.py +++ b/numpy/core/__init__.py @@ -4,6 +4,7 @@ `numpy.core` will be removed in the future. """ from numpy import _core + from ._utils import _raise_warning @@ -21,7 +22,7 @@ def _ufunc_reconstruct(module, name): # force lazy-loading of submodules to ensure a warning is printed -__all__ = ["arrayprint", "defchararray", "_dtype_ctypes", "_dtype", +__all__ = ["arrayprint", "defchararray", "_dtype_ctypes", "_dtype", # noqa: F822 "einsumfunc", "fromnumeric", "function_base", "getlimits", "_internal", "multiarray", "_multiarray_umath", "numeric", "numerictypes", "overrides", "records", "shape_base", "umath"] diff --git a/numpy/core/__init__.pyi b/numpy/core/__init__.pyi index e69de29bb2d1..cecacb907939 100644 --- a/numpy/core/__init__.pyi +++ b/numpy/core/__init__.pyi @@ -0,0 +1,45 @@ +# deprecated module + +from types import ModuleType + +from . 
import ( + _dtype, + _dtype_ctypes, + _internal, + arrayprint, + defchararray, + einsumfunc, + fromnumeric, + function_base, + getlimits, + multiarray, + numeric, + numerictypes, + overrides, + records, + shape_base, + umath, +) + +__all__ = [ + "_dtype", + "_dtype_ctypes", + "_internal", + "_multiarray_umath", + "arrayprint", + "defchararray", + "einsumfunc", + "fromnumeric", + "function_base", + "getlimits", + "multiarray", + "numeric", + "numerictypes", + "overrides", + "records", + "shape_base", + "umath", +] + +# `numpy._core._multiarray_umath` has no stubs, so there's nothing to re-export +_multiarray_umath: ModuleType diff --git a/numpy/core/_dtype.py b/numpy/core/_dtype.py index 613a1d259a15..5446079097bc 100644 --- a/numpy/core/_dtype.py +++ b/numpy/core/_dtype.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import _dtype + from ._utils import _raise_warning ret = getattr(_dtype, attr_name, None) if ret is None: diff --git a/numpy/distutils/tests/__init__.py b/numpy/core/_dtype.pyi similarity index 100% rename from numpy/distutils/tests/__init__.py rename to numpy/core/_dtype.pyi diff --git a/numpy/core/_dtype_ctypes.py b/numpy/core/_dtype_ctypes.py index 0dadd7949ecb..10cfba25ec6a 100644 --- a/numpy/core/_dtype_ctypes.py +++ b/numpy/core/_dtype_ctypes.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import _dtype_ctypes + from ._utils import _raise_warning ret = getattr(_dtype_ctypes, attr_name, None) if ret is None: diff --git a/numpy/core/_dtype_ctypes.pyi b/numpy/core/_dtype_ctypes.pyi new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py index 7755c7c35505..63a6ccc75ef7 100644 --- a/numpy/core/_internal.py +++ b/numpy/core/_internal.py @@ -1,5 +1,6 @@ from numpy._core import _internal + # Build a new array from the information in a pickle. 
# Note that the name numpy.core._internal._reconstruct is embedded in # pickles of ndarrays made with NumPy before release 1.0 @@ -16,6 +17,7 @@ def _reconstruct(subtype, shape, dtype): def __getattr__(attr_name): from numpy._core import _internal + from ._utils import _raise_warning ret = getattr(_internal, attr_name, None) if ret is None: diff --git a/numpy/core/_internal.pyi b/numpy/core/_internal.pyi new file mode 100644 index 000000000000..449f01c97af7 --- /dev/null +++ b/numpy/core/_internal.pyi @@ -0,0 +1 @@ +# deprecated module diff --git a/numpy/core/_multiarray_umath.py b/numpy/core/_multiarray_umath.py index 04cc88229aac..c1e6b4e8c932 100644 --- a/numpy/core/_multiarray_umath.py +++ b/numpy/core/_multiarray_umath.py @@ -1,5 +1,5 @@ -from numpy._core import _multiarray_umath from numpy import ufunc +from numpy._core import _multiarray_umath for item in _multiarray_umath.__dir__(): # ufuncs appear in pickles with a path in numpy.core._multiarray_umath @@ -11,13 +11,15 @@ def __getattr__(attr_name): from numpy._core import _multiarray_umath + from ._utils import _raise_warning if attr_name in {"_ARRAY_API", "_UFUNC_API"}: - from numpy.version import short_version + import sys import textwrap import traceback - import sys + + from numpy.version import short_version msg = textwrap.dedent(f""" A module that was compiled using NumPy 1.x cannot be run in diff --git a/numpy/core/_utils.py b/numpy/core/_utils.py index ad076b0315f1..5f47f4ba46f8 100644 --- a/numpy/core/_utils.py +++ b/numpy/core/_utils.py @@ -1,7 +1,7 @@ import warnings -def _raise_warning(attr: str, submodule: str = None) -> None: +def _raise_warning(attr: str, submodule: str | None = None) -> None: new_module = "numpy._core" old_module = "numpy.core" if submodule is not None: @@ -16,6 +16,6 @@ def _raise_warning(attr: str, submodule: str = None) -> None: "use the public NumPy API. If not, you are using NumPy internals. 
" "If you would still like to access an internal attribute, " f"use {new_module}.{attr}.", - DeprecationWarning, + DeprecationWarning, stacklevel=3 ) diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index 4e746546acf0..8be5c5c7cf77 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import arrayprint + from ._utils import _raise_warning ret = getattr(arrayprint, attr_name, None) if ret is None: diff --git a/numpy/core/arrayprint.pyi b/numpy/core/arrayprint.pyi new file mode 100644 index 000000000000..c4e5c5e5cc44 --- /dev/null +++ b/numpy/core/arrayprint.pyi @@ -0,0 +1,4 @@ +# deprecated module + +from numpy._core.arrayprint import * +from numpy._core.arrayprint import __all__ as __all__ diff --git a/numpy/core/defchararray.py b/numpy/core/defchararray.py index ffab82acff5b..1c8706875e1c 100644 --- a/numpy/core/defchararray.py +++ b/numpy/core/defchararray.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import defchararray + from ._utils import _raise_warning ret = getattr(defchararray, attr_name, None) if ret is None: diff --git a/numpy/core/defchararray.pyi b/numpy/core/defchararray.pyi new file mode 100644 index 000000000000..4a2f369c1f7d --- /dev/null +++ b/numpy/core/defchararray.pyi @@ -0,0 +1,4 @@ +# deprecated module + +from numpy._core.defchararray import * +from numpy._core.defchararray import __all__ as __all__ diff --git a/numpy/core/einsumfunc.py b/numpy/core/einsumfunc.py index 74aa410ff4b5..fe5aa399fd17 100644 --- a/numpy/core/einsumfunc.py +++ b/numpy/core/einsumfunc.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import einsumfunc + from ._utils import _raise_warning ret = getattr(einsumfunc, attr_name, None) if ret is None: diff --git a/numpy/core/einsumfunc.pyi b/numpy/core/einsumfunc.pyi new file mode 100644 index 000000000000..476c79bc2006 --- /dev/null +++ b/numpy/core/einsumfunc.pyi @@ -0,0 +1,4 @@ +# deprecated module 
+ +from numpy._core.einsumfunc import * +from numpy._core.einsumfunc import __all__ as __all__ diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index 1ea11d799d6f..fae7a0399f10 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import fromnumeric + from ._utils import _raise_warning ret = getattr(fromnumeric, attr_name, None) if ret is None: diff --git a/numpy/core/fromnumeric.pyi b/numpy/core/fromnumeric.pyi new file mode 100644 index 000000000000..8e5ac5b765f5 --- /dev/null +++ b/numpy/core/fromnumeric.pyi @@ -0,0 +1,4 @@ +# deprecated module + +from numpy._core.fromnumeric import * +from numpy._core.fromnumeric import __all__ as __all__ diff --git a/numpy/core/function_base.py b/numpy/core/function_base.py index 20e098b6fe44..e15c9714167c 100644 --- a/numpy/core/function_base.py +++ b/numpy/core/function_base.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import function_base + from ._utils import _raise_warning ret = getattr(function_base, attr_name, None) if ret is None: diff --git a/numpy/core/function_base.pyi b/numpy/core/function_base.pyi new file mode 100644 index 000000000000..fa041a9d3d60 --- /dev/null +++ b/numpy/core/function_base.pyi @@ -0,0 +1,4 @@ +# deprecated module + +from numpy._core.function_base import * +from numpy._core.function_base import __all__ as __all__ diff --git a/numpy/core/getlimits.py b/numpy/core/getlimits.py index faa084ae7770..dc009cbd961a 100644 --- a/numpy/core/getlimits.py +++ b/numpy/core/getlimits.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import getlimits + from ._utils import _raise_warning ret = getattr(getlimits, attr_name, None) if ret is None: diff --git a/numpy/core/getlimits.pyi b/numpy/core/getlimits.pyi new file mode 100644 index 000000000000..91a9dec49d42 --- /dev/null +++ b/numpy/core/getlimits.pyi @@ -0,0 +1,4 @@ +# deprecated module + +from numpy._core.getlimits import * 
+from numpy._core.getlimits import __all__ as __all__ diff --git a/numpy/core/multiarray.py b/numpy/core/multiarray.py index 0290c852a8ab..b226709426fc 100644 --- a/numpy/core/multiarray.py +++ b/numpy/core/multiarray.py @@ -12,6 +12,7 @@ def __getattr__(attr_name): from numpy._core import multiarray + from ._utils import _raise_warning ret = getattr(multiarray, attr_name, None) if ret is None: diff --git a/numpy/core/multiarray.pyi b/numpy/core/multiarray.pyi new file mode 100644 index 000000000000..d58f20dcc4c8 --- /dev/null +++ b/numpy/core/multiarray.pyi @@ -0,0 +1,4 @@ +# deprecated module + +from numpy._core.multiarray import * +from numpy._core.multiarray import __all__ as __all__ diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py index af0658d4fb66..ddd70b363acc 100644 --- a/numpy/core/numeric.py +++ b/numpy/core/numeric.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import numeric + from ._utils import _raise_warning sentinel = object() diff --git a/numpy/core/numeric.pyi b/numpy/core/numeric.pyi new file mode 100644 index 000000000000..dbb936364c46 --- /dev/null +++ b/numpy/core/numeric.pyi @@ -0,0 +1,4 @@ +# deprecated module + +from numpy._core.numeric import * +from numpy._core.numeric import __all__ as __all__ diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py index 0e887cbf30ad..cf2ad99f911b 100644 --- a/numpy/core/numerictypes.py +++ b/numpy/core/numerictypes.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import numerictypes + from ._utils import _raise_warning ret = getattr(numerictypes, attr_name, None) if ret is None: diff --git a/numpy/core/numerictypes.pyi b/numpy/core/numerictypes.pyi new file mode 100644 index 000000000000..5251eae02b6a --- /dev/null +++ b/numpy/core/numerictypes.pyi @@ -0,0 +1,4 @@ +# deprecated module + +from numpy._core.numerictypes import * +from numpy._core.numerictypes import __all__ as __all__ diff --git a/numpy/core/overrides.py b/numpy/core/overrides.py 
index 3297999c5b01..17830ed41021 100644 --- a/numpy/core/overrides.py +++ b/numpy/core/overrides.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import overrides + from ._utils import _raise_warning ret = getattr(overrides, attr_name, None) if ret is None: diff --git a/numpy/core/overrides.pyi b/numpy/core/overrides.pyi new file mode 100644 index 000000000000..fab3512626f8 --- /dev/null +++ b/numpy/core/overrides.pyi @@ -0,0 +1,7 @@ +# NOTE: At runtime, this submodule dynamically re-exports any `numpy._core.overrides` +# member, and issues a `DeprecationWarning` when accessed. But since there is no +# `__dir__` or `__all__` present, these annotations would be unverifiable. Because +# this module is also deprecated in favor of `numpy._core`, and therefore not part of +# the public API, we omit the "re-exports", which in practice would require literal +# duplication of the stubs in order for the `@deprecated` decorator to be understood +# by type-checkers. diff --git a/numpy/core/records.py b/numpy/core/records.py index 94c0d26926a0..0cc45037d22d 100644 --- a/numpy/core/records.py +++ b/numpy/core/records.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import records + from ._utils import _raise_warning ret = getattr(records, attr_name, None) if ret is None: diff --git a/numpy/core/records.pyi b/numpy/core/records.pyi new file mode 100644 index 000000000000..f6672b47ba6a --- /dev/null +++ b/numpy/core/records.pyi @@ -0,0 +1,4 @@ +# deprecated module + +from numpy._core.records import * +from numpy._core.records import __all__ as __all__ diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py index 10b8712c8b96..9cffce705908 100644 --- a/numpy/core/shape_base.py +++ b/numpy/core/shape_base.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import shape_base + from ._utils import _raise_warning ret = getattr(shape_base, attr_name, None) if ret is None: diff --git a/numpy/core/shape_base.pyi 
b/numpy/core/shape_base.pyi new file mode 100644 index 000000000000..0d4d077d7e64 --- /dev/null +++ b/numpy/core/shape_base.pyi @@ -0,0 +1,4 @@ +# deprecated module + +from numpy._core.shape_base import * +from numpy._core.shape_base import __all__ as __all__ diff --git a/numpy/core/umath.py b/numpy/core/umath.py index 6ef031d7d62a..25a60cc9dc62 100644 --- a/numpy/core/umath.py +++ b/numpy/core/umath.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import umath + from ._utils import _raise_warning ret = getattr(umath, attr_name, None) if ret is None: diff --git a/numpy/core/umath.pyi b/numpy/core/umath.pyi new file mode 100644 index 000000000000..b32fc9b11d8f --- /dev/null +++ b/numpy/core/umath.pyi @@ -0,0 +1,4 @@ +# deprecated module + +from numpy._core.umath import * +from numpy._core.umath import __all__ as __all__ diff --git a/numpy/ctypeslib.pyi b/numpy/ctypeslib.pyi deleted file mode 100644 index ce8854ca13c1..000000000000 --- a/numpy/ctypeslib.pyi +++ /dev/null @@ -1,253 +0,0 @@ -# NOTE: Numpy's mypy plugin is used for importing the correct -# platform-specific `ctypes._SimpleCData[int]` sub-type -from ctypes import c_int64 as _c_intp - -import os -import ctypes -from collections.abc import Iterable, Sequence -from typing import ( - Literal as L, - Any, - TypeVar, - Generic, - overload, - ClassVar, -) - -import numpy as np -from numpy import ( - ndarray, - dtype, - generic, - byte, - short, - intc, - long, - longlong, - intp, - ubyte, - ushort, - uintc, - ulong, - ulonglong, - uintp, - single, - double, - longdouble, - void, -) -from numpy._core._internal import _ctypes -from numpy._core.multiarray import flagsobj -from numpy._typing import ( - # Arrays - NDArray, - _ArrayLike, - - # Shapes - _ShapeLike, - - # DTypes - DTypeLike, - _DTypeLike, - _VoidDTypeLike, - _BoolCodes, - _UByteCodes, - _UShortCodes, - _UIntCCodes, - _ULongCodes, - _ULongLongCodes, - _ByteCodes, - _ShortCodes, - _IntCCodes, - _LongCodes, - _LongLongCodes, - _SingleCodes, 
- _DoubleCodes, - _LongDoubleCodes, -) - -# TODO: Add a proper `_Shape` bound once we've got variadic typevars -_DType = TypeVar("_DType", bound=dtype[Any]) -_DTypeOptional = TypeVar("_DTypeOptional", bound=None | dtype[Any]) -_SCT = TypeVar("_SCT", bound=generic) - -_FlagsKind = L[ - 'C_CONTIGUOUS', 'CONTIGUOUS', 'C', - 'F_CONTIGUOUS', 'FORTRAN', 'F', - 'ALIGNED', 'A', - 'WRITEABLE', 'W', - 'OWNDATA', 'O', - 'WRITEBACKIFCOPY', 'X', -] - -# TODO: Add a shape typevar once we have variadic typevars (PEP 646) -class _ndptr(ctypes.c_void_p, Generic[_DTypeOptional]): - # In practice these 4 classvars are defined in the dynamic class - # returned by `ndpointer` - _dtype_: ClassVar[_DTypeOptional] - _shape_: ClassVar[None] - _ndim_: ClassVar[None | int] - _flags_: ClassVar[None | list[_FlagsKind]] - - @overload - @classmethod - def from_param(cls: type[_ndptr[None]], obj: NDArray[Any]) -> _ctypes[Any]: ... - @overload - @classmethod - def from_param(cls: type[_ndptr[_DType]], obj: ndarray[Any, _DType]) -> _ctypes[Any]: ... - -class _concrete_ndptr(_ndptr[_DType]): - _dtype_: ClassVar[_DType] - _shape_: ClassVar[tuple[int, ...]] - @property - def contents(self) -> ndarray[Any, _DType]: ... - -def load_library( - libname: str | bytes | os.PathLike[str] | os.PathLike[bytes], - loader_path: str | bytes | os.PathLike[str] | os.PathLike[bytes], -) -> ctypes.CDLL: ... - -__all__: list[str] - -c_intp = _c_intp - -@overload -def ndpointer( - dtype: None = ..., - ndim: int = ..., - shape: None | _ShapeLike = ..., - flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ..., -) -> type[_ndptr[None]]: ... -@overload -def ndpointer( - dtype: _DTypeLike[_SCT], - ndim: int = ..., - *, - shape: _ShapeLike, - flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ..., -) -> type[_concrete_ndptr[dtype[_SCT]]]: ... 
-@overload -def ndpointer( - dtype: DTypeLike, - ndim: int = ..., - *, - shape: _ShapeLike, - flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ..., -) -> type[_concrete_ndptr[dtype[Any]]]: ... -@overload -def ndpointer( - dtype: _DTypeLike[_SCT], - ndim: int = ..., - shape: None = ..., - flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ..., -) -> type[_ndptr[dtype[_SCT]]]: ... -@overload -def ndpointer( - dtype: DTypeLike, - ndim: int = ..., - shape: None = ..., - flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ..., -) -> type[_ndptr[dtype[Any]]]: ... - -@overload -def as_ctypes_type(dtype: _BoolCodes | _DTypeLike[np.bool] | type[ctypes.c_bool]) -> type[ctypes.c_bool]: ... -@overload -def as_ctypes_type(dtype: _ByteCodes | _DTypeLike[byte] | type[ctypes.c_byte]) -> type[ctypes.c_byte]: ... -@overload -def as_ctypes_type(dtype: _ShortCodes | _DTypeLike[short] | type[ctypes.c_short]) -> type[ctypes.c_short]: ... -@overload -def as_ctypes_type(dtype: _IntCCodes | _DTypeLike[intc] | type[ctypes.c_int]) -> type[ctypes.c_int]: ... -@overload -def as_ctypes_type(dtype: _LongCodes | _DTypeLike[long] | type[ctypes.c_long]) -> type[ctypes.c_long]: ... -@overload -def as_ctypes_type(dtype: type[int]) -> type[c_intp]: ... -@overload -def as_ctypes_type(dtype: _LongLongCodes | _DTypeLike[longlong] | type[ctypes.c_longlong]) -> type[ctypes.c_longlong]: ... -@overload -def as_ctypes_type(dtype: _UByteCodes | _DTypeLike[ubyte] | type[ctypes.c_ubyte]) -> type[ctypes.c_ubyte]: ... -@overload -def as_ctypes_type(dtype: _UShortCodes | _DTypeLike[ushort] | type[ctypes.c_ushort]) -> type[ctypes.c_ushort]: ... -@overload -def as_ctypes_type(dtype: _UIntCCodes | _DTypeLike[uintc] | type[ctypes.c_uint]) -> type[ctypes.c_uint]: ... -@overload -def as_ctypes_type(dtype: _ULongCodes | _DTypeLike[ulong] | type[ctypes.c_ulong]) -> type[ctypes.c_ulong]: ... 
-@overload -def as_ctypes_type(dtype: _ULongLongCodes | _DTypeLike[ulonglong] | type[ctypes.c_ulonglong]) -> type[ctypes.c_ulonglong]: ... -@overload -def as_ctypes_type(dtype: _SingleCodes | _DTypeLike[single] | type[ctypes.c_float]) -> type[ctypes.c_float]: ... -@overload -def as_ctypes_type(dtype: _DoubleCodes | _DTypeLike[double] | type[float | ctypes.c_double]) -> type[ctypes.c_double]: ... -@overload -def as_ctypes_type(dtype: _LongDoubleCodes | _DTypeLike[longdouble] | type[ctypes.c_longdouble]) -> type[ctypes.c_longdouble]: ... -@overload -def as_ctypes_type(dtype: _VoidDTypeLike) -> type[Any]: ... # `ctypes.Union` or `ctypes.Structure` -@overload -def as_ctypes_type(dtype: str) -> type[Any]: ... - -@overload -def as_array(obj: ctypes._PointerLike, shape: Sequence[int]) -> NDArray[Any]: ... -@overload -def as_array(obj: _ArrayLike[_SCT], shape: None | _ShapeLike = ...) -> NDArray[_SCT]: ... -@overload -def as_array(obj: object, shape: None | _ShapeLike = ...) -> NDArray[Any]: ... - -@overload -def as_ctypes(obj: np.bool) -> ctypes.c_bool: ... -@overload -def as_ctypes(obj: byte) -> ctypes.c_byte: ... -@overload -def as_ctypes(obj: short) -> ctypes.c_short: ... -@overload -def as_ctypes(obj: intc) -> ctypes.c_int: ... -@overload -def as_ctypes(obj: long) -> ctypes.c_long: ... -@overload -def as_ctypes(obj: longlong) -> ctypes.c_longlong: ... -@overload -def as_ctypes(obj: ubyte) -> ctypes.c_ubyte: ... -@overload -def as_ctypes(obj: ushort) -> ctypes.c_ushort: ... -@overload -def as_ctypes(obj: uintc) -> ctypes.c_uint: ... -@overload -def as_ctypes(obj: ulong) -> ctypes.c_ulong: ... -@overload -def as_ctypes(obj: ulonglong) -> ctypes.c_ulonglong: ... -@overload -def as_ctypes(obj: single) -> ctypes.c_float: ... -@overload -def as_ctypes(obj: double) -> ctypes.c_double: ... -@overload -def as_ctypes(obj: longdouble) -> ctypes.c_longdouble: ... -@overload -def as_ctypes(obj: void) -> Any: ... 
# `ctypes.Union` or `ctypes.Structure` -@overload -def as_ctypes(obj: NDArray[np.bool]) -> ctypes.Array[ctypes.c_bool]: ... -@overload -def as_ctypes(obj: NDArray[byte]) -> ctypes.Array[ctypes.c_byte]: ... -@overload -def as_ctypes(obj: NDArray[short]) -> ctypes.Array[ctypes.c_short]: ... -@overload -def as_ctypes(obj: NDArray[intc]) -> ctypes.Array[ctypes.c_int]: ... -@overload -def as_ctypes(obj: NDArray[long]) -> ctypes.Array[ctypes.c_long]: ... -@overload -def as_ctypes(obj: NDArray[longlong]) -> ctypes.Array[ctypes.c_longlong]: ... -@overload -def as_ctypes(obj: NDArray[ubyte]) -> ctypes.Array[ctypes.c_ubyte]: ... -@overload -def as_ctypes(obj: NDArray[ushort]) -> ctypes.Array[ctypes.c_ushort]: ... -@overload -def as_ctypes(obj: NDArray[uintc]) -> ctypes.Array[ctypes.c_uint]: ... -@overload -def as_ctypes(obj: NDArray[ulong]) -> ctypes.Array[ctypes.c_ulong]: ... -@overload -def as_ctypes(obj: NDArray[ulonglong]) -> ctypes.Array[ctypes.c_ulonglong]: ... -@overload -def as_ctypes(obj: NDArray[single]) -> ctypes.Array[ctypes.c_float]: ... -@overload -def as_ctypes(obj: NDArray[double]) -> ctypes.Array[ctypes.c_double]: ... -@overload -def as_ctypes(obj: NDArray[longdouble]) -> ctypes.Array[ctypes.c_longdouble]: ... -@overload -def as_ctypes(obj: NDArray[void]) -> ctypes.Array[Any]: ... 
# `ctypes.Union` or `ctypes.Structure` diff --git a/numpy/ctypeslib/__init__.py b/numpy/ctypeslib/__init__.py new file mode 100644 index 000000000000..fd3c773e43bb --- /dev/null +++ b/numpy/ctypeslib/__init__.py @@ -0,0 +1,13 @@ +from ._ctypeslib import ( + __all__, + __doc__, + _concrete_ndptr, + _ndptr, + as_array, + as_ctypes, + as_ctypes_type, + c_intp, + ctypes, + load_library, + ndpointer, +) diff --git a/numpy/ctypeslib/__init__.pyi b/numpy/ctypeslib/__init__.pyi new file mode 100644 index 000000000000..f088d0281d33 --- /dev/null +++ b/numpy/ctypeslib/__init__.pyi @@ -0,0 +1,15 @@ +import ctypes +from ctypes import c_int64 as _c_intp + +from ._ctypeslib import ( + __all__ as __all__, + __doc__ as __doc__, + _concrete_ndptr as _concrete_ndptr, + _ndptr as _ndptr, + as_array as as_array, + as_ctypes as as_ctypes, + as_ctypes_type as as_ctypes_type, + c_intp as c_intp, + load_library as load_library, + ndpointer as ndpointer, +) diff --git a/numpy/ctypeslib.py b/numpy/ctypeslib/_ctypeslib.py similarity index 80% rename from numpy/ctypeslib.py rename to numpy/ctypeslib/_ctypeslib.py index 8faf9415375b..a18e11810418 100644 --- a/numpy/ctypeslib.py +++ b/numpy/ctypeslib/_ctypeslib.py @@ -53,10 +53,10 @@ 'as_ctypes_type'] import os -from numpy import ( - integer, ndarray, dtype as _dtype, asarray, frombuffer -) -from numpy._core.multiarray import _flagdict, flagsobj + +import numpy as np +import numpy._core.multiarray as mu +from numpy._utils import set_module try: import ctypes @@ -64,6 +64,7 @@ ctypes = None if ctypes is None: + @set_module("numpy.ctypeslib") def _dummy(*args, **kwds): """ Dummy object that raises an ImportError if ctypes is not available. 
@@ -77,7 +78,9 @@ def _dummy(*args, **kwds): raise ImportError("ctypes is not available.") load_library = _dummy as_ctypes = _dummy + as_ctypes_type = _dummy as_array = _dummy + ndpointer = _dummy from numpy import intp as c_intp _ndptr_base = object else: @@ -87,6 +90,7 @@ def _dummy(*args, **kwds): _ndptr_base = ctypes.c_void_p # Adapted from Albert Strasheim + @set_module("numpy.ctypeslib") def load_library(libname, loader_path): """ It is possible to load a library using @@ -155,24 +159,25 @@ def load_library(libname, loader_path): try: return ctypes.cdll[libpath] except OSError: - ## defective lib file + # defective lib file raise - ## if no successful return in the libname_ext loop: + # if no successful return in the libname_ext loop: raise OSError("no file with expected extension") def _num_fromflags(flaglist): num = 0 for val in flaglist: - num += _flagdict[val] + num += mu._flagdict[val] return num + _flagnames = ['C_CONTIGUOUS', 'F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE', 'OWNDATA', 'WRITEBACKIFCOPY'] def _flags_fromnum(num): res = [] for key in _flagnames: - value = _flagdict[key] + value = mu._flagdict[key] if (num & value): res.append(key) return res @@ -181,21 +186,20 @@ def _flags_fromnum(num): class _ndptr(_ndptr_base): @classmethod def from_param(cls, obj): - if not isinstance(obj, ndarray): + if not isinstance(obj, np.ndarray): raise TypeError("argument must be an ndarray") if cls._dtype_ is not None \ and obj.dtype != cls._dtype_: - raise TypeError("array must have data type %s" % cls._dtype_) + raise TypeError(f"array must have data type {cls._dtype_}") if cls._ndim_ is not None \ and obj.ndim != cls._ndim_: raise TypeError("array must have %d dimension(s)" % cls._ndim_) if cls._shape_ is not None \ and obj.shape != cls._shape_: - raise TypeError("array must have shape %s" % str(cls._shape_)) + raise TypeError(f"array must have shape {str(cls._shape_)}") if cls._flags_ is not None \ and ((obj.flags.num & cls._flags_) != cls._flags_): - raise 
TypeError("array must have flags %s" % - _flags_fromnum(cls._flags_)) + raise TypeError(f"array must have flags {_flags_fromnum(cls._flags_)}") return obj.ctypes @@ -221,15 +225,17 @@ def contents(self): This mirrors the `contents` attribute of a normal ctypes pointer """ - full_dtype = _dtype((self._dtype_, self._shape_)) + full_dtype = np.dtype((self._dtype_, self._shape_)) full_ctype = ctypes.c_char * full_dtype.itemsize buffer = ctypes.cast(self, ctypes.POINTER(full_ctype)).contents - return frombuffer(buffer, dtype=full_dtype).squeeze(axis=0) + return np.frombuffer(buffer, dtype=full_dtype).squeeze(axis=0) # Factory for an array-checking class with from_param defined for -# use with ctypes argtypes mechanism +# use with ctypes argtypes mechanism _pointer_type_cache = {} + +@set_module("numpy.ctypeslib") def ndpointer(dtype=None, ndim=None, shape=None, flags=None): """ Array-checking restype/argtypes. @@ -282,19 +288,19 @@ def ndpointer(dtype=None, ndim=None, shape=None, flags=None): """ - # normalize dtype to an Optional[dtype] + # normalize dtype to dtype | None if dtype is not None: - dtype = _dtype(dtype) + dtype = np.dtype(dtype) - # normalize flags to an Optional[int] + # normalize flags to int | None num = None if flags is not None: if isinstance(flags, str): flags = flags.split(',') - elif isinstance(flags, (int, integer)): + elif isinstance(flags, (int, np.integer)): num = flags flags = _flags_fromnum(num) - elif isinstance(flags, flagsobj): + elif isinstance(flags, mu.flagsobj): num = flags.num flags = _flags_fromnum(num) if num is None: @@ -304,7 +310,7 @@ def ndpointer(dtype=None, ndim=None, shape=None, flags=None): raise TypeError("invalid flags specification") from e num = _num_fromflags(flags) - # normalize shape to an Optional[tuple] + # normalize shape to tuple | None if shape is not None: try: shape = tuple(shape) @@ -329,20 +335,20 @@ def ndpointer(dtype=None, ndim=None, shape=None, flags=None): if ndim is not None: name += "_%dd" % ndim if 
shape is not None: - name += "_"+"x".join(str(x) for x in shape) + name += "_" + "x".join(str(x) for x in shape) if flags is not None: - name += "_"+"_".join(flags) + name += "_" + "_".join(flags) if dtype is not None and shape is not None: base = _concrete_ndptr else: base = _ndptr - klass = type("ndpointer_%s"%name, (base,), + klass = type(f"ndpointer_{name}", (base,), {"_dtype_": dtype, - "_shape_" : shape, - "_ndim_" : ndim, - "_flags_" : num}) + "_shape_": shape, + "_ndim_": ndim, + "_flags_": num}) _pointer_type_cache[cache_key] = klass return klass @@ -356,7 +362,6 @@ def _ctype_ndarray(element_type, shape): element_type.__module__ = None return element_type - def _get_scalar_type_map(): """ Return a dictionary mapping native endian scalar dtype to ctypes types @@ -368,12 +373,10 @@ def _get_scalar_type_map(): ct.c_float, ct.c_double, ct.c_bool, ] - return {_dtype(ctype): ctype for ctype in simple_types} - + return {np.dtype(ctype): ctype for ctype in simple_types} _scalar_type_map = _get_scalar_type_map() - def _ctype_from_dtype_scalar(dtype): # swapping twice ensure that `=` is promoted to <, >, or | dtype_with_endian = dtype.newbyteorder('S').newbyteorder('S') @@ -382,7 +385,7 @@ def _ctype_from_dtype_scalar(dtype): ctype = _scalar_type_map[dtype_native] except KeyError as e: raise NotImplementedError( - "Converting {!r} to a ctypes type".format(dtype) + f"Converting {dtype!r} to a ctypes type" ) from None if dtype_with_endian.byteorder == '>': @@ -392,13 +395,11 @@ def _ctype_from_dtype_scalar(dtype): return ctype - def _ctype_from_dtype_subarray(dtype): element_dtype, shape = dtype.subdtype ctype = _ctype_from_dtype(element_dtype) return _ctype_ndarray(ctype, shape) - def _ctype_from_dtype_structured(dtype): # extract offsets of each field field_data = [] @@ -409,7 +410,7 @@ def _ctype_from_dtype_structured(dtype): # ctypes doesn't care about field order field_data = sorted(field_data, key=lambda f: f[0]) - if len(field_data) > 1 and all(offset == 0 for 
offset, name, ctype in field_data): + if len(field_data) > 1 and all(offset == 0 for offset, _, _ in field_data): # union, if multiple fields all at address 0 size = 0 _fields_ = [] @@ -422,11 +423,11 @@ def _ctype_from_dtype_structured(dtype): _fields_.append(('', ctypes.c_char * dtype.itemsize)) # we inserted manual padding, so always `_pack_` - return type('union', (ctypes.Union,), dict( - _fields_=_fields_, - _pack_=1, - __module__=None, - )) + return type('union', (ctypes.Union,), { + '_fields_': _fields_, + '_pack_': 1, + '__module__': None, + }) else: last_offset = 0 _fields_ = [] @@ -440,18 +441,16 @@ def _ctype_from_dtype_structured(dtype): _fields_.append((name, ctype)) last_offset = offset + ctypes.sizeof(ctype) - padding = dtype.itemsize - last_offset if padding > 0: _fields_.append(('', ctypes.c_char * padding)) # we inserted manual padding, so always `_pack_` - return type('struct', (ctypes.Structure,), dict( - _fields_=_fields_, - _pack_=1, - __module__=None, - )) - + return type('struct', (ctypes.Structure,), { + '_fields_': _fields_, + '_pack_': 1, + '__module__': None, + }) def _ctype_from_dtype(dtype): if dtype.fields is not None: @@ -461,7 +460,7 @@ def _ctype_from_dtype(dtype): else: return _ctype_from_dtype_scalar(dtype) - + @set_module("numpy.ctypeslib") def as_ctypes_type(dtype): r""" Convert a dtype into a ctypes type. @@ -499,10 +498,26 @@ def as_ctypes_type(dtype): `ctypes.Structure`\ s - insert padding fields - """ - return _ctype_from_dtype(_dtype(dtype)) + Examples + -------- + Converting a simple dtype: + >>> dt = np.dtype(np.int8) + >>> ctype = np.ctypeslib.as_ctypes_type(dt) + >>> ctype + + Converting a structured dtype: + + >>> dt = np.dtype([('x', 'i4'), ('y', 'f4')]) + >>> ctype = np.ctypeslib.as_ctypes_type(dt) + >>> ctype + + + """ + return _ctype_from_dtype(np.dtype(dtype)) + + @set_module("numpy.ctypeslib") def as_array(obj, shape=None): """ Create a numpy array from a ctypes array or POINTER. 
@@ -511,6 +526,26 @@ def as_array(obj, shape=None): The shape parameter must be given if converting from a ctypes POINTER. The shape parameter is ignored if converting from a ctypes array + + Examples + -------- + Converting a ctypes integer array: + + >>> import ctypes + >>> ctypes_array = (ctypes.c_int * 5)(0, 1, 2, 3, 4) + >>> np_array = np.ctypeslib.as_array(ctypes_array) + >>> np_array + array([0, 1, 2, 3, 4], dtype=int32) + + Converting a ctypes POINTER: + + >>> import ctypes + >>> buffer = (ctypes.c_int * 5)(0, 1, 2, 3, 4) + >>> pointer = ctypes.cast(buffer, ctypes.POINTER(ctypes.c_int)) + >>> np_array = np.ctypeslib.as_array(pointer, (5,)) + >>> np_array + array([0, 1, 2, 3, 4], dtype=int32) + """ if isinstance(obj, ctypes._Pointer): # convert pointers to an array of the desired shape @@ -521,12 +556,35 @@ def as_array(obj, shape=None): p_arr_type = ctypes.POINTER(_ctype_ndarray(obj._type_, shape)) obj = ctypes.cast(obj, p_arr_type).contents - return asarray(obj) - + return np.asarray(obj) + @set_module("numpy.ctypeslib") def as_ctypes(obj): - """Create and return a ctypes object from a numpy array. Actually - anything that exposes the __array_interface__ is accepted.""" + """ + Create and return a ctypes object from a numpy array. Actually + anything that exposes the __array_interface__ is accepted. 
+ + Examples + -------- + Create ctypes object from inferred int ``np.array``: + + >>> inferred_int_array = np.array([1, 2, 3]) + >>> c_int_array = np.ctypeslib.as_ctypes(inferred_int_array) + >>> type(c_int_array) + + >>> c_int_array[:] + [1, 2, 3] + + Create ctypes object from explicit 8 bit unsigned int ``np.array`` : + + >>> exp_int_array = np.array([1, 2, 3], dtype=np.uint8) + >>> c_int_array = np.ctypeslib.as_ctypes(exp_int_array) + >>> type(c_int_array) + + >>> c_int_array[:] + [1, 2, 3] + + """ ai = obj.__array_interface__ if ai["strides"]: raise TypeError("strided arrays not supported") diff --git a/numpy/ctypeslib/_ctypeslib.pyi b/numpy/ctypeslib/_ctypeslib.pyi new file mode 100644 index 000000000000..3ab72549f472 --- /dev/null +++ b/numpy/ctypeslib/_ctypeslib.pyi @@ -0,0 +1,222 @@ +import ctypes as ct +from _typeshed import StrOrBytesPath +from collections.abc import Iterable, Sequence +from typing import Any, ClassVar, Literal as L, overload + +import numpy as np +from numpy._core._internal import _ctypes +from numpy._core.multiarray import flagsobj +from numpy._typing import ( + DTypeLike, + NDArray, + _AnyShape, + _ArrayLike, + _BoolCodes, + _DTypeLike, + _Float32Codes, + _Float64Codes, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _IntCCodes, + _IntPCodes, + _LongCodes, + _LongDoubleCodes, + _LongLongCodes, + _ShapeLike, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _UIntCCodes, + _UIntPCodes, + _ULongCodes, + _ULongLongCodes, + _VoidDTypeLike, +) + +__all__ = ["load_library", "ndpointer", "c_intp", "as_ctypes", "as_array", "as_ctypes_type"] + +type _FlagsKind = L[ + "C_CONTIGUOUS", "CONTIGUOUS", "C", + "F_CONTIGUOUS", "FORTRAN", "F", + "ALIGNED", "A", + "WRITEABLE", "W", + "OWNDATA", "O", + "WRITEBACKIFCOPY", "X", +] + +# TODO: Add a shape type parameter +class _ndptr[OptionalDTypeT: np.dtype | None](ct.c_void_p): + # In practice these 4 classvars are defined in the dynamic class + # returned by `ndpointer` + 
_dtype_: OptionalDTypeT = ... + _shape_: ClassVar[_AnyShape | None] = ... + _ndim_: ClassVar[int | None] = ... + _flags_: ClassVar[list[_FlagsKind] | None] = ... + + @overload # type: ignore[override] + @classmethod + def from_param(cls: type[_ndptr[None]], obj: np.ndarray) -> _ctypes[Any]: ... # pyrefly: ignore[bad-override] + @overload + @classmethod + def from_param[DTypeT: np.dtype](cls: type[_ndptr[DTypeT]], obj: np.ndarray[Any, DTypeT]) -> _ctypes[Any]: ... # pyright: ignore[reportIncompatibleMethodOverride] + +class _concrete_ndptr[DTypeT: np.dtype](_ndptr[DTypeT]): + _dtype_: DTypeT = ... + _shape_: ClassVar[_AnyShape] = ... # pyright: ignore[reportIncompatibleVariableOverride] + + @property + def contents(self) -> np.ndarray[_AnyShape, DTypeT]: ... + +def load_library(libname: StrOrBytesPath, loader_path: StrOrBytesPath) -> ct.CDLL: ... + +c_intp = ct.c_int64 # most platforms are 64-bit nowadays + +# +@overload +def ndpointer( + dtype: None = None, + ndim: int | None = None, + shape: _ShapeLike | None = None, + flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, +) -> type[_ndptr[None]]: ... +@overload +def ndpointer[ScalarT: np.generic]( + dtype: _DTypeLike[ScalarT], + ndim: int | None = None, + *, + shape: _ShapeLike, + flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, +) -> type[_concrete_ndptr[np.dtype[ScalarT]]]: ... +@overload +def ndpointer( + dtype: DTypeLike | None, + ndim: int | None = None, + *, + shape: _ShapeLike, + flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, +) -> type[_concrete_ndptr[np.dtype]]: ... +@overload +def ndpointer[ScalarT: np.generic]( + dtype: _DTypeLike[ScalarT], + ndim: int | None = None, + shape: None = None, + flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, +) -> type[_ndptr[np.dtype[ScalarT]]]: ... 
+@overload +def ndpointer( + dtype: DTypeLike | None, + ndim: int | None = None, + shape: None = None, + flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, +) -> type[_ndptr[np.dtype]]: ... + +# +@overload # bool +def as_ctypes_type(dtype: _BoolCodes | _DTypeLike[np.bool] | type[ct.c_bool]) -> type[ct.c_bool]: ... +@overload # int8 +def as_ctypes_type(dtype: _Int8Codes | _DTypeLike[np.int8] | type[ct.c_int8]) -> type[ct.c_int8]: ... +@overload # int16 +def as_ctypes_type(dtype: _Int16Codes | _DTypeLike[np.int16] | type[ct.c_int16]) -> type[ct.c_int16]: ... +@overload # int32 +def as_ctypes_type(dtype: _Int32Codes | _DTypeLike[np.int32] | type[ct.c_int32]) -> type[ct.c_int32]: ... +@overload # int64 +def as_ctypes_type(dtype: _Int64Codes | _DTypeLike[np.int64] | type[ct.c_int64]) -> type[ct.c_int64]: ... +@overload # intc +def as_ctypes_type(dtype: _IntCCodes | type[ct.c_int]) -> type[ct.c_int]: ... +@overload # long +def as_ctypes_type(dtype: _LongCodes | type[ct.c_long]) -> type[ct.c_long]: ... +@overload # longlong +def as_ctypes_type(dtype: _LongLongCodes | type[ct.c_longlong]) -> type[ct.c_longlong]: ... +@overload # intp +def as_ctypes_type(dtype: _IntPCodes | type[ct.c_ssize_t] | type[int]) -> type[ct.c_ssize_t]: ... +@overload # uint8 +def as_ctypes_type(dtype: _UInt8Codes | _DTypeLike[np.uint8] | type[ct.c_uint8]) -> type[ct.c_uint8]: ... +@overload # uint16 +def as_ctypes_type(dtype: _UInt16Codes | _DTypeLike[np.uint16] | type[ct.c_uint16]) -> type[ct.c_uint16]: ... +@overload # uint32 +def as_ctypes_type(dtype: _UInt32Codes | _DTypeLike[np.uint32] | type[ct.c_uint32]) -> type[ct.c_uint32]: ... +@overload # uint64 +def as_ctypes_type(dtype: _UInt64Codes | _DTypeLike[np.uint64] | type[ct.c_uint64]) -> type[ct.c_uint64]: ... +@overload # uintc +def as_ctypes_type(dtype: _UIntCCodes | type[ct.c_uint]) -> type[ct.c_uint]: ... +@overload # ulong +def as_ctypes_type(dtype: _ULongCodes | type[ct.c_ulong]) -> type[ct.c_ulong]: ... 
+@overload # ulonglong +def as_ctypes_type(dtype: _ULongLongCodes | type[ct.c_ulonglong]) -> type[ct.c_ulonglong]: ... +@overload # uintp +def as_ctypes_type(dtype: _UIntPCodes | type[ct.c_size_t]) -> type[ct.c_size_t]: ... +@overload # float32 +def as_ctypes_type(dtype: _Float32Codes | _DTypeLike[np.float32] | type[ct.c_float]) -> type[ct.c_float]: ... +@overload # float64 +def as_ctypes_type(dtype: _Float64Codes | _DTypeLike[np.float64] | type[float | ct.c_double]) -> type[ct.c_double]: ... +@overload # longdouble +def as_ctypes_type(dtype: _LongDoubleCodes | _DTypeLike[np.longdouble] | type[ct.c_longdouble]) -> type[ct.c_longdouble]: ... +@overload # void +def as_ctypes_type(dtype: _VoidDTypeLike) -> type[Any]: ... # `ct.Union` or `ct.Structure` +@overload # fallback +def as_ctypes_type(dtype: str) -> type[Any]: ... + +# +@overload +def as_array(obj: ct._PointerLike, shape: Sequence[int]) -> NDArray[Any]: ... +@overload +def as_array[ScalarT: np.generic](obj: _ArrayLike[ScalarT], shape: _ShapeLike | None = None) -> NDArray[ScalarT]: ... +@overload +def as_array(obj: object, shape: _ShapeLike | None = None) -> NDArray[Any]: ... + +# +@overload +def as_ctypes(obj: np.bool) -> ct.c_bool: ... +@overload +def as_ctypes(obj: np.int8) -> ct.c_int8: ... +@overload +def as_ctypes(obj: np.int16) -> ct.c_int16: ... +@overload +def as_ctypes(obj: np.int32) -> ct.c_int32: ... +@overload +def as_ctypes(obj: np.int64) -> ct.c_int64: ... +@overload +def as_ctypes(obj: np.uint8) -> ct.c_uint8: ... +@overload +def as_ctypes(obj: np.uint16) -> ct.c_uint16: ... +@overload +def as_ctypes(obj: np.uint32) -> ct.c_uint32: ... +@overload +def as_ctypes(obj: np.uint64) -> ct.c_uint64: ... +@overload +def as_ctypes(obj: np.float32) -> ct.c_float: ... +@overload +def as_ctypes(obj: np.float64) -> ct.c_double: ... +@overload +def as_ctypes(obj: np.longdouble) -> ct.c_longdouble: ... +@overload +def as_ctypes(obj: np.void) -> Any: ... 
# `ct.Union` or `ct.Structure` +@overload +def as_ctypes(obj: NDArray[np.bool]) -> ct.Array[ct.c_bool]: ... +@overload +def as_ctypes(obj: NDArray[np.int8]) -> ct.Array[ct.c_int8]: ... +@overload +def as_ctypes(obj: NDArray[np.int16]) -> ct.Array[ct.c_int16]: ... +@overload +def as_ctypes(obj: NDArray[np.int32]) -> ct.Array[ct.c_int32]: ... +@overload +def as_ctypes(obj: NDArray[np.int64]) -> ct.Array[ct.c_int64]: ... +@overload +def as_ctypes(obj: NDArray[np.uint8]) -> ct.Array[ct.c_uint8]: ... +@overload +def as_ctypes(obj: NDArray[np.uint16]) -> ct.Array[ct.c_uint16]: ... +@overload +def as_ctypes(obj: NDArray[np.uint32]) -> ct.Array[ct.c_uint32]: ... +@overload +def as_ctypes(obj: NDArray[np.uint64]) -> ct.Array[ct.c_uint64]: ... +@overload +def as_ctypes(obj: NDArray[np.float32]) -> ct.Array[ct.c_float]: ... +@overload +def as_ctypes(obj: NDArray[np.float64]) -> ct.Array[ct.c_double]: ... +@overload +def as_ctypes(obj: NDArray[np.longdouble]) -> ct.Array[ct.c_longdouble]: ... +@overload +def as_ctypes(obj: NDArray[np.void]) -> ct.Array[Any]: ... # `ct.Union` or `ct.Structure` diff --git a/numpy/distutils/__init__.py b/numpy/distutils/__init__.py deleted file mode 100644 index f74ed4d3f6db..000000000000 --- a/numpy/distutils/__init__.py +++ /dev/null @@ -1,64 +0,0 @@ -""" -An enhanced distutils, providing support for Fortran compilers, for BLAS, -LAPACK and other common libraries for numerical computing, and more. - -Public submodules are:: - - misc_util - system_info - cpu_info - log - exec_command - -For details, please see the *Packaging* and *NumPy Distutils User Guide* -sections of the NumPy Reference Guide. - -For configuring the preference for and location of libraries like BLAS and -LAPACK, and for setting include paths and similar build options, please see -``site.cfg.example`` in the root of the NumPy repository or sdist. - -""" - -import warnings - -# Must import local ccompiler ASAP in order to get -# customized CCompiler.spawn effective. -from . 
import ccompiler -from . import unixccompiler - -from .npy_pkg_config import * - -warnings.warn("\n\n" - " `numpy.distutils` is deprecated since NumPy 1.23.0, as a result\n" - " of the deprecation of `distutils` itself. It will be removed for\n" - " Python >= 3.12. For older Python versions it will remain present.\n" - " It is recommended to use `setuptools < 60.0` for those Python versions.\n" - " For more details, see:\n" - " https://numpy.org/devdocs/reference/distutils_status_migration.html \n\n", - DeprecationWarning, stacklevel=2 -) -del warnings - -# If numpy is installed, add distutils.test() -try: - from . import __config__ - # Normally numpy is installed if the above import works, but an interrupted - # in-place build could also have left a __config__.py. In that case the - # next import may still fail, so keep it inside the try block. - from numpy._pytesttester import PytestTester - test = PytestTester(__name__) - del PytestTester -except ImportError: - pass - - -def customized_fcompiler(plat=None, compiler=None): - from numpy.distutils.fcompiler import new_fcompiler - c = new_fcompiler(plat=plat, compiler=compiler) - c.customize() - return c - -def customized_ccompiler(plat=None, compiler=None, verbose=1): - c = ccompiler.new_compiler(plat=plat, compiler=compiler, verbose=verbose) - c.customize('') - return c diff --git a/numpy/distutils/__init__.pyi b/numpy/distutils/__init__.pyi deleted file mode 100644 index 3938d68de14c..000000000000 --- a/numpy/distutils/__init__.pyi +++ /dev/null @@ -1,4 +0,0 @@ -from typing import Any - -# TODO: remove when the full numpy namespace is defined -def __getattr__(name: str) -> Any: ... diff --git a/numpy/distutils/_shell_utils.py b/numpy/distutils/_shell_utils.py deleted file mode 100644 index 9a1c8ce718c9..000000000000 --- a/numpy/distutils/_shell_utils.py +++ /dev/null @@ -1,87 +0,0 @@ -""" -Helper functions for interacting with the shell, and consuming shell-style -parameters provided in config files. 
-""" -import os -import shlex -import subprocess - -__all__ = ['WindowsParser', 'PosixParser', 'NativeParser'] - - -class CommandLineParser: - """ - An object that knows how to split and join command-line arguments. - - It must be true that ``argv == split(join(argv))`` for all ``argv``. - The reverse neednt be true - `join(split(cmd))` may result in the addition - or removal of unnecessary escaping. - """ - @staticmethod - def join(argv): - """ Join a list of arguments into a command line string """ - raise NotImplementedError - - @staticmethod - def split(cmd): - """ Split a command line string into a list of arguments """ - raise NotImplementedError - - -class WindowsParser: - """ - The parsing behavior used by `subprocess.call("string")` on Windows, which - matches the Microsoft C/C++ runtime. - - Note that this is _not_ the behavior of cmd. - """ - @staticmethod - def join(argv): - # note that list2cmdline is specific to the windows syntax - return subprocess.list2cmdline(argv) - - @staticmethod - def split(cmd): - import ctypes # guarded import for systems without ctypes - try: - ctypes.windll - except AttributeError: - raise NotImplementedError - - # Windows has special parsing rules for the executable (no quotes), - # that we do not care about - insert a dummy element - if not cmd: - return [] - cmd = 'dummy ' + cmd - - CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW - CommandLineToArgvW.restype = ctypes.POINTER(ctypes.c_wchar_p) - CommandLineToArgvW.argtypes = (ctypes.c_wchar_p, ctypes.POINTER(ctypes.c_int)) - - nargs = ctypes.c_int() - lpargs = CommandLineToArgvW(cmd, ctypes.byref(nargs)) - args = [lpargs[i] for i in range(nargs.value)] - assert not ctypes.windll.kernel32.LocalFree(lpargs) - - # strip the element we inserted - assert args[0] == "dummy" - return args[1:] - - -class PosixParser: - """ - The parsing behavior used by `subprocess.call("string", shell=True)` on Posix. 
- """ - @staticmethod - def join(argv): - return ' '.join(shlex.quote(arg) for arg in argv) - - @staticmethod - def split(cmd): - return shlex.split(cmd, posix=True) - - -if os.name == 'nt': - NativeParser = WindowsParser -elif os.name == 'posix': - NativeParser = PosixParser diff --git a/numpy/distutils/armccompiler.py b/numpy/distutils/armccompiler.py deleted file mode 100644 index afba7eb3b352..000000000000 --- a/numpy/distutils/armccompiler.py +++ /dev/null @@ -1,26 +0,0 @@ -from distutils.unixccompiler import UnixCCompiler - -class ArmCCompiler(UnixCCompiler): - - """ - Arm compiler. - """ - - compiler_type = 'arm' - cc_exe = 'armclang' - cxx_exe = 'armclang++' - - def __init__(self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__(self, verbose, dry_run, force) - cc_compiler = self.cc_exe - cxx_compiler = self.cxx_exe - self.set_executables(compiler=cc_compiler + - ' -O3 -fPIC', - compiler_so=cc_compiler + - ' -O3 -fPIC', - compiler_cxx=cxx_compiler + - ' -O3 -fPIC', - linker_exe=cc_compiler + - ' -lamath', - linker_so=cc_compiler + - ' -lamath -shared') diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py deleted file mode 100644 index 99f336af1584..000000000000 --- a/numpy/distutils/ccompiler.py +++ /dev/null @@ -1,826 +0,0 @@ -import os -import re -import sys -import platform -import shlex -import time -import subprocess -from copy import copy -from pathlib import Path -from distutils import ccompiler -from distutils.ccompiler import ( - compiler_class, gen_lib_options, get_default_compiler, new_compiler, - CCompiler -) -from distutils.errors import ( - DistutilsExecError, DistutilsModuleError, DistutilsPlatformError, - CompileError, UnknownFileError -) -from distutils.sysconfig import customize_compiler -from distutils.version import LooseVersion - -from numpy.distutils import log -from numpy.distutils.exec_command import ( - filepath_from_subprocess_output, forward_bytes_to_stdout -) -from numpy.distutils.misc_util import 
cyg2win32, is_sequence, mingw32, \ - get_num_build_jobs, \ - _commandline_dep_string, \ - sanitize_cxx_flags - -# globals for parallel build management -import threading - -_job_semaphore = None -_global_lock = threading.Lock() -_processing_files = set() - - -def _needs_build(obj, cc_args, extra_postargs, pp_opts): - """ - Check if an objects needs to be rebuild based on its dependencies - - Parameters - ---------- - obj : str - object file - - Returns - ------- - bool - """ - # defined in unixcompiler.py - dep_file = obj + '.d' - if not os.path.exists(dep_file): - return True - - # dep_file is a makefile containing 'object: dependencies' - # formatted like posix shell (spaces escaped, \ line continuations) - # the last line contains the compiler commandline arguments as some - # projects may compile an extension multiple times with different - # arguments - with open(dep_file) as f: - lines = f.readlines() - - cmdline =_commandline_dep_string(cc_args, extra_postargs, pp_opts) - last_cmdline = lines[-1] - if last_cmdline != cmdline: - return True - - contents = ''.join(lines[:-1]) - deps = [x for x in shlex.split(contents, posix=True) - if x != "\n" and not x.endswith(":")] - - try: - t_obj = os.stat(obj).st_mtime - - # check if any of the dependencies is newer than the object - # the dependencies includes the source used to create the object - for f in deps: - if os.stat(f).st_mtime > t_obj: - return True - except OSError: - # no object counts as newer (shouldn't happen if dep_file exists) - return True - - return False - - -def replace_method(klass, method_name, func): - # Py3k does not have unbound method anymore, MethodType does not work - m = lambda self, *args, **kw: func(self, *args, **kw) - setattr(klass, method_name, m) - - -###################################################################### -## Method that subclasses may redefine. But don't call this method, -## it i private to CCompiler class and may return unexpected -## results if used elsewhere. 
So, you have been warned.. - -def CCompiler_find_executables(self): - """ - Does nothing here, but is called by the get_version method and can be - overridden by subclasses. In particular it is redefined in the `FCompiler` - class where more documentation can be found. - - """ - pass - - -replace_method(CCompiler, 'find_executables', CCompiler_find_executables) - - -# Using customized CCompiler.spawn. -def CCompiler_spawn(self, cmd, display=None, env=None): - """ - Execute a command in a sub-process. - - Parameters - ---------- - cmd : str - The command to execute. - display : str or sequence of str, optional - The text to add to the log file kept by `numpy.distutils`. - If not given, `display` is equal to `cmd`. - env : a dictionary for environment variables, optional - - Returns - ------- - None - - Raises - ------ - DistutilsExecError - If the command failed, i.e. the exit status was not 0. - - """ - env = env if env is not None else dict(os.environ) - if display is None: - display = cmd - if is_sequence(display): - display = ' '.join(list(display)) - log.info(display) - try: - if self.verbose: - subprocess.check_output(cmd, env=env) - else: - subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env) - except subprocess.CalledProcessError as exc: - o = exc.output - s = exc.returncode - except OSError as e: - # OSError doesn't have the same hooks for the exception - # output, but exec_command() historically would use an - # empty string for EnvironmentError (base class for - # OSError) - # o = b'' - # still that would make the end-user lost in translation! 
- o = f"\n\n{e}\n\n\n" - try: - o = o.encode(sys.stdout.encoding) - except AttributeError: - o = o.encode('utf8') - # status previously used by exec_command() for parent - # of OSError - s = 127 - else: - # use a convenience return here so that any kind of - # caught exception will execute the default code after the - # try / except block, which handles various exceptions - return None - - if is_sequence(cmd): - cmd = ' '.join(list(cmd)) - - if self.verbose: - forward_bytes_to_stdout(o) - - if re.search(b'Too many open files', o): - msg = '\nTry rerunning setup command until build succeeds.' - else: - msg = '' - raise DistutilsExecError('Command "%s" failed with exit status %d%s' % - (cmd, s, msg)) - -replace_method(CCompiler, 'spawn', CCompiler_spawn) - -def CCompiler_object_filenames(self, source_filenames, strip_dir=0, output_dir=''): - """ - Return the name of the object files for the given source files. - - Parameters - ---------- - source_filenames : list of str - The list of paths to source files. Paths can be either relative or - absolute, this is handled transparently. - strip_dir : bool, optional - Whether to strip the directory from the returned paths. If True, - the file name prepended by `output_dir` is returned. Default is False. - output_dir : str, optional - If given, this path is prepended to the returned paths to the - object files. - - Returns - ------- - obj_names : list of str - The list of paths to the object files corresponding to the source - files in `source_filenames`. - - """ - if output_dir is None: - output_dir = '' - obj_names = [] - for src_name in source_filenames: - base, ext = os.path.splitext(os.path.normpath(src_name)) - base = os.path.splitdrive(base)[1] # Chop off the drive - base = base[os.path.isabs(base):] # If abs, chop off leading / - if base.startswith('..'): - # Resolve starting relative path components, middle ones - # (if any) have been handled by os.path.normpath above. 
- i = base.rfind('..')+2 - d = base[:i] - d = os.path.basename(os.path.abspath(d)) - base = d + base[i:] - if ext not in self.src_extensions: - raise UnknownFileError("unknown file type '%s' (from '%s')" % (ext, src_name)) - if strip_dir: - base = os.path.basename(base) - obj_name = os.path.join(output_dir, base + self.obj_extension) - obj_names.append(obj_name) - return obj_names - -replace_method(CCompiler, 'object_filenames', CCompiler_object_filenames) - -def CCompiler_compile(self, sources, output_dir=None, macros=None, - include_dirs=None, debug=0, extra_preargs=None, - extra_postargs=None, depends=None): - """ - Compile one or more source files. - - Please refer to the Python distutils API reference for more details. - - Parameters - ---------- - sources : list of str - A list of filenames - output_dir : str, optional - Path to the output directory. - macros : list of tuples - A list of macro definitions. - include_dirs : list of str, optional - The directories to add to the default include file search path for - this compilation only. - debug : bool, optional - Whether or not to output debug symbols in or alongside the object - file(s). - extra_preargs, extra_postargs : ? - Extra pre- and post-arguments. - depends : list of str, optional - A list of file names that all targets depend on. - - Returns - ------- - objects : list of str - A list of object file names, one per source file `sources`. - - Raises - ------ - CompileError - If compilation fails. 
- - """ - global _job_semaphore - - jobs = get_num_build_jobs() - - # setup semaphore to not exceed number of compile jobs when parallelized at - # extension level (python >= 3.5) - with _global_lock: - if _job_semaphore is None: - _job_semaphore = threading.Semaphore(jobs) - - if not sources: - return [] - from numpy.distutils.fcompiler import (FCompiler, - FORTRAN_COMMON_FIXED_EXTENSIONS, - has_f90_header) - if isinstance(self, FCompiler): - display = [] - for fc in ['f77', 'f90', 'fix']: - fcomp = getattr(self, 'compiler_'+fc) - if fcomp is None: - continue - display.append("Fortran %s compiler: %s" % (fc, ' '.join(fcomp))) - display = '\n'.join(display) - else: - ccomp = self.compiler_so - display = "C compiler: %s\n" % (' '.join(ccomp),) - log.info(display) - macros, objects, extra_postargs, pp_opts, build = \ - self._setup_compile(output_dir, macros, include_dirs, sources, - depends, extra_postargs) - cc_args = self._get_cc_args(pp_opts, debug, extra_preargs) - display = "compile options: '%s'" % (' '.join(cc_args)) - if extra_postargs: - display += "\nextra options: '%s'" % (' '.join(extra_postargs)) - log.info(display) - - def single_compile(args): - obj, (src, ext) = args - if not _needs_build(obj, cc_args, extra_postargs, pp_opts): - return - - # check if we are currently already processing the same object - # happens when using the same source in multiple extensions - while True: - # need explicit lock as there is no atomic check and add with GIL - with _global_lock: - # file not being worked on, start working - if obj not in _processing_files: - _processing_files.add(obj) - break - # wait for the processing to end - time.sleep(0.1) - - try: - # retrieve slot from our #job semaphore and build - with _job_semaphore: - self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) - finally: - # register being done processing - with _global_lock: - _processing_files.remove(obj) - - - if isinstance(self, FCompiler): - objects_to_build = list(build.keys()) - 
f77_objects, other_objects = [], [] - for obj in objects: - if obj in objects_to_build: - src, ext = build[obj] - if self.compiler_type=='absoft': - obj = cyg2win32(obj) - src = cyg2win32(src) - if Path(src).suffix.lower() in FORTRAN_COMMON_FIXED_EXTENSIONS \ - and not has_f90_header(src): - f77_objects.append((obj, (src, ext))) - else: - other_objects.append((obj, (src, ext))) - - # f77 objects can be built in parallel - build_items = f77_objects - # build f90 modules serial, module files are generated during - # compilation and may be used by files later in the list so the - # ordering is important - for o in other_objects: - single_compile(o) - else: - build_items = build.items() - - if len(build) > 1 and jobs > 1: - # build parallel - from concurrent.futures import ThreadPoolExecutor - with ThreadPoolExecutor(jobs) as pool: - res = pool.map(single_compile, build_items) - list(res) # access result to raise errors - else: - # build serial - for o in build_items: - single_compile(o) - - # Return *all* object filenames, not just the ones we just built. - return objects - -replace_method(CCompiler, 'compile', CCompiler_compile) - -def CCompiler_customize_cmd(self, cmd, ignore=()): - """ - Customize compiler using distutils command. - - Parameters - ---------- - cmd : class instance - An instance inheriting from ``distutils.cmd.Command``. - ignore : sequence of str, optional - List of ``distutils.ccompiler.CCompiler`` commands (without ``'set_'``) that should not be - altered. Strings that are checked for are: - ``('include_dirs', 'define', 'undef', 'libraries', 'library_dirs', - 'rpath', 'link_objects')``. - - Returns - ------- - None - - """ - log.info('customize %s using %s' % (self.__class__.__name__, - cmd.__class__.__name__)) - - if ( - hasattr(self, 'compiler') and - 'clang' in self.compiler[0] and - not (platform.machine() == 'arm64' and sys.platform == 'darwin') - ): - # clang defaults to a non-strict floating error point model. 
- # However, '-ftrapping-math' is not currently supported (2023-04-08) - # for macosx_arm64. - # Since NumPy and most Python libs give warnings for these, override: - self.compiler.append('-ftrapping-math') - self.compiler_so.append('-ftrapping-math') - - def allow(attr): - return getattr(cmd, attr, None) is not None and attr not in ignore - - if allow('include_dirs'): - self.set_include_dirs(cmd.include_dirs) - if allow('define'): - for (name, value) in cmd.define: - self.define_macro(name, value) - if allow('undef'): - for macro in cmd.undef: - self.undefine_macro(macro) - if allow('libraries'): - self.set_libraries(self.libraries + cmd.libraries) - if allow('library_dirs'): - self.set_library_dirs(self.library_dirs + cmd.library_dirs) - if allow('rpath'): - self.set_runtime_library_dirs(cmd.rpath) - if allow('link_objects'): - self.set_link_objects(cmd.link_objects) - -replace_method(CCompiler, 'customize_cmd', CCompiler_customize_cmd) - -def _compiler_to_string(compiler): - props = [] - mx = 0 - keys = list(compiler.executables.keys()) - for key in ['version', 'libraries', 'library_dirs', - 'object_switch', 'compile_switch', - 'include_dirs', 'define', 'undef', 'rpath', 'link_objects']: - if key not in keys: - keys.append(key) - for key in keys: - if hasattr(compiler, key): - v = getattr(compiler, key) - mx = max(mx, len(key)) - props.append((key, repr(v))) - fmt = '%-' + repr(mx+1) + 's = %s' - lines = [fmt % prop for prop in props] - return '\n'.join(lines) - -def CCompiler_show_customization(self): - """ - Print the compiler customizations to stdout. - - Parameters - ---------- - None - - Returns - ------- - None - - Notes - ----- - Printing is only done if the distutils log threshold is < 2. 
- - """ - try: - self.get_version() - except Exception: - pass - if log._global_log.threshold<2: - print('*'*80) - print(self.__class__) - print(_compiler_to_string(self)) - print('*'*80) - -replace_method(CCompiler, 'show_customization', CCompiler_show_customization) - -def CCompiler_customize(self, dist, need_cxx=0): - """ - Do any platform-specific customization of a compiler instance. - - This method calls ``distutils.sysconfig.customize_compiler`` for - platform-specific customization, as well as optionally remove a flag - to suppress spurious warnings in case C++ code is being compiled. - - Parameters - ---------- - dist : object - This parameter is not used for anything. - need_cxx : bool, optional - Whether or not C++ has to be compiled. If so (True), the - ``"-Wstrict-prototypes"`` option is removed to prevent spurious - warnings. Default is False. - - Returns - ------- - None - - Notes - ----- - All the default options used by distutils can be extracted with:: - - from distutils import sysconfig - sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS', - 'CCSHARED', 'LDSHARED', 'SO') - - """ - # See FCompiler.customize for suggested usage. - log.info('customize %s' % (self.__class__.__name__)) - customize_compiler(self) - if need_cxx: - # In general, distutils uses -Wstrict-prototypes, but this option is - # not valid for C++ code, only for C. Remove it if it's there to - # avoid a spurious warning on every compilation. 
- try: - self.compiler_so.remove('-Wstrict-prototypes') - except (AttributeError, ValueError): - pass - - if hasattr(self, 'compiler') and 'cc' in self.compiler[0]: - if not self.compiler_cxx: - if self.compiler[0].startswith('gcc'): - a, b = 'gcc', 'g++' - else: - a, b = 'cc', 'c++' - self.compiler_cxx = [self.compiler[0].replace(a, b)]\ - + self.compiler[1:] - else: - if hasattr(self, 'compiler'): - log.warn("#### %s #######" % (self.compiler,)) - if not hasattr(self, 'compiler_cxx'): - log.warn('Missing compiler_cxx fix for ' + self.__class__.__name__) - - - # check if compiler supports gcc style automatic dependencies - # run on every extension so skip for known good compilers - if hasattr(self, 'compiler') and ('gcc' in self.compiler[0] or - 'g++' in self.compiler[0] or - 'clang' in self.compiler[0]): - self._auto_depends = True - elif os.name == 'posix': - import tempfile - import shutil - tmpdir = tempfile.mkdtemp() - try: - fn = os.path.join(tmpdir, "file.c") - with open(fn, "w") as f: - f.write("int a;\n") - self.compile([fn], output_dir=tmpdir, - extra_preargs=['-MMD', '-MF', fn + '.d']) - self._auto_depends = True - except CompileError: - self._auto_depends = False - finally: - shutil.rmtree(tmpdir) - - return - -replace_method(CCompiler, 'customize', CCompiler_customize) - -def simple_version_match(pat=r'[-.\d]+', ignore='', start=''): - """ - Simple matching of version numbers, for use in CCompiler and FCompiler. - - Parameters - ---------- - pat : str, optional - A regular expression matching version numbers. - Default is ``r'[-.\\d]+'``. - ignore : str, optional - A regular expression matching patterns to skip. - Default is ``''``, in which case nothing is skipped. - start : str, optional - A regular expression matching the start of where to start looking - for version numbers. - Default is ``''``, in which case searching is started at the - beginning of the version string given to `matcher`. 
- - Returns - ------- - matcher : callable - A function that is appropriate to use as the ``.version_match`` - attribute of a ``distutils.ccompiler.CCompiler`` class. `matcher` takes a single parameter, - a version string. - - """ - def matcher(self, version_string): - # version string may appear in the second line, so getting rid - # of new lines: - version_string = version_string.replace('\n', ' ') - pos = 0 - if start: - m = re.match(start, version_string) - if not m: - return None - pos = m.end() - while True: - m = re.search(pat, version_string[pos:]) - if not m: - return None - if ignore and re.match(ignore, m.group(0)): - pos = m.end() - continue - break - return m.group(0) - return matcher - -def CCompiler_get_version(self, force=False, ok_status=[0]): - """ - Return compiler version, or None if compiler is not available. - - Parameters - ---------- - force : bool, optional - If True, force a new determination of the version, even if the - compiler already has a version attribute. Default is False. - ok_status : list of int, optional - The list of status values returned by the version look-up process - for which a version string is returned. If the status value is not - in `ok_status`, None is returned. Default is ``[0]``. - - Returns - ------- - version : str or None - Version string, in the format of ``distutils.version.LooseVersion``. 
- - """ - if not force and hasattr(self, 'version'): - return self.version - self.find_executables() - try: - version_cmd = self.version_cmd - except AttributeError: - return None - if not version_cmd or not version_cmd[0]: - return None - try: - matcher = self.version_match - except AttributeError: - try: - pat = self.version_pattern - except AttributeError: - return None - def matcher(version_string): - m = re.match(pat, version_string) - if not m: - return None - version = m.group('version') - return version - - try: - output = subprocess.check_output(version_cmd, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as exc: - output = exc.output - status = exc.returncode - except OSError: - # match the historical returns for a parent - # exception class caught by exec_command() - status = 127 - output = b'' - else: - # output isn't actually a filepath but we do this - # for now to match previous distutils behavior - output = filepath_from_subprocess_output(output) - status = 0 - - version = None - if status in ok_status: - version = matcher(output) - if version: - version = LooseVersion(version) - self.version = version - return version - -replace_method(CCompiler, 'get_version', CCompiler_get_version) - -def CCompiler_cxx_compiler(self): - """ - Return the C++ compiler. - - Parameters - ---------- - None - - Returns - ------- - cxx : class instance - The C++ compiler, as a ``distutils.ccompiler.CCompiler`` instance. 
- - """ - if self.compiler_type in ('msvc', 'intelw', 'intelemw'): - return self - - cxx = copy(self) - cxx.compiler_cxx = cxx.compiler_cxx - cxx.compiler_so = [cxx.compiler_cxx[0]] + \ - sanitize_cxx_flags(cxx.compiler_so[1:]) - if (sys.platform.startswith(('aix', 'os400')) and - 'ld_so_aix' in cxx.linker_so[0]): - # AIX needs the ld_so_aix script included with Python - cxx.linker_so = [cxx.linker_so[0], cxx.compiler_cxx[0]] \ - + cxx.linker_so[2:] - if sys.platform.startswith('os400'): - #This is required by i 7.4 and prievous for PRId64 in printf() call. - cxx.compiler_so.append('-D__STDC_FORMAT_MACROS') - #This a bug of gcc10.3, which failed to handle the TLS init. - cxx.compiler_so.append('-fno-extern-tls-init') - cxx.linker_so.append('-fno-extern-tls-init') - else: - cxx.linker_so = [cxx.compiler_cxx[0]] + cxx.linker_so[1:] - return cxx - -replace_method(CCompiler, 'cxx_compiler', CCompiler_cxx_compiler) - -compiler_class['intel'] = ('intelccompiler', 'IntelCCompiler', - "Intel C Compiler for 32-bit applications") -compiler_class['intele'] = ('intelccompiler', 'IntelItaniumCCompiler', - "Intel C Itanium Compiler for Itanium-based applications") -compiler_class['intelem'] = ('intelccompiler', 'IntelEM64TCCompiler', - "Intel C Compiler for 64-bit applications") -compiler_class['intelw'] = ('intelccompiler', 'IntelCCompilerW', - "Intel C Compiler for 32-bit applications on Windows") -compiler_class['intelemw'] = ('intelccompiler', 'IntelEM64TCCompilerW', - "Intel C Compiler for 64-bit applications on Windows") -compiler_class['pathcc'] = ('pathccompiler', 'PathScaleCCompiler', - "PathScale Compiler for SiCortex-based applications") -compiler_class['arm'] = ('armccompiler', 'ArmCCompiler', - "Arm C Compiler") -compiler_class['fujitsu'] = ('fujitsuccompiler', 'FujitsuCCompiler', - "Fujitsu C Compiler") - -ccompiler._default_compilers += (('linux.*', 'intel'), - ('linux.*', 'intele'), - ('linux.*', 'intelem'), - ('linux.*', 'pathcc'), - ('nt', 'intelw'), - ('nt', 
'intelemw')) - -if sys.platform == 'win32': - compiler_class['mingw32'] = ('mingw32ccompiler', 'Mingw32CCompiler', - "Mingw32 port of GNU C Compiler for Win32"\ - "(for MSC built Python)") - if mingw32(): - # On windows platforms, we want to default to mingw32 (gcc) - # because msvc can't build blitz stuff. - log.info('Setting mingw32 as default compiler for nt.') - ccompiler._default_compilers = (('nt', 'mingw32'),) \ - + ccompiler._default_compilers - - -_distutils_new_compiler = new_compiler -def new_compiler (plat=None, - compiler=None, - verbose=None, - dry_run=0, - force=0): - # Try first C compilers from numpy.distutils. - if verbose is None: - verbose = log.get_threshold() <= log.INFO - if plat is None: - plat = os.name - try: - if compiler is None: - compiler = get_default_compiler(plat) - (module_name, class_name, long_description) = compiler_class[compiler] - except KeyError: - msg = "don't know how to compile C/C++ code on platform '%s'" % plat - if compiler is not None: - msg = msg + " with '%s' compiler" % compiler - raise DistutilsPlatformError(msg) - module_name = "numpy.distutils." 
+ module_name - try: - __import__ (module_name) - except ImportError as e: - msg = str(e) - log.info('%s in numpy.distutils; trying from distutils', - str(msg)) - module_name = module_name[6:] - try: - __import__(module_name) - except ImportError as e: - msg = str(e) - raise DistutilsModuleError("can't compile C/C++ code: unable to load module '%s'" % \ - module_name) - try: - module = sys.modules[module_name] - klass = vars(module)[class_name] - except KeyError: - raise DistutilsModuleError(("can't compile C/C++ code: unable to find class '%s' " + - "in module '%s'") % (class_name, module_name)) - compiler = klass(None, dry_run, force) - compiler.verbose = verbose - log.debug('new_compiler returns %s' % (klass)) - return compiler - -ccompiler.new_compiler = new_compiler - -_distutils_gen_lib_options = gen_lib_options -def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries): - # the version of this function provided by CPython allows the following - # to return lists, which are unpacked automatically: - # - compiler.runtime_library_dir_option - # our version extends the behavior to: - # - compiler.library_dir_option - # - compiler.library_option - # - compiler.find_library_file - r = _distutils_gen_lib_options(compiler, library_dirs, - runtime_library_dirs, libraries) - lib_opts = [] - for i in r: - if is_sequence(i): - lib_opts.extend(list(i)) - else: - lib_opts.append(i) - return lib_opts -ccompiler.gen_lib_options = gen_lib_options - -# Also fix up the various compiler modules, which do -# from distutils.ccompiler import gen_lib_options -# Don't bother with mwerks, as we don't support Classic Mac. -for _cc in ['msvc9', 'msvc', '_msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']: - _m = sys.modules.get('distutils.' 
+ _cc + 'compiler') - if _m is not None: - setattr(_m, 'gen_lib_options', gen_lib_options) - diff --git a/numpy/distutils/ccompiler_opt.py b/numpy/distutils/ccompiler_opt.py deleted file mode 100644 index b1a6fa36061c..000000000000 --- a/numpy/distutils/ccompiler_opt.py +++ /dev/null @@ -1,2668 +0,0 @@ -"""Provides the `CCompilerOpt` class, used for handling the CPU/hardware -optimization, starting from parsing the command arguments, to managing the -relation between the CPU baseline and dispatch-able features, -also generating the required C headers and ending with compiling -the sources with proper compiler's flags. - -`CCompilerOpt` doesn't provide runtime detection for the CPU features, -instead only focuses on the compiler side, but it creates abstract C headers -that can be used later for the final runtime dispatching process.""" - -import atexit -import inspect -import os -import pprint -import re -import subprocess -import textwrap - -class _Config: - """An abstract class holds all configurable attributes of `CCompilerOpt`, - these class attributes can be used to change the default behavior - of `CCompilerOpt` in order to fit other requirements. - - Attributes - ---------- - conf_nocache : bool - Set True to disable memory and file cache. - Default is False. - - conf_noopt : bool - Set True to forces the optimization to be disabled, - in this case `CCompilerOpt` tends to generate all - expected headers in order to 'not' break the build. - Default is False. - - conf_cache_factors : list - Add extra factors to the primary caching factors. The caching factors - are utilized to determine if there are changes had happened that - requires to discard the cache and re-updating it. The primary factors - are the arguments of `CCompilerOpt` and `CCompiler`'s properties(type, flags, etc). 
- Default is list of two items, containing the time of last modification - of `ccompiler_opt` and value of attribute "conf_noopt" - - conf_tmp_path : str, - The path of temporary directory. Default is auto-created - temporary directory via ``tempfile.mkdtemp()``. - - conf_check_path : str - The path of testing files. Each added CPU feature must have a - **C** source file contains at least one intrinsic or instruction that - related to this feature, so it can be tested against the compiler. - Default is ``./distutils/checks``. - - conf_target_groups : dict - Extra tokens that can be reached from dispatch-able sources through - the special mark ``@targets``. Default is an empty dictionary. - - **Notes**: - - case-insensitive for tokens and group names - - sign '#' must stick in the begin of group name and only within ``@targets`` - - **Example**: - .. code-block:: console - - $ "@targets #avx_group other_tokens" > group_inside.c - - >>> CCompilerOpt.conf_target_groups["avx_group"] = \\ - "$werror $maxopt avx2 avx512f avx512_skx" - >>> cco = CCompilerOpt(cc_instance) - >>> cco.try_dispatch(["group_inside.c"]) - - conf_c_prefix : str - The prefix of public C definitions. Default is ``"NPY_"``. - - conf_c_prefix_ : str - The prefix of internal C definitions. Default is ``"NPY__"``. - - conf_cc_flags : dict - Nested dictionaries defining several compiler flags - that linked to some major functions, the main key - represent the compiler name and sub-keys represent - flags names. Default is already covers all supported - **C** compilers. - - Sub-keys explained as follows: - - "native": str or None - used by argument option `native`, to detect the current - machine support via the compiler. - "werror": str or None - utilized to treat warning as errors during testing CPU features - against the compiler and also for target's policy `$werror` - via dispatch-able sources. 
- "maxopt": str or None - utilized for target's policy '$maxopt' and the value should - contains the maximum acceptable optimization by the compiler. - e.g. in gcc ``'-O3'`` - - **Notes**: - * case-sensitive for compiler names and flags - * use space to separate multiple flags - * any flag will tested against the compiler and it will skipped - if it's not applicable. - - conf_min_features : dict - A dictionary defines the used CPU features for - argument option ``'min'``, the key represent the CPU architecture - name e.g. ``'x86'``. Default values provide the best effort - on wide range of users platforms. - - **Note**: case-sensitive for architecture names. - - conf_features : dict - Nested dictionaries used for identifying the CPU features. - the primary key is represented as a feature name or group name - that gathers several features. Default values covers all - supported features but without the major options like "flags", - these undefined options handle it by method `conf_features_partial()`. - Default value is covers almost all CPU features for *X86*, *IBM/Power64* - and *ARM 7/8*. - - Sub-keys explained as follows: - - "implies" : str or list, optional, - List of CPU feature names to be implied by it, - the feature name must be defined within `conf_features`. - Default is None. - - "flags": str or list, optional - List of compiler flags. Default is None. - - "detect": str or list, optional - List of CPU feature names that required to be detected - in runtime. By default, its the feature name or features - in "group" if its specified. - - "implies_detect": bool, optional - If True, all "detect" of implied features will be combined. - Default is True. see `feature_detect()`. - - "group": str or list, optional - Same as "implies" but doesn't require the feature name to be - defined within `conf_features`. 
- - "interest": int, required - a key for sorting CPU features - - "headers": str or list, optional - intrinsics C header file - - "disable": str, optional - force disable feature, the string value should contains the - reason of disabling. - - "autovec": bool or None, optional - True or False to declare that CPU feature can be auto-vectorized - by the compiler. - By default(None), treated as True if the feature contains at - least one applicable flag. see `feature_can_autovec()` - - "extra_checks": str or list, optional - Extra test case names for the CPU feature that need to be tested - against the compiler. - - Each test case must have a C file named ``extra_xxxx.c``, where - ``xxxx`` is the case name in lower case, under 'conf_check_path'. - It should contain at least one intrinsic or function related to the test case. - - If the compiler able to successfully compile the C file then `CCompilerOpt` - will add a C ``#define`` for it into the main dispatch header, e.g. - ``#define {conf_c_prefix}_XXXX`` where ``XXXX`` is the case name in upper case. - - **NOTES**: - * space can be used as separator with options that supports "str or list" - * case-sensitive for all values and feature name must be in upper-case. 
- * if flags aren't applicable, its will skipped rather than disable the - CPU feature - * the CPU feature will disabled if the compiler fail to compile - the test file - """ - conf_nocache = False - conf_noopt = False - conf_cache_factors = None - conf_tmp_path = None - conf_check_path = os.path.join( - os.path.dirname(os.path.realpath(__file__)), "checks" - ) - conf_target_groups = {} - conf_c_prefix = 'NPY_' - conf_c_prefix_ = 'NPY__' - conf_cc_flags = dict( - gcc = dict( - # native should always fail on arm and ppc64, - # native usually works only with x86 - native = '-march=native', - opt = '-O3', - werror = '-Werror', - ), - clang = dict( - native = '-march=native', - opt = "-O3", - # One of the following flags needs to be applicable for Clang to - # guarantee the sanity of the testing process, however in certain - # cases `-Werror` gets skipped during the availability test due to - # "unused arguments" warnings. - # see https://github.com/numpy/numpy/issues/19624 - werror = '-Werror=switch -Werror', - ), - icc = dict( - native = '-xHost', - opt = '-O3', - werror = '-Werror', - ), - iccw = dict( - native = '/QxHost', - opt = '/O3', - werror = '/Werror', - ), - msvc = dict( - native = None, - opt = '/O2', - werror = '/WX', - ), - fcc = dict( - native = '-mcpu=a64fx', - opt = None, - werror = None, - ) - ) - conf_min_features = dict( - x86 = "SSE SSE2", - x64 = "SSE SSE2 SSE3", - ppc64 = '', # play it safe - ppc64le = "VSX VSX2", - s390x = '', - armhf = '', # play it safe - aarch64 = "NEON NEON_FP16 NEON_VFPV4 ASIMD" - ) - conf_features = dict( - # X86 - SSE = dict( - interest=1, headers="xmmintrin.h", - # enabling SSE without SSE2 is useless also - # it's non-optional for x86_64 - implies="SSE2" - ), - SSE2 = dict(interest=2, implies="SSE", headers="emmintrin.h"), - SSE3 = dict(interest=3, implies="SSE2", headers="pmmintrin.h"), - SSSE3 = dict(interest=4, implies="SSE3", headers="tmmintrin.h"), - SSE41 = dict(interest=5, implies="SSSE3", 
headers="smmintrin.h"), - POPCNT = dict(interest=6, implies="SSE41", headers="popcntintrin.h"), - SSE42 = dict(interest=7, implies="POPCNT"), - AVX = dict( - interest=8, implies="SSE42", headers="immintrin.h", - implies_detect=False - ), - XOP = dict(interest=9, implies="AVX", headers="x86intrin.h"), - FMA4 = dict(interest=10, implies="AVX", headers="x86intrin.h"), - F16C = dict(interest=11, implies="AVX"), - FMA3 = dict(interest=12, implies="F16C"), - AVX2 = dict(interest=13, implies="F16C"), - AVX512F = dict( - interest=20, implies="FMA3 AVX2", implies_detect=False, - extra_checks="AVX512F_REDUCE" - ), - AVX512CD = dict(interest=21, implies="AVX512F"), - AVX512_KNL = dict( - interest=40, implies="AVX512CD", group="AVX512ER AVX512PF", - detect="AVX512_KNL", implies_detect=False - ), - AVX512_KNM = dict( - interest=41, implies="AVX512_KNL", - group="AVX5124FMAPS AVX5124VNNIW AVX512VPOPCNTDQ", - detect="AVX512_KNM", implies_detect=False - ), - AVX512_SKX = dict( - interest=42, implies="AVX512CD", group="AVX512VL AVX512BW AVX512DQ", - detect="AVX512_SKX", implies_detect=False, - extra_checks="AVX512BW_MASK AVX512DQ_MASK" - ), - AVX512_CLX = dict( - interest=43, implies="AVX512_SKX", group="AVX512VNNI", - detect="AVX512_CLX" - ), - AVX512_CNL = dict( - interest=44, implies="AVX512_SKX", group="AVX512IFMA AVX512VBMI", - detect="AVX512_CNL", implies_detect=False - ), - AVX512_ICL = dict( - interest=45, implies="AVX512_CLX AVX512_CNL", - group="AVX512VBMI2 AVX512BITALG AVX512VPOPCNTDQ", - detect="AVX512_ICL", implies_detect=False - ), - AVX512_SPR = dict( - interest=46, implies="AVX512_ICL", group="AVX512FP16", - detect="AVX512_SPR", implies_detect=False - ), - # IBM/Power - ## Power7/ISA 2.06 - VSX = dict(interest=1, headers="altivec.h", extra_checks="VSX_ASM"), - ## Power8/ISA 2.07 - VSX2 = dict(interest=2, implies="VSX", implies_detect=False), - ## Power9/ISA 3.00 - VSX3 = dict(interest=3, implies="VSX2", implies_detect=False, - extra_checks="VSX3_HALF_DOUBLE"), - ## 
Power10/ISA 3.1 - VSX4 = dict(interest=4, implies="VSX3", implies_detect=False, - extra_checks="VSX4_MMA"), - # IBM/Z - ## VX(z13) support - VX = dict(interest=1, headers="vecintrin.h"), - ## Vector-Enhancements Facility - VXE = dict(interest=2, implies="VX", implies_detect=False), - ## Vector-Enhancements Facility 2 - VXE2 = dict(interest=3, implies="VXE", implies_detect=False), - # ARM - NEON = dict(interest=1, headers="arm_neon.h"), - NEON_FP16 = dict(interest=2, implies="NEON"), - ## FMA - NEON_VFPV4 = dict(interest=3, implies="NEON_FP16"), - ## Advanced SIMD - ASIMD = dict(interest=4, implies="NEON_FP16 NEON_VFPV4", implies_detect=False), - ## ARMv8.2 half-precision & vector arithm - ASIMDHP = dict(interest=5, implies="ASIMD"), - ## ARMv8.2 dot product - ASIMDDP = dict(interest=6, implies="ASIMD"), - ## ARMv8.2 Single & half-precision Multiply - ASIMDFHM = dict(interest=7, implies="ASIMDHP"), - ) - def conf_features_partial(self): - """Return a dictionary of supported CPU features by the platform, - and accumulate the rest of undefined options in `conf_features`, - the returned dict has same rules and notes in - class attribute `conf_features`, also its override - any options that been set in 'conf_features'. 
- """ - if self.cc_noopt: - # optimization is disabled - return {} - - on_x86 = self.cc_on_x86 or self.cc_on_x64 - is_unix = self.cc_is_gcc or self.cc_is_clang or self.cc_is_fcc - - if on_x86 and is_unix: return dict( - SSE = dict(flags="-msse"), - SSE2 = dict(flags="-msse2"), - SSE3 = dict(flags="-msse3"), - SSSE3 = dict(flags="-mssse3"), - SSE41 = dict(flags="-msse4.1"), - POPCNT = dict(flags="-mpopcnt"), - SSE42 = dict(flags="-msse4.2"), - AVX = dict(flags="-mavx"), - F16C = dict(flags="-mf16c"), - XOP = dict(flags="-mxop"), - FMA4 = dict(flags="-mfma4"), - FMA3 = dict(flags="-mfma"), - AVX2 = dict(flags="-mavx2"), - AVX512F = dict(flags="-mavx512f -mno-mmx"), - AVX512CD = dict(flags="-mavx512cd"), - AVX512_KNL = dict(flags="-mavx512er -mavx512pf"), - AVX512_KNM = dict( - flags="-mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq" - ), - AVX512_SKX = dict(flags="-mavx512vl -mavx512bw -mavx512dq"), - AVX512_CLX = dict(flags="-mavx512vnni"), - AVX512_CNL = dict(flags="-mavx512ifma -mavx512vbmi"), - AVX512_ICL = dict( - flags="-mavx512vbmi2 -mavx512bitalg -mavx512vpopcntdq" - ), - AVX512_SPR = dict(flags="-mavx512fp16"), - ) - if on_x86 and self.cc_is_icc: return dict( - SSE = dict(flags="-msse"), - SSE2 = dict(flags="-msse2"), - SSE3 = dict(flags="-msse3"), - SSSE3 = dict(flags="-mssse3"), - SSE41 = dict(flags="-msse4.1"), - POPCNT = {}, - SSE42 = dict(flags="-msse4.2"), - AVX = dict(flags="-mavx"), - F16C = {}, - XOP = dict(disable="Intel Compiler doesn't support it"), - FMA4 = dict(disable="Intel Compiler doesn't support it"), - # Intel Compiler doesn't support AVX2 or FMA3 independently - FMA3 = dict( - implies="F16C AVX2", flags="-march=core-avx2" - ), - AVX2 = dict(implies="FMA3", flags="-march=core-avx2"), - # Intel Compiler doesn't support AVX512F or AVX512CD independently - AVX512F = dict( - implies="AVX2 AVX512CD", flags="-march=common-avx512" - ), - AVX512CD = dict( - implies="AVX2 AVX512F", flags="-march=common-avx512" - ), - AVX512_KNL = 
dict(flags="-xKNL"), - AVX512_KNM = dict(flags="-xKNM"), - AVX512_SKX = dict(flags="-xSKYLAKE-AVX512"), - AVX512_CLX = dict(flags="-xCASCADELAKE"), - AVX512_CNL = dict(flags="-xCANNONLAKE"), - AVX512_ICL = dict(flags="-xICELAKE-CLIENT"), - AVX512_SPR = dict(disable="Not supported yet") - ) - if on_x86 and self.cc_is_iccw: return dict( - SSE = dict(flags="/arch:SSE"), - SSE2 = dict(flags="/arch:SSE2"), - SSE3 = dict(flags="/arch:SSE3"), - SSSE3 = dict(flags="/arch:SSSE3"), - SSE41 = dict(flags="/arch:SSE4.1"), - POPCNT = {}, - SSE42 = dict(flags="/arch:SSE4.2"), - AVX = dict(flags="/arch:AVX"), - F16C = {}, - XOP = dict(disable="Intel Compiler doesn't support it"), - FMA4 = dict(disable="Intel Compiler doesn't support it"), - # Intel Compiler doesn't support FMA3 or AVX2 independently - FMA3 = dict( - implies="F16C AVX2", flags="/arch:CORE-AVX2" - ), - AVX2 = dict( - implies="FMA3", flags="/arch:CORE-AVX2" - ), - # Intel Compiler doesn't support AVX512F or AVX512CD independently - AVX512F = dict( - implies="AVX2 AVX512CD", flags="/Qx:COMMON-AVX512" - ), - AVX512CD = dict( - implies="AVX2 AVX512F", flags="/Qx:COMMON-AVX512" - ), - AVX512_KNL = dict(flags="/Qx:KNL"), - AVX512_KNM = dict(flags="/Qx:KNM"), - AVX512_SKX = dict(flags="/Qx:SKYLAKE-AVX512"), - AVX512_CLX = dict(flags="/Qx:CASCADELAKE"), - AVX512_CNL = dict(flags="/Qx:CANNONLAKE"), - AVX512_ICL = dict(flags="/Qx:ICELAKE-CLIENT"), - AVX512_SPR = dict(disable="Not supported yet") - ) - if on_x86 and self.cc_is_msvc: return dict( - SSE = dict(flags="/arch:SSE") if self.cc_on_x86 else {}, - SSE2 = dict(flags="/arch:SSE2") if self.cc_on_x86 else {}, - SSE3 = {}, - SSSE3 = {}, - SSE41 = {}, - POPCNT = dict(headers="nmmintrin.h"), - SSE42 = {}, - AVX = dict(flags="/arch:AVX"), - F16C = {}, - XOP = dict(headers="ammintrin.h"), - FMA4 = dict(headers="ammintrin.h"), - # MSVC doesn't support FMA3 or AVX2 independently - FMA3 = dict( - implies="F16C AVX2", flags="/arch:AVX2" - ), - AVX2 = dict( - implies="F16C FMA3", 
flags="/arch:AVX2" - ), - # MSVC doesn't support AVX512F or AVX512CD independently, - # always generate instructions belong to (VL/VW/DQ) - AVX512F = dict( - implies="AVX2 AVX512CD AVX512_SKX", flags="/arch:AVX512" - ), - AVX512CD = dict( - implies="AVX512F AVX512_SKX", flags="/arch:AVX512" - ), - AVX512_KNL = dict( - disable="MSVC compiler doesn't support it" - ), - AVX512_KNM = dict( - disable="MSVC compiler doesn't support it" - ), - AVX512_SKX = dict(flags="/arch:AVX512"), - AVX512_CLX = {}, - AVX512_CNL = {}, - AVX512_ICL = {}, - AVX512_SPR= dict( - disable="MSVC compiler doesn't support it" - ) - ) - - on_power = self.cc_on_ppc64le or self.cc_on_ppc64 - if on_power: - partial = dict( - VSX = dict( - implies=("VSX2" if self.cc_on_ppc64le else ""), - flags="-mvsx" - ), - VSX2 = dict( - flags="-mcpu=power8", implies_detect=False - ), - VSX3 = dict( - flags="-mcpu=power9 -mtune=power9", implies_detect=False - ), - VSX4 = dict( - flags="-mcpu=power10 -mtune=power10", implies_detect=False - ) - ) - if self.cc_is_clang: - partial["VSX"]["flags"] = "-maltivec -mvsx" - partial["VSX2"]["flags"] = "-mcpu=power8" - partial["VSX3"]["flags"] = "-mcpu=power9" - partial["VSX4"]["flags"] = "-mcpu=power10" - - return partial - - on_zarch = self.cc_on_s390x - if on_zarch: - partial = dict( - VX = dict( - flags="-march=arch11 -mzvector" - ), - VXE = dict( - flags="-march=arch12", implies_detect=False - ), - VXE2 = dict( - flags="-march=arch13", implies_detect=False - ) - ) - - return partial - - - if self.cc_on_aarch64 and is_unix: return dict( - NEON = dict( - implies="NEON_FP16 NEON_VFPV4 ASIMD", autovec=True - ), - NEON_FP16 = dict( - implies="NEON NEON_VFPV4 ASIMD", autovec=True - ), - NEON_VFPV4 = dict( - implies="NEON NEON_FP16 ASIMD", autovec=True - ), - ASIMD = dict( - implies="NEON NEON_FP16 NEON_VFPV4", autovec=True - ), - ASIMDHP = dict( - flags="-march=armv8.2-a+fp16" - ), - ASIMDDP = dict( - flags="-march=armv8.2-a+dotprod" - ), - ASIMDFHM = dict( - 
flags="-march=armv8.2-a+fp16fml" - ), - ) - if self.cc_on_armhf and is_unix: return dict( - NEON = dict( - flags="-mfpu=neon" - ), - NEON_FP16 = dict( - flags="-mfpu=neon-fp16 -mfp16-format=ieee" - ), - NEON_VFPV4 = dict( - flags="-mfpu=neon-vfpv4", - ), - ASIMD = dict( - flags="-mfpu=neon-fp-armv8 -march=armv8-a+simd", - ), - ASIMDHP = dict( - flags="-march=armv8.2-a+fp16" - ), - ASIMDDP = dict( - flags="-march=armv8.2-a+dotprod", - ), - ASIMDFHM = dict( - flags="-march=armv8.2-a+fp16fml" - ) - ) - # TODO: ARM MSVC - return {} - - def __init__(self): - if self.conf_tmp_path is None: - import shutil - import tempfile - tmp = tempfile.mkdtemp() - def rm_temp(): - try: - shutil.rmtree(tmp) - except OSError: - pass - atexit.register(rm_temp) - self.conf_tmp_path = tmp - - if self.conf_cache_factors is None: - self.conf_cache_factors = [ - os.path.getmtime(__file__), - self.conf_nocache - ] - -class _Distutils: - """A helper class that provides a collection of fundamental methods - implemented in a top of Python and NumPy Distutils. - - The idea behind this class is to gather all methods that it may - need to override in case of reuse 'CCompilerOpt' in environment - different than of what NumPy has. - - Parameters - ---------- - ccompiler : `CCompiler` - The generate instance that returned from `distutils.ccompiler.new_compiler()`. - """ - def __init__(self, ccompiler): - self._ccompiler = ccompiler - - def dist_compile(self, sources, flags, ccompiler=None, **kwargs): - """Wrap CCompiler.compile()""" - assert(isinstance(sources, list)) - assert(isinstance(flags, list)) - flags = kwargs.pop("extra_postargs", []) + flags - if not ccompiler: - ccompiler = self._ccompiler - - return ccompiler.compile(sources, extra_postargs=flags, **kwargs) - - def dist_test(self, source, flags, macros=[]): - """Return True if 'CCompiler.compile()' able to compile - a source file with certain flags. 
- """ - assert(isinstance(source, str)) - from distutils.errors import CompileError - cc = self._ccompiler; - bk_spawn = getattr(cc, 'spawn', None) - if bk_spawn: - cc_type = getattr(self._ccompiler, "compiler_type", "") - if cc_type in ("msvc",): - setattr(cc, 'spawn', self._dist_test_spawn_paths) - else: - setattr(cc, 'spawn', self._dist_test_spawn) - test = False - try: - self.dist_compile( - [source], flags, macros=macros, output_dir=self.conf_tmp_path - ) - test = True - except CompileError as e: - self.dist_log(str(e), stderr=True) - if bk_spawn: - setattr(cc, 'spawn', bk_spawn) - return test - - def dist_info(self): - """ - Return a tuple containing info about (platform, compiler, extra_args), - required by the abstract class '_CCompiler' for discovering the - platform environment. This is also used as a cache factor in order - to detect any changes happening from outside. - """ - if hasattr(self, "_dist_info"): - return self._dist_info - - cc_type = getattr(self._ccompiler, "compiler_type", '') - if cc_type in ("intelem", "intelemw"): - platform = "x86_64" - elif cc_type in ("intel", "intelw", "intele"): - platform = "x86" - else: - from distutils.util import get_platform - platform = get_platform() - - cc_info = getattr(self._ccompiler, "compiler", getattr(self._ccompiler, "compiler_so", '')) - if not cc_type or cc_type == "unix": - if hasattr(cc_info, "__iter__"): - compiler = cc_info[0] - else: - compiler = str(cc_info) - else: - compiler = cc_type - - if hasattr(cc_info, "__iter__") and len(cc_info) > 1: - extra_args = ' '.join(cc_info[1:]) - else: - extra_args = os.environ.get("CFLAGS", "") - extra_args += os.environ.get("CPPFLAGS", "") - - self._dist_info = (platform, compiler, extra_args) - return self._dist_info - - @staticmethod - def dist_error(*args): - """Raise a compiler error""" - from distutils.errors import CompileError - raise CompileError(_Distutils._dist_str(*args)) - - @staticmethod - def dist_fatal(*args): - """Raise a distutils 
error""" - from distutils.errors import DistutilsError - raise DistutilsError(_Distutils._dist_str(*args)) - - @staticmethod - def dist_log(*args, stderr=False): - """Print a console message""" - from numpy.distutils import log - out = _Distutils._dist_str(*args) - if stderr: - log.warn(out) - else: - log.info(out) - - @staticmethod - def dist_load_module(name, path): - """Load a module from file, required by the abstract class '_Cache'.""" - from .misc_util import exec_mod_from_location - try: - return exec_mod_from_location(name, path) - except Exception as e: - _Distutils.dist_log(e, stderr=True) - return None - - @staticmethod - def _dist_str(*args): - """Return a string to print by log and errors.""" - def to_str(arg): - if not isinstance(arg, str) and hasattr(arg, '__iter__'): - ret = [] - for a in arg: - ret.append(to_str(a)) - return '('+ ' '.join(ret) + ')' - return str(arg) - - stack = inspect.stack()[2] - start = "CCompilerOpt.%s[%d] : " % (stack.function, stack.lineno) - out = ' '.join([ - to_str(a) - for a in (*args,) - ]) - return start + out - - def _dist_test_spawn_paths(self, cmd, display=None): - """ - Fix msvc SDK ENV path same as distutils do - without it we get c1: fatal error C1356: unable to find mspdbcore.dll - """ - if not hasattr(self._ccompiler, "_paths"): - self._dist_test_spawn(cmd) - return - old_path = os.getenv("path") - try: - os.environ["path"] = self._ccompiler._paths - self._dist_test_spawn(cmd) - finally: - os.environ["path"] = old_path - - _dist_warn_regex = re.compile( - # intel and msvc compilers don't raise - # fatal errors when flags are wrong or unsupported - ".*(" - "warning D9002|" # msvc, it should be work with any language. 
- "invalid argument for option" # intel - ").*" - ) - @staticmethod - def _dist_test_spawn(cmd, display=None): - try: - o = subprocess.check_output(cmd, stderr=subprocess.STDOUT, - text=True) - if o and re.match(_Distutils._dist_warn_regex, o): - _Distutils.dist_error( - "Flags in command", cmd ,"aren't supported by the compiler" - ", output -> \n%s" % o - ) - except subprocess.CalledProcessError as exc: - o = exc.output - s = exc.returncode - except OSError as e: - o = e - s = 127 - else: - return None - _Distutils.dist_error( - "Command", cmd, "failed with exit status %d output -> \n%s" % ( - s, o - )) - -_share_cache = {} -class _Cache: - """An abstract class handles caching functionality, provides two - levels of caching, in-memory by share instances attributes among - each other and by store attributes into files. - - **Note**: - any attributes that start with ``_`` or ``conf_`` will be ignored. - - Parameters - ---------- - cache_path : str or None - The path of cache file, if None then cache in file will disabled. - - *factors : - The caching factors that need to utilize next to `conf_cache_factors`. - - Attributes - ---------- - cache_private : set - Hold the attributes that need be skipped from "in-memory cache". - - cache_infile : bool - Utilized during initializing this class, to determine if the cache was able - to loaded from the specified cache path in 'cache_path'. 
- """ - - # skip attributes from cache - _cache_ignore = re.compile("^(_|conf_)") - - def __init__(self, cache_path=None, *factors): - self.cache_me = {} - self.cache_private = set() - self.cache_infile = False - self._cache_path = None - - if self.conf_nocache: - self.dist_log("cache is disabled by `Config`") - return - - self._cache_hash = self.cache_hash(*factors, *self.conf_cache_factors) - self._cache_path = cache_path - if cache_path: - if os.path.exists(cache_path): - self.dist_log("load cache from file ->", cache_path) - cache_mod = self.dist_load_module("cache", cache_path) - if not cache_mod: - self.dist_log( - "unable to load the cache file as a module", - stderr=True - ) - elif not hasattr(cache_mod, "hash") or \ - not hasattr(cache_mod, "data"): - self.dist_log("invalid cache file", stderr=True) - elif self._cache_hash == cache_mod.hash: - self.dist_log("hit the file cache") - for attr, val in cache_mod.data.items(): - setattr(self, attr, val) - self.cache_infile = True - else: - self.dist_log("miss the file cache") - - if not self.cache_infile: - other_cache = _share_cache.get(self._cache_hash) - if other_cache: - self.dist_log("hit the memory cache") - for attr, val in other_cache.__dict__.items(): - if attr in other_cache.cache_private or \ - re.match(self._cache_ignore, attr): - continue - setattr(self, attr, val) - - _share_cache[self._cache_hash] = self - atexit.register(self.cache_flush) - - def __del__(self): - for h, o in _share_cache.items(): - if o == self: - _share_cache.pop(h) - break - - def cache_flush(self): - """ - Force update the cache. 
- """ - if not self._cache_path: - return - # TODO: don't write if the cache doesn't change - self.dist_log("write cache to path ->", self._cache_path) - cdict = self.__dict__.copy() - for attr in self.__dict__.keys(): - if re.match(self._cache_ignore, attr): - cdict.pop(attr) - - d = os.path.dirname(self._cache_path) - if not os.path.exists(d): - os.makedirs(d) - - repr_dict = pprint.pformat(cdict, compact=True) - with open(self._cache_path, "w") as f: - f.write(textwrap.dedent("""\ - # AUTOGENERATED DON'T EDIT - # Please make changes to the code generator \ - (distutils/ccompiler_opt.py) - hash = {} - data = \\ - """).format(self._cache_hash)) - f.write(repr_dict) - - def cache_hash(self, *factors): - # is there a built-in non-crypto hash? - # sdbm - chash = 0 - for f in factors: - for char in str(f): - chash = ord(char) + (chash << 6) + (chash << 16) - chash - chash &= 0xFFFFFFFF - return chash - - @staticmethod - def me(cb): - """ - A static method that can be treated as a decorator to - dynamically cache certain methods. - """ - def cache_wrap_me(self, *args, **kwargs): - # good for normal args - cache_key = str(( - cb.__name__, *args, *kwargs.keys(), *kwargs.values() - )) - if cache_key in self.cache_me: - return self.cache_me[cache_key] - ccb = cb(self, *args, **kwargs) - self.cache_me[cache_key] = ccb - return ccb - return cache_wrap_me - -class _CCompiler: - """A helper class for `CCompilerOpt` containing all utilities that - related to the fundamental compiler's functions. 
- - Attributes - ---------- - cc_on_x86 : bool - True when the target architecture is 32-bit x86 - cc_on_x64 : bool - True when the target architecture is 64-bit x86 - cc_on_ppc64 : bool - True when the target architecture is 64-bit big-endian powerpc - cc_on_ppc64le : bool - True when the target architecture is 64-bit litle-endian powerpc - cc_on_s390x : bool - True when the target architecture is IBM/ZARCH on linux - cc_on_armhf : bool - True when the target architecture is 32-bit ARMv7+ - cc_on_aarch64 : bool - True when the target architecture is 64-bit Armv8-a+ - cc_on_noarch : bool - True when the target architecture is unknown or not supported - cc_is_gcc : bool - True if the compiler is GNU or - if the compiler is unknown - cc_is_clang : bool - True if the compiler is Clang - cc_is_icc : bool - True if the compiler is Intel compiler (unix like) - cc_is_iccw : bool - True if the compiler is Intel compiler (msvc like) - cc_is_nocc : bool - True if the compiler isn't supported directly, - Note: that cause a fail-back to gcc - cc_has_debug : bool - True if the compiler has debug flags - cc_has_native : bool - True if the compiler has native flags - cc_noopt : bool - True if the compiler has definition 'DISABLE_OPT*', - or 'cc_on_noarch' is True - cc_march : str - The target architecture name, or "unknown" if - the architecture isn't supported - cc_name : str - The compiler name, or "unknown" if the compiler isn't supported - cc_flags : dict - Dictionary containing the initialized flags of `_Config.conf_cc_flags` - """ - def __init__(self): - if hasattr(self, "cc_is_cached"): - return - # attr regex compiler-expression - detect_arch = ( - ("cc_on_x64", ".*(x|x86_|amd)64.*", ""), - ("cc_on_x86", ".*(win32|x86|i386|i686).*", ""), - ("cc_on_ppc64le", ".*(powerpc|ppc)64(el|le).*|.*powerpc.*", - "defined(__powerpc64__) && " - "defined(__LITTLE_ENDIAN__)"), - ("cc_on_ppc64", ".*(powerpc|ppc).*|.*powerpc.*", - "defined(__powerpc64__) && " - "defined(__BIG_ENDIAN__)"), 
- ("cc_on_aarch64", ".*(aarch64|arm64).*", ""), - ("cc_on_armhf", ".*arm.*", "defined(__ARM_ARCH_7__) || " - "defined(__ARM_ARCH_7A__)"), - ("cc_on_s390x", ".*s390x.*", ""), - # undefined platform - ("cc_on_noarch", "", ""), - ) - detect_compiler = ( - ("cc_is_gcc", r".*(gcc|gnu\-g).*", ""), - ("cc_is_clang", ".*clang.*", ""), - # intel msvc like - ("cc_is_iccw", ".*(intelw|intelemw|iccw).*", ""), - ("cc_is_icc", ".*(intel|icc).*", ""), # intel unix like - ("cc_is_msvc", ".*msvc.*", ""), - ("cc_is_fcc", ".*fcc.*", ""), - # undefined compiler will be treat it as gcc - ("cc_is_nocc", "", ""), - ) - detect_args = ( - ("cc_has_debug", ".*(O0|Od|ggdb|coverage|debug:full).*", ""), - ("cc_has_native", - ".*(-march=native|-xHost|/QxHost|-mcpu=a64fx).*", ""), - # in case if the class run with -DNPY_DISABLE_OPTIMIZATION - ("cc_noopt", ".*DISABLE_OPT.*", ""), - ) - - dist_info = self.dist_info() - platform, compiler_info, extra_args = dist_info - # set False to all attrs - for section in (detect_arch, detect_compiler, detect_args): - for attr, rgex, cexpr in section: - setattr(self, attr, False) - - for detect, searchin in ((detect_arch, platform), (detect_compiler, compiler_info)): - for attr, rgex, cexpr in detect: - if rgex and not re.match(rgex, searchin, re.IGNORECASE): - continue - if cexpr and not self.cc_test_cexpr(cexpr): - continue - setattr(self, attr, True) - break - - for attr, rgex, cexpr in detect_args: - if rgex and not re.match(rgex, extra_args, re.IGNORECASE): - continue - if cexpr and not self.cc_test_cexpr(cexpr): - continue - setattr(self, attr, True) - - if self.cc_on_noarch: - self.dist_log( - "unable to detect CPU architecture which lead to disable the optimization. 
" - f"check dist_info:<<\n{dist_info}\n>>", - stderr=True - ) - self.cc_noopt = True - - if self.conf_noopt: - self.dist_log("Optimization is disabled by the Config", stderr=True) - self.cc_noopt = True - - if self.cc_is_nocc: - """ - mingw can be treated as a gcc, and also xlc even if it based on clang, - but still has the same gcc optimization flags. - """ - self.dist_log( - "unable to detect compiler type which leads to treating it as GCC. " - "this is a normal behavior if you're using gcc-like compiler such as MinGW or IBM/XLC." - f"check dist_info:<<\n{dist_info}\n>>", - stderr=True - ) - self.cc_is_gcc = True - - self.cc_march = "unknown" - for arch in ("x86", "x64", "ppc64", "ppc64le", - "armhf", "aarch64", "s390x"): - if getattr(self, "cc_on_" + arch): - self.cc_march = arch - break - - self.cc_name = "unknown" - for name in ("gcc", "clang", "iccw", "icc", "msvc", "fcc"): - if getattr(self, "cc_is_" + name): - self.cc_name = name - break - - self.cc_flags = {} - compiler_flags = self.conf_cc_flags.get(self.cc_name) - if compiler_flags is None: - self.dist_fatal( - "undefined flag for compiler '%s', " - "leave an empty dict instead" % self.cc_name - ) - for name, flags in compiler_flags.items(): - self.cc_flags[name] = nflags = [] - if flags: - assert(isinstance(flags, str)) - flags = flags.split() - for f in flags: - if self.cc_test_flags([f]): - nflags.append(f) - - self.cc_is_cached = True - - @_Cache.me - def cc_test_flags(self, flags): - """ - Returns True if the compiler supports 'flags'. - """ - assert(isinstance(flags, list)) - self.dist_log("testing flags", flags) - test_path = os.path.join(self.conf_check_path, "test_flags.c") - test = self.dist_test(test_path, flags) - if not test: - self.dist_log("testing failed", stderr=True) - return test - - @_Cache.me - def cc_test_cexpr(self, cexpr, flags=[]): - """ - Same as the above but supports compile-time expressions. 
- """ - self.dist_log("testing compiler expression", cexpr) - test_path = os.path.join(self.conf_tmp_path, "npy_dist_test_cexpr.c") - with open(test_path, "w") as fd: - fd.write(textwrap.dedent(f"""\ - #if !({cexpr}) - #error "unsupported expression" - #endif - int dummy; - """)) - test = self.dist_test(test_path, flags) - if not test: - self.dist_log("testing failed", stderr=True) - return test - - def cc_normalize_flags(self, flags): - """ - Remove the conflicts that caused due gathering implied features flags. - - Parameters - ---------- - 'flags' list, compiler flags - flags should be sorted from the lowest to the highest interest. - - Returns - ------- - list, filtered from any conflicts. - - Examples - -------- - >>> self.cc_normalize_flags(['-march=armv8.2-a+fp16', '-march=armv8.2-a+dotprod']) - ['armv8.2-a+fp16+dotprod'] - - >>> self.cc_normalize_flags( - ['-msse', '-msse2', '-msse3', '-mssse3', '-msse4.1', '-msse4.2', '-mavx', '-march=core-avx2'] - ) - ['-march=core-avx2'] - """ - assert(isinstance(flags, list)) - if self.cc_is_gcc or self.cc_is_clang or self.cc_is_icc: - return self._cc_normalize_unix(flags) - - if self.cc_is_msvc or self.cc_is_iccw: - return self._cc_normalize_win(flags) - return flags - - _cc_normalize_unix_mrgx = re.compile( - # 1- to check the highest of - r"^(-mcpu=|-march=|-x[A-Z0-9\-])" - ) - _cc_normalize_unix_frgx = re.compile( - # 2- to remove any flags starts with - # -march, -mcpu, -x(INTEL) and '-m' without '=' - r"^(?!(-mcpu=|-march=|-x[A-Z0-9\-]|-m[a-z0-9\-\.]*.$))|" - # exclude: - r"(?:-mzvector)" - ) - _cc_normalize_unix_krgx = re.compile( - # 3- keep only the highest of - r"^(-mfpu|-mtune)" - ) - _cc_normalize_arch_ver = re.compile( - r"[0-9.]" - ) - def _cc_normalize_unix(self, flags): - def ver_flags(f): - # arch ver subflag - # -march=armv8.2-a+fp16fml - tokens = f.split('+') - ver = float('0' + ''.join( - re.findall(self._cc_normalize_arch_ver, tokens[0]) - )) - return ver, tokens[0], tokens[1:] - - if len(flags) <= 
1: - return flags - # get the highest matched flag - for i, cur_flag in enumerate(reversed(flags)): - if not re.match(self._cc_normalize_unix_mrgx, cur_flag): - continue - lower_flags = flags[:-(i+1)] - upper_flags = flags[-i:] - filtered = list(filter( - self._cc_normalize_unix_frgx.search, lower_flags - )) - # gather subflags - ver, arch, subflags = ver_flags(cur_flag) - if ver > 0 and len(subflags) > 0: - for xflag in lower_flags: - xver, _, xsubflags = ver_flags(xflag) - if ver == xver: - subflags = xsubflags + subflags - cur_flag = arch + '+' + '+'.join(subflags) - - flags = filtered + [cur_flag] - if i > 0: - flags += upper_flags - break - - # to remove overridable flags - final_flags = [] - matched = set() - for f in reversed(flags): - match = re.match(self._cc_normalize_unix_krgx, f) - if not match: - pass - elif match[0] in matched: - continue - else: - matched.add(match[0]) - final_flags.insert(0, f) - return final_flags - - _cc_normalize_win_frgx = re.compile( - r"^(?!(/arch\:|/Qx\:))" - ) - _cc_normalize_win_mrgx = re.compile( - r"^(/arch|/Qx:)" - ) - def _cc_normalize_win(self, flags): - for i, f in enumerate(reversed(flags)): - if not re.match(self._cc_normalize_win_mrgx, f): - continue - i += 1 - return list(filter( - self._cc_normalize_win_frgx.search, flags[:-i] - )) + flags[-i:] - return flags - -class _Feature: - """A helper class for `CCompilerOpt` that managing CPU features. - - Attributes - ---------- - feature_supported : dict - Dictionary containing all CPU features that supported - by the platform, according to the specified values in attribute - `_Config.conf_features` and `_Config.conf_features_partial()` - - feature_min : set - The minimum support of CPU features, according to - the specified values in attribute `_Config.conf_min_features`. 
- """ - def __init__(self): - if hasattr(self, "feature_is_cached"): - return - self.feature_supported = pfeatures = self.conf_features_partial() - for feature_name in list(pfeatures.keys()): - feature = pfeatures[feature_name] - cfeature = self.conf_features[feature_name] - feature.update({ - k:v for k,v in cfeature.items() if k not in feature - }) - disabled = feature.get("disable") - if disabled is not None: - pfeatures.pop(feature_name) - self.dist_log( - "feature '%s' is disabled," % feature_name, - disabled, stderr=True - ) - continue - # list is used internally for these options - for option in ( - "implies", "group", "detect", "headers", "flags", "extra_checks" - ) : - oval = feature.get(option) - if isinstance(oval, str): - feature[option] = oval.split() - - self.feature_min = set() - min_f = self.conf_min_features.get(self.cc_march, "") - for F in min_f.upper().split(): - if F in self.feature_supported: - self.feature_min.add(F) - - self.feature_is_cached = True - - def feature_names(self, names=None, force_flags=None, macros=[]): - """ - Returns a set of CPU feature names that supported by platform and the **C** compiler. - - Parameters - ---------- - names : sequence or None, optional - Specify certain CPU features to test it against the **C** compiler. - if None(default), it will test all current supported features. - **Note**: feature names must be in upper-case. - - force_flags : list or None, optional - If None(default), default compiler flags for every CPU feature will - be used during the test. - - macros : list of tuples, optional - A list of C macro definitions. 
- """ - assert( - names is None or ( - not isinstance(names, str) and - hasattr(names, "__iter__") - ) - ) - assert(force_flags is None or isinstance(force_flags, list)) - if names is None: - names = self.feature_supported.keys() - supported_names = set() - for f in names: - if self.feature_is_supported( - f, force_flags=force_flags, macros=macros - ): - supported_names.add(f) - return supported_names - - def feature_is_exist(self, name): - """ - Returns True if a certain feature is exist and covered within - ``_Config.conf_features``. - - Parameters - ---------- - 'name': str - feature name in uppercase. - """ - assert(name.isupper()) - return name in self.conf_features - - def feature_sorted(self, names, reverse=False): - """ - Sort a list of CPU features ordered by the lowest interest. - - Parameters - ---------- - 'names': sequence - sequence of supported feature names in uppercase. - 'reverse': bool, optional - If true, the sorted features is reversed. (highest interest) - - Returns - ------- - list, sorted CPU features - """ - def sort_cb(k): - if isinstance(k, str): - return self.feature_supported[k]["interest"] - # multiple features - rank = max([self.feature_supported[f]["interest"] for f in k]) - # FIXME: that's not a safe way to increase the rank for - # multi targets - rank += len(k) -1 - return rank - return sorted(names, reverse=reverse, key=sort_cb) - - def feature_implies(self, names, keep_origins=False): - """ - Return a set of CPU features that implied by 'names' - - Parameters - ---------- - names : str or sequence of str - CPU feature name(s) in uppercase. - - keep_origins : bool - if False(default) then the returned set will not contain any - features from 'names'. This case happens only when two features - imply each other. 
- - Examples - -------- - >>> self.feature_implies("SSE3") - {'SSE', 'SSE2'} - >>> self.feature_implies("SSE2") - {'SSE'} - >>> self.feature_implies("SSE2", keep_origins=True) - # 'SSE2' found here since 'SSE' and 'SSE2' imply each other - {'SSE', 'SSE2'} - """ - def get_implies(name, _caller=set()): - implies = set() - d = self.feature_supported[name] - for i in d.get("implies", []): - implies.add(i) - if i in _caller: - # infinity recursive guard since - # features can imply each other - continue - _caller.add(name) - implies = implies.union(get_implies(i, _caller)) - return implies - - if isinstance(names, str): - implies = get_implies(names) - names = [names] - else: - assert(hasattr(names, "__iter__")) - implies = set() - for n in names: - implies = implies.union(get_implies(n)) - if not keep_origins: - implies.difference_update(names) - return implies - - def feature_implies_c(self, names): - """same as feature_implies() but combining 'names'""" - if isinstance(names, str): - names = set((names,)) - else: - names = set(names) - return names.union(self.feature_implies(names)) - - def feature_ahead(self, names): - """ - Return list of features in 'names' after remove any - implied features and keep the origins. - - Parameters - ---------- - 'names': sequence - sequence of CPU feature names in uppercase. 
- - Returns - ------- - list of CPU features sorted as-is 'names' - - Examples - -------- - >>> self.feature_ahead(["SSE2", "SSE3", "SSE41"]) - ["SSE41"] - # assume AVX2 and FMA3 implies each other and AVX2 - # is the highest interest - >>> self.feature_ahead(["SSE2", "SSE3", "SSE41", "AVX2", "FMA3"]) - ["AVX2"] - # assume AVX2 and FMA3 don't implies each other - >>> self.feature_ahead(["SSE2", "SSE3", "SSE41", "AVX2", "FMA3"]) - ["AVX2", "FMA3"] - """ - assert( - not isinstance(names, str) - and hasattr(names, '__iter__') - ) - implies = self.feature_implies(names, keep_origins=True) - ahead = [n for n in names if n not in implies] - if len(ahead) == 0: - # return the highest interested feature - # if all features imply each other - ahead = self.feature_sorted(names, reverse=True)[:1] - return ahead - - def feature_untied(self, names): - """ - same as 'feature_ahead()' but if both features implied each other - and keep the highest interest. - - Parameters - ---------- - 'names': sequence - sequence of CPU feature names in uppercase. - - Returns - ------- - list of CPU features sorted as-is 'names' - - Examples - -------- - >>> self.feature_untied(["SSE2", "SSE3", "SSE41"]) - ["SSE2", "SSE3", "SSE41"] - # assume AVX2 and FMA3 implies each other - >>> self.feature_untied(["SSE2", "SSE3", "SSE41", "FMA3", "AVX2"]) - ["SSE2", "SSE3", "SSE41", "AVX2"] - """ - assert( - not isinstance(names, str) - and hasattr(names, '__iter__') - ) - final = [] - for n in names: - implies = self.feature_implies(n) - tied = [ - nn for nn in final - if nn in implies and n in self.feature_implies(nn) - ] - if tied: - tied = self.feature_sorted(tied + [n]) - if n not in tied[1:]: - continue - final.remove(tied[:1][0]) - final.append(n) - return final - - def feature_get_til(self, names, keyisfalse): - """ - same as `feature_implies_c()` but stop collecting implied - features when feature's option that provided through - parameter 'keyisfalse' is False, also sorting the returned - features. 
- """ - def til(tnames): - # sort from highest to lowest interest then cut if "key" is False - tnames = self.feature_implies_c(tnames) - tnames = self.feature_sorted(tnames, reverse=True) - for i, n in enumerate(tnames): - if not self.feature_supported[n].get(keyisfalse, True): - tnames = tnames[:i+1] - break - return tnames - - if isinstance(names, str) or len(names) <= 1: - names = til(names) - # normalize the sort - names.reverse() - return names - - names = self.feature_ahead(names) - names = {t for n in names for t in til(n)} - return self.feature_sorted(names) - - def feature_detect(self, names): - """ - Return a list of CPU features that required to be detected - sorted from the lowest to highest interest. - """ - names = self.feature_get_til(names, "implies_detect") - detect = [] - for n in names: - d = self.feature_supported[n] - detect += d.get("detect", d.get("group", [n])) - return detect - - @_Cache.me - def feature_flags(self, names): - """ - Return a list of CPU features flags sorted from the lowest - to highest interest. - """ - names = self.feature_sorted(self.feature_implies_c(names)) - flags = [] - for n in names: - d = self.feature_supported[n] - f = d.get("flags", []) - if not f or not self.cc_test_flags(f): - continue - flags += f - return self.cc_normalize_flags(flags) - - @_Cache.me - def feature_test(self, name, force_flags=None, macros=[]): - """ - Test a certain CPU feature against the compiler through its own - check file. - - Parameters - ---------- - name : str - Supported CPU feature name. - - force_flags : list or None, optional - If None(default), the returned flags from `feature_flags()` - will be used. - - macros : list of tuples, optional - A list of C macro definitions. 
- """ - if force_flags is None: - force_flags = self.feature_flags(name) - - self.dist_log( - "testing feature '%s' with flags (%s)" % ( - name, ' '.join(force_flags) - )) - # Each CPU feature must have C source code contains at - # least one intrinsic or instruction related to this feature. - test_path = os.path.join( - self.conf_check_path, "cpu_%s.c" % name.lower() - ) - if not os.path.exists(test_path): - self.dist_fatal("feature test file is not exist", test_path) - - test = self.dist_test( - test_path, force_flags + self.cc_flags["werror"], macros=macros - ) - if not test: - self.dist_log("testing failed", stderr=True) - return test - - @_Cache.me - def feature_is_supported(self, name, force_flags=None, macros=[]): - """ - Check if a certain CPU feature is supported by the platform and compiler. - - Parameters - ---------- - name : str - CPU feature name in uppercase. - - force_flags : list or None, optional - If None(default), default compiler flags for every CPU feature will - be used during test. - - macros : list of tuples, optional - A list of C macro definitions. - """ - assert(name.isupper()) - assert(force_flags is None or isinstance(force_flags, list)) - - supported = name in self.feature_supported - if supported: - for impl in self.feature_implies(name): - if not self.feature_test(impl, force_flags, macros=macros): - return False - if not self.feature_test(name, force_flags, macros=macros): - return False - return supported - - @_Cache.me - def feature_can_autovec(self, name): - """ - check if the feature can be auto-vectorized by the compiler - """ - assert(isinstance(name, str)) - d = self.feature_supported[name] - can = d.get("autovec", None) - if can is None: - valid_flags = [ - self.cc_test_flags([f]) for f in d.get("flags", []) - ] - can = valid_flags and any(valid_flags) - return can - - @_Cache.me - def feature_extra_checks(self, name): - """ - Return a list of supported extra checks after testing them against - the compiler. 
- - Parameters - ---------- - names : str - CPU feature name in uppercase. - """ - assert isinstance(name, str) - d = self.feature_supported[name] - extra_checks = d.get("extra_checks", []) - if not extra_checks: - return [] - - self.dist_log("Testing extra checks for feature '%s'" % name, extra_checks) - flags = self.feature_flags(name) - available = [] - not_available = [] - for chk in extra_checks: - test_path = os.path.join( - self.conf_check_path, "extra_%s.c" % chk.lower() - ) - if not os.path.exists(test_path): - self.dist_fatal("extra check file does not exist", test_path) - - is_supported = self.dist_test(test_path, flags + self.cc_flags["werror"]) - if is_supported: - available.append(chk) - else: - not_available.append(chk) - - if not_available: - self.dist_log("testing failed for checks", not_available, stderr=True) - return available - - - def feature_c_preprocessor(self, feature_name, tabs=0): - """ - Generate C preprocessor definitions and include headers of a CPU feature. - - Parameters - ---------- - 'feature_name': str - CPU feature name in uppercase. - 'tabs': int - if > 0, align the generated strings to the right depend on number of tabs. 
- - Returns - ------- - str, generated C preprocessor - - Examples - -------- - >>> self.feature_c_preprocessor("SSE3") - /** SSE3 **/ - #define NPY_HAVE_SSE3 1 - #include - """ - assert(feature_name.isupper()) - feature = self.feature_supported.get(feature_name) - assert(feature is not None) - - prepr = [ - "/** %s **/" % feature_name, - "#define %sHAVE_%s 1" % (self.conf_c_prefix, feature_name) - ] - prepr += [ - "#include <%s>" % h for h in feature.get("headers", []) - ] - - extra_defs = feature.get("group", []) - extra_defs += self.feature_extra_checks(feature_name) - for edef in extra_defs: - # Guard extra definitions in case of duplicate with - # another feature - prepr += [ - "#ifndef %sHAVE_%s" % (self.conf_c_prefix, edef), - "\t#define %sHAVE_%s 1" % (self.conf_c_prefix, edef), - "#endif", - ] - - if tabs > 0: - prepr = [('\t'*tabs) + l for l in prepr] - return '\n'.join(prepr) - -class _Parse: - """A helper class that parsing main arguments of `CCompilerOpt`, - also parsing configuration statements in dispatch-able sources. - - Parameters - ---------- - cpu_baseline : str or None - minimal set of required CPU features or special options. - - cpu_dispatch : str or None - dispatched set of additional CPU features or special options. - - Special options can be: - - **MIN**: Enables the minimum CPU features that utilized via `_Config.conf_min_features` - - **MAX**: Enables all supported CPU features by the Compiler and platform. - - **NATIVE**: Enables all CPU features that supported by the current machine. - - **NONE**: Enables nothing - - **Operand +/-**: remove or add features, useful with options **MAX**, **MIN** and **NATIVE**. - NOTE: operand + is only added for nominal reason. - - NOTES: - - Case-insensitive among all CPU features and special options. - - Comma or space can be used as a separator. - - If the CPU feature is not supported by the user platform or compiler, - it will be skipped rather than raising a fatal error. 
- - Any specified CPU features to 'cpu_dispatch' will be skipped if its part of CPU baseline features - - 'cpu_baseline' force enables implied features. - - Attributes - ---------- - parse_baseline_names : list - Final CPU baseline's feature names(sorted from low to high) - parse_baseline_flags : list - Compiler flags of baseline features - parse_dispatch_names : list - Final CPU dispatch-able feature names(sorted from low to high) - parse_target_groups : dict - Dictionary containing initialized target groups that configured - through class attribute `conf_target_groups`. - - The key is represent the group name and value is a tuple - contains three items : - - bool, True if group has the 'baseline' option. - - list, list of CPU features. - - list, list of extra compiler flags. - - """ - def __init__(self, cpu_baseline, cpu_dispatch): - self._parse_policies = dict( - # POLICY NAME, (HAVE, NOT HAVE, [DEB]) - KEEP_BASELINE = ( - None, self._parse_policy_not_keepbase, - [] - ), - KEEP_SORT = ( - self._parse_policy_keepsort, - self._parse_policy_not_keepsort, - [] - ), - MAXOPT = ( - self._parse_policy_maxopt, None, - [] - ), - WERROR = ( - self._parse_policy_werror, None, - [] - ), - AUTOVEC = ( - self._parse_policy_autovec, None, - ["MAXOPT"] - ) - ) - if hasattr(self, "parse_is_cached"): - return - - self.parse_baseline_names = [] - self.parse_baseline_flags = [] - self.parse_dispatch_names = [] - self.parse_target_groups = {} - - if self.cc_noopt: - # skip parsing baseline and dispatch args and keep parsing target groups - cpu_baseline = cpu_dispatch = None - - self.dist_log("check requested baseline") - if cpu_baseline is not None: - cpu_baseline = self._parse_arg_features("cpu_baseline", cpu_baseline) - baseline_names = self.feature_names(cpu_baseline) - self.parse_baseline_flags = self.feature_flags(baseline_names) - self.parse_baseline_names = self.feature_sorted( - self.feature_implies_c(baseline_names) - ) - - self.dist_log("check requested dispatch-able 
features") - if cpu_dispatch is not None: - cpu_dispatch_ = self._parse_arg_features("cpu_dispatch", cpu_dispatch) - cpu_dispatch = { - f for f in cpu_dispatch_ - if f not in self.parse_baseline_names - } - conflict_baseline = cpu_dispatch_.difference(cpu_dispatch) - self.parse_dispatch_names = self.feature_sorted( - self.feature_names(cpu_dispatch) - ) - if len(conflict_baseline) > 0: - self.dist_log( - "skip features", conflict_baseline, "since its part of baseline" - ) - - self.dist_log("initialize targets groups") - for group_name, tokens in self.conf_target_groups.items(): - self.dist_log("parse target group", group_name) - GROUP_NAME = group_name.upper() - if not tokens or not tokens.strip(): - # allow empty groups, useful in case if there's a need - # to disable certain group since '_parse_target_tokens()' - # requires at least one valid target - self.parse_target_groups[GROUP_NAME] = ( - False, [], [] - ) - continue - has_baseline, features, extra_flags = \ - self._parse_target_tokens(tokens) - self.parse_target_groups[GROUP_NAME] = ( - has_baseline, features, extra_flags - ) - - self.parse_is_cached = True - - def parse_targets(self, source): - """ - Fetch and parse configuration statements that required for - defining the targeted CPU features, statements should be declared - in the top of source in between **C** comment and start - with a special mark **@targets**. - - Configuration statements are sort of keywords representing - CPU features names, group of statements and policies, combined - together to determine the required optimization. - - Parameters - ---------- - source : str - the path of **C** source file. - - Returns - ------- - - bool, True if group has the 'baseline' option - - list, list of CPU features - - list, list of extra compiler flags - """ - self.dist_log("looking for '@targets' inside -> ", source) - # get lines between /*@targets and */ - with open(source) as fd: - tokens = "" - max_to_reach = 1000 # good enough, isn't? 
- start_with = "@targets" - start_pos = -1 - end_with = "*/" - end_pos = -1 - for current_line, line in enumerate(fd): - if current_line == max_to_reach: - self.dist_fatal("reached the max of lines") - break - if start_pos == -1: - start_pos = line.find(start_with) - if start_pos == -1: - continue - start_pos += len(start_with) - tokens += line - end_pos = line.find(end_with) - if end_pos != -1: - end_pos += len(tokens) - len(line) - break - - if start_pos == -1: - self.dist_fatal("expected to find '%s' within a C comment" % start_with) - if end_pos == -1: - self.dist_fatal("expected to end with '%s'" % end_with) - - tokens = tokens[start_pos:end_pos] - return self._parse_target_tokens(tokens) - - _parse_regex_arg = re.compile(r'\s|,|([+-])') - def _parse_arg_features(self, arg_name, req_features): - if not isinstance(req_features, str): - self.dist_fatal("expected a string in '%s'" % arg_name) - - final_features = set() - # space and comma can be used as a separator - tokens = list(filter(None, re.split(self._parse_regex_arg, req_features))) - append = True # append is the default - for tok in tokens: - if tok[0] in ("#", "$"): - self.dist_fatal( - arg_name, "target groups and policies " - "aren't allowed from arguments, " - "only from dispatch-able sources" - ) - if tok == '+': - append = True - continue - if tok == '-': - append = False - continue - - TOK = tok.upper() # we use upper-case internally - features_to = set() - if TOK == "NONE": - pass - elif TOK == "NATIVE": - native = self.cc_flags["native"] - if not native: - self.dist_fatal(arg_name, - "native option isn't supported by the compiler" - ) - features_to = self.feature_names( - force_flags=native, macros=[("DETECT_FEATURES", 1)] - ) - elif TOK == "MAX": - features_to = self.feature_supported.keys() - elif TOK == "MIN": - features_to = self.feature_min - else: - if TOK in self.feature_supported: - features_to.add(TOK) - else: - if not self.feature_is_exist(TOK): - self.dist_fatal(arg_name, - ", '%s' 
isn't a known feature or option" % tok - ) - if append: - final_features = final_features.union(features_to) - else: - final_features = final_features.difference(features_to) - - append = True # back to default - - return final_features - - _parse_regex_target = re.compile(r'\s|[*,/]|([()])') - def _parse_target_tokens(self, tokens): - assert(isinstance(tokens, str)) - final_targets = [] # to keep it sorted as specified - extra_flags = [] - has_baseline = False - - skipped = set() - policies = set() - multi_target = None - - tokens = list(filter(None, re.split(self._parse_regex_target, tokens))) - if not tokens: - self.dist_fatal("expected one token at least") - - for tok in tokens: - TOK = tok.upper() - ch = tok[0] - if ch in ('+', '-'): - self.dist_fatal( - "+/- are 'not' allowed from target's groups or @targets, " - "only from cpu_baseline and cpu_dispatch parms" - ) - elif ch == '$': - if multi_target is not None: - self.dist_fatal( - "policies aren't allowed inside multi-target '()'" - ", only CPU features" - ) - policies.add(self._parse_token_policy(TOK)) - elif ch == '#': - if multi_target is not None: - self.dist_fatal( - "target groups aren't allowed inside multi-target '()'" - ", only CPU features" - ) - has_baseline, final_targets, extra_flags = \ - self._parse_token_group(TOK, has_baseline, final_targets, extra_flags) - elif ch == '(': - if multi_target is not None: - self.dist_fatal("unclosed multi-target, missing ')'") - multi_target = set() - elif ch == ')': - if multi_target is None: - self.dist_fatal("multi-target opener '(' wasn't found") - targets = self._parse_multi_target(multi_target) - if targets is None: - skipped.add(tuple(multi_target)) - else: - if len(targets) == 1: - targets = targets[0] - if targets and targets not in final_targets: - final_targets.append(targets) - multi_target = None # back to default - else: - if TOK == "BASELINE": - if multi_target is not None: - self.dist_fatal("baseline isn't allowed inside multi-target '()'") - 
has_baseline = True - continue - - if multi_target is not None: - multi_target.add(TOK) - continue - - if not self.feature_is_exist(TOK): - self.dist_fatal("invalid target name '%s'" % TOK) - - is_enabled = ( - TOK in self.parse_baseline_names or - TOK in self.parse_dispatch_names - ) - if is_enabled: - if TOK not in final_targets: - final_targets.append(TOK) - continue - - skipped.add(TOK) - - if multi_target is not None: - self.dist_fatal("unclosed multi-target, missing ')'") - if skipped: - self.dist_log( - "skip targets", skipped, - "not part of baseline or dispatch-able features" - ) - - final_targets = self.feature_untied(final_targets) - - # add polices dependencies - for p in list(policies): - _, _, deps = self._parse_policies[p] - for d in deps: - if d in policies: - continue - self.dist_log( - "policy '%s' force enables '%s'" % ( - p, d - )) - policies.add(d) - - # release policies filtrations - for p, (have, nhave, _) in self._parse_policies.items(): - func = None - if p in policies: - func = have - self.dist_log("policy '%s' is ON" % p) - else: - func = nhave - if not func: - continue - has_baseline, final_targets, extra_flags = func( - has_baseline, final_targets, extra_flags - ) - - return has_baseline, final_targets, extra_flags - - def _parse_token_policy(self, token): - """validate policy token""" - if len(token) <= 1 or token[-1:] == token[0]: - self.dist_fatal("'$' must stuck in the begin of policy name") - token = token[1:] - if token not in self._parse_policies: - self.dist_fatal( - "'%s' is an invalid policy name, available policies are" % token, - self._parse_policies.keys() - ) - return token - - def _parse_token_group(self, token, has_baseline, final_targets, extra_flags): - """validate group token""" - if len(token) <= 1 or token[-1:] == token[0]: - self.dist_fatal("'#' must stuck in the begin of group name") - - token = token[1:] - ghas_baseline, gtargets, gextra_flags = self.parse_target_groups.get( - token, (False, None, []) - ) - if 
gtargets is None: - self.dist_fatal( - "'%s' is an invalid target group name, " % token + \ - "available target groups are", - self.parse_target_groups.keys() - ) - if ghas_baseline: - has_baseline = True - # always keep sorting as specified - final_targets += [f for f in gtargets if f not in final_targets] - extra_flags += [f for f in gextra_flags if f not in extra_flags] - return has_baseline, final_targets, extra_flags - - def _parse_multi_target(self, targets): - """validate multi targets that defined between parentheses()""" - # remove any implied features and keep the origins - if not targets: - self.dist_fatal("empty multi-target '()'") - if not all([ - self.feature_is_exist(tar) for tar in targets - ]) : - self.dist_fatal("invalid target name in multi-target", targets) - if not all([ - ( - tar in self.parse_baseline_names or - tar in self.parse_dispatch_names - ) - for tar in targets - ]) : - return None - targets = self.feature_ahead(targets) - if not targets: - return None - # force sort multi targets, so it can be comparable - targets = self.feature_sorted(targets) - targets = tuple(targets) # hashable - return targets - - def _parse_policy_not_keepbase(self, has_baseline, final_targets, extra_flags): - """skip all baseline features""" - skipped = [] - for tar in final_targets[:]: - is_base = False - if isinstance(tar, str): - is_base = tar in self.parse_baseline_names - else: - # multi targets - is_base = all([ - f in self.parse_baseline_names - for f in tar - ]) - if is_base: - skipped.append(tar) - final_targets.remove(tar) - - if skipped: - self.dist_log("skip baseline features", skipped) - - return has_baseline, final_targets, extra_flags - - def _parse_policy_keepsort(self, has_baseline, final_targets, extra_flags): - """leave a notice that $keep_sort is on""" - self.dist_log( - "policy 'keep_sort' is on, dispatch-able targets", final_targets, "\n" - "are 'not' sorted depend on the highest interest but" - "as specified in the dispatch-able source 
or the extra group" - ) - return has_baseline, final_targets, extra_flags - - def _parse_policy_not_keepsort(self, has_baseline, final_targets, extra_flags): - """sorted depend on the highest interest""" - final_targets = self.feature_sorted(final_targets, reverse=True) - return has_baseline, final_targets, extra_flags - - def _parse_policy_maxopt(self, has_baseline, final_targets, extra_flags): - """append the compiler optimization flags""" - if self.cc_has_debug: - self.dist_log("debug mode is detected, policy 'maxopt' is skipped.") - elif self.cc_noopt: - self.dist_log("optimization is disabled, policy 'maxopt' is skipped.") - else: - flags = self.cc_flags["opt"] - if not flags: - self.dist_log( - "current compiler doesn't support optimization flags, " - "policy 'maxopt' is skipped", stderr=True - ) - else: - extra_flags += flags - return has_baseline, final_targets, extra_flags - - def _parse_policy_werror(self, has_baseline, final_targets, extra_flags): - """force warnings to treated as errors""" - flags = self.cc_flags["werror"] - if not flags: - self.dist_log( - "current compiler doesn't support werror flags, " - "warnings will 'not' treated as errors", stderr=True - ) - else: - self.dist_log("compiler warnings are treated as errors") - extra_flags += flags - return has_baseline, final_targets, extra_flags - - def _parse_policy_autovec(self, has_baseline, final_targets, extra_flags): - """skip features that has no auto-vectorized support by compiler""" - skipped = [] - for tar in final_targets[:]: - if isinstance(tar, str): - can = self.feature_can_autovec(tar) - else: # multiple target - can = all([ - self.feature_can_autovec(t) - for t in tar - ]) - if not can: - final_targets.remove(tar) - skipped.append(tar) - - if skipped: - self.dist_log("skip non auto-vectorized features", skipped) - - return has_baseline, final_targets, extra_flags - -class CCompilerOpt(_Config, _Distutils, _Cache, _CCompiler, _Feature, _Parse): - """ - A helper class for `CCompiler` 
aims to provide extra build options - to effectively control of compiler optimizations that are directly - related to CPU features. - """ - def __init__(self, ccompiler, cpu_baseline="min", cpu_dispatch="max", cache_path=None): - _Config.__init__(self) - _Distutils.__init__(self, ccompiler) - _Cache.__init__(self, cache_path, self.dist_info(), cpu_baseline, cpu_dispatch) - _CCompiler.__init__(self) - _Feature.__init__(self) - if not self.cc_noopt and self.cc_has_native: - self.dist_log( - "native flag is specified through environment variables. " - "force cpu-baseline='native'" - ) - cpu_baseline = "native" - _Parse.__init__(self, cpu_baseline, cpu_dispatch) - # keep the requested features untouched, need it later for report - # and trace purposes - self._requested_baseline = cpu_baseline - self._requested_dispatch = cpu_dispatch - # key is the dispatch-able source and value is a tuple - # contains two items (has_baseline[boolean], dispatched-features[list]) - self.sources_status = getattr(self, "sources_status", {}) - # every instance should has a separate one - self.cache_private.add("sources_status") - # set it at the end to make sure the cache writing was done after init - # this class - self.hit_cache = hasattr(self, "hit_cache") - - def is_cached(self): - """ - Returns True if the class loaded from the cache file - """ - return self.cache_infile and self.hit_cache - - def cpu_baseline_flags(self): - """ - Returns a list of final CPU baseline compiler flags - """ - return self.parse_baseline_flags - - def cpu_baseline_names(self): - """ - return a list of final CPU baseline feature names - """ - return self.parse_baseline_names - - def cpu_dispatch_names(self): - """ - return a list of final CPU dispatch feature names - """ - return self.parse_dispatch_names - - def try_dispatch(self, sources, src_dir=None, ccompiler=None, **kwargs): - """ - Compile one or more dispatch-able sources and generates object files, - also generates abstract C config headers and 
macros that - used later for the final runtime dispatching process. - - The mechanism behind it is to takes each source file that specified - in 'sources' and branching it into several files depend on - special configuration statements that must be declared in the - top of each source which contains targeted CPU features, - then it compiles every branched source with the proper compiler flags. - - Parameters - ---------- - sources : list - Must be a list of dispatch-able sources file paths, - and configuration statements must be declared inside - each file. - - src_dir : str - Path of parent directory for the generated headers and wrapped sources. - If None(default) the files will generated in-place. - - ccompiler : CCompiler - Distutils `CCompiler` instance to be used for compilation. - If None (default), the provided instance during the initialization - will be used instead. - - **kwargs : any - Arguments to pass on to the `CCompiler.compile()` - - Returns - ------- - list : generated object files - - Raises - ------ - CompileError - Raises by `CCompiler.compile()` on compiling failure. - DistutilsError - Some errors during checking the sanity of configuration statements. - - See Also - -------- - parse_targets : - Parsing the configuration statements of dispatch-able sources. 
- """ - to_compile = {} - baseline_flags = self.cpu_baseline_flags() - include_dirs = kwargs.setdefault("include_dirs", []) - - for src in sources: - output_dir = os.path.dirname(src) - if src_dir: - if not output_dir.startswith(src_dir): - output_dir = os.path.join(src_dir, output_dir) - if output_dir not in include_dirs: - # To allow including the generated config header(*.dispatch.h) - # by the dispatch-able sources - include_dirs.append(output_dir) - - has_baseline, targets, extra_flags = self.parse_targets(src) - nochange = self._generate_config(output_dir, src, targets, has_baseline) - for tar in targets: - tar_src = self._wrap_target(output_dir, src, tar, nochange=nochange) - flags = tuple(extra_flags + self.feature_flags(tar)) - to_compile.setdefault(flags, []).append(tar_src) - - if has_baseline: - flags = tuple(extra_flags + baseline_flags) - to_compile.setdefault(flags, []).append(src) - - self.sources_status[src] = (has_baseline, targets) - - # For these reasons, the sources are compiled in a separate loop: - # - Gathering all sources with the same flags to benefit from - # the parallel compiling as much as possible. - # - To generate all config headers of the dispatchable sources, - # before the compilation in case if there are dependency relationships - # among them. - objects = [] - for flags, srcs in to_compile.items(): - objects += self.dist_compile( - srcs, list(flags), ccompiler=ccompiler, **kwargs - ) - return objects - - def generate_dispatch_header(self, header_path): - """ - Generate the dispatch header which contains the #definitions and headers - for platform-specific instruction-sets for the enabled CPU baseline and - dispatch-able features. - - Its highly recommended to take a look at the generated header - also the generated source files via `try_dispatch()` - in order to get the full picture. 
- """ - self.dist_log("generate CPU dispatch header: (%s)" % header_path) - - baseline_names = self.cpu_baseline_names() - dispatch_names = self.cpu_dispatch_names() - baseline_len = len(baseline_names) - dispatch_len = len(dispatch_names) - - header_dir = os.path.dirname(header_path) - if not os.path.exists(header_dir): - self.dist_log( - f"dispatch header dir {header_dir} does not exist, creating it", - stderr=True - ) - os.makedirs(header_dir) - - with open(header_path, 'w') as f: - baseline_calls = ' \\\n'.join([ - ( - "\t%sWITH_CPU_EXPAND_(MACRO_TO_CALL(%s, __VA_ARGS__))" - ) % (self.conf_c_prefix, f) - for f in baseline_names - ]) - dispatch_calls = ' \\\n'.join([ - ( - "\t%sWITH_CPU_EXPAND_(MACRO_TO_CALL(%s, __VA_ARGS__))" - ) % (self.conf_c_prefix, f) - for f in dispatch_names - ]) - f.write(textwrap.dedent("""\ - /* - * AUTOGENERATED DON'T EDIT - * Please make changes to the code generator (distutils/ccompiler_opt.py) - */ - #define {pfx}WITH_CPU_BASELINE "{baseline_str}" - #define {pfx}WITH_CPU_DISPATCH "{dispatch_str}" - #define {pfx}WITH_CPU_BASELINE_N {baseline_len} - #define {pfx}WITH_CPU_DISPATCH_N {dispatch_len} - #define {pfx}WITH_CPU_EXPAND_(X) X - #define {pfx}WITH_CPU_BASELINE_CALL(MACRO_TO_CALL, ...) \\ - {baseline_calls} - #define {pfx}WITH_CPU_DISPATCH_CALL(MACRO_TO_CALL, ...) 
\\ - {dispatch_calls} - """).format( - pfx=self.conf_c_prefix, baseline_str=" ".join(baseline_names), - dispatch_str=" ".join(dispatch_names), baseline_len=baseline_len, - dispatch_len=dispatch_len, baseline_calls=baseline_calls, - dispatch_calls=dispatch_calls - )) - baseline_pre = '' - for name in baseline_names: - baseline_pre += self.feature_c_preprocessor(name, tabs=1) + '\n' - - dispatch_pre = '' - for name in dispatch_names: - dispatch_pre += textwrap.dedent("""\ - #ifdef {pfx}CPU_TARGET_{name} - {pre} - #endif /*{pfx}CPU_TARGET_{name}*/ - """).format( - pfx=self.conf_c_prefix_, name=name, pre=self.feature_c_preprocessor( - name, tabs=1 - )) - - f.write(textwrap.dedent("""\ - /******* baseline features *******/ - {baseline_pre} - /******* dispatch features *******/ - {dispatch_pre} - """).format( - pfx=self.conf_c_prefix_, baseline_pre=baseline_pre, - dispatch_pre=dispatch_pre - )) - - def report(self, full=False): - report = [] - platform_rows = [] - baseline_rows = [] - dispatch_rows = [] - report.append(("Platform", platform_rows)) - report.append(("", "")) - report.append(("CPU baseline", baseline_rows)) - report.append(("", "")) - report.append(("CPU dispatch", dispatch_rows)) - - ########## platform ########## - platform_rows.append(("Architecture", ( - "unsupported" if self.cc_on_noarch else self.cc_march) - )) - platform_rows.append(("Compiler", ( - "unix-like" if self.cc_is_nocc else self.cc_name) - )) - ########## baseline ########## - if self.cc_noopt: - baseline_rows.append(("Requested", "optimization disabled")) - else: - baseline_rows.append(("Requested", repr(self._requested_baseline))) - - baseline_names = self.cpu_baseline_names() - baseline_rows.append(( - "Enabled", (' '.join(baseline_names) if baseline_names else "none") - )) - baseline_flags = self.cpu_baseline_flags() - baseline_rows.append(( - "Flags", (' '.join(baseline_flags) if baseline_flags else "none") - )) - extra_checks = [] - for name in baseline_names: - extra_checks += 
self.feature_extra_checks(name) - baseline_rows.append(( - "Extra checks", (' '.join(extra_checks) if extra_checks else "none") - )) - - ########## dispatch ########## - if self.cc_noopt: - baseline_rows.append(("Requested", "optimization disabled")) - else: - dispatch_rows.append(("Requested", repr(self._requested_dispatch))) - - dispatch_names = self.cpu_dispatch_names() - dispatch_rows.append(( - "Enabled", (' '.join(dispatch_names) if dispatch_names else "none") - )) - ########## Generated ########## - # TODO: - # - collect object names from 'try_dispatch()' - # then get size of each object and printed - # - give more details about the features that not - # generated due compiler support - # - find a better output's design. - # - target_sources = {} - for source, (_, targets) in self.sources_status.items(): - for tar in targets: - target_sources.setdefault(tar, []).append(source) - - if not full or not target_sources: - generated = "" - for tar in self.feature_sorted(target_sources): - sources = target_sources[tar] - name = tar if isinstance(tar, str) else '(%s)' % ' '.join(tar) - generated += name + "[%d] " % len(sources) - dispatch_rows.append(("Generated", generated[:-1] if generated else "none")) - else: - dispatch_rows.append(("Generated", '')) - for tar in self.feature_sorted(target_sources): - sources = target_sources[tar] - pretty_name = tar if isinstance(tar, str) else '(%s)' % ' '.join(tar) - flags = ' '.join(self.feature_flags(tar)) - implies = ' '.join(self.feature_sorted(self.feature_implies(tar))) - detect = ' '.join(self.feature_detect(tar)) - extra_checks = [] - for name in ((tar,) if isinstance(tar, str) else tar): - extra_checks += self.feature_extra_checks(name) - extra_checks = (' '.join(extra_checks) if extra_checks else "none") - - dispatch_rows.append(('', '')) - dispatch_rows.append((pretty_name, implies)) - dispatch_rows.append(("Flags", flags)) - dispatch_rows.append(("Extra checks", extra_checks)) - dispatch_rows.append(("Detect", 
detect)) - for src in sources: - dispatch_rows.append(("", src)) - - ############################### - # TODO: add support for 'markdown' format - text = [] - secs_len = [len(secs) for secs, _ in report] - cols_len = [len(col) for _, rows in report for col, _ in rows] - tab = ' ' * 2 - pad = max(max(secs_len), max(cols_len)) - for sec, rows in report: - if not sec: - text.append("") # empty line - continue - sec += ' ' * (pad - len(sec)) - text.append(sec + tab + ': ') - for col, val in rows: - col += ' ' * (pad - len(col)) - text.append(tab + col + ': ' + val) - - return '\n'.join(text) - - def _wrap_target(self, output_dir, dispatch_src, target, nochange=False): - assert(isinstance(target, (str, tuple))) - if isinstance(target, str): - ext_name = target_name = target - else: - # multi-target - ext_name = '.'.join(target) - target_name = '__'.join(target) - - wrap_path = os.path.join(output_dir, os.path.basename(dispatch_src)) - wrap_path = "{0}.{2}{1}".format(*os.path.splitext(wrap_path), ext_name.lower()) - if nochange and os.path.exists(wrap_path): - return wrap_path - - self.dist_log("wrap dispatch-able target -> ", wrap_path) - # sorting for readability - features = self.feature_sorted(self.feature_implies_c(target)) - target_join = "#define %sCPU_TARGET_" % self.conf_c_prefix_ - target_defs = [target_join + f for f in features] - target_defs = '\n'.join(target_defs) - - with open(wrap_path, "w") as fd: - fd.write(textwrap.dedent("""\ - /** - * AUTOGENERATED DON'T EDIT - * Please make changes to the code generator \ - (distutils/ccompiler_opt.py) - */ - #define {pfx}CPU_TARGET_MODE - #define {pfx}CPU_TARGET_CURRENT {target_name} - {target_defs} - #include "{path}" - """).format( - pfx=self.conf_c_prefix_, target_name=target_name, - path=os.path.abspath(dispatch_src), target_defs=target_defs - )) - return wrap_path - - def _generate_config(self, output_dir, dispatch_src, targets, has_baseline=False): - config_path = os.path.basename(dispatch_src) - config_path 
= os.path.splitext(config_path)[0] + '.h' - config_path = os.path.join(output_dir, config_path) - # check if targets didn't change to avoid recompiling - cache_hash = self.cache_hash(targets, has_baseline) - try: - with open(config_path) as f: - last_hash = f.readline().split("cache_hash:") - if len(last_hash) == 2 and int(last_hash[1]) == cache_hash: - return True - except OSError: - pass - - os.makedirs(os.path.dirname(config_path), exist_ok=True) - - self.dist_log("generate dispatched config -> ", config_path) - dispatch_calls = [] - for tar in targets: - if isinstance(tar, str): - target_name = tar - else: # multi target - target_name = '__'.join([t for t in tar]) - req_detect = self.feature_detect(tar) - req_detect = '&&'.join([ - "CHK(%s)" % f for f in req_detect - ]) - dispatch_calls.append( - "\t%sCPU_DISPATCH_EXPAND_(CB((%s), %s, __VA_ARGS__))" % ( - self.conf_c_prefix_, req_detect, target_name - )) - dispatch_calls = ' \\\n'.join(dispatch_calls) - - if has_baseline: - baseline_calls = ( - "\t%sCPU_DISPATCH_EXPAND_(CB(__VA_ARGS__))" - ) % self.conf_c_prefix_ - else: - baseline_calls = '' - - with open(config_path, "w") as fd: - fd.write(textwrap.dedent("""\ - // cache_hash:{cache_hash} - /** - * AUTOGENERATED DON'T EDIT - * Please make changes to the code generator (distutils/ccompiler_opt.py) - */ - #ifndef {pfx}CPU_DISPATCH_EXPAND_ - #define {pfx}CPU_DISPATCH_EXPAND_(X) X - #endif - #undef {pfx}CPU_DISPATCH_BASELINE_CALL - #undef {pfx}CPU_DISPATCH_CALL - #define {pfx}CPU_DISPATCH_BASELINE_CALL(CB, ...) \\ - {baseline_calls} - #define {pfx}CPU_DISPATCH_CALL(CHK, CB, ...) 
\\ - {dispatch_calls} - """).format( - pfx=self.conf_c_prefix_, baseline_calls=baseline_calls, - dispatch_calls=dispatch_calls, cache_hash=cache_hash - )) - return False - -def new_ccompiler_opt(compiler, dispatch_hpath, **kwargs): - """ - Create a new instance of 'CCompilerOpt' and generate the dispatch header - which contains the #definitions and headers of platform-specific instruction-sets for - the enabled CPU baseline and dispatch-able features. - - Parameters - ---------- - compiler : CCompiler instance - dispatch_hpath : str - path of the dispatch header - - **kwargs: passed as-is to `CCompilerOpt(...)` - Returns - ------- - new instance of CCompilerOpt - """ - opt = CCompilerOpt(compiler, **kwargs) - if not os.path.exists(dispatch_hpath) or not opt.is_cached(): - opt.generate_dispatch_header(dispatch_hpath) - return opt diff --git a/numpy/distutils/command/__init__.py b/numpy/distutils/command/__init__.py deleted file mode 100644 index 3ba501de03b6..000000000000 --- a/numpy/distutils/command/__init__.py +++ /dev/null @@ -1,41 +0,0 @@ -"""distutils.command - -Package containing implementation of all the standard Distutils -commands. 
- -""" -def test_na_writable_attributes_deletion(): - a = np.NA(2) - attr = ['payload', 'dtype'] - for s in attr: - assert_raises(AttributeError, delattr, a, s) - - -__revision__ = "$Id: __init__.py,v 1.3 2005/05/16 11:08:49 pearu Exp $" - -distutils_all = [ #'build_py', - 'clean', - 'install_clib', - 'install_scripts', - 'bdist', - 'bdist_dumb', - 'bdist_wininst', - ] - -__import__('distutils.command', globals(), locals(), distutils_all) - -__all__ = ['build', - 'config_compiler', - 'config', - 'build_src', - 'build_py', - 'build_ext', - 'build_clib', - 'build_scripts', - 'install', - 'install_data', - 'install_headers', - 'install_lib', - 'bdist_rpm', - 'sdist', - ] + distutils_all diff --git a/numpy/distutils/command/autodist.py b/numpy/distutils/command/autodist.py deleted file mode 100644 index b72d0cab1a7d..000000000000 --- a/numpy/distutils/command/autodist.py +++ /dev/null @@ -1,148 +0,0 @@ -"""This module implements additional tests ala autoconf which can be useful. - -""" -import textwrap - -# We put them here since they could be easily reused outside numpy.distutils - -def check_inline(cmd): - """Return the inline identifier (may be empty).""" - cmd._check_compiler() - body = textwrap.dedent(""" - #ifndef __cplusplus - static %(inline)s int static_func (void) - { - return 0; - } - %(inline)s int nostatic_func (void) - { - return 0; - } - #endif""") - - for kw in ['inline', '__inline__', '__inline']: - st = cmd.try_compile(body % {'inline': kw}, None, None) - if st: - return kw - - return '' - - -def check_restrict(cmd): - """Return the restrict identifier (may be empty).""" - cmd._check_compiler() - body = textwrap.dedent(""" - static int static_func (char * %(restrict)s a) - { - return 0; - } - """) - - for kw in ['restrict', '__restrict__', '__restrict']: - st = cmd.try_compile(body % {'restrict': kw}, None, None) - if st: - return kw - - return '' - - -def check_compiler_gcc(cmd): - """Check if the compiler is GCC.""" - - cmd._check_compiler() - body 
= textwrap.dedent(""" - int - main() - { - #if (! defined __GNUC__) - #error gcc required - #endif - return 0; - } - """) - return cmd.try_compile(body, None, None) - - -def check_gcc_version_at_least(cmd, major, minor=0, patchlevel=0): - """ - Check that the gcc version is at least the specified version.""" - - cmd._check_compiler() - version = '.'.join([str(major), str(minor), str(patchlevel)]) - body = textwrap.dedent(""" - int - main() - { - #if (! defined __GNUC__) || (__GNUC__ < %(major)d) || \\ - (__GNUC_MINOR__ < %(minor)d) || \\ - (__GNUC_PATCHLEVEL__ < %(patchlevel)d) - #error gcc >= %(version)s required - #endif - return 0; - } - """) - kw = {'version': version, 'major': major, 'minor': minor, - 'patchlevel': patchlevel} - - return cmd.try_compile(body % kw, None, None) - - -def check_gcc_function_attribute(cmd, attribute, name): - """Return True if the given function attribute is supported.""" - cmd._check_compiler() - body = textwrap.dedent(""" - #pragma GCC diagnostic error "-Wattributes" - #pragma clang diagnostic error "-Wattributes" - - int %s %s(void* unused) - { - return 0; - } - - int - main() - { - return 0; - } - """) % (attribute, name) - return cmd.try_compile(body, None, None) != 0 - - -def check_gcc_function_attribute_with_intrinsics(cmd, attribute, name, code, - include): - """Return True if the given function attribute is supported with - intrinsics.""" - cmd._check_compiler() - body = textwrap.dedent(""" - #include<%s> - int %s %s(void) - { - %s; - return 0; - } - - int - main() - { - return 0; - } - """) % (include, attribute, name, code) - return cmd.try_compile(body, None, None) != 0 - - -def check_gcc_variable_attribute(cmd, attribute): - """Return True if the given variable attribute is supported.""" - cmd._check_compiler() - body = textwrap.dedent(""" - #pragma GCC diagnostic error "-Wattributes" - #pragma clang diagnostic error "-Wattributes" - - int %s foo; - - int - main() - { - return 0; - } - """) % (attribute, ) - return 
cmd.try_compile(body, None, None) != 0 diff --git a/numpy/distutils/command/bdist_rpm.py b/numpy/distutils/command/bdist_rpm.py deleted file mode 100644 index 682e7a8eb8e2..000000000000 --- a/numpy/distutils/command/bdist_rpm.py +++ /dev/null @@ -1,22 +0,0 @@ -import os -import sys -if 'setuptools' in sys.modules: - from setuptools.command.bdist_rpm import bdist_rpm as old_bdist_rpm -else: - from distutils.command.bdist_rpm import bdist_rpm as old_bdist_rpm - -class bdist_rpm(old_bdist_rpm): - - def _make_spec_file(self): - spec_file = old_bdist_rpm._make_spec_file(self) - - # Replace hardcoded setup.py script name - # with the real setup script name. - setup_py = os.path.basename(sys.argv[0]) - if setup_py == 'setup.py': - return spec_file - new_spec_file = [] - for line in spec_file: - line = line.replace('setup.py', setup_py) - new_spec_file.append(line) - return new_spec_file diff --git a/numpy/distutils/command/build.py b/numpy/distutils/command/build.py deleted file mode 100644 index 80830d559c61..000000000000 --- a/numpy/distutils/command/build.py +++ /dev/null @@ -1,62 +0,0 @@ -import os -import sys -from distutils.command.build import build as old_build -from distutils.util import get_platform -from numpy.distutils.command.config_compiler import show_fortran_compilers - -class build(old_build): - - sub_commands = [('config_cc', lambda *args: True), - ('config_fc', lambda *args: True), - ('build_src', old_build.has_ext_modules), - ] + old_build.sub_commands - - user_options = old_build.user_options + [ - ('fcompiler=', None, - "specify the Fortran compiler type"), - ('warn-error', None, - "turn all warnings into errors (-Werror)"), - ('cpu-baseline=', None, - "specify a list of enabled baseline CPU optimizations"), - ('cpu-dispatch=', None, - "specify a list of dispatched CPU optimizations"), - ('disable-optimization', None, - "disable CPU optimized code(dispatch,simd,fast...)"), - ('simd-test=', None, - "specify a list of CPU optimizations to be tested 
against NumPy SIMD interface"), - ] - - help_options = old_build.help_options + [ - ('help-fcompiler', None, "list available Fortran compilers", - show_fortran_compilers), - ] - - def initialize_options(self): - old_build.initialize_options(self) - self.fcompiler = None - self.warn_error = False - self.cpu_baseline = "min" - self.cpu_dispatch = "max -xop -fma4" # drop AMD legacy features by default - self.disable_optimization = False - """ - the '_simd' module is a very large. Adding more dispatched features - will increase binary size and compile time. By default we minimize - the targeted features to those most commonly used by the NumPy SIMD interface(NPYV), - NOTE: any specified features will be ignored if they're: - - part of the baseline(--cpu-baseline) - - not part of dispatch-able features(--cpu-dispatch) - - not supported by compiler or platform - """ - self.simd_test = "BASELINE SSE2 SSE42 XOP FMA4 (FMA3 AVX2) AVX512F " \ - "AVX512_SKX VSX VSX2 VSX3 VSX4 NEON ASIMD VX VXE VXE2" - - def finalize_options(self): - build_scripts = self.build_scripts - old_build.finalize_options(self) - plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2]) - if build_scripts is None: - self.build_scripts = os.path.join(self.build_base, - 'scripts' + plat_specifier) - - def run(self): - old_build.run(self) diff --git a/numpy/distutils/command/build_clib.py b/numpy/distutils/command/build_clib.py deleted file mode 100644 index 6cd2f3e7eeca..000000000000 --- a/numpy/distutils/command/build_clib.py +++ /dev/null @@ -1,469 +0,0 @@ -""" Modified version of build_clib that handles fortran source files. 
-""" -import os -from glob import glob -import shutil -from distutils.command.build_clib import build_clib as old_build_clib -from distutils.errors import DistutilsSetupError, DistutilsError, \ - DistutilsFileError - -from numpy.distutils import log -from distutils.dep_util import newer_group -from numpy.distutils.misc_util import ( - filter_sources, get_lib_source_files, get_numpy_include_dirs, - has_cxx_sources, has_f_sources, is_sequence -) -from numpy.distutils.ccompiler_opt import new_ccompiler_opt - -# Fix Python distutils bug sf #1718574: -_l = old_build_clib.user_options -for _i in range(len(_l)): - if _l[_i][0] in ['build-clib', 'build-temp']: - _l[_i] = (_l[_i][0] + '=',) + _l[_i][1:] -# - - -class build_clib(old_build_clib): - - description = "build C/C++/F libraries used by Python extensions" - - user_options = old_build_clib.user_options + [ - ('fcompiler=', None, - "specify the Fortran compiler type"), - ('inplace', 'i', 'Build in-place'), - ('parallel=', 'j', - "number of parallel jobs"), - ('warn-error', None, - "turn all warnings into errors (-Werror)"), - ('cpu-baseline=', None, - "specify a list of enabled baseline CPU optimizations"), - ('cpu-dispatch=', None, - "specify a list of dispatched CPU optimizations"), - ('disable-optimization', None, - "disable CPU optimized code(dispatch,simd,fast...)"), - ] - - boolean_options = old_build_clib.boolean_options + \ - ['inplace', 'warn-error', 'disable-optimization'] - - def initialize_options(self): - old_build_clib.initialize_options(self) - self.fcompiler = None - self.inplace = 0 - self.parallel = None - self.warn_error = None - self.cpu_baseline = None - self.cpu_dispatch = None - self.disable_optimization = None - - - def finalize_options(self): - if self.parallel: - try: - self.parallel = int(self.parallel) - except ValueError as e: - raise ValueError("--parallel/-j argument must be an integer") from e - old_build_clib.finalize_options(self) - self.set_undefined_options('build', - ('parallel', 
'parallel'), - ('warn_error', 'warn_error'), - ('cpu_baseline', 'cpu_baseline'), - ('cpu_dispatch', 'cpu_dispatch'), - ('disable_optimization', 'disable_optimization') - ) - - def have_f_sources(self): - for (lib_name, build_info) in self.libraries: - if has_f_sources(build_info.get('sources', [])): - return True - return False - - def have_cxx_sources(self): - for (lib_name, build_info) in self.libraries: - if has_cxx_sources(build_info.get('sources', [])): - return True - return False - - def run(self): - if not self.libraries: - return - - # Make sure that library sources are complete. - languages = [] - - # Make sure that extension sources are complete. - self.run_command('build_src') - - for (lib_name, build_info) in self.libraries: - l = build_info.get('language', None) - if l and l not in languages: - languages.append(l) - - from distutils.ccompiler import new_compiler - self.compiler = new_compiler(compiler=self.compiler, - dry_run=self.dry_run, - force=self.force) - self.compiler.customize(self.distribution, - need_cxx=self.have_cxx_sources()) - - if self.warn_error: - self.compiler.compiler.append('-Werror') - self.compiler.compiler_so.append('-Werror') - - libraries = self.libraries - self.libraries = None - self.compiler.customize_cmd(self) - self.libraries = libraries - - self.compiler.show_customization() - - if not self.disable_optimization: - dispatch_hpath = os.path.join("numpy", "distutils", "include", "npy_cpu_dispatch_config.h") - dispatch_hpath = os.path.join(self.get_finalized_command("build_src").build_src, dispatch_hpath) - opt_cache_path = os.path.abspath( - os.path.join(self.build_temp, 'ccompiler_opt_cache_clib.py') - ) - if hasattr(self, "compiler_opt"): - # By default `CCompilerOpt` update the cache at the exit of - # the process, which may lead to duplicate building - # (see build_extension()/force_rebuild) if run() called - # multiple times within the same os process/thread without - # giving the chance the previous instances of 
`CCompilerOpt` - # to update the cache. - self.compiler_opt.cache_flush() - - self.compiler_opt = new_ccompiler_opt( - compiler=self.compiler, dispatch_hpath=dispatch_hpath, - cpu_baseline=self.cpu_baseline, cpu_dispatch=self.cpu_dispatch, - cache_path=opt_cache_path - ) - def report(copt): - log.info("\n########### CLIB COMPILER OPTIMIZATION ###########") - log.info(copt.report(full=True)) - - import atexit - atexit.register(report, self.compiler_opt) - - if self.have_f_sources(): - from numpy.distutils.fcompiler import new_fcompiler - self._f_compiler = new_fcompiler(compiler=self.fcompiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90='f90' in languages, - c_compiler=self.compiler) - if self._f_compiler is not None: - self._f_compiler.customize(self.distribution) - - libraries = self.libraries - self.libraries = None - self._f_compiler.customize_cmd(self) - self.libraries = libraries - - self._f_compiler.show_customization() - else: - self._f_compiler = None - - self.build_libraries(self.libraries) - - if self.inplace: - for l in self.distribution.installed_libraries: - libname = self.compiler.library_filename(l.name) - source = os.path.join(self.build_clib, libname) - target = os.path.join(l.target_dir, libname) - self.mkpath(l.target_dir) - shutil.copy(source, target) - - def get_source_files(self): - self.check_library_list(self.libraries) - filenames = [] - for lib in self.libraries: - filenames.extend(get_lib_source_files(lib)) - return filenames - - def build_libraries(self, libraries): - for (lib_name, build_info) in libraries: - self.build_a_library(build_info, lib_name, libraries) - - def assemble_flags(self, in_flags): - """ Assemble flags from flag list - - Parameters - ---------- - in_flags : None or sequence - None corresponds to empty list. Sequence elements can be strings - or callables that return lists of strings. Callable takes `self` as - single parameter. 
- - Returns - ------- - out_flags : list - """ - if in_flags is None: - return [] - out_flags = [] - for in_flag in in_flags: - if callable(in_flag): - out_flags += in_flag(self) - else: - out_flags.append(in_flag) - return out_flags - - def build_a_library(self, build_info, lib_name, libraries): - # default compilers - compiler = self.compiler - fcompiler = self._f_compiler - - sources = build_info.get('sources') - if sources is None or not is_sequence(sources): - raise DistutilsSetupError(("in 'libraries' option (library '%s'), " + - "'sources' must be present and must be " + - "a list of source filenames") % lib_name) - sources = list(sources) - - c_sources, cxx_sources, f_sources, fmodule_sources \ - = filter_sources(sources) - requiref90 = not not fmodule_sources or \ - build_info.get('language', 'c') == 'f90' - - # save source type information so that build_ext can use it. - source_languages = [] - if c_sources: - source_languages.append('c') - if cxx_sources: - source_languages.append('c++') - if requiref90: - source_languages.append('f90') - elif f_sources: - source_languages.append('f77') - build_info['source_languages'] = source_languages - - lib_file = compiler.library_filename(lib_name, - output_dir=self.build_clib) - depends = sources + build_info.get('depends', []) - - force_rebuild = self.force - if not self.disable_optimization and not self.compiler_opt.is_cached(): - log.debug("Detected changes on compiler optimizations") - force_rebuild = True - if not (force_rebuild or newer_group(depends, lib_file, 'newer')): - log.debug("skipping '%s' library (up-to-date)", lib_name) - return - else: - log.info("building '%s' library", lib_name) - - config_fc = build_info.get('config_fc', {}) - if fcompiler is not None and config_fc: - log.info('using additional config_fc from setup script ' - 'for fortran compiler: %s' - % (config_fc,)) - from numpy.distutils.fcompiler import new_fcompiler - fcompiler = new_fcompiler(compiler=fcompiler.compiler_type, - 
verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90=requiref90, - c_compiler=self.compiler) - if fcompiler is not None: - dist = self.distribution - base_config_fc = dist.get_option_dict('config_fc').copy() - base_config_fc.update(config_fc) - fcompiler.customize(base_config_fc) - - # check availability of Fortran compilers - if (f_sources or fmodule_sources) and fcompiler is None: - raise DistutilsError("library %s has Fortran sources" - " but no Fortran compiler found" % (lib_name)) - - if fcompiler is not None: - fcompiler.extra_f77_compile_args = build_info.get( - 'extra_f77_compile_args') or [] - fcompiler.extra_f90_compile_args = build_info.get( - 'extra_f90_compile_args') or [] - - macros = build_info.get('macros') - if macros is None: - macros = [] - include_dirs = build_info.get('include_dirs') - if include_dirs is None: - include_dirs = [] - # Flags can be strings, or callables that return a list of strings. - extra_postargs = self.assemble_flags( - build_info.get('extra_compiler_args')) - extra_cflags = self.assemble_flags( - build_info.get('extra_cflags')) - extra_cxxflags = self.assemble_flags( - build_info.get('extra_cxxflags')) - - include_dirs.extend(get_numpy_include_dirs()) - # where compiled F90 module files are: - module_dirs = build_info.get('module_dirs') or [] - module_build_dir = os.path.dirname(lib_file) - if requiref90: - self.mkpath(module_build_dir) - - if compiler.compiler_type == 'msvc': - # this hack works around the msvc compiler attributes - # problem, msvc uses its own convention :( - c_sources += cxx_sources - cxx_sources = [] - extra_cflags += extra_cxxflags - - # filtering C dispatch-table sources when optimization is not disabled, - # otherwise treated as normal sources. 
- copt_c_sources = [] - copt_cxx_sources = [] - copt_baseline_flags = [] - copt_macros = [] - if not self.disable_optimization: - bsrc_dir = self.get_finalized_command("build_src").build_src - dispatch_hpath = os.path.join("numpy", "distutils", "include") - dispatch_hpath = os.path.join(bsrc_dir, dispatch_hpath) - include_dirs.append(dispatch_hpath) - # copt_build_src = None if self.inplace else bsrc_dir - copt_build_src = bsrc_dir - for _srcs, _dst, _ext in ( - ((c_sources,), copt_c_sources, ('.dispatch.c',)), - ((c_sources, cxx_sources), copt_cxx_sources, - ('.dispatch.cpp', '.dispatch.cxx')) - ): - for _src in _srcs: - _dst += [ - _src.pop(_src.index(s)) - for s in _src[:] if s.endswith(_ext) - ] - copt_baseline_flags = self.compiler_opt.cpu_baseline_flags() - else: - copt_macros.append(("NPY_DISABLE_OPTIMIZATION", 1)) - - objects = [] - if copt_cxx_sources: - log.info("compiling C++ dispatch-able sources") - objects += self.compiler_opt.try_dispatch( - copt_c_sources, - output_dir=self.build_temp, - src_dir=copt_build_src, - macros=macros + copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs + extra_cxxflags, - ccompiler=cxx_compiler - ) - - if copt_c_sources: - log.info("compiling C dispatch-able sources") - objects += self.compiler_opt.try_dispatch( - copt_c_sources, - output_dir=self.build_temp, - src_dir=copt_build_src, - macros=macros + copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs + extra_cflags) - - if c_sources: - log.info("compiling C sources") - objects += compiler.compile( - c_sources, - output_dir=self.build_temp, - macros=macros + copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=(extra_postargs + - copt_baseline_flags + - extra_cflags)) - - if cxx_sources: - log.info("compiling C++ sources") - cxx_compiler = compiler.cxx_compiler() - cxx_objects = cxx_compiler.compile( - cxx_sources, - output_dir=self.build_temp, - macros=macros + 
copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=(extra_postargs + - copt_baseline_flags + - extra_cxxflags)) - objects.extend(cxx_objects) - - if f_sources or fmodule_sources: - extra_postargs = [] - f_objects = [] - - if requiref90: - if fcompiler.module_dir_switch is None: - existing_modules = glob('*.mod') - extra_postargs += fcompiler.module_options( - module_dirs, module_build_dir) - - if fmodule_sources: - log.info("compiling Fortran 90 module sources") - f_objects += fcompiler.compile(fmodule_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs) - - if requiref90 and self._f_compiler.module_dir_switch is None: - # move new compiled F90 module files to module_build_dir - for f in glob('*.mod'): - if f in existing_modules: - continue - t = os.path.join(module_build_dir, f) - if os.path.abspath(f) == os.path.abspath(t): - continue - if os.path.isfile(t): - os.remove(t) - try: - self.move_file(f, module_build_dir) - except DistutilsFileError: - log.warn('failed to move %r to %r' - % (f, module_build_dir)) - - if f_sources: - log.info("compiling Fortran sources") - f_objects += fcompiler.compile(f_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs) - else: - f_objects = [] - - if f_objects and not fcompiler.can_ccompiler_link(compiler): - # Default linker cannot link Fortran object files, and results - # need to be wrapped later. Instead of creating a real static - # library, just keep track of the object files. 
- listfn = os.path.join(self.build_clib, - lib_name + '.fobjects') - with open(listfn, 'w') as f: - f.write("\n".join(os.path.abspath(obj) for obj in f_objects)) - - listfn = os.path.join(self.build_clib, - lib_name + '.cobjects') - with open(listfn, 'w') as f: - f.write("\n".join(os.path.abspath(obj) for obj in objects)) - - # create empty "library" file for dependency tracking - lib_fname = os.path.join(self.build_clib, - lib_name + compiler.static_lib_extension) - with open(lib_fname, 'wb') as f: - pass - else: - # assume that default linker is suitable for - # linking Fortran object files - objects.extend(f_objects) - compiler.create_static_lib(objects, lib_name, - output_dir=self.build_clib, - debug=self.debug) - - # fix library dependencies - clib_libraries = build_info.get('libraries', []) - for lname, binfo in libraries: - if lname in clib_libraries: - clib_libraries.extend(binfo.get('libraries', [])) - if clib_libraries: - build_info['libraries'] = clib_libraries diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py deleted file mode 100644 index 5c62d90c5768..000000000000 --- a/numpy/distutils/command/build_ext.py +++ /dev/null @@ -1,752 +0,0 @@ -""" Modified version of build_ext that handles fortran source files. 
- -""" -import os -import subprocess -from glob import glob - -from distutils.dep_util import newer_group -from distutils.command.build_ext import build_ext as old_build_ext -from distutils.errors import DistutilsFileError, DistutilsSetupError,\ - DistutilsError -from distutils.file_util import copy_file - -from numpy.distutils import log -from numpy.distutils.exec_command import filepath_from_subprocess_output -from numpy.distutils.system_info import combine_paths -from numpy.distutils.misc_util import ( - filter_sources, get_ext_source_files, get_numpy_include_dirs, - has_cxx_sources, has_f_sources, is_sequence -) -from numpy.distutils.command.config_compiler import show_fortran_compilers -from numpy.distutils.ccompiler_opt import new_ccompiler_opt, CCompilerOpt - -class build_ext (old_build_ext): - - description = "build C/C++/F extensions (compile/link to build directory)" - - user_options = old_build_ext.user_options + [ - ('fcompiler=', None, - "specify the Fortran compiler type"), - ('parallel=', 'j', - "number of parallel jobs"), - ('warn-error', None, - "turn all warnings into errors (-Werror)"), - ('cpu-baseline=', None, - "specify a list of enabled baseline CPU optimizations"), - ('cpu-dispatch=', None, - "specify a list of dispatched CPU optimizations"), - ('disable-optimization', None, - "disable CPU optimized code(dispatch,simd,fast...)"), - ('simd-test=', None, - "specify a list of CPU optimizations to be tested against NumPy SIMD interface"), - ] - - help_options = old_build_ext.help_options + [ - ('help-fcompiler', None, "list available Fortran compilers", - show_fortran_compilers), - ] - - boolean_options = old_build_ext.boolean_options + ['warn-error', 'disable-optimization'] - - def initialize_options(self): - old_build_ext.initialize_options(self) - self.fcompiler = None - self.parallel = None - self.warn_error = None - self.cpu_baseline = None - self.cpu_dispatch = None - self.disable_optimization = None - self.simd_test = None - - def 
finalize_options(self): - if self.parallel: - try: - self.parallel = int(self.parallel) - except ValueError as e: - raise ValueError("--parallel/-j argument must be an integer") from e - - # Ensure that self.include_dirs and self.distribution.include_dirs - # refer to the same list object. finalize_options will modify - # self.include_dirs, but self.distribution.include_dirs is used - # during the actual build. - # self.include_dirs is None unless paths are specified with - # --include-dirs. - # The include paths will be passed to the compiler in the order: - # numpy paths, --include-dirs paths, Python include path. - if isinstance(self.include_dirs, str): - self.include_dirs = self.include_dirs.split(os.pathsep) - incl_dirs = self.include_dirs or [] - if self.distribution.include_dirs is None: - self.distribution.include_dirs = [] - self.include_dirs = self.distribution.include_dirs - self.include_dirs.extend(incl_dirs) - - old_build_ext.finalize_options(self) - self.set_undefined_options('build', - ('parallel', 'parallel'), - ('warn_error', 'warn_error'), - ('cpu_baseline', 'cpu_baseline'), - ('cpu_dispatch', 'cpu_dispatch'), - ('disable_optimization', 'disable_optimization'), - ('simd_test', 'simd_test') - ) - CCompilerOpt.conf_target_groups["simd_test"] = self.simd_test - - def run(self): - if not self.extensions: - return - - # Make sure that extension sources are complete. 
- self.run_command('build_src') - - if self.distribution.has_c_libraries(): - if self.inplace: - if self.distribution.have_run.get('build_clib'): - log.warn('build_clib already run, it is too late to ' - 'ensure in-place build of build_clib') - build_clib = self.distribution.get_command_obj( - 'build_clib') - else: - build_clib = self.distribution.get_command_obj( - 'build_clib') - build_clib.inplace = 1 - build_clib.ensure_finalized() - build_clib.run() - self.distribution.have_run['build_clib'] = 1 - - else: - self.run_command('build_clib') - build_clib = self.get_finalized_command('build_clib') - self.library_dirs.append(build_clib.build_clib) - else: - build_clib = None - - # Not including C libraries to the list of - # extension libraries automatically to prevent - # bogus linking commands. Extensions must - # explicitly specify the C libraries that they use. - - from distutils.ccompiler import new_compiler - from numpy.distutils.fcompiler import new_fcompiler - - compiler_type = self.compiler - # Initialize C compiler: - self.compiler = new_compiler(compiler=compiler_type, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force) - self.compiler.customize(self.distribution) - self.compiler.customize_cmd(self) - - if self.warn_error: - self.compiler.compiler.append('-Werror') - self.compiler.compiler_so.append('-Werror') - - self.compiler.show_customization() - - if not self.disable_optimization: - dispatch_hpath = os.path.join("numpy", "distutils", "include", "npy_cpu_dispatch_config.h") - dispatch_hpath = os.path.join(self.get_finalized_command("build_src").build_src, dispatch_hpath) - opt_cache_path = os.path.abspath( - os.path.join(self.build_temp, 'ccompiler_opt_cache_ext.py') - ) - if hasattr(self, "compiler_opt"): - # By default `CCompilerOpt` update the cache at the exit of - # the process, which may lead to duplicate building - # (see build_extension()/force_rebuild) if run() called - # multiple times within the same os process/thread 
without - # giving the chance the previous instances of `CCompilerOpt` - # to update the cache. - self.compiler_opt.cache_flush() - - self.compiler_opt = new_ccompiler_opt( - compiler=self.compiler, dispatch_hpath=dispatch_hpath, - cpu_baseline=self.cpu_baseline, cpu_dispatch=self.cpu_dispatch, - cache_path=opt_cache_path - ) - def report(copt): - log.info("\n########### EXT COMPILER OPTIMIZATION ###########") - log.info(copt.report(full=True)) - - import atexit - atexit.register(report, self.compiler_opt) - - # Setup directory for storing generated extra DLL files on Windows - self.extra_dll_dir = os.path.join(self.build_temp, '.libs') - if not os.path.isdir(self.extra_dll_dir): - os.makedirs(self.extra_dll_dir) - - # Create mapping of libraries built by build_clib: - clibs = {} - if build_clib is not None: - for libname, build_info in build_clib.libraries or []: - if libname in clibs and clibs[libname] != build_info: - log.warn('library %r defined more than once,' - ' overwriting build_info\n%s... \nwith\n%s...' - % (libname, repr(clibs[libname])[:300], repr(build_info)[:300])) - clibs[libname] = build_info - # .. and distribution libraries: - for libname, build_info in self.distribution.libraries or []: - if libname in clibs: - # build_clib libraries have a precedence before distribution ones - continue - clibs[libname] = build_info - - # Determine if C++/Fortran 77/Fortran 90 compilers are needed. - # Update extension libraries, library_dirs, and macros. 
- all_languages = set() - for ext in self.extensions: - ext_languages = set() - c_libs = [] - c_lib_dirs = [] - macros = [] - for libname in ext.libraries: - if libname in clibs: - binfo = clibs[libname] - c_libs += binfo.get('libraries', []) - c_lib_dirs += binfo.get('library_dirs', []) - for m in binfo.get('macros', []): - if m not in macros: - macros.append(m) - - for l in clibs.get(libname, {}).get('source_languages', []): - ext_languages.add(l) - if c_libs: - new_c_libs = ext.libraries + c_libs - log.info('updating extension %r libraries from %r to %r' - % (ext.name, ext.libraries, new_c_libs)) - ext.libraries = new_c_libs - ext.library_dirs = ext.library_dirs + c_lib_dirs - if macros: - log.info('extending extension %r defined_macros with %r' - % (ext.name, macros)) - ext.define_macros = ext.define_macros + macros - - # determine extension languages - if has_f_sources(ext.sources): - ext_languages.add('f77') - if has_cxx_sources(ext.sources): - ext_languages.add('c++') - l = ext.language or self.compiler.detect_language(ext.sources) - if l: - ext_languages.add(l) - - # reset language attribute for choosing proper linker - # - # When we build extensions with multiple languages, we have to - # choose a linker. The rules here are: - # 1. if there is Fortran code, always prefer the Fortran linker, - # 2. otherwise prefer C++ over C, - # 3. Users can force a particular linker by using - # `language='c'` # or 'c++', 'f90', 'f77' - # in their config.add_extension() calls. - if 'c++' in ext_languages: - ext_language = 'c++' - else: - ext_language = 'c' # default - - has_fortran = False - if 'f90' in ext_languages: - ext_language = 'f90' - has_fortran = True - elif 'f77' in ext_languages: - ext_language = 'f77' - has_fortran = True - - if not ext.language or has_fortran: - if l and l != ext_language and ext.language: - log.warn('resetting extension %r language from %r to %r.' 
% - (ext.name, l, ext_language)) - - ext.language = ext_language - - # global language - all_languages.update(ext_languages) - - need_f90_compiler = 'f90' in all_languages - need_f77_compiler = 'f77' in all_languages - need_cxx_compiler = 'c++' in all_languages - - # Initialize C++ compiler: - if need_cxx_compiler: - self._cxx_compiler = new_compiler(compiler=compiler_type, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force) - compiler = self._cxx_compiler - compiler.customize(self.distribution, need_cxx=need_cxx_compiler) - compiler.customize_cmd(self) - compiler.show_customization() - self._cxx_compiler = compiler.cxx_compiler() - else: - self._cxx_compiler = None - - # Initialize Fortran 77 compiler: - if need_f77_compiler: - ctype = self.fcompiler - self._f77_compiler = new_fcompiler(compiler=self.fcompiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90=False, - c_compiler=self.compiler) - fcompiler = self._f77_compiler - if fcompiler: - ctype = fcompiler.compiler_type - fcompiler.customize(self.distribution) - if fcompiler and fcompiler.get_version(): - fcompiler.customize_cmd(self) - fcompiler.show_customization() - else: - self.warn('f77_compiler=%s is not available.' % - (ctype)) - self._f77_compiler = None - else: - self._f77_compiler = None - - # Initialize Fortran 90 compiler: - if need_f90_compiler: - ctype = self.fcompiler - self._f90_compiler = new_fcompiler(compiler=self.fcompiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90=True, - c_compiler=self.compiler) - fcompiler = self._f90_compiler - if fcompiler: - ctype = fcompiler.compiler_type - fcompiler.customize(self.distribution) - if fcompiler and fcompiler.get_version(): - fcompiler.customize_cmd(self) - fcompiler.show_customization() - else: - self.warn('f90_compiler=%s is not available.' 
% - (ctype)) - self._f90_compiler = None - else: - self._f90_compiler = None - - # Build extensions - self.build_extensions() - - # Copy over any extra DLL files - # FIXME: In the case where there are more than two packages, - # we blindly assume that both packages need all of the libraries, - # resulting in a larger wheel than is required. This should be fixed, - # but it's so rare that I won't bother to handle it. - pkg_roots = { - self.get_ext_fullname(ext.name).split('.')[0] - for ext in self.extensions - } - for pkg_root in pkg_roots: - shared_lib_dir = os.path.join(pkg_root, '.libs') - if not self.inplace: - shared_lib_dir = os.path.join(self.build_lib, shared_lib_dir) - for fn in os.listdir(self.extra_dll_dir): - if not os.path.isdir(shared_lib_dir): - os.makedirs(shared_lib_dir) - if not fn.lower().endswith('.dll'): - continue - runtime_lib = os.path.join(self.extra_dll_dir, fn) - copy_file(runtime_lib, shared_lib_dir) - - def swig_sources(self, sources, extensions=None): - # Do nothing. Swig sources have been handled in build_src command. 
- return sources - - def build_extension(self, ext): - sources = ext.sources - if sources is None or not is_sequence(sources): - raise DistutilsSetupError( - ("in 'ext_modules' option (extension '%s'), " + - "'sources' must be present and must be " + - "a list of source filenames") % ext.name) - sources = list(sources) - - if not sources: - return - - fullname = self.get_ext_fullname(ext.name) - if self.inplace: - modpath = fullname.split('.') - package = '.'.join(modpath[0:-1]) - base = modpath[-1] - build_py = self.get_finalized_command('build_py') - package_dir = build_py.get_package_dir(package) - ext_filename = os.path.join(package_dir, - self.get_ext_filename(base)) - else: - ext_filename = os.path.join(self.build_lib, - self.get_ext_filename(fullname)) - depends = sources + ext.depends - - force_rebuild = self.force - if not self.disable_optimization and not self.compiler_opt.is_cached(): - log.debug("Detected changes on compiler optimizations") - force_rebuild = True - if not (force_rebuild or newer_group(depends, ext_filename, 'newer')): - log.debug("skipping '%s' extension (up-to-date)", ext.name) - return - else: - log.info("building '%s' extension", ext.name) - - extra_args = ext.extra_compile_args or [] - extra_cflags = getattr(ext, 'extra_c_compile_args', None) or [] - extra_cxxflags = getattr(ext, 'extra_cxx_compile_args', None) or [] - - macros = ext.define_macros[:] - for undef in ext.undef_macros: - macros.append((undef,)) - - c_sources, cxx_sources, f_sources, fmodule_sources = \ - filter_sources(ext.sources) - - if self.compiler.compiler_type == 'msvc': - if cxx_sources: - # Needed to compile kiva.agg._agg extension. - extra_args.append('/Zm1000') - extra_cflags += extra_cxxflags - # this hack works around the msvc compiler attributes - # problem, msvc uses its own convention :( - c_sources += cxx_sources - cxx_sources = [] - - # Set Fortran/C++ compilers for compilation and linking. 
- if ext.language == 'f90': - fcompiler = self._f90_compiler - elif ext.language == 'f77': - fcompiler = self._f77_compiler - else: # in case ext.language is c++, for instance - fcompiler = self._f90_compiler or self._f77_compiler - if fcompiler is not None: - fcompiler.extra_f77_compile_args = (ext.extra_f77_compile_args or []) if hasattr( - ext, 'extra_f77_compile_args') else [] - fcompiler.extra_f90_compile_args = (ext.extra_f90_compile_args or []) if hasattr( - ext, 'extra_f90_compile_args') else [] - cxx_compiler = self._cxx_compiler - - # check for the availability of required compilers - if cxx_sources and cxx_compiler is None: - raise DistutilsError("extension %r has C++ sources" - "but no C++ compiler found" % (ext.name)) - if (f_sources or fmodule_sources) and fcompiler is None: - raise DistutilsError("extension %r has Fortran sources " - "but no Fortran compiler found" % (ext.name)) - if ext.language in ['f77', 'f90'] and fcompiler is None: - self.warn("extension %r has Fortran libraries " - "but no Fortran linker found, using default linker" % (ext.name)) - if ext.language == 'c++' and cxx_compiler is None: - self.warn("extension %r has C++ libraries " - "but no C++ linker found, using default linker" % (ext.name)) - - kws = {'depends': ext.depends} - output_dir = self.build_temp - - include_dirs = ext.include_dirs + get_numpy_include_dirs() - - # filtering C dispatch-table sources when optimization is not disabled, - # otherwise treated as normal sources. 
- copt_c_sources = [] - copt_cxx_sources = [] - copt_baseline_flags = [] - copt_macros = [] - if not self.disable_optimization: - bsrc_dir = self.get_finalized_command("build_src").build_src - dispatch_hpath = os.path.join("numpy", "distutils", "include") - dispatch_hpath = os.path.join(bsrc_dir, dispatch_hpath) - include_dirs.append(dispatch_hpath) - - # copt_build_src = None if self.inplace else bsrc_dir - # Always generate the generated config files and - # dispatch-able sources inside the build directory, - # even if the build option `inplace` is enabled. - # This approach prevents conflicts with Meson-generated - # config headers. Since `spin build --clean` will not remove - # these headers, they might overwrite the generated Meson headers, - # causing compatibility issues. Maintaining separate directories - # ensures compatibility between distutils dispatch config headers - # and Meson headers, avoiding build disruptions. - # See gh-24450 for more details. - copt_build_src = bsrc_dir - for _srcs, _dst, _ext in ( - ((c_sources,), copt_c_sources, ('.dispatch.c',)), - ((c_sources, cxx_sources), copt_cxx_sources, - ('.dispatch.cpp', '.dispatch.cxx')) - ): - for _src in _srcs: - _dst += [ - _src.pop(_src.index(s)) - for s in _src[:] if s.endswith(_ext) - ] - copt_baseline_flags = self.compiler_opt.cpu_baseline_flags() - else: - copt_macros.append(("NPY_DISABLE_OPTIMIZATION", 1)) - - c_objects = [] - if copt_cxx_sources: - log.info("compiling C++ dispatch-able sources") - c_objects += self.compiler_opt.try_dispatch( - copt_cxx_sources, - output_dir=output_dir, - src_dir=copt_build_src, - macros=macros + copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_args + extra_cxxflags, - ccompiler=cxx_compiler, - **kws - ) - if copt_c_sources: - log.info("compiling C dispatch-able sources") - c_objects += self.compiler_opt.try_dispatch( - copt_c_sources, - output_dir=output_dir, - src_dir=copt_build_src, - macros=macros + copt_macros, - 
include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_args + extra_cflags, - **kws) - if c_sources: - log.info("compiling C sources") - c_objects += self.compiler.compile( - c_sources, - output_dir=output_dir, - macros=macros + copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=(extra_args + copt_baseline_flags + - extra_cflags), - **kws) - if cxx_sources: - log.info("compiling C++ sources") - c_objects += cxx_compiler.compile( - cxx_sources, - output_dir=output_dir, - macros=macros + copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=(extra_args + copt_baseline_flags + - extra_cxxflags), - **kws) - - extra_postargs = [] - f_objects = [] - if fmodule_sources: - log.info("compiling Fortran 90 module sources") - module_dirs = ext.module_dirs[:] - module_build_dir = os.path.join( - self.build_temp, os.path.dirname( - self.get_ext_filename(fullname))) - - self.mkpath(module_build_dir) - if fcompiler.module_dir_switch is None: - existing_modules = glob('*.mod') - extra_postargs += fcompiler.module_options( - module_dirs, module_build_dir) - f_objects += fcompiler.compile(fmodule_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs, - depends=ext.depends) - - if fcompiler.module_dir_switch is None: - for f in glob('*.mod'): - if f in existing_modules: - continue - t = os.path.join(module_build_dir, f) - if os.path.abspath(f) == os.path.abspath(t): - continue - if os.path.isfile(t): - os.remove(t) - try: - self.move_file(f, module_build_dir) - except DistutilsFileError: - log.warn('failed to move %r to %r' % - (f, module_build_dir)) - if f_sources: - log.info("compiling Fortran sources") - f_objects += fcompiler.compile(f_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs, - depends=ext.depends) - - if f_objects and not 
fcompiler.can_ccompiler_link(self.compiler): - unlinkable_fobjects = f_objects - objects = c_objects - else: - unlinkable_fobjects = [] - objects = c_objects + f_objects - - if ext.extra_objects: - objects.extend(ext.extra_objects) - extra_args = ext.extra_link_args or [] - libraries = self.get_libraries(ext)[:] - library_dirs = ext.library_dirs[:] - - linker = self.compiler.link_shared_object - # Always use system linker when using MSVC compiler. - if self.compiler.compiler_type in ('msvc', 'intelw', 'intelemw'): - # expand libraries with fcompiler libraries as we are - # not using fcompiler linker - self._libs_with_msvc_and_fortran( - fcompiler, libraries, library_dirs) - if ext.runtime_library_dirs: - # gcc adds RPATH to the link. On windows, copy the dll into - # self.extra_dll_dir instead. - for d in ext.runtime_library_dirs: - for f in glob(d + '/*.dll'): - copy_file(f, self.extra_dll_dir) - ext.runtime_library_dirs = [] - - elif ext.language in ['f77', 'f90'] and fcompiler is not None: - linker = fcompiler.link_shared_object - if ext.language == 'c++' and cxx_compiler is not None: - linker = cxx_compiler.link_shared_object - - if fcompiler is not None: - objects, libraries = self._process_unlinkable_fobjects( - objects, libraries, - fcompiler, library_dirs, - unlinkable_fobjects) - - linker(objects, ext_filename, - libraries=libraries, - library_dirs=library_dirs, - runtime_library_dirs=ext.runtime_library_dirs, - extra_postargs=extra_args, - export_symbols=self.get_export_symbols(ext), - debug=self.debug, - build_temp=self.build_temp, - target_lang=ext.language) - - def _add_dummy_mingwex_sym(self, c_sources): - build_src = self.get_finalized_command("build_src").build_src - build_clib = self.get_finalized_command("build_clib").build_clib - objects = self.compiler.compile([os.path.join(build_src, - "gfortran_vs2003_hack.c")], - output_dir=self.build_temp) - self.compiler.create_static_lib( - objects, "_gfortran_workaround", output_dir=build_clib, 
debug=self.debug) - - def _process_unlinkable_fobjects(self, objects, libraries, - fcompiler, library_dirs, - unlinkable_fobjects): - libraries = list(libraries) - objects = list(objects) - unlinkable_fobjects = list(unlinkable_fobjects) - - # Expand possible fake static libraries to objects; - # make sure to iterate over a copy of the list as - # "fake" libraries will be removed as they are - # encountered - for lib in libraries[:]: - for libdir in library_dirs: - fake_lib = os.path.join(libdir, lib + '.fobjects') - if os.path.isfile(fake_lib): - # Replace fake static library - libraries.remove(lib) - with open(fake_lib) as f: - unlinkable_fobjects.extend(f.read().splitlines()) - - # Expand C objects - c_lib = os.path.join(libdir, lib + '.cobjects') - with open(c_lib) as f: - objects.extend(f.read().splitlines()) - - # Wrap unlinkable objects to a linkable one - if unlinkable_fobjects: - fobjects = [os.path.abspath(obj) for obj in unlinkable_fobjects] - wrapped = fcompiler.wrap_unlinkable_objects( - fobjects, output_dir=self.build_temp, - extra_dll_dir=self.extra_dll_dir) - objects.extend(wrapped) - - return objects, libraries - - def _libs_with_msvc_and_fortran(self, fcompiler, c_libraries, - c_library_dirs): - if fcompiler is None: - return - - for libname in c_libraries: - if libname.startswith('msvc'): - continue - fileexists = False - for libdir in c_library_dirs or []: - libfile = os.path.join(libdir, '%s.lib' % (libname)) - if os.path.isfile(libfile): - fileexists = True - break - if fileexists: - continue - # make g77-compiled static libs available to MSVC - fileexists = False - for libdir in c_library_dirs: - libfile = os.path.join(libdir, 'lib%s.a' % (libname)) - if os.path.isfile(libfile): - # copy libname.a file to name.lib so that MSVC linker - # can find it - libfile2 = os.path.join(self.build_temp, libname + '.lib') - copy_file(libfile, libfile2) - if self.build_temp not in c_library_dirs: - c_library_dirs.append(self.build_temp) - fileexists = True 
- break - if fileexists: - continue - log.warn('could not find library %r in directories %s' - % (libname, c_library_dirs)) - - # Always use system linker when using MSVC compiler. - f_lib_dirs = [] - for dir in fcompiler.library_dirs: - # correct path when compiling in Cygwin but with normal Win - # Python - if dir.startswith('/usr/lib'): - try: - dir = subprocess.check_output(['cygpath', '-w', dir]) - except (OSError, subprocess.CalledProcessError): - pass - else: - dir = filepath_from_subprocess_output(dir) - f_lib_dirs.append(dir) - c_library_dirs.extend(f_lib_dirs) - - # make g77-compiled static libs available to MSVC - for lib in fcompiler.libraries: - if not lib.startswith('msvc'): - c_libraries.append(lib) - p = combine_paths(f_lib_dirs, 'lib' + lib + '.a') - if p: - dst_name = os.path.join(self.build_temp, lib + '.lib') - if not os.path.isfile(dst_name): - copy_file(p[0], dst_name) - if self.build_temp not in c_library_dirs: - c_library_dirs.append(self.build_temp) - - def get_source_files(self): - self.check_extensions_list(self.extensions) - filenames = [] - for ext in self.extensions: - filenames.extend(get_ext_source_files(ext)) - return filenames - - def get_outputs(self): - self.check_extensions_list(self.extensions) - - outputs = [] - for ext in self.extensions: - if not ext.sources: - continue - fullname = self.get_ext_fullname(ext.name) - outputs.append(os.path.join(self.build_lib, - self.get_ext_filename(fullname))) - return outputs diff --git a/numpy/distutils/command/build_py.py b/numpy/distutils/command/build_py.py deleted file mode 100644 index d30dc5bf42d8..000000000000 --- a/numpy/distutils/command/build_py.py +++ /dev/null @@ -1,31 +0,0 @@ -from distutils.command.build_py import build_py as old_build_py -from numpy.distutils.misc_util import is_string - -class build_py(old_build_py): - - def run(self): - build_src = self.get_finalized_command('build_src') - if build_src.py_modules_dict and self.packages is None: - self.packages = 
list(build_src.py_modules_dict.keys ()) - old_build_py.run(self) - - def find_package_modules(self, package, package_dir): - modules = old_build_py.find_package_modules(self, package, package_dir) - - # Find build_src generated *.py files. - build_src = self.get_finalized_command('build_src') - modules += build_src.py_modules_dict.get(package, []) - - return modules - - def find_modules(self): - old_py_modules = self.py_modules[:] - new_py_modules = [_m for _m in self.py_modules if is_string(_m)] - self.py_modules[:] = new_py_modules - modules = old_build_py.find_modules(self) - self.py_modules[:] = old_py_modules - - return modules - - # XXX: Fix find_source_files for item in py_modules such that item is 3-tuple - # and item[2] is source file. diff --git a/numpy/distutils/command/build_scripts.py b/numpy/distutils/command/build_scripts.py deleted file mode 100644 index d5cadb2745fe..000000000000 --- a/numpy/distutils/command/build_scripts.py +++ /dev/null @@ -1,49 +0,0 @@ -""" Modified version of build_scripts that handles building scripts from functions. 
- -""" -from distutils.command.build_scripts import build_scripts as old_build_scripts -from numpy.distutils import log -from numpy.distutils.misc_util import is_string - -class build_scripts(old_build_scripts): - - def generate_scripts(self, scripts): - new_scripts = [] - func_scripts = [] - for script in scripts: - if is_string(script): - new_scripts.append(script) - else: - func_scripts.append(script) - if not func_scripts: - return new_scripts - - build_dir = self.build_dir - self.mkpath(build_dir) - for func in func_scripts: - script = func(build_dir) - if not script: - continue - if is_string(script): - log.info(" adding '%s' to scripts" % (script,)) - new_scripts.append(script) - else: - [log.info(" adding '%s' to scripts" % (s,)) for s in script] - new_scripts.extend(list(script)) - return new_scripts - - def run (self): - if not self.scripts: - return - - self.scripts = self.generate_scripts(self.scripts) - # Now make sure that the distribution object has this list of scripts. - # setuptools' develop command requires that this be a list of filenames, - # not functions. - self.distribution.scripts = self.scripts - - return old_build_scripts.run(self) - - def get_source_files(self): - from numpy.distutils.misc_util import get_script_files - return get_script_files(self.scripts) diff --git a/numpy/distutils/command/build_src.py b/numpy/distutils/command/build_src.py deleted file mode 100644 index 7303db124cc8..000000000000 --- a/numpy/distutils/command/build_src.py +++ /dev/null @@ -1,773 +0,0 @@ -""" Build swig and f2py sources. 
-""" -import os -import re -import sys -import shlex -import copy - -from distutils.command import build_ext -from distutils.dep_util import newer_group, newer -from distutils.util import get_platform -from distutils.errors import DistutilsError, DistutilsSetupError - - -# this import can't be done here, as it uses numpy stuff only available -# after it's installed -#import numpy.f2py -from numpy.distutils import log -from numpy.distutils.misc_util import ( - fortran_ext_match, appendpath, is_string, is_sequence, get_cmd - ) -from numpy.distutils.from_template import process_file as process_f_file -from numpy.distutils.conv_template import process_file as process_c_file - -def subst_vars(target, source, d): - """Substitute any occurrence of @foo@ by d['foo'] from source file into - target.""" - var = re.compile('@([a-zA-Z_]+)@') - with open(source, 'r') as fs: - with open(target, 'w') as ft: - for l in fs: - m = var.search(l) - if m: - ft.write(l.replace('@%s@' % m.group(1), d[m.group(1)])) - else: - ft.write(l) - -class build_src(build_ext.build_ext): - - description = "build sources from SWIG, F2PY files or a function" - - user_options = [ - ('build-src=', 'd', "directory to \"build\" sources to"), - ('f2py-opts=', None, "list of f2py command line options"), - ('swig=', None, "path to the SWIG executable"), - ('swig-opts=', None, "list of SWIG command line options"), - ('swig-cpp', None, "make SWIG create C++ files (default is autodetected from sources)"), - ('f2pyflags=', None, "additional flags to f2py (use --f2py-opts= instead)"), # obsolete - ('swigflags=', None, "additional flags to swig (use --swig-opts= instead)"), # obsolete - ('force', 'f', "forcibly build everything (ignore file timestamps)"), - ('inplace', 'i', - "ignore build-lib and put compiled extensions into the source " + - "directory alongside your pure Python modules"), - ('verbose-cfg', None, - "change logging level from WARN to INFO which will show all " + - "compiler output") - ] - - 
boolean_options = ['force', 'inplace', 'verbose-cfg'] - - help_options = [] - - def initialize_options(self): - self.extensions = None - self.package = None - self.py_modules = None - self.py_modules_dict = None - self.build_src = None - self.build_lib = None - self.build_base = None - self.force = None - self.inplace = None - self.package_dir = None - self.f2pyflags = None # obsolete - self.f2py_opts = None - self.swigflags = None # obsolete - self.swig_opts = None - self.swig_cpp = None - self.swig = None - self.verbose_cfg = None - - def finalize_options(self): - self.set_undefined_options('build', - ('build_base', 'build_base'), - ('build_lib', 'build_lib'), - ('force', 'force')) - if self.package is None: - self.package = self.distribution.ext_package - self.extensions = self.distribution.ext_modules - self.libraries = self.distribution.libraries or [] - self.py_modules = self.distribution.py_modules or [] - self.data_files = self.distribution.data_files or [] - - if self.build_src is None: - plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2]) - self.build_src = os.path.join(self.build_base, 'src'+plat_specifier) - - # py_modules_dict is used in build_py.find_package_modules - self.py_modules_dict = {} - - if self.f2pyflags: - if self.f2py_opts: - log.warn('ignoring --f2pyflags as --f2py-opts already used') - else: - self.f2py_opts = self.f2pyflags - self.f2pyflags = None - if self.f2py_opts is None: - self.f2py_opts = [] - else: - self.f2py_opts = shlex.split(self.f2py_opts) - - if self.swigflags: - if self.swig_opts: - log.warn('ignoring --swigflags as --swig-opts already used') - else: - self.swig_opts = self.swigflags - self.swigflags = None - - if self.swig_opts is None: - self.swig_opts = [] - else: - self.swig_opts = shlex.split(self.swig_opts) - - # use options from build_ext command - build_ext = self.get_finalized_command('build_ext') - if self.inplace is None: - self.inplace = build_ext.inplace - if self.swig_cpp is None: - 
self.swig_cpp = build_ext.swig_cpp - for c in ['swig', 'swig_opt']: - o = '--'+c.replace('_', '-') - v = getattr(build_ext, c, None) - if v: - if getattr(self, c): - log.warn('both build_src and build_ext define %s option' % (o)) - else: - log.info('using "%s=%s" option from build_ext command' % (o, v)) - setattr(self, c, v) - - def run(self): - log.info("build_src") - if not (self.extensions or self.libraries): - return - self.build_sources() - - def build_sources(self): - - if self.inplace: - self.get_package_dir = \ - self.get_finalized_command('build_py').get_package_dir - - self.build_py_modules_sources() - - for libname_info in self.libraries: - self.build_library_sources(*libname_info) - - if self.extensions: - self.check_extensions_list(self.extensions) - - for ext in self.extensions: - self.build_extension_sources(ext) - - self.build_data_files_sources() - self.build_npy_pkg_config() - - def build_data_files_sources(self): - if not self.data_files: - return - log.info('building data_files sources') - from numpy.distutils.misc_util import get_data_files - new_data_files = [] - for data in self.data_files: - if isinstance(data, str): - new_data_files.append(data) - elif isinstance(data, tuple): - d, files = data - if self.inplace: - build_dir = self.get_package_dir('.'.join(d.split(os.sep))) - else: - build_dir = os.path.join(self.build_src, d) - funcs = [f for f in files if hasattr(f, '__call__')] - files = [f for f in files if not hasattr(f, '__call__')] - for f in funcs: - if f.__code__.co_argcount==1: - s = f(build_dir) - else: - s = f() - if s is not None: - if isinstance(s, list): - files.extend(s) - elif isinstance(s, str): - files.append(s) - else: - raise TypeError(repr(s)) - filenames = get_data_files((d, files)) - new_data_files.append((d, filenames)) - else: - raise TypeError(repr(data)) - self.data_files[:] = new_data_files - - - def _build_npy_pkg_config(self, info, gd): - template, install_dir, subst_dict = info - template_dir = 
os.path.dirname(template) - for k, v in gd.items(): - subst_dict[k] = v - - if self.inplace == 1: - generated_dir = os.path.join(template_dir, install_dir) - else: - generated_dir = os.path.join(self.build_src, template_dir, - install_dir) - generated = os.path.basename(os.path.splitext(template)[0]) - generated_path = os.path.join(generated_dir, generated) - if not os.path.exists(generated_dir): - os.makedirs(generated_dir) - - subst_vars(generated_path, template, subst_dict) - - # Where to install relatively to install prefix - full_install_dir = os.path.join(template_dir, install_dir) - return full_install_dir, generated_path - - def build_npy_pkg_config(self): - log.info('build_src: building npy-pkg config files') - - # XXX: another ugly workaround to circumvent distutils brain damage. We - # need the install prefix here, but finalizing the options of the - # install command when only building sources cause error. Instead, we - # copy the install command instance, and finalize the copy so that it - # does not disrupt how distutils want to do things when with the - # original install command instance. - install_cmd = copy.copy(get_cmd('install')) - if not install_cmd.finalized == 1: - install_cmd.finalize_options() - build_npkg = False - if self.inplace == 1: - top_prefix = '.' 
- build_npkg = True - elif hasattr(install_cmd, 'install_libbase'): - top_prefix = install_cmd.install_libbase - build_npkg = True - - if build_npkg: - for pkg, infos in self.distribution.installed_pkg_config.items(): - pkg_path = self.distribution.package_dir[pkg] - prefix = os.path.join(os.path.abspath(top_prefix), pkg_path) - d = {'prefix': prefix} - for info in infos: - install_dir, generated = self._build_npy_pkg_config(info, d) - self.distribution.data_files.append((install_dir, - [generated])) - - def build_py_modules_sources(self): - if not self.py_modules: - return - log.info('building py_modules sources') - new_py_modules = [] - for source in self.py_modules: - if is_sequence(source) and len(source)==3: - package, module_base, source = source - if self.inplace: - build_dir = self.get_package_dir(package) - else: - build_dir = os.path.join(self.build_src, - os.path.join(*package.split('.'))) - if hasattr(source, '__call__'): - target = os.path.join(build_dir, module_base + '.py') - source = source(target) - if source is None: - continue - modules = [(package, module_base, source)] - if package not in self.py_modules_dict: - self.py_modules_dict[package] = [] - self.py_modules_dict[package] += modules - else: - new_py_modules.append(source) - self.py_modules[:] = new_py_modules - - def build_library_sources(self, lib_name, build_info): - sources = list(build_info.get('sources', [])) - - if not sources: - return - - log.info('building library "%s" sources' % (lib_name)) - - sources = self.generate_sources(sources, (lib_name, build_info)) - - sources = self.template_sources(sources, (lib_name, build_info)) - - sources, h_files = self.filter_h_files(sources) - - if h_files: - log.info('%s - nothing done with h_files = %s', - self.package, h_files) - - #for f in h_files: - # self.distribution.headers.append((lib_name,f)) - - build_info['sources'] = sources - return - - def build_extension_sources(self, ext): - - sources = list(ext.sources) - - 
log.info('building extension "%s" sources' % (ext.name)) - - fullname = self.get_ext_fullname(ext.name) - - modpath = fullname.split('.') - package = '.'.join(modpath[0:-1]) - - if self.inplace: - self.ext_target_dir = self.get_package_dir(package) - - sources = self.generate_sources(sources, ext) - sources = self.template_sources(sources, ext) - sources = self.swig_sources(sources, ext) - sources = self.f2py_sources(sources, ext) - sources = self.pyrex_sources(sources, ext) - - sources, py_files = self.filter_py_files(sources) - - if package not in self.py_modules_dict: - self.py_modules_dict[package] = [] - modules = [] - for f in py_files: - module = os.path.splitext(os.path.basename(f))[0] - modules.append((package, module, f)) - self.py_modules_dict[package] += modules - - sources, h_files = self.filter_h_files(sources) - - if h_files: - log.info('%s - nothing done with h_files = %s', - package, h_files) - #for f in h_files: - # self.distribution.headers.append((package,f)) - - ext.sources = sources - - def generate_sources(self, sources, extension): - new_sources = [] - func_sources = [] - for source in sources: - if is_string(source): - new_sources.append(source) - else: - func_sources.append(source) - if not func_sources: - return new_sources - if self.inplace and not is_sequence(extension): - build_dir = self.ext_target_dir - else: - if is_sequence(extension): - name = extension[0] - # if 'include_dirs' not in extension[1]: - # extension[1]['include_dirs'] = [] - # incl_dirs = extension[1]['include_dirs'] - else: - name = extension.name - # incl_dirs = extension.include_dirs - #if self.build_src not in incl_dirs: - # incl_dirs.append(self.build_src) - build_dir = os.path.join(*([self.build_src] - +name.split('.')[:-1])) - self.mkpath(build_dir) - - if self.verbose_cfg: - new_level = log.INFO - else: - new_level = log.WARN - old_level = log.set_threshold(new_level) - - for func in func_sources: - source = func(extension, build_dir) - if not source: - 
continue - if is_sequence(source): - [log.info(" adding '%s' to sources." % (s,)) for s in source] - new_sources.extend(source) - else: - log.info(" adding '%s' to sources." % (source,)) - new_sources.append(source) - log.set_threshold(old_level) - return new_sources - - def filter_py_files(self, sources): - return self.filter_files(sources, ['.py']) - - def filter_h_files(self, sources): - return self.filter_files(sources, ['.h', '.hpp', '.inc']) - - def filter_files(self, sources, exts = []): - new_sources = [] - files = [] - for source in sources: - (base, ext) = os.path.splitext(source) - if ext in exts: - files.append(source) - else: - new_sources.append(source) - return new_sources, files - - def template_sources(self, sources, extension): - new_sources = [] - if is_sequence(extension): - depends = extension[1].get('depends') - include_dirs = extension[1].get('include_dirs') - else: - depends = extension.depends - include_dirs = extension.include_dirs - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.src': # Template file - if self.inplace: - target_dir = os.path.dirname(base) - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - self.mkpath(target_dir) - target_file = os.path.join(target_dir, os.path.basename(base)) - if (self.force or newer_group([source] + depends, target_file)): - if _f_pyf_ext_match(base): - log.info("from_template:> %s" % (target_file)) - outstr = process_f_file(source) - else: - log.info("conv_template:> %s" % (target_file)) - outstr = process_c_file(source) - with open(target_file, 'w') as fid: - fid.write(outstr) - if _header_ext_match(target_file): - d = os.path.dirname(target_file) - if d not in include_dirs: - log.info(" adding '%s' to include_dirs." 
% (d)) - include_dirs.append(d) - new_sources.append(target_file) - else: - new_sources.append(source) - return new_sources - - def pyrex_sources(self, sources, extension): - """Pyrex not supported; this remains for Cython support (see below)""" - new_sources = [] - ext_name = extension.name.split('.')[-1] - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.pyx': - target_file = self.generate_a_pyrex_source(base, ext_name, - source, - extension) - new_sources.append(target_file) - else: - new_sources.append(source) - return new_sources - - def generate_a_pyrex_source(self, base, ext_name, source, extension): - """Pyrex is not supported, but some projects monkeypatch this method. - - That allows compiling Cython code, see gh-6955. - This method will remain here for compatibility reasons. - """ - return [] - - def f2py_sources(self, sources, extension): - new_sources = [] - f2py_sources = [] - f_sources = [] - f2py_targets = {} - target_dirs = [] - ext_name = extension.name.split('.')[-1] - skip_f2py = 0 - - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.pyf': # F2PY interface file - if self.inplace: - target_dir = os.path.dirname(base) - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - if os.path.isfile(source): - name = get_f2py_modulename(source) - if name != ext_name: - raise DistutilsSetupError('mismatch of extension names: %s ' - 'provides %r but expected %r' % ( - source, name, ext_name)) - target_file = os.path.join(target_dir, name+'module.c') - else: - log.debug(' source %s does not exist: skipping f2py\'ing.' \ - % (source)) - name = ext_name - skip_f2py = 1 - target_file = os.path.join(target_dir, name+'module.c') - if not os.path.isfile(target_file): - log.warn(' target %s does not exist:\n '\ - 'Assuming %smodule.c was generated with '\ - '"build_src --inplace" command.' 
\ - % (target_file, name)) - target_dir = os.path.dirname(base) - target_file = os.path.join(target_dir, name+'module.c') - if not os.path.isfile(target_file): - raise DistutilsSetupError("%r missing" % (target_file,)) - log.info(' Yes! Using %r as up-to-date target.' \ - % (target_file)) - target_dirs.append(target_dir) - f2py_sources.append(source) - f2py_targets[source] = target_file - new_sources.append(target_file) - elif fortran_ext_match(ext): - f_sources.append(source) - else: - new_sources.append(source) - - if not (f2py_sources or f_sources): - return new_sources - - for d in target_dirs: - self.mkpath(d) - - f2py_options = extension.f2py_options + self.f2py_opts - - if self.distribution.libraries: - for name, build_info in self.distribution.libraries: - if name in extension.libraries: - f2py_options.extend(build_info.get('f2py_options', [])) - - log.info("f2py options: %s" % (f2py_options)) - - if f2py_sources: - if len(f2py_sources) != 1: - raise DistutilsSetupError( - 'only one .pyf file is allowed per extension module but got'\ - ' more: %r' % (f2py_sources,)) - source = f2py_sources[0] - target_file = f2py_targets[source] - target_dir = os.path.dirname(target_file) or '.' 
- depends = [source] + extension.depends - if (self.force or newer_group(depends, target_file, 'newer')) \ - and not skip_f2py: - log.info("f2py: %s" % (source)) - from numpy.f2py import f2py2e - f2py2e.run_main(f2py_options - + ['--build-dir', target_dir, source]) - else: - log.debug(" skipping '%s' f2py interface (up-to-date)" % (source)) - else: - #XXX TODO: --inplace support for sdist command - if is_sequence(extension): - name = extension[0] - else: name = extension.name - target_dir = os.path.join(*([self.build_src] - +name.split('.')[:-1])) - target_file = os.path.join(target_dir, ext_name + 'module.c') - new_sources.append(target_file) - depends = f_sources + extension.depends - if (self.force or newer_group(depends, target_file, 'newer')) \ - and not skip_f2py: - log.info("f2py:> %s" % (target_file)) - self.mkpath(target_dir) - from numpy.f2py import f2py2e - f2py2e.run_main(f2py_options + ['--lower', - '--build-dir', target_dir]+\ - ['-m', ext_name]+f_sources) - else: - log.debug(" skipping f2py fortran files for '%s' (up-to-date)"\ - % (target_file)) - - if not os.path.isfile(target_file): - raise DistutilsError("f2py target file %r not generated" % (target_file,)) - - build_dir = os.path.join(self.build_src, target_dir) - target_c = os.path.join(build_dir, 'fortranobject.c') - target_h = os.path.join(build_dir, 'fortranobject.h') - log.info(" adding '%s' to sources." % (target_c)) - new_sources.append(target_c) - if build_dir not in extension.include_dirs: - log.info(" adding '%s' to include_dirs." 
% (build_dir)) - extension.include_dirs.append(build_dir) - - if not skip_f2py: - import numpy.f2py - d = os.path.dirname(numpy.f2py.__file__) - source_c = os.path.join(d, 'src', 'fortranobject.c') - source_h = os.path.join(d, 'src', 'fortranobject.h') - if newer(source_c, target_c) or newer(source_h, target_h): - self.mkpath(os.path.dirname(target_c)) - self.copy_file(source_c, target_c) - self.copy_file(source_h, target_h) - else: - if not os.path.isfile(target_c): - raise DistutilsSetupError("f2py target_c file %r not found" % (target_c,)) - if not os.path.isfile(target_h): - raise DistutilsSetupError("f2py target_h file %r not found" % (target_h,)) - - for name_ext in ['-f2pywrappers.f', '-f2pywrappers2.f90']: - filename = os.path.join(target_dir, ext_name + name_ext) - if os.path.isfile(filename): - log.info(" adding '%s' to sources." % (filename)) - f_sources.append(filename) - - return new_sources + f_sources - - def swig_sources(self, sources, extension): - # Assuming SWIG 1.3.14 or later. 
See compatibility note in - # http://www.swig.org/Doc1.3/Python.html#Python_nn6 - - new_sources = [] - swig_sources = [] - swig_targets = {} - target_dirs = [] - py_files = [] # swig generated .py files - target_ext = '.c' - if '-c++' in extension.swig_opts: - typ = 'c++' - is_cpp = True - extension.swig_opts.remove('-c++') - elif self.swig_cpp: - typ = 'c++' - is_cpp = True - else: - typ = None - is_cpp = False - skip_swig = 0 - ext_name = extension.name.split('.')[-1] - - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.i': # SWIG interface file - # the code below assumes that the sources list - # contains not more than one .i SWIG interface file - if self.inplace: - target_dir = os.path.dirname(base) - py_target_dir = self.ext_target_dir - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - py_target_dir = target_dir - if os.path.isfile(source): - name = get_swig_modulename(source) - if name != ext_name[1:]: - raise DistutilsSetupError( - 'mismatch of extension names: %s provides %r' - ' but expected %r' % (source, name, ext_name[1:])) - if typ is None: - typ = get_swig_target(source) - is_cpp = typ=='c++' - else: - typ2 = get_swig_target(source) - if typ2 is None: - log.warn('source %r does not define swig target, assuming %s swig target' \ - % (source, typ)) - elif typ!=typ2: - log.warn('expected %r but source %r defines %r swig target' \ - % (typ, source, typ2)) - if typ2=='c++': - log.warn('resetting swig target to c++ (some targets may have .c extension)') - is_cpp = True - else: - log.warn('assuming that %r has c++ swig target' % (source)) - if is_cpp: - target_ext = '.cpp' - target_file = os.path.join(target_dir, '%s_wrap%s' \ - % (name, target_ext)) - else: - log.warn(' source %s does not exist: skipping swig\'ing.' 
\ - % (source)) - name = ext_name[1:] - skip_swig = 1 - target_file = _find_swig_target(target_dir, name) - if not os.path.isfile(target_file): - log.warn(' target %s does not exist:\n '\ - 'Assuming %s_wrap.{c,cpp} was generated with '\ - '"build_src --inplace" command.' \ - % (target_file, name)) - target_dir = os.path.dirname(base) - target_file = _find_swig_target(target_dir, name) - if not os.path.isfile(target_file): - raise DistutilsSetupError("%r missing" % (target_file,)) - log.warn(' Yes! Using %r as up-to-date target.' \ - % (target_file)) - target_dirs.append(target_dir) - new_sources.append(target_file) - py_files.append(os.path.join(py_target_dir, name+'.py')) - swig_sources.append(source) - swig_targets[source] = new_sources[-1] - else: - new_sources.append(source) - - if not swig_sources: - return new_sources - - if skip_swig: - return new_sources + py_files - - for d in target_dirs: - self.mkpath(d) - - swig = self.swig or self.find_swig() - swig_cmd = [swig, "-python"] + extension.swig_opts - if is_cpp: - swig_cmd.append('-c++') - for d in extension.include_dirs: - swig_cmd.append('-I'+d) - for source in swig_sources: - target = swig_targets[source] - depends = [source] + extension.depends - if self.force or newer_group(depends, target, 'newer'): - log.info("%s: %s" % (os.path.basename(swig) \ - + (is_cpp and '++' or ''), source)) - self.spawn(swig_cmd + self.swig_opts \ - + ["-o", target, '-outdir', py_target_dir, source]) - else: - log.debug(" skipping '%s' swig interface (up-to-date)" \ - % (source)) - - return new_sources + py_files - -_f_pyf_ext_match = re.compile(r'.*\.(f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match -_header_ext_match = re.compile(r'.*\.(inc|h|hpp)\Z', re.I).match - -#### SWIG related auxiliary functions #### -_swig_module_name_match = re.compile(r'\s*%module\s*(.*\(\s*package\s*=\s*"(?P[\w_]+)".*\)|)\s*(?P[\w_]+)', - re.I).match -_has_c_header = re.compile(r'-\*-\s*c\s*-\*-', re.I).search -_has_cpp_header = 
re.compile(r'-\*-\s*c\+\+\s*-\*-', re.I).search - -def get_swig_target(source): - with open(source) as f: - result = None - line = f.readline() - if _has_cpp_header(line): - result = 'c++' - if _has_c_header(line): - result = 'c' - return result - -def get_swig_modulename(source): - with open(source) as f: - name = None - for line in f: - m = _swig_module_name_match(line) - if m: - name = m.group('name') - break - return name - -def _find_swig_target(target_dir, name): - for ext in ['.cpp', '.c']: - target = os.path.join(target_dir, '%s_wrap%s' % (name, ext)) - if os.path.isfile(target): - break - return target - -#### F2PY related auxiliary functions #### - -_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]+)', - re.I).match -_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]*?' - r'__user__[\w_]*)', re.I).match - -def get_f2py_modulename(source): - name = None - with open(source) as f: - for line in f: - m = _f2py_module_name_match(line) - if m: - if _f2py_user_module_name_match(line): # skip *__user__* names - continue - name = m.group('name') - break - return name - -########################################## diff --git a/numpy/distutils/command/config.py b/numpy/distutils/command/config.py deleted file mode 100644 index 8bdfb7ec5823..000000000000 --- a/numpy/distutils/command/config.py +++ /dev/null @@ -1,516 +0,0 @@ -# Added Fortran compiler support to config. Currently useful only for -# try_compile call. try_run works but is untested for most of Fortran -# compilers (they must define linker_exe first). 
-# Pearu Peterson -import os -import signal -import subprocess -import sys -import textwrap -import warnings - -from distutils.command.config import config as old_config -from distutils.command.config import LANG_EXT -from distutils import log -from distutils.file_util import copy_file -from distutils.ccompiler import CompileError, LinkError -import distutils -from numpy.distutils.exec_command import filepath_from_subprocess_output -from numpy.distutils.mingw32ccompiler import generate_manifest -from numpy.distutils.command.autodist import (check_gcc_function_attribute, - check_gcc_function_attribute_with_intrinsics, - check_gcc_variable_attribute, - check_gcc_version_at_least, - check_inline, - check_restrict, - check_compiler_gcc) - -LANG_EXT['f77'] = '.f' -LANG_EXT['f90'] = '.f90' - -class config(old_config): - old_config.user_options += [ - ('fcompiler=', None, "specify the Fortran compiler type"), - ] - - def initialize_options(self): - self.fcompiler = None - old_config.initialize_options(self) - - def _check_compiler (self): - old_config._check_compiler(self) - from numpy.distutils.fcompiler import FCompiler, new_fcompiler - - if sys.platform == 'win32' and (self.compiler.compiler_type in - ('msvc', 'intelw', 'intelemw')): - # XXX: hack to circumvent a python 2.6 bug with msvc9compiler: - # initialize call query_vcvarsall, which throws an OSError, and - # causes an error along the way without much information. We try to - # catch it here, hoping it is early enough, and print a helpful - # message instead of Error: None. - if not self.compiler.initialized: - try: - self.compiler.initialize() - except OSError as e: - msg = textwrap.dedent("""\ - Could not initialize compiler instance: do you have Visual Studio - installed? If you are trying to build with MinGW, please use "python setup.py - build -c mingw32" instead. If you have Visual Studio installed, check it is - correctly installed, and the right version (VS 2015 as of this writing). 
- - Original exception was: %s, and the Compiler class was %s - ============================================================================""") \ - % (e, self.compiler.__class__.__name__) - print(textwrap.dedent("""\ - ============================================================================""")) - raise distutils.errors.DistutilsPlatformError(msg) from e - - # After MSVC is initialized, add an explicit /MANIFEST to linker - # flags. See issues gh-4245 and gh-4101 for details. Also - # relevant are issues 4431 and 16296 on the Python bug tracker. - from distutils import msvc9compiler - if msvc9compiler.get_build_version() >= 10: - for ldflags in [self.compiler.ldflags_shared, - self.compiler.ldflags_shared_debug]: - if '/MANIFEST' not in ldflags: - ldflags.append('/MANIFEST') - - if not isinstance(self.fcompiler, FCompiler): - self.fcompiler = new_fcompiler(compiler=self.fcompiler, - dry_run=self.dry_run, force=1, - c_compiler=self.compiler) - if self.fcompiler is not None: - self.fcompiler.customize(self.distribution) - if self.fcompiler.get_version(): - self.fcompiler.customize_cmd(self) - self.fcompiler.show_customization() - - def _wrap_method(self, mth, lang, args): - from distutils.ccompiler import CompileError - from distutils.errors import DistutilsExecError - save_compiler = self.compiler - if lang in ['f77', 'f90']: - self.compiler = self.fcompiler - if self.compiler is None: - raise CompileError('%s compiler is not set' % (lang,)) - try: - ret = mth(*((self,)+args)) - except (DistutilsExecError, CompileError) as e: - self.compiler = save_compiler - raise CompileError from e - self.compiler = save_compiler - return ret - - def _compile (self, body, headers, include_dirs, lang): - src, obj = self._wrap_method(old_config._compile, lang, - (body, headers, include_dirs, lang)) - # _compile in unixcompiler.py sometimes creates .d dependency files. - # Clean them up. 
- self.temp_files.append(obj + '.d') - return src, obj - - def _link (self, body, - headers, include_dirs, - libraries, library_dirs, lang): - if self.compiler.compiler_type=='msvc': - libraries = (libraries or [])[:] - library_dirs = (library_dirs or [])[:] - if lang in ['f77', 'f90']: - lang = 'c' # always use system linker when using MSVC compiler - if self.fcompiler: - for d in self.fcompiler.library_dirs or []: - # correct path when compiling in Cygwin but with - # normal Win Python - if d.startswith('/usr/lib'): - try: - d = subprocess.check_output(['cygpath', - '-w', d]) - except (OSError, subprocess.CalledProcessError): - pass - else: - d = filepath_from_subprocess_output(d) - library_dirs.append(d) - for libname in self.fcompiler.libraries or []: - if libname not in libraries: - libraries.append(libname) - for libname in libraries: - if libname.startswith('msvc'): continue - fileexists = False - for libdir in library_dirs or []: - libfile = os.path.join(libdir, '%s.lib' % (libname)) - if os.path.isfile(libfile): - fileexists = True - break - if fileexists: continue - # make g77-compiled static libs available to MSVC - fileexists = False - for libdir in library_dirs: - libfile = os.path.join(libdir, 'lib%s.a' % (libname)) - if os.path.isfile(libfile): - # copy libname.a file to name.lib so that MSVC linker - # can find it - libfile2 = os.path.join(libdir, '%s.lib' % (libname)) - copy_file(libfile, libfile2) - self.temp_files.append(libfile2) - fileexists = True - break - if fileexists: continue - log.warn('could not find library %r in directories %s' \ - % (libname, library_dirs)) - elif self.compiler.compiler_type == 'mingw32': - generate_manifest(self) - return self._wrap_method(old_config._link, lang, - (body, headers, include_dirs, - libraries, library_dirs, lang)) - - def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'): - self._check_compiler() - return self.try_compile( - "/* we need a dummy line to make distutils happy */", 
- [header], include_dirs) - - def check_decl(self, symbol, - headers=None, include_dirs=None): - self._check_compiler() - body = textwrap.dedent(""" - int main(void) - { - #ifndef %s - (void) %s; - #endif - ; - return 0; - }""") % (symbol, symbol) - - return self.try_compile(body, headers, include_dirs) - - def check_macro_true(self, symbol, - headers=None, include_dirs=None): - self._check_compiler() - body = textwrap.dedent(""" - int main(void) - { - #if %s - #else - #error false or undefined macro - #endif - ; - return 0; - }""") % (symbol,) - - return self.try_compile(body, headers, include_dirs) - - def check_type(self, type_name, headers=None, include_dirs=None, - library_dirs=None): - """Check type availability. Return True if the type can be compiled, - False otherwise""" - self._check_compiler() - - # First check the type can be compiled - body = textwrap.dedent(r""" - int main(void) { - if ((%(name)s *) 0) - return 0; - if (sizeof (%(name)s)) - return 0; - } - """) % {'name': type_name} - - st = False - try: - try: - self._compile(body % {'type': type_name}, - headers, include_dirs, 'c') - st = True - except distutils.errors.CompileError: - st = False - finally: - self._clean() - - return st - - def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None, expected=None): - """Check size of a given type.""" - self._check_compiler() - - # First check the type can be compiled - body = textwrap.dedent(r""" - typedef %(type)s npy_check_sizeof_type; - int main (void) - { - static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)]; - test_array [0] = 0 - - ; - return 0; - } - """) - self._compile(body % {'type': type_name}, - headers, include_dirs, 'c') - self._clean() - - if expected: - body = textwrap.dedent(r""" - typedef %(type)s npy_check_sizeof_type; - int main (void) - { - static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)]; - test_array [0] = 0 - - ; - return 0; - } 
- """) - for size in expected: - try: - self._compile(body % {'type': type_name, 'size': size}, - headers, include_dirs, 'c') - self._clean() - return size - except CompileError: - pass - - # this fails to *compile* if size > sizeof(type) - body = textwrap.dedent(r""" - typedef %(type)s npy_check_sizeof_type; - int main (void) - { - static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)]; - test_array [0] = 0 - - ; - return 0; - } - """) - - # The principle is simple: we first find low and high bounds of size - # for the type, where low/high are looked up on a log scale. Then, we - # do a binary search to find the exact size between low and high - low = 0 - mid = 0 - while True: - try: - self._compile(body % {'type': type_name, 'size': mid}, - headers, include_dirs, 'c') - self._clean() - break - except CompileError: - #log.info("failure to test for bound %d" % mid) - low = mid + 1 - mid = 2 * mid + 1 - - high = mid - # Binary search: - while low != high: - mid = (high - low) // 2 + low - try: - self._compile(body % {'type': type_name, 'size': mid}, - headers, include_dirs, 'c') - self._clean() - high = mid - except CompileError: - low = mid + 1 - return low - - def check_func(self, func, - headers=None, include_dirs=None, - libraries=None, library_dirs=None, - decl=False, call=False, call_args=None): - # clean up distutils's config a bit: add void to main(), and - # return a value. - self._check_compiler() - body = [] - if decl: - if type(decl) == str: - body.append(decl) - else: - body.append("int %s (void);" % func) - # Handle MSVC intrinsics: force MS compiler to make a function call. - # Useful to test for some functions when built with optimization on, to - # avoid build error because the intrinsic and our 'fake' test - # declaration do not match. 
- body.append("#ifdef _MSC_VER") - body.append("#pragma function(%s)" % func) - body.append("#endif") - body.append("int main (void) {") - if call: - if call_args is None: - call_args = '' - body.append(" %s(%s);" % (func, call_args)) - else: - body.append(" %s;" % func) - body.append(" return 0;") - body.append("}") - body = '\n'.join(body) + "\n" - - return self.try_link(body, headers, include_dirs, - libraries, library_dirs) - - def check_funcs_once(self, funcs, - headers=None, include_dirs=None, - libraries=None, library_dirs=None, - decl=False, call=False, call_args=None): - """Check a list of functions at once. - - This is useful to speed up things, since all the functions in the funcs - list will be put in one compilation unit. - - Arguments - --------- - funcs : seq - list of functions to test - include_dirs : seq - list of header paths - libraries : seq - list of libraries to link the code snippet to - library_dirs : seq - list of library paths - decl : dict - for every (key, value), the declaration in the value will be - used for function in key. If a function is not in the - dictionary, no declaration will be used. - call : dict - for every item (f, value), if the value is True, a call will be - done to the function f. - """ - self._check_compiler() - body = [] - if decl: - for f, v in decl.items(): - if v: - body.append("int %s (void);" % f) - - # Handle MS intrinsics. See check_func for more info. 
- body.append("#ifdef _MSC_VER") - for func in funcs: - body.append("#pragma function(%s)" % func) - body.append("#endif") - - body.append("int main (void) {") - if call: - for f in funcs: - if f in call and call[f]: - if not (call_args and f in call_args and call_args[f]): - args = '' - else: - args = call_args[f] - body.append(" %s(%s);" % (f, args)) - else: - body.append(" %s;" % f) - else: - for f in funcs: - body.append(" %s;" % f) - body.append(" return 0;") - body.append("}") - body = '\n'.join(body) + "\n" - - return self.try_link(body, headers, include_dirs, - libraries, library_dirs) - - def check_inline(self): - """Return the inline keyword recognized by the compiler, empty string - otherwise.""" - return check_inline(self) - - def check_restrict(self): - """Return the restrict keyword recognized by the compiler, empty string - otherwise.""" - return check_restrict(self) - - def check_compiler_gcc(self): - """Return True if the C compiler is gcc""" - return check_compiler_gcc(self) - - def check_gcc_function_attribute(self, attribute, name): - return check_gcc_function_attribute(self, attribute, name) - - def check_gcc_function_attribute_with_intrinsics(self, attribute, name, - code, include): - return check_gcc_function_attribute_with_intrinsics(self, attribute, - name, code, include) - - def check_gcc_variable_attribute(self, attribute): - return check_gcc_variable_attribute(self, attribute) - - def check_gcc_version_at_least(self, major, minor=0, patchlevel=0): - """Return True if the GCC version is greater than or equal to the - specified version.""" - return check_gcc_version_at_least(self, major, minor, patchlevel) - - def get_output(self, body, headers=None, include_dirs=None, - libraries=None, library_dirs=None, - lang="c", use_tee=None): - """Try to compile, link to an executable, and run a program - built from 'body' and 'headers'. Returns the exit status code - of the program and its output. 
- """ - # 2008-11-16, RemoveMe - warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" - "Usage of get_output is deprecated: please do not \n" - "use it anymore, and avoid configuration checks \n" - "involving running executable on the target machine.\n" - "+++++++++++++++++++++++++++++++++++++++++++++++++\n", - DeprecationWarning, stacklevel=2) - self._check_compiler() - exitcode, output = 255, '' - try: - grabber = GrabStdout() - try: - src, obj, exe = self._link(body, headers, include_dirs, - libraries, library_dirs, lang) - grabber.restore() - except Exception: - output = grabber.data - grabber.restore() - raise - exe = os.path.join('.', exe) - try: - # specify cwd arg for consistency with - # historic usage pattern of exec_command() - # also, note that exe appears to be a string, - # which exec_command() handled, but we now - # use a list for check_output() -- this assumes - # that exe is always a single command - output = subprocess.check_output([exe], cwd='.') - except subprocess.CalledProcessError as exc: - exitstatus = exc.returncode - output = '' - except OSError: - # preserve the EnvironmentError exit status - # used historically in exec_command() - exitstatus = 127 - output = '' - else: - output = filepath_from_subprocess_output(output) - if hasattr(os, 'WEXITSTATUS'): - exitcode = os.WEXITSTATUS(exitstatus) - if os.WIFSIGNALED(exitstatus): - sig = os.WTERMSIG(exitstatus) - log.error('subprocess exited with signal %d' % (sig,)) - if sig == signal.SIGINT: - # control-C - raise KeyboardInterrupt - else: - exitcode = exitstatus - log.info("success!") - except (CompileError, LinkError): - log.info("failure.") - self._clean() - return exitcode, output - -class GrabStdout: - - def __init__(self): - self.sys_stdout = sys.stdout - self.data = '' - sys.stdout = self - - def write (self, data): - self.sys_stdout.write(data) - self.data += data - - def flush (self): - self.sys_stdout.flush() - - def restore(self): - sys.stdout = self.sys_stdout 
diff --git a/numpy/distutils/command/config_compiler.py b/numpy/distutils/command/config_compiler.py deleted file mode 100644 index 44265bfcce89..000000000000 --- a/numpy/distutils/command/config_compiler.py +++ /dev/null @@ -1,126 +0,0 @@ -from distutils.core import Command -from numpy.distutils import log - -#XXX: Linker flags - -def show_fortran_compilers(_cache=None): - # Using cache to prevent infinite recursion. - if _cache: - return - elif _cache is None: - _cache = [] - _cache.append(1) - from numpy.distutils.fcompiler import show_fcompilers - import distutils.core - dist = distutils.core._setup_distribution - show_fcompilers(dist) - -class config_fc(Command): - """ Distutils command to hold user specified options - to Fortran compilers. - - config_fc command is used by the FCompiler.customize() method. - """ - - description = "specify Fortran 77/Fortran 90 compiler information" - - user_options = [ - ('fcompiler=', None, "specify Fortran compiler type"), - ('f77exec=', None, "specify F77 compiler command"), - ('f90exec=', None, "specify F90 compiler command"), - ('f77flags=', None, "specify F77 compiler flags"), - ('f90flags=', None, "specify F90 compiler flags"), - ('opt=', None, "specify optimization flags"), - ('arch=', None, "specify architecture specific optimization flags"), - ('debug', 'g', "compile with debugging information"), - ('noopt', None, "compile without optimization"), - ('noarch', None, "compile without arch-dependent optimization"), - ] - - help_options = [ - ('help-fcompiler', None, "list available Fortran compilers", - show_fortran_compilers), - ] - - boolean_options = ['debug', 'noopt', 'noarch'] - - def initialize_options(self): - self.fcompiler = None - self.f77exec = None - self.f90exec = None - self.f77flags = None - self.f90flags = None - self.opt = None - self.arch = None - self.debug = None - self.noopt = None - self.noarch = None - - def finalize_options(self): - log.info('unifing config_fc, config, build_clib, build_ext, 
build commands --fcompiler options') - build_clib = self.get_finalized_command('build_clib') - build_ext = self.get_finalized_command('build_ext') - config = self.get_finalized_command('config') - build = self.get_finalized_command('build') - cmd_list = [self, config, build_clib, build_ext, build] - for a in ['fcompiler']: - l = [] - for c in cmd_list: - v = getattr(c, a) - if v is not None: - if not isinstance(v, str): v = v.compiler_type - if v not in l: l.append(v) - if not l: v1 = None - else: v1 = l[0] - if len(l)>1: - log.warn(' commands have different --%s options: %s'\ - ', using first in list as default' % (a, l)) - if v1: - for c in cmd_list: - if getattr(c, a) is None: setattr(c, a, v1) - - def run(self): - # Do nothing. - return - -class config_cc(Command): - """ Distutils command to hold user specified options - to C/C++ compilers. - """ - - description = "specify C/C++ compiler information" - - user_options = [ - ('compiler=', None, "specify C/C++ compiler type"), - ] - - def initialize_options(self): - self.compiler = None - - def finalize_options(self): - log.info('unifing config_cc, config, build_clib, build_ext, build commands --compiler options') - build_clib = self.get_finalized_command('build_clib') - build_ext = self.get_finalized_command('build_ext') - config = self.get_finalized_command('config') - build = self.get_finalized_command('build') - cmd_list = [self, config, build_clib, build_ext, build] - for a in ['compiler']: - l = [] - for c in cmd_list: - v = getattr(c, a) - if v is not None: - if not isinstance(v, str): v = v.compiler_type - if v not in l: l.append(v) - if not l: v1 = None - else: v1 = l[0] - if len(l)>1: - log.warn(' commands have different --%s options: %s'\ - ', using first in list as default' % (a, l)) - if v1: - for c in cmd_list: - if getattr(c, a) is None: setattr(c, a, v1) - return - - def run(self): - # Do nothing. 
- return diff --git a/numpy/distutils/command/develop.py b/numpy/distutils/command/develop.py deleted file mode 100644 index af24baf2e7e1..000000000000 --- a/numpy/distutils/command/develop.py +++ /dev/null @@ -1,15 +0,0 @@ -""" Override the develop command from setuptools so we can ensure that our -generated files (from build_src or build_scripts) are properly converted to real -files with filenames. - -""" -from setuptools.command.develop import develop as old_develop - -class develop(old_develop): - __doc__ = old_develop.__doc__ - def install_for_development(self): - # Build sources in-place, too. - self.reinitialize_command('build_src', inplace=1) - # Make sure scripts are built. - self.run_command('build_scripts') - old_develop.install_for_development(self) diff --git a/numpy/distutils/command/egg_info.py b/numpy/distutils/command/egg_info.py deleted file mode 100644 index 14c62b4d1b90..000000000000 --- a/numpy/distutils/command/egg_info.py +++ /dev/null @@ -1,25 +0,0 @@ -import sys - -from setuptools.command.egg_info import egg_info as _egg_info - -class egg_info(_egg_info): - def run(self): - if 'sdist' in sys.argv: - import warnings - import textwrap - msg = textwrap.dedent(""" - `build_src` is being run, this may lead to missing - files in your sdist! You want to use distutils.sdist - instead of the setuptools version: - - from distutils.command.sdist import sdist - cmdclass={'sdist': sdist}" - - See numpy's setup.py or gh-7131 for details.""") - warnings.warn(msg, UserWarning, stacklevel=2) - - # We need to ensure that build_src has been executed in order to give - # setuptools' egg_info command real filenames instead of functions which - # generate files. 
- self.run_command("build_src") - _egg_info.run(self) diff --git a/numpy/distutils/command/install.py b/numpy/distutils/command/install.py deleted file mode 100644 index efa9b4740fc4..000000000000 --- a/numpy/distutils/command/install.py +++ /dev/null @@ -1,79 +0,0 @@ -import sys -if 'setuptools' in sys.modules: - import setuptools.command.install as old_install_mod - have_setuptools = True -else: - import distutils.command.install as old_install_mod - have_setuptools = False -from distutils.file_util import write_file - -old_install = old_install_mod.install - -class install(old_install): - - # Always run install_clib - the command is cheap, so no need to bypass it; - # but it's not run by setuptools -- so it's run again in install_data - sub_commands = old_install.sub_commands + [ - ('install_clib', lambda x: True) - ] - - def finalize_options (self): - old_install.finalize_options(self) - self.install_lib = self.install_libbase - - def setuptools_run(self): - """ The setuptools version of the .run() method. - - We must pull in the entire code so we can override the level used in the - _getframe() call since we wrap this call by one more level. - """ - from distutils.command.install import install as distutils_install - - # Explicit request for old-style install? Just do it - if self.old_and_unmanageable or self.single_version_externally_managed: - return distutils_install.run(self) - - # Attempt to detect whether we were called from setup() or by another - # command. If we were called by setup(), our caller will be the - # 'run_command' method in 'distutils.dist', and *its* caller will be - # the 'run_commands' method. If we were called any other way, our - # immediate caller *might* be 'run_command', but it won't have been - # called by 'run_commands'. This is slightly kludgy, but seems to - # work. 
- # - caller = sys._getframe(3) - caller_module = caller.f_globals.get('__name__', '') - caller_name = caller.f_code.co_name - - if caller_module != 'distutils.dist' or caller_name!='run_commands': - # We weren't called from the command line or setup(), so we - # should run in backward-compatibility mode to support bdist_* - # commands. - distutils_install.run(self) - else: - self.do_egg_install() - - def run(self): - if not have_setuptools: - r = old_install.run(self) - else: - r = self.setuptools_run() - if self.record: - # bdist_rpm fails when INSTALLED_FILES contains - # paths with spaces. Such paths must be enclosed - # with double-quotes. - with open(self.record) as f: - lines = [] - need_rewrite = False - for l in f: - l = l.rstrip() - if ' ' in l: - need_rewrite = True - l = '"%s"' % (l) - lines.append(l) - if need_rewrite: - self.execute(write_file, - (self.record, lines), - "re-writing list of installed files to '%s'" % - self.record) - return r diff --git a/numpy/distutils/command/install_clib.py b/numpy/distutils/command/install_clib.py deleted file mode 100644 index aa2e5594c3c2..000000000000 --- a/numpy/distutils/command/install_clib.py +++ /dev/null @@ -1,40 +0,0 @@ -import os -from distutils.core import Command -from distutils.ccompiler import new_compiler -from numpy.distutils.misc_util import get_cmd - -class install_clib(Command): - description = "Command to install installable C libraries" - - user_options = [] - - def initialize_options(self): - self.install_dir = None - self.outfiles = [] - - def finalize_options(self): - self.set_undefined_options('install', ('install_lib', 'install_dir')) - - def run (self): - build_clib_cmd = get_cmd("build_clib") - if not build_clib_cmd.build_clib: - # can happen if the user specified `--skip-build` - build_clib_cmd.finalize_options() - build_dir = build_clib_cmd.build_clib - - # We need the compiler to get the library name -> filename association - if not build_clib_cmd.compiler: - compiler = 
new_compiler(compiler=None) - compiler.customize(self.distribution) - else: - compiler = build_clib_cmd.compiler - - for l in self.distribution.installed_libraries: - target_dir = os.path.join(self.install_dir, l.target_dir) - name = compiler.library_filename(l.name) - source = os.path.join(build_dir, name) - self.mkpath(target_dir) - self.outfiles.append(self.copy_file(source, target_dir)[0]) - - def get_outputs(self): - return self.outfiles diff --git a/numpy/distutils/command/install_data.py b/numpy/distutils/command/install_data.py deleted file mode 100644 index 0a2e68ae192a..000000000000 --- a/numpy/distutils/command/install_data.py +++ /dev/null @@ -1,24 +0,0 @@ -import sys -have_setuptools = ('setuptools' in sys.modules) - -from distutils.command.install_data import install_data as old_install_data - -#data installer with improved intelligence over distutils -#data files are copied into the project directory instead -#of willy-nilly -class install_data (old_install_data): - - def run(self): - old_install_data.run(self) - - if have_setuptools: - # Run install_clib again, since setuptools does not run sub-commands - # of install automatically - self.run_command('install_clib') - - def finalize_options (self): - self.set_undefined_options('install', - ('install_lib', 'install_dir'), - ('root', 'root'), - ('force', 'force'), - ) diff --git a/numpy/distutils/command/install_headers.py b/numpy/distutils/command/install_headers.py deleted file mode 100644 index 91eba6f17c29..000000000000 --- a/numpy/distutils/command/install_headers.py +++ /dev/null @@ -1,25 +0,0 @@ -import os -from distutils.command.install_headers import install_headers as old_install_headers - -class install_headers (old_install_headers): - - def run (self): - headers = self.distribution.headers - if not headers: - return - - prefix = os.path.dirname(self.install_dir) - for header in headers: - if isinstance(header, tuple): - # Kind of a hack, but I don't know where else to change this... 
- if header[0] == 'numpy._core': - header = ('numpy', header[1]) - if os.path.splitext(header[1])[1] == '.inc': - continue - d = os.path.join(*([prefix]+header[0].split('.'))) - header = header[1] - else: - d = self.install_dir - self.mkpath(d) - (out, _) = self.copy_file(header, d) - self.outfiles.append(out) diff --git a/numpy/distutils/command/sdist.py b/numpy/distutils/command/sdist.py deleted file mode 100644 index e34193883dea..000000000000 --- a/numpy/distutils/command/sdist.py +++ /dev/null @@ -1,27 +0,0 @@ -import sys -if 'setuptools' in sys.modules: - from setuptools.command.sdist import sdist as old_sdist -else: - from distutils.command.sdist import sdist as old_sdist - -from numpy.distutils.misc_util import get_data_files - -class sdist(old_sdist): - - def add_defaults (self): - old_sdist.add_defaults(self) - - dist = self.distribution - - if dist.has_data_files(): - for data in dist.data_files: - self.filelist.extend(get_data_files(data)) - - if dist.has_headers(): - headers = [] - for h in dist.headers: - if isinstance(h, str): headers.append(h) - else: headers.append(h[1]) - self.filelist.extend(headers) - - return diff --git a/numpy/distutils/core.py b/numpy/distutils/core.py deleted file mode 100644 index c4a14e59901f..000000000000 --- a/numpy/distutils/core.py +++ /dev/null @@ -1,215 +0,0 @@ -import sys -from distutils.core import Distribution - -if 'setuptools' in sys.modules: - have_setuptools = True - from setuptools import setup as old_setup - # easy_install imports math, it may be picked up from cwd - from setuptools.command import easy_install - try: - # very old versions of setuptools don't have this - from setuptools.command import bdist_egg - except ImportError: - have_setuptools = False -else: - from distutils.core import setup as old_setup - have_setuptools = False - -import warnings -import distutils.core -import distutils.dist - -from numpy.distutils.extension import Extension # noqa: F401 -from numpy.distutils.numpy_distribution 
import NumpyDistribution -from numpy.distutils.command import config, config_compiler, \ - build, build_py, build_ext, build_clib, build_src, build_scripts, \ - sdist, install_data, install_headers, install, bdist_rpm, \ - install_clib -from numpy.distutils.misc_util import is_sequence, is_string - -numpy_cmdclass = {'build': build.build, - 'build_src': build_src.build_src, - 'build_scripts': build_scripts.build_scripts, - 'config_cc': config_compiler.config_cc, - 'config_fc': config_compiler.config_fc, - 'config': config.config, - 'build_ext': build_ext.build_ext, - 'build_py': build_py.build_py, - 'build_clib': build_clib.build_clib, - 'sdist': sdist.sdist, - 'install_data': install_data.install_data, - 'install_headers': install_headers.install_headers, - 'install_clib': install_clib.install_clib, - 'install': install.install, - 'bdist_rpm': bdist_rpm.bdist_rpm, - } -if have_setuptools: - # Use our own versions of develop and egg_info to ensure that build_src is - # handled appropriately. - from numpy.distutils.command import develop, egg_info - numpy_cmdclass['bdist_egg'] = bdist_egg.bdist_egg - numpy_cmdclass['develop'] = develop.develop - numpy_cmdclass['easy_install'] = easy_install.easy_install - numpy_cmdclass['egg_info'] = egg_info.egg_info - -def _dict_append(d, **kws): - for k, v in kws.items(): - if k not in d: - d[k] = v - continue - dv = d[k] - if isinstance(dv, tuple): - d[k] = dv + tuple(v) - elif isinstance(dv, list): - d[k] = dv + list(v) - elif isinstance(dv, dict): - _dict_append(dv, **v) - elif is_string(dv): - d[k] = dv + v - else: - raise TypeError(repr(type(dv))) - -def _command_line_ok(_cache=None): - """ Return True if command line does not contain any - help or display requests. 
- """ - if _cache: - return _cache[0] - elif _cache is None: - _cache = [] - ok = True - display_opts = ['--'+n for n in Distribution.display_option_names] - for o in Distribution.display_options: - if o[1]: - display_opts.append('-'+o[1]) - for arg in sys.argv: - if arg.startswith('--help') or arg=='-h' or arg in display_opts: - ok = False - break - _cache.append(ok) - return ok - -def get_distribution(always=False): - dist = distutils.core._setup_distribution - # XXX Hack to get numpy installable with easy_install. - # The problem is easy_install runs it's own setup(), which - # sets up distutils.core._setup_distribution. However, - # when our setup() runs, that gets overwritten and lost. - # We can't use isinstance, as the DistributionWithoutHelpCommands - # class is local to a function in setuptools.command.easy_install - if dist is not None and \ - 'DistributionWithoutHelpCommands' in repr(dist): - dist = None - if always and dist is None: - dist = NumpyDistribution() - return dist - -def setup(**attr): - - cmdclass = numpy_cmdclass.copy() - - new_attr = attr.copy() - if 'cmdclass' in new_attr: - cmdclass.update(new_attr['cmdclass']) - new_attr['cmdclass'] = cmdclass - - if 'configuration' in new_attr: - # To avoid calling configuration if there are any errors - # or help request in command in the line. 
- configuration = new_attr.pop('configuration') - - old_dist = distutils.core._setup_distribution - old_stop = distutils.core._setup_stop_after - distutils.core._setup_distribution = None - distutils.core._setup_stop_after = "commandline" - try: - dist = setup(**new_attr) - finally: - distutils.core._setup_distribution = old_dist - distutils.core._setup_stop_after = old_stop - if dist.help or not _command_line_ok(): - # probably displayed help, skip running any commands - return dist - - # create setup dictionary and append to new_attr - config = configuration() - if hasattr(config, 'todict'): - config = config.todict() - _dict_append(new_attr, **config) - - # Move extension source libraries to libraries - libraries = [] - for ext in new_attr.get('ext_modules', []): - new_libraries = [] - for item in ext.libraries: - if is_sequence(item): - lib_name, build_info = item - _check_append_ext_library(libraries, lib_name, build_info) - new_libraries.append(lib_name) - elif is_string(item): - new_libraries.append(item) - else: - raise TypeError("invalid description of extension module " - "library %r" % (item,)) - ext.libraries = new_libraries - if libraries: - if 'libraries' not in new_attr: - new_attr['libraries'] = [] - for item in libraries: - _check_append_library(new_attr['libraries'], item) - - # sources in ext_modules or libraries may contain header files - if ('ext_modules' in new_attr or 'libraries' in new_attr) \ - and 'headers' not in new_attr: - new_attr['headers'] = [] - - # Use our custom NumpyDistribution class instead of distutils' one - new_attr['distclass'] = NumpyDistribution - - return old_setup(**new_attr) - -def _check_append_library(libraries, item): - for libitem in libraries: - if is_sequence(libitem): - if is_sequence(item): - if item[0]==libitem[0]: - if item[1] is libitem[1]: - return - warnings.warn("[0] libraries list contains %r with" - " different build_info" % (item[0],), - stacklevel=2) - break - else: - if item==libitem[0]: - 
warnings.warn("[1] libraries list contains %r with" - " no build_info" % (item[0],), - stacklevel=2) - break - else: - if is_sequence(item): - if item[0]==libitem: - warnings.warn("[2] libraries list contains %r with" - " no build_info" % (item[0],), - stacklevel=2) - break - else: - if item==libitem: - return - libraries.append(item) - -def _check_append_ext_library(libraries, lib_name, build_info): - for item in libraries: - if is_sequence(item): - if item[0]==lib_name: - if item[1] is build_info: - return - warnings.warn("[3] libraries list contains %r with" - " different build_info" % (lib_name,), - stacklevel=2) - break - elif item==lib_name: - warnings.warn("[4] libraries list contains %r with" - " no build_info" % (lib_name,), - stacklevel=2) - break - libraries.append((lib_name, build_info)) diff --git a/numpy/distutils/cpuinfo.py b/numpy/distutils/cpuinfo.py deleted file mode 100644 index 77620210981d..000000000000 --- a/numpy/distutils/cpuinfo.py +++ /dev/null @@ -1,683 +0,0 @@ -#!/usr/bin/env python3 -""" -cpuinfo - -Copyright 2002 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy (BSD style) license. See LICENSE.txt that came with -this distribution for specifics. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
-Pearu Peterson - -""" -__all__ = ['cpu'] - -import os -import platform -import re -import sys -import types -import warnings - -from subprocess import getstatusoutput - - -def getoutput(cmd, successful_status=(0,), stacklevel=1): - try: - status, output = getstatusoutput(cmd) - except OSError as e: - warnings.warn(str(e), UserWarning, stacklevel=stacklevel) - return False, "" - if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status: - return True, output - return False, output - -def command_info(successful_status=(0,), stacklevel=1, **kw): - info = {} - for key in kw: - ok, output = getoutput(kw[key], successful_status=successful_status, - stacklevel=stacklevel+1) - if ok: - info[key] = output.strip() - return info - -def command_by_line(cmd, successful_status=(0,), stacklevel=1): - ok, output = getoutput(cmd, successful_status=successful_status, - stacklevel=stacklevel+1) - if not ok: - return - for line in output.splitlines(): - yield line.strip() - -def key_value_from_command(cmd, sep, successful_status=(0,), - stacklevel=1): - d = {} - for line in command_by_line(cmd, successful_status=successful_status, - stacklevel=stacklevel+1): - l = [s.strip() for s in line.split(sep, 1)] - if len(l) == 2: - d[l[0]] = l[1] - return d - -class CPUInfoBase: - """Holds CPU information and provides methods for requiring - the availability of various CPU features. 
- """ - - def _try_call(self, func): - try: - return func() - except Exception: - pass - - def __getattr__(self, name): - if not name.startswith('_'): - if hasattr(self, '_'+name): - attr = getattr(self, '_'+name) - if isinstance(attr, types.MethodType): - return lambda func=self._try_call,attr=attr : func(attr) - else: - return lambda : None - raise AttributeError(name) - - def _getNCPUs(self): - return 1 - - def __get_nbits(self): - abits = platform.architecture()[0] - nbits = re.compile(r'(\d+)bit').search(abits).group(1) - return nbits - - def _is_32bit(self): - return self.__get_nbits() == '32' - - def _is_64bit(self): - return self.__get_nbits() == '64' - -class LinuxCPUInfo(CPUInfoBase): - - info = None - - def __init__(self): - if self.info is not None: - return - info = [ {} ] - ok, output = getoutput('uname -m') - if ok: - info[0]['uname_m'] = output.strip() - try: - fo = open('/proc/cpuinfo') - except OSError as e: - warnings.warn(str(e), UserWarning, stacklevel=2) - else: - for line in fo: - name_value = [s.strip() for s in line.split(':', 1)] - if len(name_value) != 2: - continue - name, value = name_value - if not info or name in info[-1]: # next processor - info.append({}) - info[-1][name] = value - fo.close() - self.__class__.info = info - - def _not_impl(self): pass - - # Athlon - - def _is_AMD(self): - return self.info[0]['vendor_id']=='AuthenticAMD' - - def _is_AthlonK6_2(self): - return self._is_AMD() and self.info[0]['model'] == '2' - - def _is_AthlonK6_3(self): - return self._is_AMD() and self.info[0]['model'] == '3' - - def _is_AthlonK6(self): - return re.match(r'.*?AMD-K6', self.info[0]['model name']) is not None - - def _is_AthlonK7(self): - return re.match(r'.*?AMD-K7', self.info[0]['model name']) is not None - - def _is_AthlonMP(self): - return re.match(r'.*?Athlon\(tm\) MP\b', - self.info[0]['model name']) is not None - - def _is_AMD64(self): - return self.is_AMD() and self.info[0]['family'] == '15' - - def _is_Athlon64(self): - return 
re.match(r'.*?Athlon\(tm\) 64\b', - self.info[0]['model name']) is not None - - def _is_AthlonHX(self): - return re.match(r'.*?Athlon HX\b', - self.info[0]['model name']) is not None - - def _is_Opteron(self): - return re.match(r'.*?Opteron\b', - self.info[0]['model name']) is not None - - def _is_Hammer(self): - return re.match(r'.*?Hammer\b', - self.info[0]['model name']) is not None - - # Alpha - - def _is_Alpha(self): - return self.info[0]['cpu']=='Alpha' - - def _is_EV4(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'EV4' - - def _is_EV5(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'EV5' - - def _is_EV56(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'EV56' - - def _is_PCA56(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'PCA56' - - # Intel - - #XXX - _is_i386 = _not_impl - - def _is_Intel(self): - return self.info[0]['vendor_id']=='GenuineIntel' - - def _is_i486(self): - return self.info[0]['cpu']=='i486' - - def _is_i586(self): - return self.is_Intel() and self.info[0]['cpu family'] == '5' - - def _is_i686(self): - return self.is_Intel() and self.info[0]['cpu family'] == '6' - - def _is_Celeron(self): - return re.match(r'.*?Celeron', - self.info[0]['model name']) is not None - - def _is_Pentium(self): - return re.match(r'.*?Pentium', - self.info[0]['model name']) is not None - - def _is_PentiumII(self): - return re.match(r'.*?Pentium.*?II\b', - self.info[0]['model name']) is not None - - def _is_PentiumPro(self): - return re.match(r'.*?PentiumPro\b', - self.info[0]['model name']) is not None - - def _is_PentiumMMX(self): - return re.match(r'.*?Pentium.*?MMX\b', - self.info[0]['model name']) is not None - - def _is_PentiumIII(self): - return re.match(r'.*?Pentium.*?III\b', - self.info[0]['model name']) is not None - - def _is_PentiumIV(self): - return re.match(r'.*?Pentium.*?(IV|4)\b', - self.info[0]['model name']) is not None - - def _is_PentiumM(self): - return 
re.match(r'.*?Pentium.*?M\b', - self.info[0]['model name']) is not None - - def _is_Prescott(self): - return self.is_PentiumIV() and self.has_sse3() - - def _is_Nocona(self): - return (self.is_Intel() - and (self.info[0]['cpu family'] == '6' - or self.info[0]['cpu family'] == '15') - and (self.has_sse3() and not self.has_ssse3()) - and re.match(r'.*?\blm\b', self.info[0]['flags']) is not None) - - def _is_Core2(self): - return (self.is_64bit() and self.is_Intel() and - re.match(r'.*?Core\(TM\)2\b', - self.info[0]['model name']) is not None) - - def _is_Itanium(self): - return re.match(r'.*?Itanium\b', - self.info[0]['family']) is not None - - def _is_XEON(self): - return re.match(r'.*?XEON\b', - self.info[0]['model name'], re.IGNORECASE) is not None - - _is_Xeon = _is_XEON - - # Varia - - def _is_singleCPU(self): - return len(self.info) == 1 - - def _getNCPUs(self): - return len(self.info) - - def _has_fdiv_bug(self): - return self.info[0]['fdiv_bug']=='yes' - - def _has_f00f_bug(self): - return self.info[0]['f00f_bug']=='yes' - - def _has_mmx(self): - return re.match(r'.*?\bmmx\b', self.info[0]['flags']) is not None - - def _has_sse(self): - return re.match(r'.*?\bsse\b', self.info[0]['flags']) is not None - - def _has_sse2(self): - return re.match(r'.*?\bsse2\b', self.info[0]['flags']) is not None - - def _has_sse3(self): - return re.match(r'.*?\bpni\b', self.info[0]['flags']) is not None - - def _has_ssse3(self): - return re.match(r'.*?\bssse3\b', self.info[0]['flags']) is not None - - def _has_3dnow(self): - return re.match(r'.*?\b3dnow\b', self.info[0]['flags']) is not None - - def _has_3dnowext(self): - return re.match(r'.*?\b3dnowext\b', self.info[0]['flags']) is not None - -class IRIXCPUInfo(CPUInfoBase): - info = None - - def __init__(self): - if self.info is not None: - return - info = key_value_from_command('sysconf', sep=' ', - successful_status=(0, 1)) - self.__class__.info = info - - def _not_impl(self): pass - - def _is_singleCPU(self): - return 
self.info.get('NUM_PROCESSORS') == '1' - - def _getNCPUs(self): - return int(self.info.get('NUM_PROCESSORS', 1)) - - def __cputype(self, n): - return self.info.get('PROCESSORS').split()[0].lower() == 'r%s' % (n) - def _is_r2000(self): return self.__cputype(2000) - def _is_r3000(self): return self.__cputype(3000) - def _is_r3900(self): return self.__cputype(3900) - def _is_r4000(self): return self.__cputype(4000) - def _is_r4100(self): return self.__cputype(4100) - def _is_r4300(self): return self.__cputype(4300) - def _is_r4400(self): return self.__cputype(4400) - def _is_r4600(self): return self.__cputype(4600) - def _is_r4650(self): return self.__cputype(4650) - def _is_r5000(self): return self.__cputype(5000) - def _is_r6000(self): return self.__cputype(6000) - def _is_r8000(self): return self.__cputype(8000) - def _is_r10000(self): return self.__cputype(10000) - def _is_r12000(self): return self.__cputype(12000) - def _is_rorion(self): return self.__cputype('orion') - - def get_ip(self): - try: return self.info.get('MACHINE') - except Exception: pass - def __machine(self, n): - return self.info.get('MACHINE').lower() == 'ip%s' % (n) - def _is_IP19(self): return self.__machine(19) - def _is_IP20(self): return self.__machine(20) - def _is_IP21(self): return self.__machine(21) - def _is_IP22(self): return self.__machine(22) - def _is_IP22_4k(self): return self.__machine(22) and self._is_r4000() - def _is_IP22_5k(self): return self.__machine(22) and self._is_r5000() - def _is_IP24(self): return self.__machine(24) - def _is_IP25(self): return self.__machine(25) - def _is_IP26(self): return self.__machine(26) - def _is_IP27(self): return self.__machine(27) - def _is_IP28(self): return self.__machine(28) - def _is_IP30(self): return self.__machine(30) - def _is_IP32(self): return self.__machine(32) - def _is_IP32_5k(self): return self.__machine(32) and self._is_r5000() - def _is_IP32_10k(self): return self.__machine(32) and self._is_r10000() - - -class 
DarwinCPUInfo(CPUInfoBase): - info = None - - def __init__(self): - if self.info is not None: - return - info = command_info(arch='arch', - machine='machine') - info['sysctl_hw'] = key_value_from_command('sysctl hw', sep='=') - self.__class__.info = info - - def _not_impl(self): pass - - def _getNCPUs(self): - return int(self.info['sysctl_hw'].get('hw.ncpu', 1)) - - def _is_Power_Macintosh(self): - return self.info['sysctl_hw']['hw.machine']=='Power Macintosh' - - def _is_i386(self): - return self.info['arch']=='i386' - def _is_ppc(self): - return self.info['arch']=='ppc' - - def __machine(self, n): - return self.info['machine'] == 'ppc%s'%n - def _is_ppc601(self): return self.__machine(601) - def _is_ppc602(self): return self.__machine(602) - def _is_ppc603(self): return self.__machine(603) - def _is_ppc603e(self): return self.__machine('603e') - def _is_ppc604(self): return self.__machine(604) - def _is_ppc604e(self): return self.__machine('604e') - def _is_ppc620(self): return self.__machine(620) - def _is_ppc630(self): return self.__machine(630) - def _is_ppc740(self): return self.__machine(740) - def _is_ppc7400(self): return self.__machine(7400) - def _is_ppc7450(self): return self.__machine(7450) - def _is_ppc750(self): return self.__machine(750) - def _is_ppc403(self): return self.__machine(403) - def _is_ppc505(self): return self.__machine(505) - def _is_ppc801(self): return self.__machine(801) - def _is_ppc821(self): return self.__machine(821) - def _is_ppc823(self): return self.__machine(823) - def _is_ppc860(self): return self.__machine(860) - - -class SunOSCPUInfo(CPUInfoBase): - - info = None - - def __init__(self): - if self.info is not None: - return - info = command_info(arch='arch', - mach='mach', - uname_i='uname_i', - isainfo_b='isainfo -b', - isainfo_n='isainfo -n', - ) - info['uname_X'] = key_value_from_command('uname -X', sep='=') - for line in command_by_line('psrinfo -v 0'): - m = re.match(r'\s*The (?P

[\w\d]+) processor operates at', line) - if m: - info['processor'] = m.group('p') - break - self.__class__.info = info - - def _not_impl(self): pass - - def _is_i386(self): - return self.info['isainfo_n']=='i386' - def _is_sparc(self): - return self.info['isainfo_n']=='sparc' - def _is_sparcv9(self): - return self.info['isainfo_n']=='sparcv9' - - def _getNCPUs(self): - return int(self.info['uname_X'].get('NumCPU', 1)) - - def _is_sun4(self): - return self.info['arch']=='sun4' - - def _is_SUNW(self): - return re.match(r'SUNW', self.info['uname_i']) is not None - def _is_sparcstation5(self): - return re.match(r'.*SPARCstation-5', self.info['uname_i']) is not None - def _is_ultra1(self): - return re.match(r'.*Ultra-1', self.info['uname_i']) is not None - def _is_ultra250(self): - return re.match(r'.*Ultra-250', self.info['uname_i']) is not None - def _is_ultra2(self): - return re.match(r'.*Ultra-2', self.info['uname_i']) is not None - def _is_ultra30(self): - return re.match(r'.*Ultra-30', self.info['uname_i']) is not None - def _is_ultra4(self): - return re.match(r'.*Ultra-4', self.info['uname_i']) is not None - def _is_ultra5_10(self): - return re.match(r'.*Ultra-5_10', self.info['uname_i']) is not None - def _is_ultra5(self): - return re.match(r'.*Ultra-5', self.info['uname_i']) is not None - def _is_ultra60(self): - return re.match(r'.*Ultra-60', self.info['uname_i']) is not None - def _is_ultra80(self): - return re.match(r'.*Ultra-80', self.info['uname_i']) is not None - def _is_ultraenterprice(self): - return re.match(r'.*Ultra-Enterprise', self.info['uname_i']) is not None - def _is_ultraenterprice10k(self): - return re.match(r'.*Ultra-Enterprise-10000', self.info['uname_i']) is not None - def _is_sunfire(self): - return re.match(r'.*Sun-Fire', self.info['uname_i']) is not None - def _is_ultra(self): - return re.match(r'.*Ultra', self.info['uname_i']) is not None - - def _is_cpusparcv7(self): - return self.info['processor']=='sparcv7' - def 
_is_cpusparcv8(self): - return self.info['processor']=='sparcv8' - def _is_cpusparcv9(self): - return self.info['processor']=='sparcv9' - -class Win32CPUInfo(CPUInfoBase): - - info = None - pkey = r"HARDWARE\DESCRIPTION\System\CentralProcessor" - # XXX: what does the value of - # HKEY_LOCAL_MACHINE\HARDWARE\DESCRIPTION\System\CentralProcessor\0 - # mean? - - def __init__(self): - if self.info is not None: - return - info = [] - try: - #XXX: Bad style to use so long `try:...except:...`. Fix it! - import winreg - - prgx = re.compile(r"family\s+(?P\d+)\s+model\s+(?P\d+)" - r"\s+stepping\s+(?P\d+)", re.IGNORECASE) - chnd=winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, self.pkey) - pnum=0 - while True: - try: - proc=winreg.EnumKey(chnd, pnum) - except winreg.error: - break - else: - pnum+=1 - info.append({"Processor":proc}) - phnd=winreg.OpenKey(chnd, proc) - pidx=0 - while True: - try: - name, value, vtpe=winreg.EnumValue(phnd, pidx) - except winreg.error: - break - else: - pidx=pidx+1 - info[-1][name]=value - if name=="Identifier": - srch=prgx.search(value) - if srch: - info[-1]["Family"]=int(srch.group("FML")) - info[-1]["Model"]=int(srch.group("MDL")) - info[-1]["Stepping"]=int(srch.group("STP")) - except Exception as e: - print(e, '(ignoring)') - self.__class__.info = info - - def _not_impl(self): pass - - # Athlon - - def _is_AMD(self): - return self.info[0]['VendorIdentifier']=='AuthenticAMD' - - def _is_Am486(self): - return self.is_AMD() and self.info[0]['Family']==4 - - def _is_Am5x86(self): - return self.is_AMD() and self.info[0]['Family']==4 - - def _is_AMDK5(self): - return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model'] in [0, 1, 2, 3] - - def _is_AMDK6(self): - return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model'] in [6, 7] - - def _is_AMDK6_2(self): - return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model']==8 - - def _is_AMDK6_3(self): - return self.is_AMD() and 
self.info[0]['Family']==5 \ - and self.info[0]['Model']==9 - - def _is_AMDK7(self): - return self.is_AMD() and self.info[0]['Family'] == 6 - - # To reliably distinguish between the different types of AMD64 chips - # (Athlon64, Operton, Athlon64 X2, Semperon, Turion 64, etc.) would - # require looking at the 'brand' from cpuid - - def _is_AMD64(self): - return self.is_AMD() and self.info[0]['Family'] == 15 - - # Intel - - def _is_Intel(self): - return self.info[0]['VendorIdentifier']=='GenuineIntel' - - def _is_i386(self): - return self.info[0]['Family']==3 - - def _is_i486(self): - return self.info[0]['Family']==4 - - def _is_i586(self): - return self.is_Intel() and self.info[0]['Family']==5 - - def _is_i686(self): - return self.is_Intel() and self.info[0]['Family']==6 - - def _is_Pentium(self): - return self.is_Intel() and self.info[0]['Family']==5 - - def _is_PentiumMMX(self): - return self.is_Intel() and self.info[0]['Family']==5 \ - and self.info[0]['Model']==4 - - def _is_PentiumPro(self): - return self.is_Intel() and self.info[0]['Family']==6 \ - and self.info[0]['Model']==1 - - def _is_PentiumII(self): - return self.is_Intel() and self.info[0]['Family']==6 \ - and self.info[0]['Model'] in [3, 5, 6] - - def _is_PentiumIII(self): - return self.is_Intel() and self.info[0]['Family']==6 \ - and self.info[0]['Model'] in [7, 8, 9, 10, 11] - - def _is_PentiumIV(self): - return self.is_Intel() and self.info[0]['Family']==15 - - def _is_PentiumM(self): - return self.is_Intel() and self.info[0]['Family'] == 6 \ - and self.info[0]['Model'] in [9, 13, 14] - - def _is_Core2(self): - return self.is_Intel() and self.info[0]['Family'] == 6 \ - and self.info[0]['Model'] in [15, 16, 17] - - # Varia - - def _is_singleCPU(self): - return len(self.info) == 1 - - def _getNCPUs(self): - return len(self.info) - - def _has_mmx(self): - if self.is_Intel(): - return (self.info[0]['Family']==5 and self.info[0]['Model']==4) \ - or (self.info[0]['Family'] in [6, 15]) - elif self.is_AMD(): 
- return self.info[0]['Family'] in [5, 6, 15] - else: - return False - - def _has_sse(self): - if self.is_Intel(): - return ((self.info[0]['Family']==6 and - self.info[0]['Model'] in [7, 8, 9, 10, 11]) - or self.info[0]['Family']==15) - elif self.is_AMD(): - return ((self.info[0]['Family']==6 and - self.info[0]['Model'] in [6, 7, 8, 10]) - or self.info[0]['Family']==15) - else: - return False - - def _has_sse2(self): - if self.is_Intel(): - return self.is_Pentium4() or self.is_PentiumM() \ - or self.is_Core2() - elif self.is_AMD(): - return self.is_AMD64() - else: - return False - - def _has_3dnow(self): - return self.is_AMD() and self.info[0]['Family'] in [5, 6, 15] - - def _has_3dnowext(self): - return self.is_AMD() and self.info[0]['Family'] in [6, 15] - -if sys.platform.startswith('linux'): # variations: linux2,linux-i386 (any others?) - cpuinfo = LinuxCPUInfo -elif sys.platform.startswith('irix'): - cpuinfo = IRIXCPUInfo -elif sys.platform == 'darwin': - cpuinfo = DarwinCPUInfo -elif sys.platform.startswith('sunos'): - cpuinfo = SunOSCPUInfo -elif sys.platform.startswith('win32'): - cpuinfo = Win32CPUInfo -elif sys.platform.startswith('cygwin'): - cpuinfo = LinuxCPUInfo -#XXX: other OS's. Eg. use _winreg on Win32. Or os.uname on unices. 
-else: - cpuinfo = CPUInfoBase - -cpu = cpuinfo() - -#if __name__ == "__main__": -# -# cpu.is_blaa() -# cpu.is_Intel() -# cpu.is_Alpha() -# -# print('CPU information:'), -# for name in dir(cpuinfo): -# if name[0]=='_' and name[1]!='_': -# r = getattr(cpu,name[1:])() -# if r: -# if r!=1: -# print('%s=%s' %(name[1:],r)) -# else: -# print(name[1:]), -# print() diff --git a/numpy/distutils/exec_command.py b/numpy/distutils/exec_command.py deleted file mode 100644 index a67453abf624..000000000000 --- a/numpy/distutils/exec_command.py +++ /dev/null @@ -1,315 +0,0 @@ -""" -exec_command - -Implements exec_command function that is (almost) equivalent to -commands.getstatusoutput function but on NT, DOS systems the -returned status is actually correct (though, the returned status -values may be different by a factor). In addition, exec_command -takes keyword arguments for (re-)defining environment variables. - -Provides functions: - - exec_command --- execute command in a specified directory and - in the modified environment. - find_executable --- locate a command using info from environment - variable PATH. Equivalent to posix `which` - command. - -Author: Pearu Peterson -Created: 11 January 2003 - -Requires: Python 2.x - -Successfully tested on: - -======== ============ ================================================= -os.name sys.platform comments -======== ============ ================================================= -posix linux2 Debian (sid) Linux, Python 2.1.3+, 2.2.3+, 2.3.3 - PyCrust 0.9.3, Idle 1.0.2 -posix linux2 Red Hat 9 Linux, Python 2.1.3, 2.2.2, 2.3.2 -posix sunos5 SunOS 5.9, Python 2.2, 2.3.2 -posix darwin Darwin 7.2.0, Python 2.3 -nt win32 Windows Me - Python 2.3(EE), Idle 1.0, PyCrust 0.7.2 - Python 2.1.1 Idle 0.8 -nt win32 Windows 98, Python 2.1.1. Idle 0.8 -nt win32 Cygwin 98-4.10, Python 2.1.1(MSC) - echo tests - fail i.e. redefining environment variables may - not work. FIXED: don't use cygwin echo! 
- Comment: also `cmd /c echo` will not work - but redefining environment variables do work. -posix cygwin Cygwin 98-4.10, Python 2.3.3(cygming special) -nt win32 Windows XP, Python 2.3.3 -======== ============ ================================================= - -Known bugs: - -* Tests, that send messages to stderr, fail when executed from MSYS prompt - because the messages are lost at some point. - -""" -__all__ = ['exec_command', 'find_executable'] - -import os -import sys -import subprocess -import locale -import warnings - -from numpy.distutils.misc_util import is_sequence, make_temp_file -from numpy.distutils import log - -def filepath_from_subprocess_output(output): - """ - Convert `bytes` in the encoding used by a subprocess into a filesystem-appropriate `str`. - - Inherited from `exec_command`, and possibly incorrect. - """ - mylocale = locale.getpreferredencoding(False) - if mylocale is None: - mylocale = 'ascii' - output = output.decode(mylocale, errors='replace') - output = output.replace('\r\n', '\n') - # Another historical oddity - if output[-1:] == '\n': - output = output[:-1] - return output - - -def forward_bytes_to_stdout(val): - """ - Forward bytes from a subprocess call to the console, without attempting to - decode them. - - The assumption is that the subprocess call already returned bytes in - a suitable encoding. 
- """ - if hasattr(sys.stdout, 'buffer'): - # use the underlying binary output if there is one - sys.stdout.buffer.write(val) - elif hasattr(sys.stdout, 'encoding'): - # round-trip the encoding if necessary - sys.stdout.write(val.decode(sys.stdout.encoding)) - else: - # make a best-guess at the encoding - sys.stdout.write(val.decode('utf8', errors='replace')) - - -def temp_file_name(): - # 2019-01-30, 1.17 - warnings.warn('temp_file_name is deprecated since NumPy v1.17, use ' - 'tempfile.mkstemp instead', DeprecationWarning, stacklevel=1) - fo, name = make_temp_file() - fo.close() - return name - -def get_pythonexe(): - pythonexe = sys.executable - if os.name in ['nt', 'dos']: - fdir, fn = os.path.split(pythonexe) - fn = fn.upper().replace('PYTHONW', 'PYTHON') - pythonexe = os.path.join(fdir, fn) - assert os.path.isfile(pythonexe), '%r is not a file' % (pythonexe,) - return pythonexe - -def find_executable(exe, path=None, _cache={}): - """Return full path of a executable or None. - - Symbolic links are not followed. 
- """ - key = exe, path - try: - return _cache[key] - except KeyError: - pass - log.debug('find_executable(%r)' % exe) - orig_exe = exe - - if path is None: - path = os.environ.get('PATH', os.defpath) - if os.name=='posix': - realpath = os.path.realpath - else: - realpath = lambda a:a - - if exe.startswith('"'): - exe = exe[1:-1] - - suffixes = [''] - if os.name in ['nt', 'dos', 'os2']: - fn, ext = os.path.splitext(exe) - extra_suffixes = ['.exe', '.com', '.bat'] - if ext.lower() not in extra_suffixes: - suffixes = extra_suffixes - - if os.path.isabs(exe): - paths = [''] - else: - paths = [ os.path.abspath(p) for p in path.split(os.pathsep) ] - - for path in paths: - fn = os.path.join(path, exe) - for s in suffixes: - f_ext = fn+s - if not os.path.islink(f_ext): - f_ext = realpath(f_ext) - if os.path.isfile(f_ext) and os.access(f_ext, os.X_OK): - log.info('Found executable %s' % f_ext) - _cache[key] = f_ext - return f_ext - - log.warn('Could not locate executable %s' % orig_exe) - return None - -############################################################ - -def _preserve_environment( names ): - log.debug('_preserve_environment(%r)' % (names)) - env = {name: os.environ.get(name) for name in names} - return env - -def _update_environment( **env ): - log.debug('_update_environment(...)') - for name, value in env.items(): - os.environ[name] = value or '' - -def exec_command(command, execute_in='', use_shell=None, use_tee=None, - _with_python = 1, **env ): - """ - Return (status,output) of executed command. - - .. deprecated:: 1.17 - Use subprocess.Popen instead - - Parameters - ---------- - command : str - A concatenated string of executable and arguments. - execute_in : str - Before running command ``cd execute_in`` and after ``cd -``. - use_shell : {bool, None}, optional - If True, execute ``sh -c command``. Default None (True) - use_tee : {bool, None}, optional - If True use tee. 
Default None (True) - - - Returns - ------- - res : str - Both stdout and stderr messages. - - Notes - ----- - On NT, DOS systems the returned status is correct for external commands. - Wild cards will not work for non-posix systems or when use_shell=0. - - """ - # 2019-01-30, 1.17 - warnings.warn('exec_command is deprecated since NumPy v1.17, use ' - 'subprocess.Popen instead', DeprecationWarning, stacklevel=1) - log.debug('exec_command(%r,%s)' % (command, - ','.join(['%s=%r'%kv for kv in env.items()]))) - - if use_tee is None: - use_tee = os.name=='posix' - if use_shell is None: - use_shell = os.name=='posix' - execute_in = os.path.abspath(execute_in) - oldcwd = os.path.abspath(os.getcwd()) - - if __name__[-12:] == 'exec_command': - exec_dir = os.path.dirname(os.path.abspath(__file__)) - elif os.path.isfile('exec_command.py'): - exec_dir = os.path.abspath('.') - else: - exec_dir = os.path.abspath(sys.argv[0]) - if os.path.isfile(exec_dir): - exec_dir = os.path.dirname(exec_dir) - - if oldcwd!=execute_in: - os.chdir(execute_in) - log.debug('New cwd: %s' % execute_in) - else: - log.debug('Retaining cwd: %s' % oldcwd) - - oldenv = _preserve_environment( list(env.keys()) ) - _update_environment( **env ) - - try: - st = _exec_command(command, - use_shell=use_shell, - use_tee=use_tee, - **env) - finally: - if oldcwd!=execute_in: - os.chdir(oldcwd) - log.debug('Restored cwd to %s' % oldcwd) - _update_environment(**oldenv) - - return st - - -def _exec_command(command, use_shell=None, use_tee = None, **env): - """ - Internal workhorse for exec_command(). 
- """ - if use_shell is None: - use_shell = os.name=='posix' - if use_tee is None: - use_tee = os.name=='posix' - - if os.name == 'posix' and use_shell: - # On POSIX, subprocess always uses /bin/sh, override - sh = os.environ.get('SHELL', '/bin/sh') - if is_sequence(command): - command = [sh, '-c', ' '.join(command)] - else: - command = [sh, '-c', command] - use_shell = False - - elif os.name == 'nt' and is_sequence(command): - # On Windows, join the string for CreateProcess() ourselves as - # subprocess does it a bit differently - command = ' '.join(_quote_arg(arg) for arg in command) - - # Inherit environment by default - env = env or None - try: - # text is set to False so that communicate() - # will return bytes. We need to decode the output ourselves - # so that Python will not raise a UnicodeDecodeError when - # it encounters an invalid character; rather, we simply replace it - proc = subprocess.Popen(command, shell=use_shell, env=env, text=False, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - except OSError: - # Return 127, as os.spawn*() and /bin/sh do - return 127, '' - - text, err = proc.communicate() - mylocale = locale.getpreferredencoding(False) - if mylocale is None: - mylocale = 'ascii' - text = text.decode(mylocale, errors='replace') - text = text.replace('\r\n', '\n') - # Another historical oddity - if text[-1:] == '\n': - text = text[:-1] - - if use_tee and text: - print(text) - return proc.returncode, text - - -def _quote_arg(arg): - """ - Quote the argument for safe use in a shell command line. - """ - # If there is a quote in the string, assume relevants parts of the - # string are already quoted (e.g. 
'-I"C:\\Program Files\\..."') - if '"' not in arg and ' ' in arg: - return '"%s"' % arg - return arg - -############################################################ diff --git a/numpy/distutils/extension.py b/numpy/distutils/extension.py deleted file mode 100644 index 3ede013e0f3c..000000000000 --- a/numpy/distutils/extension.py +++ /dev/null @@ -1,107 +0,0 @@ -"""distutils.extension - -Provides the Extension class, used to describe C/C++ extension -modules in setup scripts. - -Overridden to support f2py. - -""" -import re -from distutils.extension import Extension as old_Extension - - -cxx_ext_re = re.compile(r'.*\.(cpp|cxx|cc)\Z', re.I).match -fortran_pyf_ext_re = re.compile(r'.*\.(f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match - - -class Extension(old_Extension): - """ - Parameters - ---------- - name : str - Extension name. - sources : list of str - List of source file locations relative to the top directory of - the package. - extra_compile_args : list of str - Extra command line arguments to pass to the compiler. - extra_f77_compile_args : list of str - Extra command line arguments to pass to the fortran77 compiler. - extra_f90_compile_args : list of str - Extra command line arguments to pass to the fortran90 compiler. 
- """ - def __init__( - self, name, sources, - include_dirs=None, - define_macros=None, - undef_macros=None, - library_dirs=None, - libraries=None, - runtime_library_dirs=None, - extra_objects=None, - extra_compile_args=None, - extra_link_args=None, - export_symbols=None, - swig_opts=None, - depends=None, - language=None, - f2py_options=None, - module_dirs=None, - extra_c_compile_args=None, - extra_cxx_compile_args=None, - extra_f77_compile_args=None, - extra_f90_compile_args=None,): - - old_Extension.__init__( - self, name, [], - include_dirs=include_dirs, - define_macros=define_macros, - undef_macros=undef_macros, - library_dirs=library_dirs, - libraries=libraries, - runtime_library_dirs=runtime_library_dirs, - extra_objects=extra_objects, - extra_compile_args=extra_compile_args, - extra_link_args=extra_link_args, - export_symbols=export_symbols) - - # Avoid assert statements checking that sources contains strings: - self.sources = sources - - # Python 2.4 distutils new features - self.swig_opts = swig_opts or [] - # swig_opts is assumed to be a list. Here we handle the case where it - # is specified as a string instead. 
- if isinstance(self.swig_opts, str): - import warnings - msg = "swig_opts is specified as a string instead of a list" - warnings.warn(msg, SyntaxWarning, stacklevel=2) - self.swig_opts = self.swig_opts.split() - - # Python 2.3 distutils new features - self.depends = depends or [] - self.language = language - - # numpy_distutils features - self.f2py_options = f2py_options or [] - self.module_dirs = module_dirs or [] - self.extra_c_compile_args = extra_c_compile_args or [] - self.extra_cxx_compile_args = extra_cxx_compile_args or [] - self.extra_f77_compile_args = extra_f77_compile_args or [] - self.extra_f90_compile_args = extra_f90_compile_args or [] - - return - - def has_cxx_sources(self): - for source in self.sources: - if cxx_ext_re(str(source)): - return True - return False - - def has_f2py_sources(self): - for source in self.sources: - if fortran_pyf_ext_re(source): - return True - return False - -# class Extension diff --git a/numpy/distutils/fcompiler/__init__.py b/numpy/distutils/fcompiler/__init__.py deleted file mode 100644 index 5160e2abf54f..000000000000 --- a/numpy/distutils/fcompiler/__init__.py +++ /dev/null @@ -1,1035 +0,0 @@ -"""numpy.distutils.fcompiler - -Contains FCompiler, an abstract base class that defines the interface -for the numpy.distutils Fortran compiler abstraction model. - -Terminology: - -To be consistent, where the term 'executable' is used, it means the single -file, like 'gcc', that is executed, and should be a string. In contrast, -'command' means the entire command line, like ['gcc', '-c', 'file.c'], and -should be a list. - -But note that FCompiler.executables is actually a dictionary of commands. 
- -""" -__all__ = ['FCompiler', 'new_fcompiler', 'show_fcompilers', - 'dummy_fortran_file'] - -import os -import sys -import re -from pathlib import Path - -from distutils.sysconfig import get_python_lib -from distutils.fancy_getopt import FancyGetopt -from distutils.errors import DistutilsModuleError, \ - DistutilsExecError, CompileError, LinkError, DistutilsPlatformError -from distutils.util import split_quoted, strtobool - -from numpy.distutils.ccompiler import CCompiler, gen_lib_options -from numpy.distutils import log -from numpy.distutils.misc_util import is_string, all_strings, is_sequence, \ - make_temp_file, get_shared_lib_extension -from numpy.distutils.exec_command import find_executable -from numpy.distutils import _shell_utils - -from .environment import EnvironmentConfig - -__metaclass__ = type - - -FORTRAN_COMMON_FIXED_EXTENSIONS = ['.for', '.ftn', '.f77', '.f'] - - -class CompilerNotFound(Exception): - pass - -def flaglist(s): - if is_string(s): - return split_quoted(s) - else: - return s - -def str2bool(s): - if is_string(s): - return strtobool(s) - return bool(s) - -def is_sequence_of_strings(seq): - return is_sequence(seq) and all_strings(seq) - -class FCompiler(CCompiler): - """Abstract base class to define the interface that must be implemented - by real Fortran compiler classes. - - Methods that subclasses may redefine: - - update_executables(), find_executables(), get_version() - get_flags(), get_flags_opt(), get_flags_arch(), get_flags_debug() - get_flags_f77(), get_flags_opt_f77(), get_flags_arch_f77(), - get_flags_debug_f77(), get_flags_f90(), get_flags_opt_f90(), - get_flags_arch_f90(), get_flags_debug_f90(), - get_flags_fix(), get_flags_linker_so() - - DON'T call these methods (except get_version) after - constructing a compiler instance or inside any other method. - All methods, except update_executables() and find_executables(), - may call the get_version() method. 
- - After constructing a compiler instance, always call customize(dist=None) - method that finalizes compiler construction and makes the following - attributes available: - compiler_f77 - compiler_f90 - compiler_fix - linker_so - archiver - ranlib - libraries - library_dirs - """ - - # These are the environment variables and distutils keys used. - # Each configuration description is - # (, , , , ) - # The hook names are handled by the self._environment_hook method. - # - names starting with 'self.' call methods in this class - # - names starting with 'exe.' return the key in the executables dict - # - names like 'flags.YYY' return self.get_flag_YYY() - # convert is either None or a function to convert a string to the - # appropriate type used. - - distutils_vars = EnvironmentConfig( - distutils_section='config_fc', - noopt = (None, None, 'noopt', str2bool, False), - noarch = (None, None, 'noarch', str2bool, False), - debug = (None, None, 'debug', str2bool, False), - verbose = (None, None, 'verbose', str2bool, False), - ) - - command_vars = EnvironmentConfig( - distutils_section='config_fc', - compiler_f77 = ('exe.compiler_f77', 'F77', 'f77exec', None, False), - compiler_f90 = ('exe.compiler_f90', 'F90', 'f90exec', None, False), - compiler_fix = ('exe.compiler_fix', 'F90', 'f90exec', None, False), - version_cmd = ('exe.version_cmd', None, None, None, False), - linker_so = ('exe.linker_so', 'LDSHARED', 'ldshared', None, False), - linker_exe = ('exe.linker_exe', 'LD', 'ld', None, False), - archiver = (None, 'AR', 'ar', None, False), - ranlib = (None, 'RANLIB', 'ranlib', None, False), - ) - - flag_vars = EnvironmentConfig( - distutils_section='config_fc', - f77 = ('flags.f77', 'F77FLAGS', 'f77flags', flaglist, True), - f90 = ('flags.f90', 'F90FLAGS', 'f90flags', flaglist, True), - free = ('flags.free', 'FREEFLAGS', 'freeflags', flaglist, True), - fix = ('flags.fix', None, None, flaglist, False), - opt = ('flags.opt', 'FOPT', 'opt', flaglist, True), - opt_f77 = 
('flags.opt_f77', None, None, flaglist, False), - opt_f90 = ('flags.opt_f90', None, None, flaglist, False), - arch = ('flags.arch', 'FARCH', 'arch', flaglist, False), - arch_f77 = ('flags.arch_f77', None, None, flaglist, False), - arch_f90 = ('flags.arch_f90', None, None, flaglist, False), - debug = ('flags.debug', 'FDEBUG', 'fdebug', flaglist, True), - debug_f77 = ('flags.debug_f77', None, None, flaglist, False), - debug_f90 = ('flags.debug_f90', None, None, flaglist, False), - flags = ('self.get_flags', 'FFLAGS', 'fflags', flaglist, True), - linker_so = ('flags.linker_so', 'LDFLAGS', 'ldflags', flaglist, True), - linker_exe = ('flags.linker_exe', 'LDFLAGS', 'ldflags', flaglist, True), - ar = ('flags.ar', 'ARFLAGS', 'arflags', flaglist, True), - ) - - language_map = {'.f': 'f77', - '.for': 'f77', - '.F': 'f77', # XXX: needs preprocessor - '.ftn': 'f77', - '.f77': 'f77', - '.f90': 'f90', - '.F90': 'f90', # XXX: needs preprocessor - '.f95': 'f90', - } - language_order = ['f90', 'f77'] - - - # These will be set by the subclass - - compiler_type = None - compiler_aliases = () - version_pattern = None - - possible_executables = [] - executables = { - 'version_cmd': ["f77", "-v"], - 'compiler_f77': ["f77"], - 'compiler_f90': ["f90"], - 'compiler_fix': ["f90", "-fixed"], - 'linker_so': ["f90", "-shared"], - 'linker_exe': ["f90"], - 'archiver': ["ar", "-cr"], - 'ranlib': None, - } - - # If compiler does not support compiling Fortran 90 then it can - # suggest using another compiler. For example, gnu would suggest - # gnu95 compiler type when there are F90 sources. - suggested_f90_compiler = None - - compile_switch = "-c" - object_switch = "-o " # Ending space matters! It will be stripped - # but if it is missing then object_switch - # will be prefixed to object file name by - # string concatenation. - library_switch = "-o " # Ditto! - - # Switch to specify where module files are created and searched - # for USE statement. 
Normally it is a string and also here ending - # space matters. See above. - module_dir_switch = None - - # Switch to specify where module files are searched for USE statement. - module_include_switch = '-I' - - pic_flags = [] # Flags to create position-independent code - - src_extensions = ['.for', '.ftn', '.f77', '.f', '.f90', '.f95', '.F', '.F90', '.FOR'] - obj_extension = ".o" - - shared_lib_extension = get_shared_lib_extension() - static_lib_extension = ".a" # or .lib - static_lib_format = "lib%s%s" # or %s%s - shared_lib_format = "%s%s" - exe_extension = "" - - _exe_cache = {} - - _executable_keys = ['version_cmd', 'compiler_f77', 'compiler_f90', - 'compiler_fix', 'linker_so', 'linker_exe', 'archiver', - 'ranlib'] - - # This will be set by new_fcompiler when called in - # command/{build_ext.py, build_clib.py, config.py} files. - c_compiler = None - - # extra_{f77,f90}_compile_args are set by build_ext.build_extension method - extra_f77_compile_args = [] - extra_f90_compile_args = [] - - def __init__(self, *args, **kw): - CCompiler.__init__(self, *args, **kw) - self.distutils_vars = self.distutils_vars.clone(self._environment_hook) - self.command_vars = self.command_vars.clone(self._environment_hook) - self.flag_vars = self.flag_vars.clone(self._environment_hook) - self.executables = self.executables.copy() - for e in self._executable_keys: - if e not in self.executables: - self.executables[e] = None - - # Some methods depend on .customize() being called first, so - # this keeps track of whether that's happened yet. 
- self._is_customised = False - - def __copy__(self): - obj = self.__new__(self.__class__) - obj.__dict__.update(self.__dict__) - obj.distutils_vars = obj.distutils_vars.clone(obj._environment_hook) - obj.command_vars = obj.command_vars.clone(obj._environment_hook) - obj.flag_vars = obj.flag_vars.clone(obj._environment_hook) - obj.executables = obj.executables.copy() - return obj - - def copy(self): - return self.__copy__() - - # Use properties for the attributes used by CCompiler. Setting them - # as attributes from the self.executables dictionary is error-prone, - # so we get them from there each time. - def _command_property(key): - def fget(self): - assert self._is_customised - return self.executables[key] - return property(fget=fget) - version_cmd = _command_property('version_cmd') - compiler_f77 = _command_property('compiler_f77') - compiler_f90 = _command_property('compiler_f90') - compiler_fix = _command_property('compiler_fix') - linker_so = _command_property('linker_so') - linker_exe = _command_property('linker_exe') - archiver = _command_property('archiver') - ranlib = _command_property('ranlib') - - # Make our terminology consistent. - def set_executable(self, key, value): - self.set_command(key, value) - - def set_commands(self, **kw): - for k, v in kw.items(): - self.set_command(k, v) - - def set_command(self, key, value): - if not key in self._executable_keys: - raise ValueError( - "unknown executable '%s' for class %s" % - (key, self.__class__.__name__)) - if is_string(value): - value = split_quoted(value) - assert value is None or is_sequence_of_strings(value[1:]), (key, value) - self.executables[key] = value - - ###################################################################### - ## Methods that subclasses may redefine. But don't call these methods! - ## They are private to FCompiler class and may return unexpected - ## results if used elsewhere. So, you have been warned.. 
- - def find_executables(self): - """Go through the self.executables dictionary, and attempt to - find and assign appropriate executables. - - Executable names are looked for in the environment (environment - variables, the distutils.cfg, and command line), the 0th-element of - the command list, and the self.possible_executables list. - - Also, if the 0th element is "" or "", the Fortran 77 - or the Fortran 90 compiler executable is used, unless overridden - by an environment setting. - - Subclasses should call this if overridden. - """ - assert self._is_customised - exe_cache = self._exe_cache - def cached_find_executable(exe): - if exe in exe_cache: - return exe_cache[exe] - fc_exe = find_executable(exe) - exe_cache[exe] = exe_cache[fc_exe] = fc_exe - return fc_exe - def verify_command_form(name, value): - if value is not None and not is_sequence_of_strings(value): - raise ValueError( - "%s value %r is invalid in class %s" % - (name, value, self.__class__.__name__)) - def set_exe(exe_key, f77=None, f90=None): - cmd = self.executables.get(exe_key, None) - if not cmd: - return None - # Note that we get cmd[0] here if the environment doesn't - # have anything set - exe_from_environ = getattr(self.command_vars, exe_key) - if not exe_from_environ: - possibles = [f90, f77] + self.possible_executables - else: - possibles = [exe_from_environ] + self.possible_executables - - seen = set() - unique_possibles = [] - for e in possibles: - if e == '': - e = f77 - elif e == '': - e = f90 - if not e or e in seen: - continue - seen.add(e) - unique_possibles.append(e) - - for exe in unique_possibles: - fc_exe = cached_find_executable(exe) - if fc_exe: - cmd[0] = fc_exe - return fc_exe - self.set_command(exe_key, None) - return None - - ctype = self.compiler_type - f90 = set_exe('compiler_f90') - if not f90: - f77 = set_exe('compiler_f77') - if f77: - log.warn('%s: no Fortran 90 compiler found' % ctype) - else: - raise CompilerNotFound('%s: f90 nor f77' % ctype) - else: - f77 = 
set_exe('compiler_f77', f90=f90) - if not f77: - log.warn('%s: no Fortran 77 compiler found' % ctype) - set_exe('compiler_fix', f90=f90) - - set_exe('linker_so', f77=f77, f90=f90) - set_exe('linker_exe', f77=f77, f90=f90) - set_exe('version_cmd', f77=f77, f90=f90) - set_exe('archiver') - set_exe('ranlib') - - def update_executables(self): - """Called at the beginning of customisation. Subclasses should - override this if they need to set up the executables dictionary. - - Note that self.find_executables() is run afterwards, so the - self.executables dictionary values can contain or as - the command, which will be replaced by the found F77 or F90 - compiler. - """ - pass - - def get_flags(self): - """List of flags common to all compiler types.""" - return [] + self.pic_flags - - def _get_command_flags(self, key): - cmd = self.executables.get(key, None) - if cmd is None: - return [] - return cmd[1:] - - def get_flags_f77(self): - """List of Fortran 77 specific flags.""" - return self._get_command_flags('compiler_f77') - def get_flags_f90(self): - """List of Fortran 90 specific flags.""" - return self._get_command_flags('compiler_f90') - def get_flags_free(self): - """List of Fortran 90 free format specific flags.""" - return [] - def get_flags_fix(self): - """List of Fortran 90 fixed format specific flags.""" - return self._get_command_flags('compiler_fix') - def get_flags_linker_so(self): - """List of linker flags to build a shared library.""" - return self._get_command_flags('linker_so') - def get_flags_linker_exe(self): - """List of linker flags to build an executable.""" - return self._get_command_flags('linker_exe') - def get_flags_ar(self): - """List of archiver flags. 
""" - return self._get_command_flags('archiver') - def get_flags_opt(self): - """List of architecture independent compiler flags.""" - return [] - def get_flags_arch(self): - """List of architecture dependent compiler flags.""" - return [] - def get_flags_debug(self): - """List of compiler flags to compile with debugging information.""" - return [] - - get_flags_opt_f77 = get_flags_opt_f90 = get_flags_opt - get_flags_arch_f77 = get_flags_arch_f90 = get_flags_arch - get_flags_debug_f77 = get_flags_debug_f90 = get_flags_debug - - def get_libraries(self): - """List of compiler libraries.""" - return self.libraries[:] - def get_library_dirs(self): - """List of compiler library directories.""" - return self.library_dirs[:] - - def get_version(self, force=False, ok_status=[0]): - assert self._is_customised - version = CCompiler.get_version(self, force=force, ok_status=ok_status) - if version is None: - raise CompilerNotFound() - return version - - - ############################################################ - - ## Public methods: - - def customize(self, dist = None): - """Customize Fortran compiler. - - This method gets Fortran compiler specific information from - (i) class definition, (ii) environment, (iii) distutils config - files, and (iv) command line (later overrides earlier). - - This method should be always called after constructing a - compiler instance. But not in __init__ because Distribution - instance is needed for (iii) and (iv). 
- """ - log.info('customize %s' % (self.__class__.__name__)) - - self._is_customised = True - - self.distutils_vars.use_distribution(dist) - self.command_vars.use_distribution(dist) - self.flag_vars.use_distribution(dist) - - self.update_executables() - - # find_executables takes care of setting the compiler commands, - # version_cmd, linker_so, linker_exe, ar, and ranlib - self.find_executables() - - noopt = self.distutils_vars.get('noopt', False) - noarch = self.distutils_vars.get('noarch', noopt) - debug = self.distutils_vars.get('debug', False) - - f77 = self.command_vars.compiler_f77 - f90 = self.command_vars.compiler_f90 - - f77flags = [] - f90flags = [] - freeflags = [] - fixflags = [] - - if f77: - f77 = _shell_utils.NativeParser.split(f77) - f77flags = self.flag_vars.f77 - if f90: - f90 = _shell_utils.NativeParser.split(f90) - f90flags = self.flag_vars.f90 - freeflags = self.flag_vars.free - # XXX Assuming that free format is default for f90 compiler. - fix = self.command_vars.compiler_fix - # NOTE: this and similar examples are probably just - # excluding --coverage flag when F90 = gfortran --coverage - # instead of putting that flag somewhere more appropriate - # this and similar examples where a Fortran compiler - # environment variable has been customized by CI or a user - # should perhaps eventually be more thoroughly tested and more - # robustly handled - if fix: - fix = _shell_utils.NativeParser.split(fix) - fixflags = self.flag_vars.fix + f90flags - - oflags, aflags, dflags = [], [], [] - # examine get_flags__ for extra flags - # only add them if the method is different from get_flags_ - def get_flags(tag, flags): - # note that self.flag_vars. 
calls self.get_flags_() - flags.extend(getattr(self.flag_vars, tag)) - this_get = getattr(self, 'get_flags_' + tag) - for name, c, flagvar in [('f77', f77, f77flags), - ('f90', f90, f90flags), - ('f90', fix, fixflags)]: - t = '%s_%s' % (tag, name) - if c and this_get is not getattr(self, 'get_flags_' + t): - flagvar.extend(getattr(self.flag_vars, t)) - if not noopt: - get_flags('opt', oflags) - if not noarch: - get_flags('arch', aflags) - if debug: - get_flags('debug', dflags) - - fflags = self.flag_vars.flags + dflags + oflags + aflags - - if f77: - self.set_commands(compiler_f77=f77+f77flags+fflags) - if f90: - self.set_commands(compiler_f90=f90+freeflags+f90flags+fflags) - if fix: - self.set_commands(compiler_fix=fix+fixflags+fflags) - - - #XXX: Do we need LDSHARED->SOSHARED, LDFLAGS->SOFLAGS - linker_so = self.linker_so - if linker_so: - linker_so_flags = self.flag_vars.linker_so - if sys.platform.startswith('aix'): - python_lib = get_python_lib(standard_lib=1) - ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix') - python_exp = os.path.join(python_lib, 'config', 'python.exp') - linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp] - if sys.platform.startswith('os400'): - from distutils.sysconfig import get_config_var - python_config = get_config_var('LIBPL') - ld_so_aix = os.path.join(python_config, 'ld_so_aix') - python_exp = os.path.join(python_config, 'python.exp') - linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp] - self.set_commands(linker_so=linker_so+linker_so_flags) - - linker_exe = self.linker_exe - if linker_exe: - linker_exe_flags = self.flag_vars.linker_exe - self.set_commands(linker_exe=linker_exe+linker_exe_flags) - - ar = self.command_vars.archiver - if ar: - arflags = self.flag_vars.ar - self.set_commands(archiver=[ar]+arflags) - - self.set_library_dirs(self.get_library_dirs()) - self.set_libraries(self.get_libraries()) - - def dump_properties(self): - """Print out the attributes of a compiler instance.""" - props = [] - 
for key in list(self.executables.keys()) + \ - ['version', 'libraries', 'library_dirs', - 'object_switch', 'compile_switch']: - if hasattr(self, key): - v = getattr(self, key) - props.append((key, None, '= '+repr(v))) - props.sort() - - pretty_printer = FancyGetopt(props) - for l in pretty_printer.generate_help("%s instance properties:" \ - % (self.__class__.__name__)): - if l[:4]==' --': - l = ' ' + l[4:] - print(l) - - ################### - - def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): - """Compile 'src' to product 'obj'.""" - src_flags = {} - if Path(src).suffix.lower() in FORTRAN_COMMON_FIXED_EXTENSIONS \ - and not has_f90_header(src): - flavor = ':f77' - compiler = self.compiler_f77 - src_flags = get_f77flags(src) - extra_compile_args = self.extra_f77_compile_args or [] - elif is_free_format(src): - flavor = ':f90' - compiler = self.compiler_f90 - if compiler is None: - raise DistutilsExecError('f90 not supported by %s needed for %s'\ - % (self.__class__.__name__, src)) - extra_compile_args = self.extra_f90_compile_args or [] - else: - flavor = ':fix' - compiler = self.compiler_fix - if compiler is None: - raise DistutilsExecError('f90 (fixed) not supported by %s needed for %s'\ - % (self.__class__.__name__, src)) - extra_compile_args = self.extra_f90_compile_args or [] - if self.object_switch[-1]==' ': - o_args = [self.object_switch.strip(), obj] - else: - o_args = [self.object_switch.strip()+obj] - - assert self.compile_switch.strip() - s_args = [self.compile_switch, src] - - if extra_compile_args: - log.info('extra %s options: %r' \ - % (flavor[1:], ' '.join(extra_compile_args))) - - extra_flags = src_flags.get(self.compiler_type, []) - if extra_flags: - log.info('using compile options from source: %r' \ - % ' '.join(extra_flags)) - - command = compiler + cc_args + extra_flags + s_args + o_args \ - + extra_postargs + extra_compile_args - - display = '%s: %s' % (os.path.basename(compiler[0]) + flavor, - src) - try: - 
self.spawn(command, display=display) - except DistutilsExecError as e: - msg = str(e) - raise CompileError(msg) from None - - def module_options(self, module_dirs, module_build_dir): - options = [] - if self.module_dir_switch is not None: - if self.module_dir_switch[-1]==' ': - options.extend([self.module_dir_switch.strip(), module_build_dir]) - else: - options.append(self.module_dir_switch.strip()+module_build_dir) - else: - print('XXX: module_build_dir=%r option ignored' % (module_build_dir)) - print('XXX: Fix module_dir_switch for ', self.__class__.__name__) - if self.module_include_switch is not None: - for d in [module_build_dir]+module_dirs: - options.append('%s%s' % (self.module_include_switch, d)) - else: - print('XXX: module_dirs=%r option ignored' % (module_dirs)) - print('XXX: Fix module_include_switch for ', self.__class__.__name__) - return options - - def library_option(self, lib): - return "-l" + lib - def library_dir_option(self, dir): - return "-L" + dir - - def link(self, target_desc, objects, - output_filename, output_dir=None, libraries=None, - library_dirs=None, runtime_library_dirs=None, - export_symbols=None, debug=0, extra_preargs=None, - extra_postargs=None, build_temp=None, target_lang=None): - objects, output_dir = self._fix_object_args(objects, output_dir) - libraries, library_dirs, runtime_library_dirs = \ - self._fix_lib_args(libraries, library_dirs, runtime_library_dirs) - - lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, - libraries) - if is_string(output_dir): - output_filename = os.path.join(output_dir, output_filename) - elif output_dir is not None: - raise TypeError("'output_dir' must be a string or None") - - if self._need_link(objects, output_filename): - if self.library_switch[-1]==' ': - o_args = [self.library_switch.strip(), output_filename] - else: - o_args = [self.library_switch.strip()+output_filename] - - if is_string(self.objects): - ld_args = objects + [self.objects] - else: - ld_args = objects + 
self.objects - ld_args = ld_args + lib_opts + o_args - if debug: - ld_args[:0] = ['-g'] - if extra_preargs: - ld_args[:0] = extra_preargs - if extra_postargs: - ld_args.extend(extra_postargs) - self.mkpath(os.path.dirname(output_filename)) - if target_desc == CCompiler.EXECUTABLE: - linker = self.linker_exe[:] - else: - linker = self.linker_so[:] - command = linker + ld_args - try: - self.spawn(command) - except DistutilsExecError as e: - msg = str(e) - raise LinkError(msg) from None - else: - log.debug("skipping %s (up-to-date)", output_filename) - - def _environment_hook(self, name, hook_name): - if hook_name is None: - return None - if is_string(hook_name): - if hook_name.startswith('self.'): - hook_name = hook_name[5:] - hook = getattr(self, hook_name) - return hook() - elif hook_name.startswith('exe.'): - hook_name = hook_name[4:] - var = self.executables[hook_name] - if var: - return var[0] - else: - return None - elif hook_name.startswith('flags.'): - hook_name = hook_name[6:] - hook = getattr(self, 'get_flags_' + hook_name) - return hook() - else: - return hook_name() - - def can_ccompiler_link(self, ccompiler): - """ - Check if the given C compiler can link objects produced by - this compiler. - """ - return True - - def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir): - """ - Convert a set of object files that are not compatible with the default - linker, to a file that is compatible. - - Parameters - ---------- - objects : list - List of object files to include. - output_dir : str - Output directory to place generated object files. - extra_dll_dir : str - Output directory to place extra DLL files that need to be - included on Windows. - - Returns - ------- - converted_objects : list of str - List of converted object files. - Note that the number of output files is not necessarily - the same as inputs. 
- - """ - raise NotImplementedError() - - ## class FCompiler - -_default_compilers = ( - # sys.platform mappings - ('win32', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95', - 'intelvem', 'intelem', 'flang')), - ('cygwin.*', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95')), - ('linux.*', ('arm', 'gnu95', 'intel', 'lahey', 'pg', 'nv', 'absoft', 'nag', - 'vast', 'compaq', 'intele', 'intelem', 'gnu', 'g95', - 'pathf95', 'nagfor', 'fujitsu')), - ('darwin.*', ('gnu95', 'nag', 'nagfor', 'absoft', 'ibm', 'intel', 'gnu', - 'g95', 'pg')), - ('sunos.*', ('sun', 'gnu', 'gnu95', 'g95')), - ('irix.*', ('mips', 'gnu', 'gnu95',)), - ('aix.*', ('ibm', 'gnu', 'gnu95',)), - # os.name mappings - ('posix', ('gnu', 'gnu95',)), - ('nt', ('gnu', 'gnu95',)), - ('mac', ('gnu95', 'gnu', 'pg')), - ) - -fcompiler_class = None -fcompiler_aliases = None - -def load_all_fcompiler_classes(): - """Cache all the FCompiler classes found in modules in the - numpy.distutils.fcompiler package. - """ - from glob import glob - global fcompiler_class, fcompiler_aliases - if fcompiler_class is not None: - return - pys = os.path.join(os.path.dirname(__file__), '*.py') - fcompiler_class = {} - fcompiler_aliases = {} - for fname in glob(pys): - module_name, ext = os.path.splitext(os.path.basename(fname)) - module_name = 'numpy.distutils.fcompiler.' 
+ module_name - __import__ (module_name) - module = sys.modules[module_name] - if hasattr(module, 'compilers'): - for cname in module.compilers: - klass = getattr(module, cname) - desc = (klass.compiler_type, klass, klass.description) - fcompiler_class[klass.compiler_type] = desc - for alias in klass.compiler_aliases: - if alias in fcompiler_aliases: - raise ValueError("alias %r defined for both %s and %s" - % (alias, klass.__name__, - fcompiler_aliases[alias][1].__name__)) - fcompiler_aliases[alias] = desc - -def _find_existing_fcompiler(compiler_types, - osname=None, platform=None, - requiref90=False, - c_compiler=None): - from numpy.distutils.core import get_distribution - dist = get_distribution(always=True) - for compiler_type in compiler_types: - v = None - try: - c = new_fcompiler(plat=platform, compiler=compiler_type, - c_compiler=c_compiler) - c.customize(dist) - v = c.get_version() - if requiref90 and c.compiler_f90 is None: - v = None - new_compiler = c.suggested_f90_compiler - if new_compiler: - log.warn('Trying %r compiler as suggested by %r ' - 'compiler for f90 support.' % (compiler_type, - new_compiler)) - c = new_fcompiler(plat=platform, compiler=new_compiler, - c_compiler=c_compiler) - c.customize(dist) - v = c.get_version() - if v is not None: - compiler_type = new_compiler - if requiref90 and c.compiler_f90 is None: - raise ValueError('%s does not support compiling f90 codes, ' - 'skipping.' 
% (c.__class__.__name__)) - except DistutilsModuleError: - log.debug("_find_existing_fcompiler: compiler_type='%s' raised DistutilsModuleError", compiler_type) - except CompilerNotFound: - log.debug("_find_existing_fcompiler: compiler_type='%s' not found", compiler_type) - if v is not None: - return compiler_type - return None - -def available_fcompilers_for_platform(osname=None, platform=None): - if osname is None: - osname = os.name - if platform is None: - platform = sys.platform - matching_compiler_types = [] - for pattern, compiler_type in _default_compilers: - if re.match(pattern, platform) or re.match(pattern, osname): - for ct in compiler_type: - if ct not in matching_compiler_types: - matching_compiler_types.append(ct) - if not matching_compiler_types: - matching_compiler_types.append('gnu') - return matching_compiler_types - -def get_default_fcompiler(osname=None, platform=None, requiref90=False, - c_compiler=None): - """Determine the default Fortran compiler to use for the given - platform.""" - matching_compiler_types = available_fcompilers_for_platform(osname, - platform) - log.info("get_default_fcompiler: matching types: '%s'", - matching_compiler_types) - compiler_type = _find_existing_fcompiler(matching_compiler_types, - osname=osname, - platform=platform, - requiref90=requiref90, - c_compiler=c_compiler) - return compiler_type - -# Flag to avoid rechecking for Fortran compiler every time -failed_fcompilers = set() - -def new_fcompiler(plat=None, - compiler=None, - verbose=0, - dry_run=0, - force=0, - requiref90=False, - c_compiler = None): - """Generate an instance of some FCompiler subclass for the supplied - platform/compiler combination. 
- """ - global failed_fcompilers - fcompiler_key = (plat, compiler) - if fcompiler_key in failed_fcompilers: - return None - - load_all_fcompiler_classes() - if plat is None: - plat = os.name - if compiler is None: - compiler = get_default_fcompiler(plat, requiref90=requiref90, - c_compiler=c_compiler) - if compiler in fcompiler_class: - module_name, klass, long_description = fcompiler_class[compiler] - elif compiler in fcompiler_aliases: - module_name, klass, long_description = fcompiler_aliases[compiler] - else: - msg = "don't know how to compile Fortran code on platform '%s'" % plat - if compiler is not None: - msg = msg + " with '%s' compiler." % compiler - msg = msg + " Supported compilers are: %s)" \ - % (','.join(fcompiler_class.keys())) - log.warn(msg) - failed_fcompilers.add(fcompiler_key) - return None - - compiler = klass(verbose=verbose, dry_run=dry_run, force=force) - compiler.c_compiler = c_compiler - return compiler - -def show_fcompilers(dist=None): - """Print list of available compilers (used by the "--help-fcompiler" - option to "config_fc"). 
- """ - if dist is None: - from distutils.dist import Distribution - from numpy.distutils.command.config_compiler import config_fc - dist = Distribution() - dist.script_name = os.path.basename(sys.argv[0]) - dist.script_args = ['config_fc'] + sys.argv[1:] - try: - dist.script_args.remove('--help-fcompiler') - except ValueError: - pass - dist.cmdclass['config_fc'] = config_fc - dist.parse_config_files() - dist.parse_command_line() - compilers = [] - compilers_na = [] - compilers_ni = [] - if not fcompiler_class: - load_all_fcompiler_classes() - platform_compilers = available_fcompilers_for_platform() - for compiler in platform_compilers: - v = None - log.set_verbosity(-2) - try: - c = new_fcompiler(compiler=compiler, verbose=dist.verbose) - c.customize(dist) - v = c.get_version() - except (DistutilsModuleError, CompilerNotFound) as e: - log.debug("show_fcompilers: %s not found" % (compiler,)) - log.debug(repr(e)) - - if v is None: - compilers_na.append(("fcompiler="+compiler, None, - fcompiler_class[compiler][2])) - else: - c.dump_properties() - compilers.append(("fcompiler="+compiler, None, - fcompiler_class[compiler][2] + ' (%s)' % v)) - - compilers_ni = list(set(fcompiler_class.keys()) - set(platform_compilers)) - compilers_ni = [("fcompiler="+fc, None, fcompiler_class[fc][2]) - for fc in compilers_ni] - - compilers.sort() - compilers_na.sort() - compilers_ni.sort() - pretty_printer = FancyGetopt(compilers) - pretty_printer.print_help("Fortran compilers found:") - pretty_printer = FancyGetopt(compilers_na) - pretty_printer.print_help("Compilers available for this " - "platform, but not found:") - if compilers_ni: - pretty_printer = FancyGetopt(compilers_ni) - pretty_printer.print_help("Compilers not available on this platform:") - print("For compiler details, run 'config_fc --verbose' setup command.") - - -def dummy_fortran_file(): - fo, name = make_temp_file(suffix='.f') - fo.write(" subroutine dummy()\n end\n") - fo.close() - return name[:-2] - - -_has_f_header 
= re.compile(r'-\*-\s*fortran\s*-\*-', re.I).search -_has_f90_header = re.compile(r'-\*-\s*f90\s*-\*-', re.I).search -_has_fix_header = re.compile(r'-\*-\s*fix\s*-\*-', re.I).search -_free_f90_start = re.compile(r'[^c*!]\s*[^\s\d\t]', re.I).match - -def is_free_format(file): - """Check if file is in free format Fortran.""" - # f90 allows both fixed and free format, assuming fixed unless - # signs of free format are detected. - result = 0 - with open(file, encoding='latin1') as f: - line = f.readline() - n = 10000 # the number of non-comment lines to scan for hints - if _has_f_header(line) or _has_fix_header(line): - n = 0 - elif _has_f90_header(line): - n = 0 - result = 1 - while n>0 and line: - line = line.rstrip() - if line and line[0]!='!': - n -= 1 - if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-1:]=='&': - result = 1 - break - line = f.readline() - return result - -def has_f90_header(src): - with open(src, encoding='latin1') as f: - line = f.readline() - return _has_f90_header(line) or _has_fix_header(line) - -_f77flags_re = re.compile(r'(c|)f77flags\s*\(\s*(?P<fcname>\w+)\s*\)\s*=\s*(?P<fflags>.*)', re.I) -def get_f77flags(src): - """ - Search the first 20 lines of fortran 77 code for line pattern - `CF77FLAGS(<fcompiler type>)=<f77 flags>` - Return a dictionary {<fcompiler type>:<f77 flags>}. - """ - flags = {} - with open(src, encoding='latin1') as f: - i = 0 - for line in f: - i += 1 - if i>20: break - m = _f77flags_re.match(line) - if not m: continue - fcname = m.group('fcname').strip() - fflags = m.group('fflags').strip() - flags[fcname] = split_quoted(fflags) - return flags - -# TODO: implement get_f90flags and use it in _compile similarly to get_f77flags - -if __name__ == '__main__': - show_fcompilers() diff --git a/numpy/distutils/fcompiler/absoft.py b/numpy/distutils/fcompiler/absoft.py deleted file mode 100644 index 68f516b92751..000000000000 --- a/numpy/distutils/fcompiler/absoft.py +++ /dev/null @@ -1,156 +0,0 @@ - -# Absoft Corporation ceased operations on 12/31/2022. -# Thus, all links to <http://www.absoft.com> are invalid.
- -# Notes: -# - when using -g77 then use -DUNDERSCORE_G77 to compile f2py -# generated extension modules (works for f2py v2.45.241_1936 and up) -import os - -from numpy.distutils.cpuinfo import cpu -from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file -from numpy.distutils.misc_util import cyg2win32 - -compilers = ['AbsoftFCompiler'] - -class AbsoftFCompiler(FCompiler): - - compiler_type = 'absoft' - description = 'Absoft Corp Fortran Compiler' - #version_pattern = r'FORTRAN 77 Compiler (?P[^\s*,]*).*?Absoft Corp' - version_pattern = r'(f90:.*?(Absoft Pro FORTRAN Version|FORTRAN 77 Compiler|Absoft Fortran Compiler Version|Copyright Absoft Corporation.*?Version))'+\ - r' (?P[^\s*,]*)(.*?Absoft Corp|)' - - # on windows: f90 -V -c dummy.f - # f90: Copyright Absoft Corporation 1994-1998 mV2; Cray Research, Inc. 1994-1996 CF90 (2.x.x.x f36t87) Version 2.3 Wed Apr 19, 2006 13:05:16 - - # samt5735(8)$ f90 -V -c dummy.f - # f90: Copyright Absoft Corporation 1994-2002; Absoft Pro FORTRAN Version 8.0 - # Note that fink installs g77 as f77, so need to use f90 for detection. - - executables = { - 'version_cmd' : None, # set by update_executables - 'compiler_f77' : ["f77"], - 'compiler_fix' : ["f90"], - 'compiler_f90' : ["f90"], - 'linker_so' : [""], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - if os.name=='nt': - library_switch = '/out:' #No space after /out:! - - module_dir_switch = None - module_include_switch = '-p' - - def update_executables(self): - f = cyg2win32(dummy_fortran_file()) - self.executables['version_cmd'] = ['', '-V', '-c', - f+'.f', '-o', f+'.o'] - - def get_flags_linker_so(self): - if os.name=='nt': - opt = ['/dll'] - # The "-K shared" switches are being left in for pre-9.0 versions - # of Absoft though I don't think versions earlier than 9 can - # actually be used to build shared libraries. In fact, version - # 8 of Absoft doesn't recognize "-K shared" and will fail. 
- elif self.get_version() >= '9.0': - opt = ['-shared'] - else: - opt = ["-K", "shared"] - return opt - - def library_dir_option(self, dir): - if os.name=='nt': - return ['-link', '/PATH:%s' % (dir)] - return "-L" + dir - - def library_option(self, lib): - if os.name=='nt': - return '%s.lib' % (lib) - return "-l" + lib - - def get_library_dirs(self): - opt = FCompiler.get_library_dirs(self) - d = os.environ.get('ABSOFT') - if d: - if self.get_version() >= '10.0': - # use shared libraries, the static libraries were not compiled -fPIC - prefix = 'sh' - else: - prefix = '' - if cpu.is_64bit(): - suffix = '64' - else: - suffix = '' - opt.append(os.path.join(d, '%slib%s' % (prefix, suffix))) - return opt - - def get_libraries(self): - opt = FCompiler.get_libraries(self) - if self.get_version() >= '11.0': - opt.extend(['af90math', 'afio', 'af77math', 'amisc']) - elif self.get_version() >= '10.0': - opt.extend(['af90math', 'afio', 'af77math', 'U77']) - elif self.get_version() >= '8.0': - opt.extend(['f90math', 'fio', 'f77math', 'U77']) - else: - opt.extend(['fio', 'f90math', 'fmath', 'U77']) - if os.name =='nt': - opt.append('COMDLG32') - return opt - - def get_flags(self): - opt = FCompiler.get_flags(self) - if os.name != 'nt': - opt.extend(['-s']) - if self.get_version(): - if self.get_version()>='8.2': - opt.append('-fpic') - return opt - - def get_flags_f77(self): - opt = FCompiler.get_flags_f77(self) - opt.extend(['-N22', '-N90', '-N110']) - v = self.get_version() - if os.name == 'nt': - if v and v>='8.0': - opt.extend(['-f', '-N15']) - else: - opt.append('-f') - if v: - if v<='4.6': - opt.append('-B108') - else: - # Though -N15 is undocumented, it works with - # Absoft 8.0 on Linux - opt.append('-N15') - return opt - - def get_flags_f90(self): - opt = FCompiler.get_flags_f90(self) - opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX", - "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"]) - if self.get_version(): - if self.get_version()>'4.6': - 
opt.extend(["-YDEALLOC=ALL"]) - return opt - - def get_flags_fix(self): - opt = FCompiler.get_flags_fix(self) - opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX", - "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"]) - opt.extend(["-f", "fixed"]) - return opt - - def get_flags_opt(self): - opt = ['-O'] - return opt - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='absoft').get_version()) diff --git a/numpy/distutils/fcompiler/arm.py b/numpy/distutils/fcompiler/arm.py deleted file mode 100644 index 3eb7e9af9c8c..000000000000 --- a/numpy/distutils/fcompiler/arm.py +++ /dev/null @@ -1,71 +0,0 @@ -import sys - -from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file -from sys import platform -from os.path import join, dirname, normpath - -compilers = ['ArmFlangCompiler'] - -import functools - -class ArmFlangCompiler(FCompiler): - compiler_type = 'arm' - description = 'Arm Compiler' - version_pattern = r'\s*Arm.*version (?P[\d.-]+).*' - - ar_exe = 'lib.exe' - possible_executables = ['armflang'] - - executables = { - 'version_cmd': ["", "--version"], - 'compiler_f77': ["armflang", "-fPIC"], - 'compiler_fix': ["armflang", "-fPIC", "-ffixed-form"], - 'compiler_f90': ["armflang", "-fPIC"], - 'linker_so': ["armflang", "-fPIC", "-shared"], - 'archiver': ["ar", "-cr"], - 'ranlib': None - } - - pic_flags = ["-fPIC", "-DPIC"] - c_compiler = 'arm' - module_dir_switch = '-module ' # Don't remove ending space! 
- - def get_libraries(self): - opt = FCompiler.get_libraries(self) - opt.extend(['flang', 'flangrti', 'ompstub']) - return opt - - @functools.lru_cache(maxsize=128) - def get_library_dirs(self): - """List of compiler library directories.""" - opt = FCompiler.get_library_dirs(self) - flang_dir = dirname(self.executables['compiler_f77'][0]) - opt.append(normpath(join(flang_dir, '..', 'lib'))) - - return opt - - def get_flags(self): - return [] - - def get_flags_free(self): - return [] - - def get_flags_debug(self): - return ['-g'] - - def get_flags_opt(self): - return ['-O3'] - - def get_flags_arch(self): - return [] - - def runtime_library_dir_option(self, dir): - return '-Wl,-rpath=%s' % dir - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='armflang').get_version()) - diff --git a/numpy/distutils/fcompiler/compaq.py b/numpy/distutils/fcompiler/compaq.py deleted file mode 100644 index 01314c136acf..000000000000 --- a/numpy/distutils/fcompiler/compaq.py +++ /dev/null @@ -1,120 +0,0 @@ - -#http://www.compaq.com/fortran/docs/ -import os -import sys - -from numpy.distutils.fcompiler import FCompiler -from distutils.errors import DistutilsPlatformError - -compilers = ['CompaqFCompiler'] -if os.name != 'posix' or sys.platform[:6] == 'cygwin' : - # Otherwise we'd get a false positive on posix systems with - # case-insensitive filesystems (like darwin), because we'll pick - # up /bin/df - compilers.append('CompaqVisualFCompiler') - -class CompaqFCompiler(FCompiler): - - compiler_type = 'compaq' - description = 'Compaq Fortran Compiler' - version_pattern = r'Compaq Fortran (?P[^\s]*).*' - - if sys.platform[:5]=='linux': - fc_exe = 'fort' - else: - fc_exe = 'f90' - - executables = { - 'version_cmd' : ['', "-version"], - 'compiler_f77' : [fc_exe, "-f77rtl", "-fixed"], - 'compiler_fix' : [fc_exe, "-fixed"], - 'compiler_f90' : [fc_exe], - 'linker_so' : [''], 
- 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - module_dir_switch = '-module ' # not tested - module_include_switch = '-I' - - def get_flags(self): - return ['-assume no2underscore', '-nomixed_str_len_arg'] - def get_flags_debug(self): - return ['-g', '-check bounds'] - def get_flags_opt(self): - return ['-O4', '-align dcommons', '-assume bigarrays', - '-assume nozsize', '-math_library fast'] - def get_flags_arch(self): - return ['-arch host', '-tune host'] - def get_flags_linker_so(self): - if sys.platform[:5]=='linux': - return ['-shared'] - return ['-shared', '-Wl,-expect_unresolved,*'] - -class CompaqVisualFCompiler(FCompiler): - - compiler_type = 'compaqv' - description = 'DIGITAL or Compaq Visual Fortran Compiler' - version_pattern = (r'(DIGITAL|Compaq) Visual Fortran Optimizing Compiler' - r' Version (?P[^\s]*).*') - - compile_switch = '/compile_only' - object_switch = '/object:' - library_switch = '/OUT:' #No space after /OUT:! - - static_lib_extension = ".lib" - static_lib_format = "%s%s" - module_dir_switch = '/module:' - module_include_switch = '/I' - - ar_exe = 'lib.exe' - fc_exe = 'DF' - - if sys.platform=='win32': - from numpy.distutils.msvccompiler import MSVCCompiler - - try: - m = MSVCCompiler() - m.initialize() - ar_exe = m.lib - except DistutilsPlatformError: - pass - except AttributeError as e: - if '_MSVCCompiler__root' in str(e): - print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (e)) - else: - raise - except OSError as e: - if not "vcvarsall.bat" in str(e): - print("Unexpected OSError in", __file__) - raise - except ValueError as e: - if not "'path'" in str(e): - print("Unexpected ValueError in", __file__) - raise - - executables = { - 'version_cmd' : ['', "/what"], - 'compiler_f77' : [fc_exe, "/f77rtl", "/fixed"], - 'compiler_fix' : [fc_exe, "/fixed"], - 'compiler_f90' : [fc_exe], - 'linker_so' : [''], - 'archiver' : [ar_exe, "/OUT:"], - 'ranlib' : None - } - - def get_flags(self): - return ['/nologo', '/MD', '/WX', 
'/iface=(cref,nomixed_str_len_arg)', - '/names:lowercase', '/assume:underscore'] - def get_flags_opt(self): - return ['/Ox', '/fast', '/optimize:5', '/unroll:0', '/math_library:fast'] - def get_flags_arch(self): - return ['/threads'] - def get_flags_debug(self): - return ['/debug'] - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='compaq').get_version()) diff --git a/numpy/distutils/fcompiler/environment.py b/numpy/distutils/fcompiler/environment.py deleted file mode 100644 index ecd4d9989279..000000000000 --- a/numpy/distutils/fcompiler/environment.py +++ /dev/null @@ -1,88 +0,0 @@ -import os -from distutils.dist import Distribution - -__metaclass__ = type - -class EnvironmentConfig: - def __init__(self, distutils_section='ALL', **kw): - self._distutils_section = distutils_section - self._conf_keys = kw - self._conf = None - self._hook_handler = None - - def dump_variable(self, name): - conf_desc = self._conf_keys[name] - hook, envvar, confvar, convert, append = conf_desc - if not convert: - convert = lambda x : x - print('%s.%s:' % (self._distutils_section, name)) - v = self._hook_handler(name, hook) - print(' hook : %s' % (convert(v),)) - if envvar: - v = os.environ.get(envvar, None) - print(' environ: %s' % (convert(v),)) - if confvar and self._conf: - v = self._conf.get(confvar, (None, None))[1] - print(' config : %s' % (convert(v),)) - - def dump_variables(self): - for name in self._conf_keys: - self.dump_variable(name) - - def __getattr__(self, name): - try: - conf_desc = self._conf_keys[name] - except KeyError: - raise AttributeError( - f"'EnvironmentConfig' object has no attribute '{name}'" - ) from None - - return self._get_var(name, conf_desc) - - def get(self, name, default=None): - try: - conf_desc = self._conf_keys[name] - except KeyError: - return default - var = self._get_var(name, conf_desc) - if var is None: - var = default - 
return var - - def _get_var(self, name, conf_desc): - hook, envvar, confvar, convert, append = conf_desc - if convert is None: - convert = lambda x: x - var = self._hook_handler(name, hook) - if envvar is not None: - envvar_contents = os.environ.get(envvar) - if envvar_contents is not None: - envvar_contents = convert(envvar_contents) - if var and append: - if os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '1') == '1': - var.extend(envvar_contents) - else: - # NPY_DISTUTILS_APPEND_FLAGS was explicitly set to 0 - # to keep old (overwrite flags rather than append to - # them) behavior - var = envvar_contents - else: - var = envvar_contents - if confvar is not None and self._conf: - if confvar in self._conf: - source, confvar_contents = self._conf[confvar] - var = convert(confvar_contents) - return var - - - def clone(self, hook_handler): - ec = self.__class__(distutils_section=self._distutils_section, - **self._conf_keys) - ec._hook_handler = hook_handler - return ec - - def use_distribution(self, dist): - if isinstance(dist, Distribution): - self._conf = dist.get_option_dict(self._distutils_section) - else: - self._conf = dist diff --git a/numpy/distutils/fcompiler/fujitsu.py b/numpy/distutils/fcompiler/fujitsu.py deleted file mode 100644 index ddce67456d18..000000000000 --- a/numpy/distutils/fcompiler/fujitsu.py +++ /dev/null @@ -1,46 +0,0 @@ -""" -fujitsu - -Supports Fujitsu compiler function. -This compiler is developed by Fujitsu and is used in A64FX on Fugaku. 
-""" -from numpy.distutils.fcompiler import FCompiler - -compilers = ['FujitsuFCompiler'] - -class FujitsuFCompiler(FCompiler): - compiler_type = 'fujitsu' - description = 'Fujitsu Fortran Compiler' - - possible_executables = ['frt'] - version_pattern = r'frt \(FRT\) (?P[a-z\d.]+)' - # $ frt --version - # frt (FRT) x.x.x yyyymmdd - - executables = { - 'version_cmd' : ["", "--version"], - 'compiler_f77' : ["frt", "-Fixed"], - 'compiler_fix' : ["frt", "-Fixed"], - 'compiler_f90' : ["frt"], - 'linker_so' : ["frt", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - pic_flags = ['-KPIC'] - module_dir_switch = '-M' - module_include_switch = '-I' - - def get_flags_opt(self): - return ['-O3'] - def get_flags_debug(self): - return ['-g'] - def runtime_library_dir_option(self, dir): - return f'-Wl,-rpath={dir}' - def get_libraries(self): - return ['fj90f', 'fj90i', 'fjsrcinfo'] - -if __name__ == '__main__': - from distutils import log - from numpy.distutils import customized_fcompiler - log.set_verbosity(2) - print(customized_fcompiler('fujitsu').get_version()) diff --git a/numpy/distutils/fcompiler/g95.py b/numpy/distutils/fcompiler/g95.py deleted file mode 100644 index e109a972a872..000000000000 --- a/numpy/distutils/fcompiler/g95.py +++ /dev/null @@ -1,42 +0,0 @@ -# http://g95.sourceforge.net/ -from numpy.distutils.fcompiler import FCompiler - -compilers = ['G95FCompiler'] - -class G95FCompiler(FCompiler): - compiler_type = 'g95' - description = 'G95 Fortran Compiler' - -# version_pattern = r'G95 \((GCC (?P[\d.]+)|.*?) \(g95!\) (?P.*)\).*' - # $ g95 --version - # G95 (GCC 4.0.3 (g95!) May 22 2006) - - version_pattern = r'G95 \((GCC (?P[\d.]+)|.*?) \(g95 (?P.*)!\) (?P.*)\).*' - # $ g95 --version - # G95 (GCC 4.0.3 (g95 0.90!) 
Aug 22 2006) - - executables = { - 'version_cmd' : ["", "--version"], - 'compiler_f77' : ["g95", "-ffixed-form"], - 'compiler_fix' : ["g95", "-ffixed-form"], - 'compiler_f90' : ["g95"], - 'linker_so' : ["", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - pic_flags = ['-fpic'] - module_dir_switch = '-fmod=' - module_include_switch = '-I' - - def get_flags(self): - return ['-fno-second-underscore'] - def get_flags_opt(self): - return ['-O'] - def get_flags_debug(self): - return ['-g'] - -if __name__ == '__main__': - from distutils import log - from numpy.distutils import customized_fcompiler - log.set_verbosity(2) - print(customized_fcompiler('g95').get_version()) diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py deleted file mode 100644 index 3472b5d4c095..000000000000 --- a/numpy/distutils/fcompiler/gnu.py +++ /dev/null @@ -1,555 +0,0 @@ -import re -import os -import sys -import warnings -import platform -import tempfile -import hashlib -import base64 -import subprocess -from subprocess import Popen, PIPE, STDOUT -from numpy.distutils.exec_command import filepath_from_subprocess_output -from numpy.distutils.fcompiler import FCompiler -from distutils.version import LooseVersion - -compilers = ['GnuFCompiler', 'Gnu95FCompiler'] - -TARGET_R = re.compile(r"Target: ([a-zA-Z0-9_\-]*)") - -# XXX: handle cross compilation - - -def is_win64(): - return sys.platform == "win32" and platform.architecture()[0] == "64bit" - - -class GnuFCompiler(FCompiler): - compiler_type = 'gnu' - compiler_aliases = ('g77', ) - description = 'GNU Fortran 77 compiler' - - def gnu_version_match(self, version_string): - """Handle the different versions of GNU fortran compilers""" - # Strip warning(s) that may be emitted by gfortran - while version_string.startswith('gfortran: warning'): - version_string =\ - version_string[version_string.find('\n') + 1:].strip() - - # Gfortran versions from after 2010 will output a simple string - # (usually 
"x.y", "x.y.z" or "x.y.z-q") for ``-dumpversion``; older - # gfortrans may still return long version strings (``-dumpversion`` was - # an alias for ``--version``) - if len(version_string) <= 20: - # Try to find a valid version string - m = re.search(r'([0-9.]+)', version_string) - if m: - # g77 provides a longer version string that starts with GNU - # Fortran - if version_string.startswith('GNU Fortran'): - return ('g77', m.group(1)) - - # gfortran only outputs a version string such as #.#.#, so check - # if the match is at the start of the string - elif m.start() == 0: - return ('gfortran', m.group(1)) - else: - # Output probably from --version, try harder: - m = re.search(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string) - if m: - return ('gfortran', m.group(1)) - m = re.search( - r'GNU Fortran.*?\-?([0-9-.]+\.[0-9-.]+)', version_string) - if m: - v = m.group(1) - if v.startswith('0') or v.startswith('2') or v.startswith('3'): - # the '0' is for early g77's - return ('g77', v) - else: - # at some point in the 4.x series, the ' 95' was dropped - # from the version string - return ('gfortran', v) - - # If still nothing, raise an error to make the problem easy to find. 
- err = 'A valid Fortran version was not found in this string:\n' - raise ValueError(err + version_string) - - def version_match(self, version_string): - v = self.gnu_version_match(version_string) - if not v or v[0] != 'g77': - return None - return v[1] - - possible_executables = ['g77', 'f77'] - executables = { - 'version_cmd' : [None, "-dumpversion"], - 'compiler_f77' : [None, "-g", "-Wall", "-fno-second-underscore"], - 'compiler_f90' : None, # Use --fcompiler=gnu95 for f90 codes - 'compiler_fix' : None, - 'linker_so' : [None, "-g", "-Wall"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"], - 'linker_exe' : [None, "-g", "-Wall"] - } - module_dir_switch = None - module_include_switch = None - - # Cygwin: f771: warning: -fPIC ignored for target (all code is - # position independent) - if os.name != 'nt' and sys.platform != 'cygwin': - pic_flags = ['-fPIC'] - - # use -mno-cygwin for g77 when Python is not Cygwin-Python - if sys.platform == 'win32': - for key in ['version_cmd', 'compiler_f77', 'linker_so', 'linker_exe']: - executables[key].append('-mno-cygwin') - - g2c = 'g2c' - suggested_f90_compiler = 'gnu95' - - def get_flags_linker_so(self): - opt = self.linker_so[1:] - if sys.platform == 'darwin': - target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None) - # If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value - # and leave it alone. But, distutils will complain if the - # environment's value is different from the one in the Python - # Makefile used to build Python. We let distutils handle this - # error checking. - if not target: - # If MACOSX_DEPLOYMENT_TARGET is not set in the environment, - # we try to get it first from sysconfig and then - # fall back to setting it to 10.9 This is a reasonable default - # even when using the official Python dist and those derived - # from it. - import sysconfig - target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET') - if not target: - target = '10.9' - s = f'Env. 
variable MACOSX_DEPLOYMENT_TARGET set to {target}' - warnings.warn(s, stacklevel=2) - os.environ['MACOSX_DEPLOYMENT_TARGET'] = str(target) - opt.extend(['-undefined', 'dynamic_lookup', '-bundle']) - else: - opt.append("-shared") - if sys.platform.startswith('sunos'): - # SunOS often has dynamically loaded symbols defined in the - # static library libg2c.a The linker doesn't like this. To - # ignore the problem, use the -mimpure-text flag. It isn't - # the safest thing, but seems to work. 'man gcc' says: - # ".. Instead of using -mimpure-text, you should compile all - # source code with -fpic or -fPIC." - opt.append('-mimpure-text') - return opt - - def get_libgcc_dir(self): - try: - output = subprocess.check_output(self.compiler_f77 + - ['-print-libgcc-file-name']) - except (OSError, subprocess.CalledProcessError): - pass - else: - output = filepath_from_subprocess_output(output) - return os.path.dirname(output) - return None - - def get_libgfortran_dir(self): - if sys.platform[:5] == 'linux': - libgfortran_name = 'libgfortran.so' - elif sys.platform == 'darwin': - libgfortran_name = 'libgfortran.dylib' - else: - libgfortran_name = None - - libgfortran_dir = None - if libgfortran_name: - find_lib_arg = ['-print-file-name={0}'.format(libgfortran_name)] - try: - output = subprocess.check_output( - self.compiler_f77 + find_lib_arg) - except (OSError, subprocess.CalledProcessError): - pass - else: - output = filepath_from_subprocess_output(output) - libgfortran_dir = os.path.dirname(output) - return libgfortran_dir - - def get_library_dirs(self): - opt = [] - if sys.platform[:5] != 'linux': - d = self.get_libgcc_dir() - if d: - # if windows and not cygwin, libg2c lies in a different folder - if sys.platform == 'win32' and not d.startswith('/usr/lib'): - d = os.path.normpath(d) - path = os.path.join(d, "lib%s.a" % self.g2c) - if not os.path.exists(path): - root = os.path.join(d, *((os.pardir, ) * 4)) - d2 = os.path.abspath(os.path.join(root, 'lib')) - path = 
os.path.join(d2, "lib%s.a" % self.g2c) - if os.path.exists(path): - opt.append(d2) - opt.append(d) - # For Macports / Linux, libgfortran and libgcc are not co-located - lib_gfortran_dir = self.get_libgfortran_dir() - if lib_gfortran_dir: - opt.append(lib_gfortran_dir) - return opt - - def get_libraries(self): - opt = [] - d = self.get_libgcc_dir() - if d is not None: - g2c = self.g2c + '-pic' - f = self.static_lib_format % (g2c, self.static_lib_extension) - if not os.path.isfile(os.path.join(d, f)): - g2c = self.g2c - else: - g2c = self.g2c - - if g2c is not None: - opt.append(g2c) - c_compiler = self.c_compiler - if sys.platform == 'win32' and c_compiler and \ - c_compiler.compiler_type == 'msvc': - opt.append('gcc') - if sys.platform == 'darwin': - opt.append('cc_dynamic') - return opt - - def get_flags_debug(self): - return ['-g'] - - def get_flags_opt(self): - v = self.get_version() - if v and v <= '3.3.3': - # With this compiler version building Fortran BLAS/LAPACK - # with -O3 caused failures in lib.lapack heevr,syevr tests. 
- opt = ['-O2'] - else: - opt = ['-O3'] - opt.append('-funroll-loops') - return opt - - def _c_arch_flags(self): - """ Return detected arch flags from CFLAGS """ - import sysconfig - try: - cflags = sysconfig.get_config_vars()['CFLAGS'] - except KeyError: - return [] - arch_re = re.compile(r"-arch\s+(\w+)") - arch_flags = [] - for arch in arch_re.findall(cflags): - arch_flags += ['-arch', arch] - return arch_flags - - def get_flags_arch(self): - return [] - - def runtime_library_dir_option(self, dir): - if sys.platform == 'win32' or sys.platform == 'cygwin': - # Linux/Solaris/Unix support RPATH, Windows does not - raise NotImplementedError - - # TODO: could use -Xlinker here, if it's supported - assert "," not in dir - - if sys.platform == 'darwin': - return f'-Wl,-rpath,{dir}' - elif sys.platform.startswith(('aix', 'os400')): - # AIX RPATH is called LIBPATH - return f'-Wl,-blibpath:{dir}' - else: - return f'-Wl,-rpath={dir}' - - -class Gnu95FCompiler(GnuFCompiler): - compiler_type = 'gnu95' - compiler_aliases = ('gfortran', ) - description = 'GNU Fortran 95 compiler' - - def version_match(self, version_string): - v = self.gnu_version_match(version_string) - if not v or v[0] != 'gfortran': - return None - v = v[1] - if LooseVersion(v) >= "4": - # gcc-4 series releases do not support -mno-cygwin option - pass - else: - # use -mno-cygwin flag for gfortran when Python is not - # Cygwin-Python - if sys.platform == 'win32': - for key in [ - 'version_cmd', 'compiler_f77', 'compiler_f90', - 'compiler_fix', 'linker_so', 'linker_exe' - ]: - self.executables[key].append('-mno-cygwin') - return v - - possible_executables = ['gfortran', 'f95'] - executables = { - 'version_cmd' : ["", "-dumpversion"], - 'compiler_f77' : [None, "-Wall", "-g", "-ffixed-form", - "-fno-second-underscore"], - 'compiler_f90' : [None, "-Wall", "-g", - "-fno-second-underscore"], - 'compiler_fix' : [None, "-Wall", "-g","-ffixed-form", - "-fno-second-underscore"], - 'linker_so' : ["", "-Wall", "-g"], - 
'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"], - 'linker_exe' : [None, "-Wall"] - } - - module_dir_switch = '-J' - module_include_switch = '-I' - - if sys.platform.startswith(('aix', 'os400')): - executables['linker_so'].append('-lpthread') - if platform.architecture()[0][:2] == '64': - for key in ['compiler_f77', 'compiler_f90','compiler_fix','linker_so', 'linker_exe']: - executables[key].append('-maix64') - - g2c = 'gfortran' - - def _universal_flags(self, cmd): - """Return a list of -arch flags for every supported architecture.""" - if not sys.platform == 'darwin': - return [] - arch_flags = [] - # get arches the C compiler gets. - c_archs = self._c_arch_flags() - if "i386" in c_archs: - c_archs[c_archs.index("i386")] = "i686" - # check the arches the Fortran compiler supports, and compare with - # arch flags from C compiler - for arch in ["ppc", "i686", "x86_64", "ppc64", "s390x"]: - if _can_target(cmd, arch) and arch in c_archs: - arch_flags.extend(["-arch", arch]) - return arch_flags - - def get_flags(self): - flags = GnuFCompiler.get_flags(self) - arch_flags = self._universal_flags(self.compiler_f90) - if arch_flags: - flags[:0] = arch_flags - return flags - - def get_flags_linker_so(self): - flags = GnuFCompiler.get_flags_linker_so(self) - arch_flags = self._universal_flags(self.linker_so) - if arch_flags: - flags[:0] = arch_flags - return flags - - def get_library_dirs(self): - opt = GnuFCompiler.get_library_dirs(self) - if sys.platform == 'win32': - c_compiler = self.c_compiler - if c_compiler and c_compiler.compiler_type == "msvc": - target = self.get_target() - if target: - d = os.path.normpath(self.get_libgcc_dir()) - root = os.path.join(d, *((os.pardir, ) * 4)) - path = os.path.join(root, "lib") - mingwdir = os.path.normpath(path) - if os.path.exists(os.path.join(mingwdir, "libmingwex.a")): - opt.append(mingwdir) - # For Macports / Linux, libgfortran and libgcc are not co-located - lib_gfortran_dir = self.get_libgfortran_dir() - if 
lib_gfortran_dir: - opt.append(lib_gfortran_dir) - return opt - - def get_libraries(self): - opt = GnuFCompiler.get_libraries(self) - if sys.platform == 'darwin': - opt.remove('cc_dynamic') - if sys.platform == 'win32': - c_compiler = self.c_compiler - if c_compiler and c_compiler.compiler_type == "msvc": - if "gcc" in opt: - i = opt.index("gcc") - opt.insert(i + 1, "mingwex") - opt.insert(i + 1, "mingw32") - c_compiler = self.c_compiler - if c_compiler and c_compiler.compiler_type == "msvc": - return [] - else: - pass - return opt - - def get_target(self): - try: - p = subprocess.Popen( - self.compiler_f77 + ['-v'], - stdin=subprocess.PIPE, - stderr=subprocess.PIPE, - ) - stdout, stderr = p.communicate() - output = (stdout or b"") + (stderr or b"") - except (OSError, subprocess.CalledProcessError): - pass - else: - output = filepath_from_subprocess_output(output) - m = TARGET_R.search(output) - if m: - return m.group(1) - return "" - - def _hash_files(self, filenames): - h = hashlib.sha1() - for fn in filenames: - with open(fn, 'rb') as f: - while True: - block = f.read(131072) - if not block: - break - h.update(block) - text = base64.b32encode(h.digest()) - text = text.decode('ascii') - return text.rstrip('=') - - def _link_wrapper_lib(self, objects, output_dir, extra_dll_dir, - chained_dlls, is_archive): - """Create a wrapper shared library for the given objects - - Return an MSVC-compatible lib - """ - - c_compiler = self.c_compiler - if c_compiler.compiler_type != "msvc": - raise ValueError("This method only supports MSVC") - - object_hash = self._hash_files(list(objects) + list(chained_dlls)) - - if is_win64(): - tag = 'win_amd64' - else: - tag = 'win32' - - basename = 'lib' + os.path.splitext( - os.path.basename(objects[0]))[0][:8] - root_name = basename + '.' 
+ object_hash + '.gfortran-' + tag - dll_name = root_name + '.dll' - def_name = root_name + '.def' - lib_name = root_name + '.lib' - dll_path = os.path.join(extra_dll_dir, dll_name) - def_path = os.path.join(output_dir, def_name) - lib_path = os.path.join(output_dir, lib_name) - - if os.path.isfile(lib_path): - # Nothing to do - return lib_path, dll_path - - if is_archive: - objects = (["-Wl,--whole-archive"] + list(objects) + - ["-Wl,--no-whole-archive"]) - self.link_shared_object( - objects, - dll_name, - output_dir=extra_dll_dir, - extra_postargs=list(chained_dlls) + [ - '-Wl,--allow-multiple-definition', - '-Wl,--output-def,' + def_path, - '-Wl,--export-all-symbols', - '-Wl,--enable-auto-import', - '-static', - '-mlong-double-64', - ]) - - # No PowerPC! - if is_win64(): - specifier = '/MACHINE:X64' - else: - specifier = '/MACHINE:X86' - - # MSVC specific code - lib_args = ['/def:' + def_path, '/OUT:' + lib_path, specifier] - if not c_compiler.initialized: - c_compiler.initialize() - c_compiler.spawn([c_compiler.lib] + lib_args) - - return lib_path, dll_path - - def can_ccompiler_link(self, compiler): - # MSVC cannot link objects compiled by GNU fortran - return compiler.compiler_type not in ("msvc", ) - - def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir): - """ - Convert a set of object files that are not compatible with the default - linker, to a file that is compatible. - """ - if self.c_compiler.compiler_type == "msvc": - # Compile a DLL and return the lib for the DLL as - # the object. Also keep track of previous DLLs that - # we have compiled so that we can link against them. 
- - # If there are .a archives, assume they are self-contained - # static libraries, and build separate DLLs for each - archives = [] - plain_objects = [] - for obj in objects: - if obj.lower().endswith('.a'): - archives.append(obj) - else: - plain_objects.append(obj) - - chained_libs = [] - chained_dlls = [] - for archive in archives[::-1]: - lib, dll = self._link_wrapper_lib( - [archive], - output_dir, - extra_dll_dir, - chained_dlls=chained_dlls, - is_archive=True) - chained_libs.insert(0, lib) - chained_dlls.insert(0, dll) - - if not plain_objects: - return chained_libs - - lib, dll = self._link_wrapper_lib( - plain_objects, - output_dir, - extra_dll_dir, - chained_dlls=chained_dlls, - is_archive=False) - return [lib] + chained_libs - else: - raise ValueError("Unsupported C compiler") - - -def _can_target(cmd, arch): - """Return true if the architecture supports the -arch flag""" - newcmd = cmd[:] - fid, filename = tempfile.mkstemp(suffix=".f") - os.close(fid) - try: - d = os.path.dirname(filename) - output = os.path.splitext(filename)[0] + ".o" - try: - newcmd.extend(["-arch", arch, "-c", filename]) - p = Popen(newcmd, stderr=STDOUT, stdout=PIPE, cwd=d) - p.communicate() - return p.returncode == 0 - finally: - if os.path.exists(output): - os.remove(output) - finally: - os.remove(filename) - - -if __name__ == '__main__': - from distutils import log - from numpy.distutils import customized_fcompiler - log.set_verbosity(2) - - print(customized_fcompiler('gnu').get_version()) - try: - print(customized_fcompiler('g95').get_version()) - except Exception as e: - print(e) diff --git a/numpy/distutils/fcompiler/hpux.py b/numpy/distutils/fcompiler/hpux.py deleted file mode 100644 index 09e6483bf5ad..000000000000 --- a/numpy/distutils/fcompiler/hpux.py +++ /dev/null @@ -1,41 +0,0 @@ -from numpy.distutils.fcompiler import FCompiler - -compilers = ['HPUXFCompiler'] - -class HPUXFCompiler(FCompiler): - - compiler_type = 'hpux' - description = 'HP Fortran 90 Compiler' - 
version_pattern = r'HP F90 (?P[^\s*,]*)' - - executables = { - 'version_cmd' : ["f90", "+version"], - 'compiler_f77' : ["f90"], - 'compiler_fix' : ["f90"], - 'compiler_f90' : ["f90"], - 'linker_so' : ["ld", "-b"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - module_dir_switch = None #XXX: fix me - module_include_switch = None #XXX: fix me - pic_flags = ['+Z'] - def get_flags(self): - return self.pic_flags + ['+ppu', '+DD64'] - def get_flags_opt(self): - return ['-O3'] - def get_libraries(self): - return ['m'] - def get_library_dirs(self): - opt = ['/usr/lib/hpux64'] - return opt - def get_version(self, force=0, ok_status=[256, 0, 1]): - # XXX status==256 may indicate 'unrecognized option' or - # 'no input file'. So, version_cmd needs more work. - return FCompiler.get_version(self, force, ok_status) - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(10) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='hpux').get_version()) diff --git a/numpy/distutils/fcompiler/ibm.py b/numpy/distutils/fcompiler/ibm.py deleted file mode 100644 index 29927518c703..000000000000 --- a/numpy/distutils/fcompiler/ibm.py +++ /dev/null @@ -1,97 +0,0 @@ -import os -import re -import sys -import subprocess - -from numpy.distutils.fcompiler import FCompiler -from numpy.distutils.exec_command import find_executable -from numpy.distutils.misc_util import make_temp_file -from distutils import log - -compilers = ['IBMFCompiler'] - -class IBMFCompiler(FCompiler): - compiler_type = 'ibm' - description = 'IBM XL Fortran Compiler' - version_pattern = r'(xlf\(1\)\s*|)IBM XL Fortran ((Advanced Edition |)Version |Enterprise Edition V|for AIX, V)(?P[^\s*]*)' - #IBM XL Fortran Enterprise Edition V10.1 for AIX \nVersion: 10.01.0000.0004 - - executables = { - 'version_cmd' : ["", "-qversion"], - 'compiler_f77' : ["xlf"], - 'compiler_fix' : ["xlf90", "-qfixed"], - 'compiler_f90' : ["xlf90"], - 'linker_so' : ["xlf95"], - 
'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - def get_version(self,*args,**kwds): - version = FCompiler.get_version(self,*args,**kwds) - - if version is None and sys.platform.startswith('aix'): - # use lslpp to find out xlf version - lslpp = find_executable('lslpp') - xlf = find_executable('xlf') - if os.path.exists(xlf) and os.path.exists(lslpp): - try: - o = subprocess.check_output([lslpp, '-Lc', 'xlfcmp']) - except (OSError, subprocess.CalledProcessError): - pass - else: - m = re.search(r'xlfcmp:(?P\d+([.]\d+)+)', o) - if m: version = m.group('version') - - xlf_dir = '/etc/opt/ibmcmp/xlf' - if version is None and os.path.isdir(xlf_dir): - # linux: - # If the output of xlf does not contain version info - # (that's the case with xlf 8.1, for instance) then - # let's try another method: - l = sorted(os.listdir(xlf_dir)) - l.reverse() - l = [d for d in l if os.path.isfile(os.path.join(xlf_dir, d, 'xlf.cfg'))] - if l: - from distutils.version import LooseVersion - self.version = version = LooseVersion(l[0]) - return version - - def get_flags(self): - return ['-qextname'] - - def get_flags_debug(self): - return ['-g'] - - def get_flags_linker_so(self): - opt = [] - if sys.platform=='darwin': - opt.append('-Wl,-bundle,-flat_namespace,-undefined,suppress') - else: - opt.append('-bshared') - version = self.get_version(ok_status=[0, 40]) - if version is not None: - if sys.platform.startswith('aix'): - xlf_cfg = '/etc/xlf.cfg' - else: - xlf_cfg = '/etc/opt/ibmcmp/xlf/%s/xlf.cfg' % version - fo, new_cfg = make_temp_file(suffix='_xlf.cfg') - log.info('Creating '+new_cfg) - with open(xlf_cfg) as fi: - crt1_match = re.compile(r'\s*crt\s*=\s*(?P.*)/crt1.o').match - for line in fi: - m = crt1_match(line) - if m: - fo.write('crt = %s/bundle1.o\n' % (m.group('path'))) - else: - fo.write(line) - fo.close() - opt.append('-F'+new_cfg) - return opt - - def get_flags_opt(self): - return ['-O3'] - -if __name__ == '__main__': - from numpy.distutils import 
customized_fcompiler - log.set_verbosity(2) - print(customized_fcompiler(compiler='ibm').get_version()) diff --git a/numpy/distutils/fcompiler/intel.py b/numpy/distutils/fcompiler/intel.py deleted file mode 100644 index 1d6065904110..000000000000 --- a/numpy/distutils/fcompiler/intel.py +++ /dev/null @@ -1,211 +0,0 @@ -# http://developer.intel.com/software/products/compilers/flin/ -import sys - -from numpy.distutils.ccompiler import simple_version_match -from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file - -compilers = ['IntelFCompiler', 'IntelVisualFCompiler', - 'IntelItaniumFCompiler', 'IntelItaniumVisualFCompiler', - 'IntelEM64VisualFCompiler', 'IntelEM64TFCompiler'] - - -def intel_version_match(type): - # Match against the important stuff in the version string - return simple_version_match(start=r'Intel.*?Fortran.*?(?:%s).*?Version' % (type,)) - - -class BaseIntelFCompiler(FCompiler): - def update_executables(self): - f = dummy_fortran_file() - self.executables['version_cmd'] = ['', '-FI', '-V', '-c', - f + '.f', '-o', f + '.o'] - - def runtime_library_dir_option(self, dir): - # TODO: could use -Xlinker here, if it's supported - assert "," not in dir - - return '-Wl,-rpath=%s' % dir - - -class IntelFCompiler(BaseIntelFCompiler): - - compiler_type = 'intel' - compiler_aliases = ('ifort',) - description = 'Intel Fortran Compiler for 32-bit apps' - version_match = intel_version_match('32-bit|IA-32') - - possible_executables = ['ifort', 'ifc'] - - executables = { - 'version_cmd' : None, # set by update_executables - 'compiler_f77' : [None, "-72", "-w90", "-w95"], - 'compiler_f90' : [None], - 'compiler_fix' : [None, "-FI"], - 'linker_so' : ["", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - pic_flags = ['-fPIC'] - module_dir_switch = '-module ' # Don't remove ending space! 
- module_include_switch = '-I' - - def get_flags_free(self): - return ['-FR'] - - def get_flags(self): - return ['-fPIC'] - - def get_flags_opt(self): # Scipy test failures with -O2 - v = self.get_version() - mpopt = 'openmp' if v and v < '15' else 'qopenmp' - return ['-fp-model', 'strict', '-O1', - '-assume', 'minus0', '-{}'.format(mpopt)] - - def get_flags_arch(self): - return [] - - def get_flags_linker_so(self): - opt = FCompiler.get_flags_linker_so(self) - v = self.get_version() - if v and v >= '8.0': - opt.append('-nofor_main') - if sys.platform == 'darwin': - # Here, it's -dynamiclib - try: - idx = opt.index('-shared') - opt.remove('-shared') - except ValueError: - idx = 0 - opt[idx:idx] = ['-dynamiclib', '-Wl,-undefined,dynamic_lookup'] - return opt - - -class IntelItaniumFCompiler(IntelFCompiler): - compiler_type = 'intele' - compiler_aliases = () - description = 'Intel Fortran Compiler for Itanium apps' - - version_match = intel_version_match('Itanium|IA-64') - - possible_executables = ['ifort', 'efort', 'efc'] - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None, "-FI", "-w90", "-w95"], - 'compiler_fix' : [None, "-FI"], - 'compiler_f90' : [None], - 'linker_so' : ['', "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - -class IntelEM64TFCompiler(IntelFCompiler): - compiler_type = 'intelem' - compiler_aliases = () - description = 'Intel Fortran Compiler for 64-bit apps' - - version_match = intel_version_match('EM64T-based|Intel\\(R\\) 64|64|IA-64|64-bit') - - possible_executables = ['ifort', 'efort', 'efc'] - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None, "-FI"], - 'compiler_fix' : [None, "-FI"], - 'compiler_f90' : [None], - 'linker_so' : ['', "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - -# Is there no difference in the version string between the above compilers -# and the Visual compilers? 
- - -class IntelVisualFCompiler(BaseIntelFCompiler): - compiler_type = 'intelv' - description = 'Intel Visual Fortran Compiler for 32-bit apps' - version_match = intel_version_match('32-bit|IA-32') - - def update_executables(self): - f = dummy_fortran_file() - self.executables['version_cmd'] = ['', '/FI', '/c', - f + '.f', '/o', f + '.o'] - - ar_exe = 'lib.exe' - possible_executables = ['ifort', 'ifl'] - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None], - 'compiler_fix' : [None], - 'compiler_f90' : [None], - 'linker_so' : [None], - 'archiver' : [ar_exe, "/verbose", "/OUT:"], - 'ranlib' : None - } - - compile_switch = '/c ' - object_switch = '/Fo' # No space after /Fo! - library_switch = '/OUT:' # No space after /OUT:! - module_dir_switch = '/module:' # No space after /module: - module_include_switch = '/I' - - def get_flags(self): - opt = ['/nologo', '/MD', '/nbs', '/names:lowercase', - '/assume:underscore', '/fpp'] - return opt - - def get_flags_free(self): - return [] - - def get_flags_debug(self): - return ['/4Yb', '/d2'] - - def get_flags_opt(self): - return ['/O1', '/assume:minus0'] # Scipy test failures with /O2 - - def get_flags_arch(self): - return ["/arch:IA32", "/QaxSSE3"] - - def runtime_library_dir_option(self, dir): - raise NotImplementedError - - -class IntelItaniumVisualFCompiler(IntelVisualFCompiler): - compiler_type = 'intelev' - description = 'Intel Visual Fortran Compiler for Itanium apps' - - version_match = intel_version_match('Itanium') - - possible_executables = ['efl'] # XXX this is a wild guess - ar_exe = IntelVisualFCompiler.ar_exe - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None, "-FI", "-w90", "-w95"], - 'compiler_fix' : [None, "-FI", "-4L72", "-w"], - 'compiler_f90' : [None], - 'linker_so' : ['', "-shared"], - 'archiver' : [ar_exe, "/verbose", "/OUT:"], - 'ranlib' : None - } - - -class IntelEM64VisualFCompiler(IntelVisualFCompiler): - compiler_type = 'intelvem' - description = 'Intel Visual Fortran 
Compiler for 64-bit apps' - - version_match = simple_version_match(start=r'Intel\(R\).*?64,') - - def get_flags_arch(self): - return [] - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='intel').get_version()) diff --git a/numpy/distutils/fcompiler/lahey.py b/numpy/distutils/fcompiler/lahey.py deleted file mode 100644 index e925838268b8..000000000000 --- a/numpy/distutils/fcompiler/lahey.py +++ /dev/null @@ -1,45 +0,0 @@ -import os - -from numpy.distutils.fcompiler import FCompiler - -compilers = ['LaheyFCompiler'] - -class LaheyFCompiler(FCompiler): - - compiler_type = 'lahey' - description = 'Lahey/Fujitsu Fortran 95 Compiler' - version_pattern = r'Lahey/Fujitsu Fortran 95 Compiler Release (?P[^\s*]*)' - - executables = { - 'version_cmd' : ["", "--version"], - 'compiler_f77' : ["lf95", "--fix"], - 'compiler_fix' : ["lf95", "--fix"], - 'compiler_f90' : ["lf95"], - 'linker_so' : ["lf95", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - module_dir_switch = None #XXX Fix me - module_include_switch = None #XXX Fix me - - def get_flags_opt(self): - return ['-O'] - def get_flags_debug(self): - return ['-g', '--chk', '--chkglobal'] - def get_library_dirs(self): - opt = [] - d = os.environ.get('LAHEY') - if d: - opt.append(os.path.join(d, 'lib')) - return opt - def get_libraries(self): - opt = [] - opt.extend(['fj9f6', 'fj9i6', 'fj9ipp', 'fj9e6']) - return opt - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='lahey').get_version()) diff --git a/numpy/distutils/fcompiler/mips.py b/numpy/distutils/fcompiler/mips.py deleted file mode 100644 index a0973804571b..000000000000 --- a/numpy/distutils/fcompiler/mips.py +++ /dev/null @@ -1,54 +0,0 @@ -from numpy.distutils.cpuinfo import cpu -from 
numpy.distutils.fcompiler import FCompiler - -compilers = ['MIPSFCompiler'] - -class MIPSFCompiler(FCompiler): - - compiler_type = 'mips' - description = 'MIPSpro Fortran Compiler' - version_pattern = r'MIPSpro Compilers: Version (?P[^\s*,]*)' - - executables = { - 'version_cmd' : ["", "-version"], - 'compiler_f77' : ["f77", "-f77"], - 'compiler_fix' : ["f90", "-fixedform"], - 'compiler_f90' : ["f90"], - 'linker_so' : ["f90", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : None - } - module_dir_switch = None #XXX: fix me - module_include_switch = None #XXX: fix me - pic_flags = ['-KPIC'] - - def get_flags(self): - return self.pic_flags + ['-n32'] - def get_flags_opt(self): - return ['-O3'] - def get_flags_arch(self): - opt = [] - for a in '19 20 21 22_4k 22_5k 24 25 26 27 28 30 32_5k 32_10k'.split(): - if getattr(cpu, 'is_IP%s'%a)(): - opt.append('-TARG:platform=IP%s' % a) - break - return opt - def get_flags_arch_f77(self): - r = None - if cpu.is_r10000(): r = 10000 - elif cpu.is_r12000(): r = 12000 - elif cpu.is_r8000(): r = 8000 - elif cpu.is_r5000(): r = 5000 - elif cpu.is_r4000(): r = 4000 - if r is not None: - return ['r%s' % (r)] - return [] - def get_flags_arch_f90(self): - r = self.get_flags_arch_f77() - if r: - r[0] = '-' + r[0] - return r - -if __name__ == '__main__': - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='mips').get_version()) diff --git a/numpy/distutils/fcompiler/nag.py b/numpy/distutils/fcompiler/nag.py deleted file mode 100644 index 939201f44e02..000000000000 --- a/numpy/distutils/fcompiler/nag.py +++ /dev/null @@ -1,87 +0,0 @@ -import sys -import re -from numpy.distutils.fcompiler import FCompiler - -compilers = ['NAGFCompiler', 'NAGFORCompiler'] - -class BaseNAGFCompiler(FCompiler): - version_pattern = r'NAG.* Release (?P[^(\s]*)' - - def version_match(self, version_string): - m = re.search(self.version_pattern, version_string) - if m: - return m.group('version') - else: - return None - - 
def get_flags_linker_so(self): - return ["-Wl,-shared"] - def get_flags_opt(self): - return ['-O4'] - def get_flags_arch(self): - return [] - -class NAGFCompiler(BaseNAGFCompiler): - - compiler_type = 'nag' - description = 'NAGWare Fortran 95 Compiler' - - executables = { - 'version_cmd' : ["", "-V"], - 'compiler_f77' : ["f95", "-fixed"], - 'compiler_fix' : ["f95", "-fixed"], - 'compiler_f90' : ["f95"], - 'linker_so' : [""], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - def get_flags_linker_so(self): - if sys.platform == 'darwin': - return ['-unsharedf95', '-Wl,-bundle,-flat_namespace,-undefined,suppress'] - return BaseNAGFCompiler.get_flags_linker_so(self) - def get_flags_arch(self): - version = self.get_version() - if version and version < '5.1': - return ['-target=native'] - else: - return BaseNAGFCompiler.get_flags_arch(self) - def get_flags_debug(self): - return ['-g', '-gline', '-g90', '-nan', '-C'] - -class NAGFORCompiler(BaseNAGFCompiler): - - compiler_type = 'nagfor' - description = 'NAG Fortran Compiler' - - executables = { - 'version_cmd' : ["nagfor", "-V"], - 'compiler_f77' : ["nagfor", "-fixed"], - 'compiler_fix' : ["nagfor", "-fixed"], - 'compiler_f90' : ["nagfor"], - 'linker_so' : ["nagfor"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - def get_flags_linker_so(self): - if sys.platform == 'darwin': - return ['-unsharedrts', - '-Wl,-bundle,-flat_namespace,-undefined,suppress'] - return BaseNAGFCompiler.get_flags_linker_so(self) - def get_flags_debug(self): - version = self.get_version() - if version and version > '6.1': - return ['-g', '-u', '-nan', '-C=all', '-thread_safe', - '-kind=unique', '-Warn=allocation', '-Warn=subnormal'] - else: - return ['-g', '-nan', '-C=all', '-u', '-thread_safe'] - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - compiler = customized_fcompiler(compiler='nagfor') - print(compiler.get_version()) - 
print(compiler.get_flags_debug()) diff --git a/numpy/distutils/fcompiler/none.py b/numpy/distutils/fcompiler/none.py deleted file mode 100644 index ef411fffc7cb..000000000000 --- a/numpy/distutils/fcompiler/none.py +++ /dev/null @@ -1,28 +0,0 @@ -from numpy.distutils.fcompiler import FCompiler -from numpy.distutils import customized_fcompiler - -compilers = ['NoneFCompiler'] - -class NoneFCompiler(FCompiler): - - compiler_type = 'none' - description = 'Fake Fortran compiler' - - executables = {'compiler_f77': None, - 'compiler_f90': None, - 'compiler_fix': None, - 'linker_so': None, - 'linker_exe': None, - 'archiver': None, - 'ranlib': None, - 'version_cmd': None, - } - - def find_executables(self): - pass - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - print(customized_fcompiler(compiler='none').get_version()) diff --git a/numpy/distutils/fcompiler/nv.py b/numpy/distutils/fcompiler/nv.py deleted file mode 100644 index f518c8b0027a..000000000000 --- a/numpy/distutils/fcompiler/nv.py +++ /dev/null @@ -1,53 +0,0 @@ -from numpy.distutils.fcompiler import FCompiler - -compilers = ['NVHPCFCompiler'] - -class NVHPCFCompiler(FCompiler): - """ NVIDIA High Performance Computing (HPC) SDK Fortran Compiler - - https://developer.nvidia.com/hpc-sdk - - Since august 2020 the NVIDIA HPC SDK includes the compilers formerly known as The Portland Group compilers, - https://www.pgroup.com/index.htm. - See also `numpy.distutils.fcompiler.pg`. 
- """ - - compiler_type = 'nv' - description = 'NVIDIA HPC SDK' - version_pattern = r'\s*(nvfortran|.+ \(aka nvfortran\)) (?P[\d.-]+).*' - - executables = { - 'version_cmd': ["", "-V"], - 'compiler_f77': ["nvfortran"], - 'compiler_fix': ["nvfortran", "-Mfixed"], - 'compiler_f90': ["nvfortran"], - 'linker_so': [""], - 'archiver': ["ar", "-cr"], - 'ranlib': ["ranlib"] - } - pic_flags = ['-fpic'] - - module_dir_switch = '-module ' - module_include_switch = '-I' - - def get_flags(self): - opt = ['-Minform=inform', '-Mnosecond_underscore'] - return self.pic_flags + opt - - def get_flags_opt(self): - return ['-fast'] - - def get_flags_debug(self): - return ['-g'] - - def get_flags_linker_so(self): - return ["-shared", '-fpic'] - - def runtime_library_dir_option(self, dir): - return '-R%s' % dir - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='nv').get_version()) diff --git a/numpy/distutils/fcompiler/pathf95.py b/numpy/distutils/fcompiler/pathf95.py deleted file mode 100644 index 0768cb12e87a..000000000000 --- a/numpy/distutils/fcompiler/pathf95.py +++ /dev/null @@ -1,33 +0,0 @@ -from numpy.distutils.fcompiler import FCompiler - -compilers = ['PathScaleFCompiler'] - -class PathScaleFCompiler(FCompiler): - - compiler_type = 'pathf95' - description = 'PathScale Fortran Compiler' - version_pattern = r'PathScale\(TM\) Compiler Suite: Version (?P[\d.]+)' - - executables = { - 'version_cmd' : ["pathf95", "-version"], - 'compiler_f77' : ["pathf95", "-fixedform"], - 'compiler_fix' : ["pathf95", "-fixedform"], - 'compiler_f90' : ["pathf95"], - 'linker_so' : ["pathf95", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - pic_flags = ['-fPIC'] - module_dir_switch = '-module ' # Don't remove ending space! 
- module_include_switch = '-I' - - def get_flags_opt(self): - return ['-O3'] - def get_flags_debug(self): - return ['-g'] - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='pathf95').get_version()) diff --git a/numpy/distutils/fcompiler/pg.py b/numpy/distutils/fcompiler/pg.py deleted file mode 100644 index 72442c4fec61..000000000000 --- a/numpy/distutils/fcompiler/pg.py +++ /dev/null @@ -1,128 +0,0 @@ -# http://www.pgroup.com -import sys - -from numpy.distutils.fcompiler import FCompiler -from sys import platform -from os.path import join, dirname, normpath - -compilers = ['PGroupFCompiler', 'PGroupFlangCompiler'] - - -class PGroupFCompiler(FCompiler): - - compiler_type = 'pg' - description = 'Portland Group Fortran Compiler' - version_pattern = r'\s*pg(f77|f90|hpf|fortran) (?P[\d.-]+).*' - - if platform == 'darwin': - executables = { - 'version_cmd': ["", "-V"], - 'compiler_f77': ["pgfortran", "-dynamiclib"], - 'compiler_fix': ["pgfortran", "-Mfixed", "-dynamiclib"], - 'compiler_f90': ["pgfortran", "-dynamiclib"], - 'linker_so': ["libtool"], - 'archiver': ["ar", "-cr"], - 'ranlib': ["ranlib"] - } - pic_flags = [''] - else: - executables = { - 'version_cmd': ["", "-V"], - 'compiler_f77': ["pgfortran"], - 'compiler_fix': ["pgfortran", "-Mfixed"], - 'compiler_f90': ["pgfortran"], - 'linker_so': [""], - 'archiver': ["ar", "-cr"], - 'ranlib': ["ranlib"] - } - pic_flags = ['-fpic'] - - module_dir_switch = '-module ' - module_include_switch = '-I' - - def get_flags(self): - opt = ['-Minform=inform', '-Mnosecond_underscore'] - return self.pic_flags + opt - - def get_flags_opt(self): - return ['-fast'] - - def get_flags_debug(self): - return ['-g'] - - if platform == 'darwin': - def get_flags_linker_so(self): - return ["-dynamic", '-undefined', 'dynamic_lookup'] - - else: - def get_flags_linker_so(self): - return ["-shared", '-fpic'] - - def 
runtime_library_dir_option(self, dir): - return '-R%s' % dir - - -import functools - -class PGroupFlangCompiler(FCompiler): - compiler_type = 'flang' - description = 'Portland Group Fortran LLVM Compiler' - version_pattern = r'\s*(flang|clang) version (?P[\d.-]+).*' - - ar_exe = 'lib.exe' - possible_executables = ['flang'] - - executables = { - 'version_cmd': ["", "--version"], - 'compiler_f77': ["flang"], - 'compiler_fix': ["flang"], - 'compiler_f90': ["flang"], - 'linker_so': [None], - 'archiver': [ar_exe, "/verbose", "/OUT:"], - 'ranlib': None - } - - library_switch = '/OUT:' # No space after /OUT:! - module_dir_switch = '-module ' # Don't remove ending space! - - def get_libraries(self): - opt = FCompiler.get_libraries(self) - opt.extend(['flang', 'flangrti', 'ompstub']) - return opt - - @functools.lru_cache(maxsize=128) - def get_library_dirs(self): - """List of compiler library directories.""" - opt = FCompiler.get_library_dirs(self) - flang_dir = dirname(self.executables['compiler_f77'][0]) - opt.append(normpath(join(flang_dir, '..', 'lib'))) - - return opt - - def get_flags(self): - return [] - - def get_flags_free(self): - return [] - - def get_flags_debug(self): - return ['-g'] - - def get_flags_opt(self): - return ['-O3'] - - def get_flags_arch(self): - return [] - - def runtime_library_dir_option(self, dir): - raise NotImplementedError - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - if 'flang' in sys.argv: - print(customized_fcompiler(compiler='flang').get_version()) - else: - print(customized_fcompiler(compiler='pg').get_version()) diff --git a/numpy/distutils/fcompiler/sun.py b/numpy/distutils/fcompiler/sun.py deleted file mode 100644 index d039f0b25705..000000000000 --- a/numpy/distutils/fcompiler/sun.py +++ /dev/null @@ -1,51 +0,0 @@ -from numpy.distutils.ccompiler import simple_version_match -from numpy.distutils.fcompiler import FCompiler - -compilers = 
['SunFCompiler'] - -class SunFCompiler(FCompiler): - - compiler_type = 'sun' - description = 'Sun or Forte Fortran 95 Compiler' - # ex: - # f90: Sun WorkShop 6 update 2 Fortran 95 6.2 Patch 111690-10 2003/08/28 - version_match = simple_version_match( - start=r'f9[05]: (Sun|Forte|WorkShop).*Fortran 95') - - executables = { - 'version_cmd' : ["", "-V"], - 'compiler_f77' : ["f90"], - 'compiler_fix' : ["f90", "-fixed"], - 'compiler_f90' : ["f90"], - 'linker_so' : ["", "-Bdynamic", "-G"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - module_dir_switch = '-moddir=' - module_include_switch = '-M' - pic_flags = ['-xcode=pic32'] - - def get_flags_f77(self): - ret = ["-ftrap=%none"] - if (self.get_version() or '') >= '7': - ret.append("-f77") - else: - ret.append("-fixed") - return ret - def get_opt(self): - return ['-fast', '-dalign'] - def get_arch(self): - return ['-xtarget=generic'] - def get_libraries(self): - opt = [] - opt.extend(['fsu', 'sunmath', 'mvec']) - return opt - - def runtime_library_dir_option(self, dir): - return '-R%s' % dir - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='sun').get_version()) diff --git a/numpy/distutils/fcompiler/vast.py b/numpy/distutils/fcompiler/vast.py deleted file mode 100644 index 92a1647ba437..000000000000 --- a/numpy/distutils/fcompiler/vast.py +++ /dev/null @@ -1,52 +0,0 @@ -import os - -from numpy.distutils.fcompiler.gnu import GnuFCompiler - -compilers = ['VastFCompiler'] - -class VastFCompiler(GnuFCompiler): - compiler_type = 'vast' - compiler_aliases = () - description = 'Pacific-Sierra Research Fortran 90 Compiler' - version_pattern = (r'\s*Pacific-Sierra Research vf90 ' - r'(Personal|Professional)\s+(?P[^\s]*)') - - # VAST f90 does not support -o with -c. 
So, object files are created - # to the current directory and then moved to build directory - object_switch = ' && function _mvfile { mv -v `basename $1` $1 ; } && _mvfile ' - - executables = { - 'version_cmd' : ["vf90", "-v"], - 'compiler_f77' : ["g77"], - 'compiler_fix' : ["f90", "-Wv,-ya"], - 'compiler_f90' : ["f90"], - 'linker_so' : [""], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - module_dir_switch = None #XXX Fix me - module_include_switch = None #XXX Fix me - - def find_executables(self): - pass - - def get_version_cmd(self): - f90 = self.compiler_f90[0] - d, b = os.path.split(f90) - vf90 = os.path.join(d, 'v'+b) - return vf90 - - def get_flags_arch(self): - vast_version = self.get_version() - gnu = GnuFCompiler() - gnu.customize(None) - self.version = gnu.get_version() - opt = GnuFCompiler.get_flags_arch(self) - self.version = vast_version - return opt - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='vast').get_version()) diff --git a/numpy/distutils/from_template.py b/numpy/distutils/from_template.py deleted file mode 100644 index 90d1f4c384c7..000000000000 --- a/numpy/distutils/from_template.py +++ /dev/null @@ -1,261 +0,0 @@ -#!/usr/bin/env python3 -""" - -process_file(filename) - - takes templated file .xxx.src and produces .xxx file where .xxx - is .pyf .f90 or .f using the following template rules: - - '<..>' denotes a template. - - All function and subroutine blocks in a source file with names that - contain '<..>' will be replicated according to the rules in '<..>'. - - The number of comma-separated words in '<..>' will determine the number of - replicates. - - '<..>' may have two different forms, named and short. For example, - - named: - where anywhere inside a block '

' will be replaced with - 'd', 's', 'z', and 'c' for each replicate of the block. - - <_c> is already defined: <_c=s,d,c,z> - <_t> is already defined: <_t=real,double precision,complex,double complex> - - short: - , a short form of the named, useful when no

appears inside - a block. - - In general, '<..>' contains a comma separated list of arbitrary - expressions. If these expression must contain a comma|leftarrow|rightarrow, - then prepend the comma|leftarrow|rightarrow with a backslash. - - If an expression matches '\\' then it will be replaced - by -th expression. - - Note that all '<..>' forms in a block must have the same number of - comma-separated entries. - - Predefined named template rules: - - - - - - -""" -__all__ = ['process_str', 'process_file'] - -import os -import sys -import re - -routine_start_re = re.compile(r'(\n|\A)(( (\$|\*))|)\s*(subroutine|function)\b', re.I) -routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I) -function_start_re = re.compile(r'\n (\$|\*)\s*function\b', re.I) - -def parse_structure(astr): - """ Return a list of tuples for each function or subroutine each - tuple is the start and end of a subroutine or function to be - expanded. - """ - - spanlist = [] - ind = 0 - while True: - m = routine_start_re.search(astr, ind) - if m is None: - break - start = m.start() - if function_start_re.match(astr, start, m.end()): - while True: - i = astr.rfind('\n', ind, start) - if i==-1: - break - start = i - if astr[i:i+7]!='\n $': - break - start += 1 - m = routine_end_re.search(astr, m.end()) - ind = end = m and m.end()-1 or len(astr) - spanlist.append((start, end)) - return spanlist - -template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>") -named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>") -list_re = re.compile(r"<\s*((.*?))\s*>") - -def find_repl_patterns(astr): - reps = named_re.findall(astr) - names = {} - for rep in reps: - name = rep[0].strip() or unique_key(names) - repl = rep[1].replace(r'\,', '@comma@') - thelist = conv(repl) - names[name] = thelist - return names - -def find_and_remove_repl_patterns(astr): - names = find_repl_patterns(astr) - astr = re.subn(named_re, '', astr)[0] - return astr, names - -item_re = re.compile(r"\A\\(?P\d+)\Z") -def 
conv(astr): - b = astr.split(',') - l = [x.strip() for x in b] - for i in range(len(l)): - m = item_re.match(l[i]) - if m: - j = int(m.group('index')) - l[i] = l[j] - return ','.join(l) - -def unique_key(adict): - """ Obtain a unique key given a dictionary.""" - allkeys = list(adict.keys()) - done = False - n = 1 - while not done: - newkey = '__l%s' % (n) - if newkey in allkeys: - n += 1 - else: - done = True - return newkey - - -template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z') -def expand_sub(substr, names): - substr = substr.replace(r'\>', '@rightarrow@') - substr = substr.replace(r'\<', '@leftarrow@') - lnames = find_repl_patterns(substr) - substr = named_re.sub(r"<\1>", substr) # get rid of definition templates - - def listrepl(mobj): - thelist = conv(mobj.group(1).replace(r'\,', '@comma@')) - if template_name_re.match(thelist): - return "<%s>" % (thelist) - name = None - for key in lnames.keys(): # see if list is already in dictionary - if lnames[key] == thelist: - name = key - if name is None: # this list is not in the dictionary yet - name = unique_key(lnames) - lnames[name] = thelist - return "<%s>" % name - - substr = list_re.sub(listrepl, substr) # convert all lists to named templates - # newnames are constructed as needed - - numsubs = None - base_rule = None - rules = {} - for r in template_re.findall(substr): - if r not in rules: - thelist = lnames.get(r, names.get(r, None)) - if thelist is None: - raise ValueError('No replicates found for <%s>' % (r)) - if r not in names and not thelist.startswith('_'): - names[r] = thelist - rule = [i.replace('@comma@', ',') for i in thelist.split(',')] - num = len(rule) - - if numsubs is None: - numsubs = num - rules[r] = rule - base_rule = r - elif num == numsubs: - rules[r] = rule - else: - print("Mismatch in number of replacements (base <%s=%s>)" - " for <%s=%s>. Ignoring." 
% - (base_rule, ','.join(rules[base_rule]), r, thelist)) - if not rules: - return substr - - def namerepl(mobj): - name = mobj.group(1) - return rules.get(name, (k+1)*[name])[k] - - newstr = '' - for k in range(numsubs): - newstr += template_re.sub(namerepl, substr) + '\n\n' - - newstr = newstr.replace('@rightarrow@', '>') - newstr = newstr.replace('@leftarrow@', '<') - return newstr - -def process_str(allstr): - newstr = allstr - writestr = '' - - struct = parse_structure(newstr) - - oldend = 0 - names = {} - names.update(_special_names) - for sub in struct: - cleanedstr, defs = find_and_remove_repl_patterns(newstr[oldend:sub[0]]) - writestr += cleanedstr - names.update(defs) - writestr += expand_sub(newstr[sub[0]:sub[1]], names) - oldend = sub[1] - writestr += newstr[oldend:] - - return writestr - -include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P[\w\d./\\]+\.src)['\"]", re.I) - -def resolve_includes(source): - d = os.path.dirname(source) - with open(source) as fid: - lines = [] - for line in fid: - m = include_src_re.match(line) - if m: - fn = m.group('name') - if not os.path.isabs(fn): - fn = os.path.join(d, fn) - if os.path.isfile(fn): - lines.extend(resolve_includes(fn)) - else: - lines.append(line) - else: - lines.append(line) - return lines - -def process_file(source): - lines = resolve_includes(source) - return process_str(''.join(lines)) - -_special_names = find_repl_patterns(''' -<_c=s,d,c,z> -<_t=real,double precision,complex,double complex> - - - - - -''') - -def main(): - try: - file = sys.argv[1] - except IndexError: - fid = sys.stdin - outfile = sys.stdout - else: - fid = open(file, 'r') - (base, ext) = os.path.splitext(file) - newname = base - outfile = open(newname, 'w') - - allstr = fid.read() - writestr = process_str(allstr) - outfile.write(writestr) - - -if __name__ == "__main__": - main() diff --git a/numpy/distutils/fujitsuccompiler.py b/numpy/distutils/fujitsuccompiler.py deleted file mode 100644 index c25900b34f1d..000000000000 --- 
a/numpy/distutils/fujitsuccompiler.py +++ /dev/null @@ -1,28 +0,0 @@ -from distutils.unixccompiler import UnixCCompiler - -class FujitsuCCompiler(UnixCCompiler): - - """ - Fujitsu compiler. - """ - - compiler_type = 'fujitsu' - cc_exe = 'fcc' - cxx_exe = 'FCC' - - def __init__(self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__(self, verbose, dry_run, force) - cc_compiler = self.cc_exe - cxx_compiler = self.cxx_exe - self.set_executables( - compiler=cc_compiler + - ' -O3 -Nclang -fPIC', - compiler_so=cc_compiler + - ' -O3 -Nclang -fPIC', - compiler_cxx=cxx_compiler + - ' -O3 -Nclang -fPIC', - linker_exe=cc_compiler + - ' -lfj90i -lfj90f -lfjsrcinfo -lelf -shared', - linker_so=cc_compiler + - ' -lfj90i -lfj90f -lfjsrcinfo -lelf -shared' - ) diff --git a/numpy/distutils/intelccompiler.py b/numpy/distutils/intelccompiler.py deleted file mode 100644 index 0fa1c11dd676..000000000000 --- a/numpy/distutils/intelccompiler.py +++ /dev/null @@ -1,111 +0,0 @@ -import platform - -from distutils.unixccompiler import UnixCCompiler -from numpy.distutils.exec_command import find_executable -from numpy.distutils.ccompiler import simple_version_match -if platform.system() == 'Windows': - from numpy.distutils.msvc9compiler import MSVCCompiler - - -class IntelCCompiler(UnixCCompiler): - """A modified Intel compiler compatible with a GCC-built Python.""" - compiler_type = 'intel' - cc_exe = 'icc' - cc_args = 'fPIC' - - def __init__(self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__(self, verbose, dry_run, force) - - v = self.get_version() - mpopt = 'openmp' if v and v < '15' else 'qopenmp' - self.cc_exe = ('icc -fPIC -fp-model strict -O3 ' - '-fomit-frame-pointer -{}').format(mpopt) - compiler = self.cc_exe - - if platform.system() == 'Darwin': - shared_flag = '-Wl,-undefined,dynamic_lookup' - else: - shared_flag = '-shared' - self.set_executables(compiler=compiler, - compiler_so=compiler, - compiler_cxx=compiler, - archiver='xiar' + ' cru', - linker_exe=compiler 
+ ' -shared-intel', - linker_so=compiler + ' ' + shared_flag + - ' -shared-intel') - - -class IntelItaniumCCompiler(IntelCCompiler): - compiler_type = 'intele' - - # On Itanium, the Intel Compiler used to be called ecc, let's search for - # it (now it's also icc, so ecc is last in the search). - for cc_exe in map(find_executable, ['icc', 'ecc']): - if cc_exe: - break - - -class IntelEM64TCCompiler(UnixCCompiler): - """ - A modified Intel x86_64 compiler compatible with a 64bit GCC-built Python. - """ - compiler_type = 'intelem' - cc_exe = 'icc -m64' - cc_args = '-fPIC' - - def __init__(self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__(self, verbose, dry_run, force) - - v = self.get_version() - mpopt = 'openmp' if v and v < '15' else 'qopenmp' - self.cc_exe = ('icc -std=c99 -m64 -fPIC -fp-model strict -O3 ' - '-fomit-frame-pointer -{}').format(mpopt) - compiler = self.cc_exe - - if platform.system() == 'Darwin': - shared_flag = '-Wl,-undefined,dynamic_lookup' - else: - shared_flag = '-shared' - self.set_executables(compiler=compiler, - compiler_so=compiler, - compiler_cxx=compiler, - archiver='xiar' + ' cru', - linker_exe=compiler + ' -shared-intel', - linker_so=compiler + ' ' + shared_flag + - ' -shared-intel') - - -if platform.system() == 'Windows': - class IntelCCompilerW(MSVCCompiler): - """ - A modified Intel compiler compatible with an MSVC-built Python. 
- """ - compiler_type = 'intelw' - compiler_cxx = 'icl' - - def __init__(self, verbose=0, dry_run=0, force=0): - MSVCCompiler.__init__(self, verbose, dry_run, force) - version_match = simple_version_match(start=r'Intel\(R\).*?32,') - self.__version = version_match - - def initialize(self, plat_name=None): - MSVCCompiler.initialize(self, plat_name) - self.cc = self.find_exe('icl.exe') - self.lib = self.find_exe('xilib') - self.linker = self.find_exe('xilink') - self.compile_options = ['/nologo', '/O3', '/MD', '/W3', - '/Qstd=c99'] - self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', - '/Qstd=c99', '/Z7', '/D_DEBUG'] - - class IntelEM64TCCompilerW(IntelCCompilerW): - """ - A modified Intel x86_64 compiler compatible with - a 64bit MSVC-built Python. - """ - compiler_type = 'intelemw' - - def __init__(self, verbose=0, dry_run=0, force=0): - MSVCCompiler.__init__(self, verbose, dry_run, force) - version_match = simple_version_match(start=r'Intel\(R\).*?64,') - self.__version = version_match diff --git a/numpy/distutils/lib2def.py b/numpy/distutils/lib2def.py deleted file mode 100644 index 851682c63310..000000000000 --- a/numpy/distutils/lib2def.py +++ /dev/null @@ -1,116 +0,0 @@ -import re -import sys -import subprocess - -__doc__ = """This module generates a DEF file from the symbols in -an MSVC-compiled DLL import library. It correctly discriminates between -data and functions. The data is collected from the output of the program -nm(1). 
- -Usage: - python lib2def.py [libname.lib] [output.def] -or - python lib2def.py [libname.lib] > output.def - -libname.lib defaults to python.lib and output.def defaults to stdout - -Author: Robert Kern -Last Update: April 30, 1999 -""" - -__version__ = '0.1a' - -py_ver = "%d%d" % tuple(sys.version_info[:2]) - -DEFAULT_NM = ['nm', '-Cs'] - -DEF_HEADER = """LIBRARY python%s.dll -;CODE PRELOAD MOVEABLE DISCARDABLE -;DATA PRELOAD SINGLE - -EXPORTS -""" % py_ver -# the header of the DEF file - -FUNC_RE = re.compile(r"^(.*) in python%s\.dll" % py_ver, re.MULTILINE) -DATA_RE = re.compile(r"^_imp__(.*) in python%s\.dll" % py_ver, re.MULTILINE) - -def parse_cmd(): - """Parses the command-line arguments. - -libfile, deffile = parse_cmd()""" - if len(sys.argv) == 3: - if sys.argv[1][-4:] == '.lib' and sys.argv[2][-4:] == '.def': - libfile, deffile = sys.argv[1:] - elif sys.argv[1][-4:] == '.def' and sys.argv[2][-4:] == '.lib': - deffile, libfile = sys.argv[1:] - else: - print("I'm assuming that your first argument is the library") - print("and the second is the DEF file.") - elif len(sys.argv) == 2: - if sys.argv[1][-4:] == '.def': - deffile = sys.argv[1] - libfile = 'python%s.lib' % py_ver - elif sys.argv[1][-4:] == '.lib': - deffile = None - libfile = sys.argv[1] - else: - libfile = 'python%s.lib' % py_ver - deffile = None - return libfile, deffile - -def getnm(nm_cmd=['nm', '-Cs', 'python%s.lib' % py_ver], shell=True): - """Returns the output of nm_cmd via a pipe. - -nm_output = getnm(nm_cmd = 'nm -Cs py_lib')""" - p = subprocess.Popen(nm_cmd, shell=shell, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, text=True) - nm_output, nm_err = p.communicate() - if p.returncode != 0: - raise RuntimeError('failed to run "%s": "%s"' % ( - ' '.join(nm_cmd), nm_err)) - return nm_output - -def parse_nm(nm_output): - """Returns a tuple of lists: dlist for the list of data -symbols and flist for the list of function symbols. 
- -dlist, flist = parse_nm(nm_output)""" - data = DATA_RE.findall(nm_output) - func = FUNC_RE.findall(nm_output) - - flist = [] - for sym in data: - if sym in func and (sym[:2] == 'Py' or sym[:3] == '_Py' or sym[:4] == 'init'): - flist.append(sym) - - dlist = [] - for sym in data: - if sym not in flist and (sym[:2] == 'Py' or sym[:3] == '_Py'): - dlist.append(sym) - - dlist.sort() - flist.sort() - return dlist, flist - -def output_def(dlist, flist, header, file = sys.stdout): - """Outputs the final DEF file to a file defaulting to stdout. - -output_def(dlist, flist, header, file = sys.stdout)""" - for data_sym in dlist: - header = header + '\t%s DATA\n' % data_sym - header = header + '\n' # blank line - for func_sym in flist: - header = header + '\t%s\n' % func_sym - file.write(header) - -if __name__ == '__main__': - libfile, deffile = parse_cmd() - if deffile is None: - deffile = sys.stdout - else: - deffile = open(deffile, 'w') - nm_cmd = DEFAULT_NM + [str(libfile)] - nm_output = getnm(nm_cmd, shell=False) - dlist, flist = parse_nm(nm_output) - output_def(dlist, flist, DEF_HEADER, deffile) diff --git a/numpy/distutils/line_endings.py b/numpy/distutils/line_endings.py deleted file mode 100644 index 686e5ebd937f..000000000000 --- a/numpy/distutils/line_endings.py +++ /dev/null @@ -1,77 +0,0 @@ -""" Functions for converting from DOS to UNIX line endings - -""" -import os -import re -import sys - - -def dos2unix(file): - "Replace CRLF with LF in argument files. Print names of changed files." 
- if os.path.isdir(file): - print(file, "Directory!") - return - - with open(file, "rb") as fp: - data = fp.read() - if '\0' in data: - print(file, "Binary!") - return - - newdata = re.sub("\r\n", "\n", data) - if newdata != data: - print('dos2unix:', file) - with open(file, "wb") as f: - f.write(newdata) - return file - else: - print(file, 'ok') - -def dos2unix_one_dir(modified_files, dir_name, file_names): - for file in file_names: - full_path = os.path.join(dir_name, file) - file = dos2unix(full_path) - if file is not None: - modified_files.append(file) - -def dos2unix_dir(dir_name): - modified_files = [] - os.path.walk(dir_name, dos2unix_one_dir, modified_files) - return modified_files -#---------------------------------- - -def unix2dos(file): - "Replace LF with CRLF in argument files. Print names of changed files." - if os.path.isdir(file): - print(file, "Directory!") - return - - with open(file, "rb") as fp: - data = fp.read() - if '\0' in data: - print(file, "Binary!") - return - newdata = re.sub("\r\n", "\n", data) - newdata = re.sub("\n", "\r\n", newdata) - if newdata != data: - print('unix2dos:', file) - with open(file, "wb") as f: - f.write(newdata) - return file - else: - print(file, 'ok') - -def unix2dos_one_dir(modified_files, dir_name, file_names): - for file in file_names: - full_path = os.path.join(dir_name, file) - unix2dos(full_path) - if file is not None: - modified_files.append(file) - -def unix2dos_dir(dir_name): - modified_files = [] - os.path.walk(dir_name, unix2dos_one_dir, modified_files) - return modified_files - -if __name__ == "__main__": - dos2unix_dir(sys.argv[1]) diff --git a/numpy/distutils/log.py b/numpy/distutils/log.py deleted file mode 100644 index 3347f56d6fe9..000000000000 --- a/numpy/distutils/log.py +++ /dev/null @@ -1,111 +0,0 @@ -# Colored log -import sys -from distutils.log import * # noqa: F403 -from distutils.log import Log as old_Log -from distutils.log import _global_log - -from numpy.distutils.misc_util import 
(red_text, default_text, cyan_text, - green_text, is_sequence, is_string) - - -def _fix_args(args,flag=1): - if is_string(args): - return args.replace('%', '%%') - if flag and is_sequence(args): - return tuple([_fix_args(a, flag=0) for a in args]) - return args - - -class Log(old_Log): - def _log(self, level, msg, args): - if level >= self.threshold: - if args: - msg = msg % _fix_args(args) - if 0: - if msg.startswith('copying ') and msg.find(' -> ') != -1: - return - if msg.startswith('byte-compiling '): - return - print(_global_color_map[level](msg)) - sys.stdout.flush() - - def good(self, msg, *args): - """ - If we log WARN messages, log this message as a 'nice' anti-warn - message. - - """ - if WARN >= self.threshold: - if args: - print(green_text(msg % _fix_args(args))) - else: - print(green_text(msg)) - sys.stdout.flush() - - -_global_log.__class__ = Log - -good = _global_log.good - -def set_threshold(level, force=False): - prev_level = _global_log.threshold - if prev_level > DEBUG or force: - # If we're running at DEBUG, don't change the threshold, as there's - # likely a good reason why we're running at this level. - _global_log.threshold = level - if level <= DEBUG: - info('set_threshold: setting threshold to DEBUG level,' - ' it can be changed only with force argument') - else: - info('set_threshold: not changing threshold from DEBUG level' - ' %s to %s' % (prev_level, level)) - return prev_level - -def get_threshold(): - return _global_log.threshold - -def set_verbosity(v, force=False): - prev_level = _global_log.threshold - if v < 0: - set_threshold(ERROR, force) - elif v == 0: - set_threshold(WARN, force) - elif v == 1: - set_threshold(INFO, force) - elif v >= 2: - set_threshold(DEBUG, force) - return {FATAL:-2,ERROR:-1,WARN:0,INFO:1,DEBUG:2}.get(prev_level, 1) - - -_global_color_map = { - DEBUG:cyan_text, - INFO:default_text, - WARN:red_text, - ERROR:red_text, - FATAL:red_text -} - -# don't use INFO,.. 
flags in set_verbosity, these flags are for set_threshold. -set_verbosity(0, force=True) - - -_error = error -_warn = warn -_info = info -_debug = debug - - -def error(msg, *a, **kw): - _error(f"ERROR: {msg}", *a, **kw) - - -def warn(msg, *a, **kw): - _warn(f"WARN: {msg}", *a, **kw) - - -def info(msg, *a, **kw): - _info(f"INFO: {msg}", *a, **kw) - - -def debug(msg, *a, **kw): - _debug(f"DEBUG: {msg}", *a, **kw) diff --git a/numpy/distutils/mingw/gfortran_vs2003_hack.c b/numpy/distutils/mingw/gfortran_vs2003_hack.c deleted file mode 100644 index 485a675d8a1f..000000000000 --- a/numpy/distutils/mingw/gfortran_vs2003_hack.c +++ /dev/null @@ -1,6 +0,0 @@ -int _get_output_format(void) -{ - return 0; -} - -int _imp____lc_codepage = 0; diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py deleted file mode 100644 index 4763f41ad326..000000000000 --- a/numpy/distutils/mingw32ccompiler.py +++ /dev/null @@ -1,591 +0,0 @@ -""" -Support code for building Python extensions on Windows. - - # NT stuff - # 1. Make sure libpython.a exists for gcc. If not, build it. - # 2. Force windows to use gcc (we're struggling with MSVC and g77 support) - # 3. Force windows to use g77 - -""" -import os -import sys -import subprocess -import re -import textwrap - -# Overwrite certain distutils.ccompiler functions: -import numpy.distutils.ccompiler # noqa: F401 -from numpy.distutils import log -# NT stuff -# 1. Make sure libpython.a exists for gcc. If not, build it. -# 2. Force windows to use gcc (we're struggling with MSVC and g77 support) -# --> this is done in numpy/distutils/ccompiler.py -# 3. 
Force windows to use g77 - -import distutils.cygwinccompiler -from distutils.unixccompiler import UnixCCompiler -from distutils.msvccompiler import get_build_version as get_build_msvc_version -from distutils.errors import UnknownFileError -from numpy.distutils.misc_util import (msvc_runtime_library, - msvc_runtime_version, - msvc_runtime_major, - get_build_architecture) - -def get_msvcr_replacement(): - """Replacement for outdated version of get_msvcr from cygwinccompiler""" - msvcr = msvc_runtime_library() - return [] if msvcr is None else [msvcr] - - -# Useful to generate table of symbols from a dll -_START = re.compile(r'\[Ordinal/Name Pointer\] Table') -_TABLE = re.compile(r'^\s+\[([\s*[0-9]*)\] ([a-zA-Z0-9_]*)') - -# the same as cygwin plus some additional parameters -class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler): - """ A modified MingW32 compiler compatible with an MSVC built Python. - - """ - - compiler_type = 'mingw32' - - def __init__ (self, - verbose=0, - dry_run=0, - force=0): - - distutils.cygwinccompiler.CygwinCCompiler.__init__ (self, verbose, - dry_run, force) - - # **changes: eric jones 4/11/01 - # 1. Check for import library on Windows. Build if it doesn't exist. - - build_import_library() - - # Check for custom msvc runtime library on Windows. Build if it doesn't exist. - msvcr_success = build_msvcr_library() - msvcr_dbg_success = build_msvcr_library(debug=True) - if msvcr_success or msvcr_dbg_success: - # add preprocessor statement for using customized msvcr lib - self.define_macro('NPY_MINGW_USE_CUSTOM_MSVCR') - - # Define the MSVC version as hint for MinGW - msvcr_version = msvc_runtime_version() - if msvcr_version: - self.define_macro('__MSVCRT_VERSION__', '0x%04i' % msvcr_version) - - # MS_WIN64 should be defined when building for amd64 on windows, - # but python headers define it only for MS compilers, which has all - # kind of bad consequences, like using Py_ModuleInit4 instead of - # Py_ModuleInit4_64, etc... 
So we add it here - if get_build_architecture() == 'AMD64': - self.set_executables( - compiler='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall', - compiler_so='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall ' - '-Wstrict-prototypes', - linker_exe='gcc -g', - linker_so='gcc -g -shared') - else: - self.set_executables( - compiler='gcc -O2 -Wall', - compiler_so='gcc -O2 -Wall -Wstrict-prototypes', - linker_exe='g++ ', - linker_so='g++ -shared') - # added for python2.3 support - # we can't pass it through set_executables because pre 2.2 would fail - self.compiler_cxx = ['g++'] - - # Maybe we should also append -mthreads, but then the finished dlls - # need another dll (mingwm10.dll see Mingw32 docs) (-mthreads: Support - # thread-safe exception handling on `Mingw32') - - # no additional libraries needed - #self.dll_libraries=[] - return - - # __init__ () - - def link(self, - target_desc, - objects, - output_filename, - output_dir, - libraries, - library_dirs, - runtime_library_dirs, - export_symbols = None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None): - # Include the appropriate MSVC runtime library if Python was built - # with MSVC >= 7.0 (MinGW standard is msvcrt) - runtime_library = msvc_runtime_library() - if runtime_library: - if not libraries: - libraries = [] - libraries.append(runtime_library) - args = (self, - target_desc, - objects, - output_filename, - output_dir, - libraries, - library_dirs, - runtime_library_dirs, - None, #export_symbols, we do this in our def-file - debug, - extra_preargs, - extra_postargs, - build_temp, - target_lang) - func = UnixCCompiler.link - func(*args[:func.__code__.co_argcount]) - return - - def object_filenames (self, - source_filenames, - strip_dir=0, - output_dir=''): - if output_dir is None: output_dir = '' - obj_names = [] - for src_name in source_filenames: - # use normcase to make sure '.rc' is really '.rc' and not '.RC' - (base, ext) = os.path.splitext (os.path.normcase(src_name)) - - # added 
these lines to strip off windows drive letters - # without it, .o files are placed next to .c files - # instead of the build directory - drv, base = os.path.splitdrive(base) - if drv: - base = base[1:] - - if ext not in (self.src_extensions + ['.rc', '.res']): - raise UnknownFileError( - "unknown file type '%s' (from '%s')" % \ - (ext, src_name)) - if strip_dir: - base = os.path.basename (base) - if ext == '.res' or ext == '.rc': - # these need to be compiled to object files - obj_names.append (os.path.join (output_dir, - base + ext + self.obj_extension)) - else: - obj_names.append (os.path.join (output_dir, - base + self.obj_extension)) - return obj_names - - # object_filenames () - - -def find_python_dll(): - # We can't do much here: - # - find it in the virtualenv (sys.prefix) - # - find it in python main dir (sys.base_prefix, if in a virtualenv) - # - in system32, - # - ortherwise (Sxs), I don't know how to get it. - stems = [sys.prefix] - if sys.base_prefix != sys.prefix: - stems.append(sys.base_prefix) - - sub_dirs = ['', 'lib', 'bin'] - # generate possible combinations of directory trees and sub-directories - lib_dirs = [] - for stem in stems: - for folder in sub_dirs: - lib_dirs.append(os.path.join(stem, folder)) - - # add system directory as well - if 'SYSTEMROOT' in os.environ: - lib_dirs.append(os.path.join(os.environ['SYSTEMROOT'], 'System32')) - - # search in the file system for possible candidates - major_version, minor_version = tuple(sys.version_info[:2]) - implementation = sys.implementation.name - if implementation == 'cpython': - dllname = f'python{major_version}{minor_version}.dll' - elif implementation == 'pypy': - dllname = f'libpypy{major_version}.{minor_version}-c.dll' - else: - dllname = f'Unknown platform {implementation}' - print("Looking for %s" % dllname) - for folder in lib_dirs: - dll = os.path.join(folder, dllname) - if os.path.exists(dll): - return dll - - raise ValueError("%s not found in %s" % (dllname, lib_dirs)) - -def 
dump_table(dll): - st = subprocess.check_output(["objdump.exe", "-p", dll]) - return st.split(b'\n') - -def generate_def(dll, dfile): - """Given a dll file location, get all its exported symbols and dump them - into the given def file. - - The .def file will be overwritten""" - dump = dump_table(dll) - for i in range(len(dump)): - if _START.match(dump[i].decode()): - break - else: - raise ValueError("Symbol table not found") - - syms = [] - for j in range(i+1, len(dump)): - m = _TABLE.match(dump[j].decode()) - if m: - syms.append((int(m.group(1).strip()), m.group(2))) - else: - break - - if len(syms) == 0: - log.warn('No symbols found in %s' % dll) - - with open(dfile, 'w') as d: - d.write('LIBRARY %s\n' % os.path.basename(dll)) - d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n') - d.write(';DATA PRELOAD SINGLE\n') - d.write('\nEXPORTS\n') - for s in syms: - #d.write('@%d %s\n' % (s[0], s[1])) - d.write('%s\n' % s[1]) - -def find_dll(dll_name): - - arch = {'AMD64' : 'amd64', - 'Intel' : 'x86'}[get_build_architecture()] - - def _find_dll_in_winsxs(dll_name): - # Walk through the WinSxS directory to find the dll. - winsxs_path = os.path.join(os.environ.get('WINDIR', r'C:\WINDOWS'), - 'winsxs') - if not os.path.exists(winsxs_path): - return None - for root, dirs, files in os.walk(winsxs_path): - if dll_name in files and arch in root: - return os.path.join(root, dll_name) - return None - - def _find_dll_in_path(dll_name): - # First, look in the Python directory, then scan PATH for - # the given dll name. 
- for path in [sys.prefix] + os.environ['PATH'].split(';'): - filepath = os.path.join(path, dll_name) - if os.path.exists(filepath): - return os.path.abspath(filepath) - - return _find_dll_in_winsxs(dll_name) or _find_dll_in_path(dll_name) - -def build_msvcr_library(debug=False): - if os.name != 'nt': - return False - - # If the version number is None, then we couldn't find the MSVC runtime at - # all, because we are running on a Python distribution which is customed - # compiled; trust that the compiler is the same as the one available to us - # now, and that it is capable of linking with the correct runtime without - # any extra options. - msvcr_ver = msvc_runtime_major() - if msvcr_ver is None: - log.debug('Skip building import library: ' - 'Runtime is not compiled with MSVC') - return False - - # Skip using a custom library for versions < MSVC 8.0 - if msvcr_ver < 80: - log.debug('Skip building msvcr library:' - ' custom functionality not present') - return False - - msvcr_name = msvc_runtime_library() - if debug: - msvcr_name += 'd' - - # Skip if custom library already exists - out_name = "lib%s.a" % msvcr_name - out_file = os.path.join(sys.prefix, 'libs', out_name) - if os.path.isfile(out_file): - log.debug('Skip building msvcr library: "%s" exists' % - (out_file,)) - return True - - # Find the msvcr dll - msvcr_dll_name = msvcr_name + '.dll' - dll_file = find_dll(msvcr_dll_name) - if not dll_file: - log.warn('Cannot build msvcr library: "%s" not found' % - msvcr_dll_name) - return False - - def_name = "lib%s.def" % msvcr_name - def_file = os.path.join(sys.prefix, 'libs', def_name) - - log.info('Building msvcr library: "%s" (from %s)' \ - % (out_file, dll_file)) - - # Generate a symbol definition file from the msvcr dll - generate_def(dll_file, def_file) - - # Create a custom mingw library for the given symbol definitions - cmd = ['dlltool', '-d', def_file, '-l', out_file] - retcode = subprocess.call(cmd) - - # Clean up symbol definitions - 
os.remove(def_file) - - return (not retcode) - -def build_import_library(): - if os.name != 'nt': - return - - arch = get_build_architecture() - if arch == 'AMD64': - return _build_import_library_amd64() - elif arch == 'Intel': - return _build_import_library_x86() - else: - raise ValueError("Unhandled arch %s" % arch) - -def _check_for_import_lib(): - """Check if an import library for the Python runtime already exists.""" - major_version, minor_version = tuple(sys.version_info[:2]) - - # patterns for the file name of the library itself - patterns = ['libpython%d%d.a', - 'libpython%d%d.dll.a', - 'libpython%d.%d.dll.a'] - - # directory trees that may contain the library - stems = [sys.prefix] - if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix: - stems.append(sys.base_prefix) - elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix: - stems.append(sys.real_prefix) - - # possible subdirectories within those trees where it is placed - sub_dirs = ['libs', 'lib'] - - # generate a list of candidate locations - candidates = [] - for pat in patterns: - filename = pat % (major_version, minor_version) - for stem_dir in stems: - for folder in sub_dirs: - candidates.append(os.path.join(stem_dir, folder, filename)) - - # test the filesystem to see if we can find any of these - for fullname in candidates: - if os.path.isfile(fullname): - # already exists, in location given - return (True, fullname) - - # needs to be built, preferred location given first - return (False, candidates[0]) - -def _build_import_library_amd64(): - out_exists, out_file = _check_for_import_lib() - if out_exists: - log.debug('Skip building import library: "%s" exists', out_file) - return - - # get the runtime dll for which we are building import library - dll_file = find_python_dll() - log.info('Building import library (arch=AMD64): "%s" (from %s)' % - (out_file, dll_file)) - - # generate symbol list from this library - def_name = "python%d%d.def" % tuple(sys.version_info[:2]) - 
def_file = os.path.join(sys.prefix, 'libs', def_name) - generate_def(dll_file, def_file) - - # generate import library from this symbol list - cmd = ['dlltool', '-d', def_file, '-l', out_file] - subprocess.check_call(cmd) - -def _build_import_library_x86(): - """ Build the import libraries for Mingw32-gcc on Windows - """ - out_exists, out_file = _check_for_import_lib() - if out_exists: - log.debug('Skip building import library: "%s" exists', out_file) - return - - lib_name = "python%d%d.lib" % tuple(sys.version_info[:2]) - lib_file = os.path.join(sys.prefix, 'libs', lib_name) - if not os.path.isfile(lib_file): - # didn't find library file in virtualenv, try base distribution, too, - # and use that instead if found there. for Python 2.7 venvs, the base - # directory is in attribute real_prefix instead of base_prefix. - if hasattr(sys, 'base_prefix'): - base_lib = os.path.join(sys.base_prefix, 'libs', lib_name) - elif hasattr(sys, 'real_prefix'): - base_lib = os.path.join(sys.real_prefix, 'libs', lib_name) - else: - base_lib = '' # os.path.isfile('') == False - - if os.path.isfile(base_lib): - lib_file = base_lib - else: - log.warn('Cannot build import library: "%s" not found', lib_file) - return - log.info('Building import library (ARCH=x86): "%s"', out_file) - - from numpy.distutils import lib2def - - def_name = "python%d%d.def" % tuple(sys.version_info[:2]) - def_file = os.path.join(sys.prefix, 'libs', def_name) - nm_output = lib2def.getnm( - lib2def.DEFAULT_NM + [lib_file], shell=False) - dlist, flist = lib2def.parse_nm(nm_output) - with open(def_file, 'w') as fid: - lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, fid) - - dll_name = find_python_dll () - - cmd = ["dlltool", - "--dllname", dll_name, - "--def", def_file, - "--output-lib", out_file] - status = subprocess.check_output(cmd) - if status: - log.warn('Failed to build import library for gcc. 
Linking will fail.') - return - -#===================================== -# Dealing with Visual Studio MANIFESTS -#===================================== - -# Functions to deal with visual studio manifests. Manifest are a mechanism to -# enforce strong DLL versioning on windows, and has nothing to do with -# distutils MANIFEST. manifests are XML files with version info, and used by -# the OS loader; they are necessary when linking against a DLL not in the -# system path; in particular, official python 2.6 binary is built against the -# MS runtime 9 (the one from VS 2008), which is not available on most windows -# systems; python 2.6 installer does install it in the Win SxS (Side by side) -# directory, but this requires the manifest for this to work. This is a big -# mess, thanks MS for a wonderful system. - -# XXX: ideally, we should use exactly the same version as used by python. I -# submitted a patch to get this version, but it was only included for python -# 2.6.1 and above. So for versions below, we use a "best guess". -_MSVCRVER_TO_FULLVER = {} -if sys.platform == 'win32': - try: - import msvcrt - # I took one version in my SxS directory: no idea if it is the good - # one, and we can't retrieve it from python - _MSVCRVER_TO_FULLVER['80'] = "8.0.50727.42" - _MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8" - # Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0 - # on Windows XP: - _MSVCRVER_TO_FULLVER['100'] = "10.0.30319.460" - crt_ver = getattr(msvcrt, 'CRT_ASSEMBLY_VERSION', None) - if crt_ver is not None: # Available at least back to Python 3.3 - maj, min = re.match(r'(\d+)\.(\d)', crt_ver).groups() - _MSVCRVER_TO_FULLVER[maj + min] = crt_ver - del maj, min - del crt_ver - except ImportError: - # If we are here, means python was not built with MSVC. 
Not sure what - # to do in that case: manifest building will fail, but it should not be - # used in that case anyway - log.warn('Cannot import msvcrt: using manifest will not be possible') - -def msvc_manifest_xml(maj, min): - """Given a major and minor version of the MSVCR, returns the - corresponding XML file.""" - try: - fullver = _MSVCRVER_TO_FULLVER[str(maj * 10 + min)] - except KeyError: - raise ValueError("Version %d,%d of MSVCRT not supported yet" % - (maj, min)) from None - # Don't be fooled, it looks like an XML, but it is not. In particular, it - # should not have any space before starting, and its size should be - # divisible by 4, most likely for alignment constraints when the xml is - # embedded in the binary... - # This template was copied directly from the python 2.6 binary (using - # strings.exe from mingw on python.exe). - template = textwrap.dedent("""\ - - - - - - - - - - - - - - """) - - return template % {'fullver': fullver, 'maj': maj, 'min': min} - -def manifest_rc(name, type='dll'): - """Return the rc file used to generate the res file which will be embedded - as manifest for given manifest file name, of given type ('dll' or - 'exe'). 
- - Parameters - ---------- - name : str - name of the manifest file to embed - type : str {'dll', 'exe'} - type of the binary which will embed the manifest - - """ - if type == 'dll': - rctype = 2 - elif type == 'exe': - rctype = 1 - else: - raise ValueError("Type %s not supported" % type) - - return """\ -#include "winuser.h" -%d RT_MANIFEST %s""" % (rctype, name) - -def check_embedded_msvcr_match_linked(msver): - """msver is the ms runtime version used for the MANIFEST.""" - # check msvcr major version are the same for linking and - # embedding - maj = msvc_runtime_major() - if maj: - if not maj == int(msver): - raise ValueError( - "Discrepancy between linked msvcr " \ - "(%d) and the one about to be embedded " \ - "(%d)" % (int(msver), maj)) - -def configtest_name(config): - base = os.path.basename(config._gen_temp_sourcefile("yo", [], "c")) - return os.path.splitext(base)[0] - -def manifest_name(config): - # Get configest name (including suffix) - root = configtest_name(config) - exext = config.compiler.exe_extension - return root + exext + ".manifest" - -def rc_name(config): - # Get configtest name (including suffix) - root = configtest_name(config) - return root + ".rc" - -def generate_manifest(config): - msver = get_build_msvc_version() - if msver is not None: - if msver >= 8: - check_embedded_msvcr_match_linked(msver) - ma_str, mi_str = str(msver).split('.') - # Write the manifest file - manxml = msvc_manifest_xml(int(ma_str), int(mi_str)) - with open(manifest_name(config), "w") as man: - config.temp_files.append(manifest_name(config)) - man.write(manxml) diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py deleted file mode 100644 index 776eb8d3928b..000000000000 --- a/numpy/distutils/misc_util.py +++ /dev/null @@ -1,2493 +0,0 @@ -import os -import re -import sys -import copy -import glob -import atexit -import tempfile -import subprocess -import shutil -import multiprocessing -import textwrap -import importlib.util -from threading 
import local as tlocal -from functools import reduce - -import distutils -from distutils.errors import DistutilsError - -# stores temporary directory of each thread to only create one per thread -_tdata = tlocal() - -# store all created temporary directories so they can be deleted on exit -_tmpdirs = [] -def clean_up_temporary_directory(): - if _tmpdirs is not None: - for d in _tmpdirs: - try: - shutil.rmtree(d) - except OSError: - pass - -atexit.register(clean_up_temporary_directory) - -__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict', - 'dict_append', 'appendpath', 'generate_config_py', - 'get_cmd', 'allpath', 'get_mathlibs', - 'terminal_has_colors', 'red_text', 'green_text', 'yellow_text', - 'blue_text', 'cyan_text', 'cyg2win32', 'mingw32', 'all_strings', - 'has_f_sources', 'has_cxx_sources', 'filter_sources', - 'get_dependencies', 'is_local_src_dir', 'get_ext_source_files', - 'get_script_files', 'get_lib_source_files', 'get_data_files', - 'dot_join', 'get_frame', 'minrelpath', 'njoin', - 'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language', - 'get_build_architecture', 'get_info', 'get_pkg_info', - 'get_num_build_jobs', 'sanitize_cxx_flags', - 'exec_mod_from_location'] - -class InstallableLib: - """ - Container to hold information on an installable library. - - Parameters - ---------- - name : str - Name of the installed library. - build_info : dict - Dictionary holding build information. - target_dir : str - Absolute path specifying where to install the library. - - See Also - -------- - Configuration.add_installed_library - - Notes - ----- - The three parameters are stored as attributes with the same names. 
- - """ - def __init__(self, name, build_info, target_dir): - self.name = name - self.build_info = build_info - self.target_dir = target_dir - - -def get_num_build_jobs(): - """ - Get number of parallel build jobs set by the --parallel command line - argument of setup.py - If the command did not receive a setting the environment variable - NPY_NUM_BUILD_JOBS is checked. If that is unset, return the number of - processors on the system, with a maximum of 8 (to prevent - overloading the system if there a lot of CPUs). - - Returns - ------- - out : int - number of parallel jobs that can be run - - """ - from numpy.distutils.core import get_distribution - try: - cpu_count = len(os.sched_getaffinity(0)) - except AttributeError: - cpu_count = multiprocessing.cpu_count() - cpu_count = min(cpu_count, 8) - envjobs = int(os.environ.get("NPY_NUM_BUILD_JOBS", cpu_count)) - dist = get_distribution() - # may be None during configuration - if dist is None: - return envjobs - - # any of these three may have the job set, take the largest - cmdattr = (getattr(dist.get_command_obj('build'), 'parallel', None), - getattr(dist.get_command_obj('build_ext'), 'parallel', None), - getattr(dist.get_command_obj('build_clib'), 'parallel', None)) - if all(x is None for x in cmdattr): - return envjobs - else: - return max(x for x in cmdattr if x is not None) - -def quote_args(args): - """Quote list of arguments. - - .. deprecated:: 1.22. - """ - import warnings - warnings.warn('"quote_args" is deprecated.', - DeprecationWarning, stacklevel=2) - # don't used _nt_quote_args as it does not check if - # args items already have quotes or not. - args = list(args) - for i in range(len(args)): - a = args[i] - if ' ' in a and a[0] not in '"\'': - args[i] = '"%s"' % (a) - return args - -def allpath(name): - "Convert a /-separated pathname to one using the OS's path separator." 
- split = name.split('/') - return os.path.join(*split) - -def rel_path(path, parent_path): - """Return path relative to parent_path.""" - # Use realpath to avoid issues with symlinked dirs (see gh-7707) - pd = os.path.realpath(os.path.abspath(parent_path)) - apath = os.path.realpath(os.path.abspath(path)) - if len(apath) < len(pd): - return path - if apath == pd: - return '' - if pd == apath[:len(pd)]: - assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)])) - path = apath[len(pd)+1:] - return path - -def get_path_from_frame(frame, parent_path=None): - """Return path of the module given a frame object from the call stack. - - Returned path is relative to parent_path when given, - otherwise it is absolute path. - """ - - # First, try to find if the file name is in the frame. - try: - caller_file = eval('__file__', frame.f_globals, frame.f_locals) - d = os.path.dirname(os.path.abspath(caller_file)) - except NameError: - # __file__ is not defined, so let's try __name__. We try this second - # because setuptools spoofs __name__ to be '__main__' even though - # sys.modules['__main__'] might be something else, like easy_install(1). - caller_name = eval('__name__', frame.f_globals, frame.f_locals) - __import__(caller_name) - mod = sys.modules[caller_name] - if hasattr(mod, '__file__'): - d = os.path.dirname(os.path.abspath(mod.__file__)) - else: - # we're probably running setup.py as execfile("setup.py") - # (likely we're building an egg) - d = os.path.abspath('.') - - if parent_path is not None: - d = rel_path(d, parent_path) - - return d or '.' - -def njoin(*path): - """Join two or more pathname components + - - convert a /-separated pathname to one using the OS's path separator. - - resolve `..` and `.` from path. - - Either passing n arguments as in njoin('a','b'), or a sequence - of n names as in njoin(['a','b']) is handled, or a mixture of such arguments. 
- """ - paths = [] - for p in path: - if is_sequence(p): - # njoin(['a', 'b'], 'c') - paths.append(njoin(*p)) - else: - assert is_string(p) - paths.append(p) - path = paths - if not path: - # njoin() - joined = '' - else: - # njoin('a', 'b') - joined = os.path.join(*path) - if os.path.sep != '/': - joined = joined.replace('/', os.path.sep) - return minrelpath(joined) - -def get_mathlibs(path=None): - """Return the MATHLIB line from numpyconfig.h - """ - if path is not None: - config_file = os.path.join(path, '_numpyconfig.h') - else: - # Look for the file in each of the numpy include directories. - dirs = get_numpy_include_dirs() - for path in dirs: - fn = os.path.join(path, '_numpyconfig.h') - if os.path.exists(fn): - config_file = fn - break - else: - raise DistutilsError('_numpyconfig.h not found in numpy include ' - 'dirs %r' % (dirs,)) - - with open(config_file) as fid: - mathlibs = [] - s = '#define MATHLIB' - for line in fid: - if line.startswith(s): - value = line[len(s):].strip() - if value: - mathlibs.extend(value.split(',')) - return mathlibs - -def minrelpath(path): - """Resolve `..` and '.' from path. - """ - if not is_string(path): - return path - if '.' not in path: - return path - l = path.split(os.sep) - while l: - try: - i = l.index('.', 1) - except ValueError: - break - del l[i] - j = 1 - while l: - try: - i = l.index('..', j) - except ValueError: - break - if l[i-1]=='..': - j += 1 - else: - del l[i], l[i-1] - j = 1 - if not l: - return '' - return os.sep.join(l) - -def sorted_glob(fileglob): - """sorts output of python glob for https://bugs.python.org/issue30461 - to allow extensions to have reproducible build results""" - return sorted(glob.glob(fileglob)) - -def _fix_paths(paths, local_path, include_non_existing): - assert is_sequence(paths), repr(type(paths)) - new_paths = [] - assert not is_string(paths), repr(paths) - for n in paths: - if is_string(n): - if '*' in n or '?' 
in n: - p = sorted_glob(n) - p2 = sorted_glob(njoin(local_path, n)) - if p2: - new_paths.extend(p2) - elif p: - new_paths.extend(p) - else: - if include_non_existing: - new_paths.append(n) - print('could not resolve pattern in %r: %r' % - (local_path, n)) - else: - n2 = njoin(local_path, n) - if os.path.exists(n2): - new_paths.append(n2) - else: - if os.path.exists(n): - new_paths.append(n) - elif include_non_existing: - new_paths.append(n) - if not os.path.exists(n): - print('non-existing path in %r: %r' % - (local_path, n)) - - elif is_sequence(n): - new_paths.extend(_fix_paths(n, local_path, include_non_existing)) - else: - new_paths.append(n) - return [minrelpath(p) for p in new_paths] - -def gpaths(paths, local_path='', include_non_existing=True): - """Apply glob to paths and prepend local_path if needed. - """ - if is_string(paths): - paths = (paths,) - return _fix_paths(paths, local_path, include_non_existing) - -def make_temp_file(suffix='', prefix='', text=True): - if not hasattr(_tdata, 'tempdir'): - _tdata.tempdir = tempfile.mkdtemp() - _tmpdirs.append(_tdata.tempdir) - fid, name = tempfile.mkstemp(suffix=suffix, - prefix=prefix, - dir=_tdata.tempdir, - text=text) - fo = os.fdopen(fid, 'w') - return fo, name - -# Hooks for colored terminal output. 
-# See also https://web.archive.org/web/20100314204946/http://www.livinglogic.de/Python/ansistyle -def terminal_has_colors(): - if sys.platform=='cygwin' and 'USE_COLOR' not in os.environ: - # Avoid importing curses that causes illegal operation - # with a message: - # PYTHON2 caused an invalid page fault in - # module CYGNURSES7.DLL as 015f:18bbfc28 - # Details: Python 2.3.3 [GCC 3.3.1 (cygming special)] - # ssh to Win32 machine from debian - # curses.version is 2.2 - # CYGWIN_98-4.10, release 1.5.7(0.109/3/2)) - return 0 - if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty(): - try: - import curses - curses.setupterm() - if (curses.tigetnum("colors") >= 0 - and curses.tigetnum("pairs") >= 0 - and ((curses.tigetstr("setf") is not None - and curses.tigetstr("setb") is not None) - or (curses.tigetstr("setaf") is not None - and curses.tigetstr("setab") is not None) - or curses.tigetstr("scp") is not None)): - return 1 - except Exception: - pass - return 0 - -if terminal_has_colors(): - _colour_codes = dict(black=0, red=1, green=2, yellow=3, - blue=4, magenta=5, cyan=6, white=7, default=9) - def colour_text(s, fg=None, bg=None, bold=False): - seq = [] - if bold: - seq.append('1') - if fg: - fgcode = 30 + _colour_codes.get(fg.lower(), 0) - seq.append(str(fgcode)) - if bg: - bgcode = 40 + _colour_codes.get(bg.lower(), 7) - seq.append(str(bgcode)) - if seq: - return '\x1b[%sm%s\x1b[0m' % (';'.join(seq), s) - else: - return s -else: - def colour_text(s, fg=None, bg=None): - return s - -def default_text(s): - return colour_text(s, 'default') -def red_text(s): - return colour_text(s, 'red') -def green_text(s): - return colour_text(s, 'green') -def yellow_text(s): - return colour_text(s, 'yellow') -def cyan_text(s): - return colour_text(s, 'cyan') -def blue_text(s): - return colour_text(s, 'blue') - -######################### - -def cyg2win32(path: str) -> str: - """Convert a path from Cygwin-native to Windows-native. 
- - Uses the cygpath utility (part of the Base install) to do the - actual conversion. Falls back to returning the original path if - this fails. - - Handles the default ``/cygdrive`` mount prefix as well as the - ``/proc/cygdrive`` portable prefix, custom cygdrive prefixes such - as ``/`` or ``/mnt``, and absolute paths such as ``/usr/src/`` or - ``/home/username`` - - Parameters - ---------- - path : str - The path to convert - - Returns - ------- - converted_path : str - The converted path - - Notes - ----- - Documentation for cygpath utility: - https://cygwin.com/cygwin-ug-net/cygpath.html - Documentation for the C function it wraps: - https://cygwin.com/cygwin-api/func-cygwin-conv-path.html - - """ - if sys.platform != "cygwin": - return path - return subprocess.check_output( - ["/usr/bin/cygpath", "--windows", path], text=True - ) - - -def mingw32(): - """Return true when using mingw32 environment. - """ - if sys.platform=='win32': - if os.environ.get('OSTYPE', '')=='msys': - return True - if os.environ.get('MSYSTEM', '')=='MINGW32': - return True - return False - -def msvc_runtime_version(): - "Return version of MSVC runtime library, as defined by __MSC_VER__ macro" - msc_pos = sys.version.find('MSC v.') - if msc_pos != -1: - msc_ver = int(sys.version[msc_pos+6:msc_pos+10]) - else: - msc_ver = None - return msc_ver - -def msvc_runtime_library(): - "Return name of MSVC runtime library if Python was built with MSVC >= 7" - ver = msvc_runtime_major () - if ver: - if ver < 140: - return "msvcr%i" % ver - else: - return "vcruntime%i" % ver - else: - return None - -def msvc_runtime_major(): - "Return major version of MSVC runtime coded like get_build_msvc_version" - major = {1300: 70, # MSVC 7.0 - 1310: 71, # MSVC 7.1 - 1400: 80, # MSVC 8 - 1500: 90, # MSVC 9 (aka 2008) - 1600: 100, # MSVC 10 (aka 2010) - 1900: 140, # MSVC 14 (aka 2015) - }.get(msvc_runtime_version(), None) - return major - -######################### - -#XXX need support for .C that is also C++ 
-cxx_ext_match = re.compile(r'.*\.(cpp|cxx|cc)\Z', re.I).match -fortran_ext_match = re.compile(r'.*\.(f90|f95|f77|for|ftn|f)\Z', re.I).match -f90_ext_match = re.compile(r'.*\.(f90|f95)\Z', re.I).match -f90_module_name_match = re.compile(r'\s*module\s*(?P[\w_]+)', re.I).match -def _get_f90_modules(source): - """Return a list of Fortran f90 module names that - given source file defines. - """ - if not f90_ext_match(source): - return [] - modules = [] - with open(source) as f: - for line in f: - m = f90_module_name_match(line) - if m: - name = m.group('name') - modules.append(name) - # break # XXX can we assume that there is one module per file? - return modules - -def is_string(s): - return isinstance(s, str) - -def all_strings(lst): - """Return True if all items in lst are string objects. """ - for item in lst: - if not is_string(item): - return False - return True - -def is_sequence(seq): - if is_string(seq): - return False - try: - len(seq) - except Exception: - return False - return True - -def is_glob_pattern(s): - return is_string(s) and ('*' in s or '?' 
in s) - -def as_list(seq): - if is_sequence(seq): - return list(seq) - else: - return [seq] - -def get_language(sources): - # not used in numpy/scipy packages, use build_ext.detect_language instead - """Determine language value (c,f77,f90) from sources """ - language = None - for source in sources: - if isinstance(source, str): - if f90_ext_match(source): - language = 'f90' - break - elif fortran_ext_match(source): - language = 'f77' - return language - -def has_f_sources(sources): - """Return True if sources contains Fortran files """ - for source in sources: - if fortran_ext_match(source): - return True - return False - -def has_cxx_sources(sources): - """Return True if sources contains C++ files """ - for source in sources: - if cxx_ext_match(source): - return True - return False - -def filter_sources(sources): - """Return four lists of filenames containing - C, C++, Fortran, and Fortran 90 module sources, - respectively. - """ - c_sources = [] - cxx_sources = [] - f_sources = [] - fmodule_sources = [] - for source in sources: - if fortran_ext_match(source): - modules = _get_f90_modules(source) - if modules: - fmodule_sources.append(source) - else: - f_sources.append(source) - elif cxx_ext_match(source): - cxx_sources.append(source) - else: - c_sources.append(source) - return c_sources, cxx_sources, f_sources, fmodule_sources - - -def _get_headers(directory_list): - # get *.h files from list of directories - headers = [] - for d in directory_list: - head = sorted_glob(os.path.join(d, "*.h")) #XXX: *.hpp files?? - headers.extend(head) - return headers - -def _get_directories(list_of_sources): - # get unique directories from list of sources. 
- direcs = [] - for f in list_of_sources: - d = os.path.split(f) - if d[0] != '' and not d[0] in direcs: - direcs.append(d[0]) - return direcs - -def _commandline_dep_string(cc_args, extra_postargs, pp_opts): - """ - Return commandline representation used to determine if a file needs - to be recompiled - """ - cmdline = 'commandline: ' - cmdline += ' '.join(cc_args) - cmdline += ' '.join(extra_postargs) - cmdline += ' '.join(pp_opts) + '\n' - return cmdline - - -def get_dependencies(sources): - #XXX scan sources for include statements - return _get_headers(_get_directories(sources)) - -def is_local_src_dir(directory): - """Return true if directory is local directory. - """ - if not is_string(directory): - return False - abs_dir = os.path.abspath(directory) - c = os.path.commonprefix([os.getcwd(), abs_dir]) - new_dir = abs_dir[len(c):].split(os.sep) - if new_dir and not new_dir[0]: - new_dir = new_dir[1:] - if new_dir and new_dir[0]=='build': - return False - new_dir = os.sep.join(new_dir) - return os.path.isdir(new_dir) - -def general_source_files(top_path): - pruned_directories = {'CVS':1, '.svn':1, 'build':1} - prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') - for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): - pruned = [ d for d in dirnames if d not in pruned_directories ] - dirnames[:] = pruned - for f in filenames: - if not prune_file_pat.search(f): - yield os.path.join(dirpath, f) - -def general_source_directories_files(top_path): - """Return a directory name relative to top_path and - files contained. 
- """ - pruned_directories = ['CVS', '.svn', 'build'] - prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') - for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): - pruned = [ d for d in dirnames if d not in pruned_directories ] - dirnames[:] = pruned - for d in dirnames: - dpath = os.path.join(dirpath, d) - rpath = rel_path(dpath, top_path) - files = [] - for f in os.listdir(dpath): - fn = os.path.join(dpath, f) - if os.path.isfile(fn) and not prune_file_pat.search(fn): - files.append(fn) - yield rpath, files - dpath = top_path - rpath = rel_path(dpath, top_path) - filenames = [os.path.join(dpath, f) for f in os.listdir(dpath) \ - if not prune_file_pat.search(f)] - files = [f for f in filenames if os.path.isfile(f)] - yield rpath, files - - -def get_ext_source_files(ext): - # Get sources and any include files in the same directory. - filenames = [] - sources = [_m for _m in ext.sources if is_string(_m)] - filenames.extend(sources) - filenames.extend(get_dependencies(sources)) - for d in ext.depends: - if is_local_src_dir(d): - filenames.extend(list(general_source_files(d))) - elif os.path.isfile(d): - filenames.append(d) - return filenames - -def get_script_files(scripts): - scripts = [_m for _m in scripts if is_string(_m)] - return scripts - -def get_lib_source_files(lib): - filenames = [] - sources = lib[1].get('sources', []) - sources = [_m for _m in sources if is_string(_m)] - filenames.extend(sources) - filenames.extend(get_dependencies(sources)) - depends = lib[1].get('depends', []) - for d in depends: - if is_local_src_dir(d): - filenames.extend(list(general_source_files(d))) - elif os.path.isfile(d): - filenames.append(d) - return filenames - -def get_shared_lib_extension(is_python_ext=False): - """Return the correct file extension for shared libraries. - - Parameters - ---------- - is_python_ext : bool, optional - Whether the shared library is a Python extension. Default is False. 
- - Returns - ------- - so_ext : str - The shared library extension. - - Notes - ----- - For Python shared libs, `so_ext` will typically be '.so' on Linux and OS X, - and '.pyd' on Windows. For Python >= 3.2 `so_ext` has a tag prepended on - POSIX systems according to PEP 3149. - - """ - confvars = distutils.sysconfig.get_config_vars() - so_ext = confvars.get('EXT_SUFFIX', '') - - if not is_python_ext: - # hardcode known values, config vars (including SHLIB_SUFFIX) are - # unreliable (see #3182) - # darwin, windows and debug linux are wrong in 3.3.1 and older - if (sys.platform.startswith('linux') or - sys.platform.startswith('gnukfreebsd')): - so_ext = '.so' - elif sys.platform.startswith('darwin'): - so_ext = '.dylib' - elif sys.platform.startswith('win'): - so_ext = '.dll' - else: - # fall back to config vars for unknown platforms - # fix long extension for Python >=3.2, see PEP 3149. - if 'SOABI' in confvars: - # Does nothing unless SOABI config var exists - so_ext = so_ext.replace('.' + confvars.get('SOABI'), '', 1) - - return so_ext - -def get_data_files(data): - if is_string(data): - return [data] - sources = data[1] - filenames = [] - for s in sources: - if hasattr(s, '__call__'): - continue - if is_local_src_dir(s): - filenames.extend(list(general_source_files(s))) - elif is_string(s): - if os.path.isfile(s): - filenames.append(s) - else: - print('Not existing data file:', s) - else: - raise TypeError(repr(s)) - return filenames - -def dot_join(*args): - return '.'.join([a for a in args if a]) - -def get_frame(level=0): - """Return frame object from call stack with given level. 
- """ - try: - return sys._getframe(level+1) - except AttributeError: - frame = sys.exc_info()[2].tb_frame - for _ in range(level+1): - frame = frame.f_back - return frame - - -###################### - -class Configuration: - - _list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs', - 'libraries', 'headers', 'scripts', 'py_modules', - 'installed_libraries', 'define_macros'] - _dict_keys = ['package_dir', 'installed_pkg_config'] - _extra_keys = ['name', 'version'] - - numpy_include_dirs = [] - - def __init__(self, - package_name=None, - parent_name=None, - top_path=None, - package_path=None, - caller_level=1, - setup_name='setup.py', - **attrs): - """Construct configuration instance of a package. - - package_name -- name of the package - Ex.: 'distutils' - parent_name -- name of the parent package - Ex.: 'numpy' - top_path -- directory of the toplevel package - Ex.: the directory where the numpy package source sits - package_path -- directory of package. Will be computed by magic from the - directory of the caller module if not specified - Ex.: the directory where numpy.distutils is - caller_level -- frame level to caller namespace, internal parameter. - """ - self.name = dot_join(parent_name, package_name) - self.version = None - - caller_frame = get_frame(caller_level) - self.local_path = get_path_from_frame(caller_frame, top_path) - # local_path -- directory of a file (usually setup.py) that - # defines a configuration() function. - # local_path -- directory of a file (usually setup.py) that - # defines a configuration() function. 
- if top_path is None: - top_path = self.local_path - self.local_path = '' - if package_path is None: - package_path = self.local_path - elif os.path.isdir(njoin(self.local_path, package_path)): - package_path = njoin(self.local_path, package_path) - if not os.path.isdir(package_path or '.'): - raise ValueError("%r is not a directory" % (package_path,)) - self.top_path = top_path - self.package_path = package_path - # this is the relative path in the installed package - self.path_in_package = os.path.join(*self.name.split('.')) - - self.list_keys = self._list_keys[:] - self.dict_keys = self._dict_keys[:] - - for n in self.list_keys: - v = copy.copy(attrs.get(n, [])) - setattr(self, n, as_list(v)) - - for n in self.dict_keys: - v = copy.copy(attrs.get(n, {})) - setattr(self, n, v) - - known_keys = self.list_keys + self.dict_keys - self.extra_keys = self._extra_keys[:] - for n in attrs.keys(): - if n in known_keys: - continue - a = attrs[n] - setattr(self, n, a) - if isinstance(a, list): - self.list_keys.append(n) - elif isinstance(a, dict): - self.dict_keys.append(n) - else: - self.extra_keys.append(n) - - if os.path.exists(njoin(package_path, '__init__.py')): - self.packages.append(self.name) - self.package_dir[self.name] = package_path - - self.options = dict( - ignore_setup_xxx_py = False, - assume_default_configuration = False, - delegate_options_to_subpackages = False, - quiet = False, - ) - - caller_instance = None - for i in range(1, 3): - try: - f = get_frame(i) - except ValueError: - break - try: - caller_instance = eval('self', f.f_globals, f.f_locals) - break - except NameError: - pass - if isinstance(caller_instance, self.__class__): - if caller_instance.options['delegate_options_to_subpackages']: - self.set_options(**caller_instance.options) - - self.setup_name = setup_name - - def todict(self): - """ - Return a dictionary compatible with the keyword arguments of distutils - setup function. 
- - Examples - -------- - >>> setup(**config.todict()) #doctest: +SKIP - """ - - self._optimize_data_files() - d = {} - known_keys = self.list_keys + self.dict_keys + self.extra_keys - for n in known_keys: - a = getattr(self, n) - if a: - d[n] = a - return d - - def info(self, message): - if not self.options['quiet']: - print(message) - - def warn(self, message): - sys.stderr.write('Warning: %s\n' % (message,)) - - def set_options(self, **options): - """ - Configure Configuration instance. - - The following options are available: - - ignore_setup_xxx_py - - assume_default_configuration - - delegate_options_to_subpackages - - quiet - - """ - for key, value in options.items(): - if key in self.options: - self.options[key] = value - else: - raise ValueError('Unknown option: '+key) - - def get_distribution(self): - """Return the distutils distribution object for self.""" - from numpy.distutils.core import get_distribution - return get_distribution() - - def _wildcard_get_subpackage(self, subpackage_name, - parent_name, - caller_level = 1): - l = subpackage_name.split('.') - subpackage_path = njoin([self.local_path]+l) - dirs = [_m for _m in sorted_glob(subpackage_path) if os.path.isdir(_m)] - config_list = [] - for d in dirs: - if not os.path.isfile(njoin(d, '__init__.py')): - continue - if 'build' in d.split(os.sep): - continue - n = '.'.join(d.split(os.sep)[-len(l):]) - c = self.get_subpackage(n, - parent_name = parent_name, - caller_level = caller_level+1) - config_list.extend(c) - return config_list - - def _get_configuration_from_setup_py(self, setup_py, - subpackage_name, - subpackage_path, - parent_name, - caller_level = 1): - # In case setup_py imports local modules: - sys.path.insert(0, os.path.dirname(setup_py)) - try: - setup_name = os.path.splitext(os.path.basename(setup_py))[0] - n = dot_join(self.name, subpackage_name, setup_name) - setup_module = exec_mod_from_location( - '_'.join(n.split('.')), setup_py) - if not hasattr(setup_module, 'configuration'): 
- if not self.options['assume_default_configuration']: - self.warn('Assuming default configuration '\ - '(%s does not define configuration())'\ - % (setup_module)) - config = Configuration(subpackage_name, parent_name, - self.top_path, subpackage_path, - caller_level = caller_level + 1) - else: - pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1])) - args = (pn,) - if setup_module.configuration.__code__.co_argcount > 1: - args = args + (self.top_path,) - config = setup_module.configuration(*args) - if config.name!=dot_join(parent_name, subpackage_name): - self.warn('Subpackage %r configuration returned as %r' % \ - (dot_join(parent_name, subpackage_name), config.name)) - finally: - del sys.path[0] - return config - - def get_subpackage(self,subpackage_name, - subpackage_path=None, - parent_name=None, - caller_level = 1): - """Return list of subpackage configurations. - - Parameters - ---------- - subpackage_name : str or None - Name of the subpackage to get the configuration. '*' in - subpackage_name is handled as a wildcard. - subpackage_path : str - If None, then the path is assumed to be the local path plus the - subpackage_name. If a setup.py file is not found in the - subpackage_path, then a default configuration is used. - parent_name : str - Parent name. 
- """ - if subpackage_name is None: - if subpackage_path is None: - raise ValueError( - "either subpackage_name or subpackage_path must be specified") - subpackage_name = os.path.basename(subpackage_path) - - # handle wildcards - l = subpackage_name.split('.') - if subpackage_path is None and '*' in subpackage_name: - return self._wildcard_get_subpackage(subpackage_name, - parent_name, - caller_level = caller_level+1) - assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name)) - if subpackage_path is None: - subpackage_path = njoin([self.local_path] + l) - else: - subpackage_path = njoin([subpackage_path] + l[:-1]) - subpackage_path = self.paths([subpackage_path])[0] - setup_py = njoin(subpackage_path, self.setup_name) - if not self.options['ignore_setup_xxx_py']: - if not os.path.isfile(setup_py): - setup_py = njoin(subpackage_path, - 'setup_%s.py' % (subpackage_name)) - if not os.path.isfile(setup_py): - if not self.options['assume_default_configuration']: - self.warn('Assuming default configuration '\ - '(%s/{setup_%s,setup}.py was not found)' \ - % (os.path.dirname(setup_py), subpackage_name)) - config = Configuration(subpackage_name, parent_name, - self.top_path, subpackage_path, - caller_level = caller_level+1) - else: - config = self._get_configuration_from_setup_py( - setup_py, - subpackage_name, - subpackage_path, - parent_name, - caller_level = caller_level + 1) - if config: - return [config] - else: - return [] - - def add_subpackage(self,subpackage_name, - subpackage_path=None, - standalone = False): - """Add a sub-package to the current Configuration instance. - - This is useful in a setup.py script for adding sub-packages to a - package. - - Parameters - ---------- - subpackage_name : str - name of the subpackage - subpackage_path : str - if given, the subpackage path such as the subpackage is in - subpackage_path / subpackage_name. If None,the subpackage is - assumed to be located in the local path / subpackage_name. 
- standalone : bool - """ - - if standalone: - parent_name = None - else: - parent_name = self.name - config_list = self.get_subpackage(subpackage_name, subpackage_path, - parent_name = parent_name, - caller_level = 2) - if not config_list: - self.warn('No configuration returned, assuming unavailable.') - for config in config_list: - d = config - if isinstance(config, Configuration): - d = config.todict() - assert isinstance(d, dict), repr(type(d)) - - self.info('Appending %s configuration to %s' \ - % (d.get('name'), self.name)) - self.dict_append(**d) - - dist = self.get_distribution() - if dist is not None: - self.warn('distutils distribution has been initialized,'\ - ' it may be too late to add a subpackage '+ subpackage_name) - - def add_data_dir(self, data_path): - """Recursively add files under data_path to data_files list. - - Recursively add files under data_path to the list of data_files to be - installed (and distributed). The data_path can be either a relative - path-name, or an absolute path-name, or a 2-tuple where the first - argument shows where in the install directory the data directory - should be installed to. - - Parameters - ---------- - data_path : seq or str - Argument can be either - - * 2-sequence (, ) - * path to data directory where python datadir suffix defaults - to package dir. 
- - Notes - ----- - Rules for installation paths:: - - foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar - (gun, foo/bar) -> parent/gun - foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b - (gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun - (gun/*, foo/*) -> parent/gun/a, parent/gun/b - /foo/bar -> (bar, /foo/bar) -> parent/bar - (gun, /foo/bar) -> parent/gun - (fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar - - Examples - -------- - For example suppose the source directory contains fun/foo.dat and - fun/bar/car.dat: - - >>> self.add_data_dir('fun') #doctest: +SKIP - >>> self.add_data_dir(('sun', 'fun')) #doctest: +SKIP - >>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP - - Will install data-files to the locations:: - - / - fun/ - foo.dat - bar/ - car.dat - sun/ - foo.dat - bar/ - car.dat - gun/ - foo.dat - car.dat - - """ - if is_sequence(data_path): - d, data_path = data_path - else: - d = None - if is_sequence(data_path): - [self.add_data_dir((d, p)) for p in data_path] - return - if not is_string(data_path): - raise TypeError("not a string: %r" % (data_path,)) - if d is None: - if os.path.isabs(data_path): - return self.add_data_dir((os.path.basename(data_path), data_path)) - return self.add_data_dir((data_path, data_path)) - paths = self.paths(data_path, include_non_existing=False) - if is_glob_pattern(data_path): - if is_glob_pattern(d): - pattern_list = allpath(d).split(os.sep) - pattern_list.reverse() - # /a/*//b/ -> /a/*/b - rl = list(range(len(pattern_list)-1)); rl.reverse() - for i in rl: - if not pattern_list[i]: - del pattern_list[i] - # - for path in paths: - if not os.path.isdir(path): - print('Not a directory, skipping', path) - continue - rpath = rel_path(path, self.local_path) - path_list = rpath.split(os.sep) - path_list.reverse() - target_list = [] - i = 0 - for s in pattern_list: - if is_glob_pattern(s): - if i>=len(path_list): - raise ValueError('cannot fill pattern %r with %r' \ - % (d, path)) - 
target_list.append(path_list[i]) - else: - assert s==path_list[i], repr((s, path_list[i], data_path, d, path, rpath)) - target_list.append(s) - i += 1 - if path_list[i:]: - self.warn('mismatch of pattern_list=%s and path_list=%s'\ - % (pattern_list, path_list)) - target_list.reverse() - self.add_data_dir((os.sep.join(target_list), path)) - else: - for path in paths: - self.add_data_dir((d, path)) - return - assert not is_glob_pattern(d), repr(d) - - dist = self.get_distribution() - if dist is not None and dist.data_files is not None: - data_files = dist.data_files - else: - data_files = self.data_files - - for path in paths: - for d1, f in list(general_source_directories_files(path)): - target_path = os.path.join(self.path_in_package, d, d1) - data_files.append((target_path, f)) - - def _optimize_data_files(self): - data_dict = {} - for p, files in self.data_files: - if p not in data_dict: - data_dict[p] = set() - for f in files: - data_dict[p].add(f) - self.data_files[:] = [(p, list(files)) for p, files in data_dict.items()] - - def add_data_files(self,*files): - """Add data files to configuration data_files. - - Parameters - ---------- - files : sequence - Argument(s) can be either - - * 2-sequence (,) - * paths to data files where python datadir prefix defaults - to package dir. - - Notes - ----- - The form of each element of the files sequence is very flexible - allowing many combinations of where to get the files from the package - and where they should ultimately be installed on the system. The most - basic usage is for an element of the files argument sequence to be a - simple filename. This will cause that file from the local path to be - installed to the installation path of the self.name package (package - path). The file argument can also be a relative path in which case the - entire relative path will be installed into the package directory. 
- Finally, the file can be an absolute path name in which case the file - will be found at the absolute path name but installed to the package - path. - - This basic behavior can be augmented by passing a 2-tuple in as the - file argument. The first element of the tuple should specify the - relative path (under the package install directory) where the - remaining sequence of files should be installed to (it has nothing to - do with the file-names in the source distribution). The second element - of the tuple is the sequence of files that should be installed. The - files in this sequence can be filenames, relative paths, or absolute - paths. For absolute paths the file will be installed in the top-level - package installation directory (regardless of the first argument). - Filenames and relative path names will be installed in the package - install directory under the path name given as the first element of - the tuple. - - Rules for installation paths: - - #. file.txt -> (., file.txt)-> parent/file.txt - #. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt - #. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt - #. ``*``.txt -> parent/a.txt, parent/b.txt - #. foo/``*``.txt`` -> parent/foo/a.txt, parent/foo/b.txt - #. ``*/*.txt`` -> (``*``, ``*``/``*``.txt) -> parent/c/a.txt, parent/d/b.txt - #. (sun, file.txt) -> parent/sun/file.txt - #. (sun, bar/file.txt) -> parent/sun/file.txt - #. (sun, /foo/bar/file.txt) -> parent/sun/file.txt - #. (sun, ``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt - #. (sun, bar/``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt - #. (sun/``*``, ``*``/``*``.txt) -> parent/sun/c/a.txt, parent/d/b.txt - - An additional feature is that the path to a data-file can actually be - a function that takes no arguments and returns the actual path(s) to - the data-files. This is useful when the data files are generated while - building the package. 
- - Examples - -------- - Add files to the list of data_files to be included with the package. - - >>> self.add_data_files('foo.dat', - ... ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']), - ... 'bar/cat.dat', - ... '/full/path/to/can.dat') #doctest: +SKIP - - will install these data files to:: - - / - foo.dat - fun/ - gun.dat - nun/ - pun.dat - sun.dat - bar/ - car.dat - can.dat - - where is the package (or sub-package) - directory such as '/usr/lib/python2.4/site-packages/mypackage' ('C: - \\Python2.4 \\Lib \\site-packages \\mypackage') or - '/usr/lib/python2.4/site- packages/mypackage/mysubpackage' ('C: - \\Python2.4 \\Lib \\site-packages \\mypackage \\mysubpackage'). - """ - - if len(files)>1: - for f in files: - self.add_data_files(f) - return - assert len(files)==1 - if is_sequence(files[0]): - d, files = files[0] - else: - d = None - if is_string(files): - filepat = files - elif is_sequence(files): - if len(files)==1: - filepat = files[0] - else: - for f in files: - self.add_data_files((d, f)) - return - else: - raise TypeError(repr(type(files))) - - if d is None: - if hasattr(filepat, '__call__'): - d = '' - elif os.path.isabs(filepat): - d = '' - else: - d = os.path.dirname(filepat) - self.add_data_files((d, files)) - return - - paths = self.paths(filepat, include_non_existing=False) - if is_glob_pattern(filepat): - if is_glob_pattern(d): - pattern_list = d.split(os.sep) - pattern_list.reverse() - for path in paths: - path_list = path.split(os.sep) - path_list.reverse() - path_list.pop() # filename - target_list = [] - i = 0 - for s in pattern_list: - if is_glob_pattern(s): - target_list.append(path_list[i]) - i += 1 - else: - target_list.append(s) - target_list.reverse() - self.add_data_files((os.sep.join(target_list), path)) - else: - self.add_data_files((d, paths)) - return - assert not is_glob_pattern(d), repr((d, filepat)) - - dist = self.get_distribution() - if dist is not None and dist.data_files is not None: - data_files = dist.data_files - 
else: - data_files = self.data_files - - data_files.append((os.path.join(self.path_in_package, d), paths)) - - ### XXX Implement add_py_modules - - def add_define_macros(self, macros): - """Add define macros to configuration - - Add the given sequence of macro name and value duples to the beginning - of the define_macros list This list will be visible to all extension - modules of the current package. - """ - dist = self.get_distribution() - if dist is not None: - if not hasattr(dist, 'define_macros'): - dist.define_macros = [] - dist.define_macros.extend(macros) - else: - self.define_macros.extend(macros) - - - def add_include_dirs(self,*paths): - """Add paths to configuration include directories. - - Add the given sequence of paths to the beginning of the include_dirs - list. This list will be visible to all extension modules of the - current package. - """ - include_dirs = self.paths(paths) - dist = self.get_distribution() - if dist is not None: - if dist.include_dirs is None: - dist.include_dirs = [] - dist.include_dirs.extend(include_dirs) - else: - self.include_dirs.extend(include_dirs) - - def add_headers(self,*files): - """Add installable headers to configuration. - - Add the given sequence of files to the beginning of the headers list. - By default, headers will be installed under // directory. If an item of files - is a tuple, then its first argument specifies the actual installation - location relative to the path. - - Parameters - ---------- - files : str or seq - Argument(s) can be either: - - * 2-sequence (,) - * path(s) to header file(s) where python includedir suffix will - default to package name. 
- """ - headers = [] - for path in files: - if is_string(path): - [headers.append((self.name, p)) for p in self.paths(path)] - else: - if not isinstance(path, (tuple, list)) or len(path) != 2: - raise TypeError(repr(path)) - [headers.append((path[0], p)) for p in self.paths(path[1])] - dist = self.get_distribution() - if dist is not None: - if dist.headers is None: - dist.headers = [] - dist.headers.extend(headers) - else: - self.headers.extend(headers) - - def paths(self,*paths,**kws): - """Apply glob to paths and prepend local_path if needed. - - Applies glob.glob(...) to each path in the sequence (if needed) and - pre-pends the local_path if needed. Because this is called on all - source lists, this allows wildcard characters to be specified in lists - of sources for extension modules and libraries and scripts and allows - path-names be relative to the source directory. - - """ - include_non_existing = kws.get('include_non_existing', True) - return gpaths(paths, - local_path = self.local_path, - include_non_existing=include_non_existing) - - def _fix_paths_dict(self, kw): - for k in kw.keys(): - v = kw[k] - if k in ['sources', 'depends', 'include_dirs', 'library_dirs', - 'module_dirs', 'extra_objects']: - new_v = self.paths(v) - kw[k] = new_v - - def add_extension(self,name,sources,**kw): - """Add extension to configuration. - - Create and add an Extension instance to the ext_modules list. This - method also takes the following optional keyword arguments that are - passed on to the Extension constructor. - - Parameters - ---------- - name : str - name of the extension - sources : seq - list of the sources. The list of sources may contain functions - (called source generators) which must take an extension instance - and a build directory as inputs and return a source file or list of - source files or None. If None is returned then no sources are - generated. 
If the Extension instance has no sources after - processing all source generators, then no extension module is - built. - include_dirs : - define_macros : - undef_macros : - library_dirs : - libraries : - runtime_library_dirs : - extra_objects : - extra_compile_args : - extra_link_args : - extra_f77_compile_args : - extra_f90_compile_args : - export_symbols : - swig_opts : - depends : - The depends list contains paths to files or directories that the - sources of the extension module depend on. If any path in the - depends list is newer than the extension module, then the module - will be rebuilt. - language : - f2py_options : - module_dirs : - extra_info : dict or list - dict or list of dict of keywords to be appended to keywords. - - Notes - ----- - The self.paths(...) method is applied to all lists that may contain - paths. - """ - ext_args = copy.copy(kw) - ext_args['name'] = dot_join(self.name, name) - ext_args['sources'] = sources - - if 'extra_info' in ext_args: - extra_info = ext_args['extra_info'] - del ext_args['extra_info'] - if isinstance(extra_info, dict): - extra_info = [extra_info] - for info in extra_info: - assert isinstance(info, dict), repr(info) - dict_append(ext_args,**info) - - self._fix_paths_dict(ext_args) - - # Resolve out-of-tree dependencies - libraries = ext_args.get('libraries', []) - libnames = [] - ext_args['libraries'] = [] - for libname in libraries: - if isinstance(libname, tuple): - self._fix_paths_dict(libname[1]) - - # Handle library names of the form libname@relative/path/to/library - if '@' in libname: - lname, lpath = libname.split('@', 1) - lpath = os.path.abspath(njoin(self.local_path, lpath)) - if os.path.isdir(lpath): - c = self.get_subpackage(None, lpath, - caller_level = 2) - if isinstance(c, Configuration): - c = c.todict() - for l in [l[0] for l in c.get('libraries', [])]: - llname = l.split('__OF__', 1)[0] - if llname == lname: - c.pop('name', None) - dict_append(ext_args,**c) - break - continue - 
libnames.append(libname) - - ext_args['libraries'] = libnames + ext_args['libraries'] - ext_args['define_macros'] = \ - self.define_macros + ext_args.get('define_macros', []) - - from numpy.distutils.core import Extension - ext = Extension(**ext_args) - self.ext_modules.append(ext) - - dist = self.get_distribution() - if dist is not None: - self.warn('distutils distribution has been initialized,'\ - ' it may be too late to add an extension '+name) - return ext - - def add_library(self,name,sources,**build_info): - """ - Add library to configuration. - - Parameters - ---------- - name : str - Name of the extension. - sources : sequence - List of the sources. The list of sources may contain functions - (called source generators) which must take an extension instance - and a build directory as inputs and return a source file or list of - source files or None. If None is returned then no sources are - generated. If the Extension instance has no sources after - processing all source generators, then no extension module is - built. - build_info : dict, optional - The following keys are allowed: - - * depends - * macros - * include_dirs - * extra_compiler_args - * extra_f77_compile_args - * extra_f90_compile_args - * f2py_options - * language - - """ - self._add_library(name, sources, None, build_info) - - dist = self.get_distribution() - if dist is not None: - self.warn('distutils distribution has been initialized,'\ - ' it may be too late to add a library '+ name) - - def _add_library(self, name, sources, install_dir, build_info): - """Common implementation for add_library and add_installed_library. 
Do - not use directly""" - build_info = copy.copy(build_info) - build_info['sources'] = sources - - # Sometimes, depends is not set up to an empty list by default, and if - # depends is not given to add_library, distutils barfs (#1134) - if not 'depends' in build_info: - build_info['depends'] = [] - - self._fix_paths_dict(build_info) - - # Add to libraries list so that it is build with build_clib - self.libraries.append((name, build_info)) - - def add_installed_library(self, name, sources, install_dir, build_info=None): - """ - Similar to add_library, but the specified library is installed. - - Most C libraries used with ``distutils`` are only used to build python - extensions, but libraries built through this method will be installed - so that they can be reused by third-party packages. - - Parameters - ---------- - name : str - Name of the installed library. - sources : sequence - List of the library's source files. See `add_library` for details. - install_dir : str - Path to install the library, relative to the current sub-package. - build_info : dict, optional - The following keys are allowed: - - * depends - * macros - * include_dirs - * extra_compiler_args - * extra_f77_compile_args - * extra_f90_compile_args - * f2py_options - * language - - Returns - ------- - None - - See Also - -------- - add_library, add_npy_pkg_config, get_info - - Notes - ----- - The best way to encode the options required to link against the specified - C libraries is to use a "libname.ini" file, and use `get_info` to - retrieve the required options (see `add_npy_pkg_config` for more - information). - - """ - if not build_info: - build_info = {} - - install_dir = os.path.join(self.package_path, install_dir) - self._add_library(name, sources, install_dir, build_info) - self.installed_libraries.append(InstallableLib(name, build_info, install_dir)) - - def add_npy_pkg_config(self, template, install_dir, subst_dict=None): - """ - Generate and install a npy-pkg config file from a template. 
- - The config file generated from `template` is installed in the - given install directory, using `subst_dict` for variable substitution. - - Parameters - ---------- - template : str - The path of the template, relatively to the current package path. - install_dir : str - Where to install the npy-pkg config file, relatively to the current - package path. - subst_dict : dict, optional - If given, any string of the form ``@key@`` will be replaced by - ``subst_dict[key]`` in the template file when installed. The install - prefix is always available through the variable ``@prefix@``, since the - install prefix is not easy to get reliably from setup.py. - - See also - -------- - add_installed_library, get_info - - Notes - ----- - This works for both standard installs and in-place builds, i.e. the - ``@prefix@`` refer to the source directory for in-place builds. - - Examples - -------- - :: - - config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar}) - - Assuming the foo.ini.in file has the following content:: - - [meta] - Name=@foo@ - Version=1.0 - Description=dummy description - - [default] - Cflags=-I@prefix@/include - Libs= - - The generated file will have the following content:: - - [meta] - Name=bar - Version=1.0 - Description=dummy description - - [default] - Cflags=-Iprefix_dir/include - Libs= - - and will be installed as foo.ini in the 'lib' subpath. - - When cross-compiling with numpy distutils, it might be necessary to - use modified npy-pkg-config files. Using the default/generated files - will link with the host libraries (i.e. libnpymath.a). For - cross-compilation you of-course need to link with target libraries, - while using the host Python installation. - - You can copy out the numpy/_core/lib/npy-pkg-config directory, add a - pkgdir value to the .ini files and set NPY_PKG_CONFIG_PATH environment - variable to point to the directory with the modified npy-pkg-config - files. 
- - Example npymath.ini modified for cross-compilation:: - - [meta] - Name=npymath - Description=Portable, core math library implementing C99 standard - Version=0.1 - - [variables] - pkgname=numpy._core - pkgdir=/build/arm-linux-gnueabi/sysroot/usr/lib/python3.7/site-packages/numpy/_core - prefix=${pkgdir} - libdir=${prefix}/lib - includedir=${prefix}/include - - [default] - Libs=-L${libdir} -lnpymath - Cflags=-I${includedir} - Requires=mlib - - [msvc] - Libs=/LIBPATH:${libdir} npymath.lib - Cflags=/INCLUDE:${includedir} - Requires=mlib - - """ - if subst_dict is None: - subst_dict = {} - template = os.path.join(self.package_path, template) - - if self.name in self.installed_pkg_config: - self.installed_pkg_config[self.name].append((template, install_dir, - subst_dict)) - else: - self.installed_pkg_config[self.name] = [(template, install_dir, - subst_dict)] - - - def add_scripts(self,*files): - """Add scripts to configuration. - - Add the sequence of files to the beginning of the scripts list. - Scripts will be installed under the /bin/ directory. 
- - """ - scripts = self.paths(files) - dist = self.get_distribution() - if dist is not None: - if dist.scripts is None: - dist.scripts = [] - dist.scripts.extend(scripts) - else: - self.scripts.extend(scripts) - - def dict_append(self,**dict): - for key in self.list_keys: - a = getattr(self, key) - a.extend(dict.get(key, [])) - for key in self.dict_keys: - a = getattr(self, key) - a.update(dict.get(key, {})) - known_keys = self.list_keys + self.dict_keys + self.extra_keys - for key in dict.keys(): - if key not in known_keys: - a = getattr(self, key, None) - if a and a==dict[key]: continue - self.warn('Inheriting attribute %r=%r from %r' \ - % (key, dict[key], dict.get('name', '?'))) - setattr(self, key, dict[key]) - self.extra_keys.append(key) - elif key in self.extra_keys: - self.info('Ignoring attempt to set %r (from %r to %r)' \ - % (key, getattr(self, key), dict[key])) - elif key in known_keys: - # key is already processed above - pass - else: - raise ValueError("Don't know about key=%r" % (key)) - - def __str__(self): - from pprint import pformat - known_keys = self.list_keys + self.dict_keys + self.extra_keys - s = '<'+5*'-' + '\n' - s += 'Configuration of '+self.name+':\n' - known_keys.sort() - for k in known_keys: - a = getattr(self, k, None) - if a: - s += '%s = %s\n' % (k, pformat(a)) - s += 5*'-' + '>' - return s - - def get_config_cmd(self): - """ - Returns the numpy.distutils config command instance. - """ - cmd = get_cmd('config') - cmd.ensure_finalized() - cmd.dump_source = 0 - cmd.noisy = 0 - old_path = os.environ.get('PATH') - if old_path: - path = os.pathsep.join(['.', old_path]) - os.environ['PATH'] = path - return cmd - - def get_build_temp_dir(self): - """ - Return a path to a temporary directory where temporary files should be - placed. - """ - cmd = get_cmd('build') - cmd.ensure_finalized() - return cmd.build_temp - - def have_f77c(self): - """Check for availability of Fortran 77 compiler. 
- - Use it inside source generating function to ensure that - setup distribution instance has been initialized. - - Notes - ----- - True if a Fortran 77 compiler is available (because a simple Fortran 77 - code was able to be compiled successfully). - """ - simple_fortran_subroutine = ''' - subroutine simple - end - ''' - config_cmd = self.get_config_cmd() - flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f77') - return flag - - def have_f90c(self): - """Check for availability of Fortran 90 compiler. - - Use it inside source generating function to ensure that - setup distribution instance has been initialized. - - Notes - ----- - True if a Fortran 90 compiler is available (because a simple Fortran - 90 code was able to be compiled successfully) - """ - simple_fortran_subroutine = ''' - subroutine simple - end - ''' - config_cmd = self.get_config_cmd() - flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90') - return flag - - def append_to(self, extlib): - """Append libraries, include_dirs to extension or library item. - """ - if is_sequence(extlib): - lib_name, build_info = extlib - dict_append(build_info, - libraries=self.libraries, - include_dirs=self.include_dirs) - else: - from numpy.distutils.core import Extension - assert isinstance(extlib, Extension), repr(extlib) - extlib.libraries.extend(self.libraries) - extlib.include_dirs.extend(self.include_dirs) - - def _get_svn_revision(self, path): - """Return path's SVN revision number. 
- """ - try: - output = subprocess.check_output(['svnversion'], cwd=path) - except (subprocess.CalledProcessError, OSError): - pass - else: - m = re.match(rb'(?P\d+)', output) - if m: - return int(m.group('revision')) - - if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None): - entries = njoin(path, '_svn', 'entries') - else: - entries = njoin(path, '.svn', 'entries') - if os.path.isfile(entries): - with open(entries) as f: - fstr = f.read() - if fstr[:5] == '\d+)"', fstr) - if m: - return int(m.group('revision')) - else: # non-xml entries file --- check to be sure that - m = re.search(r'dir[\n\r]+(?P\d+)', fstr) - if m: - return int(m.group('revision')) - return None - - def _get_hg_revision(self, path): - """Return path's Mercurial revision number. - """ - try: - output = subprocess.check_output( - ['hg', 'identify', '--num'], cwd=path) - except (subprocess.CalledProcessError, OSError): - pass - else: - m = re.match(rb'(?P\d+)', output) - if m: - return int(m.group('revision')) - - branch_fn = njoin(path, '.hg', 'branch') - branch_cache_fn = njoin(path, '.hg', 'branch.cache') - - if os.path.isfile(branch_fn): - branch0 = None - with open(branch_fn) as f: - revision0 = f.read().strip() - - branch_map = {} - with open(branch_cache_fn) as f: - for line in f: - branch1, revision1 = line.split()[:2] - if revision1==revision0: - branch0 = branch1 - try: - revision1 = int(revision1) - except ValueError: - continue - branch_map[branch1] = revision1 - - return branch_map.get(branch0) - - return None - - - def get_version(self, version_file=None, version_variable=None): - """Try to get version string of a package. - - Return a version string of the current package or None if the version - information could not be detected. - - Notes - ----- - This method scans files named - __version__.py, _version.py, version.py, and - __svn_version__.py for string variables version, __version__, and - _version, until a version number is found. 
- """ - version = getattr(self, 'version', None) - if version is not None: - return version - - # Get version from version file. - if version_file is None: - files = ['__version__.py', - self.name.split('.')[-1]+'_version.py', - 'version.py', - '__svn_version__.py', - '__hg_version__.py'] - else: - files = [version_file] - if version_variable is None: - version_vars = ['version', - '__version__', - self.name.split('.')[-1]+'_version'] - else: - version_vars = [version_variable] - for f in files: - fn = njoin(self.local_path, f) - if os.path.isfile(fn): - info = ('.py', 'U', 1) - name = os.path.splitext(os.path.basename(fn))[0] - n = dot_join(self.name, name) - try: - version_module = exec_mod_from_location( - '_'.join(n.split('.')), fn) - except ImportError as e: - self.warn(str(e)) - version_module = None - if version_module is None: - continue - - for a in version_vars: - version = getattr(version_module, a, None) - if version is not None: - break - - # Try if versioneer module - try: - version = version_module.get_versions()['version'] - except AttributeError: - pass - - if version is not None: - break - - if version is not None: - self.version = version - return version - - # Get version as SVN or Mercurial revision number - revision = self._get_svn_revision(self.local_path) - if revision is None: - revision = self._get_hg_revision(self.local_path) - - if revision is not None: - version = str(revision) - self.version = version - - return version - - def make_svn_version_py(self, delete=True): - """Appends a data function to the data_files list that will generate - __svn_version__.py file to the current package directory. - - Generate package __svn_version__.py file from SVN revision number, - it will be removed after python exits but will be available - when sdist, etc commands are executed. - - Notes - ----- - If __svn_version__.py existed before, nothing is done. - - This is - intended for working with source directories that are in an SVN - repository. 
- """ - target = njoin(self.local_path, '__svn_version__.py') - revision = self._get_svn_revision(self.local_path) - if os.path.isfile(target) or revision is None: - return - else: - def generate_svn_version_py(): - if not os.path.isfile(target): - version = str(revision) - self.info('Creating %s (version=%r)' % (target, version)) - with open(target, 'w') as f: - f.write('version = %r\n' % (version)) - - def rm_file(f=target,p=self.info): - if delete: - try: os.remove(f); p('removed '+f) - except OSError: pass - try: os.remove(f+'c'); p('removed '+f+'c') - except OSError: pass - - atexit.register(rm_file) - - return target - - self.add_data_files(('', generate_svn_version_py())) - - def make_hg_version_py(self, delete=True): - """Appends a data function to the data_files list that will generate - __hg_version__.py file to the current package directory. - - Generate package __hg_version__.py file from Mercurial revision, - it will be removed after python exits but will be available - when sdist, etc commands are executed. - - Notes - ----- - If __hg_version__.py existed before, nothing is done. - - This is intended for working with source directories that are - in an Mercurial repository. 
- """ - target = njoin(self.local_path, '__hg_version__.py') - revision = self._get_hg_revision(self.local_path) - if os.path.isfile(target) or revision is None: - return - else: - def generate_hg_version_py(): - if not os.path.isfile(target): - version = str(revision) - self.info('Creating %s (version=%r)' % (target, version)) - with open(target, 'w') as f: - f.write('version = %r\n' % (version)) - - def rm_file(f=target,p=self.info): - if delete: - try: os.remove(f); p('removed '+f) - except OSError: pass - try: os.remove(f+'c'); p('removed '+f+'c') - except OSError: pass - - atexit.register(rm_file) - - return target - - self.add_data_files(('', generate_hg_version_py())) - - def make_config_py(self,name='__config__'): - """Generate package __config__.py file containing system_info - information used during building the package. - - This file is installed to the - package installation directory. - - """ - self.py_modules.append((self.name, name, generate_config_py)) - - def get_info(self,*names): - """Get resources information. - - Return information (from system_info.get_info) for all of the names in - the argument list in a single dictionary. 
- """ - from .system_info import get_info, dict_append - info_dict = {} - for a in names: - dict_append(info_dict,**get_info(a)) - return info_dict - - -def get_cmd(cmdname, _cache={}): - if cmdname not in _cache: - import distutils.core - dist = distutils.core._setup_distribution - if dist is None: - from distutils.errors import DistutilsInternalError - raise DistutilsInternalError( - 'setup distribution instance not initialized') - cmd = dist.get_command_obj(cmdname) - _cache[cmdname] = cmd - return _cache[cmdname] - -def get_numpy_include_dirs(): - # numpy_include_dirs are set by numpy/_core/setup.py, otherwise [] - include_dirs = Configuration.numpy_include_dirs[:] - if not include_dirs: - import numpy - include_dirs = [ numpy.get_include() ] - # else running numpy/_core/setup.py - return include_dirs - -def get_npy_pkg_dir(): - """Return the path where to find the npy-pkg-config directory. - - If the NPY_PKG_CONFIG_PATH environment variable is set, the value of that - is returned. Otherwise, a path inside the location of the numpy module is - returned. - - The NPY_PKG_CONFIG_PATH can be useful when cross-compiling, maintaining - customized npy-pkg-config .ini files for the cross-compilation - environment, and using them when cross-compiling. - - """ - d = os.environ.get('NPY_PKG_CONFIG_PATH') - if d is not None: - return d - spec = importlib.util.find_spec('numpy') - d = os.path.join(os.path.dirname(spec.origin), - '_core', 'lib', 'npy-pkg-config') - return d - -def get_pkg_info(pkgname, dirs=None): - """ - Return library info for the given package. - - Parameters - ---------- - pkgname : str - Name of the package (should match the name of the .ini file, without - the extension, e.g. foo for the file foo.ini). - dirs : sequence, optional - If given, should be a sequence of additional directories where to look - for npy-pkg-config files. Those directories are searched prior to the - NumPy directory. 
- - Returns - ------- - pkginfo : class instance - The `LibraryInfo` instance containing the build information. - - Raises - ------ - PkgNotFound - If the package is not found. - - See Also - -------- - Configuration.add_npy_pkg_config, Configuration.add_installed_library, - get_info - - """ - from numpy.distutils.npy_pkg_config import read_config - - if dirs: - dirs.append(get_npy_pkg_dir()) - else: - dirs = [get_npy_pkg_dir()] - return read_config(pkgname, dirs) - -def get_info(pkgname, dirs=None): - """ - Return an info dict for a given C library. - - The info dict contains the necessary options to use the C library. - - Parameters - ---------- - pkgname : str - Name of the package (should match the name of the .ini file, without - the extension, e.g. foo for the file foo.ini). - dirs : sequence, optional - If given, should be a sequence of additional directories where to look - for npy-pkg-config files. Those directories are searched prior to the - NumPy directory. - - Returns - ------- - info : dict - The dictionary with build information. - - Raises - ------ - PkgNotFound - If the package is not found. 
- - See Also - -------- - Configuration.add_npy_pkg_config, Configuration.add_installed_library, - get_pkg_info - - Examples - -------- - To get the necessary information for the npymath library from NumPy: - - >>> npymath_info = np.distutils.misc_util.get_info('npymath') - >>> npymath_info #doctest: +SKIP - {'define_macros': [], 'libraries': ['npymath'], 'library_dirs': - ['.../numpy/_core/lib'], 'include_dirs': ['.../numpy/_core/include']} - - This info dict can then be used as input to a `Configuration` instance:: - - config.add_extension('foo', sources=['foo.c'], extra_info=npymath_info) - - """ - from numpy.distutils.npy_pkg_config import parse_flags - pkg_info = get_pkg_info(pkgname, dirs) - - # Translate LibraryInfo instance into a build_info dict - info = parse_flags(pkg_info.cflags()) - for k, v in parse_flags(pkg_info.libs()).items(): - info[k].extend(v) - - # add_extension extra_info argument is ANAL - info['define_macros'] = info['macros'] - del info['macros'] - del info['ignored'] - - return info - -def is_bootstrapping(): - import builtins - - try: - builtins.__NUMPY_SETUP__ - return True - except AttributeError: - return False - - -######################### - -def default_config_dict(name = None, parent_name = None, local_path=None): - """Return a configuration dictionary for usage in - configuration() function defined in file setup_.py. 
- """ - import warnings - warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\ - 'deprecated default_config_dict(%r,%r,%r)' - % (name, parent_name, local_path, - name, parent_name, local_path, - ), stacklevel=2) - c = Configuration(name, parent_name, local_path) - return c.todict() - - -def dict_append(d, **kws): - for k, v in kws.items(): - if k in d: - ov = d[k] - if isinstance(ov, str): - d[k] = v - else: - d[k].extend(v) - else: - d[k] = v - -def appendpath(prefix, path): - if os.path.sep != '/': - prefix = prefix.replace('/', os.path.sep) - path = path.replace('/', os.path.sep) - drive = '' - if os.path.isabs(path): - drive = os.path.splitdrive(prefix)[0] - absprefix = os.path.splitdrive(os.path.abspath(prefix))[1] - pathdrive, path = os.path.splitdrive(path) - d = os.path.commonprefix([absprefix, path]) - if os.path.join(absprefix[:len(d)], absprefix[len(d):]) != absprefix \ - or os.path.join(path[:len(d)], path[len(d):]) != path: - # Handle invalid paths - d = os.path.dirname(d) - subpath = path[len(d):] - if os.path.isabs(subpath): - subpath = subpath[1:] - else: - subpath = path - return os.path.normpath(njoin(drive + prefix, subpath)) - -def generate_config_py(target): - """Generate config.py file containing system_info information - used during building the package. 
- - Usage: - config['py_modules'].append((packagename, '__config__',generate_config_py)) - """ - from numpy.distutils.system_info import system_info - from distutils.dir_util import mkpath - mkpath(os.path.dirname(target)) - with open(target, 'w') as f: - f.write('# This file is generated by numpy\'s %s\n' % (os.path.basename(sys.argv[0]))) - f.write('# It contains system_info results at the time of building this package.\n') - f.write('__all__ = ["get_info","show"]\n\n') - - # For gfortran+msvc combination, extra shared libraries may exist - f.write(textwrap.dedent(""" - import os - import sys - - extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs') - - if sys.platform == 'win32' and os.path.isdir(extra_dll_dir): - os.add_dll_directory(extra_dll_dir) - - """)) - - for k, i in system_info.saved_results.items(): - f.write('%s=%r\n' % (k, i)) - f.write(textwrap.dedent(r''' - def get_info(name): - g = globals() - return g.get(name, g.get(name + "_info", {})) - - def show(): - """ - Show libraries in the system on which NumPy was built. - - Print information about various resources (libraries, library - directories, include directories, etc.) in the system on which - NumPy was built. - - See Also - -------- - get_include : Returns the directory containing NumPy C - header files. - - Notes - ----- - 1. Classes specifying the information to be printed are defined - in the `numpy.distutils.system_info` module. 
- - Information may include: - - * ``language``: language used to write the libraries (mostly - C or f77) - * ``libraries``: names of libraries found in the system - * ``library_dirs``: directories containing the libraries - * ``include_dirs``: directories containing library header files - * ``src_dirs``: directories containing library source files - * ``define_macros``: preprocessor macros used by - ``distutils.setup`` - * ``baseline``: minimum CPU features required - * ``found``: dispatched features supported in the system - * ``not found``: dispatched features that are not supported - in the system - - 2. NumPy BLAS/LAPACK Installation Notes - - Installing a numpy wheel (``pip install numpy`` or force it - via ``pip install numpy --only-binary :numpy: numpy``) includes - an OpenBLAS implementation of the BLAS and LAPACK linear algebra - APIs. In this case, ``library_dirs`` reports the original build - time configuration as compiled with gcc/gfortran; at run time - the OpenBLAS library is in - ``site-packages/numpy.libs/`` (linux), or - ``site-packages/numpy/.dylibs/`` (macOS), or - ``site-packages/numpy/.libs/`` (windows). - - Installing numpy from source - (``pip install numpy --no-binary numpy``) searches for BLAS and - LAPACK dynamic link libraries at build time as influenced by - environment variables NPY_BLAS_LIBS, NPY_CBLAS_LIBS, and - NPY_LAPACK_LIBS; or NPY_BLAS_ORDER and NPY_LAPACK_ORDER; - or the optional file ``~/.numpy-site.cfg``. - NumPy remembers those locations and expects to load the same - libraries at run-time. - In NumPy 1.21+ on macOS, 'accelerate' (Apple's Accelerate BLAS - library) is in the default build-time search order after - 'openblas'. 
- - Examples - -------- - >>> import numpy as np - >>> np.show_config() - blas_opt_info: - language = c - define_macros = [('HAVE_CBLAS', None)] - libraries = ['openblas', 'openblas'] - library_dirs = ['/usr/local/lib'] - """ - from numpy._core._multiarray_umath import ( - __cpu_features__, __cpu_baseline__, __cpu_dispatch__ - ) - for name,info_dict in globals().items(): - if name[0] == "_" or type(info_dict) is not type({}): continue - print(name + ":") - if not info_dict: - print(" NOT AVAILABLE") - for k,v in info_dict.items(): - v = str(v) - if k == "sources" and len(v) > 200: - v = v[:60] + " ...\n... " + v[-60:] - print(" %s = %s" % (k,v)) - - features_found, features_not_found = [], [] - for feature in __cpu_dispatch__: - if __cpu_features__[feature]: - features_found.append(feature) - else: - features_not_found.append(feature) - - print("Supported SIMD extensions in this NumPy install:") - print(" baseline = %s" % (','.join(__cpu_baseline__))) - print(" found = %s" % (','.join(features_found))) - print(" not found = %s" % (','.join(features_not_found))) - - ''')) - - return target - -def msvc_version(compiler): - """Return version major and minor of compiler instance if it is - MSVC, raise an exception otherwise.""" - if not compiler.compiler_type == "msvc": - raise ValueError("Compiler instance is not msvc (%s)"\ - % compiler.compiler_type) - return compiler._MSVCCompiler__version - -def get_build_architecture(): - # Importing distutils.msvccompiler triggers a warning on non-Windows - # systems, so delay the import to here. - from distutils.msvccompiler import get_build_architecture - return get_build_architecture() - - -_cxx_ignore_flags = {'-Werror=implicit-function-declaration', '-std=c99'} - - -def sanitize_cxx_flags(cxxflags): - ''' - Some flags are valid for C but not C++. Prune them. 
- ''' - return [flag for flag in cxxflags if flag not in _cxx_ignore_flags] - - -def exec_mod_from_location(modname, modfile): - ''' - Use importlib machinery to import a module `modname` from the file - `modfile`. Depending on the `spec.loader`, the module may not be - registered in sys.modules. - ''' - spec = importlib.util.spec_from_file_location(modname, modfile) - foo = importlib.util.module_from_spec(spec) - spec.loader.exec_module(foo) - return foo diff --git a/numpy/distutils/msvc9compiler.py b/numpy/distutils/msvc9compiler.py deleted file mode 100644 index 68239495d6c7..000000000000 --- a/numpy/distutils/msvc9compiler.py +++ /dev/null @@ -1,63 +0,0 @@ -import os -from distutils.msvc9compiler import MSVCCompiler as _MSVCCompiler - -from .system_info import platform_bits - - -def _merge(old, new): - """Concatenate two environment paths avoiding repeats. - - Here `old` is the environment string before the base class initialize - function is called and `new` is the string after the call. The new string - will be a fixed string if it is not obtained from the current environment, - or the same as the old string if obtained from the same environment. The aim - here is not to append the new string if it is already contained in the old - string so as to limit the growth of the environment string. - - Parameters - ---------- - old : string - Previous environment string. - new : string - New environment string. - - Returns - ------- - ret : string - Updated environment string. - - """ - if not old: - return new - if new in old: - return old - - # Neither new nor old is empty. Give old priority. - return ';'.join([old, new]) - - -class MSVCCompiler(_MSVCCompiler): - def __init__(self, verbose=0, dry_run=0, force=0): - _MSVCCompiler.__init__(self, verbose, dry_run, force) - - def initialize(self, plat_name=None): - # The 'lib' and 'include' variables may be overwritten - # by MSVCCompiler.initialize, so save them for later merge. 
- environ_lib = os.getenv('lib') - environ_include = os.getenv('include') - _MSVCCompiler.initialize(self, plat_name) - - # Merge current and previous values of 'lib' and 'include' - os.environ['lib'] = _merge(environ_lib, os.environ['lib']) - os.environ['include'] = _merge(environ_include, os.environ['include']) - - # msvc9 building for 32 bits requires SSE2 to work around a - # compiler bug. - if platform_bits == 32: - self.compile_options += ['/arch:SSE2'] - self.compile_options_debug += ['/arch:SSE2'] - - def manifest_setup_ldargs(self, output_filename, build_temp, ld_args): - ld_args.append('/MANIFEST') - _MSVCCompiler.manifest_setup_ldargs(self, output_filename, - build_temp, ld_args) diff --git a/numpy/distutils/msvccompiler.py b/numpy/distutils/msvccompiler.py deleted file mode 100644 index 2b93221baac8..000000000000 --- a/numpy/distutils/msvccompiler.py +++ /dev/null @@ -1,76 +0,0 @@ -import os -from distutils.msvccompiler import MSVCCompiler as _MSVCCompiler - -from .system_info import platform_bits - - -def _merge(old, new): - """Concatenate two environment paths avoiding repeats. - - Here `old` is the environment string before the base class initialize - function is called and `new` is the string after the call. The new string - will be a fixed string if it is not obtained from the current environment, - or the same as the old string if obtained from the same environment. The aim - here is not to append the new string if it is already contained in the old - string so as to limit the growth of the environment string. - - Parameters - ---------- - old : string - Previous environment string. - new : string - New environment string. - - Returns - ------- - ret : string - Updated environment string. - - """ - if new in old: - return old - if not old: - return new - - # Neither new nor old is empty. Give old priority. 
- return ';'.join([old, new]) - - -class MSVCCompiler(_MSVCCompiler): - def __init__(self, verbose=0, dry_run=0, force=0): - _MSVCCompiler.__init__(self, verbose, dry_run, force) - - def initialize(self): - # The 'lib' and 'include' variables may be overwritten - # by MSVCCompiler.initialize, so save them for later merge. - environ_lib = os.getenv('lib', '') - environ_include = os.getenv('include', '') - _MSVCCompiler.initialize(self) - - # Merge current and previous values of 'lib' and 'include' - os.environ['lib'] = _merge(environ_lib, os.environ['lib']) - os.environ['include'] = _merge(environ_include, os.environ['include']) - - # msvc9 building for 32 bits requires SSE2 to work around a - # compiler bug. - if platform_bits == 32: - self.compile_options += ['/arch:SSE2'] - self.compile_options_debug += ['/arch:SSE2'] - - -def lib_opts_if_msvc(build_cmd): - """ Add flags if we are using MSVC compiler - - We can't see `build_cmd` in our scope, because we have not initialized - the distutils build command, so use this deferred calculation to run - when we are building the library. - """ - if build_cmd.compiler.compiler_type != 'msvc': - return [] - # Explicitly disable whole-program optimization. 
- flags = ['/GL-'] - # Disable voltbl section for vc142 to allow link using mingw-w64; see: - # https://github.com/matthew-brett/dll_investigation/issues/1#issuecomment-1100468171 - if build_cmd.compiler_opt.cc_test_flags(['-d2VolatileMetadata-']): - flags.append('-d2VolatileMetadata-') - return flags diff --git a/numpy/distutils/npy_pkg_config.py b/numpy/distutils/npy_pkg_config.py deleted file mode 100644 index 14e8791b14cd..000000000000 --- a/numpy/distutils/npy_pkg_config.py +++ /dev/null @@ -1,441 +0,0 @@ -import sys -import re -import os - -from configparser import RawConfigParser - -__all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet', - 'read_config', 'parse_flags'] - -_VAR = re.compile(r'\$\{([a-zA-Z0-9_-]+)\}') - -class FormatError(OSError): - """ - Exception thrown when there is a problem parsing a configuration file. - - """ - def __init__(self, msg): - self.msg = msg - - def __str__(self): - return self.msg - -class PkgNotFound(OSError): - """Exception raised when a package can not be located.""" - def __init__(self, msg): - self.msg = msg - - def __str__(self): - return self.msg - -def parse_flags(line): - """ - Parse a line from a config file containing compile flags. - - Parameters - ---------- - line : str - A single line containing one or more compile flags. - - Returns - ------- - d : dict - Dictionary of parsed flags, split into relevant categories. 
- These categories are the keys of `d`: - - * 'include_dirs' - * 'library_dirs' - * 'libraries' - * 'macros' - * 'ignored' - - """ - d = {'include_dirs': [], 'library_dirs': [], 'libraries': [], - 'macros': [], 'ignored': []} - - flags = (' ' + line).split(' -') - for flag in flags: - flag = '-' + flag - if len(flag) > 0: - if flag.startswith('-I'): - d['include_dirs'].append(flag[2:].strip()) - elif flag.startswith('-L'): - d['library_dirs'].append(flag[2:].strip()) - elif flag.startswith('-l'): - d['libraries'].append(flag[2:].strip()) - elif flag.startswith('-D'): - d['macros'].append(flag[2:].strip()) - else: - d['ignored'].append(flag) - - return d - -def _escape_backslash(val): - return val.replace('\\', '\\\\') - -class LibraryInfo: - """ - Object containing build information about a library. - - Parameters - ---------- - name : str - The library name. - description : str - Description of the library. - version : str - Version string. - sections : dict - The sections of the configuration file for the library. The keys are - the section headers, the values the text under each header. - vars : class instance - A `VariableSet` instance, which contains ``(name, value)`` pairs for - variables defined in the configuration file for the library. - requires : sequence, optional - The required libraries for the library to be installed. - - Notes - ----- - All input parameters (except "sections" which is a method) are available as - attributes of the same name. - - """ - def __init__(self, name, description, version, sections, vars, requires=None): - self.name = name - self.description = description - if requires: - self.requires = requires - else: - self.requires = [] - self.version = version - self._sections = sections - self.vars = vars - - def sections(self): - """ - Return the section headers of the config file. - - Parameters - ---------- - None - - Returns - ------- - keys : list of str - The list of section headers. 
- - """ - return list(self._sections.keys()) - - def cflags(self, section="default"): - val = self.vars.interpolate(self._sections[section]['cflags']) - return _escape_backslash(val) - - def libs(self, section="default"): - val = self.vars.interpolate(self._sections[section]['libs']) - return _escape_backslash(val) - - def __str__(self): - m = ['Name: %s' % self.name, 'Description: %s' % self.description] - if self.requires: - m.append('Requires:') - else: - m.append('Requires: %s' % ",".join(self.requires)) - m.append('Version: %s' % self.version) - - return "\n".join(m) - -class VariableSet: - """ - Container object for the variables defined in a config file. - - `VariableSet` can be used as a plain dictionary, with the variable names - as keys. - - Parameters - ---------- - d : dict - Dict of items in the "variables" section of the configuration file. - - """ - def __init__(self, d): - self._raw_data = dict([(k, v) for k, v in d.items()]) - - self._re = {} - self._re_sub = {} - - self._init_parse() - - def _init_parse(self): - for k, v in self._raw_data.items(): - self._init_parse_var(k, v) - - def _init_parse_var(self, name, value): - self._re[name] = re.compile(r'\$\{%s\}' % name) - self._re_sub[name] = value - - def interpolate(self, value): - # Brute force: we keep interpolating until there is no '${var}' anymore - # or until interpolated string is equal to input string - def _interpolate(value): - for k in self._re.keys(): - value = self._re[k].sub(self._re_sub[k], value) - return value - while _VAR.search(value): - nvalue = _interpolate(value) - if nvalue == value: - break - value = nvalue - - return value - - def variables(self): - """ - Return the list of variable names. - - Parameters - ---------- - None - - Returns - ------- - names : list of str - The names of all variables in the `VariableSet` instance. 
- - """ - return list(self._raw_data.keys()) - - # Emulate a dict to set/get variables values - def __getitem__(self, name): - return self._raw_data[name] - - def __setitem__(self, name, value): - self._raw_data[name] = value - self._init_parse_var(name, value) - -def parse_meta(config): - if not config.has_section('meta'): - raise FormatError("No meta section found !") - - d = dict(config.items('meta')) - - for k in ['name', 'description', 'version']: - if not k in d: - raise FormatError("Option %s (section [meta]) is mandatory, " - "but not found" % k) - - if not 'requires' in d: - d['requires'] = [] - - return d - -def parse_variables(config): - if not config.has_section('variables'): - raise FormatError("No variables section found !") - - d = {} - - for name, value in config.items("variables"): - d[name] = value - - return VariableSet(d) - -def parse_sections(config): - return meta_d, r - -def pkg_to_filename(pkg_name): - return "%s.ini" % pkg_name - -def parse_config(filename, dirs=None): - if dirs: - filenames = [os.path.join(d, filename) for d in dirs] - else: - filenames = [filename] - - config = RawConfigParser() - - n = config.read(filenames) - if not len(n) >= 1: - raise PkgNotFound("Could not find file(s) %s" % str(filenames)) - - # Parse meta and variables sections - meta = parse_meta(config) - - vars = {} - if config.has_section('variables'): - for name, value in config.items("variables"): - vars[name] = _escape_backslash(value) - - # Parse "normal" sections - secs = [s for s in config.sections() if not s in ['meta', 'variables']] - sections = {} - - requires = {} - for s in secs: - d = {} - if config.has_option(s, "requires"): - requires[s] = config.get(s, 'requires') - - for name, value in config.items(s): - d[name] = value - sections[s] = d - - return meta, vars, sections, requires - -def _read_config_imp(filenames, dirs=None): - def _read_config(f): - meta, vars, sections, reqs = parse_config(f, dirs) - # recursively add sections and variables of 
required libraries - for rname, rvalue in reqs.items(): - nmeta, nvars, nsections, nreqs = _read_config(pkg_to_filename(rvalue)) - - # Update var dict for variables not in 'top' config file - for k, v in nvars.items(): - if not k in vars: - vars[k] = v - - # Update sec dict - for oname, ovalue in nsections[rname].items(): - if ovalue: - sections[rname][oname] += ' %s' % ovalue - - return meta, vars, sections, reqs - - meta, vars, sections, reqs = _read_config(filenames) - - # FIXME: document this. If pkgname is defined in the variables section, and - # there is no pkgdir variable defined, pkgdir is automatically defined to - # the path of pkgname. This requires the package to be imported to work - if not 'pkgdir' in vars and "pkgname" in vars: - pkgname = vars["pkgname"] - if not pkgname in sys.modules: - raise ValueError("You should import %s to get information on %s" % - (pkgname, meta["name"])) - - mod = sys.modules[pkgname] - vars["pkgdir"] = _escape_backslash(os.path.dirname(mod.__file__)) - - return LibraryInfo(name=meta["name"], description=meta["description"], - version=meta["version"], sections=sections, vars=VariableSet(vars)) - -# Trivial cache to cache LibraryInfo instances creation. To be really -# efficient, the cache should be handled in read_config, since a same file can -# be parsed many time outside LibraryInfo creation, but I doubt this will be a -# problem in practice -_CACHE = {} -def read_config(pkgname, dirs=None): - """ - Return library info for a package from its configuration file. - - Parameters - ---------- - pkgname : str - Name of the package (should match the name of the .ini file, without - the extension, e.g. foo for the file foo.ini). - dirs : sequence, optional - If given, should be a sequence of directories - usually including - the NumPy base directory - where to look for npy-pkg-config files. - - Returns - ------- - pkginfo : class instance - The `LibraryInfo` instance containing the build information. 
- - Raises - ------ - PkgNotFound - If the package is not found. - - See Also - -------- - misc_util.get_info, misc_util.get_pkg_info - - Examples - -------- - >>> npymath_info = np.distutils.npy_pkg_config.read_config('npymath') - >>> type(npymath_info) - - >>> print(npymath_info) - Name: npymath - Description: Portable, core math library implementing C99 standard - Requires: - Version: 0.1 #random - - """ - try: - return _CACHE[pkgname] - except KeyError: - v = _read_config_imp(pkg_to_filename(pkgname), dirs) - _CACHE[pkgname] = v - return v - -# TODO: -# - implements version comparison (modversion + atleast) - -# pkg-config simple emulator - useful for debugging, and maybe later to query -# the system -if __name__ == '__main__': - from optparse import OptionParser - import glob - - parser = OptionParser() - parser.add_option("--cflags", dest="cflags", action="store_true", - help="output all preprocessor and compiler flags") - parser.add_option("--libs", dest="libs", action="store_true", - help="output all linker flags") - parser.add_option("--use-section", dest="section", - help="use this section instead of default for options") - parser.add_option("--version", dest="version", action="store_true", - help="output version") - parser.add_option("--atleast-version", dest="min_version", - help="Minimal version") - parser.add_option("--list-all", dest="list_all", action="store_true", - help="Minimal version") - parser.add_option("--define-variable", dest="define_variable", - help="Replace variable with the given value") - - (options, args) = parser.parse_args(sys.argv) - - if len(args) < 2: - raise ValueError("Expect package name on the command line:") - - if options.list_all: - files = glob.glob("*.ini") - for f in files: - info = read_config(f) - print("%s\t%s - %s" % (info.name, info.name, info.description)) - - pkg_name = args[1] - d = os.environ.get('NPY_PKG_CONFIG_PATH') - if d: - info = read_config( - pkg_name, ['numpy/_core/lib/npy-pkg-config', '.', d] - ) - 
else: - info = read_config( - pkg_name, ['numpy/_core/lib/npy-pkg-config', '.'] - ) - - if options.section: - section = options.section - else: - section = "default" - - if options.define_variable: - m = re.search(r'([\S]+)=([\S]+)', options.define_variable) - if not m: - raise ValueError("--define-variable option should be of " - "the form --define-variable=foo=bar") - else: - name = m.group(1) - value = m.group(2) - info.vars[name] = value - - if options.cflags: - print(info.cflags(section)) - if options.libs: - print(info.libs(section)) - if options.version: - print(info.version) - if options.min_version: - print(info.version >= options.min_version) diff --git a/numpy/distutils/numpy_distribution.py b/numpy/distutils/numpy_distribution.py deleted file mode 100644 index ea8182659cb1..000000000000 --- a/numpy/distutils/numpy_distribution.py +++ /dev/null @@ -1,17 +0,0 @@ -# XXX: Handle setuptools ? -from distutils.core import Distribution - -# This class is used because we add new files (sconscripts, and so on) with the -# scons command -class NumpyDistribution(Distribution): - def __init__(self, attrs = None): - # A list of (sconscripts, pre_hook, post_hook, src, parent_names) - self.scons_data = [] - # A list of installable libraries - self.installed_libraries = [] - # A dict of pkg_config files to generate/install - self.installed_pkg_config = {} - Distribution.__init__(self, attrs) - - def has_scons_scripts(self): - return bool(self.scons_data) diff --git a/numpy/distutils/pathccompiler.py b/numpy/distutils/pathccompiler.py deleted file mode 100644 index 48051810ee21..000000000000 --- a/numpy/distutils/pathccompiler.py +++ /dev/null @@ -1,21 +0,0 @@ -from distutils.unixccompiler import UnixCCompiler - -class PathScaleCCompiler(UnixCCompiler): - - """ - PathScale compiler compatible with an gcc built Python. 
- """ - - compiler_type = 'pathcc' - cc_exe = 'pathcc' - cxx_exe = 'pathCC' - - def __init__ (self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__ (self, verbose, dry_run, force) - cc_compiler = self.cc_exe - cxx_compiler = self.cxx_exe - self.set_executables(compiler=cc_compiler, - compiler_so=cc_compiler, - compiler_cxx=cxx_compiler, - linker_exe=cc_compiler, - linker_so=cc_compiler + ' -shared') diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py deleted file mode 100644 index edf56909ab5d..000000000000 --- a/numpy/distutils/system_info.py +++ /dev/null @@ -1,3268 +0,0 @@ -#!/usr/bin/env python3 -""" -This file defines a set of system_info classes for getting -information about various resources (libraries, library directories, -include directories, etc.) in the system. Usage: - info_dict = get_info() - where is a string 'atlas','x11','fftw','lapack','blas', - 'lapack_src', 'blas_src', etc. For a complete list of allowed names, - see the definition of get_info() function below. - - Returned info_dict is a dictionary which is compatible with - distutils.setup keyword arguments. If info_dict == {}, then the - asked resource is not available (system_info could not find it). - - Several *_info classes specify an environment variable to specify - the locations of software. When setting the corresponding environment - variable to 'None' then the software will be ignored, even when it - is available in system. - -Global parameters: - system_info.search_static_first - search static libraries (.a) - in precedence to shared ones (.so, .sl) if enabled. - system_info.verbosity - output the results to stdout if enabled. - -The file 'site.cfg' is looked for in - -1) Directory of main setup.py file being run. -2) Home directory of user running the setup.py file as ~/.numpy-site.cfg -3) System wide directory (location of this file...) 
- -The first one found is used to get system configuration options The -format is that used by ConfigParser (i.e., Windows .INI style). The -section ALL is not intended for general use. - -Appropriate defaults are used if nothing is specified. - -The order of finding the locations of resources is the following: - 1. environment variable - 2. section in site.cfg - 3. DEFAULT section in site.cfg - 4. System default search paths (see ``default_*`` variables below). -Only the first complete match is returned. - -Currently, the following classes are available, along with their section names: - - Numeric_info:Numeric - _numpy_info:Numeric - _pkg_config_info:None - accelerate_info:accelerate - accelerate_lapack_info:accelerate - agg2_info:agg2 - amd_info:amd - atlas_3_10_blas_info:atlas - atlas_3_10_blas_threads_info:atlas - atlas_3_10_info:atlas - atlas_3_10_threads_info:atlas - atlas_blas_info:atlas - atlas_blas_threads_info:atlas - atlas_info:atlas - atlas_threads_info:atlas - blas64__opt_info:ALL # usage recommended (general ILP64 BLAS, 64_ symbol suffix) - blas_ilp64_opt_info:ALL # usage recommended (general ILP64 BLAS) - blas_ilp64_plain_opt_info:ALL # usage recommended (general ILP64 BLAS, no symbol suffix) - blas_info:blas - blas_mkl_info:mkl - blas_ssl2_info:ssl2 - blas_opt_info:ALL # usage recommended - blas_src_info:blas_src - blis_info:blis - boost_python_info:boost_python - dfftw_info:fftw - dfftw_threads_info:fftw - djbfft_info:djbfft - f2py_info:ALL - fft_opt_info:ALL - fftw2_info:fftw - fftw3_info:fftw3 - fftw_info:fftw - fftw_threads_info:fftw - flame_info:flame - freetype2_info:freetype2 - gdk_2_info:gdk_2 - gdk_info:gdk - gdk_pixbuf_2_info:gdk_pixbuf_2 - gdk_pixbuf_xlib_2_info:gdk_pixbuf_xlib_2 - gdk_x11_2_info:gdk_x11_2 - gtkp_2_info:gtkp_2 - gtkp_x11_2_info:gtkp_x11_2 - lapack64__opt_info:ALL # usage recommended (general ILP64 LAPACK, 64_ symbol suffix) - lapack_atlas_3_10_info:atlas - lapack_atlas_3_10_threads_info:atlas - lapack_atlas_info:atlas - 
lapack_atlas_threads_info:atlas - lapack_ilp64_opt_info:ALL # usage recommended (general ILP64 LAPACK) - lapack_ilp64_plain_opt_info:ALL # usage recommended (general ILP64 LAPACK, no symbol suffix) - lapack_info:lapack - lapack_mkl_info:mkl - lapack_ssl2_info:ssl2 - lapack_opt_info:ALL # usage recommended - lapack_src_info:lapack_src - mkl_info:mkl - ssl2_info:ssl2 - numarray_info:numarray - numerix_info:numerix - numpy_info:numpy - openblas64__info:openblas64_ - openblas64__lapack_info:openblas64_ - openblas_clapack_info:openblas - openblas_ilp64_info:openblas_ilp64 - openblas_ilp64_lapack_info:openblas_ilp64 - openblas_info:openblas - openblas_lapack_info:openblas - sfftw_info:fftw - sfftw_threads_info:fftw - system_info:ALL - umfpack_info:umfpack - wx_info:wx - x11_info:x11 - xft_info:xft - -Note that blas_opt_info and lapack_opt_info honor the NPY_BLAS_ORDER -and NPY_LAPACK_ORDER environment variables to determine the order in which -specific BLAS and LAPACK libraries are searched for. - -This search (or autodetection) can be bypassed by defining the environment -variables NPY_BLAS_LIBS and NPY_LAPACK_LIBS, which should then contain the -exact linker flags to use (language will be set to F77). Building against -Netlib BLAS/LAPACK or stub files, in order to be able to switch BLAS and LAPACK -implementations at runtime. If using this to build NumPy itself, it is -recommended to also define NPY_CBLAS_LIBS (assuming your BLAS library has a -CBLAS interface) to enable CBLAS usage for matrix multiplication (unoptimized -otherwise). 
- -Example: ----------- -[DEFAULT] -# default section -library_dirs = /usr/lib:/usr/local/lib:/opt/lib -include_dirs = /usr/include:/usr/local/include:/opt/include -src_dirs = /usr/local/src:/opt/src -# search static libraries (.a) in preference to shared ones (.so) -search_static_first = 0 - -[fftw] -libraries = rfftw, fftw - -[atlas] -library_dirs = /usr/lib/3dnow:/usr/lib/3dnow/atlas -# for overriding the names of the atlas libraries -libraries = lapack, f77blas, cblas, atlas - -[x11] -library_dirs = /usr/X11R6/lib -include_dirs = /usr/X11R6/include ----------- - -Note that the ``libraries`` key is the default setting for libraries. - -Authors: - Pearu Peterson , February 2002 - David M. Cooke , April 2002 - -Copyright 2002 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy (BSD style) license. See LICENSE.txt that came with -this distribution for specifics. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. - -""" -import sys -import os -import re -import copy -import warnings -import subprocess -import textwrap - -from glob import glob -from functools import reduce -from configparser import NoOptionError -from configparser import RawConfigParser as ConfigParser -# It seems that some people are importing ConfigParser from here so is -# good to keep its class name. Use of RawConfigParser is needed in -# order to be able to load path names with percent in them, like -# `feature%2Fcool` which is common on git flow branch names. 
- -from distutils.errors import DistutilsError -from distutils.dist import Distribution -import sysconfig -from numpy.distutils import log -from distutils.util import get_platform - -from numpy.distutils.exec_command import ( - find_executable, filepath_from_subprocess_output, - ) -from numpy.distutils.misc_util import (is_sequence, is_string, - get_shared_lib_extension) -from numpy.distutils.command.config import config as cmd_config -from numpy.distutils import customized_ccompiler as _customized_ccompiler -from numpy.distutils import _shell_utils -import distutils.ccompiler -import tempfile -import shutil - -__all__ = ['system_info'] - -# Determine number of bits -import platform -_bits = {'32bit': 32, '64bit': 64} -platform_bits = _bits[platform.architecture()[0]] - - -global_compiler = None - -def customized_ccompiler(): - global global_compiler - if not global_compiler: - global_compiler = _customized_ccompiler() - return global_compiler - - -def _c_string_literal(s): - """ - Convert a python string into a literal suitable for inclusion into C code - """ - # only these three characters are forbidden in C strings - s = s.replace('\\', r'\\') - s = s.replace('"', r'\"') - s = s.replace('\n', r'\n') - return '"{}"'.format(s) - - -def libpaths(paths, bits): - """Return a list of library paths valid on 32 or 64 bit systems. - - Inputs: - paths : sequence - A sequence of strings (typically paths) - bits : int - An integer, the only valid values are 32 or 64. A ValueError exception - is raised otherwise. 
- - Examples: - - Consider a list of directories - >>> paths = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib'] - - For a 32-bit platform, this is already valid: - >>> np.distutils.system_info.libpaths(paths,32) - ['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib'] - - On 64 bits, we prepend the '64' postfix - >>> np.distutils.system_info.libpaths(paths,64) - ['/usr/X11R6/lib64', '/usr/X11R6/lib', '/usr/X11/lib64', '/usr/X11/lib', - '/usr/lib64', '/usr/lib'] - """ - if bits not in (32, 64): - raise ValueError("Invalid bit size in libpaths: 32 or 64 only") - - # Handle 32bit case - if bits == 32: - return paths - - # Handle 64bit case - out = [] - for p in paths: - out.extend([p + '64', p]) - - return out - - -if sys.platform == 'win32': - default_lib_dirs = ['C:\\', - os.path.join(sysconfig.get_config_var('exec_prefix'), - 'libs')] - default_runtime_dirs = [] - default_include_dirs = [] - default_src_dirs = ['.'] - default_x11_lib_dirs = [] - default_x11_include_dirs = [] - _include_dirs = [ - 'include', - 'include/suitesparse', - ] - _lib_dirs = [ - 'lib', - ] - - _include_dirs = [d.replace('/', os.sep) for d in _include_dirs] - _lib_dirs = [d.replace('/', os.sep) for d in _lib_dirs] - def add_system_root(library_root): - """Add a package manager root to the include directories""" - global default_lib_dirs - global default_include_dirs - - library_root = os.path.normpath(library_root) - - default_lib_dirs.extend( - os.path.join(library_root, d) for d in _lib_dirs) - default_include_dirs.extend( - os.path.join(library_root, d) for d in _include_dirs) - - # VCpkg is the de-facto package manager on windows for C/C++ - # libraries. If it is on the PATH, then we append its paths here. 
- vcpkg = shutil.which('vcpkg') - if vcpkg: - vcpkg_dir = os.path.dirname(vcpkg) - if platform.architecture()[0] == '32bit': - specifier = 'x86' - else: - specifier = 'x64' - - vcpkg_installed = os.path.join(vcpkg_dir, 'installed') - for vcpkg_root in [ - os.path.join(vcpkg_installed, specifier + '-windows'), - os.path.join(vcpkg_installed, specifier + '-windows-static'), - ]: - add_system_root(vcpkg_root) - - # Conda is another popular package manager that provides libraries - conda = shutil.which('conda') - if conda: - conda_dir = os.path.dirname(conda) - add_system_root(os.path.join(conda_dir, '..', 'Library')) - add_system_root(os.path.join(conda_dir, 'Library')) - -else: - default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib', - '/opt/local/lib', '/sw/lib'], platform_bits) - default_runtime_dirs = [] - default_include_dirs = ['/usr/local/include', - '/opt/include', - # path of umfpack under macports - '/opt/local/include/ufsparse', - '/opt/local/include', '/sw/include', - '/usr/include/suitesparse'] - default_src_dirs = ['.', '/usr/local/src', '/opt/src', '/sw/src'] - - default_x11_lib_dirs = libpaths(['/usr/X11R6/lib', '/usr/X11/lib', - '/usr/lib'], platform_bits) - default_x11_include_dirs = ['/usr/X11R6/include', '/usr/X11/include'] - - if os.path.exists('/usr/lib/X11'): - globbed_x11_dir = glob('/usr/lib/*/libX11.so') - if globbed_x11_dir: - x11_so_dir = os.path.split(globbed_x11_dir[0])[0] - default_x11_lib_dirs.extend([x11_so_dir, '/usr/lib/X11']) - default_x11_include_dirs.extend(['/usr/lib/X11/include', - '/usr/include/X11']) - - with open(os.devnull, 'w') as tmp: - try: - p = subprocess.Popen(["gcc", "-print-multiarch"], stdout=subprocess.PIPE, - stderr=tmp) - except (OSError, DistutilsError): - # OSError if gcc is not installed, or SandboxViolation (DistutilsError - # subclass) if an old setuptools bug is triggered (see gh-3160). 
- pass - else: - triplet = str(p.communicate()[0].decode().strip()) - if p.returncode == 0: - # gcc supports the "-print-multiarch" option - default_x11_lib_dirs += [os.path.join("/usr/lib/", triplet)] - default_lib_dirs += [os.path.join("/usr/lib/", triplet)] - - -if os.path.join(sys.prefix, 'lib') not in default_lib_dirs: - default_lib_dirs.insert(0, os.path.join(sys.prefix, 'lib')) - default_include_dirs.append(os.path.join(sys.prefix, 'include')) - default_src_dirs.append(os.path.join(sys.prefix, 'src')) - -default_lib_dirs = [_m for _m in default_lib_dirs if os.path.isdir(_m)] -default_runtime_dirs = [_m for _m in default_runtime_dirs if os.path.isdir(_m)] -default_include_dirs = [_m for _m in default_include_dirs if os.path.isdir(_m)] -default_src_dirs = [_m for _m in default_src_dirs if os.path.isdir(_m)] - -so_ext = get_shared_lib_extension() - - -def get_standard_file(fname): - """Returns a list of files named 'fname' from - 1) System-wide directory (directory-location of this module) - 2) Users HOME directory (os.environ['HOME']) - 3) Local directory - """ - # System-wide file - filenames = [] - try: - f = __file__ - except NameError: - f = sys.argv[0] - sysfile = os.path.join(os.path.split(os.path.abspath(f))[0], - fname) - if os.path.isfile(sysfile): - filenames.append(sysfile) - - # Home directory - # And look for the user config file - try: - f = os.path.expanduser('~') - except KeyError: - pass - else: - user_file = os.path.join(f, fname) - if os.path.isfile(user_file): - filenames.append(user_file) - - # Local file - if os.path.isfile(fname): - filenames.append(os.path.abspath(fname)) - - return filenames - - -def _parse_env_order(base_order, env): - """ Parse an environment variable `env` by splitting with "," and only returning elements from `base_order` - - This method will sequence the environment variable and check for their - individual elements in `base_order`. 
- - The items in the environment variable may be negated via '^item' or '!itema,itemb'. - It must start with ^/! to negate all options. - - Raises - ------ - ValueError: for mixed negated and non-negated orders or multiple negated orders - - Parameters - ---------- - base_order : list of str - the base list of orders - env : str - the environment variable to be parsed, if none is found, `base_order` is returned - - Returns - ------- - allow_order : list of str - allowed orders in lower-case - unknown_order : list of str - for values not overlapping with `base_order` - """ - order_str = os.environ.get(env, None) - - # ensure all base-orders are lower-case (for easier comparison) - base_order = [order.lower() for order in base_order] - if order_str is None: - return base_order, [] - - neg = order_str.startswith('^') or order_str.startswith('!') - # Check format - order_str_l = list(order_str) - sum_neg = order_str_l.count('^') + order_str_l.count('!') - if neg: - if sum_neg > 1: - raise ValueError(f"Environment variable '{env}' may only contain a single (prefixed) negation: {order_str}") - # remove prefix - order_str = order_str[1:] - elif sum_neg > 0: - raise ValueError(f"Environment variable '{env}' may not mix negated an non-negated items: {order_str}") - - # Split and lower case - orders = order_str.lower().split(',') - - # to inform callee about non-overlapping elements - unknown_order = [] - - # if negated, we have to remove from the order - if neg: - allow_order = base_order.copy() - - for order in orders: - if not order: - continue - - if order not in base_order: - unknown_order.append(order) - continue - - if order in allow_order: - allow_order.remove(order) - - else: - allow_order = [] - - for order in orders: - if not order: - continue - - if order not in base_order: - unknown_order.append(order) - continue - - if order not in allow_order: - allow_order.append(order) - - return allow_order, unknown_order - - -def get_info(name, notfound_action=0): - """ - 
notfound_action: - 0 - do nothing - 1 - display warning message - 2 - raise error - """ - cl = {'armpl': armpl_info, - 'blas_armpl': blas_armpl_info, - 'lapack_armpl': lapack_armpl_info, - 'fftw3_armpl': fftw3_armpl_info, - 'atlas': atlas_info, # use lapack_opt or blas_opt instead - 'atlas_threads': atlas_threads_info, # ditto - 'atlas_blas': atlas_blas_info, - 'atlas_blas_threads': atlas_blas_threads_info, - 'lapack_atlas': lapack_atlas_info, # use lapack_opt instead - 'lapack_atlas_threads': lapack_atlas_threads_info, # ditto - 'atlas_3_10': atlas_3_10_info, # use lapack_opt or blas_opt instead - 'atlas_3_10_threads': atlas_3_10_threads_info, # ditto - 'atlas_3_10_blas': atlas_3_10_blas_info, - 'atlas_3_10_blas_threads': atlas_3_10_blas_threads_info, - 'lapack_atlas_3_10': lapack_atlas_3_10_info, # use lapack_opt instead - 'lapack_atlas_3_10_threads': lapack_atlas_3_10_threads_info, # ditto - 'flame': flame_info, # use lapack_opt instead - 'mkl': mkl_info, - 'ssl2': ssl2_info, - # openblas which may or may not have embedded lapack - 'openblas': openblas_info, # use blas_opt instead - # openblas with embedded lapack - 'openblas_lapack': openblas_lapack_info, # use blas_opt instead - 'openblas_clapack': openblas_clapack_info, # use blas_opt instead - 'blis': blis_info, # use blas_opt instead - 'lapack_mkl': lapack_mkl_info, # use lapack_opt instead - 'blas_mkl': blas_mkl_info, # use blas_opt instead - 'lapack_ssl2': lapack_ssl2_info, - 'blas_ssl2': blas_ssl2_info, - 'accelerate': accelerate_info, # use blas_opt instead - 'accelerate_lapack': accelerate_lapack_info, - 'openblas64_': openblas64__info, - 'openblas64__lapack': openblas64__lapack_info, - 'openblas_ilp64': openblas_ilp64_info, - 'openblas_ilp64_lapack': openblas_ilp64_lapack_info, - 'x11': x11_info, - 'fft_opt': fft_opt_info, - 'fftw': fftw_info, - 'fftw2': fftw2_info, - 'fftw3': fftw3_info, - 'dfftw': dfftw_info, - 'sfftw': sfftw_info, - 'fftw_threads': fftw_threads_info, - 'dfftw_threads': 
dfftw_threads_info, - 'sfftw_threads': sfftw_threads_info, - 'djbfft': djbfft_info, - 'blas': blas_info, # use blas_opt instead - 'lapack': lapack_info, # use lapack_opt instead - 'lapack_src': lapack_src_info, - 'blas_src': blas_src_info, - 'numpy': numpy_info, - 'f2py': f2py_info, - 'Numeric': Numeric_info, - 'numeric': Numeric_info, - 'numarray': numarray_info, - 'numerix': numerix_info, - 'lapack_opt': lapack_opt_info, - 'lapack_ilp64_opt': lapack_ilp64_opt_info, - 'lapack_ilp64_plain_opt': lapack_ilp64_plain_opt_info, - 'lapack64__opt': lapack64__opt_info, - 'blas_opt': blas_opt_info, - 'blas_ilp64_opt': blas_ilp64_opt_info, - 'blas_ilp64_plain_opt': blas_ilp64_plain_opt_info, - 'blas64__opt': blas64__opt_info, - 'boost_python': boost_python_info, - 'agg2': agg2_info, - 'wx': wx_info, - 'gdk_pixbuf_xlib_2': gdk_pixbuf_xlib_2_info, - 'gdk-pixbuf-xlib-2.0': gdk_pixbuf_xlib_2_info, - 'gdk_pixbuf_2': gdk_pixbuf_2_info, - 'gdk-pixbuf-2.0': gdk_pixbuf_2_info, - 'gdk': gdk_info, - 'gdk_2': gdk_2_info, - 'gdk-2.0': gdk_2_info, - 'gdk_x11_2': gdk_x11_2_info, - 'gdk-x11-2.0': gdk_x11_2_info, - 'gtkp_x11_2': gtkp_x11_2_info, - 'gtk+-x11-2.0': gtkp_x11_2_info, - 'gtkp_2': gtkp_2_info, - 'gtk+-2.0': gtkp_2_info, - 'xft': xft_info, - 'freetype2': freetype2_info, - 'umfpack': umfpack_info, - 'amd': amd_info, - }.get(name.lower(), system_info) - return cl().get_info(notfound_action) - - -class NotFoundError(DistutilsError): - """Some third-party program or library is not found.""" - - -class AliasedOptionError(DistutilsError): - """ - Aliases entries in config files should not be existing. - In section '{section}' we found multiple appearances of options {options}.""" - - -class AtlasNotFoundError(NotFoundError): - """ - Atlas (http://github.com/math-atlas/math-atlas) libraries not found. 
- Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [atlas]) or by setting - the ATLAS environment variable.""" - - -class FlameNotFoundError(NotFoundError): - """ - FLAME (http://www.cs.utexas.edu/~flame/web/) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [flame]).""" - - -class LapackNotFoundError(NotFoundError): - """ - Lapack (http://www.netlib.org/lapack/) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [lapack]) or by setting - the LAPACK environment variable.""" - - -class LapackSrcNotFoundError(LapackNotFoundError): - """ - Lapack (http://www.netlib.org/lapack/) sources not found. - Directories to search for the sources can be specified in the - numpy/distutils/site.cfg file (section [lapack_src]) or by setting - the LAPACK_SRC environment variable.""" - - -class LapackILP64NotFoundError(NotFoundError): - """ - 64-bit Lapack libraries not found. - Known libraries in numpy/distutils/site.cfg file are: - openblas64_, openblas_ilp64 - """ - -class BlasOptNotFoundError(NotFoundError): - """ - Optimized (vendor) Blas libraries are not found. - Falls back to netlib Blas library which has worse performance. - A better performance should be easily gained by switching - Blas library.""" - -class BlasNotFoundError(NotFoundError): - """ - Blas (http://www.netlib.org/blas/) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [blas]) or by setting - the BLAS environment variable.""" - -class BlasILP64NotFoundError(NotFoundError): - """ - 64-bit Blas libraries not found. - Known libraries in numpy/distutils/site.cfg file are: - openblas64_, openblas_ilp64 - """ - -class BlasSrcNotFoundError(BlasNotFoundError): - """ - Blas (http://www.netlib.org/blas/) sources not found. 
- Directories to search for the sources can be specified in the - numpy/distutils/site.cfg file (section [blas_src]) or by setting - the BLAS_SRC environment variable.""" - - -class FFTWNotFoundError(NotFoundError): - """ - FFTW (http://www.fftw.org/) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [fftw]) or by setting - the FFTW environment variable.""" - - -class DJBFFTNotFoundError(NotFoundError): - """ - DJBFFT (https://cr.yp.to/djbfft.html) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [djbfft]) or by setting - the DJBFFT environment variable.""" - - -class NumericNotFoundError(NotFoundError): - """ - Numeric (https://www.numpy.org/) module not found. - Get it from above location, install it, and retry setup.py.""" - - -class X11NotFoundError(NotFoundError): - """X11 libraries not found.""" - - -class UmfpackNotFoundError(NotFoundError): - """ - UMFPACK sparse solver (https://www.cise.ufl.edu/research/sparse/umfpack/) - not found. Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [umfpack]) or by setting - the UMFPACK environment variable.""" - - -class system_info: - - """ get_info() is the only public method. Don't use others. - """ - dir_env_var = None - # XXX: search_static_first is disabled by default, may disappear in - # future unless it is proved to be useful. - search_static_first = 0 - # The base-class section name is a random word "ALL" and is not really - # intended for general use. It cannot be None nor can it be DEFAULT as - # these break the ConfigParser. 
See gh-15338 - section = 'ALL' - saved_results = {} - - notfounderror = NotFoundError - - def __init__(self, - default_lib_dirs=default_lib_dirs, - default_include_dirs=default_include_dirs, - ): - self.__class__.info = {} - self.local_prefixes = [] - defaults = {'library_dirs': os.pathsep.join(default_lib_dirs), - 'include_dirs': os.pathsep.join(default_include_dirs), - 'runtime_library_dirs': os.pathsep.join(default_runtime_dirs), - 'rpath': '', - 'src_dirs': os.pathsep.join(default_src_dirs), - 'search_static_first': str(self.search_static_first), - 'extra_compile_args': '', 'extra_link_args': ''} - self.cp = ConfigParser(defaults) - self.files = [] - self.files.extend(get_standard_file('.numpy-site.cfg')) - self.files.extend(get_standard_file('site.cfg')) - self.parse_config_files() - - if self.section is not None: - self.search_static_first = self.cp.getboolean( - self.section, 'search_static_first') - assert isinstance(self.search_static_first, int) - - def parse_config_files(self): - self.cp.read(self.files) - if not self.cp.has_section(self.section): - if self.section is not None: - self.cp.add_section(self.section) - - def calc_libraries_info(self): - libs = self.get_libraries() - dirs = self.get_lib_dirs() - # The extensions use runtime_library_dirs - r_dirs = self.get_runtime_lib_dirs() - # Intrinsic distutils use rpath, we simply append both entries - # as though they were one entry - r_dirs.extend(self.get_runtime_lib_dirs(key='rpath')) - info = {} - for lib in libs: - i = self.check_libs(dirs, [lib]) - if i is not None: - dict_append(info, **i) - else: - log.info('Library %s was not found. 
Ignoring' % (lib)) - - if r_dirs: - i = self.check_libs(r_dirs, [lib]) - if i is not None: - # Swap library keywords found to runtime_library_dirs - # the libraries are insisting on the user having defined - # them using the library_dirs, and not necessarily by - # runtime_library_dirs - del i['libraries'] - i['runtime_library_dirs'] = i.pop('library_dirs') - dict_append(info, **i) - else: - log.info('Runtime library %s was not found. Ignoring' % (lib)) - - return info - - def set_info(self, **info): - if info: - lib_info = self.calc_libraries_info() - dict_append(info, **lib_info) - # Update extra information - extra_info = self.calc_extra_info() - dict_append(info, **extra_info) - self.saved_results[self.__class__.__name__] = info - - def get_option_single(self, *options): - """ Ensure that only one of `options` are found in the section - - Parameters - ---------- - *options : list of str - a list of options to be found in the section (``self.section``) - - Returns - ------- - str : - the option that is uniquely found in the section - - Raises - ------ - AliasedOptionError : - in case more than one of the options are found - """ - found = [self.cp.has_option(self.section, opt) for opt in options] - if sum(found) == 1: - return options[found.index(True)] - elif sum(found) == 0: - # nothing is found anyways - return options[0] - - # Else we have more than 1 key found - if AliasedOptionError.__doc__ is None: - raise AliasedOptionError() - raise AliasedOptionError(AliasedOptionError.__doc__.format( - section=self.section, options='[{}]'.format(', '.join(options)))) - - - def has_info(self): - return self.__class__.__name__ in self.saved_results - - def calc_extra_info(self): - """ Updates the information in the current information with - respect to these flags: - extra_compile_args - extra_link_args - """ - info = {} - for key in ['extra_compile_args', 'extra_link_args']: - # Get values - opt = self.cp.get(self.section, key) - opt = 
_shell_utils.NativeParser.split(opt) - if opt: - tmp = {key: opt} - dict_append(info, **tmp) - return info - - def get_info(self, notfound_action=0): - """ Return a dictionary with items that are compatible - with numpy.distutils.setup keyword arguments. - """ - flag = 0 - if not self.has_info(): - flag = 1 - log.info(self.__class__.__name__ + ':') - if hasattr(self, 'calc_info'): - self.calc_info() - if notfound_action: - if not self.has_info(): - if notfound_action == 1: - warnings.warn(self.notfounderror.__doc__, stacklevel=2) - elif notfound_action == 2: - raise self.notfounderror(self.notfounderror.__doc__) - else: - raise ValueError(repr(notfound_action)) - - if not self.has_info(): - log.info(' NOT AVAILABLE') - self.set_info() - else: - log.info(' FOUND:') - - res = self.saved_results.get(self.__class__.__name__) - if log.get_threshold() <= log.INFO and flag: - for k, v in res.items(): - v = str(v) - if k in ['sources', 'libraries'] and len(v) > 270: - v = v[:120] + '...\n...\n...' 
+ v[-120:] - log.info(' %s = %s', k, v) - log.info('') - - return copy.deepcopy(res) - - def get_paths(self, section, key): - dirs = self.cp.get(section, key).split(os.pathsep) - env_var = self.dir_env_var - if env_var: - if is_sequence(env_var): - e0 = env_var[-1] - for e in env_var: - if e in os.environ: - e0 = e - break - if not env_var[0] == e0: - log.info('Setting %s=%s' % (env_var[0], e0)) - env_var = e0 - if env_var and env_var in os.environ: - d = os.environ[env_var] - if d == 'None': - log.info('Disabled %s: %s', - self.__class__.__name__, '(%s is None)' - % (env_var,)) - return [] - if os.path.isfile(d): - dirs = [os.path.dirname(d)] + dirs - l = getattr(self, '_lib_names', []) - if len(l) == 1: - b = os.path.basename(d) - b = os.path.splitext(b)[0] - if b[:3] == 'lib': - log.info('Replacing _lib_names[0]==%r with %r' \ - % (self._lib_names[0], b[3:])) - self._lib_names[0] = b[3:] - else: - ds = d.split(os.pathsep) - ds2 = [] - for d in ds: - if os.path.isdir(d): - ds2.append(d) - for dd in ['include', 'lib']: - d1 = os.path.join(d, dd) - if os.path.isdir(d1): - ds2.append(d1) - dirs = ds2 + dirs - default_dirs = self.cp.get(self.section, key).split(os.pathsep) - dirs.extend(default_dirs) - ret = [] - for d in dirs: - if len(d) > 0 and not os.path.isdir(d): - warnings.warn('Specified path %s is invalid.' 
% d, stacklevel=2) - continue - - if d not in ret: - ret.append(d) - - log.debug('( %s = %s )', key, ':'.join(ret)) - return ret - - def get_lib_dirs(self, key='library_dirs'): - return self.get_paths(self.section, key) - - def get_runtime_lib_dirs(self, key='runtime_library_dirs'): - path = self.get_paths(self.section, key) - if path == ['']: - path = [] - return path - - def get_include_dirs(self, key='include_dirs'): - return self.get_paths(self.section, key) - - def get_src_dirs(self, key='src_dirs'): - return self.get_paths(self.section, key) - - def get_libs(self, key, default): - try: - libs = self.cp.get(self.section, key) - except NoOptionError: - if not default: - return [] - if is_string(default): - return [default] - return default - return [b for b in [a.strip() for a in libs.split(',')] if b] - - def get_libraries(self, key='libraries'): - if hasattr(self, '_lib_names'): - return self.get_libs(key, default=self._lib_names) - else: - return self.get_libs(key, '') - - def library_extensions(self): - c = customized_ccompiler() - static_exts = [] - if c.compiler_type != 'msvc': - # MSVC doesn't understand binutils - static_exts.append('.a') - if sys.platform == 'win32': - static_exts.append('.lib') # .lib is used by MSVC and others - if self.search_static_first: - exts = static_exts + [so_ext] - else: - exts = [so_ext] + static_exts - if sys.platform == 'cygwin': - exts.append('.dll.a') - if sys.platform == 'darwin': - exts.append('.dylib') - return exts - - def check_libs(self, lib_dirs, libs, opt_libs=[]): - """If static or shared libraries are available then return - their info dictionary. - - Checks for all libraries as shared libraries first, then - static (or vice versa if self.search_static_first is True). 
- """ - exts = self.library_extensions() - info = None - for ext in exts: - info = self._check_libs(lib_dirs, libs, opt_libs, [ext]) - if info is not None: - break - if not info: - log.info(' libraries %s not found in %s', ','.join(libs), - lib_dirs) - return info - - def check_libs2(self, lib_dirs, libs, opt_libs=[]): - """If static or shared libraries are available then return - their info dictionary. - - Checks each library for shared or static. - """ - exts = self.library_extensions() - info = self._check_libs(lib_dirs, libs, opt_libs, exts) - if not info: - log.info(' libraries %s not found in %s', ','.join(libs), - lib_dirs) - - return info - - def _find_lib(self, lib_dir, lib, exts): - assert is_string(lib_dir) - # under windows first try without 'lib' prefix - if sys.platform == 'win32': - lib_prefixes = ['', 'lib'] - else: - lib_prefixes = ['lib'] - # for each library name, see if we can find a file for it. - for ext in exts: - for prefix in lib_prefixes: - p = self.combine_paths(lib_dir, prefix + lib + ext) - if p: - break - if p: - assert len(p) == 1 - # ??? splitext on p[0] would do this for cygwin - # doesn't seem correct - if ext == '.dll.a': - lib += '.dll' - if ext == '.lib': - lib = prefix + lib - return lib - - return False - - def _find_libs(self, lib_dirs, libs, exts): - # make sure we preserve the order of libs, as it can be important - found_dirs, found_libs = [], [] - for lib in libs: - for lib_dir in lib_dirs: - found_lib = self._find_lib(lib_dir, lib, exts) - if found_lib: - found_libs.append(found_lib) - if lib_dir not in found_dirs: - found_dirs.append(lib_dir) - break - return found_dirs, found_libs - - def _check_libs(self, lib_dirs, libs, opt_libs, exts): - """Find mandatory and optional libs in expected paths. - - Missing optional libraries are silently forgotten. 
- """ - if not is_sequence(lib_dirs): - lib_dirs = [lib_dirs] - # First, try to find the mandatory libraries - found_dirs, found_libs = self._find_libs(lib_dirs, libs, exts) - if len(found_libs) > 0 and len(found_libs) == len(libs): - # Now, check for optional libraries - opt_found_dirs, opt_found_libs = self._find_libs(lib_dirs, opt_libs, exts) - found_libs.extend(opt_found_libs) - for lib_dir in opt_found_dirs: - if lib_dir not in found_dirs: - found_dirs.append(lib_dir) - info = {'libraries': found_libs, 'library_dirs': found_dirs} - return info - else: - return None - - def combine_paths(self, *args): - """Return a list of existing paths composed by all combinations - of items from the arguments. - """ - return combine_paths(*args) - - -class fft_opt_info(system_info): - - def calc_info(self): - info = {} - fftw_info = get_info('fftw3') or get_info('fftw2') or get_info('dfftw') - djbfft_info = get_info('djbfft') - if fftw_info: - dict_append(info, **fftw_info) - if djbfft_info: - dict_append(info, **djbfft_info) - self.set_info(**info) - return - - -class fftw_info(system_info): - #variables to override - section = 'fftw' - dir_env_var = 'FFTW' - notfounderror = FFTWNotFoundError - ver_info = [{'name':'fftw3', - 'libs':['fftw3'], - 'includes':['fftw3.h'], - 'macros':[('SCIPY_FFTW3_H', None)]}, - {'name':'fftw2', - 'libs':['rfftw', 'fftw'], - 'includes':['fftw.h', 'rfftw.h'], - 'macros':[('SCIPY_FFTW_H', None)]}] - - def calc_ver_info(self, ver_param): - """Returns True on successful version detection, else False""" - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - - opt = self.get_option_single(self.section + '_libs', 'libraries') - libs = self.get_libs(opt, ver_param['libs']) - info = self.check_libs(lib_dirs, libs) - if info is not None: - flag = 0 - for d in incl_dirs: - if len(self.combine_paths(d, ver_param['includes'])) \ - == len(ver_param['includes']): - dict_append(info, include_dirs=[d]) - flag = 1 - break - if flag: - 
dict_append(info, define_macros=ver_param['macros']) - else: - info = None - if info is not None: - self.set_info(**info) - return True - else: - log.info(' %s not found' % (ver_param['name'])) - return False - - def calc_info(self): - for i in self.ver_info: - if self.calc_ver_info(i): - break - - -class fftw2_info(fftw_info): - #variables to override - section = 'fftw' - dir_env_var = 'FFTW' - notfounderror = FFTWNotFoundError - ver_info = [{'name':'fftw2', - 'libs':['rfftw', 'fftw'], - 'includes':['fftw.h', 'rfftw.h'], - 'macros':[('SCIPY_FFTW_H', None)]} - ] - - -class fftw3_info(fftw_info): - #variables to override - section = 'fftw3' - dir_env_var = 'FFTW3' - notfounderror = FFTWNotFoundError - ver_info = [{'name':'fftw3', - 'libs':['fftw3'], - 'includes':['fftw3.h'], - 'macros':[('SCIPY_FFTW3_H', None)]}, - ] - - -class fftw3_armpl_info(fftw_info): - section = 'fftw3' - dir_env_var = 'ARMPL_DIR' - notfounderror = FFTWNotFoundError - ver_info = [{'name': 'fftw3', - 'libs': ['armpl_lp64_mp'], - 'includes': ['fftw3.h'], - 'macros': [('SCIPY_FFTW3_H', None)]}] - - -class dfftw_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'dfftw', - 'libs':['drfftw', 'dfftw'], - 'includes':['dfftw.h', 'drfftw.h'], - 'macros':[('SCIPY_DFFTW_H', None)]}] - - -class sfftw_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'sfftw', - 'libs':['srfftw', 'sfftw'], - 'includes':['sfftw.h', 'srfftw.h'], - 'macros':[('SCIPY_SFFTW_H', None)]}] - - -class fftw_threads_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'fftw threads', - 'libs':['rfftw_threads', 'fftw_threads'], - 'includes':['fftw_threads.h', 'rfftw_threads.h'], - 'macros':[('SCIPY_FFTW_THREADS_H', None)]}] - - -class dfftw_threads_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'dfftw threads', - 'libs':['drfftw_threads', 'dfftw_threads'], - 'includes':['dfftw_threads.h', 'drfftw_threads.h'], - 
'macros':[('SCIPY_DFFTW_THREADS_H', None)]}] - - -class sfftw_threads_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'sfftw threads', - 'libs':['srfftw_threads', 'sfftw_threads'], - 'includes':['sfftw_threads.h', 'srfftw_threads.h'], - 'macros':[('SCIPY_SFFTW_THREADS_H', None)]}] - - -class djbfft_info(system_info): - section = 'djbfft' - dir_env_var = 'DJBFFT' - notfounderror = DJBFFTNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend(self.combine_paths(d, ['djbfft']) + [d]) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - info = None - for d in lib_dirs: - p = self.combine_paths(d, ['djbfft.a']) - if p: - info = {'extra_objects': p} - break - p = self.combine_paths(d, ['libdjbfft.a', 'libdjbfft' + so_ext]) - if p: - info = {'libraries': ['djbfft'], 'library_dirs': [d]} - break - if info is None: - return - for d in incl_dirs: - if len(self.combine_paths(d, ['fftc8.h', 'fftfreq.h'])) == 2: - dict_append(info, include_dirs=[d], - define_macros=[('SCIPY_DJBFFT_H', None)]) - self.set_info(**info) - return - return - - -class mkl_info(system_info): - section = 'mkl' - dir_env_var = 'MKLROOT' - _lib_mkl = ['mkl_rt'] - - def get_mkl_rootdir(self): - mklroot = os.environ.get('MKLROOT', None) - if mklroot is not None: - return mklroot - paths = os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep) - ld_so_conf = '/etc/ld.so.conf' - if os.path.isfile(ld_so_conf): - with open(ld_so_conf) as f: - for d in f: - d = d.strip() - if d: - paths.append(d) - intel_mkl_dirs = [] - for path in paths: - path_atoms = path.split(os.sep) - for m in path_atoms: - if m.startswith('mkl'): - d = os.sep.join(path_atoms[:path_atoms.index(m) + 2]) - intel_mkl_dirs.append(d) - break - for d in paths: - dirs = glob(os.path.join(d, 'mkl', '*')) - dirs += 
glob(os.path.join(d, 'mkl*')) - for sub_dir in dirs: - if os.path.isdir(os.path.join(sub_dir, 'lib')): - return sub_dir - return None - - def __init__(self): - mklroot = self.get_mkl_rootdir() - if mklroot is None: - system_info.__init__(self) - else: - from .cpuinfo import cpu - if cpu.is_Itanium(): - plt = '64' - elif cpu.is_Intel() and cpu.is_64bit(): - plt = 'intel64' - else: - plt = '32' - system_info.__init__( - self, - default_lib_dirs=[os.path.join(mklroot, 'lib', plt)], - default_include_dirs=[os.path.join(mklroot, 'include')]) - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - opt = self.get_option_single('mkl_libs', 'libraries') - mkl_libs = self.get_libs(opt, self._lib_mkl) - info = self.check_libs2(lib_dirs, mkl_libs) - if info is None: - return - dict_append(info, - define_macros=[('SCIPY_MKL_H', None), - ('HAVE_CBLAS', None)], - include_dirs=incl_dirs) - if sys.platform == 'win32': - pass # win32 has no pthread library - else: - dict_append(info, libraries=['pthread']) - self.set_info(**info) - - -class lapack_mkl_info(mkl_info): - pass - - -class blas_mkl_info(mkl_info): - pass - - -class ssl2_info(system_info): - section = 'ssl2' - dir_env_var = 'SSL2_DIR' - # Multi-threaded version. Python itself must be built by Fujitsu compiler. 
- _lib_ssl2 = ['fjlapackexsve'] - # Single-threaded version - #_lib_ssl2 = ['fjlapacksve'] - - def get_tcsds_rootdir(self): - tcsdsroot = os.environ.get('TCSDS_PATH', None) - if tcsdsroot is not None: - return tcsdsroot - return None - - def __init__(self): - tcsdsroot = self.get_tcsds_rootdir() - if tcsdsroot is None: - system_info.__init__(self) - else: - system_info.__init__( - self, - default_lib_dirs=[os.path.join(tcsdsroot, 'lib64')], - default_include_dirs=[os.path.join(tcsdsroot, - 'clang-comp/include')]) - - def calc_info(self): - tcsdsroot = self.get_tcsds_rootdir() - - lib_dirs = self.get_lib_dirs() - if lib_dirs is None: - lib_dirs = os.path.join(tcsdsroot, 'lib64') - - incl_dirs = self.get_include_dirs() - if incl_dirs is None: - incl_dirs = os.path.join(tcsdsroot, 'clang-comp/include') - - ssl2_libs = self.get_libs('ssl2_libs', self._lib_ssl2) - - info = self.check_libs2(lib_dirs, ssl2_libs) - if info is None: - return - dict_append(info, - define_macros=[('HAVE_CBLAS', None), - ('HAVE_SSL2', 1)], - include_dirs=incl_dirs,) - self.set_info(**info) - - -class lapack_ssl2_info(ssl2_info): - pass - - -class blas_ssl2_info(ssl2_info): - pass - - - -class armpl_info(system_info): - section = 'armpl' - dir_env_var = 'ARMPL_DIR' - _lib_armpl = ['armpl_lp64_mp'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - armpl_libs = self.get_libs('armpl_libs', self._lib_armpl) - info = self.check_libs2(lib_dirs, armpl_libs) - if info is None: - return - dict_append(info, - define_macros=[('SCIPY_MKL_H', None), - ('HAVE_CBLAS', None)], - include_dirs=incl_dirs) - self.set_info(**info) - -class lapack_armpl_info(armpl_info): - pass - -class blas_armpl_info(armpl_info): - pass - - -class atlas_info(system_info): - section = 'atlas' - dir_env_var = 'ATLAS' - _lib_names = ['f77blas', 'cblas'] - if sys.platform[:7] == 'freebsd': - _lib_atlas = ['atlas_r'] - _lib_lapack = ['alapack_r'] - else: - _lib_atlas = ['atlas'] - 
_lib_lapack = ['lapack'] - - notfounderror = AtlasNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend(self.combine_paths(d, ['atlas*', 'ATLAS*', - 'sse', '3dnow', 'sse2']) + [d]) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - info = {} - opt = self.get_option_single('atlas_libs', 'libraries') - atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas) - lapack_libs = self.get_libs('lapack_libs', self._lib_lapack) - atlas = None - lapack = None - atlas_1 = None - for d in lib_dirs: - atlas = self.check_libs2(d, atlas_libs, []) - if atlas is not None: - lib_dirs2 = [d] + self.combine_paths(d, ['atlas*', 'ATLAS*']) - lapack = self.check_libs2(lib_dirs2, lapack_libs, []) - if lapack is not None: - break - if atlas: - atlas_1 = atlas - log.info(self.__class__) - if atlas is None: - atlas = atlas_1 - if atlas is None: - return - include_dirs = self.get_include_dirs() - h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) - h = h[0] - if h: - h = os.path.dirname(h) - dict_append(info, include_dirs=[h]) - info['language'] = 'c' - if lapack is not None: - dict_append(info, **lapack) - dict_append(info, **atlas) - elif 'lapack_atlas' in atlas['libraries']: - dict_append(info, **atlas) - dict_append(info, - define_macros=[('ATLAS_WITH_LAPACK_ATLAS', None)]) - self.set_info(**info) - return - else: - dict_append(info, **atlas) - dict_append(info, define_macros=[('ATLAS_WITHOUT_LAPACK', None)]) - message = textwrap.dedent(""" - ********************************************************************* - Could not find lapack library within the ATLAS installation. - ********************************************************************* - """) - warnings.warn(message, stacklevel=2) - self.set_info(**info) - return - - # Check if lapack library is complete, only warn if it is not. 
- lapack_dir = lapack['library_dirs'][0] - lapack_name = lapack['libraries'][0] - lapack_lib = None - lib_prefixes = ['lib'] - if sys.platform == 'win32': - lib_prefixes.append('') - for e in self.library_extensions(): - for prefix in lib_prefixes: - fn = os.path.join(lapack_dir, prefix + lapack_name + e) - if os.path.exists(fn): - lapack_lib = fn - break - if lapack_lib: - break - if lapack_lib is not None: - sz = os.stat(lapack_lib)[6] - if sz <= 4000 * 1024: - message = textwrap.dedent(""" - ********************************************************************* - Lapack library (from ATLAS) is probably incomplete: - size of %s is %sk (expected >4000k) - - Follow the instructions in the KNOWN PROBLEMS section of the file - numpy/INSTALL.txt. - ********************************************************************* - """) % (lapack_lib, sz / 1024) - warnings.warn(message, stacklevel=2) - else: - info['language'] = 'f77' - - atlas_version, atlas_extra_info = get_atlas_version(**atlas) - dict_append(info, **atlas_extra_info) - - self.set_info(**info) - - -class atlas_blas_info(atlas_info): - _lib_names = ['f77blas', 'cblas'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - info = {} - opt = self.get_option_single('atlas_libs', 'libraries') - atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas) - atlas = self.check_libs2(lib_dirs, atlas_libs, []) - if atlas is None: - return - include_dirs = self.get_include_dirs() - h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) - h = h[0] - if h: - h = os.path.dirname(h) - dict_append(info, include_dirs=[h]) - info['language'] = 'c' - info['define_macros'] = [('HAVE_CBLAS', None)] - - atlas_version, atlas_extra_info = get_atlas_version(**atlas) - dict_append(atlas, **atlas_extra_info) - - dict_append(info, **atlas) - - self.set_info(**info) - return - - -class atlas_threads_info(atlas_info): - dir_env_var = ['PTATLAS', 'ATLAS'] - _lib_names = ['ptf77blas', 'ptcblas'] - - -class 
atlas_blas_threads_info(atlas_blas_info): - dir_env_var = ['PTATLAS', 'ATLAS'] - _lib_names = ['ptf77blas', 'ptcblas'] - - -class lapack_atlas_info(atlas_info): - _lib_names = ['lapack_atlas'] + atlas_info._lib_names - - -class lapack_atlas_threads_info(atlas_threads_info): - _lib_names = ['lapack_atlas'] + atlas_threads_info._lib_names - - -class atlas_3_10_info(atlas_info): - _lib_names = ['satlas'] - _lib_atlas = _lib_names - _lib_lapack = _lib_names - - -class atlas_3_10_blas_info(atlas_3_10_info): - _lib_names = ['satlas'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - info = {} - opt = self.get_option_single('atlas_lib', 'libraries') - atlas_libs = self.get_libs(opt, self._lib_names) - atlas = self.check_libs2(lib_dirs, atlas_libs, []) - if atlas is None: - return - include_dirs = self.get_include_dirs() - h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) - h = h[0] - if h: - h = os.path.dirname(h) - dict_append(info, include_dirs=[h]) - info['language'] = 'c' - info['define_macros'] = [('HAVE_CBLAS', None)] - - atlas_version, atlas_extra_info = get_atlas_version(**atlas) - dict_append(atlas, **atlas_extra_info) - - dict_append(info, **atlas) - - self.set_info(**info) - return - - -class atlas_3_10_threads_info(atlas_3_10_info): - dir_env_var = ['PTATLAS', 'ATLAS'] - _lib_names = ['tatlas'] - _lib_atlas = _lib_names - _lib_lapack = _lib_names - - -class atlas_3_10_blas_threads_info(atlas_3_10_blas_info): - dir_env_var = ['PTATLAS', 'ATLAS'] - _lib_names = ['tatlas'] - - -class lapack_atlas_3_10_info(atlas_3_10_info): - pass - - -class lapack_atlas_3_10_threads_info(atlas_3_10_threads_info): - pass - - -class lapack_info(system_info): - section = 'lapack' - dir_env_var = 'LAPACK' - _lib_names = ['lapack'] - notfounderror = LapackNotFoundError - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - opt = self.get_option_single('lapack_libs', 'libraries') - lapack_libs = self.get_libs(opt, self._lib_names) - info = 
self.check_libs(lib_dirs, lapack_libs, []) - if info is None: - return - info['language'] = 'f77' - self.set_info(**info) - - -class lapack_src_info(system_info): - # LAPACK_SRC is deprecated, please do not use this! - # Build or install a BLAS library via your package manager or from - # source separately. - section = 'lapack_src' - dir_env_var = 'LAPACK_SRC' - notfounderror = LapackSrcNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d, ['LAPACK*/SRC', 'SRC'])) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d, 'dgesv.f')): - src_dir = d - break - if not src_dir: - #XXX: Get sources from netlib. May be ask first. - return - # The following is extracted from LAPACK-3.0/SRC/Makefile. - # Added missing names from lapack-lite-3.1.1/SRC/Makefile - # while keeping removed names for Lapack-3.0 compatibility. 
- allaux = ''' - ilaenv ieeeck lsame lsamen xerbla - iparmq - ''' # *.f - laux = ''' - bdsdc bdsqr disna labad lacpy ladiv lae2 laebz laed0 laed1 - laed2 laed3 laed4 laed5 laed6 laed7 laed8 laed9 laeda laev2 - lagtf lagts lamch lamrg lanst lapy2 lapy3 larnv larrb larre - larrf lartg laruv las2 lascl lasd0 lasd1 lasd2 lasd3 lasd4 - lasd5 lasd6 lasd7 lasd8 lasd9 lasda lasdq lasdt laset lasq1 - lasq2 lasq3 lasq4 lasq5 lasq6 lasr lasrt lassq lasv2 pttrf - stebz stedc steqr sterf - - larra larrc larrd larr larrk larrj larrr laneg laisnan isnan - lazq3 lazq4 - ''' # [s|d]*.f - lasrc = ''' - gbbrd gbcon gbequ gbrfs gbsv gbsvx gbtf2 gbtrf gbtrs gebak - gebal gebd2 gebrd gecon geequ gees geesx geev geevx gegs gegv - gehd2 gehrd gelq2 gelqf gels gelsd gelss gelsx gelsy geql2 - geqlf geqp3 geqpf geqr2 geqrf gerfs gerq2 gerqf gesc2 gesdd - gesv gesvd gesvx getc2 getf2 getrf getri getrs ggbak ggbal - gges ggesx ggev ggevx ggglm gghrd gglse ggqrf ggrqf ggsvd - ggsvp gtcon gtrfs gtsv gtsvx gttrf gttrs gtts2 hgeqz hsein - hseqr labrd lacon laein lags2 lagtm lahqr lahrd laic1 lals0 - lalsa lalsd langb lange langt lanhs lansb lansp lansy lantb - lantp lantr lapll lapmt laqgb laqge laqp2 laqps laqsb laqsp - laqsy lar1v lar2v larf larfb larfg larft larfx largv larrv - lartv larz larzb larzt laswp lasyf latbs latdf latps latrd - latrs latrz latzm lauu2 lauum pbcon pbequ pbrfs pbstf pbsv - pbsvx pbtf2 pbtrf pbtrs pocon poequ porfs posv posvx potf2 - potrf potri potrs ppcon ppequ pprfs ppsv ppsvx pptrf pptri - pptrs ptcon pteqr ptrfs ptsv ptsvx pttrs ptts2 spcon sprfs - spsv spsvx sptrf sptri sptrs stegr stein sycon syrfs sysv - sysvx sytf2 sytrf sytri sytrs tbcon tbrfs tbtrs tgevc tgex2 - tgexc tgsen tgsja tgsna tgsy2 tgsyl tpcon tprfs tptri tptrs - trcon trevc trexc trrfs trsen trsna trsyl trti2 trtri trtrs - tzrqf tzrzf - - lacn2 lahr2 stemr laqr0 laqr1 laqr2 laqr3 laqr4 laqr5 - ''' # [s|c|d|z]*.f - sd_lasrc = ''' - laexc lag2 lagv2 laln2 lanv2 laqtr lasy2 opgtr opmtr org2l - org2r 
orgbr orghr orgl2 orglq orgql orgqr orgr2 orgrq orgtr - orm2l orm2r ormbr ormhr orml2 ormlq ormql ormqr ormr2 ormr3 - ormrq ormrz ormtr rscl sbev sbevd sbevx sbgst sbgv sbgvd sbgvx - sbtrd spev spevd spevx spgst spgv spgvd spgvx sptrd stev stevd - stevr stevx syev syevd syevr syevx sygs2 sygst sygv sygvd - sygvx sytd2 sytrd - ''' # [s|d]*.f - cz_lasrc = ''' - bdsqr hbev hbevd hbevx hbgst hbgv hbgvd hbgvx hbtrd hecon heev - heevd heevr heevx hegs2 hegst hegv hegvd hegvx herfs hesv - hesvx hetd2 hetf2 hetrd hetrf hetri hetrs hpcon hpev hpevd - hpevx hpgst hpgv hpgvd hpgvx hprfs hpsv hpsvx hptrd hptrf - hptri hptrs lacgv lacp2 lacpy lacrm lacrt ladiv laed0 laed7 - laed8 laesy laev2 lahef lanhb lanhe lanhp lanht laqhb laqhe - laqhp larcm larnv lartg lascl laset lasr lassq pttrf rot spmv - spr stedc steqr symv syr ung2l ung2r ungbr unghr ungl2 unglq - ungql ungqr ungr2 ungrq ungtr unm2l unm2r unmbr unmhr unml2 - unmlq unmql unmqr unmr2 unmr3 unmrq unmrz unmtr upgtr upmtr - ''' # [c|z]*.f - ####### - sclaux = laux + ' econd ' # s*.f - dzlaux = laux + ' secnd ' # d*.f - slasrc = lasrc + sd_lasrc # s*.f - dlasrc = lasrc + sd_lasrc # d*.f - clasrc = lasrc + cz_lasrc + ' srot srscl ' # c*.f - zlasrc = lasrc + cz_lasrc + ' drot drscl ' # z*.f - oclasrc = ' icmax1 scsum1 ' # *.f - ozlasrc = ' izmax1 dzsum1 ' # *.f - sources = ['s%s.f' % f for f in (sclaux + slasrc).split()] \ - + ['d%s.f' % f for f in (dzlaux + dlasrc).split()] \ - + ['c%s.f' % f for f in (clasrc).split()] \ - + ['z%s.f' % f for f in (zlasrc).split()] \ - + ['%s.f' % f for f in (allaux + oclasrc + ozlasrc).split()] - sources = [os.path.join(src_dir, f) for f in sources] - # Lapack 3.1: - src_dir2 = os.path.join(src_dir, '..', 'INSTALL') - sources += [os.path.join(src_dir2, p + 'lamch.f') for p in 'sdcz'] - # Lapack 3.2.1: - sources += [os.path.join(src_dir, p + 'larfp.f') for p in 'sdcz'] - sources += [os.path.join(src_dir, 'ila' + p + 'lr.f') for p in 'sdcz'] - sources += [os.path.join(src_dir, 'ila' + p + 
'lc.f') for p in 'sdcz'] - # Should we check here actual existence of source files? - # Yes, the file listing is different between 3.0 and 3.1 - # versions. - sources = [f for f in sources if os.path.isfile(f)] - info = {'sources': sources, 'language': 'f77'} - self.set_info(**info) - -atlas_version_c_text = r''' -/* This file is generated from numpy/distutils/system_info.py */ -void ATL_buildinfo(void); -int main(void) { - ATL_buildinfo(); - return 0; -} -''' - -_cached_atlas_version = {} - - -def get_atlas_version(**config): - libraries = config.get('libraries', []) - library_dirs = config.get('library_dirs', []) - key = (tuple(libraries), tuple(library_dirs)) - if key in _cached_atlas_version: - return _cached_atlas_version[key] - c = cmd_config(Distribution()) - atlas_version = None - info = {} - try: - s, o = c.get_output(atlas_version_c_text, - libraries=libraries, library_dirs=library_dirs, - ) - if s and re.search(r'undefined reference to `_gfortran', o, re.M): - s, o = c.get_output(atlas_version_c_text, - libraries=libraries + ['gfortran'], - library_dirs=library_dirs, - ) - if not s: - warnings.warn(textwrap.dedent(""" - ***************************************************** - Linkage with ATLAS requires gfortran. Use - - python setup.py config_fc --fcompiler=gnu95 ... - - when building extension libraries that use ATLAS. - Make sure that -lgfortran is used for C++ extensions. 
- ***************************************************** - """), stacklevel=2) - dict_append(info, language='f90', - define_macros=[('ATLAS_REQUIRES_GFORTRAN', None)]) - except Exception: # failed to get version from file -- maybe on Windows - # look at directory name - for o in library_dirs: - m = re.search(r'ATLAS_(?P\d+[.]\d+[.]\d+)_', o) - if m: - atlas_version = m.group('version') - if atlas_version is not None: - break - - # final choice --- look at ATLAS_VERSION environment - # variable - if atlas_version is None: - atlas_version = os.environ.get('ATLAS_VERSION', None) - if atlas_version: - dict_append(info, define_macros=[( - 'ATLAS_INFO', _c_string_literal(atlas_version)) - ]) - else: - dict_append(info, define_macros=[('NO_ATLAS_INFO', -1)]) - return atlas_version or '?.?.?', info - - if not s: - m = re.search(r'ATLAS version (?P\d+[.]\d+[.]\d+)', o) - if m: - atlas_version = m.group('version') - if atlas_version is None: - if re.search(r'undefined symbol: ATL_buildinfo', o, re.M): - atlas_version = '3.2.1_pre3.3.6' - else: - log.info('Status: %d', s) - log.info('Output: %s', o) - - elif atlas_version == '3.2.1_pre3.3.6': - dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)]) - else: - dict_append(info, define_macros=[( - 'ATLAS_INFO', _c_string_literal(atlas_version)) - ]) - result = _cached_atlas_version[key] = atlas_version, info - return result - - -class lapack_opt_info(system_info): - notfounderror = LapackNotFoundError - - # List of all known LAPACK libraries, in the default order - lapack_order = ['armpl', 'mkl', 'ssl2', 'openblas', 'flame', - 'accelerate', 'atlas', 'lapack'] - order_env_var_name = 'NPY_LAPACK_ORDER' - - def _calc_info_armpl(self): - info = get_info('lapack_armpl') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_mkl(self): - info = get_info('lapack_mkl') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_ssl2(self): - info = get_info('lapack_ssl2') - if info: - 
self.set_info(**info) - return True - return False - - def _calc_info_openblas(self): - info = get_info('openblas_lapack') - if info: - self.set_info(**info) - return True - info = get_info('openblas_clapack') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_flame(self): - info = get_info('flame') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_atlas(self): - info = get_info('atlas_3_10_threads') - if not info: - info = get_info('atlas_3_10') - if not info: - info = get_info('atlas_threads') - if not info: - info = get_info('atlas') - if info: - # Figure out if ATLAS has lapack... - # If not we need the lapack library, but not BLAS! - l = info.get('define_macros', []) - if ('ATLAS_WITH_LAPACK_ATLAS', None) in l \ - or ('ATLAS_WITHOUT_LAPACK', None) in l: - # Get LAPACK (with possible warnings) - # If not found we don't accept anything - # since we can't use ATLAS with LAPACK! - lapack_info = self._get_info_lapack() - if not lapack_info: - return False - dict_append(info, **lapack_info) - self.set_info(**info) - return True - return False - - def _calc_info_accelerate(self): - info = get_info('accelerate') - if info: - self.set_info(**info) - return True - return False - - def _get_info_blas(self): - # Default to get the optimized BLAS implementation - info = get_info('blas_opt') - if not info: - warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3) - info_src = get_info('blas_src') - if not info_src: - warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3) - return {} - dict_append(info, libraries=[('fblas_src', info_src)]) - return info - - def _get_info_lapack(self): - info = get_info('lapack') - if not info: - warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=3) - info_src = get_info('lapack_src') - if not info_src: - warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=3) - return {} - dict_append(info, libraries=[('flapack_src', info_src)]) - return info - 
- def _calc_info_lapack(self): - info = self._get_info_lapack() - if info: - info_blas = self._get_info_blas() - dict_append(info, **info_blas) - dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)]) - self.set_info(**info) - return True - return False - - def _calc_info_from_envvar(self): - info = {} - info['language'] = 'f77' - info['libraries'] = [] - info['include_dirs'] = [] - info['define_macros'] = [] - info['extra_link_args'] = os.environ['NPY_LAPACK_LIBS'].split() - self.set_info(**info) - return True - - def _calc_info(self, name): - return getattr(self, '_calc_info_{}'.format(name))() - - def calc_info(self): - lapack_order, unknown_order = _parse_env_order(self.lapack_order, self.order_env_var_name) - if len(unknown_order) > 0: - raise ValueError("lapack_opt_info user defined " - "LAPACK order has unacceptable " - "values: {}".format(unknown_order)) - - if 'NPY_LAPACK_LIBS' in os.environ: - # Bypass autodetection, set language to F77 and use env var linker - # flags directly - self._calc_info_from_envvar() - return - - for lapack in lapack_order: - if self._calc_info(lapack): - return - - if 'lapack' not in lapack_order: - # Since the user may request *not* to use any library, we still need - # to raise warnings to signal missing packages! 
- warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=2) - warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=2) - - -class _ilp64_opt_info_mixin: - symbol_suffix = None - symbol_prefix = None - - def _check_info(self, info): - macros = dict(info.get('define_macros', [])) - prefix = macros.get('BLAS_SYMBOL_PREFIX', '') - suffix = macros.get('BLAS_SYMBOL_SUFFIX', '') - - if self.symbol_prefix not in (None, prefix): - return False - - if self.symbol_suffix not in (None, suffix): - return False - - return bool(info) - - -class lapack_ilp64_opt_info(lapack_opt_info, _ilp64_opt_info_mixin): - notfounderror = LapackILP64NotFoundError - lapack_order = ['openblas64_', 'openblas_ilp64', 'accelerate'] - order_env_var_name = 'NPY_LAPACK_ILP64_ORDER' - - def _calc_info(self, name): - print('lapack_ilp64_opt_info._calc_info(name=%s)' % (name)) - info = get_info(name + '_lapack') - if self._check_info(info): - self.set_info(**info) - return True - else: - print('%s_lapack does not exist' % (name)) - return False - - -class lapack_ilp64_plain_opt_info(lapack_ilp64_opt_info): - # Same as lapack_ilp64_opt_info, but fix symbol names - symbol_prefix = '' - symbol_suffix = '' - - -class lapack64__opt_info(lapack_ilp64_opt_info): - symbol_prefix = '' - symbol_suffix = '64_' - - -class blas_opt_info(system_info): - notfounderror = BlasNotFoundError - # List of all known BLAS libraries, in the default order - - blas_order = ['armpl', 'mkl', 'ssl2', 'blis', 'openblas', - 'accelerate', 'atlas', 'blas'] - order_env_var_name = 'NPY_BLAS_ORDER' - - def _calc_info_armpl(self): - info = get_info('blas_armpl') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_mkl(self): - info = get_info('blas_mkl') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_ssl2(self): - info = get_info('blas_ssl2') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_blis(self): - info = get_info('blis') - 
if info: - self.set_info(**info) - return True - return False - - def _calc_info_openblas(self): - info = get_info('openblas') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_atlas(self): - info = get_info('atlas_3_10_blas_threads') - if not info: - info = get_info('atlas_3_10_blas') - if not info: - info = get_info('atlas_blas_threads') - if not info: - info = get_info('atlas_blas') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_accelerate(self): - info = get_info('accelerate') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_blas(self): - # Warn about a non-optimized BLAS library - warnings.warn(BlasOptNotFoundError.__doc__ or '', stacklevel=3) - info = {} - dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)]) - - blas = get_info('blas') - if blas: - dict_append(info, **blas) - else: - # Not even BLAS was found! - warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3) - - blas_src = get_info('blas_src') - if not blas_src: - warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3) - return False - dict_append(info, libraries=[('fblas_src', blas_src)]) - - self.set_info(**info) - return True - - def _calc_info_from_envvar(self): - info = {} - info['language'] = 'f77' - info['libraries'] = [] - info['include_dirs'] = [] - info['define_macros'] = [] - info['extra_link_args'] = os.environ['NPY_BLAS_LIBS'].split() - if 'NPY_CBLAS_LIBS' in os.environ: - info['define_macros'].append(('HAVE_CBLAS', None)) - info['extra_link_args'].extend( - os.environ['NPY_CBLAS_LIBS'].split()) - self.set_info(**info) - return True - - def _calc_info(self, name): - return getattr(self, '_calc_info_{}'.format(name))() - - def calc_info(self): - blas_order, unknown_order = _parse_env_order(self.blas_order, self.order_env_var_name) - if len(unknown_order) > 0: - raise ValueError("blas_opt_info user defined BLAS order has unacceptable values: {}".format(unknown_order)) - - 
if 'NPY_BLAS_LIBS' in os.environ: - # Bypass autodetection, set language to F77 and use env var linker - # flags directly - self._calc_info_from_envvar() - return - - for blas in blas_order: - if self._calc_info(blas): - return - - if 'blas' not in blas_order: - # Since the user may request *not* to use any library, we still need - # to raise warnings to signal missing packages! - warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=2) - warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=2) - - -class blas_ilp64_opt_info(blas_opt_info, _ilp64_opt_info_mixin): - notfounderror = BlasILP64NotFoundError - blas_order = ['openblas64_', 'openblas_ilp64', 'accelerate'] - order_env_var_name = 'NPY_BLAS_ILP64_ORDER' - - def _calc_info(self, name): - info = get_info(name) - if self._check_info(info): - self.set_info(**info) - return True - return False - - -class blas_ilp64_plain_opt_info(blas_ilp64_opt_info): - symbol_prefix = '' - symbol_suffix = '' - - -class blas64__opt_info(blas_ilp64_opt_info): - symbol_prefix = '' - symbol_suffix = '64_' - - -class cblas_info(system_info): - section = 'cblas' - dir_env_var = 'CBLAS' - # No default as it's used only in blas_info - _lib_names = [] - notfounderror = BlasNotFoundError - - -class blas_info(system_info): - section = 'blas' - dir_env_var = 'BLAS' - _lib_names = ['blas'] - notfounderror = BlasNotFoundError - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - opt = self.get_option_single('blas_libs', 'libraries') - blas_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs(lib_dirs, blas_libs, []) - if info is None: - return - else: - info['include_dirs'] = self.get_include_dirs() - if platform.system() == 'Windows': - # The check for windows is needed because get_cblas_libs uses the - # same compiler that was used to compile Python and msvc is - # often not installed when mingw is being used. This rough - # treatment is not desirable, but windows is tricky. 
- info['language'] = 'f77' # XXX: is it generally true? - # If cblas is given as an option, use those - cblas_info_obj = cblas_info() - cblas_opt = cblas_info_obj.get_option_single('cblas_libs', 'libraries') - cblas_libs = cblas_info_obj.get_libs(cblas_opt, None) - if cblas_libs: - info['libraries'] = cblas_libs + blas_libs - info['define_macros'] = [('HAVE_CBLAS', None)] - else: - lib = self.get_cblas_libs(info) - if lib is not None: - info['language'] = 'c' - info['libraries'] = lib - info['define_macros'] = [('HAVE_CBLAS', None)] - self.set_info(**info) - - def get_cblas_libs(self, info): - """ Check whether we can link with CBLAS interface - - This method will search through several combinations of libraries - to check whether CBLAS is present: - - 1. Libraries in ``info['libraries']``, as is - 2. As 1. but also explicitly adding ``'cblas'`` as a library - 3. As 1. but also explicitly adding ``'blas'`` as a library - 4. Check only library ``'cblas'`` - 5. Check only library ``'blas'`` - - Parameters - ---------- - info : dict - system information dictionary for compilation and linking - - Returns - ------- - libraries : list of str or None - a list of libraries that enables the use of CBLAS interface. - Returns None if not found or a compilation error occurs. - - Since 1.17 returns a list. 
- """ - # primitive cblas check by looking for the header and trying to link - # cblas or blas - c = customized_ccompiler() - tmpdir = tempfile.mkdtemp() - s = textwrap.dedent("""\ - #include - int main(int argc, const char *argv[]) - { - double a[4] = {1,2,3,4}; - double b[4] = {5,6,7,8}; - return cblas_ddot(4, a, 1, b, 1) > 10; - }""") - src = os.path.join(tmpdir, 'source.c') - try: - with open(src, 'w') as f: - f.write(s) - - try: - # check we can compile (find headers) - obj = c.compile([src], output_dir=tmpdir, - include_dirs=self.get_include_dirs()) - except (distutils.ccompiler.CompileError, distutils.ccompiler.LinkError): - return None - - # check we can link (find library) - # some systems have separate cblas and blas libs. - for libs in [info['libraries'], ['cblas'] + info['libraries'], - ['blas'] + info['libraries'], ['cblas'], ['blas']]: - try: - c.link_executable(obj, os.path.join(tmpdir, "a.out"), - libraries=libs, - library_dirs=info['library_dirs'], - extra_postargs=info.get('extra_link_args', [])) - return libs - except distutils.ccompiler.LinkError: - pass - finally: - shutil.rmtree(tmpdir) - return None - - -class openblas_info(blas_info): - section = 'openblas' - dir_env_var = 'OPENBLAS' - _lib_names = ['openblas'] - _require_symbols = [] - notfounderror = BlasNotFoundError - - @property - def symbol_prefix(self): - try: - return self.cp.get(self.section, 'symbol_prefix') - except NoOptionError: - return '' - - @property - def symbol_suffix(self): - try: - return self.cp.get(self.section, 'symbol_suffix') - except NoOptionError: - return '' - - def _calc_info(self): - c = customized_ccompiler() - - lib_dirs = self.get_lib_dirs() - - # Prefer to use libraries over openblas_libs - opt = self.get_option_single('openblas_libs', 'libraries') - openblas_libs = self.get_libs(opt, self._lib_names) - - info = self.check_libs(lib_dirs, openblas_libs, []) - - if c.compiler_type == "msvc" and info is None: - from numpy.distutils.fcompiler import 
new_fcompiler - f = new_fcompiler(c_compiler=c) - if f and f.compiler_type == 'gnu95': - # Try gfortran-compatible library files - info = self.check_msvc_gfortran_libs(lib_dirs, openblas_libs) - # Skip lapack check, we'd need build_ext to do it - skip_symbol_check = True - elif info: - skip_symbol_check = False - info['language'] = 'c' - - if info is None: - return None - - # Add extra info for OpenBLAS - extra_info = self.calc_extra_info() - dict_append(info, **extra_info) - - if not (skip_symbol_check or self.check_symbols(info)): - return None - - info['define_macros'] = [('HAVE_CBLAS', None)] - if self.symbol_prefix: - info['define_macros'] += [('BLAS_SYMBOL_PREFIX', self.symbol_prefix)] - if self.symbol_suffix: - info['define_macros'] += [('BLAS_SYMBOL_SUFFIX', self.symbol_suffix)] - - return info - - def calc_info(self): - info = self._calc_info() - if info is not None: - self.set_info(**info) - - def check_msvc_gfortran_libs(self, library_dirs, libraries): - # First, find the full path to each library directory - library_paths = [] - for library in libraries: - for library_dir in library_dirs: - # MinGW static ext will be .a - fullpath = os.path.join(library_dir, library + '.a') - if os.path.isfile(fullpath): - library_paths.append(fullpath) - break - else: - return None - - # Generate numpy.distutils virtual static library file - basename = self.__class__.__name__ - tmpdir = os.path.join(os.getcwd(), 'build', basename) - if not os.path.isdir(tmpdir): - os.makedirs(tmpdir) - - info = {'library_dirs': [tmpdir], - 'libraries': [basename], - 'language': 'f77'} - - fake_lib_file = os.path.join(tmpdir, basename + '.fobjects') - fake_clib_file = os.path.join(tmpdir, basename + '.cobjects') - with open(fake_lib_file, 'w') as f: - f.write("\n".join(library_paths)) - with open(fake_clib_file, 'w') as f: - pass - - return info - - def check_symbols(self, info): - res = False - c = customized_ccompiler() - - tmpdir = tempfile.mkdtemp() - - prototypes = "\n".join("void 
%s%s%s();" % (self.symbol_prefix, - symbol_name, - self.symbol_suffix) - for symbol_name in self._require_symbols) - calls = "\n".join("%s%s%s();" % (self.symbol_prefix, - symbol_name, - self.symbol_suffix) - for symbol_name in self._require_symbols) - s = textwrap.dedent("""\ - %(prototypes)s - int main(int argc, const char *argv[]) - { - %(calls)s - return 0; - }""") % dict(prototypes=prototypes, calls=calls) - src = os.path.join(tmpdir, 'source.c') - out = os.path.join(tmpdir, 'a.out') - # Add the additional "extra" arguments - try: - extra_args = info['extra_link_args'] - except Exception: - extra_args = [] - try: - with open(src, 'w') as f: - f.write(s) - obj = c.compile([src], output_dir=tmpdir) - try: - c.link_executable(obj, out, libraries=info['libraries'], - library_dirs=info['library_dirs'], - extra_postargs=extra_args) - res = True - except distutils.ccompiler.LinkError: - res = False - finally: - shutil.rmtree(tmpdir) - return res - -class openblas_lapack_info(openblas_info): - section = 'openblas' - dir_env_var = 'OPENBLAS' - _lib_names = ['openblas'] - _require_symbols = ['zungqr_'] - notfounderror = BlasNotFoundError - -class openblas_clapack_info(openblas_lapack_info): - _lib_names = ['openblas', 'lapack'] - -class openblas_ilp64_info(openblas_info): - section = 'openblas_ilp64' - dir_env_var = 'OPENBLAS_ILP64' - _lib_names = ['openblas64'] - _require_symbols = ['dgemm_', 'cblas_dgemm'] - notfounderror = BlasILP64NotFoundError - - def _calc_info(self): - info = super()._calc_info() - if info is not None: - info['define_macros'] += [('HAVE_BLAS_ILP64', None)] - return info - -class openblas_ilp64_lapack_info(openblas_ilp64_info): - _require_symbols = ['dgemm_', 'cblas_dgemm', 'zungqr_', 'LAPACKE_zungqr'] - - def _calc_info(self): - info = super()._calc_info() - if info: - info['define_macros'] += [('HAVE_LAPACKE', None)] - return info - -class openblas64__info(openblas_ilp64_info): - # ILP64 Openblas, with default symbol suffix - section = 
'openblas64_' - dir_env_var = 'OPENBLAS64_' - _lib_names = ['openblas64_'] - symbol_suffix = '64_' - symbol_prefix = '' - -class openblas64__lapack_info(openblas_ilp64_lapack_info, openblas64__info): - pass - -class blis_info(blas_info): - section = 'blis' - dir_env_var = 'BLIS' - _lib_names = ['blis'] - notfounderror = BlasNotFoundError - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - opt = self.get_option_single('blis_libs', 'libraries') - blis_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs2(lib_dirs, blis_libs, []) - if info is None: - return - - # Add include dirs - incl_dirs = self.get_include_dirs() - dict_append(info, - language='c', - define_macros=[('HAVE_CBLAS', None)], - include_dirs=incl_dirs) - self.set_info(**info) - - -class flame_info(system_info): - """ Usage of libflame for LAPACK operations - - This requires libflame to be compiled with lapack wrappers: - - ./configure --enable-lapack2flame ... - - Be aware that libflame 5.1.0 has some missing names in the shared library, so - if you have problems, try the static flame library. 
- """ - section = 'flame' - _lib_names = ['flame'] - notfounderror = FlameNotFoundError - - def check_embedded_lapack(self, info): - """ libflame does not necessarily have a wrapper for fortran LAPACK, we need to check """ - c = customized_ccompiler() - - tmpdir = tempfile.mkdtemp() - s = textwrap.dedent("""\ - void zungqr_(); - int main(int argc, const char *argv[]) - { - zungqr_(); - return 0; - }""") - src = os.path.join(tmpdir, 'source.c') - out = os.path.join(tmpdir, 'a.out') - # Add the additional "extra" arguments - extra_args = info.get('extra_link_args', []) - try: - with open(src, 'w') as f: - f.write(s) - obj = c.compile([src], output_dir=tmpdir) - try: - c.link_executable(obj, out, libraries=info['libraries'], - library_dirs=info['library_dirs'], - extra_postargs=extra_args) - return True - except distutils.ccompiler.LinkError: - return False - finally: - shutil.rmtree(tmpdir) - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - flame_libs = self.get_libs('libraries', self._lib_names) - - info = self.check_libs2(lib_dirs, flame_libs, []) - if info is None: - return - - # Add the extra flag args to info - extra_info = self.calc_extra_info() - dict_append(info, **extra_info) - - if self.check_embedded_lapack(info): - # check if the user has supplied all information required - self.set_info(**info) - else: - # Try and get the BLAS lib to see if we can get it to work - blas_info = get_info('blas_opt') - if not blas_info: - # since we already failed once, this ain't going to work either - return - - # Now we need to merge the two dictionaries - for key in blas_info: - if isinstance(blas_info[key], list): - info[key] = info.get(key, []) + blas_info[key] - elif isinstance(blas_info[key], tuple): - info[key] = info.get(key, ()) + blas_info[key] - else: - info[key] = info.get(key, '') + blas_info[key] - - # Now check again - if self.check_embedded_lapack(info): - self.set_info(**info) - - -class accelerate_info(system_info): - section = 'accelerate' - 
_lib_names = ['accelerate', 'veclib'] - notfounderror = BlasNotFoundError - - def calc_info(self): - # Make possible to enable/disable from config file/env var - libraries = os.environ.get('ACCELERATE') - if libraries: - libraries = [libraries] - else: - libraries = self.get_libs('libraries', self._lib_names) - libraries = [lib.strip().lower() for lib in libraries] - - if (sys.platform == 'darwin' and - not os.getenv('_PYTHON_HOST_PLATFORM', None)): - # Use the system BLAS from Accelerate or vecLib under OSX - args = [] - link_args = [] - if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \ - 'x86_64' in get_platform() or \ - 'i386' in platform.platform(): - intel = 1 - else: - intel = 0 - if (os.path.exists('/System/Library/Frameworks' - '/Accelerate.framework/') and - 'accelerate' in libraries): - if intel: - args.extend(['-msse3']) - args.extend([ - '-I/System/Library/Frameworks/vecLib.framework/Headers']) - link_args.extend(['-Wl,-framework', '-Wl,Accelerate']) - elif (os.path.exists('/System/Library/Frameworks' - '/vecLib.framework/') and - 'veclib' in libraries): - if intel: - args.extend(['-msse3']) - args.extend([ - '-I/System/Library/Frameworks/vecLib.framework/Headers']) - link_args.extend(['-Wl,-framework', '-Wl,vecLib']) - - if args: - macros = [ - ('NO_ATLAS_INFO', 3), - ('HAVE_CBLAS', None), - ('ACCELERATE_NEW_LAPACK', None), - ] - if(os.getenv('NPY_USE_BLAS_ILP64', None)): - print('Setting HAVE_BLAS_ILP64') - macros += [ - ('HAVE_BLAS_ILP64', None), - ('ACCELERATE_LAPACK_ILP64', None), - ] - self.set_info(extra_compile_args=args, - extra_link_args=link_args, - define_macros=macros) - - return - -class accelerate_lapack_info(accelerate_info): - def _calc_info(self): - return super()._calc_info() - -class blas_src_info(system_info): - # BLAS_SRC is deprecated, please do not use this! - # Build or install a BLAS library via your package manager or from - # source separately. 
- section = 'blas_src' - dir_env_var = 'BLAS_SRC' - notfounderror = BlasSrcNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d, ['blas'])) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d, 'daxpy.f')): - src_dir = d - break - if not src_dir: - #XXX: Get sources from netlib. May be ask first. - return - blas1 = ''' - caxpy csscal dnrm2 dzasum saxpy srotg zdotc ccopy cswap drot - dznrm2 scasum srotm zdotu cdotc dasum drotg icamax scnrm2 - srotmg zdrot cdotu daxpy drotm idamax scopy sscal zdscal crotg - dcabs1 drotmg isamax sdot sswap zrotg cscal dcopy dscal izamax - snrm2 zaxpy zscal csrot ddot dswap sasum srot zcopy zswap - scabs1 - ''' - blas2 = ''' - cgbmv chpmv ctrsv dsymv dtrsv sspr2 strmv zhemv ztpmv cgemv - chpr dgbmv dsyr lsame ssymv strsv zher ztpsv cgerc chpr2 dgemv - dsyr2 sgbmv ssyr xerbla zher2 ztrmv cgeru ctbmv dger dtbmv - sgemv ssyr2 zgbmv zhpmv ztrsv chbmv ctbsv dsbmv dtbsv sger - stbmv zgemv zhpr chemv ctpmv dspmv dtpmv ssbmv stbsv zgerc - zhpr2 cher ctpsv dspr dtpsv sspmv stpmv zgeru ztbmv cher2 - ctrmv dspr2 dtrmv sspr stpsv zhbmv ztbsv - ''' - blas3 = ''' - cgemm csymm ctrsm dsyrk sgemm strmm zhemm zsyr2k chemm csyr2k - dgemm dtrmm ssymm strsm zher2k zsyrk cher2k csyrk dsymm dtrsm - ssyr2k zherk ztrmm cherk ctrmm dsyr2k ssyrk zgemm zsymm ztrsm - ''' - sources = [os.path.join(src_dir, f + '.f') \ - for f in (blas1 + blas2 + blas3).split()] - #XXX: should we check here actual existence of source files? 
- sources = [f for f in sources if os.path.isfile(f)] - info = {'sources': sources, 'language': 'f77'} - self.set_info(**info) - - -class x11_info(system_info): - section = 'x11' - notfounderror = X11NotFoundError - _lib_names = ['X11'] - - def __init__(self): - system_info.__init__(self, - default_lib_dirs=default_x11_lib_dirs, - default_include_dirs=default_x11_include_dirs) - - def calc_info(self): - if sys.platform in ['win32']: - return - lib_dirs = self.get_lib_dirs() - include_dirs = self.get_include_dirs() - opt = self.get_option_single('x11_libs', 'libraries') - x11_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs(lib_dirs, x11_libs, []) - if info is None: - return - inc_dir = None - for d in include_dirs: - if self.combine_paths(d, 'X11/X.h'): - inc_dir = d - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir]) - self.set_info(**info) - - -class _numpy_info(system_info): - section = 'Numeric' - modulename = 'Numeric' - notfounderror = NumericNotFoundError - - def __init__(self): - include_dirs = [] - try: - module = __import__(self.modulename) - prefix = [] - for name in module.__file__.split(os.sep): - if name == 'lib': - break - prefix.append(name) - - # Ask numpy for its own include path before attempting - # anything else - try: - include_dirs.append(getattr(module, 'get_include')()) - except AttributeError: - pass - - include_dirs.append(sysconfig.get_path('include')) - except ImportError: - pass - py_incl_dir = sysconfig.get_path('include') - include_dirs.append(py_incl_dir) - py_pincl_dir = sysconfig.get_path('platinclude') - if py_pincl_dir not in include_dirs: - include_dirs.append(py_pincl_dir) - for d in default_include_dirs: - d = os.path.join(d, os.path.basename(py_incl_dir)) - if d not in include_dirs: - include_dirs.append(d) - system_info.__init__(self, - default_lib_dirs=[], - default_include_dirs=include_dirs) - - def calc_info(self): - try: - module = __import__(self.modulename) - except 
ImportError: - return - info = {} - macros = [] - for v in ['__version__', 'version']: - vrs = getattr(module, v, None) - if vrs is None: - continue - macros = [(self.modulename.upper() + '_VERSION', - _c_string_literal(vrs)), - (self.modulename.upper(), None)] - break - dict_append(info, define_macros=macros) - include_dirs = self.get_include_dirs() - inc_dir = None - for d in include_dirs: - if self.combine_paths(d, - os.path.join(self.modulename, - 'arrayobject.h')): - inc_dir = d - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir]) - if info: - self.set_info(**info) - return - - -class numarray_info(_numpy_info): - section = 'numarray' - modulename = 'numarray' - - -class Numeric_info(_numpy_info): - section = 'Numeric' - modulename = 'Numeric' - - -class numpy_info(_numpy_info): - section = 'numpy' - modulename = 'numpy' - - -class numerix_info(system_info): - section = 'numerix' - - def calc_info(self): - which = None, None - if os.getenv("NUMERIX"): - which = os.getenv("NUMERIX"), "environment var" - # If all the above fail, default to numpy. - if which[0] is None: - which = "numpy", "defaulted" - try: - import numpy # noqa: F401 - which = "numpy", "defaulted" - except ImportError as e: - msg1 = str(e) - try: - import Numeric # noqa: F401 - which = "numeric", "defaulted" - except ImportError as e: - msg2 = str(e) - try: - import numarray # noqa: F401 - which = "numarray", "defaulted" - except ImportError as e: - msg3 = str(e) - log.info(msg1) - log.info(msg2) - log.info(msg3) - which = which[0].strip().lower(), which[1] - if which[0] not in ["numeric", "numarray", "numpy"]: - raise ValueError("numerix selector must be either 'Numeric' " - "or 'numarray' or 'numpy' but the value obtained" - " from the %s was '%s'." 
% (which[1], which[0])) - os.environ['NUMERIX'] = which[0] - self.set_info(**get_info(which[0])) - - -class f2py_info(system_info): - def calc_info(self): - try: - import numpy.f2py as f2py - except ImportError: - return - f2py_dir = os.path.join(os.path.dirname(f2py.__file__), 'src') - self.set_info(sources=[os.path.join(f2py_dir, 'fortranobject.c')], - include_dirs=[f2py_dir]) - return - - -class boost_python_info(system_info): - section = 'boost_python' - dir_env_var = 'BOOST' - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d, ['boost*'])) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d, 'libs', 'python', 'src', - 'module.cpp')): - src_dir = d - break - if not src_dir: - return - py_incl_dirs = [sysconfig.get_path('include')] - py_pincl_dir = sysconfig.get_path('platinclude') - if py_pincl_dir not in py_incl_dirs: - py_incl_dirs.append(py_pincl_dir) - srcs_dir = os.path.join(src_dir, 'libs', 'python', 'src') - bpl_srcs = glob(os.path.join(srcs_dir, '*.cpp')) - bpl_srcs += glob(os.path.join(srcs_dir, '*', '*.cpp')) - info = {'libraries': [('boost_python_src', - {'include_dirs': [src_dir] + py_incl_dirs, - 'sources':bpl_srcs} - )], - 'include_dirs': [src_dir], - } - if info: - self.set_info(**info) - return - - -class agg2_info(system_info): - section = 'agg2' - dir_env_var = 'AGG2' - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d, ['agg2*'])) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d, 'src', 'agg_affine_matrix.cpp')): - src_dir = d - break - if not src_dir: - return - if 
sys.platform == 'win32': - agg2_srcs = glob(os.path.join(src_dir, 'src', 'platform', - 'win32', 'agg_win32_bmp.cpp')) - else: - agg2_srcs = glob(os.path.join(src_dir, 'src', '*.cpp')) - agg2_srcs += [os.path.join(src_dir, 'src', 'platform', - 'X11', - 'agg_platform_support.cpp')] - - info = {'libraries': - [('agg2_src', - {'sources': agg2_srcs, - 'include_dirs': [os.path.join(src_dir, 'include')], - } - )], - 'include_dirs': [os.path.join(src_dir, 'include')], - } - if info: - self.set_info(**info) - return - - -class _pkg_config_info(system_info): - section = None - config_env_var = 'PKG_CONFIG' - default_config_exe = 'pkg-config' - append_config_exe = '' - version_macro_name = None - release_macro_name = None - version_flag = '--modversion' - cflags_flag = '--cflags' - - def get_config_exe(self): - if self.config_env_var in os.environ: - return os.environ[self.config_env_var] - return self.default_config_exe - - def get_config_output(self, config_exe, option): - cmd = config_exe + ' ' + self.append_config_exe + ' ' + option - try: - o = subprocess.check_output(cmd) - except (OSError, subprocess.CalledProcessError): - pass - else: - o = filepath_from_subprocess_output(o) - return o - - def calc_info(self): - config_exe = find_executable(self.get_config_exe()) - if not config_exe: - log.warn('File not found: %s. Cannot determine %s info.' 
\ - % (config_exe, self.section)) - return - info = {} - macros = [] - libraries = [] - library_dirs = [] - include_dirs = [] - extra_link_args = [] - extra_compile_args = [] - version = self.get_config_output(config_exe, self.version_flag) - if version: - macros.append((self.__class__.__name__.split('.')[-1].upper(), - _c_string_literal(version))) - if self.version_macro_name: - macros.append((self.version_macro_name + '_%s' - % (version.replace('.', '_')), None)) - if self.release_macro_name: - release = self.get_config_output(config_exe, '--release') - if release: - macros.append((self.release_macro_name + '_%s' - % (release.replace('.', '_')), None)) - opts = self.get_config_output(config_exe, '--libs') - if opts: - for opt in opts.split(): - if opt[:2] == '-l': - libraries.append(opt[2:]) - elif opt[:2] == '-L': - library_dirs.append(opt[2:]) - else: - extra_link_args.append(opt) - opts = self.get_config_output(config_exe, self.cflags_flag) - if opts: - for opt in opts.split(): - if opt[:2] == '-I': - include_dirs.append(opt[2:]) - elif opt[:2] == '-D': - if '=' in opt: - n, v = opt[2:].split('=') - macros.append((n, v)) - else: - macros.append((opt[2:], None)) - else: - extra_compile_args.append(opt) - if macros: - dict_append(info, define_macros=macros) - if libraries: - dict_append(info, libraries=libraries) - if library_dirs: - dict_append(info, library_dirs=library_dirs) - if include_dirs: - dict_append(info, include_dirs=include_dirs) - if extra_link_args: - dict_append(info, extra_link_args=extra_link_args) - if extra_compile_args: - dict_append(info, extra_compile_args=extra_compile_args) - if info: - self.set_info(**info) - return - - -class wx_info(_pkg_config_info): - section = 'wx' - config_env_var = 'WX_CONFIG' - default_config_exe = 'wx-config' - append_config_exe = '' - version_macro_name = 'WX_VERSION' - release_macro_name = 'WX_RELEASE' - version_flag = '--version' - cflags_flag = '--cxxflags' - - -class 
gdk_pixbuf_xlib_2_info(_pkg_config_info): - section = 'gdk_pixbuf_xlib_2' - append_config_exe = 'gdk-pixbuf-xlib-2.0' - version_macro_name = 'GDK_PIXBUF_XLIB_VERSION' - - -class gdk_pixbuf_2_info(_pkg_config_info): - section = 'gdk_pixbuf_2' - append_config_exe = 'gdk-pixbuf-2.0' - version_macro_name = 'GDK_PIXBUF_VERSION' - - -class gdk_x11_2_info(_pkg_config_info): - section = 'gdk_x11_2' - append_config_exe = 'gdk-x11-2.0' - version_macro_name = 'GDK_X11_VERSION' - - -class gdk_2_info(_pkg_config_info): - section = 'gdk_2' - append_config_exe = 'gdk-2.0' - version_macro_name = 'GDK_VERSION' - - -class gdk_info(_pkg_config_info): - section = 'gdk' - append_config_exe = 'gdk' - version_macro_name = 'GDK_VERSION' - - -class gtkp_x11_2_info(_pkg_config_info): - section = 'gtkp_x11_2' - append_config_exe = 'gtk+-x11-2.0' - version_macro_name = 'GTK_X11_VERSION' - - -class gtkp_2_info(_pkg_config_info): - section = 'gtkp_2' - append_config_exe = 'gtk+-2.0' - version_macro_name = 'GTK_VERSION' - - -class xft_info(_pkg_config_info): - section = 'xft' - append_config_exe = 'xft' - version_macro_name = 'XFT_VERSION' - - -class freetype2_info(_pkg_config_info): - section = 'freetype2' - append_config_exe = 'freetype2' - version_macro_name = 'FREETYPE2_VERSION' - - -class amd_info(system_info): - section = 'amd' - dir_env_var = 'AMD' - _lib_names = ['amd'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - opt = self.get_option_single('amd_libs', 'libraries') - amd_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs(lib_dirs, amd_libs, []) - if info is None: - return - - include_dirs = self.get_include_dirs() - - inc_dir = None - for d in include_dirs: - p = self.combine_paths(d, 'amd.h') - if p: - inc_dir = os.path.dirname(p[0]) - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir], - define_macros=[('SCIPY_AMD_H', None)], - swig_opts=['-I' + inc_dir]) - - self.set_info(**info) - return - - -class umfpack_info(system_info): 
- section = 'umfpack' - dir_env_var = 'UMFPACK' - notfounderror = UmfpackNotFoundError - _lib_names = ['umfpack'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - opt = self.get_option_single('umfpack_libs', 'libraries') - umfpack_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs(lib_dirs, umfpack_libs, []) - if info is None: - return - - include_dirs = self.get_include_dirs() - - inc_dir = None - for d in include_dirs: - p = self.combine_paths(d, ['', 'umfpack'], 'umfpack.h') - if p: - inc_dir = os.path.dirname(p[0]) - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir], - define_macros=[('SCIPY_UMFPACK_H', None)], - swig_opts=['-I' + inc_dir]) - - dict_append(info, **get_info('amd')) - - self.set_info(**info) - return - - -def combine_paths(*args, **kws): - """ Return a list of existing paths composed by all combinations of - items from arguments. - """ - r = [] - for a in args: - if not a: - continue - if is_string(a): - a = [a] - r.append(a) - args = r - if not args: - return [] - if len(args) == 1: - result = reduce(lambda a, b: a + b, map(glob, args[0]), []) - elif len(args) == 2: - result = [] - for a0 in args[0]: - for a1 in args[1]: - result.extend(glob(os.path.join(a0, a1))) - else: - result = combine_paths(*(combine_paths(args[0], args[1]) + args[2:])) - log.debug('(paths: %s)', ','.join(result)) - return result - -language_map = {'c': 0, 'c++': 1, 'f77': 2, 'f90': 3} -inv_language_map = {0: 'c', 1: 'c++', 2: 'f77', 3: 'f90'} - - -def dict_append(d, **kws): - languages = [] - for k, v in kws.items(): - if k == 'language': - languages.append(v) - continue - if k in d: - if k in ['library_dirs', 'include_dirs', - 'extra_compile_args', 'extra_link_args', - 'runtime_library_dirs', 'define_macros']: - [d[k].append(vv) for vv in v if vv not in d[k]] - else: - d[k].extend(v) - else: - d[k] = v - if languages: - l = inv_language_map[max([language_map.get(l, 0) for l in languages])] - d['language'] = l - 
return - - -def parseCmdLine(argv=(None,)): - import optparse - parser = optparse.OptionParser("usage: %prog [-v] [info objs]") - parser.add_option('-v', '--verbose', action='store_true', dest='verbose', - default=False, - help='be verbose and print more messages') - - opts, args = parser.parse_args(args=argv[1:]) - return opts, args - - -def show_all(argv=None): - import inspect - if argv is None: - argv = sys.argv - opts, args = parseCmdLine(argv) - if opts.verbose: - log.set_threshold(log.DEBUG) - else: - log.set_threshold(log.INFO) - show_only = [] - for n in args: - if n[-5:] != '_info': - n = n + '_info' - show_only.append(n) - show_all = not show_only - _gdict_ = globals().copy() - for name, c in _gdict_.items(): - if not inspect.isclass(c): - continue - if not issubclass(c, system_info) or c is system_info: - continue - if not show_all: - if name not in show_only: - continue - del show_only[show_only.index(name)] - conf = c() - conf.verbosity = 2 - # we don't need the result, but we want - # the side effect of printing diagnostics - conf.get_info() - if show_only: - log.info('Info classes not defined: %s', ','.join(show_only)) - -if __name__ == "__main__": - show_all() diff --git a/numpy/distutils/tests/test_build_ext.py b/numpy/distutils/tests/test_build_ext.py deleted file mode 100644 index 55e134b2a047..000000000000 --- a/numpy/distutils/tests/test_build_ext.py +++ /dev/null @@ -1,74 +0,0 @@ -'''Tests for numpy.distutils.build_ext.''' - -import os -import subprocess -import sys -from textwrap import indent, dedent -import pytest -from numpy.testing import IS_WASM - -@pytest.mark.skipif(IS_WASM, reason="cannot start subprocess in wasm") -@pytest.mark.slow -def test_multi_fortran_libs_link(tmp_path): - ''' - Ensures multiple "fake" static libraries are correctly linked. - see gh-18295 - ''' - - # We need to make sure we actually have an f77 compiler. 
- # This is nontrivial, so we'll borrow the utilities - # from f2py tests: - from numpy.distutils.tests.utilities import has_f77_compiler - if not has_f77_compiler(): - pytest.skip('No F77 compiler found') - - # make some dummy sources - with open(tmp_path / '_dummy1.f', 'w') as fid: - fid.write(indent(dedent('''\ - FUNCTION dummy_one() - RETURN - END FUNCTION'''), prefix=' '*6)) - with open(tmp_path / '_dummy2.f', 'w') as fid: - fid.write(indent(dedent('''\ - FUNCTION dummy_two() - RETURN - END FUNCTION'''), prefix=' '*6)) - with open(tmp_path / '_dummy.c', 'w') as fid: - # doesn't need to load - just needs to exist - fid.write('int PyInit_dummyext;') - - # make a setup file - with open(tmp_path / 'setup.py', 'w') as fid: - srctree = os.path.join(os.path.dirname(__file__), '..', '..', '..') - fid.write(dedent(f'''\ - def configuration(parent_package="", top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration("", parent_package, top_path) - config.add_library("dummy1", sources=["_dummy1.f"]) - config.add_library("dummy2", sources=["_dummy2.f"]) - config.add_extension("dummyext", sources=["_dummy.c"], libraries=["dummy1", "dummy2"]) - return config - - - if __name__ == "__main__": - import sys - sys.path.insert(0, r"{srctree}") - from numpy.distutils.core import setup - setup(**configuration(top_path="").todict())''')) - - # build the test extensino and "install" into a temporary directory - build_dir = tmp_path - subprocess.check_call([sys.executable, 'setup.py', 'build', 'install', - '--prefix', str(tmp_path / 'installdir'), - '--record', str(tmp_path / 'tmp_install_log.txt'), - ], - cwd=str(build_dir), - ) - # get the path to the so - so = None - with open(tmp_path /'tmp_install_log.txt') as fid: - for line in fid: - if 'dummyext' in line: - so = line.strip() - break - assert so is not None diff --git a/numpy/distutils/tests/test_ccompiler_opt.py b/numpy/distutils/tests/test_ccompiler_opt.py deleted file mode 100644 index 
3714aea0e12e..000000000000 --- a/numpy/distutils/tests/test_ccompiler_opt.py +++ /dev/null @@ -1,808 +0,0 @@ -import re, textwrap, os -from os import sys, path -from distutils.errors import DistutilsError - -is_standalone = __name__ == '__main__' and __package__ is None -if is_standalone: - import unittest, contextlib, tempfile, shutil - sys.path.append(path.abspath(path.join(path.dirname(__file__), ".."))) - from ccompiler_opt import CCompilerOpt - - # from numpy/testing/_private/utils.py - @contextlib.contextmanager - def tempdir(*args, **kwargs): - tmpdir = tempfile.mkdtemp(*args, **kwargs) - try: - yield tmpdir - finally: - shutil.rmtree(tmpdir) - - def assert_(expr, msg=''): - if not expr: - raise AssertionError(msg) -else: - from numpy.distutils.ccompiler_opt import CCompilerOpt - from numpy.testing import assert_, tempdir - -# architectures and compilers to test -arch_compilers = dict( - x86 = ("gcc", "clang", "icc", "iccw", "msvc"), - x64 = ("gcc", "clang", "icc", "iccw", "msvc"), - ppc64 = ("gcc", "clang"), - ppc64le = ("gcc", "clang"), - armhf = ("gcc", "clang"), - aarch64 = ("gcc", "clang", "fcc"), - s390x = ("gcc", "clang"), - noarch = ("gcc",) -) - -class FakeCCompilerOpt(CCompilerOpt): - fake_info = "" - def __init__(self, trap_files="", trap_flags="", *args, **kwargs): - self.fake_trap_files = trap_files - self.fake_trap_flags = trap_flags - CCompilerOpt.__init__(self, None, **kwargs) - - def __repr__(self): - return textwrap.dedent("""\ - <<<< - march : {} - compiler : {} - ---------------- - {} - >>>> - """).format(self.cc_march, self.cc_name, self.report()) - - def dist_compile(self, sources, flags, **kwargs): - assert(isinstance(sources, list)) - assert(isinstance(flags, list)) - if self.fake_trap_files: - for src in sources: - if re.match(self.fake_trap_files, src): - self.dist_error("source is trapped by a fake interface") - if self.fake_trap_flags: - for f in flags: - if re.match(self.fake_trap_flags, f): - self.dist_error("flag is trapped by 
a fake interface") - # fake objects - return zip(sources, [' '.join(flags)] * len(sources)) - - def dist_info(self): - return FakeCCompilerOpt.fake_info - - @staticmethod - def dist_log(*args, stderr=False): - pass - -class _Test_CCompilerOpt: - arch = None # x86_64 - cc = None # gcc - - def setup_class(self): - FakeCCompilerOpt.conf_nocache = True - self._opt = None - - def nopt(self, *args, **kwargs): - FakeCCompilerOpt.fake_info = (self.arch, self.cc, "") - return FakeCCompilerOpt(*args, **kwargs) - - def opt(self): - if not self._opt: - self._opt = self.nopt() - return self._opt - - def march(self): - return self.opt().cc_march - - def cc_name(self): - return self.opt().cc_name - - def get_targets(self, targets, groups, **kwargs): - FakeCCompilerOpt.conf_target_groups = groups - opt = self.nopt( - cpu_baseline=kwargs.get("baseline", "min"), - cpu_dispatch=kwargs.get("dispatch", "max"), - trap_files=kwargs.get("trap_files", ""), - trap_flags=kwargs.get("trap_flags", "") - ) - with tempdir() as tmpdir: - file = os.path.join(tmpdir, "test_targets.c") - with open(file, 'w') as f: - f.write(targets) - gtargets = [] - gflags = {} - fake_objects = opt.try_dispatch([file]) - for source, flags in fake_objects: - gtar = path.basename(source).split('.')[1:-1] - glen = len(gtar) - if glen == 0: - gtar = "baseline" - elif glen == 1: - gtar = gtar[0].upper() - else: - # converting multi-target into parentheses str format to be equivalent - # to the configuration statements syntax. 
- gtar = ('('+' '.join(gtar)+')').upper() - gtargets.append(gtar) - gflags[gtar] = flags - - has_baseline, targets = opt.sources_status[file] - targets = targets + ["baseline"] if has_baseline else targets - # convert tuple that represent multi-target into parentheses str format - targets = [ - '('+' '.join(tar)+')' if isinstance(tar, tuple) else tar - for tar in targets - ] - if len(targets) != len(gtargets) or not all(t in gtargets for t in targets): - raise AssertionError( - "'sources_status' returns different targets than the compiled targets\n" - "%s != %s" % (targets, gtargets) - ) - # return targets from 'sources_status' since the order is matters - return targets, gflags - - def arg_regex(self, **kwargs): - map2origin = dict( - x64 = "x86", - ppc64le = "ppc64", - aarch64 = "armhf", - clang = "gcc", - ) - march = self.march(); cc_name = self.cc_name() - map_march = map2origin.get(march, march) - map_cc = map2origin.get(cc_name, cc_name) - for key in ( - march, cc_name, map_march, map_cc, - march + '_' + cc_name, - map_march + '_' + cc_name, - march + '_' + map_cc, - map_march + '_' + map_cc, - ) : - regex = kwargs.pop(key, None) - if regex is not None: - break - if regex: - if isinstance(regex, dict): - for k, v in regex.items(): - if v[-1:] not in ')}$?\\.+*': - regex[k] = v + '$' - else: - assert(isinstance(regex, str)) - if regex[-1:] not in ')}$?\\.+*': - regex += '$' - return regex - - def expect(self, dispatch, baseline="", **kwargs): - match = self.arg_regex(**kwargs) - if match is None: - return - opt = self.nopt( - cpu_baseline=baseline, cpu_dispatch=dispatch, - trap_files=kwargs.get("trap_files", ""), - trap_flags=kwargs.get("trap_flags", "") - ) - features = ' '.join(opt.cpu_dispatch_names()) - if not match: - if len(features) != 0: - raise AssertionError( - 'expected empty features, not "%s"' % features - ) - return - if not re.match(match, features, re.IGNORECASE): - raise AssertionError( - 'dispatch features "%s" not match "%s"' % (features, 
match) - ) - - def expect_baseline(self, baseline, dispatch="", **kwargs): - match = self.arg_regex(**kwargs) - if match is None: - return - opt = self.nopt( - cpu_baseline=baseline, cpu_dispatch=dispatch, - trap_files=kwargs.get("trap_files", ""), - trap_flags=kwargs.get("trap_flags", "") - ) - features = ' '.join(opt.cpu_baseline_names()) - if not match: - if len(features) != 0: - raise AssertionError( - 'expected empty features, not "%s"' % features - ) - return - if not re.match(match, features, re.IGNORECASE): - raise AssertionError( - 'baseline features "%s" not match "%s"' % (features, match) - ) - - def expect_flags(self, baseline, dispatch="", **kwargs): - match = self.arg_regex(**kwargs) - if match is None: - return - opt = self.nopt( - cpu_baseline=baseline, cpu_dispatch=dispatch, - trap_files=kwargs.get("trap_files", ""), - trap_flags=kwargs.get("trap_flags", "") - ) - flags = ' '.join(opt.cpu_baseline_flags()) - if not match: - if len(flags) != 0: - raise AssertionError( - 'expected empty flags not "%s"' % flags - ) - return - if not re.match(match, flags): - raise AssertionError( - 'flags "%s" not match "%s"' % (flags, match) - ) - - def expect_targets(self, targets, groups={}, **kwargs): - match = self.arg_regex(**kwargs) - if match is None: - return - targets, _ = self.get_targets(targets=targets, groups=groups, **kwargs) - targets = ' '.join(targets) - if not match: - if len(targets) != 0: - raise AssertionError( - 'expected empty targets, not "%s"' % targets - ) - return - if not re.match(match, targets, re.IGNORECASE): - raise AssertionError( - 'targets "%s" not match "%s"' % (targets, match) - ) - - def expect_target_flags(self, targets, groups={}, **kwargs): - match_dict = self.arg_regex(**kwargs) - if match_dict is None: - return - assert(isinstance(match_dict, dict)) - _, tar_flags = self.get_targets(targets=targets, groups=groups) - - for match_tar, match_flags in match_dict.items(): - if match_tar not in tar_flags: - raise AssertionError( - 
'expected to find target "%s"' % match_tar - ) - flags = tar_flags[match_tar] - if not match_flags: - if len(flags) != 0: - raise AssertionError( - 'expected to find empty flags in target "%s"' % match_tar - ) - if not re.match(match_flags, flags): - raise AssertionError( - '"%s" flags "%s" not match "%s"' % (match_tar, flags, match_flags) - ) - - def test_interface(self): - wrong_arch = "ppc64" if self.arch != "ppc64" else "x86" - wrong_cc = "clang" if self.cc != "clang" else "icc" - opt = self.opt() - assert_(getattr(opt, "cc_on_" + self.arch)) - assert_(not getattr(opt, "cc_on_" + wrong_arch)) - assert_(getattr(opt, "cc_is_" + self.cc)) - assert_(not getattr(opt, "cc_is_" + wrong_cc)) - - def test_args_empty(self): - for baseline, dispatch in ( - ("", "none"), - (None, ""), - ("none +none", "none - none"), - ("none -max", "min - max"), - ("+vsx2 -VSX2", "vsx avx2 avx512f -max"), - ("max -vsx - avx + avx512f neon -MAX ", - "min -min + max -max -vsx + avx2 -avx2 +NONE") - ) : - opt = self.nopt(cpu_baseline=baseline, cpu_dispatch=dispatch) - assert(len(opt.cpu_baseline_names()) == 0) - assert(len(opt.cpu_dispatch_names()) == 0) - - def test_args_validation(self): - if self.march() == "unknown": - return - # check sanity of argument's validation - for baseline, dispatch in ( - ("unkown_feature - max +min", "unknown max min"), # unknowing features - ("#avx2", "$vsx") # groups and polices aren't acceptable - ) : - try: - self.nopt(cpu_baseline=baseline, cpu_dispatch=dispatch) - raise AssertionError("excepted an exception for invalid arguments") - except DistutilsError: - pass - - def test_skip(self): - # only takes what platform supports and skip the others - # without casing exceptions - self.expect( - "sse vsx neon", - x86="sse", ppc64="vsx", armhf="neon", unknown="" - ) - self.expect( - "sse41 avx avx2 vsx2 vsx3 neon_vfpv4 asimd", - x86 = "sse41 avx avx2", - ppc64 = "vsx2 vsx3", - armhf = "neon_vfpv4 asimd", - unknown = "" - ) - # any features in cpu_dispatch must 
be ignored if it's part of baseline - self.expect( - "sse neon vsx", baseline="sse neon vsx", - x86="", ppc64="", armhf="" - ) - self.expect( - "avx2 vsx3 asimdhp", baseline="avx2 vsx3 asimdhp", - x86="", ppc64="", armhf="" - ) - - def test_implies(self): - # baseline combining implied features, so we count - # on it instead of testing 'feature_implies()'' directly - self.expect_baseline( - "fma3 avx2 asimd vsx3", - # .* between two spaces can validate features in between - x86 = "sse .* sse41 .* fma3.*avx2", - ppc64 = "vsx vsx2 vsx3", - armhf = "neon neon_fp16 neon_vfpv4 asimd" - ) - """ - special cases - """ - # in icc and msvc, FMA3 and AVX2 can't be separated - # both need to implies each other, same for avx512f & cd - for f0, f1 in ( - ("fma3", "avx2"), - ("avx512f", "avx512cd"), - ): - diff = ".* sse42 .* %s .*%s$" % (f0, f1) - self.expect_baseline(f0, - x86_gcc=".* sse42 .* %s$" % f0, - x86_icc=diff, x86_iccw=diff - ) - self.expect_baseline(f1, - x86_gcc=".* avx .* %s$" % f1, - x86_icc=diff, x86_iccw=diff - ) - # in msvc, following features can't be separated too - for f in (("fma3", "avx2"), ("avx512f", "avx512cd", "avx512_skx")): - for ff in f: - self.expect_baseline(ff, - x86_msvc=".*%s" % ' '.join(f) - ) - - # in ppc64le VSX and VSX2 can't be separated - self.expect_baseline("vsx", ppc64le="vsx vsx2") - # in aarch64 following features can't be separated - for f in ("neon", "neon_fp16", "neon_vfpv4", "asimd"): - self.expect_baseline(f, aarch64="neon neon_fp16 neon_vfpv4 asimd") - - def test_args_options(self): - # max & native - for o in ("max", "native"): - if o == "native" and self.cc_name() == "msvc": - continue - self.expect(o, - trap_files=".*cpu_(sse|vsx|neon|vx).c", - x86="", ppc64="", armhf="", s390x="" - ) - self.expect(o, - trap_files=".*cpu_(sse3|vsx2|neon_vfpv4|vxe).c", - x86="sse sse2", ppc64="vsx", armhf="neon neon_fp16", - aarch64="", ppc64le="", s390x="vx" - ) - self.expect(o, - trap_files=".*cpu_(popcnt|vsx3).c", - x86="sse .* sse41", 
ppc64="vsx vsx2", - armhf="neon neon_fp16 .* asimd .*", - s390x="vx vxe vxe2" - ) - self.expect(o, - x86_gcc=".* xop fma4 .* avx512f .* avx512_knl avx512_knm avx512_skx .*", - # in icc, xop and fam4 aren't supported - x86_icc=".* avx512f .* avx512_knl avx512_knm avx512_skx .*", - x86_iccw=".* avx512f .* avx512_knl avx512_knm avx512_skx .*", - # in msvc, avx512_knl avx512_knm aren't supported - x86_msvc=".* xop fma4 .* avx512f .* avx512_skx .*", - armhf=".* asimd asimdhp asimddp .*", - ppc64="vsx vsx2 vsx3 vsx4.*", - s390x="vx vxe vxe2.*" - ) - # min - self.expect("min", - x86="sse sse2", x64="sse sse2 sse3", - armhf="", aarch64="neon neon_fp16 .* asimd", - ppc64="", ppc64le="vsx vsx2", s390x="" - ) - self.expect( - "min", trap_files=".*cpu_(sse2|vsx2).c", - x86="", ppc64le="" - ) - # an exception must triggered if native flag isn't supported - # when option "native" is activated through the args - try: - self.expect("native", - trap_flags=".*(-march=native|-xHost|/QxHost|-mcpu=a64fx).*", - x86=".*", ppc64=".*", armhf=".*", s390x=".*", aarch64=".*", - ) - if self.march() != "unknown": - raise AssertionError( - "excepted an exception for %s" % self.march() - ) - except DistutilsError: - if self.march() == "unknown": - raise AssertionError("excepted no exceptions") - - def test_flags(self): - self.expect_flags( - "sse sse2 vsx vsx2 neon neon_fp16 vx vxe", - x86_gcc="-msse -msse2", x86_icc="-msse -msse2", - x86_iccw="/arch:SSE2", - x86_msvc="/arch:SSE2" if self.march() == "x86" else "", - ppc64_gcc= "-mcpu=power8", - ppc64_clang="-mcpu=power8", - armhf_gcc="-mfpu=neon-fp16 -mfp16-format=ieee", - aarch64="", - s390x="-mzvector -march=arch12" - ) - # testing normalize -march - self.expect_flags( - "asimd", - aarch64="", - armhf_gcc=r"-mfp16-format=ieee -mfpu=neon-fp-armv8 -march=armv8-a\+simd" - ) - self.expect_flags( - "asimdhp", - aarch64_gcc=r"-march=armv8.2-a\+fp16", - armhf_gcc=r"-mfp16-format=ieee -mfpu=neon-fp-armv8 -march=armv8.2-a\+fp16" - ) - self.expect_flags( 
- "asimddp", aarch64_gcc=r"-march=armv8.2-a\+dotprod" - ) - self.expect_flags( - # asimdfhm implies asimdhp - "asimdfhm", aarch64_gcc=r"-march=armv8.2-a\+fp16\+fp16fml" - ) - self.expect_flags( - "asimddp asimdhp asimdfhm", - aarch64_gcc=r"-march=armv8.2-a\+dotprod\+fp16\+fp16fml" - ) - self.expect_flags( - "vx vxe vxe2", - s390x=r"-mzvector -march=arch13" - ) - - def test_targets_exceptions(self): - for targets in ( - "bla bla", "/*@targets", - "/*@targets */", - "/*@targets unknown */", - "/*@targets $unknown_policy avx2 */", - "/*@targets #unknown_group avx2 */", - "/*@targets $ */", - "/*@targets # vsx */", - "/*@targets #$ vsx */", - "/*@targets vsx avx2 ) */", - "/*@targets vsx avx2 (avx2 */", - "/*@targets vsx avx2 () */", - "/*@targets vsx avx2 ($autovec) */", # no features - "/*@targets vsx avx2 (xxx) */", - "/*@targets vsx avx2 (baseline) */", - ) : - try: - self.expect_targets( - targets, - x86="", armhf="", ppc64="", s390x="" - ) - if self.march() != "unknown": - raise AssertionError( - "excepted an exception for %s" % self.march() - ) - except DistutilsError: - if self.march() == "unknown": - raise AssertionError("excepted no exceptions") - - def test_targets_syntax(self): - for targets in ( - "/*@targets $keep_baseline sse vsx neon vx*/", - "/*@targets,$keep_baseline,sse,vsx,neon vx*/", - "/*@targets*$keep_baseline*sse*vsx*neon*vx*/", - """ - /* - ** @targets - ** $keep_baseline, sse vsx,neon, vx - */ - """, - """ - /* - ************@targets**************** - ** $keep_baseline, sse vsx, neon, vx - ************************************ - */ - """, - """ - /* - /////////////@targets///////////////// - //$keep_baseline//sse//vsx//neon//vx - ///////////////////////////////////// - */ - """, - """ - /* - @targets - $keep_baseline - SSE VSX NEON VX*/ - """ - ) : - self.expect_targets(targets, - x86="sse", ppc64="vsx", armhf="neon", s390x="vx", unknown="" - ) - - def test_targets(self): - # test skipping baseline features - self.expect_targets( - """ - 
/*@targets - sse sse2 sse41 avx avx2 avx512f - vsx vsx2 vsx3 vsx4 - neon neon_fp16 asimdhp asimddp - vx vxe vxe2 - */ - """, - baseline="avx vsx2 asimd vx vxe", - x86="avx512f avx2", armhf="asimddp asimdhp", ppc64="vsx4 vsx3", - s390x="vxe2" - ) - # test skipping non-dispatch features - self.expect_targets( - """ - /*@targets - sse41 avx avx2 avx512f - vsx2 vsx3 vsx4 - asimd asimdhp asimddp - vx vxe vxe2 - */ - """, - baseline="", dispatch="sse41 avx2 vsx2 asimd asimddp vxe2", - x86="avx2 sse41", armhf="asimddp asimd", ppc64="vsx2", s390x="vxe2" - ) - # test skipping features that not supported - self.expect_targets( - """ - /*@targets - sse2 sse41 avx2 avx512f - vsx2 vsx3 vsx4 - neon asimdhp asimddp - vx vxe vxe2 - */ - """, - baseline="", - trap_files=".*(avx2|avx512f|vsx3|vsx4|asimddp|vxe2).c", - x86="sse41 sse2", ppc64="vsx2", armhf="asimdhp neon", - s390x="vxe vx" - ) - # test skipping features that implies each other - self.expect_targets( - """ - /*@targets - sse sse2 avx fma3 avx2 avx512f avx512cd - vsx vsx2 vsx3 - neon neon_vfpv4 neon_fp16 neon_fp16 asimd asimdhp - asimddp asimdfhm - */ - """, - baseline="", - x86_gcc="avx512cd avx512f avx2 fma3 avx sse2", - x86_msvc="avx512cd avx2 avx sse2", - x86_icc="avx512cd avx2 avx sse2", - x86_iccw="avx512cd avx2 avx sse2", - ppc64="vsx3 vsx2 vsx", - ppc64le="vsx3 vsx2", - armhf="asimdfhm asimddp asimdhp asimd neon_vfpv4 neon_fp16 neon", - aarch64="asimdfhm asimddp asimdhp asimd" - ) - - def test_targets_policies(self): - # 'keep_baseline', generate objects for baseline features - self.expect_targets( - """ - /*@targets - $keep_baseline - sse2 sse42 avx2 avx512f - vsx2 vsx3 - neon neon_vfpv4 asimd asimddp - vx vxe vxe2 - */ - """, - baseline="sse41 avx2 vsx2 asimd vsx3 vxe", - x86="avx512f avx2 sse42 sse2", - ppc64="vsx3 vsx2", - armhf="asimddp asimd neon_vfpv4 neon", - # neon, neon_vfpv4, asimd implies each other - aarch64="asimddp asimd", - s390x="vxe2 vxe vx" - ) - # 'keep_sort', leave the sort as-is - 
self.expect_targets( - """ - /*@targets - $keep_baseline $keep_sort - avx512f sse42 avx2 sse2 - vsx2 vsx3 - asimd neon neon_vfpv4 asimddp - vxe vxe2 - */ - """, - x86="avx512f sse42 avx2 sse2", - ppc64="vsx2 vsx3", - armhf="asimd neon neon_vfpv4 asimddp", - # neon, neon_vfpv4, asimd implies each other - aarch64="asimd asimddp", - s390x="vxe vxe2" - ) - # 'autovec', skipping features that can't be - # vectorized by the compiler - self.expect_targets( - """ - /*@targets - $keep_baseline $keep_sort $autovec - avx512f avx2 sse42 sse41 sse2 - vsx3 vsx2 - asimddp asimd neon_vfpv4 neon - */ - """, - x86_gcc="avx512f avx2 sse42 sse41 sse2", - x86_icc="avx512f avx2 sse42 sse41 sse2", - x86_iccw="avx512f avx2 sse42 sse41 sse2", - x86_msvc="avx512f avx2 sse2" - if self.march() == 'x86' else "avx512f avx2", - ppc64="vsx3 vsx2", - armhf="asimddp asimd neon_vfpv4 neon", - # neon, neon_vfpv4, asimd implies each other - aarch64="asimddp asimd" - ) - for policy in ("$maxopt", "$autovec"): - # 'maxopt' and autovec set the max acceptable optimization flags - self.expect_target_flags( - "/*@targets baseline %s */" % policy, - gcc={"baseline":".*-O3.*"}, icc={"baseline":".*-O3.*"}, - iccw={"baseline":".*/O3.*"}, msvc={"baseline":".*/O2.*"}, - unknown={"baseline":".*"} - ) - - # 'werror', force compilers to treat warnings as errors - self.expect_target_flags( - "/*@targets baseline $werror */", - gcc={"baseline":".*-Werror.*"}, icc={"baseline":".*-Werror.*"}, - iccw={"baseline":".*/Werror.*"}, msvc={"baseline":".*/WX.*"}, - unknown={"baseline":".*"} - ) - - def test_targets_groups(self): - self.expect_targets( - """ - /*@targets $keep_baseline baseline #test_group */ - """, - groups=dict( - test_group=(""" - $keep_baseline - asimddp sse2 vsx2 avx2 vsx3 - avx512f asimdhp - """) - ), - x86="avx512f avx2 sse2 baseline", - ppc64="vsx3 vsx2 baseline", - armhf="asimddp asimdhp baseline" - ) - # test skip duplicating and sorting - self.expect_targets( - """ - /*@targets - * sse42 avx avx512f - 
* #test_group_1 - * vsx2 - * #test_group_2 - * asimddp asimdfhm - */ - """, - groups=dict( - test_group_1=(""" - VSX2 vsx3 asimd avx2 SSE41 - """), - test_group_2=(""" - vsx2 vsx3 asImd aVx2 sse41 - """) - ), - x86="avx512f avx2 avx sse42 sse41", - ppc64="vsx3 vsx2", - # vsx2 part of the default baseline of ppc64le, option ("min") - ppc64le="vsx3", - armhf="asimdfhm asimddp asimd", - # asimd part of the default baseline of aarch64, option ("min") - aarch64="asimdfhm asimddp" - ) - - def test_targets_multi(self): - self.expect_targets( - """ - /*@targets - (avx512_clx avx512_cnl) (asimdhp asimddp) - */ - """, - x86=r"\(avx512_clx avx512_cnl\)", - armhf=r"\(asimdhp asimddp\)", - ) - # test skipping implied features and auto-sort - self.expect_targets( - """ - /*@targets - f16c (sse41 avx sse42) (sse3 avx2 avx512f) - vsx2 (vsx vsx3 vsx2) - (neon neon_vfpv4 asimd asimdhp asimddp) - */ - """, - x86="avx512f f16c avx", - ppc64="vsx3 vsx2", - ppc64le="vsx3", # vsx2 part of baseline - armhf=r"\(asimdhp asimddp\)", - ) - # test skipping implied features and keep sort - self.expect_targets( - """ - /*@targets $keep_sort - (sse41 avx sse42) (sse3 avx2 avx512f) - (vsx vsx3 vsx2) - (asimddp neon neon_vfpv4 asimd asimdhp) - (vx vxe vxe2) - */ - """, - x86="avx avx512f", - ppc64="vsx3", - armhf=r"\(asimdhp asimddp\)", - s390x="vxe2" - ) - # test compiler variety and avoiding duplicating - self.expect_targets( - """ - /*@targets $keep_sort - fma3 avx2 (fma3 avx2) (avx2 fma3) avx2 fma3 - */ - """, - x86_gcc=r"fma3 avx2 \(fma3 avx2\)", - x86_icc="avx2", x86_iccw="avx2", - x86_msvc="avx2" - ) - -def new_test(arch, cc): - if is_standalone: return textwrap.dedent("""\ - class TestCCompilerOpt_{class_name}(_Test_CCompilerOpt, unittest.TestCase): - arch = '{arch}' - cc = '{cc}' - def __init__(self, methodName="runTest"): - unittest.TestCase.__init__(self, methodName) - self.setup_class() - """).format( - class_name=arch + '_' + cc, arch=arch, cc=cc - ) - return textwrap.dedent("""\ - 
class TestCCompilerOpt_{class_name}(_Test_CCompilerOpt): - arch = '{arch}' - cc = '{cc}' - """).format( - class_name=arch + '_' + cc, arch=arch, cc=cc - ) -""" -if 1 and is_standalone: - FakeCCompilerOpt.fake_info = "x86_icc" - cco = FakeCCompilerOpt(None, cpu_baseline="avx2") - print(' '.join(cco.cpu_baseline_names())) - print(cco.cpu_baseline_flags()) - unittest.main() - sys.exit() -""" -for arch, compilers in arch_compilers.items(): - for cc in compilers: - exec(new_test(arch, cc)) - -if is_standalone: - unittest.main() diff --git a/numpy/distutils/tests/test_ccompiler_opt_conf.py b/numpy/distutils/tests/test_ccompiler_opt_conf.py deleted file mode 100644 index d9e8b2b0a834..000000000000 --- a/numpy/distutils/tests/test_ccompiler_opt_conf.py +++ /dev/null @@ -1,176 +0,0 @@ -import unittest -from os import sys, path - -is_standalone = __name__ == '__main__' and __package__ is None -if is_standalone: - sys.path.append(path.abspath(path.join(path.dirname(__file__), ".."))) - from ccompiler_opt import CCompilerOpt -else: - from numpy.distutils.ccompiler_opt import CCompilerOpt - -arch_compilers = dict( - x86 = ("gcc", "clang", "icc", "iccw", "msvc"), - x64 = ("gcc", "clang", "icc", "iccw", "msvc"), - ppc64 = ("gcc", "clang"), - ppc64le = ("gcc", "clang"), - armhf = ("gcc", "clang"), - aarch64 = ("gcc", "clang"), - narch = ("gcc",) -) - -class FakeCCompilerOpt(CCompilerOpt): - fake_info = ("arch", "compiler", "extra_args") - def __init__(self, *args, **kwargs): - CCompilerOpt.__init__(self, None, **kwargs) - def dist_compile(self, sources, flags, **kwargs): - return sources - def dist_info(self): - return FakeCCompilerOpt.fake_info - @staticmethod - def dist_log(*args, stderr=False): - pass - -class _TestConfFeatures(FakeCCompilerOpt): - """A hook to check the sanity of configured features -- before it called by the abstract class '_Feature' - """ - - def conf_features_partial(self): - conf_all = self.conf_features - for feature_name, feature in conf_all.items(): - 
self.test_feature( - "attribute conf_features", - conf_all, feature_name, feature - ) - - conf_partial = FakeCCompilerOpt.conf_features_partial(self) - for feature_name, feature in conf_partial.items(): - self.test_feature( - "conf_features_partial()", - conf_partial, feature_name, feature - ) - return conf_partial - - def test_feature(self, log, search_in, feature_name, feature_dict): - error_msg = ( - "during validate '{}' within feature '{}', " - "march '{}' and compiler '{}'\n>> " - ).format(log, feature_name, self.cc_march, self.cc_name) - - if not feature_name.isupper(): - raise AssertionError(error_msg + "feature name must be in uppercase") - - for option, val in feature_dict.items(): - self.test_option_types(error_msg, option, val) - self.test_duplicates(error_msg, option, val) - - self.test_implies(error_msg, search_in, feature_name, feature_dict) - self.test_group(error_msg, search_in, feature_name, feature_dict) - self.test_extra_checks(error_msg, search_in, feature_name, feature_dict) - - def test_option_types(self, error_msg, option, val): - for tp, available in ( - ((str, list), ( - "implies", "headers", "flags", "group", "detect", "extra_checks" - )), - ((str,), ("disable",)), - ((int,), ("interest",)), - ((bool,), ("implies_detect",)), - ((bool, type(None)), ("autovec",)), - ) : - found_it = option in available - if not found_it: - continue - if not isinstance(val, tp): - error_tp = [t.__name__ for t in (*tp,)] - error_tp = ' or '.join(error_tp) - raise AssertionError(error_msg + - "expected '%s' type for option '%s' not '%s'" % ( - error_tp, option, type(val).__name__ - )) - break - - if not found_it: - raise AssertionError(error_msg + "invalid option name '%s'" % option) - - def test_duplicates(self, error_msg, option, val): - if option not in ( - "implies", "headers", "flags", "group", "detect", "extra_checks" - ) : return - - if isinstance(val, str): - val = val.split() - - if len(val) != len(set(val)): - raise AssertionError(error_msg + 
"duplicated values in option '%s'" % option) - - def test_implies(self, error_msg, search_in, feature_name, feature_dict): - if feature_dict.get("disabled") is not None: - return - implies = feature_dict.get("implies", "") - if not implies: - return - if isinstance(implies, str): - implies = implies.split() - - if feature_name in implies: - raise AssertionError(error_msg + "feature implies itself") - - for impl in implies: - impl_dict = search_in.get(impl) - if impl_dict is not None: - if "disable" in impl_dict: - raise AssertionError(error_msg + "implies disabled feature '%s'" % impl) - continue - raise AssertionError(error_msg + "implies non-exist feature '%s'" % impl) - - def test_group(self, error_msg, search_in, feature_name, feature_dict): - if feature_dict.get("disabled") is not None: - return - group = feature_dict.get("group", "") - if not group: - return - if isinstance(group, str): - group = group.split() - - for f in group: - impl_dict = search_in.get(f) - if not impl_dict or "disable" in impl_dict: - continue - raise AssertionError(error_msg + - "in option 'group', '%s' already exists as a feature name" % f - ) - - def test_extra_checks(self, error_msg, search_in, feature_name, feature_dict): - if feature_dict.get("disabled") is not None: - return - extra_checks = feature_dict.get("extra_checks", "") - if not extra_checks: - return - if isinstance(extra_checks, str): - extra_checks = extra_checks.split() - - for f in extra_checks: - impl_dict = search_in.get(f) - if not impl_dict or "disable" in impl_dict: - continue - raise AssertionError(error_msg + - "in option 'extra_checks', extra test case '%s' already exists as a feature name" % f - ) - -class TestConfFeatures(unittest.TestCase): - def __init__(self, methodName="runTest"): - unittest.TestCase.__init__(self, methodName) - self._setup() - - def _setup(self): - FakeCCompilerOpt.conf_nocache = True - - def test_features(self): - for arch, compilers in arch_compilers.items(): - for cc in compilers: - 
FakeCCompilerOpt.fake_info = (arch, cc, "") - _TestConfFeatures() - -if is_standalone: - unittest.main() diff --git a/numpy/distutils/tests/test_exec_command.py b/numpy/distutils/tests/test_exec_command.py deleted file mode 100644 index d1a20056a5a2..000000000000 --- a/numpy/distutils/tests/test_exec_command.py +++ /dev/null @@ -1,217 +0,0 @@ -import os -import pytest -import sys -from tempfile import TemporaryFile - -from numpy.distutils import exec_command -from numpy.distutils.exec_command import get_pythonexe -from numpy.testing import tempdir, assert_, assert_warns, IS_WASM - - -# In python 3 stdout, stderr are text (unicode compliant) devices, so to -# emulate them import StringIO from the io module. -from io import StringIO - -class redirect_stdout: - """Context manager to redirect stdout for exec_command test.""" - def __init__(self, stdout=None): - self._stdout = stdout or sys.stdout - - def __enter__(self): - self.old_stdout = sys.stdout - sys.stdout = self._stdout - - def __exit__(self, exc_type, exc_value, traceback): - self._stdout.flush() - sys.stdout = self.old_stdout - # note: closing sys.stdout won't close it. - self._stdout.close() - -class redirect_stderr: - """Context manager to redirect stderr for exec_command test.""" - def __init__(self, stderr=None): - self._stderr = stderr or sys.stderr - - def __enter__(self): - self.old_stderr = sys.stderr - sys.stderr = self._stderr - - def __exit__(self, exc_type, exc_value, traceback): - self._stderr.flush() - sys.stderr = self.old_stderr - # note: closing sys.stderr won't close it. - self._stderr.close() - -class emulate_nonposix: - """Context manager to emulate os.name != 'posix' """ - def __init__(self, osname='non-posix'): - self._new_name = osname - - def __enter__(self): - self._old_name = os.name - os.name = self._new_name - - def __exit__(self, exc_type, exc_value, traceback): - os.name = self._old_name - - -def test_exec_command_stdout(): - # Regression test for gh-2999 and gh-2915. 
- # There are several packages (nose, scipy.weave.inline, Sage inline - # Fortran) that replace stdout, in which case it doesn't have a fileno - # method. This is tested here, with a do-nothing command that fails if the - # presence of fileno() is assumed in exec_command. - - # The code has a special case for posix systems, so if we are on posix test - # both that the special case works and that the generic code works. - - # Test posix version: - with redirect_stdout(StringIO()): - with redirect_stderr(TemporaryFile()): - with assert_warns(DeprecationWarning): - exec_command.exec_command("cd '.'") - - if os.name == 'posix': - # Test general (non-posix) version: - with emulate_nonposix(): - with redirect_stdout(StringIO()): - with redirect_stderr(TemporaryFile()): - with assert_warns(DeprecationWarning): - exec_command.exec_command("cd '.'") - -def test_exec_command_stderr(): - # Test posix version: - with redirect_stdout(TemporaryFile(mode='w+')): - with redirect_stderr(StringIO()): - with assert_warns(DeprecationWarning): - exec_command.exec_command("cd '.'") - - if os.name == 'posix': - # Test general (non-posix) version: - with emulate_nonposix(): - with redirect_stdout(TemporaryFile()): - with redirect_stderr(StringIO()): - with assert_warns(DeprecationWarning): - exec_command.exec_command("cd '.'") - - -@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") -class TestExecCommand: - def setup_method(self): - self.pyexe = get_pythonexe() - - def check_nt(self, **kws): - s, o = exec_command.exec_command('cmd /C echo path=%path%') - assert_(s == 0) - assert_(o != '') - - s, o = exec_command.exec_command( - '"%s" -c "import sys;sys.stderr.write(sys.platform)"' % self.pyexe) - assert_(s == 0) - assert_(o == 'win32') - - def check_posix(self, **kws): - s, o = exec_command.exec_command("echo Hello", **kws) - assert_(s == 0) - assert_(o == 'Hello') - - s, o = exec_command.exec_command('echo $AAA', **kws) - assert_(s == 0) - assert_(o == '') - - s, o = 
exec_command.exec_command('echo "$AAA"', AAA='Tere', **kws) - assert_(s == 0) - assert_(o == 'Tere') - - s, o = exec_command.exec_command('echo "$AAA"', **kws) - assert_(s == 0) - assert_(o == '') - - if 'BBB' not in os.environ: - os.environ['BBB'] = 'Hi' - s, o = exec_command.exec_command('echo "$BBB"', **kws) - assert_(s == 0) - assert_(o == 'Hi') - - s, o = exec_command.exec_command('echo "$BBB"', BBB='Hey', **kws) - assert_(s == 0) - assert_(o == 'Hey') - - s, o = exec_command.exec_command('echo "$BBB"', **kws) - assert_(s == 0) - assert_(o == 'Hi') - - del os.environ['BBB'] - - s, o = exec_command.exec_command('echo "$BBB"', **kws) - assert_(s == 0) - assert_(o == '') - - - s, o = exec_command.exec_command('this_is_not_a_command', **kws) - assert_(s != 0) - assert_(o != '') - - s, o = exec_command.exec_command('echo path=$PATH', **kws) - assert_(s == 0) - assert_(o != '') - - s, o = exec_command.exec_command( - '"%s" -c "import sys,os;sys.stderr.write(os.name)"' % - self.pyexe, **kws) - assert_(s == 0) - assert_(o == 'posix') - - def check_basic(self, *kws): - s, o = exec_command.exec_command( - '"%s" -c "raise \'Ignore me.\'"' % self.pyexe, **kws) - assert_(s != 0) - assert_(o != '') - - s, o = exec_command.exec_command( - '"%s" -c "import sys;sys.stderr.write(\'0\');' - 'sys.stderr.write(\'1\');sys.stderr.write(\'2\')"' % - self.pyexe, **kws) - assert_(s == 0) - assert_(o == '012') - - s, o = exec_command.exec_command( - '"%s" -c "import sys;sys.exit(15)"' % self.pyexe, **kws) - assert_(s == 15) - assert_(o == '') - - s, o = exec_command.exec_command( - '"%s" -c "print(\'Heipa\'")' % self.pyexe, **kws) - assert_(s == 0) - assert_(o == 'Heipa') - - def check_execute_in(self, **kws): - with tempdir() as tmpdir: - fn = "file" - tmpfile = os.path.join(tmpdir, fn) - with open(tmpfile, 'w') as f: - f.write('Hello') - - s, o = exec_command.exec_command( - '"%s" -c "f = open(\'%s\', \'r\'); f.close()"' % - (self.pyexe, fn), **kws) - assert_(s != 0) - assert_(o != 
'') - s, o = exec_command.exec_command( - '"%s" -c "f = open(\'%s\', \'r\'); print(f.read()); ' - 'f.close()"' % (self.pyexe, fn), execute_in=tmpdir, **kws) - assert_(s == 0) - assert_(o == 'Hello') - - def test_basic(self): - with redirect_stdout(StringIO()): - with redirect_stderr(StringIO()): - with assert_warns(DeprecationWarning): - if os.name == "posix": - self.check_posix(use_tee=0) - self.check_posix(use_tee=1) - elif os.name == "nt": - self.check_nt(use_tee=0) - self.check_nt(use_tee=1) - self.check_execute_in(use_tee=0) - self.check_execute_in(use_tee=1) diff --git a/numpy/distutils/tests/test_fcompiler.py b/numpy/distutils/tests/test_fcompiler.py deleted file mode 100644 index dd97f1e72afc..000000000000 --- a/numpy/distutils/tests/test_fcompiler.py +++ /dev/null @@ -1,43 +0,0 @@ -from numpy.testing import assert_ -import numpy.distutils.fcompiler - -customizable_flags = [ - ('f77', 'F77FLAGS'), - ('f90', 'F90FLAGS'), - ('free', 'FREEFLAGS'), - ('arch', 'FARCH'), - ('debug', 'FDEBUG'), - ('flags', 'FFLAGS'), - ('linker_so', 'LDFLAGS'), -] - - -def test_fcompiler_flags(monkeypatch): - monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '0') - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='none') - flag_vars = fc.flag_vars.clone(lambda *args, **kwargs: None) - - for opt, envvar in customizable_flags: - new_flag = '-dummy-{}-flag'.format(opt) - prev_flags = getattr(flag_vars, opt) - - monkeypatch.setenv(envvar, new_flag) - new_flags = getattr(flag_vars, opt) - - monkeypatch.delenv(envvar) - assert_(new_flags == [new_flag]) - - monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '1') - - for opt, envvar in customizable_flags: - new_flag = '-dummy-{}-flag'.format(opt) - prev_flags = getattr(flag_vars, opt) - monkeypatch.setenv(envvar, new_flag) - new_flags = getattr(flag_vars, opt) - - monkeypatch.delenv(envvar) - if prev_flags is None: - assert_(new_flags == [new_flag]) - else: - assert_(new_flags == prev_flags + [new_flag]) - diff --git 
a/numpy/distutils/tests/test_fcompiler_gnu.py b/numpy/distutils/tests/test_fcompiler_gnu.py deleted file mode 100644 index 0817ae58c214..000000000000 --- a/numpy/distutils/tests/test_fcompiler_gnu.py +++ /dev/null @@ -1,55 +0,0 @@ -from numpy.testing import assert_ - -import numpy.distutils.fcompiler - -g77_version_strings = [ - ('GNU Fortran 0.5.25 20010319 (prerelease)', '0.5.25'), - ('GNU Fortran (GCC 3.2) 3.2 20020814 (release)', '3.2'), - ('GNU Fortran (GCC) 3.3.3 20040110 (prerelease) (Debian)', '3.3.3'), - ('GNU Fortran (GCC) 3.3.3 (Debian 20040401)', '3.3.3'), - ('GNU Fortran (GCC 3.2.2 20030222 (Red Hat Linux 3.2.2-5)) 3.2.2' - ' 20030222 (Red Hat Linux 3.2.2-5)', '3.2.2'), -] - -gfortran_version_strings = [ - ('GNU Fortran 95 (GCC 4.0.3 20051023 (prerelease) (Debian 4.0.2-3))', - '4.0.3'), - ('GNU Fortran 95 (GCC) 4.1.0', '4.1.0'), - ('GNU Fortran 95 (GCC) 4.2.0 20060218 (experimental)', '4.2.0'), - ('GNU Fortran (GCC) 4.3.0 20070316 (experimental)', '4.3.0'), - ('GNU Fortran (rubenvb-4.8.0) 4.8.0', '4.8.0'), - ('4.8.0', '4.8.0'), - ('4.0.3-7', '4.0.3'), - ("gfortran: warning: couldn't understand kern.osversion '14.1.0\n4.9.1", - '4.9.1'), - ("gfortran: warning: couldn't understand kern.osversion '14.1.0\n" - "gfortran: warning: yet another warning\n4.9.1", - '4.9.1'), - ('GNU Fortran (crosstool-NG 8a21ab48) 7.2.0', '7.2.0') -] - -class TestG77Versions: - def test_g77_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu') - for vs, version in g77_version_strings: - v = fc.version_match(vs) - assert_(v == version, (vs, v)) - - def test_not_g77(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu') - for vs, _ in gfortran_version_strings: - v = fc.version_match(vs) - assert_(v is None, (vs, v)) - -class TestGFortranVersions: - def test_gfortran_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') - for vs, version in gfortran_version_strings: - v = fc.version_match(vs) - assert_(v == version, 
(vs, v)) - - def test_not_gfortran(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') - for vs, _ in g77_version_strings: - v = fc.version_match(vs) - assert_(v is None, (vs, v)) diff --git a/numpy/distutils/tests/test_fcompiler_intel.py b/numpy/distutils/tests/test_fcompiler_intel.py deleted file mode 100644 index 45c9cdac1910..000000000000 --- a/numpy/distutils/tests/test_fcompiler_intel.py +++ /dev/null @@ -1,30 +0,0 @@ -import numpy.distutils.fcompiler -from numpy.testing import assert_ - - -intel_32bit_version_strings = [ - ("Intel(R) Fortran Intel(R) 32-bit Compiler Professional for applications" - "running on Intel(R) 32, Version 11.1", '11.1'), -] - -intel_64bit_version_strings = [ - ("Intel(R) Fortran IA-64 Compiler Professional for applications" - "running on IA-64, Version 11.0", '11.0'), - ("Intel(R) Fortran Intel(R) 64 Compiler Professional for applications" - "running on Intel(R) 64, Version 11.1", '11.1') -] - -class TestIntelFCompilerVersions: - def test_32bit_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intel') - for vs, version in intel_32bit_version_strings: - v = fc.version_match(vs) - assert_(v == version) - - -class TestIntelEM64TFCompilerVersions: - def test_64bit_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intelem') - for vs, version in intel_64bit_version_strings: - v = fc.version_match(vs) - assert_(v == version) diff --git a/numpy/distutils/tests/test_fcompiler_nagfor.py b/numpy/distutils/tests/test_fcompiler_nagfor.py deleted file mode 100644 index 2e04f5266dc1..000000000000 --- a/numpy/distutils/tests/test_fcompiler_nagfor.py +++ /dev/null @@ -1,22 +0,0 @@ -from numpy.testing import assert_ -import numpy.distutils.fcompiler - -nag_version_strings = [('nagfor', 'NAG Fortran Compiler Release ' - '6.2(Chiyoda) Build 6200', '6.2'), - ('nagfor', 'NAG Fortran Compiler Release ' - '6.1(Tozai) Build 6136', '6.1'), - ('nagfor', 'NAG Fortran Compiler Release ' - 
'6.0(Hibiya) Build 1021', '6.0'), - ('nagfor', 'NAG Fortran Compiler Release ' - '5.3.2(971)', '5.3.2'), - ('nag', 'NAGWare Fortran 95 compiler Release 5.1' - '(347,355-367,375,380-383,389,394,399,401-402,407,' - '431,435,437,446,459-460,463,472,494,496,503,508,' - '511,517,529,555,557,565)', '5.1')] - -class TestNagFCompilerVersions: - def test_version_match(self): - for comp, vs, version in nag_version_strings: - fc = numpy.distutils.fcompiler.new_fcompiler(compiler=comp) - v = fc.version_match(vs) - assert_(v == version) diff --git a/numpy/distutils/tests/test_from_template.py b/numpy/distutils/tests/test_from_template.py deleted file mode 100644 index 588175496299..000000000000 --- a/numpy/distutils/tests/test_from_template.py +++ /dev/null @@ -1,44 +0,0 @@ - -from numpy.distutils.from_template import process_str -from numpy.testing import assert_equal - - -pyf_src = """ -python module foo - <_rd=real,double precision> - interface - subroutine foosub(tol) - <_rd>, intent(in,out) :: tol - end subroutine foosub - end interface -end python module foo -""" - -expected_pyf = """ -python module foo - interface - subroutine sfoosub(tol) - real, intent(in,out) :: tol - end subroutine sfoosub - subroutine dfoosub(tol) - double precision, intent(in,out) :: tol - end subroutine dfoosub - end interface -end python module foo -""" - - -def normalize_whitespace(s): - """ - Remove leading and trailing whitespace, and convert internal - stretches of whitespace to a single space. 
- """ - return ' '.join(s.split()) - - -def test_from_template(): - """Regression test for gh-10712.""" - pyf = process_str(pyf_src) - normalized_pyf = normalize_whitespace(pyf) - normalized_expected_pyf = normalize_whitespace(expected_pyf) - assert_equal(normalized_pyf, normalized_expected_pyf) diff --git a/numpy/distutils/tests/test_log.py b/numpy/distutils/tests/test_log.py deleted file mode 100644 index 72fddf37370f..000000000000 --- a/numpy/distutils/tests/test_log.py +++ /dev/null @@ -1,34 +0,0 @@ -import io -import re -from contextlib import redirect_stdout - -import pytest - -from numpy.distutils import log - - -def setup_module(): - f = io.StringIO() # changing verbosity also logs here, capture that - with redirect_stdout(f): - log.set_verbosity(2, force=True) # i.e. DEBUG - - -def teardown_module(): - log.set_verbosity(0, force=True) # the default - - -r_ansi = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])") - - -@pytest.mark.parametrize("func_name", ["error", "warn", "info", "debug"]) -def test_log_prefix(func_name): - func = getattr(log, func_name) - msg = f"{func_name} message" - f = io.StringIO() - with redirect_stdout(f): - func(msg) - out = f.getvalue() - assert out # sanity check - clean_out = r_ansi.sub("", out) - line = next(line for line in clean_out.splitlines()) - assert line == f"{func_name.upper()}: {msg}" diff --git a/numpy/distutils/tests/test_mingw32ccompiler.py b/numpy/distutils/tests/test_mingw32ccompiler.py deleted file mode 100644 index ebedacb32448..000000000000 --- a/numpy/distutils/tests/test_mingw32ccompiler.py +++ /dev/null @@ -1,42 +0,0 @@ -import shutil -import subprocess -import sys -import pytest - -from numpy.distutils import mingw32ccompiler - - -@pytest.mark.skipif(sys.platform != 'win32', reason='win32 only test') -def test_build_import(): - '''Test the mingw32ccompiler.build_import_library, which builds a - `python.a` from the MSVC `python.lib` - ''' - - # make sure `nm.exe` exists and supports the current python 
version. This - # can get mixed up when the PATH has a 64-bit nm but the python is 32-bit - try: - out = subprocess.check_output(['nm.exe', '--help']) - except FileNotFoundError: - pytest.skip("'nm.exe' not on path, is mingw installed?") - supported = out[out.find(b'supported targets:'):] - if sys.maxsize < 2**32: - if b'pe-i386' not in supported: - raise ValueError("'nm.exe' found but it does not support 32-bit " - "dlls when using 32-bit python. Supported " - "formats: '%s'" % supported) - elif b'pe-x86-64' not in supported: - raise ValueError("'nm.exe' found but it does not support 64-bit " - "dlls when using 64-bit python. Supported " - "formats: '%s'" % supported) - # Hide the import library to force a build - has_import_lib, fullpath = mingw32ccompiler._check_for_import_lib() - if has_import_lib: - shutil.move(fullpath, fullpath + '.bak') - - try: - # Whew, now we can actually test the function - mingw32ccompiler.build_import_library() - - finally: - if has_import_lib: - shutil.move(fullpath + '.bak', fullpath) diff --git a/numpy/distutils/tests/test_misc_util.py b/numpy/distutils/tests/test_misc_util.py deleted file mode 100644 index 40e7606eeb76..000000000000 --- a/numpy/distutils/tests/test_misc_util.py +++ /dev/null @@ -1,88 +0,0 @@ -from os.path import join, sep, dirname - -import pytest - -from numpy.distutils.misc_util import ( - appendpath, minrelpath, gpaths, get_shared_lib_extension, get_info - ) -from numpy.testing import ( - assert_, assert_equal, IS_EDITABLE - ) - -ajoin = lambda *paths: join(*((sep,)+paths)) - -class TestAppendpath: - - def test_1(self): - assert_equal(appendpath('prefix', 'name'), join('prefix', 'name')) - assert_equal(appendpath('/prefix', 'name'), ajoin('prefix', 'name')) - assert_equal(appendpath('/prefix', '/name'), ajoin('prefix', 'name')) - assert_equal(appendpath('prefix', '/name'), join('prefix', 'name')) - - def test_2(self): - assert_equal(appendpath('prefix/sub', 'name'), - join('prefix', 'sub', 'name')) - 
assert_equal(appendpath('prefix/sub', 'sup/name'), - join('prefix', 'sub', 'sup', 'name')) - assert_equal(appendpath('/prefix/sub', '/prefix/name'), - ajoin('prefix', 'sub', 'name')) - - def test_3(self): - assert_equal(appendpath('/prefix/sub', '/prefix/sup/name'), - ajoin('prefix', 'sub', 'sup', 'name')) - assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sup/sup2/name'), - ajoin('prefix', 'sub', 'sub2', 'sup', 'sup2', 'name')) - assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sub/sup/name'), - ajoin('prefix', 'sub', 'sub2', 'sup', 'name')) - -class TestMinrelpath: - - def test_1(self): - n = lambda path: path.replace('/', sep) - assert_equal(minrelpath(n('aa/bb')), n('aa/bb')) - assert_equal(minrelpath('..'), '..') - assert_equal(minrelpath(n('aa/..')), '') - assert_equal(minrelpath(n('aa/../bb')), 'bb') - assert_equal(minrelpath(n('aa/bb/..')), 'aa') - assert_equal(minrelpath(n('aa/bb/../..')), '') - assert_equal(minrelpath(n('aa/bb/../cc/../dd')), n('aa/dd')) - assert_equal(minrelpath(n('.././..')), n('../..')) - assert_equal(minrelpath(n('aa/bb/.././../dd')), n('dd')) - -class TestGpaths: - - def test_gpaths(self): - local_path = minrelpath(join(dirname(__file__), '..')) - ls = gpaths('command/*.py', local_path) - assert_(join(local_path, 'command', 'build_src.py') in ls, repr(ls)) - f = gpaths('system_info.py', local_path) - assert_(join(local_path, 'system_info.py') == f[0], repr(f)) - -class TestSharedExtension: - - def test_get_shared_lib_extension(self): - import sys - ext = get_shared_lib_extension(is_python_ext=False) - if sys.platform.startswith('linux'): - assert_equal(ext, '.so') - elif sys.platform.startswith('gnukfreebsd'): - assert_equal(ext, '.so') - elif sys.platform.startswith('darwin'): - assert_equal(ext, '.dylib') - elif sys.platform.startswith('win'): - assert_equal(ext, '.dll') - # just check for no crash - assert_(get_shared_lib_extension(is_python_ext=True)) - - -@pytest.mark.skipif( - IS_EDITABLE, - reason="`get_info` .ini 
lookup method incompatible with editable install" -) -def test_installed_npymath_ini(): - # Regression test for gh-7707. If npymath.ini wasn't installed, then this - # will give an error. - info = get_info('npymath') - - assert isinstance(info, dict) - assert "define_macros" in info diff --git a/numpy/distutils/tests/test_npy_pkg_config.py b/numpy/distutils/tests/test_npy_pkg_config.py deleted file mode 100644 index b287ebe2e832..000000000000 --- a/numpy/distutils/tests/test_npy_pkg_config.py +++ /dev/null @@ -1,84 +0,0 @@ -import os - -from numpy.distutils.npy_pkg_config import read_config, parse_flags -from numpy.testing import temppath, assert_ - -simple = """\ -[meta] -Name = foo -Description = foo lib -Version = 0.1 - -[default] -cflags = -I/usr/include -libs = -L/usr/lib -""" -simple_d = {'cflags': '-I/usr/include', 'libflags': '-L/usr/lib', - 'version': '0.1', 'name': 'foo'} - -simple_variable = """\ -[meta] -Name = foo -Description = foo lib -Version = 0.1 - -[variables] -prefix = /foo/bar -libdir = ${prefix}/lib -includedir = ${prefix}/include - -[default] -cflags = -I${includedir} -libs = -L${libdir} -""" -simple_variable_d = {'cflags': '-I/foo/bar/include', 'libflags': '-L/foo/bar/lib', - 'version': '0.1', 'name': 'foo'} - -class TestLibraryInfo: - def test_simple(self): - with temppath('foo.ini') as path: - with open(path, 'w') as f: - f.write(simple) - pkg = os.path.splitext(path)[0] - out = read_config(pkg) - - assert_(out.cflags() == simple_d['cflags']) - assert_(out.libs() == simple_d['libflags']) - assert_(out.name == simple_d['name']) - assert_(out.version == simple_d['version']) - - def test_simple_variable(self): - with temppath('foo.ini') as path: - with open(path, 'w') as f: - f.write(simple_variable) - pkg = os.path.splitext(path)[0] - out = read_config(pkg) - - assert_(out.cflags() == simple_variable_d['cflags']) - assert_(out.libs() == simple_variable_d['libflags']) - assert_(out.name == simple_variable_d['name']) - assert_(out.version == 
simple_variable_d['version']) - out.vars['prefix'] = '/Users/david' - assert_(out.cflags() == '-I/Users/david/include') - -class TestParseFlags: - def test_simple_cflags(self): - d = parse_flags("-I/usr/include") - assert_(d['include_dirs'] == ['/usr/include']) - - d = parse_flags("-I/usr/include -DFOO") - assert_(d['include_dirs'] == ['/usr/include']) - assert_(d['macros'] == ['FOO']) - - d = parse_flags("-I /usr/include -DFOO") - assert_(d['include_dirs'] == ['/usr/include']) - assert_(d['macros'] == ['FOO']) - - def test_simple_lflags(self): - d = parse_flags("-L/usr/lib -lfoo -L/usr/lib -lbar") - assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib']) - assert_(d['libraries'] == ['foo', 'bar']) - - d = parse_flags("-L /usr/lib -lfoo -L/usr/lib -lbar") - assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib']) - assert_(d['libraries'] == ['foo', 'bar']) diff --git a/numpy/distutils/tests/test_shell_utils.py b/numpy/distutils/tests/test_shell_utils.py deleted file mode 100644 index 696d38ddd66a..000000000000 --- a/numpy/distutils/tests/test_shell_utils.py +++ /dev/null @@ -1,79 +0,0 @@ -import pytest -import subprocess -import json -import sys - -from numpy.distutils import _shell_utils -from numpy.testing import IS_WASM - -argv_cases = [ - [r'exe'], - [r'path/exe'], - [r'path\exe'], - [r'\\server\path\exe'], - [r'path to/exe'], - [r'path to\exe'], - - [r'exe', '--flag'], - [r'path/exe', '--flag'], - [r'path\exe', '--flag'], - [r'path to/exe', '--flag'], - [r'path to\exe', '--flag'], - - # flags containing literal quotes in their name - [r'path to/exe', '--flag-"quoted"'], - [r'path to\exe', '--flag-"quoted"'], - [r'path to/exe', '"--flag-quoted"'], - [r'path to\exe', '"--flag-quoted"'], -] - - -@pytest.fixture(params=[ - _shell_utils.WindowsParser, - _shell_utils.PosixParser -]) -def Parser(request): - return request.param - - -@pytest.fixture -def runner(Parser): - if Parser != _shell_utils.NativeParser: - pytest.skip('Unable to run with non-native parser') - - if 
Parser == _shell_utils.WindowsParser: - return lambda cmd: subprocess.check_output(cmd) - elif Parser == _shell_utils.PosixParser: - # posix has no non-shell string parsing - return lambda cmd: subprocess.check_output(cmd, shell=True) - else: - raise NotImplementedError - - -@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") -@pytest.mark.parametrize('argv', argv_cases) -def test_join_matches_subprocess(Parser, runner, argv): - """ - Test that join produces strings understood by subprocess - """ - # invoke python to return its arguments as json - cmd = [ - sys.executable, '-c', - 'import json, sys; print(json.dumps(sys.argv[1:]))' - ] - joined = Parser.join(cmd + argv) - json_out = runner(joined).decode() - assert json.loads(json_out) == argv - - -@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") -@pytest.mark.parametrize('argv', argv_cases) -def test_roundtrip(Parser, argv): - """ - Test that split is the inverse operation of join - """ - try: - joined = Parser.join(argv) - assert argv == Parser.split(joined) - except NotImplementedError: - pytest.skip("Not implemented") diff --git a/numpy/distutils/tests/test_system_info.py b/numpy/distutils/tests/test_system_info.py deleted file mode 100644 index 9bcc09050503..000000000000 --- a/numpy/distutils/tests/test_system_info.py +++ /dev/null @@ -1,334 +0,0 @@ -import os -import shutil -import pytest -from tempfile import mkstemp, mkdtemp -from subprocess import Popen, PIPE -import importlib.metadata -from distutils.errors import DistutilsError - -from numpy.testing import assert_, assert_equal, assert_raises -from numpy.distutils import ccompiler, customized_ccompiler -from numpy.distutils.system_info import system_info, ConfigParser, mkl_info -from numpy.distutils.system_info import AliasedOptionError -from numpy.distutils.system_info import default_lib_dirs, default_include_dirs -from numpy.distutils import _shell_utils - - -try: - if importlib.metadata.version('setuptools') >= '60': - # 
pkg-resources gives deprecation warnings, and there may be more - # issues. We only support setuptools <60 - pytest.skip("setuptools is too new", allow_module_level=True) -except importlib.metadata.PackageNotFoundError: - # we don't require `setuptools`; if it is not found, continue - pass - - -def get_class(name, notfound_action=1): - """ - notfound_action: - 0 - do nothing - 1 - display warning message - 2 - raise error - """ - cl = {'temp1': Temp1Info, - 'temp2': Temp2Info, - 'duplicate_options': DuplicateOptionInfo, - }.get(name.lower(), _system_info) - return cl() - -simple_site = """ -[ALL] -library_dirs = {dir1:s}{pathsep:s}{dir2:s} -libraries = {lib1:s},{lib2:s} -extra_compile_args = -I/fake/directory -I"/path with/spaces" -Os -runtime_library_dirs = {dir1:s} - -[temp1] -library_dirs = {dir1:s} -libraries = {lib1:s} -runtime_library_dirs = {dir1:s} - -[temp2] -library_dirs = {dir2:s} -libraries = {lib2:s} -extra_link_args = -Wl,-rpath={lib2_escaped:s} -rpath = {dir2:s} - -[duplicate_options] -mylib_libs = {lib1:s} -libraries = {lib2:s} -""" -site_cfg = simple_site - -fakelib_c_text = """ -/* This file is generated from numpy/distutils/testing/test_system_info.py */ -#include -void foo(void) { - printf("Hello foo"); -} -void bar(void) { - printf("Hello bar"); -} -""" - -def have_compiler(): - """ Return True if there appears to be an executable compiler - """ - compiler = customized_ccompiler() - try: - cmd = compiler.compiler # Unix compilers - except AttributeError: - try: - if not compiler.initialized: - compiler.initialize() # MSVC is different - except (DistutilsError, ValueError): - return False - cmd = [compiler.cc] - try: - p = Popen(cmd, stdout=PIPE, stderr=PIPE) - p.stdout.close() - p.stderr.close() - p.wait() - except OSError: - return False - return True - - -HAVE_COMPILER = have_compiler() - - -class _system_info(system_info): - - def __init__(self, - default_lib_dirs=default_lib_dirs, - default_include_dirs=default_include_dirs, - verbosity=1, 
- ): - self.__class__.info = {} - self.local_prefixes = [] - defaults = {'library_dirs': '', - 'include_dirs': '', - 'runtime_library_dirs': '', - 'rpath': '', - 'src_dirs': '', - 'search_static_first': "0", - 'extra_compile_args': '', - 'extra_link_args': ''} - self.cp = ConfigParser(defaults) - # We have to parse the config files afterwards - # to have a consistent temporary filepath - - def _check_libs(self, lib_dirs, libs, opt_libs, exts): - """Override _check_libs to return with all dirs """ - info = {'libraries': libs, 'library_dirs': lib_dirs} - return info - - -class Temp1Info(_system_info): - """For testing purposes""" - section = 'temp1' - - -class Temp2Info(_system_info): - """For testing purposes""" - section = 'temp2' - -class DuplicateOptionInfo(_system_info): - """For testing purposes""" - section = 'duplicate_options' - - -class TestSystemInfoReading: - - def setup_method(self): - """ Create the libraries """ - # Create 2 sources and 2 libraries - self._dir1 = mkdtemp() - self._src1 = os.path.join(self._dir1, 'foo.c') - self._lib1 = os.path.join(self._dir1, 'libfoo.so') - self._dir2 = mkdtemp() - self._src2 = os.path.join(self._dir2, 'bar.c') - self._lib2 = os.path.join(self._dir2, 'libbar.so') - # Update local site.cfg - global simple_site, site_cfg - site_cfg = simple_site.format(**{ - 'dir1': self._dir1, - 'lib1': self._lib1, - 'dir2': self._dir2, - 'lib2': self._lib2, - 'pathsep': os.pathsep, - 'lib2_escaped': _shell_utils.NativeParser.join([self._lib2]) - }) - # Write site.cfg - fd, self._sitecfg = mkstemp() - os.close(fd) - with open(self._sitecfg, 'w') as fd: - fd.write(site_cfg) - # Write the sources - with open(self._src1, 'w') as fd: - fd.write(fakelib_c_text) - with open(self._src2, 'w') as fd: - fd.write(fakelib_c_text) - # We create all class-instances - - def site_and_parse(c, site_cfg): - c.files = [site_cfg] - c.parse_config_files() - return c - self.c_default = site_and_parse(get_class('default'), self._sitecfg) - self.c_temp1 = 
site_and_parse(get_class('temp1'), self._sitecfg) - self.c_temp2 = site_and_parse(get_class('temp2'), self._sitecfg) - self.c_dup_options = site_and_parse(get_class('duplicate_options'), - self._sitecfg) - - def teardown_method(self): - # Do each removal separately - try: - shutil.rmtree(self._dir1) - except Exception: - pass - try: - shutil.rmtree(self._dir2) - except Exception: - pass - try: - os.remove(self._sitecfg) - except Exception: - pass - - def test_all(self): - # Read in all information in the ALL block - tsi = self.c_default - assert_equal(tsi.get_lib_dirs(), [self._dir1, self._dir2]) - assert_equal(tsi.get_libraries(), [self._lib1, self._lib2]) - assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1]) - extra = tsi.calc_extra_info() - assert_equal(extra['extra_compile_args'], ['-I/fake/directory', '-I/path with/spaces', '-Os']) - - def test_temp1(self): - # Read in all information in the temp1 block - tsi = self.c_temp1 - assert_equal(tsi.get_lib_dirs(), [self._dir1]) - assert_equal(tsi.get_libraries(), [self._lib1]) - assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1]) - - def test_temp2(self): - # Read in all information in the temp2 block - tsi = self.c_temp2 - assert_equal(tsi.get_lib_dirs(), [self._dir2]) - assert_equal(tsi.get_libraries(), [self._lib2]) - # Now from rpath and not runtime_library_dirs - assert_equal(tsi.get_runtime_lib_dirs(key='rpath'), [self._dir2]) - extra = tsi.calc_extra_info() - assert_equal(extra['extra_link_args'], ['-Wl,-rpath=' + self._lib2]) - - def test_duplicate_options(self): - # Ensure that duplicates are raising an AliasedOptionError - tsi = self.c_dup_options - assert_raises(AliasedOptionError, tsi.get_option_single, "mylib_libs", "libraries") - assert_equal(tsi.get_libs("mylib_libs", [self._lib1]), [self._lib1]) - assert_equal(tsi.get_libs("libraries", [self._lib2]), [self._lib2]) - - @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler") - def test_compile1(self): - # Compile source and link the 
first source - c = customized_ccompiler() - previousDir = os.getcwd() - try: - # Change directory to not screw up directories - os.chdir(self._dir1) - c.compile([os.path.basename(self._src1)], output_dir=self._dir1) - # Ensure that the object exists - assert_(os.path.isfile(self._src1.replace('.c', '.o')) or - os.path.isfile(self._src1.replace('.c', '.obj'))) - finally: - os.chdir(previousDir) - - @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler") - @pytest.mark.skipif('msvc' in repr(ccompiler.new_compiler()), - reason="Fails with MSVC compiler ") - def test_compile2(self): - # Compile source and link the second source - tsi = self.c_temp2 - c = customized_ccompiler() - extra_link_args = tsi.calc_extra_info()['extra_link_args'] - previousDir = os.getcwd() - try: - # Change directory to not screw up directories - os.chdir(self._dir2) - c.compile([os.path.basename(self._src2)], output_dir=self._dir2, - extra_postargs=extra_link_args) - # Ensure that the object exists - assert_(os.path.isfile(self._src2.replace('.c', '.o'))) - finally: - os.chdir(previousDir) - - HAS_MKL = "mkl_rt" in mkl_info().calc_libraries_info().get("libraries", []) - - @pytest.mark.xfail(HAS_MKL, reason=("`[DEFAULT]` override doesn't work if " - "numpy is built with MKL support")) - def test_overrides(self): - previousDir = os.getcwd() - cfg = os.path.join(self._dir1, 'site.cfg') - shutil.copy(self._sitecfg, cfg) - try: - os.chdir(self._dir1) - # Check that the '[ALL]' section does not override - # missing values from other sections - info = mkl_info() - lib_dirs = info.cp['ALL']['library_dirs'].split(os.pathsep) - assert info.get_lib_dirs() != lib_dirs - - # But if we copy the values to a '[mkl]' section the value - # is correct - with open(cfg) as fid: - mkl = fid.read().replace('[ALL]', '[mkl]', 1) - with open(cfg, 'w') as fid: - fid.write(mkl) - info = mkl_info() - assert info.get_lib_dirs() == lib_dirs - - # Also, the values will be taken from a section named '[DEFAULT]' - 
with open(cfg) as fid: - dflt = fid.read().replace('[mkl]', '[DEFAULT]', 1) - with open(cfg, 'w') as fid: - fid.write(dflt) - info = mkl_info() - assert info.get_lib_dirs() == lib_dirs - finally: - os.chdir(previousDir) - - -def test_distutils_parse_env_order(monkeypatch): - from numpy.distutils.system_info import _parse_env_order - env = 'NPY_TESTS_DISTUTILS_PARSE_ENV_ORDER' - - base_order = list('abcdef') - - monkeypatch.setenv(env, 'b,i,e,f') - order, unknown = _parse_env_order(base_order, env) - assert len(order) == 3 - assert order == list('bef') - assert len(unknown) == 1 - - # For when LAPACK/BLAS optimization is disabled - monkeypatch.setenv(env, '') - order, unknown = _parse_env_order(base_order, env) - assert len(order) == 0 - assert len(unknown) == 0 - - for prefix in '^!': - monkeypatch.setenv(env, f'{prefix}b,i,e') - order, unknown = _parse_env_order(base_order, env) - assert len(order) == 4 - assert order == list('acdf') - assert len(unknown) == 1 - - with pytest.raises(ValueError): - monkeypatch.setenv(env, 'b,^e,i') - _parse_env_order(base_order, env) - - with pytest.raises(ValueError): - monkeypatch.setenv(env, '!b,^e,i') - _parse_env_order(base_order, env) diff --git a/numpy/distutils/tests/utilities.py b/numpy/distutils/tests/utilities.py deleted file mode 100644 index 5016a83d2164..000000000000 --- a/numpy/distutils/tests/utilities.py +++ /dev/null @@ -1,90 +0,0 @@ -# Kanged out of numpy.f2py.tests.util for test_build_ext -from numpy.testing import IS_WASM -import textwrap -import shutil -import tempfile -import os -import re -import subprocess -import sys - -# -# Check if compilers are available at all... -# - -_compiler_status = None - - -def _get_compiler_status(): - global _compiler_status - if _compiler_status is not None: - return _compiler_status - - _compiler_status = (False, False, False) - if IS_WASM: - # Can't run compiler from inside WASM. - return _compiler_status - - # XXX: this is really ugly. 
But I don't know how to invoke Distutils - # in a safer way... - code = textwrap.dedent( - f"""\ - import os - import sys - sys.path = {repr(sys.path)} - - def configuration(parent_name='',top_path=None): - global config - from numpy.distutils.misc_util import Configuration - config = Configuration('', parent_name, top_path) - return config - - from numpy.distutils.core import setup - setup(configuration=configuration) - - config_cmd = config.get_config_cmd() - have_c = config_cmd.try_compile('void foo() {{}}') - print('COMPILERS:%%d,%%d,%%d' %% (have_c, - config.have_f77c(), - config.have_f90c())) - sys.exit(99) - """ - ) - code = code % dict(syspath=repr(sys.path)) - - tmpdir = tempfile.mkdtemp() - try: - script = os.path.join(tmpdir, "setup.py") - - with open(script, "w") as f: - f.write(code) - - cmd = [sys.executable, "setup.py", "config"] - p = subprocess.Popen( - cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=tmpdir - ) - out, err = p.communicate() - finally: - shutil.rmtree(tmpdir) - - m = re.search(rb"COMPILERS:(\d+),(\d+),(\d+)", out) - if m: - _compiler_status = ( - bool(int(m.group(1))), - bool(int(m.group(2))), - bool(int(m.group(3))), - ) - # Finished - return _compiler_status - - -def has_c_compiler(): - return _get_compiler_status()[0] - - -def has_f77_compiler(): - return _get_compiler_status()[1] - - -def has_f90_compiler(): - return _get_compiler_status()[2] diff --git a/numpy/distutils/unixccompiler.py b/numpy/distutils/unixccompiler.py deleted file mode 100644 index 4884960fdf22..000000000000 --- a/numpy/distutils/unixccompiler.py +++ /dev/null @@ -1,141 +0,0 @@ -""" -unixccompiler - can handle very long argument lists for ar. 
- -""" -import os -import sys -import subprocess -import shlex - -from distutils.errors import CompileError, DistutilsExecError, LibError -from distutils.unixccompiler import UnixCCompiler -from numpy.distutils.ccompiler import replace_method -from numpy.distutils.misc_util import _commandline_dep_string -from numpy.distutils import log - -# Note that UnixCCompiler._compile appeared in Python 2.3 -def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): - """Compile a single source files with a Unix-style compiler.""" - # HP ad-hoc fix, see ticket 1383 - ccomp = self.compiler_so - if ccomp[0] == 'aCC': - # remove flags that will trigger ANSI-C mode for aCC - if '-Ae' in ccomp: - ccomp.remove('-Ae') - if '-Aa' in ccomp: - ccomp.remove('-Aa') - # add flags for (almost) sane C++ handling - ccomp += ['-AA'] - self.compiler_so = ccomp - # ensure OPT environment variable is read - if 'OPT' in os.environ: - # XXX who uses this? - from sysconfig import get_config_vars - opt = shlex.join(shlex.split(os.environ['OPT'])) - gcv_opt = shlex.join(shlex.split(get_config_vars('OPT')[0])) - ccomp_s = shlex.join(self.compiler_so) - if opt not in ccomp_s: - ccomp_s = ccomp_s.replace(gcv_opt, opt) - self.compiler_so = shlex.split(ccomp_s) - llink_s = shlex.join(self.linker_so) - if opt not in llink_s: - self.linker_so = self.linker_so + shlex.split(opt) - - display = '%s: %s' % (os.path.basename(self.compiler_so[0]), src) - - # gcc style automatic dependencies, outputs a makefile (-MF) that lists - # all headers needed by a c file as a side effect of compilation (-MMD) - if getattr(self, '_auto_depends', False): - deps = ['-MMD', '-MF', obj + '.d'] - else: - deps = [] - - try: - self.spawn(self.compiler_so + cc_args + [src, '-o', obj] + deps + - extra_postargs, display = display) - except DistutilsExecError as e: - msg = str(e) - raise CompileError(msg) from None - - # add commandline flags to dependency file - if deps: - # After running the compiler, the 
file created will be in EBCDIC - # but will not be tagged as such. This tags it so the file does not - # have multiple different encodings being written to it - if sys.platform == 'zos': - subprocess.check_output(['chtag', '-tc', 'IBM1047', obj + '.d']) - with open(obj + '.d', 'a') as f: - f.write(_commandline_dep_string(cc_args, extra_postargs, pp_opts)) - -replace_method(UnixCCompiler, '_compile', UnixCCompiler__compile) - - -def UnixCCompiler_create_static_lib(self, objects, output_libname, - output_dir=None, debug=0, target_lang=None): - """ - Build a static library in a separate sub-process. - - Parameters - ---------- - objects : list or tuple of str - List of paths to object files used to build the static library. - output_libname : str - The library name as an absolute or relative (if `output_dir` is used) - path. - output_dir : str, optional - The path to the output directory. Default is None, in which case - the ``output_dir`` attribute of the UnixCCompiler instance. - debug : bool, optional - This parameter is not used. - target_lang : str, optional - This parameter is not used. - - Returns - ------- - None - - """ - objects, output_dir = self._fix_object_args(objects, output_dir) - - output_filename = \ - self.library_filename(output_libname, output_dir=output_dir) - - if self._need_link(objects, output_filename): - try: - # previous .a may be screwed up; best to remove it first - # and recreate. 
- # Also, ar on OS X doesn't handle updating universal archives - os.unlink(output_filename) - except OSError: - pass - self.mkpath(os.path.dirname(output_filename)) - tmp_objects = objects + self.objects - while tmp_objects: - objects = tmp_objects[:50] - tmp_objects = tmp_objects[50:] - display = '%s: adding %d object files to %s' % ( - os.path.basename(self.archiver[0]), - len(objects), output_filename) - self.spawn(self.archiver + [output_filename] + objects, - display = display) - - # Not many Unices required ranlib anymore -- SunOS 4.x is, I - # think the only major Unix that does. Maybe we need some - # platform intelligence here to skip ranlib if it's not - # needed -- or maybe Python's configure script took care of - # it for us, hence the check for leading colon. - if self.ranlib: - display = '%s:@ %s' % (os.path.basename(self.ranlib[0]), - output_filename) - try: - self.spawn(self.ranlib + [output_filename], - display = display) - except DistutilsExecError as e: - msg = str(e) - raise LibError(msg) from None - else: - log.debug("skipping %s (up-to-date)", output_filename) - return - -replace_method(UnixCCompiler, 'create_static_lib', - UnixCCompiler_create_static_lib) diff --git a/numpy/doc/ufuncs.py b/numpy/doc/ufuncs.py index c99e9abc99a5..f97e9ff3f80c 100644 --- a/numpy/doc/ufuncs.py +++ b/numpy/doc/ufuncs.py @@ -113,7 +113,8 @@ output argument is used, the ufunc still returns a reference to the result. >>> x = np.arange(2) - >>> np.add(np.arange(2),np.arange(2.),x) + >>> np.add(np.arange(2, dtype=np.float64), np.arange(2, dtype=np.float64), x, + ... 
casting='unsafe') array([0, 2]) >>> x array([0, 2]) diff --git a/numpy/dtypes.pyi b/numpy/dtypes.pyi index cea64282252b..f1f1261d3d32 100644 --- a/numpy/dtypes.pyi +++ b/numpy/dtypes.pyi @@ -1,43 +1,624 @@ +# Aliases for builtins shadowed by classes to avoid annotations resolving to class members by ty +from builtins import str as py_str, type as py_type +from typing import ( + Any, + Generic, + Literal as L, + LiteralString, + Never, + NoReturn, + Self, + final, + overload, + type_check_only, +) +from typing_extensions import TypeVar + import numpy as np +__all__ = [ + "BoolDType", + "Int8DType", + "ByteDType", + "UInt8DType", + "UByteDType", + "Int16DType", + "ShortDType", + "UInt16DType", + "UShortDType", + "Int32DType", + "IntDType", + "UInt32DType", + "UIntDType", + "Int64DType", + "LongDType", + "UInt64DType", + "ULongDType", + "LongLongDType", + "ULongLongDType", + "Float16DType", + "Float32DType", + "Float64DType", + "LongDoubleDType", + "Complex64DType", + "Complex128DType", + "CLongDoubleDType", + "ObjectDType", + "BytesDType", + "StrDType", + "VoidDType", + "DateTime64DType", + "TimeDelta64DType", + "StringDType", +] + +# Type parameters + +_ItemSizeT_co = TypeVar("_ItemSizeT_co", bound=int, default=int, covariant=True) +_NaObjectT_co = TypeVar("_NaObjectT_co", default=Never, covariant=True) + +# Helper base classes (typing-only) + +@type_check_only +class _SimpleDType[ScalarT: np.generic](np.dtype[ScalarT]): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] + names: None # pyright: ignore[reportIncompatibleVariableOverride] # pyrefly: ignore[bad-override] + def __new__(cls, /) -> Self: ... + def __getitem__(self, key: Any, /) -> NoReturn: ... + @property + def base(self) -> np.dtype[ScalarT]: ... + @property + def fields(self) -> None: ... + @property + def isalignedstruct(self) -> L[False]: ... + @property + def isnative(self) -> L[True]: ... + @property + def ndim(self) -> L[0]: ... + @property + def shape(self) -> tuple[()]: ... 
+ @property + def subdtype(self) -> None: ... + +@type_check_only +class _LiteralDType[ScalarT_co: np.generic](_SimpleDType[ScalarT_co]): # type: ignore[misc] + @property + def flags(self) -> L[0]: ... + @property + def hasobject(self) -> L[False]: ... + +# Helper mixins (typing-only): -__all__: list[str] +@type_check_only +class _TypeCodes[KindT: LiteralString, CharT: LiteralString, NumT: int]: + @final + @property + def kind(self) -> KindT: ... + @final + @property + def char(self) -> CharT: ... + @final + @property + def num(self) -> NumT: ... + +@type_check_only +class _NoOrder: + @final + @property + def byteorder(self) -> L["|"]: ... + +@type_check_only +class _NativeOrder: + @final + @property + def byteorder(self) -> L["="]: ... + +@type_check_only +class _NBit[AlignmentT: int, ItemSizeT: int]: + @final + @property + def alignment(self) -> AlignmentT: ... + @final + @property + def itemsize(self) -> ItemSizeT: ... + +@type_check_only +class _8Bit(_NoOrder, _NBit[L[1], L[1]]): ... # Boolean: -BoolDType = np.dtype[np.bool] + +@final +class BoolDType( # type: ignore[misc] + _TypeCodes[L["b"], L["?"], L[0]], + _8Bit, + _LiteralDType[np.bool], +): + @property + def name(self) -> L["bool"]: ... + @property + def str(self) -> L["|b1"]: ... + # Sized integers: -Int8DType = np.dtype[np.int8] -UInt8DType = np.dtype[np.uint8] -Int16DType = np.dtype[np.int16] -UInt16DType = np.dtype[np.uint16] -Int32DType = np.dtype[np.int32] -UInt32DType = np.dtype[np.uint32] -Int64DType = np.dtype[np.int64] -UInt64DType = np.dtype[np.uint64] + +@final +class Int8DType( # type: ignore[misc] + _TypeCodes[L["i"], L["b"], L[1]], + _8Bit, + _LiteralDType[np.int8], +): + @property + def name(self) -> L["int8"]: ... + @property + def str(self) -> L["|i1"]: ... + +@final +class UInt8DType( # type: ignore[misc] + _TypeCodes[L["u"], L["B"], L[2]], + _8Bit, + _LiteralDType[np.uint8], +): + @property + def name(self) -> L["uint8"]: ... + @property + def str(self) -> L["|u1"]: ... 
+ +@final +class Int16DType( # type: ignore[misc] + _TypeCodes[L["i"], L["h"], L[3]], + _NativeOrder, + _NBit[L[2], L[2]], + _LiteralDType[np.int16], +): + @property + def name(self) -> L["int16"]: ... + @property + def str(self) -> L["i2"]: ... + +@final +class UInt16DType( # type: ignore[misc] + _TypeCodes[L["u"], L["H"], L[4]], + _NativeOrder, + _NBit[L[2], L[2]], + _LiteralDType[np.uint16], +): + @property + def name(self) -> L["uint16"]: ... + @property + def str(self) -> L["u2"]: ... + +@final +class Int32DType( # type: ignore[misc] + _TypeCodes[L["i"], L["i", "l"], L[5, 7]], + _NativeOrder, + _NBit[L[4], L[4]], + _LiteralDType[np.int32], +): + @property + def name(self) -> L["int32"]: ... + @property + def str(self) -> L["i4"]: ... + +@final +class UInt32DType( # type: ignore[misc] + _TypeCodes[L["u"], L["I", "L"], L[6, 8]], + _NativeOrder, + _NBit[L[4], L[4]], + _LiteralDType[np.uint32], +): + @property + def name(self) -> L["uint32"]: ... + @property + def str(self) -> L["u4"]: ... + +@final +class Int64DType( # type: ignore[misc] + _TypeCodes[L["i"], L["l", "q"], L[7, 9]], + _NativeOrder, + _NBit[L[8], L[8]], + _LiteralDType[np.int64], +): + @property + def name(self) -> L["int64"]: ... + @property + def str(self) -> L["i8"]: ... + +@final +class UInt64DType( # type: ignore[misc] + _TypeCodes[L["u"], L["L", "Q"], L[8, 10]], + _NativeOrder, + _NBit[L[8], L[8]], + _LiteralDType[np.uint64], +): + @property + def name(self) -> L["uint64"]: ... + @property + def str(self) -> L["u8"]: ... 
+ # Standard C-named version/alias: -ByteDType = np.dtype[np.byte] -UByteDType = np.dtype[np.ubyte] -ShortDType = np.dtype[np.short] -UShortDType = np.dtype[np.ushort] -IntDType = np.dtype[np.intc] -UIntDType = np.dtype[np.uintc] -LongDType = np.dtype[np.long] -ULongDType = np.dtype[np.ulong] -LongLongDType = np.dtype[np.longlong] -ULongLongDType = np.dtype[np.ulonglong] -# Floats -Float16DType = np.dtype[np.float16] -Float32DType = np.dtype[np.float32] -Float64DType = np.dtype[np.float64] -LongDoubleDType = np.dtype[np.longdouble] +# NOTE: Don't make these `Final[_]` or a `type _` it will break stubtest +ByteDType = Int8DType +UByteDType = UInt8DType +ShortDType = Int16DType +UShortDType = UInt16DType + +@final +class IntDType( # type: ignore[misc] + _TypeCodes[L["i"], L["i"], L[5]], + _NativeOrder, + _NBit[L[4], L[4]], + _LiteralDType[np.intc], +): + @property + def name(self) -> L["int32"]: ... + @property + def str(self) -> L["i4"]: ... + +@final +class UIntDType( # type: ignore[misc] + _TypeCodes[L["u"], L["I"], L[6]], + _NativeOrder, + _NBit[L[4], L[4]], + _LiteralDType[np.uintc], +): + @property + def name(self) -> L["uint32"]: ... + @property + def str(self) -> L["u4"]: ... + +@final +class LongDType( # type: ignore[misc] + _TypeCodes[L["i"], L["l"], L[7]], + _NativeOrder, + _NBit[L[4, 8], L[4, 8]], + _LiteralDType[np.long], +): + @property + def name(self) -> L["int32", "int64"]: ... + @property + def str(self) -> L["i4", "i8"]: ... + +@final +class ULongDType( # type: ignore[misc] + _TypeCodes[L["u"], L["L"], L[8]], + _NativeOrder, + _NBit[L[4, 8], L[4, 8]], + _LiteralDType[np.ulong], +): + @property + def name(self) -> L["uint32", "uint64"]: ... + @property + def str(self) -> L["u4", "u8"]: ... + +@final +class LongLongDType( # type: ignore[misc] + _TypeCodes[L["i"], L["q"], L[9]], + _NativeOrder, + _NBit[L[8], L[8]], + _LiteralDType[np.longlong], +): + @property + def name(self) -> L["int64"]: ... + @property + def str(self) -> L["i8"]: ... 
+ +@final +class ULongLongDType( # type: ignore[misc] + _TypeCodes[L["u"], L["Q"], L[10]], + _NativeOrder, + _NBit[L[8], L[8]], + _LiteralDType[np.ulonglong], +): + @property + def name(self) -> L["uint64"]: ... + @property + def str(self) -> L["u8"]: ... + +# Floats: + +@final +class Float16DType( # type: ignore[misc] + _TypeCodes[L["f"], L["e"], L[23]], + _NativeOrder, + _NBit[L[2], L[2]], + _LiteralDType[np.float16], +): + @property + def name(self) -> L["float16"]: ... + @property + def str(self) -> L["f2"]: ... + +@final +class Float32DType( # type: ignore[misc] + _TypeCodes[L["f"], L["f"], L[11]], + _NativeOrder, + _NBit[L[4], L[4]], + _LiteralDType[np.float32], +): + @property + def name(self) -> L["float32"]: ... + @property + def str(self) -> L["f4"]: ... + +@final +class Float64DType( # type: ignore[misc] + _TypeCodes[L["f"], L["d"], L[12]], + _NativeOrder, + _NBit[L[8], L[8]], + _LiteralDType[np.float64], +): + @property + def name(self) -> L["float64"]: ... + @property + def str(self) -> L["f8"]: ... + +@final +class LongDoubleDType( # type: ignore[misc] + _TypeCodes[L["f"], L["g"], L[13]], + _NativeOrder, + _NBit[L[8, 12, 16], L[8, 12, 16]], + _LiteralDType[np.longdouble], +): + @property + def name(self) -> L["float64", "float96", "float128"]: ... + @property + def str(self) -> L["f8", "f12", "f16"]: ... + # Complex: -Complex64DType = np.dtype[np.complex64] -Complex128DType = np.dtype[np.complex128] -CLongDoubleDType = np.dtype[np.clongdouble] -# Others: -ObjectDType = np.dtype[np.object_] -BytesDType = np.dtype[np.bytes_] -StrDType = np.dtype[np.str_] -VoidDType = np.dtype[np.void] -DateTime64DType = np.dtype[np.datetime64] -TimeDelta64DType = np.dtype[np.timedelta64] + +@final +class Complex64DType( # type: ignore[misc] + _TypeCodes[L["c"], L["F"], L[14]], + _NativeOrder, + _NBit[L[4], L[8]], + _LiteralDType[np.complex64], +): + @property + def name(self) -> L["complex64"]: ... + @property + def str(self) -> L["c8"]: ... 
+ +@final +class Complex128DType( # type: ignore[misc] + _TypeCodes[L["c"], L["D"], L[15]], + _NativeOrder, + _NBit[L[8], L[16]], + _LiteralDType[np.complex128], +): + @property + def name(self) -> L["complex128"]: ... + @property + def str(self) -> L["c16"]: ... + +@final +class CLongDoubleDType( # type: ignore[misc] + _TypeCodes[L["c"], L["G"], L[16]], + _NativeOrder, + _NBit[L[8, 12, 16], L[16, 24, 32]], + _LiteralDType[np.clongdouble], +): + @property + def name(self) -> L["complex128", "complex192", "complex256"]: ... + @property + def str(self) -> L["c16", "c24", "c32"]: ... + +# Python objects: + +@final +class ObjectDType( # type: ignore[misc] + _TypeCodes[L["O"], L["O"], L[17]], + _NoOrder, + _NBit[L[8], L[8]], + _SimpleDType[np.object_], +): + @property + def hasobject(self) -> L[True]: ... + @property + def name(self) -> L["object"]: ... + @property + def str(self) -> L["|O"]: ... + +# Flexible: + +@final +class BytesDType( # type: ignore[misc] + _TypeCodes[L["S"], L["S"], L[18]], + _NoOrder, + _NBit[L[1], _ItemSizeT_co], + _SimpleDType[np.bytes_], + Generic[_ItemSizeT_co], +): + def __new__[ItemSizeT: int](cls, size: ItemSizeT, /) -> BytesDType[ItemSizeT]: ... + @property + def hasobject(self) -> L[False]: ... + @property + def name(self) -> LiteralString: ... + @property + def str(self) -> LiteralString: ... + +@final +class StrDType( # type: ignore[misc] + _TypeCodes[L["U"], L["U"], L[19]], + _NativeOrder, + _NBit[L[4], _ItemSizeT_co], + _SimpleDType[np.str_], + Generic[_ItemSizeT_co], +): + def __new__[ItemSizeT: int](cls, size: ItemSizeT, /) -> StrDType[ItemSizeT]: ... + @property + def hasobject(self) -> L[False]: ... + @property + def name(self) -> LiteralString: ... + @property + def str(self) -> LiteralString: ... 
+ +@final +class VoidDType( # type: ignore[misc] + _TypeCodes[L["V"], L["V"], L[20]], + _NoOrder, + _NBit[L[1], _ItemSizeT_co], + np.dtype[np.void], # pyright: ignore[reportGeneralTypeIssues] # pyrefly: ignore[invalid-inheritance] + Generic[_ItemSizeT_co], +): + # NOTE: `VoidDType(...)` raises a `TypeError` at the moment + def __new__(cls, length: _ItemSizeT_co, /) -> NoReturn: ... + @property + def base(self) -> Self: ... + @property + def isalignedstruct(self) -> L[False]: ... + @property + def isnative(self) -> L[True]: ... + @property + def ndim(self) -> L[0]: ... + @property + def shape(self) -> tuple[()]: ... + @property + def subdtype(self) -> None: ... + @property + def name(self) -> LiteralString: ... + @property + def str(self) -> LiteralString: ... + +# Other: + +type _DateUnit = L["Y", "M", "W", "D"] +type _TimeUnit = L["h", "m", "s", "ms", "us", "ns", "ps", "fs", "as"] +type _DateTimeUnit = _DateUnit | _TimeUnit + +@final +class DateTime64DType( # type: ignore[misc] + _TypeCodes[L["M"], L["M"], L[21]], + _NativeOrder, + _NBit[L[8], L[8]], + _LiteralDType[np.datetime64], +): + # NOTE: `DateTime64DType(...)` raises a `TypeError` at the moment + # TODO: Once implemented, don't forget the`unit: L["Îŧs"]` overload. + def __new__(cls, unit: _DateTimeUnit, /) -> NoReturn: ... + @property + def name(self) -> L[ + "datetime64", + "datetime64[Y]", + "datetime64[M]", + "datetime64[W]", + "datetime64[D]", + "datetime64[h]", + "datetime64[m]", + "datetime64[s]", + "datetime64[ms]", + "datetime64[us]", + "datetime64[ns]", + "datetime64[ps]", + "datetime64[fs]", + "datetime64[as]", + ]: ... + @property + def str(self) -> L[ + "M8", + "M8[Y]", + "M8[M]", + "M8[W]", + "M8[D]", + "M8[h]", + "M8[m]", + "M8[s]", + "M8[ms]", + "M8[us]", + "M8[ns]", + "M8[ps]", + "M8[fs]", + "M8[as]", + ]: ... 
+ +@final +class TimeDelta64DType( # type: ignore[misc] + _TypeCodes[L["m"], L["m"], L[22]], + _NativeOrder, + _NBit[L[8], L[8]], + _LiteralDType[np.timedelta64], +): + # NOTE: `TimeDelta64DType(...)` raises a `TypeError` at the moment + # TODO: Once implemented, don't forget to overload on `unit: L["Îŧs"]`. + def __new__(cls, unit: _DateTimeUnit, /) -> NoReturn: ... + @property + def name(self) -> L[ + "timedelta64", + "timedelta64[Y]", + "timedelta64[M]", + "timedelta64[W]", + "timedelta64[D]", + "timedelta64[h]", + "timedelta64[m]", + "timedelta64[s]", + "timedelta64[ms]", + "timedelta64[us]", + "timedelta64[ns]", + "timedelta64[ps]", + "timedelta64[fs]", + "timedelta64[as]", + ]: ... + @property + def str(self) -> L[ + "m8", + "m8[Y]", + "m8[M]", + "m8[W]", + "m8[D]", + "m8[h]", + "m8[m]", + "m8[s]", + "m8[ms]", + "m8[us]", + "m8[ns]", + "m8[ps]", + "m8[fs]", + "m8[as]", + ]: ... + +@final +class StringDType( # type: ignore[misc] + _TypeCodes[L["T"], L["T"], L[2056]], + _NativeOrder, + _NBit[L[8], L[16]], + # TODO(jorenham): change once we have a string scalar type: + # https://github.com/numpy/numpy/issues/28165 + np.dtype[str], # type: ignore[type-var] # pyright: ignore[reportGeneralTypeIssues, reportInvalidTypeArguments] + Generic[_NaObjectT_co], +): + @property + def na_object(self) -> _NaObjectT_co: ... + @property + def coerce(self) -> L[True]: ... + + # + @overload + def __new__(cls, /, *, coerce: bool = True) -> Self: ... + @overload + def __new__(cls, /, *, na_object: _NaObjectT_co, coerce: bool = True) -> Self: ... + + # + def __getitem__(self, key: Never, /) -> NoReturn: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + @property + def fields(self) -> None: ... + @property + def base(self) -> Self: ... + @property + def ndim(self) -> L[0]: ... + @property + def shape(self) -> tuple[()]: ... + + # + @property + def name(self) -> L["StringDType64", "StringDType128"]: ... + @property + def subdtype(self) -> None: ... 
+ @property + def type(self) -> py_type[py_str]: ... + @property + def str(self) -> L["|T8", "|T16"]: ... + + # + @property + def hasobject(self) -> L[True]: ... + @property + def isalignedstruct(self) -> L[False]: ... + @property + def isnative(self) -> L[True]: ... diff --git a/numpy/exceptions.py b/numpy/exceptions.py index b7df57c69fbd..cf70b4a4ce3b 100644 --- a/numpy/exceptions.py +++ b/numpy/exceptions.py @@ -1,14 +1,13 @@ """ -Exceptions and Warnings (:mod:`numpy.exceptions`) -================================================= +Exceptions and Warnings +======================= General exceptions used by NumPy. Note that some exceptions may be module specific, such as linear algebra errors. .. versionadded:: NumPy 1.25 - The exceptions module is new in NumPy 1.25. Older exceptions remain - available through the main NumPy namespace for compatibility. + The exceptions module is new in NumPy 1.25. .. currentmodule:: numpy.exceptions @@ -86,20 +85,20 @@ class VisibleDeprecationWarning(UserWarning): class RankWarning(RuntimeWarning): """Matrix rank warning. - + Issued by polynomial functions when the design matrix is rank deficient. - + """ pass # Exception used in shares_memory() class TooHardError(RuntimeError): - """max_work was exceeded. + """``max_work`` was exceeded. This is raised whenever the maximum number of candidate solutions to consider specified by the ``max_work`` parameter is exceeded. - Assigning a finite number to max_work may have caused the operation + Assigning a finite number to ``max_work`` may have caused the operation to fail. """ @@ -117,8 +116,6 @@ class AxisError(ValueError, IndexError): ``except ValueError`` and ``except IndexError`` statements continue to catch ``AxisError``. - .. 
versionadded:: 1.13 - Parameters ---------- axis : int or str @@ -146,6 +143,7 @@ class AxisError(ValueError, IndexError): Examples -------- + >>> import numpy as np >>> array_1d = np.arange(10) >>> np.cumsum(array_1d, axis=1) Traceback (most recent call last): @@ -172,7 +170,7 @@ class AxisError(ValueError, IndexError): """ - __slots__ = ("axis", "ndim", "_msg") + __slots__ = ("_msg", "axis", "ndim") def __init__(self, axis, ndim=None, msg_prefix=None): if ndim is msg_prefix is None: @@ -222,7 +220,10 @@ class DTypePromotionError(TypeError): Datetimes and complex numbers are incompatible classes and cannot be promoted: - >>> np.result_type(np.dtype("M8[s]"), np.complex128) + >>> import numpy as np + >>> np.result_type(np.dtype("M8[s]"), np.complex128) # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... DTypePromotionError: The DType could not be promoted by . This means that no common DType exists for the given inputs. For example they cannot be stored in a @@ -235,9 +236,11 @@ class DTypePromotionError(TypeError): >>> dtype1 = np.dtype([("field1", np.float64), ("field2", np.int64)]) >>> dtype2 = np.dtype([("field1", np.float64)]) - >>> np.promote_types(dtype1, dtype2) + >>> np.promote_types(dtype1, dtype2) # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... DTypePromotionError: field names `('field1', 'field2')` and `('field1',)` mismatch. - """ + """ # noqa: E501 pass diff --git a/numpy/exceptions.pyi b/numpy/exceptions.pyi index 8a99713f7006..4cc4eff5d321 100644 --- a/numpy/exceptions.pyi +++ b/numpy/exceptions.pyi @@ -1,6 +1,13 @@ from typing import overload -__all__: list[str] +__all__ = [ + "ComplexWarning", + "VisibleDeprecationWarning", + "ModuleDeprecationWarning", + "TooHardError", + "AxisError", + "DTypePromotionError", +] class ComplexWarning(RuntimeWarning): ... class ModuleDeprecationWarning(DeprecationWarning): ... @@ -10,10 +17,11 @@ class TooHardError(RuntimeError): ... 
class DTypePromotionError(TypeError): ... class AxisError(ValueError, IndexError): - axis: None | int - ndim: None | int + __slots__ = "_msg", "axis", "ndim" + + axis: int | None + ndim: int | None @overload - def __init__(self, axis: str, ndim: None = ..., msg_prefix: None = ...) -> None: ... + def __init__(self, axis: str, ndim: None = None, msg_prefix: None = None) -> None: ... @overload - def __init__(self, axis: int, ndim: int, msg_prefix: None | str = ...) -> None: ... - def __str__(self) -> str: ... + def __init__(self, axis: int, ndim: int, msg_prefix: str | None = None) -> None: ... diff --git a/numpy/f2py/__init__.py b/numpy/f2py/__init__.py index dfb897671c3f..f545c9c5fd84 100644 --- a/numpy/f2py/__init__.py +++ b/numpy/f2py/__init__.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python3 """Fortran to Python Interface Generator. Copyright 1999 -- 2011 Pearu Peterson all rights reserved. @@ -10,14 +9,14 @@ """ __all__ = ['run_main', 'get_include'] -import sys -import subprocess import os +import subprocess +import sys import warnings from numpy.exceptions import VisibleDeprecationWarning -from . import f2py2e -from . import diagnose + +from . import diagnose, f2py2e run_main = f2py2e.run_main main = f2py2e.main @@ -27,12 +26,6 @@ def get_include(): """ Return the directory that contains the ``fortranobject.c`` and ``.h`` files. - .. note:: - - This function is not needed when building an extension with - `numpy.distutils` directly from ``.f`` and/or ``.pyf`` files - in one go. - Python extension modules built with f2py-generated code need to use ``fortranobject.c`` as a source file, and include the ``fortranobject.h`` header. 
This function can be used to obtain the directory containing @@ -80,8 +73,7 @@ def __getattr__(attr): return test else: - raise AttributeError("module {!r} has no attribute " - "{!r}".format(__name__, attr)) + raise AttributeError(f"module {__name__!r} has no attribute {attr!r}") def __dir__(): diff --git a/numpy/f2py/__init__.pyi b/numpy/f2py/__init__.pyi index 81b6a24f39ec..aa7d5918f7d2 100644 --- a/numpy/f2py/__init__.pyi +++ b/numpy/f2py/__init__.pyi @@ -1,42 +1,5 @@ -import os -import subprocess -from collections.abc import Iterable -from typing import Literal as L, Any, overload, TypedDict +from .f2py2e import main as main, run_main -from numpy._pytesttester import PytestTester - -class _F2PyDictBase(TypedDict): - csrc: list[str] - h: list[str] - -class _F2PyDict(_F2PyDictBase, total=False): - fsrc: list[str] - ltx: list[str] - -__all__: list[str] -test: PytestTester - -def run_main(comline_list: Iterable[str]) -> dict[str, _F2PyDict]: ... - -@overload -def compile( # type: ignore[misc] - source: str | bytes, - modulename: str = ..., - extra_args: str | list[str] = ..., - verbose: bool = ..., - source_fn: None | str | bytes | os.PathLike[Any] = ..., - extension: L[".f", ".f90"] = ..., - full_output: L[False] = ..., -) -> int: ... -@overload -def compile( - source: str | bytes, - modulename: str = ..., - extra_args: str | list[str] = ..., - verbose: bool = ..., - source_fn: None | str | bytes | os.PathLike[Any] = ..., - extension: L[".f", ".f90"] = ..., - full_output: L[True] = ..., -) -> subprocess.CompletedProcess[bytes]: ... +__all__ = ["get_include", "run_main"] def get_include() -> str: ... 
diff --git a/numpy/f2py/__version__.py b/numpy/f2py/__version__.py index e20d7c1dbb38..8d12d955a2f2 100644 --- a/numpy/f2py/__version__.py +++ b/numpy/f2py/__version__.py @@ -1 +1 @@ -from numpy.version import version +from numpy.version import version # noqa: F401 diff --git a/numpy/f2py/__version__.pyi b/numpy/f2py/__version__.pyi new file mode 100644 index 000000000000..85b422529d38 --- /dev/null +++ b/numpy/f2py/__version__.pyi @@ -0,0 +1 @@ +from numpy.version import version as version diff --git a/numpy/f2py/_backends/__init__.py b/numpy/f2py/_backends/__init__.py index e91393c14be3..beb2bab2384d 100644 --- a/numpy/f2py/_backends/__init__.py +++ b/numpy/f2py/_backends/__init__.py @@ -2,8 +2,5 @@ def f2py_build_generator(name): if name == "meson": from ._meson import MesonBackend return MesonBackend - elif name == "distutils": - from ._distutils import DistutilsBackend - return DistutilsBackend else: raise ValueError(f"Unknown backend: {name}") diff --git a/numpy/f2py/_backends/__init__.pyi b/numpy/f2py/_backends/__init__.pyi new file mode 100644 index 000000000000..11e3743be541 --- /dev/null +++ b/numpy/f2py/_backends/__init__.pyi @@ -0,0 +1,5 @@ +from typing import Literal as L + +from ._backend import Backend + +def f2py_build_generator(name: L["meson"]) -> Backend: ... 
diff --git a/numpy/f2py/_backends/_backend.py b/numpy/f2py/_backends/_backend.py index a7d43d2587b2..5dda4004375e 100644 --- a/numpy/f2py/_backends/_backend.py +++ b/numpy/f2py/_backends/_backend.py @@ -1,5 +1,3 @@ -from __future__ import annotations - from abc import ABC, abstractmethod diff --git a/numpy/f2py/_backends/_backend.pyi b/numpy/f2py/_backends/_backend.pyi new file mode 100644 index 000000000000..ed24519ab914 --- /dev/null +++ b/numpy/f2py/_backends/_backend.pyi @@ -0,0 +1,46 @@ +import abc +from pathlib import Path +from typing import Any, Final + +class Backend(abc.ABC): + modulename: Final[str] + sources: Final[list[str | Path]] + extra_objects: Final[list[str]] + build_dir: Final[str | Path] + include_dirs: Final[list[str | Path]] + library_dirs: Final[list[str | Path]] + libraries: Final[list[str]] + define_macros: Final[list[tuple[str, str | None]]] + undef_macros: Final[list[str]] + f2py_flags: Final[list[str]] + sysinfo_flags: Final[list[str]] + fc_flags: Final[list[str]] + flib_flags: Final[list[str]] + setup_flags: Final[list[str]] + remove_build_dir: Final[bool] + extra_dat: Final[dict[str, Any]] + + def __init__( + self, + /, + modulename: str, + sources: list[str | Path], + extra_objects: list[str], + build_dir: str | Path, + include_dirs: list[str | Path], + library_dirs: list[str | Path], + libraries: list[str], + define_macros: list[tuple[str, str | None]], + undef_macros: list[str], + f2py_flags: list[str], + sysinfo_flags: list[str], + fc_flags: list[str], + flib_flags: list[str], + setup_flags: list[str], + remove_build_dir: bool, + extra_dat: dict[str, Any], + ) -> None: ... + + # + @abc.abstractmethod + def compile(self) -> None: ... 
diff --git a/numpy/f2py/_backends/_distutils.py b/numpy/f2py/_backends/_distutils.py deleted file mode 100644 index f2436f86a7e6..000000000000 --- a/numpy/f2py/_backends/_distutils.py +++ /dev/null @@ -1,75 +0,0 @@ -from ._backend import Backend - -from numpy.distutils.core import setup, Extension -from numpy.distutils.system_info import get_info -from numpy.distutils.misc_util import dict_append -from numpy.exceptions import VisibleDeprecationWarning -import os -import sys -import shutil -import warnings - - -class DistutilsBackend(Backend): - def __init__(sef, *args, **kwargs): - warnings.warn( - "\ndistutils has been deprecated since NumPy 1.26.x\n" - "Use the Meson backend instead, or generate wrappers" - " without -c and use a custom build script", - VisibleDeprecationWarning, - stacklevel=2, - ) - super().__init__(*args, **kwargs) - - def compile(self): - num_info = {} - if num_info: - self.include_dirs.extend(num_info.get("include_dirs", [])) - ext_args = { - "name": self.modulename, - "sources": self.sources, - "include_dirs": self.include_dirs, - "library_dirs": self.library_dirs, - "libraries": self.libraries, - "define_macros": self.define_macros, - "undef_macros": self.undef_macros, - "extra_objects": self.extra_objects, - "f2py_options": self.f2py_flags, - } - - if self.sysinfo_flags: - for n in self.sysinfo_flags: - i = get_info(n) - if not i: - print( - f"No {repr(n)} resources found" - "in system (try `f2py --help-link`)" - ) - dict_append(ext_args, **i) - - ext = Extension(**ext_args) - - sys.argv = [sys.argv[0]] + self.setup_flags - sys.argv.extend( - [ - "build", - "--build-temp", - self.build_dir, - "--build-base", - self.build_dir, - "--build-platlib", - ".", - "--disable-optimization", - ] - ) - - if self.fc_flags: - sys.argv.extend(["config_fc"] + self.fc_flags) - if self.flib_flags: - sys.argv.extend(["build_ext"] + self.flib_flags) - - setup(ext_modules=[ext]) - - if self.remove_build_dir and os.path.exists(self.build_dir): - 
print(f"Removing build directory {self.build_dir}") - shutil.rmtree(self.build_dir) diff --git a/numpy/f2py/_backends/_meson.py b/numpy/f2py/_backends/_meson.py index d4b650857e74..4c498bab2f25 100644 --- a/numpy/f2py/_backends/_meson.py +++ b/numpy/f2py/_backends/_meson.py @@ -1,18 +1,14 @@ -from __future__ import annotations - -import os import errno +import os +import re import shutil import subprocess import sys -import re +from itertools import chain from pathlib import Path - -from ._backend import Backend from string import Template -from itertools import chain -import warnings +from ._backend import Backend class MesonTemplate: @@ -28,7 +24,7 @@ def __init__( include_dirs: list[Path], object_files: list[Path], linker_args: list[str], - c_args: list[str], + fortran_args: list[str], build_type: str, python_exe: str, ): @@ -46,12 +42,19 @@ def __init__( self.include_dirs = [] self.substitutions = {} self.objects = object_files + # Convert args to '' wrapped variant for meson + self.fortran_args = [ + f"'{x}'" if not (x.startswith("'") and x.endswith("'")) else x + for x in fortran_args + ] self.pipeline = [ self.initialize_template, self.sources_substitution, + self.objects_substitution, self.deps_substitution, self.include_substitution, self.libraries_substitution, + self.fortran_args_substitution, ] self.build_type = build_type self.python_exe = python_exe @@ -77,6 +80,11 @@ def sources_substitution(self) -> None: [f"{self.indent}'''{source}'''," for source in self.sources] ) + def objects_substitution(self) -> None: + self.substitutions["obj_list"] = ",\n".join( + [f"{self.indent}'''{obj}'''," for obj in self.objects] + ) + def deps_substitution(self) -> None: self.substitutions["dep_list"] = f",\n{self.indent}".join( [f"{self.indent}dependency('{dep}')," for dep in self.deps] @@ -92,13 +100,13 @@ def libraries_substitution(self) -> None: self.substitutions["lib_declarations"] = "\n".join( [ - f"{lib} = declare_dependency(link_args : ['-l{lib}'])" + 
f"{lib.replace('.', '_')} = declare_dependency(link_args : ['-l{lib}'])" for lib in self.libraries ] ) self.substitutions["lib_list"] = f"\n{self.indent}".join( - [f"{self.indent}{lib}," for lib in self.libraries] + [f"{self.indent}{lib.replace('.', '_')}," for lib in self.libraries] ) self.substitutions["lib_dir_list"] = f"\n{self.indent}".join( [f"{self.indent}lib_dir_{i}," for i in range(len(self.library_dirs))] @@ -109,12 +117,20 @@ def include_substitution(self) -> None: [f"{self.indent}'''{inc}'''," for inc in self.include_dirs] ) + def fortran_args_substitution(self) -> None: + if self.fortran_args: + self.substitutions["fortran_args"] = ( + f"{self.indent}fortran_args: [{', '.join(list(self.fortran_args))}]," + ) + else: + self.substitutions["fortran_args"] = "" + def generate_meson_build(self): for node in self.pipeline: node() template = Template(self.meson_build_template()) meson_build = template.substitute(self.substitutions) - meson_build = re.sub(r",,", ",", meson_build) + meson_build = meson_build.replace(",,", ",") return meson_build @@ -126,12 +142,14 @@ def __init__(self, *args, **kwargs): self.build_type = ( "debug" if any("debug" in flag for flag in self.fc_flags) else "release" ) + self.fc_flags = _get_flags(self.fc_flags) def _move_exec_to_root(self, build_dir: Path): walk_dir = Path(build_dir) / self.meson_build_dir path_objects = chain( walk_dir.glob(f"{self.modulename}*.so"), walk_dir.glob(f"{self.modulename}*.pyd"), + walk_dir.glob(f"{self.modulename}*.dll"), ) # Same behavior as distutils # https://github.com/numpy/numpy/issues/24874#issuecomment-1835632293 @@ -174,6 +192,7 @@ def run_meson(self, build_dir: Path): def compile(self) -> None: self.sources = _prepare_sources(self.modulename, self.sources, self.build_dir) + _prepare_objects(self.modulename, self.extra_objects, self.build_dir) self.write_meson_build(self.build_dir) self.run_meson(self.build_dir) self._move_exec_to_root(self.build_dir) @@ -203,3 +222,23 @@ def 
_prepare_sources(mname, sources, bdir): if not Path(source).suffix == ".pyf" ] return extended_sources + +def _prepare_objects(mname, objects, bdir): + Path(bdir).mkdir(parents=True, exist_ok=True) + # Copy objects + for obj in objects: + if Path(obj).exists() and Path(obj).is_file(): + shutil.copy(obj, bdir) + +def _get_flags(fc_flags): + flag_values = [] + flag_pattern = re.compile(r"--f(77|90)flags=(.*)") + for flag in fc_flags: + match_result = flag_pattern.match(flag) + if match_result: + values = match_result.group(2).strip().split() + values = [val.strip("'\"") for val in values] + flag_values.extend(values) + # Hacky way to preserve order of flags + unique_flags = list(dict.fromkeys(flag_values)) + return unique_flags diff --git a/numpy/f2py/_backends/_meson.pyi b/numpy/f2py/_backends/_meson.pyi new file mode 100644 index 000000000000..55ff9f7ae78d --- /dev/null +++ b/numpy/f2py/_backends/_meson.pyi @@ -0,0 +1,61 @@ +from collections.abc import Callable +from pathlib import Path +from typing import Final, Literal as L, override + +from ._backend import Backend + +class MesonTemplate: + modulename: Final[str] + build_template_path: Final[Path] + sources: Final[list[str | Path]] + deps: Final[list[str]] + libraries: Final[list[str]] + library_dirs: Final[list[str | Path]] + include_dirs: Final[list[str | Path]] + substitutions: Final[dict[str, str]] + objects: Final[list[str | Path]] + fortran_args: Final[list[str]] + pipeline: Final[list[Callable[[], None]]] + build_type: Final[str] + python_exe: Final[str] + indent: Final[str] + + def __init__( + self, + /, + modulename: str, + sources: list[Path], + deps: list[str], + libraries: list[str], + library_dirs: list[str | Path], + include_dirs: list[str | Path], + object_files: list[str | Path], + linker_args: list[str], + fortran_args: list[str], + build_type: str, + python_exe: str, + ) -> None: ... + + # + def initialize_template(self) -> None: ... + def sources_substitution(self) -> None: ... 
+ def objects_substitution(self) -> None: ... + def deps_substitution(self) -> None: ... + def libraries_substitution(self) -> None: ... + def include_substitution(self) -> None: ... + def fortran_args_substitution(self) -> None: ... + + # + def meson_build_template(self) -> str: ... + def generate_meson_build(self) -> str: ... + +class MesonBackend(Backend): + dependencies: list[str] + meson_build_dir: L["bdir"] + build_type: L["debug", "release"] + + def __init__(self, /, *args: object, **kwargs: object) -> None: ... + def write_meson_build(self, /, build_dir: Path) -> None: ... + def run_meson(self, /, build_dir: Path) -> None: ... + @override + def compile(self) -> None: ... diff --git a/numpy/f2py/_backends/meson.build.template b/numpy/f2py/_backends/meson.build.template index 092b1112c262..58c6758cc503 100644 --- a/numpy/f2py/_backends/meson.build.template +++ b/numpy/f2py/_backends/meson.build.template @@ -43,6 +43,9 @@ ${source_list}, include_directories: [ inc_np, ${inc_list} + ], + objects: [ +${obj_list} ], dependencies : [ py_dep, @@ -51,4 +54,5 @@ ${dep_list} ${lib_list} ${lib_dir_list} ], +${fortran_args} install : true) diff --git a/numpy/f2py/_isocbind.pyi b/numpy/f2py/_isocbind.pyi new file mode 100644 index 000000000000..b972f5603956 --- /dev/null +++ b/numpy/f2py/_isocbind.pyi @@ -0,0 +1,13 @@ +from typing import Any, Final + +iso_c_binding_map: Final[dict[str, dict[str, str]]] = ... + +isoc_c2pycode_map: Final[dict[str, Any]] = {} # not implemented +iso_c2py_map: Final[dict[str, Any]] = {} # not implemented + +isoc_kindmap: Final[dict[str, str]] = ... 
+ +# namespace pollution +c_type: str +c_type_dict: dict[str, str] +fortran_type: str diff --git a/numpy/f2py/_src_pyf.py b/numpy/f2py/_src_pyf.py index 6247b95bfe46..b5c424f99334 100644 --- a/numpy/f2py/_src_pyf.py +++ b/numpy/f2py/_src_pyf.py @@ -1,3 +1,4 @@ +import os import re # START OF CODE VENDORED FROM `numpy.distutils.from_template` @@ -67,17 +68,18 @@ def parse_structure(astr): if function_start_re.match(astr, start, m.end()): while True: i = astr.rfind('\n', ind, start) - if i==-1: + if i == -1: break start = i - if astr[i:i+7]!='\n $': + if astr[i:i + 7] != '\n $': break start += 1 m = routine_end_re.search(astr, m.end()) - ind = end = m and m.end()-1 or len(astr) + ind = end = (m and m.end() - 1) or len(astr) spanlist.append((start, end)) return spanlist + template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>") named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>") list_re = re.compile(r"<\s*((.*?))\s*>") @@ -97,6 +99,7 @@ def find_and_remove_repl_patterns(astr): astr = re.subn(named_re, '', astr)[0] return astr, names + item_re = re.compile(r"\A\\(?P\d+)\Z") def conv(astr): b = astr.split(',') @@ -114,7 +117,7 @@ def unique_key(adict): done = False n = 1 while not done: - newkey = '__l%s' % (n) + newkey = f'__l{n}' if newkey in allkeys: n += 1 else: @@ -132,7 +135,7 @@ def expand_sub(substr, names): def listrepl(mobj): thelist = conv(mobj.group(1).replace(r'\,', '@comma@')) if template_name_re.match(thelist): - return "<%s>" % (thelist) + return f"<{thelist}>" name = None for key in lnames.keys(): # see if list is already in dictionary if lnames[key] == thelist: @@ -140,10 +143,11 @@ def listrepl(mobj): if name is None: # this list is not in the dictionary yet name = unique_key(lnames) lnames[name] = thelist - return "<%s>" % name + return f"<{name}>" - substr = list_re.sub(listrepl, substr) # convert all lists to named templates - # newnames are constructed as needed + # convert all lists to named templates + # new names are constructed as needed + 
substr = list_re.sub(listrepl, substr) numsubs = None base_rule = None @@ -152,7 +156,7 @@ def listrepl(mobj): if r not in rules: thelist = lnames.get(r, names.get(r, None)) if thelist is None: - raise ValueError('No replicates found for <%s>' % (r)) + raise ValueError(f'No replicates found for <{r}>') if r not in names and not thelist.startswith('_'): names[r] = thelist rule = [i.replace('@comma@', ',') for i in thelist.split(',')] @@ -165,14 +169,16 @@ def listrepl(mobj): elif num == numsubs: rules[r] = rule else: - print("Mismatch in number of replacements (base <{}={}>) " - "for <{}={}>. Ignoring.".format(base_rule, ','.join(rules[base_rule]), r, thelist)) + rules_base_rule = ','.join(rules[base_rule]) + print("Mismatch in number of replacements " + f"(base <{base_rule}={rules_base_rule}>) " + f"for <{r}={thelist}>. Ignoring.") if not rules: return substr def namerepl(mobj): name = mobj.group(1) - return rules.get(name, (k+1)*[name])[k] + return rules.get(name, (k + 1) * [name])[k] newstr = '' for k in range(numsubs): @@ -196,11 +202,12 @@ def process_str(allstr): writestr += cleanedstr names.update(defs) writestr += expand_sub(newstr[sub[0]:sub[1]], names) - oldend = sub[1] + oldend = sub[1] writestr += newstr[oldend:] return writestr + include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P[\w\d./\\]+\.src)['\"]", re.I) def resolve_includes(source): @@ -225,6 +232,7 @@ def process_file(source): lines = resolve_includes(source) return process_str(''.join(lines)) + _special_names = find_repl_patterns(''' <_c=s,d,c,z> <_t=real,double precision,complex,double complex> diff --git a/numpy/f2py/_src_pyf.pyi b/numpy/f2py/_src_pyf.pyi new file mode 100644 index 000000000000..50ddd07bf638 --- /dev/null +++ b/numpy/f2py/_src_pyf.pyi @@ -0,0 +1,28 @@ +import re +from _typeshed import StrOrBytesPath +from collections.abc import Mapping +from typing import Final + +routine_start_re: Final[re.Pattern[str]] = ... +routine_end_re: Final[re.Pattern[str]] = ... 
+function_start_re: Final[re.Pattern[str]] = ... +template_re: Final[re.Pattern[str]] = ... +named_re: Final[re.Pattern[str]] = ... +list_re: Final[re.Pattern[str]] = ... +item_re: Final[re.Pattern[str]] = ... +template_name_re: Final[re.Pattern[str]] = ... +include_src_re: Final[re.Pattern[str]] = ... + +def parse_structure(astr: str) -> list[tuple[int, int]]: ... +def find_repl_patterns(astr: str) -> dict[str, str]: ... +def find_and_remove_repl_patterns(astr: str) -> tuple[str, dict[str, str]]: ... +def conv(astr: str) -> str: ... + +# +def unique_key(adict: Mapping[str, object]) -> str: ... +def expand_sub(substr: str, names: dict[str, str]) -> str: ... +def process_str(allstr: str) -> str: ... + +# +def resolve_includes(source: StrOrBytesPath) -> list[str]: ... +def process_file(source: StrOrBytesPath) -> str: ... diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py index 13a1074b447e..a5af31d976ec 100644 --- a/numpy/f2py/auxfuncs.py +++ b/numpy/f2py/auxfuncs.py @@ -9,14 +9,13 @@ NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. """ import pprint -import sys import re +import sys import types from functools import reduce -from copy import deepcopy -from . import __version__ -from . import cfuncs +from . 
import __version__, cfuncs +from .cfuncs import errmess __all__ = [ 'applyrules', 'debugcapi', 'dictappend', 'errmess', 'gentitle', @@ -26,7 +25,7 @@ 'hasexternals', 'hasinitvalue', 'hasnote', 'hasresultnote', 'isallocatable', 'isarray', 'isarrayofstrings', 'ischaracter', 'ischaracterarray', 'ischaracter_or_characterarray', - 'iscomplex', + 'iscomplex', 'iscstyledirective', 'iscomplexarray', 'iscomplexfunction', 'iscomplexfunction_warn', 'isdouble', 'isdummyroutine', 'isexternal', 'isfunction', 'isfunction_wrap', 'isint1', 'isint1array', 'isinteger', 'isintent_aux', @@ -35,23 +34,21 @@ 'isintent_nothide', 'isintent_out', 'isintent_overwrite', 'islogical', 'islogicalfunction', 'islong_complex', 'islong_double', 'islong_doublefunction', 'islong_long', 'islong_longfunction', - 'ismodule', 'ismoduleroutine', 'isoptional', 'isprivate', 'isrequired', - 'isroutine', 'isscalar', 'issigned_long_longarray', 'isstring', - 'isstringarray', 'isstring_or_stringarray', 'isstringfunction', - 'issubroutine', 'get_f2py_modulename', - 'issubroutine_wrap', 'isthreadsafe', 'isunsigned', 'isunsigned_char', - 'isunsigned_chararray', 'isunsigned_long_long', - 'isunsigned_long_longarray', 'isunsigned_short', - 'isunsigned_shortarray', 'l_and', 'l_not', 'l_or', 'outmess', - 'replace', 'show', 'stripcomma', 'throw_error', 'isattr_value', - 'getuseblocks', 'process_f2cmap_dict' + 'ismodule', 'ismoduleroutine', 'isoptional', 'isprivate', 'isvariable', + 'isrequired', 'isroutine', 'isscalar', 'issigned_long_longarray', + 'isstring', 'isstringarray', 'isstring_or_stringarray', 'isstringfunction', + 'issubroutine', 'get_f2py_modulename', 'issubroutine_wrap', 'isthreadsafe', + 'isunsigned', 'isunsigned_char', 'isunsigned_chararray', + 'isunsigned_long_long', 'isunsigned_long_longarray', 'isunsigned_short', + 'isunsigned_shortarray', 'l_and', 'l_not', 'l_or', 'outmess', 'replace', + 'show', 'stripcomma', 'throw_error', 'isattr_value', 'getuseblocks', + 'process_f2cmap_dict', 'containscommon', 
'containsderivedtypes' ] f2py_version = __version__.version -errmess = sys.stderr.write show = pprint.pprint options = {} @@ -418,13 +415,18 @@ def getdimension(var): dimpattern = r"\((.*?)\)" if 'attrspec' in var.keys(): if any('dimension' in s for s in var['attrspec']): - return [re.findall(dimpattern, v) for v in var['attrspec']][0] + return next(re.findall(dimpattern, v) for v in var['attrspec']) def isrequired(var): return not isoptional(var) and isintent_nothide(var) +def iscstyledirective(f2py_line): + directives = {"callstatement", "callprotoargument", "pymethoddef"} + return any(directive in f2py_line.lower() for directive in directives) + + def isintent_in(var): if 'intent' not in var: return 1 @@ -518,6 +520,15 @@ def isprivate(var): return 'attrspec' in var and 'private' in var['attrspec'] +def isvariable(var): + # heuristic to find public/private declarations of filtered subroutines + if len(var) == 1 and 'attrspec' in var and \ + var['attrspec'][0] in ('public', 'private'): + is_var = False + else: + is_var = True + return is_var + def hasinitvalue(var): return '=' in var @@ -558,6 +569,20 @@ def containscommon(rout): return 0 +def hasderivedtypes(rout): + return ('block' in rout) and rout['block'] == 'type' + + +def containsderivedtypes(rout): + if hasderivedtypes(rout): + return 1 + if hasbody(rout): + for b in rout['body']: + if hasderivedtypes(b): + return 1 + return 0 + + def containsmodule(block): if ismodule(block): return 1 @@ -595,7 +620,7 @@ def __init__(self, mess): self.mess = mess def __call__(self, var): - mess = '\n\n var = %s\n Message: %s\n' % (var, self.mess) + mess = f'\n\n var = {var}\n Message: {self.mess}\n' raise F2PYError(mess) @@ -604,7 +629,7 @@ def l_and(*f): for i in range(len(f)): l1 = '%s,f%d=f[%d]' % (l1, i, i) l2.append('f%d(v)' % (i)) - return eval('%s:%s' % (l1, ' and '.join(l2))) + return eval(f"{l1}:{' and '.join(l2)}") def l_or(*f): @@ -612,7 +637,7 @@ def l_or(*f): for i in range(len(f)): l1 = '%s,f%d=f[%d]' % 
(l1, i, i) l2.append('f%d(v)' % (i)) - return eval('%s:%s' % (l1, ' or '.join(l2))) + return eval(f"{l1}:{' or '.join(l2)}") def l_not(f): @@ -632,8 +657,7 @@ def getfortranname(rout): if name == '': raise KeyError if not name: - errmess('Failed to use fortranname from %s\n' % - (rout['f2pyenhancements'])) + errmess(f"Failed to use fortranname from {rout['f2pyenhancements']}\n") raise KeyError except KeyError: name = rout['name'] @@ -665,8 +689,7 @@ def getmultilineblock(rout, blockname, comment=1, counter=0): else: r = r[:-3] else: - errmess("%s multiline block should end with `'''`: %s\n" - % (blockname, repr(r))) + errmess(f"{blockname} multiline block should end with `'''`: {repr(r)}\n") return r @@ -698,12 +721,11 @@ def getcallprotoargument(rout, cb_map={}): pass elif isstring(var): pass - else: - if not isattr_value(var): - ctype = ctype + '*' - if ((isstring(var) + elif not isattr_value(var): + ctype = ctype + '*' + if (isstring(var) or isarrayofstrings(var) # obsolete? - or isstringarray(var))): + or isstringarray(var)): arg_types2.append('size_t') arg_types.append(ctype) @@ -769,7 +791,7 @@ def getrestdoc(rout): def gentitle(name): ln = (80 - len(name) - 6) // 2 - return '/*%s %s %s*/' % (ln * '*', name, ln * '*') + return f"/*{ln * '*'} {name} {ln * '*'}*/" def flatlist(lst): @@ -797,9 +819,9 @@ def replace(str, d, defaultsep=''): else: sep = defaultsep if isinstance(d[k], list): - str = str.replace('#%s#' % (k), sep.join(flatlist(d[k]))) + str = str.replace(f'#{k}#', sep.join(flatlist(d[k]))) else: - str = str.replace('#%s#' % (k), d[k]) + str = str.replace(f'#{k}#', d[k]) return str @@ -870,22 +892,16 @@ def applyrules(rules, d, var={}): for i in rules[k][k1]: if isinstance(i, dict): res = applyrules({'supertext': i}, d, var) - if 'supertext' in res: - i = res['supertext'] - else: - i = '' + i = res.get('supertext', '') ret[k].append(replace(i, d)) else: i = rules[k][k1] if isinstance(i, dict): res = applyrules({'supertext': i}, d) - if 'supertext' in 
res: - i = res['supertext'] - else: - i = '' + i = res.get('supertext', '') ret[k].append(replace(i, d)) else: - errmess('applyrules: ignoring rule %s.\n' % repr(rules[k])) + errmess(f'applyrules: ignoring rule {repr(rules[k])}.\n') if isinstance(ret[k], list): if len(ret[k]) == 1: ret[k] = ret[k][0] @@ -893,6 +909,7 @@ def applyrules(rules, d, var={}): del ret[k] return ret + _f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]+)', re.I).match _f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]*?' @@ -904,7 +921,7 @@ def get_f2py_modulename(source): for line in f: m = _f2py_module_name_match(line) if m: - if _f2py_user_module_name_match(line): # skip *__user__* names + if _f2py_user_module_name_match(line): # skip *__user__* names continue name = m.group('name') break @@ -918,7 +935,7 @@ def getuseblocks(pymod): all_uses.extend([x for x in modblock.get("use").keys() if "__" not in x]) return all_uses -def process_f2cmap_dict(f2cmap_all, new_map, c2py_map, verbose = False): +def process_f2cmap_dict(f2cmap_all, new_map, c2py_map, verbose=False): """ Update the Fortran-to-C type mapping dictionary with new mappings and return a list of successfully mapped C types. 
@@ -976,13 +993,12 @@ def process_f2cmap_dict(f2cmap_all, new_map, c2py_map, verbose = False): ) f2cmap_all[k][k1] = v1 if verbose: - outmess('\tMapping "%s(kind=%s)" to "%s"\n' % (k, k1, v1)) + outmess(f'\tMapping "{k}(kind={k1})" to "{v1}\"\n') f2cmap_mapped.append(v1) - else: - if verbose: - errmess( - "\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n" - % (k, k1, v1, v1, list(c2py_map.keys())) - ) + elif verbose: + errmess( + "\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n" + % (k, k1, v1, v1, list(c2py_map.keys())) + ) return f2cmap_all, f2cmap_mapped diff --git a/numpy/f2py/auxfuncs.pyi b/numpy/f2py/auxfuncs.pyi new file mode 100644 index 000000000000..fbf0ad764aae --- /dev/null +++ b/numpy/f2py/auxfuncs.pyi @@ -0,0 +1,259 @@ +from _typeshed import FileDescriptorOrPath +from collections.abc import Callable, Mapping +from pprint import pprint as show +from typing import Any, Final, Literal as L, Never, overload + +from .cfuncs import errmess + +__all__ = [ + "applyrules", + "containscommon", + "containsderivedtypes", + "debugcapi", + "dictappend", + "errmess", + "gentitle", + "get_f2py_modulename", + "getargs2", + "getcallprotoargument", + "getcallstatement", + "getdimension", + "getfortranname", + "getpymethoddef", + "getrestdoc", + "getuseblocks", + "getusercode", + "getusercode1", + "hasbody", + "hascallstatement", + "hascommon", + "hasexternals", + "hasinitvalue", + "hasnote", + "hasresultnote", + "isallocatable", + "isarray", + "isarrayofstrings", + "isattr_value", + "ischaracter", + "ischaracter_or_characterarray", + "ischaracterarray", + "iscomplex", + "iscomplexarray", + "iscomplexfunction", + "iscomplexfunction_warn", + "iscstyledirective", + "isdouble", + "isdummyroutine", + "isexternal", + "isfunction", + "isfunction_wrap", + "isint1", + "isint1array", + "isinteger", + "isintent_aux", + "isintent_c", + "isintent_callback", + "isintent_copy", + "isintent_dict", + "isintent_hide", + "isintent_in", + "isintent_inout", + 
"isintent_inplace", + "isintent_nothide", + "isintent_out", + "isintent_overwrite", + "islogical", + "islogicalfunction", + "islong_complex", + "islong_double", + "islong_doublefunction", + "islong_long", + "islong_longfunction", + "ismodule", + "ismoduleroutine", + "isoptional", + "isprivate", + "isrequired", + "isroutine", + "isscalar", + "issigned_long_longarray", + "isstring", + "isstring_or_stringarray", + "isstringarray", + "isstringfunction", + "issubroutine", + "issubroutine_wrap", + "isthreadsafe", + "isunsigned", + "isunsigned_char", + "isunsigned_chararray", + "isunsigned_long_long", + "isunsigned_long_longarray", + "isunsigned_short", + "isunsigned_shortarray", + "isvariable", + "l_and", + "l_not", + "l_or", + "outmess", + "process_f2cmap_dict", + "replace", + "show", + "stripcomma", + "throw_error", +] + +### + +type _Var = Mapping[str, list[str]] +type _ROut = Mapping[str, str] +type _F2CMap = Mapping[str, Mapping[str, str]] + +type _Bool = bool | L[0, 1] +type _Intent = L[ + "INTENT_IN", + "INTENT_OUT", + "INTENT_INOUT", + "INTENT_C", + "INTENT_CACHE", + "INTENT_HIDE", + "INTENT_INPLACE", + "INTENT_ALIGNED4", + "INTENT_ALIGNED8", + "INTENT_ALIGNED16", + "OPTIONAL", +] + +### + +isintent_dict: dict[Callable[[_Var], _Bool], _Intent] + +class F2PYError(Exception): ... + +class throw_error: + mess: Final[str] + def __init__(self, /, mess: str) -> None: ... + def __call__(self, /, var: _Var) -> Never: ... # raises F2PYError + +# +def l_and[VT, RT](*f: tuple[str, Callable[[VT], RT]]) -> Callable[[VT], RT]: ... +def l_or[VT, RT](*f: tuple[str, Callable[[VT], RT]]) -> Callable[[VT], RT]: ... +def l_not[VT, RT](f: tuple[str, Callable[[VT], RT]]) -> Callable[[VT], RT]: ... + +# +def outmess(t: str) -> None: ... +def debugcapi(var: _Var) -> bool: ... + +# +def hasinitvalue(var: _Var | str) -> bool: ... +def hasnote(var: _Var | str) -> bool: ... +def ischaracter(var: _Var) -> bool: ... +def ischaracterarray(var: _Var) -> bool: ... 
+def ischaracter_or_characterarray(var: _Var) -> bool: ... +def isstring(var: _Var) -> bool: ... +def isstringarray(var: _Var) -> bool: ... +def isstring_or_stringarray(var: _Var) -> bool: ... +def isarray(var: _Var) -> bool: ... +def isarrayofstrings(var: _Var) -> bool: ... +def isscalar(var: _Var) -> bool: ... +def iscomplex(var: _Var) -> bool: ... +def islogical(var: _Var) -> bool: ... +def isinteger(var: _Var) -> bool: ... +def isint1(var: _Var) -> bool: ... +def isint1array(var: _Var) -> bool: ... +def islong_long(var: _Var) -> _Bool: ... +def isunsigned(var: _Var) -> _Bool: ... +def isunsigned_char(var: _Var) -> _Bool: ... +def isunsigned_chararray(var: _Var) -> bool: ... +def isunsigned_short(var: _Var) -> _Bool: ... +def isunsigned_shortarray(var: _Var) -> bool: ... +def isunsigned_long_long(var: _Var) -> _Bool: ... +def isunsigned_long_longarray(var: _Var) -> bool: ... +def issigned_long_longarray(var: _Var) -> bool: ... +def isdouble(var: _Var) -> _Bool: ... +def islong_double(var: _Var) -> _Bool: ... +def islong_complex(var: _Var) -> _Bool: ... +def iscomplexarray(var: _Var) -> bool: ... +def isallocatable(var: _Var) -> bool: ... +def isattr_value(var: _Var) -> bool: ... +def isoptional(var: _Var) -> bool: ... +def isexternal(var: _Var) -> bool: ... +def isrequired(var: _Var) -> bool: ... +def isprivate(var: _Var) -> bool: ... +def isvariable(var: _Var) -> bool: ... +def isintent_in(var: _Var) -> _Bool: ... +def isintent_inout(var: _Var) -> bool: ... +def isintent_out(var: _Var) -> bool: ... +def isintent_hide(var: _Var) -> bool: ... +def isintent_nothide(var: _Var) -> bool: ... +def isintent_c(var: _Var) -> bool: ... +def isintent_cache(var: _Var) -> bool: ... +def isintent_copy(var: _Var) -> bool: ... +def isintent_overwrite(var: _Var) -> bool: ... +def isintent_callback(var: _Var) -> bool: ... +def isintent_inplace(var: _Var) -> bool: ... +def isintent_aux(var: _Var) -> bool: ... + +# +def containsderivedtypes(rout: _ROut) -> L[0, 1]: ... 
+def containscommon(rout: _ROut) -> _Bool: ... +def hasexternals(rout: _ROut) -> bool: ... +def hasresultnote(rout: _ROut) -> _Bool: ... +def hasbody(rout: _ROut) -> _Bool: ... +def hascommon(rout: _ROut) -> bool: ... +def hasderivedtypes(rout: _ROut) -> bool: ... +def hascallstatement(rout: _ROut) -> bool: ... +def isroutine(rout: _ROut) -> bool: ... +def ismodule(rout: _ROut) -> bool: ... +def ismoduleroutine(rout: _ROut) -> bool: ... +def issubroutine(rout: _ROut) -> bool: ... +def issubroutine_wrap(rout: _ROut) -> _Bool: ... +def isfunction(rout: _ROut) -> bool: ... +def isfunction_wrap(rout: _ROut) -> _Bool: ... +def islogicalfunction(rout: _ROut) -> _Bool: ... +def islong_longfunction(rout: _ROut) -> _Bool: ... +def islong_doublefunction(rout: _ROut) -> _Bool: ... +def iscomplexfunction(rout: _ROut) -> _Bool: ... +def iscomplexfunction_warn(rout: _ROut) -> _Bool: ... +def isstringfunction(rout: _ROut) -> _Bool: ... +def isthreadsafe(rout: _ROut) -> bool: ... +def isdummyroutine(rout: _ROut) -> _Bool: ... +def iscstyledirective(f2py_line: str) -> bool: ... + +# . +def getdimension(var: _Var) -> list[Any] | None: ... +def getfortranname(rout: _ROut) -> str: ... +def getmultilineblock(rout: _ROut, blockname: str, comment: _Bool = 1, counter: int = 0) -> str | None: ... +def getcallstatement(rout: _ROut) -> str | None: ... +def getcallprotoargument(rout: _ROut, cb_map: dict[str, str] = {}) -> str: ... +def getusercode(rout: _ROut) -> str | None: ... +def getusercode1(rout: _ROut) -> str | None: ... +def getpymethoddef(rout: _ROut) -> str | None: ... +def getargs(rout: _ROut) -> tuple[list[str], list[str]]: ... +def getargs2(rout: _ROut) -> tuple[list[str], list[str]]: ... +def getrestdoc(rout: _ROut) -> str | None: ... + +# +def gentitle(name: str) -> str: ... +def stripcomma(s: str) -> str: ... +@overload +def replace(str: str, d: list[str], defaultsep: str = "") -> list[str]: ... 
+@overload +def replace(str: list[str], d: str, defaultsep: str = "") -> list[str]: ... +@overload +def replace(str: str, d: str, defaultsep: str = "") -> str: ... + +# +def dictappend(rd: Mapping[str, object], ar: Mapping[str, object] | list[Mapping[str, object]]) -> dict[str, Any]: ... +def applyrules(rules: Mapping[str, object], d: Mapping[str, object], var: _Var = {}) -> dict[str, Any]: ... + +# +def get_f2py_modulename(source: FileDescriptorOrPath) -> str: ... +def getuseblocks(pymod: Mapping[str, Mapping[str, Mapping[str, str]]]) -> list[str]: ... +def process_f2cmap_dict( + f2cmap_all: _F2CMap, + new_map: _F2CMap, + c2py_map: _F2CMap, + verbose: bool = False, +) -> tuple[dict[str, dict[str, str]], list[str]]: ... diff --git a/numpy/f2py/capi_maps.py b/numpy/f2py/capi_maps.py index fa477a5b9aca..290ac2f467ad 100644 --- a/numpy/f2py/capi_maps.py +++ b/numpy/f2py/capi_maps.py @@ -7,19 +7,21 @@ NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. """ from . import __version__ + f2py_version = __version__.version import copy -import re import os -from .crackfortran import markoutercomma +import re + from . import cb_rules -from ._isocbind import iso_c_binding_map, isoc_c2pycode_map, iso_c2py_map +from ._isocbind import iso_c2py_map, iso_c_binding_map, isoc_c2pycode_map # The environment provided by auxfuncs.py is needed for some calls to eval. # As the needed functions cannot be determined by static inspection of the # code, it is safest to use import * pending a major refactoring of f2py. from .auxfuncs import * +from .crackfortran import markoutercomma __all__ = [ 'getctype', 'getstrlength', 'getarrdims', 'getpydocsign', @@ -152,13 +154,13 @@ def load_f2cmap_file(f2cmap_file): # interpreted as C 'float'. This feature is useful for F90/95 users if # they use PARAMETERS in type specifications. 
try: - outmess('Reading f2cmap from {!r} ...\n'.format(f2cmap_file)) + outmess(f'Reading f2cmap from {f2cmap_file!r} ...\n') with open(f2cmap_file) as f: d = eval(f.read().lower(), {}, {}) f2cmap_all, f2cmap_mapped = process_f2cmap_dict(f2cmap_all, d, c2py_map, True) outmess('Successfully applied user defined f2cmap changes\n') except Exception as msg: - errmess('Failed to apply user defined f2cmap changes: %s. Skipping.\n' % (msg)) + errmess(f'Failed to apply user defined f2cmap changes: {msg}. Skipping.\n') cformat_map = {'double': '%g', @@ -197,7 +199,7 @@ def getctype(var): if a in var['vars']: return getctype(var['vars'][a]) else: - errmess('getctype: function %s has no return value?!\n' % a) + errmess(f'getctype: function {a} has no return value?!\n') elif issubroutine(var): return ctype elif ischaracter_or_characterarray(var): @@ -229,9 +231,8 @@ def getctype(var): errmess('getctype: "%s(kind=%s)" is mapped to C "%s" (to override define dict(%s = dict(%s="")) in %s/.f2py_f2cmap file).\n' % (typespec, var['kindselector']['kind'], ctype, typespec, var['kindselector']['kind'], os.getcwd())) - else: - if not isexternal(var): - errmess('getctype: No C-type found in "%s", assuming void.\n' % var) + elif not isexternal(var): + errmess(f'getctype: No C-type found in "{var}", assuming void.\n') return ctype @@ -259,10 +260,10 @@ def getstrlength(var): if a in var['vars']: return getstrlength(var['vars'][a]) else: - errmess('getstrlength: function %s has no return value?!\n' % a) + errmess(f'getstrlength: function {a} has no return value?!\n') if not isstring(var): errmess( - 'getstrlength: expected a signature of a string but got: %s\n' % (repr(var))) + f'getstrlength: expected a signature of a string but got: {repr(var)}\n') len = '1' if 'charselector' in var: a = var['charselector'] @@ -331,7 +332,7 @@ def getarrdims(a, var, verbose=0): ret['cbsetdims'], i, 0) elif verbose: errmess( - 'getarrdims: If in call-back function: array argument %s must have bounded 
dimensions: got %s\n' % (repr(a), repr(d))) + f'getarrdims: If in call-back function: array argument {repr(a)} must have bounded dimensions: got {repr(d)}\n') if ret['cbsetdims']: ret['cbsetdims'] = ret['cbsetdims'][:-1] # if not isintent_c(var): @@ -349,7 +350,7 @@ def getpydocsign(a, var): if af in var['vars']: return getpydocsign(af, var['vars'][af]) else: - errmess('getctype: function %s has no return value?!\n' % af) + errmess(f'getctype: function {af} has no return value?!\n') return '', '' sig, sigout = a, a opt = '' @@ -368,22 +369,21 @@ def getpydocsign(a, var): if hasinitvalue(var): init, showinit = getinit(a, var) - init = ', optional\\n Default: %s' % showinit + init = f', optional\\n Default: {showinit}' if isscalar(var): if isintent_inout(var): sig = '%s : %s rank-0 array(%s,\'%s\')%s' % (a, opt, c2py_map[ctype], c2pycode_map[ctype], init) else: - sig = '%s : %s %s%s' % (a, opt, c2py_map[ctype], init) - sigout = '%s : %s' % (out_a, c2py_map[ctype]) + sig = f'{a} : {opt} {c2py_map[ctype]}{init}' + sigout = f'{out_a} : {c2py_map[ctype]}' elif isstring(var): if isintent_inout(var): sig = '%s : %s rank-0 array(string(len=%s),\'c\')%s' % ( a, opt, getstrlength(var), init) else: - sig = '%s : %s string(len=%s)%s' % ( - a, opt, getstrlength(var), init) - sigout = '%s : string(len=%s)' % (out_a, getstrlength(var)) + sig = f'{a} : {opt} string(len={getstrlength(var)}){init}' + sigout = f'{out_a} : string(len={getstrlength(var)})' elif isarray(var): dim = var['dimension'] rank = repr(len(dim)) @@ -402,25 +402,23 @@ def getpydocsign(a, var): if a in lcb_map and lcb_map[a] in lcb2_map and 'argname' in lcb2_map[lcb_map[a]]: ua = lcb2_map[lcb_map[a]]['argname'] if not ua == a: - ua = ' => %s' % ua + ua = f' => {ua}' else: ua = '' - sig = '%s : call-back function%s' % (a, ua) + sig = f'{a} : call-back function{ua}' sigout = sig else: errmess( - 'getpydocsign: Could not resolve docsignature for "%s".\n' % a) + f'getpydocsign: Could not resolve docsignature for 
"{a}".\n') return sig, sigout def getarrdocsign(a, var): ctype = getctype(var) if isstring(var) and (not isarray(var)): - sig = '%s : rank-0 array(string(len=%s),\'c\')' % (a, - getstrlength(var)) + sig = f'{a} : rank-0 array(string(len={getstrlength(var)}),\'c\')' elif isscalar(var): - sig = '%s : rank-0 array(%s,\'%s\')' % (a, c2py_map[ctype], - c2pycode_map[ctype],) + sig = f'{a} : rank-0 array({c2py_map[ctype]},\'{c2pycode_map[ctype]}\')' elif isarray(var): dim = var['dimension'] rank = repr(len(dim)) @@ -452,17 +450,16 @@ def getinit(a, var): ret['init.r'], ret['init.i'] = str(v.real), str(v.imag) except Exception: raise ValueError( - 'getinit: expected complex number `(r,i)\' but got `%s\' as initial value of %r.' % (init, a)) + f'getinit: expected complex number `(r,i)\' but got `{init}\' as initial value of {a!r}.') if isarray(var): - init = '(capi_c.r=%s,capi_c.i=%s,capi_c)' % ( - ret['init.r'], ret['init.i']) + init = f"(capi_c.r={ret['init.r']},capi_c.i={ret['init.i']},capi_c)" elif isstring(var): if not init: init, showinit = '""', "''" if init[0] == "'": init = '"%s"' % (init[1:-1].replace('"', '\\"')) if init[0] == '"': - showinit = "'%s'" % (init[1:-1]) + showinit = f"'{init[1:-1]}'" return init, showinit @@ -499,7 +496,7 @@ def sign2map(a, var): intent_flags = [] for f, s in isintent_dict.items(): if f(var): - intent_flags.append('F2PY_%s' % s) + intent_flags.append(f'F2PY_{s}') if intent_flags: # TODO: Evaluate intent_flags here. 
ret['intent'] = '|'.join(intent_flags) @@ -555,29 +552,27 @@ def sign2map(a, var): if il[i](var): rl.append(il[i + 1]) if isstring(var): - rl.append('slen(%s)=%s' % (a, ret['length'])) + rl.append(f"slen({a})={ret['length']}") if isarray(var): ddim = ','.join( - map(lambda x, y: '%s|%s' % (x, y), var['dimension'], dim)) - rl.append('dims(%s)' % ddim) + map(lambda x, y: f'{x}|{y}', var['dimension'], dim)) + rl.append(f'dims({ddim})') if isexternal(var): - ret['vardebuginfo'] = 'debug-capi:%s=>%s:%s' % ( - a, ret['cbname'], ','.join(rl)) + ret['vardebuginfo'] = f"debug-capi:{a}=>{ret['cbname']}:{','.join(rl)}" else: ret['vardebuginfo'] = 'debug-capi:%s %s=%s:%s' % ( ret['ctype'], a, ret['showinit'], ','.join(rl)) if isscalar(var): if ret['ctype'] in cformat_map: - ret['vardebugshowvalue'] = 'debug-capi:%s=%s' % ( - a, cformat_map[ret['ctype']]) + ret['vardebugshowvalue'] = f"debug-capi:{a}={cformat_map[ret['ctype']]}" if isstring(var): ret['vardebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\"%%s\\"' % ( a, a) if isexternal(var): - ret['vardebugshowvalue'] = 'debug-capi:%s=%%p' % (a) + ret['vardebugshowvalue'] = f'debug-capi:{a}=%p' if ret['ctype'] in cformat_map: - ret['varshowvalue'] = '#name#:%s=%s' % (a, cformat_map[ret['ctype']]) - ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) + ret['varshowvalue'] = f"#name#:{a}={cformat_map[ret['ctype']]}" + ret['showvalueformat'] = f"{cformat_map[ret['ctype']]}" if isstring(var): ret['varshowvalue'] = '#name#:slen(%s)=%%d %s=\\"%%s\\"' % (a, a) ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) @@ -600,7 +595,7 @@ def routsign2map(rout): 'name_lower': name.lower(), 'NAME': name.upper(), 'begintitle': gentitle(name), - 'endtitle': gentitle('end of %s' % name), + 'endtitle': gentitle(f'end of {name}'), 'fortranname': fname, 'FORTRANNAME': fname.upper(), 'callstatement': getcallstatement(rout) or '', @@ -627,7 +622,7 @@ def routsign2map(rout): ln = k break lcb_map[ln] = un[1] - elif 'externals' in rout 
and rout['externals']: + elif rout.get('externals'): errmess('routsign2map: Confused: function %s has externals %s but no "use" statement.\n' % ( ret['name'], repr(rout['externals']))) ret['callprotoargument'] = getcallprotoargument(rout, lcb_map) or '' @@ -689,6 +684,8 @@ def modsign2map(m): else: ret['interface_usercode'] = '' ret['pymethoddef'] = getpymethoddef(m) or '' + if 'gil_used' in m: + ret['gil_used'] = m['gil_used'] if 'coutput' in m: ret['coutput'] = m['coutput'] if 'f2py_wrapper_output' in m: @@ -704,7 +701,7 @@ def cb_sign2map(a, var, index=None): ret['atype'] = c2capi_map[ret['ctype']] ret['elsize'] = get_elsize(var) if ret['ctype'] in cformat_map: - ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) + ret['showvalueformat'] = f"{cformat_map[ret['ctype']]}" if isarray(var): ret = dictappend(ret, getarrdims(a, var)) ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) @@ -719,25 +716,21 @@ def cb_routsign2map(rout, um): name,begintitle,endtitle,argname ctype,rctype,maxnofargs,nofoptargs,returncptr """ - ret = {'name': 'cb_%s_in_%s' % (rout['name'], um), + ret = {'name': f"cb_{rout['name']}_in_{um}", 'returncptr': ''} if isintent_callback(rout): if '_' in rout['name']: F_FUNC = 'F_FUNC_US' else: F_FUNC = 'F_FUNC' - ret['callbackname'] = '%s(%s,%s)' \ - % (F_FUNC, - rout['name'].lower(), - rout['name'].upper(), - ) + ret['callbackname'] = f"{F_FUNC}({rout['name'].lower()},{rout['name'].upper()})" ret['static'] = 'extern' else: ret['callbackname'] = ret['name'] ret['static'] = 'static' ret['argname'] = rout['name'] ret['begintitle'] = gentitle(ret['name']) - ret['endtitle'] = gentitle('end of %s' % ret['name']) + ret['endtitle'] = gentitle(f"end of {ret['name']}") ret['ctype'] = getctype(rout) ret['rctype'] = 'void' if ret['ctype'] == 'string': @@ -754,7 +747,7 @@ def cb_routsign2map(rout, um): else: ret['returncptr'] = 'return_value=' if ret['ctype'] in cformat_map: - ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) + 
ret['showvalueformat'] = f"{cformat_map[ret['ctype']]}" if isstringfunction(rout): ret['strlength'] = getstrlength(rout) if isfunction(rout): @@ -775,10 +768,9 @@ def cb_routsign2map(rout, um): void #endif """ - else: - if hasnote(rout): - ret['note'] = rout['note'] - rout['note'] = ['See elsewhere.'] + elif hasnote(rout): + ret['note'] = rout['note'] + rout['note'] = ['See elsewhere.'] nofargs = 0 nofoptargs = 0 if 'args' in rout and 'vars' in rout: @@ -796,7 +788,7 @@ def cb_routsign2map(rout, um): return ret -def common_sign2map(a, var): # obsolute +def common_sign2map(a, var): # obsolete ret = {'varname': a, 'ctype': getctype(var)} if isstringarray(var): ret['ctype'] = 'char' @@ -804,7 +796,7 @@ def common_sign2map(a, var): # obsolute ret['atype'] = c2capi_map[ret['ctype']] ret['elsize'] = get_elsize(var) if ret['ctype'] in cformat_map: - ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) + ret['showvalueformat'] = f"{cformat_map[ret['ctype']]}" if isarray(var): ret = dictappend(ret, getarrdims(a, var)) elif isstring(var): diff --git a/numpy/f2py/capi_maps.pyi b/numpy/f2py/capi_maps.pyi new file mode 100644 index 000000000000..9266003658a0 --- /dev/null +++ b/numpy/f2py/capi_maps.pyi @@ -0,0 +1,33 @@ +from .auxfuncs import _ROut, _Var, process_f2cmap_dict + +__all__ = [ + "cb_routsign2map", + "cb_sign2map", + "common_sign2map", + "getarrdims", + "getarrdocsign", + "getctype", + "getinit", + "getpydocsign", + "getstrlength", + "modsign2map", + "process_f2cmap_dict", + "routsign2map", + "sign2map", +] + +### + +def getctype(var: _Var) -> str: ... +def f2cexpr(expr: str) -> str: ... +def getstrlength(var: _Var) -> str: ... +def getarrdims(a: str, var: _Var, verbose: int = 0) -> dict[str, str]: ... +def getpydocsign(a: str, var: _Var) -> tuple[str, str]: ... +def getarrdocsign(a: str, var: _Var) -> str: ... +def getinit(a: str, var: _Var) -> tuple[str, str]: ... +def sign2map(a: str, var: _Var) -> dict[str, str]: ... 
+def routsign2map(rout: _ROut) -> dict[str, str]: ... +def modsign2map(m: _ROut) -> dict[str, str]: ... +def cb_sign2map(a: str, var: _Var, index: object | None = None) -> dict[str, str]: ... +def cb_routsign2map(rout: _ROut, um: str) -> dict[str, str]: ... +def common_sign2map(a: str, var: _Var) -> dict[str, str]: ... # obsolete diff --git a/numpy/f2py/cb_rules.py b/numpy/f2py/cb_rules.py index faf8dd401301..dcc75ec6f969 100644 --- a/numpy/f2py/cb_rules.py +++ b/numpy/f2py/cb_rules.py @@ -8,16 +8,39 @@ NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. """ -from . import __version__ +from . import __version__, cfuncs from .auxfuncs import ( - applyrules, debugcapi, dictappend, errmess, getargs, hasnote, isarray, - iscomplex, iscomplexarray, iscomplexfunction, isfunction, isintent_c, - isintent_hide, isintent_in, isintent_inout, isintent_nothide, - isintent_out, isoptional, isrequired, isscalar, isstring, - isstringfunction, issubroutine, l_and, l_not, l_or, outmess, replace, - stripcomma, throw_error + applyrules, + debugcapi, + dictappend, + errmess, + getargs, + hasnote, + isarray, + iscomplex, + iscomplexarray, + iscomplexfunction, + isfunction, + isintent_c, + isintent_hide, + isintent_in, + isintent_inout, + isintent_nothide, + isintent_out, + isoptional, + isrequired, + isscalar, + isstring, + isstringfunction, + issubroutine, + l_and, + l_not, + l_or, + outmess, + replace, + stripcomma, + throw_error, ) -from . 
import cfuncs f2py_version = __version__.version @@ -120,31 +143,15 @@ goto capi_fail; } #setdims# -#ifdef PYPY_VERSION -#define CAPI_ARGLIST_SETITEM(idx, value) PyList_SetItem((PyObject *)capi_arglist_list, idx, value) - capi_arglist_list = PySequence_List((PyObject *)capi_arglist); - if (capi_arglist_list == NULL) goto capi_fail; -#else #define CAPI_ARGLIST_SETITEM(idx, value) PyTuple_SetItem((PyObject *)capi_arglist, idx, value) -#endif #pyobjfrom# #undef CAPI_ARGLIST_SETITEM -#ifdef PYPY_VERSION - CFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist_list); -#else - CFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist); -#endif - CFUNCSMESS(\"cb:Call-back calling Python function #argname#.\\n\"); +CFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist); +CFUNCSMESS(\"cb:Call-back calling Python function #argname#.\\n\"); #ifdef F2PY_REPORT_ATEXIT f2py_cb_start_call_clock(); #endif -#ifdef PYPY_VERSION - capi_return = PyObject_CallObject(cb->capi,(PyObject *)capi_arglist_list); - Py_DECREF(capi_arglist_list); - capi_arglist_list = NULL; -#else - capi_return = PyObject_CallObject(cb->capi,(PyObject *)capi_arglist); -#endif +capi_return = PyObject_CallObject(cb->capi,(PyObject *)capi_arglist); #ifdef F2PY_REPORT_ATEXIT f2py_cb_stop_call_clock(); #endif @@ -384,11 +391,11 @@ def #argname#(#docsignature#): return #docreturn#\\n\\ ' if (capi_j>capi_i)\n GETSCALARFROMPYTUPLE(capi_return,capi_i++,#varname_i#_cb_capi,#ctype#,"#ctype#_from_pyobj failed in converting argument #varname# of call-back function #name# to C #ctype#\\n");'}, {l_and(debugcapi, l_and(l_not(iscomplex), isintent_c)): ' fprintf(stderr,"#showvalueformat#.\\n",#varname_i#);'}, - {l_and(debugcapi, l_and(l_not(iscomplex), l_not( isintent_c))): + {l_and(debugcapi, l_and(l_not(iscomplex), l_not(isintent_c))): ' fprintf(stderr,"#showvalueformat#.\\n",*#varname_i#_cb_capi);'}, {l_and(debugcapi, l_and(iscomplex, isintent_c)): ' fprintf(stderr,"#showvalueformat#.\\n",(#varname_i#).r,(#varname_i#).i);'}, - {l_and(debugcapi, 
l_and(iscomplex, l_not( isintent_c))): + {l_and(debugcapi, l_and(iscomplex, l_not(isintent_c))): ' fprintf(stderr,"#showvalueformat#.\\n",(*#varname_i#_cb_capi).r,(*#varname_i#_cb_capi).i);'}, ], 'need': [{isintent_out: ['#ctype#_from_pyobj', 'GETSCALARFROMPYTUPLE']}, @@ -513,14 +520,13 @@ def buildcallbacks(m): if b: buildcallback(b, m['name']) else: - errmess('warning: empty body for %s\n' % (m['name'])) + errmess(f"warning: empty body for {m['name']}\n") def buildcallback(rout, um): from . import capi_maps - outmess(' Constructing call-back function "cb_%s_in_%s"\n' % - (rout['name'], um)) + outmess(f" Constructing call-back function \"cb_{rout['name']}_in_{um}\"\n") args, depargs = getargs(rout) capi_maps.depargs = depargs var = rout['vars'] @@ -639,6 +645,5 @@ def buildcallback(rout, um): 'latexdocstr': ar['latexdocstr'], 'argname': rd['argname'] } - outmess(' %s\n' % (ar['docstrshort'])) - return + outmess(f" {ar['docstrshort']}\n") ################## Build call-back function ############# diff --git a/numpy/f2py/cb_rules.pyi b/numpy/f2py/cb_rules.pyi new file mode 100644 index 000000000000..b22f5448aaaf --- /dev/null +++ b/numpy/f2py/cb_rules.pyi @@ -0,0 +1,17 @@ +from collections.abc import Mapping +from typing import Any, Final + +from .__version__ import version + +## + +f2py_version: Final = version + +cb_routine_rules: Final[dict[str, str | list[str]]] = ... +cb_rout_rules: Final[list[dict[str, str | Any]]] = ... +cb_arg_rules: Final[list[dict[str, str | Any]]] = ... + +cb_map: Final[dict[str, list[list[str]]]] = ... + +def buildcallbacks(m: Mapping[str, object]) -> None: ... +def buildcallback(rout: Mapping[str, object], um: Mapping[str, object]) -> None: ... diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py index 4328a6e5004c..f48617f8e878 100644 --- a/numpy/f2py/cfuncs.py +++ b/numpy/f2py/cfuncs.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python3 """ C declarations, CPP macros, and C functions for f2py2e. 
Only required declarations/macros/functions will be used. @@ -10,16 +9,26 @@ NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. """ -import sys import copy +import sys from . import __version__ f2py_version = __version__.version -errmess = sys.stderr.write + + +def errmess(s: str) -> None: + """ + Write an error message to stderr. + + This indirection is needed because sys.stderr might not always be available (see #26862). + """ + if sys.stderr is not None: + sys.stderr.write(s) ##################### Definitions ################## + outneeds = {'includes0': [], 'includes': [], 'typedefs': [], 'typedefs_generated': [], 'userincludes': [], 'cppmacros': [], 'cfuncs': [], 'callbacks': [], 'f90modhooks': [], @@ -540,24 +549,32 @@ #error You need to install NumPy version 0.13 or higher. See https://scipy.org/install.html #endif """ + +# Defining the correct value to indicate thread-local storage in C without +# running a compile-time check (which we have no control over in generated +# code used outside of NumPy) is hard. Therefore we support overriding this +# via an external define - the f2py-using package can then use the same +# compile-time checks as we use for `NPY_TLS` when building NumPy (see +# scipy#21860 for an example of that). +# +# __STDC_NO_THREADS__ should not be coupled to the availability of _Thread_local. +# In case we get a bug report, guard it with __STDC_NO_THREADS__ after all. +# +# `thread_local` has become a keyword in C23, but don't try to use that yet +# (too new, doing so while C23 support is preliminary will likely cause more +# problems than it solves). +# +# Note: do not try to use `threads.h`, its availability is very low +# *and* threads.h isn't actually used where `F2PY_THREAD_LOCAL_DECL` is +# in the generated code. See gh-27718 for more details. 
cppmacros["F2PY_THREAD_LOCAL_DECL"] = """ #ifndef F2PY_THREAD_LOCAL_DECL #if defined(_MSC_VER) #define F2PY_THREAD_LOCAL_DECL __declspec(thread) #elif defined(NPY_OS_MINGW) #define F2PY_THREAD_LOCAL_DECL __thread -#elif defined(__STDC_VERSION__) \\ - && (__STDC_VERSION__ >= 201112L) \\ - && !defined(__STDC_NO_THREADS__) \\ - && (!defined(__GLIBC__) || __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ > 12)) \\ - && !defined(NPY_OS_OPENBSD) && !defined(NPY_OS_HAIKU) -/* __STDC_NO_THREADS__ was first defined in a maintenance release of glibc 2.12, - see https://lists.gnu.org/archive/html/commit-hurd/2012-07/msg00180.html, - so `!defined(__STDC_NO_THREADS__)` may give false positive for the existence - of `threads.h` when using an older release of glibc 2.12 - See gh-19437 for details on OpenBSD */ -#include -#define F2PY_THREAD_LOCAL_DECL thread_local +#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) +#define F2PY_THREAD_LOCAL_DECL _Thread_local #elif defined(__GNUC__) \\ && (__GNUC__ > 4 || (__GNUC__ == 4 && (__GNUC_MINOR__ >= 4))) #define F2PY_THREAD_LOCAL_DECL __thread @@ -581,32 +598,37 @@ return ii; }""" cfuncs['forcomb'] = """ -static struct { int nd;npy_intp *d;int *i,*i_tr,tr; } forcombcache; -static int initforcomb(npy_intp *dims,int nd,int tr) { +struct ForcombCache { int nd;npy_intp *d;int *i,*i_tr,tr; }; +static int initforcomb(struct ForcombCache *cache, npy_intp *dims,int nd,int tr) { int k; if (dims==NULL) return 0; if (nd<0) return 0; - forcombcache.nd = nd; - forcombcache.d = dims; - forcombcache.tr = tr; - if ((forcombcache.i = (int *)malloc(sizeof(int)*nd))==NULL) return 0; - if ((forcombcache.i_tr = (int *)malloc(sizeof(int)*nd))==NULL) return 0; + cache->nd = nd; + cache->d = dims; + cache->tr = tr; + + cache->i = (int *)malloc(sizeof(int)*nd); + if (cache->i==NULL) return 0; + cache->i_tr = (int *)malloc(sizeof(int)*nd); + if (cache->i_tr==NULL) {free(cache->i); return 0;}; + for (k=1;ki[k] = cache->i_tr[nd-k-1] = 0; } - 
forcombcache.i[0] = forcombcache.i_tr[nd-1] = -1; + cache->i[0] = cache->i_tr[nd-1] = -1; return 1; } -static int *nextforcomb(void) { +static int *nextforcomb(struct ForcombCache *cache) { + if (cache==NULL) return NULL; int j,*i,*i_tr,k; - int nd=forcombcache.nd; - if ((i=forcombcache.i) == NULL) return NULL; - if ((i_tr=forcombcache.i_tr) == NULL) return NULL; - if (forcombcache.d == NULL) return NULL; + int nd=cache->nd; + if ((i=cache->i) == NULL) return NULL; + if ((i_tr=cache->i_tr) == NULL) return NULL; + if (cache->d == NULL) return NULL; i[0]++; - if (i[0]==forcombcache.d[0]) { + if (i[0]==cache->d[0]) { j=1; - while ((jd[j]-1)) j++; if (j==nd) { free(i); free(i_tr); @@ -617,7 +639,7 @@ i_tr[nd-j-1]++; } else i_tr[nd-1]++; - if (forcombcache.tr) return i_tr; + if (cache->tr) return i_tr; return i; }""" needs['try_pyarr_from_string'] = ['STRINGCOPYN', 'PRINTPYOBJERR', 'string'] @@ -1030,9 +1052,12 @@ PyArray_ScalarAsCtype(obj, v); return 1; } - else if (PyArray_Check(obj) && PyArray_TYPE(obj) == NPY_LONGDOUBLE) { - (*v) = *((npy_longdouble *)PyArray_DATA(obj)); - return 1; + else if (PyArray_Check(obj)) { + PyArrayObject *arr = (PyArrayObject *)obj; + if (PyArray_TYPE(arr) == NPY_LONGDOUBLE) { + (*v) = *((npy_longdouble *)PyArray_DATA(arr)); + return 1; + } } } if (double_from_pyobj(&d, obj, errmess)) { @@ -1114,10 +1139,13 @@ PyArray_ScalarAsCtype(obj, v); return 1; } - else if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_CLONGDOUBLE) { - (*v).r = npy_creall(*(((npy_clongdouble *)PyArray_DATA(obj)))); - (*v).i = npy_cimagl(*(((npy_clongdouble *)PyArray_DATA(obj)))); - return 1; + else if (PyArray_Check(obj)) { + PyArrayObject *arr = (PyArrayObject *)obj; + if (PyArray_TYPE(arr)==NPY_CLONGDOUBLE) { + (*v).r = npy_creall(*(((npy_clongdouble *)PyArray_DATA(arr)))); + (*v).i = npy_cimagl(*(((npy_clongdouble *)PyArray_DATA(arr)))); + return 1; + } } } if (complex_double_from_pyobj(&cd,obj,errmess)) { @@ -1318,7 +1346,7 @@ Py_INCREF(tmp_fun); tot = maxnofargs; 
if (PyCFunction_Check(fun)) { - /* In case the function has a co_argcount (like on PyPy) */ + /* In case the function has a co_argcount */ di = 0; } if (xa != NULL) @@ -1422,14 +1450,14 @@ def buildcfuncs(): from .capi_maps import c2capi_map for k in c2capi_map.keys(): - m = 'pyarr_from_p_%s1' % k + m = f'pyarr_from_p_{k}1' cppmacros[ - m] = '#define %s(v) (PyArray_SimpleNewFromData(0,NULL,%s,(char *)v))' % (m, c2capi_map[k]) + m] = f'#define {m}(v) (PyArray_SimpleNewFromData(0,NULL,{c2capi_map[k]},(char *)v))' k = 'string' - m = 'pyarr_from_p_%s1' % k + m = f'pyarr_from_p_{k}1' # NPY_CHAR compatibility, NPY_STRING with itemsize 1 cppmacros[ - m] = '#define %s(v,dims) (PyArray_New(&PyArray_Type, 1, dims, NPY_STRING, NULL, v, 1, NPY_ARRAY_CARRAY, NULL))' % (m) + m] = f'#define {m}(v,dims) (PyArray_New(&PyArray_Type, 1, dims, NPY_STRING, NULL, v, 1, NPY_ARRAY_CARRAY, NULL))' ############ Auxiliary functions for sorting needs ################### @@ -1461,7 +1489,7 @@ def append_needs(need, flag=1): elif need in commonhooks: n = 'commonhooks' else: - errmess('append_needs: unknown need %s\n' % (repr(need))) + errmess(f'append_needs: unknown need {repr(need)}\n') return if need in outneeds[n]: return @@ -1497,8 +1525,7 @@ def append_needs(need, flag=1): tmp[n].append(need) return tmp else: - errmess('append_needs: expected list or string but got :%s\n' % - (repr(need))) + errmess(f'append_needs: expected list or string but got :{repr(need)}\n') def get_needs(): diff --git a/numpy/f2py/cfuncs.pyi b/numpy/f2py/cfuncs.pyi new file mode 100644 index 000000000000..2187368797a4 --- /dev/null +++ b/numpy/f2py/cfuncs.pyi @@ -0,0 +1,31 @@ +from typing import Final + +from .__version__ import version + +### + +type _NeedListDict = dict[str, list[str]] +type _NeedDict = dict[str, str] + +### + +f2py_version: Final = version + +outneeds: Final[_NeedListDict] = ... +needs: Final[_NeedListDict] = ... + +includes0: Final[_NeedDict] = ... +includes: Final[_NeedDict] = ... 
+userincludes: Final[_NeedDict] = ... +typedefs: Final[_NeedDict] = ... +typedefs_generated: Final[_NeedDict] = ... +cppmacros: Final[_NeedDict] = ... +cfuncs: Final[_NeedDict] = ... +callbacks: Final[_NeedDict] = ... +f90modhooks: Final[_NeedDict] = ... +commonhooks: Final[_NeedDict] = ... + +def errmess(s: str) -> None: ... +def buildcfuncs() -> None: ... +def get_needs() -> _NeedListDict: ... +def append_needs(need: str | list[str], flag: int = 1) -> _NeedListDict: ... diff --git a/numpy/f2py/common_rules.py b/numpy/f2py/common_rules.py index 64347b737454..cef757b6c5a3 100644 --- a/numpy/f2py/common_rules.py +++ b/numpy/f2py/common_rules.py @@ -9,13 +9,11 @@ NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. """ from . import __version__ + f2py_version = __version__.version -from .auxfuncs import ( - hasbody, hascommon, hasnote, isintent_hide, outmess, getuseblocks -) -from . import capi_maps -from . import func2subr +from . import capi_maps, func2subr +from .auxfuncs import getuseblocks, hasbody, hascommon, hasnote, isintent_hide, outmess from .crackfortran import rmbadname @@ -45,19 +43,19 @@ def buildhooks(m): fwrap = [''] def fadd(line, s=fwrap): - s[0] = '%s\n %s' % (s[0], line) + s[0] = f'{s[0]}\n {line}' chooks = [''] def cadd(line, s=chooks): - s[0] = '%s\n%s' % (s[0], line) + s[0] = f'{s[0]}\n{line}' ihooks = [''] def iadd(line, s=ihooks): - s[0] = '%s\n%s' % (s[0], line) + s[0] = f'{s[0]}\n{line}' doc = [''] def dadd(line, s=doc): - s[0] = '%s\n%s' % (s[0], line) + s[0] = f'{s[0]}\n{line}' for (name, vnames, vars) in findcommonblocks(m): lower_name = name.lower() hnames, inames = [], [] @@ -72,17 +70,17 @@ def dadd(line, s=doc): else: outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n' % ( name, ','.join(inames))) - fadd('subroutine f2pyinit%s(setupfunc)' % name) + fadd(f'subroutine f2pyinit{name}(setupfunc)') for usename in getuseblocks(m): fadd(f'use {usename}') fadd('external setupfunc') for n in vnames: 
fadd(func2subr.var2fixfortran(vars, n)) if name == '_BLNK_': - fadd('common %s' % (','.join(vnames))) + fadd(f"common {','.join(vnames)}") else: - fadd('common /%s/ %s' % (name, ','.join(vnames))) - fadd('call setupfunc(%s)' % (','.join(inames))) + fadd(f"common /{name}/ {','.join(vnames)}") + fadd(f"call setupfunc({','.join(inames)})") fadd('end\n') cadd('static FortranDataDef f2py_%s_def[] = {' % (name)) idims = [] @@ -92,7 +90,7 @@ def dadd(line, s=doc): at = capi_maps.c2capi_map[ct] dm = capi_maps.getarrdims(n, vars[n]) if dm['dims']: - idims.append('(%s)' % (dm['dims'])) + idims.append(f"({dm['dims']})") else: idims.append('') dms = dm['dims'].strip() @@ -106,7 +104,7 @@ def dadd(line, s=doc): cadd('static void f2py_setup_%s(%s) {' % (name, inames1_tps)) cadd('\tint i_f2py=0;') for n in inames1: - cadd('\tf2py_%s_def[i_f2py++].data = %s;' % (name, n)) + cadd(f'\tf2py_{name}_def[i_f2py++].data = {n};') cadd('}') if '_' in lower_name: F_FUNC = 'F_FUNC_US' @@ -119,10 +117,9 @@ def dadd(line, s=doc): cadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);' % (F_FUNC, lower_name, name.upper(), name)) cadd('}\n') - iadd('\ttmp = PyFortranObject_New(f2py_%s_def,f2py_init_%s);' % (name, name)) + iadd(f'\ttmp = PyFortranObject_New(f2py_{name}_def,f2py_init_{name});') iadd('\tif (tmp == NULL) return NULL;') - iadd('\tif (F2PyDict_SetItemString(d, \"%s\", tmp) == -1) return NULL;' - % name) + iadd(f'\tif (F2PyDict_SetItemString(d, "{name}", tmp) == -1) return NULL;') iadd('\tPy_DECREF(tmp);') tname = name.replace('_', '\\_') dadd('\\subsection{Common block \\texttt{%s}}\n' % (tname)) @@ -134,10 +131,10 @@ def dadd(line, s=doc): note = vars[n]['note'] if isinstance(note, list): note = '\n'.join(note) - dadd('--- %s' % (note)) + dadd(f'--- {note}') dadd('\\end{description}') ret['docs'].append( - '"\t/%s/ %s\\n"' % (name, ','.join(map(lambda v, d: v + d, inames, idims)))) + f"\"\t/{name}/ {','.join(map(lambda v, d: v + d, inames, idims))}\\n\"") ret['commonhooks'] = chooks 
ret['initcommonhooks'] = ihooks ret['latexdoc'] = doc[0] diff --git a/numpy/f2py/common_rules.pyi b/numpy/f2py/common_rules.pyi new file mode 100644 index 000000000000..d840de0005d6 --- /dev/null +++ b/numpy/f2py/common_rules.pyi @@ -0,0 +1,9 @@ +from collections.abc import Mapping +from typing import Any, Final + +from .__version__ import version + +f2py_version: Final = version + +def findcommonblocks(block: Mapping[str, object], top: int = 1) -> list[tuple[str, list[str], dict[str, Any]]]: ... +def buildhooks(m: Mapping[str, object]) -> tuple[dict[str, Any], str]: ... diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py old mode 100755 new mode 100644 index 2c6fa83889ca..d75fe4df6cd6 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python3 """ crackfortran --- read fortran (77,90) code and extract declaration information. @@ -137,27 +136,27 @@ The above may be solved by creating appropriate preprocessor program, for example. """ -import sys -import string +import codecs +import copy import fileinput -import re import os -import copy import platform -import codecs +import re +import string +import sys from pathlib import Path + try: import charset_normalizer except ImportError: charset_normalizer = None -from . import __version__ +from . import __version__, symbolic # The environment provided by auxfuncs.py is needed for some calls to eval. # As the needed functions cannot be determined by static inspection of the # code, it is safest to use import * pending a major refactoring of f2py. from .auxfuncs import * -from . 
import symbolic f2py_version = __version__.version @@ -243,6 +242,7 @@ def outmess(line, flag=1): sys.stdout.write(filepositiontext) sys.stdout.write(line) + re._MAXCACHE = 50 defaultimplicitrules = {} for c in "abcdefghopqrstuvwxyz$_": @@ -266,8 +266,7 @@ def outmess(line, flag=1): def rmbadname1(name): if name in badnames: - errmess('rmbadname1: Replacing "%s" with "%s".\n' % - (name, badnames[name])) + errmess(f'rmbadname1: Replacing "{name}" with "{badnames[name]}".\n') return badnames[name] return name @@ -278,8 +277,7 @@ def rmbadname(names): def undo_rmbadname1(name): if name in invbadnames: - errmess('undo_rmbadname1: Replacing "%s" with "%s".\n' - % (name, invbadnames[name])) + errmess(f'undo_rmbadname1: Replacing "{name}" with "{invbadnames[name]}".\n') return invbadnames[name] return name @@ -417,7 +415,7 @@ def readfortrancode(ffile, dowithline=show, istop=1): beginpattern = beginpattern90 outmess('\tReading file %s (format:%s%s)\n' % (repr(currentfilename), sourcecodeform, - strictf77 and ',strict' or '')) + (strictf77 and ',strict') or '')) l = l.expandtabs().replace('\xa0', ' ') # Get rid of newline characters @@ -425,11 +423,14 @@ def readfortrancode(ffile, dowithline=show, istop=1): if l[-1] not in "\n\r\f": break l = l[:-1] + # Do not lower for directives, gh-2547, gh-27697, gh-26681 + is_f2py_directive = False # Unconditionally remove comments (l, rl) = split_by_unquoted(l, '!') l += ' ' if rl[:5].lower() == '!f2py': # f2py directive l, _ = split_by_unquoted(l + 4 * ' ' + rl[5:], '!') + is_f2py_directive = True if l.strip() == '': # Skip empty line if sourcecodeform == 'free': # In free form, a statement continues in the next line @@ -449,13 +450,15 @@ def readfortrancode(ffile, dowithline=show, istop=1): if l[0] in ['*', 'c', '!', 'C', '#']: if l[1:5].lower() == 'f2py': # f2py directive l = ' ' + l[5:] + is_f2py_directive = True else: # Skip comment line cont = False + is_f2py_directive = False continue elif strictf77: if len(l) > 72: l = l[:72] 
- if not (l[0] in spacedigits): + if l[0] not in spacedigits: raise Exception('readfortrancode: Found non-(space,digit) char ' 'in the first column.\n\tAre you sure that ' 'this code is in fix form?\n\tline=%s' % repr(l)) @@ -468,7 +471,7 @@ def readfortrancode(ffile, dowithline=show, istop=1): else: r = cont1.match(l) if r: - l = r.group('line') # Continuation follows .. + l = r.group('line') # Continuation follows .. if cont: ll = ll + cont2.match(l).group('line') finalline = '' @@ -476,6 +479,7 @@ def readfortrancode(ffile, dowithline=show, istop=1): else: # clean up line beginning from possible digits. l = ' ' + l[5:] + # f2py directives are already stripped by this point if localdolowercase: finalline = ll.lower() else: @@ -505,7 +509,9 @@ def readfortrancode(ffile, dowithline=show, istop=1): origfinalline = '' else: if localdolowercase: - finalline = ll.lower() + # only skip lowering for C style constructs + # gh-2547, gh-27697, gh-26681, gh-28014 + finalline = ll.lower() if not (is_f2py_directive and iscstyledirective(ll)) else ll else: finalline = ll origfinalline = ll @@ -513,7 +519,7 @@ def readfortrancode(ffile, dowithline=show, istop=1): cont = (r is not None) else: raise ValueError( - "Flag sourcecodeform must be either 'fix' or 'free': %s" % repr(sourcecodeform)) + f"Flag sourcecodeform must be either 'fix' or 'free': {repr(sourcecodeform)}") filepositiontext = 'Line #%d in %s:"%s"\n\t' % ( fin.filelineno() - 1, currentfilename, l1) m = includeline.match(origfinalline) @@ -537,6 +543,7 @@ def readfortrancode(ffile, dowithline=show, istop=1): else: dowithline(finalline) l1 = ll + # Last line should never have an f2py directive anyway if localdolowercase: finalline = ll.lower() else: @@ -571,9 +578,10 @@ def readfortrancode(ffile, dowithline=show, istop=1): gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\ beginpattern, quiet, verbose, dolowercase = saveglobals + # Crack line -beforethisafter = r'\s*(?P%s(?=\s*(\b(%s)\b)))' + \ 
- r'\s*(?P(\b(%s)\b))' + \ +beforethisafter = r'\s*(?P%s(?=\s*(\b(%s)\b)))'\ + r'\s*(?P(\b(%s)\b))'\ r'\s*(?P%s)\s*\Z' ## fortrantypes = r'character|logical|integer|real|complex|double\s*(precision\s*(complex|)|complex)|type(?=\s*\([\w\s,=(*)]*\))|byte' @@ -592,7 +600,7 @@ def readfortrancode(ffile, dowithline=show, istop=1): beginpattern77 = re.compile( beforethisafter % ('', groupbegins77, groupbegins77, '.*'), re.I), 'begin' groupbegins90 = groupbegins77 + \ - r'|module(?!\s*procedure)|python\s*module|(abstract|)\s*interface|' + \ + r'|module(?!\s*procedure)|python\s*module|(abstract|)\s*interface|'\ r'type(?!\s*\()' beginpattern90 = re.compile( beforethisafter % ('', groupbegins90, groupbegins90, '.*'), re.I), 'begin' @@ -601,7 +609,7 @@ def readfortrancode(ffile, dowithline=show, istop=1): endpattern = re.compile( beforethisafter % ('', groupends, groupends, '.*'), re.I), 'end' # block, the Fortran 2008 construct needs special handling in the rest of the file -endifs = r'end\s*(if|do|where|select|while|forall|associate|' + \ +endifs = r'end\s*(if|do|where|select|while|forall|associate|'\ r'critical|enum|team)' endifpattern = re.compile( beforethisafter % (r'[\w]*?', endifs, endifs, '.*'), re.I), 'endif' @@ -663,8 +671,8 @@ def split_by_unquoted(line, characters): r = re.compile( r"\A(?P({single_quoted}|{double_quoted}|{not_quoted})*)" r"(?P{char}.*)\Z".format( - not_quoted="[^\"'{}]".format(re.escape(characters)), - char="[{}]".format(re.escape(characters)), + not_quoted=f"[^\"'{re.escape(characters)}]", + char=f"[{re.escape(characters)}]", single_quoted=r"('([^'\\]|(\\.))*')", double_quoted=r'("([^"\\]|(\\.))*")')) m = r.match(line) @@ -681,6 +689,7 @@ def _simplifyargs(argsline): a.append(n) return ','.join(a) + crackline_re_1 = re.compile(r'\s*(?P\b[a-z]+\w*\b)\s*=.*', re.I) crackline_bind_1 = re.compile(r'\s*(?P\b[a-z]+\w*\b)\s*=.*', re.I) crackline_bindlang = re.compile(r'\s*bind\(\s*(?P[^,]+)\s*,\s*name\s*=\s*"(?P[^"]+)"\s*\)', re.I) @@ -782,14 +791,13 
@@ def crackline(line, reset=0): m2 = re_1.match(m1.group('before')) a = _simplifyargs(m1.group('args')) if m2: - line = 'callfun %s(%s) result (%s)' % ( - name, a, m2.group('result')) + line = f"callfun {name}({a}) result ({m2.group('result')})" else: - line = 'callfun %s(%s)' % (name, a) + line = f'callfun {name}({a})' m = callfunpattern[0].match(line) if not m: outmess( - 'crackline: could not resolve function call for line=%s.\n' % repr(line)) + f'crackline: could not resolve function call for line={repr(line)}.\n') return analyzeline(m, 'callfun', line) return @@ -806,7 +814,7 @@ def crackline(line, reset=0): raise Exception('crackline: groupcounter(=%s) is nonpositive. ' 'Check the blocks.' % (groupcounter)) - m1 = beginpattern[0].match((line)) + m1 = beginpattern[0].match(line) if (m1) and (not m1.group('this') == groupname[groupcounter]): raise Exception('crackline: End group %s does not match with ' 'previous Begin group %s\n\t%s' % @@ -911,12 +919,13 @@ def appenddecl(decl, decl2, force=1): pass elif k in ['intent', 'check', 'dimension', 'optional', 'required', 'depend']: - errmess('appenddecl: "%s" not implemented.\n' % k) + errmess(f'appenddecl: "{k}" not implemented.\n') else: raise Exception('appenddecl: Unknown variable definition key: ' + str(k)) return decl + selectpattern = re.compile( r'\s*(?P(@\(@.*?@\)@|\*[\d*]+|\*\s*@\(@.*?@\)@|))(?P.*)\Z', re.I) typedefpattern = re.compile( @@ -1002,7 +1011,7 @@ def analyzeline(m, case, line): and not skipemptyends and groupcounter < 1: newname = os.path.basename(currentfilename).split('.')[0] outmess( - 'analyzeline: no group yet. Creating program group with name "%s".\n' % newname) + f'analyzeline: no group yet. 
Creating program group with name "{newname}".\n') gotnextfile = 0 groupcounter = groupcounter + 1 groupname[groupcounter] = 'program' @@ -1025,7 +1034,7 @@ def analyzeline(m, case, line): block = 'abstract interface' if block == 'type': name, attrs, _ = _resolvetypedefpattern(m.group('after')) - groupcache[groupcounter]['vars'][name] = dict(attrspec = attrs) + groupcache[groupcounter]['vars'][name] = {'attrspec': attrs} args = [] result = None else: @@ -1115,13 +1124,12 @@ def analyzeline(m, case, line): groupcache[groupcounter]['result'] = result if groupcounter == 1: groupcache[groupcounter]['from'] = currentfilename + elif f77modulename and groupcounter == 3: + groupcache[groupcounter]['from'] = '%s:%s' % ( + groupcache[groupcounter - 1]['from'], currentfilename) else: - if f77modulename and groupcounter == 3: - groupcache[groupcounter]['from'] = '%s:%s' % ( - groupcache[groupcounter - 1]['from'], currentfilename) - else: - groupcache[groupcounter]['from'] = '%s:%s' % ( - groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name']) + groupcache[groupcounter]['from'] = '%s:%s' % ( + groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name']) for k in list(groupcache[groupcounter].keys()): if not groupcache[groupcounter][k]: del groupcache[groupcounter][k] @@ -1151,7 +1159,7 @@ def analyzeline(m, case, line): if bindcline: bindcdat = re.search(crackline_bindlang, bindcline) if bindcdat: - groupcache[groupcounter]['bindlang'] = {name : {}} + groupcache[groupcounter]['bindlang'] = {name: {}} groupcache[groupcounter]['bindlang'][name]["lang"] = bindcdat.group('lang') if bindcdat.group('lang_name'): groupcache[groupcounter]['bindlang'][name]["name"] = bindcdat.group('lang_name') @@ -1188,7 +1196,7 @@ def analyzeline(m, case, line): groupcounter = groupcounter - 1 # end interface elif case == 'entry': - name, args, result, _= _resolvenameargspattern(m.group('after')) + name, args, result, _ = _resolvenameargspattern(m.group('after')) if 
name is not None: if args: args = rmbadname([x.strip() @@ -1241,8 +1249,7 @@ def analyzeline(m, case, line): continue else: k = rmbadname1(m1.group('name')) - if case in ['public', 'private'] and \ - (k == 'operator' or k == 'assignment'): + if case in ['public', 'private'] and k in {'operator', 'assignment'}: k += m1.group('after') if k not in edecl: edecl[k] = {} @@ -1263,7 +1270,7 @@ def analyzeline(m, case, line): groupcache[groupcounter]['args'].append(k) else: errmess( - 'analyzeline: intent(callback) %s is ignored\n' % (k)) + f'analyzeline: intent(callback) {k} is ignored\n') else: errmess('analyzeline: intent(callback) %s is already' ' in argument list\n' % (k)) @@ -1298,7 +1305,7 @@ def analyzeline(m, case, line): k, initexpr = [x.strip() for x in e.split('=')] except Exception: outmess( - 'analyzeline: could not extract name,expr in parameter statement "%s" of "%s"\n' % (e, ll)) + f'analyzeline: could not extract name,expr in parameter statement "{e}" of "{ll}\"\n') continue params = get_parameters(edecl) k = rmbadname1(k) @@ -1337,10 +1344,7 @@ def analyzeline(m, case, line): if m.group('after').strip().lower() == 'none': groupcache[groupcounter]['implicit'] = None elif m.group('after'): - if 'implicit' in groupcache[groupcounter]: - impl = groupcache[groupcounter]['implicit'] - else: - impl = {} + impl = groupcache[groupcounter].get('implicit', {}) if impl is None: outmess( 'analyzeline: Overwriting earlier "implicit none" statement.\n') @@ -1351,12 +1355,12 @@ def analyzeline(m, case, line): r'\s*(?P.*?)\s*(\(\s*(?P[a-z-, ]+)\s*\)\s*|)\Z', e, re.I) if not m1: outmess( - 'analyzeline: could not extract info of implicit statement part "%s"\n' % (e)) + f'analyzeline: could not extract info of implicit statement part "{e}\"\n') continue m2 = typespattern4implicit.match(m1.group('this')) if not m2: outmess( - 'analyzeline: could not extract types pattern of implicit statement part "%s"\n' % (e)) + f'analyzeline: could not extract types pattern of implicit 
statement part "{e}\"\n') continue typespec, selector, attr, edecl = cracktypespec0( m2.group('this'), m2.group('after')) @@ -1375,13 +1379,13 @@ def analyzeline(m, case, line): begc, endc = [x.strip() for x in r.split('-')] except Exception: outmess( - 'analyzeline: expected "-" instead of "%s" in range list of implicit statement\n' % r) + f'analyzeline: expected "-" instead of "{r}" in range list of implicit statement\n') continue else: begc = endc = r.strip() if not len(begc) == len(endc) == 1: outmess( - 'analyzeline: expected "-" instead of "%s" in range list of implicit statement (2)\n' % r) + f'analyzeline: expected "-" instead of "{r}" in range list of implicit statement (2)\n') continue for o in range(ord(begc), ord(endc) + 1): impl[chr(o)] = decl @@ -1424,15 +1428,13 @@ def analyzeline(m, case, line): vars = groupcache[groupcounter].get('vars', {}) last_name = None for l in ll: - l[0], l[1] = l[0].strip(), l[1].strip() - if l[0].startswith(','): - l[0] = l[0][1:] + l[0], l[1] = l[0].strip().removeprefix(','), l[1].strip() if l[0].startswith('('): - outmess('analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % l[0]) + outmess(f'analyzeline: implied-DO list "{l[0]}" is not supported. Skipping.\n') continue for idx, v in enumerate(rmbadname([x.strip() for x in markoutercomma(l[0]).split('@,@')])): if v.startswith('('): - outmess('analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % v) + outmess(f'analyzeline: implied-DO list "{v}" is not supported. Skipping.\n') # XXX: subsequent init expressions may get wrong values. # Ignoring since data statements are irrelevant for # wrapping. @@ -1443,14 +1445,14 @@ def analyzeline(m, case, line): # integer dimension(3) :: mytab # common /mycom/ mytab # Since in any case it is initialized in the Fortran code - outmess('Comment line in declaration "%s" is not supported. Skipping.\n' % l[1]) + outmess(f'Comment line in declaration "{l[1]}" is not supported. 
Skipping.\n') continue vars.setdefault(v, {}) vtype = vars[v].get('typespec') vdim = getdimension(vars[v]) matches = re.findall(r"\(.*?\)", l[1]) if vtype == 'complex' else l[1].split(',') try: - new_val = "(/{}/)".format(", ".join(matches)) if vdim else matches[idx] + new_val = f"(/{', '.join(matches)}/)" if vdim else matches[idx] except IndexError: # gh-24746 # Runs only if above code fails. Fixes the line @@ -1463,15 +1465,15 @@ def analyzeline(m, case, line): try: multiplier, value = match.split("*") expanded_list.extend([value.strip()] * int(multiplier)) - except ValueError: # if int(multiplier) fails + except ValueError: # if int(multiplier) fails expanded_list.append(match.strip()) else: expanded_list.append(match.strip()) matches = expanded_list - new_val = "(/{}/)".format(", ".join(matches)) if vdim else matches[idx] + new_val = f"(/{', '.join(matches)}/)" if vdim else matches[idx] current_val = vars[v].get('=') if current_val and (current_val != new_val): - outmess('analyzeline: changing init expression of "%s" ("%s") to "%s"\n' % (v, current_val, new_val)) + outmess(f'analyzeline: changing init expression of "{v}" ("{current_val}") to "{new_val}\"\n') vars[v]['='] = new_val last_name = v groupcache[groupcounter]['vars'] = vars @@ -1481,26 +1483,9 @@ def analyzeline(m, case, line): line = m.group('after').strip() if not line[0] == '/': line = '//' + line + cl = [] - f = 0 - bn = '' - ol = '' - for c in line: - if c == '/': - f = f + 1 - continue - if f >= 3: - bn = bn.strip() - if not bn: - bn = '_BLNK_' - cl.append([bn, ol]) - f = f - 2 - bn = '' - ol = '' - if f % 2: - bn = bn + c - else: - ol = ol + c + [_, bn, ol] = re.split('/', line, maxsplit=2) bn = bn.strip() if not bn: bn = '_BLNK_' @@ -1541,12 +1526,10 @@ def analyzeline(m, case, line): 'use').strip() else: outmess( - 'analyzeline: Not local=>use pattern found in %s\n' % repr(l)) + f'analyzeline: Not local=>use pattern found in {repr(l)}\n') else: rl[l] = l 
groupcache[groupcounter]['use'][name]['map'] = rl - else: - pass else: print(m.groupdict()) outmess('analyzeline: Could not crack the use statement.\n') @@ -1569,10 +1552,9 @@ def analyzeline(m, case, line): appendmultiline(groupcache[gc], previous_context[:2], m.group('this')) - else: - if verbose > 1: - print(m.groupdict()) - outmess('analyzeline: No code implemented for line.\n') + elif verbose > 1: + print(m.groupdict()) + outmess('analyzeline: No code implemented for line.\n') def appendmultiline(group, context_name, ml): @@ -1582,7 +1564,6 @@ def appendmultiline(group, context_name, ml): if context_name not in d: d[context_name] = [] d[context_name].append(ml) - return def cracktypespec0(typespec, ll): @@ -1610,6 +1591,8 @@ def cracktypespec0(typespec, ll): attr = ll[:i].strip() ll = ll[i + 2:] return typespec, selector, attr, ll + + ##### namepattern = re.compile(r'\s*(?P\b\w+\b)\s*(?P.*)\s*\Z', re.I) kindselector = re.compile( @@ -1641,7 +1624,7 @@ def removespaces(expr): def markinnerspaces(line): """ - The function replace all spaces in the input variable line which are + The function replace all spaces in the input variable line which are surrounded with quotation marks, with the triplet "@_@". For instance, for the input "a 'b c'" the function returns "a 'b@_@c'" @@ -1654,7 +1637,7 @@ def markinnerspaces(line): ------- str - """ + """ fragment = '' inside = False current_quote = None @@ -1712,7 +1695,7 @@ def updatevars(typespec, selector, attrspec, entitydecl): m = namepattern.match(e) if not m: outmess( - 'updatevars: no name pattern found for entity=%s. Skipping.\n' % (repr(e))) + f'updatevars: no name pattern found for entity={repr(e)}. 
Skipping.\n') continue ename = rmbadname1(m.group('name')) edecl = {} @@ -1820,7 +1803,7 @@ def updatevars(typespec, selector, attrspec, entitydecl): edecl['='] = d1['init'] if 'array' in d1: - dm = 'dimension(%s)' % d1['array'] + dm = f"dimension({d1['array']})" if 'attrspec' not in edecl or (not edecl['attrspec']): edecl['attrspec'] = [dm] else: @@ -1854,7 +1837,7 @@ def cracktypespec(typespec, selector): kindselect = kindselector.match(selector) if not kindselect: outmess( - 'cracktypespec: no kindselector pattern found for %s\n' % (repr(selector))) + f'cracktypespec: no kindselector pattern found for {repr(selector)}\n') return kindselect = kindselect.groupdict() kindselect['*'] = kindselect['kind2'] @@ -1868,7 +1851,7 @@ def cracktypespec(typespec, selector): charselect = charselector.match(selector) if not charselect: outmess( - 'cracktypespec: no charselector pattern found for %s\n' % (repr(selector))) + f'cracktypespec: no charselector pattern found for {repr(selector)}\n') return charselect = charselect.groupdict() charselect['*'] = charselect['charlen'] @@ -1899,8 +1882,7 @@ def cracktypespec(typespec, selector): outmess('cracktypespec: no typename found in %s\n' % (repr(typespec + selector))) else: - outmess('cracktypespec: no selector used for %s\n' % - (repr(selector))) + outmess(f'cracktypespec: no selector used for {repr(selector)}\n') return kindselect, charselect, typename ###### @@ -1973,7 +1955,7 @@ def setmesstext(block): global filepositiontext try: - filepositiontext = 'In: %s:%s\n' % (block['from'], block['name']) + filepositiontext = f"In: {block['from']}:{block['name']}\n" except Exception: pass @@ -2007,7 +1989,7 @@ def get_useparameters(block, param_map=None): continue # XXX: apply mapping if mapping: - errmess('get_useparameters: mapping for %s not impl.\n' % (mapping)) + errmess(f'get_useparameters: mapping for {mapping} not impl.\n') for k, v in list(params.items()): if k in param_map: outmess('get_useparameters: overriding parameter 
%s with' @@ -2027,7 +2009,7 @@ def postcrack2(block, tab='', param_map=None): for g in block] return ret setmesstext(block) - outmess('%sBlock: %s\n' % (tab, block['name']), 0) + outmess(f"{tab}Block: {block['name']}\n", 0) if param_map is None: param_map = get_useparameters(block) @@ -2074,12 +2056,12 @@ def postcrack(block, args=None, tab=''): raise Exception('postcrack: Expected block dictionary instead of ' + str(block)) if 'name' in block and not block['name'] == 'unknown_interface': - outmess('%sBlock: %s\n' % (tab, block['name']), 0) + outmess(f"{tab}Block: {block['name']}\n", 0) block = analyzeargs(block) block = analyzecommon(block) block['vars'] = analyzevars(block) block['sortvars'] = sortvarnames(block['vars']) - if 'args' in block and block['args']: + if block.get('args'): args = block['args'] block['body'] = analyzebody(block, args, tab=tab) @@ -2095,7 +2077,7 @@ def postcrack(block, args=None, tab=''): if 'name' in block: name = block['name'] # and not userisdefined: # Build a __user__ module - if 'externals' in block and block['externals']: + if block.get('externals'): interfaced = [] if 'interfaced' in block: interfaced = block['interfaced'] @@ -2106,9 +2088,9 @@ def postcrack(block, args=None, tab=''): mname = 'unknown__user__routines' if mname in userisdefined: i = 1 - while '%s_%i' % (mname, i) in userisdefined: + while f"{mname}_{i}" in userisdefined: i = i + 1 - mname = '%s_%i' % (mname, i) + mname = f"{mname}_{i}" interface = {'block': 'interface', 'body': [], 'vars': {}, 'name': name + '_user_interface'} for e in block['externals']: @@ -2131,9 +2113,8 @@ def postcrack(block, args=None, tab=''): del interfaced[interfaced.index(e)] break interface['body'].append(edef) - else: - if e in mvars and not isexternal(mvars[e]): - interface['vars'][e] = mvars[e] + elif e in mvars and not isexternal(mvars[e]): + interface['vars'][e] = mvars[e] if interface['vars'] or interface['body']: block['interfaced'] = interfaced mblock = {'block': 'python 
module', 'body': [ @@ -2197,22 +2178,21 @@ def analyzecommon(block): if n in block['vars']: if 'attrspec' in block['vars'][n]: block['vars'][n]['attrspec'].append( - 'dimension(%s)' % (','.join(dims))) + f"dimension({','.join(dims)})") else: block['vars'][n]['attrspec'] = [ - 'dimension(%s)' % (','.join(dims))] + f"dimension({','.join(dims)})"] + elif dims: + block['vars'][n] = { + 'attrspec': [f"dimension({','.join(dims)})"]} else: - if dims: - block['vars'][n] = { - 'attrspec': ['dimension(%s)' % (','.join(dims))]} - else: - block['vars'][n] = {} + block['vars'][n] = {} if n not in commonvars: commonvars.append(n) else: n = e errmess( - 'analyzecommon: failed to extract "[()]" from "%s" in common /%s/.\n' % (e, k)) + f'analyzecommon: failed to extract "[()]" from "{e}" in common /{k}/.\n') comvars.append(n) block['common'][k] = comvars if 'commonvars' not in block: @@ -2276,7 +2256,7 @@ def buildimplicitrules(block): implicitrules = None if verbose > 1: outmess( - 'buildimplicitrules: no implicit rules for routine %s.\n' % repr(block['name'])) + f"buildimplicitrules: no implicit rules for routine {repr(block['name'])}.\n") else: for k in list(block['implicit'].keys()): if block['implicit'][k].get('typespec') not in ['static', 'automatic']: @@ -2291,7 +2271,8 @@ def myeval(e, g=None, l=None): r = eval(e, g, l) if type(r) in [int, float]: return r - raise ValueError('r=%r' % (r)) + raise ValueError(f'r={r!r}') + getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z', re.I) @@ -2337,27 +2318,23 @@ def getlincoef(e, xset): # e = a*x+b ; x in xset try: m1 = re_1.match(e) while m1: - ee = '%s(%s)%s' % ( - m1.group('before'), 0, m1.group('after')) + ee = f"{m1.group('before')}({0}){m1.group('after')}" m1 = re_1.match(ee) b = myeval(ee, {}, {}) m1 = re_1.match(e) while m1: - ee = '%s(%s)%s' % ( - m1.group('before'), 1, m1.group('after')) + ee = f"{m1.group('before')}({1}){m1.group('after')}" m1 = re_1.match(ee) a = myeval(ee, {}, {}) - b m1 = re_1.match(e) while m1: - ee = 
'%s(%s)%s' % ( - m1.group('before'), 0.5, m1.group('after')) + ee = f"{m1.group('before')}({0.5}){m1.group('after')}" m1 = re_1.match(ee) c = myeval(ee, {}, {}) # computing another point to be sure that expression is linear m1 = re_1.match(e) while m1: - ee = '%s(%s)%s' % ( - m1.group('before'), 1.5, m1.group('after')) + ee = f"{m1.group('before')}({1.5}){m1.group('after')}" m1 = re_1.match(ee) c2 = myeval(ee, {}, {}) if (a * 0.5 + b == c and a * 1.5 + b == c2): @@ -2387,7 +2364,7 @@ def _get_depend_dict(name, vars, deps): if w not in words: words.append(w) else: - outmess('_get_depend_dict: no dependence info for %s\n' % (repr(name))) + outmess(f'_get_depend_dict: no dependence info for {repr(name)}\n') words = [] deps[name] = words return words @@ -2457,11 +2434,10 @@ def _selected_real_kind_func(p, r=0, radix=0): if machine.startswith(('aarch64', 'alpha', 'arm64', 'loongarch', 'mips', 'power', 'ppc', 'riscv', 's390x', 'sparc')): if p <= 33: return 16 - else: - if p < 19: - return 10 - elif p <= 33: - return 16 + elif p < 19: + return 10 + elif p <= 33: + return 16 return -1 @@ -2517,7 +2493,7 @@ def get_parameters(vars, global_params={}): if not selected_kind_re.match(v): v_ = v.split('_') # In case there are additive parameters - if len(v_) > 1: + if len(v_) > 1: v = ''.join(v_[:-1]).lower().replace(v_[-1].lower(), '') # Currently this will not work for complex numbers. 
@@ -2539,7 +2515,7 @@ def get_parameters(vars, global_params={}): outmess(f'get_parameters[TODO]: ' f'implement evaluation of complex expression {v}\n') - dimspec = ([s.lstrip('dimension').strip() + dimspec = ([s.removeprefix('dimension').strip() for s in vars[n]['attrspec'] if s.startswith('dimension')] or [None])[0] @@ -2607,7 +2583,7 @@ def analyzevars(block): del vars[''] if 'attrspec' in block['vars']['']: gen = block['vars']['']['attrspec'] - for n in set(vars) | set(b['name'] for b in block['body']): + for n in set(vars) | {b['name'] for b in block['body']}: for k in ['public', 'private']: if k in gen: vars[n] = setattrspec(vars.get(n, {}), k) @@ -2640,7 +2616,7 @@ def analyzevars(block): if n[0] in list(attrrules.keys()): vars[n] = setattrspec(vars[n], attrrules[n[0]]) if 'typespec' not in vars[n]: - if not('attrspec' in vars[n] and 'external' in vars[n]['attrspec']): + if not ('attrspec' in vars[n] and 'external' in vars[n]['attrspec']): if implicitrules: ln0 = n[0].lower() for k in list(implicitrules[ln0].keys()): @@ -2735,8 +2711,8 @@ def analyzevars(block): d = param_parse(d, params) except (ValueError, IndexError, KeyError): outmess( - ('analyzevars: could not parse dimension for ' - f'variable {d!r}\n') + 'analyzevars: could not parse dimension for ' + f'variable {d!r}\n' ) dim_char = ':' if d == ':' else '*' @@ -2776,9 +2752,9 @@ def solve_v(s, a=a, b=b): # solve_v function here. solve_v = None all_symbols = set(dsize.symbols()) - v_deps = set( + v_deps = { s.data for s in all_symbols - if s.data in vars) + if s.data in vars} solver_and_deps[v] = solve_v, list(v_deps) # Note that dsize may contain symbols that are # not defined in block['vars']. 
Here we assume @@ -2816,9 +2792,9 @@ def compute_deps(v, deps): compute_deps(v1, deps) all_deps = set() compute_deps(v, all_deps) - if ((v in n_deps + if (v in n_deps or '=' in vars[v] - or 'depend' in vars[v])): + or 'depend' in vars[v]): # Skip a variable that # - n depends on # - has user-defined initialization expression @@ -2950,8 +2926,8 @@ def compute_deps(v, deps): vars[n] = setattrspec(vars[n], 'recursive') else: outmess( - 'analyzevars: prefix (%s) were not used\n' % repr(block['prefix'])) - if not block['block'] in ['module', 'pythonmodule', 'python module', 'block data']: + f"analyzevars: prefix ({repr(block['prefix'])}) were not used\n") + if block['block'] not in ['module', 'pythonmodule', 'python module', 'block data']: if 'commonvars' in block: neededvars = copy.copy(block['args'] + block['commonvars']) else: @@ -3014,7 +2990,7 @@ def param_eval(v, g_params, params, dimspec=None): # This is an array parameter. # First, we parse the dimension information - if len(dimspec) < 2 or dimspec[::len(dimspec)-1] != "()": + if len(dimspec) < 2 or dimspec[::len(dimspec) - 1] != "()": raise ValueError(f'param_eval: dimension {dimspec} can\'t be parsed') dimrange = dimspec[1:-1].split(',') if len(dimrange) == 1: @@ -3023,14 +2999,14 @@ def param_eval(v, g_params, params, dimspec=None): # now, dimrange is a list of 1 or 2 elements if len(dimrange) == 1: bound = param_parse(dimrange[0], params) - dimrange = range(1, int(bound)+1) + dimrange = range(1, int(bound) + 1) else: lbound = param_parse(dimrange[0], params) ubound = param_parse(dimrange[1], params) - dimrange = range(int(lbound), int(ubound)+1) + dimrange = range(int(lbound), int(ubound) + 1) else: - raise ValueError(f'param_eval: multidimensional array parameters ' - '{dimspec} not supported') + raise ValueError('param_eval: multidimensional array parameters ' + f'{dimspec} not supported') # Parse parameter value v = (v[2:-2] if v.startswith('(/') else v).split(',') @@ -3107,7 +3083,7 @@ def param_parse(d, 
params): if "(" in d: # this dimension expression is an array dname = d[:d.find("(")] - ddims = d[d.find("(")+1:d.rfind(")")] + ddims = d[d.find("(") + 1:d.rfind(")")] # this dimension expression is also a parameter; # parse it recursively index = int(param_parse(ddims, params)) @@ -3155,10 +3131,7 @@ def expr2name(a, block, args=[]): block['vars'][a] = at else: if a not in block['vars']: - if orig_a in block['vars']: - block['vars'][a] = block['vars'][orig_a] - else: - block['vars'][a] = {} + block['vars'][a] = block['vars'].get(orig_a, {}) if 'externals' in block and orig_a in block['externals'] + block['interfaced']: block['vars'][a] = setattrspec(block['vars'][a], 'external') return a @@ -3190,6 +3163,7 @@ def analyzeargs(block): block['vars'][block['result']] = {} return block + determineexprtype_re_1 = re.compile(r'\A\(.+?,.+?\)\Z', re.I) determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(?P\w+)|)\Z', re.I) determineexprtype_re_3 = re.compile( @@ -3220,13 +3194,13 @@ def determineexprtype(expr, vars, rules={}): if m: if 'name' in m.groupdict() and m.group('name'): outmess( - 'determineexprtype: selected kind types not supported (%s)\n' % repr(expr)) + f'determineexprtype: selected kind types not supported ({repr(expr)})\n') return {'typespec': 'integer'} m = determineexprtype_re_3.match(expr) if m: if 'name' in m.groupdict() and m.group('name'): outmess( - 'determineexprtype: selected kind types not supported (%s)\n' % repr(expr)) + f'determineexprtype: selected kind types not supported ({repr(expr)})\n') return {'typespec': 'real'} for op in ['+', '-', '*', '/']: for e in [x.strip() for x in markoutercomma(expr, comma=op).split('@' + op + '@')]: @@ -3249,7 +3223,7 @@ def determineexprtype(expr, vars, rules={}): return {'typespec': 'character', 'charselector': {'*': '*'}} if not t: outmess( - 'determineexprtype: could not determine expressions (%s) type.\n' % (repr(expr))) + f'determineexprtype: could not determine expressions ({repr(expr)}) type.\n') return 
t ###### @@ -3285,7 +3259,7 @@ def crack2fortrangen(block, tab='\n', as_interface=False): if not isintent_callback(vars[a]): argsl.append(a) if block['block'] == 'function' or argsl: - args = '(%s)' % ','.join(argsl) + args = f"({','.join(argsl)})" f2pyenhancements = '' if 'f2pyenhancements' in block: for k in list(block['f2pyenhancements'].keys()): @@ -3308,7 +3282,7 @@ def crack2fortrangen(block, tab='\n', as_interface=False): name = '' result = '' if 'result' in block: - result = ' result (%s)' % block['result'] + result = f" result ({block['result']})" if block['result'] not in argsl: argsl.append(block['result']) body = crack2fortrangen(block['body'], tab + tabchar, as_interface=as_interface) @@ -3316,12 +3290,11 @@ def crack2fortrangen(block, tab='\n', as_interface=False): block, block['vars'], argsl, tab + tabchar, as_interface=as_interface) mess = '' if 'from' in block and not as_interface: - mess = '! in %s' % block['from'] + mess = f"! in {block['from']}" if 'entry' in block: entry_stmts = '' for k, i in list(block['entry'].items()): - entry_stmts = '%s%sentry %s(%s)' \ - % (entry_stmts, tab + tabchar, k, ','.join(i)) + entry_stmts = f"{entry_stmts}{tab + tabchar}entry {k}({','.join(i)})" body = body + entry_stmts if blocktype == 'block data' and name == '_BLOCK_DATA_': name = '' @@ -3334,30 +3307,30 @@ def common2fortran(common, tab=''): ret = '' for k in list(common.keys()): if k == '_BLNK_': - ret = '%s%scommon %s' % (ret, tab, ','.join(common[k])) + ret = f"{ret}{tab}common {','.join(common[k])}" else: - ret = '%s%scommon /%s/ %s' % (ret, tab, k, ','.join(common[k])) + ret = f"{ret}{tab}common /{k}/ {','.join(common[k])}" return ret def use2fortran(use, tab=''): ret = '' for m in list(use.keys()): - ret = '%s%suse %s,' % (ret, tab, m) + ret = f'{ret}{tab}use {m},' if use[m] == {}: if ret and ret[-1] == ',': ret = ret[:-1] continue if 'only' in use[m] and use[m]['only']: - ret = '%s only:' % (ret) + ret = f'{ret} only:' if 'map' in use[m] and 
use[m]['map']: c = ' ' for k in list(use[m]['map'].keys()): if k == use[m]['map'][k]: - ret = '%s%s%s' % (ret, c, k) + ret = f'{ret}{c}{k}' c = ',' else: - ret = '%s%s%s=>%s' % (ret, c, k, use[m]['map'][k]) + ret = f"{ret}{c}{k}=>{use[m]['map'][k]}" c = ',' if ret and ret[-1] == ',': ret = ret[:-1] @@ -3369,7 +3342,7 @@ def true_intent_list(var): ret = [] for intent in lst: try: - f = globals()['isintent_%s' % intent] + f = globals()[f'isintent_{intent}'] except KeyError: pass else: @@ -3392,7 +3365,7 @@ def vars2fortran(block, vars, args, tab='', as_interface=False): nout.append(a) else: errmess( - 'vars2fortran: Confused?!: "%s" is not defined in vars.\n' % a) + f'vars2fortran: Confused?!: "{a}" is not defined in vars.\n') if 'varnames' in block: nout.extend(block['varnames']) if not as_interface: @@ -3404,13 +3377,13 @@ def vars2fortran(block, vars, args, tab='', as_interface=False): for d in vars[a]['depend']: if d in vars and 'depend' in vars[d] and a in vars[d]['depend']: errmess( - 'vars2fortran: Warning: cross-dependence between variables "%s" and "%s"\n' % (a, d)) + f'vars2fortran: Warning: cross-dependence between variables "{a}" and "{d}\"\n') if 'externals' in block and a in block['externals']: if isintent_callback(vars[a]): - ret = '%s%sintent(callback) %s' % (ret, tab, a) - ret = '%s%sexternal %s' % (ret, tab, a) + ret = f'{ret}{tab}intent(callback) {a}' + ret = f'{ret}{tab}external {a}' if isoptional(vars[a]): - ret = '%s%soptional %s' % (ret, tab, a) + ret = f'{ret}{tab}optional {a}' if a in vars and 'typespec' not in vars[a]: continue cont = 1 @@ -3422,7 +3395,7 @@ def vars2fortran(block, vars, args, tab='', as_interface=False): continue if a not in vars: show(vars) - outmess('vars2fortran: No definition for argument "%s".\n' % a) + outmess(f'vars2fortran: No definition for argument "{a}".\n') continue if a == block['name']: if block['block'] != 'function' or block.get('result'): @@ -3434,14 +3407,14 @@ def vars2fortran(block, vars, args, tab='', 
as_interface=False): if 'typespec' not in vars[a]: if 'attrspec' in vars[a] and 'external' in vars[a]['attrspec']: if a in args: - ret = '%s%sexternal %s' % (ret, tab, a) + ret = f'{ret}{tab}external {a}' continue show(vars[a]) - outmess('vars2fortran: No typespec for argument "%s".\n' % a) + outmess(f'vars2fortran: No typespec for argument "{a}".\n') continue vardef = vars[a]['typespec'] if vardef == 'type' and 'typename' in vars[a]: - vardef = '%s(%s)' % (vardef, vars[a]['typename']) + vardef = f"{vardef}({vars[a]['typename']})" selector = {} if 'kindselector' in vars[a]: selector = vars[a]['kindselector'] @@ -3449,18 +3422,17 @@ def vars2fortran(block, vars, args, tab='', as_interface=False): selector = vars[a]['charselector'] if '*' in selector: if selector['*'] in ['*', ':']: - vardef = '%s*(%s)' % (vardef, selector['*']) + vardef = f"{vardef}*({selector['*']})" else: - vardef = '%s*%s' % (vardef, selector['*']) - else: - if 'len' in selector: - vardef = '%s(len=%s' % (vardef, selector['len']) - if 'kind' in selector: - vardef = '%s,kind=%s)' % (vardef, selector['kind']) - else: - vardef = '%s)' % (vardef) - elif 'kind' in selector: - vardef = '%s(kind=%s)' % (vardef, selector['kind']) + vardef = f"{vardef}*{selector['*']}" + elif 'len' in selector: + vardef = f"{vardef}(len={selector['len']}" + if 'kind' in selector: + vardef = f"{vardef},kind={selector['kind']})" + else: + vardef = f'{vardef})' + elif 'kind' in selector: + vardef = f"{vardef}(kind={selector['kind']})" c = ' ' if 'attrspec' in vars[a]: attr = [l for l in vars[a]['attrspec'] @@ -3473,36 +3445,34 @@ def vars2fortran(block, vars, args, tab='', as_interface=False): # intent(out) to resolve the conflict. 
attr.remove('intent(out)') if attr: - vardef = '%s, %s' % (vardef, ','.join(attr)) + vardef = f"{vardef}, {','.join(attr)}" c = ',' if 'dimension' in vars[a]: - vardef = '%s%sdimension(%s)' % ( - vardef, c, ','.join(vars[a]['dimension'])) + vardef = f"{vardef}{c}dimension({','.join(vars[a]['dimension'])})" c = ',' if 'intent' in vars[a]: lst = true_intent_list(vars[a]) if lst: - vardef = '%s%sintent(%s)' % (vardef, c, ','.join(lst)) + vardef = f"{vardef}{c}intent({','.join(lst)})" c = ',' if 'check' in vars[a]: - vardef = '%s%scheck(%s)' % (vardef, c, ','.join(vars[a]['check'])) + vardef = f"{vardef}{c}check({','.join(vars[a]['check'])})" c = ',' if 'depend' in vars[a]: - vardef = '%s%sdepend(%s)' % ( - vardef, c, ','.join(vars[a]['depend'])) + vardef = f"{vardef}{c}depend({','.join(vars[a]['depend'])})" c = ',' if '=' in vars[a]: v = vars[a]['='] if vars[a]['typespec'] in ['complex', 'double complex']: try: v = eval(v) - v = '(%s,%s)' % (v.real, v.imag) + v = f'({v.real},{v.imag})' except Exception: pass - vardef = '%s :: %s=%s' % (vardef, a, v) + vardef = f'{vardef} :: {a}={v}' else: - vardef = '%s :: %s' % (vardef, a) - ret = '%s%s%s' % (ret, tab, vardef) + vardef = f'{vardef} :: {a}' + ret = f'{ret}{tab}{vardef}' return ret ###### @@ -3596,16 +3566,16 @@ def visit(item, parents, result, *args, **kwargs): new_result = [] for index, value in enumerate(obj): new_index, new_item = traverse((index, value), visit, - parents=parents + [parent], - result=result, *args, **kwargs) + parents + [parent], result, + *args, **kwargs) if new_index is not None: new_result.append(new_item) elif isinstance(obj, dict): - new_result = dict() + new_result = {} for key, value in obj.items(): new_key, new_value = traverse((key, value), visit, - parents=parents + [parent], - result=result, *args, **kwargs) + parents + [parent], result, + *args, **kwargs) if new_key is not None: new_result[new_key] = new_value else: @@ -3721,7 +3691,7 @@ def fix_usage(varname, value): elif l == '-m': f3 
= 1 elif l[0] == '-': - errmess('Unknown option %s\n' % repr(l)) + errmess(f'Unknown option {repr(l)}\n') elif f2: f2 = 0 pyffilename = l @@ -3747,7 +3717,7 @@ def fix_usage(varname, value): postlist = crackfortran(files) if pyffilename: - outmess('Writing fortran code to file %s\n' % repr(pyffilename), 0) + outmess(f'Writing fortran code to file {repr(pyffilename)}\n', 0) pyf = crack2fortran(postlist) with open(pyffilename, 'w') as f: f.write(pyf) diff --git a/numpy/f2py/crackfortran.pyi b/numpy/f2py/crackfortran.pyi new file mode 100644 index 000000000000..09213e156636 --- /dev/null +++ b/numpy/f2py/crackfortran.pyi @@ -0,0 +1,254 @@ +import re +from _typeshed import StrOrBytesPath, StrPath +from collections.abc import Callable, Iterable, Mapping +from typing import IO, Any, Concatenate, Final, Literal as L, Never, overload + +from .__version__ import version +from .auxfuncs import isintent_dict as isintent_dict + +### + +type _VisitResult = list[Any] | dict[str, Any] | None +type _VisitItem = tuple[str | None, _VisitResult] +type _VisitFunc[**Tss] = Callable[Concatenate[_VisitItem, list[_VisitItem], _VisitResult, Tss], _VisitItem | None] + +### + +COMMON_FREE_EXTENSIONS: Final[list[str]] = ... +COMMON_FIXED_EXTENSIONS: Final[list[str]] = ... 
+ +f2py_version: Final = version +tabchar: Final[str] = " " + +f77modulename: str +pyffilename: str +sourcecodeform: L["fix", "free"] +strictf77: L[0, 1] +quiet: L[0, 1] +verbose: L[0, 1, 2] +skipemptyends: L[0, 1] +ignorecontains: L[1] +dolowercase: L[1] + +beginpattern: str | re.Pattern[str] +currentfilename: str +filepositiontext: str +expectbegin: L[0, 1] +gotnextfile: L[0, 1] +neededmodule: int +skipblocksuntil: int +groupcounter: int +groupname: dict[int, str] | str +groupcache: dict[int, dict[str, Any]] | None +grouplist: dict[int, list[dict[str, Any]]] | None +previous_context: tuple[str, str, int] | None + +f90modulevars: dict[str, dict[str, Any]] = {} +debug: list[Never] = [] +include_paths: list[str] = [] +onlyfuncs: list[str] = [] +skipfuncs: list[str] = [] +skipfunctions: Final[list[str]] = [] +usermodules: Final[list[dict[str, Any]]] = [] + +defaultimplicitrules: Final[dict[str, dict[str, str]]] = {} +badnames: Final[dict[str, str]] = {} +invbadnames: Final[dict[str, str]] = {} + +beforethisafter: Final[str] = ... +fortrantypes: Final[str] = ... +groupbegins77: Final[str] = ... +groupbegins90: Final[str] = ... +groupends: Final[str] = ... +endifs: Final[str] = ... +moduleprocedures: Final[str] = ... + +beginpattern77: Final[tuple[re.Pattern[str], L["begin"]]] = ... +beginpattern90: Final[tuple[re.Pattern[str], L["begin"]]] = ... +callpattern: Final[tuple[re.Pattern[str], L["call"]]] = ... +callfunpattern: Final[tuple[re.Pattern[str], L["callfun"]]] = ... +commonpattern: Final[tuple[re.Pattern[str], L["common"]]] = ... +containspattern: Final[tuple[re.Pattern[str], L["contains"]]] = ... +datapattern: Final[tuple[re.Pattern[str], L["data"]]] = ... +dimensionpattern: Final[tuple[re.Pattern[str], L["dimension"]]] = ... +endifpattern: Final[tuple[re.Pattern[str], L["endif"]]] = ... +endpattern: Final[tuple[re.Pattern[str], L["end"]]] = ... +entrypattern: Final[tuple[re.Pattern[str], L["entry"]]] = ... 
+externalpattern: Final[tuple[re.Pattern[str], L["external"]]] = ... +f2pyenhancementspattern: Final[tuple[re.Pattern[str], L["f2pyenhancements"]]] = ... +formatpattern: Final[tuple[re.Pattern[str], L["format"]]] = ... +functionpattern: Final[tuple[re.Pattern[str], L["begin"]]] = ... +implicitpattern: Final[tuple[re.Pattern[str], L["implicit"]]] = ... +intentpattern: Final[tuple[re.Pattern[str], L["intent"]]] = ... +intrinsicpattern: Final[tuple[re.Pattern[str], L["intrinsic"]]] = ... +optionalpattern: Final[tuple[re.Pattern[str], L["optional"]]] = ... +moduleprocedurepattern: Final[tuple[re.Pattern[str], L["moduleprocedure"]]] = ... +multilinepattern: Final[tuple[re.Pattern[str], L["multiline"]]] = ... +parameterpattern: Final[tuple[re.Pattern[str], L["parameter"]]] = ... +privatepattern: Final[tuple[re.Pattern[str], L["private"]]] = ... +publicpattern: Final[tuple[re.Pattern[str], L["public"]]] = ... +requiredpattern: Final[tuple[re.Pattern[str], L["required"]]] = ... +subroutinepattern: Final[tuple[re.Pattern[str], L["begin"]]] = ... +typespattern: Final[tuple[re.Pattern[str], L["type"]]] = ... +usepattern: Final[tuple[re.Pattern[str], L["use"]]] = ... + +analyzeargs_re_1: Final[re.Pattern[str]] = ... +callnameargspattern: Final[re.Pattern[str]] = ... +charselector: Final[re.Pattern[str]] = ... +crackline_bind_1: Final[re.Pattern[str]] = ... +crackline_bindlang: Final[re.Pattern[str]] = ... +crackline_re_1: Final[re.Pattern[str]] = ... +determineexprtype_re_1: Final[re.Pattern[str]] = ... +determineexprtype_re_2: Final[re.Pattern[str]] = ... +determineexprtype_re_3: Final[re.Pattern[str]] = ... +determineexprtype_re_4: Final[re.Pattern[str]] = ... +determineexprtype_re_5: Final[re.Pattern[str]] = ... +getlincoef_re_1: Final[re.Pattern[str]] = ... +kindselector: Final[re.Pattern[str]] = ... +lenarraypattern: Final[re.Pattern[str]] = ... +lenkindpattern: Final[re.Pattern[str]] = ... +namepattern: Final[re.Pattern[str]] = ... 
+nameargspattern: Final[re.Pattern[str]] = ... +operatorpattern: Final[re.Pattern[str]] = ... +real16pattern: Final[re.Pattern[str]] = ... +real8pattern: Final[re.Pattern[str]] = ... +selectpattern: Final[re.Pattern[str]] = ... +typedefpattern: Final[re.Pattern[str]] = ... +typespattern4implicit: Final[re.Pattern[str]] = ... +word_pattern: Final[re.Pattern[str]] = ... + +post_processing_hooks: Final[list[_VisitFunc[...]]] = [] + +# +def outmess(line: str, flag: int = 1) -> None: ... +def reset_global_f2py_vars() -> None: ... + +# +def rmbadname1(name: str) -> str: ... +def undo_rmbadname1(name: str) -> str: ... +def rmbadname(names: Iterable[str]) -> list[str]: ... +def undo_rmbadname(names: Iterable[str]) -> list[str]: ... + +# +def openhook(filename: StrPath, mode: str) -> IO[Any]: ... +def is_free_format(fname: StrPath) -> bool: ... +def readfortrancode( + ffile: StrOrBytesPath | Iterable[StrOrBytesPath], + dowithline: Callable[[str, int], object] = ..., + istop: int = 1, +) -> None: ... + +# +def split_by_unquoted(line: str, characters: str) -> tuple[str, str]: ... + +# +def crackline(line: str, reset: int = 0) -> None: ... +def markouterparen(line: str) -> str: ... +def markoutercomma(line: str, comma: str = ",") -> str: ... +def unmarkouterparen(line: str) -> str: ... +def appenddecl(decl: Mapping[str, object] | None, decl2: Mapping[str, object] | None, force: int = 1) -> dict[str, Any]: ... + +# +def parse_name_for_bind(line: str) -> tuple[str, str | None]: ... +def analyzeline(m: re.Match[str], case: str, line: str) -> None: ... +def appendmultiline(group: dict[str, Any], context_name: str, ml: str) -> None: ... +def cracktypespec0(typespec: str, ll: str | None) -> tuple[str, str | None, str | None, str | None]: ... + +# +def removespaces(expr: str) -> str: ... +def markinnerspaces(line: str) -> str: ... +def updatevars(typespec: str, selector: str | None, attrspec: str, entitydecl: str) -> str: ... 
+def cracktypespec(typespec: str, selector: str | None) -> tuple[dict[str, str] | None, dict[str, str] | None, str | None]: ... + +# +def setattrspec(decl: dict[str, list[str]], attr: str | None, force: int = 0) -> dict[str, list[str]]: ... +def setkindselector(decl: dict[str, dict[str, str]], sel: dict[str, str], force: int = 0) -> dict[str, dict[str, str]]: ... +def setcharselector(decl: dict[str, dict[str, str]], sel: dict[str, str], force: int = 0) -> dict[str, dict[str, str]]: ... +def getblockname(block: Mapping[str, object], unknown: str = "unknown") -> str: ... +def setmesstext(block: Mapping[str, object]) -> None: ... +def get_usedict(block: Mapping[str, object]) -> dict[str, str]: ... +def get_useparameters(block: Mapping[str, object], param_map: Mapping[str, str] | None = None) -> dict[str, str]: ... + +# +@overload +def postcrack2( + block: dict[str, Any], + tab: str = "", + param_map: Mapping[str, str] | None = None, +) -> dict[str, str | Any]: ... +@overload +def postcrack2( + block: list[dict[str, Any]], + tab: str = "", + param_map: Mapping[str, str] | None = None, +) -> list[dict[str, str | Any]]: ... + +# +@overload +def postcrack(block: dict[str, Any], args: Mapping[str, str] | None = None, tab: str = "") -> dict[str, Any]: ... +@overload +def postcrack(block: list[dict[str, str]], args: Mapping[str, str] | None = None, tab: str = "") -> list[dict[str, Any]]: ... + +# +def sortvarnames(vars: Mapping[str, object]) -> list[str]: ... +def analyzecommon(block: Mapping[str, object]) -> dict[str, Any]: ... +def analyzebody(block: Mapping[str, object], args: Mapping[str, str], tab: str = "") -> list[dict[str, Any]]: ... +def buildimplicitrules(block: Mapping[str, object]) -> tuple[dict[str, dict[str, str]], dict[str, str]]: ... +def myeval(e: str, g: object | None = None, l: object | None = None) -> float: ... + +# +def getlincoef(e: str, xset: set[str]) -> tuple[float | None, float | None, str | None]: ... 
+ +# +def get_sorted_names(vars: Mapping[str, Mapping[str, str]]) -> list[str]: ... +def get_parameters(vars: Mapping[str, Mapping[str, str]], global_params: dict[str, str] = {}) -> dict[str, str]: ... + +# +def analyzevars(block: Mapping[str, Any]) -> dict[str, dict[str, str]]: ... + +# +def param_eval(v: str, g_params: dict[str, Any], params: Mapping[str, object], dimspec: str | None = None) -> dict[str, Any]: ... +def param_parse(d: str, params: Mapping[str, str]) -> str: ... +def expr2name(a: str, block: Mapping[str, object], args: list[str] = []) -> str: ... +def analyzeargs(block: Mapping[str, object]) -> dict[str, Any]: ... + +# +def determineexprtype(expr: str, vars: Mapping[str, object], rules: dict[str, Any] = {}) -> dict[str, Any]: ... +def crack2fortrangen(block: Mapping[str, object], tab: str = "\n", as_interface: bool = False) -> str: ... +def common2fortran(common: Mapping[str, object], tab: str = "") -> str: ... +def use2fortran(use: Mapping[str, object], tab: str = "") -> str: ... +def true_intent_list(var: dict[str, list[str]]) -> list[str]: ... +def vars2fortran( + block: Mapping[str, Mapping[str, object]], + vars: Mapping[str, object], + args: Mapping[str, str], + tab: str = "", + as_interface: bool = False, +) -> str: ... + +# +def crackfortran(files: StrOrBytesPath | Iterable[StrOrBytesPath]) -> list[dict[str, Any]]: ... +def crack2fortran(block: Mapping[str, Any]) -> str: ... + +# +def traverse[**Tss]( + obj: tuple[str | None, _VisitResult], + visit: _VisitFunc[Tss], + parents: list[tuple[str | None, _VisitResult]] = [], + result: list[Any] | dict[str, Any] | None = None, + *args: Tss.args, + **kwargs: Tss.kwargs, +) -> _VisitItem | _VisitResult: ... + +# +def character_backward_compatibility_hook( + item: _VisitItem, + parents: list[_VisitItem], + result: object, # ignored + *args: object, # ignored + **kwargs: object, # ignored +) -> _VisitItem | None: ... 
+ +# namespace pollution +c: str +n: str diff --git a/numpy/f2py/diagnose.py b/numpy/f2py/diagnose.py index 86d7004abad4..3e2c53b0ec1d 100644 --- a/numpy/f2py/diagnose.py +++ b/numpy/f2py/diagnose.py @@ -4,19 +4,13 @@ import tempfile -def run_command(cmd): - print('Running %r:' % (cmd)) - os.system(cmd) - print('------') - - def run(): _path = os.getcwd() os.chdir(tempfile.gettempdir()) print('------') - print('os.name=%r' % (os.name)) + print(f'os.name={os.name!r}') print('------') - print('sys.platform=%r' % (sys.platform)) + print(f'sys.platform={sys.platform!r}') print('------') print('sys.version:') print(sys.version) @@ -24,15 +18,15 @@ def run(): print('sys.prefix:') print(sys.prefix) print('------') - print('sys.path=%r' % (':'.join(sys.path))) + print(f"sys.path={':'.join(sys.path)!r}") print('------') try: import numpy - has_newnumpy = 1 + has_numpy = 1 except ImportError as e: - print('Failed to import new numpy:', e) - has_newnumpy = 0 + print('Failed to import numpy:', e) + has_numpy = 0 try: from numpy.f2py import f2py2e @@ -41,21 +35,9 @@ def run(): print('Failed to import f2py2e:', e) has_f2py2e = 0 - try: - import numpy.distutils - has_numpy_distutils = 2 - except ImportError: + if has_numpy: try: - import numpy_distutils - has_numpy_distutils = 1 - except ImportError as e: - print('Failed to import numpy_distutils:', e) - has_numpy_distutils = 0 - - if has_newnumpy: - try: - print('Found new numpy version %r in %s' % - (numpy.__version__, numpy.__file__)) + print(f'Found numpy version {numpy.__version__!r} in {numpy.__file__}') except Exception as msg: print('error:', msg) print('------') @@ -68,87 +50,8 @@ def run(): print('error:', msg) print('------') - if has_numpy_distutils: - try: - if has_numpy_distutils == 2: - print('Found numpy.distutils version %r in %r' % ( - numpy.distutils.__version__, - numpy.distutils.__file__)) - else: - print('Found numpy_distutils version %r in %r' % ( - 
numpy_distutils.numpy_distutils_version.numpy_distutils_version, - numpy_distutils.__file__)) - print('------') - except Exception as msg: - print('error:', msg) - print('------') - try: - if has_numpy_distutils == 1: - print( - 'Importing numpy_distutils.command.build_flib ...', end=' ') - import numpy_distutils.command.build_flib as build_flib - print('ok') - print('------') - try: - print( - 'Checking availability of supported Fortran compilers:') - for compiler_class in build_flib.all_compilers: - compiler_class(verbose=1).is_available() - print('------') - except Exception as msg: - print('error:', msg) - print('------') - except Exception as msg: - print( - 'error:', msg, '(ignore it, build_flib is obsolute for numpy.distutils 0.2.2 and up)') - print('------') - try: - if has_numpy_distutils == 2: - print('Importing numpy.distutils.fcompiler ...', end=' ') - import numpy.distutils.fcompiler as fcompiler - else: - print('Importing numpy_distutils.fcompiler ...', end=' ') - import numpy_distutils.fcompiler as fcompiler - print('ok') - print('------') - try: - print('Checking availability of supported Fortran compilers:') - fcompiler.show_fcompilers() - print('------') - except Exception as msg: - print('error:', msg) - print('------') - except Exception as msg: - print('error:', msg) - print('------') - try: - if has_numpy_distutils == 2: - print('Importing numpy.distutils.cpuinfo ...', end=' ') - from numpy.distutils.cpuinfo import cpuinfo - print('ok') - print('------') - else: - try: - print( - 'Importing numpy_distutils.command.cpuinfo ...', end=' ') - from numpy_distutils.command.cpuinfo import cpuinfo - print('ok') - print('------') - except Exception as msg: - print('error:', msg, '(ignore it)') - print('Importing numpy_distutils.cpuinfo ...', end=' ') - from numpy_distutils.cpuinfo import cpuinfo - print('ok') - print('------') - cpu = cpuinfo() - print('CPU information:', end=' ') - for name in dir(cpuinfo): - if name[0] == '_' and name[1] != '_' and 
getattr(cpu, name[1:])(): - print(name[1:], end=' ') - print('------') - except Exception as msg: - print('error:', msg) - print('------') os.chdir(_path) + + if __name__ == "__main__": run() diff --git a/numpy/f2py/diagnose.pyi b/numpy/f2py/diagnose.pyi new file mode 100644 index 000000000000..b88194ac6bff --- /dev/null +++ b/numpy/f2py/diagnose.pyi @@ -0,0 +1 @@ +def run() -> None: ... diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py old mode 100755 new mode 100644 index f5fab23ab867..eb5a39e088ff --- a/numpy/f2py/f2py2e.py +++ b/numpy/f2py/f2py2e.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python3 """ f2py2e - Fortran to Python C/API generator. 2nd Edition. @@ -11,32 +10,32 @@ NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. """ -import sys +import argparse import os import pprint import re -from pathlib import Path -from itertools import dropwhile -import argparse -import copy - -from . import crackfortran -from . import rules -from . import cb_rules -from . import auxfuncs -from . import cfuncs -from . import f90mod_rules -from . import __version__ -from . import capi_maps +import sys + from numpy.f2py._backends import f2py_build_generator +from . import ( + __version__, + auxfuncs, + capi_maps, + cb_rules, + cfuncs, + crackfortran, + f90mod_rules, + rules, +) +from .cfuncs import errmess + f2py_version = __version__.version numpy_version = __version__.version -errmess = sys.stderr.write + # outmess=sys.stdout.write show = pprint.pprint outmess = auxfuncs.outmess -MESON_ONLY_VER = (sys.version_info >= (3, 12)) __usage__ =\ f"""Usage: @@ -106,13 +105,17 @@ functions. --wrap-functions is default because it ensures maximum portability/compiler independence. + --[no-]freethreading-compatible Create a module that declares it does or + doesn't require the GIL. The default is + --freethreading-compatible for backward + compatibility. 
Inspect the Fortran code you are wrapping for + thread safety issues before passing + --no-freethreading-compatible, as f2py does not analyze + fortran code for thread safety issues. + --include-paths ::... Search include files from the given directories. - --help-link [..] List system resources found by system_info.py. See also - --link- switch below. [..] is optional list - of resources names. E.g. try 'f2py --help-link lapack_opt'. - --f2cmap Load Fortran-to-Python KIND specification from the given file. Default: .f2py_f2cmap in current directory. @@ -199,7 +202,7 @@ def scaninputline(inputline): dorestdoc = 0 wrapfuncs = 1 buildpath = '.' - include_paths, inputline = get_includes(inputline) + include_paths, freethreading_compatible, inputline = get_newer_options(inputline) signsfile, modulename = None, None options = {'buildpath': buildpath, 'coutput': None, @@ -262,7 +265,7 @@ def scaninputline(inputline): elif l == '--skip-empty-wrappers': emptygen = False elif l[0] == '-': - errmess('Unknown option %s\n' % repr(l)) + errmess(f'Unknown option {repr(l)}\n') sys.exit() elif f2: f2 = 0 @@ -298,13 +301,13 @@ def scaninputline(inputline): sys.exit() if not os.path.isdir(buildpath): if not verbose: - outmess('Creating build directory %s\n' % (buildpath)) + outmess(f'Creating build directory {buildpath}\n') os.mkdir(buildpath) if signsfile: signsfile = os.path.join(buildpath, signsfile) if signsfile and os.path.isfile(signsfile) and 'h-overwrite' not in options: errmess( - 'Signature file "%s" exists!!! Use --overwrite-signature to overwrite.\n' % (signsfile)) + f'Signature file "{signsfile}" exists!!! 
Use --overwrite-signature to overwrite.\n') sys.exit() options['emptygen'] = emptygen @@ -327,6 +330,7 @@ def scaninputline(inputline): options['wrapfuncs'] = wrapfuncs options['buildpath'] = buildpath options['include_paths'] = include_paths + options['requires_gil'] = not freethreading_compatible options.setdefault('f2cmap_file', None) return files, options @@ -345,7 +349,7 @@ def callcrackfortran(files, options): crackfortran.dolowercase = options['do-lower'] postlist = crackfortran.crackfortran(files) if 'signsfile' in options: - outmess('Saving signatures to file "%s"\n' % (options['signsfile'])) + outmess(f"Saving signatures to file \"{options['signsfile']}\"\n") pyf = crackfortran.crack2fortran(postlist) if options['signsfile'][-6:] == 'stdout': sys.stdout.write(pyf) @@ -354,16 +358,23 @@ def callcrackfortran(files, options): f.write(pyf) if options["coutput"] is None: for mod in postlist: - mod["coutput"] = "%smodule.c" % mod["name"] + mod["coutput"] = f"{mod['name']}module.c" else: for mod in postlist: mod["coutput"] = options["coutput"] if options["f2py_wrapper_output"] is None: for mod in postlist: - mod["f2py_wrapper_output"] = "%s-f2pywrappers.f" % mod["name"] + mod["f2py_wrapper_output"] = f"{mod['name']}-f2pywrappers.f" else: for mod in postlist: mod["f2py_wrapper_output"] = options["f2py_wrapper_output"] + for mod in postlist: + if options["requires_gil"]: + mod['gil_used'] = 'Py_MOD_GIL_USED' + else: + mod['gil_used'] = 'Py_MOD_GIL_NOT_USED' + # gh-26718 Reset global + crackfortran.f77modulename = '' return postlist @@ -468,19 +479,19 @@ def run_main(comline_list): isusedby[u] = [] isusedby[u].append(plist['name']) for plist in postlist: - if plist['block'] == 'python module' and '__user__' in plist['name']: - if plist['name'] in isusedby: + module_name = plist['name'] + if plist['block'] == 'python module' and '__user__' in module_name: + if module_name in isusedby: # if not quiet: + usedby = ','.join(f'"{s}"' for s in isusedby[module_name]) 
outmess( - f'Skipping Makefile build for module "{plist["name"]}" ' - 'which is used by {}\n'.format( - ','.join(f'"{s}"' for s in isusedby[plist['name']]))) + f'Skipping Makefile build for module "{module_name}" ' + f'which is used by {usedby}\n') if 'signsfile' in options: if options['verbose'] > 1: outmess( 'Stopping. Edit the signature file and then run f2py on the signature file: ') - outmess('%s %s\n' % - (os.path.basename(sys.argv[0]), options['signsfile'])) + outmess(f"{os.path.basename(sys.argv[0])} {options['signsfile']}\n") return for plist in postlist: if plist['block'] != 'python module': @@ -528,27 +539,28 @@ def __call__(self, parser, namespace, values, option_string=None): include_paths_set = set(getattr(namespace, 'include_paths', []) or []) if option_string == "--include_paths": outmess("Use --include-paths or -I instead of --include_paths which will be removed") - if option_string == "--include-paths" or option_string == "--include_paths": + if option_string in {"--include-paths", "--include_paths"}: include_paths_set.update(values.split(':')) else: include_paths_set.add(values) - setattr(namespace, 'include_paths', list(include_paths_set)) + namespace.include_paths = list(include_paths_set) -def include_parser(): +def f2py_parser(): parser = argparse.ArgumentParser(add_help=False) parser.add_argument("-I", dest="include_paths", action=CombineIncludePaths) parser.add_argument("--include-paths", dest="include_paths", action=CombineIncludePaths) parser.add_argument("--include_paths", dest="include_paths", action=CombineIncludePaths) + parser.add_argument("--freethreading-compatible", dest="ftcompat", action=argparse.BooleanOptionalAction) return parser -def get_includes(iline): +def get_newer_options(iline): iline = (' '.join(iline)).split() - parser = include_parser() + parser = f2py_parser() args, remain = parser.parse_known_args(iline) ipaths = args.include_paths if args.include_paths is None: ipaths = [] - return ipaths, remain + return ipaths, 
args.ftcompat, remain def make_f2py_compile_parser(): parser = argparse.ArgumentParser(add_help=False) @@ -566,7 +578,7 @@ def preparse_sysargv(): sys.argv = [sys.argv[0]] + remaining_argv backend_key = args.backend - if MESON_ONLY_VER and backend_key == 'distutils': + if backend_key == 'distutils': outmess("Cannot use distutils backend with Python>=3.12," " using meson backend instead.\n") backend_key = "meson" @@ -615,7 +627,7 @@ def run_compile(): sysinfo_flags = [f[7:] for f in sysinfo_flags] _reg2 = re.compile( - r'--((no-|)(wrap-functions|lower)|debug-capi|quiet|skip-empty-wrappers)|-include') + r'--((no-|)(wrap-functions|lower|freethreading-compatible)|debug-capi|quiet|skip-empty-wrappers)|-include') f2py_flags = [_m for _m in sys.argv[1:] if _reg2.match(_m)] sys.argv = [_m for _m in sys.argv if _m not in f2py_flags] f2py_flags2 = [] @@ -635,36 +647,21 @@ def run_compile(): r'--((f(90)?compiler(-exec|)|compiler)=|help-compiler)') flib_flags = [_m for _m in sys.argv[1:] if _reg3.match(_m)] sys.argv = [_m for _m in sys.argv if _m not in flib_flags] - _reg4 = re.compile( - r'--((f(77|90)(flags|exec)|opt|arch)=|(debug|noopt|noarch|help-fcompiler))') - fc_flags = [_m for _m in sys.argv[1:] if _reg4.match(_m)] - sys.argv = [_m for _m in sys.argv if _m not in fc_flags] + # TODO: Once distutils is dropped completely, i.e. 
min_ver >= 3.12, unify into --fflags + reg_f77_f90_flags = re.compile(r'--f(77|90)flags=') + reg_distutils_flags = re.compile(r'--((f(77|90)exec|opt|arch)=|(debug|noopt|noarch|help-fcompiler))') + fc_flags = [_m for _m in sys.argv[1:] if reg_f77_f90_flags.match(_m)] + distutils_flags = [_m for _m in sys.argv[1:] if reg_distutils_flags.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in (fc_flags + distutils_flags)] del_list = [] for s in flib_flags: v = '--fcompiler=' if s[:len(v)] == v: - if MESON_ONLY_VER or backend_key == 'meson': - outmess( - "--fcompiler cannot be used with meson," - "set compiler with the FC environment variable\n" - ) - else: - from numpy.distutils import fcompiler - fcompiler.load_all_fcompiler_classes() - allowed_keys = list(fcompiler.fcompiler_class.keys()) - nv = ov = s[len(v):].lower() - if ov not in allowed_keys: - vmap = {} # XXX - try: - nv = vmap[ov] - except KeyError: - if ov not in vmap.values(): - print('Unknown vendor: "%s"' % (s[len(v):])) - nv = ov - i = flib_flags.index(s) - flib_flags[i] = '--fcompiler=' + nv - continue + outmess( + "--fcompiler cannot be used with meson," + "set compiler with the FC environment variable\n" + ) for s in del_list: i = flib_flags.index(s) del flib_flags[i] @@ -713,7 +710,7 @@ def run_compile(): run_main(f" {' '.join(f2py_flags)} {' '.join(pyf_files)}".split()) # Order matters here, includes are needed for run_main above - include_dirs, sources = get_includes(sources) + include_dirs, _, sources = get_newer_options(sources) # Now use the builder builder = build_backend( modulename, @@ -752,15 +749,6 @@ def validate_modulename(pyf_files, modulename='untitled'): return modulename def main(): - if '--help-link' in sys.argv[1:]: - sys.argv.remove('--help-link') - if MESON_ONLY_VER: - outmess("Use --dep for meson builds\n") - else: - from numpy.distutils.system_info import show_all - show_all() - return - if '-c' in sys.argv[1:]: run_compile() else: diff --git a/numpy/f2py/f2py2e.pyi 
b/numpy/f2py/f2py2e.pyi new file mode 100644 index 000000000000..4dd6a9f73ec3 --- /dev/null +++ b/numpy/f2py/f2py2e.pyi @@ -0,0 +1,69 @@ +import argparse +import pprint +from collections.abc import Hashable, Iterable, Mapping, MutableMapping, Sequence +from types import ModuleType +from typing import Any, Final, NotRequired, TypedDict, override, type_check_only + +from .__version__ import version +from .auxfuncs import _Bool, outmess as outmess + +### + +@type_check_only +class _F2PyDict(TypedDict): + csrc: list[str] + h: list[str] + fsrc: NotRequired[list[str]] + ltx: NotRequired[list[str]] + +@type_check_only +class _PreparseResult(TypedDict): + dependencies: list[str] + backend: str + modulename: str + +### + +f2py_version: Final = version +numpy_version: Final = version +__usage__: Final[str] + +show = pprint.pprint + +class CombineIncludePaths(argparse.Action): + @override + def __call__( + self, + /, + parser: argparse.ArgumentParser, + namespace: argparse.Namespace, + values: str | Sequence[str] | None, + option_string: str | None = None, + ) -> None: ... + +# +def run_main(comline_list: Iterable[str]) -> dict[str, _F2PyDict]: ... +def run_compile() -> None: ... +def main() -> None: ... + +# +def scaninputline(inputline: Iterable[str]) -> tuple[list[str], dict[str, _Bool]]: ... +def callcrackfortran(files: list[str], options: dict[str, bool]) -> list[dict[str, Any]]: ... +def buildmodules(lst: Iterable[Mapping[str, object]]) -> dict[str, dict[str, Any]]: ... +def dict_append[KT: Hashable, VT](d_out: MutableMapping[KT, VT], d_in: Mapping[KT, VT]) -> None: ... +def filter_files( + prefix: str, + suffix: str, + files: Iterable[str], + remove_prefix: _Bool | None = None, +) -> tuple[list[str], list[str]]: ... +def get_prefix(module: ModuleType) -> str: ... +def get_newer_options(iline: Iterable[str]) -> tuple[list[str], Any, list[str]]: ... + +# +def f2py_parser() -> argparse.ArgumentParser: ... +def make_f2py_compile_parser() -> argparse.ArgumentParser: ... 
+ +# +def preparse_sysargv() -> _PreparseResult: ... +def validate_modulename(pyf_files: Sequence[str], modulename: str = "untitled") -> str: ... diff --git a/numpy/f2py/f90mod_rules.py b/numpy/f2py/f90mod_rules.py index db53beaf616b..5cd7637a95c2 100644 --- a/numpy/f2py/f90mod_rules.py +++ b/numpy/f2py/f90mod_rules.py @@ -14,14 +14,13 @@ import numpy as np -from . import capi_maps -from . import func2subr -from .crackfortran import undo_rmbadname, undo_rmbadname1 +from . import capi_maps, func2subr # The environment provided by auxfuncs.py is needed for some calls to eval. # As the needed functions cannot be determined by static inspection of the # code, it is safest to use import * pending a major refactoring of f2py. from .auxfuncs import * +from .crackfortran import undo_rmbadname, undo_rmbadname1 options = {} @@ -39,6 +38,7 @@ def findf90modules(m): ret = ret + findf90modules(b) return ret + fgetdims1 = """\ external f2pysetdata logical ns @@ -89,17 +89,14 @@ def buildhooks(pymod): fhooks = [''] def fadd(line, s=fhooks): - s[0] = '%s\n %s' % (s[0], line) + s[0] = f'{s[0]}\n {line}' doc = [''] def dadd(line, s=doc): - s[0] = '%s\n%s' % (s[0], line) + s[0] = f'{s[0]}\n{line}' usenames = getuseblocks(pymod) for m in findf90modules(pymod): - contains_functions_or_subroutines = any( - item for item in m["body"] if item["block"] in ["function", "subroutine"] - ) sargs, fargs, efargs, modobjs, notvars, onlyvars = [], [], [], [], [ m['name']], [] sargsp = [] @@ -110,24 +107,33 @@ def dadd(line, s=doc): notvars.append(b['name']) for n in m['vars'].keys(): var = m['vars'][n] - if (n not in notvars) and (not l_or(isintent_hide, isprivate)(var)): + + if (n not in notvars and isvariable(var)) and (not l_or(isintent_hide, isprivate)(var)): onlyvars.append(n) mfargs.append(n) - outmess('\t\tConstructing F90 module support for "%s"...\n' % - (m['name'])) - if m['name'] in usenames and not contains_functions_or_subroutines: - outmess(f"\t\t\tSkipping {m['name']} since it is in 
'use'...\n") + outmess(f"\t\tConstructing F90 module support for \"{m['name']}\"...\n") + if len(onlyvars) == 0 and len(notvars) == 1 and m['name'] in notvars: + outmess(f"\t\t\tSkipping {m['name']} since there are no public vars/func in this module...\n") + continue + + # gh-25186 + if m['name'] in usenames and containscommon(m): + outmess(f"\t\t\tSkipping {m['name']} since it is in 'use' and contains a common block...\n") + continue + # skip modules with derived types + if m['name'] in usenames and containsderivedtypes(m): + outmess(f"\t\t\tSkipping {m['name']} since it is in 'use' and contains a derived type...\n") continue if onlyvars: - outmess('\t\t Variables: %s\n' % (' '.join(onlyvars))) + outmess(f"\t\t Variables: {' '.join(onlyvars)}\n") chooks = [''] def cadd(line, s=chooks): - s[0] = '%s\n%s' % (s[0], line) + s[0] = f'{s[0]}\n{line}' ihooks = [''] def iadd(line, s=ihooks): - s[0] = '%s\n%s' % (s[0], line) + s[0] = f'{s[0]}\n{line}' vrd = capi_maps.modsign2map(m) cadd('static FortranDataDef f2py_%s_def[] = {' % (m['name'])) @@ -159,29 +165,28 @@ def iadd(line, s=ihooks): note = var['note'] if isinstance(note, list): note = '\n'.join(note) - dadd('--- %s' % (note)) + dadd(f'--- {note}') if isallocatable(var): - fargs.append('f2py_%s_getdims_%s' % (m['name'], n)) + fargs.append(f"f2py_{m['name']}_getdims_{n}") efargs.append(fargs[-1]) sargs.append( - 'void (*%s)(int*,npy_intp*,void(*)(char*,npy_intp*),int*)' % (n)) + f'void (*{n})(int*,npy_intp*,void(*)(char*,npy_intp*),int*)') sargsp.append('void (*)(int*,npy_intp*,void(*)(char*,npy_intp*),int*)') - iadd('\tf2py_%s_def[i_f2py++].func = %s;' % (m['name'], n)) - fadd('subroutine %s(r,s,f2pysetdata,flag)' % (fargs[-1])) - fadd('use %s, only: d => %s\n' % - (m['name'], undo_rmbadname1(n))) + iadd(f"\tf2py_{m['name']}_def[i_f2py++].func = {n};") + fadd(f'subroutine {fargs[-1]}(r,s,f2pysetdata,flag)') + fadd(f"use {m['name']}, only: d => {undo_rmbadname1(n)}\n") fadd('integer flag\n') fhooks[0] = fhooks[0] + 
fgetdims1 dms = range(1, int(dm['rank']) + 1) fadd(' allocate(d(%s))\n' % (','.join(['s(%s)' % i for i in dms]))) fhooks[0] = fhooks[0] + use_fgetdims2 - fadd('end subroutine %s' % (fargs[-1])) + fadd(f'end subroutine {fargs[-1]}') else: fargs.append(n) - sargs.append('char *%s' % (n)) + sargs.append(f'char *{n}') sargsp.append('char*') - iadd('\tf2py_%s_def[i_f2py++].data = %s;' % (m['name'], n)) + iadd(f"\tf2py_{m['name']}_def[i_f2py++].data = {n};") if onlyvars: dadd('\\end{description}') if hasbody(m): @@ -190,22 +195,21 @@ def iadd(line, s=ihooks): outmess("f90mod_rules.buildhooks:" f" skipping {b['block']} {b['name']}\n") continue - modobjs.append('%s()' % (b['name'])) + modobjs.append(f"{b['name']}()") b['modulename'] = m['name'] api, wrap = rules.buildapi(b) if isfunction(b): fhooks[0] = fhooks[0] + wrap - fargs.append('f2pywrap_%s_%s' % (m['name'], b['name'])) + fargs.append(f"f2pywrap_{m['name']}_{b['name']}") ifargs.append(func2subr.createfuncwrapper(b, signature=1)) + elif wrap: + fhooks[0] = fhooks[0] + wrap + fargs.append(f"f2pywrap_{m['name']}_{b['name']}") + ifargs.append( + func2subr.createsubrwrapper(b, signature=1)) else: - if wrap: - fhooks[0] = fhooks[0] + wrap - fargs.append('f2pywrap_%s_%s' % (m['name'], b['name'])) - ifargs.append( - func2subr.createsubrwrapper(b, signature=1)) - else: - fargs.append(b['name']) - mfargs.append(fargs[-1]) + fargs.append(b['name']) + mfargs.append(fargs[-1]) api['externroutines'] = [] ar = applyrules(api, vrd) ar['docs'] = [] @@ -215,10 +219,9 @@ def iadd(line, s=ihooks): 'f2py_rout_#modulename#_%s_%s,' 'doc_f2py_rout_#modulename#_%s_%s},') % (b['name'], m['name'], b['name'], m['name'], b['name'])) - sargs.append('char *%s' % (b['name'])) + sargs.append(f"char *{b['name']}") sargsp.append('char *') - iadd('\tf2py_%s_def[i_f2py++].data = %s;' % - (m['name'], b['name'])) + iadd(f"\tf2py_{m['name']}_def[i_f2py++].data = {b['name']};") cadd('\t{NULL}\n};\n') iadd('}') ihooks[0] = 'static void f2py_setup_%s(%s) 
{\n\tint i_f2py=0;%s' % ( @@ -234,29 +237,34 @@ def iadd(line, s=ihooks): % (F_FUNC, m['name'], m['name'].upper(), m['name'])) iadd('}\n') ret['f90modhooks'] = ret['f90modhooks'] + chooks + ihooks - ret['initf90modhooks'] = ['\tPyDict_SetItemString(d, "%s", PyFortranObject_New(f2py_%s_def,f2py_init_%s));' % ( - m['name'], m['name'], m['name'])] + ret['initf90modhooks'] + ret['initf90modhooks'] = [ + '\t{', + '\t\tPyObject *tmp = PyFortranObject_New(f2py_%s_def,f2py_init_%s);' + % (m['name'], m['name']), + '\t\tPyDict_SetItemString(d, "%s", tmp);' % (m['name'],), + '\t\tPy_XDECREF(tmp);', + '\t}', + ] + ret["initf90modhooks"] fadd('') - fadd('subroutine f2pyinit%s(f2pysetupfunc)' % (m['name'])) + fadd(f"subroutine f2pyinit{m['name']}(f2pysetupfunc)") if mfargs: for a in undo_rmbadname(mfargs): - fadd('use %s, only : %s' % (m['name'], a)) + fadd(f"use {m['name']}, only : {a}") if ifargs: fadd(' '.join(['interface'] + ifargs)) fadd('end interface') fadd('external f2pysetupfunc') if efargs: for a in undo_rmbadname(efargs): - fadd('external %s' % (a)) - fadd('call f2pysetupfunc(%s)' % (','.join(undo_rmbadname(fargs)))) - fadd('end subroutine f2pyinit%s\n' % (m['name'])) + fadd(f'external {a}') + fadd(f"call f2pysetupfunc({','.join(undo_rmbadname(fargs))})") + fadd(f"end subroutine f2pyinit{m['name']}\n") dadd('\n'.join(ret['latexdoc']).replace( r'\subsection{', r'\subsubsection{')) ret['latexdoc'] = [] - ret['docs'].append('"\t%s --- %s"' % (m['name'], - ','.join(undo_rmbadname(modobjs)))) + ret['docs'].append(f"\"\t{m['name']} --- {','.join(undo_rmbadname(modobjs))}\"") ret['routine_defs'] = '' ret['doc'] = [] diff --git a/numpy/f2py/f90mod_rules.pyi b/numpy/f2py/f90mod_rules.pyi new file mode 100644 index 000000000000..4df004eef856 --- /dev/null +++ b/numpy/f2py/f90mod_rules.pyi @@ -0,0 +1,16 @@ +from collections.abc import Mapping +from typing import Any, Final + +from .auxfuncs import isintent_dict as isintent_dict + +__version__: Final[str] = ... 
+f2py_version: Final = "See `f2py -v`" + +options: Final[dict[str, bool]] + +fgetdims1: Final[str] = ... +fgetdims2: Final[str] = ... +fgetdims2_sa: Final[str] = ... + +def findf90modules(m: Mapping[str, object]) -> list[dict[str, Any]]: ... +def buildhooks(pymod: Mapping[str, object]) -> dict[str, Any]: ... diff --git a/numpy/f2py/func2subr.py b/numpy/f2py/func2subr.py index b9aa9fc007cb..09b67f7c3085 100644 --- a/numpy/f2py/func2subr.py +++ b/numpy/f2py/func2subr.py @@ -11,28 +11,38 @@ """ import copy +from ._isocbind import isoc_kindmap from .auxfuncs import ( - getfortranname, isexternal, isfunction, isfunction_wrap, isintent_in, - isintent_out, islogicalfunction, ismoduleroutine, isscalar, - issubroutine, issubroutine_wrap, outmess, show + getfortranname, + isexternal, + isfunction, + isfunction_wrap, + isintent_in, + isintent_out, + islogicalfunction, + ismoduleroutine, + isscalar, + issubroutine, + issubroutine_wrap, + outmess, + show, ) -from ._isocbind import isoc_kindmap def var2fixfortran(vars, a, fa=None, f90mode=None): if fa is None: fa = a if a not in vars: show(vars) - outmess('var2fixfortran: No definition for argument "%s".\n' % a) + outmess(f'var2fixfortran: No definition for argument "{a}".\n') return '' if 'typespec' not in vars[a]: show(vars[a]) - outmess('var2fixfortran: No typespec for argument "%s".\n' % a) + outmess(f'var2fixfortran: No typespec for argument "{a}".\n') return '' vardef = vars[a]['typespec'] if vardef == 'type' and 'typename' in vars[a]: - vardef = '%s(%s)' % (vardef, vars[a]['typename']) + vardef = f"{vardef}({vars[a]['typename']})" selector = {} lk = '' if 'kindselector' in vars[a]: @@ -44,32 +54,30 @@ def var2fixfortran(vars, a, fa=None, f90mode=None): if '*' in selector: if f90mode: if selector['*'] in ['*', ':', '(*)']: - vardef = '%s(len=*)' % (vardef) + vardef = f'{vardef}(len=*)' else: - vardef = '%s(%s=%s)' % (vardef, lk, selector['*']) + vardef = f"{vardef}({lk}={selector['*']})" + elif selector['*'] in ['*', ':']: 
+ vardef = f"{vardef}*({selector['*']})" else: - if selector['*'] in ['*', ':']: - vardef = '%s*(%s)' % (vardef, selector['*']) - else: - vardef = '%s*%s' % (vardef, selector['*']) - else: - if 'len' in selector: - vardef = '%s(len=%s' % (vardef, selector['len']) - if 'kind' in selector: - vardef = '%s,kind=%s)' % (vardef, selector['kind']) - else: - vardef = '%s)' % (vardef) - elif 'kind' in selector: - vardef = '%s(kind=%s)' % (vardef, selector['kind']) + vardef = f"{vardef}*{selector['*']}" + elif 'len' in selector: + vardef = f"{vardef}(len={selector['len']}" + if 'kind' in selector: + vardef = f"{vardef},kind={selector['kind']})" + else: + vardef = f'{vardef})' + elif 'kind' in selector: + vardef = f"{vardef}(kind={selector['kind']})" - vardef = '%s %s' % (vardef, fa) + vardef = f'{vardef} {fa}' if 'dimension' in vars[a]: - vardef = '%s(%s)' % (vardef, ','.join(vars[a]['dimension'])) + vardef = f"{vardef}({','.join(vars[a]['dimension'])})" return vardef def useiso_c_binding(rout): useisoc = False - for key, value in rout['vars'].items(): + for value in rout['vars'].values(): kind_value = value.get('kindselector', {}).get('kind') if kind_value in isoc_kindmap: return True @@ -84,9 +92,9 @@ def createfuncwrapper(rout, signature=0): v = rout['vars'][a] for i, d in enumerate(v.get('dimension', [])): if d == ':': - dn = 'f2py_%s_d%s' % (a, i) - dv = dict(typespec='integer', intent=['hide']) - dv['='] = 'shape(%s, %s)' % (a, i) + dn = f'f2py_{a}_d{i}' + dv = {'typespec': 'integer', 'intent': ['hide']} + dv['='] = f'shape({a}, {i})' extra_args.append(dn) vars[dn] = dv v['dimension'][i] = dn @@ -96,11 +104,11 @@ def createfuncwrapper(rout, signature=0): ret = [''] def add(line, ret=ret): - ret[0] = '%s\n %s' % (ret[0], line) + ret[0] = f'{ret[0]}\n {line}' name = rout['name'] fortranname = getfortranname(rout) f90mode = ismoduleroutine(rout) - newname = '%sf2pywrap' % (name) + newname = f'{name}f2pywrap' if newname not in vars: vars[newname] = vars[name] @@ -130,18 
+138,17 @@ def add(line, ret=ret): sargs = sargs.replace(f"{name}, ", '') args = [arg for arg in args if arg != name] rout['args'] = args - add('subroutine f2pywrap_%s_%s (%s)' % - (rout['modulename'], name, sargs)) + add(f"subroutine f2pywrap_{rout['modulename']}_{name} ({sargs})") if not signature: - add('use %s, only : %s' % (rout['modulename'], fortranname)) + add(f"use {rout['modulename']}, only : {fortranname}") if useisoc: add('use iso_c_binding') else: - add('subroutine f2pywrap%s (%s)' % (name, sargs)) + add(f'subroutine f2pywrap{name} ({sargs})') if useisoc: add('use iso_c_binding') if not need_interface: - add('external %s' % (fortranname)) + add(f'external {fortranname}') rl = l_tmpl.replace('@@@NAME@@@', '') + ' ' + fortranname if need_interface: @@ -153,7 +160,7 @@ def add(line, ret=ret): dumped_args = [] for a in args: if isexternal(vars[a]): - add('external %s' % (a)) + add(f'external {a}') dumped_args.append(a) for a in args: if a in dumped_args: @@ -189,11 +196,11 @@ def add(line, ret=ret): if not signature: if islogicalfunction(rout): - add('%s = .not.(.not.%s(%s))' % (newname, fortranname, sargs)) + add(f'{newname} = .not.(.not.{fortranname}({sargs}))') else: - add('%s = %s(%s)' % (newname, fortranname, sargs)) + add(f'{newname} = {fortranname}({sargs})') if f90mode: - add('end subroutine f2pywrap_%s_%s' % (rout['modulename'], name)) + add(f"end subroutine f2pywrap_{rout['modulename']}_{name}") else: add('end') return ret[0] @@ -208,9 +215,9 @@ def createsubrwrapper(rout, signature=0): v = rout['vars'][a] for i, d in enumerate(v.get('dimension', [])): if d == ':': - dn = 'f2py_%s_d%s' % (a, i) - dv = dict(typespec='integer', intent=['hide']) - dv['='] = 'shape(%s, %s)' % (a, i) + dn = f'f2py_{a}_d{i}' + dv = {'typespec': 'integer', 'intent': ['hide']} + dv['='] = f'shape({a}, {i})' extra_args.append(dn) vars[dn] = dv v['dimension'][i] = dn @@ -220,7 +227,7 @@ def createsubrwrapper(rout, signature=0): ret = [''] def add(line, ret=ret): - ret[0] = 
'%s\n %s' % (ret[0], line) + ret[0] = f'{ret[0]}\n {line}' name = rout['name'] fortranname = getfortranname(rout) f90mode = ismoduleroutine(rout) @@ -230,18 +237,17 @@ def add(line, ret=ret): useisoc = useiso_c_binding(rout) sargs = ', '.join(args) if f90mode: - add('subroutine f2pywrap_%s_%s (%s)' % - (rout['modulename'], name, sargs)) + add(f"subroutine f2pywrap_{rout['modulename']}_{name} ({sargs})") if useisoc: add('use iso_c_binding') if not signature: - add('use %s, only : %s' % (rout['modulename'], fortranname)) + add(f"use {rout['modulename']}, only : {fortranname}") else: - add('subroutine f2pywrap%s (%s)' % (name, sargs)) + add(f'subroutine f2pywrap{name} ({sargs})') if useisoc: add('use iso_c_binding') if not need_interface: - add('external %s' % (fortranname)) + add(f'external {fortranname}') if need_interface: for line in rout['saved_interface'].split('\n'): @@ -251,7 +257,7 @@ def add(line, ret=ret): dumped_args = [] for a in args: if isexternal(vars[a]): - add('external %s' % (a)) + add(f'external {a}') dumped_args.append(a) for a in args: if a in dumped_args: @@ -279,9 +285,9 @@ def add(line, ret=ret): sargs = ', '.join([a for a in args if a not in extra_args]) if not signature: - add('call %s(%s)' % (fortranname, sargs)) + add(f'call {fortranname}({sargs})') if f90mode: - add('end subroutine f2pywrap_%s_%s' % (rout['modulename'], name)) + add(f"end subroutine f2pywrap_{rout['modulename']}_{name}") else: add('end') return ret[0] @@ -310,7 +316,7 @@ def assubr(rout): flag = 0 break if flag: - fvar['intent'].append('out=%s' % (rname)) + fvar['intent'].append(f'out={rname}') rout['args'][:] = [fname] + rout['args'] return rout, createfuncwrapper(rout) if issubroutine_wrap(rout): diff --git a/numpy/f2py/func2subr.pyi b/numpy/f2py/func2subr.pyi new file mode 100644 index 000000000000..8d2b3dbaa1b9 --- /dev/null +++ b/numpy/f2py/func2subr.pyi @@ -0,0 +1,7 @@ +from .auxfuncs import _Bool, _ROut, _Var + +def var2fixfortran(vars: _Var, a: str, fa: str | None 
= None, f90mode: _Bool | None = None) -> str: ... +def useiso_c_binding(rout: _ROut) -> bool: ... +def createfuncwrapper(rout: _ROut, signature: int = 0) -> str: ... +def createsubrwrapper(rout: _ROut, signature: int = 0) -> str: ... +def assubr(rout: _ROut) -> tuple[dict[str, str], str]: ... diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py old mode 100755 new mode 100644 index 009365e04761..6ad941f98287 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python3 """ Rules for building C/API module with f2py2e. @@ -47,41 +46,93 @@ NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. """ -import os, sys -import time import copy +import os +import sys +import time from pathlib import Path # __version__.version is now the same as the NumPy version -from . import __version__ - +from . import ( + __version__, + capi_maps, + cfuncs, + common_rules, + f90mod_rules, + func2subr, + use_rules, +) from .auxfuncs import ( - applyrules, debugcapi, dictappend, errmess, gentitle, getargs2, - hascallstatement, hasexternals, hasinitvalue, hasnote, - hasresultnote, isarray, isarrayofstrings, ischaracter, - ischaracterarray, ischaracter_or_characterarray, iscomplex, - iscomplexarray, iscomplexfunction, iscomplexfunction_warn, - isdummyroutine, isexternal, isfunction, isfunction_wrap, isint1, - isint1array, isintent_aux, isintent_c, isintent_callback, - isintent_copy, isintent_hide, isintent_inout, isintent_nothide, - isintent_out, isintent_overwrite, islogical, islong_complex, - islong_double, islong_doublefunction, islong_long, - islong_longfunction, ismoduleroutine, isoptional, isrequired, - isscalar, issigned_long_longarray, isstring, isstringarray, - isstringfunction, issubroutine, isattr_value, - issubroutine_wrap, isthreadsafe, isunsigned, isunsigned_char, - isunsigned_chararray, isunsigned_long_long, - isunsigned_long_longarray, isunsigned_short, isunsigned_shortarray, - l_and, l_not, l_or, outmess, replace, stripcomma, 
requiresf90wrapper + applyrules, + debugcapi, + dictappend, + errmess, + gentitle, + getargs2, + hascallstatement, + hasexternals, + hasinitvalue, + hasnote, + hasresultnote, + isarray, + isarrayofstrings, + isattr_value, + ischaracter, + ischaracter_or_characterarray, + ischaracterarray, + iscomplex, + iscomplexarray, + iscomplexfunction, + iscomplexfunction_warn, + isdummyroutine, + isexternal, + isfunction, + isfunction_wrap, + isint1, + isint1array, + isintent_aux, + isintent_c, + isintent_callback, + isintent_copy, + isintent_hide, + isintent_inout, + isintent_inplace, + isintent_nothide, + isintent_out, + isintent_overwrite, + islogical, + islong_complex, + islong_double, + islong_doublefunction, + islong_long, + islong_longfunction, + ismoduleroutine, + isoptional, + isrequired, + isscalar, + issigned_long_longarray, + isstring, + isstringarray, + isstringfunction, + issubroutine, + issubroutine_wrap, + isthreadsafe, + isunsigned, + isunsigned_char, + isunsigned_chararray, + isunsigned_long_long, + isunsigned_long_longarray, + isunsigned_short, + isunsigned_shortarray, + l_and, + l_not, + l_or, + outmess, + replace, + requiresf90wrapper, + stripcomma, ) -from . import capi_maps -from . import cfuncs -from . import common_rules -from . import use_rules -from . import f90mod_rules -from . import func2subr - f2py_version = __version__.version numpy_version = __version__.version @@ -236,10 +287,20 @@ #initcommonhooks# #interface_usercode# +#ifdef Py_GIL_DISABLED + // signal whether this module supports running with the GIL disabled + PyUnstable_Module_SetGIL(m , #gil_used#); +#endif + #ifdef F2PY_REPORT_ATEXIT if (! 
PyErr_Occurred()) on_exit(f2py_report_on_exit,(void*)\"#modulename#\"); #endif + + if (PyType_Ready(&PyFortran_Type) < 0) { + return NULL; + } + return m; } #ifdef __cplusplus @@ -454,7 +515,7 @@ { extern #ctype# #F_FUNC#(#name_lower#,#NAME#)(void); PyObject* o = PyDict_GetItemString(d,"#name#"); - tmp = F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL); + tmp = F2PyCapsule_FromVoidPtr((void*)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),NULL); PyObject_SetAttrString(o,"_cpointer", tmp); Py_DECREF(tmp); s = PyUnicode_FromString("#name#"); @@ -598,21 +659,20 @@ }, 'decl': [' #ctype# #name#_return_value = NULL;', ' int #name#_return_value_len = 0;'], - 'callfortran':'#name#_return_value,#name#_return_value_len,', - 'callfortranroutine':[' #name#_return_value_len = #rlength#;', - ' if ((#name#_return_value = (string)malloc(' - + '#name#_return_value_len+1) == NULL) {', - ' PyErr_SetString(PyExc_MemoryError, \"out of memory\");', - ' f2py_success = 0;', - ' } else {', - " (#name#_return_value)[#name#_return_value_len] = '\\0';", - ' }', - ' if (f2py_success) {', - {hasexternals: """\ + 'callfortran': '#name#_return_value,#name#_return_value_len,', + 'callfortranroutine': [' #name#_return_value_len = #rlength#;', + ' if ((#name#_return_value = (string)malloc(#name#_return_value_len+1) == NULL) {', + ' PyErr_SetString(PyExc_MemoryError, \"out of memory\");', + ' f2py_success = 0;', + ' } else {', + " (#name#_return_value)[#name#_return_value_len] = '\\0';", + ' }', + ' if (f2py_success) {', + {hasexternals: """\ if (#setjmpbuf#) { f2py_success = 0; } else {"""}, - {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'}, + {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'}, """\ #ifdef USESCOMPAQFORTRAN (*f2py_func)(#callcompaqfortran#); @@ -620,17 +680,17 @@ (*f2py_func)(#callfortran#); #endif """, - {isthreadsafe: ' Py_END_ALLOW_THREADS'}, - {hasexternals: ' }'}, - {debugcapi: + {isthreadsafe: ' Py_END_ALLOW_THREADS'}, + {hasexternals: ' }'}, + {debugcapi: ' 
fprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value_len,#name#_return_value);'}, - ' } /* if (f2py_success) after (string)malloc */', + ' } /* if (f2py_success) after (string)malloc */', ], 'returnformat': '#rformat#', 'return': ',#name#_return_value', 'freemem': ' STRINGFREE(#name#_return_value);', 'need': ['F_FUNC', '#ctype#', 'STRINGFREE'], - '_check':l_and(isstringfunction, l_not(isfunction_wrap)) # ???obsolete + '_check': l_and(isstringfunction, l_not(isfunction_wrap)) # ???obsolete }, { # Debugging 'routdebugenter': ' fprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");', @@ -692,8 +752,8 @@ 'decl': [' #ctype# #varname# = NULL;', ' int slen(#varname#);', ], - 'need':['len..'], - '_check':isstring + 'need': ['len..'], + '_check': isstring }, # Array { # Common @@ -701,7 +761,7 @@ ' npy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};', ' const int #varname#_Rank = #rank#;', ], - 'need':['len..', {hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}], + 'need': ['len..', {hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}], '_check': isarray }, # Scalararray @@ -810,7 +870,7 @@ 'setjmpbuf': '(setjmp(#varname#_cb.jmpbuf))', 'callfortran': {l_not(isintent_callback): '#varname#_cptr,'}, 'need': ['#cbname#', 'setjmp.h'], - '_check':isexternal + '_check': isexternal }, { 'frompyobj': [{l_not(isintent_callback): """\ @@ -864,8 +924,8 @@ Py_DECREF(#varname#_cb.args_capi); }""", 'need': ['SWAP', 'create_cb_arglist'], - '_check':isexternal, - '_depend':'' + '_check': isexternal, + '_depend': '' }, # Scalars (not complex) { # Common @@ -956,8 +1016,8 @@ 'frompyobj': [{hasinitvalue: ' if (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = #init.i#;} else'}, {l_and(isoptional, l_not(hasinitvalue)) : ' if (#varname#_capi != Py_None)'}, - ' f2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");' - '\n if (f2py_success) {'], + (' f2py_success = 
#ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");' + '\n if (f2py_success) {')], 'cleanupfrompyobj': ' } /*if (f2py_success) of #varname# frompyobj*/', 'need': ['#ctype#_from_pyobj'], '_check': l_and(iscomplex, isintent_nothide), @@ -983,9 +1043,9 @@ 'decl': [' #ctype# #varname# = NULL;', ' int slen(#varname#);', ' PyObject *#varname#_capi = Py_None;'], - 'callfortran':'#varname#,', - 'callfortranappend':'slen(#varname#),', - 'pyobjfrom':[ + 'callfortran': '#varname#,', + 'callfortranappend': 'slen(#varname#),', + 'pyobjfrom': [ {debugcapi: ' fprintf(stderr,' '"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'}, @@ -998,13 +1058,13 @@ {l_and(isintent_out, l_not(isintent_c)): 'STRINGPADN'}], '_check': isstring }, { # Common - 'frompyobj': [ + 'frompyobj': [( """\ slen(#varname#) = #elsize#; f2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,""" """#varname#_capi,\"#ctype#_from_pyobj failed in converting #nth#""" """`#varname#\' of #pyname# to C #ctype#\"); - if (f2py_success) {""", + if (f2py_success) {"""), # The trailing null value for Fortran is blank. 
{l_not(isintent_c): " STRINGPADN(#varname#, slen(#varname#), '\\0', ' ');"}, @@ -1014,8 +1074,8 @@ } /*if (f2py_success) of #varname#*/""", 'need': ['#ctype#_from_pyobj', 'len..', 'STRINGFREE', {l_not(isintent_c): 'STRINGPADN'}], - '_check':isstring, - '_depend':'' + '_check': isstring, + '_depend': '' }, { # Not hidden 'argformat': {isrequired: 'O'}, 'keyformat': {isoptional: 'O'}, @@ -1048,7 +1108,7 @@ ' int capi_#varname#_intent = 0;', {isstringarray: ' int slen(#varname#) = 0;'}, ], - 'callfortran':'#varname#,', + 'callfortran': '#varname#,', 'callfortranappend': {isstringarray: 'slen(#varname#),'}, 'return': {isintent_out: ',capi_#varname#_as_array'}, 'need': 'len..', @@ -1095,7 +1155,7 @@ 'frompyobj': [ ' #setdims#;', ' capi_#varname#_intent |= #intent#;', - (' const char * capi_errmess = "#modulename#.#pyname#:' + (' const char capi_errmess[] = "#modulename#.#pyname#:' ' failed to create array from the #nth# `#varname#`";'), {isintent_hide: ' capi_#varname#_as_array = ndarray_from_pyobj(' @@ -1125,9 +1185,10 @@ """\ int *_i,capi_i=0; CFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\"); - if (initforcomb(PyArray_DIMS(capi_#varname#_as_array), + struct ForcombCache cache; + if (initforcomb(&cache, PyArray_DIMS(capi_#varname#_as_array), PyArray_NDIM(capi_#varname#_as_array),1)) { - while ((_i = nextforcomb())) + while ((_i = nextforcomb(&cache))) #varname#[capi_i++] = #init#; /* fortran way */ } else { PyObject *exc, *val, *tb; @@ -1140,9 +1201,20 @@ } if (f2py_success) {"""]}, ], + 'pyobjfrom': [ + {l_and(isintent_inplace, l_not(isintent_out)): """\ + f2py_success = (PyArray_ResolveWritebackIfCopy(capi_#varname#_as_array) >= 0); + if (f2py_success) { /* inplace array #varname# has been written back to */"""}, + {l_and(isintent_inplace, isintent_out): """\ + f2py_success = (PyArray_ResolveWritebackIfCopy(capi_#varname#_as_array) >= 0); + if (f2py_success) { /* return written-back-to inplace array #varname# */ + Py_INCREF(#varname#_capi); + 
Py_SETREF(capi_#varname#_as_array, (PyArrayObject*)#varname#_capi);"""}, + ], + 'closepyobjfrom': {isintent_inplace: ' } /*if (f2py_success) of #varname# pyobjfrom*/'}, 'cleanupfrompyobj': [ # note that this list will be reversed - ' } ' - '/* if (capi_#varname#_as_array == NULL) ... else of #varname# */', + (' } ' + '/* if (capi_#varname#_as_array == NULL) ... else of #varname# */'), {l_not(l_or(isintent_out, isintent_hide)): """\ if((PyObject *)capi_#varname#_as_array!=#varname#_capi) { Py_XDECREF(capi_#varname#_as_array); }"""}, @@ -1242,7 +1314,7 @@ def buildmodule(m, um): """ Return """ - outmess(' Building module "%s"...\n' % (m['name'])) + outmess(f" Building module \"{m['name']}\"...\n") ret = {} mod_rules = defmod_rules[:] vrd = capi_maps.modsign2map(m) @@ -1262,7 +1334,7 @@ def buildmodule(m, um): if not nb: print( - 'buildmodule: Could not find the body of interfaced routine "%s". Skipping.\n' % (n), file=sys.stderr) + f'buildmodule: Could not find the body of interfaced routine "{n}". 
Skipping.\n', file=sys.stderr) continue nb_list = [nb] if 'entry' in nb: @@ -1321,7 +1393,7 @@ def buildmodule(m, um): needs = cfuncs.get_needs() # Add mapped definitions - needs['typedefs'] += [cvar for cvar in capi_maps.f2cmap_mapped # + needs['typedefs'] += [cvar for cvar in capi_maps.f2cmap_mapped # if cvar in typedef_need_dict.values()] code = {} for n in needs.keys(): @@ -1349,7 +1421,7 @@ def buildmodule(m, um): elif k in cfuncs.commonhooks: c = cfuncs.commonhooks[k] else: - errmess('buildmodule: unknown need %s.\n' % (repr(k))) + errmess(f'buildmodule: unknown need {repr(k)}.\n') continue code[n].append(c) mod_rules.append(code) @@ -1363,7 +1435,7 @@ def buildmodule(m, um): ret['csrc'] = fn with open(fn, 'w') as f: f.write(ar['modulebody'].replace('\t', 2 * ' ')) - outmess(' Wrote C/API module "%s" to file "%s"\n' % (m['name'], fn)) + outmess(f" Wrote C/API module \"{m['name']}\" to file \"{fn}\"\n") if options['dorestdoc']: fn = os.path.join( @@ -1379,7 +1451,7 @@ def buildmodule(m, um): ret['ltx'] = fn with open(fn, 'w') as f: f.write( - '%% This file is auto-generated with f2py (version:%s)\n' % (f2py_version)) + f'% This file is auto-generated with f2py (version:{f2py_version})\n') if 'shortlatex' not in options: f.write( '\\documentclass{article}\n\\usepackage{a4wide}\n\\begin{document}\n\\tableofcontents\n\n') @@ -1394,7 +1466,7 @@ def buildmodule(m, um): with open(wn, 'w') as f: f.write('C -*- fortran -*-\n') f.write( - 'C This file is autogenerated with f2py (version:%s)\n' % (f2py_version)) + f'C This file is autogenerated with f2py (version:{f2py_version})\n') f.write( 'C It contains Fortran 77 wrappers to fortran functions.\n') lines = [] @@ -1411,15 +1483,15 @@ def buildmodule(m, um): lines.append(l + '\n') lines = ''.join(lines).replace('\n &\n', '\n') f.write(lines) - outmess(' Fortran 77 wrappers are saved to "%s"\n' % (wn)) + outmess(f' Fortran 77 wrappers are saved to "{wn}\"\n') if funcwrappers2: wn = os.path.join( - options['buildpath'], 
'%s-f2pywrappers2.f90' % (vrd['modulename'])) + options['buildpath'], f"{vrd['modulename']}-f2pywrappers2.f90") ret['fsrc'] = wn with open(wn, 'w') as f: f.write('! -*- f90 -*-\n') f.write( - '! This file is autogenerated with f2py (version:%s)\n' % (f2py_version)) + f'! This file is autogenerated with f2py (version:{f2py_version})\n') f.write( '! It contains Fortran 90 wrappers to fortran functions.\n') lines = [] @@ -1438,11 +1510,12 @@ def buildmodule(m, um): lines.append(l + '\n') lines = ''.join(lines).replace('\n &\n', '\n') f.write(lines) - outmess(' Fortran 90 wrappers are saved to "%s"\n' % (wn)) + outmess(f' Fortran 90 wrappers are saved to "{wn}\"\n') return ret ################## Build C/API function ############# + stnd = {1: 'st', 2: 'nd', 3: 'rd', 4: 'th', 5: 'th', 6: 'th', 7: 'th', 8: 'th', 9: 'th', 0: 'th'} @@ -1457,7 +1530,7 @@ def buildapi(rout): outmess(' Constructing wrapper function "%s.%s"...\n' % (rout['modulename'], rout['name'])) else: - outmess(' Constructing wrapper function "%s"...\n' % (rout['name'])) + outmess(f" Constructing wrapper function \"{rout['name']}\"...\n") # Routine vrd = capi_maps.routsign2map(rout) rd = dictappend({}, vrd) @@ -1559,9 +1632,9 @@ def buildapi(rout): ar = applyrules(routine_rules, rd) if ismoduleroutine(rout): - outmess(' %s\n' % (ar['docshort'])) + outmess(f" {ar['docshort']}\n") else: - outmess(' %s\n' % (ar['docshort'])) + outmess(f" {ar['docshort']}\n") return ar, wrap diff --git a/numpy/f2py/rules.pyi b/numpy/f2py/rules.pyi new file mode 100644 index 000000000000..c45d42289363 --- /dev/null +++ b/numpy/f2py/rules.pyi @@ -0,0 +1,36 @@ +from collections.abc import Callable, Iterable, Mapping +from typing import Any, Final, Literal as L + +from .__version__ import version +from .auxfuncs import _Bool, _Var + +type _Predicate = Callable[[_Var], _Bool] +type _RuleDict[VT] = dict[str, VT] +type _DefDict[VT] = dict[_Predicate, VT] + +### + +f2py_version: Final = version +numpy_version: Final = version + 
+options: Final[dict[str, bool]] = ... +sepdict: Final[dict[str, str]] = ... + +generationtime: Final[int] = ... +typedef_need_dict: Final[_DefDict[str]] = ... + +module_rules: Final[_RuleDict[str | list[str] | _RuleDict[str]]] = ... +routine_rules: Final[_RuleDict[str | list[str] | _DefDict[str] | _RuleDict[str]]] = ... +defmod_rules: Final[list[_RuleDict[str | _DefDict[str]]]] = ... +rout_rules: Final[list[_RuleDict[str | Any]]] = ... +aux_rules: Final[list[_RuleDict[str | Any]]] = ... +arg_rules: Final[list[_RuleDict[str | Any]]] = ... +check_rules: Final[list[_RuleDict[str | Any]]] = ... + +stnd: Final[dict[L[1, 2, 3, 4, 5, 6, 7, 8, 9, 0], L["st", "nd", "rd", "th"]]] = ... + +def buildmodule(m: Mapping[str, str | Any], um: Iterable[Mapping[str, str | Any]]) -> _RuleDict[str]: ... +def buildapi(rout: Mapping[str, str]) -> tuple[_RuleDict[str], str]: ... + +# namespace pollution +k: str diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c index 3594147281a2..f368b18292d7 100644 --- a/numpy/f2py/src/fortranobject.c +++ b/numpy/f2py/src/fortranobject.c @@ -47,7 +47,7 @@ F2PySwapThreadLocalCallbackPtr(char *key, void *ptr) "failed"); } - value = PyDict_GetItemString(local_dict, key); + value = PyDict_GetItemString(local_dict, key); // noqa: borrowed-ref OK if (value != NULL) { prev = PyLong_AsVoidPtr(value); if (PyErr_Occurred()) { @@ -87,7 +87,7 @@ F2PyGetThreadLocalCallbackPtr(char *key) "F2PyGetThreadLocalCallbackPtr: PyThreadState_GetDict failed"); } - value = PyDict_GetItemString(local_dict, key); + value = PyDict_GetItemString(local_dict, key); // noqa: borrowed-ref OK if (value != NULL) { prev = PyLong_AsVoidPtr(value); if (PyErr_Occurred()) { @@ -192,13 +192,24 @@ PyFortranObject_NewAsAttr(FortranDataDef *defs) } fp->len = 1; fp->defs = defs; + PyObject *name; if (defs->rank == -1) { - PyDict_SetItemString(fp->dict, "__name__", PyUnicode_FromFormat("function %s", defs->name)); + name = PyUnicode_FromFormat("function %s", defs->name); 
} else if (defs->rank == 0) { - PyDict_SetItemString(fp->dict, "__name__", PyUnicode_FromFormat("scalar %s", defs->name)); + name = PyUnicode_FromFormat("scalar %s", defs->name); } else { - PyDict_SetItemString(fp->dict, "__name__", PyUnicode_FromFormat("array %s", defs->name)); + name = PyUnicode_FromFormat("array %s", defs->name); } + if (name == NULL) { + Py_DECREF(fp); + return NULL; + } + if (PyDict_SetItemString(fp->dict, "__name__", name) < 0) { + Py_DECREF(name); + Py_DECREF(fp); + return NULL; + } + Py_DECREF(name); return (PyObject *)fp; } @@ -363,7 +374,9 @@ fortran_getattr(PyFortranObject *fp, char *name) { int i, j, k, flag; if (fp->dict != NULL) { - PyObject *v = _PyDict_GetItemStringWithError(fp->dict, name); + // python 3.13 added PyDict_GetItemRef +#if PY_VERSION_HEX < 0x030D0000 + PyObject *v = _PyDict_GetItemStringWithError(fp->dict, name); // noqa: borrowed-ref OK if (v == NULL && PyErr_Occurred()) { return NULL; } @@ -371,6 +384,17 @@ fortran_getattr(PyFortranObject *fp, char *name) Py_INCREF(v); return v; } +#else + PyObject *v; + int result = PyDict_GetItemStringRef(fp->dict, name, &v); + if (result == -1) { + return NULL; + } + else if (result == 1) { + return v; + } +#endif + } for (i = 0, j = 1; i < fp->len && (j = strcmp(name, fp->defs[i].name)); i++) @@ -761,30 +785,6 @@ dump_attrs(const PyArrayObject *obj) } #endif -#define SWAPTYPE(a, b, t) \ - { \ - t c; \ - c = (a); \ - (a) = (b); \ - (b) = c; \ - } - -static int -swap_arrays(PyArrayObject *obj1, PyArrayObject *obj2) -{ - PyArrayObject_fields *arr1 = (PyArrayObject_fields *)obj1, - *arr2 = (PyArrayObject_fields *)obj2; - SWAPTYPE(arr1->data, arr2->data, char *); - SWAPTYPE(arr1->nd, arr2->nd, int); - SWAPTYPE(arr1->dimensions, arr2->dimensions, npy_intp *); - SWAPTYPE(arr1->strides, arr2->strides, npy_intp *); - SWAPTYPE(arr1->base, arr2->base, PyObject *); - SWAPTYPE(arr1->descr, arr2->descr, PyArray_Descr *); - SWAPTYPE(arr1->flags, arr2->flags, int); - /* 
SWAPTYPE(arr1->weakreflist,arr2->weakreflist,PyObject*); */ - return 0; -} - #define ARRAY_ISCOMPATIBLE(arr,type_num) \ ((PyArray_ISINTEGER(arr) && PyTypeNum_ISINTEGER(type_num)) || \ (PyArray_ISFLOAT(arr) && PyTypeNum_ISFLOAT(type_num)) || \ @@ -809,7 +809,7 @@ get_elsize(PyObject *obj) { } else if (PyUnicode_Check(obj)) { return PyUnicode_GET_LENGTH(obj); } else if (PySequence_Check(obj)) { - PyObject* fast = PySequence_Fast(obj, "f2py:fortranobject.c:get_elsize"); + PyObject* fast = PySequence_Fast(obj, "f2py:fortranobject.c:get_elsize"); // noqa: borrowed-ref OK if (fast != NULL) { Py_ssize_t i, n = PySequence_Fast_GET_SIZE(fast); int sz, elsize = 0; @@ -863,7 +863,7 @@ ndarray_from_pyobj(const int type_num, * dtype('S'). In addition, there is also dtype('c'), that * appears as dtype('S1') (these have the same type_num value), * but is actually different (.char attribute is either 'S' or - * 'c', respecitely). + * 'c', respectively). * * In Fortran, character arrays and strings are different * concepts. The relation between Fortran types, NumPy dtypes, @@ -1026,32 +1026,38 @@ ndarray_from_pyobj(const int type_num, return NULL; } - /* here we have always intent(in) or intent(inplace) */ + /* + * Here, we have always intent(in) or intent(inplace) + * and require a copy for input. We allow arbitrary casting for + * input, but for inplace we check that the types are equivalent. + */ { - PyArrayObject * retarr = (PyArrayObject *) \ - PyArray_NewFromDescr(&PyArray_Type, descr, PyArray_NDIM(arr), PyArray_DIMS(arr), - NULL, NULL, !(intent & F2PY_INTENT_C), NULL); + int flags = NPY_ARRAY_FORCECAST | NPY_ARRAY_ENSURECOPY + | ((intent & F2PY_INTENT_C) ? 
NPY_ARRAY_IN_ARRAY + : NPY_ARRAY_IN_FARRAY); + if (intent & F2PY_INTENT_INPLACE) { + if (!(ARRAY_ISCOMPATIBLE(arr, type_num)) || + (PyArray_ISSIGNED(arr) && PyTypeNum_ISUNSIGNED(type_num)) || + (PyArray_ISUNSIGNED(arr) && PyTypeNum_ISSIGNED(type_num)) + ) { + sprintf(mess, "failed to initialize intent(inplace) array" + " -- input '%c' not compatible to '%c'", + PyArray_DESCR(arr)->type, descr->type); + PyErr_SetString(PyExc_ValueError, mess); + Py_DECREF(descr); + return NULL; + } + flags |= NPY_ARRAY_WRITEBACKIFCOPY; + } + /* Steals reference to descr */ + PyArrayObject *retarr = (PyArrayObject *)PyArray_FromArray( + arr, descr, flags); if (retarr==NULL) { - Py_DECREF(descr); return NULL; } + arr = retarr; F2PY_REPORT_ON_ARRAY_COPY_FROMARR; - if (PyArray_CopyInto(retarr, arr)) { - Py_DECREF(retarr); - return NULL; - } - if (intent & F2PY_INTENT_INPLACE) { - if (swap_arrays(arr,retarr)) { - Py_DECREF(retarr); - return NULL; /* XXX: set exception */ - } - Py_XDECREF(retarr); - if (intent & F2PY_INTENT_OUT) - Py_INCREF(arr); - } else { - arr = retarr; - } } return arr; } diff --git a/numpy/f2py/symbolic.py b/numpy/f2py/symbolic.py index 67120d79a51e..c768b3c470ed 100644 --- a/numpy/f2py/symbolic.py +++ b/numpy/f2py/symbolic.py @@ -155,14 +155,14 @@ def ewarn(message): class Expr: - """Represents a Fortran expression as a op-data pair. + """Represents a Fortran expression as an op-data pair. Expr instances are hashable and sortable. """ @staticmethod def parse(s, language=Language.C): - """Parse a Fortran expression to a Expr. + """Parse a Fortran expression to an Expr. 
""" return fromstring(s, language=language) @@ -190,7 +190,7 @@ def __init__(self, op, data): # (default is 1) assert isinstance(data, tuple) and len(data) == 2 assert (isinstance(data[0], str) - and data[0][::len(data[0])-1] in ('""', "''", '@@')) + and data[0][::len(data[0]) - 1] in ('""', "''", '@@')) assert isinstance(data[1], (int, str)), data elif op is Op.SYMBOL: # data is any hashable object @@ -310,12 +310,11 @@ def tostring(self, parent_precedence=Precedence.NONE, op = ' + ' if coeff == 1: term = term.tostring(Precedence.SUM, language=language) + elif term == as_number(1): + term = str(coeff) else: - if term == as_number(1): - term = str(coeff) - else: - term = f'{coeff} * ' + term.tostring( - Precedence.PRODUCT, language=language) + term = f'{coeff} * ' + term.tostring( + Precedence.PRODUCT, language=language) if terms: terms.append(op) elif op == ' - ': @@ -570,7 +569,7 @@ def __call__(self, *args, **kwargs): # TODO: implement a method for deciding when __call__ should # return an INDEXING expression. 
return as_apply(self, *map(as_expr, args), - **dict((k, as_expr(v)) for k, v in kwargs.items())) + **{k: as_expr(v) for k, v in kwargs.items()}) def __getitem__(self, index): # Provided to support C indexing operations that .pyf files @@ -636,8 +635,8 @@ def substitute(self, symbols_map): if isinstance(target, Expr): target = target.substitute(symbols_map) args = tuple(a.substitute(symbols_map) for a in args) - kwargs = dict((k, v.substitute(symbols_map)) - for k, v in kwargs.items()) + kwargs = {k: v.substitute(symbols_map) + for k, v in kwargs.items()} return normalize(Expr(self.op, (target, args, kwargs))) if self.op is Op.INDEXING: func = self.data[0] @@ -693,8 +692,8 @@ def traverse(self, visit, *args, **kwargs): if isinstance(obj, Expr) else obj) operands = tuple(operand.traverse(visit, *args, **kwargs) for operand in self.data[1]) - kwoperands = dict((k, v.traverse(visit, *args, **kwargs)) - for k, v in self.data[2].items()) + kwoperands = {k: v.traverse(visit, *args, **kwargs) + for k, v in self.data[2].items()} return normalize(Expr(self.op, (func, operands, kwoperands))) elif self.op is Op.INDEXING: obj = self.data[0] @@ -866,9 +865,9 @@ def normalize(obj): t2, c2 = as_term_coeff(divisor) if isinstance(c1, integer_types) and isinstance(c2, integer_types): g = gcd(c1, c2) - c1, c2 = c1//g, c2//g + c1, c2 = c1 // g, c2 // g else: - c1, c2 = c1/c2, 1 + c1, c2 = c1 / c2, 1 if t1.op is Op.APPLY and t1.data[0] is ArithOp.DIV: numer = t1.data[1][0] * c1 @@ -1011,7 +1010,7 @@ def as_apply(func, *args, **kwargs): """ return Expr(Op.APPLY, (func, tuple(map(as_expr, args)), - dict((k, as_expr(v)) for k, v in kwargs.items()))) + {k: as_expr(v) for k, v in kwargs.items()})) def as_ternary(cond, expr1, expr2): @@ -1084,9 +1083,9 @@ def as_factors(obj): if coeff == 1: return Expr(Op.FACTORS, {term: 1}) return Expr(Op.FACTORS, {term: 1, Expr.number(coeff): 1}) - if ((obj.op is Op.APPLY + if (obj.op is Op.APPLY and obj.data[0] is ArithOp.DIV - and not obj.data[2])): + and 
not obj.data[2]): return Expr(Op.FACTORS, {obj.data[1][0]: 1, obj.data[1][1]: -1}) return Expr(Op.FACTORS, {obj: 1}) raise OpError(f'cannot convert {type(obj)} to terms Expr') @@ -1237,17 +1236,19 @@ def replace_parenthesis(s): i = mn_i j = s.find(right, i) + if j == -1: + raise ValueError(f'Mismatch of {left + right} parenthesis in {s!r}') while s.count(left, i + 1, j) != s.count(right, i + 1, j): j = s.find(right, j + 1) if j == -1: - raise ValueError(f'Mismatch of {left+right} parenthesis in {s!r}') + raise ValueError(f'Mismatch of {left + right} parenthesis in {s!r}') p = {'(': 'ROUND', '[': 'SQUARE', '{': 'CURLY', '(/': 'ROUNDDIV'}[left] k = f'@__f2py_PARENTHESIS_{p}_{COUNTER.__next__()}@' - v = s[i+len(left):j] - r, d = replace_parenthesis(s[j+len(right):]) + v = s[i + len(left):j] + r, d = replace_parenthesis(s[j + len(right):]) d[k] = v return s[:i] + k + r, d @@ -1262,8 +1263,8 @@ def unreplace_parenthesis(s, d): """ for k, v in d.items(): p = _get_parenthesis_kind(k) - left = dict(ROUND='(', SQUARE='[', CURLY='{', ROUNDDIV='(/')[p] - right = dict(ROUND=')', SQUARE=']', CURLY='}', ROUNDDIV='/)')[p] + left = {'ROUND': '(', 'SQUARE': '[', 'CURLY': '{', 'ROUNDDIV': '(/'}[p] + right = {'ROUND': ')', 'SQUARE': ']', 'CURLY': '}', 'ROUNDDIV': '/)'}[p] s = s.replace(k, left + v + right) return s @@ -1425,7 +1426,7 @@ def restore(r): return result # referencing/dereferencing - if r.startswith('*') or r.startswith('&'): + if r.startswith(('*', '&')): op = {'*': Op.DEREF, '&': Op.REF}[r[0]] operand = self.process(restore(r[1:])) return Expr(op, operand) @@ -1479,7 +1480,7 @@ def restore(r): if isinstance(items, Expr): return items if paren in ['ROUNDDIV', 'SQUARE']: - # Expression is a array constructor + # Expression is an array constructor if isinstance(items, Expr): items = (items,) return as_array(items) @@ -1494,8 +1495,8 @@ def restore(r): if not isinstance(args, tuple): args = args, if paren == 'ROUND': - kwargs = dict((a.left, a.right) for a in args - if 
isinstance(a, _Pair)) + kwargs = {a.left: a.right for a in args + if isinstance(a, _Pair)} args = tuple(a for a in args if not isinstance(a, _Pair)) # Warning: this could also be Fortran indexing operation.. return as_apply(target, *args, **kwargs) diff --git a/numpy/f2py/symbolic.pyi b/numpy/f2py/symbolic.pyi new file mode 100644 index 000000000000..94ad1461760b --- /dev/null +++ b/numpy/f2py/symbolic.pyi @@ -0,0 +1,216 @@ +from collections.abc import Callable, Mapping +from enum import Enum +from typing import Any, Generic, Literal as L, Self, overload +from typing_extensions import TypeVar + +__all__ = ["Expr"] + +### + +# Explicit covariance is required here due to the inexpressible read-only attributes. +_OpT_co = TypeVar("_OpT_co", bound=Op, default=Op, covariant=True) +_LanguageT_co = TypeVar("_LanguageT_co", bound=Language, default=Language, covariant=True) +_DataT_co = TypeVar("_DataT_co", default=Any, covariant=True) +_LeftT_co = TypeVar("_LeftT_co", default=Any, covariant=True) +_RightT_co = TypeVar("_RightT_co", default=Any, covariant=True) + +type _RelCOrPy = L["==", "!=", "<", "<=", ">", ">="] +type _RelFortran = L[".eq.", ".ne.", ".lt.", ".le.", ".gt.", ".ge."] + +type _ToExpr = Expr | complex | str +type _ToExprN = _ToExpr | tuple[_ToExprN, ...] +type _NestedString = str | tuple[_NestedString, ...] | list[_NestedString] + +### + +class OpError(Exception): ... +class ExprWarning(UserWarning): ... + +class Language(Enum): + Python = 0 + Fortran = 1 + C = 2 + +class Op(Enum): + INTEGER = 10 + REAL = 12 + COMPLEX = 15 + STRING = 20 + ARRAY = 30 + SYMBOL = 40 + TERNARY = 100 + APPLY = 200 + INDEXING = 210 + CONCAT = 220 + RELATIONAL = 300 + TERMS = 1_000 + FACTORS = 2_000 + REF = 3_000 + DEREF = 3_001 + +class RelOp(Enum): + EQ = 1 + NE = 2 + LT = 3 + LE = 4 + GT = 5 + GE = 6 + + @overload + @classmethod + def fromstring(cls, s: _RelCOrPy, language: L[Language.C, Language.Python] = ...) -> RelOp: ... 
+ @overload + @classmethod + def fromstring(cls, s: _RelFortran, language: L[Language.Fortran]) -> RelOp: ... + + # + @overload + def tostring(self, /, language: L[Language.C, Language.Python] = ...) -> _RelCOrPy: ... + @overload + def tostring(self, /, language: L[Language.Fortran]) -> _RelFortran: ... + +class ArithOp(Enum): + POS = 1 + NEG = 2 + ADD = 3 + SUB = 4 + MUL = 5 + DIV = 6 + POW = 7 + +class Precedence(Enum): + ATOM = 0 + POWER = 1 + UNARY = 2 + PRODUCT = 3 + SUM = 4 + LT = 6 + EQ = 7 + LAND = 11 + LOR = 12 + TERNARY = 13 + ASSIGN = 14 + TUPLE = 15 + NONE = 100 + +class Expr(Generic[_OpT_co, _DataT_co]): + op: _OpT_co # read-only + data: _DataT_co # read-only + + @staticmethod + def parse(s: str, language: Language = ...) -> Expr: ... + + # + def __init__(self, /, op: Op, data: _DataT_co) -> None: ... + + # + def __lt__(self, other: Expr, /) -> bool: ... + def __le__(self, other: Expr, /) -> bool: ... + def __gt__(self, other: Expr, /) -> bool: ... + def __ge__(self, other: Expr, /) -> bool: ... + + # + def __pos__(self, /) -> Self: ... + def __neg__(self, /) -> Expr: ... + + # + def __add__(self, other: Expr, /) -> Expr: ... + def __radd__(self, other: Expr, /) -> Expr: ... + + # + def __sub__(self, other: Expr, /) -> Expr: ... + def __rsub__(self, other: Expr, /) -> Expr: ... + + # + def __mul__(self, other: Expr, /) -> Expr: ... + def __rmul__(self, other: Expr, /) -> Expr: ... + + # + def __pow__(self, other: Expr, /) -> Expr: ... + + # + def __truediv__(self, other: Expr, /) -> Expr: ... + def __rtruediv__(self, other: Expr, /) -> Expr: ... + + # + def __floordiv__(self, other: Expr, /) -> Expr: ... + def __rfloordiv__(self, other: Expr, /) -> Expr: ... + + # + def __call__( + self, + /, + *args: _ToExprN, + **kwargs: _ToExprN, + ) -> Expr[L[Op.APPLY], tuple[Self, tuple[Expr, ...], dict[str, Expr]]]: ... + + # + @overload + def __getitem__[ExprT: Expr](self, index: ExprT | tuple[ExprT], /) -> Expr[L[Op.INDEXING], tuple[Self, ExprT]]: ... 
+ @overload + def __getitem__(self, index: _ToExpr | tuple[_ToExpr], /) -> Expr[L[Op.INDEXING], tuple[Self, Expr]]: ... + + # + def substitute(self, /, symbols_map: Mapping[Expr, Expr]) -> Expr: ... + + # + @overload + def traverse[**Tss](self, /, visit: Callable[Tss, None], *args: Tss.args, **kwargs: Tss.kwargs) -> Expr: ... + @overload + def traverse[**Tss, ExprT: Expr](self, /, visit: Callable[Tss, ExprT], *args: Tss.args, **kwargs: Tss.kwargs) -> ExprT: ... + + # + def contains(self, /, other: Expr) -> bool: ... + + # + def symbols(self, /) -> set[Expr]: ... + def polynomial_atoms(self, /) -> set[Expr]: ... + + # + def linear_solve(self, /, symbol: Expr) -> tuple[Expr, Expr]: ... + + # + def tostring(self, /, parent_precedence: Precedence = ..., language: Language = ...) -> str: ... + +class _Pair(Generic[_LeftT_co, _RightT_co]): + left: _LeftT_co # read-only + right: _RightT_co # read-only + + def __init__(self, /, left: _LeftT_co, right: _RightT_co) -> None: ... + + # + @overload + def substitute[ExprT: Expr](self: _Pair[ExprT, ExprT], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Expr, Expr]: ... + @overload + def substitute[ExprT: Expr](self: _Pair[ExprT, object], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Expr, Any]: ... + @overload + def substitute[ExprT: Expr](self: _Pair[object, ExprT], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Any, Expr]: ... + @overload + def substitute(self, /, symbols_map: Mapping[Expr, Expr]) -> _Pair: ... + +class _FromStringWorker(Generic[_LanguageT_co]): + language: _LanguageT_co # read-only + + original: str | None + quotes_map: dict[str, str] + + @overload + def __init__(self: _FromStringWorker[L[Language.C]], /, language: L[Language.C] = ...) -> None: ... + @overload + def __init__(self, /, language: _LanguageT_co) -> None: ... + + # + def finalize_string(self, /, s: str) -> str: ... + + # + def parse(self, /, inp: str) -> Expr | _Pair: ... 
+ + # + @overload + def process(self, /, s: str, context: str = "expr") -> Expr | _Pair: ... + @overload + def process(self, /, s: list[str], context: str = "expr") -> list[Expr | _Pair]: ... + @overload + def process(self, /, s: tuple[str, ...], context: str = "expr") -> tuple[Expr | _Pair, ...]: ... + @overload + def process(self, /, s: _NestedString, context: str = "expr") -> Any: ... diff --git a/numpy/f2py/tests/__init__.py b/numpy/f2py/tests/__init__.py index 5ecb68077b94..4ed8fdd53f8c 100644 --- a/numpy/f2py/tests/__init__.py +++ b/numpy/f2py/tests/__init__.py @@ -1,6 +1,7 @@ -from numpy.testing import IS_WASM, IS_EDITABLE import pytest +from numpy.testing import IS_EDITABLE, IS_WASM + if IS_WASM: pytest.skip( "WASM/Pyodide does not use or support Fortran", diff --git a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c index f3bffdc1c220..99bfca3322c9 100644 --- a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c +++ b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c @@ -115,15 +115,42 @@ static PyObject *f2py_rout_wrap_attrs(PyObject *capi_self, PyArray_DESCR(arr)->type, PyArray_TYPE(arr), PyArray_ITEMSIZE(arr), - PyArray_DESCR(arr)->alignment, + PyDataType_ALIGNMENT(PyArray_DESCR(arr)), PyArray_FLAGS(arr), PyArray_ITEMSIZE(arr)); } +static char doc_f2py_rout_wrap_resolve_write_back_if_copy[] = "\ +Function signature:\n\ + resolvewritebackifcopy(arr)\n\ + Calls PyArray_ResolveWriteBackIfCopy\n\ +Required arguments:\n" +" arr : input array object\n" +"Return objects:\n" +" return_code : int\n" +; +static PyObject *f2py_rout_wrap_resolve_write_back_if_copy(PyObject *capi_self, + PyObject *capi_args) { + PyObject *arr_capi = Py_None; + PyArrayObject *arr = NULL; + if (!PyArg_ParseTuple(capi_args,"O!|:wrap.resolve_write_back_if_copy", + &PyArray_Type,&arr_capi)) { + return NULL; + } + arr = (PyArrayObject *)arr_capi; + int res = PyArray_ResolveWritebackIfCopy(arr); + if (res < 0) { + return NULL; + } + 
return Py_BuildValue("i",res); +} + static PyMethodDef f2py_module_methods[] = { {"call",f2py_rout_wrap_call,METH_VARARGS,doc_f2py_rout_wrap_call}, {"array_attrs",f2py_rout_wrap_attrs,METH_VARARGS,doc_f2py_rout_wrap_attrs}, + {"resolve_write_back_if_copy",f2py_rout_wrap_resolve_write_back_if_copy, + METH_VARARGS,doc_f2py_rout_wrap_resolve_write_back_if_copy}, {NULL,NULL} }; @@ -214,7 +241,7 @@ PyMODINIT_FUNC PyInit_test_array_from_pyobj_ext(void) { ADDCONST("DEFAULT", NPY_ARRAY_DEFAULT); ADDCONST("UPDATE_ALL", NPY_ARRAY_UPDATE_ALL); -#undef ADDCONST( +#undef ADDCONST if (PyErr_Occurred()) Py_FatalError("can't initialize module wrap"); @@ -223,6 +250,11 @@ PyMODINIT_FUNC PyInit_test_array_from_pyobj_ext(void) { on_exit(f2py_report_on_exit,(void*)"array_from_pyobj.wrap.call"); #endif +#ifdef Py_GIL_DISABLED + // signal whether this module supports running with the GIL disabled + PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); +#endif + return m; } #ifdef __cplusplus diff --git a/numpy/f2py/tests/src/callback/gh26681.f90 b/numpy/f2py/tests/src/callback/gh26681.f90 new file mode 100644 index 000000000000..00c0ec93df05 --- /dev/null +++ b/numpy/f2py/tests/src/callback/gh26681.f90 @@ -0,0 +1,18 @@ +module utils + implicit none + contains + subroutine my_abort(message) + implicit none + character(len=*), intent(in) :: message + !f2py callstatement PyErr_SetString(PyExc_ValueError, message);f2py_success = 0; + !f2py callprotoargument char* + write(0,*) "THIS SHOULD NOT APPEAR" + stop 1 + end subroutine my_abort + + subroutine do_something(message) + !f2py intent(callback, hide) mypy_abort + character(len=*), intent(in) :: message + call mypy_abort(message) + end subroutine do_something +end module utils diff --git a/numpy/f2py/tests/src/crackfortran/common_with_division.f b/numpy/f2py/tests/src/crackfortran/common_with_division.f new file mode 100644 index 000000000000..4aa12cf6dcee --- /dev/null +++ b/numpy/f2py/tests/src/crackfortran/common_with_division.f @@ -0,0 
+1,17 @@ + subroutine common_with_division + integer lmu,lb,lub,lpmin + parameter (lmu=1) + parameter (lb=20) +c crackfortran fails to parse this +c parameter (lub=(lb-1)*lmu+1) +c crackfortran can successfully parse this though + parameter (lub=lb*lmu-lmu+1) + parameter (lpmin=2) + +c crackfortran fails to parse this correctly +c common /mortmp/ ctmp((lub*(lub+1)*(lub+1))/lpmin+1) + + common /mortmp/ ctmp(lub/lpmin+1) + + return + end diff --git a/numpy/f2py/tests/src/crackfortran/gh27697.f90 b/numpy/f2py/tests/src/crackfortran/gh27697.f90 new file mode 100644 index 000000000000..a5eae4e79b25 --- /dev/null +++ b/numpy/f2py/tests/src/crackfortran/gh27697.f90 @@ -0,0 +1,12 @@ +module utils + implicit none + contains + subroutine my_abort(message) + implicit none + character(len=*), intent(in) :: message + !f2py callstatement PyErr_SetString(PyExc_ValueError, message);f2py_success = 0; + !f2py callprotoargument char* + write(0,*) "THIS SHOULD NOT APPEAR" + stop 1 + end subroutine my_abort +end module utils diff --git a/numpy/f2py/tests/src/inplace/foo.f b/numpy/f2py/tests/src/inplace/foo.f new file mode 100644 index 000000000000..ac85112beda8 --- /dev/null +++ b/numpy/f2py/tests/src/inplace/foo.f @@ -0,0 +1,31 @@ +c Test inplace calculations in array c, by squaring all its values. +c As a sanity check on the input, stores the original content in copy. 
+ subroutine inplace(c, m1, m2, copy) + integer*4 m1, m2, i, j + real*4 c(m1, m2), copy(m1, m2) +cf2py intent(inplace) c +cf2py intent(out) copy +cf2py integer, depend(c), intent(hide) :: m1 = len(c) +cf2py integer, depend(c), intent(hide) :: m2 = shape(c, 1) + do i=1,m1 + do j=1,m2 + copy(i, j) = c(i, j) + c(i, j) = c(i, j) ** 2 + end do + end do + end + + subroutine inplace_out(c, m1, m2, copy) + integer*4 m1, m2, i, j + real*4 c(m1, m2), copy(m1, m2) +cf2py intent(inplace, out) c +cf2py intent(out) copy +cf2py integer, depend(c), intent(hide) :: m1 = len(c) +cf2py integer, depend(c), intent(hide) :: m2 = shape(c, 1) + do i=1,m1 + do j=1,m2 + copy(i, j) = c(i, j) + c(i, j) = c(i, j) ** 2 + end do + end do + end diff --git a/numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90 b/numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90 new file mode 100644 index 000000000000..07adce591f35 --- /dev/null +++ b/numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90 @@ -0,0 +1,21 @@ + module mod2 + implicit none + private mod2_func1 + contains + + subroutine mod2_func1() + print*, "mod2_func1" + end subroutine mod2_func1 + + end module mod2 + + module mod1 + implicit none + private :: mod1_func1 + contains + + subroutine mod1_func1() + print*, "mod1_func1" + end subroutine mod1_func1 + + end module mod1 diff --git a/numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90 b/numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90 new file mode 100644 index 000000000000..b7fb95b010a6 --- /dev/null +++ b/numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90 @@ -0,0 +1,21 @@ + module mod2 + implicit none + PUBLIC :: mod2_func1 + contains + + subroutine mod2_func1() + print*, "mod2_func1" + end subroutine mod2_func1 + + end module mod2 + + module mod1 + implicit none + PUBLIC :: mod1_func1 + contains + + subroutine mod1_func1() + print*, "mod1_func1" + end 
subroutine mod1_func1 + + end module mod1 diff --git a/numpy/f2py/tests/src/regression/assignOnlyModule.f90 b/numpy/f2py/tests/src/regression/assignOnlyModule.f90 new file mode 100644 index 000000000000..479ac7980c22 --- /dev/null +++ b/numpy/f2py/tests/src/regression/assignOnlyModule.f90 @@ -0,0 +1,25 @@ + MODULE MOD_TYPES + INTEGER, PARAMETER :: SP = SELECTED_REAL_KIND(6, 37) + INTEGER, PARAMETER :: DP = SELECTED_REAL_KIND(15, 307) + END MODULE +! + MODULE F_GLOBALS + USE MOD_TYPES + IMPLICIT NONE + INTEGER, PARAMETER :: N_MAX = 16 + INTEGER, PARAMETER :: I_MAX = 18 + INTEGER, PARAMETER :: J_MAX = 72 + REAL(SP) :: XREF + END MODULE F_GLOBALS +! + SUBROUTINE DUMMY () +! + USE F_GLOBALS + USE MOD_TYPES + IMPLICIT NONE +! + REAL(SP) :: MINIMAL + MINIMAL = 0.01*XREF + RETURN +! + END SUBROUTINE DUMMY diff --git a/numpy/f2py/tests/src/regression/datonly.f90 b/numpy/f2py/tests/src/regression/datonly.f90 new file mode 100644 index 000000000000..67fc4aca82e3 --- /dev/null +++ b/numpy/f2py/tests/src/regression/datonly.f90 @@ -0,0 +1,17 @@ +module datonly + implicit none + integer, parameter :: max_value = 100 + real, dimension(:), allocatable :: data_array +end module datonly + +module dat + implicit none + integer, parameter :: max_= 1009 +end module dat + +subroutine simple_subroutine(ain, aout) + use dat, only: max_ + integer, intent(in) :: ain + integer, intent(out) :: aout + aout = ain + max_ +end subroutine simple_subroutine diff --git a/numpy/f2py/tests/src/regression/f77fixedform.f95 b/numpy/f2py/tests/src/regression/f77fixedform.f95 new file mode 100644 index 000000000000..e47a13f7e851 --- /dev/null +++ b/numpy/f2py/tests/src/regression/f77fixedform.f95 @@ -0,0 +1,5 @@ +C This is an invalid file, but it does compile with -ffixed-form + subroutine mwe( + & x) + real x + end subroutine mwe diff --git a/numpy/f2py/tests/src/regression/lower_f2py_fortran.f90 b/numpy/f2py/tests/src/regression/lower_f2py_fortran.f90 new file mode 100644 index 000000000000..1c4b8c192b1b 
--- /dev/null +++ b/numpy/f2py/tests/src/regression/lower_f2py_fortran.f90 @@ -0,0 +1,5 @@ +subroutine inquire_next(IU) + IMPLICIT NONE + integer :: IU + !f2py intent(in) IU +end subroutine diff --git a/numpy/f2py/tests/src/regression/mod_derived_types.f90 b/numpy/f2py/tests/src/regression/mod_derived_types.f90 new file mode 100644 index 000000000000..7692c82cf42e --- /dev/null +++ b/numpy/f2py/tests/src/regression/mod_derived_types.f90 @@ -0,0 +1,23 @@ +module mtypes + implicit none + integer, parameter :: value1 = 100 + type :: master_data + integer :: idat = 200 + end type master_data + type(master_data) :: masterdata +end module mtypes + + +subroutine no_type_subroutine(ain, aout) + use mtypes, only: value1 + integer, intent(in) :: ain + integer, intent(out) :: aout + aout = ain + value1 +end subroutine no_type_subroutine + +subroutine type_subroutine(ain, aout) + use mtypes, only: masterdata + integer, intent(in) :: ain + integer, intent(out) :: aout + aout = ain + masterdata%idat +end subroutine type_subroutine \ No newline at end of file diff --git a/numpy/f2py/tests/src/routines/funcfortranname.f b/numpy/f2py/tests/src/routines/funcfortranname.f new file mode 100644 index 000000000000..89be972d3419 --- /dev/null +++ b/numpy/f2py/tests/src/routines/funcfortranname.f @@ -0,0 +1,5 @@ + REAL*8 FUNCTION FUNCFORTRANNAME(A,B) + REAL*8 A, B + FUNCFORTRANNAME = A + B + RETURN + END FUNCTION diff --git a/numpy/f2py/tests/src/routines/funcfortranname.pyf b/numpy/f2py/tests/src/routines/funcfortranname.pyf new file mode 100644 index 000000000000..8730ca6a67ed --- /dev/null +++ b/numpy/f2py/tests/src/routines/funcfortranname.pyf @@ -0,0 +1,11 @@ +python module funcfortranname ! in + interface ! in :funcfortranname + function funcfortranname_default(a,b) ! 
in :funcfortranname:funcfortranname.f + fortranname funcfortranname + real*8 :: a + real*8 :: b + real*8 :: funcfortranname_default + real*8, intent(out) :: funcfortranname + end function funcfortranname_default + end interface +end python module funcfortranname diff --git a/numpy/f2py/tests/src/routines/subrout.f b/numpy/f2py/tests/src/routines/subrout.f new file mode 100644 index 000000000000..1d1eeaeb5a45 --- /dev/null +++ b/numpy/f2py/tests/src/routines/subrout.f @@ -0,0 +1,4 @@ + SUBROUTINE SUBROUT(A,B,C) + REAL*8 A, B, C + C = A + B + END SUBROUTINE diff --git a/numpy/f2py/tests/src/routines/subrout.pyf b/numpy/f2py/tests/src/routines/subrout.pyf new file mode 100644 index 000000000000..e27cbe1c7455 --- /dev/null +++ b/numpy/f2py/tests/src/routines/subrout.pyf @@ -0,0 +1,10 @@ +python module subrout ! in + interface ! in :subrout + subroutine subrout_default(a,b,c) ! in :subrout:subrout.f + fortranname subrout + real*8 :: a + real*8 :: b + real*8, intent(out) :: c + end subroutine subrout_default + end interface +end python module subrout diff --git a/numpy/f2py/tests/test_abstract_interface.py b/numpy/f2py/tests/test_abstract_interface.py index 2c6555aecea1..21e77db3e8d3 100644 --- a/numpy/f2py/tests/test_abstract_interface.py +++ b/numpy/f2py/tests/test_abstract_interface.py @@ -1,10 +1,10 @@ -from pathlib import Path import pytest -import textwrap -from . import util + from numpy.f2py import crackfortran from numpy.testing import IS_WASM +from . 
import util + @pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") @pytest.mark.slow diff --git a/numpy/f2py/tests/test_array_from_pyobj.py b/numpy/f2py/tests/test_array_from_pyobj.py index d5ae235e7d82..9046f2df4fa4 100644 --- a/numpy/f2py/tests/test_array_from_pyobj.py +++ b/numpy/f2py/tests/test_array_from_pyobj.py @@ -1,14 +1,13 @@ -import os -import sys import copy import platform -import pytest +import sys from pathlib import Path -import numpy as np +import pytest -from numpy.testing import assert_, assert_equal +import numpy as np from numpy._core._type_aliases import c_names_dict as _c_names_dict + from . import util wrap = None @@ -22,7 +21,7 @@ def get_testdir(): testroot = Path(__file__).resolve().parent / "src" - return testroot / "array_from_pyobj" + return testroot / "array_from_pyobj" def setup_module(): """ @@ -35,7 +34,7 @@ def setup_module(): src = [ get_testdir() / "wrapmodule.c", ] - wrap = util.build_meson(src, module_name = "test_array_from_pyobj_ext") + wrap = util.build_meson(src, module_name="test_array_from_pyobj_ext") def flags_info(arr): @@ -84,16 +83,13 @@ def __getattr__(self, name): return self.__class__(self.intent_list + [name]) def __str__(self): - return "intent(%s)" % (",".join(self.intent_list)) + return f"intent({','.join(self.intent_list)})" def __repr__(self): - return "Intent(%r)" % (self.intent_list) + return f"Intent({self.intent_list!r})" def is_intent(self, *names): - for name in names: - if name not in self.intent_list: - return False - return True + return all(name in self.intent_list for name in names) def is_intent_exact(self, *names): return len(self.intent_list) == len(names) and self.is_intent(*names) @@ -148,12 +144,12 @@ def is_intent_exact(self, *names): # 32 bit system malloc typically does not provide the alignment required by # 16 byte long double types this means the inout intent cannot be satisfied -# and several tests fail as the alignment flag can be randomly true or fals +# and several tests 
fail as the alignment flag can be randomly true or false # when numpy gains an aligned allocator the tests could be enabled again # -# Furthermore, on macOS ARM64, LONGDOUBLE is an alias for DOUBLE. +# Furthermore, on macOS ARM64 and AIX, LONGDOUBLE is an alias for DOUBLE. if ((np.intp().dtype.itemsize != 4 or np.clongdouble().dtype.alignment <= 8) - and sys.platform != "win32" + and sys.platform not in ["win32", "aix"] and (platform.system(), platform.processor()) != ("Darwin", "arm")): _type_names.extend(["LONGDOUBLE", "CDOUBLE", "CLONGDOUBLE"]) _cast_dict["LONGDOUBLE"] = _cast_dict["LONG"] + [ @@ -194,12 +190,12 @@ def _init(self, name): if self.NAME == 'CHARACTER': info = c_names_dict[self.NAME] - self.type_num = getattr(wrap, 'NPY_STRING') + self.type_num = wrap.NPY_STRING self.elsize = 1 self.dtype = np.dtype('c') elif self.NAME.startswith('STRING'): info = c_names_dict[self.NAME[:6]] - self.type_num = getattr(wrap, 'NPY_STRING') + self.type_num = wrap.NPY_STRING self.elsize = int(self.NAME[6:] or 0) self.dtype = np.dtype(f'S{self.elsize}') else: @@ -296,7 +292,7 @@ def __init__(self, typ, dims, intent, obj): else: self.pyarr = np.array( np.array(obj, dtype=typ.dtypechar).reshape(*dims), - order=self.intent.is_intent("c") and "C" or "F", + order=(self.intent.is_intent("c") and "C") or "F", ) assert self.pyarr.dtype == typ self.pyarr.setflags(write=self.arr.flags["WRITEABLE"]) @@ -325,7 +321,17 @@ def __init__(self, typ, dims, intent, obj): assert self.arr_attr[5][-2:] == self.pyarr_attr[5][-2:], repr(( self.arr_attr[5], self.pyarr_attr[5] )) # descr - assert self.arr_attr[6] == self.pyarr_attr[6], repr(( + arr_flags = self.arr_attr[6] + if intent.is_intent("inplace") and not ( + obj.dtype == typ and obj.flags["F_CONTIGUOUS"] + ): + assert flags2names(8192) == ["WRITEBACKIFCOPY"] + assert (arr_flags & 8192), f"{flags2names(8192)} not set." + arr_flags -= 8192 # Not easy to set on pyarr. 
+ else: + assert not (arr_flags & 8192) + + assert arr_flags == self.pyarr_attr[6], repr(( self.arr_attr[6], self.pyarr_attr[6], flags2names(0 * self.arr_attr[6] - self.pyarr_attr[6]), @@ -655,14 +661,34 @@ def test_inplace(self): assert not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"] shape = obj.shape a = self.array(shape, intent.inplace, obj) + # Spot check that they contain the same information initially. assert obj[1][2] == a.arr[1][2], repr((obj, a.arr)) - a.arr[1][2] = 54 - assert obj[1][2] == a.arr[1][2] == np.array(54, dtype=self.type.dtype) + # If we change a.arr, that will not immediatetly be reflected in obj. + change_item = 54 if self.type.dtype != bool else False + a.arr[1][2] = change_item + assert a.arr[1][2] == np.array(change_item, dtype=self.type.dtype) + assert obj[1][2] != np.array(change_item, dtype=self.type.dtype) + # This is because our implementation uses writebackifcopy. + assert a.arr.flags["WRITEBACKIFCOPY"] + assert a.arr.base is obj + # It has a different organization from obj. + assert a.arr.flags["FORTRAN"] and not a.arr.flags["CONTIGUOUS"] + # If we resolve the write-back, obj will be propertly filled. + code = wrap.resolve_write_back_if_copy(a.arr) + assert code == 1, "no write-back resolution was done!" + assert obj[1][2] == np.array(change_item, dtype=self.type.dtype) + # Check that the original's attributes are not messed up. + assert not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"] + + def test_inplace_f_order(self): + # If the input array is suitable, it will just be used. + obj = np.array(self.num23seq, dtype=self.type.dtype, order="F") + assert obj.flags["FORTRAN"] and not obj.flags["CONTIGUOUS"] + a = self.array(obj.shape, intent.inplace, obj) assert a.arr is obj - assert obj.flags["FORTRAN"] # obj attributes are changed inplace! - assert not obj.flags["CONTIGUOUS"] def test_inplace_from_casttype(self): + # Similar to above, but including casting. 
for t in self.type.cast_types(): if t is self.type: continue @@ -671,12 +697,33 @@ def test_inplace_from_casttype(self): assert obj.dtype.type is not self.type.type assert not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"] shape = obj.shape - a = self.array(shape, intent.inplace, obj) + same_kind = obj.dtype.kind == self.type.dtype.kind + # We avoid pytest.raises here since if the error is not raised, + # we need to do the callback to avoid a runtime warning. + try: + a = self.array(shape, intent.inplace, obj) + except ValueError as exc: + assert not same_kind, "Array not created while having same kind" + assert "not compatible" in str(exc) + return + + if not same_kind: + # Shouldn't happen! Resolve write-back to get right error. + wrap.resolve_write_back_if_copy(a.arr) + assert same_kind, "Array created despite not having same kind" + assert obj[1][2] == a.arr[1][2], repr((obj, a.arr)) - a.arr[1][2] = 54 - assert obj[1][2] == a.arr[1][2] == np.array(54, - dtype=self.type.dtype) - assert a.arr is obj - assert obj.flags["FORTRAN"] # obj attributes changed inplace! - assert not obj.flags["CONTIGUOUS"] - assert obj.dtype.type is self.type.type # obj changed inplace! + change_item = 54 if self.type.dtype != bool else False + a.arr[1][2] = change_item + assert a.arr[1][2] == np.array(change_item, dtype=self.type.dtype) + # Not yet propagated. + assert obj[1][2] != np.array(change_item, dtype=self.type.dtype) + assert a.arr.flags["WRITEBACKIFCOPY"] + assert a.arr.base is obj + # Propagate back to obj. + code = wrap.resolve_write_back_if_copy(a.arr) + assert code == 1, "no write-back resolution was done!" + assert obj[1][2] == np.array(change_item, dtype=self.type.dtype) + # Should not affect attributes. 
+ assert not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"] + assert obj.dtype.type is not self.type.type diff --git a/numpy/f2py/tests/test_assumed_shape.py b/numpy/f2py/tests/test_assumed_shape.py index d4664cf88cbe..cf75644d40ee 100644 --- a/numpy/f2py/tests/test_assumed_shape.py +++ b/numpy/f2py/tests/test_assumed_shape.py @@ -1,7 +1,8 @@ import os -import pytest import tempfile +import pytest + from . import util diff --git a/numpy/f2py/tests/test_block_docstring.py b/numpy/f2py/tests/test_block_docstring.py index 16b5559e8e42..0a35f2e34a7e 100644 --- a/numpy/f2py/tests/test_block_docstring.py +++ b/numpy/f2py/tests/test_block_docstring.py @@ -1,8 +1,8 @@ import sys + import pytest -from . import util -from numpy.testing import IS_PYPY +from . import util @pytest.mark.slow @@ -11,8 +11,6 @@ class TestBlockDocString(util.F2PyTest): @pytest.mark.skipif(sys.platform == "win32", reason="Fails with MinGW64 Gfortran (Issue #9673)") - @pytest.mark.xfail(IS_PYPY, - reason="PyPy cannot modify tp_doc after PyType_Ready") def test_block_docstring(self): expected = "bar : 'i'-array(2,3)\n" assert self.module.block.__doc__ == expected diff --git a/numpy/f2py/tests/test_callback.py b/numpy/f2py/tests/test_callback.py index 8bd6175a3eb9..1560c73d01fc 100644 --- a/numpy/f2py/tests/test_callback.py +++ b/numpy/f2py/tests/test_callback.py @@ -1,26 +1,26 @@ import math -import textwrap +import platform import sys -import pytest +import textwrap import threading -import traceback import time +import traceback + +import pytest import numpy as np -from numpy.testing import IS_PYPY + from . 
import util class TestF77Callback(util.F2PyTest): sources = [util.getpath("tests", "src", "callback", "foo.f")] - @pytest.mark.parametrize("name", "t,t2".split(",")) + @pytest.mark.parametrize("name", ["t", "t2"]) @pytest.mark.slow def test_all(self, name): self.check_function(name) - @pytest.mark.xfail(IS_PYPY, - reason="PyPy cannot modify tp_doc after PyType_Ready") def test_docstring(self): expected = textwrap.dedent("""\ a = t(fun,[fun_extra_args]) @@ -60,7 +60,7 @@ def check_function(self, name): assert r == 6 r = t(lambda a: 5 + a, fun_extra_args=(7, )) assert r == 12 - r = t(lambda a: math.degrees(a), fun_extra_args=(math.pi, )) + r = t(math.degrees, fun_extra_args=(math.pi, )) assert r == 180 r = t(math.degrees, fun_extra_args=(math.pi, )) assert r == 180 @@ -94,7 +94,7 @@ def callback(code): else: return 1 - f = getattr(self.module, "string_callback") + f = self.module.string_callback r = f(callback) assert r == 0 @@ -115,7 +115,7 @@ def callback(cu, lencu): return 3 return 0 - f = getattr(self.module, "string_callback_array") + f = self.module.string_callback_array for cu in [cu1, cu2, cu3]: res = f(callback, cu, cu.size) assert res == 0 @@ -240,7 +240,21 @@ class TestGH25211(util.F2PyTest): def test_gh25211(self): def bar(x): - return x*x + return x * x res = self.module.foo(bar) assert res == 110 + + +@pytest.mark.slow +@pytest.mark.xfail(condition=(platform.system().lower() == 'darwin'), + run=False, + reason="Callback aborts cause CI failures on macOS") +class TestCBFortranCallstatement(util.F2PyTest): + sources = [util.getpath("tests", "src", "callback", "gh26681.f90")] + options = ['--lower'] + + def test_callstatement_fortran(self): + with pytest.raises(ValueError, match='helpme') as exc: + self.module.mypy_abort = self.module.utils.my_abort + self.module.utils.do_something('helpme') diff --git a/numpy/f2py/tests/test_character.py b/numpy/f2py/tests/test_character.py index 50e55e1a91cf..74868a6f09f7 100644 --- a/numpy/f2py/tests/test_character.py 
+++ b/numpy/f2py/tests/test_character.py @@ -1,8 +1,10 @@ -import pytest import textwrap -from numpy.testing import assert_array_equal, assert_equal, assert_raises + +import pytest + import numpy as np from numpy.f2py.tests import util +from numpy.testing import assert_array_equal, assert_equal, assert_raises @pytest.mark.slow @@ -15,7 +17,7 @@ class TestCharacterString(util.F2PyTest): code = '' for length in length_list: fsuffix = length - clength = dict(star='(*)').get(length, length) + clength = {'star': '(*)'}.get(length, length) code += textwrap.dedent(f""" @@ -102,7 +104,7 @@ def test_array_input(self, length): {'1': 'A', '3': 'ABC', 'star': 'ABCDE' * 3}[length], ], dtype='S') - expected = np.array([[c for c in s] for s in a], dtype='u1') + expected = np.array([list(s) for s in a], dtype='u1') assert_array_equal(f(a), expected) @pytest.mark.parametrize("length", length_list) @@ -114,7 +116,7 @@ def test_array_output(self, length): [{'1': 'a', '3': 'abc', 'star': 'abcde' * 3}[length], {'1': 'A', '3': 'ABC', 'star': 'ABCDE' * 3}[length]], dtype='S') - a = np.array([[c for c in s] for s in expected], dtype='u1') + a = np.array([list(s) for s in expected], dtype='u1') assert_array_equal(f(a), expected) @pytest.mark.parametrize("length", length_list) @@ -127,7 +129,7 @@ def test_2d_array_input(self, length): [{'1': 'f', '3': 'fgh', 'star': 'fghij' * 3}[length], {'1': 'F', '3': 'FGH', 'star': 'FGHIJ' * 3}[length]]], dtype='S') - expected = np.array([[[c for c in item] for item in row] for row in a], + expected = np.array([[list(item) for item in row] for row in a], dtype='u1', order='F') assert_array_equal(f(a), expected) @@ -538,13 +540,13 @@ def test_gh4519(self): f = getattr(self.module, self.fprefix + '_gh4519') for x, expected in [ - ('a', dict(shape=(), dtype=np.dtype('S1'))), - ('text', dict(shape=(), dtype=np.dtype('S4'))), + ('a', {'shape': (), 'dtype': np.dtype('S1')}), + ('text', {'shape': (), 'dtype': np.dtype('S4')}), (np.array(['1', '2', '3'], 
dtype='S1'), - dict(shape=(3,), dtype=np.dtype('S1'))), + {'shape': (3,), 'dtype': np.dtype('S1')}), (['1', '2', '34'], - dict(shape=(3,), dtype=np.dtype('S2'))), - (['', ''], dict(shape=(2,), dtype=np.dtype('S1')))]: + {'shape': (3,), 'dtype': np.dtype('S2')}), + (['', ''], {'shape': (2,), 'dtype': np.dtype('S1')})]: r = f(x) for k, v in expected.items(): assert_equal(getattr(r, k), v) @@ -587,7 +589,7 @@ def test_char(self): def test_char_arr(self): for out in (self.module.string_test.strarr, self.module.string_test.strarr77): - expected = (5,7) + expected = (5, 7) assert out.shape == expected expected = '|S12' assert out.dtype == expected @@ -607,7 +609,7 @@ def test_gh24662(self): a = np.array('hi', dtype='S32') self.module.string_inout_optional(a) assert "output string" in a.tobytes().decode() - with pytest.raises(Exception): + with pytest.raises(Exception): # noqa: B017 aa = "Hi" self.module.string_inout_optional(aa) diff --git a/numpy/f2py/tests/test_common.py b/numpy/f2py/tests/test_common.py index 09bd6147f0f3..b9fbd84d52fb 100644 --- a/numpy/f2py/tests/test_common.py +++ b/numpy/f2py/tests/test_common.py @@ -1,7 +1,10 @@ import pytest + import numpy as np + from . import util + @pytest.mark.slow class TestCommonBlock(util.F2PyTest): sources = [util.getpath("tests", "src", "common", "block.f")] diff --git a/numpy/f2py/tests/test_crackfortran.py b/numpy/f2py/tests/test_crackfortran.py index 1caa4147c2d7..c3967cfb967b 100644 --- a/numpy/f2py/tests/test_crackfortran.py +++ b/numpy/f2py/tests/test_crackfortran.py @@ -1,15 +1,16 @@ +import contextlib import importlib -import codecs +import io +import textwrap import time -import unicodedata + import pytest + import numpy as np +from numpy.f2py import crackfortran from numpy.f2py.crackfortran import markinnerspaces, nameargspattern + from . 
import util -from numpy.f2py import crackfortran -import textwrap -import contextlib -import io class TestNoSpace(util.F2PyTest): @@ -66,7 +67,7 @@ def test_nowrap_private_proceedures(self, tmp_path): pyf = crackfortran.crack2fortran(mod) assert 'bar' not in pyf -class TestModuleProcedure(): +class TestModuleProcedure: def test_moduleOperators(self, tmp_path): fpath = util.getpath("tests", "src", "crackfortran", "operators.f90") mod = crackfortran.crackfortran([str(fpath)]) @@ -116,12 +117,16 @@ def incr(x): class TestCrackFortran(util.F2PyTest): # gh-2848: commented lines between parameters in subroutine parameter lists - sources = [util.getpath("tests", "src", "crackfortran", "gh2848.f90")] + sources = [util.getpath("tests", "src", "crackfortran", "gh2848.f90"), + util.getpath("tests", "src", "crackfortran", "common_with_division.f") + ] def test_gh2848(self): r = self.module.gh2848(1, 2) assert r == (1, 2) + def test_common_with_division(self): + assert len(self.module.mortmp.ctmp) == 11 class TestMarkinnerspaces: # gh-14118: markinnerspaces does not handle multiple quotations @@ -261,7 +266,7 @@ def test_eval_scalar(self): assert eval_scalar('123', {}) == '123' assert eval_scalar('12 + 3', {}) == '15' - assert eval_scalar('a + b', dict(a=1, b=2)) == '3' + assert eval_scalar('a + b', {"a": 1, "b": 2}) == '3' assert eval_scalar('"123"', {}) == "'123'" @@ -347,20 +352,20 @@ def test_end_if_comment(self): assert False, f"'crackfortran.crackfortran' raised an exception {exc}" -class TestF77CommonBlockReader(): +class TestF77CommonBlockReader: def test_gh22648(self, tmp_path): fpath = util.getpath("tests", "src", "crackfortran", "gh22648.pyf") with contextlib.redirect_stdout(io.StringIO()) as stdout_f2py: mod = crackfortran.crackfortran([str(fpath)]) assert "Mismatch" not in stdout_f2py.getvalue() -class TestParamEval(): +class TestParamEval: # issue gh-11612, array parameter parsing def test_param_eval_nested(self): v = '(/3.14, 4./)' - g_params = 
dict(kind=crackfortran._kind_func, - selected_int_kind=crackfortran._selected_int_kind_func, - selected_real_kind=crackfortran._selected_real_kind_func) + g_params = {"kind": crackfortran._kind_func, + "selected_int_kind": crackfortran._selected_int_kind_func, + "selected_real_kind": crackfortran._selected_real_kind_func} params = {'dp': 8, 'intparamarray': {1: 3, 2: 5}, 'nested': {1: 1, 2: 2, 3: 3}} dimspec = '(2)' @@ -369,9 +374,9 @@ def test_param_eval_nested(self): def test_param_eval_nonstandard_range(self): v = '(/ 6, 3, 1 /)' - g_params = dict(kind=crackfortran._kind_func, - selected_int_kind=crackfortran._selected_int_kind_func, - selected_real_kind=crackfortran._selected_real_kind_func) + g_params = {"kind": crackfortran._kind_func, + "selected_int_kind": crackfortran._selected_int_kind_func, + "selected_real_kind": crackfortran._selected_real_kind_func} params = {} dimspec = '(-1:1)' ret = crackfortran.param_eval(v, g_params, params, dimspec=dimspec) @@ -379,9 +384,9 @@ def test_param_eval_nonstandard_range(self): def test_param_eval_empty_range(self): v = '6' - g_params = dict(kind=crackfortran._kind_func, - selected_int_kind=crackfortran._selected_int_kind_func, - selected_real_kind=crackfortran._selected_real_kind_func) + g_params = {"kind": crackfortran._kind_func, + "selected_int_kind": crackfortran._selected_int_kind_func, + "selected_real_kind": crackfortran._selected_real_kind_func} params = {} dimspec = '' pytest.raises(ValueError, crackfortran.param_eval, v, g_params, params, @@ -389,19 +394,28 @@ def test_param_eval_empty_range(self): def test_param_eval_non_array_param(self): v = '3.14_dp' - g_params = dict(kind=crackfortran._kind_func, - selected_int_kind=crackfortran._selected_int_kind_func, - selected_real_kind=crackfortran._selected_real_kind_func) + g_params = {"kind": crackfortran._kind_func, + "selected_int_kind": crackfortran._selected_int_kind_func, + "selected_real_kind": crackfortran._selected_real_kind_func} params = {} ret = 
crackfortran.param_eval(v, g_params, params, dimspec=None) assert ret == '3.14_dp' def test_param_eval_too_many_dims(self): v = 'reshape((/ (i, i=1, 250) /), (/5, 10, 5/))' - g_params = dict(kind=crackfortran._kind_func, - selected_int_kind=crackfortran._selected_int_kind_func, - selected_real_kind=crackfortran._selected_real_kind_func) + g_params = {"kind": crackfortran._kind_func, + "selected_int_kind": crackfortran._selected_int_kind_func, + "selected_real_kind": crackfortran._selected_real_kind_func} params = {} dimspec = '(0:4, 3:12, 5)' pytest.raises(ValueError, crackfortran.param_eval, v, g_params, params, dimspec=dimspec) + +@pytest.mark.slow +class TestLowerF2PYDirective(util.F2PyTest): + sources = [util.getpath("tests", "src", "crackfortran", "gh27697.f90")] + options = ['--lower'] + + def test_no_lower_fail(self): + with pytest.raises(ValueError, match='aborting directly') as exc: + self.module.utils.my_abort('aborting directly') diff --git a/numpy/f2py/tests/test_data.py b/numpy/f2py/tests/test_data.py index 5af5c40447d3..0cea5561bd6c 100644 --- a/numpy/f2py/tests/test_data.py +++ b/numpy/f2py/tests/test_data.py @@ -1,9 +1,9 @@ -import os import pytest + import numpy as np +from numpy.f2py.crackfortran import crackfortran from . import util -from numpy.f2py.crackfortran import crackfortran class TestData(util.F2PyTest): @@ -17,9 +17,9 @@ def test_data_stmts(self): assert self.module.cmplxdat.x == 1.5 assert self.module.cmplxdat.y == 2.0 assert self.module.cmplxdat.pi == 3.1415926535897932384626433832795028841971693993751058209749445923078164062 - assert self.module.cmplxdat.medium_ref_index == np.array(1.+0.j) + assert self.module.cmplxdat.medium_ref_index == np.array(1. + 0.j) assert np.all(self.module.cmplxdat.z == np.array([3.5, 7.0])) - assert np.all(self.module.cmplxdat.my_array == np.array([ 1.+2.j, -3.+4.j])) + assert np.all(self.module.cmplxdat.my_array == np.array([ 1. + 2.j, -3. 
+ 4.j])) assert np.all(self.module.cmplxdat.my_real_array == np.array([ 1., 2., 3.])) assert np.all(self.module.cmplxdat.ref_index_one == np.array([13.0 + 21.0j])) assert np.all(self.module.cmplxdat.ref_index_two == np.array([-30.0 + 43.0j])) diff --git a/numpy/f2py/tests/test_docs.py b/numpy/f2py/tests/test_docs.py index 55540a9c7d19..7015af3b2627 100644 --- a/numpy/f2py/tests/test_docs.py +++ b/numpy/f2py/tests/test_docs.py @@ -1,8 +1,12 @@ +from pathlib import Path + import pytest + import numpy as np from numpy.testing import assert_array_equal, assert_equal + from . import util -from pathlib import Path + def get_docdir(): parents = Path(__file__).resolve().parents @@ -18,6 +22,7 @@ def get_docdir(): # Assumes that an editable install is used to run tests return parents[3] / "doc" / "source" / "f2py" / "code" + pytestmark = pytest.mark.skipif( not get_docdir().is_dir(), reason=f"Could not find f2py documentation sources" @@ -34,11 +39,11 @@ class TestDocAdvanced(util.F2PyTest): _path('ftype.f')] def test_asterisk1(self): - foo = getattr(self.module, 'foo1') + foo = self.module.foo1 assert_equal(foo(), b'123456789A12') def test_asterisk2(self): - foo = getattr(self.module, 'foo2') + foo = self.module.foo2 assert_equal(foo(2), b'12') assert_equal(foo(12), b'123456789A12') assert_equal(foo(20), b'123456789A123456789B') @@ -55,5 +60,7 @@ def test_ftype(self): ftype.data.x[1] = 45 assert_array_equal(ftype.data.x, np.array([1, 45, 3], dtype=np.float32)) + # gh-26718 Cleanup for repeated test runs + ftype.data.a = 0 # TODO: implement test methods for other example Fortran codes diff --git a/numpy/f2py/tests/test_f2cmap.py b/numpy/f2py/tests/test_f2cmap.py index 6596ada33a54..a35320ccc18a 100644 --- a/numpy/f2py/tests/test_f2cmap.py +++ b/numpy/f2py/tests/test_f2cmap.py @@ -1,6 +1,8 @@ -from . import util import numpy as np +from . 
import util + + class TestF2Cmap(util.F2PyTest): sources = [ util.getpath("tests", "src", "f2cmap", "isoFortranEnvMap.f90"), diff --git a/numpy/f2py/tests/test_f2py2e.py b/numpy/f2py/tests/test_f2py2e.py index 744049a2422d..90063d474a33 100644 --- a/numpy/f2py/tests/test_f2py2e.py +++ b/numpy/f2py/tests/test_f2py2e.py @@ -1,17 +1,37 @@ -import textwrap, re, sys, subprocess, shlex -from pathlib import Path -from collections import namedtuple +import os import platform +import re +import shlex +import subprocess +import sys +import textwrap +from collections import namedtuple +from pathlib import Path import pytest -from . import util from numpy.f2py.f2py2e import main as f2pycli +from numpy.testing._private.utils import NOGIL_BUILD + +from . import util + +####################### +# F2PY Test utilities # +###################### + +# Tests for CLI commands which call meson will fail if no compilers are present, these are to be skipped + +def compiler_check_f2pycli(): + if not util.has_fortran_compiler(): + pytest.skip("CLI command needs a Fortran compiler") + else: + f2pycli() ######################### # CLI utils and classes # ######################### + PPaths = namedtuple("PPaths", "finp, f90inp, pyf, wrap77, wrap90, cmodf") @@ -49,9 +69,9 @@ def get_io_paths(fname_inp, mname="untitled"): ) -############## -# CLI Fixtures and Tests # -############# +################ +# CLI Fixtures # +################ @pytest.fixture(scope="session") @@ -109,6 +129,9 @@ def f2cmap_f90(tmpdir_factory): fmap.write_text(f2cmap, encoding="ascii") return fn +######### +# Tests # +######### def test_gh22819_cli(capfd, gh22819_cli, monkeypatch): """Check that module names are handled correctly @@ -123,11 +146,10 @@ def test_gh22819_cli(capfd, gh22819_cli, monkeypatch): with util.switchdir(ipath.parent): f2pycli() gen_paths = [item.name for item in ipath.parent.rglob("*") if item.is_file()] - assert "blahmodule.c" not in gen_paths # shouldn't be generated + assert "blahmodule.c" not in 
gen_paths # shouldn't be generated assert "blah-f2pywrappers.f" not in gen_paths assert "test_22819-f2pywrappers.f" in gen_paths assert "test_22819module.c" in gen_paths - assert "Ignoring blah" def test_gh22819_many_pyf(capfd, gh22819_cli, monkeypatch): @@ -198,8 +220,7 @@ def test_gen_pyf_no_overwrite(capfd, hello_world_f90, monkeypatch): assert "Use --overwrite-signature to overwrite" in err -@pytest.mark.skipif((platform.system() != 'Linux') or (sys.version_info <= (3, 12)), - reason='Compiler and 3.12 required') +@pytest.mark.skipif(sys.version_info <= (3, 12), reason="Python 3.12 required") def test_untitled_cli(capfd, hello_world_f90, monkeypatch): """Check that modules are named correctly @@ -208,14 +229,12 @@ def test_untitled_cli(capfd, hello_world_f90, monkeypatch): ipath = Path(hello_world_f90) monkeypatch.setattr(sys, "argv", f"f2py --backend meson -c {ipath}".split()) with util.switchdir(ipath.parent): - f2pycli() + compiler_check_f2pycli() out, _ = capfd.readouterr() assert "untitledmodule.c" in out - -@pytest.mark.skipif((platform.system() != 'Linux') or (sys.version_info <= (3, 12)), reason='Compiler and 3.12 required') -def test_no_py312_distutils_fcompiler(capfd, hello_world_f90, monkeypatch): - """Check that no distutils imports are performed on 3.12 +def test_no_distutils_backend(capfd, hello_world_f90, monkeypatch): + """Check that distutils backend and related options fail CLI :: --fcompiler --help-link --backend distutils """ MNAME = "hi" @@ -225,25 +244,26 @@ def test_no_py312_distutils_fcompiler(capfd, hello_world_f90, monkeypatch): sys, "argv", f"f2py {ipath} -c --fcompiler=gfortran -m {MNAME}".split() ) with util.switchdir(ipath.parent): - f2pycli() + compiler_check_f2pycli() out, _ = capfd.readouterr() assert "--fcompiler cannot be used with meson" in out + monkeypatch.setattr( - sys, "argv", f"f2py --help-link".split() + sys, "argv", ["f2py", "--help-link"] ) - with util.switchdir(ipath.parent): + with pytest.raises(SystemExit): 
f2pycli() out, _ = capfd.readouterr() - assert "Use --dep for meson builds" in out - MNAME = "hi2" # Needs to be different for a new -c + assert "Unknown option --help-link" in out + monkeypatch.setattr( - sys, "argv", f"f2py {ipath} -c -m {MNAME} --backend distutils".split() + sys, "argv", ["f2py", "--backend", "distutils"] ) - with util.switchdir(ipath.parent): + with pytest.raises(SystemExit): + compiler_check_f2pycli() f2pycli() out, _ = capfd.readouterr() - assert "Cannot use distutils backend with Python>=3.12" in out - + assert "'distutils' backend was removed" in out @pytest.mark.xfail def test_f2py_skip(capfd, retreal_f77, monkeypatch): @@ -573,7 +593,7 @@ def test_debugcapi_bld(hello_world_f90, monkeypatch): with util.switchdir(ipath.parent): f2pycli() - cmd_run = shlex.split("python3 -c \"import blah; blah.hi()\"") + cmd_run = shlex.split(f"{sys.executable} -c \"import blah; blah.hi()\"") rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8') eout = ' Hello World\n' eerr = textwrap.dedent("""\ @@ -653,6 +673,25 @@ def test_inclheader(capfd, hello_world_f90, monkeypatch): assert "#include " in ocmr assert "#include " in ocmr +@pytest.mark.skipif((platform.system() != 'Linux'), reason='Compiler required') +def test_cli_obj(capfd, hello_world_f90, monkeypatch): + """Ensures that the extra object can be specified when using meson backend + """ + ipath = Path(hello_world_f90) + mname = "blah" + odir = "tttmp" + obj = "extra.o" + monkeypatch.setattr(sys, "argv", + f'f2py --backend meson --build-dir {odir} -m {mname} -c {obj} {ipath}'.split()) + + with util.switchdir(ipath.parent): + Path(obj).touch() + compiler_check_f2pycli() + with Path(f"{odir}/meson.build").open() as mesonbuild: + mbld = mesonbuild.read() + assert "objects:" in mbld + assert f"'''{obj}'''" in mbld + def test_inclpath(): """Add to the include directories @@ -723,7 +762,7 @@ def test_version(capfd, monkeypatch): CLI :: -v """ - monkeypatch.setattr(sys, "argv", 'f2py 
-v'.split()) + monkeypatch.setattr(sys, "argv", ["f2py", "-v"]) # TODO: f2py2e should not call sys.exit() after printing the version with pytest.raises(SystemExit): f2pycli() @@ -742,16 +781,65 @@ def test_npdistop(hello_world_f90, monkeypatch): with util.switchdir(ipath.parent): f2pycli() - cmd_run = shlex.split("python -c \"import blah; blah.hi()\"") + cmd_run = shlex.split(f"{sys.executable} -c \"import blah; blah.hi()\"") rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8') eout = ' Hello World\n' assert rout.stdout == eout +@pytest.mark.skipif((platform.system() != 'Linux') or sys.version_info <= (3, 12), + reason='Compiler and Python 3.12 or newer required') +def test_no_freethreading_compatible(hello_world_f90, monkeypatch): + """ + CLI :: --no-freethreading-compatible + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} -c --no-freethreading-compatible'.split()) + + with util.switchdir(ipath.parent): + compiler_check_f2pycli() + cmd = f"{sys.executable} -c \"import blah; blah.hi();" + if NOGIL_BUILD: + cmd += "import sys; assert sys._is_gil_enabled() is True\"" + else: + cmd += "\"" + cmd_run = shlex.split(cmd) + rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8') + eout = ' Hello World\n' + assert rout.stdout == eout + if NOGIL_BUILD: + assert "The global interpreter lock (GIL) has been enabled to load module 'blah'" in rout.stderr + assert rout.returncode == 0 + + +@pytest.mark.skipif((platform.system() != 'Linux') or sys.version_info <= (3, 12), + reason='Compiler and Python 3.12 or newer required') +def test_freethreading_compatible(hello_world_f90, monkeypatch): + """ + CLI :: --freethreading_compatible + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} -c --freethreading-compatible'.split()) + + with util.switchdir(ipath.parent): + compiler_check_f2pycli() + cmd = f"{sys.executable} -c \"import blah; blah.hi();" + if NOGIL_BUILD: 
+ cmd += "import sys; assert sys._is_gil_enabled() is False\"" + else: + cmd += "\"" + cmd_run = shlex.split(cmd) + rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8') + eout = ' Hello World\n' + assert rout.stdout == eout + if "LSAN_OPTIONS" not in os.environ: + assert rout.stderr == "" + assert rout.returncode == 0 + + # Numpy distutils flags # TODO: These should be tested separately - def test_npd_fcompiler(): """ CLI :: -c --fcompiler diff --git a/numpy/f2py/tests/test_inplace.py b/numpy/f2py/tests/test_inplace.py new file mode 100644 index 000000000000..35af2a42db6a --- /dev/null +++ b/numpy/f2py/tests/test_inplace.py @@ -0,0 +1,49 @@ + +import pytest + +import numpy as np +from numpy.f2py.tests import util +from numpy.testing import assert_array_equal + + +@pytest.mark.slow +class TestInplace(util.F2PyTest): + sources = [util.getpath("tests", "src", "inplace", "foo.f")] + + @pytest.mark.parametrize("func", ["inplace", "inplace_out"]) + @pytest.mark.parametrize("writeable", ["writeable", "readonly"]) + @pytest.mark.parametrize("view", [ + None, (), (slice(None, 2, None), slice(None, None, 2))]) + @pytest.mark.parametrize("dtype", ["f4", "f8"]) + def test_inplace(self, dtype, view, writeable, func): + # Test inplace modifications of an input array. 
+ a = np.arange(12.0, dtype=dtype).reshape((3, 4)).copy() + a.flags.writeable = writeable == "writeable" + k = a if view is None else a[view] + + ffunc = getattr(self.module, func) + if not a.flags.writeable: + with pytest.raises(ValueError, match="WRITEBACKIFCOPY base is read-only"): + ffunc(k) + return + + ref_k = k + exp_copy = k.copy() + exp_k = k ** 2 + exp_a = a.copy() + exp_a[view or ()] = exp_k + if func == "inplace_out": + kout, copy = ffunc(k) + assert kout is k + else: + copy = ffunc(k) + assert_array_equal(copy, exp_copy) + assert k is ref_k + assert np.allclose(k, exp_k) + assert np.allclose(a, exp_a) + + @pytest.mark.parametrize("func", ["inplace", "inplace_out"]) + def test_inplace_error(self, func): + ffunc = getattr(self.module, func) + with pytest.raises(ValueError, match="input.*not compatible"): + ffunc(np.array([1 + 1j])) diff --git a/numpy/f2py/tests/test_isoc.py b/numpy/f2py/tests/test_isoc.py index 97f71e6c854c..f3450f15fead 100644 --- a/numpy/f2py/tests/test_isoc.py +++ b/numpy/f2py/tests/test_isoc.py @@ -1,8 +1,11 @@ -from . import util -import numpy as np import pytest + +import numpy as np from numpy.testing import assert_allclose +from . 
import util + + class TestISOC(util.F2PyTest): sources = [ util.getpath("tests", "src", "isocintrin", "isoCtests.f90"), @@ -13,26 +16,26 @@ class TestISOC(util.F2PyTest): def test_c_double(self): out = self.module.coddity.c_add(1, 2) exp_out = 3 - assert out == exp_out + assert out == exp_out # gh-9693 def test_bindc_function(self): out = self.module.coddity.wat(1, 20) exp_out = 8 - assert out == exp_out + assert out == exp_out # gh-25207 def test_bindc_kinds(self): out = self.module.coddity.c_add_int64(1, 20) exp_out = 21 - assert out == exp_out + assert out == exp_out # gh-25207 def test_bindc_add_arr(self): - a = np.array([1,2,3]) - b = np.array([1,2,3]) + a = np.array([1, 2, 3]) + b = np.array([1, 2, 3]) out = self.module.coddity.add_arr(a, b) - exp_out = a*2 + exp_out = a * 2 assert_allclose(out, exp_out) diff --git a/numpy/f2py/tests/test_kind.py b/numpy/f2py/tests/test_kind.py index c8cc57ff21c9..c219cc8bfd09 100644 --- a/numpy/f2py/tests/test_kind.py +++ b/numpy/f2py/tests/test_kind.py @@ -1,14 +1,16 @@ +import platform import sys -import os + import pytest -import platform from numpy.f2py.crackfortran import ( _selected_int_kind_func as selected_int_kind, _selected_real_kind_func as selected_real_kind, ) + from . 
import util +IS_PPC_OR_AIX = platform.machine().lower().startswith("ppc") or platform.system() == 'AIX' class TestKind(util.F2PyTest): sources = [util.getpath("tests", "src", "kind", "foo.f90")] @@ -36,7 +38,7 @@ def test_real(self): i ), f"selectedrealkind({i}): expected {selected_real_kind(i)!r} but got {selectedrealkind(i)!r}" - @pytest.mark.xfail(platform.machine().lower().startswith("ppc"), + @pytest.mark.xfail(IS_PPC_OR_AIX, reason="Some PowerPC may not support full IEEE 754 precision") def test_quad_precision(self): """ diff --git a/numpy/f2py/tests/test_mixed.py b/numpy/f2py/tests/test_mixed.py index 49d0ba20c29a..bb3a5e541859 100644 --- a/numpy/f2py/tests/test_mixed.py +++ b/numpy/f2py/tests/test_mixed.py @@ -1,8 +1,7 @@ -import os import textwrap + import pytest -from numpy.testing import IS_PYPY from . import util @@ -19,8 +18,6 @@ def test_all(self): assert self.module.foo_fixed.bar12() == 12 assert self.module.foo_free.bar13() == 13 - @pytest.mark.xfail(IS_PYPY, - reason="PyPy cannot modify tp_doc after PyType_Ready") def test_docstring(self): expected = textwrap.dedent("""\ a = bar11() diff --git a/numpy/f2py/tests/test_modules.py b/numpy/f2py/tests/test_modules.py index 009ae3365cd5..36b6060bcfc7 100644 --- a/numpy/f2py/tests/test_modules.py +++ b/numpy/f2py/tests/test_modules.py @@ -1,15 +1,45 @@ -import pytest import textwrap +import pytest + from . 
import util -from numpy.testing import IS_PYPY + + +@pytest.mark.slow +class TestModuleFilterPublicEntities(util.F2PyTest): + sources = [ + util.getpath( + "tests", "src", "modules", "gh26920", + "two_mods_with_one_public_routine.f90" + ) + ] + # we filter the only public function mod2 + only = ["mod1_func1", ] + + def test_gh26920(self): + # if it compiles and can be loaded, things are fine + pass + + +@pytest.mark.slow +class TestModuleWithoutPublicEntities(util.F2PyTest): + sources = [ + util.getpath( + "tests", "src", "modules", "gh26920", + "two_mods_with_no_public_entities.f90" + ) + ] + only = ["mod1_func1", ] + + def test_gh26920(self): + # if it compiles and can be loaded, things are fine + pass @pytest.mark.slow class TestModuleDocString(util.F2PyTest): sources = [util.getpath("tests", "src", "modules", "module_data_docstring.f90")] - @pytest.mark.xfail(IS_PYPY, reason="PyPy cannot modify tp_doc after PyType_Ready") def test_module_docstring(self): assert self.module.mod.__doc__ == textwrap.dedent( """\ diff --git a/numpy/f2py/tests/test_parameter.py b/numpy/f2py/tests/test_parameter.py index 9c83af174440..513d021002b7 100644 --- a/numpy/f2py/tests/test_parameter.py +++ b/numpy/f2py/tests/test_parameter.py @@ -1,4 +1,3 @@ -import os import pytest import numpy as np @@ -116,8 +115,8 @@ def test_constant_array(self): x = np.arange(3, dtype=np.float64) y = np.arange(5, dtype=np.float64) z = self.module.foo_array(x, y) - assert np.allclose(x, [0.0, 1./10, 2./10]) - assert np.allclose(y, [0.0, 1.*10, 2.*10, 3.*10, 4.*10]) + assert np.allclose(x, [0.0, 1. / 10, 2. / 10]) + assert np.allclose(y, [0.0, 1. * 10, 2. * 10, 3. * 10, 4. 
* 10]) assert np.allclose(z, 19.0) def test_constant_array_any_index(self): @@ -128,4 +127,3 @@ def test_constant_array_any_index(self): def test_constant_array_delims(self): x = self.module.foo_array_delims() assert x == 9 - diff --git a/numpy/f2py/tests/test_pyf_src.py b/numpy/f2py/tests/test_pyf_src.py index f77ded2f31d4..2ecb0fbeb8c8 100644 --- a/numpy/f2py/tests/test_pyf_src.py +++ b/numpy/f2py/tests/test_pyf_src.py @@ -2,7 +2,6 @@ from numpy.f2py._src_pyf import process_str from numpy.testing import assert_equal - pyf_src = """ python module foo <_rd=real,double precision> diff --git a/numpy/f2py/tests/test_quoted_character.py b/numpy/f2py/tests/test_quoted_character.py index 85e83a781e7b..3cbcb3c55b4f 100644 --- a/numpy/f2py/tests/test_quoted_character.py +++ b/numpy/f2py/tests/test_quoted_character.py @@ -2,6 +2,7 @@ """ import sys + import pytest from . import util diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py index 7da62d6cb287..c4636a764914 100644 --- a/numpy/f2py/tests/test_regression.py +++ b/numpy/f2py/tests/test_regression.py @@ -1,4 +1,6 @@ import os +import platform + import pytest import numpy as np @@ -23,6 +25,28 @@ def test_inout(self): assert np.allclose(x, [3, 1, 2]) +class TestDataOnlyMultiModule(util.F2PyTest): + # Check that modules without subroutines work + sources = [util.getpath("tests", "src", "regression", "datonly.f90")] + + @pytest.mark.slow + def test_mdat(self): + assert self.module.datonly.max_value == 100 + assert self.module.dat.max_ == 1009 + int_in = 5 + assert self.module.simple_subroutine(5) == 1014 + + +class TestModuleWithDerivedType(util.F2PyTest): + # Check that modules with derived types work + sources = [util.getpath("tests", "src", "regression", "mod_derived_types.f90")] + + @pytest.mark.slow + def test_mtypes(self): + assert self.module.no_type_subroutine(10) == 110 + assert self.module.type_subroutine(10) == 210 + + class TestNegativeBounds(util.F2PyTest): # Check that 
negative bounds work correctly sources = [util.getpath("tests", "src", "negative_bounds", "issue_20853.f90")] @@ -32,13 +56,15 @@ def test_negbound(self): xvec = np.arange(12) xlow = -6 xhigh = 4 + # Calculate the upper bound, # Keeping the 1 index in mind + def ubound(xl, xh): return xh - xl + 1 rval = self.module.foo(is_=xlow, ie_=xhigh, arr=xvec[:ubound(xlow, xhigh)]) - expval = np.arange(11, dtype = np.float32) + expval = np.arange(11, dtype=np.float32) assert np.allclose(rval, expval) @@ -76,7 +102,7 @@ class TestIncludeFiles(util.F2PyTest): def test_gh25344(self): exp = 7.0 res = self.module.add(3.0, 4.0) - assert exp == res + assert exp == res class TestF77Comments(util.F2PyTest): # Check that comments are stripped from F77 continuation lines @@ -86,15 +112,15 @@ class TestF77Comments(util.F2PyTest): def test_gh26148(self): x1 = np.array(3, dtype=np.int32) x2 = np.array(5, dtype=np.int32) - res=self.module.testsub(x1, x2) - assert(res[0] == 8) - assert(res[1] == 15) + res = self.module.testsub(x1, x2) + assert res[0] == 8 + assert res[1] == 15 @pytest.mark.slow def test_gh26466(self): # Check that comments after PARAMETER directions are stripped - expected = np.arange(1, 11, dtype=np.float32)*2 - res=self.module.testsub2() + expected = np.arange(1, 11, dtype=np.float32) * 2 + res = self.module.testsub2() npt.assert_allclose(expected, res) class TestF90Contiuation(util.F2PyTest): @@ -105,6 +131,57 @@ class TestF90Contiuation(util.F2PyTest): def test_gh26148b(self): x1 = np.array(3, dtype=np.int32) x2 = np.array(5, dtype=np.int32) - res=self.module.testsub(x1, x2) - assert(res[0] == 8) - assert(res[1] == 15) + res = self.module.testsub(x1, x2) + assert res[0] == 8 + assert res[1] == 15 + +class TestLowerF2PYDirectives(util.F2PyTest): + # Check variables are cased correctly + sources = [util.getpath("tests", "src", "regression", "lower_f2py_fortran.f90")] + + @pytest.mark.slow + def test_gh28014(self): + self.module.inquire_next(3) + assert True + 
+@pytest.mark.slow +def test_gh26623(): + # Including libraries with . should not generate an incorrect meson.build + try: + aa = util.build_module( + [util.getpath("tests", "src", "regression", "f90continuation.f90")], + ["-lfoo.bar"], + module_name="Blah", + ) + except RuntimeError as rerr: + assert "lparen got assign" not in str(rerr) + + +@pytest.mark.slow +@pytest.mark.skipif(platform.system() == "Windows", reason='Unsupported on this platform for now') +def test_gh25784(): + # Compile dubious file using passed flags + try: + aa = util.build_module( + [util.getpath("tests", "src", "regression", "f77fixedform.f95")], + options=[ + # Meson will collect and dedup these to pass to fortran_args: + "--f77flags='-ffixed-form -O2'", + "--f90flags=\"-ffixed-form -g\"", + ], + module_name="Blah", + ) + except ImportError as rerr: + assert "unknown_subroutine_" in str(rerr) + + +@pytest.mark.slow +class TestAssignmentOnlyModules(util.F2PyTest): + # Ensure that variables are exposed without functions or subroutines in a module + sources = [util.getpath("tests", "src", "regression", "assignOnlyModule.f90")] + + @pytest.mark.slow + def test_gh27167(self): + assert (self.module.f_globals.n_max == 16) + assert (self.module.f_globals.i_max == 18) + assert (self.module.f_globals.j_max == 72) diff --git a/numpy/f2py/tests/test_return_character.py b/numpy/f2py/tests/test_return_character.py index 078d445a6df6..aae3f0f91671 100644 --- a/numpy/f2py/tests/test_return_character.py +++ b/numpy/f2py/tests/test_return_character.py @@ -1,8 +1,10 @@ +import platform + import pytest from numpy import array + from . 
import util -import platform IS_S390X = platform.machine() == "s390x" @@ -36,11 +38,11 @@ class TestFReturnCharacter(TestReturnCharacter): ] @pytest.mark.xfail(IS_S390X, reason="callback returns ' '") - @pytest.mark.parametrize("name", "t0,t1,t5,s0,s1,s5,ss".split(",")) + @pytest.mark.parametrize("name", ["t0", "t1", "t5", "s0", "s1", "s5", "ss"]) def test_all_f77(self, name): self.check_function(getattr(self.module, name), name) @pytest.mark.xfail(IS_S390X, reason="callback returns ' '") - @pytest.mark.parametrize("name", "t0,t1,t5,ts,s0,s1,s5,ss".split(",")) + @pytest.mark.parametrize("name", ["t0", "t1", "t5", "ts", "s0", "s1", "s5", "ss"]) def test_all_f90(self, name): self.check_function(getattr(self.module.f90_return_char, name), name) diff --git a/numpy/f2py/tests/test_return_complex.py b/numpy/f2py/tests/test_return_complex.py index 17811f5d98f9..aa3f28e679f8 100644 --- a/numpy/f2py/tests/test_return_complex.py +++ b/numpy/f2py/tests/test_return_complex.py @@ -1,6 +1,7 @@ import pytest from numpy import array + from . import util @@ -56,11 +57,11 @@ class TestFReturnComplex(TestReturnComplex): util.getpath("tests", "src", "return_complex", "foo90.f90"), ] - @pytest.mark.parametrize("name", "t0,t8,t16,td,s0,s8,s16,sd".split(",")) + @pytest.mark.parametrize("name", ["t0", "t8", "t16", "td", "s0", "s8", "s16", "sd"]) def test_all_f77(self, name): self.check_function(getattr(self.module, name), name) - @pytest.mark.parametrize("name", "t0,t8,t16,td,s0,s8,s16,sd".split(",")) + @pytest.mark.parametrize("name", ["t0", "t8", "t16", "td", "s0", "s8", "s16", "sd"]) def test_all_f90(self, name): self.check_function(getattr(self.module.f90_return_complex, name), name) diff --git a/numpy/f2py/tests/test_return_integer.py b/numpy/f2py/tests/test_return_integer.py index 428afec4a0ef..50309d5dadaf 100644 --- a/numpy/f2py/tests/test_return_integer.py +++ b/numpy/f2py/tests/test_return_integer.py @@ -1,6 +1,7 @@ import pytest from numpy import array + from . 
import util @@ -28,8 +29,8 @@ def check_function(self, t, tname): pytest.raises(IndexError, t, []) pytest.raises(IndexError, t, ()) - pytest.raises(Exception, t, t) - pytest.raises(Exception, t, {}) + pytest.raises(TypeError, t, t) + pytest.raises(TypeError, t, {}) if tname in ["t8", "s8"]: pytest.raises(OverflowError, t, 100000000000000000000000) @@ -43,12 +44,12 @@ class TestFReturnInteger(TestReturnInteger): ] @pytest.mark.parametrize("name", - "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(",")) + ["t0", "t1", "t2", "t4", "t8", "s0", "s1", "s2", "s4", "s8"]) def test_all_f77(self, name): self.check_function(getattr(self.module, name), name) @pytest.mark.parametrize("name", - "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(",")) + ["t0", "t1", "t2", "t4", "t8", "s0", "s1", "s2", "s4", "s8"]) def test_all_f90(self, name): self.check_function(getattr(self.module.f90_return_integer, name), name) diff --git a/numpy/f2py/tests/test_return_logical.py b/numpy/f2py/tests/test_return_logical.py index 92fb902af4dd..a4a339572366 100644 --- a/numpy/f2py/tests/test_return_logical.py +++ b/numpy/f2py/tests/test_return_logical.py @@ -1,6 +1,7 @@ import pytest from numpy import array + from . 
import util @@ -53,12 +54,12 @@ class TestFReturnLogical(TestReturnLogical): ] @pytest.mark.slow - @pytest.mark.parametrize("name", "t0,t1,t2,t4,s0,s1,s2,s4".split(",")) + @pytest.mark.parametrize("name", ["t0", "t1", "t2", "t4", "s0", "s1", "s2", "s4"]) def test_all_f77(self, name): self.check_function(getattr(self.module, name)) @pytest.mark.slow @pytest.mark.parametrize("name", - "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(",")) + ["t0", "t1", "t2", "t4", "t8", "s0", "s1", "s2", "s4", "s8"]) def test_all_f90(self, name): self.check_function(getattr(self.module.f90_return_logical, name)) diff --git a/numpy/f2py/tests/test_return_real.py b/numpy/f2py/tests/test_return_real.py index d9b316dcc45d..4339657aa013 100644 --- a/numpy/f2py/tests/test_return_real.py +++ b/numpy/f2py/tests/test_return_real.py @@ -1,8 +1,10 @@ import platform + import pytest -import numpy as np from numpy import array +from numpy.testing import IS_64BIT + from . import util @@ -37,8 +39,8 @@ def check_function(self, t, tname): pytest.raises(IndexError, t, []) pytest.raises(IndexError, t, ()) - pytest.raises(Exception, t, t) - pytest.raises(Exception, t, {}) + pytest.raises(TypeError, t, t) + pytest.raises(TypeError, t, {}) try: r = t(10**400) @@ -53,8 +55,7 @@ def check_function(self, t, tname): "but not when run in isolation", ) @pytest.mark.skipif( - np.dtype(np.intp).itemsize < 8, - reason="32-bit builds are buggy" + not IS_64BIT, reason="32-bit builds are buggy" ) class TestCReturnReal(TestReturnReal): suffix = ".pyf" @@ -88,7 +89,7 @@ class TestCReturnReal(TestReturnReal): end python module c_ext_return_real """ - @pytest.mark.parametrize("name", "t4,t8,s4,s8".split(",")) + @pytest.mark.parametrize("name", ["t4", "t8", "s4", "s8"]) def test_all(self, name): self.check_function(getattr(self.module, name), name) @@ -99,10 +100,10 @@ class TestFReturnReal(TestReturnReal): util.getpath("tests", "src", "return_real", "foo90.f90"), ] - @pytest.mark.parametrize("name", 
"t0,t4,t8,td,s0,s4,s8,sd".split(",")) + @pytest.mark.parametrize("name", ["t0", "t4", "t8", "td", "s0", "s4", "s8", "sd"]) def test_all_f77(self, name): self.check_function(getattr(self.module, name), name) - @pytest.mark.parametrize("name", "t0,t4,t8,td,s0,s4,s8,sd".split(",")) + @pytest.mark.parametrize("name", ["t0", "t4", "t8", "td", "s0", "s4", "s8", "sd"]) def test_all_f90(self, name): self.check_function(getattr(self.module.f90_return_real, name), name) diff --git a/numpy/f2py/tests/test_routines.py b/numpy/f2py/tests/test_routines.py new file mode 100644 index 000000000000..01135dd692a6 --- /dev/null +++ b/numpy/f2py/tests/test_routines.py @@ -0,0 +1,29 @@ +import pytest + +from . import util + + +@pytest.mark.slow +class TestRenamedFunc(util.F2PyTest): + sources = [ + util.getpath("tests", "src", "routines", "funcfortranname.f"), + util.getpath("tests", "src", "routines", "funcfortranname.pyf"), + ] + module_name = "funcfortranname" + + def test_gh25799(self): + assert dir(self.module) + assert self.module.funcfortranname_default(200, 12) == 212 + + +@pytest.mark.slow +class TestRenamedSubroutine(util.F2PyTest): + sources = [ + util.getpath("tests", "src", "routines", "subrout.f"), + util.getpath("tests", "src", "routines", "subrout.pyf"), + ] + module_name = "subrout" + + def test_renamed_subroutine(self): + assert dir(self.module) + assert self.module.subrout_default(200, 12) == 212 diff --git a/numpy/f2py/tests/test_semicolon_split.py b/numpy/f2py/tests/test_semicolon_split.py index ab9c093dbb82..2a16b191beba 100644 --- a/numpy/f2py/tests/test_semicolon_split.py +++ b/numpy/f2py/tests/test_semicolon_split.py @@ -1,6 +1,8 @@ import platform + import pytest -import numpy as np + +from numpy.testing import IS_64BIT from . 
import util @@ -11,8 +13,7 @@ "but not when run in isolation", ) @pytest.mark.skipif( - np.dtype(np.intp).itemsize < 8, - reason="32-bit builds are buggy" + not IS_64BIT, reason="32-bit builds are buggy" ) class TestMultiline(util.F2PyTest): suffix = ".pyf" @@ -44,8 +45,7 @@ def test_multiline(self): "but not when run in isolation", ) @pytest.mark.skipif( - np.dtype(np.intp).itemsize < 8, - reason="32-bit builds are buggy" + not IS_64BIT, reason="32-bit builds are buggy" ) @pytest.mark.slow class TestCallstatement(util.F2PyTest): diff --git a/numpy/f2py/tests/test_size.py b/numpy/f2py/tests/test_size.py index bd2c349df585..ac2eaf1413ef 100644 --- a/numpy/f2py/tests/test_size.py +++ b/numpy/f2py/tests/test_size.py @@ -1,5 +1,5 @@ -import os import pytest + import numpy as np from . import util diff --git a/numpy/f2py/tests/test_string.py b/numpy/f2py/tests/test_string.py index 9e937188c930..f484ea3f11a9 100644 --- a/numpy/f2py/tests/test_string.py +++ b/numpy/f2py/tests/test_string.py @@ -1,7 +1,7 @@ -import os import pytest -import textwrap + import numpy as np + from . import util diff --git a/numpy/f2py/tests/test_symbolic.py b/numpy/f2py/tests/test_symbolic.py index 8452783111eb..fbf5abd9aa18 100644 --- a/numpy/f2py/tests/test_symbolic.py +++ b/numpy/f2py/tests/test_symbolic.py @@ -1,34 +1,35 @@ import pytest from numpy.f2py.symbolic import ( - Expr, - Op, ArithOp, + Expr, Language, - as_symbol, - as_number, - as_string, + Op, + as_apply, as_array, as_complex, - as_terms, - as_factors, - eliminate_quotes, - insert_quotes, - fromstring, - as_expr, - as_apply, - as_numer_denom, - as_ternary, - as_ref, as_deref, - normalize, as_eq, - as_ne, - as_lt, + as_expr, + as_factors, + as_ge, as_gt, as_le, - as_ge, + as_lt, + as_ne, + as_number, + as_numer_denom, + as_ref, + as_string, + as_symbol, + as_terms, + as_ternary, + eliminate_quotes, + fromstring, + insert_quotes, + normalize, ) + from . 
import util @@ -492,3 +493,8 @@ def test_polynomial_atoms(self): assert (y(x) + x).polynomial_atoms() == {y(x), x} assert (y(x) * x[y]).polynomial_atoms() == {y(x), x[y]} assert (y(x)**x).polynomial_atoms() == {y(x)} + + def test_unmatched_parenthesis_gh30268(self): + #gh - 30268 + with pytest.raises(ValueError, match=r"Mismatch of \(\) parenthesis"): + Expr.parse("DATA (A, I=1, N", language=Language.Fortran) diff --git a/numpy/f2py/tests/test_value_attrspec.py b/numpy/f2py/tests/test_value_attrspec.py index 3855a6273288..1afae08bfe0e 100644 --- a/numpy/f2py/tests/test_value_attrspec.py +++ b/numpy/f2py/tests/test_value_attrspec.py @@ -1,8 +1,8 @@ -import os import pytest from . import util + class TestValueAttr(util.F2PyTest): sources = [util.getpath("tests", "src", "value_attrspec", "gh21665.f90")] diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py index faedd4cc1597..944b5ae6e084 100644 --- a/numpy/f2py/tests/util.py +++ b/numpy/f2py/tests/util.py @@ -6,25 +6,120 @@ - determining paths to tests """ +import atexit +import concurrent.futures +import contextlib import glob import os -import sys +import shutil import subprocess +import sys import tempfile -import shutil -import atexit -import textwrap -import re +from importlib import import_module +from pathlib import Path + import pytest -import contextlib -import numpy -import concurrent.futures -from pathlib import Path +import numpy from numpy._utils import asunicode -from numpy.testing import temppath, IS_WASM -from importlib import import_module from numpy.f2py._backends._meson import MesonBackend +from numpy.testing import IS_WASM, temppath + +# +# Check if compilers are available at all... 
+# + +def check_language(lang, code_snippet=None): + if sys.platform == "win32": + pytest.skip("No Fortran tests on Windows (Issue #25134)", allow_module_level=True) + tmpdir = tempfile.mkdtemp() + try: + meson_file = os.path.join(tmpdir, "meson.build") + with open(meson_file, "w") as f: + f.write("project('check_compilers')\n") + f.write(f"add_languages('{lang}')\n") + if code_snippet: + f.write(f"{lang}_compiler = meson.get_compiler('{lang}')\n") + f.write(f"{lang}_code = '''{code_snippet}'''\n") + f.write( + f"_have_{lang}_feature =" + f"{lang}_compiler.compiles({lang}_code," + f" name: '{lang} feature check')\n" + ) + try: + runmeson = subprocess.run( + ["meson", "setup", "btmp"], + check=False, + cwd=tmpdir, + capture_output=True, + ) + except subprocess.CalledProcessError: + pytest.skip("meson not present, skipping compiler dependent test", allow_module_level=True) + return runmeson.returncode == 0 + finally: + shutil.rmtree(tmpdir) + + +fortran77_code = ''' +C Example Fortran 77 code + PROGRAM HELLO + PRINT *, 'Hello, Fortran 77!' + END +''' + +fortran90_code = ''' +! Example Fortran 90 code +program hello90 + type :: greeting + character(len=20) :: text + end type greeting + + type(greeting) :: greet + greet%text = 'hello, fortran 90!' 
+ print *, greet%text +end program hello90 +''' + +# Dummy class for caching relevant checks +class CompilerChecker: + def __init__(self): + self.compilers_checked = False + self.has_c = False + self.has_f77 = False + self.has_f90 = False + + def check_compilers(self): + if (not self.compilers_checked) and (not sys.platform == "cygwin"): + with concurrent.futures.ThreadPoolExecutor() as executor: + futures = [ + executor.submit(check_language, "c"), + executor.submit(check_language, "fortran", fortran77_code), + executor.submit(check_language, "fortran", fortran90_code) + ] + + self.has_c = futures[0].result() + self.has_f77 = futures[1].result() + self.has_f90 = futures[2].result() + + self.compilers_checked = True + + +if not IS_WASM: + checker = CompilerChecker() + checker.check_compilers() + +def has_c_compiler(): + return checker.has_c + +def has_f77_compiler(): + return checker.has_f77 + +def has_f90_compiler(): + return checker.has_f90 + +def has_fortran_compiler(): + return (checker.has_f90 and checker.has_f77) + # # Maintaining a temporary module directory @@ -109,19 +204,22 @@ def build_module(source_files, options=[], skip=[], only=[], module_name=None): code = f"import sys; sys.path = {sys.path!r}; import numpy.f2py; numpy.f2py.main()" d = get_module_dir() + # gh-27045 : Skip if no compilers are found + if not has_fortran_compiler(): + pytest.skip("No Fortran compiler available") # Copy files dst_sources = [] f2py_sources = [] for fn in source_files: if not os.path.isfile(fn): - raise RuntimeError("%s is not a file" % fn) + raise RuntimeError(f"{fn} is not a file") dst = os.path.join(d, os.path.basename(fn)) shutil.copyfile(fn, dst) dst_sources.append(dst) base, ext = os.path.splitext(dst) - if ext in (".f90", ".f", ".c", ".pyf"): + if ext in (".f90", ".f95", ".f", ".c", ".pyf"): f2py_sources.append(dst) assert f2py_sources @@ -129,7 +227,11 @@ def build_module(source_files, options=[], skip=[], only=[], module_name=None): # Prepare options if 
module_name is None: module_name = get_temp_module_name() - f2py_opts = ["-c", "-m", module_name] + options + f2py_sources + gil_options = [] + if '--freethreading-compatible' not in options and '--no-freethreading-compatible' not in options: + # default to disabling the GIL if unset in options + gil_options = ['--freethreading-compatible'] + f2py_opts = ["-c", "-m", module_name] + options + gil_options + f2py_sources f2py_opts += ["--backend", "meson"] if skip: f2py_opts += ["skip:"] + skip @@ -146,8 +248,7 @@ def build_module(source_files, options=[], skip=[], only=[], module_name=None): stderr=subprocess.STDOUT) out, err = p.communicate() if p.returncode != 0: - raise RuntimeError("Running f2py failed: %s\n%s" % - (cmd[4:], asunicode(out))) + raise RuntimeError(f"Running f2py failed: {cmd[4:]}\n{asunicode(out)}") finally: os.chdir(cwd) @@ -161,7 +262,7 @@ def build_module(source_files, options=[], skip=[], only=[], module_name=None): # need to change to record how big each module is, rather than # relying on rebase being able to find that from the files. _module_list.extend( - glob.glob(os.path.join(d, "{:s}*".format(module_name))) + glob.glob(os.path.join(d, f"{module_name:s}*")) ) subprocess.check_call( ["/usr/bin/rebase", "--database", "--oblivious", "--verbose"] @@ -195,96 +296,6 @@ def build_code(source_code, module_name=module_name) -# -# Check if compilers are available at all... 
-# - -def check_language(lang, code_snippet=None): - tmpdir = tempfile.mkdtemp() - try: - meson_file = os.path.join(tmpdir, "meson.build") - with open(meson_file, "w") as f: - f.write("project('check_compilers')\n") - f.write(f"add_languages('{lang}')\n") - if code_snippet: - f.write(f"{lang}_compiler = meson.get_compiler('{lang}')\n") - f.write(f"{lang}_code = '''{code_snippet}'''\n") - f.write( - f"_have_{lang}_feature =" - f"{lang}_compiler.compiles({lang}_code," - f" name: '{lang} feature check')\n" - ) - runmeson = subprocess.run( - ["meson", "setup", "btmp"], - check=False, - cwd=tmpdir, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - ) - if runmeson.returncode == 0: - return True - else: - return False - finally: - shutil.rmtree(tmpdir) - return False - -fortran77_code = ''' -C Example Fortran 77 code - PROGRAM HELLO - PRINT *, 'Hello, Fortran 77!' - END -''' - -fortran90_code = ''' -! Example Fortran 90 code -program hello90 - type :: greeting - character(len=20) :: text - end type greeting - - type(greeting) :: greet - greet%text = 'hello, fortran 90!' 
- print *, greet%text -end program hello90 -''' - -# Dummy class for caching relevant checks -class CompilerChecker: - def __init__(self): - self.compilers_checked = False - self.has_c = False - self.has_f77 = False - self.has_f90 = False - - def check_compilers(self): - if (not self.compilers_checked) and (not sys.platform == "cygwin"): - with concurrent.futures.ThreadPoolExecutor() as executor: - futures = [ - executor.submit(check_language, "c"), - executor.submit(check_language, "fortran", fortran77_code), - executor.submit(check_language, "fortran", fortran90_code) - ] - - self.has_c = futures[0].result() - self.has_f77 = futures[1].result() - self.has_f90 = futures[2].result() - - self.compilers_checked = True - -if not IS_WASM: - checker = CompilerChecker() - checker.check_compilers() - -def has_c_compiler(): - return checker.has_c - -def has_f77_compiler(): - return checker.has_f77 - -def has_f90_compiler(): - return checker.has_f90 - # # Building with meson # @@ -303,6 +314,11 @@ def build_meson(source_files, module_name=None, **kwargs): """ Build a module via Meson and import it. 
""" + + # gh-27045 : Skip if no compilers are found + if not has_fortran_compiler(): + pytest.skip("No Fortran compiler available") + build_dir = get_module_dir() if module_name is None: module_name = get_temp_module_name() @@ -327,13 +343,7 @@ def build_meson(source_files, module_name=None, **kwargs): extra_dat=kwargs.get("extra_dat", {}), ) - # Compile the module - # NOTE: Catch-all since without distutils it is hard to determine which - # compiler stack is on the CI - try: - backend.compile() - except: - pytest.skip("Failed to compile module") + backend.compile() # Import the compiled module sys.path.insert(0, f"{build_dir}/{backend.meson_build_dir}") @@ -348,9 +358,9 @@ def build_meson(source_files, module_name=None, **kwargs): class F2PyTest: code = None sources = None - options = [] - skip = [] - only = [] + options = [] # noqa: RUF012 + skip = [] # noqa: RUF012 + only = [] # noqa: RUF012 suffix = ".f" module = None _has_c_compiler = None @@ -360,7 +370,7 @@ class F2PyTest: @property def module_name(self): cls = type(self) - return f'_{cls.__module__.rsplit(".",1)[-1]}_{cls.__name__}_ext_module' + return f'_{cls.__module__.rsplit(".", 1)[-1]}_{cls.__name__}_ext_module' @classmethod def setup_class(cls): @@ -369,12 +379,13 @@ def setup_class(cls): F2PyTest._has_c_compiler = has_c_compiler() F2PyTest._has_f77_compiler = has_f77_compiler() F2PyTest._has_f90_compiler = has_f90_compiler() + F2PyTest._has_fortran_compiler = has_fortran_compiler() def setup_method(self): if self.module is not None: return - codes = self.sources if self.sources else [] + codes = self.sources or [] if self.code: codes.append(self.suffix) @@ -386,7 +397,7 @@ def setup_method(self): pytest.skip("No Fortran 77 compiler available") if needs_f90 and not self._has_f90_compiler: pytest.skip("No Fortran 90 compiler available") - if needs_pyf and not (self._has_f90_compiler or self._has_f77_compiler): + if needs_pyf and not self._has_fortran_compiler: pytest.skip("No Fortran compiler 
available") # Build the module diff --git a/numpy/f2py/use_rules.py b/numpy/f2py/use_rules.py index 808b3dd97ec2..1e06f6c01a39 100644 --- a/numpy/f2py/use_rules.py +++ b/numpy/f2py/use_rules.py @@ -13,10 +13,7 @@ f2py_version = 'See `f2py -v`' -from .auxfuncs import ( - applyrules, dictappend, gentitle, hasnote, outmess -) - +from .auxfuncs import applyrules, dictappend, gentitle, hasnote, outmess usemodule_rules = { 'body': """ @@ -45,7 +42,7 @@ def buildusevars(m, r): ret = {} outmess( - '\t\tBuilding use variable hooks for module "%s" (feature only for F90/F95)...\n' % (m['name'])) + f"\t\tBuilding use variable hooks for module \"{m['name']}\" (feature only for F90/F95)...\n") varsmap = {} revmap = {} if 'map' in r: @@ -55,24 +52,20 @@ def buildusevars(m, r): r['map'][k], k, revmap[r['map'][k]])) else: revmap[r['map'][k]] = k - if 'only' in r and r['only']: + if r.get('only'): for v in r['map'].keys(): if r['map'][v] in m['vars']: if revmap[r['map'][v]] == v: varsmap[v] = r['map'][v] else: - outmess('\t\t\tIgnoring map "%s=>%s". See above.\n' % - (v, r['map'][v])) + outmess(f"\t\t\tIgnoring map \"{v}=>{r['map'][v]}\". See above.\n") else: outmess( - '\t\t\tNo definition for variable "%s=>%s". Skipping.\n' % (v, r['map'][v])) + f"\t\t\tNo definition for variable \"{v}=>{r['map'][v]}\". 
Skipping.\n") else: for v in m['vars'].keys(): - if v in revmap: - varsmap[v] = revmap[v] - else: - varsmap[v] = v + varsmap[v] = revmap.get(v, v) for v in varsmap.keys(): ret = dictappend(ret, buildusevar(v, varsmap[v], m['vars'], m['name'])) return ret @@ -88,9 +81,9 @@ def buildusevar(name, realname, vars, usemodulename): 'usemodulename': usemodulename, 'USEMODULENAME': usemodulename.upper(), 'texname': name.replace('_', '\\_'), - 'begintitle': gentitle('%s=>%s' % (name, realname)), - 'endtitle': gentitle('end of %s=>%s' % (name, realname)), - 'apiname': '#modulename#_use_%s_from_%s' % (realname, usemodulename) + 'begintitle': gentitle(f'{name}=>{realname}'), + 'endtitle': gentitle(f'end of {name}=>{realname}'), + 'apiname': f'#modulename#_use_{realname}_from_{usemodulename}' } nummap = {0: 'Ro', 1: 'Ri', 2: 'Rii', 3: 'Riii', 4: 'Riv', 5: 'Rv', 6: 'Rvi', 7: 'Rvii', 8: 'Rviii', 9: 'Rix'} diff --git a/numpy/f2py/use_rules.pyi b/numpy/f2py/use_rules.pyi new file mode 100644 index 000000000000..58c7f9b5f451 --- /dev/null +++ b/numpy/f2py/use_rules.pyi @@ -0,0 +1,9 @@ +from collections.abc import Mapping +from typing import Any, Final + +__version__: Final[str] = ... +f2py_version: Final = "See `f2py -v`" +usemodule_rules: Final[dict[str, str | list[str]]] = ... + +def buildusevars(m: Mapping[str, object], r: Mapping[str, Mapping[str, object]]) -> dict[str, Any]: ... +def buildusevar(name: str, realname: str, vars: Mapping[str, Mapping[str, object]], usemodulename: str) -> dict[str, Any]: ... diff --git a/numpy/fft/__init__.py b/numpy/fft/__init__.py index 0f6e6373e856..2de162c5ec71 100644 --- a/numpy/fft/__init__.py +++ b/numpy/fft/__init__.py @@ -1,11 +1,11 @@ """ -Discrete Fourier Transform (:mod:`numpy.fft`) -============================================= +Discrete Fourier Transform +========================== .. currentmodule:: numpy.fft The SciPy module `scipy.fft` is a more comprehensive superset -of ``numpy.fft``, which includes only a basic set of routines. 
+of `numpy.fft`, which includes only a basic set of routines. Standard FFTs ------------- @@ -200,16 +200,14 @@ """ -from . import _pocketfft, _helper -# TODO: `numpy.fft.helper`` was deprecated in NumPy 2.0. It should -# be deleted once downstream libraries move to `numpy.fft`. -from . import helper -from ._pocketfft import * +from . import _helper, _pocketfft from ._helper import * +from ._pocketfft import * -__all__ = _pocketfft.__all__.copy() +__all__ = _pocketfft.__all__.copy() # noqa: PLE0605 __all__ += _helper.__all__ from numpy._pytesttester import PytestTester + test = PytestTester(__name__) del PytestTester diff --git a/numpy/fft/__init__.pyi b/numpy/fft/__init__.pyi index 504baff265a6..893a697f1398 100644 --- a/numpy/fft/__init__.pyi +++ b/numpy/fft/__init__.pyi @@ -1,28 +1,38 @@ -from numpy._pytesttester import PytestTester - -from numpy.fft._pocketfft import ( - fft as fft, - ifft as ifft, - rfft as rfft, - irfft as irfft, - hfft as hfft, - ihfft as ihfft, - rfftn as rfftn, - irfftn as irfftn, - rfft2 as rfft2, - irfft2 as irfft2, - fft2 as fft2, - ifft2 as ifft2, - fftn as fftn, - ifftn as ifftn, -) - -from numpy.fft._helper import ( - fftshift as fftshift, - ifftshift as ifftshift, - fftfreq as fftfreq, - rfftfreq as rfftfreq, +from ._helper import fftfreq, fftshift, ifftshift, rfftfreq +from ._pocketfft import ( + fft, + fft2, + fftn, + hfft, + ifft, + ifft2, + ifftn, + ihfft, + irfft, + irfft2, + irfftn, + rfft, + rfft2, + rfftn, ) -__all__: list[str] -test: PytestTester +__all__ = [ + "fft", + "ifft", + "rfft", + "irfft", + "hfft", + "ihfft", + "rfftn", + "irfftn", + "rfft2", + "irfft2", + "fft2", + "ifft2", + "fftn", + "ifftn", + "fftshift", + "ifftshift", + "fftfreq", + "rfftfreq", +] diff --git a/numpy/fft/_helper.py b/numpy/fft/_helper.py index 9f4512f90715..b3598534bcdf 100644 --- a/numpy/fft/_helper.py +++ b/numpy/fft/_helper.py @@ -2,7 +2,7 @@ Discrete Fourier Transforms - _helper.py """ -from numpy._core import integer, empty, arange, 
asarray, roll +from numpy._core import arange, asarray, empty, integer, roll from numpy._core.overrides import array_function_dispatch, set_module # Created by Pearu Peterson, September 2002 @@ -42,6 +42,7 @@ def fftshift(x, axes=None): Examples -------- + >>> import numpy as np >>> freqs = np.fft.fftfreq(10, 0.1) >>> freqs array([ 0., 1., 2., ..., -3., -2., -1.]) @@ -97,6 +98,7 @@ def ifftshift(x, axes=None): Examples -------- + >>> import numpy as np >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) >>> freqs array([[ 0., 1., 2.], @@ -153,7 +155,8 @@ def fftfreq(n, d=1.0, device=None): Examples -------- - >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float) + >>> import numpy as np + >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=np.float64) >>> fourier = np.fft.fft(signal) >>> n = signal.size >>> timestep = 0.1 @@ -166,10 +169,10 @@ def fftfreq(n, d=1.0, device=None): raise ValueError("n should be an integer") val = 1.0 / (n * d) results = empty(n, int, device=device) - N = (n-1)//2 + 1 + N = (n - 1) // 2 + 1 p1 = arange(0, N, dtype=int, device=device) results[:N] = p1 - p2 = arange(-(n//2), 0, dtype=int, device=device) + p2 = arange(-(n // 2), 0, dtype=int, device=device) results[N:] = p2 return results * val @@ -211,7 +214,8 @@ def rfftfreq(n, d=1.0, device=None): Examples -------- - >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=float) + >>> import numpy as np + >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=np.float64) >>> fourier = np.fft.rfft(signal) >>> n = signal.size >>> sample_rate = 100 @@ -225,7 +229,7 @@ def rfftfreq(n, d=1.0, device=None): """ if not isinstance(n, integer_types): raise ValueError("n should be an integer") - val = 1.0/(n*d) - N = n//2 + 1 + val = 1.0 / (n * d) + N = n // 2 + 1 results = arange(0, N, dtype=int, device=device) return results * val diff --git a/numpy/fft/_helper.pyi b/numpy/fft/_helper.pyi index a3c17fc675e7..05603a2046c3 100644 --- a/numpy/fft/_helper.pyi +++ 
b/numpy/fft/_helper.pyi @@ -1,51 +1,134 @@ -from typing import Any, TypeVar, overload, Literal as L +from typing import Any, Final, Literal as L, overload -from numpy import generic, integer, floating, complexfloating -from numpy._typing import ( - NDArray, - ArrayLike, - _ShapeLike, - _ArrayLike, - _ArrayLikeFloat_co, - _ArrayLikeComplex_co, -) +import numpy as np +from numpy._typing import ArrayLike, NDArray, _ArrayLike, _Shape, _ShapeLike -_SCT = TypeVar("_SCT", bound=generic) +__all__ = ["fftfreq", "fftshift", "ifftshift", "rfftfreq"] -__all__: list[str] +### + +type _Device = L["cpu"] + +type _IntLike = int | np.integer + +type _AsFloat64 = np.float64 | np.float32 | np.float16 | np.integer | np.bool +type _AsComplex128 = np.complex128 | np.complex64 +type _Inexact80 = np.longdouble | np.clongdouble + +type _Array[ShapeT: _Shape, ScalarT: np.generic] = np.ndarray[ShapeT, np.dtype[ScalarT]] +type _1D = tuple[int] + +### + +integer_types: Final[tuple[type[int], type[np.integer]]] = ... @overload -def fftshift(x: _ArrayLike[_SCT], axes: None | _ShapeLike = ...) -> NDArray[_SCT]: ... +def fftshift[ScalarT: np.generic](x: _ArrayLike[ScalarT], axes: _ShapeLike | None = None) -> NDArray[ScalarT]: ... @overload -def fftshift(x: ArrayLike, axes: None | _ShapeLike = ...) -> NDArray[Any]: ... +def fftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... +# @overload -def ifftshift(x: _ArrayLike[_SCT], axes: None | _ShapeLike = ...) -> NDArray[_SCT]: ... +def ifftshift[ScalarT: np.generic](x: _ArrayLike[ScalarT], axes: _ShapeLike | None = None) -> NDArray[ScalarT]: ... @overload -def ifftshift(x: ArrayLike, axes: None | _ShapeLike = ...) -> NDArray[Any]: ... +def ifftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... -@overload +# keep in sync with `rfftfreq` below +@overload # 0d +f64 (default) def fftfreq( - n: int | integer[Any], - d: _ArrayLikeFloat_co = ..., - device: None | L["cpu"] = ..., -) -> NDArray[floating[Any]]: ... 
-@overload + n: _IntLike, + d: _AsFloat64 | float = 1.0, + device: _Device | None = None, +) -> _Array[_1D, np.float64]: ... +@overload # 0d c64 | c128 +def fftfreq( + n: _IntLike, + d: _AsComplex128, + device: _Device | None = None, +) -> _Array[_1D, np.complex128]: ... +@overload # 0d +complex def fftfreq( - n: int | integer[Any], - d: _ArrayLikeComplex_co = ..., - device: None | L["cpu"] = ..., -) -> NDArray[complexfloating[Any, Any]]: ... + n: _IntLike, + d: complex, + device: _Device | None = None, +) -> _Array[_1D, np.complex128 | Any]: ... +@overload # 0d T: f80 | c160 +def fftfreq[ScalarT: _Inexact80]( + n: _IntLike, + d: ScalarT, + device: _Device | None = None, +) -> _Array[_1D, ScalarT]: ... +@overload # nd +f64 +def fftfreq[ShapeT: _Shape]( + n: _IntLike, + d: _Array[ShapeT, _AsFloat64], + device: _Device | None = None, +) -> _Array[ShapeT, np.float64]: ... +@overload # nd c64 | c128 +def fftfreq[ShapeT: _Shape]( + n: _IntLike, + d: _Array[ShapeT, _AsComplex128], + device: _Device | None = None, +) -> _Array[ShapeT, np.complex128]: ... +@overload # nd T: f80 | c160 +def fftfreq[ShapeT: _Shape, LongDoubleT: _Inexact80]( + n: _IntLike, + d: _Array[ShapeT, LongDoubleT], + device: _Device | None = None, +) -> _Array[ShapeT, LongDoubleT]: ... +@overload # nd +complex (fallback) +def fftfreq[ShapeT: _Shape]( + n: _IntLike, + d: _Array[ShapeT, np.number | np.bool], + device: _Device | None = None, +) -> _Array[ShapeT, Any]: ... -@overload +# keep in sync with `fftfreq` above +@overload # 0d +f64 (default) def rfftfreq( - n: int | integer[Any], - d: _ArrayLikeFloat_co = ..., - device: None | L["cpu"] = ..., -) -> NDArray[floating[Any]]: ... -@overload + n: _IntLike, + d: _AsFloat64 | float = 1.0, + device: _Device | None = None, +) -> _Array[_1D, np.float64]: ... +@overload # 0d c64 | c128 +def rfftfreq( + n: _IntLike, + d: _AsComplex128, + device: _Device | None = None, +) -> _Array[_1D, np.complex128]: ... 
+@overload # 0d +complex def rfftfreq( - n: int | integer[Any], - d: _ArrayLikeComplex_co = ..., - device: None | L["cpu"] = ..., -) -> NDArray[complexfloating[Any, Any]]: ... + n: _IntLike, + d: complex, + device: _Device | None = None, +) -> _Array[_1D, np.complex128 | Any]: ... +@overload # 0d T: f80 | c160 +def rfftfreq[LongDoubleT: _Inexact80]( + n: _IntLike, + d: LongDoubleT, + device: _Device | None = None, +) -> _Array[_1D, LongDoubleT]: ... +@overload # nd +f64 +def rfftfreq[ShapeT: _Shape]( + n: _IntLike, + d: _Array[ShapeT, _AsFloat64], + device: _Device | None = None, +) -> _Array[ShapeT, np.float64]: ... +@overload # nd c64 | c128 +def rfftfreq[ShapeT: _Shape]( + n: _IntLike, + d: _Array[ShapeT, _AsComplex128], + device: _Device | None = None, +) -> _Array[ShapeT, np.complex128]: ... +@overload # nd T: f80 | c160 +def rfftfreq[ShapeT: _Shape, LongDoubleT: _Inexact80]( + n: _IntLike, + d: _Array[ShapeT, LongDoubleT], + device: _Device | None = None, +) -> _Array[ShapeT, LongDoubleT]: ... +@overload # nd +complex (fallback) +def rfftfreq[ShapeT: _Shape]( + n: _IntLike, + d: _Array[ShapeT, np.number | np.bool], + device: _Device | None = None, +) -> _Array[ShapeT, Any]: ... diff --git a/numpy/fft/_pocketfft.py b/numpy/fft/_pocketfft.py index 5972a346de20..90de21607ad2 100644 --- a/numpy/fft/_pocketfft.py +++ b/numpy/fft/_pocketfft.py @@ -33,12 +33,19 @@ import functools import warnings +from numpy._core import ( + asarray, + conjugate, + empty_like, + overrides, + reciprocal, + result_type, + sqrt, + take, +) from numpy.lib.array_utils import normalize_axis_index -from numpy._core import (asarray, empty, zeros, swapaxes, result_type, - conjugate, take, sqrt, reciprocal) -from . import _pocketfft_umath as pfu -from numpy._core import overrides +from . 
import _pocketfft_umath as pfu array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy.fft') @@ -85,8 +92,8 @@ def _raw_fft(a, n, axis, is_real, is_forward, norm, out=None): out_dtype = real_dtype else: # Others, complex output. out_dtype = result_type(a.dtype, 1j) - out = empty(a.shape[:axis] + (n_out,) + a.shape[axis+1:], - dtype=out_dtype) + out = empty_like(a, shape=a.shape[:axis] + (n_out,) + a.shape[axis + 1:], + dtype=out_dtype) elif ((shape := getattr(out, "shape", None)) is not None and (len(shape) != a.ndim or shape[axis] != n_out)): raise ValueError("output array has wrong shape.") @@ -117,7 +124,7 @@ def fft(a, n=None, axis=-1, norm=None, out=None): This function computes the one-dimensional *n*-point discrete Fourier Transform (DFT) with the efficient Fast Fourier Transform (FFT) - algorithm [CT]. + algorithm [CT]_. Parameters ---------- @@ -132,8 +139,6 @@ def fft(a, n=None, axis=-1, norm=None, out=None): Axis over which to compute the FFT. If not given, the last axis is used. norm : {"backward", "ortho", "forward"}, optional - .. versionadded:: 1.10.0 - Normalization mode (see `numpy.fft`). Default is "backward". Indicates which direction of the forward/backward pair of transforms is scaled and with what normalization factor. @@ -185,6 +190,7 @@ def fft(a, n=None, axis=-1, norm=None, out=None): Examples -------- + >>> import numpy as np >>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8)) array([-2.33486982e-16+1.14423775e-17j, 8.00000000e+00-1.25557246e-15j, 2.33486982e-16+2.33486982e-16j, 0.00000000e+00+1.22464680e-16j, @@ -199,8 +205,7 @@ def fft(a, n=None, axis=-1, norm=None, out=None): >>> t = np.arange(256) >>> sp = np.fft.fft(np.sin(t)) >>> freq = np.fft.fftfreq(t.shape[-1]) - >>> plt.plot(freq, sp.real, freq, sp.imag) - [, ] + >>> _ = plt.plot(freq, sp.real, freq, sp.imag) >>> plt.show() """ @@ -248,8 +253,6 @@ def ifft(a, n=None, axis=-1, norm=None, out=None): Axis over which to compute the inverse DFT. 
If not given, the last axis is used. norm : {"backward", "ortho", "forward"}, optional - .. versionadded:: 1.10.0 - Normalization mode (see `numpy.fft`). Default is "backward". Indicates which direction of the forward/backward pair of transforms is scaled and with what normalization factor. @@ -291,6 +294,7 @@ def ifft(a, n=None, axis=-1, norm=None, out=None): Examples -------- + >>> import numpy as np >>> np.fft.ifft([0, 4, 0, 0]) array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j]) # may vary @@ -298,7 +302,7 @@ def ifft(a, n=None, axis=-1, norm=None, out=None): >>> import matplotlib.pyplot as plt >>> t = np.arange(400) - >>> n = np.zeros((400,), dtype=complex) + >>> n = np.zeros((400,), dtype=np.complex128) >>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,))) >>> s = np.fft.ifft(n) >>> plt.plot(t, s.real, label='real') @@ -339,8 +343,6 @@ def rfft(a, n=None, axis=-1, norm=None, out=None): Axis over which to compute the FFT. If not given, the last axis is used. norm : {"backward", "ortho", "forward"}, optional - .. versionadded:: 1.10.0 - Normalization mode (see `numpy.fft`). Default is "backward". Indicates which direction of the forward/backward pair of transforms is scaled and with what normalization factor. @@ -398,6 +400,7 @@ def rfft(a, n=None, axis=-1, norm=None, out=None): Examples -------- + >>> import numpy as np >>> np.fft.fft([0, 1, 0, 0]) array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j]) # may vary >>> np.fft.rfft([0, 1, 0, 0]) @@ -446,8 +449,6 @@ def irfft(a, n=None, axis=-1, norm=None, out=None): Axis over which to compute the inverse FFT. If not given, the last axis is used. norm : {"backward", "ortho", "forward"}, optional - .. versionadded:: 1.10.0 - Normalization mode (see `numpy.fft`). Default is "backward". Indicates which direction of the forward/backward pair of transforms is scaled and with what normalization factor. 
@@ -506,6 +507,7 @@ def irfft(a, n=None, axis=-1, norm=None, out=None): Examples -------- + >>> import numpy as np >>> np.fft.ifft([1, -1j, -1, 1j]) array([0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]) # may vary >>> np.fft.irfft([1, -1j, -1]) @@ -545,8 +547,6 @@ def hfft(a, n=None, axis=-1, norm=None, out=None): Axis over which to compute the FFT. If not given, the last axis is used. norm : {"backward", "ortho", "forward"}, optional - .. versionadded:: 1.10.0 - Normalization mode (see `numpy.fft`). Default is "backward". Indicates which direction of the forward/backward pair of transforms is scaled and with what normalization factor. @@ -601,6 +601,7 @@ def hfft(a, n=None, axis=-1, norm=None, out=None): Examples -------- + >>> import numpy as np >>> signal = np.array([1, 2, 3, 4, 3, 2]) >>> np.fft.fft(signal) array([15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j]) # may vary @@ -624,7 +625,7 @@ def hfft(a, n=None, axis=-1, norm=None, out=None): if n is None: n = (a.shape[axis] - 1) * 2 new_norm = _swap_direction(norm) - output = irfft(conjugate(a), n, axis, norm=new_norm, out=None) + output = irfft(conjugate(a), n, axis, norm=new_norm, out=out) return output @@ -647,8 +648,6 @@ def ihfft(a, n=None, axis=-1, norm=None, out=None): Axis over which to compute the inverse FFT. If not given, the last axis is used. norm : {"backward", "ortho", "forward"}, optional - .. versionadded:: 1.10.0 - Normalization mode (see `numpy.fft`). Default is "backward". Indicates which direction of the forward/backward pair of transforms is scaled and with what normalization factor. @@ -686,6 +685,7 @@ def ihfft(a, n=None, axis=-1, norm=None, out=None): Examples -------- + >>> import numpy as np >>> spectrum = np.array([ 15, -4, 0, -1, 0, -4]) >>> np.fft.ifft(spectrum) array([1.+0.j, 2.+0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.+0.j]) # may vary @@ -802,8 +802,6 @@ def fftn(a, s=None, axes=None, norm=None, out=None): must be explicitly specified too. norm : {"backward", "ortho", "forward"}, optional - .. 
versionadded:: 1.10.0 - Normalization mode (see `numpy.fft`). Default is "backward". Indicates which direction of the forward/backward pair of transforms is scaled and with what normalization factor. @@ -855,6 +853,7 @@ def fftn(a, s=None, axes=None, norm=None, out=None): Examples -------- + >>> import numpy as np >>> a = np.mgrid[:3, :3, :3][0] >>> np.fft.fftn(a, axes=(1, 2)) array([[[ 0.+0.j, 0.+0.j, 0.+0.j], # may vary @@ -944,8 +943,6 @@ def ifftn(a, s=None, axes=None, norm=None, out=None): must be explicitly specified too. norm : {"backward", "ortho", "forward"}, optional - .. versionadded:: 1.10.0 - Normalization mode (see `numpy.fft`). Default is "backward". Indicates which direction of the forward/backward pair of transforms is scaled and with what normalization factor. @@ -996,6 +993,7 @@ def ifftn(a, s=None, axes=None, norm=None, out=None): Examples -------- + >>> import numpy as np >>> a = np.eye(4) >>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,)) array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary @@ -1007,7 +1005,7 @@ def ifftn(a, s=None, axes=None, norm=None, out=None): Create and plot an image with band-limited frequency content: >>> import matplotlib.pyplot as plt - >>> n = np.zeros((200,200), dtype=complex) + >>> n = np.zeros((200,200), dtype=np.complex128) >>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20))) >>> im = np.fft.ifftn(n).real >>> plt.imshow(im) @@ -1069,8 +1067,6 @@ def fft2(a, s=None, axes=(-2, -1), norm=None, out=None): must not be ``None``. norm : {"backward", "ortho", "forward"}, optional - .. versionadded:: 1.10.0 - Normalization mode (see `numpy.fft`). Default is "backward". Indicates which direction of the forward/backward pair of transforms is scaled and with what normalization factor. @@ -1127,6 +1123,7 @@ def fft2(a, s=None, axes=(-2, -1), norm=None, out=None): Examples -------- + >>> import numpy as np >>> a = np.mgrid[:5, :5][0] >>> np.fft.fft2(a) array([[ 50. +0.j , 0. +0.j , 0. 
+0.j , # may vary @@ -1202,8 +1199,6 @@ def ifft2(a, s=None, axes=(-2, -1), norm=None, out=None): must not be ``None``. norm : {"backward", "ortho", "forward"}, optional - .. versionadded:: 1.10.0 - Normalization mode (see `numpy.fft`). Default is "backward". Indicates which direction of the forward/backward pair of transforms is scaled and with what normalization factor. @@ -1256,6 +1251,7 @@ def ifft2(a, s=None, axes=(-2, -1), norm=None, out=None): Examples -------- + >>> import numpy as np >>> a = 4 * np.eye(4) >>> np.fft.ifft2(a) array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary @@ -1264,7 +1260,7 @@ def ifft2(a, s=None, axes=(-2, -1), norm=None, out=None): [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]]) """ - return _raw_fftnd(a, s, axes, ifft, norm, out=None) + return _raw_fftnd(a, s, axes, ifft, norm, out=out) @array_function_dispatch(_fftn_dispatcher) @@ -1318,8 +1314,6 @@ def rfftn(a, s=None, axes=None, norm=None, out=None): must be explicitly specified too. norm : {"backward", "ortho", "forward"}, optional - .. versionadded:: 1.10.0 - Normalization mode (see `numpy.fft`). Default is "backward". Indicates which direction of the forward/backward pair of transforms is scaled and with what normalization factor. @@ -1373,6 +1367,7 @@ def rfftn(a, s=None, axes=None, norm=None, out=None): Examples -------- + >>> import numpy as np >>> a = np.ones((2, 2, 2)) >>> np.fft.rfftn(a) array([[[8.+0.j, 0.+0.j], # may vary @@ -1390,7 +1385,7 @@ def rfftn(a, s=None, axes=None, norm=None, out=None): a = asarray(a) s, axes = _cook_nd_args(a, s, axes) a = rfft(a, s[-1], axes[-1], norm, out=out) - for ii in range(len(axes)-1): + for ii in range(len(axes) - 2, -1, -1): a = fft(a, s[ii], axes[ii], norm, out=out) return a @@ -1431,8 +1426,6 @@ def rfft2(a, s=None, axes=(-2, -1), norm=None, out=None): must not be ``None``. norm : {"backward", "ortho", "forward"}, optional - .. versionadded:: 1.10.0 - Normalization mode (see `numpy.fft`). Default is "backward". 
Indicates which direction of the forward/backward pair of transforms is scaled and with what normalization factor. @@ -1465,6 +1458,7 @@ def rfft2(a, s=None, axes=(-2, -1), norm=None, out=None): Examples -------- + >>> import numpy as np >>> a = np.mgrid[:5, :5][0] >>> np.fft.rfft2(a) array([[ 50. +0.j , 0. +0.j , 0. +0.j ], @@ -1536,8 +1530,6 @@ def irfftn(a, s=None, axes=None, norm=None, out=None): must be explicitly specified too. norm : {"backward", "ortho", "forward"}, optional - .. versionadded:: 1.10.0 - Normalization mode (see `numpy.fft`). Default is "backward". Indicates which direction of the forward/backward pair of transforms is scaled and with what normalization factor. @@ -1597,6 +1589,7 @@ def irfftn(a, s=None, axes=None, norm=None, out=None): Examples -------- + >>> import numpy as np >>> a = np.zeros((3, 2, 2)) >>> a[0, 0, 0] = 3 * 2 * 2 >>> np.fft.irfftn(a) @@ -1610,7 +1603,7 @@ def irfftn(a, s=None, axes=None, norm=None, out=None): """ a = asarray(a) s, axes = _cook_nd_args(a, s, axes, invreal=1) - for ii in range(len(axes)-1): + for ii in range(len(axes) - 1): a = ifft(a, s[ii], axes[ii], norm) a = irfft(a, s[-1], axes[-1], norm, out=out) return a @@ -1653,8 +1646,6 @@ def irfft2(a, s=None, axes=(-2, -1), norm=None, out=None): must not be ``None``. norm : {"backward", "ortho", "forward"}, optional - .. versionadded:: 1.10.0 - Normalization mode (see `numpy.fft`). Default is "backward". Indicates which direction of the forward/backward pair of transforms is scaled and with what normalization factor. 
@@ -1689,6 +1680,7 @@ def irfft2(a, s=None, axes=(-2, -1), norm=None, out=None): Examples -------- + >>> import numpy as np >>> a = np.mgrid[:5, :5][0] >>> A = np.fft.rfft2(a) >>> np.fft.irfft2(A, s=a.shape) @@ -1698,4 +1690,4 @@ def irfft2(a, s=None, axes=(-2, -1), norm=None, out=None): [3., 3., 3., 3., 3.], [4., 4., 4., 4., 4.]]) """ - return irfftn(a, s, axes, norm, out=None) + return irfftn(a, s, axes, norm, out=out) diff --git a/numpy/fft/_pocketfft.pyi b/numpy/fft/_pocketfft.pyi index 7f088572efe8..d34404edb149 100644 --- a/numpy/fft/_pocketfft.pyi +++ b/numpy/fft/_pocketfft.pyi @@ -4,119 +4,134 @@ from typing import Literal as L from numpy import complex128, float64 from numpy._typing import ArrayLike, NDArray, _ArrayLikeNumber_co -_NormKind = L[None, "backward", "ortho", "forward"] - -__all__: list[str] +__all__ = [ + "fft", + "ifft", + "rfft", + "irfft", + "hfft", + "ihfft", + "rfftn", + "irfftn", + "rfft2", + "irfft2", + "fft2", + "ifft2", + "fftn", + "ifftn", +] + +type _NormKind = L["backward", "ortho", "forward"] | None def fft( a: ArrayLike, - n: None | int = ..., - axis: int = ..., - norm: _NormKind = ..., - out: None | NDArray[complex128] = ..., + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def ifft( a: ArrayLike, - n: None | int = ..., - axis: int = ..., - norm: _NormKind = ..., - out: None | NDArray[complex128] = ..., + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def rfft( a: ArrayLike, - n: None | int = ..., - axis: int = ..., - norm: _NormKind = ..., - out: None | NDArray[complex128] = ..., + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... 
def irfft( a: ArrayLike, - n: None | int = ..., - axis: int = ..., - norm: _NormKind = ..., - out: None | NDArray[float64] = ..., + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... # Input array must be compatible with `np.conjugate` def hfft( a: _ArrayLikeNumber_co, - n: None | int = ..., - axis: int = ..., - norm: _NormKind = ..., - out: None | NDArray[float64] = ..., + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... def ihfft( a: ArrayLike, - n: None | int = ..., - axis: int = ..., - norm: _NormKind = ..., - out: None | NDArray[complex128] = ..., + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def fftn( a: ArrayLike, - s: None | Sequence[int] = ..., - axes: None | Sequence[int] = ..., - norm: _NormKind = ..., - out: None | NDArray[complex128] = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def ifftn( a: ArrayLike, - s: None | Sequence[int] = ..., - axes: None | Sequence[int] = ..., - norm: _NormKind = ..., - out: None | NDArray[complex128] = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def rfftn( a: ArrayLike, - s: None | Sequence[int] = ..., - axes: None | Sequence[int] = ..., - norm: _NormKind = ..., - out: None | NDArray[complex128] = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... 
def irfftn( a: ArrayLike, - s: None | Sequence[int] = ..., - axes: None | Sequence[int] = ..., - norm: _NormKind = ..., - out: None | NDArray[float64] = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... def fft2( a: ArrayLike, - s: None | Sequence[int] = ..., - axes: None | Sequence[int] = ..., - norm: _NormKind = ..., - out: None | NDArray[complex128] = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def ifft2( a: ArrayLike, - s: None | Sequence[int] = ..., - axes: None | Sequence[int] = ..., - norm: _NormKind = ..., - out: None | NDArray[complex128] = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def rfft2( a: ArrayLike, - s: None | Sequence[int] = ..., - axes: None | Sequence[int] = ..., - norm: _NormKind = ..., - out: None | NDArray[complex128] = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def irfft2( a: ArrayLike, - s: None | Sequence[int] = ..., - axes: None | Sequence[int] = ..., - norm: _NormKind = ..., - out: None | NDArray[float64] = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... 
diff --git a/numpy/fft/_pocketfft_umath.cpp b/numpy/fft/_pocketfft_umath.cpp index 013db5f1d8d4..f616fe9b0bdc 100644 --- a/numpy/fft/_pocketfft_umath.cpp +++ b/numpy/fft/_pocketfft_umath.cpp @@ -12,8 +12,8 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define PY_SSIZE_T_CLEAN -#include #include +#include #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" @@ -32,7 +32,7 @@ template static void wrap_legacy_cpp_ufunc(char **args, npy_intp const *dimensions, - ptrdiff_t const *steps, void *func) + npy_intp const *steps, void *func) { NPY_ALLOW_C_API_DEF try { @@ -86,14 +86,14 @@ copy_output(T buff[], char *out, npy_intp step_out, size_t n) */ template static void -fft_loop(char **args, npy_intp const *dimensions, ptrdiff_t const *steps, +fft_loop(char **args, npy_intp const *dimensions, npy_intp const *steps, void *func) { char *ip = args[0], *fp = args[1], *op = args[2]; size_t n_outer = (size_t)dimensions[0]; - ptrdiff_t si = steps[0], sf = steps[1], so = steps[2]; + npy_intp si = steps[0], sf = steps[1], so = steps[2]; size_t nin = (size_t)dimensions[1], nout = (size_t)dimensions[2]; - ptrdiff_t step_in = steps[3], step_out = steps[4]; + npy_intp step_in = steps[3], step_out = steps[4]; bool direction = *((bool *)func); /* pocketfft::FORWARD or BACKWARD */ assert (nout > 0); @@ -144,9 +144,9 @@ rfft_impl(char **args, npy_intp const *dimensions, npy_intp const *steps, { char *ip = args[0], *fp = args[1], *op = args[2]; size_t n_outer = (size_t)dimensions[0]; - ptrdiff_t si = steps[0], sf = steps[1], so = steps[2]; + npy_intp si = steps[0], sf = steps[1], so = steps[2]; size_t nin = (size_t)dimensions[1], nout = (size_t)dimensions[2]; - ptrdiff_t step_in = steps[3], step_out = steps[4]; + npy_intp step_in = steps[3], step_out = steps[4]; assert (nout > 0 && nout == npts / 2 + 1); @@ -233,14 +233,13 @@ irfft_loop(char **args, npy_intp const *dimensions, npy_intp const *steps, void size_t nin = (size_t)dimensions[1], nout = (size_t)dimensions[2]; ptrdiff_t 
step_in = steps[3], step_out = steps[4]; - size_t npts_in = nout / 2 + 1; - assert(nout > 0); #ifndef POCKETFFT_NO_VECTORS /* * Call pocketfft directly if vectorization is possible. */ + size_t npts_in = nout / 2 + 1; constexpr auto vlen = pocketfft::detail::VLEN::val; if (vlen > 1 && n_outer >= vlen && nin >= npts_in && sf == 0) { std::vector axes = { 1 }; @@ -388,36 +387,57 @@ add_gufuncs(PyObject *dictionary) { return 0; } -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_multiarray_umath", - NULL, - -1, - NULL, - NULL, - NULL, - NULL, - NULL -}; +static int module_loaded = 0; -/* Initialization function for the module */ -PyMODINIT_FUNC PyInit__pocketfft_umath(void) +static int +_pocketfft_umath_exec(PyObject *m) { - PyObject *m = PyModule_Create(&moduledef); - if (m == NULL) { - return NULL; + // https://docs.python.org/3/howto/isolating-extensions.html#opt-out-limiting-to-one-module-object-per-process + if (module_loaded) { + PyErr_SetString(PyExc_ImportError, + "cannot load module more than once per process"); + return -1; } + module_loaded = 1; /* Import the array and ufunc objects */ - import_array(); - import_ufunc(); + if (PyArray_ImportNumPyAPI() < 0) { + return -1; + } + if (PyUFunc_ImportUFuncAPI() < 0) { + return -1; + } PyObject *d = PyModule_GetDict(m); if (add_gufuncs(d) < 0) { Py_DECREF(d); - Py_DECREF(m); - return NULL; + return -1; } - return m; + return 0; +} + +static struct PyModuleDef_Slot _pocketfft_umath_slots[] = { + {Py_mod_exec, (void*)_pocketfft_umath_exec}, +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED}, +#endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, +#endif + {0, NULL}, +}; + +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, /* m_base */ + "_pocketfft_umath", /* m_name */ + NULL, /* m_doc */ + 0, /* 
m_size */ + NULL, /* m_methods */ + _pocketfft_umath_slots, /* m_slots */ +}; + +PyMODINIT_FUNC PyInit__pocketfft_umath(void) { + return PyModuleDef_Init(&moduledef); } diff --git a/numpy/fft/helper.py b/numpy/fft/helper.py deleted file mode 100644 index 4375cedf7fcf..000000000000 --- a/numpy/fft/helper.py +++ /dev/null @@ -1,16 +0,0 @@ -def __getattr__(attr_name): - import warnings - from numpy.fft import _helper - ret = getattr(_helper, attr_name, None) - if ret is None: - raise AttributeError( - f"module 'numpy.fft.helper' has no attribute {attr_name}") - warnings.warn( - "The numpy.fft.helper has been made private and renamed to " - "numpy.fft._helper. All four functions exported by it (i.e. fftshift, " - "ifftshift, fftfreq, rfftfreq) are available from numpy.fft. " - f"Please use numpy.fft.{attr_name} instead.", - DeprecationWarning, - stacklevel=3 - ) - return ret diff --git a/numpy/fft/meson.build b/numpy/fft/meson.build index 751b5dc74d30..a5b2413ebb90 100644 --- a/numpy/fft/meson.build +++ b/numpy/fft/meson.build @@ -23,7 +23,6 @@ py.install_sources( '_pocketfft.pyi', '_helper.py', '_helper.pyi', - 'helper.py', ], subdir: 'numpy/fft' ) diff --git a/numpy/fft/tests/test_helper.py b/numpy/fft/tests/test_helper.py index 852e6625fff2..c02a73639331 100644 --- a/numpy/fft/tests/test_helper.py +++ b/numpy/fft/tests/test_helper.py @@ -4,8 +4,8 @@ """ import numpy as np -from numpy.testing import assert_array_almost_equal from numpy import fft, pi +from numpy.testing import assert_array_almost_equal class TestFFTShift: @@ -84,8 +84,8 @@ def test_uneven_dims(self): assert_array_almost_equal(fft.ifftshift(shift_dim_both), freqs) def test_equal_to_original(self): - """ Test that the new (>=v1.15) implementation (see #10073) is equal to the original (<=v1.14) """ - from numpy._core import asarray, concatenate, arange, take + """ Test the new (>=v1.15) and old implementations are equal (see #10073) """ + from numpy._core import arange, asarray, concatenate, take def 
original_fftshift(x, axes=None): """ How fftshift was implemented in v1.14""" @@ -137,29 +137,29 @@ class TestFFTFreq: def test_definition(self): x = [0, 1, 2, 3, 4, -4, -3, -2, -1] - assert_array_almost_equal(9*fft.fftfreq(9), x) - assert_array_almost_equal(9*pi*fft.fftfreq(9, pi), x) + assert_array_almost_equal(9 * fft.fftfreq(9), x) + assert_array_almost_equal(9 * pi * fft.fftfreq(9, pi), x) x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1] - assert_array_almost_equal(10*fft.fftfreq(10), x) - assert_array_almost_equal(10*pi*fft.fftfreq(10, pi), x) + assert_array_almost_equal(10 * fft.fftfreq(10), x) + assert_array_almost_equal(10 * pi * fft.fftfreq(10, pi), x) class TestRFFTFreq: def test_definition(self): x = [0, 1, 2, 3, 4] - assert_array_almost_equal(9*fft.rfftfreq(9), x) - assert_array_almost_equal(9*pi*fft.rfftfreq(9, pi), x) + assert_array_almost_equal(9 * fft.rfftfreq(9), x) + assert_array_almost_equal(9 * pi * fft.rfftfreq(9, pi), x) x = [0, 1, 2, 3, 4, 5] - assert_array_almost_equal(10*fft.rfftfreq(10), x) - assert_array_almost_equal(10*pi*fft.rfftfreq(10, pi), x) + assert_array_almost_equal(10 * fft.rfftfreq(10), x) + assert_array_almost_equal(10 * pi * fft.rfftfreq(10, pi), x) class TestIRFFTN: def test_not_last_axis_success(self): ar, ai = np.random.random((2, 16, 8, 32)) - a = ar + 1j*ai + a = ar + 1j * ai axes = (-2,) diff --git a/numpy/fft/tests/test_pocketfft.py b/numpy/fft/tests/test_pocketfft.py index f58ed0cecb39..f294a26da58e 100644 --- a/numpy/fft/tests/test_pocketfft.py +++ b/numpy/fft/tests/test_pocketfft.py @@ -1,18 +1,18 @@ -import numpy as np +import queue +import threading + import pytest + +import numpy as np from numpy.random import random -from numpy.testing import ( - assert_array_equal, assert_raises, assert_allclose, IS_WASM - ) -import threading -import queue +from numpy.testing import IS_WASM, assert_allclose, assert_array_equal, assert_raises def fft1(x): L = len(x) phase = -2j * np.pi * (np.arange(L) / L) phase = 
np.arange(L).reshape(-1, 1) * phase - return np.sum(x*np.exp(phase), axis=1) + return np.sum(x * np.exp(phase), axis=1) class TestFFTShift: @@ -25,7 +25,7 @@ class TestFFT1D: def test_identity(self): maxlen = 512 - x = random(maxlen) + 1j*random(maxlen) + x = random(maxlen) + 1j * random(maxlen) xr = random(maxlen) for i in range(1, maxlen): assert_allclose(np.fft.ifft(np.fft.fft(x[0:i])), x[0:i], @@ -38,12 +38,12 @@ def test_identity_long_short(self, dtype): # Test with explicitly given number of points, both for n # smaller and for n larger than the input size. maxlen = 16 - atol = 4 * np.spacing(np.array(1., dtype=dtype)) - x = random(maxlen).astype(dtype) + 1j*random(maxlen).astype(dtype) + atol = 5 * np.spacing(np.array(1., dtype=dtype)) + x = random(maxlen).astype(dtype) + 1j * random(maxlen).astype(dtype) xx = np.concatenate([x, np.zeros_like(x)]) xr = random(maxlen).astype(dtype) xxr = np.concatenate([xr, np.zeros_like(xr)]) - for i in range(1, maxlen*2): + for i in range(1, maxlen * 2): check_c = np.fft.ifft(np.fft.fft(x, n=i), n=i) assert check_c.real.dtype == dtype assert_allclose(check_c, xx[0:i], atol=atol, rtol=0) @@ -55,10 +55,10 @@ def test_identity_long_short(self, dtype): def test_identity_long_short_reversed(self, dtype): # Also test explicitly given number of points in reversed order. 
maxlen = 16 - atol = 5 * np.spacing(np.array(1., dtype=dtype)) - x = random(maxlen).astype(dtype) + 1j*random(maxlen).astype(dtype) + atol = 6 * np.spacing(np.array(1., dtype=dtype)) + x = random(maxlen).astype(dtype) + 1j * random(maxlen).astype(dtype) xx = np.concatenate([x, np.zeros_like(x)]) - for i in range(1, maxlen*2): + for i in range(1, maxlen * 2): check_via_c = np.fft.fft(np.fft.ifft(x, n=i), n=i) assert check_via_c.dtype == x.dtype assert_allclose(check_via_c, xx[0:i], atol=atol, rtol=0) @@ -69,14 +69,14 @@ def test_identity_long_short_reversed(self, dtype): n = i // 2 + 1 y.imag[0] = 0 if i % 2 == 0: - y.imag[n-1:] = 0 + y.imag[n - 1:] = 0 yy = np.concatenate([y, np.zeros_like(y)]) check_via_r = np.fft.rfft(np.fft.irfft(x, n=i), n=i) assert check_via_r.dtype == x.dtype assert_allclose(check_via_r, yy[0:n], atol=atol, rtol=0) def test_fft(self): - x = random(30) + 1j*random(30) + x = random(30) + 1j * random(30) assert_allclose(fft1(x), np.fft.fft(x), atol=1e-6) assert_allclose(fft1(x), np.fft.fft(x, norm="backward"), atol=1e-6) assert_allclose(fft1(x) / np.sqrt(30), @@ -96,7 +96,7 @@ def zeros_like(x): # tests below only test the out parameter if dtype is complex: - y = random((10, 20)) + 1j*random((10, 20)) + y = random((10, 20)) + 1j * random((10, 20)) fft, ifft = np.fft.fft, np.fft.ifft else: y = random((10, 20)) @@ -117,7 +117,7 @@ def zeros_like(x): @pytest.mark.parametrize("axis", [0, 1]) def test_fft_inplace_out(self, axis): # Test some weirder in-place combinations - y = random((20, 20)) + 1j*random((20, 20)) + y = random((20, 20)) + 1j * random((20, 20)) # Fully in-place. 
y1 = y.copy() expected1 = np.fft.fft(y1, axis=axis) @@ -185,7 +185,7 @@ def test_fft_bad_out(self): @pytest.mark.parametrize('norm', (None, 'backward', 'ortho', 'forward')) def test_ifft(self, norm): - x = random(30) + 1j*random(30) + x = random(30) + 1j * random(30) assert_allclose( x, np.fft.ifft(np.fft.fft(x, norm=norm), norm=norm), atol=1e-6) @@ -195,7 +195,7 @@ def test_ifft(self, norm): np.fft.ifft([], norm=norm) def test_fft2(self): - x = random((30, 20)) + 1j*random((30, 20)) + x = random((30, 20)) + 1j * random((30, 20)) assert_allclose(np.fft.fft(np.fft.fft(x, axis=1), axis=0), np.fft.fft2(x), atol=1e-6) assert_allclose(np.fft.fft2(x), @@ -206,7 +206,7 @@ def test_fft2(self): np.fft.fft2(x, norm="forward"), atol=1e-6) def test_ifft2(self): - x = random((30, 20)) + 1j*random((30, 20)) + x = random((30, 20)) + 1j * random((30, 20)) assert_allclose(np.fft.ifft(np.fft.ifft(x, axis=1), axis=0), np.fft.ifft2(x), atol=1e-6) assert_allclose(np.fft.ifft2(x), @@ -216,8 +216,14 @@ def test_ifft2(self): assert_allclose(np.fft.ifft2(x) * (30. 
* 20.), np.fft.ifft2(x, norm="forward"), atol=1e-6) + def test_ifft2_out(self): + z = np.array([[1 + 2j, 3 - 4j], [0.5 - 2j, 4 + 1j]]) + out = np.zeros_like(z) + result = np.fft.ifft2(z, out=out) + assert result is out + def test_fftn(self): - x = random((30, 20, 10)) + 1j*random((30, 20, 10)) + x = random((30, 20, 10)) + 1j * random((30, 20, 10)) assert_allclose( np.fft.fft(np.fft.fft(np.fft.fft(x, axis=2), axis=1), axis=0), np.fft.fftn(x), atol=1e-6) @@ -229,7 +235,7 @@ def test_fftn(self): np.fft.fftn(x, norm="forward"), atol=1e-6) def test_ifftn(self): - x = random((30, 20, 10)) + 1j*random((30, 20, 10)) + x = random((30, 20, 10)) + 1j * random((30, 20, 10)) assert_allclose( np.fft.ifft(np.fft.ifft(np.fft.ifft(x, axis=2), axis=1), axis=0), np.fft.ifftn(x), atol=1e-6) @@ -242,10 +248,10 @@ def test_ifftn(self): def test_rfft(self): x = random(30) - for n in [x.size, 2*x.size]: + for n in [x.size, 2 * x.size]: for norm in [None, 'backward', 'ortho', 'forward']: assert_allclose( - np.fft.fft(x, n=n, norm=norm)[:(n//2 + 1)], + np.fft.fft(x, n=n, norm=norm)[:(n // 2 + 1)], np.fft.rfft(x, n=n, norm=norm), atol=1e-6) assert_allclose( np.fft.rfft(x, n=n), @@ -261,7 +267,7 @@ def test_rfft_even(self): x = np.arange(8) n = 4 y = np.fft.rfft(x, n) - assert_allclose(y, np.fft.fft(x[:n])[:n//2 + 1], rtol=1e-14) + assert_allclose(y, np.fft.fft(x[:n])[:n // 2 + 1], rtol=1e-14) def test_rfft_odd(self): x = np.array([1, 0, 2, 3, -3]) @@ -298,6 +304,13 @@ def test_irfft2(self): assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="forward"), norm="forward"), atol=1e-6) + def test_irfft2_out(self): + z = np.array([[7, 1 + 4j, -5], [2 - 1j, -2 - 1j, -8 + 1j], + [-3, 1 + 2j, 5], [2 + 1j, 4 - 1j, -8 - 1j]]) + out = np.zeros((4, 4), dtype=np.float64) + result = np.fft.irfft2(z, out=out) + assert result is out + def test_rfftn(self): x = random((30, 20, 10)) assert_allclose(np.fft.fftn(x)[:, :, :6], np.fft.rfftn(x), atol=1e-6) @@ -307,6 +320,14 @@ def test_rfftn(self): 
np.fft.rfftn(x, norm="ortho"), atol=1e-6) assert_allclose(np.fft.rfftn(x) / (30. * 20. * 10.), np.fft.rfftn(x, norm="forward"), atol=1e-6) + # Regression test for gh-27159 + x = np.ones((2, 3)) + result = np.fft.rfftn(x, axes=(0, 0, 1), s=(10, 20, 40)) + assert result.shape == (10, 21) + expected = np.fft.fft(np.fft.fft(np.fft.rfft(x, axis=1, n=40), + axis=0, n=20), axis=0, n=10) + assert expected.shape == (10, 21) + assert_allclose(result, expected, atol=1e-6) def test_irfftn(self): x = random((30, 20, 10)) @@ -319,7 +340,7 @@ def test_irfftn(self): norm="forward"), atol=1e-6) def test_hfft(self): - x = random(14) + 1j*random(14) + x = random(14) + 1j * random(14) x_herm = np.concatenate((random(1), x, random(1))) x = np.concatenate((x_herm, x[::-1].conj())) assert_allclose(np.fft.fft(x), np.fft.hfft(x_herm), atol=1e-6) @@ -330,8 +351,15 @@ def test_hfft(self): assert_allclose(np.fft.hfft(x_herm) / 30., np.fft.hfft(x_herm, norm="forward"), atol=1e-6) + def test_hfft_out(self): + a = np.array([1, 2, 3, 4, 3, 2], dtype=complex) + n = (len(a) - 1) * 2 + out = np.zeros(n, dtype=np.float64) + result = np.fft.hfft(a, n=n, out=out) + assert result is out + def test_ihfft(self): - x = random(14) + 1j*random(14) + x = random(14) + 1j * random(14) x_herm = np.concatenate((random(1), x, random(1))) x = np.concatenate((x_herm, x[::-1].conj())) assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm)), atol=1e-6) @@ -392,7 +420,7 @@ def test_all_1d_norm_preserving(self): (np.fft.ihfft, np.fft.hfft), ] for forw, back in func_pairs: - for n in [x.size, 2*x.size]: + for n in [x.size, 2 * x.size]: for norm in [None, 'backward', 'ortho', 'forward']: tmp = forw(x, n=n, norm=norm) tmp = back(tmp, n=n, norm=norm) @@ -411,7 +439,7 @@ def zeros_like(x): # tests below only test the out parameter if dtype is complex: - x = random((10, 5, 6)) + 1j*random((10, 5, 6)) + x = random((10, 5, 6)) + 1j * random((10, 5, 6)) fft, ifft = np.fft.fftn, np.fft.ifftn else: x = random((10, 5, 6)) @@ 
-435,7 +463,7 @@ def test_fftn_out_and_s_interaction(self, fft): if fft is np.fft.rfftn: x = random((10, 5, 6)) else: - x = random((10, 5, 6)) + 1j*random((10, 5, 6)) + x = random((10, 5, 6)) + 1j * random((10, 5, 6)) with pytest.raises(ValueError, match="has wrong shape"): fft(x, out=np.zeros_like(x), s=(3, 3, 3), axes=(0, 1, 2)) # Except on the first axis done (which is the last of axes). @@ -450,7 +478,7 @@ def test_fftn_out_and_s_interaction(self, fft): def test_irfftn_out_and_s_interaction(self, s): # Since for irfftn, the output is real and thus cannot be used for # intermediate steps, it should always work. - x = random((9, 5, 6, 2)) + 1j*random((9, 5, 6, 2)) + x = random((9, 5, 6, 2)) + 1j * random((9, 5, 6, 2)) expected = np.fft.irfftn(x, s=s, axes=(0, 1, 2)) out = np.zeros_like(expected) result = np.fft.irfftn(x, s=s, axes=(0, 1, 2), out=out) @@ -494,9 +522,19 @@ def test_fft_with_order(dtype, order, fft): Y_res = fft(Y, axes=ax) assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol) else: - raise ValueError() + raise ValueError +@pytest.mark.parametrize("order", ["F", "C"]) +@pytest.mark.parametrize("n", [None, 7, 12]) +def test_fft_output_order(order, n): + rng = np.random.RandomState(42) + x = rng.rand(10) + x = np.asarray(x, dtype=np.complex64, order=order) + res = np.fft.fft(x, n=n) + assert res.flags.c_contiguous == x.flags.c_contiguous + assert res.flags.f_contiguous == x.flags.f_contiguous + @pytest.mark.skipif(IS_WASM, reason="Cannot start thread") class TestFFTThreadSafe: threads = 16 @@ -521,11 +559,11 @@ def worker(args, q): 'Function returned wrong value in multithreaded context') def test_fft(self): - a = np.ones(self.input_shape) * 1+0j + a = np.ones(self.input_shape) * 1 + 0j self._test_mtsame(np.fft.fft, a) def test_ifft(self): - a = np.ones(self.input_shape) * 1+0j + a = np.ones(self.input_shape) * 1 + 0j self._test_mtsame(np.fft.ifft, a) def test_rfft(self): @@ -533,7 +571,7 @@ def test_rfft(self): self._test_mtsame(np.fft.rfft, a) def 
test_irfft(self): - a = np.ones(self.input_shape) * 1+0j + a = np.ones(self.input_shape) * 1 + 0j self._test_mtsame(np.fft.irfft, a) diff --git a/numpy/lib/__init__.py b/numpy/lib/__init__.py index 22ad35e93c35..e14827b5de37 100644 --- a/numpy/lib/__init__.py +++ b/numpy/lib/__init__.py @@ -9,65 +9,65 @@ """ # Public submodules -# Note: recfunctions and (maybe) format are public too, but not imported -from . import array_utils -from . import introspect -from . import mixins -from . import npyio -from . import scimath -from . import stride_tricks +# Note: recfunctions is public, but not imported +from numpy._core._multiarray_umath import add_docstring, tracemalloc_domain +from numpy._core.function_base import add_newdoc # Private submodules # load module names. See https://github.com/networkx/networkx/issues/5838 -from . import _type_check_impl -from . import _index_tricks_impl -from . import _nanfunctions_impl -from . import _function_base_impl -from . import _stride_tricks_impl -from . import _shape_base_impl -from . import _twodim_base_impl -from . import _ufunclike_impl -from . import _histograms_impl -from . import _utils_impl -from . import _arraysetops_impl -from . import _polynomial_impl -from . import _npyio_impl -from . import _arrayterator_impl -from . import _arraypad_impl -from . import _version +from . 
import ( + _arraypad_impl, + _arraysetops_impl, + _arrayterator_impl, + _function_base_impl, + _histograms_impl, + _index_tricks_impl, + _nanfunctions_impl, + _npyio_impl, + _polynomial_impl, + _shape_base_impl, + _stride_tricks_impl, + _twodim_base_impl, + _type_check_impl, + _ufunclike_impl, + _utils_impl, + _version, + array_utils, + format, + introspect, + mixins, + npyio, + scimath, + stride_tricks, +) # numpy.lib namespace members from ._arrayterator_impl import Arrayterator from ._version import NumpyVersion -from numpy._core._multiarray_umath import add_docstring, tracemalloc_domain -from numpy._core.function_base import add_newdoc __all__ = [ "Arrayterator", "add_docstring", "add_newdoc", "array_utils", - "introspect", "mixins", "NumpyVersion", "npyio", "scimath", - "stride_tricks", "tracemalloc_domain" + "format", "introspect", "mixins", "NumpyVersion", "npyio", "scimath", + "stride_tricks", "tracemalloc_domain", ] +add_newdoc.__module__ = "numpy.lib" + from numpy._pytesttester import PytestTester + test = PytestTester(__name__) del PytestTester def __getattr__(attr): - # Warn for reprecated attributes - import math + # Warn for deprecated/removed aliases import warnings - if attr == "math": - warnings.warn( - "`np.lib.math` is a deprecated alias for the standard library " - "`math` module (Deprecated Numpy 1.25). Replace usages of " - "`numpy.lib.math` with `math`", DeprecationWarning, stacklevel=2) - return math - elif attr == "emath": + if attr == "emath": raise AttributeError( "numpy.lib.emath was an alias for emath module that was removed " "in NumPy 2.0. Replace usages of numpy.lib.emath with " - "numpy.emath." + "numpy.emath.", + name=None ) elif attr in ( "histograms", "type_check", "nanfunctions", "function_base", @@ -77,13 +77,14 @@ def __getattr__(attr): raise AttributeError( f"numpy.lib.{attr} is now private. 
If you are using a public " "function, it should be available in the main numpy namespace, " - "otherwise check the NumPy 2.0 migration guide." + "otherwise check the NumPy 2.0 migration guide.", + name=None ) elif attr == "arrayterator": raise AttributeError( "numpy.lib.arrayterator submodule is now private. To access " - "Arrayterator class use numpy.lib.Arrayterator." + "Arrayterator class use numpy.lib.Arrayterator.", + name=None ) else: - raise AttributeError("module {!r} has no attribute " - "{!r}".format(__name__, attr)) + raise AttributeError(f"module {__name__!r} has no attribute {attr!r}") diff --git a/numpy/lib/__init__.pyi b/numpy/lib/__init__.pyi index b8bf2c5afbda..5a85743e4d0f 100644 --- a/numpy/lib/__init__.pyi +++ b/numpy/lib/__init__.pyi @@ -1,41 +1,52 @@ -import math as math - -from numpy._pytesttester import PytestTester - -from numpy import ( - ndenumerate as ndenumerate, - ndindex as ndindex, -) - -from numpy.version import version - -from numpy.lib import ( - format as format, - mixins as mixins, - scimath as scimath, - stride_tricks as stride_tricks, - npyio as npyio, - array_utils as array_utils, -) - -from numpy.lib._version import ( - NumpyVersion as NumpyVersion, +from numpy._core.function_base import add_newdoc +from numpy._core.multiarray import add_docstring, tracemalloc_domain + +# all submodules of `lib` are accessible at runtime through `__getattr__`, +# so we implicitly re-export them here +from . 
import ( + _array_utils_impl as _array_utils_impl, + _arraypad_impl as _arraypad_impl, + _arraysetops_impl as _arraysetops_impl, + _arrayterator_impl as _arrayterator_impl, + _datasource as _datasource, + _format_impl as _format_impl, + _function_base_impl as _function_base_impl, + _histograms_impl as _histograms_impl, + _index_tricks_impl as _index_tricks_impl, + _iotools as _iotools, + _nanfunctions_impl as _nanfunctions_impl, + _npyio_impl as _npyio_impl, + _polynomial_impl as _polynomial_impl, + _scimath_impl as _scimath_impl, + _shape_base_impl as _shape_base_impl, + _stride_tricks_impl as _stride_tricks_impl, + _twodim_base_impl as _twodim_base_impl, + _type_check_impl as _type_check_impl, + _ufunclike_impl as _ufunclike_impl, + _utils_impl as _utils_impl, + _version as _version, + array_utils, + format, + introspect, + mixins, + npyio, + scimath, + stride_tricks, ) - -from numpy.lib._arrayterator_impl import ( - Arrayterator as Arrayterator, -) - -from numpy._core.multiarray import ( - add_docstring as add_docstring, - tracemalloc_domain as tracemalloc_domain, -) - -from numpy._core.function_base import ( - add_newdoc as add_newdoc, -) - -__all__: list[str] -test: PytestTester - -__version__ = version +from ._arrayterator_impl import Arrayterator +from ._version import NumpyVersion + +__all__ = [ + "Arrayterator", + "add_docstring", + "add_newdoc", + "array_utils", + "format", + "introspect", + "mixins", + "NumpyVersion", + "npyio", + "scimath", + "stride_tricks", + "tracemalloc_domain", +] diff --git a/numpy/lib/_array_utils_impl.py b/numpy/lib/_array_utils_impl.py index 3e9d96e93dd9..25d78c1eb6a6 100644 --- a/numpy/lib/_array_utils_impl.py +++ b/numpy/lib/_array_utils_impl.py @@ -2,7 +2,7 @@ Miscellaneous utils. 
""" from numpy._core import asarray -from numpy._core.numeric import normalize_axis_tuple, normalize_axis_index +from numpy._core.numeric import normalize_axis_index, normalize_axis_tuple from numpy._utils import set_module __all__ = ["byte_bounds", "normalize_axis_tuple", "normalize_axis_index"] @@ -29,7 +29,8 @@ def byte_bounds(a): Examples -------- - >>> I = np.eye(2, dtype='f'); I.dtype + >>> import numpy as np + >>> I = np.eye(2, dtype=np.float32); I.dtype dtype('float32') >>> low, high = np.lib.array_utils.byte_bounds(I) >>> high - low == I.size*I.itemsize @@ -54,8 +55,8 @@ def byte_bounds(a): else: for shape, stride in zip(ashape, astrides): if stride < 0: - a_low += (shape-1)*stride + a_low += (shape - 1) * stride else: - a_high += (shape-1)*stride + a_high += (shape - 1) * stride a_high += bytes_a return a_low, a_high diff --git a/numpy/lib/_array_utils_impl.pyi b/numpy/lib/_array_utils_impl.pyi index a38a62f2813c..e33507a127c9 100644 --- a/numpy/lib/_array_utils_impl.pyi +++ b/numpy/lib/_array_utils_impl.pyi @@ -1,25 +1,10 @@ -from typing import Any, Iterable, Tuple +import numpy as np +from numpy._core.numeric import normalize_axis_index, normalize_axis_tuple -from numpy import generic -from numpy.typing import NDArray - -__all__: list[str] +__all__ = ["byte_bounds", "normalize_axis_tuple", "normalize_axis_index"] # NOTE: In practice `byte_bounds` can (potentially) take any object # implementing the `__array_interface__` protocol. The caveat is # that certain keys, marked as optional in the spec, must be present for # `byte_bounds`. This concerns `"strides"` and `"data"`. -def byte_bounds(a: generic | NDArray[Any]) -> tuple[int, int]: ... - -def normalize_axis_tuple( - axis: int | Iterable[int], - ndim: int = ..., - argname: None | str = ..., - allow_duplicate: None | bool = ..., -) -> Tuple[int, int]: ... - -def normalize_axis_index( - axis: int = ..., - ndim: int = ..., - msg_prefix: None | str = ..., -) -> int: ... 
+def byte_bounds(a: np.generic | np.ndarray) -> tuple[int, int]: ... diff --git a/numpy/lib/_arraypad_impl.py b/numpy/lib/_arraypad_impl.py index 7ec52167f1c0..681b92fc8a72 100644 --- a/numpy/lib/_arraypad_impl.py +++ b/numpy/lib/_arraypad_impl.py @@ -3,11 +3,12 @@ of an n-dimensional array. """ +import typing + import numpy as np from numpy._core.overrides import array_function_dispatch from numpy.lib._index_tricks_impl import ndindex - __all__ = ['pad'] @@ -49,7 +50,7 @@ def _slice_at_axis(sl, axis): Examples -------- - >>> _slice_at_axis(slice(None, 3, -1), 1) + >>> np._slice_at_axis(slice(None, 3, -1), 1) (slice(None, None, None), slice(None, 3, -1), (...,)) """ return (slice(None),) * axis + (sl,) + (...,) @@ -210,7 +211,7 @@ def _get_linear_ramps(padded, axis, width_pair, end_value_pair): left_ramp, right_ramp = ( np.linspace( start=end_value, - stop=edge.squeeze(axis), # Dimension is replaced by linspace + stop=edge.squeeze(axis), # Dimension is replaced by linspace num=width, endpoint=False, dtype=padded.dtype, @@ -220,7 +221,7 @@ def _get_linear_ramps(padded, axis, width_pair, end_value_pair): end_value_pair, edge_pair, width_pair ) ) - + # Reverse linear space in appropriate dimension right_ramp = right_ramp[_slice_at_axis(slice(None, None, -1), axis)] @@ -293,7 +294,8 @@ def _get_stats(padded, axis, width_pair, length_pair, stat_func): return left_stat, right_stat -def _set_reflect_both(padded, axis, width_pair, method, include_edge=False): +def _set_reflect_both(padded, axis, width_pair, method, + original_period, include_edge=False): """ Pad `axis` of `arr` with reflection. @@ -308,6 +310,8 @@ def _set_reflect_both(padded, axis, width_pair, method, include_edge=False): dimension. method : str Controls method of reflection; options are 'even' or 'odd'. + original_period : int + Original length of data on `axis` of `arr`. 
include_edge : bool If true, edge value is included in reflection, otherwise the edge value forms the symmetric axis to the reflection. @@ -322,9 +326,18 @@ def _set_reflect_both(padded, axis, width_pair, method, include_edge=False): old_length = padded.shape[axis] - right_pad - left_pad if include_edge: + # Avoid wrapping with only a subset of the original area + # by ensuring period can only be a multiple of the original + # area's length. + old_length = old_length // original_period * original_period # Edge is included, we need to offset the pad amount by 1 edge_offset = 1 else: + # Avoid wrapping with only a subset of the original area + # by ensuring period can only be a multiple of the original + # area's length. + old_length = ((old_length - 1) // (original_period - 1) + * (original_period - 1) + 1) edge_offset = 0 # Edge is not included, no need to offset pad amount old_length -= 1 # but must be omitted from the chunk @@ -539,7 +552,7 @@ def pad(array, pad_width, mode='constant', **kwargs): ---------- array : array_like of rank N The array to pad. - pad_width : {sequence, array_like, int} + pad_width : {sequence, array_like, int, dict} Number of values padded to the edges of each axis. ``((before_1, after_1), ... (before_N, after_N))`` unique pad widths for each axis. @@ -547,6 +560,9 @@ def pad(array, pad_width, mode='constant', **kwargs): and after pad for each axis. ``(pad,)`` or ``int`` is a shortcut for before = after = pad width for all axes. + If a ``dict``, each key is an axis and its corresponding value is an ``int`` or + ``int`` pair describing the padding ``(before, after)`` or ``pad`` width for + that axis. mode : str or function, optional One of the following string values or a user supplied function. @@ -583,8 +599,6 @@ def pad(array, pad_width, mode='constant', **kwargs): 'empty' Pads with undefined values. - .. versionadded:: 1.17 - Padding function, see Notes. 
stat_length : sequence or int, optional @@ -643,8 +657,6 @@ def pad(array, pad_width, mode='constant', **kwargs): Notes ----- - .. versionadded:: 1.7.0 - For an array with rank greater than 1, some of the padding of later axes is calculated from padding of previous axes. This is easiest to think about with a rank 2 array where the corners of the padded array @@ -672,6 +684,7 @@ def pad(array, pad_width, mode='constant', **kwargs): Examples -------- + >>> import numpy as np >>> a = [1, 2, 3, 4, 5] >>> np.pad(a, (2, 3), 'constant', constant_values=(4, 6)) array([4, 4, 1, ..., 6, 6, 6]) @@ -737,8 +750,39 @@ def pad(array, pad_width, mode='constant', **kwargs): [100, 100, 3, 4, 5, 100, 100], [100, 100, 100, 100, 100, 100, 100], [100, 100, 100, 100, 100, 100, 100]]) + + >>> a = np.arange(1, 7).reshape(2, 3) + >>> np.pad(a, {1: (1, 2)}) + array([[0, 1, 2, 3, 0, 0], + [0, 4, 5, 6, 0, 0]]) + >>> np.pad(a, {-1: 2}) + array([[0, 0, 1, 2, 3, 0, 0], + [0, 0, 4, 5, 6, 0, 0]]) + >>> np.pad(a, {0: (3, 0)}) + array([[0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [1, 2, 3], + [4, 5, 6]]) + >>> np.pad(a, {0: (3, 0), 1: 2}) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 2, 3, 0, 0], + [0, 0, 4, 5, 6, 0, 0]]) """ array = np.asarray(array) + if isinstance(pad_width, dict): + seq = [(0, 0)] * array.ndim + for axis, width in pad_width.items(): + match width: + case int(both): + seq[axis] = both, both + case tuple((int(before), int(after))): + seq[axis] = before, after + case _ as invalid: + typing.assert_never(invalid) + pad_width = seq pad_width = np.asarray(pad_width) if not pad_width.dtype.kind == 'i': @@ -785,10 +829,10 @@ def pad(array, pad_width, mode='constant', **kwargs): try: unsupported_kwargs = set(kwargs) - set(allowed_kwargs[mode]) except KeyError: - raise ValueError("mode '{}' is not supported".format(mode)) from None + raise ValueError(f"mode '{mode}' is not supported") from None if unsupported_kwargs: - raise ValueError("unsupported 
keyword arguments for mode '{}': {}" - .format(mode, unsupported_kwargs)) + raise ValueError("unsupported keyword arguments for mode " + f"'{mode}': {unsupported_kwargs}") stat_functions = {"maximum": np.amax, "minimum": np.amin, "mean": np.mean, "median": np.median} @@ -817,8 +861,8 @@ def pad(array, pad_width, mode='constant', **kwargs): for axis, width_pair in zip(axes, pad_width): if array.shape[axis] == 0 and any(width_pair): raise ValueError( - "can't extend empty axis {} using modes other than " - "'constant' or 'empty'".format(axis) + f"can't extend empty axis {axis} using modes other than " + "'constant' or 'empty'" ) # passed, don't need to do anything more as _pad_simple already # returned the correct result @@ -839,7 +883,7 @@ def pad(array, pad_width, mode='constant', **kwargs): elif mode in stat_functions: func = stat_functions[mode] - length = kwargs.get("stat_length", None) + length = kwargs.get("stat_length") length = _as_pairs(length, padded.ndim, as_index=True) for axis, width_pair, length_pair in zip(axes, pad_width, length): roi = _view_roi(padded, original_area_slice, axis) @@ -848,7 +892,7 @@ def pad(array, pad_width, mode='constant', **kwargs): elif mode in {"reflect", "symmetric"}: method = kwargs.get("reflect_type", "even") - include_edge = True if mode == "symmetric" else False + include_edge = mode == "symmetric" for axis, (left_index, right_index) in zip(axes, pad_width): if array.shape[axis] == 1 and (left_index > 0 or right_index > 0): # Extending singleton dimension for 'reflect' is legacy @@ -865,7 +909,7 @@ def pad(array, pad_width, mode='constant', **kwargs): # the length of the original values in the current dimension. 
left_index, right_index = _set_reflect_both( roi, axis, (left_index, right_index), - method, include_edge + method, array.shape[axis], include_edge ) elif mode == "wrap": diff --git a/numpy/lib/_arraypad_impl.pyi b/numpy/lib/_arraypad_impl.pyi index 1ac6fc7d91c8..da7c89859d86 100644 --- a/numpy/lib/_arraypad_impl.pyi +++ b/numpy/lib/_arraypad_impl.pyi @@ -1,22 +1,11 @@ -from typing import ( - Literal as L, - Any, - overload, - TypeVar, - Protocol, -) - -from numpy import generic +from typing import Any, Literal as L, Protocol, overload, type_check_only -from numpy._typing import ( - ArrayLike, - NDArray, - _ArrayLikeInt, - _ArrayLike, -) +import numpy as np +from numpy._typing import ArrayLike, NDArray, _ArrayLike, _ArrayLikeInt -_SCT = TypeVar("_SCT", bound=generic) +__all__ = ["pad"] +@type_check_only class _ModeFunc(Protocol): def __call__( self, @@ -27,7 +16,7 @@ class _ModeFunc(Protocol): /, ) -> None: ... -_ModeKind = L[ +type _ModeKind = L[ "constant", "edge", "linear_ramp", @@ -41,45 +30,52 @@ _ModeKind = L[ "empty", ] -__all__: list[str] +type _PadWidth = ( + _ArrayLikeInt + | dict[int, int] + | dict[int, tuple[int, int]] + | dict[int, int | tuple[int, int]] +) + +### # TODO: In practice each keyword argument is exclusive to one or more # specific modes. Consider adding more overloads to express this in the future. # Expand `**kwargs` into explicit keyword-only arguments @overload -def pad( - array: _ArrayLike[_SCT], - pad_width: _ArrayLikeInt, - mode: _ModeKind = ..., +def pad[ScalarT: np.generic]( + array: _ArrayLike[ScalarT], + pad_width: _PadWidth, + mode: _ModeKind = "constant", *, - stat_length: None | _ArrayLikeInt = ..., - constant_values: ArrayLike = ..., - end_values: ArrayLike = ..., - reflect_type: L["odd", "even"] = ..., -) -> NDArray[_SCT]: ... + stat_length: _ArrayLikeInt | None = None, + constant_values: ArrayLike = 0, + end_values: ArrayLike = 0, + reflect_type: L["odd", "even"] = "even", +) -> NDArray[ScalarT]: ... 
@overload def pad( array: ArrayLike, - pad_width: _ArrayLikeInt, - mode: _ModeKind = ..., + pad_width: _PadWidth, + mode: _ModeKind = "constant", *, - stat_length: None | _ArrayLikeInt = ..., - constant_values: ArrayLike = ..., - end_values: ArrayLike = ..., - reflect_type: L["odd", "even"] = ..., + stat_length: _ArrayLikeInt | None = None, + constant_values: ArrayLike = 0, + end_values: ArrayLike = 0, + reflect_type: L["odd", "even"] = "even", ) -> NDArray[Any]: ... @overload -def pad( - array: _ArrayLike[_SCT], - pad_width: _ArrayLikeInt, +def pad[ScalarT: np.generic]( + array: _ArrayLike[ScalarT], + pad_width: _PadWidth, mode: _ModeFunc, **kwargs: Any, -) -> NDArray[_SCT]: ... +) -> NDArray[ScalarT]: ... @overload def pad( array: ArrayLike, - pad_width: _ArrayLikeInt, + pad_width: _PadWidth, mode: _ModeFunc, **kwargs: Any, ) -> NDArray[Any]: ... diff --git a/numpy/lib/_arraysetops_impl.py b/numpy/lib/_arraysetops_impl.py index 435904c95321..5d521b1fba60 100644 --- a/numpy/lib/_arraysetops_impl.py +++ b/numpy/lib/_arraysetops_impl.py @@ -15,20 +15,19 @@ """ import functools -import warnings from typing import NamedTuple import numpy as np from numpy._core import overrides -from numpy._core._multiarray_umath import _array_converter - +from numpy._core._multiarray_umath import _array_converter, _unique_hash +from numpy.lib.array_utils import normalize_axis_index array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy') __all__ = [ - "ediff1d", "in1d", "intersect1d", "isin", "setdiff1d", "setxor1d", + "ediff1d", "intersect1d", "isin", "setdiff1d", "setxor1d", "union1d", "unique", "unique_all", "unique_counts", "unique_inverse", "unique_values" ] @@ -68,6 +67,7 @@ def ediff1d(ary, to_end=None, to_begin=None): Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 4, 7, 0]) >>> np.ediff1d(x) array([ 1, 2, 3, -7]) @@ -137,13 +137,15 @@ def _unpack_tuple(x): def _unique_dispatcher(ar, return_index=None, 
return_inverse=None, - return_counts=None, axis=None, *, equal_nan=None): + return_counts=None, axis=None, *, equal_nan=None, + sorted=True): return (ar,) @array_function_dispatch(_unique_dispatcher) def unique(ar, return_index=False, return_inverse=False, - return_counts=False, axis=None, *, equal_nan=True): + return_counts=False, axis=None, *, equal_nan=True, + sorted=True): """ Find the unique elements of an array. @@ -176,13 +178,18 @@ def unique(ar, return_index=False, return_inverse=False, that contain objects are not supported if the `axis` kwarg is used. The default is None. - .. versionadded:: 1.13.0 - equal_nan : bool, optional If True, collapses multiple NaN values in the return array into one. .. versionadded:: 1.24 + sorted : bool, optional + If True, the unique elements are sorted. Elements may be sorted in + practice even if ``sorted=False``, but this could change without + notice. + + .. versionadded:: 2.3 + Returns ------- unique : ndarray @@ -197,11 +204,10 @@ def unique(ar, return_index=False, return_inverse=False, The number of times each of the unique values comes up in the original array. Only provided if `return_counts` is True. - .. versionadded:: 1.9.0 - See Also -------- repeat : Repeat elements of an array. + sort : Return a sorted copy of an array. Notes ----- @@ -215,24 +221,28 @@ def unique(ar, return_index=False, return_inverse=False, flattened subarrays are sorted in lexicographic order starting with the first element. - .. versionchanged: 1.21 - If nan values are in the input array, a single nan is put - to the end of the sorted unique values. - - Also for complex arrays all NaN values are considered equivalent + .. versionchanged:: 1.21 + Like np.sort, NaN will sort to the end of the values. + For complex arrays all NaN values are considered equivalent (no matter whether the NaN is in the real or imaginary part). 
As the representant for the returned array the smallest one in the lexicographical order is chosen - see np.sort for how the lexicographical order is defined for complex arrays. - .. versionchanged: 2.0 + .. versionchanged:: 2.0 For multi-dimensional inputs, ``unique_inverse`` is reshaped such that the input can be reconstructed using - ``np.take(unique, unique_inverse)`` when ``axis = None``, and - ``np.take_along_axis(unique, unique_inverse, axis=axis)`` otherwise. + ``np.take(unique, unique_inverse, axis=axis)``. The result is + now not 1-dimensional when ``axis=None``. + + Note that in NumPy 2.0.0 a higher dimensional array was returned also + when ``axis`` was not ``None``. This was reverted, but + ``inverse.reshape(-1)`` can be used to ensure compatibility with both + versions. Examples -------- + >>> import numpy as np >>> np.unique([1, 1, 2, 2, 3, 3]) array([1, 2, 3]) >>> a = np.array([[1, 1], [2, 3]]) @@ -280,9 +290,12 @@ def unique(ar, return_index=False, return_inverse=False, """ ar = np.asanyarray(ar) - if axis is None: - ret = _unique1d(ar, return_index, return_inverse, return_counts, - equal_nan=equal_nan, inverse_shape=ar.shape) + if axis is None or ar.ndim == 1: + if axis is not None: + normalize_axis_index(axis, ar.ndim) + ret = _unique1d(ar, return_index, return_inverse, return_counts, + equal_nan=equal_nan, inverse_shape=ar.shape, axis=None, + sorted=sorted) return _unpack_tuple(ret) # axis was specified and not None @@ -298,7 +311,7 @@ def unique(ar, return_index=False, return_inverse=False, orig_shape, orig_dtype = ar.shape, ar.dtype ar = ar.reshape(orig_shape[0], np.prod(orig_shape[1:], dtype=np.intp)) ar = np.ascontiguousarray(ar) - dtype = [('f{i}'.format(i=i), ar.dtype) for i in range(ar.shape[1])] + dtype = [(f'f{i}', ar.dtype) for i in range(ar.shape[1])] # At this point, `ar` has shape `(n, m)`, and `dtype` is a structured # data type with `m` fields where each field has the data type of `ar`. 
@@ -328,20 +341,43 @@ def reshape_uniq(uniq): output = _unique1d(consolidated, return_index, return_inverse, return_counts, - equal_nan=equal_nan, inverse_shape=inverse_shape) + equal_nan=equal_nan, inverse_shape=inverse_shape, + axis=axis, sorted=sorted) output = (reshape_uniq(output[0]),) + output[1:] return _unpack_tuple(output) def _unique1d(ar, return_index=False, return_inverse=False, - return_counts=False, *, equal_nan=True, inverse_shape=None): + return_counts=False, *, equal_nan=True, inverse_shape=None, + axis=None, sorted=True): """ Find the unique elements of an array, ignoring shape. + + Uses a hash table to find the unique elements if possible. """ ar = np.asanyarray(ar).flatten() + if len(ar.shape) != 1: + # np.matrix, and maybe some other array subclasses, insist on keeping + # two dimensions for all operations. Coerce to an ndarray in such cases. + ar = np.asarray(ar).flatten() optional_indices = return_index or return_inverse + # masked arrays are not supported yet. + if not optional_indices and not return_counts and not np.ma.is_masked(ar): + # First we convert the array to a numpy array, later we wrap it back + # in case it was a subclass of numpy.ndarray. + conv = _array_converter(ar) + ar_, = conv + + if (hash_unique := _unique_hash(ar_, equal_nan=equal_nan)) \ + is not NotImplemented: + if sorted: + hash_unique.sort() + # We wrap the result back in case it was a subclass of numpy.ndarray. + return (conv.wrap(hash_unique),) + + # If we don't use the hash map, we use the slower sorting method. 
if optional_indices: perm = ar.argsort(kind='mergesort' if return_index else 'quicksort') aux = ar[perm] @@ -371,7 +407,7 @@ def _unique1d(ar, return_index=False, return_inverse=False, imask = np.cumsum(mask) - 1 inv_idx = np.empty(mask.shape, dtype=np.intp) inv_idx[perm] = imask - ret += (inv_idx.reshape(inverse_shape),) + ret += (inv_idx.reshape(inverse_shape) if axis is None else inv_idx,) if return_counts: idx = np.concatenate(np.nonzero(mask) + ([mask.size],)) ret += (np.diff(idx),) @@ -404,14 +440,18 @@ def _unique_all_dispatcher(x, /): @array_function_dispatch(_unique_all_dispatcher) def unique_all(x): """ - Find the unique elements of an array, and counts, inverse and indices. + Find the unique elements of an array, and counts, inverse, and indices. + + This function is an Array API compatible alternative to:: - This function is an Array API compatible alternative to: + np.unique(x, return_index=True, return_inverse=True, + return_counts=True, equal_nan=False, sorted=False) - >>> x = np.array([1, 1, 2]) - >>> np.unique(x, return_index=True, return_inverse=True, - ... return_counts=True, equal_nan=False) - (array([1, 2]), array([0, 2]), array([0, 0, 1]), array([2, 1])) + but returns a namedtuple for easier access to each output. + + .. note:: + This function currently always returns a sorted result, however, + this could change in any NumPy minor release. 
Parameters ---------- @@ -435,19 +475,24 @@ def unique_all(x): Examples -------- - >>> np.unique_all([1, 1, 2]) - UniqueAllResult(values=array([1, 2]), - indices=array([0, 2]), - inverse_indices=array([0, 0, 1]), - counts=array([2, 1])) - + >>> import numpy as np + >>> x = [1, 1, 2] + >>> uniq = np.unique_all(x) + >>> uniq.values + array([1, 2]) + >>> uniq.indices + array([0, 2]) + >>> uniq.inverse_indices + array([0, 0, 1]) + >>> uniq.counts + array([2, 1]) """ result = unique( x, return_index=True, return_inverse=True, return_counts=True, - equal_nan=False + equal_nan=False, ) return UniqueAllResult(*result) @@ -461,11 +506,15 @@ def unique_counts(x): """ Find the unique elements and counts of an input array `x`. - This function is an Array API compatible alternative to: + This function is an Array API compatible alternative to:: + + np.unique(x, return_counts=True, equal_nan=False, sorted=False) + + but returns a namedtuple for easier access to each output. - >>> x = np.array([1, 1, 2]) - >>> np.unique(x, return_counts=True, equal_nan=False) - (array([1, 2]), array([2, 1])) + .. note:: + This function currently always returns a sorted result, however, + this could change in any NumPy minor release. Parameters ---------- @@ -486,16 +535,20 @@ def unique_counts(x): Examples -------- - >>> np.unique_counts([1, 1, 2]) - UniqueCountsResult(values=array([1, 2]), counts=array([2, 1])) - + >>> import numpy as np + >>> x = [1, 1, 2] + >>> uniq = np.unique_counts(x) + >>> uniq.values + array([1, 2]) + >>> uniq.counts + array([2, 1]) """ result = unique( x, return_index=False, return_inverse=False, return_counts=True, - equal_nan=False + equal_nan=False, ) return UniqueCountsResult(*result) @@ -509,11 +562,15 @@ def unique_inverse(x): """ Find the unique elements of `x` and indices to reconstruct `x`. 
- This function is Array API compatible alternative to: + This function is an Array API compatible alternative to:: + + np.unique(x, return_inverse=True, equal_nan=False, sorted=False) - >>> x = np.array([1, 1, 2]) - >>> np.unique(x, return_inverse=True, equal_nan=False) - (array([1, 2]), array([0, 0, 1])) + but returns a namedtuple for easier access to each output. + + .. note:: + This function currently always returns a sorted result, however, + this could change in any NumPy minor release. Parameters ---------- @@ -535,16 +592,20 @@ def unique_inverse(x): Examples -------- - >>> np.unique_inverse([1, 1, 2]) - UniqueInverseResult(values=array([1, 2]), inverse_indices=array([0, 0, 1])) - + >>> import numpy as np + >>> x = [1, 1, 2] + >>> uniq = np.unique_inverse(x) + >>> uniq.values + array([1, 2]) + >>> uniq.inverse_indices + array([0, 0, 1]) """ result = unique( x, return_index=False, return_inverse=True, return_counts=False, - equal_nan=False + equal_nan=False, ) return UniqueInverseResult(*result) @@ -558,11 +619,13 @@ def unique_values(x): """ Returns the unique elements of an input array `x`. - This function is Array API compatible alternative to: + This function is an Array API compatible alternative to:: - >>> x = np.array([1, 1, 2]) - >>> np.unique(x, equal_nan=False) - array([1, 2]) + np.unique(x, equal_nan=False, sorted=False) + + .. versionchanged:: 2.3 + The algorithm was changed to a faster one that does not rely on + sorting, and hence the results are no longer implicitly sorted. Parameters ---------- @@ -580,8 +643,9 @@ def unique_values(x): Examples -------- + >>> import numpy as np >>> np.unique_values([1, 1, 2]) - array([1, 2]) + array([1, 2]) # may vary """ return unique( @@ -589,7 +653,8 @@ def unique_values(x): return_index=False, return_inverse=False, return_counts=False, - equal_nan=False + equal_nan=False, + sorted=False, ) @@ -619,8 +684,6 @@ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False): arrays are returned. 
The first instance of a value is used if there are multiple. Default is False. - .. versionadded:: 1.15.0 - Returns ------- intersect1d : ndarray @@ -634,6 +697,7 @@ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False): Examples -------- + >>> import numpy as np >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1]) array([1, 3]) @@ -719,6 +783,7 @@ def setxor1d(ar1, ar2, assume_unique=False): Examples -------- + >>> import numpy as np >>> a = np.array([1, 2, 3, 2, 4]) >>> b = np.array([2, 3, 5, 7, 5]) >>> np.setxor1d(a,b) @@ -738,115 +803,7 @@ def setxor1d(ar1, ar2, assume_unique=False): return aux[flag[1:] & flag[:-1]] -def _in1d_dispatcher(ar1, ar2, assume_unique=None, invert=None, *, - kind=None): - return (ar1, ar2) - - -@array_function_dispatch(_in1d_dispatcher) -def in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None): - """ - Test whether each element of a 1-D array is also present in a second array. - - .. deprecated:: 2.0 - Use :func:`isin` instead of `in1d` for new code. - - Returns a boolean array the same length as `ar1` that is True - where an element of `ar1` is in `ar2` and False otherwise. - - Parameters - ---------- - ar1 : (M,) array_like - Input array. - ar2 : array_like - The values against which to test each value of `ar1`. - assume_unique : bool, optional - If True, the input arrays are both assumed to be unique, which - can speed up the calculation. Default is False. - invert : bool, optional - If True, the values in the returned array are inverted (that is, - False where an element of `ar1` is in `ar2` and True otherwise). - Default is False. ``np.in1d(a, b, invert=True)`` is equivalent - to (but is faster than) ``np.invert(in1d(a, b))``. - kind : {None, 'sort', 'table'}, optional - The algorithm to use. This will not affect the final result, - but will affect the speed and memory use. The default, None, - will select automatically based on memory considerations. - - * If 'sort', will use a mergesort-based approach. 
This will have - a memory usage of roughly 6 times the sum of the sizes of - `ar1` and `ar2`, not accounting for size of dtypes. - * If 'table', will use a lookup table approach similar - to a counting sort. This is only available for boolean and - integer arrays. This will have a memory usage of the - size of `ar1` plus the max-min value of `ar2`. `assume_unique` - has no effect when the 'table' option is used. - * If None, will automatically choose 'table' if - the required memory allocation is less than or equal to - 6 times the sum of the sizes of `ar1` and `ar2`, - otherwise will use 'sort'. This is done to not use - a large amount of memory by default, even though - 'table' may be faster in most cases. If 'table' is chosen, - `assume_unique` will have no effect. - - .. versionadded:: 1.8.0 - - Returns - ------- - in1d : (M,) ndarray, bool - The values `ar1[in1d]` are in `ar2`. - - See Also - -------- - isin : Version of this function that preserves the - shape of ar1. - - Notes - ----- - `in1d` can be considered as an element-wise function version of the - python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly - equivalent to ``np.array([item in b for item in a])``. - However, this idea fails if `ar2` is a set, or similar (non-sequence) - container: As ``ar2`` is converted to an array, in those cases - ``asarray(ar2)`` is an object array rather than the expected array of - contained values. - - Using ``kind='table'`` tends to be faster than `kind='sort'` if the - following relationship is true: - ``log10(len(ar2)) > (log10(max(ar2)-min(ar2)) - 2.27) / 0.927``, - but may use greater memory. The default value for `kind` will - be automatically selected based only on memory usage, so one may - manually set ``kind='table'`` if memory constraints can be relaxed. - - .. 
versionadded:: 1.4.0 - - Examples - -------- - >>> test = np.array([0, 1, 2, 5, 0]) - >>> states = [0, 2] - >>> mask = np.in1d(test, states) - >>> mask - array([ True, False, True, False, True]) - >>> test[mask] - array([0, 2, 0]) - >>> mask = np.in1d(test, states, invert=True) - >>> mask - array([False, True, False, True, False]) - >>> test[mask] - array([1, 5]) - """ - - # Deprecated in NumPy 2.0, 2023-08-18 - warnings.warn( - "`in1d` is deprecated. Use `np.isin` instead.", - DeprecationWarning, - stacklevel=2 - ) - - return _in1d(ar1, ar2, assume_unique, invert, kind=kind) - - -def _in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None): +def _isin(ar1, ar2, assume_unique=False, invert=False, *, kind=None): # Ravel both arrays, behavior for the first array could be different ar1 = np.asarray(ar1).ravel() ar2 = np.asarray(ar2).ravel() @@ -892,11 +849,11 @@ def _in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None): # However, here we set the requirement that by default # the intermediate array can only be 6x # the combined memory allocation of the original - # arrays. See discussion on + # arrays. See discussion on # https://github.com/numpy/numpy/pull/12065. if ( - range_safe_from_overflow and + range_safe_from_overflow and (below_memory_constraint or kind == 'table') ): @@ -915,8 +872,25 @@ def _in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None): # Mask out elements we know won't work basic_mask = (ar1 <= ar2_max) & (ar1 >= ar2_min) + in_range_ar1 = ar1[basic_mask] + if in_range_ar1.size == 0: + # Nothing more to do, since all values are out of range. + return outgoing_array + + # Unfortunately, ar2_min can be out of range for `intp` even + # if the calculation result must fit in range (and be positive). + # In that case, use ar2.dtype which must work for all unmasked + # values. 
+ try: + ar2_min = np.array(ar2_min, dtype=np.intp) + dtype = np.intp + except OverflowError: + dtype = ar2.dtype + + out = np.empty_like(in_range_ar1, dtype=np.intp) outgoing_array[basic_mask] = isin_helper_ar[ - np.subtract(ar1[basic_mask], ar2_min, dtype=np.intp)] + np.subtract(in_range_ar1, ar2_min, dtype=dtype, + out=out, casting="unsafe")] return outgoing_array elif kind == 'table': # not range_safe_from_overflow @@ -933,7 +907,6 @@ def _in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None): "Please select 'sort' or None for kind." ) - # Check if one of the arrays may contain arbitrary objects contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject @@ -1036,7 +1009,6 @@ def isin(element, test_elements, assume_unique=False, invert=False, *, Notes ----- - `isin` is an element-wise function version of the python keyword `in`. ``isin(a, b)`` is roughly equivalent to ``np.array([item in b for item in a])`` if `a` and `b` are 1-D sequences. @@ -1056,10 +1028,9 @@ def isin(element, test_elements, assume_unique=False, invert=False, *, be automatically selected based only on memory usage, so one may manually set ``kind='table'`` if memory constraints can be relaxed. - .. 
versionadded:: 1.13.0 - Examples -------- + >>> import numpy as np >>> element = 2*np.arange(4).reshape((2, 2)) >>> element array([[0, 2], @@ -1101,7 +1072,7 @@ def isin(element, test_elements, assume_unique=False, invert=False, *, [ True, False]]) """ element = np.asarray(element) - return _in1d(element, test_elements, assume_unique=assume_unique, + return _isin(element, test_elements, assume_unique=assume_unique, invert=invert, kind=kind).reshape(element.shape) @@ -1129,6 +1100,7 @@ def union1d(ar1, ar2): Examples -------- + >>> import numpy as np >>> np.union1d([-1, 0, 1], [-2, 0, 2]) array([-2, -1, 0, 1, 2]) @@ -1171,6 +1143,7 @@ def setdiff1d(ar1, ar2, assume_unique=False): Examples -------- + >>> import numpy as np >>> a = np.array([1, 2, 3, 2, 4, 1]) >>> b = np.array([3, 4, 5, 6]) >>> np.setdiff1d(a, b) @@ -1182,4 +1155,4 @@ def setdiff1d(ar1, ar2, assume_unique=False): else: ar1 = unique(ar1) ar2 = unique(ar2) - return ar1[_in1d(ar1, ar2, assume_unique=True, invert=True)] + return ar1[_isin(ar1, ar2, assume_unique=True, invert=True)] diff --git a/numpy/lib/_arraysetops_impl.pyi b/numpy/lib/_arraysetops_impl.pyi index 95498248f21a..77db8c4f3620 100644 --- a/numpy/lib/_arraysetops_impl.pyi +++ b/numpy/lib/_arraysetops_impl.pyi @@ -1,399 +1,460 @@ -from typing import ( - Any, - Generic, - Literal as L, - NamedTuple, - overload, - SupportsIndex, - TypeVar, -) +from _typeshed import Incomplete +from typing import Any, Literal as L, NamedTuple, SupportsIndex, TypeVar, overload import numpy as np -from numpy import ( - generic, - number, - ushort, - ubyte, - uintc, - uint, - ulonglong, - short, - int8, - byte, - intc, - int_, - intp, - longlong, - half, - single, - double, - longdouble, - csingle, - cdouble, - clongdouble, - timedelta64, - datetime64, - object_, - str_, - bytes_, - void, -) - from numpy._typing import ( ArrayLike, NDArray, _ArrayLike, _ArrayLikeBool_co, - _ArrayLikeDT64_co, - _ArrayLikeTD64_co, - _ArrayLikeObject_co, _ArrayLikeNumber_co, ) -_SCT = 
TypeVar("_SCT", bound=generic) -_NumberType = TypeVar("_NumberType", bound=number[Any]) +__all__ = [ + "ediff1d", + "intersect1d", + "isin", + "setdiff1d", + "setxor1d", + "union1d", + "unique", + "unique_all", + "unique_counts", + "unique_inverse", + "unique_values", +] # Explicitly set all allowed values to prevent accidental castings to # abstract dtypes (their common super-type). -# # Only relevant if two or more arguments are parametrized, (e.g. `setdiff1d`) # which could result in, for example, `int64` and `float64`producing a # `number[_64Bit]` array -_SCTNoCast = TypeVar( - "_SCTNoCast", +_AnyScalarT = TypeVar( + "_AnyScalarT", np.bool, - ushort, - ubyte, - uintc, - uint, - ulonglong, - short, - byte, - intc, - int_, - longlong, - half, - single, - double, - longdouble, - csingle, - cdouble, - clongdouble, - timedelta64, - datetime64, - object_, - str_, - bytes_, - void, -) + np.int8, np.int16, np.int32, np.int64, np.intp, + np.uint8, np.uint16, np.uint32, np.uint64, np.uintp, + np.float16, np.float32, np.float64, np.longdouble, + np.complex64, np.complex128, np.clongdouble, + np.timedelta64, np.datetime64, + np.bytes_, np.str_, np.void, np.object_, + np.integer, np.floating, np.complexfloating, np.character, +) # fmt: skip + +type _NumericScalar = np.number | np.timedelta64 | np.object_ +type _IntArray = NDArray[np.intp] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] -class UniqueAllResult(NamedTuple, Generic[_SCT]): - values: NDArray[_SCT] - indices: NDArray[intp] - inverse_indices: NDArray[intp] - counts: NDArray[intp] +type _IntersectResult[ScalarT: np.generic] = tuple[_Array1D[ScalarT], _Array1D[np.intp], _Array1D[np.intp]] -class UniqueCountsResult(NamedTuple, Generic[_SCT]): - values: NDArray[_SCT] - counts: NDArray[intp] +### -class UniqueInverseResult(NamedTuple, Generic[_SCT]): - values: NDArray[_SCT] - inverse_indices: NDArray[intp] +class UniqueAllResult[ScalarT: np.generic](NamedTuple): + values: 
_Array1D[ScalarT] + indices: _Array1D[np.intp] + inverse_indices: _IntArray + counts: _Array1D[np.intp] -__all__: list[str] +class UniqueCountsResult[ScalarT: np.generic](NamedTuple): + values: _Array1D[ScalarT] + counts: _Array1D[np.intp] +class UniqueInverseResult[ScalarT: np.generic](NamedTuple): + values: _Array1D[ScalarT] + inverse_indices: NDArray[np.intp] + +# keep in sync with `ma.extras.ediff1d` @overload def ediff1d( ary: _ArrayLikeBool_co, - to_end: None | ArrayLike = ..., - to_begin: None | ArrayLike = ..., -) -> NDArray[int8]: ... -@overload -def ediff1d( - ary: _ArrayLike[_NumberType], - to_end: None | ArrayLike = ..., - to_begin: None | ArrayLike = ..., -) -> NDArray[_NumberType]: ... + to_end: ArrayLike | None = None, + to_begin: ArrayLike | None = None, +) -> _Array1D[np.int8]: ... @overload -def ediff1d( - ary: _ArrayLikeNumber_co, - to_end: None | ArrayLike = ..., - to_begin: None | ArrayLike = ..., -) -> NDArray[Any]: ... +def ediff1d[NumericT: _NumericScalar]( + ary: _ArrayLike[NumericT], + to_end: ArrayLike | None = None, + to_begin: ArrayLike | None = None, +) -> _Array1D[NumericT]: ... @overload def ediff1d( - ary: _ArrayLikeDT64_co | _ArrayLikeTD64_co, - to_end: None | ArrayLike = ..., - to_begin: None | ArrayLike = ..., -) -> NDArray[timedelta64]: ... + ary: _ArrayLike[np.datetime64[Any]], + to_end: ArrayLike | None = None, + to_begin: ArrayLike | None = None, +) -> _Array1D[np.timedelta64]: ... @overload def ediff1d( - ary: _ArrayLikeObject_co, - to_end: None | ArrayLike = ..., - to_begin: None | ArrayLike = ..., -) -> NDArray[object_]: ... + ary: _ArrayLikeNumber_co, + to_end: ArrayLike | None = None, + to_begin: ArrayLike | None = None, +) -> _Array1D[Incomplete]: ... 
-@overload -def unique( - ar: _ArrayLike[_SCT], - return_index: L[False] = ..., - return_inverse: L[False] = ..., - return_counts: L[False] = ..., - axis: None | SupportsIndex = ..., +# +@overload # known scalar-type, FFF +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], + return_index: L[False] = False, + return_inverse: L[False] = False, + return_counts: L[False] = False, + axis: SupportsIndex | None = None, *, - equal_nan: bool = ..., -) -> NDArray[_SCT]: ... -@overload + equal_nan: bool = True, + sorted: bool = True, +) -> NDArray[ScalarT]: ... +@overload # unknown scalar-type, FFF def unique( ar: ArrayLike, - return_index: L[False] = ..., - return_inverse: L[False] = ..., - return_counts: L[False] = ..., - axis: None | SupportsIndex = ..., + return_index: L[False] = False, + return_inverse: L[False] = False, + return_counts: L[False] = False, + axis: SupportsIndex | None = None, *, - equal_nan: bool = ..., -) -> NDArray[Any]: ... -@overload -def unique( - ar: _ArrayLike[_SCT], - return_index: L[True] = ..., - return_inverse: L[False] = ..., - return_counts: L[False] = ..., - axis: None | SupportsIndex = ..., + equal_nan: bool = True, + sorted: bool = True, +) -> np.ndarray: ... +@overload # known scalar-type, TFF +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], + return_index: L[True], + return_inverse: L[False] = False, + return_counts: L[False] = False, + axis: SupportsIndex | None = None, *, - equal_nan: bool = ..., -) -> tuple[NDArray[_SCT], NDArray[intp]]: ... -@overload + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[NDArray[ScalarT], _IntArray]: ... 
+@overload # unknown scalar-type, TFF def unique( ar: ArrayLike, - return_index: L[True] = ..., - return_inverse: L[False] = ..., - return_counts: L[False] = ..., - axis: None | SupportsIndex = ..., + return_index: L[True], + return_inverse: L[False] = False, + return_counts: L[False] = False, + axis: SupportsIndex | None = None, *, - equal_nan: bool = ..., -) -> tuple[NDArray[Any], NDArray[intp]]: ... -@overload -def unique( - ar: _ArrayLike[_SCT], - return_index: L[False] = ..., - return_inverse: L[True] = ..., - return_counts: L[False] = ..., - axis: None | SupportsIndex = ..., + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[np.ndarray, _IntArray]: ... +@overload # known scalar-type, FTF (positional) +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], + return_index: L[False], + return_inverse: L[True], + return_counts: L[False] = False, + axis: SupportsIndex | None = None, *, - equal_nan: bool = ..., -) -> tuple[NDArray[_SCT], NDArray[intp]]: ... -@overload + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[NDArray[ScalarT], _IntArray]: ... +@overload # known scalar-type, FTF (keyword) +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], + return_index: L[False] = False, + *, + return_inverse: L[True], + return_counts: L[False] = False, + axis: SupportsIndex | None = None, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[NDArray[ScalarT], _IntArray]: ... +@overload # unknown scalar-type, FTF (positional) def unique( ar: ArrayLike, - return_index: L[False] = ..., - return_inverse: L[True] = ..., - return_counts: L[False] = ..., - axis: None | SupportsIndex = ..., + return_index: L[False], + return_inverse: L[True], + return_counts: L[False] = False, + axis: SupportsIndex | None = None, *, - equal_nan: bool = ..., -) -> tuple[NDArray[Any], NDArray[intp]]: ... -@overload + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[np.ndarray, _IntArray]: ... 
+@overload # unknown scalar-type, FTF (keyword) def unique( - ar: _ArrayLike[_SCT], - return_index: L[False] = ..., - return_inverse: L[False] = ..., - return_counts: L[True] = ..., - axis: None | SupportsIndex = ..., + ar: ArrayLike, + return_index: L[False] = False, *, - equal_nan: bool = ..., -) -> tuple[NDArray[_SCT], NDArray[intp]]: ... -@overload + return_inverse: L[True], + return_counts: L[False] = False, + axis: SupportsIndex | None = None, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[np.ndarray, _IntArray]: ... +@overload # known scalar-type, FFT (positional) +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], + return_index: L[False], + return_inverse: L[False], + return_counts: L[True], + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[NDArray[ScalarT], _IntArray]: ... +@overload # known scalar-type, FFT (keyword) +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], + return_index: L[False] = False, + return_inverse: L[False] = False, + *, + return_counts: L[True], + axis: SupportsIndex | None = None, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[NDArray[ScalarT], _IntArray]: ... +@overload # unknown scalar-type, FFT (positional) def unique( ar: ArrayLike, - return_index: L[False] = ..., - return_inverse: L[False] = ..., - return_counts: L[True] = ..., - axis: None | SupportsIndex = ..., + return_index: L[False], + return_inverse: L[False], + return_counts: L[True], + axis: SupportsIndex | None = None, *, - equal_nan: bool = ..., -) -> tuple[NDArray[Any], NDArray[intp]]: ... -@overload + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[np.ndarray, _IntArray]: ... 
+@overload # unknown scalar-type, FFT (keyword) def unique( - ar: _ArrayLike[_SCT], - return_index: L[True] = ..., - return_inverse: L[True] = ..., - return_counts: L[False] = ..., - axis: None | SupportsIndex = ..., + ar: ArrayLike, + return_index: L[False] = False, + return_inverse: L[False] = False, *, - equal_nan: bool = ..., -) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ... -@overload + return_counts: L[True], + axis: SupportsIndex | None = None, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[np.ndarray, _IntArray]: ... +@overload # known scalar-type, TTF +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], + return_index: L[True], + return_inverse: L[True], + return_counts: L[False] = False, + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[NDArray[ScalarT], _IntArray, _IntArray]: ... +@overload # unknown scalar-type, TTF def unique( ar: ArrayLike, - return_index: L[True] = ..., - return_inverse: L[True] = ..., - return_counts: L[False] = ..., - axis: None | SupportsIndex = ..., + return_index: L[True], + return_inverse: L[True], + return_counts: L[False] = False, + axis: SupportsIndex | None = None, *, - equal_nan: bool = ..., -) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ... -@overload -def unique( - ar: _ArrayLike[_SCT], - return_index: L[True] = ..., - return_inverse: L[False] = ..., - return_counts: L[True] = ..., - axis: None | SupportsIndex = ..., + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[np.ndarray, _IntArray, _IntArray]: ... +@overload # known scalar-type, TFT (positional) +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], + return_index: L[True], + return_inverse: L[False], + return_counts: L[True], + axis: SupportsIndex | None = None, *, - equal_nan: bool = ..., -) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ... 
-@overload + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[NDArray[ScalarT], _IntArray, _IntArray]: ... +@overload # known scalar-type, TFT (keyword) +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], + return_index: L[True], + return_inverse: L[False] = False, + *, + return_counts: L[True], + axis: SupportsIndex | None = None, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[NDArray[ScalarT], _IntArray, _IntArray]: ... +@overload # unknown scalar-type, TFT (positional) def unique( ar: ArrayLike, - return_index: L[True] = ..., - return_inverse: L[False] = ..., - return_counts: L[True] = ..., - axis: None | SupportsIndex = ..., + return_index: L[True], + return_inverse: L[False], + return_counts: L[True], + axis: SupportsIndex | None = None, *, - equal_nan: bool = ..., -) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ... -@overload + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[np.ndarray, _IntArray, _IntArray]: ... +@overload # unknown scalar-type, TFT (keyword) def unique( - ar: _ArrayLike[_SCT], - return_index: L[False] = ..., - return_inverse: L[True] = ..., - return_counts: L[True] = ..., - axis: None | SupportsIndex = ..., + ar: ArrayLike, + return_index: L[True], + return_inverse: L[False] = False, *, - equal_nan: bool = ..., -) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ... -@overload + return_counts: L[True], + axis: SupportsIndex | None = None, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[np.ndarray, _IntArray, _IntArray]: ... +@overload # known scalar-type, FTT (positional) +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], + return_index: L[False], + return_inverse: L[True], + return_counts: L[True], + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[NDArray[ScalarT], _IntArray, _IntArray]: ... 
+@overload # known scalar-type, FTT (keyword) +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], + return_index: L[False] = False, + *, + return_inverse: L[True], + return_counts: L[True], + axis: SupportsIndex | None = None, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[NDArray[ScalarT], _IntArray, _IntArray]: ... +@overload # unknown scalar-type, FTT (positional) def unique( ar: ArrayLike, - return_index: L[False] = ..., - return_inverse: L[True] = ..., - return_counts: L[True] = ..., - axis: None | SupportsIndex = ..., + return_index: L[False], + return_inverse: L[True], + return_counts: L[True], + axis: SupportsIndex | None = None, *, - equal_nan: bool = ..., -) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ... -@overload + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[np.ndarray, _IntArray, _IntArray]: ... +@overload # unknown scalar-type, FTT (keyword) def unique( - ar: _ArrayLike[_SCT], - return_index: L[True] = ..., - return_inverse: L[True] = ..., - return_counts: L[True] = ..., - axis: None | SupportsIndex = ..., + ar: ArrayLike, + return_index: L[False] = False, *, - equal_nan: bool = ..., -) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp], NDArray[intp]]: ... -@overload + return_inverse: L[True], + return_counts: L[True], + axis: SupportsIndex | None = None, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[np.ndarray, _IntArray, _IntArray]: ... +@overload # known scalar-type, TTT +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], + return_index: L[True], + return_inverse: L[True], + return_counts: L[True], + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[NDArray[ScalarT], _IntArray, _IntArray, _IntArray]: ... 
+@overload # unknown scalar-type, TTT def unique( ar: ArrayLike, - return_index: L[True] = ..., - return_inverse: L[True] = ..., - return_counts: L[True] = ..., - axis: None | SupportsIndex = ..., + return_index: L[True], + return_inverse: L[True], + return_counts: L[True], + axis: SupportsIndex | None = None, *, - equal_nan: bool = ..., -) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp], NDArray[intp]]: ... + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[np.ndarray, _IntArray, _IntArray, _IntArray]: ... +# @overload -def unique_all( - x: _ArrayLike[_SCT], / -) -> UniqueAllResult[_SCT]: ... +def unique_all[ScalarT: np.generic](x: _ArrayLike[ScalarT]) -> UniqueAllResult[ScalarT]: ... @overload -def unique_all( - x: ArrayLike, / -) -> UniqueAllResult[Any]: ... +def unique_all(x: ArrayLike) -> UniqueAllResult[Any]: ... +# @overload -def unique_counts( - x: _ArrayLike[_SCT], / -) -> UniqueCountsResult[_SCT]: ... +def unique_counts[ScalarT: np.generic](x: _ArrayLike[ScalarT]) -> UniqueCountsResult[ScalarT]: ... @overload -def unique_counts( - x: ArrayLike, / -) -> UniqueCountsResult[Any]: ... +def unique_counts(x: ArrayLike) -> UniqueCountsResult[Any]: ... +# @overload -def unique_inverse(x: _ArrayLike[_SCT], /) -> UniqueInverseResult[_SCT]: ... +def unique_inverse[ScalarT: np.generic](x: _ArrayLike[ScalarT]) -> UniqueInverseResult[ScalarT]: ... @overload -def unique_inverse(x: ArrayLike, /) -> UniqueInverseResult[Any]: ... +def unique_inverse(x: ArrayLike) -> UniqueInverseResult[Any]: ... +# @overload -def unique_values(x: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ... +def unique_values[ScalarT: np.generic](x: _ArrayLike[ScalarT]) -> _Array1D[ScalarT]: ... @overload -def unique_values(x: ArrayLike, /) -> NDArray[Any]: ... +def unique_values(x: ArrayLike) -> _Array1D[Incomplete]: ... 
-@overload -def intersect1d( - ar1: _ArrayLike[_SCTNoCast], - ar2: _ArrayLike[_SCTNoCast], - assume_unique: bool = ..., - return_indices: L[False] = ..., -) -> NDArray[_SCTNoCast]: ... -@overload +# NOTE: we ignore UP047 because inlining `_AnyScalarT` would result in a lot of code duplication + +# +@overload # known scalar-type, return_indices=False (default) +def intersect1d( # noqa: UP047 + ar1: _ArrayLike[_AnyScalarT], + ar2: _ArrayLike[_AnyScalarT], + assume_unique: bool = False, + return_indices: L[False] = False, +) -> _Array1D[_AnyScalarT]: ... +@overload # known scalar-type, return_indices=True (positional) +def intersect1d( # noqa: UP047 + ar1: _ArrayLike[_AnyScalarT], + ar2: _ArrayLike[_AnyScalarT], + assume_unique: bool, + return_indices: L[True], +) -> _IntersectResult[_AnyScalarT]: ... +@overload # known scalar-type, return_indices=True (keyword) +def intersect1d( # noqa: UP047 + ar1: _ArrayLike[_AnyScalarT], + ar2: _ArrayLike[_AnyScalarT], + assume_unique: bool = False, + *, + return_indices: L[True], +) -> _IntersectResult[_AnyScalarT]: ... +@overload # unknown scalar-type, return_indices=False (default) def intersect1d( ar1: ArrayLike, ar2: ArrayLike, - assume_unique: bool = ..., - return_indices: L[False] = ..., -) -> NDArray[Any]: ... -@overload + assume_unique: bool = False, + return_indices: L[False] = False, +) -> _Array1D[Incomplete]: ... +@overload # unknown scalar-type, return_indices=True (positional) def intersect1d( - ar1: _ArrayLike[_SCTNoCast], - ar2: _ArrayLike[_SCTNoCast], - assume_unique: bool = ..., - return_indices: L[True] = ..., -) -> tuple[NDArray[_SCTNoCast], NDArray[intp], NDArray[intp]]: ... -@overload + ar1: ArrayLike, + ar2: ArrayLike, + assume_unique: bool, + return_indices: L[True], +) -> _IntersectResult[Incomplete]: ... 
+@overload # unknown scalar-type, return_indices=True (keyword) def intersect1d( ar1: ArrayLike, ar2: ArrayLike, - assume_unique: bool = ..., - return_indices: L[True] = ..., -) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ... + assume_unique: bool = False, + *, + return_indices: L[True], +) -> _IntersectResult[Incomplete]: ... +# @overload -def setxor1d( - ar1: _ArrayLike[_SCTNoCast], - ar2: _ArrayLike[_SCTNoCast], - assume_unique: bool = ..., -) -> NDArray[_SCTNoCast]: ... +def setxor1d( # noqa: UP047 + ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False +) -> _Array1D[_AnyScalarT]: ... @overload -def setxor1d( - ar1: ArrayLike, - ar2: ArrayLike, - assume_unique: bool = ..., -) -> NDArray[Any]: ... - -def isin( - element: ArrayLike, - test_elements: ArrayLike, - assume_unique: bool = ..., - invert: bool = ..., - *, - kind: None | str = ..., -) -> NDArray[np.bool]: ... +def setxor1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _Array1D[Incomplete]: ... +# @overload -def union1d( - ar1: _ArrayLike[_SCTNoCast], - ar2: _ArrayLike[_SCTNoCast], -) -> NDArray[_SCTNoCast]: ... +def union1d(ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT]) -> _Array1D[_AnyScalarT]: ... # noqa: UP047 @overload -def union1d( - ar1: ArrayLike, - ar2: ArrayLike, -) -> NDArray[Any]: ... +def union1d(ar1: ArrayLike, ar2: ArrayLike) -> _Array1D[Incomplete]: ... +# @overload -def setdiff1d( - ar1: _ArrayLike[_SCTNoCast], - ar2: _ArrayLike[_SCTNoCast], - assume_unique: bool = ..., -) -> NDArray[_SCTNoCast]: ... +def setdiff1d( # noqa: UP047 + ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False +) -> _Array1D[_AnyScalarT]: ... @overload -def setdiff1d( - ar1: ArrayLike, - ar2: ArrayLike, - assume_unique: bool = ..., -) -> NDArray[Any]: ... +def setdiff1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _Array1D[Incomplete]: ... 
+ +# +def isin( + element: ArrayLike, + test_elements: ArrayLike, + assume_unique: bool = False, + invert: bool = False, + *, + kind: L["sort", "table"] | None = None, +) -> NDArray[np.bool]: ... diff --git a/numpy/lib/_arrayterator_impl.py b/numpy/lib/_arrayterator_impl.py index 8b21a6086638..5f7c5fc4fb65 100644 --- a/numpy/lib/_arrayterator_impl.py +++ b/numpy/lib/_arrayterator_impl.py @@ -7,8 +7,8 @@ a user-specified number of elements. """ -from operator import mul from functools import reduce +from operator import mul __all__ = ['Arrayterator'] @@ -66,6 +66,7 @@ class Arrayterator: Examples -------- + >>> import numpy as np >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) >>> a_itor = np.lib.Arrayterator(a, 2) >>> a_itor.shape @@ -82,12 +83,14 @@ class Arrayterator: """ + __module__ = "numpy.lib" + def __init__(self, var, buf_size=None): self.var = var self.buf_size = buf_size self.start = [0 for dim in var.shape] - self.stop = [dim for dim in var.shape] + self.stop = list(var.shape) self.step = [1 for dim in var.shape] def __getattr__(self, attr): @@ -105,15 +108,15 @@ def __getitem__(self, index): length, dims = len(index), self.ndim for slice_ in index: if slice_ is Ellipsis: - fixed.extend([slice(None)] * (dims-length+1)) + fixed.extend([slice(None)] * (dims - length + 1)) length = len(fixed) elif isinstance(slice_, int): - fixed.append(slice(slice_, slice_+1, 1)) + fixed.append(slice(slice_, slice_ + 1, 1)) else: fixed.append(slice_) index = tuple(fixed) if len(index) < dims: - index += (slice(None),) * (dims-len(index)) + index += (slice(None),) * (dims - len(index)) # Return a new arrayterator object. 
out = self.__class__(self.var, self.buf_size) @@ -121,7 +124,7 @@ def __getitem__(self, index): zip(self.start, self.stop, self.step, index)): out.start[i] = start + (slice_.start or 0) out.step[i] = step * (slice_.step or 1) - out.stop[i] = start + (slice_.stop or stop-start) + out.stop[i] = start + (slice_.stop or stop - start) out.stop[i] = min(stop, out.stop[i]) return out @@ -140,7 +143,7 @@ def flat(self): A 1-D flat iterator for Arrayterator objects. This iterator returns elements of the array to be iterated over in - `~lib.Arrayterator` one by one. + `~lib.Arrayterator` one by one. It is similar to `flatiter`. See Also @@ -171,7 +174,7 @@ def shape(self): For an example, see `Arrayterator`. """ - return tuple(((stop-start-1)//step+1) for start, stop, step in + return tuple(((stop - start - 1) // step + 1) for start, stop, step in zip(self.start, self.stop, self.step)) def __iter__(self): @@ -191,20 +194,20 @@ def __iter__(self): # running dimension (ie, the dimension along which # the blocks will be built from) rundim = 0 - for i in range(ndims-1, -1, -1): + for i in range(ndims - 1, -1, -1): # if count is zero we ran out of elements to read # along higher dimensions, so we read only a single position if count == 0: - stop[i] = start[i]+1 + stop[i] = start[i] + 1 elif count <= self.shape[i]: # limit along this dimension - stop[i] = start[i] + count*step[i] + stop[i] = start[i] + count * step[i] rundim = i else: # read everything along this dimension stop[i] = self.stop[i] stop[i] = min(self.stop[i], stop[i]) - count = count//self.shape[i] + count = count // self.shape[i] # yield a block slice_ = tuple(slice(*t) for t in zip(start, stop, step)) @@ -213,9 +216,9 @@ def __iter__(self): # Update start position, taking care of overflow to # other dimensions start[rundim] = stop[rundim] # start where we stopped - for i in range(ndims-1, 0, -1): + for i in range(ndims - 1, 0, -1): if start[i] >= self.stop[i]: start[i] = self.start[i] - start[i-1] += self.step[i-1] 
+ start[i - 1] += self.step[i - 1] if start[0] >= self.stop[0]: return diff --git a/numpy/lib/_arrayterator_impl.pyi b/numpy/lib/_arrayterator_impl.pyi index fb9c42dd2bbe..2d221f9007e9 100644 --- a/numpy/lib/_arrayterator_impl.pyi +++ b/numpy/lib/_arrayterator_impl.pyi @@ -1,48 +1,44 @@ +# pyright: reportIncompatibleMethodOverride=false + from collections.abc import Generator -from typing import ( - Any, - TypeVar, - overload, -) +from types import EllipsisType +from typing import Any, Final, overload +from typing_extensions import TypeVar -from numpy import ndarray, dtype, generic -from numpy._typing import DTypeLike, NDArray +import numpy as np +from numpy._typing import _AnyShape, _Shape -# TODO: Set a shape bound once we've got proper shape support -_Shape = TypeVar("_Shape", bound=Any) -_DType = TypeVar("_DType", bound=dtype[Any]) -_ScalarType = TypeVar("_ScalarType", bound=generic) +__all__ = ["Arrayterator"] -_Index = ( - ellipsis - | int - | slice - | tuple[ellipsis | int | slice, ...] -) +# Type parameter default syntax (PEP 696) requires Python 3.13+ +_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) -__all__: list[str] +type _AnyIndex = EllipsisType | int | slice | tuple[EllipsisType | int | slice, ...] 
# NOTE: In reality `Arrayterator` does not actually inherit from `ndarray`, # but its ``__getattr__` method does wrap around the former and thus has # access to all its methods -class Arrayterator(ndarray[_Shape, _DType]): - var: ndarray[_Shape, _DType] # type: ignore[assignment] - buf_size: None | int - start: list[int] - stop: list[int] - step: list[int] +class Arrayterator(np.ndarray[_ShapeT_co, _DTypeT_co]): + var: np.ndarray[_ShapeT_co, _DTypeT_co] # type: ignore[assignment] + buf_size: Final[int | None] + start: Final[list[int]] + stop: Final[list[int]] + step: Final[list[int]] @property # type: ignore[misc] - def shape(self) -> tuple[int, ...]: ... + def shape(self) -> _ShapeT_co: ... # pyrefly: ignore[bad-override] @property - def flat(self: NDArray[_ScalarType]) -> Generator[_ScalarType, None, None]: ... - def __init__( - self, var: ndarray[_Shape, _DType], buf_size: None | int = ... - ) -> None: ... + def flat[ScalarT: np.generic](self: Arrayterator[Any, np.dtype[ScalarT]]) -> Generator[ScalarT]: ... # type: ignore[override] + + # + def __init__(self, /, var: np.ndarray[_ShapeT_co, _DTypeT_co], buf_size: int | None = None) -> None: ... + def __getitem__(self, index: _AnyIndex, /) -> Arrayterator[_AnyShape, _DTypeT_co]: ... # type: ignore[override] + def __iter__(self) -> Generator[np.ndarray[_AnyShape, _DTypeT_co]]: ... # pyrefly: ignore[bad-override] + + # @overload - def __array__(self, dtype: None = ..., copy: None | bool = ...) -> ndarray[Any, _DType]: ... + def __array__(self, /, dtype: None = None, copy: bool | None = None) -> np.ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __array__(self, dtype: DTypeLike, copy: None | bool = ...) -> NDArray[Any]: ... - def __getitem__(self, index: _Index) -> Arrayterator[Any, _DType]: ... - def __iter__(self) -> Generator[ndarray[Any, _DType], None, None]: ... + def __array__[DTypeT: np.dtype](self, /, dtype: DTypeT, copy: bool | None = None) -> np.ndarray[_ShapeT_co, DTypeT]: ... 
diff --git a/numpy/lib/_datasource.py b/numpy/lib/_datasource.py index 9b455513ac89..1c9331fe553a 100644 --- a/numpy/lib/_datasource.py +++ b/numpy/lib/_datasource.py @@ -36,8 +36,7 @@ """ import os -from .._utils import set_module - +from numpy._utils import set_module _open = open @@ -57,7 +56,7 @@ def _check_mode(mode, encoding, newline): """ if "t" in mode: if "b" in mode: - raise ValueError("Invalid mode: %r" % (mode,)) + raise ValueError(f"Invalid mode: {mode!r}") else: if encoding is not None: raise ValueError("Argument 'encoding' not supported in binary mode") @@ -149,13 +148,14 @@ def __getitem__(self, key): self._load() return self._file_openers[key] + _file_openers = _FileOpeners() def open(path, mode='r', destpath=os.curdir, encoding=None, newline=None): """ Open `path` with `mode` and return the file object. - If ``path`` is an URL, it will be downloaded, stored in the + If ``path`` is a URL, it will be downloaded, stored in the `DataSource` `destpath` directory and opened from there. Parameters @@ -271,10 +271,7 @@ def _iswritemode(self, mode): # Currently only used to test the bz2 files. _writemodes = ("w", "+") - for c in mode: - if c in _writemodes: - return True - return False + return any(c in _writemodes for c in mode) def _splitzipext(self, filename): """Split zip extension from filename and return filename. @@ -296,7 +293,7 @@ def _possible_names(self, filename): if not self._iszip(filename): for zipext in _file_openers.keys(): if zipext: - names.append(filename+zipext) + names.append(filename + zipext) return names def _isurl(self, path): @@ -343,7 +340,7 @@ def _cache(self, path): def _findfile(self, path): """Searches for ``path`` and returns full path if found. - If path is an URL, _findfile will cache a local copy and return the + If path is a URL, _findfile will cache a local copy and return the path to the cached file. If path is a local file, _findfile will return a path to that local file. 
@@ -375,7 +372,7 @@ def abspath(self, path): """ Return absolute path of file in the DataSource directory. - If `path` is an URL, then `abspath` will return either the location + If `path` is a URL, then `abspath` will return either the location the file exists locally or the location it would exist when opened using the `open` method. @@ -423,7 +420,7 @@ def _sanitize_relative_path(self, path): last = path # Note: os.path.join treats '/' as os.sep on Windows path = path.lstrip(os.sep).lstrip('/') - path = path.lstrip(os.pardir).lstrip('..') + path = path.lstrip(os.pardir).removeprefix('..') drive, path = os.path.splitdrive(path) # for Windows return path @@ -451,7 +448,7 @@ def exists(self, path): Notes ----- - When `path` is an URL, `exists` will return True if it's either + When `path` is a URL, `exists` will return True if it's either stored locally in the `DataSource` directory, or is a valid remote URL. `DataSource` does not discriminate between the two, the file is accessible if it exists in either location. @@ -464,8 +461,8 @@ def exists(self, path): # We import this here because importing urllib is slow and # a significant fraction of numpy's total import time. - from urllib.request import urlopen from urllib.error import URLError + from urllib.request import urlopen # Test cached url upath = self.abspath(path) @@ -477,7 +474,7 @@ def exists(self, path): try: netfile = urlopen(path) netfile.close() - del(netfile) + del netfile return True except URLError: return False @@ -487,7 +484,7 @@ def open(self, path, mode='r', encoding=None, newline=None): """ Open and return file-like object. - If `path` is an URL, it will be downloaded, stored in the + If `path` is a URL, it will be downloaded, stored in the `DataSource` directory and opened from there. Parameters @@ -597,7 +594,7 @@ def abspath(self, path): """ Return absolute path of file in the Repository directory. 
- If `path` is an URL, then `abspath` will return either the location + If `path` is a URL, then `abspath` will return either the location the file exists locally or the location it would exist when opened using the `open` method. @@ -642,7 +639,7 @@ def exists(self, path): Notes ----- - When `path` is an URL, `exists` will return True if it's either + When `path` is a URL, `exists` will return True if it's either stored locally in the `DataSource` directory, or is a valid remote URL. `DataSource` does not discriminate between the two, the file is accessible if it exists in either location. @@ -654,7 +651,7 @@ def open(self, path, mode='r', encoding=None, newline=None): """ Open and return file-like object prepending Repository base URL. - If `path` is an URL, it will be downloaded, stored in the + If `path` is a URL, it will be downloaded, stored in the DataSource directory and opened from there. Parameters diff --git a/numpy/lib/_datasource.pyi b/numpy/lib/_datasource.pyi new file mode 100644 index 000000000000..33af9cf1b197 --- /dev/null +++ b/numpy/lib/_datasource.pyi @@ -0,0 +1,30 @@ +from _typeshed import OpenBinaryMode, OpenTextMode +from pathlib import Path +from typing import IO, Any + +type _Mode = OpenBinaryMode | OpenTextMode + +### + +# exported in numpy.lib.nppyio +class DataSource: + def __init__(self, /, destpath: Path | str | None = ".") -> None: ... + def __del__(self, /) -> None: ... + def abspath(self, /, path: str) -> str: ... + def exists(self, /, path: str) -> bool: ... + + # Whether the file-object is opened in string or bytes mode (by default) + # depends on the file-extension of `path` + def open(self, /, path: str, mode: _Mode = "r", encoding: str | None = None, newline: str | None = None) -> IO[Any]: ... + +class Repository(DataSource): + def __init__(self, /, baseurl: str, destpath: str | None = ".") -> None: ... + def listdir(self, /) -> list[str]: ... 
+ +def open( + path: str, + mode: _Mode = "r", + destpath: str | None = ".", + encoding: str | None = None, + newline: str | None = None, +) -> IO[Any]: ... diff --git a/numpy/lib/_format_impl.py b/numpy/lib/_format_impl.py new file mode 100644 index 000000000000..51b16ce0de48 --- /dev/null +++ b/numpy/lib/_format_impl.py @@ -0,0 +1,1038 @@ +""" +Binary serialization + +NPY format +========== + +A simple format for saving numpy arrays to disk with the full +information about them. + +The ``.npy`` format is the standard binary file format in NumPy for +persisting a *single* arbitrary NumPy array on disk. The format stores all +of the shape and dtype information necessary to reconstruct the array +correctly even on another machine with a different architecture. +The format is designed to be as simple as possible while achieving +its limited goals. + +The ``.npz`` format is the standard format for persisting *multiple* NumPy +arrays on disk. A ``.npz`` file is a zip file containing multiple ``.npy`` +files, one for each array. + +Capabilities +------------ + +- Can represent all NumPy arrays including nested record arrays and + object arrays. + +- Represents the data in its native binary form. + +- Supports Fortran-contiguous arrays directly. + +- Stores all of the necessary information to reconstruct the array + including shape and dtype on a machine of a different + architecture. Both little-endian and big-endian arrays are + supported, and a file with little-endian numbers will yield + a little-endian array on any machine reading the file. The + types are described in terms of their actual sizes. For example, + if a machine with a 64-bit C "long int" writes out an array with + "long ints", a reading machine with 32-bit C "long ints" will yield + an array with 64-bit integers. + +- Is straightforward to reverse engineer. Datasets often live longer than + the programs that created them. 
A competent developer should be + able to create a solution in their preferred programming language to + read most ``.npy`` files that they have been given without much + documentation. + +- Allows memory-mapping of the data. See `open_memmap`. + +- Can be read from a filelike stream object instead of an actual file. + +- Stores object arrays, i.e. arrays containing elements that are arbitrary + Python objects. Files with object arrays are not to be mmapable, but + can be read and written to disk. + +Limitations +----------- + +- Arbitrary subclasses of numpy.ndarray are not completely preserved. + Subclasses will be accepted for writing, but only the array data will + be written out. A regular numpy.ndarray object will be created + upon reading the file. + +.. warning:: + + Due to limitations in the interpretation of structured dtypes, dtypes + with fields with empty names will have the names replaced by 'f0', 'f1', + etc. Such arrays will not round-trip through the format entirely + accurately. The data is intact; only the field names will differ. We are + working on a fix for this. This fix will not require a change in the + file format. The arrays with such structures can still be saved and + restored, and the correct dtype may be restored by using the + ``loadedarray.view(correct_dtype)`` method. + +File extensions +--------------- + +We recommend using the ``.npy`` and ``.npz`` extensions for files saved +in this format. This is by no means a requirement; applications may wish +to use these file formats but use an extension specific to the +application. In the absence of an obvious alternative, however, +we suggest using ``.npy`` and ``.npz``. + +Version numbering +----------------- + +The version numbering of these formats is independent of NumPy version +numbering. If the format is upgraded, the code in `numpy.io` will still +be able to read and write Version 1.0 files. 
+ +Format Version 1.0 +------------------ + +The first 6 bytes are a magic string: exactly ``\\x93NUMPY``. + +The next 1 byte is an unsigned byte: the major version number of the file +format, e.g. ``\\x01``. + +The next 1 byte is an unsigned byte: the minor version number of the file +format, e.g. ``\\x00``. Note: the version of the file format is not tied +to the version of the numpy package. + +The next 2 bytes form a little-endian unsigned short int: the length of +the header data HEADER_LEN. + +The next HEADER_LEN bytes form the header data describing the array's +format. It is an ASCII string which contains a Python literal expression +of a dictionary. It is terminated by a newline (``\\n``) and padded with +spaces (``\\x20``) to make the total of +``len(magic string) + 2 + len(length) + HEADER_LEN`` be evenly divisible +by 64 for alignment purposes. + +The dictionary contains three keys: + + "descr" : dtype.descr + An object that can be passed as an argument to the `numpy.dtype` + constructor to create the array's dtype. + "fortran_order" : bool + Whether the array data is Fortran-contiguous or not. Since + Fortran-contiguous arrays are a common form of non-C-contiguity, + we allow them to be written directly to disk for efficiency. + "shape" : tuple of int + The shape of the array. + +For repeatability and readability, the dictionary keys are sorted in +alphabetic order. This is for convenience only. A writer SHOULD implement +this if possible. A reader MUST NOT depend on this. + +Following the header comes the array data. If the dtype contains Python +objects (i.e. ``dtype.hasobject is True``), then the data is a Python +pickle of the array. Otherwise the data is the contiguous (either C- +or Fortran-, depending on ``fortran_order``) bytes of the array. +Consumers can figure out the number of bytes by multiplying the number +of elements given by the shape (noting that ``shape=()`` means there is +1 element) by ``dtype.itemsize``. 
+ +Format Version 2.0 +------------------ + +The version 1.0 format only allowed the array header to have a total size of +65535 bytes. This can be exceeded by structured arrays with a large number of +columns. The version 2.0 format extends the header size to 4 GiB. +`numpy.save` will automatically save in 2.0 format if the data requires it, +else it will always use the more compatible 1.0 format. + +The description of the fourth element of the header therefore has become: +"The next 4 bytes form a little-endian unsigned int: the length of the header +data HEADER_LEN." + +Format Version 3.0 +------------------ + +This version replaces the ASCII string (which in practice was latin1) with +a utf8-encoded string, so supports structured types with any unicode field +names. + +Notes +----- +The ``.npy`` format, including motivation for creating it and a comparison of +alternatives, is described in the +:doc:`"npy-format" NEP `, however details have +evolved with time and this document is more current. 
+ +""" +import io +import os +import pickle +import warnings + +import numpy +from numpy._utils import set_module +from numpy.lib._utils_impl import drop_metadata + +__all__ = [] + +drop_metadata.__module__ = "numpy.lib.format" + +EXPECTED_KEYS = {'descr', 'fortran_order', 'shape'} +MAGIC_PREFIX = b'\x93NUMPY' +MAGIC_LEN = len(MAGIC_PREFIX) + 2 +ARRAY_ALIGN = 64 # plausible values are powers of 2 between 16 and 4096 +BUFFER_SIZE = 2**18 # size of buffer for reading npz files in bytes +# allow growth within the address space of a 64 bit machine along one axis +GROWTH_AXIS_MAX_DIGITS = 21 # = len(str(8*2**64-1)) hypothetical int1 dtype + +# difference between version 1.0 and 2.0 is a 4 byte (I) header length +# instead of 2 bytes (H) allowing storage of large structured arrays +_header_size_info = { + (1, 0): (' 255: + raise ValueError("major version must be 0 <= major < 256") + if minor < 0 or minor > 255: + raise ValueError("minor version must be 0 <= minor < 256") + return MAGIC_PREFIX + bytes([major, minor]) + + +@set_module("numpy.lib.format") +def read_magic(fp): + """ Read the magic string to get the version of the file format. + + Parameters + ---------- + fp : filelike object + + Returns + ------- + major : int + minor : int + """ + magic_str = _read_bytes(fp, MAGIC_LEN, "magic string") + if magic_str[:-2] != MAGIC_PREFIX: + msg = "the magic string is not correct; expected %r, got %r" + raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2])) + major, minor = magic_str[-2:] + return major, minor + + +@set_module("numpy.lib.format") +def dtype_to_descr(dtype): + """ + Get a serializable descriptor from the dtype. + + The .descr attribute of a dtype object cannot be round-tripped through + the dtype() constructor. Simple types, like dtype('float32'), have + a descr which looks like a record array with one field with '' as + a name. The dtype() constructor interprets this as a request to give + a default name. 
Instead, we construct descriptor that can be passed to + dtype(). + + Parameters + ---------- + dtype : dtype + The dtype of the array that will be written to disk. + + Returns + ------- + descr : object + An object that can be passed to `numpy.dtype()` in order to + replicate the input dtype. + + """ + # NOTE: that drop_metadata may not return the right dtype e.g. for user + # dtypes. In that case our code below would fail the same, though. + new_dtype = drop_metadata(dtype) + if new_dtype is not dtype: + warnings.warn("metadata on a dtype is not saved to an npy/npz. " + "Use another format (such as pickle) to store it.", + UserWarning, stacklevel=2) + dtype = new_dtype + + if dtype.names is not None: + # This is a record array. The .descr is fine. XXX: parts of the + # record array with an empty name, like padding bytes, still get + # fiddled with. This needs to be fixed in the C implementation of + # dtype(). + return dtype.descr + elif not type(dtype)._legacy: + # this must be a user-defined dtype since numpy does not yet expose any + # non-legacy dtypes in the public API + # + # non-legacy dtypes don't yet have __array_interface__ + # support. Instead, as a hack, we use pickle to save the array, and lie + # that the dtype is object. When the array is loaded, the descriptor is + # unpickled with the array and the object dtype in the header is + # discarded. + # + # a future NEP should define a way to serialize user-defined + # descriptors and ideally work out the possible security implications + warnings.warn("Custom dtypes are saved as python objects using the " + "pickle protocol. Loading this file requires " + "allow_pickle=True to be set.", + UserWarning, stacklevel=2) + return "|O" + else: + return dtype.str + + +@set_module("numpy.lib.format") +def descr_to_dtype(descr): + """ + Returns a dtype based off the given description. + + This is essentially the reverse of `~lib.format.dtype_to_descr`. 
It will + remove the valueless padding fields created by, i.e. simple fields like + dtype('float32'), and then convert the description to its corresponding + dtype. + + Parameters + ---------- + descr : object + The object retrieved by dtype.descr. Can be passed to + `numpy.dtype` in order to replicate the input dtype. + + Returns + ------- + dtype : dtype + The dtype constructed by the description. + + """ + if isinstance(descr, str): + # No padding removal needed + return numpy.dtype(descr) + elif isinstance(descr, tuple): + # subtype, will always have a shape descr[1] + dt = descr_to_dtype(descr[0]) + return numpy.dtype((dt, descr[1])) + + titles = [] + names = [] + formats = [] + offsets = [] + offset = 0 + for field in descr: + if len(field) == 2: + name, descr_str = field + dt = descr_to_dtype(descr_str) + else: + name, descr_str, shape = field + dt = numpy.dtype((descr_to_dtype(descr_str), shape)) + + # Ignore padding bytes, which will be void bytes with '' as name + # Once support for blank names is removed, only "if name == ''" needed) + is_pad = (name == '' and dt.type is numpy.void and dt.names is None) + if not is_pad: + title, name = name if isinstance(name, tuple) else (None, name) + titles.append(title) + names.append(name) + formats.append(dt) + offsets.append(offset) + offset += dt.itemsize + + return numpy.dtype({'names': names, 'formats': formats, 'titles': titles, + 'offsets': offsets, 'itemsize': offset}) + + +@set_module("numpy.lib.format") +def header_data_from_array_1_0(array): + """ Get the dictionary of header metadata from a numpy.ndarray. + + Parameters + ---------- + array : numpy.ndarray + + Returns + ------- + d : dict + This has the appropriate entries for writing its string representation + to the header of the file. + """ + d = {'shape': array.shape} + if array.flags.c_contiguous: + d['fortran_order'] = False + elif array.flags.f_contiguous: + d['fortran_order'] = True + else: + # Totally non-contiguous data. 
We will have to make it C-contiguous + # before writing. Note that we need to test for C_CONTIGUOUS first + # because a 1-D array is both C_CONTIGUOUS and F_CONTIGUOUS. + d['fortran_order'] = False + + d['descr'] = dtype_to_descr(array.dtype) + return d + + +def _wrap_header(header, version): + """ + Takes a stringified header, and attaches the prefix and padding to it + """ + import struct + assert version is not None + fmt, encoding = _header_size_info[version] + header = header.encode(encoding) + hlen = len(header) + 1 + padlen = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize(fmt) + hlen) % ARRAY_ALIGN) + try: + header_prefix = magic(*version) + struct.pack(fmt, hlen + padlen) + except struct.error: + msg = f"Header length {hlen} too big for version={version}" + raise ValueError(msg) from None + + # Pad the header with spaces and a final newline such that the magic + # string, the header-length short and the header are aligned on a + # ARRAY_ALIGN byte boundary. This supports memory mapping of dtypes + # aligned up to ARRAY_ALIGN on systems like Linux where mmap() + # offset must be page-aligned (i.e. the beginning of the file). + return header_prefix + header + b' ' * padlen + b'\n' + + +def _wrap_header_guess_version(header): + """ + Like `_wrap_header`, but chooses an appropriate version given the contents + """ + try: + return _wrap_header(header, (1, 0)) + except ValueError: + pass + + try: + ret = _wrap_header(header, (2, 0)) + except UnicodeEncodeError: + pass + else: + warnings.warn("Stored array in format 2.0. It can only be" + "read by NumPy >= 1.9", UserWarning, stacklevel=2) + return ret + + header = _wrap_header(header, (3, 0)) + warnings.warn("Stored array in format 3.0. 
It can only be " + "read by NumPy >= 1.17", UserWarning, stacklevel=2) + return header + + +def _write_array_header(fp, d, version=None): + """ Write the header for an array and returns the version used + + Parameters + ---------- + fp : filelike object + d : dict + This has the appropriate entries for writing its string representation + to the header of the file. + version : tuple or None + None means use oldest that works. Providing an explicit version will + raise a ValueError if the format does not allow saving this data. + Default: None + """ + header = ["{"] + for key, value in sorted(d.items()): + # Need to use repr here, since we eval these when reading + header.append(f"'{key}': {repr(value)}, ") + header.append("}") + header = "".join(header) + + # Add some spare space so that the array header can be modified in-place + # when changing the array size, e.g. when growing it by appending data at + # the end. + shape = d['shape'] + header += " " * ((GROWTH_AXIS_MAX_DIGITS - len(repr( + shape[-1 if d['fortran_order'] else 0] + ))) if len(shape) > 0 else 0) + + if version is None: + header = _wrap_header_guess_version(header) + else: + header = _wrap_header(header, version) + fp.write(header) + + +@set_module("numpy.lib.format") +def write_array_header_1_0(fp, d): + """ Write the header for an array using the 1.0 format. + + Parameters + ---------- + fp : filelike object + d : dict + This has the appropriate entries for writing its string + representation to the header of the file. + """ + _write_array_header(fp, d, (1, 0)) + + +@set_module("numpy.lib.format") +def write_array_header_2_0(fp, d): + """ Write the header for an array using the 2.0 format. + The 2.0 format allows storing very large structured arrays. + + Parameters + ---------- + fp : filelike object + d : dict + This has the appropriate entries for writing its string + representation to the header of the file. 
+ """ + _write_array_header(fp, d, (2, 0)) + + +@set_module("numpy.lib.format") +def read_array_header_1_0(fp, max_header_size=_MAX_HEADER_SIZE): + """ + Read an array header from a filelike object using the 1.0 file format + version. + + This will leave the file object located just after the header. + + Parameters + ---------- + fp : filelike object + A file object or something with a `.read()` method like a file. + + Returns + ------- + shape : tuple of int + The shape of the array. + fortran_order : bool + The array data will be written out directly if it is either + C-contiguous or Fortran-contiguous. Otherwise, it will be made + contiguous before writing it out. + dtype : dtype + The dtype of the file's data. + max_header_size : int, optional + Maximum allowed size of the header. Large headers may not be safe + to load securely and thus require explicitly passing a larger value. + See :py:func:`ast.literal_eval()` for details. + + Raises + ------ + ValueError + If the data is invalid. + + """ + return _read_array_header( + fp, version=(1, 0), max_header_size=max_header_size) + + +@set_module("numpy.lib.format") +def read_array_header_2_0(fp, max_header_size=_MAX_HEADER_SIZE): + """ + Read an array header from a filelike object using the 2.0 file format + version. + + This will leave the file object located just after the header. + + Parameters + ---------- + fp : filelike object + A file object or something with a `.read()` method like a file. + max_header_size : int, optional + Maximum allowed size of the header. Large headers may not be safe + to load securely and thus require explicitly passing a larger value. + See :py:func:`ast.literal_eval()` for details. + + Returns + ------- + shape : tuple of int + The shape of the array. + fortran_order : bool + The array data will be written out directly if it is either + C-contiguous or Fortran-contiguous. Otherwise, it will be made + contiguous before writing it out. + dtype : dtype + The dtype of the file's data. 
+ + Raises + ------ + ValueError + If the data is invalid. + + """ + return _read_array_header( + fp, version=(2, 0), max_header_size=max_header_size) + + +def _filter_header(s): + """Clean up 'L' in npz header ints. + + Cleans up the 'L' in strings representing integers. Needed to allow npz + headers produced in Python2 to be read in Python3. + + Parameters + ---------- + s : string + Npy file header. + + Returns + ------- + header : str + Cleaned up header. + + """ + import tokenize + from io import StringIO + + tokens = [] + last_token_was_number = False + for token in tokenize.generate_tokens(StringIO(s).readline): + token_type = token[0] + token_string = token[1] + if (last_token_was_number and + token_type == tokenize.NAME and + token_string == "L"): + continue + else: + tokens.append(token) + last_token_was_number = (token_type == tokenize.NUMBER) + return tokenize.untokenize(tokens) + + +def _read_array_header(fp, version, max_header_size=_MAX_HEADER_SIZE): + """ + see read_array_header_1_0 + """ + # Read an unsigned, little-endian short int which has the length of the + # header. 
+ import ast + import struct + hinfo = _header_size_info.get(version) + if hinfo is None: + raise ValueError(f"Invalid version {version!r}") + hlength_type, encoding = hinfo + + hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length") + header_length = struct.unpack(hlength_type, hlength_str)[0] + header = _read_bytes(fp, header_length, "array header") + header = header.decode(encoding) + if len(header) > max_header_size: + raise ValueError( + f"Header info length ({len(header)}) is large and may not be safe " + "to load securely.\n" + "To allow loading, adjust `max_header_size` or fully trust " + "the `.npy` file using `allow_pickle=True`.\n" + "For safety against large resource use or crashes, sandboxing " + "may be necessary.") + + # The header is a pretty-printed string representation of a literal + # Python dictionary with trailing newlines padded to an ARRAY_ALIGN byte + # boundary. The keys are strings. + # "shape" : tuple of int + # "fortran_order" : bool + # "descr" : dtype.descr + # Versions (2, 0) and (1, 0) could have been created by a Python 2 + # implementation before header filtering was implemented. + # + # For performance reasons, we try without _filter_header first though + try: + d = ast.literal_eval(header) + except SyntaxError as e: + if version <= (2, 0): + header = _filter_header(header) + try: + d = ast.literal_eval(header) + except SyntaxError as e2: + msg = "Cannot parse header: {!r}" + raise ValueError(msg.format(header)) from e2 + else: + warnings.warn( + "Reading `.npy` or `.npz` file required additional " + "header parsing as it was created on Python 2. 
Save the " + "file again to speed up loading and avoid this warning.", + UserWarning, stacklevel=4) + else: + msg = "Cannot parse header: {!r}" + raise ValueError(msg.format(header)) from e + if not isinstance(d, dict): + msg = "Header is not a dictionary: {!r}" + raise ValueError(msg.format(d)) + + if EXPECTED_KEYS != d.keys(): + keys = sorted(d.keys()) + msg = "Header does not contain the correct keys: {!r}" + raise ValueError(msg.format(keys)) + + # Sanity-check the values. + if (not isinstance(d['shape'], tuple) or + not all(isinstance(x, int) for x in d['shape'])): + msg = "shape is not valid: {!r}" + raise ValueError(msg.format(d['shape'])) + if not isinstance(d['fortran_order'], bool): + msg = "fortran_order is not a valid bool: {!r}" + raise ValueError(msg.format(d['fortran_order'])) + try: + dtype = descr_to_dtype(d['descr']) + except TypeError as e: + msg = "descr is not a valid dtype descriptor: {!r}" + raise ValueError(msg.format(d['descr'])) from e + + return d['shape'], d['fortran_order'], dtype + + +@set_module("numpy.lib.format") +def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None): + """ + Write an array to an NPY file, including a header. + + If the array is neither C-contiguous nor Fortran-contiguous AND the + file_like object is not a real file object, this function will have to + copy data in memory. + + Parameters + ---------- + fp : file_like object + An open, writable file object, or similar object with a + ``.write()`` method. + array : ndarray + The array to write to disk. + version : (int, int) or None, optional + The version number of the format. None means use the oldest + supported version that is able to store the data. Default: None + allow_pickle : bool, optional + Whether to allow writing pickled data. Default: True + pickle_kwargs : dict, optional + Additional keyword arguments to pass to pickle.dump, excluding + 'protocol'. 
These are only useful when pickling objects in object + arrays to Python 2 compatible format. + + Raises + ------ + ValueError + If the array cannot be persisted. This includes the case of + allow_pickle=False and array being an object array. + Various other errors + If the array contains Python objects as part of its dtype, the + process of pickling them may raise various errors if the objects + are not picklable. + + """ + _check_version(version) + _write_array_header(fp, header_data_from_array_1_0(array), version) + + if array.itemsize == 0: + buffersize = 0 + else: + # Set buffer size to 16 MiB to hide the Python loop overhead. + buffersize = max(16 * 1024 ** 2 // array.itemsize, 1) + + dtype_class = type(array.dtype) + + if array.dtype.hasobject or not dtype_class._legacy: + # We contain Python objects so we cannot write out the data + # directly. Instead, we will pickle it out + if not allow_pickle: + if array.dtype.hasobject: + raise ValueError("Object arrays cannot be saved when " + "allow_pickle=False") + if not dtype_class._legacy: + raise ValueError("User-defined dtypes cannot be saved " + "when allow_pickle=False") + if pickle_kwargs is None: + pickle_kwargs = {} + pickle.dump(array, fp, protocol=4, **pickle_kwargs) + elif array.flags.f_contiguous and not array.flags.c_contiguous: + if isfileobj(fp): + array.T.tofile(fp) + else: + for chunk in numpy.nditer( + array, flags=['external_loop', 'buffered', 'zerosize_ok'], + buffersize=buffersize, order='F'): + fp.write(chunk.tobytes('C')) + elif isfileobj(fp): + array.tofile(fp) + else: + for chunk in numpy.nditer( + array, flags=['external_loop', 'buffered', 'zerosize_ok'], + buffersize=buffersize, order='C'): + fp.write(chunk.tobytes('C')) + + +@set_module("numpy.lib.format") +def read_array(fp, allow_pickle=False, pickle_kwargs=None, *, + max_header_size=_MAX_HEADER_SIZE): + """ + Read an array from an NPY file. 
+ + Parameters + ---------- + fp : file_like object + If this is not a real file object, then this may take extra memory + and time. + allow_pickle : bool, optional + Whether to allow writing pickled data. Default: False + pickle_kwargs : dict + Additional keyword arguments to pass to pickle.load. These are only + useful when loading object arrays saved on Python 2. + max_header_size : int, optional + Maximum allowed size of the header. Large headers may not be safe + to load securely and thus require explicitly passing a larger value. + See :py:func:`ast.literal_eval()` for details. + This option is ignored when `allow_pickle` is passed. In that case + the file is by definition trusted and the limit is unnecessary. + + Returns + ------- + array : ndarray + The array from the data on disk. + + Raises + ------ + ValueError + If the data is invalid, or allow_pickle=False and the file contains + an object array. + + """ + if allow_pickle: + # Effectively ignore max_header_size, since `allow_pickle` indicates + # that the input is fully trusted. + max_header_size = 2**64 + + version = read_magic(fp) + _check_version(version) + shape, fortran_order, dtype = _read_array_header( + fp, version, max_header_size=max_header_size) + if len(shape) == 0: + count = 1 + else: + count = numpy.multiply.reduce(shape, dtype=numpy.int64) + + # Now read the actual data. + if dtype.hasobject: + # The array contained Python objects. We need to unpickle the data. + if not allow_pickle: + raise ValueError("Object arrays cannot be loaded when " + "allow_pickle=False") + if pickle_kwargs is None: + pickle_kwargs = {} + try: + array = pickle.load(fp, **pickle_kwargs) + except UnicodeError as err: + # Friendlier error message + raise UnicodeError( + f"Unpickling a python object failed: {err!r}\n" + "You may need to pass the encoding= option " + "to numpy.load" + ) from err + else: + if isfileobj(fp): + # We can use the fast fromfile() function. 
+ array = numpy.fromfile(fp, dtype=dtype, count=count) + else: + # This is not a real file. We have to read it the + # memory-intensive way. + # crc32 module fails on reads greater than 2 ** 32 bytes, + # breaking large reads from gzip streams. Chunk reads to + # BUFFER_SIZE bytes to avoid issue and reduce memory overhead + # of the read. In non-chunked case count < max_read_count, so + # only one read is performed. + + # Use np.ndarray instead of np.empty since the latter does + # not correctly instantiate zero-width string dtypes; see + # https://github.com/numpy/numpy/pull/6430 + array = numpy.ndarray(count, dtype=dtype) + + if dtype.itemsize > 0: + # If dtype.itemsize == 0 then there's nothing more to read + max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize) + + for i in range(0, count, max_read_count): + read_count = min(max_read_count, count - i) + read_size = int(read_count * dtype.itemsize) + data = _read_bytes(fp, read_size, "array data") + array[i:i + read_count] = numpy.frombuffer(data, dtype=dtype, + count=read_count) + + if array.size != count: + raise ValueError( + "Failed to read all data for array. " + f"Expected {shape} = {count} elements, " + f"could only read {array.size} elements. " + "(file seems not fully written?)" + ) + + if fortran_order: + array = array.reshape(shape[::-1]) + array = array.transpose() + else: + array = array.reshape(shape) + + return array + + +@set_module("numpy.lib.format") +def open_memmap(filename, mode='r+', dtype=None, shape=None, + fortran_order=False, version=None, *, + max_header_size=_MAX_HEADER_SIZE): + """ + Open a .npy file as a memory-mapped array. + + This may be used to read an existing file or create a new one. + + Parameters + ---------- + filename : str or path-like + The name of the file on disk. This may *not* be a file-like + object. + mode : str, optional + The mode in which to open the file; the default is 'r+'. 
In + addition to the standard file modes, 'c' is also accepted to mean + "copy on write." See `memmap` for the available mode strings. + dtype : data-type, optional + The data type of the array if we are creating a new file in "write" + mode, if not, `dtype` is ignored. The default value is None, which + results in a data-type of `float64`. + shape : tuple of int + The shape of the array if we are creating a new file in "write" + mode, in which case this parameter is required. Otherwise, this + parameter is ignored and is thus optional. + fortran_order : bool, optional + Whether the array should be Fortran-contiguous (True) or + C-contiguous (False, the default) if we are creating a new file in + "write" mode. + version : tuple of int (major, minor) or None + If the mode is a "write" mode, then this is the version of the file + format used to create the file. None means use the oldest + supported version that is able to store the data. Default: None + max_header_size : int, optional + Maximum allowed size of the header. Large headers may not be safe + to load securely and thus require explicitly passing a larger value. + See :py:func:`ast.literal_eval()` for details. + + Returns + ------- + marray : memmap + The memory-mapped array. + + Raises + ------ + ValueError + If the data or the mode is invalid. + OSError + If the file is not found or cannot be opened correctly. + + See Also + -------- + numpy.memmap + + """ + if isfileobj(filename): + raise ValueError("Filename must be a string or a path-like object." + " Memmap cannot use existing file handles.") + + if 'w' in mode: + # We are creating the file, not reading it. + # Check if we ought to create the file. + _check_version(version) + # Ensure that the given dtype is an authentic dtype object rather + # than just something that can be interpreted as a dtype object. + dtype = numpy.dtype(dtype) + if dtype.hasobject: + msg = "Array can't be memory-mapped: Python objects in dtype." 
+ raise ValueError(msg) + d = { + "descr": dtype_to_descr(dtype), + "fortran_order": fortran_order, + "shape": shape, + } + # If we got here, then it should be safe to create the file. + with open(os.fspath(filename), mode + 'b') as fp: + _write_array_header(fp, d, version) + offset = fp.tell() + else: + # Read the header of the file first. + with open(os.fspath(filename), 'rb') as fp: + version = read_magic(fp) + _check_version(version) + + shape, fortran_order, dtype = _read_array_header( + fp, version, max_header_size=max_header_size) + if dtype.hasobject: + msg = "Array can't be memory-mapped: Python objects in dtype." + raise ValueError(msg) + offset = fp.tell() + + if fortran_order: + order = 'F' + else: + order = 'C' + + # We need to change a write-only mode to a read-write mode since we've + # already written data to the file. + if mode == 'w+': + mode = 'r+' + + marray = numpy.memmap(filename, dtype=dtype, shape=shape, order=order, + mode=mode, offset=offset) + + return marray + + +def _read_bytes(fp, size, error_template="ran out of data"): + """ + Read from file-like object until size bytes are read. + Raises ValueError if not EOF is encountered before size bytes are read. + Non-blocking objects only supported if they derive from io objects. + + Required as e.g. ZipExtFile in python 2.6 can return less data than + requested. + """ + data = b"" + while True: + # io files (default in python3) return None or raise on + # would-block, python2 file will truncate, probably nothing can be + # done about that. 
note that regular files can't be non-blocking + try: + r = fp.read(size - len(data)) + data += r + if len(r) == 0 or len(data) == size: + break + except BlockingIOError: + pass + if len(data) != size: + msg = "EOF: reading %s, expected %d bytes got %d" + raise ValueError(msg % (error_template, size, len(data))) + else: + return data + + +@set_module("numpy.lib.format") +def isfileobj(f): + if not isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter)): + return False + try: + # BufferedReader/Writer may raise OSError when + # fetching `fileno()` (e.g. when wrapping BytesIO). + f.fileno() + return True + except OSError: + return False diff --git a/numpy/lib/_format_impl.pyi b/numpy/lib/_format_impl.pyi new file mode 100644 index 000000000000..f8b9a7ab88a9 --- /dev/null +++ b/numpy/lib/_format_impl.pyi @@ -0,0 +1,56 @@ +import os +from _typeshed import SupportsRead, SupportsWrite +from typing import Any, BinaryIO, Final, TypeGuard + +import numpy as np +import numpy.typing as npt +from numpy.lib._utils_impl import drop_metadata as drop_metadata + +__all__: list[str] = [] + +type _DTypeDescr = list[tuple[str, str]] | list[tuple[str, str, tuple[int, ...]]] + +### + +EXPECTED_KEYS: Final[set[str]] = ... +MAGIC_PREFIX: Final = b"\x93NUMPY" +MAGIC_LEN: Final = 8 +ARRAY_ALIGN: Final = 64 +BUFFER_SIZE: Final = 262_144 # 1 << 18 +GROWTH_AXIS_MAX_DIGITS: Final = 21 +_MAX_HEADER_SIZE: Final = 10_000 + +def magic(major: int, minor: int) -> bytes: ... +def read_magic(fp: SupportsRead[bytes]) -> tuple[int, int]: ... +def dtype_to_descr(dtype: np.dtype) -> _DTypeDescr: ... +def descr_to_dtype(descr: _DTypeDescr) -> np.dtype: ... +def header_data_from_array_1_0(array: np.ndarray) -> dict[str, Any]: ... +def write_array_header_1_0(fp: SupportsWrite[bytes], d: dict[str, Any]) -> None: ... +def write_array_header_2_0(fp: SupportsWrite[bytes], d: dict[str, Any]) -> None: ... 
+def read_array_header_1_0(fp: SupportsRead[bytes], max_header_size: int = 10_000) -> tuple[tuple[int, ...], bool, np.dtype]: ... +def read_array_header_2_0(fp: SupportsRead[bytes], max_header_size: int = 10_000) -> tuple[tuple[int, ...], bool, np.dtype]: ... +def write_array( + fp: SupportsWrite[bytes], + array: np.ndarray, + version: tuple[int, int] | None = None, + allow_pickle: bool = True, + pickle_kwargs: dict[str, Any] | None = None, +) -> None: ... +def read_array( + fp: SupportsRead[bytes], + allow_pickle: bool = False, + pickle_kwargs: dict[str, Any] | None = None, + *, + max_header_size: int = 10_000, +) -> np.ndarray: ... +def open_memmap( + filename: str | os.PathLike[Any], + mode: str = "r+", + dtype: npt.DTypeLike | None = None, + shape: tuple[int, ...] | None = None, + fortran_order: bool = False, + version: tuple[int, int] | None = None, + *, + max_header_size: int = 10_000, +) -> np.memmap: ... +def isfileobj(f: object) -> TypeGuard[BinaryIO]: ... # don't use `typing.TypeIs` diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 5356f1cc31c3..69d1ed8a7c87 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -2,35 +2,61 @@ import collections.abc import functools import re -import sys import warnings import numpy as np import numpy._core.numeric as _nx -from numpy._core import transpose, overrides +from numpy._core import overrides, transpose +from numpy._core._multiarray_umath import _array_converter +from numpy._core.fromnumeric import any, mean, nonzero, partition, ravel, sum +from numpy._core.multiarray import ( + _monotonicity, + _place, + bincount, + interp as compiled_interp, + interp_complex as compiled_interp_complex, + normalize_axis_index, +) from numpy._core.numeric import ( - ones, zeros_like, arange, concatenate, array, asarray, asanyarray, empty, - ndarray, take, dot, where, intp, integer, isscalar, absolute - ) -from numpy._core.umath import ( - pi, add, arctan2, 
frompyfunc, cos, less_equal, sqrt, sin, - mod, exp, not_equal, subtract, minimum - ) -from numpy._core.fromnumeric import ( - ravel, nonzero, partition, mean, any, sum - ) + absolute, + arange, + array, + asanyarray, + asarray, + concatenate, + dot, + empty, + integer, + intp, + isscalar, + ndarray, + ones, + take, + where, + zeros_like, +) from numpy._core.numerictypes import typecodes -from numpy.lib._twodim_base_impl import diag -from numpy._core.multiarray import ( - _place, bincount, normalize_axis_index, _monotonicity, - interp as compiled_interp, interp_complex as compiled_interp_complex - ) -from numpy._core._multiarray_umath import _array_converter +from numpy._core.umath import ( + add, + arctan2, + cos, + exp, + floor, + frompyfunc, + less_equal, + minimum, + mod, + not_equal, + pi, + sin, + sqrt, + subtract, +) from numpy._utils import set_module # needed in this module for compatibility from numpy.lib._histograms_impl import histogram, histogramdd # noqa: F401 - +from numpy.lib._twodim_base_impl import diag array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy') @@ -42,7 +68,7 @@ 'rot90', 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average', 'bincount', 'digitize', 'cov', 'corrcoef', 'median', 'sinc', 'hamming', 'hanning', 'bartlett', - 'blackman', 'kaiser', 'trapezoid', 'trapz', 'i0', + 'blackman', 'kaiser', 'trapezoid', 'i0', 'meshgrid', 'delete', 'insert', 'append', 'interp', 'quantile' ] @@ -55,95 +81,94 @@ # When the sample contains exactly the percentile wanted, the virtual_index is # an integer to the index of this element. 
# When the percentile wanted is in between two elements, the virtual_index -# is made of a integer part (a.k.a 'i' or 'left') and a fractional part +# is made of an integer part (a.k.a 'i' or 'left') and a fractional part # (a.k.a 'g' or 'gamma') # # Each method in _QuantileMethods has two properties # get_virtual_index : Callable # The function used to compute the virtual_index. # fix_gamma : Callable -# A function used for discret methods to force the index to a specific value. -_QuantileMethods = dict( +# A function used for discrete methods to force the index to a specific value. +_QuantileMethods = { # --- HYNDMAN and FAN METHODS # Discrete methods - inverted_cdf=dict( - get_virtual_index=lambda n, quantiles: _inverted_cdf(n, quantiles), - fix_gamma=None, # should never be called - ), - averaged_inverted_cdf=dict( - get_virtual_index=lambda n, quantiles: (n * quantiles) - 1, - fix_gamma=lambda gamma, _: _get_gamma_mask( + 'inverted_cdf': { + 'get_virtual_index': lambda n, quantiles: _inverted_cdf(n, quantiles), + 'fix_gamma': None, # should never be called + }, + 'averaged_inverted_cdf': { + 'get_virtual_index': lambda n, quantiles: (n * quantiles) - 1, + 'fix_gamma': lambda gamma, _: _get_gamma_mask( shape=gamma.shape, default_value=1., conditioned_value=0.5, where=gamma == 0), - ), - closest_observation=dict( - get_virtual_index=lambda n, quantiles: _closest_observation(n, - quantiles), - fix_gamma=None, # should never be called - ), + }, + 'closest_observation': { + 'get_virtual_index': lambda n, quantiles: _closest_observation(n, quantiles), + 'fix_gamma': None, # should never be called + }, # Continuous methods - interpolated_inverted_cdf=dict( - get_virtual_index=lambda n, quantiles: + 'interpolated_inverted_cdf': { + 'get_virtual_index': lambda n, quantiles: _compute_virtual_index(n, quantiles, 0, 1), - fix_gamma=lambda gamma, _: gamma, - ), - hazen=dict( - get_virtual_index=lambda n, quantiles: + 'fix_gamma': lambda gamma, _: gamma, + }, + 'hazen': { + 
'get_virtual_index': lambda n, quantiles: _compute_virtual_index(n, quantiles, 0.5, 0.5), - fix_gamma=lambda gamma, _: gamma, - ), - weibull=dict( - get_virtual_index=lambda n, quantiles: + 'fix_gamma': lambda gamma, _: gamma, + }, + 'weibull': { + 'get_virtual_index': lambda n, quantiles: _compute_virtual_index(n, quantiles, 0, 0), - fix_gamma=lambda gamma, _: gamma, - ), + 'fix_gamma': lambda gamma, _: gamma, + }, # Default method. # To avoid some rounding issues, `(n-1) * quantiles` is preferred to # `_compute_virtual_index(n, quantiles, 1, 1)`. # They are mathematically equivalent. - linear=dict( - get_virtual_index=lambda n, quantiles: (n - 1) * quantiles, - fix_gamma=lambda gamma, _: gamma, - ), - median_unbiased=dict( - get_virtual_index=lambda n, quantiles: + 'linear': { + 'get_virtual_index': lambda n, quantiles: (n - 1) * quantiles, + 'fix_gamma': lambda gamma, _: gamma, + }, + 'median_unbiased': { + 'get_virtual_index': lambda n, quantiles: _compute_virtual_index(n, quantiles, 1 / 3.0, 1 / 3.0), - fix_gamma=lambda gamma, _: gamma, - ), - normal_unbiased=dict( - get_virtual_index=lambda n, quantiles: + 'fix_gamma': lambda gamma, _: gamma, + }, + 'normal_unbiased': { + 'get_virtual_index': lambda n, quantiles: _compute_virtual_index(n, quantiles, 3 / 8.0, 3 / 8.0), - fix_gamma=lambda gamma, _: gamma, - ), + 'fix_gamma': lambda gamma, _: gamma, + }, # --- OTHER METHODS - lower=dict( - get_virtual_index=lambda n, quantiles: np.floor( + 'lower': { + 'get_virtual_index': lambda n, quantiles: np.floor( (n - 1) * quantiles).astype(np.intp), - fix_gamma=None, # should never be called, index dtype is int - ), - higher=dict( - get_virtual_index=lambda n, quantiles: np.ceil( + 'fix_gamma': None, # should never be called, index dtype is int + }, + 'higher': { + 'get_virtual_index': lambda n, quantiles: np.ceil( (n - 1) * quantiles).astype(np.intp), - fix_gamma=None, # should never be called, index dtype is int - ), - midpoint=dict( - get_virtual_index=lambda n, 
quantiles: 0.5 * ( + 'fix_gamma': None, # should never be called, index dtype is int + }, + 'midpoint': { + 'get_virtual_index': lambda n, quantiles: 0.5 * ( np.floor((n - 1) * quantiles) + np.ceil((n - 1) * quantiles)), - fix_gamma=lambda gamma, index: _get_gamma_mask( + 'fix_gamma': lambda gamma, index: _get_gamma_mask( shape=gamma.shape, default_value=0.5, conditioned_value=0., where=index % 1 == 0), - ), - nearest=dict( - get_virtual_index=lambda n, quantiles: np.around( + }, + 'nearest': { + 'get_virtual_index': lambda n, quantiles: np.around( (n - 1) * quantiles).astype(np.intp), - fix_gamma=None, + 'fix_gamma': None, # should never be called, index dtype is int - )) + }} def _rot90_dispatcher(m, k=None, axes=None): @@ -169,8 +194,6 @@ def rot90(m, k=1, axes=(0, 1)): The array is rotated in the plane defined by the axes. Axes must be different. - .. versionadded:: 1.12.0 - Returns ------- y : ndarray @@ -192,6 +215,7 @@ def rot90(m, k=1, axes=(0, 1)): Examples -------- + >>> import numpy as np >>> m = np.array([[1,2],[3,4]], int) >>> m array([[1, 2], @@ -221,8 +245,7 @@ def rot90(m, k=1, axes=(0, 1)): if (axes[0] >= m.ndim or axes[0] < -m.ndim or axes[1] >= m.ndim or axes[1] < -m.ndim): - raise ValueError("Axes={} out of range for array of ndim={}." - .format(axes, m.ndim)) + raise ValueError(f"Axes={axes} out of range for array of ndim={m.ndim}.") k %= 4 @@ -253,8 +276,6 @@ def flip(m, axis=None): The shape of the array is preserved, but the elements are reordered. - .. versionadded:: 1.12.0 - Parameters ---------- m : array_like @@ -267,9 +288,6 @@ def flip(m, axis=None): If axis is a tuple of ints, flipping is performed on all of the axes specified in the tuple. - .. 
versionchanged:: 1.15.0 - None and tuples of axes are supported - Returns ------- out : array_like @@ -297,6 +315,7 @@ def flip(m, axis=None): Examples -------- + >>> import numpy as np >>> A = np.arange(8).reshape((2,2,2)) >>> A array([[[0, 1], @@ -360,6 +379,7 @@ def iterable(y): Examples -------- + >>> import numpy as np >>> np.iterable([1, 2, 3]) True >>> np.iterable(2) @@ -388,7 +408,7 @@ def iterable(y): def _weights_are_valid(weights, a, axis): """Validate weights array. - + We assume, weights is not None. """ wgt = np.asanyarray(weights) @@ -431,9 +451,6 @@ def average(a, axis=None, weights=None, returned=False, *, Axis or axes along which to average `a`. The default, `axis=None`, will average over all of the elements of the input array. If axis is negative it counts from the last to the first axis. - - .. versionadded:: 1.7.0 - If axis is a tuple of ints, averaging is performed on all of the axes specified in the tuple instead of a single axis or all the axes as before. @@ -448,7 +465,7 @@ def average(a, axis=None, weights=None, returned=False, *, The calculation is:: avg = sum(a * weights) / sum(weights) - + where the sum is over all included elements. The only constraint on the values of `weights` is that `sum(weights)` must not be 0. 
@@ -502,6 +519,7 @@ def average(a, axis=None, weights=None, returned=False, *, Examples -------- + >>> import numpy as np >>> data = np.arange(1, 5) >>> data array([1, 2, 3, 4]) @@ -557,7 +575,7 @@ def average(a, axis=None, weights=None, returned=False, *, if weights is None: avg = a.mean(axis, **keepdims_kw) avg_as_array = np.asanyarray(avg) - scl = avg_as_array.dtype.type(a.size/avg_as_array.size) + scl = avg_as_array.dtype.type(a.size / avg_as_array.size) else: wgt = _weights_are_valid(weights=weights, a=a, axis=axis) @@ -576,7 +594,7 @@ def average(a, axis=None, weights=None, returned=False, *, if returned: if scl.shape != avg_as_array.shape: - scl = np.broadcast_to(scl, avg_as_array.shape).copy() + scl = np.broadcast_to(scl, avg_as_array.shape, subok=True).copy() return avg, scl else: return avg @@ -595,12 +613,15 @@ def asarray_chkfinite(a, dtype=None, order=None): dtype : data-type, optional By default, the data-type is inferred from the input data. order : {'C', 'F', 'A', 'K'}, optional - Memory layout. 'A' and 'K' depend on the order of input array a. - 'C' row-major (C-style), - 'F' column-major (Fortran-style) memory representation. - 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise - 'K' (keep) preserve input order - Defaults to 'C'. + The memory layout of the output. + 'C' gives a row-major layout (C-style), + 'F' gives a column-major layout (Fortran-style). + 'C' and 'F' will copy if needed to ensure the output format. + 'A' (any) is equivalent to 'F' if input a is non-contiguous or + Fortran-contiguous, otherwise, it is equivalent to 'C'. + Unlike 'C' or 'F', 'A' does not ensure that the result is contiguous. + 'K' (keep) preserves the input order for the output. + 'C' is the default. Returns ------- @@ -627,11 +648,13 @@ class ndarray is returned. Examples -------- - Convert a list into an array. If all elements are finite + >>> import numpy as np + + Convert a list into an array. 
If all elements are finite, then ``asarray_chkfinite`` is identical to ``asarray``. >>> a = [1, 2] - >>> np.asarray_chkfinite(a, dtype=float) + >>> np.asarray_chkfinite(a, dtype=np.float64) array([1., 2.]) Raises ValueError if array_like contains Nans or Infs. @@ -684,7 +707,7 @@ def piecewise(x, condlist, funclist, *args, **kw): is the default value, used wherever all conditions are false. funclist : list of callables, f(x,*args,**kw), or scalars Each function is evaluated over `x` wherever its corresponding - condition is True. It should take a 1d array as input and give an 1d + condition is True. It should take a 1d array as input and give a 1d array or a scalar value as output. If, instead of a callable, a scalar is provided then a constant function (``lambda x: scalar``) is assumed. @@ -728,6 +751,8 @@ def piecewise(x, condlist, funclist, *args, **kw): Examples -------- + >>> import numpy as np + Define the signum function, which is -1 for ``x < 0`` and +1 for ``x >= 0``. >>> x = np.linspace(-2.5, 2.5, 6) @@ -764,8 +789,7 @@ def piecewise(x, condlist, funclist, *args, **kw): n += 1 elif n != n2: raise ValueError( - "with {} condition(s), either {} or {} functions are expected" - .format(n, n, n+1) + f"with {n} condition(s), either {n} or {n + 1} functions are expected" ) y = zeros_like(x) @@ -799,7 +823,7 @@ def select(condlist, choicelist, default=0): choicelist : list of ndarrays The list of arrays from which the output elements are taken. It has to be of the same length as `condlist`. - default : scalar, optional + default : array_like, optional The element inserted in `output` when all conditions evaluate to False. 
Returns @@ -816,6 +840,8 @@ def select(condlist, choicelist, default=0): Examples -------- + >>> import numpy as np + Beginning with an array of integers from 0 to 5 (inclusive), elements less than ``3`` are negated, elements greater than ``3`` are squared, and elements not meeting either of these conditions @@ -823,9 +849,9 @@ def select(condlist, choicelist, default=0): >>> x = np.arange(6) >>> condlist = [x<3, x>3] - >>> choicelist = [x, x**2] + >>> choicelist = [-x, x**2] >>> np.select(condlist, choicelist, 42) - array([ 0, 1, 2, 42, 16, 25]) + array([ 0, -1, -2, 42, 16, 25]) When multiple conditions are satisfied, the first one encountered in `condlist` is used. @@ -870,7 +896,7 @@ def select(condlist, choicelist, default=0): for i, cond in enumerate(condlist): if cond.dtype.type is not np.bool: raise TypeError( - 'invalid entry {} in condlist: should be boolean ndarray'.format(i)) + f'invalid entry {i} in condlist: should be boolean ndarray') if choicelist[0].ndim == 0: # This may be common, so avoid the call. @@ -915,8 +941,6 @@ def copy(a, order='K', subok=False): If True, then sub-classes will be passed-through, otherwise the returned array will be forced to be a base-class array (defaults to False). - .. versionadded:: 1.19.0 - Returns ------- arr : ndarray @@ -938,6 +962,8 @@ def copy(a, order='K', subok=False): Examples -------- + >>> import numpy as np + Create an array x, with a reference y and a copy z: >>> x = np.array([1, 2, 3]) @@ -991,7 +1017,7 @@ def gradient(f, *varargs, axis=None, edge_order=1): Spacing between f values. Default unitary spacing for all dimensions. Spacing can be specified using: - 1. single scalar to specify a sample distance for all dimensions. + 1. Single scalar to specify a sample distance for all dimensions. 2. N scalars to specify a constant sample distance for each dimension. i.e. `dx`, `dy`, `dz`, ... 3. 
N arrays to specify the coordinates of the values along each @@ -999,23 +1025,19 @@ def gradient(f, *varargs, axis=None, edge_order=1): the corresponding dimension 4. Any combination of N scalars/arrays with the meaning of 2. and 3. - If `axis` is given, the number of varargs must equal the number of axes. + If `axis` is given, the number of varargs must equal the number of axes + specified in the axis parameter. Default: 1. (see Examples below). edge_order : {1, 2}, optional Gradient is calculated using N-th order accurate differences at the boundaries. Default: 1. - - .. versionadded:: 1.9.1 - axis : None or int or tuple of ints, optional - Gradient is calculated only along the given axis or axes + Gradient is calculated only along the given axis or axes. The default (axis = None) is to calculate the gradient for all the axes of the input array. axis may be negative, in which case it counts from the last to the first axis. - .. versionadded:: 1.11.0 - Returns ------- gradient : ndarray or tuple of ndarray @@ -1025,6 +1047,7 @@ def gradient(f, *varargs, axis=None, edge_order=1): Examples -------- + >>> import numpy as np >>> f = np.array([1, 2, 4, 7, 11, 16]) >>> np.gradient(f) array([1. , 1.5, 2.5, 3.5, 4.5, 5. 
]) @@ -1246,10 +1269,10 @@ def gradient(f, *varargs, axis=None, edge_order=1): outvals = [] # create slice objects --- initially all are [:, :, ..., :] - slice1 = [slice(None)]*N - slice2 = [slice(None)]*N - slice3 = [slice(None)]*N - slice4 = [slice(None)]*N + slice1 = [slice(None)] * N + slice2 = [slice(None)] * N + slice3 = [slice(None)] * N + slice4 = [slice(None)] * N otype = f.dtype if otype.type is np.datetime64: @@ -1291,15 +1314,19 @@ def gradient(f, *varargs, axis=None, edge_order=1): else: dx1 = ax_dx[0:-1] dx2 = ax_dx[1:] - a = -(dx2)/(dx1 * (dx1 + dx2)) + a = -(dx2) / (dx1 * (dx1 + dx2)) b = (dx2 - dx1) / (dx1 * dx2) c = dx1 / (dx2 * (dx1 + dx2)) # fix the shape for broadcasting shape = np.ones(N, dtype=int) shape[axis] = -1 - a.shape = b.shape = c.shape = shape + + a = a.reshape(shape) + b = b.reshape(shape) + c = c.reshape(shape) # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:] - out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] + out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] \ + + c * f[tuple(slice4)] # Numerical differentiation: 1st order edges if edge_order == 1: @@ -1330,11 +1357,12 @@ def gradient(f, *varargs, axis=None, edge_order=1): else: dx1 = ax_dx[0] dx2 = ax_dx[1] - a = -(2. * dx1 + dx2)/(dx1 * (dx1 + dx2)) + a = -(2. * dx1 + dx2) / (dx1 * (dx1 + dx2)) b = (dx1 + dx2) / (dx1 * dx2) c = - dx1 / (dx2 * (dx1 + dx2)) # 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2] - out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] + out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] \ + + c * f[tuple(slice4)] slice1[axis] = -1 slice2[axis] = -3 @@ -1351,7 +1379,8 @@ def gradient(f, *varargs, axis=None, edge_order=1): b = - (dx2 + dx1) / (dx1 * dx2) c = (2. 
* dx2 + dx1) / (dx2 * (dx1 + dx2)) # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1] - out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] + out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] \ + + c * f[tuple(slice4)] outvals.append(out) @@ -1396,8 +1425,6 @@ def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue): of the input array in along all other axes. Otherwise the dimension and shape must match `a` except along axis. - .. versionadded:: 1.16.0 - Returns ------- diff : ndarray @@ -1426,7 +1453,7 @@ def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue): >>> np.diff(u8_arr) array([255], dtype=uint8) >>> u8_arr[1,...] - u8_arr[0,...] - 255 + np.uint8(255) If this is not desirable, then the array should be cast to a larger integer type first: @@ -1437,6 +1464,7 @@ def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue): Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 4, 7, 0]) >>> np.diff(x) array([ 1, 2, 3, -7]) @@ -1539,8 +1567,6 @@ def interp(x, xp, fp, left=None, right=None, period=None): interpolation of angular x-coordinates. Parameters `left` and `right` are ignored if `period` is specified. - .. 
versionadded:: 1.10.0 - Returns ------- y : float or complex (corresponding to fp) or ndarray @@ -1571,6 +1597,7 @@ def interp(x, xp, fp, left=None, right=None, period=None): Examples -------- + >>> import numpy as np >>> xp = [1, 2, 3] >>> fp = [3, 2, 0] >>> np.interp(2.5, xp, fp) @@ -1642,7 +1669,7 @@ def interp(x, xp, fp, left=None, right=None, period=None): asort_xp = np.argsort(xp) xp = xp[asort_xp] fp = fp[asort_xp] - xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period)) + xp = np.concatenate((xp[-1:] - period, xp, xp[0:1] + period)) fp = np.concatenate((fp[-1:], fp, fp[0:1])) return interp_func(x, xp, fp, left, right) @@ -1670,9 +1697,6 @@ def angle(z, deg=False): The counterclockwise angle from the positive real axis on the complex plane in the range ``(-pi, pi]``, with dtype as numpy.float64. - .. versionchanged:: 1.16.0 - This function works on subclasses of ndarray like `ma.array`. - See Also -------- arctan2 @@ -1686,6 +1710,7 @@ def angle(z, deg=False): Examples -------- + >>> import numpy as np >>> np.angle([1.0, 1.0j, 1+1j]) # in radians array([ 0. , 1.57079633, 0.78539816]) # may vary >>> np.angle(1+1j, deg=True) # in degrees @@ -1704,7 +1729,7 @@ def angle(z, deg=False): a = arctan2(zimag, zreal) if deg: - a *= 180/pi + a *= 180 / pi return a @@ -1713,7 +1738,7 @@ def _unwrap_dispatcher(p, discont=None, axis=None, *, period=None): @array_function_dispatch(_unwrap_dispatcher) -def unwrap(p, discont=None, axis=-1, *, period=2*pi): +def unwrap(p, discont=None, axis=-1, *, period=2 * pi): r""" Unwrap by taking the complement of large deltas with respect to the period. 
@@ -1760,6 +1785,8 @@ def unwrap(p, discont=None, axis=-1, *, period=2*pi): Examples -------- + >>> import numpy as np + >>> phase = np.linspace(0, np.pi, num=5) >>> phase[3:] += np.pi >>> phase @@ -1777,13 +1804,30 @@ def unwrap(p, discont=None, axis=-1, *, period=2*pi): array([-180., -140., -100., -60., -20., 20., 60., 100., 140., 180., 220., 260., 300., 340., 380., 420., 460., 500., 540.]) + + This example plots the unwrapping of the wrapped input signal `w`. + First generate `w`, then apply `unwrap` to get `u`. + + >>> t = np.linspace(0, 25, 801) + >>> w = np.mod(1.5 * np.sin(1.1 * t + 0.26) * (1 - t / 6 + (t / 23) ** 3), 2.0) - 1 + >>> u = np.unwrap(w, period=2.0) + + Plot `w` and `u`. + + >>> import matplotlib.pyplot as plt + >>> plt.plot(t, w, label='w (a signal wrapped to [-1, 1])') + >>> plt.plot(t, u, linewidth=2.5, alpha=0.5, label='unwrap(w, period=2)') + >>> plt.xlabel('t') + >>> plt.grid(alpha=0.6) + >>> plt.legend(framealpha=1, shadow=True) + >>> plt.show() """ p = asarray(p) nd = p.ndim dd = diff(p, axis=axis) if discont is None: - discont = period/2 - slice1 = [slice(None, None)]*nd # full slices + discont = period / 2 + slice1 = [slice(None, None)] * nd # full slices slice1[axis] = slice(1, None) slice1 = tuple(slice1) dtype = np.result_type(dd, period) @@ -1829,6 +1873,7 @@ def sort_complex(a): Examples -------- + >>> import numpy as np >>> np.sort_complex([5, 3, 6, 2, 1]) array([1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j]) @@ -1849,61 +1894,146 @@ def sort_complex(a): return b -def _trim_zeros(filt, trim=None): +def _arg_trim_zeros(filt): + """Return indices of the first and last non-zero element. + + Parameters + ---------- + filt : array_like + Input array. + + Returns + ------- + start, stop : ndarray + Two arrays containing the indices of the first and last non-zero + element in each dimension. 
+ + See also + -------- + trim_zeros + + Examples + -------- + >>> import numpy as np + >>> _arg_trim_zeros(np.array([0, 0, 1, 1, 0])) + (array([2]), array([3])) + """ + nonzero = ( + np.argwhere(filt) + if filt.dtype != np.object_ + # Historically, `trim_zeros` treats `None` in an object array + # as non-zero while argwhere doesn't, account for that + else np.argwhere(filt != 0) + ) + if nonzero.size == 0: + start = stop = np.array([], dtype=np.intp) + else: + start = nonzero.min(axis=0) + stop = nonzero.max(axis=0) + return start, stop + + +def _trim_zeros(filt, trim=None, axis=None): return (filt,) @array_function_dispatch(_trim_zeros) -def trim_zeros(filt, trim='fb'): - """ - Trim the leading and/or trailing zeros from a 1-D array or sequence. +def trim_zeros(filt, trim='fb', axis=None): + """Remove values along a dimension which are zero along all other. Parameters ---------- - filt : 1-D array or sequence + filt : array_like Input array. - trim : str, optional + trim : {"fb", "f", "b"}, optional A string with 'f' representing trim from front and 'b' to trim from - back. Default is 'fb', trim zeros from both front and back of the - array. + back. By default, zeros are trimmed on both sides. + Front and back refer to the edges of a dimension, with "front" referring + to the side with the lowest index 0, and "back" referring to the highest + index (or index -1). + axis : int or sequence, optional + If None, `filt` is cropped such that the smallest bounding box is + returned that still contains all values which are not zero. + If an axis is specified, `filt` will be sliced in that dimension only + on the sides specified by `trim`. The remaining area will be the + smallest that still contains all values which are not zero. + + .. versionadded:: 2.2.0 Returns ------- - trimmed : 1-D array or sequence - The result of trimming the input. The input data type is preserved. + trimmed : ndarray or sequence + The result of trimming the input. 
The number of dimensions and the + input data type are preserved. + + Notes + ----- + For all-zero arrays, the first axis is trimmed first. Examples -------- + >>> import numpy as np >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)) >>> np.trim_zeros(a) array([1, 2, 3, 0, 2, 1]) - >>> np.trim_zeros(a, 'b') + >>> np.trim_zeros(a, trim='b') array([0, 0, 0, ..., 0, 2, 1]) + Multiple dimensions are supported. + + >>> b = np.array([[0, 0, 2, 3, 0, 0], + ... [0, 1, 0, 3, 0, 0], + ... [0, 0, 0, 0, 0, 0]]) + >>> np.trim_zeros(b) + array([[0, 2, 3], + [1, 0, 3]]) + + >>> np.trim_zeros(b, axis=-1) + array([[0, 2, 3], + [1, 0, 3], + [0, 0, 0]]) + The input data type is preserved, list/tuple in means list/tuple out. >>> np.trim_zeros([0, 1, 2, 0]) [1, 2] """ + filt_ = np.asarray(filt) - first = 0 - trim = trim.upper() - if 'F' in trim: - for i in filt: - if i != 0.: - break - else: - first = first + 1 - last = len(filt) - if 'B' in trim: - for i in filt[::-1]: - if i != 0.: - break - else: - last = last - 1 - return filt[first:last] + trim = trim.lower() + if trim not in {"fb", "bf", "f", "b"}: + raise ValueError(f"unexpected character(s) in `trim`: {trim!r}") + if axis is None: + axis_tuple = tuple(range(filt_.ndim)) + else: + axis_tuple = _nx.normalize_axis_tuple(axis, filt_.ndim, argname="axis") + + if not axis_tuple: + # No trimming requested -> return input unmodified. 
+ return filt + + start, stop = _arg_trim_zeros(filt_) + stop += 1 # Adjust for slicing + + if start.size == 0: + # filt is all-zero -> assign same values to start and stop so that + # resulting slice will be empty + start = stop = np.zeros(filt_.ndim, dtype=np.intp) + else: + if 'f' not in trim: + start = (None,) * filt_.ndim + if 'b' not in trim: + stop = (None,) * filt_.ndim + + sl = tuple(slice(start[ax], stop[ax]) if ax in axis_tuple else slice(None) + for ax in range(filt_.ndim)) + if len(sl) == 1: + # filt is 1D -> avoid multi-dimensional slicing to preserve + # non-array input types + return filt[sl[0]] + return filt[sl] def _extract_dispatcher(condition, arr): @@ -1939,6 +2069,7 @@ def extract(condition, arr): Examples -------- + >>> import numpy as np >>> arr = np.arange(12).reshape((3, 4)) >>> arr array([[ 0, 1, 2, 3], @@ -1996,6 +2127,7 @@ def place(arr, mask, vals): Examples -------- + >>> import numpy as np >>> arr = np.arange(6).reshape(2, 3) >>> np.place(arr, arr>2, [44, 55]) >>> arr @@ -2006,67 +2138,12 @@ def place(arr, mask, vals): return _place(arr, mask, vals) -def disp(mesg, device=None, linefeed=True): - """ - Display a message on a device. - - .. deprecated:: 2.0 - Use your own printing function instead. - - Parameters - ---------- - mesg : str - Message to display. - device : object - Device to write message. If None, defaults to ``sys.stdout`` which is - very similar to ``print``. `device` needs to have ``write()`` and - ``flush()`` methods. - linefeed : bool, optional - Option whether to print a line feed or not. Defaults to True. - - Raises - ------ - AttributeError - If `device` does not have a ``write()`` or ``flush()`` method. 
- - Examples - -------- - Besides ``sys.stdout``, a file-like object can also be used as it has - both required methods: - - >>> from io import StringIO - >>> buf = StringIO() - >>> np.disp('"Display" in a file', device=buf) - >>> buf.getvalue() - '"Display" in a file\\n' - - """ - - # Deprecated in NumPy 2.0, 2023-07-11 - warnings.warn( - "`disp` is deprecated, " - "use your own printing function instead. " - "(deprecated in NumPy 2.0)", - DeprecationWarning, - stacklevel=2 - ) - - if device is None: - device = sys.stdout - if linefeed: - device.write('%s\n' % mesg) - else: - device.write('%s' % mesg) - device.flush() - return - - # See https://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html _DIMENSION_NAME = r'\w+' -_CORE_DIMENSION_LIST = '(?:{0:}(?:,{0:})*)?'.format(_DIMENSION_NAME) -_ARGUMENT = r'\({}\)'.format(_CORE_DIMENSION_LIST) -_ARGUMENT_LIST = '{0:}(?:,{0:})*'.format(_ARGUMENT) -_SIGNATURE = '^{0:}->{0:}$'.format(_ARGUMENT_LIST) +_CORE_DIMENSION_LIST = f'(?:{_DIMENSION_NAME}(?:,{_DIMENSION_NAME})*)?' 
+_ARGUMENT = fr'\({_CORE_DIMENSION_LIST}\)' +_ARGUMENT_LIST = f'{_ARGUMENT}(?:,{_ARGUMENT})*' +_SIGNATURE = f'^{_ARGUMENT_LIST}->{_ARGUMENT_LIST}$' def _parse_gufunc_signature(signature): @@ -2088,7 +2165,7 @@ def _parse_gufunc_signature(signature): if not re.match(_SIGNATURE, signature): raise ValueError( - 'not a valid gufunc signature: {}'.format(signature)) + f'not a valid gufunc signature: {signature}') return tuple([tuple(re.findall(_DIMENSION_NAME, arg)) for arg in re.findall(_ARGUMENT, arg_list)] for arg_list in signature.split('->')) @@ -2113,17 +2190,17 @@ def _update_dim_sizes(dim_sizes, arg, core_dims): num_core_dims = len(core_dims) if arg.ndim < num_core_dims: raise ValueError( - '%d-dimensional argument does not have enough ' - 'dimensions for all core dimensions %r' - % (arg.ndim, core_dims)) + f'{arg.ndim}-dimensional argument does not have enough ' + f'dimensions for all core dimensions {core_dims!r}') core_shape = arg.shape[-num_core_dims:] for dim, size in zip(core_dims, core_shape): if dim in dim_sizes: if size != dim_sizes[dim]: raise ValueError( - 'inconsistent size for core dimension %r: %r vs %r' - % (dim, size, dim_sizes[dim])) + f'inconsistent size for core dimension {dim!r}: {size!r} vs ' + f'{dim_sizes[dim]!r}' + ) else: dim_sizes[dim] = size @@ -2219,16 +2296,12 @@ class vectorize: ``pyfunc.__doc__``. excluded : set, optional Set of strings or integers representing the positional or keyword - arguments for which the function will not be vectorized. These will be + arguments for which the function will not be vectorized. These will be passed directly to `pyfunc` unmodified. - .. versionadded:: 1.7.0 - cache : bool, optional - If `True`, then cache the first function call that determines the number - of outputs if `otypes` is not provided. - - .. versionadded:: 1.7.0 + If neither `otypes` nor `signature` are provided, and `cache` is ``True``, then + cache the number of outputs. 
signature : string, optional Generalized universal function signature, e.g., ``(m,n),(n)->(m)`` for @@ -2237,8 +2310,6 @@ class vectorize: size of corresponding core dimensions. By default, ``pyfunc`` is assumed to take scalars as input and output. - .. versionadded:: 1.12.0 - Returns ------- out : callable @@ -2254,12 +2325,12 @@ class vectorize: The `vectorize` function is provided primarily for convenience, not for performance. The implementation is essentially a for loop. - If `otypes` is not specified, then a call to the function with the - first argument will be used to determine the number of outputs. The - results of this call will be cached if `cache` is `True` to prevent - calling the function twice. However, to implement the cache, the - original function must be wrapped which will slow down subsequent - calls, so only do this if your function is expensive. + If neither `otypes` nor `signature` are specified, then a call to the function with + the first argument will be used to determine the number of outputs. The results of + this call will be cached if `cache` is `True` to prevent calling the function + twice. However, to implement the cache, the original function must be wrapped + which will slow down subsequent calls, so only do this if your function is + expensive. The new keyword argument interface and `excluded` argument support further degrades performance. @@ -2270,6 +2341,7 @@ class vectorize: Examples -------- + >>> import numpy as np >>> def myfunc(a, b): ... "Return a-b if a>b, otherwise return a+b" ... if a > b: @@ -2311,15 +2383,15 @@ class vectorize: ... while _p: ... res = res*x + _p.pop(0) ... return res - >>> vpolyval = np.vectorize(mypolyval, excluded=['p']) - >>> vpolyval(p=[1, 2, 3], x=[0, 1]) - array([3, 6]) - Positional arguments may also be excluded by specifying their position: + Here, we exclude the zeroth argument from vectorization whether it is + passed by position or keyword. 
- >>> vpolyval.excluded.add(0) + >>> vpolyval = np.vectorize(mypolyval, excluded={0, 'p'}) >>> vpolyval([1, 2, 3], x=[0, 1]) array([3, 6]) + >>> vpolyval(p=[1, 2, 3], x=[0, 1]) + array([3, 6]) The `signature` argument allows for vectorizing functions that act on non-scalar arrays of fixed length. For example, you can use it for a @@ -2360,8 +2432,8 @@ def __init__(self, pyfunc=np._NoValue, otypes=None, doc=None, excluded=None, cache=False, signature=None): if (pyfunc != np._NoValue) and (not callable(pyfunc)): - #Splitting the error message to keep - #the length below 79 characters. + # Splitting the error message to keep + # the length below 79 characters. part1 = "When used as a decorator, " part2 = "only accepts keyword arguments." raise TypeError(part1 + part2) @@ -2383,7 +2455,7 @@ def __init__(self, pyfunc=np._NoValue, otypes=None, doc=None, if isinstance(otypes, str): for char in otypes: if char not in typecodes['All']: - raise ValueError("Invalid otype specified: %s" % (char,)) + raise ValueError(f"Invalid otype specified: {char}") elif iterable(otypes): otypes = [_get_vectorize_dtype(_nx.dtype(x)) for x in otypes] elif otypes is not None: @@ -2475,7 +2547,7 @@ def _get_ufunc_and_otypes(self, func, args): # the subsequent call when the ufunc is evaluated. 
# Assumes that ufunc first evaluates the 0th elements in the input # arrays (the input values are not checked to ensure this) - args = [asarray(arg) for arg in args] + args = [asarray(a) for a in args] if builtins.any(arg.size == 0 for arg in args): raise ValueError('cannot call `vectorize` on size 0 inputs ' 'unless `otypes` is set') @@ -2522,17 +2594,15 @@ def _vectorize_call(self, func, args): res = func() else: ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args) - - # Convert args to object arrays first - inputs = [asanyarray(a, dtype=object) for a in args] - - outputs = ufunc(*inputs) + # gh-29196: `dtype=object` should eventually be removed + args = [asanyarray(a, dtype=object) for a in args] + outputs = ufunc(*args, out=...) if ufunc.nout == 1: res = asanyarray(outputs, dtype=otypes[0]) else: - res = tuple([asanyarray(x, dtype=t) - for x, t in zip(outputs, otypes)]) + res = tuple(asanyarray(x, dtype=t) + for x, t in zip(outputs, otypes)) return res def _vectorize_call_with_signature(self, func, args): @@ -2540,9 +2610,10 @@ def _vectorize_call_with_signature(self, func, args): input_core_dims, output_core_dims = self._in_and_out_core_dims if len(args) != len(input_core_dims): - raise TypeError('wrong number of positional arguments: ' - 'expected %r, got %r' - % (len(input_core_dims), len(args))) + raise TypeError( + 'wrong number of positional arguments: ' + f'expected {len(input_core_dims)!r}, got {len(args)!r}' + ) args = tuple(asanyarray(arg) for arg in args) broadcast_shape, dim_sizes = _parse_input_dimensions( @@ -2563,8 +2634,9 @@ def _vectorize_call_with_signature(self, func, args): if nout != n_results: raise ValueError( - 'wrong number of outputs from pyfunc: expected %r, got %r' - % (nout, n_results)) + f'wrong number of outputs from pyfunc: expected {nout!r}, ' + f'got {n_results!r}' + ) if nout == 1: results = (results,) @@ -2608,7 +2680,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, Estimate a covariance 
matrix, given data and weights. Covariance indicates the level to which two variables vary together. - If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`, + If we examine N-dimensional samples, :math:`X = [x_1, x_2, ..., x_N]^T`, then the covariance matrix element :math:`C_{ij}` is the covariance of :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance of :math:`x_i`. @@ -2640,20 +2712,14 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, `fweights` and `aweights` are specified, and ``ddof=0`` will return the simple average. See the notes for the details. The default value is ``None``. - - .. versionadded:: 1.5 fweights : array_like, int, optional 1-D array of integer frequency weights; the number of times each observation vector should be repeated. - - .. versionadded:: 1.10 aweights : array_like, optional 1-D array of observation vector weights. These relative weights are typically large for observations considered "important" and smaller for observations considered less "important". If ``ddof=0`` the array of weights can be used to assign probabilities to observation vectors. - - .. versionadded:: 1.10 dtype : data-type, optional Data-type of the result. By default, the return data-type will have at least `numpy.float64` precision. 
@@ -2691,6 +2757,8 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, Examples -------- + >>> import numpy as np + Consider two variables, :math:`x_0` and :math:`x_1`, which correlate perfectly, but in opposite directions: @@ -2746,7 +2814,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, dtype = np.result_type(m, y, np.float64) X = array(m, ndmin=2, dtype=dtype) - if not rowvar and X.shape[0] != 1: + if not rowvar and m.ndim != 1: X = X.T if X.shape[0] == 0: return np.array([]).reshape(0, 0) @@ -2806,7 +2874,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, elif aweights is None: fact = w_sum - ddof else: - fact = w_sum - ddof*sum(w*aweights)/w_sum + fact = w_sum - ddof * sum(w * aweights) / w_sum if fact <= 0: warnings.warn("Degrees of freedom <= 0 for slice", @@ -2817,19 +2885,19 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, if w is None: X_T = X.T else: - X_T = (X*w).T + X_T = (X * w).T c = dot(X, X_T.conj()) c *= np.true_divide(1, fact) return c.squeeze() -def _corrcoef_dispatcher(x, y=None, rowvar=None, bias=None, ddof=None, *, +def _corrcoef_dispatcher(x, y=None, rowvar=None, *, dtype=None): return (x, y) @array_function_dispatch(_corrcoef_dispatcher) -def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue, *, +def corrcoef(x, y=None, rowvar=True, *, dtype=None): """ Return Pearson product-moment correlation coefficients. @@ -2856,14 +2924,7 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue, *, variable, with observations in the columns. Otherwise, the relationship is transposed: each column represents a variable, while the rows contain observations. - bias : _NoValue, optional - Has no effect, do not use. - - .. deprecated:: 1.10.0 - ddof : _NoValue, optional - Has no effect, do not use. - .. deprecated:: 1.10.0 dtype : data-type, optional Data-type of the result. 
By default, the return data-type will have at least `numpy.float64` precision. @@ -2887,13 +2948,10 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue, *, interval [-1, 1] in an attempt to improve on that situation but is not much help in the complex case. - This function accepts but discards arguments `bias` and `ddof`. This is - for backwards compatibility with previous versions of this function. These - arguments had no effect on the return values of the function and can be - safely ignored in this and previous versions of numpy. - Examples -------- + >>> import numpy as np + In this example we generate two random arrays, ``xarr`` and ``yarr``, and compute the row-wise and column-wise Pearson correlation coefficients, ``R``. Since ``rowvar`` is true by default, we first find the row-wise @@ -2956,10 +3014,6 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue, *, 1. ]]) """ - if bias is not np._NoValue or ddof is not np._NoValue: - # 2015-03-15, 1.10 - warnings.warn('bias and ddof have no effect and are deprecated', - DeprecationWarning, stacklevel=2) c = cov(x, y, rowvar, dtype=dtype) try: d = diag(c) @@ -3019,18 +3073,18 @@ def blackman(M): "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. It is known as a "near optimal" tapering function, almost as good (by some measures) - as the kaiser window. + as the Kaiser window. References ---------- - Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, - Dover Publications, New York. - - Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. - Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. + .. [1] Blackman, R.B. and Tukey, J.W., (1958) + The measurement of power spectra, Dover Publications, New York. + .. [2] Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. + Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. 
Examples -------- + >>> import numpy as np >>> import matplotlib.pyplot as plt >>> np.blackman(12) array([-1.38777878e-17, 3.26064346e-02, 1.59903635e-01, # may vary @@ -3076,8 +3130,8 @@ def blackman(M): return array([], dtype=values.dtype) if M == 1: return ones(1, dtype=values.dtype) - n = arange(1-M, M, 2) - return 0.42 + 0.5*cos(pi*n/(M-1)) + 0.08*cos(2.0*pi*n/(M-1)) + n = arange(1 - M, M, 2) + return 0.42 + 0.5 * cos(pi * n / (M - 1)) + 0.08 * cos(2.0 * pi * n / (M - 1)) @set_module('numpy') @@ -3139,6 +3193,7 @@ def bartlett(M): Examples -------- + >>> import numpy as np >>> import matplotlib.pyplot as plt >>> np.bartlett(12) array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, # may vary @@ -3182,8 +3237,8 @@ def bartlett(M): return array([], dtype=values.dtype) if M == 1: return ones(1, dtype=values.dtype) - n = arange(1-M, M, 2) - return where(less_equal(n, 0), 1 + n/(M-1), 1 - n/(M-1)) + n = arange(1 - M, M, 2) + return where(less_equal(n, 0), 1 + n / (M - 1), 1 - n / (M - 1)) @set_module('numpy') @@ -3240,6 +3295,7 @@ def hanning(M): Examples -------- + >>> import numpy as np >>> np.hanning(12) array([0. 
, 0.07937323, 0.29229249, 0.57115742, 0.82743037, 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249, @@ -3283,8 +3339,8 @@ def hanning(M): return array([], dtype=values.dtype) if M == 1: return ones(1, dtype=values.dtype) - n = arange(1-M, M, 2) - return 0.5 + 0.5*cos(pi*n/(M-1)) + n = arange(1 - M, M, 2) + return 0.5 + 0.5 * cos(pi * n / (M - 1)) @set_module('numpy') @@ -3339,6 +3395,7 @@ def hamming(M): Examples -------- + >>> import numpy as np >>> np.hamming(12) array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, # may vary 0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909, @@ -3381,8 +3438,8 @@ def hamming(M): return array([], dtype=values.dtype) if M == 1: return ones(1, dtype=values.dtype) - n = arange(1-M, M, 2) - return 0.54 + 0.46*cos(pi*n/(M-1)) + n = arange(1 - M, M, 2) + return 0.54 + 0.46 * cos(pi * n / (M - 1)) ## Code from cephes for i0 @@ -3456,17 +3513,17 @@ def _chbevl(x, vals): for i in range(1, len(vals)): b2 = b1 b1 = b0 - b0 = x*b1 - b2 + vals[i] + b0 = x * b1 - b2 + vals[i] - return 0.5*(b0 - b2) + return 0.5 * (b0 - b2) def _i0_1(x): - return exp(x) * _chbevl(x/2.0-2, _i0A) + return exp(x) * _chbevl(x / 2.0 - 2, _i0A) def _i0_2(x): - return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x) + return exp(x) * _chbevl(32.0 / x - 2.0, _i0B) / sqrt(x) def _i0_dispatcher(x): @@ -3518,6 +3575,7 @@ def i0(x): Examples -------- + >>> import numpy as np >>> np.i0(0.) 
array(1.0) >>> np.i0([0, 1, 2, 3]) @@ -3614,6 +3672,7 @@ def kaiser(M, beta): Examples -------- + >>> import numpy as np >>> import matplotlib.pyplot as plt >>> np.kaiser(12, 14) array([7.72686684e-06, 3.46009194e-03, 4.65200189e-02, # may vary @@ -3661,8 +3720,8 @@ def kaiser(M, beta): if M == 1: return np.ones(1, dtype=values.dtype) n = arange(0, M) - alpha = (M-1)/2.0 - return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(beta) + alpha = (M - 1) / 2.0 + return i0(beta * sqrt(1 - ((n - alpha) / alpha)**2.0)) / i0(beta) def _sinc_dispatcher(x): @@ -3716,6 +3775,7 @@ def sinc(x): Examples -------- + >>> import numpy as np >>> import matplotlib.pyplot as plt >>> x = np.linspace(-4, 4, 41) >>> np.sinc(x) @@ -3746,8 +3806,11 @@ def sinc(x): """ x = np.asanyarray(x) - y = pi * where(x == 0, 1.0e-20, x) - return sin(y)/y + x = pi * x + # Hope that 1e-20 is sufficient for objects... + eps = np.finfo(x.dtype).eps if x.dtype.kind == "f" else 1e-20 + y = where(x, x, eps) + return sin(y) / y def _ureduce(a, func, keepdims=False, **kwargs): @@ -3777,8 +3840,8 @@ def _ureduce(a, func, keepdims=False, **kwargs): """ a = np.asanyarray(a) - axis = kwargs.get('axis', None) - out = kwargs.get('out', None) + axis = kwargs.get('axis') + out = kwargs.get('out') if keepdims is np._NoValue: keepdims = False @@ -3787,28 +3850,33 @@ def _ureduce(a, func, keepdims=False, **kwargs): if axis is not None: axis = _nx.normalize_axis_tuple(axis, nd) - if keepdims: - if out is not None: - index_out = tuple( - 0 if i in axis else slice(None) for i in range(nd)) - kwargs['out'] = out[(Ellipsis, ) + index_out] + if keepdims and out is not None: + index_out = tuple( + 0 if i in axis else slice(None) for i in range(nd)) + kwargs['out'] = out[(Ellipsis, ) + index_out] if len(axis) == 1: kwargs['axis'] = axis[0] else: - keep = set(range(nd)) - set(axis) + keep = sorted(set(range(nd)) - set(axis)) nkeep = len(keep) - # swap axis that should not be reduced to front - for i, s in enumerate(sorted(keep)): - 
a = a.swapaxes(i, s) - # merge reduced axis - a = a.reshape(a.shape[:nkeep] + (-1,)) + + def reshape_arr(a): + # move axis that should not be reduced to front + a = np.moveaxis(a, keep, range(nkeep)) + # merge reduced axis + return a.reshape(a.shape[:nkeep] + (-1,)) + + a = reshape_arr(a) + + weights = kwargs.get("weights") + if weights is not None: + kwargs["weights"] = reshape_arr(weights) + kwargs['axis'] = -1 - else: - if keepdims: - if out is not None: - index_out = (0, ) * nd - kwargs['out'] = out[(Ellipsis, ) + index_out] + elif keepdims and out is not None: + index_out = (0, ) * nd + kwargs['out'] = out[(Ellipsis, ) + index_out] r = func(a, **kwargs) @@ -3846,13 +3914,9 @@ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): axis : {int, sequence of int, None}, optional Axis or axes along which the medians are computed. The default, axis=None, will compute the median along a flattened version of - the array. - - .. versionadded:: 1.9.0 - - If a sequence of axes, the array is first flattened along the - given axes, then the median is computed along the resulting - flattened axis. + the array. If a sequence of axes, the array is first flattened + along the given axes, then the median is computed along the + resulting flattened axis. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output, @@ -3870,8 +3934,6 @@ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. - .. 
versionadded:: 1.9.0 - Returns ------- median : ndarray @@ -3894,6 +3956,7 @@ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): Examples -------- + >>> import numpy as np >>> a = np.array([[10, 7, 4], [3, 2, 1]]) >>> a array([[10, 7, 4], @@ -3967,9 +4030,9 @@ def _median(a, axis=None, out=None, overwrite_input=False): index = part.shape[axis] // 2 if part.shape[axis] % 2 == 1: # index with slice to allow mean (below) to work - indexer[axis] = slice(index, index+1) + indexer[axis] = slice(index, index + 1) else: - indexer[axis] = slice(index-1, index+1) + indexer[axis] = slice(index - 1, index + 1) indexer = tuple(indexer) # Use mean in both odd and even case to coerce data type, @@ -3983,8 +4046,7 @@ def _median(a, axis=None, out=None, overwrite_input=False): def _percentile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, - method=None, keepdims=None, *, weights=None, - interpolation=None): + method=None, keepdims=None, *, weights=None): return (a, q, out, weights) @@ -3997,8 +4059,7 @@ def percentile(a, method="linear", keepdims=False, *, - weights=None, - interpolation=None): + weights=None): """ Compute the q-th percentile of the data along the specified axis. @@ -4015,9 +4076,6 @@ def percentile(a, Axis or axes along which the percentiles are computed. The default is to compute the percentile(s) along a flattened version of the array. - - .. versionchanged:: 1.9.0 - A tuple of axes is supported out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output, @@ -4059,9 +4117,7 @@ def percentile(a, the result as dimensions with size one. With this option, the result will broadcast correctly against the original array `a`. - .. versionadded:: 1.9.0 - - weights : array_like, optional + weights : array_like, optional An array of weights associated with the values in `a`. 
Each value in `a` contributes to the percentile according to its associated weight. The weights array can either be 1-D (in which case its length must be @@ -4073,11 +4129,6 @@ def percentile(a, .. versionadded:: 2.0.0 - interpolation : str, optional - Deprecated name for the method keyword argument. - - .. deprecated:: 1.22.0 - Returns ------- percentile : scalar or ndarray @@ -4105,6 +4156,7 @@ def percentile(a, Examples -------- + >>> import numpy as np >>> a = np.array([[10, 7, 4], [3, 2, 1]]) >>> a array([[10, 7, 4], @@ -4172,18 +4224,12 @@ def percentile(a, The American Statistician, 50(4), pp. 361-365, 1996 """ - if interpolation is not None: - method = _check_interpolation_as_method( - method, interpolation, "percentile") - a = np.asanyarray(a) if a.dtype.kind == "c": raise TypeError("a must be an array of real numbers") - # Use dtype of array if possible (e.g., if q is a python int or float) - # by making the divisor have the dtype of the data array. - q = np.true_divide(q, a.dtype.type(100) if a.dtype.kind == "f" else 100) - q = asanyarray(q) # undo any decay that the ufunc performed (see gh-13105) + weak_q = type(q) in (int, float) # use weak promotion for final result type + q = np.true_divide(q, 100, out=...) 
if not _quantile_is_valid(q): raise ValueError("Percentiles must be in the range [0, 100]") @@ -4199,12 +4245,11 @@ def percentile(a, raise ValueError("Weights must be non-negative.") return _quantile_unchecked( - a, q, axis, out, overwrite_input, method, keepdims, weights) + a, q, axis, out, overwrite_input, method, keepdims, weights, weak_q) def _quantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, - method=None, keepdims=None, *, weights=None, - interpolation=None): + method=None, keepdims=None, *, weights=None): return (a, q, out, weights) @@ -4217,13 +4262,10 @@ def quantile(a, method="linear", keepdims=False, *, - weights=None, - interpolation=None): + weights=None): """ Compute the q-th quantile of the data along the specified axis. - .. versionadded:: 1.15.0 - Parameters ---------- a : array_like of real numbers @@ -4290,11 +4332,6 @@ def quantile(a, .. versionadded:: 2.0.0 - interpolation : str, optional - Deprecated name for the method keyword argument. - - .. deprecated:: 1.22.0 - Returns ------- quantile : scalar or ndarray @@ -4355,12 +4392,14 @@ def quantile(a, The table above includes only the estimators from H&F that are continuous functions of probability `q` (estimators 4-9). NumPy also provides the three discontinuous estimators from H&F (estimators 1-3), where ``j`` is - defined as above and ``m`` and ``g`` are defined as follows. + defined as above, ``m`` is defined as follows, and ``g`` is a function + of the real-valued ``index = q*n + m - 1`` and ``j``. - 1. ``inverted_cdf``: ``m = 0`` and ``g = int(q*n > 0)`` - 2. ``averaged_inverted_cdf``: ``m = 0`` and ``g = (1 + int(q*n > 0)) / 2`` + 1. ``inverted_cdf``: ``m = 0`` and ``g = int(index - j > 0)`` + 2. ``averaged_inverted_cdf``: ``m = 0`` and + ``g = (1 + int(index - j > 0)) / 2`` 3. 
``closest_observation``: ``m = -1/2`` and - ``1 - int((g == 0) & (j%2 == 0))`` + ``g = 1 - int((index == j) & (j%2 == 1))`` For backward compatibility with previous versions of NumPy, `quantile` provides four additional discontinuous estimators. Like @@ -4394,12 +4433,13 @@ def quantile(a, For weighted quantiles, the coverage conditions still hold. The empirical cumulative distribution is simply replaced by its weighted - version, i.e. + version, i.e. :math:`P(Y \\leq t) = \\frac{1}{\\sum_i w_i} \\sum_i w_i 1_{x_i \\leq t}`. Only ``method="inverted_cdf"`` supports weights. Examples -------- + >>> import numpy as np >>> a = np.array([[10, 7, 4], [3, 2, 1]]) >>> a array([[10, 7, 4], @@ -4433,19 +4473,12 @@ def quantile(a, The American Statistician, 50(4), pp. 361-365, 1996 """ - if interpolation is not None: - method = _check_interpolation_as_method( - method, interpolation, "quantile") - a = np.asanyarray(a) if a.dtype.kind == "c": raise TypeError("a must be an array of real numbers") - # Use dtype of array if possible (e.g., if q is a python int or float). 
- if isinstance(q, (int, float)) and a.dtype.kind == "f": - q = np.asanyarray(q, dtype=a.dtype) - else: - q = np.asanyarray(q) + weak_q = type(q) in (int, float) # use weak promotion for final result type + q = np.asanyarray(q) if not _quantile_is_valid(q): raise ValueError("Quantiles must be in the range [0, 1]") @@ -4462,7 +4495,7 @@ def quantile(a, raise ValueError("Weights must be non-negative.") return _quantile_unchecked( - a, q, axis, out, overwrite_input, method, keepdims, weights) + a, q, axis, out, overwrite_input, method, keepdims, weights, weak_q) def _quantile_unchecked(a, @@ -4472,7 +4505,8 @@ def _quantile_unchecked(a, overwrite_input=False, method="linear", keepdims=False, - weights=None): + weights=None, + weak_q=False): """Assumes that q is in [0, 1], and is an ndarray""" return _ureduce(a, func=_quantile_ureduce_func, @@ -4482,7 +4516,8 @@ def _quantile_unchecked(a, axis=axis, out=out, overwrite_input=overwrite_input, - method=method) + method=method, + weak_q=weak_q) def _quantile_is_valid(q): @@ -4491,29 +4526,11 @@ def _quantile_is_valid(q): for i in range(q.size): if not (0.0 <= q[i] <= 1.0): return False - else: - if not (q.min() >= 0 and q.max() <= 1): - return False + elif not (q.min() >= 0 and q.max() <= 1): + return False return True -def _check_interpolation_as_method(method, interpolation, fname): - # Deprecated NumPy 1.22, 2021-11-08 - warnings.warn( - f"the `interpolation=` argument to {fname} was renamed to " - "`method=`, which has additional options.\n" - "Users of the modes 'nearest', 'lower', 'higher', or " - "'midpoint' are encouraged to review the method they used. 
" - "(Deprecated NumPy 1.22)", - DeprecationWarning, stacklevel=4) - if method != "linear": - # sanity check, we assume this basically never happens - raise TypeError( - "You shall not pass both `method` and `interpolation`!\n" - "(`interpolation` is Deprecated in favor of `method`)") - return interpolation - - def _compute_virtual_index(n, quantiles, alpha: float, beta: float): """ Compute the floating point indexes of an array for the linear @@ -4549,7 +4566,7 @@ def _get_gamma(virtual_indexes, previous_indexes, method): sample. previous_indexes : array_like The floor values of virtual_indexes. - interpolation : dict + method : dict The interpolation method chosen, which may have a specific rule modifying gamma. @@ -4577,9 +4594,8 @@ def _lerp(a, b, t, out=None): out : array_like Output array. """ - diff_b_a = subtract(b, a) - # asanyarray is a stop-gap until gh-13105 - lerp_interpolation = asanyarray(add(a, diff_b_a * t, out=out)) + diff_b_a = b - a + lerp_interpolation = add(a, diff_b_a * t, out=... if out is None else out) subtract(b, diff_b_a * (1 - t), out=lerp_interpolation, where=t >= 0.5, casting='unsafe', dtype=type(lerp_interpolation.dtype)) if lerp_interpolation.ndim == 0 and out is None: @@ -4593,7 +4609,7 @@ def _get_gamma_mask(shape, default_value, conditioned_value, where): return out -def _discret_interpolation_to_boundaries(index, gamma_condition_fun): +def _discrete_interpolation_to_boundaries(index, gamma_condition_fun): previous = np.floor(index) next = previous + 1 gamma = index - previous @@ -4608,26 +4624,29 @@ def _discret_interpolation_to_boundaries(index, gamma_condition_fun): def _closest_observation(n, quantiles): - gamma_fun = lambda gamma, index: (gamma == 0) & (np.floor(index) % 2 == 0) - return _discret_interpolation_to_boundaries((n * quantiles) - 1 - 0.5, - gamma_fun) + # "choose the nearest even order statistic at g=0" (H&F (1996) pp. 362). + # Order is 1-based so for zero-based indexing round to nearest odd index. 
+ gamma_fun = lambda gamma, index: (gamma == 0) & (np.floor(index) % 2 == 1) + return _discrete_interpolation_to_boundaries((n * quantiles) - 1 - 0.5, + gamma_fun) def _inverted_cdf(n, quantiles): gamma_fun = lambda gamma, _: (gamma == 0) - return _discret_interpolation_to_boundaries((n * quantiles) - 1, - gamma_fun) + return _discrete_interpolation_to_boundaries((n * quantiles) - 1, + gamma_fun) def _quantile_ureduce_func( - a: np.array, - q: np.array, - weights: np.array, - axis: int = None, - out=None, - overwrite_input: bool = False, - method="linear", -) -> np.array: + a: np.ndarray, + q: np.ndarray, + weights: np.ndarray | None, + axis: int | None = None, + out: np.ndarray | None = None, + overwrite_input: bool = False, + method: str = "linear", + weak_q: bool = False, +) -> np.ndarray: if q.ndim > 2: # The code below works fine for nd, but it might not have useful # semantics. For now, keep the supported dimensions the same as it was @@ -4641,20 +4660,20 @@ def _quantile_ureduce_func( else: arr = a wgt = weights + elif axis is None: + axis = 0 + arr = a.flatten() + wgt = None if weights is None else weights.flatten() else: - if axis is None: - axis = 0 - arr = a.flatten() - wgt = None if weights is None else weights.flatten() - else: - arr = a.copy() - wgt = weights + arr = a.copy() + wgt = weights result = _quantile(arr, quantiles=q, axis=axis, method=method, out=out, - weights=wgt) + weights=wgt, + weak_q=weak_q) return result @@ -4670,8 +4689,8 @@ def _get_indexes(arr, virtual_indexes, valid_values_count): (previous_indexes, next_indexes): Tuple A Tuple of virtual_indexes neighbouring indexes """ - previous_indexes = np.asanyarray(np.floor(virtual_indexes)) - next_indexes = np.asanyarray(previous_indexes + 1) + previous_indexes = floor(virtual_indexes, out=...) + next_indexes = add(previous_indexes, 1, out=...) 
indexes_above_bounds = virtual_indexes >= valid_values_count - 1 # When indexes is above max index, take the max value of the array if indexes_above_bounds.any(): @@ -4694,19 +4713,20 @@ def _get_indexes(arr, virtual_indexes, valid_values_count): def _quantile( - arr: np.array, - quantiles: np.array, - axis: int = -1, - method="linear", - out=None, - weights=None, -): + arr: "np.typing.ArrayLike", + quantiles: np.ndarray, + axis: int = -1, + method: str = "linear", + out: np.ndarray | None = None, + weights: "np.typing.ArrayLike | None" = None, + weak_q: bool = False, +) -> np.ndarray: """ Private function that doesn't support extended axis or keepdims. - These methods are extended to this function using _ureduce - See nanpercentile for parameter usage + These methods are extended to this function using _ureduce. + See nanpercentile for parameter usage. It computes the quantiles of the array for the given axis. - A linear interpolation is performed based on the `interpolation`. + A linear interpolation is performed based on the `method`. By default, the method is "linear" where alpha == beta == 1 which performs the 7th method of Hyndman&Fan. @@ -4727,7 +4747,7 @@ def _quantile( if weights is None: # --- Computation of indexes # Index where to find the value in the sorted array. - # Virtual because it is a floating point value, not an valid index. + # Virtual because it is a floating point value, not a valid index. 
# The nearest neighbours are used for interpolation try: method_props = _QuantileMethods[method] @@ -4778,9 +4798,13 @@ def _quantile( previous = arr[previous_indexes] next = arr[next_indexes] # --- Linear interpolation - gamma = _get_gamma(virtual_indexes, previous_indexes, method_props) - result_shape = virtual_indexes.shape + (1,) * (arr.ndim - 1) - gamma = gamma.reshape(result_shape) + gamma = _get_gamma(virtual_indexes, previous_indexes, + method_props) + if weak_q: + gamma = float(gamma) + else: + result_shape = virtual_indexes.shape + (1,) * (arr.ndim - 1) + gamma = gamma.reshape(result_shape) result = _lerp(previous, next, gamma, @@ -4792,7 +4816,7 @@ def _quantile( weights = np.asanyarray(weights) if axis != 0: weights = np.moveaxis(weights, axis, destination=0) - index_array = np.argsort(arr, axis=0, kind="stable") + index_array = np.argsort(arr, axis=0) # arr = arr[index_array, ...] # but this adds trailing dimensions of # 1. @@ -4814,6 +4838,9 @@ def _quantile( # distribution function cdf cdf = weights.cumsum(axis=0, dtype=np.float64) cdf /= cdf[-1, ...] # normalization to 1 + if np.isnan(cdf[-1]).any(): + # Above calculations should normally warn for the zero/inf case. + raise ValueError("Weights included NaN, inf or were all zero.") # Search index i such that # sum(weights[j], j=0..i-1) < quantile <= sum(weights[j], j=0..i) # is then equivalent to @@ -4828,6 +4855,13 @@ def _quantile( # returns 2 instead of 1 because 0.4 is not binary representable. if quantiles.dtype.kind == "f": cdf = cdf.astype(quantiles.dtype) + # Weights must be non-negative, so we might have zero weights at the + # beginning leading to some leading zeros in cdf. The call to + # np.searchsorted for quantiles=0 will then pick the first element, + # but should pick the first one larger than zero. We + # therefore simply set 0 values in cdf to -1. + if np.any(cdf[0, ...] 
== 0): + cdf[cdf == 0] = -1 def find_cdf_1d(arr, cdf): indices = np.searchsorted(cdf, quantiles, side="left") @@ -4838,7 +4872,7 @@ def find_cdf_1d(arr, cdf): return result r_shape = arr.shape[1:] - if quantiles.ndim > 0: + if quantiles.ndim > 0: r_shape = quantiles.shape + r_shape if out is None: result = np.empty_like(arr, shape=r_shape) @@ -4933,6 +4967,8 @@ def trapezoid(y, x=None, dx=1.0, axis=-1): Examples -------- + >>> import numpy as np + Use the trapezoidal rule on evenly spaced points: >>> np.trapezoid([1, 2, 3]) @@ -4987,14 +5023,14 @@ def trapezoid(y, x=None, dx=1.0, axis=-1): if x.ndim == 1: d = diff(x) # reshape to correct shape - shape = [1]*y.ndim + shape = [1] * y.ndim shape[axis] = d.shape[0] d = d.reshape(shape) else: d = diff(x, axis=axis) nd = y.ndim - slice1 = [slice(None)]*nd - slice2 = [slice(None)]*nd + slice1 = [slice(None)] * nd + slice2 = [slice(None)] * nd slice1[axis] = slice(1, None) slice2[axis] = slice(None, -1) try: @@ -5003,28 +5039,10 @@ def trapezoid(y, x=None, dx=1.0, axis=-1): # Operations didn't work, cast to ndarray d = np.asarray(d) y = np.asarray(y) - ret = add.reduce(d * (y[tuple(slice1)]+y[tuple(slice2)])/2.0, axis) + ret = add.reduce(d * (y[tuple(slice1)] + y[tuple(slice2)]) / 2.0, axis) return ret -@set_module('numpy') -def trapz(y, x=None, dx=1.0, axis=-1): - """ - `trapz` is deprecated in NumPy 2.0. - - Please use `trapezoid` instead, or one of the numerical integration - functions in `scipy.integrate`. - """ - # Deprecated in NumPy 2.0, 2023-08-18 - warnings.warn( - "`trapz` is deprecated. 
Use `trapezoid` instead, or one of the " - "numerical integration functions in `scipy.integrate`.", - DeprecationWarning, - stacklevel=2 - ) - return trapezoid(y, x=x, dx=dx, axis=axis) - - def _meshgrid_dispatcher(*xi, copy=None, sparse=None, indexing=None): return xi @@ -5039,9 +5057,6 @@ def meshgrid(*xi, copy=True, sparse=False, indexing='xy'): N-D scalar/vector fields over N-D grids, given one-dimensional coordinate arrays x1, x2,..., xn. - .. versionchanged:: 1.9 - 1-D and 0-D cases are allowed. - Parameters ---------- x1, x2,..., xn : array_like @@ -5049,19 +5064,16 @@ def meshgrid(*xi, copy=True, sparse=False, indexing='xy'): indexing : {'xy', 'ij'}, optional Cartesian ('xy', default) or matrix ('ij') indexing of output. See Notes for more details. - - .. versionadded:: 1.7.0 sparse : bool, optional If True the shape of the returned coordinate array for dimension *i* is reduced from ``(N1, ..., Ni, ... Nn)`` to ``(1, ..., 1, Ni, 1, ..., 1)``. These sparse coordinate grids are - intended to be use with :ref:`basics.broadcasting`. When all + intended to be used with :ref:`basics.broadcasting`. When all coordinates are used in an expression, broadcasting still leads to a fully-dimensonal result array. Default is False. - .. versionadded:: 1.7.0 copy : bool, optional If False, a view into the original arrays are returned in order to conserve memory. Default is True. Please note that @@ -5070,8 +5082,6 @@ def meshgrid(*xi, copy=True, sparse=False, indexing='xy'): may refer to a single memory location. If you need to write to the arrays, make copies first. - .. 
versionadded:: 1.7.0 - Returns ------- X1, X2,..., XN : tuple of ndarrays @@ -5113,6 +5123,7 @@ def meshgrid(*xi, copy=True, sparse=False, indexing='xy'): Examples -------- + >>> import numpy as np >>> nx, ny = (3, 2) >>> x = np.linspace(0, 1, nx) >>> y = np.linspace(0, 1, ny) @@ -5175,8 +5186,8 @@ def meshgrid(*xi, copy=True, sparse=False, indexing='xy'): if indexing == 'xy' and ndim > 1: # switch first and second axis - output[0].shape = (1, -1) + s0[2:] - output[1].shape = (-1, 1) + s0[2:] + output[0] = output[0].reshape((1, -1) + s0[2:]) + output[1] = output[1].reshape((-1, 1) + s0[2:]) if not sparse: # Return the full N-D matrix (not only the 1-D vector) @@ -5185,6 +5196,9 @@ def meshgrid(*xi, copy=True, sparse=False, indexing='xy'): if copy: output = tuple(x.copy() for x in output) + if sparse and not copy: + return tuple(output) + return output @@ -5203,7 +5217,7 @@ def delete(arr, obj, axis=None): ---------- arr : array_like Input array. - obj : slice, int or array of ints + obj : slice, int, array-like of ints or bools Indicate indices of sub-arrays to remove along the specified axis. .. versionchanged:: 1.19.0 @@ -5231,7 +5245,7 @@ def delete(arr, obj, axis=None): Often it is preferable to use a boolean mask. For example: >>> arr = np.arange(12) + 1 - >>> mask = np.ones(len(arr), dtype=bool) + >>> mask = np.ones(len(arr), dtype=np.bool) >>> mask[[0,2,4]] = False >>> result = arr[mask,...] 
@@ -5240,6 +5254,7 @@ def delete(arr, obj, axis=None): Examples -------- + >>> import numpy as np >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) >>> arr array([[ 1, 2, 3, 4], @@ -5271,7 +5286,7 @@ def delete(arr, obj, axis=None): else: axis = normalize_axis_index(axis, ndim) - slobj = [slice(None)]*ndim + slobj = [slice(None)] * ndim N = arr.shape[axis] newshape = list(arr.shape) @@ -5301,18 +5316,18 @@ def delete(arr, obj, axis=None): if stop == N: pass else: - slobj[axis] = slice(stop-numtodel, None) - slobj2 = [slice(None)]*ndim + slobj[axis] = slice(stop - numtodel, None) + slobj2 = [slice(None)] * ndim slobj2[axis] = slice(stop, None) new[tuple(slobj)] = arr[tuple(slobj2)] # copy middle pieces if step == 1: pass else: # use array indexing. - keep = ones(stop-start, dtype=bool) - keep[:stop-start:step] = False - slobj[axis] = slice(start, stop-numtodel) - slobj2 = [slice(None)]*ndim + keep = ones(stop - start, dtype=bool) + keep[:stop - start:step] = False + slobj[axis] = slice(start, stop - numtodel) + slobj2 = [slice(None)] * ndim slobj2[axis] = slice(start, stop) arr = arr[tuple(slobj2)] slobj2[axis] = keep @@ -5340,8 +5355,8 @@ def delete(arr, obj, axis=None): # optimization for a single value if (obj < -N or obj >= N): raise IndexError( - "index %i is out of bounds for axis %i with " - "size %i" % (obj, axis, N)) + f"index {obj} is out of bounds for axis {axis} with " + f"size {N}") if (obj < 0): obj += N newshape[axis] -= 1 @@ -5349,15 +5364,15 @@ def delete(arr, obj, axis=None): slobj[axis] = slice(None, obj) new[tuple(slobj)] = arr[tuple(slobj)] slobj[axis] = slice(obj, None) - slobj2 = [slice(None)]*ndim - slobj2[axis] = slice(obj+1, None) + slobj2 = [slice(None)] * ndim + slobj2[axis] = slice(obj + 1, None) new[tuple(slobj)] = arr[tuple(slobj2)] else: if obj.dtype == bool: if obj.shape != (N,): raise ValueError('boolean array argument obj to delete ' 'must be one dimensional and match the axis ' - 'length of {}'.format(N)) + f'length of 
{N}') # optimization, the other branch is slower keep = ~obj @@ -5384,11 +5399,13 @@ def insert(arr, obj, values, axis=None): ---------- arr : array_like Input array. - obj : int, slice or sequence of ints + obj : slice, int, array-like of ints or bools Object that defines the index or indices before which `values` is inserted. - .. versionadded:: 1.8.0 + .. versionchanged:: 2.1.2 + Boolean indices are now treated as a mask of elements to insert, + rather than being cast to the integers 0 and 1. Support for multiple insertions when `obj` is a single scalar or a sequence with one element (similar to calling insert multiple @@ -5424,6 +5441,7 @@ def insert(arr, obj, values, axis=None): Examples -------- + >>> import numpy as np >>> a = np.arange(6).reshape(3, 2) >>> a array([[0, 1], @@ -5483,7 +5501,7 @@ def insert(arr, obj, values, axis=None): axis = ndim - 1 else: axis = normalize_axis_index(axis, ndim) - slobj = [slice(None)]*ndim + slobj = [slice(None)] * ndim N = arr.shape[axis] newshape = list(arr.shape) @@ -5494,18 +5512,10 @@ def insert(arr, obj, values, axis=None): # need to copy obj, because indices will be changed in-place indices = np.array(obj) if indices.dtype == bool: - # See also delete - # 2012-10-11, NumPy 1.8 - warnings.warn( - "in the future insert will treat boolean arrays and " - "array-likes as a boolean index instead of casting it to " - "integer", FutureWarning, stacklevel=2) - indices = indices.astype(intp) - # Code after warning period: - #if obj.ndim != 1: - # raise ValueError('boolean array argument obj to insert ' - # 'must be one dimensional') - #indices = np.flatnonzero(obj) + if obj.ndim != 1: + raise ValueError('boolean array argument obj to insert ' + 'must be one dimensional') + indices = np.flatnonzero(obj) elif indices.ndim > 1: raise ValueError( "index array argument obj to insert must be one dimensional " @@ -5531,9 +5541,9 @@ def insert(arr, obj, values, axis=None): new = empty(newshape, arr.dtype, arrorder) slobj[axis] = 
slice(None, index) new[tuple(slobj)] = arr[tuple(slobj)] - slobj[axis] = slice(index, index+numnew) + slobj[axis] = slice(index, index + numnew) new[tuple(slobj)] = values - slobj[axis] = slice(index+numnew, None) + slobj[axis] = slice(index + numnew, None) slobj2 = [slice(None)] * ndim slobj2[axis] = slice(index, None) new[tuple(slobj)] = arr[tuple(slobj2)] @@ -5555,7 +5565,7 @@ def insert(arr, obj, values, axis=None): old_mask[indices] = False new = empty(newshape, arr.dtype, arrorder) - slobj2 = [slice(None)]*ndim + slobj2 = [slice(None)] * ndim slobj[axis] = indices slobj2[axis] = old_mask new[tuple(slobj)] = values @@ -5600,6 +5610,7 @@ def append(arr, values, axis=None): Examples -------- + >>> import numpy as np >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]]) array([1, 2, 3, ..., 7, 8, 9]) @@ -5617,7 +5628,7 @@ def append(arr, values, axis=None): the array at index 0 has 2 dimension(s) and the array at index 1 has 1 dimension(s) - >>> a = np.array([1, 2], dtype=int) + >>> a = np.array([1, 2], dtype=np.int_) >>> c = np.append(a, []) >>> c array([1., 2.]) @@ -5633,7 +5644,7 @@ def append(arr, values, axis=None): if arr.ndim != 1: arr = arr.ravel() values = ravel(values) - axis = arr.ndim-1 + axis = arr.ndim - 1 return concatenate((arr, values), axis=axis) @@ -5712,6 +5723,7 @@ def digitize(x, bins, right=False): Examples -------- + >>> import numpy as np >>> x = np.array([0.2, 6.4, 3.0, 1.6]) >>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0]) >>> inds = np.digitize(x, bins) diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 2650568d3923..5887d7d496ce 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -1,700 +1,2307 @@ -import sys -from collections.abc import Sequence, Iterator, Callable, Iterable +from _typeshed import ConvertibleToInt, Incomplete +from collections.abc import Callable, Iterable, Sequence from typing import ( - Literal as L, Any, - TypeVar, - overload, + Concatenate, + 
Literal as L, + Never, Protocol, SupportsIndex, SupportsInt, + overload, + type_check_only, ) +from typing_extensions import TypeIs -if sys.version_info >= (3, 10): - from typing import TypeGuard -else: - from typing_extensions import TypeGuard - -from numpy import ( - vectorize as vectorize, - ufunc, - generic, - floating, - complexfloating, - intp, - float64, - complex128, - timedelta64, - datetime64, - object_, - _OrderKACF, -) - +import numpy as np +from numpy import _OrderKACF +from numpy._core.multiarray import bincount +from numpy._globals import _NoValueType from numpy._typing import ( - NDArray, ArrayLike, DTypeLike, - _ShapeLike, - _ScalarLike_co, - _DTypeLike, + NDArray, _ArrayLike, - _ArrayLikeInt_co, - _ArrayLikeFloat_co, + _ArrayLikeBool_co, _ArrayLikeComplex_co, - _ArrayLikeTD64_co, - _ArrayLikeDT64_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _ArrayLikeNumber_co, _ArrayLikeObject_co, - _FloatLike_co, _ComplexLike_co, + _DTypeLike, + _FloatLike_co, + _NestedSequence as _SeqND, + _NumberLike_co, + _ScalarLike_co, + _Shape, + _ShapeLike, + _SupportsArray, ) -from numpy._core.multiarray import ( - bincount as bincount, +__all__ = [ + "select", + "piecewise", + "trim_zeros", + "copy", + "iterable", + "percentile", + "diff", + "gradient", + "angle", + "unwrap", + "sort_complex", + "flip", + "rot90", + "extract", + "place", + "vectorize", + "asarray_chkfinite", + "average", + "bincount", + "digitize", + "cov", + "corrcoef", + "median", + "sinc", + "hamming", + "hanning", + "bartlett", + "blackman", + "kaiser", + "trapezoid", + "i0", + "meshgrid", + "delete", + "insert", + "append", + "interp", + "quantile", +] + +type _ArrayLike1D[ScalarT: np.generic] = _SupportsArray[np.dtype[ScalarT]] | Sequence[ScalarT] + +type _integer_co = np.integer | np.bool +type _float64_co = np.float64 | _integer_co +type _floating_co = np.floating | _integer_co + +# non-trivial scalar-types that will become `complex128` in `sort_complex()`, +# i.e. 
all numeric scalar types except for `[u]int{8,16} | longdouble` +type _SortsToComplex128 = ( + np.bool + | np.int32 + | np.uint32 + | np.int64 + | np.uint64 + | np.float16 + | np.float32 + | np.float64 + | np.timedelta64 + | np.object_ ) +type _ScalarNumeric = np.inexact | np.timedelta64 | np.object_ +type _InexactDouble = np.float64 | np.longdouble | np.complex128 | np.clongdouble + +type _Array[ShapeT: _Shape, ScalarT: np.generic] = np.ndarray[ShapeT, np.dtype[ScalarT]] +type _Array0D[ScalarT: np.generic] = np.ndarray[tuple[()], np.dtype[ScalarT]] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] +type _Array3D[ScalarT: np.generic] = np.ndarray[tuple[int, int, int], np.dtype[ScalarT]] +type _ArrayMax2D[ScalarT: np.generic] = np.ndarray[tuple[int] | tuple[int, int], np.dtype[ScalarT]] +# workaround for mypy and pyright not following the typing spec for overloads +type _ArrayNoD[ScalarT: np.generic] = np.ndarray[tuple[Never, Never, Never, Never], np.dtype[ScalarT]] -_T = TypeVar("_T") -_T_co = TypeVar("_T_co", covariant=True) -_SCT = TypeVar("_SCT", bound=generic) -_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) +type _Seq1D[T] = Sequence[T] +type _Seq2D[T] = Sequence[Sequence[T]] +type _Seq3D[T] = Sequence[Sequence[Sequence[T]]] +type _ListSeqND[T] = list[T] | _SeqND[list[T]] -_2Tuple = tuple[_T, _T] +type _Tuple2[T] = tuple[T, T] +type _Tuple3[T] = tuple[T, T, T] +type _Tuple4[T] = tuple[T, T, T, T] -class _TrimZerosSequence(Protocol[_T_co]): +type _Mesh1[ScalarT: np.generic] = tuple[_Array1D[ScalarT]] +type _Mesh2[ScalarT: np.generic, ScalarT1: np.generic] = tuple[_Array2D[ScalarT], _Array2D[ScalarT1]] +type _Mesh3[ScalarT: np.generic, ScalarT1: np.generic, ScalarT2: np.generic] = tuple[ + _Array3D[ScalarT], _Array3D[ScalarT1], _Array3D[ScalarT2] +] + +type _IndexLike = slice | _ArrayLikeInt_co + +type _Indexing = L["ij", "xy"] +type 
_InterpolationMethod = L[ + "inverted_cdf", + "averaged_inverted_cdf", + "closest_observation", + "interpolated_inverted_cdf", + "hazen", + "weibull", + "linear", + "median_unbiased", + "normal_unbiased", + "lower", + "higher", + "midpoint", + "nearest", +] + +# The resulting value will be used as `y[cond] = func(vals, *args, **kw)`, so in can +# return any (usually 1d) array-like or scalar-like compatible with the input. +type _PiecewiseFunction[ScalarT: np.generic, **Tss] = Callable[Concatenate[NDArray[ScalarT], Tss], ArrayLike] +type _PiecewiseFunctions[ScalarT: np.generic, **Tss] = _SizedIterable[_PiecewiseFunction[ScalarT, Tss] | _ScalarLike_co] + +@type_check_only +class _TrimZerosSequence[T](Protocol): + def __len__(self, /) -> int: ... + @overload + def __getitem__(self, key: int, /) -> object: ... + @overload + def __getitem__(self, key: slice, /) -> T: ... + +@type_check_only +class _SupportsRMulFloat[T](Protocol): + def __rmul__(self, other: float, /) -> T: ... + +@type_check_only +class _SizedIterable[T](Protocol): + def __iter__(self) -> Iterable[T]: ... def __len__(self) -> int: ... - def __getitem__(self, key: slice, /) -> _T_co: ... - def __iter__(self) -> Iterator[Any]: ... -class _SupportsWriteFlush(Protocol): - def write(self, s: str, /) -> object: ... - def flush(self) -> object: ... +### + +class vectorize: + __doc__: str | None + __module__: L["numpy"] = "numpy" # pyrefly: ignore[bad-override] + pyfunc: Callable[..., Incomplete] + cache: bool + signature: str | None + otypes: str | None + excluded: set[int | str] -__all__: list[str] + def __init__( + self, + /, + pyfunc: Callable[..., Incomplete] | _NoValueType = ..., # = _NoValue + otypes: str | Iterable[DTypeLike] | None = None, + doc: str | None = None, + excluded: Iterable[int | str] | None = None, + cache: bool = False, + signature: str | None = None, + ) -> None: ... + def __call__(self, /, *args: Incomplete, **kwargs: Incomplete) -> Incomplete: ... 
@overload -def rot90( - m: _ArrayLike[_SCT], - k: int = ..., - axes: tuple[int, int] = ..., -) -> NDArray[_SCT]: ... +def rot90[ArrayT: np.ndarray](m: ArrayT, k: int = 1, axes: tuple[int, int] = (0, 1)) -> ArrayT: ... @overload -def rot90( - m: ArrayLike, - k: int = ..., - axes: tuple[int, int] = ..., -) -> NDArray[Any]: ... - +def rot90[ScalarT: np.generic](m: _ArrayLike[ScalarT], k: int = 1, axes: tuple[int, int] = (0, 1)) -> NDArray[ScalarT]: ... @overload -def flip(m: _SCT, axis: None = ...) -> _SCT: ... +def rot90(m: ArrayLike, k: int = 1, axes: tuple[int, int] = (0, 1)) -> NDArray[Incomplete]: ... + +# NOTE: Technically `flip` also accept scalars, but that has no effect and complicates +# the overloads significantly, so we ignore that case here. @overload -def flip(m: _ScalarLike_co, axis: None = ...) -> Any: ... +def flip[ArrayT: np.ndarray](m: ArrayT, axis: int | tuple[int, ...] | None = None) -> ArrayT: ... @overload -def flip(m: _ArrayLike[_SCT], axis: None | _ShapeLike = ...) -> NDArray[_SCT]: ... +def flip[ScalarT: np.generic](m: _ArrayLike[ScalarT], axis: int | tuple[int, ...] | None = None) -> NDArray[ScalarT]: ... @overload -def flip(m: ArrayLike, axis: None | _ShapeLike = ...) -> NDArray[Any]: ... +def flip(m: ArrayLike, axis: int | tuple[int, ...] | None = None) -> NDArray[Incomplete]: ... -def iterable(y: object) -> TypeGuard[Iterable[Any]]: ... +# +def iterable(y: object) -> TypeIs[Iterable[Any]]: ... -@overload +# NOTE: This assumes that if `axis` is given the input is at least 2d, and will +# therefore always return an array. +# NOTE: This assumes that if `keepdims=True` the input is at least 1d, and will +# therefore always return an array. +@overload # inexact array, keepdims=True +def average[ArrayT: NDArray[np.inexact]]( + a: ArrayT, + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeNumber_co | None = None, + returned: L[False] = False, + *, + keepdims: L[True], +) -> ArrayT: ... 
+@overload # inexact array, returned=True keepdims=True +def average[ArrayT: NDArray[np.inexact]]( + a: ArrayT, + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeNumber_co | None = None, + *, + returned: L[True], + keepdims: L[True], +) -> _Tuple2[ArrayT]: ... +@overload # inexact array-like, axis=None +def average[ScalarT: np.inexact]( + a: _ArrayLike[ScalarT], + axis: None = None, + weights: _ArrayLikeNumber_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> ScalarT: ... +@overload # inexact array-like, axis= +def average[ScalarT: np.inexact]( + a: _ArrayLike[ScalarT], + axis: int | tuple[int, ...], + weights: _ArrayLikeNumber_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> NDArray[ScalarT]: ... +@overload # inexact array-like, keepdims=True +def average[ScalarT: np.inexact]( + a: _ArrayLike[ScalarT], + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeNumber_co | None = None, + returned: L[False] = False, + *, + keepdims: L[True], +) -> NDArray[ScalarT]: ... +@overload # inexact array-like, axis=None, returned=True +def average[ScalarT: np.inexact]( + a: _ArrayLike[ScalarT], + axis: None = None, + weights: _ArrayLikeNumber_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _Tuple2[ScalarT]: ... +@overload # inexact array-like, axis=, returned=True +def average[ScalarT: np.inexact]( + a: _ArrayLike[ScalarT], + axis: int | tuple[int, ...], + weights: _ArrayLikeNumber_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _Tuple2[NDArray[ScalarT]]: ... +@overload # inexact array-like, returned=True, keepdims=True +def average[ScalarT: np.inexact]( + a: _ArrayLike[ScalarT], + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeNumber_co | None = None, + *, + returned: L[True], + keepdims: L[True], +) -> _Tuple2[NDArray[ScalarT]]: ... 
+@overload # bool or integer array-like, axis=None def average( - a: _ArrayLikeFloat_co, - axis: None = ..., - weights: None | _ArrayLikeFloat_co= ..., - returned: L[False] = ..., - keepdims: L[False] = ..., -) -> floating[Any]: ... -@overload + a: _SeqND[float] | _ArrayLikeInt_co, + axis: None = None, + weights: _ArrayLikeFloat_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> np.float64: ... +@overload # bool or integer array-like, axis= def average( - a: _ArrayLikeComplex_co, - axis: None = ..., - weights: None | _ArrayLikeComplex_co = ..., - returned: L[False] = ..., - keepdims: L[False] = ..., -) -> complexfloating[Any, Any]: ... -@overload + a: _SeqND[float] | _ArrayLikeInt_co, + axis: int | tuple[int, ...], + weights: _ArrayLikeFloat_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> NDArray[np.float64]: ... +@overload # bool or integer array-like, keepdims=True def average( - a: _ArrayLikeObject_co, - axis: None = ..., - weights: None | Any = ..., - returned: L[False] = ..., - keepdims: L[False] = ..., -) -> Any: ... -@overload + a: _SeqND[float] | _ArrayLikeInt_co, + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeFloat_co | None = None, + returned: L[False] = False, + *, + keepdims: L[True], +) -> NDArray[np.float64]: ... +@overload # bool or integer array-like, axis=None, returned=True def average( - a: _ArrayLikeFloat_co, - axis: None = ..., - weights: None | _ArrayLikeFloat_co= ..., - returned: L[True] = ..., - keepdims: L[False] = ..., -) -> _2Tuple[floating[Any]]: ... -@overload + a: _SeqND[float] | _ArrayLikeInt_co, + axis: None = None, + weights: _ArrayLikeFloat_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _Tuple2[np.float64]: ... 
+@overload # bool or integer array-like, axis=, returned=True def average( - a: _ArrayLikeComplex_co, - axis: None = ..., - weights: None | _ArrayLikeComplex_co = ..., - returned: L[True] = ..., - keepdims: L[False] = ..., -) -> _2Tuple[complexfloating[Any, Any]]: ... -@overload + a: _SeqND[float] | _ArrayLikeInt_co, + axis: int | tuple[int, ...], + weights: _ArrayLikeFloat_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _Tuple2[NDArray[np.float64]]: ... +@overload # bool or integer array-like, returned=True, keepdims=True def average( - a: _ArrayLikeObject_co, - axis: None = ..., - weights: None | Any = ..., - returned: L[True] = ..., - keepdims: L[False] = ..., -) -> _2Tuple[Any]: ... -@overload + a: _SeqND[float] | _ArrayLikeInt_co, + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeFloat_co | None = None, + *, + returned: L[True], + keepdims: L[True], +) -> _Tuple2[NDArray[np.float64]]: ... +@overload # complex array-like, axis=None +def average( + a: _ListSeqND[complex], + axis: None = None, + weights: _ArrayLikeComplex_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> np.complex128: ... +@overload # complex array-like, axis= def average( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., - weights: None | Any = ..., - returned: L[False] = ..., - keepdims: bool = ..., + a: _ListSeqND[complex], + axis: int | tuple[int, ...], + weights: _ArrayLikeComplex_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> NDArray[np.complex128]: ... +@overload # complex array-like, keepdims=True +def average( + a: _ListSeqND[complex], + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeComplex_co | None = None, + returned: L[False] = False, + *, + keepdims: L[True], +) -> NDArray[np.complex128]: ... 
+@overload # complex array-like, axis=None, returned=True +def average( + a: _ListSeqND[complex], + axis: None = None, + weights: _ArrayLikeComplex_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _Tuple2[np.complex128]: ... +@overload # complex array-like, axis=, returned=True +def average( + a: _ListSeqND[complex], + axis: int | tuple[int, ...], + weights: _ArrayLikeComplex_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _Tuple2[NDArray[np.complex128]]: ... +@overload # complex array-like, keepdims=True, returned=True +def average( + a: _ListSeqND[complex], + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeComplex_co | None = None, + *, + returned: L[True], + keepdims: L[True], +) -> _Tuple2[NDArray[np.complex128]]: ... +@overload # unknown, axis=None +def average( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + axis: None = None, + weights: _ArrayLikeNumber_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., ) -> Any: ... -@overload +@overload # unknown, axis= def average( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., - weights: None | Any = ..., - returned: L[True] = ..., - keepdims: bool = ..., -) -> _2Tuple[Any]: ... - -@overload -def asarray_chkfinite( - a: _ArrayLike[_SCT], - dtype: None = ..., - order: _OrderKACF = ..., -) -> NDArray[_SCT]: ... -@overload -def asarray_chkfinite( - a: object, - dtype: None = ..., - order: _OrderKACF = ..., -) -> NDArray[Any]: ... -@overload -def asarray_chkfinite( - a: Any, - dtype: _DTypeLike[_SCT], - order: _OrderKACF = ..., -) -> NDArray[_SCT]: ... -@overload -def asarray_chkfinite( - a: Any, - dtype: DTypeLike, - order: _OrderKACF = ..., -) -> NDArray[Any]: ... 
- -# TODO: Use PEP 612 `ParamSpec` once mypy supports `Concatenate` -# xref python/mypy#8645 -@overload -def piecewise( - x: _ArrayLike[_SCT], + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + axis: int | tuple[int, ...], + weights: _ArrayLikeNumber_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> np.ndarray: ... +@overload # unknown, keepdims=True +def average( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeNumber_co | None = None, + returned: L[False] = False, + *, + keepdims: L[True], +) -> np.ndarray: ... +@overload # unknown, axis=None, returned=True +def average( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + axis: None = None, + weights: _ArrayLikeNumber_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _Tuple2[Any]: ... +@overload # unknown, axis=, returned=True +def average( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + axis: int | tuple[int, ...], + weights: _ArrayLikeNumber_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _Tuple2[np.ndarray]: ... +@overload # unknown, returned=True, keepdims=True +def average( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeNumber_co | None = None, + *, + returned: L[True], + keepdims: L[True], +) -> _Tuple2[np.ndarray]: ... + +# +@overload +def asarray_chkfinite[ArrayT: np.ndarray](a: ArrayT, dtype: None = None, order: _OrderKACF = None) -> ArrayT: ... +@overload +def asarray_chkfinite[ShapeT: _Shape, ScalarT: np.generic]( + a: np.ndarray[ShapeT], dtype: _DTypeLike[ScalarT], order: _OrderKACF = None +) -> _Array[ShapeT, ScalarT]: ... +@overload +def asarray_chkfinite[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], dtype: None = None, order: _OrderKACF = None +) -> NDArray[ScalarT]: ... 
+@overload +def asarray_chkfinite[ScalarT: np.generic]( + a: object, dtype: _DTypeLike[ScalarT], order: _OrderKACF = None +) -> NDArray[ScalarT]: ... +@overload +def asarray_chkfinite(a: object, dtype: DTypeLike | None = None, order: _OrderKACF = None) -> NDArray[Incomplete]: ... + +# NOTE: Contrary to the documentation, scalars are also accepted and treated as +# `[condlist]`. And even though the documentation says these should be boolean, in +# practice anything that `np.array(condlist, dtype=bool)` accepts will work, i.e. any +# array-like. +@overload +def piecewise[ShapeT: _Shape, ScalarT: np.generic, **Tss]( + x: _Array[ShapeT, ScalarT], + condlist: ArrayLike, + funclist: _PiecewiseFunctions[Any, Tss], + *args: Tss.args, + **kw: Tss.kwargs, +) -> _Array[ShapeT, ScalarT]: ... +@overload +def piecewise[ScalarT: np.generic, **Tss]( + x: _ArrayLike[ScalarT], condlist: ArrayLike, - funclist: Sequence[Any | Callable[..., Any]], - *args: Any, - **kw: Any, -) -> NDArray[_SCT]: ... + funclist: _PiecewiseFunctions[Any, Tss], + *args: Tss.args, + **kw: Tss.kwargs, +) -> NDArray[ScalarT]: ... @overload -def piecewise( +def piecewise[ScalarT: np.generic, **Tss]( x: ArrayLike, condlist: ArrayLike, - funclist: Sequence[Any | Callable[..., Any]], - *args: Any, - **kw: Any, -) -> NDArray[Any]: ... + funclist: _PiecewiseFunctions[ScalarT, Tss], + *args: Tss.args, + **kw: Tss.kwargs, +) -> NDArray[ScalarT]: ... +# NOTE: condition is usually boolean, but anything with zero/non-zero semantics works +@overload +def extract[ScalarT: np.generic](condition: ArrayLike, arr: _ArrayLike[ScalarT]) -> _Array1D[ScalarT]: ... +@overload +def extract(condition: ArrayLike, arr: _SeqND[bool]) -> _Array1D[np.bool]: ... +@overload +def extract(condition: ArrayLike, arr: _ListSeqND[int]) -> _Array1D[np.int_]: ... +@overload +def extract(condition: ArrayLike, arr: _ListSeqND[float]) -> _Array1D[np.float64]: ... 
+@overload +def extract(condition: ArrayLike, arr: _ListSeqND[complex]) -> _Array1D[np.complex128]: ... +@overload +def extract(condition: ArrayLike, arr: _SeqND[bytes]) -> _Array1D[np.bytes_]: ... +@overload +def extract(condition: ArrayLike, arr: _SeqND[str]) -> _Array1D[np.str_]: ... +@overload +def extract(condition: ArrayLike, arr: ArrayLike) -> _Array1D[Incomplete]: ... + +# NOTE: unlike `extract`, passing non-boolean conditions for `condlist` will raise an +# error at runtime +@overload +def select[ArrayT: np.ndarray]( + condlist: _SizedIterable[_ArrayLikeBool_co], + choicelist: Sequence[ArrayT], + default: ArrayLike = 0, +) -> ArrayT: ... +@overload +def select[ScalarT: np.generic]( + condlist: _SizedIterable[_ArrayLikeBool_co], + choicelist: Sequence[_ArrayLike[ScalarT]] | NDArray[ScalarT], + default: ArrayLike = 0, +) -> NDArray[ScalarT]: ... +@overload def select( - condlist: Sequence[ArrayLike], + condlist: _SizedIterable[_ArrayLikeBool_co], choicelist: Sequence[ArrayLike], - default: ArrayLike = ..., -) -> NDArray[Any]: ... + default: ArrayLike = 0, +) -> np.ndarray: ... +# keep roughly in sync with `ma.core.copy` @overload -def copy( - a: _ArrayType, - order: _OrderKACF, - subok: L[True], -) -> _ArrayType: ... +def copy[ArrayT: np.ndarray](a: ArrayT, order: _OrderKACF, subok: L[True]) -> ArrayT: ... @overload -def copy( - a: _ArrayType, - order: _OrderKACF = ..., - *, - subok: L[True], -) -> _ArrayType: ... +def copy[ArrayT: np.ndarray](a: ArrayT, order: _OrderKACF = "K", *, subok: L[True]) -> ArrayT: ... @overload -def copy( - a: _ArrayLike[_SCT], - order: _OrderKACF = ..., - subok: L[False] = ..., -) -> NDArray[_SCT]: ... +def copy[ScalarT: np.generic](a: _ArrayLike[ScalarT], order: _OrderKACF = "K", subok: L[False] = False) -> NDArray[ScalarT]: ... @overload -def copy( - a: ArrayLike, - order: _OrderKACF = ..., - subok: L[False] = ..., -) -> NDArray[Any]: ... 
+def copy(a: ArrayLike, order: _OrderKACF = "K", subok: L[False] = False) -> NDArray[Incomplete]: ... +# +@overload # ?d, known inexact scalar-type +def gradient[ScalarT: np.inexact | np.timedelta64]( + f: _ArrayNoD[ScalarT], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, + # `| Any` instead of ` | tuple` is returned to avoid several mypy_primer errors +) -> _Array1D[ScalarT] | Any: ... +@overload # 1d, known inexact scalar-type +def gradient[ScalarT: np.inexact | np.timedelta64]( + f: _Array1D[ScalarT], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Array1D[ScalarT]: ... +@overload # 2d, known inexact scalar-type +def gradient[ScalarT: np.inexact | np.timedelta64]( + f: _Array2D[ScalarT], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh2[ScalarT, ScalarT]: ... +@overload # 3d, known inexact scalar-type +def gradient[ScalarT: np.inexact | np.timedelta64]( + f: _Array3D[ScalarT], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh3[ScalarT, ScalarT, ScalarT]: ... +@overload # ?d, datetime64 scalar-type +def gradient( + f: _ArrayNoD[np.datetime64], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Array1D[np.timedelta64] | tuple[NDArray[np.timedelta64], ...]: ... +@overload # 1d, datetime64 scalar-type +def gradient( + f: _Array1D[np.datetime64], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Array1D[np.timedelta64]: ... +@overload # 2d, datetime64 scalar-type +def gradient( + f: _Array2D[np.datetime64], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh2[np.timedelta64, np.timedelta64]: ... 
+@overload # 3d, datetime64 scalar-type +def gradient( + f: _Array3D[np.datetime64], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh3[np.timedelta64, np.timedelta64, np.timedelta64]: ... +@overload # 1d float-like +def gradient( + f: _Seq1D[float], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Array1D[np.float64]: ... +@overload # 2d float-like +def gradient( + f: _Seq2D[float], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh2[np.float64, np.float64]: ... +@overload # 3d float-like +def gradient( + f: _Seq3D[float], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh3[np.float64, np.float64, np.float64]: ... +@overload # 1d complex-like (the `list` avoids overlap with the float-like overload) +def gradient( + f: list[complex], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Array1D[np.complex128]: ... +@overload # 2d float-like +def gradient( + f: _Seq1D[list[complex]], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh2[np.complex128, np.complex128]: ... +@overload # 3d float-like +def gradient( + f: _Seq2D[list[complex]], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh3[np.complex128, np.complex128, np.complex128]: ... +@overload # fallback def gradient( f: ArrayLike, - *varargs: ArrayLike, - axis: None | _ShapeLike = ..., - edge_order: L[1, 2] = ..., -) -> Any: ... + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> Incomplete: ... 
-@overload -def diff( - a: _T, +# keep in sync with `ma.core.diff` +@overload # n == 0; return input unchanged +def diff[T]( + a: T, n: L[0], - axis: SupportsIndex = ..., - prepend: ArrayLike = ..., - append: ArrayLike = ..., -) -> _T: ... -@overload + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., # = _NoValue + append: ArrayLike | _NoValueType = ..., # = _NoValue +) -> T: ... +@overload # known array-type +def diff[ArrayT: NDArray[_ScalarNumeric]]( + a: ArrayT, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> ArrayT: ... +@overload # known shape, datetime64 +def diff[ShapeT: _Shape]( + a: _Array[ShapeT, np.datetime64], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Array[ShapeT, np.timedelta64]: ... +@overload # unknown shape, known scalar-type +def diff[ScalarT: _ScalarNumeric]( + a: _ArrayLike[ScalarT], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> NDArray[ScalarT]: ... +@overload # unknown shape, datetime64 +def diff( + a: _ArrayLike[np.datetime64], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> NDArray[np.timedelta64]: ... +@overload # 1d int +def diff( + a: _Seq1D[int], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Array1D[np.int_]: ... +@overload # 2d int +def diff( + a: _Seq2D[int], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Array2D[np.int_]: ... 
+@overload # 1d float (the `list` avoids overlap with the `int` overloads) +def diff( + a: list[float], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Array1D[np.float64]: ... +@overload # 2d float +def diff( + a: _Seq1D[list[float]], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Array2D[np.float64]: ... +@overload # 1d complex (the `list` avoids overlap with the `int` overloads) +def diff( + a: list[complex], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Array1D[np.complex128]: ... +@overload # 2d complex +def diff( + a: _Seq1D[list[complex]], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Array2D[np.complex128]: ... +@overload # unknown shape, unknown scalar-type def diff( a: ArrayLike, - n: int = ..., - axis: SupportsIndex = ..., - prepend: ArrayLike = ..., - append: ArrayLike = ..., -) -> NDArray[Any]: ... + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> NDArray[Incomplete]: ... -@overload +# +@overload # float scalar +def interp( + x: _FloatLike_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeFloat_co, + left: _FloatLike_co | None = None, + right: _FloatLike_co | None = None, + period: _FloatLike_co | None = None, +) -> np.float64: ... +@overload # complex scalar +def interp( + x: _FloatLike_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLike1D[np.complexfloating] | list[complex], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> np.complex128: ... 
+@overload # float array +def interp[ShapeT: _Shape]( + x: _Array[ShapeT, _floating_co], + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeFloat_co, + left: _FloatLike_co | None = None, + right: _FloatLike_co | None = None, + period: _FloatLike_co | None = None, +) -> _Array[ShapeT, np.float64]: ... +@overload # complex array +def interp[ShapeT: _Shape]( + x: _Array[ShapeT, _floating_co], + xp: _ArrayLikeFloat_co, + fp: _ArrayLike1D[np.complexfloating] | list[complex], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> _Array[ShapeT, np.complex128]: ... +@overload # float sequence +def interp( + x: _Seq1D[_FloatLike_co], + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeFloat_co, + left: _FloatLike_co | None = None, + right: _FloatLike_co | None = None, + period: _FloatLike_co | None = None, +) -> _Array1D[np.float64]: ... +@overload # complex sequence +def interp( + x: _Seq1D[_FloatLike_co], + xp: _ArrayLikeFloat_co, + fp: _ArrayLike1D[np.complexfloating] | list[complex], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> _Array1D[np.complex128]: ... +@overload # float array-like +def interp( + x: _SeqND[_FloatLike_co], + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeFloat_co, + left: _FloatLike_co | None = None, + right: _FloatLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[np.float64]: ... +@overload # complex array-like +def interp( + x: _SeqND[_FloatLike_co], + xp: _ArrayLikeFloat_co, + fp: _ArrayLike1D[np.complexfloating] | list[complex], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[np.complex128]: ... 
+@overload # float scalar/array-like def interp( x: _ArrayLikeFloat_co, xp: _ArrayLikeFloat_co, fp: _ArrayLikeFloat_co, - left: None | _FloatLike_co = ..., - right: None | _FloatLike_co = ..., - period: None | _FloatLike_co = ..., -) -> NDArray[float64]: ... -@overload + left: _FloatLike_co | None = None, + right: _FloatLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[np.float64] | np.float64: ... +@overload # complex scalar/array-like +def interp( + x: _ArrayLikeFloat_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLike1D[np.complexfloating], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[np.complex128] | np.complex128: ... +@overload # float/complex scalar/array-like def interp( x: _ArrayLikeFloat_co, xp: _ArrayLikeFloat_co, - fp: _ArrayLikeComplex_co, - left: None | _ComplexLike_co = ..., - right: None | _ComplexLike_co = ..., - period: None | _FloatLike_co = ..., -) -> NDArray[complex128]: ... + fp: _ArrayLikeNumber_co, + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[np.complex128 | np.float64] | np.complex128 | np.float64: ... -@overload -def angle(z: _ComplexLike_co, deg: bool = ...) -> floating[Any]: ... -@overload -def angle(z: object_, deg: bool = ...) -> Any: ... -@overload -def angle(z: _ArrayLikeComplex_co, deg: bool = ...) -> NDArray[floating[Any]]: ... -@overload -def angle(z: _ArrayLikeObject_co, deg: bool = ...) -> NDArray[object_]: ... +# +@overload # 0d T: floating -> 0d T +def angle[FloatingT: np.floating](z: FloatingT, deg: bool = False) -> FloatingT: ... +@overload # 0d complex | float | ~integer -> 0d float64 +def angle(z: complex | _integer_co, deg: bool = False) -> np.float64: ... +@overload # 0d complex64 -> 0d float32 +def angle(z: np.complex64, deg: bool = False) -> np.float32: ... 
+@overload  # 0d clongdouble -> 0d longdouble
+def angle(z: np.clongdouble, deg: bool = False) -> np.longdouble: ...
+@overload  # T: nd floating -> T
+def angle[ArrayFloatingT: NDArray[np.floating]](z: ArrayFloatingT, deg: bool = False) -> ArrayFloatingT: ...
+@overload  # nd T: complex128 | ~integer -> nd float64
+def angle[ShapeT: _Shape](z: _Array[ShapeT, np.complex128 | _integer_co], deg: bool = False) -> _Array[ShapeT, np.float64]: ...
+@overload  # nd T: complex64 -> nd float32
+def angle[ShapeT: _Shape](z: _Array[ShapeT, np.complex64], deg: bool = False) -> _Array[ShapeT, np.float32]: ...
+@overload  # nd T: clongdouble -> nd longdouble
+def angle[ShapeT: _Shape](z: _Array[ShapeT, np.clongdouble], deg: bool = False) -> _Array[ShapeT, np.longdouble]: ...
+@overload  # 1d complex -> 1d float64
+def angle(z: _Seq1D[complex], deg: bool = False) -> _Array1D[np.float64]: ...
+@overload  # 2d complex -> 2d float64
+def angle(z: _Seq2D[complex], deg: bool = False) -> _Array2D[np.float64]: ...
+@overload  # 3d complex -> 3d float64
+def angle(z: _Seq3D[complex], deg: bool = False) -> _Array3D[np.float64]: ...
+@overload  # fallback
+def angle(z: _ArrayLikeComplex_co, deg: bool = False) -> NDArray[np.floating] | Any: ...
-@overload
+#
+@overload  # known array-type
+def unwrap[ArrayT: NDArray[np.floating | np.object_]](
+    p: ArrayT,
+    discont: float | None = None,
+    axis: int = -1,
+    *,
+    period: float = ...,  # = 2 * pi
+) -> ArrayT: ...
+@overload  # known shape, float64
+def unwrap[ShapeT: _Shape](
+    p: _Array[ShapeT, _float64_co],
+    discont: float | None = None,
+    axis: int = -1,
+    *,
+    period: float = ...,  # = 2 * pi
+) -> _Array[ShapeT, np.float64]: ...
+@overload  # 1d float64-like
 def unwrap(
-    p: _ArrayLikeFloat_co,
-    discont: None | float = ...,
-    axis: int = ...,
+    p: _Seq1D[float | _float64_co],
+    discont: float | None = None,
+    axis: int = -1,
     *,
-    period: float = ...,
-) -> NDArray[floating[Any]]: ...
-@overload
+    period: float = ...,  # = 2 * pi
+) -> _Array1D[np.float64]: ...
+@overload  # 2d float64-like
 def unwrap(
-    p: _ArrayLikeObject_co,
-    discont: None | float = ...,
-    axis: int = ...,
+    p: _Seq2D[float | _float64_co],
+    discont: float | None = None,
+    axis: int = -1,
     *,
-    period: float = ...,
-) -> NDArray[object_]: ...
-
-def sort_complex(a: ArrayLike) -> NDArray[complexfloating[Any, Any]]: ...
-
-def trim_zeros(
-    filt: _TrimZerosSequence[_T],
-    trim: L["f", "b", "fb", "bf"] = ...,
-) -> _T: ...
-
-@overload
-def extract(condition: ArrayLike, arr: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
-@overload
-def extract(condition: ArrayLike, arr: ArrayLike) -> NDArray[Any]: ...
+    period: float = ...,  # = 2 * pi
+) -> _Array2D[np.float64]: ...
+@overload  # 3d float64-like
+def unwrap(
+    p: _Seq3D[float | _float64_co],
+    discont: float | None = None,
+    axis: int = -1,
+    *,
+    period: float = ...,  # = 2 * pi
+) -> _Array3D[np.float64]: ...
+@overload  # ?d, float64
+def unwrap(
+    p: _SeqND[float] | _ArrayLike[_float64_co],
+    discont: float | None = None,
+    axis: int = -1,
+    *,
+    period: float = ...,  # = 2 * pi
+) -> NDArray[np.float64]: ...
+@overload  # fallback
+def unwrap(
+    p: _ArrayLikeFloat_co | _ArrayLikeObject_co,
+    discont: float | None = None,
+    axis: int = -1,
+    *,
+    period: float = ...,  # = 2 * pi
+) -> np.ndarray: ...
-def place(arr: NDArray[Any], mask: ArrayLike, vals: Any) -> None: ...
+#
+@overload
+def sort_complex[ArrayT: NDArray[np.complexfloating]](a: ArrayT) -> ArrayT: ...
+@overload  # complex64, shape known
+def sort_complex[ShapeT: _Shape](a: _Array[ShapeT, np.int8 | np.uint8 | np.int16 | np.uint16]) -> _Array[ShapeT, np.complex64]: ...
+@overload  # complex64, shape unknown
+def sort_complex(a: _ArrayLike[np.int8 | np.uint8 | np.int16 | np.uint16]) -> NDArray[np.complex64]: ...
+@overload  # complex128, shape known
+def sort_complex[ShapeT: _Shape](a: _Array[ShapeT, _SortsToComplex128]) -> _Array[ShapeT, np.complex128]: ...
+@overload # complex128, shape unknown +def sort_complex(a: _ArrayLike[_SortsToComplex128]) -> NDArray[np.complex128]: ... +@overload # clongdouble, shape known +def sort_complex[ShapeT: _Shape](a: _Array[ShapeT, np.longdouble]) -> _Array[ShapeT, np.clongdouble]: ... +@overload # clongdouble, shape unknown +def sort_complex(a: _ArrayLike[np.longdouble]) -> NDArray[np.clongdouble]: ... -def disp( - mesg: object, - device: None | _SupportsWriteFlush = ..., - linefeed: bool = ..., -) -> None: ... +# +def trim_zeros[T](filt: _TrimZerosSequence[T], trim: L["f", "b", "fb", "bf"] = "fb", axis: _ShapeLike | None = None) -> T: ... -@overload +# NOTE: keep in sync with `corrcoef` +@overload # ?d, known inexact scalar-type >=64 precision, y=. +def cov[ScalarT: _InexactDouble]( + m: _ArrayLike[ScalarT], + y: _ArrayLike[ScalarT], + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: None = None, +) -> _Array2D[ScalarT]: ... +@overload # ?d, known inexact scalar-type >=64 precision, y=None -> 0d or 2d +def cov[ScalarT: _InexactDouble]( + m: _ArrayNoD[ScalarT], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[ScalarT] | None = None, +) -> NDArray[ScalarT]: ... +@overload # 1d, known inexact scalar-type >=64 precision, y=None +def cov[ScalarT: _InexactDouble]( + m: _Array1D[ScalarT], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[ScalarT] | None = None, +) -> _Array0D[ScalarT]: ... 
+@overload # nd, known inexact scalar-type >=64 precision, y=None -> 0d or 2d +def cov[ScalarT: _InexactDouble]( + m: _ArrayLike[ScalarT], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[ScalarT] | None = None, +) -> NDArray[ScalarT]: ... +@overload # nd, casts to float64, y= def cov( - m: _ArrayLikeFloat_co, - y: None | _ArrayLikeFloat_co = ..., - rowvar: bool = ..., - bias: bool = ..., - ddof: None | SupportsIndex | SupportsInt = ..., - fweights: None | ArrayLike = ..., - aweights: None | ArrayLike = ..., + m: NDArray[np.float32 | np.float16 | _integer_co] | _Seq1D[float] | _Seq2D[float], + y: NDArray[np.float32 | np.float16 | _integer_co] | _Seq1D[float] | _Seq2D[float], + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, *, - dtype: None = ..., -) -> NDArray[floating[Any]]: ... -@overload + dtype: _DTypeLike[np.float64] | None = None, +) -> _Array2D[np.float64]: ... +@overload # ?d or 2d, casts to float64, y=None -> 0d or 2d +def cov( + m: _ArrayNoD[np.float32 | np.float16 | _integer_co] | _Seq2D[float], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[np.float64] | None = None, +) -> NDArray[np.float64]: ... 
+@overload # 1d, casts to float64, y=None +def cov( + m: _Array1D[np.float32 | np.float16 | _integer_co] | _Seq1D[float], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[np.float64] | None = None, +) -> _Array0D[np.float64]: ... +@overload # nd, casts to float64, y=None -> 0d or 2d +def cov( + m: _ArrayLike[np.float32 | np.float16 | _integer_co], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[np.float64] | None = None, +) -> NDArray[np.float64]: ... +@overload # 1d complex, y= (`list` avoids overlap with float overloads) def cov( + m: list[complex] | _Seq1D[list[complex]], + y: list[complex] | _Seq1D[list[complex]], + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[np.complex128] | None = None, +) -> _Array2D[np.complex128]: ... +@overload # 1d complex, y=None +def cov( + m: list[complex], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[np.complex128] | None = None, +) -> _Array0D[np.complex128]: ... +@overload # 2d complex, y=None -> 0d or 2d +def cov( + m: _Seq1D[list[complex]], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[np.complex128] | None = None, +) -> NDArray[np.complex128]: ... 
+@overload # 1d complex-like, y=None, dtype= +def cov[ScalarT: np.generic]( + m: _Seq1D[_ComplexLike_co], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[ScalarT], +) -> _Array0D[ScalarT]: ... +@overload # nd complex-like, y=, dtype= +def cov[ScalarT: np.generic]( m: _ArrayLikeComplex_co, - y: None | _ArrayLikeComplex_co = ..., - rowvar: bool = ..., - bias: bool = ..., - ddof: None | SupportsIndex | SupportsInt = ..., - fweights: None | ArrayLike = ..., - aweights: None | ArrayLike = ..., + y: _ArrayLikeComplex_co, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, *, - dtype: None = ..., -) -> NDArray[complexfloating[Any, Any]]: ... -@overload + dtype: _DTypeLike[ScalarT], +) -> _Array2D[ScalarT]: ... +@overload # nd complex-like, y=None, dtype= -> 0d or 2d +def cov[ScalarT: np.generic]( + m: _ArrayLikeComplex_co, + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[ScalarT], +) -> NDArray[ScalarT]: ... +@overload # nd complex-like, y=, dtype=? def cov( m: _ArrayLikeComplex_co, - y: None | _ArrayLikeComplex_co = ..., - rowvar: bool = ..., - bias: bool = ..., - ddof: None | SupportsIndex | SupportsInt = ..., - fweights: None | ArrayLike = ..., - aweights: None | ArrayLike = ..., + y: _ArrayLikeComplex_co, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, *, - dtype: _DTypeLike[_SCT], -) -> NDArray[_SCT]: ... 
-@overload + dtype: DTypeLike | None = None, +) -> _Array2D[Incomplete]: ... +@overload # 1d complex-like, y=None, dtype=? +def cov( + m: _Seq1D[_ComplexLike_co], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: DTypeLike | None = None, +) -> _Array0D[Incomplete]: ... +@overload # nd complex-like, dtype=? def cov( m: _ArrayLikeComplex_co, - y: None | _ArrayLikeComplex_co = ..., - rowvar: bool = ..., - bias: bool = ..., - ddof: None | SupportsIndex | SupportsInt = ..., - fweights: None | ArrayLike = ..., - aweights: None | ArrayLike = ..., + y: _ArrayLikeComplex_co | None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, *, - dtype: DTypeLike, -) -> NDArray[Any]: ... + dtype: DTypeLike | None = None, +) -> NDArray[Incomplete]: ... -# NOTE `bias` and `ddof` have been deprecated -@overload +# NOTE: If only `x` is given and the resulting array has shape (1,1), a bare scalar +# is returned instead of a 2D array. When y is given, a 2D array is always returned. +# This differs from `cov`, which returns 0-D arrays instead of scalars in such cases. +# NOTE: keep in sync with `cov` +@overload # ?d, known inexact scalar-type >=64 precision, y=. +def corrcoef[ScalarT: _InexactDouble]( + x: _ArrayLike[ScalarT], + y: _ArrayLike[ScalarT], + rowvar: bool = True, + *, + dtype: _DTypeLike[ScalarT] | None = None, +) -> _Array2D[ScalarT]: ... +@overload # ?d, known inexact scalar-type >=64 precision, y=None +def corrcoef[ScalarT: _InexactDouble]( + x: _ArrayNoD[ScalarT], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[ScalarT] | None = None, +) -> _Array2D[ScalarT] | ScalarT: ... 
+@overload # 1d, known inexact scalar-type >=64 precision, y=None +def corrcoef[ScalarT: _InexactDouble]( + x: _Array1D[ScalarT], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[ScalarT] | None = None, +) -> ScalarT: ... +@overload # nd, known inexact scalar-type >=64 precision, y=None +def corrcoef[ScalarT: _InexactDouble]( + x: _ArrayLike[ScalarT], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[ScalarT] | None = None, +) -> _Array2D[ScalarT] | ScalarT: ... +@overload # nd, casts to float64, y= def corrcoef( - m: _ArrayLikeFloat_co, - y: None | _ArrayLikeFloat_co = ..., - rowvar: bool = ..., + x: NDArray[np.float32 | np.float16 | _integer_co] | _Seq1D[float] | _Seq2D[float], + y: NDArray[np.float32 | np.float16 | _integer_co] | _Seq1D[float] | _Seq2D[float], + rowvar: bool = True, *, - dtype: None = ..., -) -> NDArray[floating[Any]]: ... -@overload + dtype: _DTypeLike[np.float64] | None = None, +) -> _Array2D[np.float64]: ... +@overload # ?d or 2d, casts to float64, y=None def corrcoef( - m: _ArrayLikeComplex_co, - y: None | _ArrayLikeComplex_co = ..., - rowvar: bool = ..., + x: _ArrayNoD[np.float32 | np.float16 | _integer_co] | _Seq2D[float], + y: None = None, + rowvar: bool = True, *, - dtype: None = ..., -) -> NDArray[complexfloating[Any, Any]]: ... -@overload + dtype: _DTypeLike[np.float64] | None = None, +) -> _Array2D[np.float64] | np.float64: ... +@overload # 1d, casts to float64, y=None def corrcoef( - m: _ArrayLikeComplex_co, - y: None | _ArrayLikeComplex_co = ..., - rowvar: bool = ..., + x: _Array1D[np.float32 | np.float16 | _integer_co] | _Seq1D[float], + y: None = None, + rowvar: bool = True, *, - dtype: _DTypeLike[_SCT], -) -> NDArray[_SCT]: ... -@overload + dtype: _DTypeLike[np.float64] | None = None, +) -> np.float64: ... 
+@overload # nd, casts to float64, y=None def corrcoef( - m: _ArrayLikeComplex_co, - y: None | _ArrayLikeComplex_co = ..., - rowvar: bool = ..., + x: _ArrayLike[np.float32 | np.float16 | _integer_co], + y: None = None, + rowvar: bool = True, *, - dtype: DTypeLike, -) -> NDArray[Any]: ... - -def blackman(M: _FloatLike_co) -> NDArray[floating[Any]]: ... - -def bartlett(M: _FloatLike_co) -> NDArray[floating[Any]]: ... - -def hanning(M: _FloatLike_co) -> NDArray[floating[Any]]: ... - -def hamming(M: _FloatLike_co) -> NDArray[floating[Any]]: ... - -def i0(x: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... + dtype: _DTypeLike[np.float64] | None = None, +) -> _Array2D[np.float64] | np.float64: ... +@overload # 1d complex, y= (`list` avoids overlap with float overloads) +def corrcoef( + x: list[complex] | _Seq1D[list[complex]], + y: list[complex] | _Seq1D[list[complex]], + rowvar: bool = True, + *, + dtype: _DTypeLike[np.complex128] | None = None, +) -> _Array2D[np.complex128]: ... +@overload # 1d complex, y=None +def corrcoef( + x: list[complex], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[np.complex128] | None = None, +) -> np.complex128: ... +@overload # 2d complex, y=None +def corrcoef( + x: _Seq1D[list[complex]], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[np.complex128] | None = None, +) -> _Array2D[np.complex128] | np.complex128: ... +@overload # 1d complex-like, y=None, dtype= +def corrcoef[ScalarT: np.generic]( + x: _Seq1D[_ComplexLike_co], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[ScalarT], +) -> ScalarT: ... +@overload # nd complex-like, y=, dtype= +def corrcoef[ScalarT: np.generic]( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + rowvar: bool = True, + *, + dtype: _DTypeLike[ScalarT], +) -> _Array2D[ScalarT]: ... 
+@overload # nd complex-like, y=None, dtype= +def corrcoef[ScalarT: np.generic]( + x: _ArrayLikeComplex_co, + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[ScalarT], +) -> _Array2D[ScalarT] | ScalarT: ... +@overload # nd complex-like, y=, dtype=? +def corrcoef( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + rowvar: bool = True, + *, + dtype: DTypeLike | None = None, +) -> _Array2D[Incomplete]: ... +@overload # 1d complex-like, y=None, dtype=? +def corrcoef( + x: _Seq1D[_ComplexLike_co], + y: None = None, + rowvar: bool = True, + *, + dtype: DTypeLike | None = None, +) -> Incomplete: ... +@overload # nd complex-like, dtype=? +def corrcoef( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co | None = None, + rowvar: bool = True, + *, + dtype: DTypeLike | None = None, +) -> _Array2D[Incomplete] | Incomplete: ... -def kaiser( - M: _FloatLike_co, - beta: _FloatLike_co, -) -> NDArray[floating[Any]]: ... +# note that floating `M` are accepted, but their fractional part is ignored +def blackman(M: _FloatLike_co) -> _Array1D[np.float64]: ... +def bartlett(M: _FloatLike_co) -> _Array1D[np.float64]: ... +def hanning(M: _FloatLike_co) -> _Array1D[np.float64]: ... +def hamming(M: _FloatLike_co) -> _Array1D[np.float64]: ... +def kaiser(M: _FloatLike_co, beta: _FloatLike_co) -> _Array1D[np.float64]: ... +# +@overload +def i0[ShapeT: _Shape](x: _Array[ShapeT, np.floating | np.integer]) -> _Array[ShapeT, np.float64]: ... @overload -def sinc(x: _FloatLike_co) -> floating[Any]: ... +def i0(x: _FloatLike_co) -> _Array0D[np.float64]: ... @overload -def sinc(x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +def i0(x: _Seq1D[_FloatLike_co]) -> _Array1D[np.float64]: ... @overload -def sinc(x: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... +def i0(x: _Seq2D[_FloatLike_co]) -> _Array2D[np.float64]: ... @overload -def sinc(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... +def i0(x: _Seq3D[_FloatLike_co]) -> _Array3D[np.float64]: ... 
+@overload +def i0(x: _ArrayLikeFloat_co) -> NDArray[np.float64]: ... +# @overload -def median( - a: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - keepdims: L[False] = ..., -) -> floating[Any]: ... +def sinc[ScalarT: np.inexact](x: ScalarT) -> ScalarT: ... @overload -def median( - a: _ArrayLikeComplex_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - keepdims: L[False] = ..., -) -> complexfloating[Any, Any]: ... +def sinc(x: float | _float64_co) -> np.float64: ... @overload -def median( - a: _ArrayLikeTD64_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - keepdims: L[False] = ..., -) -> timedelta64: ... +def sinc(x: complex) -> np.complex128 | Any: ... @overload -def median( - a: _ArrayLikeObject_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - keepdims: L[False] = ..., -) -> Any: ... +def sinc[ArrayT: NDArray[np.inexact]](x: ArrayT) -> ArrayT: ... @overload -def median( - a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., - out: None = ..., - overwrite_input: bool = ..., - keepdims: bool = ..., -) -> Any: ... +def sinc[ShapeT: _Shape](x: _Array[ShapeT, _integer_co]) -> _Array[ShapeT, np.float64]: ... +@overload +def sinc(x: _Seq1D[float]) -> _Array1D[np.float64]: ... +@overload +def sinc(x: _Seq2D[float]) -> _Array2D[np.float64]: ... +@overload +def sinc(x: _Seq3D[float]) -> _Array3D[np.float64]: ... +@overload +def sinc(x: _SeqND[float]) -> NDArray[np.float64]: ... @overload +def sinc(x: list[complex]) -> _Array1D[np.complex128]: ... +@overload +def sinc(x: _Seq1D[list[complex]]) -> _Array2D[np.complex128]: ... +@overload +def sinc(x: _Seq2D[list[complex]]) -> _Array3D[np.complex128]: ... +@overload +def sinc(x: _ArrayLikeComplex_co) -> np.ndarray | Any: ... 
+ +# NOTE: We assume that `axis` is only provided for >=1-D arrays because for <1-D arrays +# it has no effect, and would complicate the overloads significantly. +@overload # known scalar-type, keepdims=False (default) +def median[ScalarT: np.inexact | np.timedelta64]( + a: _ArrayLike[ScalarT], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: L[False] = False, +) -> ScalarT: ... +@overload # float array-like, keepdims=False (default) def median( - a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., - out: _ArrayType = ..., - overwrite_input: bool = ..., - keepdims: bool = ..., -) -> _ArrayType: ... - -_MethodKind = L[ - "inverted_cdf", - "averaged_inverted_cdf", - "closest_observation", - "interpolated_inverted_cdf", - "hazen", - "weibull", - "linear", - "median_unbiased", - "normal_unbiased", - "lower", - "higher", - "midpoint", - "nearest", -] + a: _ArrayLikeInt_co | _SeqND[float] | float, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: L[False] = False, +) -> np.float64: ... +@overload # complex array-like, keepdims=False (default) +def median( + a: _ListSeqND[complex], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: L[False] = False, +) -> np.complex128: ... +@overload # complex scalar, keepdims=False (default) +def median( + a: complex, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: L[False] = False, +) -> np.complex128 | Any: ... +@overload # known array-type, keepdims=True +def median[ArrayT: NDArray[_ScalarNumeric]]( + a: ArrayT, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + *, + keepdims: L[True], +) -> ArrayT: ... 
+@overload # known scalar-type, keepdims=True +def median[ScalarT: _ScalarNumeric]( + a: _ArrayLike[ScalarT], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + *, + keepdims: L[True], +) -> NDArray[ScalarT]: ... +@overload # known scalar-type, axis= +def median[ScalarT: _ScalarNumeric]( + a: _ArrayLike[ScalarT], + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + keepdims: bool = False, +) -> NDArray[ScalarT]: ... +@overload # float array-like, keepdims=True +def median( + a: _SeqND[float], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + *, + keepdims: L[True], +) -> NDArray[np.float64]: ... +@overload # float array-like, axis= +def median( + a: _SeqND[float], + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + keepdims: bool = False, +) -> NDArray[np.float64]: ... +@overload # complex array-like, keepdims=True +def median( + a: _ListSeqND[complex], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + *, + keepdims: L[True], +) -> NDArray[np.complex128]: ... +@overload # complex array-like, axis= +def median( + a: _ListSeqND[complex], + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + keepdims: bool = False, +) -> NDArray[np.complex128]: ... +@overload # out= (keyword) +def median[ArrayT: np.ndarray]( + a: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], + axis: _ShapeLike | None = None, + *, + out: ArrayT, + overwrite_input: bool = False, + keepdims: bool = False, +) -> ArrayT: ... +@overload # out= (positional) +def median[ArrayT: np.ndarray]( + a: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], + axis: _ShapeLike | None, + out: ArrayT, + overwrite_input: bool = False, + keepdims: bool = False, +) -> ArrayT: ... 
+@overload # fallback +def median( + a: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: bool = False, +) -> Incomplete: ... -@overload +# NOTE: keep in sync with `quantile` +@overload # inexact, scalar, axis=None +def percentile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], + q: _FloatLike_co, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> ScalarT: ... +@overload # inexact, scalar, axis= +def percentile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], + q: _FloatLike_co, + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[ScalarT]: ... +@overload # inexact, scalar, keepdims=True +def percentile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], + q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[ScalarT]: ... +@overload # inexact, array, axis=None +def percentile[ScalarT: np.inexact | np.timedelta64 | np.datetime64, ShapeT: _Shape]( + a: _ArrayLike[ScalarT], + q: _Array[ShapeT, _floating_co], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> _Array[ShapeT, ScalarT]: ... 
+@overload # inexact, array-like +def percentile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[ScalarT]: ... +@overload # float, scalar, axis=None def percentile( - a: _ArrayLikeFloat_co, + a: _SeqND[float] | _ArrayLikeInt_co, q: _FloatLike_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, *, - weights: None | _ArrayLikeFloat_co = ..., -) -> floating[Any]: ... -@overload + weights: _ArrayLikeFloat_co | None = None, +) -> np.float64: ... +@overload # float, scalar, axis= def percentile( - a: _ArrayLikeComplex_co, + a: _SeqND[float] | _ArrayLikeInt_co, q: _FloatLike_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, *, - weights: None | _ArrayLikeFloat_co = ..., -) -> complexfloating[Any, Any]: ... -@overload + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.float64]: ... 
+@overload # float, scalar, keepdims=True def percentile( - a: _ArrayLikeTD64_co, + a: _SeqND[float] | _ArrayLikeInt_co, q: _FloatLike_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", *, - weights: None | _ArrayLikeFloat_co = ..., -) -> timedelta64: ... -@overload + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.float64]: ... +@overload # float, array, axis=None +def percentile[ShapeT: _Shape]( + a: _SeqND[float] | _ArrayLikeInt_co, + q: _Array[ShapeT, _floating_co], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> _Array[ShapeT, np.float64]: ... +@overload # float, array-like +def percentile( + a: _SeqND[float] | _ArrayLikeInt_co, + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.float64]: ... +@overload # complex, scalar, axis=None def percentile( - a: _ArrayLikeDT64_co, + a: _ListSeqND[complex], q: _FloatLike_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, *, - weights: None | _ArrayLikeFloat_co = ..., -) -> datetime64: ... -@overload + weights: _ArrayLikeFloat_co | None = None, +) -> np.complex128: ... 
+@overload # complex, scalar, axis= +def percentile( + a: _ListSeqND[complex], + q: _FloatLike_co, + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.complex128]: ... +@overload # complex, scalar, keepdims=True +def percentile( + a: _ListSeqND[complex], + q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.complex128]: ... +@overload # complex, array, axis=None +def percentile[ShapeT: _Shape]( + a: _ListSeqND[complex], + q: _Array[ShapeT, _floating_co], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> _Array[ShapeT, np.complex128]: ... +@overload # complex, array-like +def percentile( + a: _ListSeqND[complex], + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.complex128]: ... +@overload # object_, scalar, axis=None def percentile( a: _ArrayLikeObject_co, q: _FloatLike_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, *, - weights: None | _ArrayLikeFloat_co = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> Any: ... 
-@overload +@overload # object_, scalar, axis= def percentile( - a: _ArrayLikeFloat_co, - q: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + a: _ArrayLikeObject_co, + q: _FloatLike_co, + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, *, - weights: None | _ArrayLikeFloat_co = ..., -) -> NDArray[floating[Any]]: ... -@overload + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.object_]: ... +@overload # object_, scalar, keepdims=True def percentile( - a: _ArrayLikeComplex_co, - q: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + a: _ArrayLikeObject_co, + q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", *, - weights: None | _ArrayLikeFloat_co = ..., -) -> NDArray[complexfloating[Any, Any]]: ... -@overload + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.object_]: ... +@overload # object_, array, axis=None +def percentile[ShapeT: _Shape]( + a: _ArrayLikeObject_co, + q: _Array[ShapeT, _floating_co], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> _Array[ShapeT, np.object_]: ... +@overload # object_, array-like def percentile( - a: _ArrayLikeTD64_co, + a: _ArrayLikeObject_co, + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.object_]: ... 
+@overload # out= (keyword) +def percentile[ArrayT: np.ndarray]( + a: ArrayLike, q: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: _ShapeLike | None, + out: ArrayT, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, *, - weights: None | _ArrayLikeFloat_co = ..., -) -> NDArray[timedelta64]: ... -@overload -def percentile( - a: _ArrayLikeDT64_co, + weights: _ArrayLikeFloat_co | None = None, +) -> ArrayT: ... +@overload # out= (positional) +def percentile[ArrayT: np.ndarray]( + a: ArrayLike, q: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: _ShapeLike | None = None, *, - weights: None | _ArrayLikeFloat_co = ..., -) -> NDArray[datetime64]: ... -@overload + out: ArrayT, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + weights: _ArrayLikeFloat_co | None = None, +) -> ArrayT: ... +@overload # fallback def percentile( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> Incomplete: ... + +# NOTE: keep in sync with `percentile` +@overload # inexact, scalar, axis=None +def quantile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], + q: _FloatLike_co, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> ScalarT: ... 
+@overload # inexact, scalar, axis= +def quantile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], + q: _FloatLike_co, + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[ScalarT]: ... +@overload # inexact, scalar, keepdims=True +def quantile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], + q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[ScalarT]: ... +@overload # inexact, array, axis=None +def quantile[ScalarT: np.inexact | np.timedelta64 | np.datetime64, ShapeT: _Shape]( + a: _ArrayLike[ScalarT], + q: _Array[ShapeT, _floating_co], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> _Array[ShapeT, ScalarT]: ... +@overload # inexact, array-like +def quantile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[ScalarT]: ... +@overload # float, scalar, axis=None +def quantile( + a: _SeqND[float] | _ArrayLikeInt_co, + q: _FloatLike_co, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> np.float64: ... 
+@overload # float, scalar, axis= +def quantile( + a: _SeqND[float] | _ArrayLikeInt_co, + q: _FloatLike_co, + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.float64]: ... +@overload # float, scalar, keepdims=True +def quantile( + a: _SeqND[float] | _ArrayLikeInt_co, + q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.float64]: ... +@overload # float, array, axis=None +def quantile[ShapeT: _Shape]( + a: _SeqND[float] | _ArrayLikeInt_co, + q: _Array[ShapeT, _floating_co], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> _Array[ShapeT, np.float64]: ... +@overload # float, array-like +def quantile( + a: _SeqND[float] | _ArrayLikeInt_co, + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.float64]: ... +@overload # complex, scalar, axis=None +def quantile( + a: _ListSeqND[complex], + q: _FloatLike_co, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> np.complex128: ... 
+@overload # complex, scalar, axis= +def quantile( + a: _ListSeqND[complex], + q: _FloatLike_co, + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.complex128]: ... +@overload # complex, scalar, keepdims=True +def quantile( + a: _ListSeqND[complex], + q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.complex128]: ... +@overload # complex, array, axis=None +def quantile[ShapeT: _Shape]( + a: _ListSeqND[complex], + q: _Array[ShapeT, _floating_co], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> _Array[ShapeT, np.complex128]: ... +@overload # complex, array-like +def quantile( + a: _ListSeqND[complex], + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.complex128]: ... +@overload # object_, scalar, axis=None +def quantile( + a: _ArrayLikeObject_co, + q: _FloatLike_co, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> Any: ... 
+@overload # object_, scalar, axis= +def quantile( a: _ArrayLikeObject_co, + q: _FloatLike_co, + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.object_]: ... +@overload # object_, scalar, keepdims=True +def quantile( + a: _ArrayLikeObject_co, + q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.object_]: ... +@overload # object_, array, axis=None +def quantile[ShapeT: _Shape]( + a: _ArrayLikeObject_co, + q: _Array[ShapeT, _floating_co], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> _Array[ShapeT, np.object_]: ... +@overload # object_, array-like +def quantile( + a: _ArrayLikeObject_co, + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.object_]: ... +@overload # out= (keyword) +def quantile[ArrayT: np.ndarray]( + a: ArrayLike, q: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: _ShapeLike | None, + out: ArrayT, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, *, - weights: None | _ArrayLikeFloat_co = ..., -) -> NDArray[object_]: ... 
-@overload -def percentile( - a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + weights: _ArrayLikeFloat_co | None = None, +) -> ArrayT: ... +@overload # out= (positional) +def quantile[ArrayT: np.ndarray]( + a: ArrayLike, q: _ArrayLikeFloat_co, - axis: None | _ShapeLike = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: bool = ..., + axis: _ShapeLike | None = None, *, - weights: None | _ArrayLikeFloat_co = ..., -) -> Any: ... -@overload -def percentile( - a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + out: ArrayT, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + weights: _ArrayLikeFloat_co | None = None, +) -> ArrayT: ... +@overload # fallback +def quantile( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, q: _ArrayLikeFloat_co, - axis: None | _ShapeLike = ..., - out: _ArrayType = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: bool = ..., + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, *, - weights: None | _ArrayLikeFloat_co = ..., -) -> _ArrayType: ... + weights: _ArrayLikeFloat_co | None = None, +) -> Incomplete: ... -# NOTE: Not an alias, but they do have identical signatures -# (that we can reuse) -quantile = percentile +# +@overload # ?d, known inexact/timedelta64 scalar-type +def trapezoid[ScalarT: np.inexact | np.timedelta64]( + y: _ArrayNoD[ScalarT], + x: _ArrayLike[ScalarT] | _ArrayLikeFloat_co | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> NDArray[ScalarT] | ScalarT: ... +@overload # ?d, casts to float64 +def trapezoid( + y: _ArrayNoD[_integer_co], + x: _ArrayLikeFloat_co | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> NDArray[np.float64] | np.float64: ... 
+@overload # strict 1d, known inexact/timedelta64 scalar-type +def trapezoid[ScalarT: np.inexact | np.timedelta64]( + y: _Array1D[ScalarT], + x: _Array1D[ScalarT] | _Seq1D[float] | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> ScalarT: ... +@overload # strict 1d, casts to float64 +def trapezoid( + y: _Array1D[_float64_co] | _Seq1D[float], + x: _Array1D[_float64_co] | _Seq1D[float] | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> np.float64: ... +@overload # strict 1d, casts to complex128 (`list` prevents overlapping overloads) +def trapezoid( + y: list[complex], + x: _Seq1D[complex] | None = None, + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> np.complex128: ... +@overload # strict 1d, casts to complex128 +def trapezoid( + y: _Seq1D[complex], + x: list[complex], + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> np.complex128: ... +@overload # strict 2d, known inexact/timedelta64 scalar-type +def trapezoid[ScalarT: np.inexact | np.timedelta64]( + y: _Array2D[ScalarT], + x: _ArrayMax2D[ScalarT] | _Seq2D[float] | _Seq1D[float] | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> ScalarT: ... +@overload # strict 2d, casts to float64 +def trapezoid( + y: _Array2D[_float64_co] | _Seq2D[float], + x: _ArrayMax2D[_float64_co] | _Seq2D[float] | _Seq1D[float] | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> np.float64: ... +@overload # strict 2d, casts to complex128 (`list` prevents overlapping overloads) +def trapezoid( + y: _Seq1D[list[complex]], + x: _Seq2D[complex] | _Seq1D[complex] | None = None, + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> np.complex128: ... +@overload # strict 2d, casts to complex128 +def trapezoid( + y: _Seq2D[complex] | _Seq1D[complex], + x: _Seq1D[list[complex]], + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> np.complex128: ... 
+@overload +def trapezoid[ScalarT: np.inexact | np.timedelta64]( + y: _ArrayLike[ScalarT], + x: _ArrayLike[ScalarT] | _ArrayLikeInt_co | None = None, + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> NDArray[ScalarT] | ScalarT: ... +@overload +def trapezoid( + y: _ArrayLike[_float64_co], + x: _ArrayLikeFloat_co | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> NDArray[np.float64] | np.float64: ... +@overload +def trapezoid( + y: _ArrayLike[np.complex128], + x: _ArrayLikeComplex_co | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> NDArray[np.complex128] | np.complex128: ... +@overload +def trapezoid( + y: _ArrayLikeComplex_co, + x: _ArrayLike[np.complex128], + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> NDArray[np.complex128] | np.complex128: ... +@overload +def trapezoid( + y: _ArrayLikeObject_co, + x: _ArrayLikeObject_co | _ArrayLikeFloat_co | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> NDArray[np.object_] | Any: ... +@overload +def trapezoid[T]( + y: _Seq1D[_SupportsRMulFloat[T]], + x: _Seq1D[_SupportsRMulFloat[T] | T] | None = None, + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> T: ... +@overload +def trapezoid( + y: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], + x: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_] | None = None, + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> Incomplete: ... +# +@overload # 0d +def meshgrid(*, copy: bool = True, sparse: bool = False, indexing: _Indexing = "xy") -> tuple[()]: ... +@overload # 1d, known scalar-type +def meshgrid[ScalarT: np.generic]( + x1: _ArrayLike[ScalarT], + /, + *, + copy: bool = True, + sparse: bool = False, + indexing: _Indexing = "xy", +) -> _Mesh1[ScalarT]: ... +@overload # 1d, unknown scalar-type +def meshgrid( + x1: ArrayLike, + /, + *, + copy: bool = True, + sparse: bool = False, + indexing: _Indexing = "xy", +) -> _Mesh1[Any]: ... 
+@overload # 2d, known scalar-types +def meshgrid[ScalarT1: np.generic, ScalarT2: np.generic]( + x1: _ArrayLike[ScalarT1], + x2: _ArrayLike[ScalarT2], + /, + *, + copy: bool = True, + sparse: bool = False, + indexing: _Indexing = "xy", +) -> _Mesh2[ScalarT1, ScalarT2]: ... +@overload # 2d, known/unknown scalar-types +def meshgrid[ScalarT: np.generic]( + x1: _ArrayLike[ScalarT], + x2: ArrayLike, + /, + *, + copy: bool = True, + sparse: bool = False, + indexing: _Indexing = "xy", +) -> _Mesh2[ScalarT, Any]: ... +@overload # 2d, unknown/known scalar-types +def meshgrid[ScalarT: np.generic]( + x1: ArrayLike, + x2: _ArrayLike[ScalarT], + /, + *, + copy: bool = True, + sparse: bool = False, + indexing: _Indexing = "xy", +) -> _Mesh2[Any, ScalarT]: ... +@overload # 2d, unknown scalar-types +def meshgrid( + x1: ArrayLike, + x2: ArrayLike, + /, + *, + copy: bool = True, + sparse: bool = False, + indexing: _Indexing = "xy", +) -> _Mesh2[Any, Any]: ... +@overload # 3d, known scalar-types +def meshgrid[ScalarT1: np.generic, ScalarT2: np.generic, ScalarT3: np.generic]( + x1: _ArrayLike[ScalarT1], + x2: _ArrayLike[ScalarT2], + x3: _ArrayLike[ScalarT3], + /, + *, + copy: bool = True, + sparse: bool = False, + indexing: _Indexing = "xy", +) -> _Mesh3[ScalarT1, ScalarT2, ScalarT3]: ... +@overload # 3d, unknown scalar-types +def meshgrid( + x1: ArrayLike, + x2: ArrayLike, + x3: ArrayLike, + /, + *, + copy: bool = True, + sparse: bool = False, + indexing: _Indexing = "xy", +) -> _Mesh3[Any, Any, Any]: ... +@overload # ?d, known scalar-types +def meshgrid[ScalarT: np.generic]( + *xi: _ArrayLike[ScalarT], + copy: bool = True, + sparse: bool = False, + indexing: _Indexing = "xy", +) -> tuple[NDArray[ScalarT], ...]: ... +@overload # ?d, unknown scalar-types def meshgrid( *xi: ArrayLike, - copy: bool = ..., - sparse: bool = ..., - indexing: L["xy", "ij"] = ..., + copy: bool = True, + sparse: bool = False, + indexing: _Indexing = "xy", ) -> tuple[NDArray[Any], ...]: ... 
+# +def place(arr: np.ndarray, mask: ConvertibleToInt | Sequence[ConvertibleToInt], vals: ArrayLike) -> None: ... + +# keep in sync with `insert` +@overload # known scalar-type, axis=None (default) +def delete[ScalarT: np.generic](arr: _ArrayLike[ScalarT], obj: _IndexLike, axis: None = None) -> _Array1D[ScalarT]: ... +@overload # known array-type, axis specified +def delete[ArrayT: np.ndarray](arr: ArrayT, obj: _IndexLike, axis: SupportsIndex) -> ArrayT: ... +@overload # known scalar-type, axis specified +def delete[ScalarT: np.generic](arr: _ArrayLike[ScalarT], obj: _IndexLike, axis: SupportsIndex) -> NDArray[ScalarT]: ... +@overload # known scalar-type, axis=None (default) +def delete(arr: ArrayLike, obj: _IndexLike, axis: None = None) -> _Array1D[Any]: ... +@overload # unknown scalar-type, axis specified +def delete(arr: ArrayLike, obj: _IndexLike, axis: SupportsIndex) -> NDArray[Any]: ... + +# keep in sync with `delete` +@overload # known scalar-type, axis=None (default) +def insert[ScalarT: np.generic](arr: _ArrayLike[ScalarT], obj: _IndexLike, values: ArrayLike, axis: None = None) -> _Array1D[ScalarT]: ... +@overload # known array-type, axis specified +def insert[ArrayT: np.ndarray](arr: ArrayT, obj: _IndexLike, values: ArrayLike, axis: SupportsIndex) -> ArrayT: ... +@overload # known scalar-type, axis specified +def insert[ScalarT: np.generic](arr: _ArrayLike[ScalarT], obj: _IndexLike, values: ArrayLike, axis: SupportsIndex) -> NDArray[ScalarT]: ... +@overload # known scalar-type, axis=None (default) +def insert(arr: ArrayLike, obj: _IndexLike, values: ArrayLike, axis: None = None) -> _Array1D[Any]: ... +@overload # unknown scalar-type, axis specified +def insert(arr: ArrayLike, obj: _IndexLike, values: ArrayLike, axis: SupportsIndex) -> NDArray[Any]: ... + +# keep in sync with `ma.core.append` +@overload # known array type, axis specified +def append[ArrayT: np.ndarray](arr: ArrayT, values: ArrayT, axis: SupportsIndex) -> ArrayT: ... 
+@overload # 1d, known scalar type, axis specified +def append[ScalarT: np.generic](arr: _Seq1D[ScalarT], values: _Seq1D[ScalarT], axis: SupportsIndex) -> _Array1D[ScalarT]: ... +@overload # 2d, known scalar type, axis specified +def append[ScalarT: np.generic](arr: _Seq2D[ScalarT], values: _Seq2D[ScalarT], axis: SupportsIndex) -> _Array2D[ScalarT]: ... +@overload # 3d, known scalar type, axis specified +def append[ScalarT: np.generic](arr: _Seq3D[ScalarT], values: _Seq3D[ScalarT], axis: SupportsIndex) -> _Array3D[ScalarT]: ... +@overload # ?d, known scalar type, axis specified +def append[ScalarT: np.generic](arr: _SeqND[ScalarT], values: _SeqND[ScalarT], axis: SupportsIndex) -> NDArray[ScalarT]: ... +@overload # ?d, unknown scalar type, axis specified +def append(arr: np.ndarray | _SeqND[_ScalarLike_co], values: _SeqND[_ScalarLike_co], axis: SupportsIndex) -> np.ndarray: ... +@overload # known scalar type, axis=None +def append[ScalarT: np.generic](arr: _ArrayLike[ScalarT], values: _ArrayLike[ScalarT], axis: None = None) -> _Array1D[ScalarT]: ... +@overload # unknown scalar type, axis=None +def append(arr: ArrayLike, values: ArrayLike, axis: None = None) -> _Array1D[Any]: ... + +# @overload -def delete( - arr: _ArrayLike[_SCT], - obj: slice | _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., -) -> NDArray[_SCT]: ... +def digitize[ShapeT: _Shape]( + x: _Array[ShapeT, np.floating | np.integer], bins: _ArrayLikeFloat_co, right: bool = False +) -> _Array[ShapeT, np.int_]: ... @overload -def delete( - arr: ArrayLike, - obj: slice | _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., -) -> NDArray[Any]: ... - +def digitize(x: _FloatLike_co, bins: _ArrayLikeFloat_co, right: bool = False) -> np.int_: ... @overload -def insert( - arr: _ArrayLike[_SCT], - obj: slice | _ArrayLikeInt_co, - values: ArrayLike, - axis: None | SupportsIndex = ..., -) -> NDArray[_SCT]: ... 
+def digitize(x: _Seq1D[_FloatLike_co], bins: _ArrayLikeFloat_co, right: bool = False) -> _Array1D[np.int_]: ... @overload -def insert( - arr: ArrayLike, - obj: slice | _ArrayLikeInt_co, - values: ArrayLike, - axis: None | SupportsIndex = ..., -) -> NDArray[Any]: ... - -def append( - arr: ArrayLike, - values: ArrayLike, - axis: None | SupportsIndex = ..., -) -> NDArray[Any]: ... - +def digitize(x: _Seq2D[_FloatLike_co], bins: _ArrayLikeFloat_co, right: bool = False) -> _Array2D[np.int_]: ... @overload -def digitize( - x: _FloatLike_co, - bins: _ArrayLikeFloat_co, - right: bool = ..., -) -> intp: ... +def digitize(x: _Seq3D[_FloatLike_co], bins: _ArrayLikeFloat_co, right: bool = False) -> _Array3D[np.int_]: ... @overload -def digitize( - x: _ArrayLikeFloat_co, - bins: _ArrayLikeFloat_co, - right: bool = ..., -) -> NDArray[intp]: ... +def digitize(x: _ArrayLikeFloat_co, bins: _ArrayLikeFloat_co, right: bool = False) -> NDArray[np.int_] | Any: ... diff --git a/numpy/lib/_histograms_impl.py b/numpy/lib/_histograms_impl.py index 80eeffb6a03c..b4aacd057eaa 100644 --- a/numpy/lib/_histograms_impl.py +++ b/numpy/lib/_histograms_impl.py @@ -123,8 +123,9 @@ def _hist_bin_stone(x, range): """ Histogram bin estimator based on minimizing the estimated integrated squared error (ISE). - The number of bins is chosen by minimizing the estimated ISE against the unknown true distribution. - The ISE is estimated using cross-validation and can be regarded as a generalization of Scott's rule. + The number of bins is chosen by minimizing the estimated ISE against the unknown + true distribution. The ISE is estimated using cross-validation and can be regarded + as a generalization of Scott's rule. https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule This paper by Stone appears to be the origination of this rule. @@ -141,7 +142,7 @@ def _hist_bin_stone(x, range): Returns ------- h : An estimate of the optimal bin width for the given data. 
- """ + """ # noqa: E501 n = x.size ptp_x = _ptp(x) @@ -228,9 +229,10 @@ def _hist_bin_fd(x, range): def _hist_bin_auto(x, range): """ - Histogram bin estimator that uses the minimum width of the - Freedman-Diaconis and Sturges estimators if the FD bin width is non-zero. - If the bin width from the FD estimator is 0, the Sturges estimator is used. + Histogram bin estimator that uses the minimum width of a relaxed + Freedman-Diaconis and Sturges estimators if the FD bin width does + not result in a large number of bins. The relaxed Freedman-Diaconis estimator + limits the bin width to half the sqrt estimated to avoid small bins. The FD estimator is usually the most robust method, but its width estimate tends to be too large for small `x` and bad for data with limited @@ -238,19 +240,13 @@ def _hist_bin_auto(x, range): and is the default in the R language. This method gives good off-the-shelf behaviour. - .. versionchanged:: 1.15.0 - If there is limited variance the IQR can be 0, which results in the - FD bin width being 0 too. This is not a valid bin width, so - ``np.histogram_bin_edges`` chooses 1 bin instead, which may not be optimal. - If the IQR is 0, it's unlikely any variance-based estimators will be of - use, so we revert to the Sturges estimator, which only uses the size of the - dataset in its calculation. Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. 
+ range : Tuple with range for the histogram Returns ------- @@ -262,12 +258,11 @@ def _hist_bin_auto(x, range): """ fd_bw = _hist_bin_fd(x, range) sturges_bw = _hist_bin_sturges(x, range) - del range # unused - if fd_bw: - return min(fd_bw, sturges_bw) - else: - # limited variance, so we return a len dependent bw estimator - return sturges_bw + sqrt_bw = _hist_bin_sqrt(x, range) + # heuristic to limit the maximal number of bins + fd_bw_corrected = max(fd_bw, sqrt_bw / 2) + return min(fd_bw_corrected, sturges_bw) + # Private dict initialized at module load time _hist_bin_selectors = {'stone': _hist_bin_stone, @@ -286,9 +281,8 @@ def _ravel_and_check_weights(a, weights): # Ensure that the array is a "subtractable" dtype if a.dtype == np.bool: - warnings.warn("Converting input from {} to {} for compatibility." - .format(a.dtype, np.uint8), - RuntimeWarning, stacklevel=3) + msg = f"Converting input from {a.dtype} to {np.uint8} for compatibility." + warnings.warn(msg, RuntimeWarning, stacklevel=3) a = a.astype(np.uint8) if weights is not None: @@ -313,7 +307,7 @@ def _get_outer_edges(a, range): 'max must be larger than min in range parameter.') if not (np.isfinite(first_edge) and np.isfinite(last_edge)): raise ValueError( - "supplied range of [{}, {}] is not finite".format(first_edge, last_edge)) + f"supplied range of [{first_edge}, {last_edge}] is not finite") elif a.size == 0: # handle empty arrays. Can't determine range, so use 0-1. 
first_edge, last_edge = 0, 1 @@ -321,7 +315,7 @@ def _get_outer_edges(a, range): first_edge, last_edge = a.min(), a.max() if not (np.isfinite(first_edge) and np.isfinite(last_edge)): raise ValueError( - "autodetected range of [{}, {}] is not finite".format(first_edge, last_edge)) + f"autodetected range of [{first_edge}, {last_edge}] is not finite") # expand empty range to avoid divide by zero if first_edge == last_edge: @@ -390,7 +384,7 @@ def _get_bin_edges(a, bins, range, weights): # this will replace it with the number of bins calculated if bin_name not in _hist_bin_selectors: raise ValueError( - "{!r} is not a valid estimator for `bins`".format(bin_name)) + f"{bin_name!r} is not a valid estimator for `bins`") if weights is not None: raise TypeError("Automated estimation of the number of " "bins is not supported for weighted data") @@ -412,7 +406,8 @@ def _get_bin_edges(a, bins, range, weights): if width: if np.issubdtype(a.dtype, np.integer) and width < 1: width = 1 - n_equal_bins = int(np.ceil(_unsigned_subtract(last_edge, first_edge) / width)) + delta = _unsigned_subtract(last_edge, first_edge) + n_equal_bins = int(np.ceil(delta / width)) else: # Width can be zero for some estimators, e.g. FD when # the IQR of the data is zero. @@ -450,6 +445,10 @@ def _get_bin_edges(a, bins, range, weights): bin_edges = np.linspace( first_edge, last_edge, n_equal_bins + 1, endpoint=True, dtype=bin_type) + if np.any(bin_edges[:-1] >= bin_edges[1:]): + raise ValueError( + f'Too many bins for data range. Cannot create {n_equal_bins} ' + f'finite-sized bins.') return bin_edges, (first_edge, last_edge, n_equal_bins) else: return bin_edges, None @@ -498,7 +497,7 @@ def histogram_bin_edges(a, bins=10, range=None, weights=None): supported for automated bin size selection. 'auto' - Minimum bin width between the 'sturges' and 'fd' estimators. + Minimum bin width between the 'sturges' and 'fd' estimators. Provides good all-around performance. 
'fd' (Freedman Diaconis Estimator) @@ -632,6 +631,7 @@ def histogram_bin_edges(a, bins=10, range=None, weights=None): Examples -------- + >>> import numpy as np >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5]) >>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1)) array([0. , 0.25, 0.5 , 0.75, 1. ]) @@ -697,8 +697,6 @@ def histogram(a, bins=10, range=None, density=None, weights=None): sequence, it defines a monotonically increasing array of bin edges, including the rightmost edge, allowing for non-uniform bin widths. - .. versionadded:: 1.11.0 - If `bins` is a string, it defines the method used to calculate the optimal bin width, as defined by `histogram_bin_edges`. @@ -755,6 +753,7 @@ def histogram(a, bins=10, range=None, density=None, weights=None): Examples -------- + >>> import numpy as np >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3]) (array([0, 2, 1]), array([0, 1, 2, 3])) >>> np.histogram(np.arange(4), bins=np.arange(5), density=True) @@ -771,8 +770,6 @@ def histogram(a, bins=10, range=None, density=None, weights=None): >>> np.sum(hist * np.diff(bin_edges)) 1.0 - .. versionadded:: 1.11.0 - Automated Bin Selection Methods example, using 2 peak random data with 2000 points. @@ -830,7 +827,7 @@ def histogram(a, bins=10, range=None, density=None, weights=None): # is 2x as fast) and it results in a memory footprint 3x lower in the # limit of large arrays. 
for i in _range(0, len(a), BLOCK): - tmp_a = a[i:i+BLOCK] + tmp_a = a[i:i + BLOCK] if weights is None: tmp_w = None else: @@ -879,13 +876,13 @@ def histogram(a, bins=10, range=None, density=None, weights=None): cum_n = np.zeros(bin_edges.shape, ntype) if weights is None: for i in _range(0, len(a), BLOCK): - sa = np.sort(a[i:i+BLOCK]) + sa = np.sort(a[i:i + BLOCK]) cum_n += _search_sorted_inclusive(sa, bin_edges) else: zero = np.zeros(1, dtype=ntype) for i in _range(0, len(a), BLOCK): - tmp_a = a[i:i+BLOCK] - tmp_w = weights[i:i+BLOCK] + tmp_a = a[i:i + BLOCK] + tmp_w = weights[i:i + BLOCK] sorting_index = np.argsort(tmp_a) sa = tmp_a[sorting_index] sw = tmp_w[sorting_index] @@ -897,7 +894,7 @@ def histogram(a, bins=10, range=None, density=None, weights=None): if density: db = np.array(np.diff(bin_edges), float) - return n/db/n.sum(), bin_edges + return n / db / n.sum(), bin_edges return n, bin_edges @@ -972,6 +969,7 @@ def histogramdd(sample, bins=10, range=None, density=None, weights=None): Examples -------- + >>> import numpy as np >>> rng = np.random.default_rng() >>> r = rng.normal(size=(100,3)) >>> H, edges = np.histogramdd(r, bins = (5, 8, 4)) @@ -989,8 +987,8 @@ def histogramdd(sample, bins=10, range=None, density=None, weights=None): N, D = sample.shape nbin = np.empty(D, np.intp) - edges = D*[None] - dedges = D*[None] + edges = D * [None] + dedges = D * [None] if weights is not None: weights = np.asarray(weights) @@ -1002,7 +1000,7 @@ def histogramdd(sample, bins=10, range=None, density=None, weights=None): 'sample x.') except TypeError: # bins is an integer - bins = D*[bins] + bins = D * [bins] # normalize the range argument if range is None: @@ -1015,14 +1013,14 @@ def histogramdd(sample, bins=10, range=None, density=None, weights=None): if np.ndim(bins[i]) == 0: if bins[i] < 1: raise ValueError( - '`bins[{}]` must be positive, when an integer'.format(i)) - smin, smax = _get_outer_edges(sample[:,i], range[i]) + f'`bins[{i}]` must be positive, when an 
integer') + smin, smax = _get_outer_edges(sample[:, i], range[i]) try: n = operator.index(bins[i]) except TypeError as e: raise TypeError( - "`bins[{}]` must be an integer, when a scalar".format(i) + f"`bins[{i}]` must be an integer, when a scalar" ) from e edges[i] = np.linspace(smin, smax, n + 1) @@ -1030,11 +1028,10 @@ def histogramdd(sample, bins=10, range=None, density=None, weights=None): edges[i] = np.asarray(bins[i]) if np.any(edges[i][:-1] > edges[i][1:]): raise ValueError( - '`bins[{}]` must be monotonically increasing, when an array' - .format(i)) + f'`bins[{i}]` must be monotonically increasing, when an array') else: raise ValueError( - '`bins[{}]` must be a scalar or 1d array'.format(i)) + f'`bins[{i}]` must be a scalar or 1d array') nbin[i] = len(edges[i]) + 1 # includes an outlier on each end dedges[i] = np.diff(edges[i]) @@ -1070,7 +1067,7 @@ def histogramdd(sample, bins=10, range=None, density=None, weights=None): hist = hist.astype(float, casting='safe') # Remove outliers (indices 0 and -1 for each dimension). 
- core = D*(slice(1, -1),) + core = D * (slice(1, -1),) hist = hist[core] if density: diff --git a/numpy/lib/_histograms_impl.pyi b/numpy/lib/_histograms_impl.pyi index 138cdb115ef5..9f9bdb25aa6c 100644 --- a/numpy/lib/_histograms_impl.pyi +++ b/numpy/lib/_histograms_impl.pyi @@ -1,47 +1,273 @@ +from _typeshed import Incomplete from collections.abc import Sequence -from typing import ( - Literal as L, - Any, - SupportsIndex, -) +from typing import Any, Literal as L, SupportsIndex, overload +import numpy as np from numpy._typing import ( - NDArray, ArrayLike, + NDArray, + _ArrayLike, + _ArrayLikeComplex_co, + _ArrayLikeFloat64_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _ArrayLikeObject_co, + _NestedSequence, ) -_BinKind = L[ - "stone", - "auto", - "doane", - "fd", - "rice", - "scott", - "sqrt", - "sturges", -] +__all__ = ["histogram", "histogramdd", "histogram_bin_edges"] + +### + +type _BinKind = L["auto", "fd", "doane", "scott", "stone", "rice", "sturges", "sqrt"] + +type _Range = tuple[float, float] +type _NestedList[T] = list[T] | _NestedSequence[list[T]] -__all__: list[str] +type _WeightsLike = _ArrayLikeComplex_co | _ArrayLikeObject_co +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _HistogramResult[HistT: np.generic, EdgeT: np.generic] = tuple[_Array1D[HistT], _Array1D[EdgeT]] +### + +# NOTE: The return type can also be complex or `object_`, not only floating like the docstring suggests. +@overload # dtype +float64 +def histogram_bin_edges( + a: _ArrayLikeInt_co | _NestedSequence[float], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + weights: _WeightsLike | None = None, +) -> _Array1D[np.float64]: ... +@overload # dtype ~complex +def histogram_bin_edges( + a: _NestedList[complex], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + weights: _WeightsLike | None = None, +) -> _Array1D[np.complex128]: ... 
+@overload # dtype known +def histogram_bin_edges[ScalarT: np.inexact | np.object_]( + a: _ArrayLike[ScalarT], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + weights: _WeightsLike | None = None, +) -> _Array1D[ScalarT]: ... +@overload # dtype unknown def histogram_bin_edges( - a: ArrayLike, - bins: _BinKind | SupportsIndex | ArrayLike = ..., - range: None | tuple[float, float] = ..., - weights: None | ArrayLike = ..., -) -> NDArray[Any]: ... + a: _ArrayLikeComplex_co, + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + weights: _WeightsLike | None = None, +) -> _Array1D[Incomplete]: ... +# There are 4 groups of 2 + 3 overloads (2 for density=True, 3 for density=False) = 20 in total +@overload # a: +float64, density: True (keyword), weights: +float | None (default) +def histogram( + a: _ArrayLikeInt_co | _NestedSequence[float], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + *, + density: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> _HistogramResult[np.float64, np.float64]: ... +@overload # a: +float64, density: True (keyword), weights: +complex +def histogram( + a: _ArrayLikeInt_co | _NestedSequence[float], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + *, + density: L[True], + weights: _ArrayLike[np.complexfloating] | _NestedList[complex], +) -> _HistogramResult[np.complex128, np.float64]: ... +@overload # a: +float64, density: False (default), weights: ~int | None (default) def histogram( - a: ArrayLike, - bins: _BinKind | SupportsIndex | ArrayLike = ..., - range: None | tuple[float, float] = ..., - density: bool = ..., - weights: None | ArrayLike = ..., -) -> tuple[NDArray[Any], NDArray[Any]]: ... 
+ a: _ArrayLikeInt_co | _NestedSequence[float], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + weights: _NestedSequence[int] | None = None, +) -> _HistogramResult[np.intp, np.float64]: ... +@overload # a: +float64, density: False (default), weights: known (keyword) +def histogram[WeightsT: np.bool | np.number | np.timedelta64]( + a: _ArrayLikeInt_co | _NestedSequence[float], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + *, + weights: _ArrayLike[WeightsT], +) -> _HistogramResult[WeightsT, np.float64]: ... +@overload # a: +float64, density: False (default), weights: unknown (keyword) +def histogram( + a: _ArrayLikeInt_co | _NestedSequence[float], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + *, + weights: _WeightsLike, +) -> _HistogramResult[Incomplete, np.float64]: ... +@overload # a: ~complex, density: True (keyword), weights: +float | None (default) +def histogram( + a: _NestedList[complex], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + *, + density: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> _HistogramResult[np.float64, np.complex128]: ... +@overload # a: ~complex, density: True (keyword), weights: +complex +def histogram( + a: _NestedList[complex], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + *, + density: L[True], + weights: _ArrayLike[np.complexfloating] | _NestedList[complex], +) -> _HistogramResult[np.complex128, np.complex128]: ... 
+@overload # a: ~complex, density: False (default), weights: ~int | None (default) +def histogram( + a: _NestedList[complex], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + weights: _NestedSequence[int] | None = None, +) -> _HistogramResult[np.intp, np.complex128]: ... +@overload # a: ~complex, density: False (default), weights: known (keyword) +def histogram[WeightsT: np.bool | np.number | np.timedelta64]( + a: _NestedList[complex], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + *, + weights: _ArrayLike[WeightsT], +) -> _HistogramResult[WeightsT, np.complex128]: ... +@overload # a: ~complex, density: False (default), weights: unknown (keyword) +def histogram( + a: _NestedList[complex], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + *, + weights: _WeightsLike, +) -> _HistogramResult[Incomplete, np.complex128]: ... +@overload # a: known, density: True (keyword), weights: +float | None (default) +def histogram[ScalarT: np.inexact | np.object_]( + a: _ArrayLike[ScalarT], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + *, + density: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> _HistogramResult[np.float64, ScalarT]: ... +@overload # a: known, density: True (keyword), weights: +complex +def histogram[ScalarT: np.inexact | np.object_]( + a: _ArrayLike[ScalarT], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + *, + density: L[True], + weights: _ArrayLike[np.complexfloating] | _NestedList[complex], +) -> _HistogramResult[np.complex128, ScalarT]: ... 
+@overload # a: known, density: False (default), weights: ~int | None (default) +def histogram[ScalarT: np.inexact | np.object_]( + a: _ArrayLike[ScalarT], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + weights: _NestedSequence[int] | None = None, +) -> _HistogramResult[np.intp, ScalarT]: ... +@overload # a: known, density: False (default), weights: known (keyword) +def histogram[ScalarT: np.inexact | np.object_, WeightsT: np.bool | np.number | np.timedelta64]( + a: _ArrayLike[ScalarT], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + *, + weights: _ArrayLike[WeightsT], +) -> _HistogramResult[WeightsT, ScalarT]: ... +@overload # a: known, density: False (default), weights: unknown (keyword) +def histogram[ScalarT: np.inexact | np.object_]( + a: _ArrayLike[ScalarT], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + *, + weights: _WeightsLike, +) -> _HistogramResult[Incomplete, ScalarT]: ... +@overload # a: unknown, density: True (keyword), weights: +float | None (default) +def histogram( + a: _ArrayLikeComplex_co, + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + *, + density: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> _HistogramResult[np.float64, Incomplete]: ... +@overload # a: unknown, density: True (keyword), weights: +complex +def histogram( + a: _ArrayLikeComplex_co, + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + *, + density: L[True], + weights: _ArrayLike[np.complexfloating] | _NestedList[complex], +) -> _HistogramResult[np.complex128, Incomplete]: ... 
+@overload # a: unknown, density: False (default), weights: int | None (default) +def histogram( + a: _ArrayLikeComplex_co, + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + weights: _NestedSequence[int] | None = None, +) -> _HistogramResult[np.intp, Incomplete]: ... +@overload # a: unknown, density: False (default), weights: known (keyword) +def histogram[WeightsT: np.bool | np.number | np.timedelta64]( + a: _ArrayLikeComplex_co, + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + *, + weights: _ArrayLike[WeightsT], +) -> _HistogramResult[WeightsT, Incomplete]: ... +@overload # a: unknown, density: False (default), weights: unknown (keyword) +def histogram( + a: _ArrayLikeComplex_co, + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + *, + weights: _WeightsLike, +) -> _HistogramResult[Incomplete, Incomplete]: ... +# unlike `histogram`, `weights` must be safe-castable to f64 +@overload # dtype +float64 +def histogramdd( + sample: _ArrayLikeInt_co | _NestedSequence[float] | _ArrayLikeObject_co, + bins: SupportsIndex | ArrayLike = 10, + range: Sequence[_Range] | None = None, + density: bool | None = None, + weights: _ArrayLikeFloat64_co | None = None, +) -> tuple[NDArray[np.float64], tuple[_Array1D[np.float64], ...]]: ... +@overload # dtype ~complex +def histogramdd( + sample: _NestedList[complex], + bins: SupportsIndex | ArrayLike = 10, + range: Sequence[_Range] | None = None, + density: bool | None = None, + weights: _ArrayLikeFloat64_co | None = None, +) -> tuple[NDArray[np.float64], tuple[_Array1D[np.complex128], ...]]: ... 
+@overload # dtype known +def histogramdd[ScalarT: np.inexact]( + sample: _ArrayLike[ScalarT], + bins: SupportsIndex | ArrayLike = 10, + range: Sequence[_Range] | None = None, + density: bool | None = None, + weights: _ArrayLikeFloat64_co | None = None, +) -> tuple[NDArray[np.float64], tuple[_Array1D[ScalarT], ...]]: ... +@overload # dtype unknown def histogramdd( - sample: ArrayLike, - bins: SupportsIndex | ArrayLike = ..., - range: Sequence[tuple[float, float]] = ..., - density: None | bool = ..., - weights: None | ArrayLike = ..., -) -> tuple[NDArray[Any], tuple[NDArray[Any], ...]]: ... + sample: _ArrayLikeComplex_co, + bins: SupportsIndex | ArrayLike = 10, + range: Sequence[_Range] | None = None, + density: bool | None = None, + weights: _ArrayLikeFloat64_co | None = None, +) -> tuple[NDArray[np.float64], tuple[_Array1D[Any], ...]]: ... diff --git a/numpy/lib/_index_tricks_impl.py b/numpy/lib/_index_tricks_impl.py index 62f1d213b29f..5ee60d0fceaf 100644 --- a/numpy/lib/_index_tricks_impl.py +++ b/numpy/lib/_index_tricks_impl.py @@ -1,21 +1,18 @@ import functools -import sys import math -import warnings +import sys +from itertools import product import numpy as np -from .._utils import set_module import numpy._core.numeric as _nx -from numpy._core.numeric import ScalarType, array -from numpy._core.numerictypes import issubdtype - import numpy.matrixlib as matrixlib +from numpy._core import linspace, overrides from numpy._core.multiarray import ravel_multi_index, unravel_index -from numpy._core import overrides, linspace -from numpy.lib.stride_tricks import as_strided +from numpy._core.numeric import ScalarType, array +from numpy._core.numerictypes import issubdtype +from numpy._utils import set_module from numpy.lib._function_base_impl import diff - array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy') @@ -65,6 +62,7 @@ def ix_(*args): Examples -------- + >>> import numpy as np >>> a = np.arange(10).reshape(2, 5) >>> a 
array([[0, 1, 2, 3, 4], @@ -101,7 +99,7 @@ def ix_(*args): raise ValueError("Cross index must be 1 dimensional") if issubdtype(new.dtype, _nx.bool): new, = new.nonzero() - new = new.reshape((1,)*k + (new.size,) + (1,)*(nd-k-1)) + new = new.reshape((1,) * k + (new.size,) + (1,) * (nd - k - 1)) out.append(new) return tuple(out) @@ -140,6 +138,7 @@ class nd_grid: Users should use these pre-defined instances instead of using `nd_grid` directly. """ + __slots__ = ('sparse',) def __init__(self, sparse=False): self.sparse = sparse @@ -163,12 +162,12 @@ def __getitem__(self, key): size.append(int(step)) else: size.append( - int(math.ceil((stop - start) / (step*1.0)))) + math.ceil((stop - start) / step)) num_list += [start, stop, step] typ = _nx.result_type(*num_list) if self.sparse: nn = [_nx.arange(_x, dtype=_t) - for _x, _t in zip(size, (typ,)*len(size))] + for _x, _t in zip(size, (typ,) * len(size))] else: nn = _nx.indices(size, typ) for k, kk in enumerate(key): @@ -182,9 +181,9 @@ def __getitem__(self, key): step = int(abs(step)) if step != 1: step = (kk.stop - start) / float(step - 1) - nn[k] = (nn[k]*step+start) + nn[k] = (nn[k] * step + start) if self.sparse: - slobj = [_nx.newaxis]*len(size) + slobj = [_nx.newaxis] * len(size) for k in range(len(size)): slobj[k] = slice(None, None) nn[k] = nn[k][tuple(slobj)] @@ -202,9 +201,9 @@ def __getitem__(self, key): step_float = abs(step) step = length = int(step_float) if step != 1: - step = (key.stop-start)/float(step-1) + step = (key.stop - start) / float(step - 1) typ = _nx.result_type(start, stop, step_float) - return _nx.arange(0, length, 1, dtype=typ)*step + start + return _nx.arange(0, length, 1, dtype=typ) * step + start else: return _nx.arange(start, stop, step) @@ -239,6 +238,7 @@ class MGridClass(nd_grid): Examples -------- + >>> import numpy as np >>> np.mgrid[0:5, 0:5] array([[[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], @@ -261,6 +261,7 @@ class MGridClass(nd_grid): (3, 4, 5, 6) """ + __slots__ = () def __init__(self): 
super().__init__(sparse=False) @@ -312,6 +313,7 @@ class OGridClass(nd_grid): array([[0, 1, 2, 3, 4]])) """ + __slots__ = () def __init__(self): super().__init__(sparse=True) @@ -326,6 +328,8 @@ class AxisConcatenator: For detailed documentation on usage, see `r_`. """ + __slots__ = ('axis', 'matrix', 'ndmin', 'trans1d') + # allow ma.mr_ to override this concatenate = staticmethod(_nx.concatenate) makemat = staticmethod(matrixlib.matrix) @@ -392,7 +396,7 @@ def __getitem__(self, key): continue except Exception as e: raise ValueError( - "unknown special directive {!r}".format(item) + f"unknown special directive {item!r}" ) from e try: axis = int(item) @@ -440,7 +444,7 @@ def __getitem__(self, key): def __len__(self): return 0 -# separate classes are used here instead of just making r_ = concatentor(0), +# separate classes are used here instead of just making r_ = concatenator(0), # etc. because otherwise we couldn't get the doc string to come out right # in help(r_) @@ -468,9 +472,9 @@ class RClass(AxisConcatenator): Optional character strings placed as the first element of the index expression can be used to change the output. The strings 'r' or 'c' result in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row) - matrix is produced. If the result is 1-D and 'c' is specified, then a N x 1 - (column) matrix is produced. If the result is 2-D then both provide the - same matrix result. + matrix is produced. If the result is 1-D and 'c' is specified, then + an N x 1 (column) matrix is produced. + If the result is 2-D then both provide the same matrix result. A string integer specifies which axis to stack multiple comma separated arrays along. 
A string of two comma-separated integers allows indication @@ -505,6 +509,7 @@ class RClass(AxisConcatenator): Examples -------- + >>> import numpy as np >>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])] array([1, 2, 3, ..., 4, 5, 6]) >>> np.r_[-1:1:6j, [0]*3, 5, 6] @@ -539,6 +544,7 @@ class RClass(AxisConcatenator): matrix([[1, 2, 3, 4, 5, 6]]) """ + __slots__ = () def __init__(self): AxisConcatenator.__init__(self, 0) @@ -563,6 +569,7 @@ class CClass(AxisConcatenator): Examples -------- + >>> import numpy as np >>> np.c_[np.array([1,2,3]), np.array([4,5,6])] array([[1, 4], [2, 5], @@ -571,6 +578,7 @@ class CClass(AxisConcatenator): array([[1, 2, 3, ..., 4, 5, 6]]) """ + __slots__ = () def __init__(self): AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0) @@ -597,6 +605,7 @@ class ndenumerate: Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> for index, x in np.ndenumerate(a): ... print(index, x) @@ -649,6 +658,8 @@ class ndindex: Examples -------- + >>> import numpy as np + Dimensions as individual arguments >>> for index in np.ndindex(3, 2, 1): @@ -676,30 +687,13 @@ class ndindex: def __init__(self, *shape): if len(shape) == 1 and isinstance(shape[0], tuple): shape = shape[0] - x = as_strided(_nx.zeros(1), shape=shape, - strides=_nx.zeros_like(shape)) - self._it = _nx.nditer(x, flags=['multi_index', 'zerosize_ok'], - order='C') + if min(shape, default=0) < 0: + raise ValueError("negative dimensions are not allowed") + self._iter = product(*map(range, shape)) def __iter__(self): return self - def ndincr(self): - """ - Increment the multi-dimensional index by one. - - This method is for backward compatibility only: do not use. - - .. deprecated:: 1.20.0 - This method has been advised against since numpy 1.8.0, but only - started emitting DeprecationWarning as of this version. 
- """ - # NumPy 1.20.0, 2020-09-08 - warnings.warn( - "`ndindex.ndincr()` is deprecated, use `next(ndindex)` instead", - DeprecationWarning, stacklevel=2) - next(self) - def __next__(self): """ Standard iterator method, updates the index and returns the index @@ -712,8 +706,7 @@ def __next__(self): iteration. """ - next(self._it) - return self._it.multi_index + return next(self._iter) # You can do all this with slice() plus a few special objects, @@ -762,6 +755,7 @@ class IndexExpression: Examples -------- + >>> import numpy as np >>> np.s_[2::2] slice(2, None, 2) >>> np.index_exp[2::2] @@ -771,6 +765,7 @@ class IndexExpression: array([2, 4]) """ + __slots__ = ('maketuple',) def __init__(self, maketuple): self.maketuple = maketuple @@ -825,14 +820,13 @@ def fill_diagonal(a, val, wrap=False): Notes ----- - .. versionadded:: 1.4.0 - This functionality can be obtained via `diag_indices`, but internally this version uses a much faster implementation that never constructs the indices and uses simple slicing. Examples -------- + >>> import numpy as np >>> a = np.zeros((3, 3), int) >>> np.fill_diagonal(a, 5) >>> a @@ -953,12 +947,10 @@ def diag_indices(n, ndim=2): -------- diag_indices_from - Notes - ----- - .. versionadded:: 1.4.0 - Examples -------- + >>> import numpy as np + Create a set of indices to access the diagonal of a (4, 4) array: >>> di = np.diag_indices(4) @@ -985,7 +977,7 @@ def diag_indices(n, ndim=2): And use it to set the diagonal of an array of zeros to 1: - >>> a = np.zeros((2, 2, 2), dtype=int) + >>> a = np.zeros((2, 2, 2), dtype=np.int_) >>> a[d3] = 1 >>> a array([[[1, 0], @@ -1017,13 +1009,10 @@ def diag_indices_from(arr): -------- diag_indices - Notes - ----- - .. versionadded:: 1.4.0 - Examples -------- - + >>> import numpy as np + Create a 4 by 4 array. >>> a = np.arange(16).reshape(4, 4) @@ -1032,7 +1021,7 @@ def diag_indices_from(arr): [ 4, 5, 6, 7], [ 8, 9, 10, 11], [12, 13, 14, 15]]) - + Get the indices of the diagonal elements. 
>>> di = np.diag_indices_from(a) diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi index f13ab4d96e48..ad3b5cb6e236 100644 --- a/numpy/lib/_index_tricks_impl.pyi +++ b/numpy/lib/_index_tricks_impl.pyi @@ -1,154 +1,265 @@ +from _typeshed import Incomplete, SupportsLenAndGetItem from collections.abc import Sequence from typing import ( Any, - TypeVar, + ClassVar, + Final, Generic, - overload, - Literal, + Literal as L, + Self, SupportsIndex, + final, + overload, ) +from typing_extensions import TypeVar import numpy as np -from numpy import ( - # Circumvent a naming conflict with `AxisConcatenator.matrix` - matrix as _Matrix, - ndenumerate as ndenumerate, - ndindex as ndindex, - ndarray, - dtype, - str_, - bytes_, - int_, - float64, - complex128, -) +from numpy import _CastingKind +from numpy._core.multiarray import ravel_multi_index, unravel_index from numpy._typing import ( - # Arrays ArrayLike, - _NestedSequence, - _FiniteNestedSequence, - NDArray, - - # DTypes DTypeLike, - _SupportsDType, + NDArray, + _AnyShape, + _ArrayLike, + _DTypeLike, + _HasDType, + _NestedSequence, + _SupportsArray, ) -from numpy._core.multiarray import ( - unravel_index as unravel_index, - ravel_multi_index as ravel_multi_index, -) +__all__ = [ + "ravel_multi_index", + "unravel_index", + "mgrid", + "ogrid", + "r_", + "c_", + "s_", + "index_exp", + "ix_", + "ndenumerate", + "ndindex", + "fill_diagonal", + "diag_indices", + "diag_indices_from", +] -_T = TypeVar("_T") -_DType = TypeVar("_DType", bound=dtype[Any]) -_BoolType = TypeVar("_BoolType", Literal[True], Literal[False]) -_TupType = TypeVar("_TupType", bound=tuple[Any, ...]) -_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) +### -__all__: list[str] +_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, default=Any, covariant=True) +_BoolT_co = TypeVar("_BoolT_co", bound=bool, default=bool, covariant=True) +_AxisT_co = TypeVar("_AxisT_co", bound=int, default=L[0], covariant=True) +_MatrixT_co = 
TypeVar("_MatrixT_co", bound=bool, default=L[False], covariant=True) +_NDMinT_co = TypeVar("_NDMinT_co", bound=int, default=L[1], covariant=True) +_Trans1DT_co = TypeVar("_Trans1DT_co", bound=int, default=L[-1], covariant=True) -@overload -def ix_(*args: _FiniteNestedSequence[_SupportsDType[_DType]]) -> tuple[ndarray[Any, _DType], ...]: ... -@overload -def ix_(*args: str | _NestedSequence[str]) -> tuple[NDArray[str_], ...]: ... -@overload -def ix_(*args: bytes | _NestedSequence[bytes]) -> tuple[NDArray[bytes_], ...]: ... -@overload -def ix_(*args: bool | _NestedSequence[bool]) -> tuple[NDArray[np.bool], ...]: ... -@overload -def ix_(*args: int | _NestedSequence[int]) -> tuple[NDArray[int_], ...]: ... -@overload -def ix_(*args: float | _NestedSequence[float]) -> tuple[NDArray[float64], ...]: ... -@overload -def ix_(*args: complex | _NestedSequence[complex]) -> tuple[NDArray[complex128], ...]: ... +### + +class ndenumerate(Generic[_ScalarT_co]): + @overload + def __init__[ScalarT: np.generic]( + self: ndenumerate[ScalarT], + arr: _NestedSequence[_SupportsArray[np.dtype[ScalarT]]] | _SupportsArray[np.dtype[ScalarT]], + ) -> None: ... + @overload + def __init__(self: ndenumerate[np.str_], arr: str | _NestedSequence[str]) -> None: ... + @overload + def __init__(self: ndenumerate[np.bytes_], arr: bytes | _NestedSequence[bytes]) -> None: ... + @overload + def __init__(self: ndenumerate[np.bool], arr: bool | _NestedSequence[bool]) -> None: ... + @overload + def __init__(self: ndenumerate[np.intp], arr: int | _NestedSequence[int]) -> None: ... + @overload + def __init__(self: ndenumerate[np.float64], arr: float | _NestedSequence[float]) -> None: ... + @overload + def __init__(self: ndenumerate[np.complex128], arr: complex | _NestedSequence[complex]) -> None: ... + @overload + def __init__(self: ndenumerate[Incomplete], arr: object) -> None: ... 
+ + # The first overload is a (semi-)workaround for a mypy bug (tested with v1.10 and v1.11) + @overload + def __next__( + self: ndenumerate[np.bool | np.number | np.flexible | np.datetime64 | np.timedelta64], + /, + ) -> tuple[_AnyShape, _ScalarT_co]: ... + @overload + def __next__(self: ndenumerate[np.object_], /) -> tuple[_AnyShape, Incomplete]: ... + @overload + def __next__(self, /) -> tuple[_AnyShape, _ScalarT_co]: ... -class nd_grid(Generic[_BoolType]): - sparse: _BoolType - def __init__(self, sparse: _BoolType = ...) -> None: ... + # + def __iter__(self) -> Self: ... + +class ndindex: + @overload + def __init__(self, shape: tuple[SupportsIndex, ...], /) -> None: ... + @overload + def __init__(self, /, *shape: SupportsIndex) -> None: ... + + # + def __iter__(self) -> Self: ... + def __next__(self) -> _AnyShape: ... + +class nd_grid(Generic[_BoolT_co]): + __slots__ = ("sparse",) + + sparse: _BoolT_co + def __init__(self, sparse: _BoolT_co = ...) -> None: ... # stubdefaulter: ignore[missing-default] @overload - def __getitem__( - self: nd_grid[Literal[False]], - key: slice | Sequence[slice], - ) -> NDArray[Any]: ... + def __getitem__(self: nd_grid[L[False]], key: slice | Sequence[slice]) -> NDArray[Incomplete]: ... @overload - def __getitem__( - self: nd_grid[Literal[True]], - key: slice | Sequence[slice], - ) -> tuple[NDArray[Any], ...]: ... + def __getitem__(self: nd_grid[L[True]], key: slice | Sequence[slice]) -> tuple[NDArray[Incomplete], ...]: ... + +@final +class MGridClass(nd_grid[L[False]]): + __slots__ = () -class MGridClass(nd_grid[Literal[False]]): def __init__(self) -> None: ... -mgrid: MGridClass +@final +class OGridClass(nd_grid[L[True]]): + __slots__ = () -class OGridClass(nd_grid[Literal[True]]): def __init__(self) -> None: ... 
-ogrid: OGridClass +class AxisConcatenator(Generic[_AxisT_co, _MatrixT_co, _NDMinT_co, _Trans1DT_co]): + __slots__ = "axis", "matrix", "ndmin", "trans1d" + + makemat: ClassVar[type[np.matrix[tuple[int, int], np.dtype]]] -class AxisConcatenator: - axis: int - matrix: bool - ndmin: int - trans1d: int + axis: _AxisT_co + matrix: _MatrixT_co + ndmin: _NDMinT_co + trans1d: _Trans1DT_co + + # NOTE: mypy does not understand that these default values are the same as the + # TypeVar defaults. Since the workaround would require us to write 16 overloads, + # we ignore the assignment type errors here. def __init__( self, - axis: int = ..., - matrix: bool = ..., - ndmin: int = ..., - trans1d: int = ..., + /, + axis: _AxisT_co = 0, # type: ignore[assignment] + matrix: _MatrixT_co = False, # type: ignore[assignment] + ndmin: _NDMinT_co = 1, # type: ignore[assignment] + trans1d: _Trans1DT_co = -1, # type: ignore[assignment] ) -> None: ... + + # TODO(jorenham): annotate this + def __getitem__(self, key: Incomplete, /) -> Incomplete: ... + def __len__(self, /) -> L[0]: ... + + # Keep in sync with _core.multiarray.concatenate + @staticmethod + @overload + def concatenate[ScalarT: np.generic]( + arrays: _ArrayLike[ScalarT], + /, + axis: SupportsIndex | None = 0, + out: None = None, + *, + dtype: None = None, + casting: _CastingKind | None = "same_kind", + ) -> NDArray[ScalarT]: ... @staticmethod @overload - def concatenate( # type: ignore[misc] - *a: ArrayLike, axis: SupportsIndex = ..., out: None = ... - ) -> NDArray[Any]: ... + def concatenate[ScalarT: np.generic]( + arrays: SupportsLenAndGetItem[ArrayLike], + /, + axis: SupportsIndex | None = 0, + out: None = None, + *, + dtype: _DTypeLike[ScalarT], + casting: _CastingKind | None = "same_kind", + ) -> NDArray[ScalarT]: ... @staticmethod @overload def concatenate( - *a: ArrayLike, axis: SupportsIndex = ..., out: _ArrayType = ... - ) -> _ArrayType: ... 
+ arrays: SupportsLenAndGetItem[ArrayLike], + /, + axis: SupportsIndex | None = 0, + out: None = None, + *, + dtype: DTypeLike | None = None, + casting: _CastingKind | None = "same_kind", + ) -> NDArray[Incomplete]: ... @staticmethod - def makemat( - data: ArrayLike, dtype: DTypeLike = ..., copy: bool = ... - ) -> _Matrix[Any, Any]: ... - - # TODO: Sort out this `__getitem__` method - def __getitem__(self, key: Any) -> Any: ... - -class RClass(AxisConcatenator): - axis: Literal[0] - matrix: Literal[False] - ndmin: Literal[1] - trans1d: Literal[-1] - def __init__(self) -> None: ... + @overload + def concatenate[OutT: np.ndarray]( + arrays: SupportsLenAndGetItem[ArrayLike], + /, + axis: SupportsIndex | None = 0, + *, + out: OutT, + dtype: DTypeLike | None = None, + casting: _CastingKind | None = "same_kind", + ) -> OutT: ... + @staticmethod + @overload + def concatenate[OutT: np.ndarray]( + arrays: SupportsLenAndGetItem[ArrayLike], + /, + axis: SupportsIndex | None, + out: OutT, + *, + dtype: DTypeLike | None = None, + casting: _CastingKind | None = "same_kind", + ) -> OutT: ... -r_: RClass +@final +class RClass(AxisConcatenator[L[0], L[False], L[1], L[-1]]): + __slots__ = () -class CClass(AxisConcatenator): - axis: Literal[-1] - matrix: Literal[False] - ndmin: Literal[2] - trans1d: Literal[0] - def __init__(self) -> None: ... + def __init__(self, /) -> None: ... + +@final +class CClass(AxisConcatenator[L[-1], L[False], L[2], L[0]]): + __slots__ = () + + def __init__(self, /) -> None: ... -c_: CClass +class IndexExpression(Generic[_BoolT_co]): + __slots__ = ("maketuple",) -class IndexExpression(Generic[_BoolType]): - maketuple: _BoolType - def __init__(self, maketuple: _BoolType) -> None: ... + maketuple: _BoolT_co + def __init__(self, maketuple: _BoolT_co) -> None: ... @overload - def __getitem__(self, item: _TupType) -> _TupType: ... # type: ignore[misc] + def __getitem__[TupleT: tuple[Any, ...]](self, item: TupleT) -> TupleT: ... 
@overload - def __getitem__(self: IndexExpression[Literal[True]], item: _T) -> tuple[_T]: ... + def __getitem__[T](self: IndexExpression[L[True]], item: T) -> tuple[T]: ... @overload - def __getitem__(self: IndexExpression[Literal[False]], item: _T) -> _T: ... + def __getitem__[T](self: IndexExpression[L[False]], item: T) -> T: ... + +@overload +def ix_[DTypeT: np.dtype]( + *args: _NestedSequence[_HasDType[DTypeT]] | _HasDType[DTypeT] +) -> tuple[np.ndarray[_AnyShape, DTypeT], ...]: ... +@overload +def ix_(*args: str | _NestedSequence[str]) -> tuple[NDArray[np.str_], ...]: ... +@overload +def ix_(*args: bytes | _NestedSequence[bytes]) -> tuple[NDArray[np.bytes_], ...]: ... +@overload +def ix_(*args: bool | _NestedSequence[bool]) -> tuple[NDArray[np.bool], ...]: ... +@overload +def ix_(*args: int | _NestedSequence[int]) -> tuple[NDArray[np.intp], ...]: ... +@overload +def ix_(*args: float | _NestedSequence[float]) -> tuple[NDArray[np.float64], ...]: ... +@overload +def ix_(*args: complex | _NestedSequence[complex]) -> tuple[NDArray[np.complex128], ...]: ... + +# +def fill_diagonal(a: NDArray[Any], val: object, wrap: bool = False) -> None: ... + +# +def diag_indices(n: int, ndim: int = 2) -> tuple[NDArray[np.intp], ...]: ... +def diag_indices_from(arr: ArrayLike) -> tuple[NDArray[np.intp], ...]: ... -index_exp: IndexExpression[Literal[True]] -s_: IndexExpression[Literal[False]] +# +mgrid: Final[MGridClass] = ... +ogrid: Final[OGridClass] = ... -def fill_diagonal(a: NDArray[Any], val: Any, wrap: bool = ...) -> None: ... -def diag_indices(n: int, ndim: int = ...) -> tuple[NDArray[int_], ...]: ... -def diag_indices_from(arr: ArrayLike) -> tuple[NDArray[int_], ...]: ... +r_: Final[RClass] = ... +c_: Final[CClass] = ... -# NOTE: see `numpy/__init__.pyi` for `ndenumerate` and `ndindex` +index_exp: Final[IndexExpression[L[True]]] = ... +s_: Final[IndexExpression[L[False]]] = ... 
diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py index a38b0017ee5d..ad1ee8785328 100644 --- a/numpy/lib/_iotools.py +++ b/numpy/lib/_iotools.py @@ -3,6 +3,8 @@ """ __docformat__ = "restructuredtext en" +import itertools + import numpy as np import numpy._core.numeric as nx from numpy._utils import asbytes, asunicode @@ -11,8 +13,7 @@ def _decode_line(line, encoding=None): """Decode bytes from binary input streams. - Defaults to decoding from 'latin1'. That differs from the behavior of - np.compat.asunicode that decodes from 'ascii'. + Defaults to decoding from 'latin1'. Parameters ---------- @@ -72,15 +73,13 @@ def has_nested_fields(ndtype): Examples -------- + >>> import numpy as np >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)]) >>> np.lib._iotools.has_nested_fields(dt) False """ - for name in ndtype.names or (): - if ndtype[name].names is not None: - return True - return False + return any(ndtype[name].names is not None for name in ndtype.names or ()) def flatten_dtype(ndtype, flatten_base=False): @@ -100,6 +99,7 @@ def flatten_dtype(ndtype, flatten_base=False): Examples -------- + >>> import numpy as np >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), ... 
('block', int, (2, 3))]) >>> np.lib._iotools.flatten_dtype(dt) @@ -181,7 +181,7 @@ def __init__(self, delimiter=None, comments='#', autostrip=True, elif hasattr(delimiter, '__iter__'): _handyman = self._variablewidth_splitter idx = np.cumsum([0] + list(delimiter)) - delimiter = [slice(i, j) for (i, j) in zip(idx[:-1], idx[1:])] + delimiter = [slice(i, j) for (i, j) in itertools.pairwise(idx)] # Delimiter is a single integer elif int(delimiter): (_handyman, delimiter) = ( @@ -266,6 +266,7 @@ class NameValidator: Examples -------- + >>> import numpy as np >>> validator = np.lib._iotools.NameValidator() >>> validator(['file', 'field2', 'with space', 'CaSe']) ('file_', 'field2', 'with_space', 'CaSe') @@ -278,8 +279,8 @@ class NameValidator: """ - defaultexcludelist = ['return', 'file', 'print'] - defaultdeletechars = set(r"""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""") + defaultexcludelist = 'return', 'file', 'print' + defaultdeletechars = frozenset(r"""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""") def __init__(self, excludelist=None, deletechars=None, case_sensitive=None, replace_space='_'): @@ -290,7 +291,7 @@ def __init__(self, excludelist=None, deletechars=None, self.excludelist = excludelist # Process the list of characters to delete if deletechars is None: - delete = self.defaultdeletechars + delete = set(self.defaultdeletechars) else: delete = set(deletechars) delete.add('"') @@ -303,7 +304,7 @@ def __init__(self, excludelist=None, deletechars=None, elif case_sensitive.startswith('l'): self.case_converter = lambda x: x.lower() else: - msg = 'unrecognized case_sensitive value %s.' % case_sensitive + msg = f'unrecognized case_sensitive value {case_sensitive}.' raise ValueError(msg) self.replace_space = replace_space @@ -354,7 +355,7 @@ def validate(self, names, defaultfmt="f%i", nbfields=None): replace_space = self.replace_space # Initializes some variables ... 
validatednames = [] - seen = dict() + seen = {} nbempty = 0 for item in names: @@ -372,7 +373,7 @@ def validate(self, names, defaultfmt="f%i", nbfields=None): item += '_' cnt = seen.get(item, 0) if cnt > 0: - validatednames.append(item + '_%d' % cnt) + validatednames.append(f"{item}_{cnt}") else: validatednames.append(item) seen[item] = cnt + 1 @@ -403,6 +404,7 @@ def str2bool(value): Examples -------- + >>> import numpy as np >>> np.lib._iotools.str2bool('TRUE') True >>> np.lib._iotools.str2bool('false') @@ -495,7 +497,7 @@ class StringConverter: upgrade or not. Default is False. """ - _mapper = [(nx.bool, str2bool, False), + _mapper = [(nx.bool, str2bool, False), # noqa: RUF012 (nx.int_, int, -1),] # On 32-bit systems, we need to make sure that we explicitly include @@ -564,7 +566,7 @@ def upgrade_mapper(cls, func, default=None): >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate) """ # Func is a single functions - if hasattr(func, '__call__'): + if callable(func): cls._mapper.insert(-1, (cls._getsubdtype(default), func, default)) return elif hasattr(func, '__iter__'): @@ -611,7 +613,7 @@ def __init__(self, dtype_or_func=None, default=None, missing_values=None, dtype = np.dtype(dtype_or_func) except TypeError: # dtype_or_func must be a function, then - if not hasattr(dtype_or_func, '__call__'): + if not callable(dtype_or_func): errmsg = ("The input argument `dtype` is neither a" " function nor a dtype (got '%s' instead)") raise TypeError(errmsg % type(dtype_or_func)) @@ -696,7 +698,7 @@ def _strict_call(self, value): if not self._status: self._checked = False return self.default - raise ValueError("Cannot convert string '%s'" % value) + raise ValueError(f"Cannot convert string '{value}'") def __call__(self, value): return self._callingfunction(value) @@ -844,6 +846,7 @@ def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs): Examples -------- + >>> import numpy as np >>> np.lib._iotools.easy_dtype(float) dtype('float64') >>> 
np.lib._iotools.easy_dtype("i4, f8") @@ -867,7 +870,7 @@ def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs): elif isinstance(names, str): names = names.split(",") names = validate(names, nbfields=nbfields, defaultfmt=defaultfmt) - ndtype = np.dtype(dict(formats=ndtype, names=names)) + ndtype = np.dtype({"formats": ndtype, "names": names}) else: # Explicit names if names is not None: @@ -887,7 +890,7 @@ def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs): elif ndtype.names is not None: validate = NameValidator(**validationargs) # Default initial names : should we change the format ? - numbered_names = tuple("f%i" % i for i in range(len(ndtype.names))) + numbered_names = tuple(f"f{i}" for i in range(len(ndtype.names))) if ((ndtype.names == numbered_names) and (defaultfmt != "f%i")): ndtype.names = validate([''] * len(ndtype.names), defaultfmt=defaultfmt) diff --git a/numpy/lib/_iotools.pyi b/numpy/lib/_iotools.pyi new file mode 100644 index 000000000000..7baca9c78045 --- /dev/null +++ b/numpy/lib/_iotools.pyi @@ -0,0 +1,113 @@ +from collections.abc import Callable, Iterable, Sequence +from typing import ( + Any, + ClassVar, + Final, + Literal, + TypedDict, + Unpack, + overload, + type_check_only, +) + +import numpy as np +import numpy.typing as npt +from numpy._typing._dtype_like import _DTypeLikeNested + +@type_check_only +class _NameValidatorKwargs(TypedDict, total=False): + excludelist: Iterable[str] | None + deletechars: Iterable[str] | None + case_sensitive: Literal["upper", "lower"] | bool | None + replace_space: str + +### + +__docformat__: Final = "restructuredtext en" + +class ConverterError(Exception): ... +class ConverterLockError(ConverterError): ... +class ConversionWarning(UserWarning): ... 
+ +class LineSplitter: + delimiter: str | int | Iterable[int] | None + comments: str + encoding: str | None + + def __init__( + self, + /, + delimiter: str | bytes | int | Iterable[int] | None = None, + comments: str | bytes = "#", + autostrip: bool = True, + encoding: str | None = None, + ) -> None: ... + def __call__(self, /, line: str | bytes) -> list[str]: ... + def autostrip[T](self, /, method: Callable[[T], Iterable[str]]) -> Callable[[T], list[str]]: ... + +class NameValidator: + defaultexcludelist: ClassVar[Sequence[str]] = ... + defaultdeletechars: ClassVar[frozenset[str]] = ... + excludelist: list[str] + deletechars: set[str] + case_converter: Callable[[str], str] + replace_space: str + + def __init__( + self, + /, + excludelist: Iterable[str] | None = None, + deletechars: Iterable[str] | None = None, + case_sensitive: Literal["upper", "lower"] | bool | None = None, + replace_space: str = "_", + ) -> None: ... + def __call__(self, /, names: Iterable[str], defaultfmt: str = "f%i", nbfields: int | None = None) -> tuple[str, ...]: ... + def validate(self, /, names: Iterable[str], defaultfmt: str = "f%i", nbfields: int | None = None) -> tuple[str, ...]: ... + +class StringConverter: + func: Callable[[str], Any] | None + default: Any + missing_values: set[str] + type: np.dtype[np.datetime64] | np.generic + + def __init__( + self, + /, + dtype_or_func: npt.DTypeLike | None = None, + default: None = None, + missing_values: Iterable[str] | None = None, + locked: bool = False, + ) -> None: ... + def update( + self, + /, + func: Callable[[str], Any], + default: object | None = None, + testing_value: str | None = None, + missing_values: str = "", + locked: bool = False, + ) -> None: ... + # + def __call__(self, /, value: str) -> Any: ... + def upgrade(self, /, value: str) -> Any: ... + def iterupgrade(self, /, value: Iterable[str] | str) -> None: ... 
+ + # + @classmethod + def upgrade_mapper(cls, func: Callable[[str], Any], default: object | None = None) -> None: ... + +def _decode_line(line: str | bytes, encoding: str | None = None) -> str: ... +def _is_string_like(obj: object) -> bool: ... +def _is_bytes_like(obj: object) -> bool: ... +def has_nested_fields(ndtype: np.dtype[np.void]) -> bool: ... +def flatten_dtype(ndtype: np.dtype[np.void], flatten_base: bool = False) -> type[np.dtype]: ... +@overload +def str2bool(value: Literal["false", "False", "FALSE"]) -> Literal[False]: ... +@overload +def str2bool(value: Literal["true", "True", "TRUE"]) -> Literal[True]: ... +def easy_dtype( + ndtype: str | Sequence[_DTypeLikeNested], + names: str | Sequence[str] | None = None, + defaultfmt: str = "f%i", + **validationargs: Unpack[_NameValidatorKwargs], +) -> np.dtype[np.void]: ... diff --git a/numpy/lib/_nanfunctions_impl.py b/numpy/lib/_nanfunctions_impl.py index baedb7d12498..86e3e9933784 100644 --- a/numpy/lib/_nanfunctions_impl.py +++ b/numpy/lib/_nanfunctions_impl.py @@ -22,12 +22,12 @@ """ import functools import warnings + import numpy as np import numpy._core.numeric as _nx +from numpy._core import overrides from numpy.lib import _function_base_impl as fnb from numpy.lib._function_base_impl import _weights_are_valid -from numpy._core import overrides - array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy') @@ -141,7 +141,7 @@ def _copyto(a, val, mask): return a -def _remove_nan_1d(arr1d, overwrite_input=False): +def _remove_nan_1d(arr1d, second_arr1d=None, overwrite_input=False): """ Equivalent to arr1d[~arr1d.isnan()], but in a different order @@ -151,6 +151,8 @@ def _remove_nan_1d(arr1d, overwrite_input=False): ---------- arr1d : ndarray Array to remove nans from + second_arr1d : ndarray or None + A second array which will have the same positions removed as arr1d. 
overwrite_input : bool True if `arr1d` can be modified in place @@ -158,6 +160,8 @@ def _remove_nan_1d(arr1d, overwrite_input=False): ------- res : ndarray Array with nan elements removed + second_res : ndarray or None + Second array with nan element positions of first array removed. overwrite_input : bool True if `res` can be modified in place, given the constraint on the input @@ -172,9 +176,12 @@ def _remove_nan_1d(arr1d, overwrite_input=False): if s.size == arr1d.size: warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=6) - return arr1d[:0], True + if second_arr1d is None: + return arr1d[:0], None, True + else: + return arr1d[:0], second_arr1d[:0], True elif s.size == 0: - return arr1d, overwrite_input + return arr1d, second_arr1d, overwrite_input else: if not overwrite_input: arr1d = arr1d.copy() @@ -183,7 +190,15 @@ def _remove_nan_1d(arr1d, overwrite_input=False): # fill nans in beginning of array with non-nans of end arr1d[s[:enonan.size]] = enonan - return arr1d[:-s.size], True + if second_arr1d is None: + return arr1d[:-s.size], None, True + else: + if not overwrite_input: + second_arr1d = second_arr1d.copy() + enonan = second_arr1d[-s.size:][~c[-s.size:]] + second_arr1d[s[:enonan.size]] = enonan + + return arr1d[:-s.size], second_arr1d[:-s.size], True def _divide_by_count(a, b, out=None): @@ -217,17 +232,16 @@ def _divide_by_count(a, b, out=None): return np.divide(a, b, out=a, casting='unsafe') else: return np.divide(a, b, out=out, casting='unsafe') + elif out is None: + # Precaution against reduced object arrays + try: + return a.dtype.type(a / b) + except AttributeError: + return a / b else: - if out is None: - # Precaution against reduced object arrays - try: - return a.dtype.type(a / b) - except AttributeError: - return a / b - else: - # This is questionable, but currently a numpy scalar can - # be output to a zero dimensional array. 
- return np.divide(a, b, out=out, casting='unsafe') + # This is questionable, but currently a numpy scalar can + # be output to a zero dimensional array. + return np.divide(a, b, out=out, casting='unsafe') def _nanmin_dispatcher(a, axis=None, out=None, keepdims=None, @@ -256,8 +270,6 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, is ``None``; if provided, it must have the same shape as the expected output, but the type will be cast if necessary. See :ref:`ufuncs-output-type` for more details. - - .. versionadded:: 1.8.0 keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, @@ -267,8 +279,6 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, `keepdims` will be passed through to the `min` method of sub-classes of `ndarray`. If the sub-classes methods does not implement `keepdims` any exceptions will be raised. - - .. versionadded:: 1.8.0 initial : scalar, optional The maximum value of an output element. Must be present to allow computation on empty slice. See `~numpy.ufunc.reduce` for details. 
@@ -315,6 +325,7 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, np.nan]]) >>> np.nanmin(a) 1.0 @@ -339,7 +350,7 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, if where is not np._NoValue: kwargs['where'] = where - if type(a) is np.ndarray and a.dtype != np.object_: + if (type(a) is np.ndarray or type(a) is np.memmap) and a.dtype != np.object_: # Fast, but not safe for subclasses of ndarray, or object arrays, # which do not implement isnan (gh-9009), or fmin correctly (gh-8975) res = np.fmin.reduce(a, axis=axis, out=out, **kwargs) @@ -389,19 +400,14 @@ def nanmax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, is ``None``; if provided, it must have the same shape as the expected output, but the type will be cast if necessary. See :ref:`ufuncs-output-type` for more details. - - .. versionadded:: 1.8.0 keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `a`. - If the value is anything but the default, then `keepdims` will be passed through to the `max` method of sub-classes of `ndarray`. If the sub-classes methods does not implement `keepdims` any exceptions will be raised. - - .. versionadded:: 1.8.0 initial : scalar, optional The minimum value of an output element. Must be present to allow computation on empty slice. See `~numpy.ufunc.reduce` for details. 
@@ -448,6 +454,7 @@ def nanmax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, np.nan]]) >>> np.nanmax(a) 3.0 @@ -472,7 +479,7 @@ def nanmax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, if where is not np._NoValue: kwargs['where'] = where - if type(a) is np.ndarray and a.dtype != np.object_: + if (type(a) is np.ndarray or type(a) is np.memmap) and a.dtype != np.object_: # Fast, but not safe for subclasses of ndarray, or object arrays, # which do not implement isnan (gh-9009), or fmax correctly (gh-8975) res = np.fmax.reduce(a, axis=axis, out=out, **kwargs) @@ -536,6 +543,7 @@ def nanargmin(a, axis=None, out=None, *, keepdims=np._NoValue): Examples -------- + >>> import numpy as np >>> a = np.array([[np.nan, 4], [2, 3]]) >>> np.argmin(a) 0 @@ -597,6 +605,7 @@ def nanargmax(a, axis=None, out=None, *, keepdims=np._NoValue): Examples -------- + >>> import numpy as np >>> a = np.array([[np.nan, 4], [2, 3]]) >>> np.argmax(a) 0 @@ -647,28 +656,21 @@ def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, the platform (u)intp. In that case, the default will be either (u)int32 or (u)int64 depending on whether the platform is 32 or 64 bits. For inexact inputs, dtype must be inexact. - - .. versionadded:: 1.8.0 out : ndarray, optional Alternate output array in which to place the result. The default is ``None``. If provided, it must have the same shape as the expected output, but the type will be cast if necessary. See :ref:`ufuncs-output-type` for more details. The casting of NaN to integer can yield unexpected results. - - .. versionadded:: 1.8.0 keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `a`. 
- If the value is anything but the default, then `keepdims` will be passed through to the `mean` or `sum` methods of sub-classes of `ndarray`. If the sub-classes methods does not implement `keepdims` any exceptions will be raised. - - .. versionadded:: 1.8.0 initial : scalar, optional Starting value for the sum. See `~numpy.ufunc.reduce` for details. @@ -699,6 +701,7 @@ def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, Examples -------- + >>> import numpy as np >>> np.nansum(1) 1 >>> np.nansum([1]) @@ -714,7 +717,6 @@ def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, inf >>> np.nansum([1, np.nan, -np.inf]) -inf - >>> from numpy.testing import suppress_warnings >>> with np.errstate(invalid="ignore"): ... np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present np.float64(nan) @@ -739,8 +741,6 @@ def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, One is returned for slices that are all-NaN or empty. - .. versionadded:: 1.10.0 - Parameters ---------- a : array_like @@ -790,6 +790,7 @@ def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, Examples -------- + >>> import numpy as np >>> np.nanprod(1) 1 >>> np.nanprod([1]) @@ -821,8 +822,6 @@ def nancumsum(a, axis=None, dtype=None, out=None): Zeros are returned for slices that are all-NaN or empty. - .. versionadded:: 1.12.0 - Parameters ---------- a : array_like @@ -857,6 +856,7 @@ def nancumsum(a, axis=None, dtype=None, out=None): Examples -------- + >>> import numpy as np >>> np.nancumsum(1) array([1]) >>> np.nancumsum([1]) @@ -891,8 +891,6 @@ def nancumprod(a, axis=None, dtype=None, out=None): Ones are returned for slices that are all-NaN or empty. - .. 
versionadded:: 1.12.0 - Parameters ---------- a : array_like @@ -924,6 +922,7 @@ def nancumprod(a, axis=None, dtype=None, out=None): Examples -------- + >>> import numpy as np >>> np.nancumprod(1) array([1]) >>> np.nancumprod([1]) @@ -962,8 +961,6 @@ def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, For all-NaN slices, NaN is returned and a `RuntimeWarning` is raised. - .. versionadded:: 1.8.0 - Parameters ---------- a : array_like @@ -1021,6 +1018,7 @@ def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, Examples -------- + >>> import numpy as np >>> a = np.array([[1, np.nan], [3, 4]]) >>> np.nanmean(a) 2.6666666666666665 @@ -1061,7 +1059,7 @@ def _nanmedian1d(arr1d, overwrite_input=False): Private function for rank 1 arrays. Compute the median ignoring NaNs. See nanmedian for parameter usage """ - arr1d_parsed, overwrite_input = _remove_nan_1d( + arr1d_parsed, _, overwrite_input = _remove_nan_1d( arr1d, overwrite_input=overwrite_input, ) @@ -1131,8 +1129,6 @@ def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValu Returns the median of the array elements. - .. 
versionadded:: 1.9.0 - Parameters ---------- a : array_like @@ -1186,6 +1182,7 @@ def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValu Examples -------- + >>> import numpy as np >>> a = np.array([[10.0, 7, 4], [3, 2, 1]]) >>> a[0, 1] = np.nan >>> a @@ -1222,7 +1219,7 @@ def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValu def _nanpercentile_dispatcher( a, q, axis=None, out=None, overwrite_input=None, - method=None, keepdims=None, *, weights=None, interpolation=None): + method=None, keepdims=None, *, weights=None): return (a, q, out, weights) @@ -1237,7 +1234,6 @@ def nanpercentile( keepdims=np._NoValue, *, weights=None, - interpolation=None, ): """ Compute the qth percentile of the data along the specified axis, @@ -1245,8 +1241,6 @@ def nanpercentile( Returns the qth percentile(s) of the array elements. - .. versionadded:: 1.9.0 - Parameters ---------- a : array_like @@ -1318,11 +1312,6 @@ def nanpercentile( .. versionadded:: 2.0.0 - interpolation : str, optional - Deprecated name for the method keyword argument. - - .. deprecated:: 1.22.0 - Returns ------- percentile : scalar or ndarray @@ -1350,6 +1339,7 @@ def nanpercentile( Examples -------- + >>> import numpy as np >>> a = np.array([[10., 7., 4.], [3., 2., 1.]]) >>> a[0][1] = np.nan >>> a @@ -1383,17 +1373,12 @@ def nanpercentile( The American Statistician, 50(4), pp. 361-365, 1996 """ - if interpolation is not None: - method = fnb._check_interpolation_as_method( - method, interpolation, "nanpercentile") - a = np.asanyarray(a) if a.dtype.kind == "c": raise TypeError("a must be an array of real numbers") - q = np.true_divide(q, a.dtype.type(100) if a.dtype.kind == "f" else 100) - # undo any decay that the ufunc performed (see gh-13105) - q = np.asanyarray(q) + weak_q = type(q) in (int, float) # use weak promotion for final result type + q = np.true_divide(q, 100, out=...) 
if not fnb._quantile_is_valid(q): raise ValueError("Percentiles must be in the range [0, 100]") @@ -1409,12 +1394,11 @@ def nanpercentile( raise ValueError("Weights must be non-negative.") return _nanquantile_unchecked( - a, q, axis, out, overwrite_input, method, keepdims, weights) + a, q, axis, out, overwrite_input, method, keepdims, weights, weak_q) def _nanquantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, - method=None, keepdims=None, *, weights=None, - interpolation=None): + method=None, keepdims=None, *, weights=None): return (a, q, out, weights) @@ -1429,15 +1413,12 @@ def nanquantile( keepdims=np._NoValue, *, weights=None, - interpolation=None, ): """ Compute the qth quantile of the data along the specified axis, while ignoring nan values. Returns the qth quantile(s) of the array elements. - .. versionadded:: 1.15.0 - Parameters ---------- a : array_like @@ -1508,11 +1489,6 @@ def nanquantile( .. versionadded:: 2.0.0 - interpolation : str, optional - Deprecated name for the method keyword argument. - - .. deprecated:: 1.22.0 - Returns ------- quantile : scalar or ndarray @@ -1540,6 +1516,7 @@ def nanquantile( Examples -------- + >>> import numpy as np >>> a = np.array([[10., 7., 4.], [3., 2., 1.]]) >>> a[0][1] = np.nan >>> a @@ -1572,20 +1549,12 @@ def nanquantile( The American Statistician, 50(4), pp. 361-365, 1996 """ - - if interpolation is not None: - method = fnb._check_interpolation_as_method( - method, interpolation, "nanquantile") - a = np.asanyarray(a) if a.dtype.kind == "c": raise TypeError("a must be an array of real numbers") - # Use dtype of array if possible (e.g., if q is a python int or float). 
- if isinstance(q, (int, float)) and a.dtype.kind == "f": - q = np.asanyarray(q, dtype=a.dtype) - else: - q = np.asanyarray(q) + weak_q = type(q) in (int, float) # use weak promotion for final result type + q = np.asanyarray(q) if not fnb._quantile_is_valid(q): raise ValueError("Quantiles must be in the range [0, 1]") @@ -1602,7 +1571,7 @@ def nanquantile( raise ValueError("Weights must be non-negative.") return _nanquantile_unchecked( - a, q, axis, out, overwrite_input, method, keepdims, weights) + a, q, axis, out, overwrite_input, method, keepdims, weights, weak_q) def _nanquantile_unchecked( @@ -1614,6 +1583,7 @@ def _nanquantile_unchecked( method="linear", keepdims=np._NoValue, weights=None, + weak_q=False, ): """Assumes that q is in [0, 1], and is an ndarray""" # apply_along_axis in _nanpercentile doesn't handle empty arrays well, @@ -1628,17 +1598,19 @@ def _nanquantile_unchecked( axis=axis, out=out, overwrite_input=overwrite_input, - method=method) + method=method, + weak_q=weak_q) def _nanquantile_ureduce_func( - a: np.array, - q: np.array, - weights: np.array, - axis: int = None, + a: np.ndarray, + q: np.ndarray, + weights: np.ndarray, + axis: int | None = None, out=None, overwrite_input: bool = False, method="linear", + weak_q=False, ): """ Private function that doesn't support extended axis or keepdims. @@ -1648,15 +1620,39 @@ def _nanquantile_ureduce_func( if axis is None or a.ndim == 1: part = a.ravel() wgt = None if weights is None else weights.ravel() - result = _nanquantile_1d(part, q, overwrite_input, method, weights=wgt) - else: + result = _nanquantile_1d(part, q, overwrite_input, method, + weights=wgt, weak_q=weak_q) + # Note that this code could try to fill in `out` right away + elif weights is None: result = np.apply_along_axis(_nanquantile_1d, axis, a, q, - overwrite_input, method, weights) + overwrite_input, method, weights, weak_q) # apply_along_axis fills in collapsed axis with results. 
- # Move that axis to the beginning to match percentile's + # Move those axes to the beginning to match percentile's # convention. if q.ndim != 0: - result = np.moveaxis(result, axis, 0) + from_ax = [axis + i for i in range(q.ndim)] + result = np.moveaxis(result, from_ax, list(range(q.ndim))) + else: + # We need to apply along axis over 2 arrays, a and weights. + # move operation axes to end for simplicity: + a = np.moveaxis(a, axis, -1) + if weights is not None: + weights = np.moveaxis(weights, axis, -1) + if out is not None: + result = out + else: + # weights are limited to `inverted_cdf` so the result dtype + # is known to be identical to that of `a` here: + result = np.empty_like(a, shape=q.shape + a.shape[:-1]) + + for ii in np.ndindex(a.shape[:-1]): + result[(...,) + ii] = _nanquantile_1d( + a[ii], q, weights=weights[ii], + overwrite_input=overwrite_input, method=method, + weak_q=weak_q, + ) + # This path dealt with `out` already... + return result if out is not None: out[...] = result @@ -1665,13 +1661,15 @@ def _nanquantile_ureduce_func( def _nanquantile_1d( arr1d, q, overwrite_input=False, method="linear", weights=None, + weak_q=False, ): """ Private function for rank 1 arrays. Compute quantile ignoring NaNs. See nanpercentile for parameter usage """ - arr1d, overwrite_input = _remove_nan_1d(arr1d, - overwrite_input=overwrite_input) + # TODO: What to do when arr1d = [1, np.nan] and weights = [0, 1]? + arr1d, weights, overwrite_input = _remove_nan_1d(arr1d, + second_arr1d=weights, overwrite_input=overwrite_input) if arr1d.size == 0: # convert to scalar return np.full(q.shape, np.nan, dtype=arr1d.dtype)[()] @@ -1682,6 +1680,7 @@ def _nanquantile_1d( overwrite_input=overwrite_input, method=method, weights=weights, + weak_q=weak_q, ) @@ -1704,8 +1703,6 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, For all-NaN slices or slices with zero degrees of freedom, NaN is returned and a `RuntimeWarning` is raised. - .. 
versionadded:: 1.8.0 - Parameters ---------- a : array_like @@ -1742,7 +1739,7 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, The axis for the calculation of the mean should be the same as used in the call to this var function. - .. versionadded:: 1.26.0 + .. versionadded:: 2.0.0 correction : {int, float}, optional Array API compatible name for the ``ddof`` parameter. Only one of them @@ -1792,6 +1789,7 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, Examples -------- + >>> import numpy as np >>> a = np.array([[1, np.nan], [3, 4]]) >>> np.nanvar(a) 1.5555555555555554 @@ -1897,8 +1895,6 @@ def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, For all-NaN slices or slices with zero degrees of freedom, NaN is returned and a `RuntimeWarning` is raised. - .. versionadded:: 1.8.0 - Parameters ---------- a : array_like @@ -1940,7 +1936,7 @@ def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, The axis for the calculation of the mean should be the same as used in the call to this std function. - .. versionadded:: 1.26.0 + .. versionadded:: 2.0.0 correction : {int, float}, optional Array API compatible name for the ``ddof`` parameter. 
Only one of them @@ -1988,6 +1984,7 @@ def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, Examples -------- + >>> import numpy as np >>> a = np.array([[1, np.nan], [3, 4]]) >>> np.nanstd(a) 1.247219128924647 diff --git a/numpy/lib/_nanfunctions_impl.pyi b/numpy/lib/_nanfunctions_impl.pyi index d81f883f76c3..fd5d277cbd7d 100644 --- a/numpy/lib/_nanfunctions_impl.pyi +++ b/numpy/lib/_nanfunctions_impl.pyi @@ -1,26 +1,36 @@ from numpy._core.fromnumeric import ( - amin, amax, - argmin, + amin, argmax, - sum, - prod, - cumsum, + argmin, cumprod, + cumsum, mean, + prod, + std, + sum, var, - std -) - -from numpy.lib._function_base_impl import ( - median, - percentile, - quantile, ) +from numpy.lib._function_base_impl import median, percentile, quantile -__all__: list[str] +__all__ = [ + "nansum", + "nanmax", + "nanmin", + "nanargmax", + "nanargmin", + "nanmean", + "nanmedian", + "nanpercentile", + "nanvar", + "nanstd", + "nanprod", + "nancumsum", + "nancumprod", + "nanquantile", +] -# NOTE: In reaility these functions are not aliases but distinct functions +# NOTE: In reality these functions are not aliases but distinct functions # with identical signatures. nanmin = amin nanmax = amax diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index 0ae4ee2e9386..34e5985ea2a2 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -1,32 +1,41 @@ """ IO related functions. """ -import os -import re +import contextlib import functools import itertools +import operator +import os +import pickle +import re import warnings import weakref -import contextlib -import operator -from operator import itemgetter, index as opindex, methodcaller from collections.abc import Mapping -import pickle +from operator import itemgetter import numpy as np -from . 
import format -from ._datasource import DataSource from numpy._core import overrides -from numpy._core.multiarray import packbits, unpackbits from numpy._core._multiarray_umath import _load_from_filelike -from numpy._core.overrides import set_array_function_like_doc, set_module -from ._iotools import ( - LineSplitter, NameValidator, StringConverter, ConverterError, - ConverterLockError, ConversionWarning, _is_string_like, - has_nested_fields, flatten_dtype, easy_dtype, _decode_line - ) -from numpy._utils import asunicode, asbytes +from numpy._core.multiarray import packbits, unpackbits +from numpy._core.overrides import finalize_array_function_like, set_module +from numpy._utils import asbytes, asunicode +from . import format +from ._datasource import DataSource # noqa: F401 +from ._format_impl import _MAX_HEADER_SIZE +from ._iotools import ( + ConversionWarning, + ConverterError, + ConverterLockError, + LineSplitter, + NameValidator, + StringConverter, + _decode_line, + _is_string_like, + easy_dtype, + flatten_dtype, + has_nested_fields, +) __all__ = [ 'savetxt', 'loadtxt', 'genfromtxt', 'load', 'save', 'savez', @@ -51,6 +60,7 @@ class BagObj: Examples -------- + >>> import numpy as np >>> from numpy.lib._npyio_impl import BagObj as BO >>> class BagDemo: ... def __getitem__(self, key): # An instance of BagObj(BagDemo) @@ -131,14 +141,10 @@ class NpzFile(Mapping): to getitem access on the `NpzFile` instance itself. allow_pickle : bool, optional Allow loading pickled data. Default: False - - .. versionchanged:: 1.16.3 - Made default False in response to CVE-2019-6446. - pickle_kwargs : dict, optional Additional keyword arguments to pass on to pickle.load. These are only useful when loading object arrays saved on - Python 2 when using Python 3. + Python 2. max_header_size : int, optional Maximum allowed size of the header. Large headers may not be safe to load securely and thus require explicitly passing a larger value. 
@@ -157,6 +163,7 @@ class NpzFile(Mapping): Examples -------- + >>> import numpy as np >>> from tempfile import TemporaryFile >>> outfile = TemporaryFile() >>> x = np.arange(10) @@ -184,20 +191,17 @@ class NpzFile(Mapping): def __init__(self, fid, own_fid=False, allow_pickle=False, pickle_kwargs=None, *, - max_header_size=format._MAX_HEADER_SIZE): + max_header_size=_MAX_HEADER_SIZE): # Import is postponed to here since zipfile depends on gzip, an # optional component of the so-called standard library. _zip = zipfile_factory(fid) - self._files = _zip.namelist() - self.files = [] + _files = _zip.namelist() + self.files = [name.removesuffix(".npy") for name in _files] + self._files = dict(zip(self.files, _files)) + self._files.update(zip(_files, _files)) self.allow_pickle = allow_pickle self.max_header_size = max_header_size self.pickle_kwargs = pickle_kwargs - for x in self._files: - if x.endswith('.npy'): - self.files.append(x[:-4]) - else: - self.files.append(x) self.zip = _zip self.f = BagObj(self) if own_fid: @@ -233,37 +237,34 @@ def __len__(self): return len(self.files) def __getitem__(self, key): - # FIXME: This seems like it will copy strings around - # more than is strictly necessary. The zipfile - # will read the string and then - # the format.read_array will copy the string - # to another place in memory. - # It would be better if the zipfile could read - # (or at least uncompress) the data - # directly into the array memory. 
- member = False - if key in self._files: - member = True - elif key in self.files: - member = True - key += '.npy' - if member: - bytes = self.zip.open(key) - magic = bytes.read(len(format.MAGIC_PREFIX)) - bytes.close() - if magic == format.MAGIC_PREFIX: - bytes = self.zip.open(key) - return format.read_array(bytes, - allow_pickle=self.allow_pickle, - pickle_kwargs=self.pickle_kwargs, - max_header_size=self.max_header_size) - else: - return self.zip.read(key) + try: + key = self._files[key] + except KeyError: + raise KeyError(f"{key} is not a file in the archive") from None else: - raise KeyError(f"{key} is not a file in the archive") + with self.zip.open(key) as bytes: + magic = bytes.read(len(format.MAGIC_PREFIX)) + bytes.seek(0) + if magic == format.MAGIC_PREFIX: + # FIXME: This seems like it will copy strings around + # more than is strictly necessary. The zipfile + # will read the string and then + # the format.read_array will copy the string + # to another place in memory. + # It would be better if the zipfile could read + # (or at least uncompress) the data + # directly into the array memory. + return format.read_array( + bytes, + allow_pickle=self.allow_pickle, + pickle_kwargs=self.pickle_kwargs, + max_header_size=self.max_header_size + ) + else: + return bytes.read() def __contains__(self, key): - return (key in self._files or key in self.files) + return (key in self._files) def __repr__(self): # Get filename or default to `object` @@ -309,7 +310,7 @@ def values(self): @set_module('numpy') def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True, - encoding='ASCII', *, max_header_size=format._MAX_HEADER_SIZE): + encoding='ASCII', *, max_header_size=_MAX_HEADER_SIZE): """ Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files. @@ -338,18 +339,14 @@ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True, disallowing pickles include security, as loading pickled data can execute arbitrary code. 
If pickles are disallowed, loading object arrays will fail. Default: False - - .. versionchanged:: 1.16.3 - Made default False in response to CVE-2019-6446. - fix_imports : bool, optional - Only useful when loading Python 2 generated pickled files on Python 3, + Only useful when loading Python 2 generated pickled files, which includes npy/npz files containing object arrays. If `fix_imports` is True, pickle will try to map the old Python 2 names to the new names used in Python 3. encoding : str, optional What encoding to use when reading Python 2 strings. Only useful when - loading Python 2 generated pickled files in Python 3, which includes + loading Python 2 generated pickled files, which includes npy/npz files containing object arrays. Values other than 'latin1', 'ASCII', and 'bytes' are not allowed, as they can corrupt numerical data. Default: 'ASCII' @@ -403,6 +400,8 @@ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True, Examples -------- + >>> import numpy as np + Store data to disk, and load it again: >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]])) @@ -445,7 +444,7 @@ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True, # result can similarly silently corrupt numerical data. 
raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'") - pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports) + pickle_kwargs = {'encoding': encoding, 'fix_imports': fix_imports} with contextlib.ExitStack() as stack: if hasattr(file, 'read'): @@ -465,7 +464,7 @@ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True, # If the file size is less than N, we need to make sure not # to seek past the beginning of the file fid.seek(-min(N, len(magic)), 1) # back-up - if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX): + if magic.startswith((_ZIP_PREFIX, _ZIP_SUFFIX)): # zip-file (assume .npz) # Potentially transfer file ownership to NpzFile stack.pop_all() @@ -487,8 +486,10 @@ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True, else: # Try a pickle if not allow_pickle: - raise ValueError("Cannot load file containing pickled data " - "when allow_pickle=False") + raise ValueError( + "This file contains pickled (object) data. If you trust " + "the file you can load it unsafely using the " + "`allow_pickle=` keyword argument or `pickle.load()`.") try: return pickle.load(fid, **pickle_kwargs) except Exception as e: @@ -496,12 +497,12 @@ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True, f"Failed to interpret file {file!r} as a pickle") from e -def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None): +def _save_dispatcher(file, arr, allow_pickle=None): return (arr,) @array_function_dispatch(_save_dispatcher) -def save(file, arr, allow_pickle=True, fix_imports=np._NoValue): +def save(file, arr, allow_pickle=True): """ Save an array to a binary file in NumPy ``.npy`` format. @@ -522,12 +523,6 @@ def save(file, arr, allow_pickle=True, fix_imports=np._NoValue): require libraries that are not available, and not all pickled data is compatible between different versions of Python). Default: True - fix_imports : bool, optional - The `fix_imports` flag is deprecated and has no effect. 
- - .. deprecated:: 2.1 - This flag is ignored since NumPy 1.17 and was only needed to - support loading some files in Python 2 written in Python 3. See Also -------- @@ -542,6 +537,8 @@ def save(file, arr, allow_pickle=True, fix_imports=np._NoValue): Examples -------- + >>> import numpy as np + >>> from tempfile import TemporaryFile >>> outfile = TemporaryFile() @@ -562,12 +559,6 @@ def save(file, arr, allow_pickle=True, fix_imports=np._NoValue): >>> print(a, b) # [1 2] [1 3] """ - if fix_imports is not np._NoValue: - # Deprecated 2024-05-16, NumPy 2.1 - warnings.warn( - "The 'fix_imports' flag is deprecated and has no effect. " - "(Deprecated in NumPy 2.1)", - DeprecationWarning, stacklevel=2) if hasattr(file, 'write'): file_ctx = contextlib.nullcontext(file) else: @@ -578,17 +569,16 @@ def save(file, arr, allow_pickle=True, fix_imports=np._NoValue): with file_ctx as fid: arr = np.asanyarray(arr) - format.write_array(fid, arr, allow_pickle=allow_pickle, - pickle_kwargs=dict(fix_imports=fix_imports)) + format.write_array(fid, arr, allow_pickle=allow_pickle) -def _savez_dispatcher(file, *args, **kwds): +def _savez_dispatcher(file, *args, allow_pickle=True, **kwds): yield from args yield from kwds.values() @array_function_dispatch(_savez_dispatcher) -def savez(file, *args, **kwds): +def savez(file, *args, allow_pickle=True, **kwds): """Save several arrays into a single file in uncompressed ``.npz`` format. Provide arrays as keyword arguments to store them under the @@ -608,6 +598,14 @@ def savez(file, *args, **kwds): Arrays to save to the file. Please use keyword arguments (see `kwds` below) to assign names to arrays. Arrays specified as args will be named "arr_0", "arr_1", and so on. + allow_pickle : bool, optional + Allow saving object arrays using Python pickles. 
Reasons for + disallowing pickles include security (loading pickled data can execute + arbitrary code) and portability (pickled objects may not be loadable + on different Python installations, for example if the stored objects + require libraries that are not available, and not all pickled data is + compatible between different versions of Python). + Default: True kwds : Keyword arguments, optional Arrays to save to the file. Each array will be saved to the output file with its corresponding keyword name. @@ -644,6 +642,7 @@ def savez(file, *args, **kwds): Examples -------- + >>> import numpy as np >>> from tempfile import TemporaryFile >>> outfile = TemporaryFile() >>> x = np.arange(10) @@ -671,16 +670,16 @@ def savez(file, *args, **kwds): array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) """ - _savez(file, args, kwds, False) + _savez(file, args, kwds, False, allow_pickle=allow_pickle) -def _savez_compressed_dispatcher(file, *args, **kwds): +def _savez_compressed_dispatcher(file, *args, allow_pickle=True, **kwds): yield from args yield from kwds.values() @array_function_dispatch(_savez_compressed_dispatcher) -def savez_compressed(file, *args, **kwds): +def savez_compressed(file, *args, allow_pickle=True, **kwds): """ Save several arrays into a single file in compressed ``.npz`` format. @@ -701,6 +700,14 @@ def savez_compressed(file, *args, **kwds): Arrays to save to the file. Please use keyword arguments (see `kwds` below) to assign names to arrays. Arrays specified as args will be named "arr_0", "arr_1", and so on. + allow_pickle : bool, optional + Allow saving object arrays using Python pickles. Reasons for + disallowing pickles include security (loading pickled data can execute + arbitrary code) and portability (pickled objects may not be loadable + on different Python installations, for example if the stored objects + require libraries that are not available, and not all pickled data is + compatible between different versions of Python). 
+ Default: True kwds : Keyword arguments, optional Arrays to save to the file. Each array will be saved to the output file with its corresponding keyword name. @@ -732,6 +739,7 @@ def savez_compressed(file, *args, **kwds): Examples -------- + >>> import numpy as np >>> test_array = np.random.rand(3, 2) >>> test_vector = np.random.rand(4) >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector) @@ -742,7 +750,7 @@ def savez_compressed(file, *args, **kwds): True """ - _savez(file, args, kwds, True) + _savez(file, args, kwds, True, allow_pickle=allow_pickle) def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None): @@ -757,10 +765,10 @@ def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None): namedict = kwds for i, val in enumerate(args): - key = 'arr_%d' % i + key = f'arr_{i}' if key in namedict.keys(): raise ValueError( - "Cannot use un-named variables and keyword %s" % key) + f"Cannot use un-named variables and keyword {key}") namedict[key] = val if compress: @@ -769,17 +777,17 @@ def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None): compression = zipfile.ZIP_STORED zipf = zipfile_factory(file, mode="w", compression=compression) - - for key, val in namedict.items(): - fname = key + '.npy' - val = np.asanyarray(val) - # always force zip64, gh-10776 - with zipf.open(fname, 'w', force_zip64=True) as fid: - format.write_array(fid, val, - allow_pickle=allow_pickle, - pickle_kwargs=pickle_kwargs) - - zipf.close() + try: + for key, val in namedict.items(): + fname = key + '.npy' + val = np.asanyarray(val) + # always force zip64, gh-10776 + with zipf.open(fname, 'w', force_zip64=True) as fid: + format.write_array(fid, val, + allow_pickle=allow_pickle, + pickle_kwargs=pickle_kwargs) + finally: + zipf.close() def _ensure_ndmin_ndarray_check_param(ndmin): @@ -929,8 +937,8 @@ def _read(fname, *, delimiter=',', comment='#', quote='"', dtype = np.dtype(dtype) read_dtype_via_object_chunks = None - if 
dtype.kind in 'SUM' and ( - dtype == "S0" or dtype == "U0" or dtype == "M8" or dtype == 'm8'): + if dtype.kind in 'SUM' and dtype in { + np.dtype("S0"), np.dtype("U0"), np.dtype("M8"), np.dtype("m8")}: # This is a legacy "flexible" dtype. We do not truly support # parametric dtypes currently (no dtype discovery step in the core), # but have to support these for backward compatibility. @@ -966,13 +974,12 @@ def _read(fname, *, delimiter=',', comment='#', quote='"', if isinstance(comments[0], str) and len(comments[0]) == 1: comment = comments[0] comments = None - else: - # Input validation if there are multiple comment characters - if delimiter in comments: - raise TypeError( - f"Comment characters '{comments}' cannot include the " - f"delimiter '{delimiter}'" - ) + # Input validation if there are multiple comment characters + elif delimiter in comments: + raise TypeError( + f"Comment characters '{comments}' cannot include the " + f"delimiter '{delimiter}'" + ) # comment is now either a 1 or 0 character string or a tuple: if comments is not None: @@ -1040,6 +1047,7 @@ def _read(fname, *, delimiter=',', comment='#', quote='"', # Due to chunking, certain error reports are less clear, currently. if filelike: data = iter(data) # cannot chunk when reading from file + filelike = False c_byte_converters = False if read_dtype_via_object_chunks == "S": @@ -1055,7 +1063,7 @@ def _read(fname, *, delimiter=',', comment='#', quote='"', next_arr = _load_from_filelike( data, delimiter=delimiter, comment=comment, quote=quote, imaginary_unit=imaginary_unit, - usecols=usecols, skiplines=skiplines, max_rows=max_rows, + usecols=usecols, skiplines=skiplines, max_rows=chunk_size, converters=converters, dtype=dtype, encoding=encoding, filelike=filelike, byte_converters=byte_converters, @@ -1065,7 +1073,7 @@ def _read(fname, *, delimiter=',', comment='#', quote='"', # be adapted (in principle the concatenate could cast). 
chunks.append(next_arr.astype(read_dtype_via_object_chunks)) - skiprows = 0 # Only have to skip for first chunk + skiplines = 0 # Only have to skip for first chunk if max_rows >= 0: max_rows -= chunk_size if len(next_arr) < chunk_size: @@ -1107,7 +1115,7 @@ def _read(fname, *, delimiter=',', comment='#', quote='"', return arr -@set_array_function_like_doc +@finalize_array_function_like @set_module('numpy') def loadtxt(fname, dtype=float, comments='#', delimiter=None, converters=None, skiprows=0, usecols=None, unpack=False, @@ -1158,11 +1166,6 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, Which columns to read, with 0 being the first. For example, ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns. The default, None, results in all columns being read. - - .. versionchanged:: 1.11.0 - When a single column has to be read it is possible to use - an integer instead of a tuple. E.g ``usecols = 3`` reads the - fourth column the same way as ``usecols = (3,)`` would. unpack : bool, optional If True, the returned array is transposed, so that arguments may be unpacked using ``x, y, z = loadtxt(...)``. When used with a @@ -1172,17 +1175,14 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, The returned array will have at least `ndmin` dimensions. Otherwise mono-dimensional axes will be squeezed. Legal values: 0 (default), 1 or 2. - - .. versionadded:: 1.6.0 encoding : str, optional Encoding used to decode the inputfile. Does not apply to input streams. The special value 'bytes' enables backward compatibility workarounds that ensures you receive byte arrays as results if possible and passes 'latin1' encoded strings to converters. Override this value to receive unicode arrays and pass strings as input to converters. If set to None - the system default is used. The default value is 'bytes'. + the system default is used. The default value is None. - .. versionadded:: 1.14.0 .. 
versionchanged:: 2.0 Before NumPy 2, the default was ``'bytes'`` for Python 2 compatibility. The default is now ``None``. @@ -1193,8 +1193,6 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, empty lines and comment lines are not counted towards `max_rows`, while such lines are counted in `skiprows`. - .. versionadded:: 1.16.0 - .. versionchanged:: 1.23.0 Lines containing no data, including comment lines (e.g., lines starting with '#' or as specified via `comments`) are not counted @@ -1235,13 +1233,12 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, subset of up to n columns (where n is the least number of values present in all rows) can be read by specifying the columns via `usecols`. - .. versionadded:: 1.10.0 - The strings produced by the Python float.hex method can be used as input for floats. Examples -------- + >>> import numpy as np >>> from io import StringIO # StringIO behaves like a file object >>> c = StringIO("0 1\n2 3") >>> np.loadtxt(c) @@ -1346,7 +1343,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, single escaped character: >>> s = StringIO('"Hello, my name is ""Monty""!"') - >>> np.loadtxt(s, dtype="U", delimiter=",", quotechar='"') + >>> np.loadtxt(s, dtype=np.str_, delimiter=",", quotechar='"') array('Hello, my name is "Monty"!', dtype='>> import numpy as np >>> x = y = z = np.arange(0.0,5.0,1.0) >>> np.savetxt('test.out', x, delimiter=',') # X is an array >>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays @@ -1577,7 +1561,7 @@ def first_write(self, v): # Handle 1-dimensional arrays if X.ndim == 0 or X.ndim > 2: raise ValueError( - "Expected 1D or 2D array, got %dD array instead" % X.ndim) + f"Expected 1D or 2D array, got {X.ndim}D array instead") elif X.ndim == 1: # Common case -- 1d array of numbers if X.dtype.names is None: @@ -1595,14 +1579,14 @@ def first_write(self, v): # list of formats. E.g. 
'%10.5f\t%10d' or ('%10.5f', '$10d') if type(fmt) in (list, tuple): if len(fmt) != ncol: - raise AttributeError('fmt has wrong shape. %s' % str(fmt)) + raise AttributeError(f'fmt has wrong shape. {str(fmt)}') format = delimiter.join(fmt) elif isinstance(fmt, str): n_fmt_chars = fmt.count('%') - error = ValueError('fmt has wrong number of %% formats: %s' % fmt) + error = ValueError(f'fmt has wrong number of % formats: {fmt}') if n_fmt_chars == 1: if iscomplex_X: - fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol + fmt = [f' ({fmt}+{fmt}j)', ] * ncol else: fmt = [fmt, ] * ncol format = delimiter.join(fmt) @@ -1613,7 +1597,7 @@ def first_write(self, v): else: format = fmt else: - raise ValueError('invalid fmt: %r' % (fmt,)) + raise ValueError(f'invalid fmt: {fmt!r}') if len(header) > 0: header = header.replace('\n', '\n' + comments) @@ -1622,8 +1606,7 @@ def first_write(self, v): for row in X: row2 = [] for number in row: - row2.append(number.real) - row2.append(number.imag) + row2.extend((number.real, number.imag)) s = format % tuple(row2) + newline fh.write(s.replace('+-', '-')) else: @@ -1631,9 +1614,10 @@ def first_write(self, v): try: v = format % tuple(row) + newline except TypeError as e: - raise TypeError("Mismatch between array dtype ('%s') and " - "format specifier ('%s')" - % (str(X.dtype), format)) from e + raise TypeError( + f"Mismatch between array dtype ('{str(X.dtype)}') and " + f"format specifier ('{format}')" + ) from e fh.write(v) if len(footer) > 0: @@ -1660,6 +1644,7 @@ def fromregex(file, regexp, dtype, encoding=None): .. versionchanged:: 1.22.0 Now accepts `os.PathLike` implementations. + regexp : str or regexp Regular expression used to parse the file. Groups in the regular expression correspond to fields in the dtype. @@ -1668,8 +1653,6 @@ def fromregex(file, regexp, dtype, encoding=None): encoding : str, optional Encoding used to decode the inputfile. Does not apply to input streams. - .. 
versionadded:: 1.14.0 - Returns ------- output : ndarray @@ -1693,6 +1676,7 @@ def fromregex(file, regexp, dtype, encoding=None): Examples -------- + >>> import numpy as np >>> from io import StringIO >>> text = StringIO("1312 foo\n1534 bar\n444 qux") @@ -1731,7 +1715,7 @@ def fromregex(file, regexp, dtype, encoding=None): # re-interpret as a single-field structured array. newdtype = np.dtype(dtype[dtype.names[0]]) output = np.array(seq, dtype=newdtype) - output.dtype = dtype + output = output.view(dtype) else: output = np.array(seq, dtype=dtype) @@ -1746,13 +1730,13 @@ def fromregex(file, regexp, dtype, encoding=None): #####-------------------------------------------------------------------------- -@set_array_function_like_doc +@finalize_array_function_like @set_module('numpy') def genfromtxt(fname, dtype=float, comments='#', delimiter=None, skip_header=0, skip_footer=0, converters=None, missing_values=None, filling_values=None, usecols=None, names=None, excludelist=None, - deletechars=''.join(sorted(NameValidator.defaultdeletechars)), + deletechars=''.join(sorted(NameValidator.defaultdeletechars)), # noqa: B008 replace_space='_', autostrip=False, case_sensitive=True, defaultfmt="f%i", unpack=None, usemask=False, loose=True, invalid_raise=True, max_rows=None, encoding=None, @@ -1772,8 +1756,11 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, in a list or produced by a generator are treated as lines. dtype : dtype, optional Data type of the resulting array. - If None, the dtypes will be determined by the contents of each - column, individually. + If a structured dtype, the output array will be 1D and structured where + each field corresponds to one column. + If None, the dtype of each column will be inferred automatically, and + the output array will be structured only if either the dtypes are not + all the same or if `names` is not None. comments : str, optional The character used to indicate the start of a comment. 
All the characters occurring on a line after a comment are discarded. @@ -1802,13 +1789,15 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, Which columns to read, with 0 being the first. For example, ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns. names : {None, True, str, sequence}, optional - If `names` is True, the field names are read from the first line after - the first `skip_header` lines. This line can optionally be preceded - by a comment delimiter. Any content before the comment delimiter is - discarded. If `names` is a sequence or a single-string of - comma-separated names, the names will be used to define the field - names in a structured dtype. If `names` is None, the names of the - dtype fields will be used, if any. + If `names` is True, the output will be a structured array whose field + names are read from the first line after the first `skip_header` lines. + This line can optionally be preceded by a comment delimiter. Any content + before the comment delimiter is discarded. + If `names` is a sequence or a single string of comma-separated names, + the output is a structured array whose field names are taken from + `names`. + If `names` is None, the output is structured only if `dtype` is + structured, in which case the field names are taken from `dtype`. excludelist : sequence, optional A list of names to exclude. This list is appended to the default list ['return','file','print']. Excluded names are appended with an @@ -1845,8 +1834,6 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, The maximum number of rows to read. Must not be used with skip_footer at the same time. If given, the value must be at least 1. Default is to read the entire file. - - .. versionadded:: 1.10.0 encoding : str, optional Encoding used to decode the inputfile. Does not apply when `fname` is a file object. 
The special value 'bytes' enables backward @@ -1856,7 +1843,6 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, as input to converters. If set to None the system default is used. The default value is 'bytes'. - .. versionadded:: 1.14.0 .. versionchanged:: 2.0 Before NumPy 2, the default was ``'bytes'`` for Python 2 compatibility. The default is now ``None``. @@ -1980,7 +1966,8 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, if not isinstance(user_converters, dict): raise TypeError( "The input argument 'converter' should be a valid dictionary " - "(got '%s' instead)" % type(user_converters)) + f"(got '{type(user_converters)}' instead)" + ) if encoding == 'bytes': encoding = None @@ -2032,7 +2019,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, first_line = '' first_values = [] warnings.warn( - 'genfromtxt: Empty input file: "%s"' % fname, stacklevel=2 + f'genfromtxt: Empty input file: "{fname}"', stacklevel=2 ) # Should we take the first values as names ? 
@@ -2097,7 +2084,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, user_missing_values = user_missing_values.decode('latin1') # Define the list of missing_values (one column: one list) - missing_values = [list(['']) for _ in range(nbcols)] + missing_values = [[''] for _ in range(nbcols)] # We have a dictionary: process it field by field if isinstance(user_missing_values, dict): @@ -2289,9 +2276,9 @@ def tobytes_first(x, conv): # Store the values append_to_rows(tuple(values)) if usemask: - append_to_masks(tuple([v.strip() in m + append_to_masks(tuple(v.strip() in m for (v, m) in zip(values, - missing_values)])) + missing_values))) if len(rows) == max_rows: break @@ -2302,14 +2289,14 @@ def tobytes_first(x, conv): try: converter.iterupgrade(current_column) except ConverterLockError: - errmsg = "Converter #%i is locked and cannot be upgraded: " % i + errmsg = f"Converter #{i} is locked and cannot be upgraded: " current_column = map(itemgetter(i), rows) for (j, value) in enumerate(current_column): try: converter.upgrade(value) except (ConverterError, ValueError): - errmsg += "(occurred line #%i for value '%s')" - errmsg %= (j + 1 + skip_header, value) + line_number = j + 1 + skip_header + errmsg += f"(occurred line #{line_number} for value '{value}')" raise ConverterError(errmsg) # Check that we don't have invalid values @@ -2317,7 +2304,7 @@ def tobytes_first(x, conv): if nbinvalid > 0: nbrows = len(rows) + nbinvalid - skip_footer # Construct the error message - template = " Line #%%i (got %%i columns instead of %i)" % nbcols + template = f" Line #%i (got %i columns instead of {nbcols})" if skip_footer > 0: nbinvalid_skipped = len([_ for _ in invalid if _[0] > nbrows + skip_header]) @@ -2389,7 +2376,7 @@ def encode_unicode_cols(row_tup): column_types[i] = np.bytes_ # Update string types to be the right length - sized_column_types = column_types[:] + sized_column_types = column_types.copy() for i, col_type in enumerate(column_types): if 
np.issubdtype(col_type, np.character): n_chars = max(len(row[i]) for row in data) @@ -2502,99 +2489,3 @@ def encode_unicode_cols(row_tup): _genfromtxt_with_like = array_function_dispatch()(genfromtxt) - - -def recfromtxt(fname, **kwargs): - """ - Load ASCII data from a file and return it in a record array. - - If ``usemask=False`` a standard `recarray` is returned, - if ``usemask=True`` a MaskedRecords array is returned. - - .. deprecated:: 2.0 - Use `numpy.genfromtxt` instead. - - Parameters - ---------- - fname, kwargs : For a description of input parameters, see `genfromtxt`. - - See Also - -------- - numpy.genfromtxt : generic function - - Notes - ----- - By default, `dtype` is None, which means that the data-type of the output - array will be determined from the data. - - """ - - # Deprecated in NumPy 2.0, 2023-07-11 - warnings.warn( - "`recfromtxt` is deprecated, " - "use `numpy.genfromtxt` instead." - "(deprecated in NumPy 2.0)", - DeprecationWarning, - stacklevel=2 - ) - - kwargs.setdefault("dtype", None) - usemask = kwargs.get('usemask', False) - output = genfromtxt(fname, **kwargs) - if usemask: - from numpy.ma.mrecords import MaskedRecords - output = output.view(MaskedRecords) - else: - output = output.view(np.recarray) - return output - - -def recfromcsv(fname, **kwargs): - """ - Load ASCII data stored in a comma-separated file. - - The returned array is a record array (if ``usemask=False``, see - `recarray`) or a masked record array (if ``usemask=True``, - see `ma.mrecords.MaskedRecords`). - - .. deprecated:: 2.0 - Use `numpy.genfromtxt` with comma as `delimiter` instead. - - Parameters - ---------- - fname, kwargs : For a description of input parameters, see `genfromtxt`. - - See Also - -------- - numpy.genfromtxt : generic function to load ASCII data. - - Notes - ----- - By default, `dtype` is None, which means that the data-type of the output - array will be determined from the data. 
- - """ - - # Deprecated in NumPy 2.0, 2023-07-11 - warnings.warn( - "`recfromcsv` is deprecated, " - "use `numpy.genfromtxt` with comma as `delimiter` instead. " - "(deprecated in NumPy 2.0)", - DeprecationWarning, - stacklevel=2 - ) - - # Set default kwargs for genfromtxt as relevant to csv import. - kwargs.setdefault("case_sensitive", "lower") - kwargs.setdefault("names", True) - kwargs.setdefault("delimiter", ",") - kwargs.setdefault("dtype", None) - output = genfromtxt(fname, **kwargs) - - usemask = kwargs.get("usemask", False) - if usemask: - from numpy.ma.mrecords import MaskedRecords - output = output.view(MaskedRecords) - else: - output = output.view(np.recarray) - return output diff --git a/numpy/lib/_npyio_impl.pyi b/numpy/lib/_npyio_impl.pyi index d9b43578d798..efe7c0886719 100644 --- a/numpy/lib/_npyio_impl.pyi +++ b/numpy/lib/_npyio_impl.pyi @@ -1,348 +1,284 @@ -import os -import sys -import zipfile import types +import zipfile +from _typeshed import ( + StrOrBytesPath, + StrPath, + SupportsKeysAndGetItem, + SupportsRead, + SupportsWrite, +) +from collections.abc import Callable, Collection, Iterable, Iterator, Mapping, Sequence from re import Pattern -from collections.abc import Collection, Mapping, Iterator, Sequence, Callable, Iterable from typing import ( - Literal as L, - Any, - TypeVar, - Generic, IO, - overload, + Any, + ClassVar, + Literal as L, Protocol, + Self, + overload, + override, + type_check_only, ) +from typing_extensions import TypeVar -from numpy import ( - ndarray, - recarray, - dtype, - generic, - float64, - void, - record, -) - -from numpy.ma.mrecords import MaskedRecords -from numpy._typing import ( - ArrayLike, - DTypeLike, - NDArray, - _DTypeLike, - _SupportsArrayFunc, -) +import numpy as np +from numpy._core.multiarray import packbits, unpackbits +from numpy._typing import ArrayLike, DTypeLike, NDArray, _DTypeLike, _SupportsArrayFunc -from numpy._core.multiarray import ( - packbits as packbits, - unpackbits as unpackbits, -) 
+from ._datasource import DataSource as DataSource -_T = TypeVar("_T") -_T_contra = TypeVar("_T_contra", contravariant=True) -_T_co = TypeVar("_T_co", covariant=True) -_SCT = TypeVar("_SCT", bound=generic) -_CharType_co = TypeVar("_CharType_co", str, bytes, covariant=True) -_CharType_contra = TypeVar("_CharType_contra", str, bytes, contravariant=True) +__all__ = [ + "fromregex", + "genfromtxt", + "load", + "loadtxt", + "packbits", + "save", + "savetxt", + "savez", + "savez_compressed", + "unpackbits", +] -class _SupportsGetItem(Protocol[_T_contra, _T_co]): - def __getitem__(self, key: _T_contra, /) -> _T_co: ... +_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, default=Any, covariant=True) -class _SupportsRead(Protocol[_CharType_co]): - def read(self) -> _CharType_co: ... +type _FName = StrPath | Iterable[str] | Iterable[bytes] +type _FNameRead = StrPath | SupportsRead[str] | SupportsRead[bytes] +type _FNameWriteBytes = StrPath | SupportsWrite[bytes] +type _FNameWrite = _FNameWriteBytes | SupportsWrite[str] -class _SupportsReadSeek(Protocol[_CharType_co]): - def read(self, n: int, /) -> _CharType_co: ... +@type_check_only +class _SupportsReadSeek[T](SupportsRead[T], Protocol): def seek(self, offset: int, whence: int, /) -> object: ... -class _SupportsWrite(Protocol[_CharType_contra]): - def write(self, s: _CharType_contra, /) -> object: ... - -__all__: list[str] - -class BagObj(Generic[_T_co]): - def __init__(self, obj: _SupportsGetItem[str, _T_co]) -> None: ... - def __getattribute__(self, key: str) -> _T_co: ... +class BagObj[T]: + def __init__(self, /, obj: SupportsKeysAndGetItem[str, T]) -> None: ... + def __getattribute__(self, key: str, /) -> T: ... def __dir__(self) -> list[str]: ... 
-class NpzFile(Mapping[str, NDArray[Any]]): - zip: zipfile.ZipFile - fid: None | IO[str] +class NpzFile(Mapping[str, NDArray[_ScalarT_co]]): + _MAX_REPR_ARRAY_COUNT: ClassVar[int] = 5 + + zip: zipfile.ZipFile | None = None + fid: IO[str] | None = None files: list[str] allow_pickle: bool - pickle_kwargs: None | Mapping[str, Any] - _MAX_REPR_ARRAY_COUNT: int - # Represent `f` as a mutable property so we can access the type of `self` - @property - def f(self: _T) -> BagObj[_T]: ... - @f.setter - def f(self: _T, value: BagObj[_T]) -> None: ... + pickle_kwargs: Mapping[str, Any] | None + f: BagObj[NpzFile[_ScalarT_co]] + + # def __init__( self, - fid: IO[str], - own_fid: bool = ..., - allow_pickle: bool = ..., - pickle_kwargs: None | Mapping[str, Any] = ..., - ) -> None: ... - def __enter__(self: _T) -> _T: ... - def __exit__( - self, - exc_type: None | type[BaseException], - exc_value: None | BaseException, - traceback: None | types.TracebackType, /, + fid: IO[Any], + own_fid: bool = False, + allow_pickle: bool = False, + pickle_kwargs: Mapping[str, object] | None = None, + *, + max_header_size: int = 10_000, ) -> None: ... - def close(self) -> None: ... def __del__(self) -> None: ... - def __iter__(self) -> Iterator[str]: ... + def __enter__(self) -> Self: ... + def __exit__(self, cls: type[BaseException] | None, e: BaseException | None, tb: types.TracebackType | None, /) -> None: ... + @override def __len__(self) -> int: ... - def __getitem__(self, key: str) -> NDArray[Any]: ... - def __contains__(self, key: str) -> bool: ... - def __repr__(self) -> str: ... + @override + def __iter__(self) -> Iterator[str]: ... + @override + def __getitem__(self, key: str, /) -> NDArray[_ScalarT_co]: ... -class DataSource: - def __init__( - self, - destpath: None | str | os.PathLike[str] = ..., - ) -> None: ... - def __del__(self) -> None: ... - def abspath(self, path: str) -> str: ... - def exists(self, path: str) -> bool: ... 
+ # + @override + @overload + def get(self, key: str, default: None = None, /) -> NDArray[_ScalarT_co] | None: ... # pyrefly: ignore[bad-override] + @overload + def get[T](self, key: str, default: NDArray[_ScalarT_co] | T, /) -> NDArray[_ScalarT_co] | T: ... # pyright: ignore[reportIncompatibleMethodOverride] - # Whether the file-object is opened in string or bytes mode (by default) - # depends on the file-extension of `path` - def open( - self, - path: str, - mode: str = ..., - encoding: None | str = ..., - newline: None | str = ..., - ) -> IO[Any]: ... + # + def close(self) -> None: ... # NOTE: Returns a `NpzFile` if file is a zip file; # returns an `ndarray`/`memmap` otherwise def load( - file: str | bytes | os.PathLike[Any] | _SupportsReadSeek[bytes], - mmap_mode: L[None, "r+", "r", "w+", "c"] = ..., - allow_pickle: bool = ..., - fix_imports: bool = ..., - encoding: L["ASCII", "latin1", "bytes"] = ..., + file: StrOrBytesPath | _SupportsReadSeek[bytes], + mmap_mode: L["r+", "r", "w+", "c"] | None = None, + allow_pickle: bool = False, + fix_imports: bool = True, + encoding: L["ASCII", "latin1", "bytes"] = "ASCII", + *, + max_header_size: int = 10_000, ) -> Any: ... -def save( - file: str | os.PathLike[str] | _SupportsWrite[bytes], - arr: ArrayLike, - allow_pickle: bool = ..., - fix_imports: bool = ..., -) -> None: ... - -def savez( - file: str | os.PathLike[str] | _SupportsWrite[bytes], - *args: ArrayLike, - **kwds: ArrayLike, -) -> None: ... - -def savez_compressed( - file: str | os.PathLike[str] | _SupportsWrite[bytes], - *args: ArrayLike, - **kwds: ArrayLike, -) -> None: ... +def save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool = True) -> None: ... +def savez(file: _FNameWriteBytes, *args: ArrayLike, allow_pickle: bool = True, **kwds: ArrayLike) -> None: ... +def savez_compressed(file: _FNameWriteBytes, *args: ArrayLike, allow_pickle: bool = True, **kwds: ArrayLike) -> None: ... 
# File-like objects only have to implement `__iter__` and, # optionally, `encoding` @overload def loadtxt( - fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], - dtype: None = ..., - comments: None | str | Sequence[str] = ..., - delimiter: None | str = ..., - converters: None | Mapping[int | str, Callable[[str], Any]] = ..., - skiprows: int = ..., - usecols: int | Sequence[int] = ..., - unpack: bool = ..., - ndmin: L[0, 1, 2] = ..., - encoding: None | str = ..., - max_rows: None | int = ..., + fname: _FName, + dtype: None = None, + comments: str | Sequence[str] | None = "#", + delimiter: str | None = None, + converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None, + skiprows: int = 0, + usecols: int | Sequence[int] | None = None, + unpack: bool = False, + ndmin: L[0, 1, 2] = 0, + encoding: str | None = None, + max_rows: int | None = None, *, - quotechar: None | str = ..., - like: None | _SupportsArrayFunc = ... -) -> NDArray[float64]: ... + quotechar: str | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[np.float64]: ... 
@overload -def loadtxt( - fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], - dtype: _DTypeLike[_SCT], - comments: None | str | Sequence[str] = ..., - delimiter: None | str = ..., - converters: None | Mapping[int | str, Callable[[str], Any]] = ..., - skiprows: int = ..., - usecols: int | Sequence[int] = ..., - unpack: bool = ..., - ndmin: L[0, 1, 2] = ..., - encoding: None | str = ..., - max_rows: None | int = ..., +def loadtxt[ScalarT: np.generic]( + fname: _FName, + dtype: _DTypeLike[ScalarT], + comments: str | Sequence[str] | None = "#", + delimiter: str | None = None, + converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None, + skiprows: int = 0, + usecols: int | Sequence[int] | None = None, + unpack: bool = False, + ndmin: L[0, 1, 2] = 0, + encoding: str | None = None, + max_rows: int | None = None, *, - quotechar: None | str = ..., - like: None | _SupportsArrayFunc = ... -) -> NDArray[_SCT]: ... + quotechar: str | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[ScalarT]: ... @overload def loadtxt( - fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], - dtype: DTypeLike, - comments: None | str | Sequence[str] = ..., - delimiter: None | str = ..., - converters: None | Mapping[int | str, Callable[[str], Any]] = ..., - skiprows: int = ..., - usecols: int | Sequence[int] = ..., - unpack: bool = ..., - ndmin: L[0, 1, 2] = ..., - encoding: None | str = ..., - max_rows: None | int = ..., + fname: _FName, + dtype: DTypeLike | None, + comments: str | Sequence[str] | None = "#", + delimiter: str | None = None, + converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None, + skiprows: int = 0, + usecols: int | Sequence[int] | None = None, + unpack: bool = False, + ndmin: L[0, 1, 2] = 0, + encoding: str | None = None, + max_rows: int | None = None, *, - quotechar: None | str = ..., - like: None | _SupportsArrayFunc = ... 
+ quotechar: str | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... def savetxt( - fname: str | os.PathLike[str] | _SupportsWrite[str] | _SupportsWrite[bytes], + fname: _FNameWrite, X: ArrayLike, - fmt: str | Sequence[str] = ..., - delimiter: str = ..., - newline: str = ..., - header: str = ..., - footer: str = ..., - comments: str = ..., - encoding: None | str = ..., + fmt: str | Sequence[str] = "%.18e", + delimiter: str = " ", + newline: str = "\n", + header: str = "", + footer: str = "", + comments: str = "# ", + encoding: str | None = None, ) -> None: ... @overload -def fromregex( - file: str | os.PathLike[str] | _SupportsRead[str] | _SupportsRead[bytes], +def fromregex[ScalarT: np.generic]( + file: _FNameRead, regexp: str | bytes | Pattern[Any], - dtype: _DTypeLike[_SCT], - encoding: None | str = ... -) -> NDArray[_SCT]: ... + dtype: _DTypeLike[ScalarT], + encoding: str | None = None, +) -> NDArray[ScalarT]: ... @overload def fromregex( - file: str | os.PathLike[str] | _SupportsRead[str] | _SupportsRead[bytes], + file: _FNameRead, regexp: str | bytes | Pattern[Any], - dtype: DTypeLike, - encoding: None | str = ... + dtype: DTypeLike | None, + encoding: str | None = None, ) -> NDArray[Any]: ... 
@overload def genfromtxt( - fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], - dtype: None = ..., - comments: str = ..., - delimiter: None | str | int | Iterable[int] = ..., - skip_header: int = ..., - skip_footer: int = ..., - converters: None | Mapping[int | str, Callable[[str], Any]] = ..., - missing_values: Any = ..., - filling_values: Any = ..., - usecols: None | Sequence[int] = ..., - names: L[None, True] | str | Collection[str] = ..., - excludelist: None | Sequence[str] = ..., - deletechars: str = ..., - replace_space: str = ..., - autostrip: bool = ..., - case_sensitive: bool | L['upper', 'lower'] = ..., - defaultfmt: str = ..., - unpack: None | bool = ..., - usemask: bool = ..., - loose: bool = ..., - invalid_raise: bool = ..., - max_rows: None | int = ..., - encoding: str = ..., + fname: _FName, + dtype: None = None, + comments: str = "#", + delimiter: str | int | Iterable[int] | None = None, + skip_header: int = 0, + skip_footer: int = 0, + converters: Mapping[int | str, Callable[[str], Any]] | None = None, + missing_values: Any = None, + filling_values: Any = None, + usecols: Sequence[int] | None = None, + names: L[True] | str | Collection[str] | None = None, + excludelist: Sequence[str] | None = None, + deletechars: str = " !#$%&'()*+,-./:;<=>?@[\\]^{|}~", + replace_space: str = "_", + autostrip: bool = False, + case_sensitive: bool | L["upper", "lower"] = True, + defaultfmt: str = "f%i", + unpack: bool | None = None, + usemask: bool = False, + loose: bool = True, + invalid_raise: bool = True, + max_rows: int | None = None, + encoding: str | None = None, *, - ndmin: L[0, 1, 2] = ..., - like: None | _SupportsArrayFunc = ..., + ndmin: L[0, 1, 2] = 0, + like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... 
@overload -def genfromtxt( - fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], - dtype: _DTypeLike[_SCT], - comments: str = ..., - delimiter: None | str | int | Iterable[int] = ..., - skip_header: int = ..., - skip_footer: int = ..., - converters: None | Mapping[int | str, Callable[[str], Any]] = ..., - missing_values: Any = ..., - filling_values: Any = ..., - usecols: None | Sequence[int] = ..., - names: L[None, True] | str | Collection[str] = ..., - excludelist: None | Sequence[str] = ..., - deletechars: str = ..., - replace_space: str = ..., - autostrip: bool = ..., - case_sensitive: bool | L['upper', 'lower'] = ..., - defaultfmt: str = ..., - unpack: None | bool = ..., - usemask: bool = ..., - loose: bool = ..., - invalid_raise: bool = ..., - max_rows: None | int = ..., - encoding: str = ..., +def genfromtxt[ScalarT: np.generic]( + fname: _FName, + dtype: _DTypeLike[ScalarT], + comments: str = "#", + delimiter: str | int | Iterable[int] | None = None, + skip_header: int = 0, + skip_footer: int = 0, + converters: Mapping[int | str, Callable[[str], Any]] | None = None, + missing_values: Any = None, + filling_values: Any = None, + usecols: Sequence[int] | None = None, + names: L[True] | str | Collection[str] | None = None, + excludelist: Sequence[str] | None = None, + deletechars: str = " !#$%&'()*+,-./:;<=>?@[\\]^{|}~", + replace_space: str = "_", + autostrip: bool = False, + case_sensitive: bool | L["upper", "lower"] = True, + defaultfmt: str = "f%i", + unpack: bool | None = None, + usemask: bool = False, + loose: bool = True, + invalid_raise: bool = True, + max_rows: int | None = None, + encoding: str | None = None, *, - ndmin: L[0, 1, 2] = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... + ndmin: L[0, 1, 2] = 0, + like: _SupportsArrayFunc | None = None, +) -> NDArray[ScalarT]: ... 
@overload def genfromtxt( - fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], - dtype: DTypeLike, - comments: str = ..., - delimiter: None | str | int | Iterable[int] = ..., - skip_header: int = ..., - skip_footer: int = ..., - converters: None | Mapping[int | str, Callable[[str], Any]] = ..., - missing_values: Any = ..., - filling_values: Any = ..., - usecols: None | Sequence[int] = ..., - names: L[None, True] | str | Collection[str] = ..., - excludelist: None | Sequence[str] = ..., - deletechars: str = ..., - replace_space: str = ..., - autostrip: bool = ..., - case_sensitive: bool | L['upper', 'lower'] = ..., - defaultfmt: str = ..., - unpack: None | bool = ..., - usemask: bool = ..., - loose: bool = ..., - invalid_raise: bool = ..., - max_rows: None | int = ..., - encoding: str = ..., + fname: _FName, + dtype: DTypeLike | None, + comments: str = "#", + delimiter: str | int | Iterable[int] | None = None, + skip_header: int = 0, + skip_footer: int = 0, + converters: Mapping[int | str, Callable[[str], Any]] | None = None, + missing_values: Any = None, + filling_values: Any = None, + usecols: Sequence[int] | None = None, + names: L[True] | str | Collection[str] | None = None, + excludelist: Sequence[str] | None = None, + deletechars: str = " !#$%&'()*+,-./:;<=>?@[\\]^{|}~", + replace_space: str = "_", + autostrip: bool = False, + case_sensitive: bool | L["upper", "lower"] = True, + defaultfmt: str = "f%i", + unpack: bool | None = None, + usemask: bool = False, + loose: bool = True, + invalid_raise: bool = True, + max_rows: int | None = None, + encoding: str | None = None, *, - ndmin: L[0, 1, 2] = ..., - like: None | _SupportsArrayFunc = ..., + ndmin: L[0, 1, 2] = 0, + like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... - -@overload -def recfromtxt( - fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], - *, - usemask: L[False] = ..., - **kwargs: Any, -) -> recarray[Any, dtype[record]]: ... 
-@overload -def recfromtxt( - fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], - *, - usemask: L[True], - **kwargs: Any, -) -> MaskedRecords[Any, dtype[void]]: ... - -@overload -def recfromcsv( - fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], - *, - usemask: L[False] = ..., - **kwargs: Any, -) -> recarray[Any, dtype[record]]: ... -@overload -def recfromcsv( - fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], - *, - usemask: L[True], - **kwargs: Any, -) -> MaskedRecords[Any, dtype[void]]: ... diff --git a/numpy/lib/_polynomial_impl.py b/numpy/lib/_polynomial_impl.py index 63c12f438240..81f2a0e5d7cd 100644 --- a/numpy/lib/_polynomial_impl.py +++ b/numpy/lib/_polynomial_impl.py @@ -5,23 +5,28 @@ __all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd', 'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d', 'polyfit'] - import functools import re import warnings -from .._utils import set_module import numpy._core.numeric as NX - -from numpy._core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array, - ones) -from numpy._core import overrides +from numpy._core import ( + abs, + array, + atleast_1d, + dot, + finfo, + hstack, + isscalar, + ones, + overrides, +) +from numpy._utils import set_module from numpy.exceptions import RankWarning -from numpy.lib._twodim_base_impl import diag, vander from numpy.lib._function_base_impl import trim_zeros -from numpy.lib._type_check_impl import iscomplex, real, imag, mintypecode -from numpy.linalg import eigvals, lstsq, inv - +from numpy.lib._twodim_base_impl import diag, vander +from numpy.lib._type_check_impl import imag, iscomplex, mintypecode, real +from numpy.linalg import eigvals, inv, lstsq array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy') @@ -101,8 +106,11 @@ def poly(seq_of_zeros): Examples -------- + Given a sequence of a polynomial's zeros: + >>> import numpy as np + >>> np.poly((0, 0, 0)) # Multiple root example 
array([1., 0., 0., 0.]) @@ -132,8 +140,7 @@ def poly(seq_of_zeros): seq_of_zeros = eigvals(seq_of_zeros) elif len(sh) == 1: dt = seq_of_zeros.dtype - # Let object arrays slip through, e.g. for arbitrary precision - if dt != object: + if dt.type is not NX.object_: seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char)) else: raise ValueError("input must be 1d or non-empty square 2d array.") @@ -209,6 +216,7 @@ def roots(p): Examples -------- + >>> import numpy as np >>> coeff = [3.2, 2, 1] >>> np.roots(coeff) array([-0.3125+0.46351241j, -0.3125-0.46351241j]) @@ -230,7 +238,7 @@ def roots(p): trailing_zeros = len(p) - non_zero[-1] - 1 # strip leading and trailing zeros - p = p[int(non_zero[0]):int(non_zero[-1])+1] + p = p[int(non_zero[0]):int(non_zero[-1]) + 1] # casting: if incoming array isn't floating point, make it floating point. if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)): @@ -239,9 +247,13 @@ def roots(p): N = len(p) if N > 1: # build companion matrix and find its eigenvalues (the roots) - A = diag(NX.ones((N-2,), p.dtype), -1) - A[0,:] = -p[1:] / p[0] + A = diag(NX.ones((N - 2,), p.dtype), -1) + A[0, :] = -p[1:] / p[0] roots = eigvals(A) + + # backwards compat: return real values if possible + from numpy.linalg._linalg import _to_real_if_imag_zero + roots = _to_real_if_imag_zero(roots, A) else: roots = NX.array([]) @@ -295,8 +307,11 @@ def polyint(p, m=1, k=None): Examples -------- + The defining property of the antiderivative: + >>> import numpy as np + >>> p = np.poly1d([1,1,1]) >>> P = np.polyint(p) >>> P @@ -335,7 +350,7 @@ def polyint(p, m=1, k=None): k = NX.zeros(m, float) k = atleast_1d(k) if len(k) == 1 and m > 1: - k = k[0]*NX.ones(m, float) + k = k[0] * NX.ones(m, float) if len(k) < m: raise ValueError( "k must be a scalar or a rank-1 array of length 1 or >m.") @@ -390,8 +405,11 @@ def polyder(p, m=1): Examples -------- + The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is: + >>> import numpy as np + >>> p = 
np.poly1d([1,1,1,1]) >>> p2 = np.polyder(p) >>> p2 @@ -450,7 +468,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): A summary of the differences can be found in the :doc:`transition guide `. - Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg` + Fit a polynomial ``p[0] * x**deg + ... + p[deg]`` of degree `deg` to points `(x, y)`. Returns a vector of coefficients `p` that minimises the squared error in the order `deg`, `deg-1`, ... `0`. @@ -504,9 +522,9 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): - residuals -- sum of squared residuals of the least squares fit - rank -- the effective rank of the scaled Vandermonde - coefficient matrix + coefficient matrix - singular_values -- singular values of the scaled Vandermonde - coefficient matrix + coefficient matrix - rcond -- value of `rcond`. For more details, see `numpy.linalg.lstsq`. @@ -575,6 +593,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): Examples -------- + >>> import numpy as np >>> import warnings >>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0]) >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0]) @@ -633,7 +652,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): # set rcond if rcond is None: - rcond = len(x)*finfo(x.dtype).eps + rcond = len(x) * finfo(x.dtype).eps # set up least squares equation for powers of x lhs = vander(x, order) @@ -653,10 +672,10 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): rhs *= w # scale lhs to improve condition number and solve - scale = NX.sqrt((lhs*lhs).sum(axis=0)) + scale = NX.sqrt((lhs * lhs).sum(axis=0)) lhs /= scale c, resids, rank, s = lstsq(lhs, rhs, rcond) - c = (c.T/scale).T # broadcast scale coefficients + c = (c.T / scale).T # broadcast scale coefficients # warn on rank reduction, which indicates an ill conditioned matrix if rank != order and not full: @@ -675,14 +694,14 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, 
cov=False): raise ValueError("the number of data points must exceed order " "to scale the covariance matrix") # note, this used to be: fac = resids / (len(x) - order - 2.0) - # it was deciced that the "- 2" (originally justified by "Bayesian + # it was decided that the "- 2" (originally justified by "Bayesian # uncertainty analysis") is not what the user expects # (see gh-11196 and gh-11197) fac = resids / (len(x) - order) if y.ndim == 1: return c, Vbase * fac else: - return c, Vbase[:,:, NX.newaxis] * fac + return c, Vbase[:, :, NX.newaxis] * fac else: return c @@ -749,6 +768,7 @@ def polyval(p, x): Examples -------- + >>> import numpy as np >>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1 76 >>> np.polyval([3,0,1], np.poly1d(5)) @@ -808,6 +828,7 @@ def polyadd(a1, a2): Examples -------- + >>> import numpy as np >>> np.polyadd([1, 2], [9, 5, 4]) array([9, 6, 6]) @@ -873,8 +894,11 @@ def polysub(a1, a2): Examples -------- + .. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2) + >>> import numpy as np + >>> np.polysub([2, 10, -2], [3, 10, -4]) array([-1, 0, 2]) @@ -933,6 +957,7 @@ def polymul(a1, a2): Examples -------- + >>> import numpy as np >>> np.polymul([1, 2, 3], [9, 5, 1]) array([ 9, 23, 38, 17, 3]) @@ -1007,8 +1032,11 @@ def polydiv(u, v): Examples -------- + .. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25 + >>> import numpy as np + >>> x = np.array([3.0, 5.0, 2.0]) >>> y = np.array([2.0, 1.0]) >>> np.polydiv(x, y) @@ -1025,16 +1053,17 @@ def polydiv(u, v): scale = 1. 
/ v[0] q = NX.zeros((max(m - n + 1, 1),), w.dtype) r = u.astype(w.dtype) - for k in range(0, m-n+1): + for k in range(m - n + 1): d = scale * r[k] q[k] = d - r[k:k+n+1] -= d*v + r[k:k + n + 1] -= d * v while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1): r = r[1:] if truepoly: return poly1d(q), poly1d(r) return q, r + _poly_mat = re.compile(r"\*\*([0-9]*)") def _raise_power(astr, wrap=70): n = 0 @@ -1049,16 +1078,16 @@ def _raise_power(astr, wrap=70): power = mat.groups()[0] partstr = astr[n:span[0]] n = span[1] - toadd2 = partstr + ' '*(len(power)-1) - toadd1 = ' '*(len(partstr)-1) + power + toadd2 = partstr + ' ' * (len(power) - 1) + toadd1 = ' ' * (len(partstr) - 1) + power if ((len(line2) + len(toadd2) > wrap) or (len(line1) + len(toadd1) > wrap)): output += line1 + "\n" + line2 + "\n " line1 = toadd1 line2 = toadd2 else: - line2 += partstr + ' '*(len(power)-1) - line1 += ' '*(len(partstr)-1) + power + line2 += partstr + ' ' * (len(power) - 1) + line1 += ' ' * (len(partstr) - 1) + power output += line1 + "\n" + line2 return output + astr[n:] @@ -1096,8 +1125,12 @@ class poly1d: Examples -------- + >>> import numpy as np + Construct the polynomial :math:`x^2 + 2x + 3`: + >>> import numpy as np + >>> p = np.poly1d([1, 2, 3]) >>> print(np.poly1d(p)) 2 @@ -1204,6 +1237,7 @@ def roots(self): @property def _coeffs(self): return self.__dict__['coeffs'] + @_coeffs.setter def _coeffs(self, coeffs): self.__dict__['coeffs'] = coeffs @@ -1249,7 +1283,7 @@ def __array__(self, t=None, copy=None): def __repr__(self): vals = repr(self.coeffs) vals = vals[6:-1] - return "poly1d(%s)" % vals + return f"poly1d({vals})" def __len__(self): return self.order @@ -1260,53 +1294,49 @@ def __str__(self): # Remove leading zeros coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)] - N = len(coeffs)-1 + N = len(coeffs) - 1 def fmt_float(q): - s = '%.4g' % q - if s.endswith('.0000'): - s = s[:-5] + s = f'{q:.4g}' + s = s.removesuffix('.0000') return s for k, coeff in 
enumerate(coeffs): if not iscomplex(coeff): coefstr = fmt_float(real(coeff)) elif real(coeff) == 0: - coefstr = '%sj' % fmt_float(imag(coeff)) + coefstr = f'{fmt_float(imag(coeff))}j' else: - coefstr = '(%s + %sj)' % (fmt_float(real(coeff)), - fmt_float(imag(coeff))) + coefstr = f'({fmt_float(real(coeff))} + {fmt_float(imag(coeff))}j)' - power = (N-k) + power = (N - k) if power == 0: if coefstr != '0': - newstr = '%s' % (coefstr,) + newstr = f'{coefstr}' + elif k == 0: + newstr = '0' else: - if k == 0: - newstr = '0' - else: - newstr = '' + newstr = '' elif power == 1: if coefstr == '0': newstr = '' elif coefstr == 'b': newstr = var else: - newstr = '%s %s' % (coefstr, var) + newstr = f'{coefstr} {var}' + elif coefstr == '0': + newstr = '' + elif coefstr == 'b': + newstr = f'{var}**{power}' else: - if coefstr == '0': - newstr = '' - elif coefstr == 'b': - newstr = '%s**%d' % (var, power,) - else: - newstr = '%s %s**%d' % (coefstr, var, power) + newstr = f'{coefstr} {var}**{power}' if k > 0: if newstr != '': if newstr.startswith('-'): - thestr = "%s - %s" % (thestr, newstr[1:]) + thestr = f"{thestr} - {newstr[1:]}" else: - thestr = "%s + %s" % (thestr, newstr) + thestr = f"{thestr} + {newstr}" else: thestr = newstr return _raise_power(thestr) @@ -1358,24 +1388,20 @@ def __rsub__(self, other): other = poly1d(other) return poly1d(polysub(other.coeffs, self.coeffs)) - def __div__(self, other): + def __truediv__(self, other): if isscalar(other): - return poly1d(self.coeffs/other) + return poly1d(self.coeffs / other) else: other = poly1d(other) return polydiv(self, other) - __truediv__ = __div__ - - def __rdiv__(self, other): + def __rtruediv__(self, other): if isscalar(other): - return poly1d(other/self.coeffs) + return poly1d(other / self.coeffs) else: other = poly1d(other) return polydiv(other, self) - __rtruediv__ = __rdiv__ - def __eq__(self, other): if not isinstance(other, poly1d): return NotImplemented @@ -1388,7 +1414,6 @@ def __ne__(self, other): return 
NotImplemented return not self.__eq__(other) - def __getitem__(self, val): ind = self.order - val if val > self.order: @@ -1402,11 +1427,10 @@ def __setitem__(self, key, val): if key < 0: raise ValueError("Does not support negative powers.") if key > self.order: - zr = NX.zeros(key-self.order, self.coeffs.dtype) + zr = NX.zeros(key - self.order, self.coeffs.dtype) self._coeffs = NX.concatenate((zr, self.coeffs)) ind = 0 self._coeffs[ind] = val - return def __iter__(self): return iter(self.coeffs) @@ -1439,4 +1463,5 @@ def deriv(self, m=1): # Stuff to do on module import + warnings.simplefilter('always', RankWarning) diff --git a/numpy/lib/_polynomial_impl.pyi b/numpy/lib/_polynomial_impl.pyi index 123f32049939..82ec616d6458 100644 --- a/numpy/lib/_polynomial_impl.pyi +++ b/numpy/lib/_polynomial_impl.pyi @@ -1,101 +1,203 @@ +from _typeshed import ConvertibleToInt, Incomplete +from collections.abc import Iterator from typing import ( - Literal as L, - overload, Any, - SupportsInt, - SupportsIndex, - TypeVar, + ClassVar, + Literal as L, NoReturn, + Self, + SupportsIndex, + SupportsInt, + overload, ) import numpy as np from numpy import ( - poly1d as poly1d, - unsignedinteger, - signedinteger, - floating, + complex128, complexfloating, + float64, + floating, int32, int64, - float64, - complex128, object_, + signedinteger, + unsignedinteger, ) - from numpy._typing import ( - NDArray, ArrayLike, + NDArray, _ArrayLikeBool_co, - _ArrayLikeUInt_co, - _ArrayLikeInt_co, - _ArrayLikeFloat_co, _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, _ArrayLikeObject_co, + _ArrayLikeUInt_co, + _FloatLike_co, + _NestedSequence, + _ScalarLike_co, ) -_T = TypeVar("_T") +type _2Tup[T] = tuple[T, T] +type _5Tup[T] = tuple[T, NDArray[float64], NDArray[int32], NDArray[float64], NDArray[float64]] + +### -_2Tup = tuple[_T, _T] -_5Tup = tuple[ - _T, - NDArray[float64], - NDArray[int32], - NDArray[float64], - NDArray[float64], +__all__ = [ + "poly", + "roots", + "polyint", + 
"polyder", + "polyadd", + "polysub", + "polymul", + "polydiv", + "polyval", + "poly1d", + "polyfit", ] -__all__: list[str] +class poly1d: + __module__: L["numpy"] = "numpy" # pyrefly: ignore[bad-override] + + __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] -def poly(seq_of_zeros: ArrayLike) -> NDArray[floating[Any]]: ... + @property + def variable(self) -> str: ... + @property + def order(self) -> int: ... + @property + def o(self) -> int: ... + @property + def roots(self) -> NDArray[Incomplete]: ... + @property + def r(self) -> NDArray[Incomplete]: ... + + # + @property + def coeffs(self) -> NDArray[Incomplete]: ... + @coeffs.setter + def coeffs(self, value: NDArray[Incomplete], /) -> None: ... + + # + @property + def c(self) -> NDArray[Any]: ... + @c.setter + def c(self, value: NDArray[Incomplete], /) -> None: ... + + # + @property + def coef(self) -> NDArray[Incomplete]: ... + @coef.setter + def coef(self, value: NDArray[Incomplete], /) -> None: ... + + # + @property + def coefficients(self) -> NDArray[Incomplete]: ... + @coefficients.setter + def coefficients(self, value: NDArray[Incomplete], /) -> None: ... + + # + def __init__(self, /, c_or_r: ArrayLike, r: bool = False, variable: str | None = None) -> None: ... + + # + @overload + def __array__(self, /, t: None = None, copy: bool | None = None) -> np.ndarray[tuple[int], np.dtype[Incomplete]]: ... + @overload + def __array__[DTypeT: np.dtype](self, /, t: DTypeT, copy: bool | None = None) -> np.ndarray[tuple[int], DTypeT]: ... + + # + @overload + def __call__(self, /, val: _ScalarLike_co) -> Incomplete: ... + @overload + def __call__(self, /, val: poly1d) -> Self: ... + @overload + def __call__(self, /, val: NDArray[Incomplete] | _NestedSequence[_ScalarLike_co]) -> NDArray[Incomplete]: ... + + # + def __len__(self) -> int: ... + def __iter__(self) -> Iterator[Incomplete]: ... + + # + def __getitem__(self, val: int, /) -> Incomplete: ... 
+ def __setitem__(self, key: int, val: Incomplete, /) -> None: ... + + def __neg__(self) -> Self: ... + def __pos__(self) -> Self: ... + + # + def __add__(self, other: ArrayLike, /) -> Self: ... + def __radd__(self, other: ArrayLike, /) -> Self: ... + + # + def __sub__(self, other: ArrayLike, /) -> Self: ... + def __rsub__(self, other: ArrayLike, /) -> Self: ... + + # + def __mul__(self, other: ArrayLike, /) -> Self: ... + def __rmul__(self, other: ArrayLike, /) -> Self: ... + + # + def __pow__(self, val: _FloatLike_co, /) -> Self: ... # Integral floats are accepted + + # + def __truediv__(self, other: ArrayLike, /) -> Self: ... + def __rtruediv__(self, other: ArrayLike, /) -> Self: ... + + # + def deriv(self, /, m: ConvertibleToInt = 1) -> Self: ... + def integ(self, /, m: ConvertibleToInt = 1, k: _ArrayLikeComplex_co | _ArrayLikeObject_co | None = 0) -> poly1d: ... + +# +def poly(seq_of_zeros: ArrayLike) -> NDArray[floating]: ... # Returns either a float or complex array depending on the input values. # See `np.linalg.eigvals`. -def roots(p: ArrayLike) -> NDArray[complexfloating[Any, Any]] | NDArray[floating[Any]]: ... +def roots(p: ArrayLike) -> NDArray[complexfloating] | NDArray[floating]: ... @overload def polyint( p: poly1d, - m: SupportsInt | SupportsIndex = ..., - k: None | _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., + m: SupportsInt | SupportsIndex = 1, + k: _ArrayLikeComplex_co | _ArrayLikeObject_co | None = None, ) -> poly1d: ... @overload def polyint( p: _ArrayLikeFloat_co, - m: SupportsInt | SupportsIndex = ..., - k: None | _ArrayLikeFloat_co = ..., -) -> NDArray[floating[Any]]: ... + m: SupportsInt | SupportsIndex = 1, + k: _ArrayLikeFloat_co | None = None, +) -> NDArray[floating]: ... @overload def polyint( p: _ArrayLikeComplex_co, - m: SupportsInt | SupportsIndex = ..., - k: None | _ArrayLikeComplex_co = ..., -) -> NDArray[complexfloating[Any, Any]]: ... 
+ m: SupportsInt | SupportsIndex = 1, + k: _ArrayLikeComplex_co | None = None, +) -> NDArray[complexfloating]: ... @overload def polyint( p: _ArrayLikeObject_co, - m: SupportsInt | SupportsIndex = ..., - k: None | _ArrayLikeObject_co = ..., + m: SupportsInt | SupportsIndex = 1, + k: _ArrayLikeObject_co | None = None, ) -> NDArray[object_]: ... @overload def polyder( p: poly1d, - m: SupportsInt | SupportsIndex = ..., + m: SupportsInt | SupportsIndex = 1, ) -> poly1d: ... @overload def polyder( p: _ArrayLikeFloat_co, - m: SupportsInt | SupportsIndex = ..., -) -> NDArray[floating[Any]]: ... + m: SupportsInt | SupportsIndex = 1, +) -> NDArray[floating]: ... @overload def polyder( p: _ArrayLikeComplex_co, - m: SupportsInt | SupportsIndex = ..., -) -> NDArray[complexfloating[Any, Any]]: ... + m: SupportsInt | SupportsIndex = 1, +) -> NDArray[complexfloating]: ... @overload def polyder( p: _ArrayLikeObject_co, - m: SupportsInt | SupportsIndex = ..., + m: SupportsInt | SupportsIndex = 1, ) -> NDArray[object_]: ... @overload @@ -103,60 +205,84 @@ def polyfit( x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co, deg: SupportsIndex | SupportsInt, - rcond: None | float = ..., - full: L[False] = ..., - w: None | _ArrayLikeFloat_co = ..., - cov: L[False] = ..., + rcond: float | None = None, + full: L[False] = False, + w: _ArrayLikeFloat_co | None = None, + cov: L[False] = False, ) -> NDArray[float64]: ... @overload def polyfit( x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, deg: SupportsIndex | SupportsInt, - rcond: None | float = ..., - full: L[False] = ..., - w: None | _ArrayLikeFloat_co = ..., - cov: L[False] = ..., + rcond: float | None = None, + full: L[False] = False, + w: _ArrayLikeFloat_co | None = None, + cov: L[False] = False, ) -> NDArray[complex128]: ... 
@overload def polyfit( x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co, deg: SupportsIndex | SupportsInt, - rcond: None | float = ..., - full: L[False] = ..., - w: None | _ArrayLikeFloat_co = ..., - cov: L[True, "unscaled"] = ..., + rcond: float | None = None, + full: L[False] = False, + w: _ArrayLikeFloat_co | None = None, + *, + cov: L[True, "unscaled"], ) -> _2Tup[NDArray[float64]]: ... @overload def polyfit( x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, deg: SupportsIndex | SupportsInt, - rcond: None | float = ..., - full: L[False] = ..., - w: None | _ArrayLikeFloat_co = ..., - cov: L[True, "unscaled"] = ..., + rcond: float | None = None, + full: L[False] = False, + w: _ArrayLikeFloat_co | None = None, + *, + cov: L[True, "unscaled"], ) -> _2Tup[NDArray[complex128]]: ... @overload def polyfit( x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co, deg: SupportsIndex | SupportsInt, - rcond: None | float = ..., - full: L[True] = ..., - w: None | _ArrayLikeFloat_co = ..., - cov: bool | L["unscaled"] = ..., + rcond: float | None, + full: L[True], + w: _ArrayLikeFloat_co | None = None, + cov: bool | L["unscaled"] = False, +) -> _5Tup[NDArray[float64]]: ... +@overload +def polyfit( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: SupportsIndex | SupportsInt, + rcond: float | None = None, + *, + full: L[True], + w: _ArrayLikeFloat_co | None = None, + cov: bool | L["unscaled"] = False, ) -> _5Tup[NDArray[float64]]: ... @overload def polyfit( x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, deg: SupportsIndex | SupportsInt, - rcond: None | float = ..., - full: L[True] = ..., - w: None | _ArrayLikeFloat_co = ..., - cov: bool | L["unscaled"] = ..., + rcond: float | None, + full: L[True], + w: _ArrayLikeFloat_co | None = None, + cov: bool | L["unscaled"] = False, +) -> _5Tup[NDArray[complex128]]: ... 
+@overload +def polyfit( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: SupportsIndex | SupportsInt, + rcond: float | None = None, + *, + full: L[True], + w: _ArrayLikeFloat_co | None = None, + cov: bool | L["unscaled"] = False, ) -> _5Tup[NDArray[complex128]]: ... @overload @@ -168,22 +294,22 @@ def polyval( def polyval( p: _ArrayLikeUInt_co, x: _ArrayLikeUInt_co, -) -> NDArray[unsignedinteger[Any]]: ... +) -> NDArray[unsignedinteger]: ... @overload def polyval( p: _ArrayLikeInt_co, x: _ArrayLikeInt_co, -) -> NDArray[signedinteger[Any]]: ... +) -> NDArray[signedinteger]: ... @overload def polyval( p: _ArrayLikeFloat_co, x: _ArrayLikeFloat_co, -) -> NDArray[floating[Any]]: ... +) -> NDArray[floating]: ... @overload def polyval( p: _ArrayLikeComplex_co, x: _ArrayLikeComplex_co, -) -> NDArray[complexfloating[Any, Any]]: ... +) -> NDArray[complexfloating]: ... @overload def polyval( p: _ArrayLikeObject_co, @@ -209,22 +335,22 @@ def polyadd( def polyadd( a1: _ArrayLikeUInt_co, a2: _ArrayLikeUInt_co, -) -> NDArray[unsignedinteger[Any]]: ... +) -> NDArray[unsignedinteger]: ... @overload def polyadd( a1: _ArrayLikeInt_co, a2: _ArrayLikeInt_co, -) -> NDArray[signedinteger[Any]]: ... +) -> NDArray[signedinteger]: ... @overload def polyadd( a1: _ArrayLikeFloat_co, a2: _ArrayLikeFloat_co, -) -> NDArray[floating[Any]]: ... +) -> NDArray[floating]: ... @overload def polyadd( a1: _ArrayLikeComplex_co, a2: _ArrayLikeComplex_co, -) -> NDArray[complexfloating[Any, Any]]: ... +) -> NDArray[complexfloating]: ... @overload def polyadd( a1: _ArrayLikeObject_co, @@ -250,22 +376,22 @@ def polysub( def polysub( a1: _ArrayLikeUInt_co, a2: _ArrayLikeUInt_co, -) -> NDArray[unsignedinteger[Any]]: ... +) -> NDArray[unsignedinteger]: ... @overload def polysub( a1: _ArrayLikeInt_co, a2: _ArrayLikeInt_co, -) -> NDArray[signedinteger[Any]]: ... +) -> NDArray[signedinteger]: ... @overload def polysub( a1: _ArrayLikeFloat_co, a2: _ArrayLikeFloat_co, -) -> NDArray[floating[Any]]: ... 
+) -> NDArray[floating]: ... @overload def polysub( a1: _ArrayLikeComplex_co, a2: _ArrayLikeComplex_co, -) -> NDArray[complexfloating[Any, Any]]: ... +) -> NDArray[complexfloating]: ... @overload def polysub( a1: _ArrayLikeObject_co, @@ -289,12 +415,12 @@ def polydiv( def polydiv( u: _ArrayLikeFloat_co, v: _ArrayLikeFloat_co, -) -> _2Tup[NDArray[floating[Any]]]: ... +) -> _2Tup[NDArray[floating]]: ... @overload def polydiv( u: _ArrayLikeComplex_co, v: _ArrayLikeComplex_co, -) -> _2Tup[NDArray[complexfloating[Any, Any]]]: ... +) -> _2Tup[NDArray[complexfloating]]: ... @overload def polydiv( u: _ArrayLikeObject_co, diff --git a/numpy/lib/_scimath_impl.py b/numpy/lib/_scimath_impl.py index 43682fefee17..b33f42b3d10d 100644 --- a/numpy/lib/_scimath_impl.py +++ b/numpy/lib/_scimath_impl.py @@ -13,30 +13,13 @@ Similarly, `sqrt`, other base logarithms, `power` and trig functions are correctly handled. See their respective docstrings for specific examples. -Functions ---------- - -.. autosummary:: - :toctree: generated/ - - sqrt - log - log2 - logn - log10 - power - arccos - arcsin - arctanh - """ import numpy._core.numeric as nx import numpy._core.numerictypes as nt -from numpy._core.numeric import asarray, any -from numpy._core.overrides import array_function_dispatch +from numpy._core.numeric import any, asarray +from numpy._core.overrides import array_function_dispatch, set_module from numpy.lib._type_check_impl import isreal - __all__ = [ 'sqrt', 'log', 'log2', 'logn', 'log10', 'power', 'arccos', 'arcsin', 'arctanh' @@ -66,6 +49,7 @@ def _tocomplex(arr): Examples -------- + >>> import numpy as np First, consider an input of type short: @@ -124,6 +108,7 @@ def _fix_real_lt_zero(x): Examples -------- + >>> import numpy as np >>> np.lib.scimath._fix_real_lt_zero([1,2]) array([1, 2]) @@ -152,6 +137,7 @@ def _fix_int_lt_zero(x): Examples -------- + >>> import numpy as np >>> np.lib.scimath._fix_int_lt_zero([1,2]) array([1, 2]) @@ -179,6 +165,7 @@ def _fix_real_abs_gt_1(x): 
Examples -------- + >>> import numpy as np >>> np.lib.scimath._fix_real_abs_gt_1([0,1]) array([0, 1]) @@ -195,6 +182,7 @@ def _unary_dispatcher(x): return (x,) +@set_module('numpy.lib.scimath') @array_function_dispatch(_unary_dispatcher) def sqrt(x): """ @@ -222,6 +210,8 @@ def sqrt(x): -------- For real, non-negative inputs this works just like `numpy.sqrt`: + >>> import numpy as np + >>> np.emath.sqrt(1) 1.0 >>> np.emath.sqrt([1, 4]) @@ -248,6 +238,7 @@ def sqrt(x): return nx.sqrt(x) +@set_module('numpy.lib.scimath') @array_function_dispatch(_unary_dispatcher) def log(x): """ @@ -282,6 +273,7 @@ def log(x): Examples -------- + >>> import numpy as np >>> np.emath.log(np.exp(1)) 1.0 @@ -296,6 +288,7 @@ def log(x): return nx.log(x) +@set_module('numpy.lib.scimath') @array_function_dispatch(_unary_dispatcher) def log10(x): """ @@ -330,6 +323,7 @@ def log10(x): Examples -------- + >>> import numpy as np (We set the printing precision so the example can be auto-tested) @@ -350,6 +344,7 @@ def _logn_dispatcher(n, x): return (n, x,) +@set_module('numpy.lib.scimath') @array_function_dispatch(_logn_dispatcher) def logn(n, x): """ @@ -373,6 +368,7 @@ def logn(n, x): Examples -------- + >>> import numpy as np >>> np.set_printoptions(precision=4) >>> np.emath.logn(2, [4, 8]) @@ -383,9 +379,10 @@ def logn(n, x): """ x = _fix_real_lt_zero(x) n = _fix_real_lt_zero(n) - return nx.log(x)/nx.log(n) + return nx.log(x) / nx.log(n) +@set_module('numpy.lib.scimath') @array_function_dispatch(_unary_dispatcher) def log2(x): """ @@ -420,6 +417,7 @@ def log2(x): Examples -------- + We set the printing precision so the example can be auto-tested: >>> np.set_printoptions(precision=4) @@ -438,6 +436,7 @@ def _power_dispatcher(x, p): return (x, p) +@set_module('numpy.lib.scimath') @array_function_dispatch(_power_dispatcher) def power(x, p): """ @@ -468,6 +467,7 @@ def power(x, p): Examples -------- + >>> import numpy as np >>> np.set_printoptions(precision=4) >>> np.emath.power(2, 2) @@ -491,6 
+491,7 @@ def power(x, p): return nx.power(x, p) +@set_module('numpy.lib.scimath') @array_function_dispatch(_unary_dispatcher) def arccos(x): """ @@ -523,6 +524,7 @@ def arccos(x): Examples -------- + >>> import numpy as np >>> np.set_printoptions(precision=4) >>> np.emath.arccos(1) # a scalar is returned @@ -536,6 +538,7 @@ def arccos(x): return nx.arccos(x) +@set_module('numpy.lib.scimath') @array_function_dispatch(_unary_dispatcher) def arcsin(x): """ @@ -569,6 +572,7 @@ def arcsin(x): Examples -------- + >>> import numpy as np >>> np.set_printoptions(precision=4) >>> np.emath.arcsin(0) @@ -582,6 +586,7 @@ def arcsin(x): return nx.arcsin(x) +@set_module('numpy.lib.scimath') @array_function_dispatch(_unary_dispatcher) def arctanh(x): """ @@ -617,14 +622,15 @@ def arctanh(x): Examples -------- + >>> import numpy as np >>> np.set_printoptions(precision=4) >>> np.emath.arctanh(0.5) 0.5493061443340549 - >>> from numpy.testing import suppress_warnings - >>> with suppress_warnings() as sup: - ... sup.filter(RuntimeWarning) + >>> import warnings + >>> with warnings.catch_warnings(): + ... warnings.simplefilter('ignore', RuntimeWarning) ... np.emath.arctanh(np.eye(2)) array([[inf, 0.], [ 0., inf]]) diff --git a/numpy/lib/_scimath_impl.pyi b/numpy/lib/_scimath_impl.pyi index 589feb15f8ff..e6390c29ccb3 100644 --- a/numpy/lib/_scimath_impl.pyi +++ b/numpy/lib/_scimath_impl.pyi @@ -1,94 +1,93 @@ -from typing import overload, Any +from typing import Any, overload from numpy import complexfloating - from numpy._typing import ( NDArray, - _ArrayLikeFloat_co, _ArrayLikeComplex_co, + _ArrayLikeFloat_co, _ComplexLike_co, _FloatLike_co, ) -__all__: list[str] +__all__ = ["sqrt", "log", "log2", "logn", "log10", "power", "arccos", "arcsin", "arctanh"] @overload def sqrt(x: _FloatLike_co) -> Any: ... @overload -def sqrt(x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +def sqrt(x: _ComplexLike_co) -> complexfloating: ... 
@overload def sqrt(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... @overload -def sqrt(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... +def sqrt(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... @overload def log(x: _FloatLike_co) -> Any: ... @overload -def log(x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +def log(x: _ComplexLike_co) -> complexfloating: ... @overload def log(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... @overload -def log(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... +def log(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... @overload def log10(x: _FloatLike_co) -> Any: ... @overload -def log10(x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +def log10(x: _ComplexLike_co) -> complexfloating: ... @overload def log10(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... @overload -def log10(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... +def log10(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... @overload def log2(x: _FloatLike_co) -> Any: ... @overload -def log2(x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +def log2(x: _ComplexLike_co) -> complexfloating: ... @overload def log2(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... @overload -def log2(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... +def log2(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... @overload def logn(n: _FloatLike_co, x: _FloatLike_co) -> Any: ... @overload -def logn(n: _ComplexLike_co, x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +def logn(n: _ComplexLike_co, x: _ComplexLike_co) -> complexfloating: ... @overload def logn(n: _ArrayLikeFloat_co, x: _ArrayLikeFloat_co) -> NDArray[Any]: ... @overload -def logn(n: _ArrayLikeComplex_co, x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... +def logn(n: _ArrayLikeComplex_co, x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... 
@overload def power(x: _FloatLike_co, p: _FloatLike_co) -> Any: ... @overload -def power(x: _ComplexLike_co, p: _ComplexLike_co) -> complexfloating[Any, Any]: ... +def power(x: _ComplexLike_co, p: _ComplexLike_co) -> complexfloating: ... @overload def power(x: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co) -> NDArray[Any]: ... @overload -def power(x: _ArrayLikeComplex_co, p: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... +def power(x: _ArrayLikeComplex_co, p: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... @overload def arccos(x: _FloatLike_co) -> Any: ... @overload -def arccos(x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +def arccos(x: _ComplexLike_co) -> complexfloating: ... @overload def arccos(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... @overload -def arccos(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... +def arccos(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... @overload def arcsin(x: _FloatLike_co) -> Any: ... @overload -def arcsin(x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +def arcsin(x: _ComplexLike_co) -> complexfloating: ... @overload def arcsin(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... @overload -def arcsin(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... +def arcsin(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... @overload def arctanh(x: _FloatLike_co) -> Any: ... @overload -def arctanh(x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +def arctanh(x: _ComplexLike_co) -> complexfloating: ... @overload def arctanh(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... @overload -def arctanh(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... +def arctanh(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... 
diff --git a/numpy/lib/_shape_base_impl.py b/numpy/lib/_shape_base_impl.py index 68453095db7e..454d3f5d2c26 100644 --- a/numpy/lib/_shape_base_impl.py +++ b/numpy/lib/_shape_base_impl.py @@ -1,22 +1,25 @@ import functools -import warnings +import numpy as np import numpy._core.numeric as _nx -from numpy._core.numeric import asarray, zeros, zeros_like, array, asanyarray +from numpy._core import atleast_3d, overrides +from numpy._core._multiarray_umath import _array_converter from numpy._core.fromnumeric import reshape, transpose from numpy._core.multiarray import normalize_axis_index -from numpy._core._multiarray_umath import _array_converter -from numpy._core import overrides -from numpy._core import vstack, atleast_3d -from numpy._core.numeric import normalize_axis_tuple -from numpy._core.overrides import set_module +from numpy._core.numeric import ( + array, + asanyarray, + asarray, + normalize_axis_tuple, + zeros, + zeros_like, +) from numpy._core.shape_base import _arrays_for_stack_dispatcher from numpy.lib._index_tricks_impl import ndindex from numpy.matrixlib.defmatrix import matrix # this raises all the right alarm bells - __all__ = [ - 'column_stack', 'row_stack', 'dstack', 'array_split', 'split', + 'column_stack', 'dstack', 'array_split', 'split', 'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims', 'apply_along_axis', 'kron', 'tile', 'take_along_axis', 'put_along_axis' @@ -35,7 +38,7 @@ def _make_along_axis_idx(arr_shape, indices, axis): raise ValueError( "`indices` and `arr` must have the same number of dimensions") shape_ones = (1,) * indices.ndim - dest_dims = list(range(axis)) + [None] + list(range(axis+1, indices.ndim)) + dest_dims = list(range(axis)) + [None] + list(range(axis + 1, indices.ndim)) # build a fancy index, consisting of orthogonal aranges, with the # requested index inserted at the right location @@ -44,18 +47,18 @@ def _make_along_axis_idx(arr_shape, indices, axis): if dim is None: fancy_index.append(indices) else: - 
ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim+1:] + ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim + 1:] fancy_index.append(_nx.arange(n).reshape(ind_shape)) return tuple(fancy_index) -def _take_along_axis_dispatcher(arr, indices, axis): +def _take_along_axis_dispatcher(arr, indices, axis=None): return (arr, indices) @array_function_dispatch(_take_along_axis_dispatcher) -def take_along_axis(arr, indices, axis): +def take_along_axis(arr, indices, axis=-1): """ Take values from the input array by matching 1d index and data slices. @@ -66,21 +69,22 @@ def take_along_axis(arr, indices, axis): Functions returning an index along an axis, like `argsort` and `argpartition`, produce suitable indices for this function. - .. versionadded:: 1.15.0 - Parameters ---------- arr : ndarray (Ni..., M, Nk...) Source array indices : ndarray (Ni..., J, Nk...) - Indices to take along each 1d slice of `arr`. This must match the - dimension of arr, but dimensions Ni and Nj only need to broadcast - against `arr`. - axis : int + Indices to take along each 1d slice of ``arr``. This must match the + dimension of ``arr``, but dimensions Ni and Nj only need to broadcast + against ``arr``. + axis : int or None, optional The axis to take 1d slices along. If axis is None, the input array is treated as if it had first been flattened to 1d, for consistency with `sort` and `argsort`. + .. versionchanged:: 2.3 + The default value is now ``-1``. + Returns ------- out: ndarray (Ni..., J, Nk...) 
@@ -115,6 +119,7 @@ def take_along_axis(arr, indices, axis): Examples -------- + >>> import numpy as np For this sample array @@ -162,15 +167,16 @@ def take_along_axis(arr, indices, axis): """ # normalize inputs if axis is None: - arr = arr.flat - arr_shape = (len(arr),) # flatiter has no .shape + if indices.ndim != 1: + raise ValueError( + 'when axis=None, `indices` must have a single dimension.') + arr = np.array(arr.flat) axis = 0 else: axis = normalize_axis_index(axis, arr.ndim) - arr_shape = arr.shape # use the fancy index - return arr[_make_along_axis_idx(arr_shape, indices, axis)] + return arr[_make_along_axis_idx(arr.shape, indices, axis)] def _put_along_axis_dispatcher(arr, indices, values, axis): @@ -189,8 +195,6 @@ def put_along_axis(arr, indices, values, axis): Functions returning an index along an axis, like `argsort` and `argpartition`, produce suitable indices for this function. - .. versionadded:: 1.15.0 - Parameters ---------- arr : ndarray (Ni..., M, Nk...) @@ -233,6 +237,7 @@ def put_along_axis(arr, indices, values, axis): Examples -------- + >>> import numpy as np For this sample array @@ -252,15 +257,16 @@ def put_along_axis(arr, indices, values, axis): """ # normalize inputs if axis is None: - arr = arr.flat + if indices.ndim != 1: + raise ValueError( + 'when axis=None, `indices` must have a single dimension.') + arr = np.array(arr.flat) axis = 0 - arr_shape = (len(arr),) # flatiter has no .shape else: axis = normalize_axis_index(axis, arr.ndim) - arr_shape = arr.shape # use the fancy index - arr[_make_along_axis_idx(arr_shape, indices, axis)] = values + arr[_make_along_axis_idx(arr.shape, indices, axis)] = values def _apply_along_axis_dispatcher(func1d, axis, arr, *args, **kwargs): @@ -307,9 +313,6 @@ def apply_along_axis(func1d, axis, arr, *args, **kwargs): kwargs : any Additional named arguments to `func1d`. - .. versionadded:: 1.9.0 - - Returns ------- out : ndarray (Ni..., Nj..., Nk...) 
@@ -325,6 +328,7 @@ def apply_along_axis(func1d, axis, arr, *args, **kwargs): Examples -------- + >>> import numpy as np >>> def my_func(a): ... \"\"\"Average first and last element of a 1-D array\"\"\" ... return (a[0] + a[-1]) * 0.5 @@ -367,7 +371,7 @@ def apply_along_axis(func1d, axis, arr, *args, **kwargs): # arr, with the iteration axis at the end in_dims = list(range(nd)) - inarr_view = transpose(arr, in_dims[:axis] + in_dims[axis+1:] + [axis]) + inarr_view = transpose(arr, in_dims[:axis] + in_dims[axis + 1:] + [axis]) # compute indices for the iteration axes, and append a trailing ellipsis to # prevent 0d arrays decaying to scalars, which fixes gh-8642 @@ -397,8 +401,8 @@ def apply_along_axis(func1d, axis, arr, *args, **kwargs): buff_dims = list(range(buff.ndim)) buff_permute = ( buff_dims[0 : axis] + - buff_dims[buff.ndim-res.ndim : buff.ndim] + - buff_dims[axis : buff.ndim-res.ndim] + buff_dims[buff.ndim - res.ndim : buff.ndim] + + buff_dims[axis : buff.ndim - res.ndim] ) # save the first result, then compute and save all remaining results @@ -455,6 +459,7 @@ def apply_over_axes(func, a, axes): Examples -------- + >>> import numpy as np >>> a = np.arange(24).reshape(2,3,4) >>> a array([[[ 0, 1, 2, 3], @@ -525,11 +530,6 @@ def expand_dims(a, axis): ``axis == a.ndim``, and passing ``axis < -a.ndim - 1`` will be treated as ``axis == 0``. This behavior is deprecated. - .. versionchanged:: 1.18.0 - A tuple of axes is now supported. Out of range axes as - described above are now forbidden and raise an - `~exceptions.AxisError`. 
- Returns ------- result : ndarray @@ -543,6 +543,7 @@ def expand_dims(a, axis): Examples -------- + >>> import numpy as np >>> x = np.array([1, 2]) >>> x.shape (2,) @@ -587,7 +588,7 @@ def expand_dims(a, axis): else: a = asanyarray(a) - if type(axis) not in (tuple, list): + if not isinstance(axis, (tuple, list)): axis = (axis,) out_ndim = len(axis) + a.ndim @@ -599,22 +600,6 @@ def expand_dims(a, axis): return a.reshape(shape) -# NOTE: Remove once deprecation period passes -@set_module("numpy") -def row_stack(tup, *, dtype=None, casting="same_kind"): - # Deprecated in NumPy 2.0, 2023-08-18 - warnings.warn( - "`row_stack` alias is deprecated. " - "Use `np.vstack` directly.", - DeprecationWarning, - stacklevel=2 - ) - return vstack(tup, dtype=dtype, casting=casting) - - -row_stack.__doc__ = vstack.__doc__ - - def _column_stack_dispatcher(tup): return _arrays_for_stack_dispatcher(tup) @@ -645,12 +630,13 @@ def column_stack(tup): Examples -------- + >>> import numpy as np >>> a = np.array((1,2,3)) - >>> b = np.array((2,3,4)) + >>> b = np.array((4,5,6)) >>> np.column_stack((a,b)) - array([[1, 2], - [2, 3], - [3, 4]]) + array([[1, 4], + [2, 5], + [3, 6]]) """ arrays = [] @@ -704,19 +690,20 @@ def dstack(tup): Examples -------- + >>> import numpy as np >>> a = np.array((1,2,3)) - >>> b = np.array((2,3,4)) + >>> b = np.array((4,5,6)) >>> np.dstack((a,b)) - array([[[1, 2], - [2, 3], - [3, 4]]]) + array([[[1, 4], + [2, 5], + [3, 6]]]) >>> a = np.array([[1],[2],[3]]) - >>> b = np.array([[2],[3],[4]]) + >>> b = np.array([[4],[5],[6]]) >>> np.dstack((a,b)) - array([[[1, 2]], - [[2, 3]], - [[3, 4]]]) + array([[[1, 4]], + [[2, 5]], + [[3, 6]]]) """ arrs = atleast_3d(*tup) @@ -725,15 +712,6 @@ def dstack(tup): return _nx.concatenate(arrs, 2) -def _replace_zero_by_x_arrays(sub_arys): - for i in range(len(sub_arys)): - if _nx.ndim(sub_arys[i]) == 0: - sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype) - elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]), 0)): - sub_arys[i] = 
_nx.empty(0, dtype=sub_arys[i].dtype) - return sub_arys - - def _array_split_dispatcher(ary, indices_or_sections, axis=None): return (ary, indices_or_sections) @@ -756,6 +734,7 @@ def array_split(ary, indices_or_sections, axis=0): Examples -------- + >>> import numpy as np >>> x = np.arange(8.0) >>> np.array_split(x, 3) [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])] @@ -780,8 +759,8 @@ def array_split(ary, indices_or_sections, axis=0): raise ValueError('number sections must be larger than 0.') from None Neach_section, extras = divmod(Ntotal, Nsections) section_sizes = ([0] + - extras * [Neach_section+1] + - (Nsections-extras) * [Neach_section]) + extras * [Neach_section + 1] + + (Nsections - extras) * [Neach_section]) div_points = _nx.array(section_sizes, dtype=_nx.intp).cumsum() sub_arys = [] @@ -852,6 +831,7 @@ def split(ary, indices_or_sections, axis=0): Examples -------- + >>> import numpy as np >>> x = np.arange(9.0) >>> np.split(x, 3) [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])] @@ -895,6 +875,7 @@ def hsplit(ary, indices_or_sections): Examples -------- + >>> import numpy as np >>> x = np.arange(16.0).reshape(4, 4) >>> x array([[ 0., 1., 2., 3.], @@ -965,6 +946,7 @@ def vsplit(ary, indices_or_sections): Examples -------- + >>> import numpy as np >>> x = np.arange(16.0).reshape(4, 4) >>> x array([[ 0., 1., 2., 3.], @@ -1018,6 +1000,7 @@ def dsplit(ary, indices_or_sections): Examples -------- + >>> import numpy as np >>> x = np.arange(16.0).reshape(2, 2, 4) >>> x array([[[ 0., 1., 2., 3.], @@ -1048,30 +1031,6 @@ def dsplit(ary, indices_or_sections): return split(ary, indices_or_sections, 2) -def get_array_wrap(*args): - """Find the wrapper for the array with the highest priority. - - In case of ties, leftmost wins. If no wrapper is found, return None. - - .. deprecated:: 2.0 - """ - - # Deprecated in NumPy 2.0, 2023-07-11 - warnings.warn( - "`get_array_wrap` is deprecated. 
" - "(deprecated in NumPy 2.0)", - DeprecationWarning, - stacklevel=2 - ) - - wrappers = sorted((getattr(x, '__array_priority__', 0), -i, - x.__array_wrap__) for i, x in enumerate(args) - if hasattr(x, '__array_wrap__')) - if wrappers: - return wrappers[-1][-1] - return None - - def _kron_dispatcher(a, b): return (a, b) @@ -1100,7 +1059,7 @@ def kron(a, b): ----- The function assumes that the number of dimensions of `a` and `b` are the same, if necessary prepending the smallest with ones. - If ``a.shape = (r0,r1,..,rN)`` and ``b.shape = (s0,s1,...,sN)``, + If ``a.shape = (r0,r1,...,rN)`` and ``b.shape = (s0,s1,...,sN)``, the Kronecker product has shape ``(r0*s0, r1*s1, ..., rN*SN)``. The elements are products of elements from `a` and `b`, organized explicitly by:: @@ -1120,6 +1079,7 @@ def kron(a, b): Examples -------- + >>> import numpy as np >>> np.kron([1,10,100], [5,6,7]) array([ 5, 6, 7, ..., 500, 600, 700]) >>> np.kron([5,6,7], [1,10,100]) @@ -1171,16 +1131,16 @@ def kron(a, b): b = reshape(b, bs) # Equalise the shapes by prepending smaller one with 1s - as_ = (1,)*max(0, ndb-nda) + as_ - bs = (1,)*max(0, nda-ndb) + bs + as_ = (1,) * max(0, ndb - nda) + as_ + bs = (1,) * max(0, nda - ndb) + bs # Insert empty dimensions - a_arr = expand_dims(a, axis=tuple(range(ndb-nda))) - b_arr = expand_dims(b, axis=tuple(range(nda-ndb))) + a_arr = expand_dims(a, axis=tuple(range(ndb - nda))) + b_arr = expand_dims(b, axis=tuple(range(nda - ndb))) # Compute the product - a_arr = expand_dims(a_arr, axis=tuple(range(1, nd*2, 2))) - b_arr = expand_dims(b_arr, axis=tuple(range(0, nd*2, 2))) + a_arr = expand_dims(a_arr, axis=tuple(range(1, nd * 2, 2))) + b_arr = expand_dims(b_arr, axis=tuple(range(0, nd * 2, 2))) # In case of `mat`, convert result to `array` result = _nx.multiply(a_arr, b_arr, subok=(not is_any_mat)) @@ -1234,6 +1194,7 @@ def tile(A, reps): Examples -------- + >>> import numpy as np >>> a = np.array([0, 1, 2]) >>> np.tile(a, 2) array([0, 1, 2, 0, 1, 2]) @@ -1275,8 
+1236,8 @@ def tile(A, reps): # have no data there is no risk of an inadvertent overwrite. c = _nx.array(A, copy=None, subok=True, ndmin=d) if (d < c.ndim): - tup = (1,)*(c.ndim-d) + tup - shape_out = tuple(s*t for s, t in zip(c.shape, tup)) + tup = (1,) * (c.ndim - d) + tup + shape_out = tuple(s * t for s, t in zip(c.shape, tup)) n = c.size if n > 0: for dim_in, nrep in zip(c.shape, tup): diff --git a/numpy/lib/_shape_base_impl.pyi b/numpy/lib/_shape_base_impl.pyi index cdfe9d9d5637..25cf94be3927 100644 --- a/numpy/lib/_shape_base_impl.pyi +++ b/numpy/lib/_shape_base_impl.pyi @@ -1,202 +1,207 @@ -import sys +from _typeshed import Incomplete from collections.abc import Callable, Sequence -from typing import TypeVar, Any, overload, SupportsIndex, Protocol - -if sys.version_info >= (3, 10): - from typing import ParamSpec, Concatenate -else: - from typing_extensions import ParamSpec, Concatenate - -import numpy as np -from numpy import ( - generic, - integer, - ufunc, - unsignedinteger, - signedinteger, - floating, - complexfloating, - object_, +from typing import ( + Any, + Concatenate, + Protocol, + Self, + SupportsIndex, + overload, + type_check_only, ) +import numpy as np from numpy._typing import ( ArrayLike, NDArray, - _ShapeLike, _ArrayLike, _ArrayLikeBool_co, - _ArrayLikeUInt_co, - _ArrayLikeInt_co, - _ArrayLikeFloat_co, _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, _ArrayLikeObject_co, + _ArrayLikeUInt_co, + _ShapeLike, ) -from numpy._core.shape_base import vstack - -_P = ParamSpec("_P") -_SCT = TypeVar("_SCT", bound=generic) +__all__ = [ + "column_stack", + "dstack", + "array_split", + "split", + "hsplit", + "vsplit", + "dsplit", + "apply_over_axes", + "expand_dims", + "apply_along_axis", + "kron", + "tile", + "take_along_axis", + "put_along_axis", +] # Signature of `__array_wrap__` +@type_check_only class _ArrayWrap(Protocol): def __call__( self, array: NDArray[Any], - context: None | tuple[ufunc, tuple[Any, ...], int] = ..., + context: 
tuple[np.ufunc, tuple[Any, ...], int] | None = ..., return_scalar: bool = ..., /, ) -> Any: ... +@type_check_only class _SupportsArrayWrap(Protocol): @property def __array_wrap__(self) -> _ArrayWrap: ... - -__all__: list[str] - -def take_along_axis( - arr: _SCT | NDArray[_SCT], - indices: NDArray[integer[Any]], - axis: None | int, -) -> NDArray[_SCT]: ... - -def put_along_axis( - arr: NDArray[_SCT], - indices: NDArray[integer[Any]], +# Protocol for array-like objects that preserve their type through split operations. +# Requires shape for size, ndim for dimensional checks in hsplit/vsplit/dsplit, +# swapaxes for axis manipulation, and __getitem__ for slicing. +@type_check_only +class _SupportsSplitOps(Protocol): + @property + def shape(self) -> tuple[int, ...]: ... + @property + def ndim(self) -> int: ... + def swapaxes(self, axis1: int, axis2: int, /) -> Self: ... + def __getitem__(self, key: Any, /) -> Self: ... + +### + +def take_along_axis[ScalarT: np.generic]( + arr: ScalarT | NDArray[ScalarT], + indices: NDArray[np.integer], + axis: int | None = -1, +) -> NDArray[ScalarT]: ... + +# +def put_along_axis[ScalarT: np.generic]( + arr: NDArray[ScalarT], + indices: NDArray[np.integer], values: ArrayLike, - axis: None | int, + axis: int | None, ) -> None: ... +# @overload -def apply_along_axis( - func1d: Callable[Concatenate[NDArray[Any], _P], _ArrayLike[_SCT]], +def apply_along_axis[**Tss, ScalarT: np.generic]( + func1d: Callable[Concatenate[np.ndarray, Tss], _ArrayLike[ScalarT]], axis: SupportsIndex, arr: ArrayLike, - *args: _P.args, - **kwargs: _P.kwargs, -) -> NDArray[_SCT]: ... + *args: Tss.args, + **kwargs: Tss.kwargs, +) -> NDArray[ScalarT]: ... 
@overload -def apply_along_axis( - func1d: Callable[Concatenate[NDArray[Any], _P], ArrayLike], +def apply_along_axis[**Tss]( + func1d: Callable[Concatenate[np.ndarray, Tss], Any], axis: SupportsIndex, arr: ArrayLike, - *args: _P.args, - **kwargs: _P.kwargs, + *args: Tss.args, + **kwargs: Tss.kwargs, ) -> NDArray[Any]: ... -def apply_over_axes( - func: Callable[[NDArray[Any], int], NDArray[_SCT]], +# +def apply_over_axes[ScalarT: np.generic]( + func: Callable[[np.ndarray, int], NDArray[ScalarT]], a: ArrayLike, - axes: int | Sequence[int], -) -> NDArray[_SCT]: ... + axes: _ShapeLike, +) -> NDArray[ScalarT]: ... +# @overload -def expand_dims( - a: _ArrayLike[_SCT], - axis: _ShapeLike, -) -> NDArray[_SCT]: ... +def expand_dims[ScalarT: np.generic](a: _ArrayLike[ScalarT], axis: _ShapeLike) -> NDArray[ScalarT]: ... @overload -def expand_dims( - a: ArrayLike, - axis: _ShapeLike, -) -> NDArray[Any]: ... +def expand_dims(a: ArrayLike, axis: _ShapeLike) -> NDArray[Incomplete]: ... +# keep in sync with `numpy.ma.extras.column_stack` @overload -def column_stack(tup: Sequence[_ArrayLike[_SCT]]) -> NDArray[_SCT]: ... +def column_stack[ScalarT: np.generic](tup: Sequence[_ArrayLike[ScalarT]]) -> NDArray[ScalarT]: ... @overload -def column_stack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ... +def column_stack(tup: Sequence[ArrayLike]) -> NDArray[Incomplete]: ... +# keep in sync with `numpy.ma.extras.dstack` @overload -def dstack(tup: Sequence[_ArrayLike[_SCT]]) -> NDArray[_SCT]: ... +def dstack[ScalarT: np.generic](tup: Sequence[_ArrayLike[ScalarT]]) -> NDArray[ScalarT]: ... @overload -def dstack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ... +def dstack(tup: Sequence[ArrayLike]) -> NDArray[Incomplete]: ... +# @overload -def array_split( - ary: _ArrayLike[_SCT], - indices_or_sections: _ShapeLike, - axis: SupportsIndex = ..., -) -> list[NDArray[_SCT]]: ... 
-@overload -def array_split( - ary: ArrayLike, +def array_split[SplitableT: _SupportsSplitOps]( + ary: SplitableT, indices_or_sections: _ShapeLike, - axis: SupportsIndex = ..., -) -> list[NDArray[Any]]: ... - + axis: SupportsIndex = 0, +) -> list[SplitableT]: ... @overload -def split( - ary: _ArrayLike[_SCT], +def array_split[ScalarT: np.generic]( + ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike, - axis: SupportsIndex = ..., -) -> list[NDArray[_SCT]]: ... + axis: SupportsIndex = 0, +) -> list[NDArray[ScalarT]]: ... @overload -def split( - ary: ArrayLike, - indices_or_sections: _ShapeLike, - axis: SupportsIndex = ..., -) -> list[NDArray[Any]]: ... +def array_split(ary: ArrayLike, indices_or_sections: _ShapeLike, axis: SupportsIndex = 0) -> list[NDArray[Incomplete]]: ... +# @overload -def hsplit( - ary: _ArrayLike[_SCT], +def split[SplitableT: _SupportsSplitOps]( + ary: SplitableT, indices_or_sections: _ShapeLike, -) -> list[NDArray[_SCT]]: ... + axis: SupportsIndex = 0, +) -> list[SplitableT]: ... @overload -def hsplit( - ary: ArrayLike, +def split[ScalarT: np.generic]( + ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike, -) -> list[NDArray[Any]]: ... + axis: SupportsIndex = 0, +) -> list[NDArray[ScalarT]]: ... +@overload +def split(ary: ArrayLike, indices_or_sections: _ShapeLike, axis: SupportsIndex = 0) -> list[NDArray[Incomplete]]: ... +# keep in sync with `numpy.ma.extras.hsplit` @overload -def vsplit( - ary: _ArrayLike[_SCT], - indices_or_sections: _ShapeLike, -) -> list[NDArray[_SCT]]: ... +def hsplit[SplitableT: _SupportsSplitOps](ary: SplitableT, indices_or_sections: _ShapeLike) -> list[SplitableT]: ... @overload -def vsplit( - ary: ArrayLike, - indices_or_sections: _ShapeLike, -) -> list[NDArray[Any]]: ... +def hsplit[ScalarT: np.generic](ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike) -> list[NDArray[ScalarT]]: ... +@overload +def hsplit(ary: ArrayLike, indices_or_sections: _ShapeLike) -> list[NDArray[Incomplete]]: ... 
+# @overload -def dsplit( - ary: _ArrayLike[_SCT], - indices_or_sections: _ShapeLike, -) -> list[NDArray[_SCT]]: ... +def vsplit[SplitableT: _SupportsSplitOps](ary: SplitableT, indices_or_sections: _ShapeLike) -> list[SplitableT]: ... @overload -def dsplit( - ary: ArrayLike, - indices_or_sections: _ShapeLike, -) -> list[NDArray[Any]]: ... +def vsplit[ScalarT: np.generic](ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike) -> list[NDArray[ScalarT]]: ... +@overload +def vsplit(ary: ArrayLike, indices_or_sections: _ShapeLike) -> list[NDArray[Incomplete]]: ... +# @overload -def get_array_wrap(*args: _SupportsArrayWrap) -> _ArrayWrap: ... +def dsplit[SplitableT: _SupportsSplitOps](ary: SplitableT, indices_or_sections: _ShapeLike) -> list[SplitableT]: ... @overload -def get_array_wrap(*args: object) -> None | _ArrayWrap: ... +def dsplit[ScalarT: np.generic](ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike) -> list[NDArray[ScalarT]]: ... +@overload +def dsplit(ary: ArrayLike, indices_or_sections: _ShapeLike) -> list[NDArray[Incomplete]]: ... +# @overload -def kron(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] +def kron(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co) -> NDArray[np.bool]: ... @overload -def kron(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] +def kron(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co) -> NDArray[np.unsignedinteger]: ... @overload -def kron(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] +def kron(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co) -> NDArray[np.signedinteger]: ... @overload -def kron(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] +def kron(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[np.floating]: ... @overload -def kron(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... 
+def kron(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> NDArray[np.complexfloating]: ... @overload -def kron(a: _ArrayLikeObject_co, b: Any) -> NDArray[object_]: ... +def kron(a: _ArrayLikeObject_co, b: object) -> NDArray[np.object_]: ... @overload -def kron(a: Any, b: _ArrayLikeObject_co) -> NDArray[object_]: ... +def kron(a: object, b: _ArrayLikeObject_co) -> NDArray[np.object_]: ... +# @overload -def tile( - A: _ArrayLike[_SCT], - reps: int | Sequence[int], -) -> NDArray[_SCT]: ... +def tile[ScalarT: np.generic](A: _ArrayLike[ScalarT], reps: int | Sequence[int]) -> NDArray[ScalarT]: ... @overload -def tile( - A: ArrayLike, - reps: int | Sequence[int], -) -> NDArray[Any]: ... +def tile(A: ArrayLike, reps: int | Sequence[int]) -> NDArray[Incomplete]: ... diff --git a/numpy/lib/_stride_tricks_impl.py b/numpy/lib/_stride_tricks_impl.py index 0d437ea1e416..9e8324cec259 100644 --- a/numpy/lib/_stride_tricks_impl.py +++ b/numpy/lib/_stride_tricks_impl.py @@ -3,18 +3,13 @@ An explanation of strides can be found in the :ref:`arrays.ndarray`. -Functions ---------- - -.. autosummary:: - :toctree: generated/ - """ import numpy as np from numpy._core.numeric import normalize_axis_tuple from numpy._core.overrides import array_function_dispatch, set_module +from numpy.lib._array_utils_impl import byte_bounds -__all__ = ['broadcast_to', 'broadcast_arrays', 'broadcast_shapes'] +__all__ = ["broadcast_to", "broadcast_arrays", "broadcast_shapes"] class DummyArray: @@ -41,7 +36,9 @@ def _maybe_view_as_subclass(original_array, new_array): @set_module("numpy.lib.stride_tricks") -def as_strided(x, shape=None, strides=None, subok=False, writeable=True): +def as_strided( + x, shape=None, strides=None, subok=False, writeable=True, *, check_bounds=None +): """ Create a view into the array with the given shape and strides. @@ -56,20 +53,25 @@ def as_strided(x, shape=None, strides=None, subok=False, writeable=True): strides : sequence of int, optional The strides of the new array. 
Defaults to ``x.strides``. subok : bool, optional - .. versionadded:: 1.10 - If True, subclasses are preserved. writeable : bool, optional - .. versionadded:: 1.12 - If set to False, the returned array will always be readonly. Otherwise it will be writable if the original array was. It is advisable to set this to False if possible (see Notes). + check_bounds : bool or None, optional + Check new stride and shape for potential out-of-bounds memory + access. Returns ------- view : ndarray + Raises + ------ + ValueError + If `check_bounds` is True and the given shape and strides could result in + out-of-bounds memory access. + See also -------- broadcast_to : broadcast an array to a given shape. @@ -79,7 +81,7 @@ def as_strided(x, shape=None, strides=None, subok=False, writeable=True): Notes ----- - ``as_strided`` creates a view into the array given the exact strides + `as_strided` creates a view into the array given the exact strides and shape. This means it manipulates the internal data structure of ndarray and, if done incorrectly, the array elements can point to invalid memory and can corrupt results or crash your program. @@ -97,27 +99,73 @@ def as_strided(x, shape=None, strides=None, subok=False, writeable=True): care, you may want to use ``writeable=False`` to avoid accidental write operations. - For these reasons it is advisable to avoid ``as_strided`` when + For these reasons it is advisable to avoid `as_strided` when possible. + + Examples + -------- + + >>> import numpy as np + >>> from numpy.lib.stride_tricks import as_strided + >>> x = np.arange(10) + >>> y = as_strided(x, shape=(5,), strides=(8,), check_bounds=True) + >>> y + array([0, 1, 2, 3, 4]) + + Attempting to create an out-of-bounds view with ``check_bounds=True``, + as_strided will raise an error: + + >>> as_strided(x, shape=(20,), strides=(8,), check_bounds=True) + Traceback (most recent call last): + ... + ValueError: Given shape and strides would access memory out of bounds...
+ + When working with views, bounds are checked against the base array: + + >>> a = np.arange(1000) + >>> b = a[:2] + >>> c = as_strided(b, shape=(2,), strides=(400,), check_bounds=True) + >>> c[0], c[1] + (0, 50) """ + # first convert input to array, possibly keeping subclass - x = np.array(x, copy=None, subok=subok) - interface = dict(x.__array_interface__) + base = np.array(x, copy=None, subok=subok) + interface = dict(base.__array_interface__) if shape is not None: interface['shape'] = tuple(shape) if strides is not None: interface['strides'] = tuple(strides) - array = np.asarray(DummyArray(interface, base=x)) + array = np.asarray(DummyArray(interface, base=base)) # The route via `__interface__` does not preserve structured # dtypes. Since dtype should remain unchanged, we set it explicitly. - array.dtype = x.dtype + array.dtype = base.dtype - view = _maybe_view_as_subclass(x, array) + view = _maybe_view_as_subclass(base, array) if view.flags.writeable and not writeable: view.flags.writeable = False + if check_bounds: + while isinstance(base.base, np.ndarray): + base = base.base + + base_low, base_high = byte_bounds(base) + view_low, view_high = byte_bounds(view) + + if view_low < base_low: + raise ValueError( + f"Given shape and strides would access memory out of bounds. " + f"View starts {base_low - view_low} bytes before lowest address" + ) + + if view_high > base_high: + raise ValueError( + f"Given shape and strides would access memory out of bounds. " + f"View ends {view_high - base_high} bytes after highest address" + ) + return view @@ -178,11 +226,20 @@ def sliding_window_view(x, window_shape, axis=None, *, See Also -------- lib.stride_tricks.as_strided: A lower-level and less safe routine for - creating arbitrary views from custom shape and strides. + creating arbitrary views from custom shape and strides. Use the + ``check_bounds`` parameter for bounds validation. broadcast_to: broadcast an array to a given shape. Notes ----- + ..
warning:: + + This function creates views with overlapping memory. When + ``writeable=True``, writing to the view will modify the original array + and may affect multiple view positions. See the examples below and + :doc:`this guide </user/basics.copies>` + about the difference between copies and views. + For many applications using a sliding window view can be convenient, but potentially very slow. Often specialized solutions exist, for example: @@ -204,6 +261,7 @@ def sliding_window_view(x, window_shape, axis=None, *, Examples -------- + >>> import numpy as np >>> from numpy.lib.stride_tricks import sliding_window_view >>> x = np.arange(6) >>> x.shape @@ -306,6 +364,31 @@ def sliding_window_view(x, window_shape, axis=None, *, >>> moving_average array([1., 2., 3., 4.]) + The two examples below demonstrate the effect of ``writeable=True``. + + Creating a view with the default ``writeable=False`` and then writing to + it raises an error. + + >>> v = sliding_window_view(x, 3) + >>> v[0,1] = 10 + Traceback (most recent call last): + ... + ValueError: assignment destination is read-only + + Creating a view with ``writeable=True`` and then writing to it changes + the original array and multiple view positions. + + >>> x = np.arange(6) # reset x for the second example + >>> v = sliding_window_view(x, 3, writeable=True) + >>> v[0,1] = 10 + >>> x + array([ 0, 10, 2, 3, 4, 5]) + >>> v + array([[ 0, 10, 2], + [10, 2, 3], + [ 2, 3, 4], + [ 3, 4, 5]]) + Note that a sliding window approach is often **not** optimal (see Notes). """ window_shape = (tuple(window_shape) @@ -407,12 +490,9 @@ def broadcast_to(array, shape, subok=False): broadcast_arrays broadcast_shapes - Notes - ----- - ..
versionadded:: 1.10.0 - Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 3]) >>> np.broadcast_to(x, (3, 3)) array([[1, 2, 3], @@ -428,14 +508,14 @@ def _broadcast_shape(*args): """ # use the old-iterator because np.nditer does not handle size 0 arrays # consistently - b = np.broadcast(*args[:32]) - # unfortunately, it cannot handle 32 or more arguments directly - for pos in range(32, len(args), 31): + b = np.broadcast(*args[:64]) + # unfortunately, it cannot handle 64 or more arguments directly + for pos in range(64, len(args), 63): # ironically, np.broadcast does not properly handle np.broadcast # objects (it treats them as scalars) # use broadcasting to avoid allocating the full array b = broadcast_to(0, b.shape) - b = np.broadcast(b, *args[pos:(pos + 31)]) + b = np.broadcast(b, *args[pos:(pos + 63)]) return b.shape @@ -475,6 +555,7 @@ def broadcast_shapes(*args): Examples -------- + >>> import numpy as np >>> np.broadcast_shapes((1, 2), (3, 1), (3, 2)) (3, 2) @@ -526,6 +607,7 @@ def broadcast_arrays(*args, subok=False): Examples -------- + >>> import numpy as np >>> x = np.array([[1,2,3]]) >>> y = np.array([[4],[5]]) >>> np.broadcast_arrays(x, y) @@ -544,7 +626,7 @@ def broadcast_arrays(*args, subok=False): [5, 5, 5]])] """ - # nditer is not used here to avoid the limit of 32 arrays. + # nditer is not used here to avoid the limit of 64 arrays. 
# Otherwise, something like the following one-liner would suffice: # return np.nditer(args, flags=['multi_index', 'zerosize_ok'], # order='C').itviews diff --git a/numpy/lib/_stride_tricks_impl.pyi b/numpy/lib/_stride_tricks_impl.pyi index cf635f1fb640..faba9ab80cd4 100644 --- a/numpy/lib/_stride_tricks_impl.pyi +++ b/numpy/lib/_stride_tricks_impl.pyi @@ -1,80 +1,75 @@ from collections.abc import Iterable -from typing import Any, TypeVar, overload, SupportsIndex +from typing import Any, SupportsIndex, overload -from numpy import generic -from numpy._typing import ( - NDArray, - ArrayLike, - _ShapeLike, - _Shape, - _ArrayLike -) +import numpy as np +from numpy._typing import ArrayLike, NDArray, _AnyShape, _ArrayLike, _ShapeLike -_SCT = TypeVar("_SCT", bound=generic) - -__all__: list[str] +__all__ = ["broadcast_to", "broadcast_arrays", "broadcast_shapes"] class DummyArray: __array_interface__: dict[str, Any] - base: None | NDArray[Any] + base: NDArray[Any] | None def __init__( self, interface: dict[str, Any], - base: None | NDArray[Any] = ..., + base: NDArray[Any] | None = None, ) -> None: ... @overload -def as_strided( - x: _ArrayLike[_SCT], - shape: None | Iterable[int] = ..., - strides: None | Iterable[int] = ..., - subok: bool = ..., - writeable: bool = ..., -) -> NDArray[_SCT]: ... +def as_strided[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], + shape: Iterable[int] | None = None, + strides: Iterable[int] | None = None, + subok: bool = False, + writeable: bool = True, + *, + check_bounds: bool | None = None +) -> NDArray[ScalarT]: ... @overload def as_strided( x: ArrayLike, - shape: None | Iterable[int] = ..., - strides: None | Iterable[int] = ..., - subok: bool = ..., - writeable: bool = ..., + shape: Iterable[int] | None = None, + strides: Iterable[int] | None = None, + subok: bool = False, + writeable: bool = True, + *, + check_bounds: bool | None = None ) -> NDArray[Any]: ... 
@overload -def sliding_window_view( - x: _ArrayLike[_SCT], +def sliding_window_view[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], window_shape: int | Iterable[int], - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = None, *, - subok: bool = ..., - writeable: bool = ..., -) -> NDArray[_SCT]: ... + subok: bool = False, + writeable: bool = False, +) -> NDArray[ScalarT]: ... @overload def sliding_window_view( x: ArrayLike, window_shape: int | Iterable[int], - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = None, *, - subok: bool = ..., - writeable: bool = ..., + subok: bool = False, + writeable: bool = False, ) -> NDArray[Any]: ... @overload -def broadcast_to( - array: _ArrayLike[_SCT], +def broadcast_to[ScalarT: np.generic]( + array: _ArrayLike[ScalarT], shape: int | Iterable[int], - subok: bool = ..., -) -> NDArray[_SCT]: ... + subok: bool = False, +) -> NDArray[ScalarT]: ... @overload def broadcast_to( array: ArrayLike, shape: int | Iterable[int], - subok: bool = ..., + subok: bool = False, ) -> NDArray[Any]: ... -def broadcast_shapes(*args: _ShapeLike) -> _Shape: ... +def broadcast_shapes(*args: _ShapeLike) -> _AnyShape: ... +def broadcast_arrays(*args: ArrayLike, subok: bool = False) -> tuple[NDArray[Any], ...]: ... -def broadcast_arrays( - *args: ArrayLike, - subok: bool = ..., -) -> tuple[NDArray[Any], ...]: ... +# used internally by `lib._function_base_impl._parse_input_dimensions` +def _broadcast_shape(*args: ArrayLike) -> _AnyShape: ... 
diff --git a/numpy/lib/_twodim_base_impl.py b/numpy/lib/_twodim_base_impl.py index 8eb6eccfcfbd..f92bfe9ce104 100644 --- a/numpy/lib/_twodim_base_impl.py +++ b/numpy/lib/_twodim_base_impl.py @@ -4,18 +4,31 @@ import functools import operator +from numpy._core import iinfo, overrides from numpy._core._multiarray_umath import _array_converter from numpy._core.numeric import ( - asanyarray, arange, zeros, greater_equal, multiply, ones, - asarray, where, int8, int16, int32, int64, intp, empty, promote_types, - diagonal, nonzero, indices - ) -from numpy._core.overrides import set_array_function_like_doc, set_module -from numpy._core import overrides -from numpy._core import iinfo + arange, + asanyarray, + asarray, + diagonal, + empty, + greater_equal, + indices, + int8, + int16, + int32, + int64, + intp, + multiply, + nonzero, + ones, + promote_types, + where, + zeros, +) +from numpy._core.overrides import finalize_array_function_like, set_module from numpy.lib._stride_tricks_impl import broadcast_to - __all__ = [ 'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu', 'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices', @@ -79,6 +92,7 @@ def fliplr(m): Examples -------- + >>> import numpy as np >>> A = np.diag([1.,2.,3.]) >>> A array([[1., 0., 0.], @@ -133,6 +147,7 @@ def flipud(m): Examples -------- + >>> import numpy as np >>> A = np.diag([1.0, 2, 3]) >>> A array([[1., 0., 0.], @@ -158,7 +173,7 @@ def flipud(m): return m[::-1, ...] -@set_array_function_like_doc +@finalize_array_function_like @set_module('numpy') def eye(N, M=None, k=0, dtype=float, order='C', *, device=None, like=None): """ @@ -179,8 +194,6 @@ def eye(N, M=None, k=0, dtype=float, order='C', *, device=None, like=None): order : {'C', 'F'}, optional Whether the output should be stored in row-major (C-style) or column-major (Fortran-style) order in memory. - - .. versionadded:: 1.14.0 device : str, optional The device on which to place the created array. Default: None. 
For Array-API interoperability only, so must be ``"cpu"`` if passed. @@ -203,7 +216,8 @@ def eye(N, M=None, k=0, dtype=float, order='C', *, device=None, like=None): Examples -------- - >>> np.eye(2, dtype=int) + >>> import numpy as np + >>> np.eye(2, dtype=np.int_) array([[1, 0], [0, 1]]) >>> np.eye(3, k=1) @@ -230,7 +244,7 @@ def eye(N, M=None, k=0, dtype=float, order='C', *, device=None, like=None): i = k else: i = (-k) * M - m[:M-k].flat[i::M+1] = 1 + m[:M - k].flat[i::M + 1] = 1 return m @@ -277,6 +291,7 @@ def diag(v, k=0): Examples -------- + >>> import numpy as np >>> x = np.arange(9).reshape((3,3)) >>> x array([[0, 1, 2], @@ -299,13 +314,13 @@ def diag(v, k=0): v = asanyarray(v) s = v.shape if len(s) == 1: - n = s[0]+abs(k) + n = s[0] + abs(k) res = zeros((n, n), v.dtype) if k >= 0: i = k else: i = (-k) * n - res[:n-k].flat[i::n+1] = v + res[:n - k].flat[i::n + 1] = v return res elif len(s) == 2: return diagonal(v, k) @@ -341,6 +356,7 @@ def diagflat(v, k=0): Examples -------- + >>> import numpy as np >>> np.diagflat([[1,2], [3,4]]) array([[1, 0, 0, 0], [0, 2, 0, 0], @@ -360,17 +376,17 @@ def diagflat(v, k=0): n = s + abs(k) res = zeros((n, n), v.dtype) if (k >= 0): - i = arange(0, n-k, dtype=intp) - fi = i+k+i*n + i = arange(0, n - k, dtype=intp) + fi = i + k + i * n else: - i = arange(0, n+k, dtype=intp) - fi = i+(i-k)*n + i = arange(0, n + k, dtype=intp) + fi = i + (i - k) * n res.flat[fi] = v return conv.wrap(res) -@set_array_function_like_doc +@finalize_array_function_like @set_module('numpy') def tri(N, M=None, k=0, dtype=float, *, like=None): """ @@ -401,7 +417,8 @@ def tri(N, M=None, k=0, dtype=float, *, like=None): Examples -------- - >>> np.tri(3, 5, 2, dtype=int) + >>> import numpy as np + >>> np.tri(3, 5, 2, dtype=np.int_) array([[1, 1, 1, 0, 0], [1, 1, 1, 1, 0], [1, 1, 1, 1, 1]]) @@ -419,7 +436,7 @@ def tri(N, M=None, k=0, dtype=float, *, like=None): M = N m = greater_equal.outer(arange(N, dtype=_min_int(0, N)), - arange(-k, M-k, 
dtype=_min_int(-k, M - k))) + arange(-k, M - k, dtype=_min_int(-k, M - k))) # Avoid making a copy if the requested type is already bool m = m.astype(dtype, copy=False) @@ -462,6 +479,7 @@ def tril(m, k=0): Examples -------- + >>> import numpy as np >>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) array([[ 0, 0, 0], [ 4, 0, 0], @@ -506,6 +524,7 @@ def triu(m, k=0): Examples -------- + >>> import numpy as np >>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) array([[ 1, 2, 3], [ 4, 5, 6], @@ -528,7 +547,7 @@ def triu(m, k=0): """ m = asanyarray(m) - mask = tri(*m.shape[-2:], k=k-1, dtype=bool) + mask = tri(*m.shape[-2:], k=k - 1, dtype=bool) return where(mask, zeros(1, m.dtype), m) @@ -561,8 +580,6 @@ def vander(x, N=None, increasing=False): Order of the powers of the columns. If True, the powers increase from left to right, if False (the default) they are reversed. - .. versionadded:: 1.9.0 - Returns ------- out : ndarray @@ -576,6 +593,7 @@ def vander(x, N=None, increasing=False): Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 3, 5]) >>> N = 3 >>> np.vander(x, N) @@ -718,6 +736,7 @@ def histogram2d(x, y, bins=10, range=None, density=None, weights=None): Examples -------- + >>> import numpy as np >>> from matplotlib.image import NonUniformImage >>> import matplotlib.pyplot as plt @@ -809,7 +828,7 @@ def histogram2d(x, y, bins=10, range=None, density=None, weights=None): except TypeError: N = 1 - if N != 1 and N != 2: + if N not in {1, 2}: xedges = yedges = asarray(bins) bins = [xedges, yedges] hist, edges = histogramdd([x, y], bins, range, density, weights) @@ -850,12 +869,10 @@ def mask_indices(n, mask_func, k=0): -------- triu, tril, triu_indices, tril_indices - Notes - ----- - .. 
versionadded:: 1.4.0 - Examples -------- + >>> import numpy as np + These are the indices that would allow you to access the upper triangular part of any 3x3 array: @@ -900,8 +917,6 @@ def tril_indices(n, k=0, m=None): k : int, optional Diagonal offset (see `tril` for details). m : int, optional - .. versionadded:: 1.9.0 - The column dimension of the arrays for which the returned arrays will be valid. By default `m` is taken equal to `n`. @@ -910,8 +925,9 @@ def tril_indices(n, k=0, m=None): Returns ------- inds : tuple of arrays - The indices for the triangle. The returned tuple contains two arrays, - each with the indices along one dimension of the array. + The row and column indices, respectively. The row indices are sorted + in non-decreasing order, and the corresponding column indices are + strictly increasing for each row. See also -------- @@ -919,19 +935,20 @@ def tril_indices(n, k=0, m=None): mask_indices : generic function accepting an arbitrary mask function. tril, triu - Notes - ----- - .. versionadded:: 1.4.0 - Examples -------- + >>> import numpy as np + Compute two different sets of indices to access 4x4 arrays, one for the lower triangular part starting at the main diagonal, and one starting two diagonals further right: >>> il1 = np.tril_indices(4) - >>> il2 = np.tril_indices(4, 2) + >>> il1 + (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3])) + Note that row indices (first array) are non-decreasing, and the corresponding + column indices (second array) are strictly increasing for each row. Here is how they can be used with a sample array: >>> a = np.arange(16).reshape(4, 4) @@ -957,6 +974,7 @@ def tril_indices(n, k=0, m=None): These cover almost the whole array (two diagonals right of the main one): + >>> il2 = np.tril_indices(4, 2) >>> a[il2] = -10 >>> a array([[-10, -10, -10, 3], @@ -992,8 +1010,9 @@ def tril_indices_from(arr, k=0): Examples -------- + >>> import numpy as np - Create a 4 by 4 array. 
+ Create a 4 by 4 array >>> a = np.arange(16).reshape(4, 4) >>> a @@ -1026,11 +1045,6 @@ def tril_indices_from(arr, k=0): See Also -------- tril_indices, tril, triu_indices_from - - Notes - ----- - .. versionadded:: 1.4.0 - """ if arr.ndim != 2: raise ValueError("input array must be 2-d") @@ -1050,8 +1064,6 @@ def triu_indices(n, k=0, m=None): k : int, optional Diagonal offset (see `triu` for details). m : int, optional - .. versionadded:: 1.9.0 - The column dimension of the arrays for which the returned arrays will be valid. By default `m` is taken equal to `n`. @@ -1060,9 +1072,9 @@ def triu_indices(n, k=0, m=None): Returns ------- inds : tuple, shape(2) of ndarrays, shape(`n`) - The indices for the triangle. The returned tuple contains two arrays, - each with the indices along one dimension of the array. Can be used - to slice a ndarray of shape(`n`, `n`). + The row and column indices, respectively. The row indices are sorted + in non-decreasing order, and the corresponding column indices are + strictly increasing for each row. See also -------- @@ -1070,18 +1082,20 @@ def triu_indices(n, k=0, m=None): mask_indices : generic function accepting an arbitrary mask function. triu, tril - Notes - ----- - .. versionadded:: 1.4.0 - Examples -------- + >>> import numpy as np + Compute two different sets of indices to access 4x4 arrays, one for the upper triangular part starting at the main diagonal, and one starting two diagonals further right: >>> iu1 = np.triu_indices(4) - >>> iu2 = np.triu_indices(4, 2) + >>> iu1 + (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3])) + + Note that row indices (first array) are non-decreasing, and the corresponding + column indices (second array) are strictly increasing for each row. 
Here is how they can be used with a sample array: @@ -1109,6 +1123,7 @@ def triu_indices(n, k=0, m=None): These cover only a small part of the whole array (two diagonals right of the main one): + >>> iu2 = np.triu_indices(4, 2) >>> a[iu2] = -10 >>> a array([[ -1, -1, -10, -10], @@ -1144,8 +1159,9 @@ def triu_indices_from(arr, k=0): Examples -------- + >>> import numpy as np - Create a 4 by 4 array. + Create a 4 by 4 array >>> a = np.arange(16).reshape(4, 4) >>> a @@ -1179,11 +1195,6 @@ def triu_indices_from(arr, k=0): See Also -------- triu_indices, triu, tril_indices_from - - Notes - ----- - .. versionadded:: 1.4.0 - """ if arr.ndim != 2: raise ValueError("input array must be 2-d") diff --git a/numpy/lib/_twodim_base_impl.pyi b/numpy/lib/_twodim_base_impl.pyi index 4096976871d7..af8e3d72c4d8 100644 --- a/numpy/lib/_twodim_base_impl.pyi +++ b/numpy/lib/_twodim_base_impl.pyi @@ -1,243 +1,391 @@ -import builtins +from _typeshed import Incomplete from collections.abc import Callable, Sequence -from typing import ( - Any, - overload, - TypeVar, - Literal as L, -) +from typing import Any, Literal as L, Never, Protocol, overload, type_check_only import numpy as np -from numpy import ( - generic, - number, - timedelta64, - datetime64, - int_, - intp, - float64, - signedinteger, - floating, - complexfloating, - object_, - _OrderCF, -) - +from numpy import _OrderCF from numpy._typing import ( - DTypeLike, - _DTypeLike, ArrayLike, - _ArrayLike, + DTypeLike, NDArray, + _ArrayLike, + _DTypeLike, + _NumberLike_co, + _ScalarLike_co, + _SupportsArray, _SupportsArrayFunc, - _ArrayLikeInt_co, - _ArrayLikeFloat_co, - _ArrayLikeComplex_co, - _ArrayLikeObject_co, ) -_T = TypeVar("_T") -_SCT = TypeVar("_SCT", bound=generic) +__all__ = [ + "diag", + "diagflat", + "eye", + "fliplr", + "flipud", + "tri", + "triu", + "tril", + "vander", + "histogram2d", + "mask_indices", + "tril_indices", + "tril_indices_from", + "triu_indices", + "triu_indices_from", +] + +### + +type _Int_co = np.integer 
| np.bool +type _Float_co = np.floating | _Int_co +type _Number_co = np.number | np.bool + +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] +# Workaround for mypy's and pyright's lack of compliance with the typing spec for +# overloads for gradual types. This works because only `Any` and `Never` are assignable +# to `Never`. +type _ArrayNoD[ScalarT: np.generic] = np.ndarray[tuple[Never] | tuple[Never, Never], np.dtype[ScalarT]] + +type _ArrayLike1D[ScalarT: np.generic] = _SupportsArray[np.dtype[ScalarT]] | Sequence[ScalarT] +type _ArrayLike1DInt_co = _SupportsArray[np.dtype[_Int_co]] | Sequence[int | _Int_co] +type _ArrayLike1DFloat_co = _SupportsArray[np.dtype[_Float_co]] | Sequence[float | _Float_co] +type _ArrayLike2DFloat_co = _SupportsArray[np.dtype[_Float_co]] | Sequence[_ArrayLike1DFloat_co] +type _ArrayLike1DNumber_co = _SupportsArray[np.dtype[_Number_co]] | Sequence[complex | _Number_co] # The returned arrays dtype must be compatible with `np.equal` -_MaskFunc = Callable[ - [NDArray[int_], _T], - NDArray[number[Any] | np.bool | timedelta64 | datetime64 | object_], -] +type _MaskFunc[_T] = Callable[[NDArray[np.int_], _T], NDArray[_Number_co | np.timedelta64 | np.datetime64 | np.object_]] -__all__: list[str] +type _Indices2D = tuple[_Array1D[np.intp], _Array1D[np.intp]] +type _Histogram2D[ScalarT: np.generic] = tuple[_Array2D[np.float64], _Array1D[ScalarT], _Array1D[ScalarT]] +@type_check_only +class _HasShapeAndNDim(Protocol): + @property # TODO: require 2d shape once shape-typing has matured + def shape(self) -> tuple[int, ...]: ... + @property + def ndim(self) -> int: ... + +### + +# keep in sync with `flipud` @overload -def fliplr(m: _ArrayLike[_SCT]) -> NDArray[_SCT]: ... +def fliplr[ArrayT: np.ndarray](m: ArrayT) -> ArrayT: ... +@overload +def fliplr[ScalarT: np.generic](m: _ArrayLike[ScalarT]) -> NDArray[ScalarT]: ... 
@overload def fliplr(m: ArrayLike) -> NDArray[Any]: ... +# keep in sync with `fliplr` +@overload +def flipud[ArrayT: np.ndarray](m: ArrayT) -> ArrayT: ... @overload -def flipud(m: _ArrayLike[_SCT]) -> NDArray[_SCT]: ... +def flipud[ScalarT: np.generic](m: _ArrayLike[ScalarT]) -> NDArray[ScalarT]: ... @overload def flipud(m: ArrayLike) -> NDArray[Any]: ... +# @overload def eye( N: int, - M: None | int = ..., - k: int = ..., - dtype: None = ..., - order: _OrderCF = ..., + M: int | None = None, + k: int = 0, + dtype: None = ..., # = float # stubdefaulter: ignore[missing-default] + order: _OrderCF = "C", *, - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[float64]: ... + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array2D[np.float64]: ... @overload -def eye( +def eye[ScalarT: np.generic]( + N: int, + M: int | None, + k: int, + dtype: _DTypeLike[ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array2D[ScalarT]: ... +@overload +def eye[ScalarT: np.generic]( N: int, - M: None | int = ..., - k: int = ..., - dtype: _DTypeLike[_SCT] = ..., - order: _OrderCF = ..., + M: int | None = None, + k: int = 0, *, - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... + dtype: _DTypeLike[ScalarT], + order: _OrderCF = "C", + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array2D[ScalarT]: ... @overload def eye( N: int, - M: None | int = ..., - k: int = ..., - dtype: DTypeLike = ..., - order: _OrderCF = ..., + M: int | None = None, + k: int = 0, + dtype: DTypeLike | None = ..., # = float + order: _OrderCF = "C", *, - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[Any]: ... + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array2D[Incomplete]: ... 
+# +@overload +def diag[ScalarT: np.generic](v: _ArrayNoD[ScalarT] | Sequence[Sequence[ScalarT]], k: int = 0) -> NDArray[ScalarT]: ... +@overload +def diag[ScalarT: np.generic](v: _Array2D[ScalarT] | Sequence[Sequence[ScalarT]], k: int = 0) -> _Array1D[ScalarT]: ... +@overload +def diag[ScalarT: np.generic](v: _Array1D[ScalarT] | Sequence[ScalarT], k: int = 0) -> _Array2D[ScalarT]: ... @overload -def diag(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ... +def diag(v: Sequence[Sequence[_ScalarLike_co]], k: int = 0) -> _Array1D[Incomplete]: ... @overload -def diag(v: ArrayLike, k: int = ...) -> NDArray[Any]: ... +def diag(v: Sequence[_ScalarLike_co], k: int = 0) -> _Array2D[Incomplete]: ... +@overload +def diag[ScalarT: np.generic](v: _ArrayLike[ScalarT], k: int = 0) -> NDArray[ScalarT]: ... +@overload +def diag(v: ArrayLike, k: int = 0) -> NDArray[Incomplete]: ... +# keep in sync with `numpy.ma.extras.diagflat` @overload -def diagflat(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ... +def diagflat[ScalarT: np.generic](v: _ArrayLike[ScalarT], k: int = 0) -> _Array2D[ScalarT]: ... @overload -def diagflat(v: ArrayLike, k: int = ...) -> NDArray[Any]: ... +def diagflat(v: ArrayLike, k: int = 0) -> _Array2D[Incomplete]: ... +# @overload def tri( N: int, - M: None | int = ..., - k: int = ..., - dtype: None = ..., + M: int | None = None, + k: int = 0, + dtype: None = ..., # = float # stubdefaulter: ignore[missing-default] *, - like: None | _SupportsArrayFunc = ... -) -> NDArray[float64]: ... + like: _SupportsArrayFunc | None = None +) -> _Array2D[np.float64]: ... @overload -def tri( +def tri[ScalarT: np.generic]( N: int, - M: None | int = ..., - k: int = ..., - dtype: _DTypeLike[_SCT] = ..., + M: int | None, + k: int, + dtype: _DTypeLike[ScalarT], *, - like: None | _SupportsArrayFunc = ... -) -> NDArray[_SCT]: ... + like: _SupportsArrayFunc | None = None +) -> _Array2D[ScalarT]: ... 
+@overload +def tri[ScalarT: np.generic]( + N: int, + M: int | None = None, + k: int = 0, + *, + dtype: _DTypeLike[ScalarT], + like: _SupportsArrayFunc | None = None +) -> _Array2D[ScalarT]: ... @overload def tri( N: int, - M: None | int = ..., - k: int = ..., - dtype: DTypeLike = ..., + M: int | None = None, + k: int = 0, + dtype: DTypeLike | None = ..., # = float *, - like: None | _SupportsArrayFunc = ... -) -> NDArray[Any]: ... + like: _SupportsArrayFunc | None = None +) -> _Array2D[Any]: ... +# keep in sync with `triu` +@overload +def tril[ArrayT: np.ndarray](m: ArrayT, k: int = 0) -> ArrayT: ... @overload -def tril(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ... +def tril[ScalarT: np.generic](m: _ArrayLike[ScalarT], k: int = 0) -> NDArray[ScalarT]: ... @overload -def tril(v: ArrayLike, k: int = ...) -> NDArray[Any]: ... +def tril(m: ArrayLike, k: int = 0) -> NDArray[Any]: ... +# keep in sync with `tril` @overload -def triu(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ... +def triu[ArrayT: np.ndarray](m: ArrayT, k: int = 0) -> ArrayT: ... @overload -def triu(v: ArrayLike, k: int = ...) -> NDArray[Any]: ... +def triu[ScalarT: np.generic](m: _ArrayLike[ScalarT], k: int = 0) -> NDArray[ScalarT]: ... +@overload +def triu(m: ArrayLike, k: int = 0) -> NDArray[Any]: ... +# we use `list` (invariant) instead of `Sequence` (covariant) to avoid overlap +# keep in sync with `ma.extras.vander` @overload -def vander( # type: ignore[misc] - x: _ArrayLikeInt_co, - N: None | int = ..., - increasing: bool = ..., -) -> NDArray[signedinteger[Any]]: ... +def vander[ScalarT: np.number | np.object_](x: _ArrayLike1D[ScalarT], N: int | None = None, increasing: bool = False) -> _Array2D[ScalarT]: ... @overload -def vander( # type: ignore[misc] - x: _ArrayLikeFloat_co, - N: None | int = ..., - increasing: bool = ..., -) -> NDArray[floating[Any]]: ... 
+def vander(x: _ArrayLike1D[np.bool] | list[int], N: int | None = None, increasing: bool = False) -> _Array2D[np.int_]: ... @overload -def vander( - x: _ArrayLikeComplex_co, - N: None | int = ..., - increasing: bool = ..., -) -> NDArray[complexfloating[Any, Any]]: ... +def vander(x: list[float], N: int | None = None, increasing: bool = False) -> _Array2D[np.float64]: ... @overload -def vander( - x: _ArrayLikeObject_co, - N: None | int = ..., - increasing: bool = ..., -) -> NDArray[object_]: ... +def vander(x: list[complex], N: int | None = None, increasing: bool = False) -> _Array2D[np.complex128]: ... +@overload # fallback +def vander(x: Sequence[_NumberLike_co], N: int | None = None, increasing: bool = False) -> _Array2D[Any]: ... +# +@overload +def histogram2d[ScalarT: np.complexfloating]( + x: _ArrayLike1D[ScalarT], + y: _ArrayLike1D[ScalarT | _Float_co], + bins: int | Sequence[int] = 10, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[ScalarT]: ... +@overload +def histogram2d[ScalarT: np.complexfloating]( + x: _ArrayLike1D[ScalarT | _Float_co], + y: _ArrayLike1D[ScalarT], + bins: int | Sequence[int] = 10, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[ScalarT]: ... +@overload +def histogram2d[ScalarT: np.inexact]( + x: _ArrayLike1D[ScalarT], + y: _ArrayLike1D[ScalarT | _Int_co], + bins: int | Sequence[int] = 10, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[ScalarT]: ... 
@overload -def histogram2d( # type: ignore[misc] - x: _ArrayLikeFloat_co, - y: _ArrayLikeFloat_co, - bins: int | Sequence[int] = ..., - range: None | _ArrayLikeFloat_co = ..., - density: None | bool = ..., - weights: None | _ArrayLikeFloat_co = ..., -) -> tuple[ - NDArray[float64], - NDArray[floating[Any]], - NDArray[floating[Any]], -]: ... +def histogram2d[ScalarT: np.inexact]( + x: _ArrayLike1D[ScalarT | _Int_co], + y: _ArrayLike1D[ScalarT], + bins: int | Sequence[int] = 10, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[ScalarT]: ... @overload def histogram2d( - x: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co, - bins: int | Sequence[int] = ..., - range: None | _ArrayLikeFloat_co = ..., - density: None | bool = ..., - weights: None | _ArrayLikeFloat_co = ..., -) -> tuple[ - NDArray[float64], - NDArray[complexfloating[Any, Any]], - NDArray[complexfloating[Any, Any]], -]: ... -@overload # TODO: Sort out `bins` + x: _ArrayLike1DInt_co | Sequence[float], + y: _ArrayLike1DInt_co | Sequence[float], + bins: int | Sequence[int] = 10, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[np.float64]: ... +@overload +def histogram2d( + x: Sequence[complex], + y: Sequence[complex], + bins: int | Sequence[int] = 10, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[np.complex128 | Any]: ... +@overload +def histogram2d[ScalarT: _Number_co]( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: _ArrayLike1D[ScalarT] | Sequence[_ArrayLike1D[ScalarT]], + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[ScalarT]: ... 
+@overload +def histogram2d[ScalarT: np.inexact, BinsScalarT: _Number_co]( + x: _ArrayLike1D[ScalarT], + y: _ArrayLike1D[ScalarT], + bins: Sequence[_ArrayLike1D[BinsScalarT] | int], + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[ScalarT | BinsScalarT]: ... +@overload +def histogram2d[ScalarT: np.inexact]( + x: _ArrayLike1D[ScalarT], + y: _ArrayLike1D[ScalarT], + bins: Sequence[_ArrayLike1DNumber_co | int], + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[ScalarT | Any]: ... +@overload +def histogram2d[ScalarT: _Number_co]( + x: _ArrayLike1DInt_co | Sequence[float], + y: _ArrayLike1DInt_co | Sequence[float], + bins: Sequence[_ArrayLike1D[ScalarT] | int], + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[np.float64 | ScalarT]: ... +@overload def histogram2d( - x: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co, - bins: Sequence[_ArrayLikeInt_co], - range: None | _ArrayLikeFloat_co = ..., - density: None | bool = ..., - weights: None | _ArrayLikeFloat_co = ..., -) -> tuple[ - NDArray[float64], - NDArray[Any], - NDArray[Any], -]: ... + x: _ArrayLike1DInt_co | Sequence[float], + y: _ArrayLike1DInt_co | Sequence[float], + bins: Sequence[_ArrayLike1DNumber_co | int], + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[np.float64 | Any]: ... +@overload +def histogram2d[ScalarT: _Number_co]( + x: Sequence[complex], + y: Sequence[complex], + bins: Sequence[_ArrayLike1D[ScalarT] | int], + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[np.complex128 | ScalarT]: ... 
+@overload +def histogram2d( + x: Sequence[complex], + y: Sequence[complex], + bins: Sequence[_ArrayLike1DNumber_co | int], + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[np.complex128 | Any]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[Sequence[int]], + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[np.int_]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[Sequence[float]], + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[np.float64 | Any]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[Sequence[complex]], + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[np.complex128 | Any]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[_ArrayLike1DNumber_co | int] | int, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[Any]: ... # NOTE: we're assuming/demanding here the `mask_func` returns # an ndarray of shape `(n, n)`; otherwise there is the possibility # of the output tuple having more or less than 2 elements @overload -def mask_indices( - n: int, - mask_func: _MaskFunc[int], - k: int = ..., -) -> tuple[NDArray[intp], NDArray[intp]]: ... -@overload -def mask_indices( - n: int, - mask_func: _MaskFunc[_T], - k: _T, -) -> tuple[NDArray[intp], NDArray[intp]]: ... - -def tril_indices( - n: int, - k: int = ..., - m: None | int = ..., -) -> tuple[NDArray[int_], NDArray[int_]]: ... 
- -def tril_indices_from( - arr: NDArray[Any], - k: int = ..., -) -> tuple[NDArray[int_], NDArray[int_]]: ... - -def triu_indices( - n: int, - k: int = ..., - m: None | int = ..., -) -> tuple[NDArray[int_], NDArray[int_]]: ... - -def triu_indices_from( - arr: NDArray[Any], - k: int = ..., -) -> tuple[NDArray[int_], NDArray[int_]]: ... +def mask_indices(n: int, mask_func: _MaskFunc[int], k: int = 0) -> _Indices2D: ... +@overload +def mask_indices[T](n: int, mask_func: _MaskFunc[T], k: T) -> _Indices2D: ... + +# +def tril_indices(n: int, k: int = 0, m: int | None = None) -> _Indices2D: ... +def triu_indices(n: int, k: int = 0, m: int | None = None) -> _Indices2D: ... + +# these will accept anything with `shape: tuple[int, int]` and `ndim: int` attributes +def tril_indices_from(arr: _HasShapeAndNDim, k: int = 0) -> _Indices2D: ... +def triu_indices_from(arr: _HasShapeAndNDim, k: int = 0) -> _Indices2D: ... diff --git a/numpy/lib/_type_check_impl.py b/numpy/lib/_type_check_impl.py index 2e4ef4e6954a..e3c942be0d99 100644 --- a/numpy/lib/_type_check_impl.py +++ b/numpy/lib/_type_check_impl.py @@ -2,18 +2,19 @@ """ import functools +import warnings __all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex', 'isreal', 'nan_to_num', 'real', 'real_if_close', 'typename', 'mintypecode', 'common_type'] -from .._utils import set_module import numpy._core.numeric as _nx -from numpy._core.numeric import asarray, asanyarray, isnan, zeros -from numpy._core import overrides, getlimits -from ._ufunclike_impl import isneginf, isposinf +from numpy._core import getlimits, overrides +from numpy._core.numeric import asanyarray, asarray, isnan, zeros +from numpy._utils import set_module +from ._ufunclike_impl import isneginf, isposinf array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy') @@ -56,6 +57,7 @@ def mintypecode(typechars, typeset='GDFgdf', default='d'): Examples -------- + >>> import numpy as np >>> np.mintypecode(['d', 'f', 'S']) 'd' >>> 
x = np.array([1.1, 2-3.j]) @@ -68,7 +70,7 @@ def mintypecode(typechars, typeset='GDFgdf', default='d'): """ typecodes = ((isinstance(t, str) and t) or asarray(t).dtype.char for t in typechars) - intersection = set(t for t in typecodes if t in typeset) + intersection = {t for t in typecodes if t in typeset} if not intersection: return default if 'F' in intersection and 'd' in intersection: @@ -103,6 +105,7 @@ def real(val): Examples -------- + >>> import numpy as np >>> a = np.array([1+2j, 3+4j, 5+6j]) >>> a.real array([1., 3., 5.]) @@ -149,6 +152,7 @@ def imag(val): Examples -------- + >>> import numpy as np >>> a = np.array([1+2j, 3+4j, 5+6j]) >>> a.imag array([2., 4., 6.]) @@ -195,6 +199,7 @@ def iscomplex(x): Examples -------- + >>> import numpy as np >>> np.iscomplex([1+1j, 1+0j, 4.5, 3, 2, 2j]) array([ True, False, False, False, False, True]) @@ -235,26 +240,27 @@ def isreal(x): Examples -------- - >>> a = np.array([1+1j, 1+0j, 4.5, 3, 2, 2j], dtype=complex) + >>> import numpy as np + >>> a = np.array([1+1j, 1+0j, 4.5, 3, 2, 2j], dtype=np.complex128) >>> np.isreal(a) array([False, True, True, True, True, False]) The function does not work on string arrays. - >>> a = np.array([2j, "a"], dtype="U") + >>> a = np.array([2j, "a"], dtype=np.str_) >>> np.isreal(a) # Warns about non-elementwise comparison False - Returns True for all elements in input array of ``dtype=object`` even if + Returns True for all elements in input array of ``dtype=np.object_`` even if any of the elements is complex. 
- >>> a = np.array([1, "2", 3+4j], dtype=object) + >>> a = np.array([1, "2", 3+4j], dtype=np.object_) >>> np.isreal(a) array([ True, True, True]) isreal should not be used with object arrays - >>> a = np.array([1+2j, 2+1j], dtype=object) + >>> a = np.array([1+2j, 2+1j], dtype=np.object_) >>> np.isreal(a) array([ True, True]) @@ -287,6 +293,7 @@ def iscomplexobj(x): Examples -------- + >>> import numpy as np >>> np.iscomplexobj(1) False >>> np.iscomplexobj(1+0j) @@ -341,6 +348,7 @@ def isrealobj(x): Examples -------- + >>> import numpy as np >>> np.isrealobj(1) True >>> np.isrealobj(1+0j) @@ -391,28 +399,18 @@ def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None): in-place (False). The in-place operation only occurs if casting to an array does not require a copy. Default is True. - - .. versionadded:: 1.13 - nan : int, float, optional - Value to be used to fill NaN values. If no value is passed + nan : int, float, or bool or array_like of int, float, or bool, optional + Values to be used to fill NaN values. If no values are passed then NaN values will be replaced with 0.0. - - .. versionadded:: 1.17 - posinf : int, float, optional - Value to be used to fill positive infinity values. If no value is + posinf : int, float, or bool or array_like of int, float, or bool, optional + Values to be used to fill positive infinity values. If no values are passed then positive infinity values will be replaced with a very large number. - - .. versionadded:: 1.17 - neginf : int, float, optional - Value to be used to fill negative infinity values. If no value is + neginf : int, float, or bool or array_like of int, float, or bool, optional + Values to be used to fill negative infinity values. If no values are passed then negative infinity values will be replaced with a very small (or negative) number. - .. 
versionadded:: 1.17 - - - Returns ------- out : ndarray @@ -434,6 +432,7 @@ def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None): Examples -------- + >>> import numpy as np >>> np.nan_to_num(np.inf) 1.7976931348623157e+308 >>> np.nan_to_num(-np.inf) @@ -447,6 +446,12 @@ def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None): >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333) array([ 3.3333333e+07, 3.3333333e+07, -9.9990000e+03, -1.2800000e+02, 1.2800000e+02]) + >>> nan = np.array([11, 12, -9999, 13, 14]) + >>> posinf = np.array([33333333, 11, 12, 13, 14]) + >>> neginf = np.array([11, 33333333, 12, 13, 14]) + >>> np.nan_to_num(x, nan=nan, posinf=posinf, neginf=neginf) + array([ 3.3333333e+07, 3.3333333e+07, -9.9990000e+03, -1.2800000e+02, + 1.2800000e+02]) >>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)]) array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary -1.28000000e+002, 1.28000000e+002]) @@ -456,6 +461,11 @@ def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None): 0.00000000e+000 +1.79769313e+308j]) >>> np.nan_to_num(y, nan=111111, posinf=222222) array([222222.+111111.j, 111111. +0.j, 111111.+222222.j]) + >>> nan = np.array([11, 12, 13]) + >>> posinf = np.array([21, 22, 23]) + >>> neginf = np.array([31, 32, 33]) + >>> np.nan_to_num(y, nan=nan, posinf=posinf, neginf=neginf) + array([21.+11.j, 12. +0.j, 13.+23.j]) """ x = _nx.array(x, subok=True, copy=copy) xtype = x.dtype.type @@ -525,6 +535,7 @@ def real_if_close(a, tol=100): Examples -------- + >>> import numpy as np >>> np.finfo(float).eps 2.2204460492503131e-16 # may vary @@ -577,6 +588,9 @@ def typename(char): """ Return a description for the given data type code. + .. deprecated:: 2.5 + `numpy.typename` is deprecated. Use `numpy.dtype.name` instead. 
+ Parameters ---------- char : str @@ -593,6 +607,7 @@ def typename(char): Examples -------- + >>> import numpy as np >>> typechars = ['S1', '?', 'B', 'D', 'G', 'F', 'I', 'H', 'L', 'O', 'Q', ... 'S', 'U', 'V', 'b', 'd', 'g', 'f', 'i', 'h', 'l', 'q'] >>> for typechar in typechars: @@ -622,6 +637,12 @@ def typename(char): q : long long integer """ + # Deprecated in NumPy 2.5, 2026-02-03 + warnings.warn( + "numpy.typename is deprecated. Use numpy.dtype.name instead.", + DeprecationWarning, + stacklevel=2 + ) return _namefromtype[char] #----------------------------------------------------------------------------- @@ -689,7 +710,7 @@ def common_type(*arrays): if issubclass(t, _nx.integer): p = 2 # array_precision[_nx.double] else: - p = array_precision.get(t, None) + p = array_precision.get(t) if p is None: raise TypeError("can't get common type for non-numeric array") precision = max(precision, p) diff --git a/numpy/lib/_type_check_impl.pyi b/numpy/lib/_type_check_impl.pyi index 6cc5073b8e20..d23123f80c8e 100644 --- a/numpy/lib/_type_check_impl.pyi +++ b/numpy/lib/_type_check_impl.pyi @@ -1,204 +1,365 @@ +from _typeshed import Incomplete from collections.abc import Container, Iterable -from typing import ( - Literal as L, - Any, - overload, - TypeVar, - Protocol, -) +from typing import Any, Literal as L, Protocol, overload, type_check_only +from typing_extensions import deprecated import numpy as np -from numpy import ( - dtype, - generic, - floating, - float64, - complexfloating, - integer, -) - from numpy._typing import ( ArrayLike, - DTypeLike, - NBitBase, NDArray, + _16Bit, + _32Bit, _64Bit, - _SupportsDType, - _ScalarLike_co, _ArrayLike, - _DTypeLikeComplex, + _NestedSequence, + _ScalarLike_co, + _SupportsArray, ) -_T = TypeVar("_T") -_T_co = TypeVar("_T_co", covariant=True) -_SCT = TypeVar("_SCT", bound=generic) -_NBit1 = TypeVar("_NBit1", bound=NBitBase) -_NBit2 = TypeVar("_NBit2", bound=NBitBase) +__all__ = [ + "common_type", + "imag", + "iscomplex", + 
"iscomplexobj", + "isreal", + "isrealobj", + "mintypecode", + "nan_to_num", + "real", + "real_if_close", + "typename", +] + +type _FloatMax32 = np.float32 | np.float16 +type _ComplexMax128 = np.complex128 | np.complex64 +type _RealMax64 = np.float64 | np.float32 | np.float16 | np.integer +type _Real = np.floating | np.integer +type _ToReal = _Real | np.bool +type _InexactMax32 = np.inexact[_32Bit] | np.float16 +type _NumberMax64 = np.number[_64Bit] | np.number[_32Bit] | np.number[_16Bit] | np.integer + +@type_check_only +class _HasReal[T](Protocol): + @property + def real(self, /) -> T: ... -class _SupportsReal(Protocol[_T_co]): +@type_check_only +class _HasImag[T](Protocol): @property - def real(self) -> _T_co: ... + def imag(self, /) -> T: ... -class _SupportsImag(Protocol[_T_co]): +@type_check_only +class _HasDType[ScalarT: np.generic](Protocol): @property - def imag(self) -> _T_co: ... + def dtype(self, /) -> np.dtype[ScalarT]: ... -__all__: list[str] +### -def mintypecode( - typechars: Iterable[str | ArrayLike], - typeset: Container[str] = ..., - default: str = ..., -) -> str: ... +def mintypecode(typechars: Iterable[str | ArrayLike], typeset: str | Container[str] = "GDFgdf", default: str = "d") -> str: ... +# @overload -def real(val: _SupportsReal[_T]) -> _T: ... +def real[T](val: _HasReal[T]) -> T: ... +@overload +def real[RealT: _ToReal](val: _ArrayLike[RealT]) -> NDArray[RealT]: ... @overload def real(val: ArrayLike) -> NDArray[Any]: ... +# +@overload +def imag[T](val: _HasImag[T]) -> T: ... @overload -def imag(val: _SupportsImag[_T]) -> _T: ... +def imag[RealT: _ToReal](val: _ArrayLike[RealT]) -> NDArray[RealT]: ... @overload def imag(val: ArrayLike) -> NDArray[Any]: ... +# @overload -def iscomplex(x: _ScalarLike_co) -> np.bool: ... # type: ignore[misc] +def iscomplex(x: _ScalarLike_co) -> np.bool: ... @overload -def iscomplex(x: ArrayLike) -> NDArray[np.bool]: ... +def iscomplex(x: NDArray[Any] | _NestedSequence[ArrayLike]) -> NDArray[np.bool]: ... 
+@overload +def iscomplex(x: ArrayLike) -> np.bool | NDArray[np.bool]: ... +# @overload -def isreal(x: _ScalarLike_co) -> np.bool: ... # type: ignore[misc] +def isreal(x: _ScalarLike_co) -> np.bool: ... @overload -def isreal(x: ArrayLike) -> NDArray[np.bool]: ... - -def iscomplexobj(x: _SupportsDType[dtype[Any]] | ArrayLike) -> bool: ... +def isreal(x: NDArray[Any] | _NestedSequence[ArrayLike]) -> NDArray[np.bool]: ... +@overload +def isreal(x: ArrayLike) -> np.bool | NDArray[np.bool]: ... -def isrealobj(x: _SupportsDType[dtype[Any]] | ArrayLike) -> bool: ... +# +def iscomplexobj(x: _HasDType[Any] | ArrayLike) -> bool: ... +def isrealobj(x: _HasDType[Any] | ArrayLike) -> bool: ... -@overload -def nan_to_num( # type: ignore[misc] - x: _SCT, - copy: bool = ..., - nan: float = ..., - posinf: None | float = ..., - neginf: None | float = ..., -) -> _SCT: ... +# +@overload +def nan_to_num[ScalarT: np.generic]( + x: ScalarT, + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> ScalarT: ... +@overload +def nan_to_num[ScalarT: np.generic]( + x: NDArray[ScalarT] | _NestedSequence[_ArrayLike[ScalarT]], + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> NDArray[ScalarT]: ... +@overload +def nan_to_num[ScalarT: np.generic]( + x: _SupportsArray[np.dtype[ScalarT]], + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> ScalarT | NDArray[ScalarT]: ... @overload def nan_to_num( - x: _ScalarLike_co, - copy: bool = ..., - nan: float = ..., - posinf: None | float = ..., - neginf: None | float = ..., -) -> Any: ... -@overload -def nan_to_num( - x: _ArrayLike[_SCT], - copy: bool = ..., - nan: float = ..., - posinf: None | float = ..., - neginf: None | float = ..., -) -> NDArray[_SCT]: ... 
+ x: _NestedSequence[ArrayLike], + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> NDArray[Incomplete]: ... @overload def nan_to_num( x: ArrayLike, - copy: bool = ..., - nan: float = ..., - posinf: None | float = ..., - neginf: None | float = ..., -) -> NDArray[Any]: ... - -# If one passes a complex array to `real_if_close`, then one is reasonably -# expected to verify the output dtype (so we can return an unsafe union here) + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> Incomplete: ... +# NOTE: The [overload-overlap] mypy error is a false positive +@overload +def real_if_close(a: _ArrayLike[np.complex64], tol: float = 100) -> NDArray[np.float32 | np.complex64]: ... +@overload +def real_if_close(a: _ArrayLike[np.complex128], tol: float = 100) -> NDArray[np.float64 | np.complex128]: ... @overload -def real_if_close( # type: ignore[misc] - a: _ArrayLike[complexfloating[_NBit1, _NBit1]], - tol: float = ..., -) -> NDArray[floating[_NBit1]] | NDArray[complexfloating[_NBit1, _NBit1]]: ... +def real_if_close(a: _ArrayLike[np.clongdouble], tol: float = 100) -> NDArray[np.longdouble | np.clongdouble]: ... @overload -def real_if_close( - a: _ArrayLike[_SCT], - tol: float = ..., -) -> NDArray[_SCT]: ... +def real_if_close[RealT: _ToReal](a: _ArrayLike[RealT], tol: float = 100) -> NDArray[RealT]: ... @overload -def real_if_close( - a: ArrayLike, - tol: float = ..., -) -> NDArray[Any]: ... +def real_if_close(a: ArrayLike, tol: float = 100) -> NDArray[Any]: ... +# @overload -def typename(char: L['S1']) -> L['character']: ... +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") +def typename(char: L["S1"]) -> L["character"]: ... @overload -def typename(char: L['?']) -> L['bool']: ... +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") +def typename(char: L["?"]) -> L["bool"]: ... 
@overload -def typename(char: L['b']) -> L['signed char']: ... +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") +def typename(char: L["b"]) -> L["signed char"]: ... @overload -def typename(char: L['B']) -> L['unsigned char']: ... +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") +def typename(char: L["B"]) -> L["unsigned char"]: ... @overload -def typename(char: L['h']) -> L['short']: ... +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") +def typename(char: L["h"]) -> L["short"]: ... @overload -def typename(char: L['H']) -> L['unsigned short']: ... +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") +def typename(char: L["H"]) -> L["unsigned short"]: ... @overload -def typename(char: L['i']) -> L['integer']: ... +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") +def typename(char: L["i"]) -> L["integer"]: ... @overload -def typename(char: L['I']) -> L['unsigned integer']: ... +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") +def typename(char: L["I"]) -> L["unsigned integer"]: ... @overload -def typename(char: L['l']) -> L['long integer']: ... +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") +def typename(char: L["l"]) -> L["long integer"]: ... @overload -def typename(char: L['L']) -> L['unsigned long integer']: ... +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") +def typename(char: L["L"]) -> L["unsigned long integer"]: ... @overload -def typename(char: L['q']) -> L['long long integer']: ... +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") +def typename(char: L["q"]) -> L["long long integer"]: ... @overload -def typename(char: L['Q']) -> L['unsigned long long integer']: ... +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") +def typename(char: L["Q"]) -> L["unsigned long long integer"]: ... 
@overload -def typename(char: L['f']) -> L['single precision']: ... +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") +def typename(char: L["f"]) -> L["single precision"]: ... @overload -def typename(char: L['d']) -> L['double precision']: ... +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") +def typename(char: L["d"]) -> L["double precision"]: ... @overload -def typename(char: L['g']) -> L['long precision']: ... +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") +def typename(char: L["g"]) -> L["long precision"]: ... @overload -def typename(char: L['F']) -> L['complex single precision']: ... +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") +def typename(char: L["F"]) -> L["complex single precision"]: ... @overload -def typename(char: L['D']) -> L['complex double precision']: ... +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") +def typename(char: L["D"]) -> L["complex double precision"]: ... @overload -def typename(char: L['G']) -> L['complex long double precision']: ... +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") +def typename(char: L["G"]) -> L["complex long double precision"]: ... @overload -def typename(char: L['S']) -> L['string']: ... +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") +def typename(char: L["S"]) -> L["string"]: ... @overload -def typename(char: L['U']) -> L['unicode']: ... +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") +def typename(char: L["U"]) -> L["unicode"]: ... @overload -def typename(char: L['V']) -> L['void']: ... +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") +def typename(char: L["V"]) -> L["void"]: ... @overload -def typename(char: L['O']) -> L['object']: ... +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") +def typename(char: L["O"]) -> L["object"]: ... 
+# NOTE: The [overload-overlap] mypy errors are false positives +@overload +def common_type() -> type[np.float16]: ... +@overload +def common_type(a0: _HasDType[np.float16], /, *ai: _HasDType[np.float16]) -> type[np.float16]: ... +@overload +def common_type(a0: _HasDType[np.float32], /, *ai: _HasDType[_FloatMax32]) -> type[np.float32]: ... +@overload +def common_type( + a0: _HasDType[np.float64 | np.integer], + /, + *ai: _HasDType[_RealMax64], +) -> type[np.float64]: ... +@overload +def common_type( + a0: _HasDType[np.longdouble], + /, + *ai: _HasDType[_Real], +) -> type[np.longdouble]: ... +@overload +def common_type( + a0: _HasDType[np.complex64], + /, + *ai: _HasDType[_InexactMax32], +) -> type[np.complex64]: ... +@overload +def common_type( + a0: _HasDType[np.complex128], + /, + *ai: _HasDType[_NumberMax64], +) -> type[np.complex128]: ... +@overload +def common_type( + a0: _HasDType[np.clongdouble], + /, + *ai: _HasDType[np.number], +) -> type[np.clongdouble]: ... +@overload +def common_type( + a0: _HasDType[_FloatMax32], + array1: _HasDType[np.float32], + /, + *ai: _HasDType[_FloatMax32], +) -> type[np.float32]: ... @overload -def common_type( # type: ignore[misc] - *arrays: _SupportsDType[dtype[ - integer[Any] - ]] -) -> type[floating[_64Bit]]: ... +def common_type( + a0: _HasDType[_RealMax64], + array1: _HasDType[np.float64 | np.integer], + /, + *ai: _HasDType[_RealMax64], +) -> type[np.float64]: ... @overload -def common_type( # type: ignore[misc] - *arrays: _SupportsDType[dtype[ - floating[_NBit1] - ]] -) -> type[floating[_NBit1]]: ... +def common_type( + a0: _HasDType[_Real], + array1: _HasDType[np.longdouble], + /, + *ai: _HasDType[_Real], +) -> type[np.longdouble]: ... @overload -def common_type( # type: ignore[misc] - *arrays: _SupportsDType[dtype[ - integer[Any] | floating[_NBit1] - ]] -) -> type[floating[_NBit1 | _64Bit]]: ... 
+def common_type( + a0: _HasDType[_InexactMax32], + array1: _HasDType[np.complex64], + /, + *ai: _HasDType[_InexactMax32], +) -> type[np.complex64]: ... @overload -def common_type( # type: ignore[misc] - *arrays: _SupportsDType[dtype[ - floating[_NBit1] | complexfloating[_NBit2, _NBit2] - ]] -) -> type[complexfloating[_NBit1 | _NBit2, _NBit1 | _NBit2]]: ... +def common_type( + a0: _HasDType[np.float64], + array1: _HasDType[_ComplexMax128], + /, + *ai: _HasDType[_NumberMax64], +) -> type[np.complex128]: ... +@overload +def common_type( + a0: _HasDType[_ComplexMax128], + array1: _HasDType[np.float64], + /, + *ai: _HasDType[_NumberMax64], +) -> type[np.complex128]: ... +@overload +def common_type( + a0: _HasDType[_NumberMax64], + array1: _HasDType[np.complex128], + /, + *ai: _HasDType[_NumberMax64], +) -> type[np.complex128]: ... +@overload +def common_type( + a0: _HasDType[_ComplexMax128], + array1: _HasDType[np.complex128 | np.integer], + /, + *ai: _HasDType[_NumberMax64], +) -> type[np.complex128]: ... +@overload +def common_type( + a0: _HasDType[np.complex128 | np.integer], + array1: _HasDType[_ComplexMax128], + /, + *ai: _HasDType[_NumberMax64], +) -> type[np.complex128]: ... +@overload +def common_type( + a0: _HasDType[_Real], + /, + *ai: _HasDType[_Real], +) -> type[np.floating]: ... +@overload +def common_type( + a0: _HasDType[np.number], + array1: _HasDType[np.clongdouble], + /, + *ai: _HasDType[np.number], +) -> type[np.clongdouble]: ... +@overload +def common_type( + a0: _HasDType[np.longdouble], + array1: _HasDType[np.complexfloating], + /, + *ai: _HasDType[np.number], +) -> type[np.clongdouble]: ... +@overload +def common_type( + a0: _HasDType[np.complexfloating], + array1: _HasDType[np.longdouble], + /, + *ai: _HasDType[np.number], +) -> type[np.clongdouble]: ... +@overload +def common_type( + a0: _HasDType[np.complexfloating], + array1: _HasDType[np.number], + /, + *ai: _HasDType[np.number], +) -> type[np.complexfloating]: ... 
+@overload +def common_type( + a0: _HasDType[np.number], + array1: _HasDType[np.complexfloating], + /, + *ai: _HasDType[np.number], +) -> type[np.complexfloating]: ... @overload def common_type( - *arrays: _SupportsDType[dtype[ - integer[Any] | floating[_NBit1] | complexfloating[_NBit2, _NBit2] - ]] -) -> type[complexfloating[_64Bit | _NBit1 | _NBit2, _64Bit | _NBit1 | _NBit2]]: ... + a0: _HasDType[np.number], + array1: _HasDType[np.number], + /, + *ai: _HasDType[np.number], +) -> type[Any]: ... diff --git a/numpy/lib/_ufunclike_impl.py b/numpy/lib/_ufunclike_impl.py index 241d8af4b4ce..0f503d03a556 100644 --- a/numpy/lib/_ufunclike_impl.py +++ b/numpy/lib/_ufunclike_impl.py @@ -5,10 +5,10 @@ """ __all__ = ['fix', 'isneginf', 'isposinf'] +import warnings + import numpy._core.numeric as nx from numpy._core.overrides import array_function_dispatch -import warnings -import functools def _dispatcher(x, out=None): @@ -20,13 +20,17 @@ def fix(x, out=None): """ Round to nearest integer towards zero. + .. deprecated:: 2.5 + `numpy.fix` is deprecated. Use `numpy.trunc` instead, + which is faster and follows the Array API standard. + Round an array of floats element-wise to nearest integer towards zero. - The rounded values are returned as floats. + The rounded values have the same data-type as the input. Parameters ---------- x : array_like - An array of floats to be rounded + An array to be rounded out : ndarray, optional A location into which the result is stored. If provided, it must have a shape that the input broadcasts to. If not provided or None, a @@ -35,12 +39,12 @@ def fix(x, out=None): Returns ------- out : ndarray of floats - A float array with the same dimensions as the input. - If second argument is not supplied then a float array is returned + An array with the same dimensions and data-type as the input. + If second argument is not supplied then a new array is returned with the rounded values. If a second argument is supplied the result is stored there. 
- The return value `out` is then a reference to that array. + The return value ``out`` is then a reference to that array. See Also -------- @@ -49,23 +53,23 @@ def fix(x, out=None): Examples -------- + >>> import numpy as np >>> np.fix(3.14) 3.0 >>> np.fix(3) - 3.0 + 3 >>> np.fix([2.1, 2.9, -2.1, -2.9]) array([ 2., 2., -2., -2.]) """ - # promote back to an array if flattened - res = nx.asanyarray(nx.ceil(x, out=out)) - res = nx.floor(x, out=res, where=nx.greater_equal(x, 0)) - - # when no out argument is passed and no subclasses are involved, flatten - # scalars - if out is None and type(res) is nx.ndarray: - res = res[()] - return res + # Deprecated in NumPy 2.5, 2026-01-12 + warnings.warn( + "numpy.fix is deprecated. Use numpy.trunc instead, " + "which is faster and follows the Array API standard.", + DeprecationWarning, + stacklevel=2, + ) + return nx.trunc(x, out=out) @array_function_dispatch(_dispatcher, verify=False, module='numpy') @@ -111,6 +115,7 @@ def isposinf(x, out=None): Examples -------- + >>> import numpy as np >>> np.isposinf(np.inf) True >>> np.isposinf(-np.inf) @@ -180,6 +185,7 @@ def isneginf(x, out=None): Examples -------- + >>> import numpy as np >>> np.isneginf(-np.inf) True >>> np.isneginf(np.inf) diff --git a/numpy/lib/_ufunclike_impl.pyi b/numpy/lib/_ufunclike_impl.pyi index dd927bc62158..4145ff205e1b 100644 --- a/numpy/lib/_ufunclike_impl.pyi +++ b/numpy/lib/_ufunclike_impl.pyi @@ -1,67 +1,41 @@ -from typing import Any, overload, TypeVar +from typing import overload +from typing_extensions import deprecated import numpy as np -from numpy import floating, object_ from numpy._typing import ( NDArray, - _FloatLike_co, _ArrayLikeFloat_co, _ArrayLikeObject_co, + _FloatLike_co, ) -_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) - -__all__: list[str] +__all__ = ["fix", "isneginf", "isposinf"] @overload -def fix( # type: ignore[misc] - x: _FloatLike_co, - out: None = ..., -) -> floating[Any]: ... +@deprecated("numpy.fix is deprecated. 
Use numpy.trunc instead.") +def fix(x: _FloatLike_co, out: None = None) -> np.floating: ... @overload -def fix( - x: _ArrayLikeFloat_co, - out: None = ..., -) -> NDArray[floating[Any]]: ... +@deprecated("numpy.fix is deprecated. Use numpy.trunc instead.") +def fix(x: _ArrayLikeFloat_co, out: None = None) -> NDArray[np.floating]: ... @overload -def fix( - x: _ArrayLikeObject_co, - out: None = ..., -) -> NDArray[object_]: ... +@deprecated("numpy.fix is deprecated. Use numpy.trunc instead.") +def fix(x: _ArrayLikeObject_co, out: None = None) -> NDArray[np.object_]: ... @overload -def fix( - x: _ArrayLikeFloat_co | _ArrayLikeObject_co, - out: _ArrayType, -) -> _ArrayType: ... +@deprecated("numpy.fix is deprecated. Use numpy.trunc instead.") +def fix[ArrayT: np.ndarray](x: _ArrayLikeFloat_co | _ArrayLikeObject_co, out: ArrayT) -> ArrayT: ... +# @overload -def isposinf( # type: ignore[misc] - x: _FloatLike_co, - out: None = ..., -) -> np.bool: ... +def isposinf(x: _FloatLike_co, out: None = None) -> np.bool: ... @overload -def isposinf( - x: _ArrayLikeFloat_co, - out: None = ..., -) -> NDArray[np.bool]: ... +def isposinf(x: _ArrayLikeFloat_co, out: None = None) -> NDArray[np.bool]: ... @overload -def isposinf( - x: _ArrayLikeFloat_co, - out: _ArrayType, -) -> _ArrayType: ... +def isposinf[ArrayT: np.ndarray](x: _ArrayLikeFloat_co, out: ArrayT) -> ArrayT: ... +# @overload -def isneginf( # type: ignore[misc] - x: _FloatLike_co, - out: None = ..., -) -> np.bool: ... +def isneginf(x: _FloatLike_co, out: None = None) -> np.bool: ... @overload -def isneginf( - x: _ArrayLikeFloat_co, - out: None = ..., -) -> NDArray[np.bool]: ... +def isneginf(x: _ArrayLikeFloat_co, out: None = None) -> NDArray[np.bool]: ... @overload -def isneginf( - x: _ArrayLikeFloat_co, - out: _ArrayType, -) -> _ArrayType: ... +def isneginf[ArrayT: np.ndarray](x: _ArrayLikeFloat_co, out: ArrayT) -> ArrayT: ... 
diff --git a/numpy/lib/_user_array_impl.py b/numpy/lib/_user_array_impl.py index c26fa4435e92..2465f5f70b99 100644 --- a/numpy/lib/_user_array_impl.py +++ b/numpy/lib/_user_array_impl.py @@ -8,13 +8,38 @@ """ from numpy._core import ( - array, asarray, absolute, add, subtract, multiply, divide, - remainder, power, left_shift, right_shift, bitwise_and, bitwise_or, - bitwise_xor, invert, less, less_equal, not_equal, equal, greater, - greater_equal, shape, reshape, arange, sin, sqrt, transpose + absolute, + add, + arange, + array, + asarray, + bitwise_and, + bitwise_or, + bitwise_xor, + divide, + equal, + greater, + greater_equal, + invert, + left_shift, + less, + less_equal, + multiply, + not_equal, + power, + remainder, + reshape, + right_shift, + shape, + sin, + sqrt, + subtract, + transpose, ) +from numpy._core.overrides import set_module +@set_module("numpy.lib.user_array") class container: """ container(data, dtype=None, copy=True) @@ -24,11 +49,21 @@ class container: Methods ------- copy - tostring byteswap astype """ + def __init_subclass__(cls) -> None: + # Deprecated in NumPy 2.4, 2025-11-24 + import warnings + + warnings.warn( + "The numpy.lib.user_array.container class is deprecated and will be " + "removed in a future version.", + DeprecationWarning, + stacklevel=2, + ) + def __init__(self, data, dtype=None, copy=True): self.array = array(data, dtype, copy=copy) @@ -87,16 +122,6 @@ def __imul__(self, other): multiply(self.array, other, self.array) return self - def __div__(self, other): - return self._rc(divide(self.array, asarray(other))) - - def __rdiv__(self, other): - return self._rc(divide(asarray(other), self.array)) - - def __idiv__(self, other): - divide(self.array, other, self.array) - return self - def __mod__(self, other): return self._rc(remainder(self.array, other)) @@ -225,10 +250,6 @@ def copy(self): "" return self._rc(self.array.copy()) - def tostring(self): - "" - return self.array.tostring() - def tobytes(self): "" return 
self.array.tobytes() diff --git a/numpy/lib/_user_array_impl.pyi b/numpy/lib/_user_array_impl.pyi new file mode 100644 index 000000000000..0910e10dbde2 --- /dev/null +++ b/numpy/lib/_user_array_impl.pyi @@ -0,0 +1,225 @@ +from _typeshed import Incomplete +from types import EllipsisType +from typing import Any, Generic, Self, SupportsIndex, overload, override +from typing_extensions import TypeVar, deprecated + +import numpy as np +import numpy.typing as npt +from numpy._typing import ( + _AnyShape, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeInt_co, + _DTypeLike, + _Shape, +) + +### + +_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], default=_AnyShape, covariant=True) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) + +type _ArrayInt_co = npt.NDArray[np.integer | np.bool] + +type _BoolContainer = container[Any, np.dtype[np.bool]] # type: ignore[deprecated] +type _IntegralContainer = container[Any, np.dtype[np.bool | np.integer | np.object_]] # type: ignore[deprecated] +type _RealContainer = container[Any, np.dtype[np.bool | np.integer | np.floating | np.timedelta64 | np.object_]] # type: ignore[deprecated] +type _NumericContainer = container[Any, np.dtype[np.number | np.timedelta64 | np.object_]] # type: ignore[deprecated] + +type _ToIndexSlice = slice | EllipsisType | _ArrayInt_co | None +type _ToIndexSlices = _ToIndexSlice | tuple[_ToIndexSlice, ...] +type _ToIndex = SupportsIndex | _ToIndexSlice +type _ToIndices = _ToIndex | tuple[_ToIndex, ...] + +### +# pyright: reportDeprecated = false + +@deprecated("The numpy.lib.user_array.container class is deprecated and will be removed in a future version.") +class container(Generic[_ShapeT_co, _DTypeT_co]): + array: np.ndarray[_ShapeT_co, _DTypeT_co] + + @overload + def __init__( + self, + /, + data: container[_ShapeT_co, _DTypeT_co] | np.ndarray[_ShapeT_co, _DTypeT_co], + dtype: None = None, + copy: bool = True, + ) -> None: ... 
+ @overload + def __init__[ScalarT: np.generic]( + self: container[Any, np.dtype[ScalarT]], + /, + data: _ArrayLike[ScalarT], + dtype: None = None, + copy: bool = True, + ) -> None: ... + @overload + def __init__[ScalarT: np.generic]( + self: container[Any, np.dtype[ScalarT]], + /, + data: npt.ArrayLike, + dtype: _DTypeLike[ScalarT], + copy: bool = True, + ) -> None: ... + @overload + def __init__(self, /, data: npt.ArrayLike, dtype: npt.DTypeLike | None = None, copy: bool = True) -> None: ... + + # + def __complex__(self, /) -> complex: ... + def __float__(self, /) -> float: ... + def __int__(self, /) -> int: ... + def __hex__(self, /) -> str: ... + def __oct__(self, /) -> str: ... + + # + @override + def __eq__(self, other: object, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + @override + def __ne__(self, other: object, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + + # + def __lt__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... + def __le__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... + def __gt__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... + def __ge__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... + + # + def __len__(self, /) -> int: ... + + # keep in sync with np.ndarray + @overload + def __getitem__(self, key: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> container[_ShapeT_co, _DTypeT_co]: ... + @overload + def __getitem__(self, key: _ToIndexSlices, /) -> container[_AnyShape, _DTypeT_co]: ... + @overload + def __getitem__(self, key: _ToIndices, /) -> Any: ... + @overload + def __getitem__(self: container[Any, np.dtype[np.void]], key: list[str], /) -> container[_ShapeT_co, np.dtype[np.void]]: ... 
+ @overload + def __getitem__(self: container[Any, np.dtype[np.void]], key: str, /) -> container[_ShapeT_co, np.dtype]: ... + + # keep in sync with np.ndarray + @overload + def __setitem__(self, index: _ToIndices, value: object, /) -> None: ... + @overload + def __setitem__(self: container[Any, np.dtype[np.void]], key: str | list[str], value: object, /) -> None: ... + + # keep in sync with np.ndarray + @overload + def __abs__[ShapeT: _Shape]( + self: container[ShapeT, np.dtype[np.complex64]], / + ) -> container[ShapeT, np.dtype[np.float32]]: ... + @overload + def __abs__[ShapeT: _Shape]( + self: container[ShapeT, np.dtype[np.complex128]], / + ) -> container[ShapeT, np.dtype[np.float64]]: ... + @overload + def __abs__[ShapeT: _Shape]( + self: container[ShapeT, np.dtype[np.complex192]], / + ) -> container[ShapeT, np.dtype[np.float96]]: ... + @overload + def __abs__[ShapeT: _Shape]( + self: container[ShapeT, np.dtype[np.complex256]], / + ) -> container[ShapeT, np.dtype[np.float128]]: ... + @overload + def __abs__[ContainerT: _RealContainer](self: ContainerT, /) -> ContainerT: ... + + # + def __neg__[ContainerT: _NumericContainer](self: ContainerT, /) -> ContainerT: ... + def __pos__[ContainerT: _NumericContainer](self: ContainerT, /) -> ContainerT: ... + def __invert__[ContainerT: _IntegralContainer](self: ContainerT, /) -> ContainerT: ... + + # TODO(jorenham): complete these binary ops + + # + def __add__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __radd__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __iadd__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __sub__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __rsub__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __isub__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __mul__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __rmul__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __imul__(self, other: npt.ArrayLike, /) -> Self: ... 
+ + # + def __mod__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __rmod__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __imod__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __divmod__(self, other: npt.ArrayLike, /) -> tuple[Incomplete, Incomplete]: ... + def __rdivmod__(self, other: npt.ArrayLike, /) -> tuple[Incomplete, Incomplete]: ... + + # + def __pow__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __rpow__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __ipow__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __lshift__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.integer]]: ... + def __rlshift__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.integer]]: ... + def __ilshift__(self, other: _ArrayLikeInt_co, /) -> Self: ... + + # + def __rshift__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.integer]]: ... + def __rrshift__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.integer]]: ... + def __irshift__(self, other: _ArrayLikeInt_co, /) -> Self: ... + + # + @overload + def __and__(self: _BoolContainer, other: _ArrayLikeBool_co, /) -> container[_AnyShape, np.dtype[np.bool]]: ... + @overload + def __and__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ... + __rand__ = __and__ + @overload + def __iand__[ContainerT: _BoolContainer](self: ContainerT, other: _ArrayLikeBool_co, /) -> ContainerT: ... + @overload + def __iand__(self, other: _ArrayLikeInt_co, /) -> Self: ... + + # + @overload + def __xor__(self: _BoolContainer, other: _ArrayLikeBool_co, /) -> container[_AnyShape, np.dtype[np.bool]]: ... + @overload + def __xor__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ... + __rxor__ = __xor__ + @overload + def __ixor__[ContainerT: _BoolContainer](self: ContainerT, other: _ArrayLikeBool_co, /) -> ContainerT: ... 
+ @overload + def __ixor__(self, other: _ArrayLikeInt_co, /) -> Self: ... + + # + @overload + def __or__(self: _BoolContainer, other: _ArrayLikeBool_co, /) -> container[_AnyShape, np.dtype[np.bool]]: ... + @overload + def __or__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ... + __ror__ = __or__ + @overload + def __ior__[ContainerT: _BoolContainer](self: ContainerT, other: _ArrayLikeBool_co, /) -> ContainerT: ... + @overload + def __ior__(self, other: _ArrayLikeInt_co, /) -> Self: ... + + # + @overload + def __array__(self, /, t: None = None) -> np.ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __array__[DTypeT: np.dtype](self, /, t: DTypeT) -> np.ndarray[_ShapeT_co, DTypeT]: ... + + # + @overload + def __array_wrap__(self, arg0: npt.ArrayLike, /) -> container[_ShapeT_co, _DTypeT_co]: ... + @overload + def __array_wrap__[ShapeT: _Shape, DTypeT: np.dtype]( + self, a: np.ndarray[ShapeT, DTypeT], c: Any = ..., s: Any = ..., / + ) -> container[ShapeT, DTypeT]: ... + + # + def copy(self, /) -> Self: ... + def tobytes(self, /) -> bytes: ... + def byteswap(self, /) -> Self: ... + def astype[ScalarT: np.generic](self, /, typecode: _DTypeLike[ScalarT]) -> container[_ShapeT_co, np.dtype[ScalarT]]: ... 
diff --git a/numpy/lib/_utils_impl.py b/numpy/lib/_utils_impl.py index 0c5d08ee7d9c..6aa1065047ef 100644 --- a/numpy/lib/_utils_impl.py +++ b/numpy/lib/_utils_impl.py @@ -1,15 +1,11 @@ import os +import platform import sys -import textwrap import types -import re -import warnings -import functools -import platform +import numpy as np from numpy._core import ndarray from numpy._utils import set_module -import numpy as np __all__ = [ 'get_include', 'info', 'show_runtime' @@ -37,10 +33,13 @@ def show_runtime(): ``__cpu_baseline__`` and ``__cpu_dispatch__`` """ + from pprint import pprint + from numpy._core._multiarray_umath import ( - __cpu_features__, __cpu_baseline__, __cpu_dispatch__ + __cpu_baseline__, + __cpu_dispatch__, + __cpu_features__, ) - from pprint import pprint config_found = [{ "numpy_version": np.__version__, "python": sys.version, @@ -59,6 +58,11 @@ def show_runtime(): "not_found": features_not_found } }) + config_found.append({ + "ignore_floating_point_errors_in_matmul": + not np._core._multiarray_umath._blas_supports_fpe(None), + }) + try: from threadpoolctl import threadpool_info config_found.extend(threadpool_info()) @@ -116,74 +120,6 @@ def get_include(): return d -class _Deprecate: - """ - Decorator class to deprecate old functions. - - Refer to `deprecate` for details. - - See Also - -------- - deprecate - - """ - - def __init__(self, old_name=None, new_name=None, message=None): - self.old_name = old_name - self.new_name = new_name - self.message = message - - def __call__(self, func, *args, **kwargs): - """ - Decorator call. Refer to ``decorate``. - - """ - old_name = self.old_name - new_name = self.new_name - message = self.message - - if old_name is None: - old_name = func.__name__ - if new_name is None: - depdoc = "`%s` is deprecated!" % old_name - else: - depdoc = "`%s` is deprecated, use `%s` instead!" 
% \ - (old_name, new_name) - - if message is not None: - depdoc += "\n" + message - - @functools.wraps(func) - def newfunc(*args, **kwds): - warnings.warn(depdoc, DeprecationWarning, stacklevel=2) - return func(*args, **kwds) - - newfunc.__name__ = old_name - doc = func.__doc__ - if doc is None: - doc = depdoc - else: - lines = doc.expandtabs().split('\n') - indent = _get_indent(lines[1:]) - if lines[0].lstrip(): - # Indent the original first line to let inspect.cleandoc() - # dedent the docstring despite the deprecation notice. - doc = indent * ' ' + doc - else: - # Remove the same leading blank lines as cleandoc() would. - skip = len(lines[0]) + 1 - for line in lines[1:]: - if len(line) > indent: - break - skip += len(line) + 1 - doc = doc[skip:] - depdoc = textwrap.indent(depdoc, ' ' * indent) - doc = '\n\n'.join([depdoc, doc]) - newfunc.__doc__ = doc - - return newfunc - - def _get_indent(lines): """ Determines the leading whitespace that could be removed from all the lines. @@ -198,112 +134,6 @@ def _get_indent(lines): return indent -def deprecate(*args, **kwargs): - """ - Issues a DeprecationWarning, adds warning to `old_name`'s - docstring, rebinds ``old_name.__name__`` and returns the new - function object. - - This function may also be used as a decorator. - - .. deprecated:: 2.0 - Use `~warnings.warn` with :exc:`DeprecationWarning` instead. - - Parameters - ---------- - func : function - The function to be deprecated. - old_name : str, optional - The name of the function to be deprecated. Default is None, in - which case the name of `func` is used. - new_name : str, optional - The new name for the function. Default is None, in which case the - deprecation message is that `old_name` is deprecated. If given, the - deprecation message is that `old_name` is deprecated and `new_name` - should be used instead. - message : str, optional - Additional explanation of the deprecation. Displayed in the - docstring after the warning. 
- - Returns - ------- - old_func : function - The deprecated function. - - Examples - -------- - Note that ``olduint`` returns a value after printing Deprecation - Warning: - - >>> olduint = np.lib.utils.deprecate(np.uint) - DeprecationWarning: `uint64` is deprecated! # may vary - >>> olduint(6) - 6 - - """ - # Deprecate may be run as a function or as a decorator - # If run as a function, we initialise the decorator class - # and execute its __call__ method. - - # Deprecated in NumPy 2.0, 2023-07-11 - warnings.warn( - "`deprecate` is deprecated, " - "use `warn` with `DeprecationWarning` instead. " - "(deprecated in NumPy 2.0)", - DeprecationWarning, - stacklevel=2 - ) - - if args: - fn = args[0] - args = args[1:] - - return _Deprecate(*args, **kwargs)(fn) - else: - return _Deprecate(*args, **kwargs) - - -def deprecate_with_doc(msg): - """ - Deprecates a function and includes the deprecation in its docstring. - - .. deprecated:: 2.0 - Use `~warnings.warn` with :exc:`DeprecationWarning` instead. - - This function is used as a decorator. It returns an object that can be - used to issue a DeprecationWarning, by passing the to-be decorated - function as argument, this adds warning to the to-be decorated function's - docstring and returns the new function object. - - See Also - -------- - deprecate : Decorate a function such that it issues a - :exc:`DeprecationWarning` - - Parameters - ---------- - msg : str - Additional explanation of the deprecation. Displayed in the - docstring after the warning. - - Returns - ------- - obj : object - - """ - - # Deprecated in NumPy 2.0, 2023-07-11 - warnings.warn( - "`deprecate` is deprecated, " - "use `warn` with `DeprecationWarning` instead. 
" - "(deprecated in NumPy 2.0)", - DeprecationWarning, - stacklevel=2 - ) - - return _Deprecate(message=msg) - - #----------------------------------------------------------------------------- @@ -327,11 +157,12 @@ def _split_line(name, arguments, width): k = k + len(argument) + len(addstr) if k > width: k = firstwidth + 1 + len(argument) - newstr = newstr + ",\n" + " "*(firstwidth+2) + argument + newstr = newstr + ",\n" + " " * (firstwidth + 2) + argument else: newstr = newstr + addstr + argument return newstr + _namedict = None _dictlist = None @@ -339,7 +170,7 @@ def _split_line(name, arguments, width): # to see if something is defined def _makenamedict(module='numpy'): module = __import__(module, globals(), locals(), []) - thedict = {module.__name__:module.__dict__} + thedict = {module.__name__: module.__dict__} dictlist = [module.__name__] totraverse = [module.__dict__] while True: @@ -394,21 +225,21 @@ def _info(obj, output=None): print("contiguous: ", bp(obj.flags.contiguous), file=output) print("fortran: ", obj.flags.fortran, file=output) print( - "data pointer: %s%s" % (hex(obj.ctypes._as_parameter_.value), extra), + f"data pointer: {hex(obj.ctypes._as_parameter_.value)}{extra}", file=output ) print("byteorder: ", end=' ', file=output) if endian in ['|', '=']: - print("%s%s%s" % (tic, sys.byteorder, tic), file=output) + print(f"{tic}{sys.byteorder}{tic}", file=output) byteswap = False elif endian == '>': - print("%sbig%s" % (tic, tic), file=output) + print(f"{tic}big{tic}", file=output) byteswap = sys.byteorder != "big" else: - print("%slittle%s" % (tic, tic), file=output) + print(f"{tic}little{tic}", file=output) byteswap = sys.byteorder != "little" print("byteswap: ", bp(byteswap), file=output) - print("type: %s" % obj.dtype, file=output) + print(f"type: {obj.dtype}", file=output) @set_module('numpy') @@ -477,8 +308,8 @@ def info(object=None, maxwidth=76, output=None, toplevel='numpy'): """ global _namedict, _dictlist # Local import to speed up numpy's 
import time. - import pydoc import inspect + import pydoc if (hasattr(object, '_ppimport_importer') or hasattr(object, '_ppimport_module')): @@ -502,23 +333,22 @@ def info(object=None, maxwidth=76, output=None, toplevel='numpy'): try: obj = _namedict[namestr][object] if id(obj) in objlist: - print("\n " - "*** Repeat reference found in %s *** " % namestr, + print(f"\n *** Repeat reference found in {namestr} *** ", file=output ) else: objlist.append(id(obj)) - print(" *** Found in %s ***" % namestr, file=output) + print(f" *** Found in {namestr} ***", file=output) info(obj) - print("-"*maxwidth, file=output) + print("-" * maxwidth, file=output) numfound += 1 except KeyError: pass if numfound == 0: - print("Help for %s not found." % object, file=output) + print(f"Help for {object} not found.", file=output) else: print("\n " - "*** Total of %d references found. ***" % numfound, + f"*** Total of {numfound} references found. ***", file=output ) @@ -529,7 +359,7 @@ def info(object=None, maxwidth=76, output=None, toplevel='numpy'): except Exception: arguments = "()" - if len(name+arguments) > maxwidth: + if len(name + arguments) > maxwidth: argstr = _split_line(name, arguments, maxwidth) else: argstr = name + arguments @@ -544,7 +374,7 @@ def info(object=None, maxwidth=76, output=None, toplevel='numpy'): except Exception: arguments = "()" - if len(name+arguments) > maxwidth: + if len(name + arguments) > maxwidth: argstr = _split_line(name, arguments, maxwidth) else: argstr = name + arguments @@ -568,79 +398,12 @@ def info(object=None, maxwidth=76, output=None, toplevel='numpy'): methstr, other = pydoc.splitdoc( inspect.getdoc(thisobj) or "None" ) - print(" %s -- %s" % (meth, methstr), file=output) + print(f" {meth} -- {methstr}", file=output) elif hasattr(object, '__doc__'): print(inspect.getdoc(object), file=output) -def safe_eval(source): - """ - Protected string evaluation. - - .. deprecated:: 2.0 - Use `ast.literal_eval` instead. 
- - Evaluate a string containing a Python literal expression without - allowing the execution of arbitrary non-literal code. - - .. warning:: - - This function is identical to :py:meth:`ast.literal_eval` and - has the same security implications. It may not always be safe - to evaluate large input strings. - - Parameters - ---------- - source : str - The string to evaluate. - - Returns - ------- - obj : object - The result of evaluating `source`. - - Raises - ------ - SyntaxError - If the code has invalid Python syntax, or if it contains - non-literal code. - - Examples - -------- - >>> np.safe_eval('1') - 1 - >>> np.safe_eval('[1, 2, 3]') - [1, 2, 3] - >>> np.safe_eval('{"foo": ("bar", 10.0)}') - {'foo': ('bar', 10.0)} - - >>> np.safe_eval('import os') - Traceback (most recent call last): - ... - SyntaxError: invalid syntax - - >>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()') - Traceback (most recent call last): - ... - ValueError: malformed node or string: <_ast.Call object at 0x...> - - """ - - # Deprecated in NumPy 2.0, 2023-07-11 - warnings.warn( - "`safe_eval` is deprecated. Use `ast.literal_eval` instead. " - "Be aware of security implications, such as memory exhaustion " - "based attacks (deprecated in NumPy 2.0)", - DeprecationWarning, - stacklevel=2 - ) - - # Local import to speed up numpy's import time. - import ast - return ast.literal_eval(source) - - def _median_nancheck(data, result, axis): """ Utility function to check median result from data for NaN values at the end @@ -698,7 +461,9 @@ def _opt_info(): str: A formatted string indicating the supported CPU features. 
""" from numpy._core._multiarray_umath import ( - __cpu_features__, __cpu_baseline__, __cpu_dispatch__ + __cpu_baseline__, + __cpu_dispatch__, + __cpu_features__, ) if len(__cpu_baseline__) == 0 and len(__cpu_dispatch__) == 0: @@ -754,9 +519,9 @@ def drop_metadata(dtype, /): if not found_metadata: return dtype - structure = dict( - names=names, formats=formats, offsets=offsets, titles=titles, - itemsize=dtype.itemsize) + structure = { + 'names': names, 'formats': formats, 'offsets': offsets, 'titles': titles, + 'itemsize': dtype.itemsize} # NOTE: Could pass (dtype.type, structure) to preserve record dtypes... return np.dtype(structure, align=dtype.isalignedstruct) diff --git a/numpy/lib/_utils_impl.pyi b/numpy/lib/_utils_impl.pyi index b1453874e85e..87fbc3aa5c4c 100644 --- a/numpy/lib/_utils_impl.pyi +++ b/numpy/lib/_utils_impl.pyi @@ -1,33 +1,18 @@ -from typing import ( - Any, - TypeVar, - Protocol, -) +from _typeshed import SupportsWrite +from typing import LiteralString -from numpy._core.numerictypes import ( - issubdtype as issubdtype, -) +import numpy as np -_T_contra = TypeVar("_T_contra", contravariant=True) - -# A file-like object opened in `w` mode -class _SupportsWrite(Protocol[_T_contra]): - def write(self, s: _T_contra, /) -> Any: ... - -__all__: list[str] - -def get_include() -> str: ... +__all__ = ["get_include", "info", "show_runtime"] +def get_include() -> LiteralString: ... +def show_runtime() -> None: ... def info( - object: object = ..., - maxwidth: int = ..., - output: None | _SupportsWrite[str] = ..., - toplevel: str = ..., + object: object = None, maxwidth: int = 76, output: SupportsWrite[str] | None = None, toplevel: str = "numpy" ) -> None: ... +def drop_metadata[DTypeT: np.dtype](dtype: DTypeT, /) -> DTypeT: ... -def source( - object: object, - output: None | _SupportsWrite[str] = ..., -) -> None: ... - -def show_runtime() -> None: ... 
+# used internally by `lib._function_base_impl._median` +def _median_nancheck[ScalarOrArrayT: np.generic | np.ndarray]( + data: np.ndarray, result: ScalarOrArrayT, axis: int +) -> ScalarOrArrayT: ... diff --git a/numpy/lib/_version.py b/numpy/lib/_version.py index bfac5f814501..d70a61040a40 100644 --- a/numpy/lib/_version.py +++ b/numpy/lib/_version.py @@ -7,11 +7,10 @@ """ import re - __all__ = ['NumpyVersion'] -class NumpyVersion(): +class NumpyVersion: """Parse and compare numpy version strings. NumPy has the following versioning scheme (numbers given are examples; they @@ -23,16 +22,13 @@ class NumpyVersion(): - Release candidates: '1.8.0rc1', '1.8.0rc2', etc. - Development versions: '1.8.0.dev-f1234afa' (git commit hash appended) - Development versions after a1: '1.8.0a1.dev-f1234afa', - '1.8.0b2.dev-f1234afa', - '1.8.1rc1.dev-f1234afa', etc. + '1.8.0b2.dev-f1234afa', '1.8.1rc1.dev-f1234afa', etc. - Development versions (no git hash available): '1.8.0.dev-Unknown' Comparing needs to be done against a valid version string or other `NumpyVersion` instance. Note that all development versions of the same (pre-)release compare equal. - .. 
versionadded:: 1.9.0 - Parameters ---------- vstring : str @@ -52,6 +48,8 @@ class NumpyVersion(): """ + __module__ = "numpy.lib" + def __init__(self, vstring): self.vstring = vstring ver_main = re.match(r'\d+\.\d+\.\d+', vstring) @@ -152,4 +150,4 @@ def __ge__(self, other): return self._compare(other) >= 0 def __repr__(self): - return "NumpyVersion(%s)" % self.vstring + return f"NumpyVersion({self.vstring})" diff --git a/numpy/lib/_version.pyi b/numpy/lib/_version.pyi index 1c82c99b686e..c53ef795f926 100644 --- a/numpy/lib/_version.pyi +++ b/numpy/lib/_version.pyi @@ -1,4 +1,4 @@ -__all__: list[str] +__all__ = ["NumpyVersion"] class NumpyVersion: vstring: str diff --git a/numpy/lib/array_utils.py b/numpy/lib/array_utils.py index b4e7976131d2..c267eb021ad8 100644 --- a/numpy/lib/array_utils.py +++ b/numpy/lib/array_utils.py @@ -1,4 +1,4 @@ -from ._array_utils_impl import ( +from ._array_utils_impl import ( # noqa: F401 __all__, __doc__, byte_bounds, diff --git a/numpy/lib/format.py b/numpy/lib/format.py index 8e14dfe4bcab..8e0c79942d23 100644 --- a/numpy/lib/format.py +++ b/numpy/lib/format.py @@ -1,1013 +1,24 @@ -""" -Binary serialization - -NPY format -========== - -A simple format for saving numpy arrays to disk with the full -information about them. - -The ``.npy`` format is the standard binary file format in NumPy for -persisting a *single* arbitrary NumPy array on disk. The format stores all -of the shape and dtype information necessary to reconstruct the array -correctly even on another machine with a different architecture. -The format is designed to be as simple as possible while achieving -its limited goals. - -The ``.npz`` format is the standard format for persisting *multiple* NumPy -arrays on disk. A ``.npz`` file is a zip file containing multiple ``.npy`` -files, one for each array. - -Capabilities ------------- - -- Can represent all NumPy arrays including nested record arrays and - object arrays. - -- Represents the data in its native binary form. 
- -- Supports Fortran-contiguous arrays directly. - -- Stores all of the necessary information to reconstruct the array - including shape and dtype on a machine of a different - architecture. Both little-endian and big-endian arrays are - supported, and a file with little-endian numbers will yield - a little-endian array on any machine reading the file. The - types are described in terms of their actual sizes. For example, - if a machine with a 64-bit C "long int" writes out an array with - "long ints", a reading machine with 32-bit C "long ints" will yield - an array with 64-bit integers. - -- Is straightforward to reverse engineer. Datasets often live longer than - the programs that created them. A competent developer should be - able to create a solution in their preferred programming language to - read most ``.npy`` files that they have been given without much - documentation. - -- Allows memory-mapping of the data. See `open_memmap`. - -- Can be read from a filelike stream object instead of an actual file. - -- Stores object arrays, i.e. arrays containing elements that are arbitrary - Python objects. Files with object arrays are not to be mmapable, but - can be read and written to disk. - -Limitations ------------ - -- Arbitrary subclasses of numpy.ndarray are not completely preserved. - Subclasses will be accepted for writing, but only the array data will - be written out. A regular numpy.ndarray object will be created - upon reading the file. - -.. warning:: - - Due to limitations in the interpretation of structured dtypes, dtypes - with fields with empty names will have the names replaced by 'f0', 'f1', - etc. Such arrays will not round-trip through the format entirely - accurately. The data is intact; only the field names will differ. We are - working on a fix for this. This fix will not require a change in the - file format. 
The arrays with such structures can still be saved and - restored, and the correct dtype may be restored by using the - ``loadedarray.view(correct_dtype)`` method. - -File extensions ---------------- - -We recommend using the ``.npy`` and ``.npz`` extensions for files saved -in this format. This is by no means a requirement; applications may wish -to use these file formats but use an extension specific to the -application. In the absence of an obvious alternative, however, -we suggest using ``.npy`` and ``.npz``. - -Version numbering ------------------ - -The version numbering of these formats is independent of NumPy version -numbering. If the format is upgraded, the code in `numpy.io` will still -be able to read and write Version 1.0 files. - -Format Version 1.0 ------------------- - -The first 6 bytes are a magic string: exactly ``\\x93NUMPY``. - -The next 1 byte is an unsigned byte: the major version number of the file -format, e.g. ``\\x01``. - -The next 1 byte is an unsigned byte: the minor version number of the file -format, e.g. ``\\x00``. Note: the version of the file format is not tied -to the version of the numpy package. - -The next 2 bytes form a little-endian unsigned short int: the length of -the header data HEADER_LEN. - -The next HEADER_LEN bytes form the header data describing the array's -format. It is an ASCII string which contains a Python literal expression -of a dictionary. It is terminated by a newline (``\\n``) and padded with -spaces (``\\x20``) to make the total of -``len(magic string) + 2 + len(length) + HEADER_LEN`` be evenly divisible -by 64 for alignment purposes. - -The dictionary contains three keys: - - "descr" : dtype.descr - An object that can be passed as an argument to the `numpy.dtype` - constructor to create the array's dtype. - "fortran_order" : bool - Whether the array data is Fortran-contiguous or not. 
Since - Fortran-contiguous arrays are a common form of non-C-contiguity, - we allow them to be written directly to disk for efficiency. - "shape" : tuple of int - The shape of the array. - -For repeatability and readability, the dictionary keys are sorted in -alphabetic order. This is for convenience only. A writer SHOULD implement -this if possible. A reader MUST NOT depend on this. - -Following the header comes the array data. If the dtype contains Python -objects (i.e. ``dtype.hasobject is True``), then the data is a Python -pickle of the array. Otherwise the data is the contiguous (either C- -or Fortran-, depending on ``fortran_order``) bytes of the array. -Consumers can figure out the number of bytes by multiplying the number -of elements given by the shape (noting that ``shape=()`` means there is -1 element) by ``dtype.itemsize``. - -Format Version 2.0 ------------------- - -The version 1.0 format only allowed the array header to have a total size of -65535 bytes. This can be exceeded by structured arrays with a large number of -columns. The version 2.0 format extends the header size to 4 GiB. -`numpy.save` will automatically save in 2.0 format if the data requires it, -else it will always use the more compatible 1.0 format. - -The description of the fourth element of the header therefore has become: -"The next 4 bytes form a little-endian unsigned int: the length of the header -data HEADER_LEN." - -Format Version 3.0 ------------------- - -This version replaces the ASCII string (which in practice was latin1) with -a utf8-encoded string, so supports structured types with any unicode field -names. - -Notes ------ -The ``.npy`` format, including motivation for creating it and a comparison of -alternatives, is described in the -:doc:`"npy-format" NEP `, however details have -evolved with time and this document is more current. 
- -""" -import io -import os -import pickle -import warnings - -import numpy -from numpy.lib._utils_impl import drop_metadata - - -__all__ = [] - - -EXPECTED_KEYS = {'descr', 'fortran_order', 'shape'} -MAGIC_PREFIX = b'\x93NUMPY' -MAGIC_LEN = len(MAGIC_PREFIX) + 2 -ARRAY_ALIGN = 64 # plausible values are powers of 2 between 16 and 4096 -BUFFER_SIZE = 2**18 # size of buffer for reading npz files in bytes -# allow growth within the address space of a 64 bit machine along one axis -GROWTH_AXIS_MAX_DIGITS = 21 # = len(str(8*2**64-1)) hypothetical int1 dtype - -# difference between version 1.0 and 2.0 is a 4 byte (I) header length -# instead of 2 bytes (H) allowing storage of large structured arrays -_header_size_info = { - (1, 0): (' 255: - raise ValueError("major version must be 0 <= major < 256") - if minor < 0 or minor > 255: - raise ValueError("minor version must be 0 <= minor < 256") - return MAGIC_PREFIX + bytes([major, minor]) - -def read_magic(fp): - """ Read the magic string to get the version of the file format. - - Parameters - ---------- - fp : filelike object - - Returns - ------- - major : int - minor : int - """ - magic_str = _read_bytes(fp, MAGIC_LEN, "magic string") - if magic_str[:-2] != MAGIC_PREFIX: - msg = "the magic string is not correct; expected %r, got %r" - raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2])) - major, minor = magic_str[-2:] - return major, minor - - -def dtype_to_descr(dtype): - """ - Get a serializable descriptor from the dtype. - - The .descr attribute of a dtype object cannot be round-tripped through - the dtype() constructor. Simple types, like dtype('float32'), have - a descr which looks like a record array with one field with '' as - a name. The dtype() constructor interprets this as a request to give - a default name. Instead, we construct descriptor that can be passed to - dtype(). - - Parameters - ---------- - dtype : dtype - The dtype of the array that will be written to disk. 
- - Returns - ------- - descr : object - An object that can be passed to `numpy.dtype()` in order to - replicate the input dtype. - - """ - # NOTE: that drop_metadata may not return the right dtype e.g. for user - # dtypes. In that case our code below would fail the same, though. - new_dtype = drop_metadata(dtype) - if new_dtype is not dtype: - warnings.warn("metadata on a dtype is not saved to an npy/npz. " - "Use another format (such as pickle) to store it.", - UserWarning, stacklevel=2) - if dtype.names is not None: - # This is a record array. The .descr is fine. XXX: parts of the - # record array with an empty name, like padding bytes, still get - # fiddled with. This needs to be fixed in the C implementation of - # dtype(). - return dtype.descr - elif not type(dtype)._legacy: - # this must be a user-defined dtype since numpy does not yet expose any - # non-legacy dtypes in the public API - # - # non-legacy dtypes don't yet have __array_interface__ - # support. Instead, as a hack, we use pickle to save the array, and lie - # that the dtype is object. When the array is loaded, the descriptor is - # unpickled with the array and the object dtype in the header is - # discarded. - # - # a future NEP should define a way to serialize user-defined - # descriptors and ideally work out the possible security implications - warnings.warn("Custom dtypes are saved as python objects using the " - "pickle protocol. Loading this file requires " - "allow_pickle=True to be set.", - UserWarning, stacklevel=2) - return "|O" - else: - return dtype.str - -def descr_to_dtype(descr): - """ - Returns a dtype based off the given description. - - This is essentially the reverse of `~lib.format.dtype_to_descr`. It will - remove the valueless padding fields created by, i.e. simple fields like - dtype('float32'), and then convert the description to its corresponding - dtype. - - Parameters - ---------- - descr : object - The object retrieved by dtype.descr. 
Can be passed to - `numpy.dtype` in order to replicate the input dtype. - - Returns - ------- - dtype : dtype - The dtype constructed by the description. - - """ - if isinstance(descr, str): - # No padding removal needed - return numpy.dtype(descr) - elif isinstance(descr, tuple): - # subtype, will always have a shape descr[1] - dt = descr_to_dtype(descr[0]) - return numpy.dtype((dt, descr[1])) - - titles = [] - names = [] - formats = [] - offsets = [] - offset = 0 - for field in descr: - if len(field) == 2: - name, descr_str = field - dt = descr_to_dtype(descr_str) - else: - name, descr_str, shape = field - dt = numpy.dtype((descr_to_dtype(descr_str), shape)) - - # Ignore padding bytes, which will be void bytes with '' as name - # Once support for blank names is removed, only "if name == ''" needed) - is_pad = (name == '' and dt.type is numpy.void and dt.names is None) - if not is_pad: - title, name = name if isinstance(name, tuple) else (None, name) - titles.append(title) - names.append(name) - formats.append(dt) - offsets.append(offset) - offset += dt.itemsize - - return numpy.dtype({'names': names, 'formats': formats, 'titles': titles, - 'offsets': offsets, 'itemsize': offset}) - -def header_data_from_array_1_0(array): - """ Get the dictionary of header metadata from a numpy.ndarray. - - Parameters - ---------- - array : numpy.ndarray - - Returns - ------- - d : dict - This has the appropriate entries for writing its string representation - to the header of the file. - """ - d = {'shape': array.shape} - if array.flags.c_contiguous: - d['fortran_order'] = False - elif array.flags.f_contiguous: - d['fortran_order'] = True - else: - # Totally non-contiguous data. We will have to make it C-contiguous - # before writing. Note that we need to test for C_CONTIGUOUS first - # because a 1-D array is both C_CONTIGUOUS and F_CONTIGUOUS. 
- d['fortran_order'] = False - - d['descr'] = dtype_to_descr(array.dtype) - return d - - -def _wrap_header(header, version): - """ - Takes a stringified header, and attaches the prefix and padding to it - """ - import struct - assert version is not None - fmt, encoding = _header_size_info[version] - header = header.encode(encoding) - hlen = len(header) + 1 - padlen = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize(fmt) + hlen) % ARRAY_ALIGN) - try: - header_prefix = magic(*version) + struct.pack(fmt, hlen + padlen) - except struct.error: - msg = "Header length {} too big for version={}".format(hlen, version) - raise ValueError(msg) from None - - # Pad the header with spaces and a final newline such that the magic - # string, the header-length short and the header are aligned on a - # ARRAY_ALIGN byte boundary. This supports memory mapping of dtypes - # aligned up to ARRAY_ALIGN on systems like Linux where mmap() - # offset must be page-aligned (i.e. the beginning of the file). - return header_prefix + header + b' '*padlen + b'\n' - - -def _wrap_header_guess_version(header): - """ - Like `_wrap_header`, but chooses an appropriate version given the contents - """ - try: - return _wrap_header(header, (1, 0)) - except ValueError: - pass - - try: - ret = _wrap_header(header, (2, 0)) - except UnicodeEncodeError: - pass - else: - warnings.warn("Stored array in format 2.0. It can only be" - "read by NumPy >= 1.9", UserWarning, stacklevel=2) - return ret - - header = _wrap_header(header, (3, 0)) - warnings.warn("Stored array in format 3.0. It can only be " - "read by NumPy >= 1.17", UserWarning, stacklevel=2) - return header - - -def _write_array_header(fp, d, version=None): - """ Write the header for an array and returns the version used - - Parameters - ---------- - fp : filelike object - d : dict - This has the appropriate entries for writing its string representation - to the header of the file. - version : tuple or None - None means use oldest that works. 
Providing an explicit version will - raise a ValueError if the format does not allow saving this data. - Default: None - """ - header = ["{"] - for key, value in sorted(d.items()): - # Need to use repr here, since we eval these when reading - header.append("'%s': %s, " % (key, repr(value))) - header.append("}") - header = "".join(header) - - # Add some spare space so that the array header can be modified in-place - # when changing the array size, e.g. when growing it by appending data at - # the end. - shape = d['shape'] - header += " " * ((GROWTH_AXIS_MAX_DIGITS - len(repr( - shape[-1 if d['fortran_order'] else 0] - ))) if len(shape) > 0 else 0) - - if version is None: - header = _wrap_header_guess_version(header) - else: - header = _wrap_header(header, version) - fp.write(header) - -def write_array_header_1_0(fp, d): - """ Write the header for an array using the 1.0 format. - - Parameters - ---------- - fp : filelike object - d : dict - This has the appropriate entries for writing its string - representation to the header of the file. - """ - _write_array_header(fp, d, (1, 0)) - - -def write_array_header_2_0(fp, d): - """ Write the header for an array using the 2.0 format. - The 2.0 format allows storing very large structured arrays. - - .. versionadded:: 1.9.0 - - Parameters - ---------- - fp : filelike object - d : dict - This has the appropriate entries for writing its string - representation to the header of the file. - """ - _write_array_header(fp, d, (2, 0)) - -def read_array_header_1_0(fp, max_header_size=_MAX_HEADER_SIZE): - """ - Read an array header from a filelike object using the 1.0 file format - version. - - This will leave the file object located just after the header. - - Parameters - ---------- - fp : filelike object - A file object or something with a `.read()` method like a file. - - Returns - ------- - shape : tuple of int - The shape of the array. 
- fortran_order : bool - The array data will be written out directly if it is either - C-contiguous or Fortran-contiguous. Otherwise, it will be made - contiguous before writing it out. - dtype : dtype - The dtype of the file's data. - max_header_size : int, optional - Maximum allowed size of the header. Large headers may not be safe - to load securely and thus require explicitly passing a larger value. - See :py:func:`ast.literal_eval()` for details. - - Raises - ------ - ValueError - If the data is invalid. - - """ - return _read_array_header( - fp, version=(1, 0), max_header_size=max_header_size) - -def read_array_header_2_0(fp, max_header_size=_MAX_HEADER_SIZE): - """ - Read an array header from a filelike object using the 2.0 file format - version. - - This will leave the file object located just after the header. - - .. versionadded:: 1.9.0 - - Parameters - ---------- - fp : filelike object - A file object or something with a `.read()` method like a file. - max_header_size : int, optional - Maximum allowed size of the header. Large headers may not be safe - to load securely and thus require explicitly passing a larger value. - See :py:func:`ast.literal_eval()` for details. - - Returns - ------- - shape : tuple of int - The shape of the array. - fortran_order : bool - The array data will be written out directly if it is either - C-contiguous or Fortran-contiguous. Otherwise, it will be made - contiguous before writing it out. - dtype : dtype - The dtype of the file's data. - - Raises - ------ - ValueError - If the data is invalid. - - """ - return _read_array_header( - fp, version=(2, 0), max_header_size=max_header_size) - - -def _filter_header(s): - """Clean up 'L' in npz header ints. - - Cleans up the 'L' in strings representing integers. Needed to allow npz - headers produced in Python2 to be read in Python3. - - Parameters - ---------- - s : string - Npy file header. - - Returns - ------- - header : str - Cleaned up header. 
- - """ - import tokenize - from io import StringIO - - tokens = [] - last_token_was_number = False - for token in tokenize.generate_tokens(StringIO(s).readline): - token_type = token[0] - token_string = token[1] - if (last_token_was_number and - token_type == tokenize.NAME and - token_string == "L"): - continue - else: - tokens.append(token) - last_token_was_number = (token_type == tokenize.NUMBER) - return tokenize.untokenize(tokens) - - -def _read_array_header(fp, version, max_header_size=_MAX_HEADER_SIZE): - """ - see read_array_header_1_0 - """ - # Read an unsigned, little-endian short int which has the length of the - # header. - import ast - import struct - hinfo = _header_size_info.get(version) - if hinfo is None: - raise ValueError("Invalid version {!r}".format(version)) - hlength_type, encoding = hinfo - - hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length") - header_length = struct.unpack(hlength_type, hlength_str)[0] - header = _read_bytes(fp, header_length, "array header") - header = header.decode(encoding) - if len(header) > max_header_size: - raise ValueError( - f"Header info length ({len(header)}) is large and may not be safe " - "to load securely.\n" - "To allow loading, adjust `max_header_size` or fully trust " - "the `.npy` file using `allow_pickle=True`.\n" - "For safety against large resource use or crashes, sandboxing " - "may be necessary.") - - # The header is a pretty-printed string representation of a literal - # Python dictionary with trailing newlines padded to a ARRAY_ALIGN byte - # boundary. The keys are strings. - # "shape" : tuple of int - # "fortran_order" : bool - # "descr" : dtype.descr - # Versions (2, 0) and (1, 0) could have been created by a Python 2 - # implementation before header filtering was implemented. 
- # - # For performance reasons, we try without _filter_header first though - try: - d = ast.literal_eval(header) - except SyntaxError as e: - if version <= (2, 0): - header = _filter_header(header) - try: - d = ast.literal_eval(header) - except SyntaxError as e2: - msg = "Cannot parse header: {!r}" - raise ValueError(msg.format(header)) from e2 - else: - warnings.warn( - "Reading `.npy` or `.npz` file required additional " - "header parsing as it was created on Python 2. Save the " - "file again to speed up loading and avoid this warning.", - UserWarning, stacklevel=4) - else: - msg = "Cannot parse header: {!r}" - raise ValueError(msg.format(header)) from e - if not isinstance(d, dict): - msg = "Header is not a dictionary: {!r}" - raise ValueError(msg.format(d)) - - if EXPECTED_KEYS != d.keys(): - keys = sorted(d.keys()) - msg = "Header does not contain the correct keys: {!r}" - raise ValueError(msg.format(keys)) - - # Sanity-check the values. - if (not isinstance(d['shape'], tuple) or - not all(isinstance(x, int) for x in d['shape'])): - msg = "shape is not valid: {!r}" - raise ValueError(msg.format(d['shape'])) - if not isinstance(d['fortran_order'], bool): - msg = "fortran_order is not a valid bool: {!r}" - raise ValueError(msg.format(d['fortran_order'])) - try: - dtype = descr_to_dtype(d['descr']) - except TypeError as e: - msg = "descr is not a valid dtype descriptor: {!r}" - raise ValueError(msg.format(d['descr'])) from e - - return d['shape'], d['fortran_order'], dtype - -def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None): - """ - Write an array to an NPY file, including a header. - - If the array is neither C-contiguous nor Fortran-contiguous AND the - file_like object is not a real file object, this function will have to - copy data in memory. - - Parameters - ---------- - fp : file_like object - An open, writable file object, or similar object with a - ``.write()`` method. - array : ndarray - The array to write to disk. 
- version : (int, int) or None, optional - The version number of the format. None means use the oldest - supported version that is able to store the data. Default: None - allow_pickle : bool, optional - Whether to allow writing pickled data. Default: True - pickle_kwargs : dict, optional - Additional keyword arguments to pass to pickle.dump, excluding - 'protocol'. These are only useful when pickling objects in object - arrays on Python 3 to Python 2 compatible format. - - Raises - ------ - ValueError - If the array cannot be persisted. This includes the case of - allow_pickle=False and array being an object array. - Various other errors - If the array contains Python objects as part of its dtype, the - process of pickling them may raise various errors if the objects - are not picklable. - - """ - _check_version(version) - _write_array_header(fp, header_data_from_array_1_0(array), version) - - if array.itemsize == 0: - buffersize = 0 - else: - # Set buffer size to 16 MiB to hide the Python loop overhead. - buffersize = max(16 * 1024 ** 2 // array.itemsize, 1) - - dtype_class = type(array.dtype) - - if array.dtype.hasobject or not dtype_class._legacy: - # We contain Python objects so we cannot write out the data - # directly. 
Instead, we will pickle it out - if not allow_pickle: - if array.dtype.hasobject: - raise ValueError("Object arrays cannot be saved when " - "allow_pickle=False") - if not dtype_class._legacy: - raise ValueError("User-defined dtypes cannot be saved " - "when allow_pickle=False") - if pickle_kwargs is None: - pickle_kwargs = {} - pickle.dump(array, fp, protocol=4, **pickle_kwargs) - elif array.flags.f_contiguous and not array.flags.c_contiguous: - if isfileobj(fp): - array.T.tofile(fp) - else: - for chunk in numpy.nditer( - array, flags=['external_loop', 'buffered', 'zerosize_ok'], - buffersize=buffersize, order='F'): - fp.write(chunk.tobytes('C')) - else: - if isfileobj(fp): - array.tofile(fp) - else: - for chunk in numpy.nditer( - array, flags=['external_loop', 'buffered', 'zerosize_ok'], - buffersize=buffersize, order='C'): - fp.write(chunk.tobytes('C')) - - -def read_array(fp, allow_pickle=False, pickle_kwargs=None, *, - max_header_size=_MAX_HEADER_SIZE): - """ - Read an array from an NPY file. - - Parameters - ---------- - fp : file_like object - If this is not a real file object, then this may take extra memory - and time. - allow_pickle : bool, optional - Whether to allow writing pickled data. Default: False - - .. versionchanged:: 1.16.3 - Made default False in response to CVE-2019-6446. - - pickle_kwargs : dict - Additional keyword arguments to pass to pickle.load. These are only - useful when loading object arrays saved on Python 2 when using - Python 3. - max_header_size : int, optional - Maximum allowed size of the header. Large headers may not be safe - to load securely and thus require explicitly passing a larger value. - See :py:func:`ast.literal_eval()` for details. - This option is ignored when `allow_pickle` is passed. In that case - the file is by definition trusted and the limit is unnecessary. - - Returns - ------- - array : ndarray - The array from the data on disk. 
- - Raises - ------ - ValueError - If the data is invalid, or allow_pickle=False and the file contains - an object array. - - """ - if allow_pickle: - # Effectively ignore max_header_size, since `allow_pickle` indicates - # that the input is fully trusted. - max_header_size = 2**64 - - version = read_magic(fp) - _check_version(version) - shape, fortran_order, dtype = _read_array_header( - fp, version, max_header_size=max_header_size) - if len(shape) == 0: - count = 1 - else: - count = numpy.multiply.reduce(shape, dtype=numpy.int64) - - # Now read the actual data. - if dtype.hasobject: - # The array contained Python objects. We need to unpickle the data. - if not allow_pickle: - raise ValueError("Object arrays cannot be loaded when " - "allow_pickle=False") - if pickle_kwargs is None: - pickle_kwargs = {} - try: - array = pickle.load(fp, **pickle_kwargs) - except UnicodeError as err: - # Friendlier error message - raise UnicodeError("Unpickling a python object failed: %r\n" - "You may need to pass the encoding= option " - "to numpy.load" % (err,)) from err - else: - if isfileobj(fp): - # We can use the fast fromfile() function. - array = numpy.fromfile(fp, dtype=dtype, count=count) - else: - # This is not a real file. We have to read it the - # memory-intensive way. - # crc32 module fails on reads greater than 2 ** 32 bytes, - # breaking large reads from gzip streams. Chunk reads to - # BUFFER_SIZE bytes to avoid issue and reduce memory overhead - # of the read. In non-chunked case count < max_read_count, so - # only one read is performed. 
- - # Use np.ndarray instead of np.empty since the latter does - # not correctly instantiate zero-width string dtypes; see - # https://github.com/numpy/numpy/pull/6430 - array = numpy.ndarray(count, dtype=dtype) - - if dtype.itemsize > 0: - # If dtype.itemsize == 0 then there's nothing more to read - max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize) - - for i in range(0, count, max_read_count): - read_count = min(max_read_count, count - i) - read_size = int(read_count * dtype.itemsize) - data = _read_bytes(fp, read_size, "array data") - array[i:i+read_count] = numpy.frombuffer(data, dtype=dtype, - count=read_count) - - if fortran_order: - array.shape = shape[::-1] - array = array.transpose() - else: - array.shape = shape - - return array - - -def open_memmap(filename, mode='r+', dtype=None, shape=None, - fortran_order=False, version=None, *, - max_header_size=_MAX_HEADER_SIZE): - """ - Open a .npy file as a memory-mapped array. - - This may be used to read an existing file or create a new one. - - Parameters - ---------- - filename : str or path-like - The name of the file on disk. This may *not* be a file-like - object. - mode : str, optional - The mode in which to open the file; the default is 'r+'. In - addition to the standard file modes, 'c' is also accepted to mean - "copy on write." See `memmap` for the available mode strings. - dtype : data-type, optional - The data type of the array if we are creating a new file in "write" - mode, if not, `dtype` is ignored. The default value is None, which - results in a data-type of `float64`. - shape : tuple of int - The shape of the array if we are creating a new file in "write" - mode, in which case this parameter is required. Otherwise, this - parameter is ignored and is thus optional. - fortran_order : bool, optional - Whether the array should be Fortran-contiguous (True) or - C-contiguous (False, the default) if we are creating a new file in - "write" mode. 
- version : tuple of int (major, minor) or None - If the mode is a "write" mode, then this is the version of the file - format used to create the file. None means use the oldest - supported version that is able to store the data. Default: None - max_header_size : int, optional - Maximum allowed size of the header. Large headers may not be safe - to load securely and thus require explicitly passing a larger value. - See :py:func:`ast.literal_eval()` for details. - - Returns - ------- - marray : memmap - The memory-mapped array. - - Raises - ------ - ValueError - If the data or the mode is invalid. - OSError - If the file is not found or cannot be opened correctly. - - See Also - -------- - numpy.memmap - - """ - if isfileobj(filename): - raise ValueError("Filename must be a string or a path-like object." - " Memmap cannot use existing file handles.") - - if 'w' in mode: - # We are creating the file, not reading it. - # Check if we ought to create the file. - _check_version(version) - # Ensure that the given dtype is an authentic dtype object rather - # than just something that can be interpreted as a dtype object. - dtype = numpy.dtype(dtype) - if dtype.hasobject: - msg = "Array can't be memory-mapped: Python objects in dtype." - raise ValueError(msg) - d = dict( - descr=dtype_to_descr(dtype), - fortran_order=fortran_order, - shape=shape, - ) - # If we got here, then it should be safe to create the file. - with open(os.fspath(filename), mode+'b') as fp: - _write_array_header(fp, d, version) - offset = fp.tell() - else: - # Read the header of the file first. - with open(os.fspath(filename), 'rb') as fp: - version = read_magic(fp) - _check_version(version) - - shape, fortran_order, dtype = _read_array_header( - fp, version, max_header_size=max_header_size) - if dtype.hasobject: - msg = "Array can't be memory-mapped: Python objects in dtype." 
- raise ValueError(msg) - offset = fp.tell() - - if fortran_order: - order = 'F' - else: - order = 'C' - - # We need to change a write-only mode to a read-write mode since we've - # already written data to the file. - if mode == 'w+': - mode = 'r+' - - marray = numpy.memmap(filename, dtype=dtype, shape=shape, order=order, - mode=mode, offset=offset) - - return marray - - -def _read_bytes(fp, size, error_template="ran out of data"): - """ - Read from file-like object until size bytes are read. - Raises ValueError if not EOF is encountered before size bytes are read. - Non-blocking objects only supported if they derive from io objects. - - Required as e.g. ZipExtFile in python 2.6 can return less data than - requested. - """ - data = bytes() - while True: - # io files (default in python3) return None or raise on - # would-block, python2 file will truncate, probably nothing can be - # done about that. note that regular files can't be non-blocking - try: - r = fp.read(size - len(data)) - data += r - if len(r) == 0 or len(data) == size: - break - except BlockingIOError: - pass - if len(data) != size: - msg = "EOF: reading %s, expected %d bytes got %d" - raise ValueError(msg % (error_template, size, len(data))) - else: - return data - - -def isfileobj(f): - if not isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter)): - return False - try: - # BufferedReader/Writer may raise OSError when - # fetching `fileno()` (e.g. when wrapping BytesIO). 
- f.fileno() - return True - except OSError: - return False +from ._format_impl import ( # noqa: F401 + ARRAY_ALIGN, + BUFFER_SIZE, + EXPECTED_KEYS, + GROWTH_AXIS_MAX_DIGITS, + MAGIC_LEN, + MAGIC_PREFIX, + __all__, + __doc__, + descr_to_dtype, + drop_metadata, + dtype_to_descr, + header_data_from_array_1_0, + isfileobj, + magic, + open_memmap, + read_array, + read_array_header_1_0, + read_array_header_2_0, + read_magic, + write_array, + write_array_header_1_0, + write_array_header_2_0, +) diff --git a/numpy/lib/format.pyi b/numpy/lib/format.pyi index a4468f52f464..c29e18fe0581 100644 --- a/numpy/lib/format.pyi +++ b/numpy/lib/format.pyi @@ -1,22 +1,24 @@ -from typing import Any, Literal, Final - -__all__: list[str] - -EXPECTED_KEYS: Final[set[str]] -MAGIC_PREFIX: Final[bytes] -MAGIC_LEN: Literal[8] -ARRAY_ALIGN: Literal[64] -BUFFER_SIZE: Literal[262144] # 2**18 - -def magic(major, minor): ... -def read_magic(fp): ... -def dtype_to_descr(dtype): ... -def descr_to_dtype(descr): ... -def header_data_from_array_1_0(array): ... -def write_array_header_1_0(fp, d): ... -def write_array_header_2_0(fp, d): ... -def read_array_header_1_0(fp): ... -def read_array_header_2_0(fp): ... -def write_array(fp, array, version=..., allow_pickle=..., pickle_kwargs=...): ... -def read_array(fp, allow_pickle=..., pickle_kwargs=...): ... -def open_memmap(filename, mode=..., dtype=..., shape=..., fortran_order=..., version=...): ... 
+from ._format_impl import ( + ARRAY_ALIGN as ARRAY_ALIGN, + BUFFER_SIZE as BUFFER_SIZE, + EXPECTED_KEYS as EXPECTED_KEYS, + GROWTH_AXIS_MAX_DIGITS as GROWTH_AXIS_MAX_DIGITS, + MAGIC_LEN as MAGIC_LEN, + MAGIC_PREFIX as MAGIC_PREFIX, + __all__ as __all__, + __doc__ as __doc__, + descr_to_dtype as descr_to_dtype, + drop_metadata as drop_metadata, + dtype_to_descr as dtype_to_descr, + header_data_from_array_1_0 as header_data_from_array_1_0, + isfileobj as isfileobj, + magic as magic, + open_memmap as open_memmap, + read_array as read_array, + read_array_header_1_0 as read_array_header_1_0, + read_array_header_2_0 as read_array_header_2_0, + read_magic as read_magic, + write_array as write_array, + write_array_header_1_0 as write_array_header_1_0, + write_array_header_2_0 as write_array_header_2_0, +) diff --git a/numpy/lib/introspect.py b/numpy/lib/introspect.py index 4688eadc32ac..816c79a669b9 100644 --- a/numpy/lib/introspect.py +++ b/numpy/lib/introspect.py @@ -1,7 +1,6 @@ """ Introspection helper functions. """ -import re __all__ = ['opt_func_info'] @@ -30,11 +29,12 @@ def opt_func_info(func_name=None, signature=None): Retrieve dispatch information for functions named 'add' or 'sub' and data types 'float64' or 'float32': + >>> import numpy as np >>> dict = np.lib.introspect.opt_func_info( ... func_name="add|abs", signature="float64|complex64" ... 
) >>> import json - >>> print(json.dumps(dict, indent=2)) + >>> print(json.dumps(dict, indent=2)) # may vary (architecture) { "absolute": { "dd": { @@ -63,9 +63,9 @@ def opt_func_info(func_name=None, signature=None): } """ - from numpy._core._multiarray_umath import ( - __cpu_targets_info__ as targets, dtype - ) + import re + + from numpy._core._multiarray_umath import __cpu_targets_info__ as targets, dtype if func_name is not None: func_pattern = re.compile(func_name) @@ -82,12 +82,11 @@ def opt_func_info(func_name=None, signature=None): for k, v in matching_funcs.items(): matching_chars = {} for chars, targets in v.items(): - if any([ - sig_pattern.search(c) or - sig_pattern.search(dtype(c).name) + if any( + sig_pattern.search(c) or sig_pattern.search(dtype(c).name) for c in chars - ]): - matching_chars[chars] = targets + ): + matching_chars[chars] = targets # noqa: PERF403 if matching_chars: matching_sigs[k] = matching_chars else: diff --git a/numpy/lib/introspect.pyi b/numpy/lib/introspect.pyi new file mode 100644 index 000000000000..7929981cd636 --- /dev/null +++ b/numpy/lib/introspect.pyi @@ -0,0 +1,3 @@ +__all__ = ["opt_func_info"] + +def opt_func_info(func_name: str | None = None, signature: str | None = None) -> dict[str, dict[str, dict[str, str]]]: ... 
diff --git a/numpy/lib/mixins.py b/numpy/lib/mixins.py index a15bdeeac104..cd02bf7f4a50 100644 --- a/numpy/lib/mixins.py +++ b/numpy/lib/mixins.py @@ -3,7 +3,6 @@ """ from numpy._core import umath as um - __all__ = ['NDArrayOperatorsMixin'] @@ -21,7 +20,7 @@ def func(self, other): if _disables_array_ufunc(other): return NotImplemented return ufunc(self, other) - func.__name__ = '__{}__'.format(name) + func.__name__ = f'__{name}__' return func @@ -31,7 +30,7 @@ def func(self, other): if _disables_array_ufunc(other): return NotImplemented return ufunc(other, self) - func.__name__ = '__r{}__'.format(name) + func.__name__ = f'__r{name}__' return func @@ -39,7 +38,7 @@ def _inplace_binary_method(ufunc, name): """Implement an in-place binary method with a ufunc, e.g., __iadd__.""" def func(self, other): return ufunc(self, other, out=(self,)) - func.__name__ = '__i{}__'.format(name) + func.__name__ = f'__i{name}__' return func @@ -54,7 +53,7 @@ def _unary_method(ufunc, name): """Implement a unary special method with a ufunc.""" def func(self): return ufunc(self) - func.__name__ = '__{}__'.format(name) + func.__name__ = f'__{name}__' return func @@ -69,10 +68,9 @@ class NDArrayOperatorsMixin: It is useful for writing classes that do not inherit from `numpy.ndarray`, but that should support arithmetic and numpy universal functions like - arrays as described in `A Mechanism for Overriding Ufuncs - `_. + arrays as described in :external+neps:doc:`nep-0013-ufunc-overrides`. - As an trivial example, consider this implementation of an ``ArrayLike`` + As a trivial example, consider this implementation of an ``ArrayLike`` class that simply wraps a NumPy array and ensures that the result of any arithmetic operation is also an ``ArrayLike`` object: @@ -116,7 +114,7 @@ class that simply wraps a NumPy array and ensures that the result of any ... else: ... # one return value ... return type(self)(result) - ... + ... ... def __repr__(self): ... 
return '%s(%r)' % (type(self).__name__, self.value) @@ -137,8 +135,8 @@ class that simply wraps a NumPy array and ensures that the result of any with arbitrary, unrecognized types. This ensures that interactions with ArrayLike preserve a well-defined casting hierarchy. - .. versionadded:: 1.13 """ + __slots__ = () # Like np.ndarray, this mixin class implements "Option 1" from the ufunc # overrides NEP. @@ -157,7 +155,6 @@ class that simply wraps a NumPy array and ensures that the result of any __mul__, __rmul__, __imul__ = _numeric_methods(um.multiply, 'mul') __matmul__, __rmatmul__, __imatmul__ = _numeric_methods( um.matmul, 'matmul') - # Python 3 does not use __div__, __rdiv__, or __idiv__ __truediv__, __rtruediv__, __itruediv__ = _numeric_methods( um.true_divide, 'truediv') __floordiv__, __rfloordiv__, __ifloordiv__ = _numeric_methods( diff --git a/numpy/lib/mixins.pyi b/numpy/lib/mixins.pyi index dfabe3d89053..e508a5cfd4bb 100644 --- a/numpy/lib/mixins.pyi +++ b/numpy/lib/mixins.pyi @@ -1,9 +1,9 @@ -from abc import ABCMeta, abstractmethod -from typing import Literal as L, Any +from abc import ABC, abstractmethod +from typing import Any, Literal as L, type_check_only from numpy import ufunc -__all__: list[str] +__all__ = ["NDArrayOperatorsMixin"] # NOTE: `NDArrayOperatorsMixin` is not formally an abstract baseclass, # even though it's reliant on subclasses implementing `__array_ufunc__` @@ -12,12 +12,16 @@ __all__: list[str] # completely dependent on how `__array_ufunc__` is implemented. # As such, only little type safety can be provided here. -class NDArrayOperatorsMixin(metaclass=ABCMeta): +class NDArrayOperatorsMixin(ABC): + __slots__ = () + + @type_check_only @abstractmethod def __array_ufunc__( self, ufunc: ufunc, method: L["__call__", "reduce", "reduceat", "accumulate", "outer", "at"], + /, *inputs: Any, **kwargs: Any, ) -> Any: ... 
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py index 1003ef5be4b1..84d8079266d7 100644 --- a/numpy/lib/npyio.py +++ b/numpy/lib/npyio.py @@ -1,3 +1 @@ -from ._npyio_impl import ( - __doc__, DataSource, NpzFile -) +from ._npyio_impl import DataSource, NpzFile, __doc__ # noqa: F401 diff --git a/numpy/lib/npyio.pyi b/numpy/lib/npyio.pyi index c3258e88d04f..fd3ae8f5a287 100644 --- a/numpy/lib/npyio.pyi +++ b/numpy/lib/npyio.pyi @@ -1,4 +1,5 @@ from numpy.lib._npyio_impl import ( DataSource as DataSource, NpzFile as NpzFile, + __doc__ as __doc__, ) diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py index bc5c5de095a8..8603f7b81a46 100644 --- a/numpy/lib/recfunctions.py +++ b/numpy/lib/recfunctions.py @@ -6,18 +6,13 @@ """ import itertools + import numpy as np import numpy.ma as ma -from numpy import ndarray -from numpy.ma import MaskedArray -from numpy.ma.mrecords import MaskedRecords +import numpy.ma.mrecords as mrec from numpy._core.overrides import array_function_dispatch -from numpy._core.records import recarray from numpy.lib._iotools import _is_string_like -_check_fill_value = np.ma.core._check_fill_value - - __all__ = [ 'append_fields', 'apply_along_fields', 'assign_fields_by_name', 'drop_fields', 'find_duplicates', 'flatten_descr', @@ -52,6 +47,7 @@ def recursive_fill_fields(input, output): Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', np.int64), ('B', np.float64)]) >>> b = np.zeros((3,), dtype=a.dtype) @@ -84,6 +80,7 @@ def _get_fieldspec(dtype): Examples -------- + >>> import numpy as np >>> dt = np.dtype([(('a', 'A'), np.int64), ('b', np.double, 3)]) >>> dt.descr [(('a', 'A'), '>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> rfn.get_names(np.empty((1,), dtype=[('A', int)]).dtype) ('A',) @@ -148,6 +146,7 @@ def get_names_flat(adtype): Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn 
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A', int)]).dtype) is None False @@ -173,6 +172,7 @@ def flatten_descr(ndtype): Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> ndtype = np.dtype([('a', '>> rfn.flatten_descr(ndtype) @@ -236,10 +236,11 @@ def get_fieldstructure(adtype, lastname=None, parents=None,): lastname : optional Last processed field name (used internally during recursion). parents : dictionary - Dictionary of parent fields (used interbally during recursion). + Dictionary of parent fields (used internally during recursion). Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> ndtype = np.dtype([('A', int), ... ('B', [('BA', int), @@ -261,7 +262,7 @@ def get_fieldstructure(adtype, lastname=None, parents=None,): parents[name] = [] parents.update(get_fieldstructure(current, name, parents)) else: - lastparent = [_ for _ in (parents.get(lastname, []) or [])] + lastparent = list(parents.get(lastname, []) or []) if lastparent: lastparent.append(lastname) elif lastname: @@ -325,18 +326,18 @@ def _izip_records(seqarrays, fill_value=None, flatten=True): def _fix_output(output, usemask=True, asrecarray=False): """ - Private function: return a recarray, a ndarray, a MaskedArray + Private function: return a recarray, an ndarray, a MaskedArray or a MaskedRecords depending on the input parameters """ - if not isinstance(output, MaskedArray): + if not isinstance(output, ma.MaskedArray): usemask = False if usemask: if asrecarray: - output = output.view(MaskedRecords) + output = output.view(mrec.MaskedRecords) else: output = ma.filled(output) if asrecarray: - output = output.view(recarray) + output = output.view(np.recarray) return output @@ -380,6 +381,7 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False, Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.]))) array([( 1, 
10.), ( 2, 20.), (-1, 30.)], @@ -411,7 +413,7 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False, if (len(seqarrays) == 1): seqarrays = np.asanyarray(seqarrays[0]) # Do we have a single ndarray as input ? - if isinstance(seqarrays, (ndarray, np.void)): + if isinstance(seqarrays, (np.ndarray, np.void)): seqdtype = seqarrays.dtype # Make sure we have named fields if seqdtype.names is None: @@ -422,13 +424,13 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False, # Find what type of array we must return if usemask: if asrecarray: - seqtype = MaskedRecords + seqtype = mrec.MaskedRecords else: - seqtype = MaskedArray + seqtype = ma.MaskedArray elif asrecarray: - seqtype = recarray + seqtype = np.recarray else: - seqtype = ndarray + seqtype = np.ndarray return seqarrays.view(dtype=seqdtype, type=seqtype) else: seqarrays = (seqarrays,) @@ -452,8 +454,8 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False, mask = ma.getmaskarray(a).ravel() # Get the filling value (if needed) if nbmissing: - fval = _check_fill_value(fill_value, a.dtype) - if isinstance(fval, (ndarray, np.void)): + fval = mrec._check_fill_value(fill_value, a.dtype) + if isinstance(fval, (np.ndarray, np.void)): if len(fval.dtype) == 1: fval = fval.item()[0] fmsk = True @@ -471,15 +473,15 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False, output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength), mask=list(_izip_records(seqmask, flatten=flatten))) if asrecarray: - output = output.view(MaskedRecords) + output = output.view(mrec.MaskedRecords) else: # Same as before, without the mask we don't need... 
for (a, n) in zip(seqarrays, sizes): nbmissing = (maxlength - n) data = a.ravel().__array__() if nbmissing: - fval = _check_fill_value(fill_value, a.dtype) - if isinstance(fval, (ndarray, np.void)): + fval = mrec._check_fill_value(fill_value, a.dtype) + if isinstance(fval, (np.ndarray, np.void)): if len(fval.dtype) == 1: fval = fval.item()[0] else: @@ -490,7 +492,7 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False, output = np.fromiter(tuple(_izip_records(seqdata, flatten=flatten)), dtype=newdtype, count=maxlength) if asrecarray: - output = output.view(recarray) + output = output.view(np.recarray) # And we're done... return output @@ -506,10 +508,6 @@ def drop_fields(base, drop_names, usemask=True, asrecarray=False): Nested fields are supported. - .. versionchanged:: 1.18.0 - `drop_fields` returns an array with 0 fields if all fields are dropped, - rather than returning ``None`` as it did previously. - Parameters ---------- base : array @@ -526,6 +524,7 @@ def drop_fields(base, drop_names, usemask=True, asrecarray=False): Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], ... dtype=[('a', np.int64), ('b', [('ba', np.double), ('bb', np.int64)])]) @@ -621,6 +620,7 @@ def rename_fields(base, namemapper): Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))], ... 
dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])]) @@ -805,6 +805,7 @@ def repack_fields(a, align=False, recurse=False): Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> def print_offsets(d): @@ -883,7 +884,7 @@ def count_elem(dt): # optimization: avoid list comprehension if no subarray fields.extend(subfields) else: - fields.extend([(d, c, o + i*size) for d, c, o in subfields]) + fields.extend([(d, c, o + i * size) for d, c, o in subfields]) return fields def _common_stride(offsets, counts, itemsize): @@ -975,6 +976,7 @@ def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'): Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)]) @@ -993,7 +995,7 @@ def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'): >>> np.mean(rfn.structured_to_unstructured(b[['x', 'z']]), axis=-1) array([ 3. , 5.5, 9. , 11. 
]) - """ + """ # noqa: E501 if arr.dtype.names is None: raise ValueError('arr must be a structured array') @@ -1006,7 +1008,7 @@ def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'): raise NotImplementedError("arr with no fields is not supported") dts, counts, offsets = zip(*fields) - names = ['f{}'.format(n) for n in range(n_fields)] + names = [f'f{n}' for n in range(n_fields)] if dtype is None: out_dtype = np.result_type(*[dt.base for dt in dts]) @@ -1110,6 +1112,7 @@ def unstructured_to_structured(arr, dtype=None, names=None, align=False, Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> dt = np.dtype([('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)]) @@ -1124,7 +1127,7 @@ def unstructured_to_structured(arr, dtype=None, names=None, align=False, (10, (11., 12), [13., 14.]), (15, (16., 17), [18., 19.])], dtype=[('a', '>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)], @@ -1294,6 +1298,7 @@ def require_fields(array, required_dtype): Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> a = np.ones(4, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')]) @@ -1329,7 +1334,7 @@ def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False, Dictionary mapping field names to the corresponding default values. usemask : {True, False}, optional Whether to return a MaskedArray (or MaskedRecords is - `asrecarray==True`) or a ndarray. + `asrecarray==True`) or an ndarray. asrecarray : {False, True}, optional Whether to return a recarray (or MaskedRecords if `usemask==True`) or just a flexible-type ndarray. 
@@ -1338,6 +1343,7 @@ def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False, Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> x = np.array([1, 2,]) >>> rfn.stack_arrays(x) is x @@ -1356,7 +1362,7 @@ def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False, dtype=[('A', 'S3'), ('B', ' '%s'" % - (cdtype, fdtype)) + raise TypeError(f"Incompatible type '{cdtype}' <> '{fdtype}'") # Only one field: use concatenate if len(newdescr) == 1: output = ma.concatenate(seqarrays) @@ -1392,7 +1397,7 @@ def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False, for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]): names = a.dtype.names if names is None: - output['f%i' % len(seen)][i:j] = a + output[f'f{len(seen)}'][i:j] = a else: for name in n: output[name][i:j] = a[name] @@ -1427,6 +1432,7 @@ def find_duplicates(a, key=None, ignoremask=True, return_index=False): Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> ndtype = [('a', int)] >>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3], @@ -1508,7 +1514,7 @@ def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2', Dictionary mapping field names to the corresponding default values. usemask : {True, False}, optional Whether to return a MaskedArray (or MaskedRecords is - `asrecarray==True`) or a ndarray. + `asrecarray==True`) or an ndarray. asrecarray : {False, True}, optional Whether to return a recarray (or MaskedRecords if `usemask==True`) or just a flexible-type ndarray. 
@@ -1526,29 +1532,27 @@ def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2', # Check jointype if jointype not in ('inner', 'outer', 'leftouter'): raise ValueError( - "The 'jointype' argument should be in 'inner', " - "'outer' or 'leftouter' (got '%s' instead)" % jointype - ) + "The 'jointype' argument should be in 'inner', " + f"'outer' or 'leftouter' (got '{jointype}' instead)" + ) # If we have a single key, put it in a tuple if isinstance(key, str): key = (key,) # Check the keys if len(set(key)) != len(key): - dup = next(x for n,x in enumerate(key) if x in key[n+1:]) - raise ValueError("duplicate join key %r" % dup) + dup = next(x for n, x in enumerate(key) if x in key[n + 1:]) + raise ValueError(f"duplicate join key {dup!r}") for name in key: if name not in r1.dtype.names: - raise ValueError('r1 does not have key field %r' % name) + raise ValueError(f'r1 does not have key field {name!r}') if name not in r2.dtype.names: - raise ValueError('r2 does not have key field %r' % name) + raise ValueError(f'r2 does not have key field {name!r}') # Make sure we work with ravelled arrays r1 = r1.ravel() r2 = r2.ravel() - # Fixme: nb2 below is never used. Commenting out for pyflakes. - # (nb1, nb2) = (len(r1), len(r2)) - nb1 = len(r1) + (nb1, nb2) = (len(r1), len(r2)) (r1names, r2names) = (r1.dtype.names, r2.dtype.names) # Check the names for collision @@ -1560,7 +1564,7 @@ def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2', # Make temporary arrays of just the keys # (use order of keys in `r1` for back-compatibility) - key1 = [ n for n in r1names if n in key ] + key1 = [n for n in r1names if n in key] r1k = _keep_fields(r1, key1) r2k = _keep_fields(r2, key1) @@ -1603,7 +1607,7 @@ def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2', for fname, fdtype in _get_fieldspec(r2.dtype): # Have we seen the current name already ? 
# we need to rebuild this list every time - names = list(name for name, dtype in ndtype) + names = [name for name, dtype in ndtype] try: nameidx = names.index(fname) except ValueError: @@ -1648,7 +1652,7 @@ def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2', current[-r2spc:] = selected[r2cmn:] # Sort and finalize the output output.sort(order=key) - kwargs = dict(usemask=usemask, asrecarray=asrecarray) + kwargs = {'usemask': usemask, 'asrecarray': asrecarray} return _fix_output(_fix_defaults(output, defaults), **kwargs) @@ -1669,6 +1673,9 @@ def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2', -------- join_by : equivalent function """ - kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix, - defaults=defaults, usemask=False, asrecarray=True) + kwargs = {'jointype': jointype, 'r1postfix': r1postfix, 'r2postfix': r2postfix, + 'defaults': defaults, 'usemask': False, 'asrecarray': True} return join_by(key, r1, r2, **kwargs) + + +del array_function_dispatch diff --git a/numpy/lib/recfunctions.pyi b/numpy/lib/recfunctions.pyi new file mode 100644 index 000000000000..3ba63bdb91dd --- /dev/null +++ b/numpy/lib/recfunctions.pyi @@ -0,0 +1,436 @@ +from _typeshed import Incomplete +from collections.abc import Callable, Iterable, Mapping, Sequence +from typing import Any, Literal, overload + +import numpy as np +import numpy.typing as npt +from numpy import _CastingKind +from numpy._typing import _AnyShape, _DTypeLike, _DTypeLikeVoid, _Shape +from numpy.ma.mrecords import MaskedRecords + +__all__ = [ + "append_fields", + "apply_along_fields", + "assign_fields_by_name", + "drop_fields", + "find_duplicates", + "flatten_descr", + "get_fieldstructure", + "get_names", + "get_names_flat", + "join_by", + "merge_arrays", + "rec_append_fields", + "rec_drop_fields", + "rec_join", + "recursive_fill_fields", + "rename_fields", + "repack_fields", + "require_fields", + "stack_arrays", + "structured_to_unstructured", + 
"unstructured_to_structured", +] + +type _OneOrMany[T] = T | Iterable[T] +type _BuiltinSequence[T] = tuple[T, ...] | list[T] + +type _NestedNames = tuple[str | _NestedNames, ...] +type _NonVoid = np.bool | np.number | np.character | np.datetime64 | np.timedelta64 | np.object_ +type _NonVoidDType = np.dtype[_NonVoid] | np.dtypes.StringDType + +type _JoinType = Literal["inner", "outer", "leftouter"] + +### + +def recursive_fill_fields[VoidArrayT: npt.NDArray[np.void]](input: npt.NDArray[np.void], output: VoidArrayT) -> VoidArrayT: ... + +# +def get_names(adtype: np.dtype[np.void]) -> _NestedNames: ... +def get_names_flat(adtype: np.dtype[np.void]) -> tuple[str, ...]: ... + +# +@overload +def flatten_descr[NonVoidDTypeT: _NonVoidDType](ndtype: NonVoidDTypeT) -> tuple[tuple[Literal[""], NonVoidDTypeT]]: ... +@overload +def flatten_descr(ndtype: np.dtype[np.void]) -> tuple[tuple[str, np.dtype]]: ... + +# +def get_fieldstructure( + adtype: np.dtype[np.void], + lastname: str | None = None, + parents: dict[str, list[str]] | None = None, +) -> dict[str, list[str]]: ... + +# +@overload +def merge_arrays[ShapeT: _Shape]( + seqarrays: Sequence[np.ndarray[ShapeT, np.dtype]] | np.ndarray[ShapeT, np.dtype], + fill_value: float = -1, + flatten: bool = False, + usemask: bool = False, + asrecarray: bool = False, +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... +@overload +def merge_arrays( + seqarrays: Sequence[npt.ArrayLike] | np.void, + fill_value: float = -1, + flatten: bool = False, + usemask: bool = False, + asrecarray: bool = False, +) -> np.recarray[_AnyShape, np.dtype[np.void]]: ... + +# +@overload +def drop_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], + drop_names: str | Iterable[str], + usemask: bool = True, + asrecarray: Literal[False] = False, +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... 
+@overload +def drop_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], + drop_names: str | Iterable[str], + usemask: bool, + asrecarray: Literal[True], +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... +@overload +def drop_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], + drop_names: str | Iterable[str], + usemask: bool = True, + *, + asrecarray: Literal[True], +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... + +# +@overload +def rename_fields[ShapeT: _Shape]( + base: MaskedRecords[ShapeT, np.dtype[np.void]], + namemapper: Mapping[str, str], +) -> MaskedRecords[ShapeT, np.dtype[np.void]]: ... +@overload +def rename_fields[ShapeT: _Shape]( + base: np.ma.MaskedArray[ShapeT, np.dtype[np.void]], + namemapper: Mapping[str, str], +) -> np.ma.MaskedArray[ShapeT, np.dtype[np.void]]: ... +@overload +def rename_fields[ShapeT: _Shape]( + base: np.recarray[ShapeT, np.dtype[np.void]], + namemapper: Mapping[str, str], +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... +@overload +def rename_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], + namemapper: Mapping[str, str], +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... + +# +@overload +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype] | None, + fill_value: int, + usemask: Literal[False], + asrecarray: Literal[False] = False, +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype] | None = None, + fill_value: int = -1, + *, + usemask: Literal[False], + asrecarray: Literal[False] = False, +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... 
+@overload +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype] | None, + fill_value: int, + usemask: Literal[False], + asrecarray: Literal[True], +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype] | None = None, + fill_value: int = -1, + *, + usemask: Literal[False], + asrecarray: Literal[True], +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype] | None = None, + fill_value: int = -1, + usemask: Literal[True] = True, + asrecarray: Literal[False] = False, +) -> np.ma.MaskedArray[ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype] | None, + fill_value: int, + usemask: Literal[True], + asrecarray: Literal[True], +) -> MaskedRecords[ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype] | None = None, + fill_value: int = -1, + usemask: Literal[True] = True, + *, + asrecarray: Literal[True], +) -> MaskedRecords[ShapeT, np.dtype[np.void]]: ... + +# +def rec_drop_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], + drop_names: str | Iterable[str], +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... 
+ +# +def rec_append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype] | None = None, +) -> np.ma.MaskedArray[ShapeT, np.dtype[np.void]]: ... + +# TODO(jorenham): Stop passing `void` directly once structured dtypes are implemented, +# e.g. using a `TypeVar` with constraints. +# https://github.com/numpy/numtype/issues/92 +@overload +def repack_fields[DTypeT: np.dtype](a: DTypeT, align: bool = False, recurse: bool = False) -> DTypeT: ... +@overload +def repack_fields[ScalarT: np.generic](a: ScalarT, align: bool = False, recurse: bool = False) -> ScalarT: ... +@overload +def repack_fields[ArrayT: np.ndarray](a: ArrayT, align: bool = False, recurse: bool = False) -> ArrayT: ... + +# TODO(jorenham): Attempt shape-typing (return type has ndim == arr.ndim + 1) +@overload +def structured_to_unstructured[ScalarT: np.generic]( + arr: npt.NDArray[np.void], + dtype: _DTypeLike[ScalarT], + copy: bool = False, + casting: _CastingKind = "unsafe", +) -> npt.NDArray[ScalarT]: ... +@overload +def structured_to_unstructured( + arr: npt.NDArray[np.void], + dtype: npt.DTypeLike | None = None, + copy: bool = False, + casting: _CastingKind = "unsafe", +) -> npt.NDArray[Any]: ... + +# +@overload +def unstructured_to_structured( + arr: npt.NDArray[Any], + dtype: npt.DTypeLike, + names: None = None, + align: bool = False, + copy: bool = False, + casting: str = "unsafe", +) -> npt.NDArray[np.void]: ... +@overload +def unstructured_to_structured( + arr: npt.NDArray[Any], + dtype: None, + names: _OneOrMany[str], + align: bool = False, + copy: bool = False, + casting: str = "unsafe", +) -> npt.NDArray[np.void]: ... +@overload +def unstructured_to_structured( + arr: npt.NDArray[Any], + dtype: None = None, + *, + names: _OneOrMany[str], + align: bool = False, + copy: bool = False, + casting: str = "unsafe", +) -> npt.NDArray[np.void]: ... 
+ +# +def apply_along_fields[ShapeT: _Shape]( + func: Callable[[np.ndarray[ShapeT]], np.ndarray], + arr: np.ndarray[ShapeT, np.dtype[np.void]], +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... + +# +def assign_fields_by_name(dst: npt.NDArray[np.void], src: npt.NDArray[np.void], zero_unassigned: bool = True) -> None: ... + +# +def require_fields[ShapeT: _Shape]( + array: np.ndarray[ShapeT, np.dtype[np.void]], + required_dtype: _DTypeLikeVoid, +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... + +# TODO(jorenham): Attempt shape-typing +@overload +def stack_arrays[ArrayT: np.ndarray]( + arrays: ArrayT, + defaults: Mapping[str, object] | None = None, + usemask: bool = True, + asrecarray: bool = False, + autoconvert: bool = False, +) -> ArrayT: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None, + usemask: Literal[False], + asrecarray: Literal[False] = False, + autoconvert: bool = False, +) -> npt.NDArray[np.void]: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None = None, + *, + usemask: Literal[False], + asrecarray: Literal[False] = False, + autoconvert: bool = False, +) -> npt.NDArray[np.void]: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None = None, + *, + usemask: Literal[False], + asrecarray: Literal[True], + autoconvert: bool = False, +) -> np.recarray[_AnyShape, np.dtype[np.void]]: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None = None, + usemask: Literal[True] = True, + asrecarray: Literal[False] = False, + autoconvert: bool = False, +) -> np.ma.MaskedArray[_AnyShape, np.dtype[np.void]]: ... 
+@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None, + usemask: Literal[True], + asrecarray: Literal[True], + autoconvert: bool = False, +) -> MaskedRecords[_AnyShape, np.dtype[np.void]]: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None = None, + usemask: Literal[True] = True, + *, + asrecarray: Literal[True], + autoconvert: bool = False, +) -> MaskedRecords[_AnyShape, np.dtype[np.void]]: ... + +# +@overload +def find_duplicates[ShapeT: _Shape]( + a: np.ma.MaskedArray[ShapeT, np.dtype[np.void]], + key: str | None = None, + ignoremask: bool = True, + return_index: Literal[False] = False, +) -> np.ma.MaskedArray[ShapeT, np.dtype[np.void]]: ... +@overload +def find_duplicates[ShapeT: _Shape]( + a: np.ma.MaskedArray[ShapeT, np.dtype[np.void]], + key: str | None, + ignoremask: bool, + return_index: Literal[True], +) -> tuple[np.ma.MaskedArray[ShapeT, np.dtype[np.void]], np.ndarray[ShapeT, np.dtype[np.int_]]]: ... +@overload +def find_duplicates[ShapeT: _Shape]( + a: np.ma.MaskedArray[ShapeT, np.dtype[np.void]], + key: str | None = None, + ignoremask: bool = True, + *, + return_index: Literal[True], +) -> tuple[np.ma.MaskedArray[ShapeT, np.dtype[np.void]], np.ndarray[ShapeT, np.dtype[np.int_]]]: ... + +# +@overload +def join_by( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, + *, + usemask: Literal[False], + asrecarray: Literal[False] = False, +) -> np.ndarray[tuple[int], np.dtype[np.void]]: ... 
+@overload +def join_by( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, + *, + usemask: Literal[False], + asrecarray: Literal[True], +) -> np.recarray[tuple[int], np.dtype[np.void]]: ... +@overload +def join_by( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, + usemask: Literal[True] = True, + asrecarray: Literal[False] = False, +) -> np.ma.MaskedArray[tuple[int], np.dtype[np.void]]: ... +@overload +def join_by( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, + usemask: Literal[True] = True, + *, + asrecarray: Literal[True], +) -> MaskedRecords[tuple[int], np.dtype[np.void]]: ... + +# +def rec_join( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, +) -> np.recarray[tuple[int], np.dtype[np.void]]: ... 
diff --git a/numpy/lib/scimath.py b/numpy/lib/scimath.py index ffd05ef9f364..fb6824d9bb89 100644 --- a/numpy/lib/scimath.py +++ b/numpy/lib/scimath.py @@ -1,4 +1,13 @@ -from ._scimath_impl import ( - __all__, __doc__, sqrt, log, log2, logn, log10, power, arccos, arcsin, - arctanh +from ._scimath_impl import ( # noqa: F401 + __all__, + __doc__, + arccos, + arcsin, + arctanh, + log, + log2, + log10, + logn, + power, + sqrt, ) diff --git a/numpy/lib/scimath.pyi b/numpy/lib/scimath.pyi index a149cdc34644..ef2772a33a47 100644 --- a/numpy/lib/scimath.pyi +++ b/numpy/lib/scimath.pyi @@ -1,12 +1,12 @@ from ._scimath_impl import ( __all__ as __all__, - sqrt as sqrt, - log as log, - log2 as log2, - logn as logn, - log10 as log10, - power as power, - arccos as arccos, - arcsin as arcsin, + arccos as arccos, + arcsin as arcsin, arctanh as arctanh, + log as log, + log2 as log2, + log10 as log10, + logn as logn, + power as power, + sqrt as sqrt, ) diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py index ba567be0c823..721a548f4d48 100644 --- a/numpy/lib/stride_tricks.py +++ b/numpy/lib/stride_tricks.py @@ -1,3 +1 @@ -from ._stride_tricks_impl import ( - __doc__, as_strided, sliding_window_view -) +from ._stride_tricks_impl import __doc__, as_strided, sliding_window_view # noqa: F401 diff --git a/numpy/lib/tests/test__datasource.py b/numpy/lib/tests/test__datasource.py index c8149abc30c4..dece6823f09f 100644 --- a/numpy/lib/tests/test__datasource.py +++ b/numpy/lib/tests/test__datasource.py @@ -1,15 +1,15 @@ import os -import pytest -from tempfile import mkdtemp, mkstemp, NamedTemporaryFile +import urllib.request as urllib_request from shutil import rmtree +from tempfile import NamedTemporaryFile, mkdtemp, mkstemp +from urllib.error import URLError +from urllib.parse import urlparse + +import pytest import numpy.lib._datasource as datasource from numpy.testing import assert_, assert_equal, assert_raises -import urllib.request as urllib_request -from urllib.parse 
import urlparse -from urllib.error import URLError - def urlopen_stub(url, data=None): '''Stub to replace urlopen for testing.''' @@ -19,6 +19,7 @@ def urlopen_stub(url, data=None): else: raise URLError('Name or service not known') + # setup and teardown old_urlopen = None @@ -33,6 +34,7 @@ def setup_module(): def teardown_module(): urllib_request.urlopen = old_urlopen + # A valid website for more robust testing http_path = 'http://www.google.com/' http_file = 'index.html' @@ -63,11 +65,11 @@ def invalid_textfile(filedir): def valid_httpurl(): - return http_path+http_file + return http_path + http_file def invalid_httpurl(): - return http_fakepath+http_fakefile + return http_fakepath + http_fakefile def valid_baseurl(): @@ -87,246 +89,222 @@ def invalid_httpfile(): class TestDataSourceOpen: - def setup_method(self): - self.tmpdir = mkdtemp() - self.ds = datasource.DataSource(self.tmpdir) - - def teardown_method(self): - rmtree(self.tmpdir) - del self.ds - - def test_ValidHTTP(self): - fh = self.ds.open(valid_httpurl()) + def test_ValidHTTP(self, tmp_path): + ds = datasource.DataSource(tmp_path) + fh = ds.open(valid_httpurl()) assert_(fh) fh.close() - def test_InvalidHTTP(self): + def test_InvalidHTTP(self, tmp_path): + ds = datasource.DataSource(tmp_path) url = invalid_httpurl() - assert_raises(OSError, self.ds.open, url) + assert_raises(OSError, ds.open, url) try: - self.ds.open(url) + ds.open(url) except OSError as e: # Regression test for bug fixed in r4342. 
assert_(e.errno is None) - def test_InvalidHTTPCacheURLError(self): - assert_raises(URLError, self.ds._cache, invalid_httpurl()) + def test_InvalidHTTPCacheURLError(self, tmp_path): + ds = datasource.DataSource(tmp_path) + assert_raises(URLError, ds._cache, invalid_httpurl()) - def test_ValidFile(self): - local_file = valid_textfile(self.tmpdir) - fh = self.ds.open(local_file) + def test_ValidFile(self, tmp_path): + ds = datasource.DataSource(tmp_path) + local_file = valid_textfile(tmp_path) + fh = ds.open(local_file) assert_(fh) fh.close() - def test_InvalidFile(self): - invalid_file = invalid_textfile(self.tmpdir) - assert_raises(OSError, self.ds.open, invalid_file) + def test_InvalidFile(self, tmp_path): + ds = datasource.DataSource(tmp_path) + invalid_file = invalid_textfile(tmp_path) + assert_raises(OSError, ds.open, invalid_file) - def test_ValidGzipFile(self): + def test_ValidGzipFile(self, tmp_path): try: import gzip except ImportError: # We don't have the gzip capabilities to test. pytest.skip() # Test datasource's internal file_opener for Gzip files. - filepath = os.path.join(self.tmpdir, 'foobar.txt.gz') + ds = datasource.DataSource(tmp_path) + filepath = os.path.join(tmp_path, 'foobar.txt.gz') fp = gzip.open(filepath, 'w') fp.write(magic_line) fp.close() - fp = self.ds.open(filepath) + fp = ds.open(filepath) result = fp.readline() fp.close() assert_equal(magic_line, result) - def test_ValidBz2File(self): + def test_ValidBz2File(self, tmp_path): try: import bz2 except ImportError: # We don't have the bz2 capabilities to test. pytest.skip() # Test datasource's internal file_opener for BZip2 files. 
- filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2') + ds = datasource.DataSource(tmp_path) + filepath = os.path.join(tmp_path, 'foobar.txt.bz2') fp = bz2.BZ2File(filepath, 'w') fp.write(magic_line) fp.close() - fp = self.ds.open(filepath) + fp = ds.open(filepath) result = fp.readline() fp.close() assert_equal(magic_line, result) class TestDataSourceExists: - def setup_method(self): - self.tmpdir = mkdtemp() - self.ds = datasource.DataSource(self.tmpdir) - - def teardown_method(self): - rmtree(self.tmpdir) - del self.ds + def test_ValidHTTP(self, tmp_path): + ds = datasource.DataSource(tmp_path) + assert_(ds.exists(valid_httpurl())) - def test_ValidHTTP(self): - assert_(self.ds.exists(valid_httpurl())) + def test_InvalidHTTP(self, tmp_path): + ds = datasource.DataSource(tmp_path) + assert_equal(ds.exists(invalid_httpurl()), False) - def test_InvalidHTTP(self): - assert_equal(self.ds.exists(invalid_httpurl()), False) - - def test_ValidFile(self): + def test_ValidFile(self, tmp_path): # Test valid file in destpath - tmpfile = valid_textfile(self.tmpdir) - assert_(self.ds.exists(tmpfile)) + ds = datasource.DataSource(tmp_path) + tmpfile = valid_textfile(tmp_path) + assert_(ds.exists(tmpfile)) # Test valid local file not in destpath localdir = mkdtemp() tmpfile = valid_textfile(localdir) - assert_(self.ds.exists(tmpfile)) + assert_(ds.exists(tmpfile)) rmtree(localdir) - def test_InvalidFile(self): - tmpfile = invalid_textfile(self.tmpdir) - assert_equal(self.ds.exists(tmpfile), False) + def test_InvalidFile(self, tmp_path): + ds = datasource.DataSource(tmp_path) + tmpfile = invalid_textfile(tmp_path) + assert_equal(ds.exists(tmpfile), False) class TestDataSourceAbspath: - def setup_method(self): - self.tmpdir = os.path.abspath(mkdtemp()) - self.ds = datasource.DataSource(self.tmpdir) - - def teardown_method(self): - rmtree(self.tmpdir) - del self.ds - - def test_ValidHTTP(self): - scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl()) - local_path = 
os.path.join(self.tmpdir, netloc, + def test_ValidHTTP(self, tmp_path): + ds = datasource.DataSource(tmp_path) + _, netloc, upath, _, _, _ = urlparse(valid_httpurl()) + local_path = os.path.join(tmp_path, netloc, upath.strip(os.sep).strip('/')) - assert_equal(local_path, self.ds.abspath(valid_httpurl())) + assert_equal(local_path, ds.abspath(valid_httpurl())) - def test_ValidFile(self): - tmpfile = valid_textfile(self.tmpdir) + def test_ValidFile(self, tmp_path): + ds = datasource.DataSource(tmp_path) + tmpfile = valid_textfile(tmp_path) tmpfilename = os.path.split(tmpfile)[-1] # Test with filename only - assert_equal(tmpfile, self.ds.abspath(tmpfilename)) + assert_equal(tmpfile, ds.abspath(tmpfilename)) # Test filename with complete path - assert_equal(tmpfile, self.ds.abspath(tmpfile)) + assert_equal(tmpfile, ds.abspath(tmpfile)) - def test_InvalidHTTP(self): - scheme, netloc, upath, pms, qry, frg = urlparse(invalid_httpurl()) - invalidhttp = os.path.join(self.tmpdir, netloc, + def test_InvalidHTTP(self, tmp_path): + ds = datasource.DataSource(tmp_path) + _, netloc, upath, _, _, _ = urlparse(invalid_httpurl()) + invalidhttp = os.path.join(tmp_path, netloc, upath.strip(os.sep).strip('/')) - assert_(invalidhttp != self.ds.abspath(valid_httpurl())) + assert_(invalidhttp != ds.abspath(valid_httpurl())) - def test_InvalidFile(self): - invalidfile = valid_textfile(self.tmpdir) - tmpfile = valid_textfile(self.tmpdir) + def test_InvalidFile(self, tmp_path): + ds = datasource.DataSource(tmp_path) + invalidfile = valid_textfile(tmp_path) + tmpfile = valid_textfile(tmp_path) tmpfilename = os.path.split(tmpfile)[-1] # Test with filename only - assert_(invalidfile != self.ds.abspath(tmpfilename)) + assert_(invalidfile != ds.abspath(tmpfilename)) # Test filename with complete path - assert_(invalidfile != self.ds.abspath(tmpfile)) + assert_(invalidfile != ds.abspath(tmpfile)) - def test_sandboxing(self): - tmpfile = valid_textfile(self.tmpdir) + def test_sandboxing(self, 
tmp_path): + ds = datasource.DataSource(tmp_path) + tmpfile = valid_textfile(tmp_path) tmpfilename = os.path.split(tmpfile)[-1] - tmp_path = lambda x: os.path.abspath(self.ds.abspath(x)) + path = lambda x: os.path.abspath(ds.abspath(x)) - assert_(tmp_path(valid_httpurl()).startswith(self.tmpdir)) - assert_(tmp_path(invalid_httpurl()).startswith(self.tmpdir)) - assert_(tmp_path(tmpfile).startswith(self.tmpdir)) - assert_(tmp_path(tmpfilename).startswith(self.tmpdir)) + assert_(path(valid_httpurl()).startswith(str(tmp_path))) + assert_(path(invalid_httpurl()).startswith(str(tmp_path))) + assert_(path(tmpfile).startswith(str(tmp_path))) + assert_(path(tmpfilename).startswith(str(tmp_path))) for fn in malicious_files: - assert_(tmp_path(http_path+fn).startswith(self.tmpdir)) - assert_(tmp_path(fn).startswith(self.tmpdir)) + assert_(path(http_path + fn).startswith(str(tmp_path))) + assert_(path(fn).startswith(str(tmp_path))) - def test_windows_os_sep(self): + def test_windows_os_sep(self, tmp_path): orig_os_sep = os.sep try: os.sep = '\\' - self.test_ValidHTTP() - self.test_ValidFile() - self.test_InvalidHTTP() - self.test_InvalidFile() - self.test_sandboxing() + self.test_ValidHTTP(tmp_path) + self.test_ValidFile(tmp_path) + self.test_InvalidHTTP(tmp_path) + self.test_InvalidFile(tmp_path) + self.test_sandboxing(tmp_path) finally: os.sep = orig_os_sep class TestRepositoryAbspath: - def setup_method(self): - self.tmpdir = os.path.abspath(mkdtemp()) - self.repos = datasource.Repository(valid_baseurl(), self.tmpdir) - - def teardown_method(self): - rmtree(self.tmpdir) - del self.repos - - def test_ValidHTTP(self): - scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl()) - local_path = os.path.join(self.repos._destpath, netloc, + def test_ValidHTTP(self, tmp_path): + repos = datasource.Repository(valid_baseurl(), tmp_path) + _, netloc, upath, _, _, _ = urlparse(valid_httpurl()) + local_path = os.path.join(repos._destpath, netloc, upath.strip(os.sep).strip('/')) - 
filepath = self.repos.abspath(valid_httpfile()) + filepath = repos.abspath(valid_httpfile()) assert_equal(local_path, filepath) - def test_sandboxing(self): - tmp_path = lambda x: os.path.abspath(self.repos.abspath(x)) - assert_(tmp_path(valid_httpfile()).startswith(self.tmpdir)) + def test_sandboxing(self, tmp_path): + repos = datasource.Repository(valid_baseurl(), tmp_path) + path = lambda x: os.path.abspath(repos.abspath(x)) + assert_(path(valid_httpfile()).startswith(str(tmp_path))) for fn in malicious_files: - assert_(tmp_path(http_path+fn).startswith(self.tmpdir)) - assert_(tmp_path(fn).startswith(self.tmpdir)) + assert_(path(http_path + fn).startswith(str(tmp_path))) + assert_(path(fn).startswith(str(tmp_path))) - def test_windows_os_sep(self): + def test_windows_os_sep(self, tmp_path): orig_os_sep = os.sep try: os.sep = '\\' - self.test_ValidHTTP() - self.test_sandboxing() + self.test_ValidHTTP(tmp_path) + self.test_sandboxing(tmp_path) finally: os.sep = orig_os_sep class TestRepositoryExists: - def setup_method(self): - self.tmpdir = mkdtemp() - self.repos = datasource.Repository(valid_baseurl(), self.tmpdir) - - def teardown_method(self): - rmtree(self.tmpdir) - del self.repos - - def test_ValidFile(self): + def test_ValidFile(self, tmp_path): # Create local temp file - tmpfile = valid_textfile(self.tmpdir) - assert_(self.repos.exists(tmpfile)) + repos = datasource.Repository(valid_baseurl(), tmp_path) + tmpfile = valid_textfile(tmp_path) + assert_(repos.exists(tmpfile)) - def test_InvalidFile(self): - tmpfile = invalid_textfile(self.tmpdir) - assert_equal(self.repos.exists(tmpfile), False) + def test_InvalidFile(self, tmp_path): + repos = datasource.Repository(valid_baseurl(), tmp_path) + tmpfile = invalid_textfile(tmp_path) + assert_equal(repos.exists(tmpfile), False) - def test_RemoveHTTPFile(self): - assert_(self.repos.exists(valid_httpurl())) + def test_RemoveHTTPFile(self, tmp_path): + repos = datasource.Repository(valid_baseurl(), tmp_path) + 
assert_(repos.exists(valid_httpurl())) - def test_CachedHTTPFile(self): + def test_CachedHTTPFile(self, tmp_path): localfile = valid_httpurl() - # Create a locally cached temp file with an URL based + # Create a locally cached temp file with a URL based # directory structure. This is similar to what Repository.open # would do. - scheme, netloc, upath, pms, qry, frg = urlparse(localfile) - local_path = os.path.join(self.repos._destpath, netloc) + repos = datasource.Repository(valid_baseurl(), tmp_path) + _, netloc, _, _, _, _ = urlparse(localfile) + local_path = os.path.join(repos._destpath, netloc) os.mkdir(local_path, 0o0700) tmpfile = valid_textfile(local_path) - assert_(self.repos.exists(tmpfile)) + assert_(repos.exists(tmpfile)) class TestOpenFunc: - def setup_method(self): - self.tmpdir = mkdtemp() - - def teardown_method(self): - rmtree(self.tmpdir) - - def test_DataSourceOpen(self): - local_file = valid_textfile(self.tmpdir) + def test_DataSourceOpen(self, tmp_path): + local_file = valid_textfile(tmp_path) # Test case where destpath is passed in - fp = datasource.open(local_file, destpath=self.tmpdir) + fp = datasource.open(local_file, destpath=tmp_path) assert_(fp) fp.close() # Test case where default destpath is used diff --git a/numpy/lib/tests/test__iotools.py b/numpy/lib/tests/test__iotools.py index 396d4147c6c5..2555c4b86f6c 100644 --- a/numpy/lib/tests/test__iotools.py +++ b/numpy/lib/tests/test__iotools.py @@ -1,14 +1,18 @@ import time from datetime import date +import pytest + import numpy as np -from numpy.testing import ( - assert_, assert_equal, assert_allclose, assert_raises, - ) from numpy.lib._iotools import ( - LineSplitter, NameValidator, StringConverter, - has_nested_fields, easy_dtype, flatten_dtype - ) + LineSplitter, + NameValidator, + StringConverter, + easy_dtype, + flatten_dtype, + has_nested_fields, +) +from numpy.testing import assert_, assert_allclose, assert_equal, assert_raises class TestLineSplitter: @@ -198,6 +202,7 @@ def 
test_missing(self): except ValueError: pass + @pytest.mark.thread_unsafe(reason="monkeypatches StringConverter") def test_upgrademapper(self): "Tests updatemapper" dateparser = _bytes_to_date diff --git a/numpy/lib/tests/test__version.py b/numpy/lib/tests/test__version.py index e6d41ad93932..6e6a34a241ac 100644 --- a/numpy/lib/tests/test__version.py +++ b/numpy/lib/tests/test__version.py @@ -1,8 +1,8 @@ """Tests for the NumpyVersion class. """ -from numpy.testing import assert_, assert_raises from numpy.lib import NumpyVersion +from numpy.testing import assert_, assert_raises def test_main_versions(): diff --git a/numpy/lib/tests/test_array_utils.py b/numpy/lib/tests/test_array_utils.py index 3d8b2bd4616e..55b9d283b15b 100644 --- a/numpy/lib/tests/test_array_utils.py +++ b/numpy/lib/tests/test_array_utils.py @@ -1,5 +1,4 @@ import numpy as np - from numpy.lib import array_utils from numpy.testing import assert_equal diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py index 8723f4d9ba73..14383e743e47 100644 --- a/numpy/lib/tests/test_arraypad.py +++ b/numpy/lib/tests/test_arraypad.py @@ -4,9 +4,8 @@ import pytest import numpy as np -from numpy.testing import assert_array_equal, assert_allclose, assert_equal from numpy.lib._arraypad_impl import _as_pairs - +from numpy.testing import assert_allclose, assert_array_equal, assert_equal _numeric_dtypes = ( np._core.sctypes["uint"] @@ -235,11 +234,11 @@ def test_check_minimum_1(self): a = np.arange(100) a = np.pad(a, (25, 20), 'minimum') b = np.array( - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, + [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, @@ -259,11 +258,11 @@ def test_check_minimum_2(self): a = np.arange(100) + 2 a = np.pad(a, (25, 20), 
'minimum') b = np.array( - [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, + [ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, - 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, @@ -551,16 +550,16 @@ def test_check_constant_float(self): test = np.pad(arr, (1, 2), mode='constant', constant_values=1.1) expected = np.array( - [[ 1, 1, 1, 1, 1, 1, 1, 1, 1], + [[1, 1, 1, 1, 1, 1, 1, 1, 1], - [ 1, 0, 1, 2, 3, 4, 5, 1, 1], - [ 1, 6, 7, 8, 9, 10, 11, 1, 1], - [ 1, 12, 13, 14, 15, 16, 17, 1, 1], - [ 1, 18, 19, 20, 21, 22, 23, 1, 1], - [ 1, 24, 25, 26, 27, 28, 29, 1, 1], + [1, 0, 1, 2, 3, 4, 5, 1, 1], + [1, 6, 7, 8, 9, 10, 11, 1, 1], + [1, 12, 13, 14, 15, 16, 17, 1, 1], + [1, 18, 19, 20, 21, 22, 23, 1, 1], + [1, 24, 25, 26, 27, 28, 29, 1, 1], - [ 1, 1, 1, 1, 1, 1, 1, 1, 1], - [ 1, 1, 1, 1, 1, 1, 1, 1, 1]] + [1, 1, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1, 1]] ) assert_allclose(test, expected) @@ -572,16 +571,16 @@ def test_check_constant_float2(self): test = np.pad(arr_float, ((1, 2), (1, 2)), mode='constant', constant_values=1.1) expected = np.array( - [[ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1], + [[1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1], - [ 1.1, 0. , 1. , 2. , 3. , 4. , 5. , 1.1, 1.1], - [ 1.1, 6. , 7. , 8. , 9. , 10. , 11. , 1.1, 1.1], - [ 1.1, 12. , 13. , 14. , 15. , 16. , 17. , 1.1, 1.1], - [ 1.1, 18. , 19. , 20. , 21. , 22. , 23. , 1.1, 1.1], - [ 1.1, 24. , 25. , 26. , 27. , 28. , 29. , 1.1, 1.1], + [1.1, 0. , 1. , 2. , 3. , 4. , 5. , 1.1, 1.1], # noqa: E203 + [1.1, 6. , 7. , 8. , 9. , 10. , 11. , 1.1, 1.1], # noqa: E203 + [1.1, 12. , 13. , 14. , 15. , 16. , 17. , 1.1, 1.1], # noqa: E203 + [1.1, 18. , 19. , 20. , 21. , 22. , 23. , 1.1, 1.1], # noqa: E203 + [1.1, 24. , 25. , 26. , 27. , 28. , 29. 
, 1.1, 1.1], # noqa: E203 - [ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1], - [ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1]] + [1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1], + [1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1]] ) assert_allclose(test, expected) @@ -614,15 +613,15 @@ def test_check_constant_odd_pad_amount(self): test = np.pad(arr, ((1,), (2,)), mode='constant', constant_values=3) expected = np.array( - [[ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3], + [[3, 3, 3, 3, 3, 3, 3, 3, 3, 3], - [ 3, 3, 0, 1, 2, 3, 4, 5, 3, 3], - [ 3, 3, 6, 7, 8, 9, 10, 11, 3, 3], - [ 3, 3, 12, 13, 14, 15, 16, 17, 3, 3], - [ 3, 3, 18, 19, 20, 21, 22, 23, 3, 3], - [ 3, 3, 24, 25, 26, 27, 28, 29, 3, 3], + [3, 3, 0, 1, 2, 3, 4, 5, 3, 3], + [3, 3, 6, 7, 8, 9, 10, 11, 3, 3], + [3, 3, 12, 13, 14, 15, 16, 17, 3, 3], + [3, 3, 18, 19, 20, 21, 22, 23, 3, 3], + [3, 3, 24, 25, 26, 27, 28, 29, 3, 3], - [ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]] + [3, 3, 3, 3, 3, 3, 3, 3, 3, 3]] ) assert_allclose(test, expected) @@ -868,6 +867,42 @@ def test_check_03(self): b = np.array([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3]) assert_array_equal(a, b) + def test_check_04(self): + a = np.pad([1, 2, 3], [1, 10], 'reflect') + b = np.array([2, 1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3, 2, 1]) + assert_array_equal(a, b) + + def test_check_05(self): + a = np.pad([1, 2, 3, 4], [45, 10], 'reflect') + b = np.array( + [4, 3, 2, 1, 2, 3, 4, 3, 2, 1, + 2, 3, 4, 3, 2, 1, 2, 3, 4, 3, + 2, 1, 2, 3, 4, 3, 2, 1, 2, 3, + 4, 3, 2, 1, 2, 3, 4, 3, 2, 1, + 2, 3, 4, 3, 2, 1, 2, 3, 4, 3, + 2, 1, 2, 3, 4, 3, 2, 1, 2]) + assert_array_equal(a, b) + + def test_check_06(self): + a = np.pad([1, 2, 3, 4], [15, 2], 'symmetric') + b = np.array( + [2, 3, 4, 4, 3, 2, 1, 1, 2, 3, + 4, 4, 3, 2, 1, 1, 2, 3, 4, 4, + 3] + ) + assert_array_equal(a, b) + + def test_check_07(self): + a = np.pad([1, 2, 3, 4, 5, 6], [45, 3], 'symmetric') + b = np.array( + [4, 5, 6, 6, 5, 4, 3, 2, 1, 1, + 2, 3, 4, 5, 6, 6, 5, 4, 3, 2, + 1, 1, 2, 3, 4, 5, 6, 6, 5, 4, + 3, 2, 1, 1, 2, 3, 4, 5, 6, 6, + 5, 4, 3, 2, 
1, 1, 2, 3, 4, 5, + 6, 6, 5, 4]) + assert_array_equal(a, b) + class TestEmptyArray: """Check how padding behaves on arrays with an empty dimension.""" @@ -1139,7 +1174,7 @@ def test_repeated_wrapping(self): a = np.arange(5) b = np.pad(a, (0, 12), mode="wrap") assert_array_equal(np.r_[a, a, a, a][:-3], b) - + def test_repeated_wrapping_multiple_origin(self): """ Assert that 'wrap' pads only with multiples of the original area if @@ -1338,7 +1373,7 @@ def test_kwargs(mode): np.pad([1, 2, 3], 1, mode, **allowed) # Test if prohibited keyword arguments of other modes raise an error for key, value in not_allowed.items(): - match = "unsupported keyword arguments for mode '{}'".format(mode) + match = f"unsupported keyword arguments for mode '{mode}'" with pytest.raises(ValueError, match=match): np.pad([1, 2, 3], 1, mode, **{key: value}) @@ -1350,7 +1385,7 @@ def test_constant_zero_default(): @pytest.mark.parametrize("mode", [1, "const", object(), None, True, False]) def test_unsupported_mode(mode): - match= "mode '{}' is not supported".format(mode) + match = f"mode '{mode}' is not supported" with pytest.raises(ValueError, match=match): np.pad([1, 2, 3], 4, mode=mode) @@ -1378,3 +1413,15 @@ def test_dtype_persistence(dtype, mode): arr = np.zeros((3, 2, 1), dtype=dtype) result = np.pad(arr, 1, mode=mode) assert result.dtype == dtype + + +@pytest.mark.parametrize("input_shape, pad_width, expected_shape", [ + ((3, 4, 5), {-2: (1, 3)}, (3, 4 + 1 + 3, 5)), + ((3, 4, 5), {0: (5, 2)}, (3 + 5 + 2, 4, 5)), + ((3, 4, 5), {0: (5, 2), -1: (3, 4)}, (3 + 5 + 2, 4, 5 + 3 + 4)), + ((3, 4, 5), {1: 5}, (3, 4 + 2 * 5, 5)), +]) +def test_pad_dict_pad_width(input_shape, pad_width, expected_shape): + a = np.zeros(input_shape) + result = np.pad(a, pad_width) + assert result.shape == expected_shape diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index 33b43b57a381..9faf670be96d 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ 
b/numpy/lib/tests/test_arraysetops.py @@ -1,15 +1,18 @@ """Test functions for 1D array set operations. """ -import numpy as np +import pytest -from numpy import ( - ediff1d, intersect1d, setxor1d, union1d, setdiff1d, unique, isin - ) +import numpy as np +from numpy import ediff1d, intersect1d, isin, setdiff1d, setxor1d, union1d, unique +from numpy.dtypes import StringDType from numpy.exceptions import AxisError -from numpy.testing import (assert_array_equal, assert_equal, - assert_raises, assert_raises_regex) -import pytest +from numpy.testing import ( + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) class TestSetOps: @@ -170,7 +173,7 @@ def test_ediff1d_forbidden_type_casts(self, ary, prepend, append, expected): # specifically, raise an appropriate # Exception when attempting to append or # prepend with an incompatible type - msg = 'dtype of `{}` must be compatible'.format(expected) + msg = f'dtype of `{expected}` must be compatible' with assert_raises_regex(TypeError, msg): ediff1d(ary=ary, to_end=append, @@ -270,7 +273,7 @@ def assert_isin_equal(a, b): assert_isin_equal(empty_array, empty_array) @pytest.mark.parametrize("kind", [None, "sort", "table"]) - def test_isin(self, kind): + def test_isin_additional(self, kind): # we use two different sizes for the b array here to test the # two different paths in isin(). 
for mult in (1, 10): @@ -440,6 +443,22 @@ def test_isin_mixed_dtype(self, dtype1, dtype2, kind): else: assert_array_equal(isin(ar1, ar2, kind=kind), expected) + @pytest.mark.parametrize("data", [ + np.array([2**63, 2**63 + 1], dtype=np.uint64), + np.array([-2**62, -2**62 - 1], dtype=np.int64), + ]) + @pytest.mark.parametrize("kind", [None, "sort", "table"]) + def test_isin_mixed_huge_vals(self, kind, data): + """Test values outside intp range (negative ones if 32bit system)""" + query = data[1] + res = np.isin(data, query, kind=kind) + assert_array_equal(res, [False, True]) + # Also check that nothing weird happens for values can't possibly + # in range. + data = data.astype(np.int32) # clearly different values + res = np.isin(data, query, kind=kind) + assert_array_equal(res, [False, False]) + @pytest.mark.parametrize("kind", [None, "sort", "table"]) def test_isin_mixed_boolean(self, kind): """Test that isin works as expected for bool/int input.""" @@ -455,21 +474,21 @@ def test_isin_mixed_boolean(self, kind): def test_isin_first_array_is_object(self): ar1 = [None] - ar2 = np.array([1]*10) + ar2 = np.array([1] * 10) expected = np.array([False]) result = np.isin(ar1, ar2) assert_array_equal(result, expected) def test_isin_second_array_is_object(self): ar1 = 1 - ar2 = np.array([None]*10) + ar2 = np.array([None] * 10) expected = np.array([False]) result = np.isin(ar1, ar2) assert_array_equal(result, expected) def test_isin_both_arrays_are_object(self): ar1 = [None] - ar2 = np.array([None]*10) + ar2 = np.array([None] * 10) expected = np.array([True]) result = np.isin(ar1, ar2) assert_array_equal(result, expected) @@ -479,7 +498,7 @@ def test_isin_both_arrays_have_structured_dtype(self): # and a field of dtype `object` allowing for arbitrary Python objects dt = np.dtype([('field1', int), ('field2', object)]) ar1 = np.array([(1, None)], dtype=dt) - ar2 = np.array([(1, None)]*10, dtype=dt) + ar2 = np.array([(1, None)] * 10, dtype=dt) expected = np.array([True]) result = 
np.isin(ar1, ar2) assert_array_equal(result, expected) @@ -612,72 +631,86 @@ def test_manyways(self): class TestUnique: + def check_all(self, a, b, i1, i2, c, dt): + base_msg = 'check {0} failed for type {1}' + + msg = base_msg.format('values', dt) + v = unique(a) + assert_array_equal(v, b, msg) + assert type(v) is type(b) + + msg = base_msg.format('return_index', dt) + v, j = unique(a, True, False, False) + assert_array_equal(v, b, msg) + assert_array_equal(j, i1, msg) + assert type(v) is type(b) + + msg = base_msg.format('return_inverse', dt) + v, j = unique(a, False, True, False) + assert_array_equal(v, b, msg) + assert_array_equal(j, i2, msg) + assert type(v) is type(b) + + msg = base_msg.format('return_counts', dt) + v, j = unique(a, False, False, True) + assert_array_equal(v, b, msg) + assert_array_equal(j, c, msg) + assert type(v) is type(b) + + msg = base_msg.format('return_index and return_inverse', dt) + v, j1, j2 = unique(a, True, True, False) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i1, msg) + assert_array_equal(j2, i2, msg) + assert type(v) is type(b) + + msg = base_msg.format('return_index and return_counts', dt) + v, j1, j2 = unique(a, True, False, True) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i1, msg) + assert_array_equal(j2, c, msg) + assert type(v) is type(b) + + msg = base_msg.format('return_inverse and return_counts', dt) + v, j1, j2 = unique(a, False, True, True) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i2, msg) + assert_array_equal(j2, c, msg) + assert type(v) is type(b) + + msg = base_msg.format(('return_index, return_inverse ' + 'and return_counts'), dt) + v, j1, j2, j3 = unique(a, True, True, True) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i1, msg) + assert_array_equal(j2, i2, msg) + assert_array_equal(j3, c, msg) + assert type(v) is type(b) + + def get_types(self): + + types = [] + types.extend(np.typecodes['AllInteger']) + types.extend(np.typecodes['AllFloat']) + 
types.append('datetime64[D]') + types.append('timedelta64[D]') + return types + + @pytest.mark.filterwarnings(r"ignore:\w+ chararray \w+:DeprecationWarning") def test_unique_1d(self): - def check_all(a, b, i1, i2, c, dt): - base_msg = 'check {0} failed for type {1}' - - msg = base_msg.format('values', dt) - v = unique(a) - assert_array_equal(v, b, msg) - - msg = base_msg.format('return_index', dt) - v, j = unique(a, True, False, False) - assert_array_equal(v, b, msg) - assert_array_equal(j, i1, msg) - - msg = base_msg.format('return_inverse', dt) - v, j = unique(a, False, True, False) - assert_array_equal(v, b, msg) - assert_array_equal(j, i2, msg) - - msg = base_msg.format('return_counts', dt) - v, j = unique(a, False, False, True) - assert_array_equal(v, b, msg) - assert_array_equal(j, c, msg) - - msg = base_msg.format('return_index and return_inverse', dt) - v, j1, j2 = unique(a, True, True, False) - assert_array_equal(v, b, msg) - assert_array_equal(j1, i1, msg) - assert_array_equal(j2, i2, msg) - - msg = base_msg.format('return_index and return_counts', dt) - v, j1, j2 = unique(a, True, False, True) - assert_array_equal(v, b, msg) - assert_array_equal(j1, i1, msg) - assert_array_equal(j2, c, msg) - - msg = base_msg.format('return_inverse and return_counts', dt) - v, j1, j2 = unique(a, False, True, True) - assert_array_equal(v, b, msg) - assert_array_equal(j1, i2, msg) - assert_array_equal(j2, c, msg) - - msg = base_msg.format(('return_index, return_inverse ' - 'and return_counts'), dt) - v, j1, j2, j3 = unique(a, True, True, True) - assert_array_equal(v, b, msg) - assert_array_equal(j1, i1, msg) - assert_array_equal(j2, i2, msg) - assert_array_equal(j3, c, msg) - - a = [5, 7, 1, 2, 1, 5, 7]*10 + a = [5, 7, 1, 2, 1, 5, 7] * 10 b = [1, 2, 5, 7] i1 = [2, 3, 0, 1] - i2 = [2, 3, 0, 1, 0, 2, 3]*10 + i2 = [2, 3, 0, 1, 0, 2, 3] * 10 c = np.multiply([2, 1, 2, 2], 10) # test for numeric arrays - types = [] - types.extend(np.typecodes['AllInteger']) - 
types.extend(np.typecodes['AllFloat']) - types.append('datetime64[D]') - types.append('timedelta64[D]') + types = self.get_types() for dt in types: aa = np.array(a, dt) bb = np.array(b, dt) - check_all(aa, bb, i1, i2, c, dt) + self.check_all(aa, bb, i1, i2, c, dt) # test for object arrays dt = 'O' @@ -685,17 +718,20 @@ def check_all(a, b, i1, i2, c, dt): aa[:] = a bb = np.empty(len(b), dt) bb[:] = b - check_all(aa, bb, i1, i2, c, dt) + self.check_all(aa, bb, i1, i2, c, dt) # test for structured arrays dt = [('', 'i'), ('', 'i')] aa = np.array(list(zip(a, a)), dt) bb = np.array(list(zip(b, b)), dt) - check_all(aa, bb, i1, i2, c, dt) + self.check_all(aa, bb, i1, i2, c, dt) # test for ticket #2799 aa = [1. + 0.j, 1 - 1.j, 1] - assert_array_equal(np.unique(aa), [1. - 1.j, 1. + 0.j]) + assert_array_equal( + np.sort(np.unique(aa)), + [1. - 1.j, 1.], + ) # test for ticket #4785 a = [(1, 2), (1, 2), (2, 3)] @@ -730,18 +766,20 @@ def check_all(a, b, i1, i2, c, dt): ua_idx = [2, 0, 1] ua_inv = [1, 2, 0, 2] ua_cnt = [1, 1, 2] - assert_equal(np.unique(a), ua) + # order of unique values is not guaranteed + assert_equal(np.sort(np.unique(a)), np.sort(ua)) assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) # test for ticket 2111 - complex - a = [2.0-1j, np.nan, 1.0+1j, complex(0.0, np.nan), complex(1.0, np.nan)] - ua = [1.0+1j, 2.0-1j, complex(0.0, np.nan)] + a = [2.0 - 1j, np.nan, 1.0 + 1j, complex(0.0, np.nan), complex(1.0, np.nan)] + ua = [1.0 + 1j, 2.0 - 1j, complex(0.0, np.nan)] ua_idx = [2, 0, 3] ua_inv = [1, 2, 0, 2, 2] ua_cnt = [1, 1, 3] - assert_equal(np.unique(a), ua) + # order of unique values is not guaranteed + assert_equal(np.sort(np.unique(a)), np.sort(ua)) assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) assert_equal(np.unique(a, return_counts=True), (ua, 
ua_cnt)) @@ -781,6 +819,232 @@ def check_all(a, b, i1, i2, c, dt): assert_equal(np.unique(all_nans, return_inverse=True), (ua, ua_inv)) assert_equal(np.unique(all_nans, return_counts=True), (ua, ua_cnt)) + def test_unique_zero_sized(self): + # test for zero-sized arrays + types = self.get_types() + types.extend('SU') + for dt in types: + a = np.array([], dt) + b = np.array([], dt) + i1 = np.array([], np.int64) + i2 = np.array([], np.int64) + c = np.array([], np.int64) + self.check_all(a, b, i1, i2, c, dt) + + def test_unique_subclass(self): + class Subclass(np.ndarray): + pass + + i1 = [2, 3, 0, 1] + i2 = [2, 3, 0, 1, 0, 2, 3] * 10 + c = np.multiply([2, 1, 2, 2], 10) + + # test for numeric arrays + types = self.get_types() + for dt in types: + a = np.array([5, 7, 1, 2, 1, 5, 7] * 10, dtype=dt) + b = np.array([1, 2, 5, 7], dtype=dt) + aa = Subclass(a.shape, dtype=dt, buffer=a) + bb = Subclass(b.shape, dtype=dt, buffer=b) + self.check_all(aa, bb, i1, i2, c, dt) + + def test_unique_byte_string_hash_based(self): + # test for byte string arrays + arr = ['apple', 'banana', 'apple', 'cherry', 'date', 'banana', 'fig', 'grape'] + unq_sorted = ['apple', 'banana', 'cherry', 'date', 'fig', 'grape'] + + a1 = unique(arr, sorted=False) + # the result varies depending on the impl of std::unordered_set, + # so we check them by sorting + assert_array_equal(sorted(a1.tolist()), unq_sorted) + + def test_unique_unicode_string_hash_based(self): + # test for unicode string arrays + arr = [ + 'cafÊ', 'cafe', 'cafÊ', 'naïve', 'naive', + 'rÊsumÊ', 'naïve', 'resume', 'rÊsumÊ', + ] + unq_sorted = ['cafe', 'cafÊ', 'naive', 'naïve', 'resume', 'rÊsumÊ'] + + a1 = unique(arr, sorted=False) + # the result varies depending on the impl of std::unordered_set, + # so we check them by sorting + assert_array_equal(sorted(a1.tolist()), unq_sorted) + + def test_unique_vstring_hash_based_equal_nan(self): + # test for unicode and nullable string arrays (equal_nan=True) + a = np.array([ + # short strings + 
'straße', + None, + 'strasse', + 'straße', + None, + 'niÃąo', + 'nino', + 'Êlève', + 'eleve', + 'niÃąo', + 'Êlève', + # medium strings + 'b' * 20, + 'ß' * 30, + None, + 'Ê' * 30, + 'e' * 20, + 'ß' * 30, + 'n' * 30, + 'Ãą' * 20, + None, + 'e' * 20, + 'Ãą' * 20, + # long strings + 'b' * 300, + 'ß' * 400, + None, + 'Ê' * 400, + 'e' * 300, + 'ß' * 400, + 'n' * 400, + 'Ãą' * 300, + None, + 'e' * 300, + 'Ãą' * 300, + ], + dtype=StringDType(na_object=None) + ) + unq_sorted_wo_none = [ + 'b' * 20, + 'b' * 300, + 'e' * 20, + 'e' * 300, + 'eleve', + 'nino', + 'niÃąo', + 'n' * 30, + 'n' * 400, + 'strasse', + 'straße', + 'ß' * 30, + 'ß' * 400, + 'Êlève', + 'Ê' * 30, + 'Ê' * 400, + 'Ãą' * 20, + 'Ãą' * 300, + ] + + a1 = unique(a, sorted=False, equal_nan=True) + # the result varies depending on the impl of std::unordered_set, + # so we check them by sorting + + # a1 should have exactly one None + count_none = sum(x is None for x in a1) + assert_equal(count_none, 1) + + a1_wo_none = sorted(x for x in a1 if x is not None) + assert_array_equal(a1_wo_none, unq_sorted_wo_none) + + def test_unique_vstring_hash_based_not_equal_nan(self): + # test for unicode and nullable string arrays (equal_nan=False) + a = np.array([ + # short strings + 'straße', + None, + 'strasse', + 'straße', + None, + 'niÃąo', + 'nino', + 'Êlève', + 'eleve', + 'niÃąo', + 'Êlève', + # medium strings + 'b' * 20, + 'ß' * 30, + None, + 'Ê' * 30, + 'e' * 20, + 'ß' * 30, + 'n' * 30, + 'Ãą' * 20, + None, + 'e' * 20, + 'Ãą' * 20, + # long strings + 'b' * 300, + 'ß' * 400, + None, + 'Ê' * 400, + 'e' * 300, + 'ß' * 400, + 'n' * 400, + 'Ãą' * 300, + None, + 'e' * 300, + 'Ãą' * 300, + ], + dtype=StringDType(na_object=None) + ) + unq_sorted_wo_none = [ + 'b' * 20, + 'b' * 300, + 'e' * 20, + 'e' * 300, + 'eleve', + 'nino', + 'niÃąo', + 'n' * 30, + 'n' * 400, + 'strasse', + 'straße', + 'ß' * 30, + 'ß' * 400, + 'Êlève', + 'Ê' * 30, + 'Ê' * 400, + 'Ãą' * 20, + 'Ãą' * 300, + ] + + a1 = unique(a, sorted=False, equal_nan=False) + # 
the result varies depending on the impl of std::unordered_set, + # so we check them by sorting + + # a1 should have exactly one None + count_none = sum(x is None for x in a1) + assert_equal(count_none, 6) + + a1_wo_none = sorted(x for x in a1 if x is not None) + assert_array_equal(a1_wo_none, unq_sorted_wo_none) + + def test_unique_vstring_errors(self): + a = np.array( + [ + 'apple', 'banana', 'apple', None, 'cherry', + 'date', 'banana', 'fig', None, 'grape', + ] * 2, + dtype=StringDType(na_object=None) + ) + assert_raises(ValueError, unique, a, equal_nan=False) + + @pytest.mark.parametrize("arg", ["return_index", "return_inverse", "return_counts"]) + def test_unsupported_hash_based(self, arg): + """These currently never use the hash-based solution. However, + it seems easier to just allow it. + + When the hash-based solution is added, this test should fail and be + replaced with something more comprehensive. + """ + a = np.array([1, 5, 2, 3, 4, 8, 199, 1, 3, 5]) + + res_not_sorted = np.unique([1, 1], sorted=False, **{arg: True}) + res_sorted = np.unique([1, 1], sorted=True, **{arg: True}) + # The following should fail without first sorting `res_not_sorted`. 
+ for arr, expected in zip(res_not_sorted, res_sorted): + assert_array_equal(arr, expected) + def test_unique_axis_errors(self): assert_raises(TypeError, self._run_axis_tests, object) assert_raises(TypeError, self._run_axis_tests, @@ -828,11 +1092,8 @@ def test_unique_1d_with_axis(self, axis): def test_unique_inverse_with_axis(self, axis): x = np.array([[4, 4, 3], [2, 2, 1], [2, 2, 1], [4, 4, 3]]) uniq, inv = unique(x, return_inverse=True, axis=axis) - assert_equal(inv.ndim, x.ndim) - if axis is None: - assert_array_equal(x, np.take(uniq, inv)) - else: - assert_array_equal(x, np.take_along_axis(uniq, inv, axis=axis)) + assert_equal(inv.ndim, x.ndim if axis is None else 1) + assert_array_equal(x, np.take(uniq, inv, axis=axis)) def test_unique_axis_zeros(self): # issue 15559 @@ -844,7 +1105,7 @@ def test_unique_axis_zeros(self): assert_equal(uniq.dtype, single_zero.dtype) assert_array_equal(uniq, np.empty(shape=(1, 0))) assert_array_equal(idx, np.array([0])) - assert_array_equal(inv, np.array([[0], [0]])) + assert_array_equal(inv, np.array([0, 0])) assert_array_equal(cnt, np.array([2])) # there's 0 elements of shape (2,) along axis 1 @@ -854,7 +1115,7 @@ def test_unique_axis_zeros(self): assert_equal(uniq.dtype, single_zero.dtype) assert_array_equal(uniq, np.empty(shape=(2, 0))) assert_array_equal(idx, np.array([])) - assert_array_equal(inv, np.empty((1, 0))) + assert_array_equal(inv, np.array([])) assert_array_equal(cnt, np.array([])) # test a "complicated" shape @@ -923,7 +1184,7 @@ def _run_axis_tests(self, dtype): msg = "Unique's return_index=True failed with axis=0" assert_array_equal(data[idx], uniq, msg) msg = "Unique's return_inverse=True failed with axis=0" - assert_array_equal(np.take_along_axis(uniq, inv, axis=0), data) + assert_array_equal(np.take(uniq, inv, axis=0), data) msg = "Unique's return_counts=True failed with axis=0" assert_array_equal(cnt, np.array([2, 2]), msg) @@ -932,7 +1193,7 @@ def _run_axis_tests(self, dtype): msg = "Unique's 
return_index=True failed with axis=1" assert_array_equal(data[:, idx], uniq) msg = "Unique's return_inverse=True failed with axis=1" - assert_array_equal(np.take_along_axis(uniq, inv, axis=1), data) + assert_array_equal(np.take(uniq, inv, axis=1), data) msg = "Unique's return_counts=True failed with axis=1" assert_array_equal(cnt, np.array([2, 1, 1]), msg) @@ -945,7 +1206,13 @@ def test_unique_nanequals(self): assert_array_equal(not_unq, np.array([1, np.nan, np.nan, np.nan])) def test_unique_array_api_functions(self): - arr = np.array([np.nan, 1, 4, 1, 3, 4, np.nan, 5, 1]) + arr = np.array( + [ + np.nan, 1.0, 0.0, 4.0, -np.nan, + -0.0, 1.0, 3.0, 4.0, np.nan, + 5.0, -0.0, 1.0, -np.nan, 0.0, + ], + ) for res_unique_array_api, res_unique in [ ( @@ -972,8 +1239,14 @@ def test_unique_array_api_functions(self): ) ]: assert len(res_unique_array_api) == len(res_unique) + if not isinstance(res_unique_array_api, tuple): + res_unique_array_api = (res_unique_array_api,) + if not isinstance(res_unique, tuple): + res_unique = (res_unique,) + for actual, expected in zip(res_unique_array_api, res_unique): - assert_array_equal(actual, expected) + # Order of output is not guaranteed + assert_equal(np.sort(actual), np.sort(expected)) def test_unique_inverse_shape(self): # Regression test for https://github.com/numpy/numpy/issues/25552 @@ -985,3 +1258,47 @@ def test_unique_inverse_shape(self): assert_array_equal(expected_values, result.values) assert_array_equal(expected_inverse, result.inverse_indices) assert_array_equal(arr, result.values[result.inverse_indices]) + + @pytest.mark.parametrize( + 'data', + [[[1, 1, 1], + [1, 1, 1]], + [1, 3, 2], + 1], + ) + @pytest.mark.parametrize('transpose', [False, True]) + @pytest.mark.parametrize('dtype', [np.int32, np.float64]) + def test_unique_with_matrix(self, data, transpose, dtype): + mat = np.matrix(data).astype(dtype) + if transpose: + mat = mat.T + u = np.unique(mat) + expected = np.unique(np.asarray(mat)) + assert_array_equal(u, 
expected, strict=True) + + def test_unique_axis0_equal_nan_on_1d_array(self): + # Test Issue #29336 + arr1d = np.array([np.nan, 0, 0, np.nan]) + expected = np.array([0., np.nan]) + result = np.unique(arr1d, axis=0, equal_nan=True) + assert_array_equal(result, expected) + + def test_unique_axis_minus1_eq_on_1d_array(self): + arr1d = np.array([np.nan, 0, 0, np.nan]) + expected = np.array([0., np.nan]) + result = np.unique(arr1d, axis=-1, equal_nan=True) + assert_array_equal(result, expected) + + def test_unique_axis_float_raises_typeerror(self): + arr1d = np.array([np.nan, 0, 0, np.nan]) + with pytest.raises(TypeError, match="integer argument expected"): + np.unique(arr1d, axis=0.0, equal_nan=False) + + @pytest.mark.parametrize('dt', [np.dtype('F'), np.dtype('D')]) + @pytest.mark.parametrize('values', [[complex(0.0, -1), complex(-0.0, -1), 0], + [-200, complex(-200, -0.0), -1], + [-25, 3, -5j, complex(-25, -0.0), 3j]]) + def test_unique_complex_signed_zeros(self, dt, values): + z = np.array(values, dtype=dt) + u = np.unique(z) + assert len(u) == len(values) - 1 diff --git a/numpy/lib/tests/test_arrayterator.py b/numpy/lib/tests/test_arrayterator.py index c00ed13d7f30..42a85e58ff62 100644 --- a/numpy/lib/tests/test_arrayterator.py +++ b/numpy/lib/tests/test_arrayterator.py @@ -1,9 +1,9 @@ -from operator import mul from functools import reduce +from operator import mul import numpy as np -from numpy.random import randint from numpy.lib import Arrayterator +from numpy.random import randint from numpy.testing import assert_ @@ -11,13 +11,12 @@ def test(): np.random.seed(np.arange(10)) # Create a random array - ndims = randint(5)+1 - shape = tuple(randint(10)+1 for dim in range(ndims)) + ndims = randint(5) + 1 + shape = tuple(randint(10) + 1 for dim in range(ndims)) els = reduce(mul, shape) - a = np.arange(els) - a.shape = shape + a = np.arange(els).reshape(shape) - buf_size = randint(2*els) + buf_size = randint(2 * els) b = Arrayterator(a, buf_size) # Check that each 
block has at most ``buf_size`` elements @@ -29,8 +28,8 @@ def test(): # Slice arrayterator start = [randint(dim) for dim in shape] - stop = [randint(dim)+1 for dim in shape] - step = [randint(dim)+1 for dim in shape] + stop = [randint(dim) + 1 for dim in shape] + step = [randint(dim) + 1 for dim in shape] slice_ = tuple(slice(*t) for t in zip(start, stop, step)) c = b[slice_] d = a[slice_] diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index 31352864b7e2..d9e70c118792 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -274,20 +274,24 @@ "v\x00{'descr': [('x', '>i4', (2,)), ('y', '>f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" "\x16\x02{'descr': [('x', '>i4', (2,)),\n ('Info',\n [('value', '>c16'),\n ('y2', '>f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '>c16', (2,)),\n ('y3', '>f8', (2,)),\n ('z3', '>u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '>U8'), ('Value', '>c16')]),\n ('y', '>f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" ''' -import sys import os +import sys import warnings -import pytest from io import BytesIO +import pytest + import numpy as np +from numpy.lib import format from numpy.testing import ( - assert_, assert_array_equal, assert_raises, assert_raises_regex, - assert_warns, IS_PYPY, IS_WASM - ) + IS_64BIT, + IS_WASM, + assert_, + assert_array_equal, + assert_raises, + assert_raises_regex, +) from numpy.testing._private.utils import requires_memory -from numpy.lib import format - # Generate some basic arrays to test with. 
scalars = [ @@ -378,9 +382,6 @@ ('z', 'u1')] NbufferT = [ - # x Info color info y z - # value y2 Info2 name z2 Name Value - # name value y3 z3 ([3, 2], (6j, 6., ('nn', [6j, 4j], [6., 4.], [1, 2]), 'NN', True), 'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8), ([4, 3], (7j, 7., ('oo', [7j, 5j], [7., 5.], [2, 1]), 'OO', False), @@ -396,7 +397,7 @@ ] -#BytesIO that reads a random number of bytes at a time +# BytesIO that reads a random number of bytes at a time class BytesIOSRandomSize(BytesIO): def read(self, size=None): import random @@ -423,12 +424,11 @@ def roundtrip_randsize(arr): def roundtrip_truncated(arr): f = BytesIO() format.write_array(f, arr) - #BytesIO is one byte short + # BytesIO is one byte short f2 = BytesIO(f.getvalue()[0:-1]) arr2 = format.read_array(f2) return arr2 - def assert_equal_(o1, o2): assert_(o1 == o2) @@ -451,6 +451,30 @@ def test_roundtrip_truncated(): if arr.dtype != object: assert_raises(ValueError, roundtrip_truncated, arr) +def test_file_truncated(tmp_path): + path = tmp_path / "a.npy" + for arr in basic_arrays: + if arr.dtype != object: + with open(path, 'wb') as f: + format.write_array(f, arr) + # truncate the file by one byte + with open(path, 'rb+') as f: + f.seek(-1, os.SEEK_END) + f.truncate() + with open(path, 'rb') as f: + with pytest.raises( + ValueError, + match=( + r"EOF: reading array header, " + r"expected (\d+) bytes got (\d+)" + ) if arr.size == 0 else ( + r"Failed to read all data for array\. " + r"Expected \(.*?\) = (\d+) elements, " + r"could only read (\d+) elements\. 
" + r"\(file seems not fully written\?\)" + ) + ): + _ = format.read_array(f) def test_long_str(): # check items larger than internal buffer size, gh-4027 @@ -508,7 +532,7 @@ def test_compressed_roundtrip(tmpdir): # nested struct-in-struct dt3 = np.dtype({'names': ['c', 'd'], 'formats': ['i4', dt2]}) # field with '' name -dt4 = np.dtype({'names': ['a', '', 'b'], 'formats': ['i4']*3}) +dt4 = np.dtype({'names': ['a', '', 'b'], 'formats': ['i4'] * 3}) # titles dt5 = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'i4'], 'offsets': [1, 6], 'titles': ['aa', 'bb']}) @@ -527,16 +551,8 @@ def test_load_padded_dtype(tmpdir, dt): assert_array_equal(arr, arr1) -@pytest.mark.skipif(sys.version_info >= (3, 12), reason="see gh-23988") -@pytest.mark.xfail(IS_WASM, reason="Emscripten NODEFS has a buggy dup") -def test_python2_python3_interoperability(): - fname = 'win64python2.npy' - path = os.path.join(os.path.dirname(__file__), 'data', fname) - with pytest.warns(UserWarning, match="Reading.*this warning\\."): - data = np.load(path) - assert_array_equal(data, np.ones(2)) - - +@pytest.mark.filterwarnings( + "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning") def test_pickle_python2_python3(): # Test that loading object arrays saved on Python 2 works both on # Python 2 and Python 3 and vice versa @@ -600,17 +616,18 @@ def test_pickle_disallow(tmpdir): allow_pickle=False) @pytest.mark.parametrize('dt', [ - np.dtype(np.dtype([('a', np.int8), - ('b', np.int16), - ('c', np.int32), - ], align=True), - (3,)), - np.dtype([('x', np.dtype({'names':['a','b'], - 'formats':['i1','i1'], - 'offsets':[0,4], - 'itemsize':8, + # Not testing a subarray only dtype, because it cannot be attached to an array + # (and would fail the test as of writing this.) 
+ np.dtype([('a', np.int8), + ('b', np.int16), + ('c', np.int32), + ], align=True), + np.dtype([('x', np.dtype(({'names': ['a', 'b'], + 'formats': ['i1', 'i1'], + 'offsets': [0, 4], + 'itemsize': 8, }, - (3,)), + (3,))), (4,), )]), np.dtype([('x', @@ -619,10 +636,10 @@ def test_pickle_disallow(tmpdir): )]), np.dtype([('x', np.dtype(( np.dtype(( - np.dtype({'names':['a','b'], - 'formats':['i1','i1'], - 'offsets':[0,4], - 'itemsize':8}), + np.dtype({'names': ['a', 'b'], + 'formats': ['i1', 'i1'], + 'offsets': [0, 4], + 'itemsize': 8}), (3,) )), (4,) @@ -634,10 +651,10 @@ def test_pickle_disallow(tmpdir): np.dtype(( np.dtype([ ('a', int), - ('b', np.dtype({'names':['a','b'], - 'formats':['i1','i1'], - 'offsets':[0,4], - 'itemsize':8})), + ('b', np.dtype({'names': ['a', 'b'], + 'formats': ['i1', 'i1'], + 'offsets': [0, 4], + 'itemsize': 8})), ]), (3,), )), @@ -647,7 +664,6 @@ def test_pickle_disallow(tmpdir): ))) ]), ]) - def test_descr_to_dtype(dt): dt1 = format.descr_to_dtype(dt.descr) assert_equal_(dt1, dt) @@ -658,7 +674,7 @@ def test_descr_to_dtype(dt): def test_version_2_0(): f = BytesIO() # requires more than 2 byte for header - dt = [(("%d" % i) * 100, float) for i in range(500)] + dt = [(f"{i}" * 100, float) for i in range(500)] d = np.ones(1000, dtype=dt) format.write_array(f, d, version=(2, 0)) @@ -683,10 +699,10 @@ def test_version_2_0(): @pytest.mark.skipif(IS_WASM, reason="memmap doesn't work correctly") def test_version_2_0_memmap(tmpdir): # requires more than 2 byte for header - dt = [(("%d" % i) * 100, float) for i in range(500)] + dt = [(f"{i}" * 100, float) for i in range(500)] d = np.ones(1000, dtype=dt) - tf1 = os.path.join(tmpdir, f'version2_01.npy') - tf2 = os.path.join(tmpdir, f'version2_02.npy') + tf1 = os.path.join(tmpdir, 'version2_01.npy') + tf2 = os.path.join(tmpdir, 'version2_02.npy') # 1.0 requested but data cannot be saved this way assert_raises(ValueError, format.open_memmap, tf1, mode='w+', dtype=d.dtype, @@ -713,12 +729,12 @@ def 
test_version_2_0_memmap(tmpdir): @pytest.mark.parametrize("mmap_mode", ["r", None]) def test_huge_header(tmpdir, mmap_mode): - f = os.path.join(tmpdir, f'large_header.npy') - arr = np.array(1, dtype="i,"*10000+"i") + f = os.path.join(tmpdir, 'large_header.npy') + arr = np.array(1, dtype="i," * 10000 + "i") with pytest.warns(UserWarning, match=".*format 2.0"): np.save(f, arr) - + with pytest.raises(ValueError, match="Header.*large"): np.load(f, mmap_mode=mmap_mode) @@ -732,12 +748,12 @@ def test_huge_header(tmpdir, mmap_mode): assert_array_equal(res, arr) def test_huge_header_npz(tmpdir): - f = os.path.join(tmpdir, f'large_header.npz') - arr = np.array(1, dtype="i,"*10000+"i") + f = os.path.join(tmpdir, 'large_header.npz') + arr = np.array(1, dtype="i," * 10000 + "i") with pytest.warns(UserWarning, match=".*format 2.0"): np.savez(f, arr=arr) - + # Only getting the array from the file actually reads it with pytest.raises(ValueError, match="Header.*large"): np.load(f)["arr"] @@ -838,11 +854,11 @@ def test_bad_magic_args(): def test_large_header(): s = BytesIO() - d = {'shape': tuple(), 'fortran_order': False, 'descr': ' int64) + dtypes = [np.int16, np.int32, np.int64, np.float16, np.float32, np.float64] + + for dtype in dtypes: + x = np.asarray([1, 2, 3], dtype=dtype) + y = np.vectorize(lambda x: x + x)(x) + assert x.dtype == y.dtype + def test_cache(self): # Ensure that vectorized func called exactly once per argument. 
_calls = [0] @@ -1654,6 +1823,21 @@ def test_otypes(self): x = np.arange(5) assert_array_equal(f(x), x) + def test_otypes_object_28624(self): + # with object otype, the vectorized function should return y + # wrapped into an object array + y = np.arange(3) + f = vectorize(lambda x: y, otypes=[object]) + + assert f(None).item() is y + assert f([None]).item() is y + + y = [1, 2, 3] + f = vectorize(lambda x: y, otypes=[object]) + + assert f(None).item() is y + assert f([None]).item() is y + def test_parse_gufunc_signature(self): assert_equal(nfb._parse_gufunc_signature('(x)->()'), ([('x',)], [()])) assert_equal(nfb._parse_gufunc_signature('(x,y)->()'), @@ -1833,13 +2017,13 @@ class subclass(np.ndarray): assert_equal(r, [[1., 3., 2.], [4., 6., 5.], [7., 9., 8.]]) # element-wise (ufunc) - mult = np.vectorize(lambda x, y: x*y) + mult = np.vectorize(lambda x, y: x * y) r = mult(m, v) assert_equal(type(r), subclass) assert_equal(r, m * v) def test_name(self): - #See gh-23021 + # gh-23021 @np.vectorize def f2(a, b): return a + b @@ -1886,7 +2070,7 @@ def f(x): def test_bad_input(self): with assert_raises(TypeError): - A = np.vectorize(pyfunc = 3) + A = np.vectorize(pyfunc=3) def test_no_keywords(self): with assert_raises(TypeError): @@ -1904,7 +2088,7 @@ def test_positional_regression_9477(self): def test_datetime_conversion(self): otype = "datetime64[ns]" - arr = np.array(['2024-01-01', '2024-01-02', '2024-01-03'], + arr = np.array(['2024-01-01', '2024-01-02', '2024-01-03'], dtype='datetime64[ns]') assert_array_equal(np.vectorize(lambda x: x, signature="(i)->(j)", otypes=[otype])(arr), arr) @@ -1929,12 +2113,15 @@ def unbound(*args): ('bound', A.iters), ('unbound', 0), ]) + @pytest.mark.thread_unsafe( + reason="test result depends on the reference count of a global object" + ) def test_frompyfunc_leaks(self, name, incr): # exposed in gh-11867 as np.vectorized, but the problem stems from # frompyfunc. 
# class.attribute = np.frompyfunc() creates a - # reference cycle if is a bound class method. It requires a - # gc collection cycle to break the cycle (on CPython 3) + # reference cycle if is a bound class method. + # It requires a gc collection cycle to break the cycle. import gc A_func = getattr(self.A, name) gc.disable() @@ -2201,7 +2388,7 @@ def test_ndim(self): wz[0] /= 2 wz[-1] /= 2 - q = x[:, None, None] + y[None,:, None] + z[None, None,:] + q = x[:, None, None] + y[None, :, None] + z[None, None, :] qx = (q * wx[:, None, None]).sum(axis=0) qy = (q * wy[None, :, None]).sum(axis=1) @@ -2256,6 +2443,35 @@ def test_array_like(self): assert_array_equal(y1, y2) assert_array_equal(y1, y3) + def test_bool_dtype(self): + x = (np.arange(4, dtype=np.uint8) % 2 == 1) + actual = sinc(x) + expected = sinc(x.astype(np.float64)) + assert_allclose(actual, expected) + assert actual.dtype == np.float64 + + @pytest.mark.parametrize('dtype', [np.uint8, np.int16, np.uint64]) + def test_int_dtypes(self, dtype): + x = np.arange(4, dtype=dtype) + actual = sinc(x) + expected = sinc(x.astype(np.float64)) + assert_allclose(actual, expected) + assert actual.dtype == np.float64 + + @pytest.mark.parametrize( + 'dtype', + [np.float16, np.float32, np.longdouble, np.complex64, np.complex128] + ) + def test_float_dtypes(self, dtype): + x = np.arange(4, dtype=dtype) + assert sinc(x).dtype == x.dtype + + def test_float16_underflow(self): + x = np.float16(0) + # before gh-27784, fill value for 0 in input would underflow float16, + # resulting in nan + assert_array_equal(sinc(x), np.asarray(1.0)) + class TestUnique: @@ -2309,7 +2525,7 @@ class TestCorrCoef: def test_non_array(self): assert_almost_equal(np.corrcoef([0, 1, 0], [1, 0, 1]), - [[1., -1.], [-1., 1.]]) + [[1., -1.], [-1., 1.]]) def test_simple(self): tgt1 = corrcoef(self.A) @@ -2320,28 +2536,6 @@ def test_simple(self): assert_almost_equal(tgt2, self.res2) assert_(np.all(np.abs(tgt2) <= 1.0)) - def test_ddof(self): - # ddof raises 
DeprecationWarning - with suppress_warnings() as sup: - warnings.simplefilter("always") - assert_warns(DeprecationWarning, corrcoef, self.A, ddof=-1) - sup.filter(DeprecationWarning) - # ddof has no or negligible effect on the function - assert_almost_equal(corrcoef(self.A, ddof=-1), self.res1) - assert_almost_equal(corrcoef(self.A, self.B, ddof=-1), self.res2) - assert_almost_equal(corrcoef(self.A, ddof=3), self.res1) - assert_almost_equal(corrcoef(self.A, self.B, ddof=3), self.res2) - - def test_bias(self): - # bias raises DeprecationWarning - with suppress_warnings() as sup: - warnings.simplefilter("always") - assert_warns(DeprecationWarning, corrcoef, self.A, self.B, 1, 0) - assert_warns(DeprecationWarning, corrcoef, self.A, bias=0) - sup.filter(DeprecationWarning) - # bias has no or negligible effect on the function - assert_almost_equal(corrcoef(self.A, bias=1), self.res1) - def test_complex(self): x = np.array([[1, 2, 3], [1j, 2j, 3j]]) res = corrcoef(x) @@ -2370,7 +2564,7 @@ def test_extreme(self): assert_array_almost_equal(c, np.array([[1., -1.], [-1., 1.]])) assert_(np.all(np.abs(c) <= 1.0)) - @pytest.mark.parametrize("test_type", [np.half, np.single, np.double, np.longdouble]) + @pytest.mark.parametrize("test_type", np_floats) def test_corrcoef_dtype(self, test_type): cast_A = self.A.astype(test_type) res = corrcoef(cast_A, dtype=test_type) @@ -2476,12 +2670,18 @@ def test_unit_fweights_and_aweights(self): aweights=self.unit_weights), self.res1) - @pytest.mark.parametrize("test_type", [np.half, np.single, np.double, np.longdouble]) + @pytest.mark.parametrize("test_type", np_floats) def test_cov_dtype(self, test_type): cast_x1 = self.x1.astype(test_type) res = cov(cast_x1, dtype=test_type) assert test_type == res.dtype + def test_gh_27658(self): + x = np.ones((3, 1)) + expected = np.cov(x, ddof=0, rowvar=True) + actual = np.cov(x.T, ddof=0, rowvar=False) + assert_allclose(actual, expected, strict=True) + class Test_I0: @@ -2492,7 +2692,8 @@ def 
test_simple(self): # need at least one test above 8, as the implementation is piecewise A = np.array([0.49842636, 0.6969809, 0.22011976, 0.0155549, 10.0]) - expected = np.array([1.06307822, 1.12518299, 1.01214991, 1.00006049, 2815.71662847]) + expected = np.array([1.06307822, 1.12518299, 1.01214991, + 1.00006049, 2815.71662847]) assert_almost_equal(i0(A), expected) assert_almost_equal(i0(-A), expected) @@ -2601,6 +2802,12 @@ def test_sparse(self): assert_array_equal(X, np.array([[1, 2, 3]])) assert_array_equal(Y, np.array([[4], [5], [6], [7]])) + def test_always_tuple(self): + A = meshgrid([1, 2, 3], [4, 5, 6, 7], sparse=True, copy=False) + B = meshgrid([], sparse=True, copy=False) + assert isinstance(A, tuple) + assert isinstance(B, tuple) + def test_invalid_arguments(self): # Test that meshgrid complains about invalid arguments # Regression test for issue #4755: @@ -2615,19 +2822,19 @@ def test_return_type(self): x = np.arange(0, 10, dtype=np.float32) y = np.arange(10, 20, dtype=np.float64) - X, Y = np.meshgrid(x,y) + X, Y = np.meshgrid(x, y) assert_(X.dtype == x.dtype) assert_(Y.dtype == y.dtype) # copy - X, Y = np.meshgrid(x,y, copy=True) + X, Y = np.meshgrid(x, y, copy=True) assert_(X.dtype == x.dtype) assert_(Y.dtype == y.dtype) # sparse - X, Y = np.meshgrid(x,y, sparse=True) + X, Y = np.meshgrid(x, y, sparse=True) assert_(X.dtype == x.dtype) assert_(Y.dtype == y.dtype) @@ -2759,7 +2966,7 @@ def test_subclasses(self): class subclass(np.ndarray): pass x = np.arange(5.).view(subclass) - r = piecewise(x, [x<2., x>=4], [-1., 1., 0.]) + r = piecewise(x, [x < 2., x >= 4], [-1., 1., 0.]) assert_equal(type(r), subclass) assert_equal(r, [-1., -1., 0., 0., 1.]) @@ -2817,6 +3024,11 @@ def test_empty_with_minlength(self): y = np.bincount(x, minlength=5) assert_array_equal(y, np.zeros(5, dtype=int)) + @pytest.mark.parametrize('minlength', [0, 3]) + def test_empty_list(self, minlength): + assert_array_equal(np.bincount([], minlength=minlength), + np.zeros(minlength, 
dtype=int)) + def test_with_incorrect_minlength(self): x = np.array([], dtype=int) assert_raises_regex(TypeError, @@ -2859,6 +3071,32 @@ def test_error_not_1d(self, vals): with assert_raises(ValueError): np.bincount(vals) + @pytest.mark.parametrize("vals", [[1.0], [1j], ["1"], [b"1"]]) + def test_error_not_int(self, vals): + with assert_raises(TypeError): + np.bincount(vals) + + @pytest.mark.parametrize("dt", np.typecodes["AllInteger"]) + def test_gh_28354(self, dt): + a = np.array([0, 1, 1, 3, 2, 1, 7], dtype=dt) + actual = np.bincount(a) + expected = [1, 3, 1, 1, 0, 0, 0, 1] + assert_array_equal(actual, expected) + + def test_contiguous_handling(self): + # check for absence of hard crash + np.bincount(np.arange(10000)[::2]) + + def test_gh_28354_array_like(self): + class A: + def __array__(self): + return np.array([0, 1, 1, 3, 2, 1, 7], dtype=np.uint64) + + a = A() + actual = np.bincount(a) + expected = [1, 3, 1, 1, 0, 0, 0, 1] + assert_array_equal(actual, expected) + class TestInterp: @@ -2936,7 +3174,7 @@ def test_non_finite_behavior_exact_x(self): assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.nan, np.nan, 4]) @pytest.fixture(params=[ - lambda x: np.float64(x), + np.float64, lambda x: _make_complex(x, 0), lambda x: _make_complex(0, x), lambda x: _make_complex(x, np.multiply(x, -2)) @@ -2959,28 +3197,32 @@ def test_non_finite_any_nan(self, sc): def test_non_finite_inf(self, sc): """ Test that interp between opposite infs gives nan """ - assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 0, 10])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, 1], sc([-np.inf, +np.inf])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, 1], sc([+np.inf, -np.inf])), sc(np.nan)) + inf = np.inf + nan = np.nan + assert_equal(np.interp(0.5, [-inf, +inf], sc([ 0, 10])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([-inf, +inf])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([+inf, -inf])), sc(nan)) # unless the y values are equal assert_equal(np.interp(0.5, [-np.inf, 
+np.inf], sc([ 10, 10])), sc(10)) def test_non_finite_half_inf_xf(self, sc): """ Test that interp where both axes have a bound at inf gives nan """ - assert_equal(np.interp(0.5, [-np.inf, 1], sc([-np.inf, 10])), sc(np.nan)) - assert_equal(np.interp(0.5, [-np.inf, 1], sc([+np.inf, 10])), sc(np.nan)) - assert_equal(np.interp(0.5, [-np.inf, 1], sc([ 0, -np.inf])), sc(np.nan)) - assert_equal(np.interp(0.5, [-np.inf, 1], sc([ 0, +np.inf])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, +np.inf], sc([-np.inf, 10])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, +np.inf], sc([+np.inf, 10])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, +np.inf], sc([ 0, -np.inf])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, +np.inf], sc([ 0, +np.inf])), sc(np.nan)) + inf = np.inf + nan = np.nan + assert_equal(np.interp(0.5, [-inf, 1], sc([-inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [-inf, 1], sc([+inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [-inf, 1], sc([ 0, -inf])), sc(nan)) + assert_equal(np.interp(0.5, [-inf, 1], sc([ 0, +inf])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([-inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([+inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([ 0, -inf])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([ 0, +inf])), sc(nan)) def test_non_finite_half_inf_x(self, sc): """ Test interp where the x axis has a bound at inf """ assert_equal(np.interp(0.5, [-np.inf, -np.inf], sc([0, 10])), sc(10)) - assert_equal(np.interp(0.5, [-np.inf, 1 ], sc([0, 10])), sc(10)) + assert_equal(np.interp(0.5, [-np.inf, 1 ], sc([0, 10])), sc(10)) # noqa: E202 assert_equal(np.interp(0.5, [ 0, +np.inf], sc([0, 10])), sc(0)) assert_equal(np.interp(0.5, [+np.inf, +np.inf], sc([0, 10])), sc(0)) @@ -2996,9 +3238,9 @@ def test_non_finite_half_inf_f(self, sc): def test_complex_interp(self): # test complex interpolation x = np.linspace(0, 1, 5) - y = np.linspace(0, 1, 5) + (1 + np.linspace(0, 1, 5))*1.0j + y = 
np.linspace(0, 1, 5) + (1 + np.linspace(0, 1, 5)) * 1.0j x0 = 0.3 - y0 = x0 + (1+x0)*1.0j + y0 = x0 + (1 + x0) * 1.0j assert_almost_equal(np.interp(x0, x, y), y0) # test complex left and right x0 = -1 @@ -3010,15 +3252,15 @@ def test_complex_interp(self): # test complex non finite x = [1, 2, 2.5, 3, 4] xp = [1, 2, 3, 4] - fp = [1, 2+1j, np.inf, 4] - y = [1, 2+1j, np.inf+0.5j, np.inf, 4] + fp = [1, 2 + 1j, np.inf, 4] + y = [1, 2 + 1j, np.inf + 0.5j, np.inf, 4] assert_almost_equal(np.interp(x, xp, fp), y) # test complex periodic x = [-180, -170, -185, 185, -10, -5, 0, 365] xp = [190, -190, 350, -350] - fp = [5+1.0j, 10+2j, 3+3j, 4+4j] - y = [7.5+1.5j, 5.+1.0j, 8.75+1.75j, 6.25+1.25j, 3.+3j, 3.25+3.25j, - 3.5+3.5j, 3.75+3.75j] + fp = [5 + 1.0j, 10 + 2j, 3 + 3j, 4 + 4j] + y = [7.5 + 1.5j, 5. + 1.0j, 8.75 + 1.75j, 6.25 + 1.25j, 3. + 3j, 3.25 + 3.25j, + 3.5 + 3.5j, 3.75 + 3.75j] assert_almost_equal(np.interp(x, xp, fp, period=360), y) def test_zero_dimensional_interpolation_point(self): @@ -3054,6 +3296,21 @@ def test_period(self): assert_almost_equal(np.interp(x, xp, fp, period=360), y) +quantile_methods = [ + 'inverted_cdf', 'averaged_inverted_cdf', 'closest_observation', + 'interpolated_inverted_cdf', 'hazen', 'weibull', 'linear', + 'median_unbiased', 'normal_unbiased', 'nearest', 'lower', 'higher', + 'midpoint'] + +# Note: Technically, averaged_inverted_cdf and midpoint are not interpolated. +# but NumPy doesn't currently make a difference (at least w.r.t. to promotion). 
+interpolating_quantile_methods = [ + 'averaged_inverted_cdf', 'interpolated_inverted_cdf', 'hazen', 'weibull', + 'linear', 'median_unbiased', 'normal_unbiased', 'midpoint'] + +methods_supporting_weights = ["inverted_cdf"] + + class TestPercentile: def test_basic(self): @@ -3098,11 +3355,11 @@ def test_api(self): np.percentile(d, 5, None, o, False, 'linear') def test_complex(self): - arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='G') + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='G') assert_raises(TypeError, np.percentile, arr_c, 0.5) - arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='D') + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='D') assert_raises(TypeError, np.percentile, arr_c, 0.5) - arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='F') + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='F') assert_raises(TypeError, np.percentile, arr_c, 0.5) def test_2D(self): @@ -3157,8 +3414,6 @@ def test_linear_interpolation(self, input_dtype, expected_dtype): expected_dtype = np.dtype(expected_dtype) - if np._get_promotion_state() == "legacy": - expected_dtype = np.promote_types(expected_dtype, np.float64) arr = np.asarray([15.0, 20.0, 35.0, 40.0, 50.0], dtype=input_dtype) weights = np.ones_like(arr) if weighted else None @@ -3267,10 +3522,10 @@ def test_scalar_q(self): x = np.arange(12).reshape(3, 4) assert_equal(np.percentile(x, 50), 5.5) assert_(np.isscalar(np.percentile(x, 50))) - r0 = np.array([4., 5., 6., 7.]) + r0 = np.array([4., 5., 6., 7.]) assert_equal(np.percentile(x, 50, axis=0), r0) assert_equal(np.percentile(x, 50, axis=0).shape, r0.shape) - r1 = np.array([1.5, 5.5, 9.5]) + r1 = np.array([1.5, 5.5, 9.5]) assert_almost_equal(np.percentile(x, 50, axis=1), r1) assert_equal(np.percentile(x, 50, axis=1).shape, r1.shape) @@ -3288,11 +3543,11 @@ def test_scalar_q(self): x = np.arange(12).reshape(3, 4) assert_equal(np.percentile(x, 50, method='lower'), 5.) 
assert_(np.isscalar(np.percentile(x, 50))) - r0 = np.array([4., 5., 6., 7.]) + r0 = np.array([4., 5., 6., 7.]) c0 = np.percentile(x, 50, method='lower', axis=0) assert_equal(c0, r0) assert_equal(c0.shape, r0.shape) - r1 = np.array([1., 5., 9.]) + r1 = np.array([1., 5., 9.]) c1 = np.percentile(x, 50, method='lower', axis=1) assert_almost_equal(c1, r1) assert_equal(c1.shape, r1.shape) @@ -3362,18 +3617,18 @@ def test_percentile_out(self, percentile, with_weights): percentile(x, (25, 50), axis=0, out=out, weights=weights), r0 ) assert_equal(out, r0) - r1 = np.array([[0.75, 4.75, 8.75], [1.5, 5.5, 9.5]]) + r1 = np.array([[0.75, 4.75, 8.75], [1.5, 5.5, 9.5]]) out = np.empty((2, 3)) assert_equal(np.percentile(x, (25, 50), axis=1, out=out), r1) assert_equal(out, r1) # q.dim > 1, int - r0 = np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) + r0 = np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) out = np.empty((2, 4), dtype=x.dtype) c = np.percentile(x, (25, 50), method='lower', axis=0, out=out) assert_equal(c, r0) assert_equal(out, r0) - r1 = np.array([[0, 4, 8], [1, 5, 9]]) + r1 = np.array([[0, 4, 8], [1, 5, 9]]) out = np.empty((2, 3), dtype=x.dtype) c = np.percentile(x, (25, 50), method='lower', axis=1, out=out) assert_equal(c, r1) @@ -3449,20 +3704,20 @@ def test_extended_axis(self): d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11)) np.random.shuffle(d.ravel()) - assert_equal(np.percentile(d, 25, axis=(0, 1, 2))[0], - np.percentile(d[:,:,:, 0].flatten(), 25)) + assert_equal(np.percentile(d, 25, axis=(0, 1, 2))[0], + np.percentile(d[:, :, :, 0].flatten(), 25)) assert_equal(np.percentile(d, [10, 90], axis=(0, 1, 3))[:, 1], - np.percentile(d[:,:, 1,:].flatten(), [10, 90])) + np.percentile(d[:, :, 1, :].flatten(), [10, 90])) assert_equal(np.percentile(d, 25, axis=(3, 1, -4))[2], - np.percentile(d[:,:, 2,:].flatten(), 25)) + np.percentile(d[:, :, 2, :].flatten(), 25)) assert_equal(np.percentile(d, 25, axis=(3, 1, 2))[2], - np.percentile(d[2,:,:,:].flatten(), 25)) + np.percentile(d[2, :, :, 
:].flatten(), 25)) assert_equal(np.percentile(d, 25, axis=(3, 2))[2, 1], - np.percentile(d[2, 1,:,:].flatten(), 25)) + np.percentile(d[2, 1, :, :].flatten(), 25)) assert_equal(np.percentile(d, 25, axis=(1, -2))[2, 1], - np.percentile(d[2,:,:, 1].flatten(), 25)) + np.percentile(d[2, :, :, 1].flatten(), 25)) assert_equal(np.percentile(d, 25, axis=(1, 3))[2, 2], - np.percentile(d[2,:, 2,:].flatten(), 25)) + np.percentile(d[2, :, 2, :].flatten(), 25)) def test_extended_axis_invalid(self): d = np.ones((3, 5, 7, 11)) @@ -3644,20 +3899,62 @@ def test_nat_basic(self, dtype, pos): assert res.dtype == dtype assert np.isnat(res).all() - a = np.arange(0, 24*3, dtype=dtype).reshape(-1, 3) + a = np.arange(0, 24 * 3, dtype=dtype).reshape(-1, 3) a[pos, 1] = "NaT" res = np.percentile(a, 30, axis=0) assert_array_equal(np.isnat(res), [False, True, False]) - -quantile_methods = [ - 'inverted_cdf', 'averaged_inverted_cdf', 'closest_observation', - 'interpolated_inverted_cdf', 'hazen', 'weibull', 'linear', - 'median_unbiased', 'normal_unbiased', 'nearest', 'lower', 'higher', - 'midpoint'] - - -methods_supporting_weights = ["inverted_cdf"] + @pytest.mark.parametrize("qtype", [np.float16, np.float32]) + @pytest.mark.parametrize("method", quantile_methods) + def test_percentile_gh_29003(self, qtype, method): + # test that with float16 or float32 input we do not get overflow + zero = qtype(0) + one = qtype(1) + a = np.zeros(65521, qtype) + a[:20_000] = one + z = np.percentile(a, 50, method=method) + assert z == zero + assert z.dtype == a.dtype + z = np.percentile(a, 99, method=method) + assert z == one + assert z.dtype == a.dtype + + def test_percentile_gh_29003_Fraction(self): + zero = Fraction(0) + one = Fraction(1) + a = np.array([zero] * 65521) + a[:20_000] = one + z = np.percentile(a, 50) + assert z == zero + z = np.percentile(a, Fraction(50)) + assert z == zero + assert np.array(z).dtype == a.dtype + + z = np.percentile(a, 99) + assert z == one + # test that with only Fraction input 
the return type is a Fraction + z = np.percentile(a, Fraction(99)) + assert z == one + assert np.array(z).dtype == a.dtype + + @pytest.mark.parametrize("method", interpolating_quantile_methods) + @pytest.mark.parametrize("q", [50, 10.0]) + def test_q_weak_promotion(self, method, q): + a = np.array([1, 2, 3, 4, 5], dtype=np.float32) + value = np.percentile(a, q, method=method) + assert value.dtype == np.float32 + + @pytest.mark.parametrize("method", interpolating_quantile_methods) + def test_q_strong_promotion(self, method): + # For interpolating methods, the dtype should be float64, for + # discrete ones the original int8. (technically, mid-point has no + # reason to take into account `q`, but does so anyway.) + a = np.array([1, 2, 3, 4, 5], dtype=np.float32) + value = np.percentile(a, np.float64(50), method=method) + assert value.dtype == np.float64 + # Check that we don't do accidental promotion either: + value = np.percentile(a, np.float32(50), method=method) + assert value.dtype == np.float32 class TestQuantile: @@ -3704,7 +4001,7 @@ def test_fraction(self): q = np.quantile(x, .5) assert_equal(q, 1.75) - assert_equal(type(q), np.float64) + assert isinstance(q, float) q = np.quantile(x, Fraction(1, 2)) assert_equal(q, Fraction(7, 4)) @@ -3723,12 +4020,12 @@ def test_fraction(self): assert_equal(np.quantile(x, Fraction(1, 2)), Fraction(7, 2)) def test_complex(self): - #See gh-22652 - arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='G') + # gh-22652 + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='G') assert_raises(TypeError, np.quantile, arr_c, 0.5) - arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='D') + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='D') assert_raises(TypeError, np.quantile, arr_c, 0.5) - arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='F') + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='F') assert_raises(TypeError, np.quantile, arr_c, 0.5) def test_no_p_overwrite(self): @@ 
-3753,8 +4050,8 @@ def test_quantile_preserve_int_type(self, dtype): def test_q_zero_one(self, method): # gh-24710 arr = [10, 11, 12] - quantile = np.quantile(arr, q = [0, 1], method=method) - assert_equal(quantile, np.array([10, 12])) + quantile = np.quantile(arr, q=[0, 1], method=method) + assert_equal(quantile, np.array([10, 12])) @pytest.mark.parametrize("method", quantile_methods) def test_quantile_monotonic(self, method): @@ -3872,14 +4169,14 @@ def test_quantile_add_and_multiply_constant(self, weights, method, alpha): assert_allclose(q, np.quantile(y, alpha, method="higher")) elif np.round(n * alpha) == int(n * alpha) + 1: assert_allclose( - q, np.quantile(y, alpha + 1/n, method="higher")) + q, np.quantile(y, alpha + 1 / n, method="higher")) else: assert_allclose(q, np.quantile(y, alpha, method="lower")) elif method == "interpolated_inverted_cdf": - assert_allclose(q, np.quantile(y, alpha + 1/n, method=method)) + assert_allclose(q, np.quantile(y, alpha + 1 / n, method=method)) elif method == "nearest": if n * alpha == int(n * alpha): - assert_allclose(q, np.quantile(y, alpha + 1/n, method=method)) + assert_allclose(q, np.quantile(y, alpha + 1 / n, method=method)) else: assert_allclose(q, np.quantile(y, alpha, method=method)) elif method == "lower": @@ -3974,6 +4271,28 @@ def test_quantile_with_weights_and_axis(self, method): ) assert_allclose(q, q_res) + # axis is a tuple of all axes + q = np.quantile(y, alpha, weights=w, method=method, axis=(0, 1, 2)) + q_res = np.quantile(y, alpha, weights=w, method=method, axis=None) + assert_allclose(q, q_res) + + q = np.quantile(y, alpha, weights=w, method=method, axis=(1, 2)) + q_res = np.zeros(shape=(2,)) + for i in range(2): + q_res[i] = np.quantile(y[i], alpha, weights=w[i], method=method) + assert_allclose(q, q_res) + + @pytest.mark.parametrize("method", methods_supporting_weights) + def test_quantile_weights_min_max(self, method): + # Test weighted quantile at 0 and 1 with leading and trailing zero + # weights. 
+ w = [0, 0, 1, 2, 3, 0] + y = np.arange(6) + y_min = np.quantile(y, 0, weights=w, method="inverted_cdf") + y_max = np.quantile(y, 1, weights=w, method="inverted_cdf") + assert y_min == y[2] # == 2 + assert y_max == y[4] # == 4 + def test_quantile_weights_raises_negative_weights(self): y = [1, 2] w = [-0.5, 1] @@ -3998,16 +4317,96 @@ def test_weibull_fraction(self): quantile = np.quantile(arr, [Fraction(1, 2)], method='weibull') assert_equal(quantile, np.array(Fraction(1, 20))) + def test_closest_observation(self): + # Round ties to nearest even order statistic (see #26656) + m = 'closest_observation' + q = 0.5 + arr = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + assert_equal(2, np.quantile(arr[0:3], q, method=m)) + assert_equal(2, np.quantile(arr[0:4], q, method=m)) + assert_equal(2, np.quantile(arr[0:5], q, method=m)) + assert_equal(3, np.quantile(arr[0:6], q, method=m)) + assert_equal(4, np.quantile(arr[0:7], q, method=m)) + assert_equal(4, np.quantile(arr[0:8], q, method=m)) + assert_equal(4, np.quantile(arr[0:9], q, method=m)) + assert_equal(5, np.quantile(arr, q, method=m)) + + @pytest.mark.parametrize("weights", + [[1, np.inf, 1, 1], [1, np.inf, 1, np.inf], [0, 0, 0, 0], + [np.finfo("float64").max] * 4]) + @pytest.mark.parametrize("dty", ["f8", "O"]) + def test_inf_zeroes_err(self, weights, dty): + m = "inverted_cdf" + q = 0.5 + arr = np.array([[1, 2, 3, 4]] * 2) + # Make one entry have bad weights and another good ones. + wgts = np.array([weights, [0.5] * 4], dtype=dty) + with pytest.raises(ValueError, + match=r"Weights included NaN, inf or were all zero"): + # We (currently) don't bother to check ahead so 0/0 or + # overflow to `inf` while summing weights, or `inf / inf` + # will all warn before the error is raised. 
+ with np.errstate(all="ignore"): + a = np.quantile(arr, q, weights=wgts, method=m, axis=1) + + @pytest.mark.parametrize("weights", + [[1, np.nan, 1, 1], [1, np.nan, np.nan, 1]]) + @pytest.mark.parametrize(["err", "dty"], + [(ValueError, "f8"), ((RuntimeWarning, ValueError), "O")]) + def test_nan_err(self, err, dty, weights): + m = "inverted_cdf" + q = 0.5 + arr = np.array([[1, 2, 3, 4]] * 2) + # Make one entry have bad weights and another good ones. + wgts = np.array([weights, [0.5] * 4], dtype=dty) + with pytest.raises(err): + a = np.quantile(arr, q, weights=wgts, method=m) + + def test_quantile_gh_29003_Fraction(self): + r = np.quantile([1, 2], q=Fraction(1)) + assert r == Fraction(2) + assert isinstance(r, Fraction) + + r = np.quantile([1, 2], q=Fraction(.5)) + assert r == Fraction(3, 2) + assert isinstance(r, Fraction) + + def test_float16_gh_29003(self): + a = np.arange(50_001, dtype=np.float16) + q = .999 + value = np.quantile(a, q) + assert value == q * 50_000 + assert value.dtype == np.float16 + + @pytest.mark.parametrize("method", interpolating_quantile_methods) + @pytest.mark.parametrize("q", [0.5, 1]) + def test_q_weak_promotion(self, method, q): + a = np.array([1, 2, 3, 4, 5], dtype=np.float32) + value = np.quantile(a, q, method=method) + assert value.dtype == np.float32 + + @pytest.mark.parametrize("method", interpolating_quantile_methods) + def test_q_strong_promotion(self, method): + # For interpolating methods, the dtype should be float64, for + # discrete ones the original int8. (technically, mid-point has no + # reason to take into account `q`, but does so anyway.) 
+ a = np.array([1, 2, 3, 4, 5], dtype=np.float32) + value = np.quantile(a, np.float64(0.5), method=method) + assert value.dtype == np.float64 + # Check that we don't do accidental promotion either: + value = np.quantile(a, np.float32(0.5), method=method) + assert value.dtype == np.float32 + class TestLerp: @hypothesis.given(t0=st.floats(allow_nan=False, allow_infinity=False, min_value=0, max_value=1), t1=st.floats(allow_nan=False, allow_infinity=False, min_value=0, max_value=1), - a = st.floats(allow_nan=False, allow_infinity=False, - min_value=-1e300, max_value=1e300), - b = st.floats(allow_nan=False, allow_infinity=False, - min_value=-1e300, max_value=1e300)) + a=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300), + b=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300)) def test_linear_interpolation_formula_monotonic(self, t0, t1, a, b): l0 = nfb._lerp(a, b, t0) l1 = nfb._lerp(a, b, t1) @@ -4058,7 +4457,7 @@ def test_basic(self): assert_equal(np.median(a0), 1) assert_allclose(np.median(a1), 0.5) assert_allclose(np.median(a2), 2.5) - assert_allclose(np.median(a2, axis=0), [1.5, 2.5, 3.5]) + assert_allclose(np.median(a2, axis=0), [1.5, 2.5, 3.5]) assert_equal(np.median(a2, axis=1), [1, 4]) assert_allclose(np.median(a2, axis=None), 2.5) @@ -4085,8 +4484,8 @@ def test_axis_keyword(self): np.median(a, axis=ax) assert_array_equal(a, orig) - assert_allclose(np.median(a3, axis=0), [3, 4]) - assert_allclose(np.median(a3.T, axis=1), [3, 4]) + assert_allclose(np.median(a3, axis=0), [3, 4]) + assert_allclose(np.median(a3.T, axis=1), [3, 4]) assert_allclose(np.median(a3), 3.5) assert_allclose(np.median(a3, axis=None), 3.5) assert_allclose(np.median(a3.T), 3.5) @@ -4102,16 +4501,16 @@ def test_overwrite_keyword(self): assert_allclose(np.median(a0.copy(), overwrite_input=True), 1) assert_allclose(np.median(a1.copy(), overwrite_input=True), 0.5) assert_allclose(np.median(a2.copy(), overwrite_input=True), 2.5) - 
assert_allclose(np.median(a2.copy(), overwrite_input=True, axis=0), - [1.5, 2.5, 3.5]) + assert_allclose( + np.median(a2.copy(), overwrite_input=True, axis=0), [1.5, 2.5, 3.5]) assert_allclose( np.median(a2.copy(), overwrite_input=True, axis=1), [1, 4]) assert_allclose( np.median(a2.copy(), overwrite_input=True, axis=None), 2.5) assert_allclose( - np.median(a3.copy(), overwrite_input=True, axis=0), [3, 4]) - assert_allclose(np.median(a3.T.copy(), overwrite_input=True, axis=1), - [3, 4]) + np.median(a3.copy(), overwrite_input=True, axis=0), [3, 4]) + assert_allclose( + np.median(a3.T.copy(), overwrite_input=True, axis=1), [3, 4]) a4 = np.arange(3 * 4 * 5, dtype=np.float32).reshape((3, 4, 5)) np.random.shuffle(a4.ravel()) @@ -4261,19 +4660,19 @@ def test_extended_axis(self): d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11)) np.random.shuffle(d.ravel()) assert_equal(np.median(d, axis=(0, 1, 2))[0], - np.median(d[:,:,:, 0].flatten())) + np.median(d[:, :, :, 0].flatten())) assert_equal(np.median(d, axis=(0, 1, 3))[1], - np.median(d[:,:, 1,:].flatten())) + np.median(d[:, :, 1, :].flatten())) assert_equal(np.median(d, axis=(3, 1, -4))[2], - np.median(d[:,:, 2,:].flatten())) + np.median(d[:, :, 2, :].flatten())) assert_equal(np.median(d, axis=(3, 1, 2))[2], - np.median(d[2,:,:,:].flatten())) + np.median(d[2, :, :, :].flatten())) assert_equal(np.median(d, axis=(3, 2))[2, 1], - np.median(d[2, 1,:,:].flatten())) + np.median(d[2, 1, :, :].flatten())) assert_equal(np.median(d, axis=(1, -2))[2, 1], - np.median(d[2,:,:, 1].flatten())) + np.median(d[2, :, :, 1].flatten())) assert_equal(np.median(d, axis=(1, 3))[2, 2], - np.median(d[2,:, 2,:].flatten())) + np.median(d[2, :, 2, :].flatten())) def test_extended_axis_invalid(self): d = np.ones((3, 5, 7, 11)) @@ -4335,7 +4734,7 @@ def test_nat_behavior(self, dtype, pos): assert res.dtype == dtype assert np.isnat(res).all() - a = np.arange(0, 24*3, dtype=dtype).reshape(-1, 3) + a = np.arange(0, 24 * 3, dtype=dtype).reshape(-1, 3) 
a[pos, 1] = "NaT" res = np.median(a, axis=0) assert_array_equal(np.isnat(res), [False, True, False]) diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py index 09a1a5ab709d..cae11cfdcd65 100644 --- a/numpy/lib/tests/test_histograms.py +++ b/numpy/lib/tests/test_histograms.py @@ -1,14 +1,21 @@ -import numpy as np +import warnings -from numpy import histogram, histogramdd, histogram_bin_edges -from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_almost_equal, - assert_array_almost_equal, assert_raises, assert_allclose, - assert_array_max_ulp, assert_raises_regex, suppress_warnings, - ) -from numpy.testing._private.utils import requires_memory import pytest +import numpy as np +from numpy import histogram, histogram_bin_edges, histogramdd +from numpy.testing import ( + assert_, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_array_max_ulp, + assert_equal, + assert_raises, + assert_raises_regex, +) + class TestHistogram: @@ -101,7 +108,6 @@ def test_arr_weights_mismatch(self): with assert_raises_regex(ValueError, "same shape as"): h, b = histogram(a, range=[1, 9], weights=w, density=True) - def test_type(self): # Check the type of the returned histogram a = np.arange(10) + .5 @@ -130,14 +136,12 @@ def test_bool_conversion(self): a = np.array([1, 1, 0], dtype=np.uint8) int_hist, int_edges = np.histogram(a) - # Should raise an warning on booleans + # Should raise a warning on booleans # Ensure that the histograms are equivalent, need to suppress # the warnings to get the actual outputs - with suppress_warnings() as sup: - rec = sup.record(RuntimeWarning, 'Converting input from .*') + with pytest.warns(RuntimeWarning, match='Converting input from .*'): hist, edges = np.histogram([True, True, False]) # A warning should be issued - assert_equal(len(rec), 1) assert_array_equal(hist, int_hist) assert_array_equal(edges, int_edges) @@ -212,7 +216,7 @@ def 
test_empty(self): assert_array_equal(a, np.array([0])) assert_array_equal(b, np.array([0, 1])) - def test_error_binnum_type (self): + def test_error_binnum_type(self): # Tests if right Error is raised if bins argument is float vals = np.linspace(0.0, 1.0, num=100) histogram(vals, 5) @@ -221,9 +225,9 @@ def test_error_binnum_type (self): def test_finite_range(self): # Normal ranges should be fine vals = np.linspace(0.0, 1.0, num=100) - histogram(vals, range=[0.25,0.75]) - assert_raises(ValueError, histogram, vals, range=[np.nan,0.75]) - assert_raises(ValueError, histogram, vals, range=[0.25,np.inf]) + histogram(vals, range=[0.25, 0.75]) + assert_raises(ValueError, histogram, vals, range=[np.nan, 0.75]) + assert_raises(ValueError, histogram, vals, range=[0.25, np.inf]) def test_invalid_range(self): # start of range must be < end of range @@ -270,7 +274,7 @@ def test_object_array_of_0d(self): histogram, [np.array(0.4) for i in range(10)] + [np.inf]) # these should not crash - np.histogram([np.array(0.5) for i in range(10)] + [.500000000000001]) + np.histogram([np.array(0.5) for i in range(10)] + [.500000000000002]) np.histogram([np.array(0.5) for i in range(10)] + [.5]) def test_some_nan_values(self): @@ -279,9 +283,8 @@ def test_some_nan_values(self): all_nan = np.array([np.nan, np.nan]) # the internal comparisons with NaN give warnings - sup = suppress_warnings() - sup.filter(RuntimeWarning) - with sup: + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) # can't infer range with nan assert_raises(ValueError, histogram, one_nan, bins='auto') assert_raises(ValueError, histogram, all_nan, bins='auto') @@ -395,6 +398,11 @@ def test_histogram_bin_edges(self): edges = histogram_bin_edges(arr, bins='auto', range=(0, 1)) assert_array_equal(edges, e) + def test_small_value_range(self): + arr = np.array([1, 1 + 2e-16] * 10) + with pytest.raises(ValueError, match="Too many bins for data range"): + histogram(arr, bins=10) + # 
@requires_memory(free_bytes=1e10) # @pytest.mark.slow @pytest.mark.skip(reason="Bad memory reports lead to OOM in ci testing") @@ -413,6 +421,13 @@ def test_gh_23110(self): expected_hist = np.array([1, 0]) assert_array_equal(hist, expected_hist) + def test_gh_28400(self): + e = 1 + 1e-12 + Z = [0, 1, 1, 1, 1, 1, e, e, e, e, e, e, 2] + counts, edges = np.histogram(Z, bins="auto") + assert len(counts) < 10 + assert edges[0] == Z[0] + assert edges[-1] == Z[-1] class TestHistogramOptimBinNums: """ @@ -452,8 +467,8 @@ def test_simple(self): x = np.concatenate((x1, x2)) for estimator, numbins in expectedResults.items(): a, b = np.histogram(x, estimator) - assert_equal(len(a), numbins, err_msg="For the {0} estimator " - "with datasize of {1}".format(estimator, testlen)) + assert_equal(len(a), numbins, err_msg=f"For the {estimator} estimator " + f"with datasize of {testlen}") def test_small(self): """ @@ -472,8 +487,8 @@ def test_small(self): testdat = np.arange(testlen).astype(float) for estimator, expbins in expectedResults.items(): a, b = np.histogram(testdat, estimator) - assert_equal(len(a), expbins, err_msg="For the {0} estimator " - "with datasize of {1}".format(estimator, testlen)) + assert_equal(len(a), expbins, err_msg=f"For the {estimator} estimator " + f"with datasize of {testlen}") def test_incorrect_methods(self): """ @@ -494,20 +509,21 @@ def test_novariance(self): for estimator, numbins in novar_resultdict.items(): a, b = np.histogram(novar_dataset, estimator) - assert_equal(len(a), numbins, err_msg="{0} estimator, " - "No Variance test".format(estimator)) + assert_equal(len(a), numbins, + err_msg=f"{estimator} estimator, No Variance test") def test_limited_variance(self): """ - Check when IQR is 0, but variance exists, we return the sturges value - and not the fd value. + Check when IQR is 0, but variance exists, we return a reasonable value. 
""" lim_var_data = np.ones(1000) lim_var_data[:3] = 0 lim_var_data[-4:] = 100 edges_auto = histogram_bin_edges(lim_var_data, 'auto') - assert_equal(edges_auto, np.linspace(0, 100, 12)) + assert_equal(edges_auto[0], 0) + assert_equal(edges_auto[-1], 100.) + assert len(edges_auto) < 100 edges_fd = histogram_bin_edges(lim_var_data, 'fd') assert_equal(edges_fd, np.array([0, 100])) @@ -536,7 +552,8 @@ def test_outlier(self): assert_equal(len(a), numbins) def test_scott_vs_stone(self): - """Verify that Scott's rule and Stone's rule converges for normally distributed data""" + # Verify that Scott's rule and Stone's rule converges for normally + # distributed data def nbins_ratio(seed, size): rng = np.random.RandomState(seed) @@ -544,10 +561,11 @@ def nbins_ratio(seed, size): a, b = len(np.histogram(x, 'stone')[0]), len(np.histogram(x, 'scott')[0]) return a / (a + b) - ll = [[nbins_ratio(seed, size) for size in np.geomspace(start=10, stop=100, num=4).round().astype(int)] - for seed in range(10)] + geom_space = np.geomspace(start=10, stop=100, num=4).round().astype(int) + ll = [[nbins_ratio(seed, size) for size in geom_space] for seed in range(10)] - # the average difference between the two methods decreases as the dataset size increases. + # the average difference between the two methods decreases as the dataset + # size increases. 
avg = abs(np.mean(ll, axis=0) - 0.5) assert_almost_equal(avg, [0.15, 0.09, 0.08, 0.03], decimal=2) @@ -577,9 +595,9 @@ def test_simple_range(self): x3 = np.linspace(-100, -50, testlen) x = np.hstack((x1, x2, x3)) for estimator, numbins in expectedResults.items(): - a, b = np.histogram(x, estimator, range = (-20, 20)) - msg = "For the {0} estimator".format(estimator) - msg += " with datasize of {0}".format(testlen) + a, b = np.histogram(x, estimator, range=(-20, 20)) + msg = f"For the {estimator} estimator" + msg += f" with datasize of {testlen}" assert_equal(len(a), numbins, err_msg=msg) @pytest.mark.parametrize("bins", ['auto', 'fd', 'doane', 'scott', @@ -598,9 +616,9 @@ def test_integer(self, bins): """ Test that bin width for integer data is at least 1. """ - with suppress_warnings() as sup: + with warnings.catch_warnings(): if bins == 'stone': - sup.filter(RuntimeWarning) + warnings.simplefilter('ignore', RuntimeWarning) assert_equal( np.histogram_bin_edges(np.tile(np.arange(9), 1000), bins), np.arange(9)) @@ -816,8 +834,8 @@ def test_density_non_uniform_2d(self): [1, 3]]) # ensure the number of points in each region is proportional to its area - x = np.array([1] + [1]*3 + [7]*3 + [7]*9) - y = np.array([7] + [1]*3 + [7]*3 + [1]*9) + x = np.array([1] + [1] * 3 + [7] * 3 + [7] * 9) + y = np.array([7] + [1] * 3 + [7] * 3 + [1] * 9) # sanity check that the above worked as intended hist, edges = histogramdd((y, x), bins=(y_edges, x_edges)) @@ -825,7 +843,7 @@ def test_density_non_uniform_2d(self): # resulting histogram should be uniform, since counts and areas are proportional hist, edges = histogramdd((y, x), bins=(y_edges, x_edges), density=True) - assert_equal(hist, 1 / (8*8)) + assert_equal(hist, 1 / (8 * 8)) def test_density_non_uniform_1d(self): # compare to histogram to show the results are the same diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py index fe1cfce2eaf8..81e47ec3dff2 100644 --- 
a/numpy/lib/tests/test_index_tricks.py +++ b/numpy/lib/tests/test_index_tricks.py @@ -1,14 +1,29 @@ import pytest import numpy as np -from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_almost_equal, - assert_array_almost_equal, assert_raises, assert_raises_regex, - ) from numpy.lib._index_tricks_impl import ( - mgrid, ogrid, ndenumerate, fill_diagonal, diag_indices, diag_indices_from, - index_exp, ndindex, c_, r_, s_, ix_ - ) + c_, + diag_indices, + diag_indices_from, + fill_diagonal, + index_exp, + ix_, + mgrid, + ndenumerate, + ndindex, + ogrid, + r_, + s_, +) +from numpy.testing import ( + assert_, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) class TestRavelUnravelIndex: @@ -46,9 +61,9 @@ def test_basic(self): assert_raises(ValueError, np.ravel_multi_index, (0, 2), (2, 2)) assert_raises(TypeError, np.ravel_multi_index, (0.1, 0.), (2, 2)) - assert_equal(np.unravel_index((2*3 + 1)*6 + 4, (4, 3, 6)), [2, 1, 4]) + assert_equal(np.unravel_index((2 * 3 + 1) * 6 + 4, (4, 3, 6)), [2, 1, 4]) assert_equal( - np.ravel_multi_index([2, 1, 4], (4, 3, 6)), (2*3 + 1)*6 + 4) + np.ravel_multi_index([2, 1, 4], (4, 3, 6)), (2 * 3 + 1) * 6 + 4) arr = np.array([[3, 6, 6], [4, 5, 1]]) assert_equal(np.ravel_multi_index(arr, (7, 6)), [22, 41, 37]) @@ -74,7 +89,7 @@ def test_empty_indices(self): assert_raises_regex(TypeError, msg1, np.unravel_index, (), (10, 3, 5)) assert_raises_regex(TypeError, msg2, np.unravel_index, np.array([]), (10, 3, 5)) - assert_equal(np.unravel_index(np.array([],dtype=int), (10, 3, 5)), + assert_equal(np.unravel_index(np.array([], dtype=int), (10, 3, 5)), [[], [], []]) assert_raises_regex(TypeError, msg1, np.ravel_multi_index, ([], []), (10, 3)) @@ -97,19 +112,19 @@ def test_big_indices(self): [5627771580, 117259570957]) # test unravel_index for big indices (issue #9538) - assert_raises(ValueError, np.unravel_index, 1, (2**32-1, 2**31+1)) + 
assert_raises(ValueError, np.unravel_index, 1, (2**32 - 1, 2**31 + 1)) # test overflow checking for too big array (issue #7546) - dummy_arr = ([0],[0]) + dummy_arr = ([0], [0]) half_max = np.iinfo(np.intp).max // 2 assert_equal( np.ravel_multi_index(dummy_arr, (half_max, 2)), [0]) assert_raises(ValueError, - np.ravel_multi_index, dummy_arr, (half_max+1, 2)) + np.ravel_multi_index, dummy_arr, (half_max + 1, 2)) assert_equal( np.ravel_multi_index(dummy_arr, (half_max, 2), order='F'), [0]) assert_raises(ValueError, - np.ravel_multi_index, dummy_arr, (half_max+1, 2), order='F') + np.ravel_multi_index, dummy_arr, (half_max + 1, 2), order='F') def test_dtypes(self): # Test with different data types @@ -118,10 +133,10 @@ def test_dtypes(self): coords = np.array( [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0]], dtype=dtype) shape = (5, 8) - uncoords = 8*coords[0]+coords[1] + uncoords = 8 * coords[0] + coords[1] assert_equal(np.ravel_multi_index(coords, shape), uncoords) assert_equal(coords, np.unravel_index(uncoords, shape)) - uncoords = coords[0]+5*coords[1] + uncoords = coords[0] + 5 * coords[1] assert_equal( np.ravel_multi_index(coords, shape, order='F'), uncoords) assert_equal(coords, np.unravel_index(uncoords, shape, order='F')) @@ -130,10 +145,10 @@ def test_dtypes(self): [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0], [1, 3, 1, 0, 9, 5]], dtype=dtype) shape = (5, 8, 10) - uncoords = 10*(8*coords[0]+coords[1])+coords[2] + uncoords = 10 * (8 * coords[0] + coords[1]) + coords[2] assert_equal(np.ravel_multi_index(coords, shape), uncoords) assert_equal(coords, np.unravel_index(uncoords, shape)) - uncoords = coords[0]+5*(coords[1]+8*coords[2]) + uncoords = coords[0] + 5 * (coords[1] + 8 * coords[2]) assert_equal( np.ravel_multi_index(coords, shape, order='F'), uncoords) assert_equal(coords, np.unravel_index(uncoords, shape, order='F')) @@ -151,7 +166,7 @@ def test_clipmodes(self): ValueError, np.ravel_multi_index, [5, 1, -1, 2], (4, 3, 7, 12)) def test_writeability(self): - # See 
gh-7269 + # gh-7269 x, y = np.unravel_index([1, 2, 3], (4, 5)) assert_(x.flags.writeable) assert_(y.flags.writeable) @@ -169,7 +184,7 @@ def test_0d(self): def test_empty_array_ravel(self, mode): res = np.ravel_multi_index( np.zeros((3, 0), dtype=np.intp), (2, 1, 0), mode=mode) - assert(res.shape == (0,)) + assert res.shape == (0,) with assert_raises(ValueError): np.ravel_multi_index( @@ -178,12 +193,17 @@ def test_empty_array_ravel(self, mode): def test_empty_array_unravel(self): res = np.unravel_index(np.zeros(0, dtype=np.intp), (2, 1, 0)) # res is a tuple of three empty arrays - assert(len(res) == 3) - assert(all(a.shape == (0,) for a in res)) + assert len(res) == 3 + assert all(a.shape == (0,) for a in res) with assert_raises(ValueError): np.unravel_index([1], (2, 1, 0)) + def test_regression_size_1_index(self): + # actually tests the nditer size one index tracking + # regression test for gh-29690 + np.unravel_index(np.array([[1, 0, 1, 0]], dtype=np.uint32), (4,)) + class TestGrid: def test_basic(self): a = mgrid[-1:1:10j] @@ -193,13 +213,13 @@ def test_basic(self): assert_(a[0] == -1) assert_almost_equal(a[-1], 1) assert_(b[0] == -1) - assert_almost_equal(b[1]-b[0], 0.1, 11) - assert_almost_equal(b[-1], b[0]+19*0.1, 11) - assert_almost_equal(a[1]-a[0], 2.0/9.0, 11) + assert_almost_equal(b[1] - b[0], 0.1, 11) + assert_almost_equal(b[-1], b[0] + 19 * 0.1, 11) + assert_almost_equal(a[1] - a[0], 2.0 / 9.0, 11) def test_linspace_equivalence(self): y, st = np.linspace(2, 10, retstep=True) - assert_almost_equal(st, 8/49.0) + assert_almost_equal(st, 8 / 49.0) assert_array_almost_equal(y, mgrid[2:10:50j], 13) def test_nd(self): @@ -208,16 +228,16 @@ def test_nd(self): assert_(c.shape == (2, 10, 10)) assert_(d.shape == (2, 20, 20)) assert_array_equal(c[0][0, :], -np.ones(10, 'd')) - assert_array_equal(c[1][:, 0], -2*np.ones(10, 'd')) + assert_array_equal(c[1][:, 0], -2 * np.ones(10, 'd')) assert_array_almost_equal(c[0][-1, :], np.ones(10, 'd'), 11) - 
assert_array_almost_equal(c[1][:, -1], 2*np.ones(10, 'd'), 11) + assert_array_almost_equal(c[1][:, -1], 2 * np.ones(10, 'd'), 11) assert_array_almost_equal(d[0, 1, :] - d[0, 0, :], - 0.1*np.ones(20, 'd'), 11) + 0.1 * np.ones(20, 'd'), 11) assert_array_almost_equal(d[1, :, 1] - d[1, :, 0], - 0.2*np.ones(20, 'd'), 11) + 0.2 * np.ones(20, 'd'), 11) def test_sparse(self): - grid_full = mgrid[-1:1:10j, -2:2:10j] + grid_full = mgrid[-1:1:10j, -2:2:10j] grid_sparse = ogrid[-1:1:10j, -2:2:10j] # sparse grids can be made dense by broadcasting @@ -477,7 +497,7 @@ def test_low_dim_handling(self): def test_hetero_shape_handling(self): # raise error with high dimensionality and # shape mismatch - a = np.zeros((3,3,7,3), int) + a = np.zeros((3, 3, 7, 3), int) with assert_raises_regex(ValueError, "equal length"): fill_diagonal(a, 2) @@ -551,3 +571,123 @@ def test_ndindex(): # Make sure 0-sized ndindex works correctly x = list(ndindex(*[0])) assert_equal(x, []) + + +def test_ndindex_zero_dimensions_explicit(): + """Test ndindex produces empty iterators for explicit + zero-length dimensions.""" + assert list(np.ndindex(0, 3)) == [] + assert list(np.ndindex(3, 0, 2)) == [] + assert list(np.ndindex(0)) == [] + + +@pytest.mark.parametrize("bad_shape", [2.5, "2", [2, 3], (2.0, 3)]) +def test_ndindex_non_integer_dimensions(bad_shape): + """Test that non-integer dimensions raise TypeError.""" + with pytest.raises(TypeError): + # Passing invalid_shape_arg directly to ndindex. It will try to use it + # as a dimension and should trigger a TypeError. 
+ list(np.ndindex(bad_shape)) + + +def test_ndindex_stop_iteration_behavior(): + """Test that StopIteration is raised properly after exhaustion.""" + it = np.ndindex(2, 2) + # Exhaust the iterator + list(it) + # Should raise StopIteration on subsequent calls + with pytest.raises(StopIteration): + next(it) + + +def test_ndindex_iterator_independence(): + """Test that each ndindex instance creates independent iterators.""" + shape = (2, 3) + iter1 = np.ndindex(*shape) + iter2 = np.ndindex(*shape) + + next(iter1) + next(iter1) + + assert_equal(next(iter2), (0, 0)) + assert_equal(next(iter1), (0, 2)) + + +def test_ndindex_tuple_vs_args_consistency(): + """Test that ndindex(shape) and ndindex(*shape) produce same results.""" + # Single dimension + assert_equal(list(np.ndindex(5)), list(np.ndindex((5,)))) + + # Multiple dimensions + assert_equal(list(np.ndindex(2, 3)), list(np.ndindex((2, 3)))) + + # Complex shape + shape = (2, 1, 4) + assert_equal(list(np.ndindex(*shape)), list(np.ndindex(shape))) + + +def test_ndindex_against_ndenumerate_compatibility(): + """Test ndindex produces same indices as ndenumerate.""" + for shape in [(1, 2, 3), (3,), (2, 2), ()]: + ndindex_result = list(np.ndindex(shape)) + ndenumerate_indices = [ix for ix, _ in np.ndenumerate(np.zeros(shape))] + assert_array_equal(ndindex_result, ndenumerate_indices) + + +def test_ndindex_multidimensional_correctness(): + """Test ndindex produces correct indices for multidimensional arrays.""" + shape = (2, 1, 3) + result = list(np.ndindex(*shape)) + expected = [ + (0, 0, 0), + (0, 0, 1), + (0, 0, 2), + (1, 0, 0), + (1, 0, 1), + (1, 0, 2), + ] + assert_equal(result, expected) + + +def test_ndindex_large_dimensions_behavior(): + """Test ndindex behaves correctly when initialized with large dimensions.""" + large_shape = (1000, 1000) + iter_obj = np.ndindex(*large_shape) + first_element = next(iter_obj) + assert_equal(first_element, (0, 0)) + + +def test_ndindex_empty_iterator_behavior(): + """Test detailed 
behavior of empty iterators.""" + empty_iter = np.ndindex(0, 5) + assert_equal(list(empty_iter), []) + + empty_iter2 = np.ndindex(3, 0, 2) + with pytest.raises(StopIteration): + next(empty_iter2) + + +@pytest.mark.parametrize( + "negative_shape_arg", + [ + (-1,), # Single negative dimension + (2, -3, 4), # Negative dimension in the middle + (5, 0, -2), # Mix of valid (0) and invalid (negative) dimensions + ], +) +def test_ndindex_negative_dimensions(negative_shape_arg): + """Test that negative dimensions raise ValueError.""" + with pytest.raises(ValueError): + ndindex(negative_shape_arg) + + +def test_ndindex_empty_shape(): + import numpy as np + # ndindex() and ndindex(()) should return a single empty tuple + assert list(np.ndindex()) == [()] + assert list(np.ndindex(())) == [()] + +def test_ndindex_negative_dim_raises(): + # ndindex(-1) should raise a ValueError + with pytest.raises(ValueError): + list(np.ndindex(-1)) diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 38ded1f26cda..0725da3041a7 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -1,35 +1,42 @@ -import sys -import gc import gzip +import locale import os +import re +import sys import threading import time import warnings -import re -import pytest -from pathlib import Path -from tempfile import NamedTemporaryFile -from io import BytesIO, StringIO +import zipfile +from ctypes import c_bool from datetime import datetime -import locale +from io import BytesIO, StringIO from multiprocessing import Value, get_context -from ctypes import c_bool +from pathlib import Path +from tempfile import NamedTemporaryFile + +import pytest import numpy as np import numpy.ma as ma +from numpy._utils import asbytes from numpy.exceptions import VisibleDeprecationWarning -from numpy.lib._iotools import ConverterError, ConversionWarning from numpy.lib import _npyio_impl -from numpy.lib._npyio_impl import recfromcsv, recfromtxt +from numpy.lib._iotools import 
ConversionWarning, ConverterError from numpy.ma.testutils import assert_equal from numpy.testing import ( - assert_warns, assert_, assert_raises_regex, assert_raises, - assert_allclose, assert_array_equal, temppath, tempdir, IS_PYPY, - HAS_REFCOUNT, suppress_warnings, assert_no_gc_cycles, assert_no_warnings, - break_cycles, IS_WASM - ) + HAS_REFCOUNT, + IS_WASM, + assert_, + assert_allclose, + assert_array_equal, + assert_no_gc_cycles, + assert_no_warnings, + assert_raises, + assert_raises_regex, + tempdir, + temppath, +) from numpy.testing._private.utils import requires_memory -from numpy._utils import asbytes class TextIO(BytesIO): @@ -70,7 +77,7 @@ def strptime(s, fmt=None): 2.5. """ - if type(s) == bytes: + if isinstance(s, bytes): s = s.decode("latin1") return datetime(*time.strptime(s, fmt)[:3]) @@ -114,8 +121,6 @@ def roundtrip(self, save_func, *args, **kwargs): arr_reloaded = np.load(load_file, **load_kwds) - self.arr = arr - self.arr_reloaded = arr_reloaded finally: if not isinstance(target_file, BytesIO): target_file.close() @@ -124,6 +129,8 @@ def roundtrip(self, save_func, *args, **kwargs): if not isinstance(arr_reloaded, np.lib.npyio.NpzFile): os.remove(target_file.name) + return arr, arr_reloaded + def check_roundtrips(self, a): self.roundtrip(a) self.roundtrip(a, file_on_disk=True) @@ -175,7 +182,7 @@ def test_record(self): @pytest.mark.slow def test_format_2_0(self): - dt = [(("%d" % i) * 100, float) for i in range(500)] + dt = [(f"{i}" * 100, float) for i in range(500)] a = np.ones(1000, dtype=dt) with warnings.catch_warnings(record=True): warnings.filterwarnings('always', '', UserWarning) @@ -184,30 +191,46 @@ def test_format_2_0(self): class TestSaveLoad(RoundtripTest): def roundtrip(self, *args, **kwargs): - RoundtripTest.roundtrip(self, np.save, *args, **kwargs) - assert_equal(self.arr[0], self.arr_reloaded) - assert_equal(self.arr[0].dtype, self.arr_reloaded.dtype) - assert_equal(self.arr[0].flags.fnc, self.arr_reloaded.flags.fnc) + arr, 
arr_reloaded = RoundtripTest.roundtrip(self, np.save, *args, **kwargs) + assert_equal(arr[0], arr_reloaded) + assert_equal(arr[0].dtype, arr_reloaded.dtype) + assert_equal(arr[0].flags.fnc, arr_reloaded.flags.fnc) class TestSavezLoad(RoundtripTest): def roundtrip(self, *args, **kwargs): - RoundtripTest.roundtrip(self, np.savez, *args, **kwargs) + arr, arr_reloaded = RoundtripTest.roundtrip(self, np.savez, *args, **kwargs) try: - for n, arr in enumerate(self.arr): - reloaded = self.arr_reloaded['arr_%d' % n] - assert_equal(arr, reloaded) - assert_equal(arr.dtype, reloaded.dtype) - assert_equal(arr.flags.fnc, reloaded.flags.fnc) + for n, a in enumerate(arr): + reloaded = arr_reloaded[f'arr_{n}'] + assert_equal(a, reloaded) + assert_equal(a.dtype, reloaded.dtype) + assert_equal(a.flags.fnc, reloaded.flags.fnc) finally: # delete tempfile, must be done here on windows - if self.arr_reloaded.fid: - self.arr_reloaded.fid.close() - os.remove(self.arr_reloaded.fid.name) + if arr_reloaded.fid: + arr_reloaded.fid.close() + os.remove(arr_reloaded.fid.name) + + def test_load_non_npy(self): + """Test loading non-.npy files and name mapping in .npz.""" + with temppath(prefix="numpy_test_npz_load_non_npy_", suffix=".npz") as tmp: + with zipfile.ZipFile(tmp, "w") as npz: + with npz.open("test1.npy", "w") as out_file: + np.save(out_file, np.arange(10)) + with npz.open("test2", "w") as out_file: + np.save(out_file, np.arange(10)) + with npz.open("metadata", "w") as out_file: + out_file.write(b"Name: Test") + with np.load(tmp) as npz: + assert len(npz["test1"]) == 10 + assert len(npz["test1.npy"]) == 10 + assert len(npz["test2"]) == 10 + assert npz["metadata"] == b"Name: Test" - @pytest.mark.skipif(IS_PYPY, reason="Hangs on PyPy") @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform") @pytest.mark.slow + @pytest.mark.thread_unsafe(reason="crashes with low memory") def test_big_arrays(self): L = (1 << 31) + 100000 a = np.empty(L, dtype=np.uint8) @@ -217,7 +240,6 @@ def 
test_big_arrays(self): npfile = np.load(tmp) a = npfile['a'] # Should succeed npfile.close() - del a # Avoid pyflakes unused variable warning. def test_multiple_arrays(self): a = np.array([[1, 2], [3, 4]], float) @@ -234,7 +256,6 @@ def test_named_arrays(self): assert_equal(a, l['file_a']) assert_equal(b, l['file_b']) - def test_tuple_getitem_raises(self): # gh-23748 a = np.array([1, 2, 3]) @@ -252,7 +273,7 @@ def test_BagObj(self): np.savez(c, file_a=a, file_b=b) c.seek(0) l = np.load(c) - assert_equal(sorted(dir(l.f)), ['file_a','file_b']) + assert_equal(sorted(dir(l.f)), ['file_a', 'file_b']) assert_equal(a, l.f.file_a) assert_equal(b, l.f.file_b) @@ -295,7 +316,6 @@ def test_not_closing_opened_fid(self): fp.seek(0) assert_(not fp.closed) - @pytest.mark.slow_pypy def test_closing_fid(self): # Test that issue #1517 (too many opened files) remains closed # It might be a "weak" test since failed to get triggered on @@ -306,20 +326,14 @@ def test_closing_fid(self): np.savez(tmp, data='LOVELY LOAD') # We need to check if the garbage collector can properly close # numpy npz file returned by np.load when their reference count - # goes to zero. Python 3 running in debug mode raises a + # goes to zero. Python running in debug mode raises a # ResourceWarning when file closing is left to the garbage # collector, so we catch the warnings. - with suppress_warnings() as sup: - sup.filter(ResourceWarning) # TODO: specify exact message + with warnings.catch_warnings(): + # TODO: specify exact message + warnings.simplefilter('ignore', ResourceWarning) for i in range(1, 1025): - try: - np.load(tmp)["data"] - except Exception as e: - msg = "Failed to load data from a file: %s" % e - raise AssertionError(msg) - finally: - if IS_PYPY: - gc.collect() + np.load(tmp)["data"] def test_closing_zipfile_after_load(self): # Check that zipfile owns file and can close it. 
This needs to @@ -344,7 +358,7 @@ def test_closing_zipfile_after_load(self): def test_repr_lists_keys(self, count, expected_repr): a = np.array([[1, 2], [3, 4]], float) with temppath(suffix='.npz') as tmp: - np.savez(tmp, *[a]*count) + np.savez(tmp, *[a] * count) l = np.load(tmp) assert repr(l) == expected_repr.format(fname=tmp) l.close() @@ -389,7 +403,7 @@ def test_structured(self): def test_structured_padded(self): # gh-13297 - a = np.array([(1, 2, 3),(4, 5, 6)], dtype=[ + a = np.array([(1, 2, 3), (4, 5, 6)], dtype=[ ('foo', 'i4'), ('bar', 'i4'), ('baz', 'i4') ]) c = BytesIO() @@ -536,7 +550,6 @@ def test_complex_negative_exponent(self): [b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n', b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n']) - def test_custom_writer(self): class CustomWriter(list): @@ -598,13 +611,14 @@ def test_unicode_and_bytes_fmt(self, iotype): np.savetxt(s, a, fmt="%f") s.seek(0) if iotype is StringIO: - assert_equal(s.read(), "%f\n" % 1.) + assert_equal(s.read(), f"{1.:f}\n") else: assert_equal(s.read(), b"%f\n" % 1.) - @pytest.mark.skipif(sys.platform=='win32', reason="files>4GB may not work") + @pytest.mark.skipif(sys.platform == 'win32', reason="files>4GB may not work") @pytest.mark.slow @requires_memory(free_bytes=7e9) + @pytest.mark.thread_unsafe(reason="crashes with low memory") def test_large_zip(self): def check_large_zip(memoryerror_raised): memoryerror_raised.value = False @@ -612,7 +626,7 @@ def check_large_zip(memoryerror_raised): # The test takes at least 6GB of memory, writes a file larger # than 4GB. 
This tests the ``allowZip64`` kwarg to ``zipfile`` test_data = np.asarray([np.random.rand( - np.random.randint(50,100),4) + np.random.randint(50, 100), 4) for i in range(800000)], dtype=object) with tempdir() as tmpdir: np.savez(os.path.join(tmpdir, 'test.npz'), @@ -620,14 +634,14 @@ def check_large_zip(memoryerror_raised): except MemoryError: memoryerror_raised.value = True raise - # run in a subprocess to ensure memory is released on PyPy, see gh-15775 + # run in a subprocess to ensure memory is released # Use an object in shared memory to re-raise the MemoryError exception # in our process if needed, see gh-16889 memoryerror_raised = Value(c_bool) # Since Python 3.8, the default start method for multiprocessing has # been changed from 'fork' to 'spawn' on macOS, causing inconsistency - # on memory sharing model, lead to failed test for check_large_zip + # on memory sharing model, leading to failed test for check_large_zip ctx = get_context('fork') p = ctx.Process(target=check_large_zip, args=(memoryerror_raised,)) p.start() @@ -636,7 +650,8 @@ def check_large_zip(memoryerror_raised): raise MemoryError("Child process raised a MemoryError exception") # -9 indicates a SIGKILL, probably an OOM. 
if p.exitcode == -9: - pytest.xfail("subprocess got a SIGKILL, apparently free memory was not sufficient") + msg = "subprocess got a SIGKILL, apparently free memory was not sufficient" + pytest.xfail(msg) assert p.exitcode == 0 class LoadTxtBase: @@ -815,8 +830,6 @@ def test_comments_multiple(self): a = np.array([[1, 2, 3], [4, 5, 6]], int) assert_array_equal(x, a) - @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_comments_multi_chars(self): c = TextIO() c.write('/* comment\n1,2,3,5\n') @@ -909,13 +922,13 @@ def __index__(self): bogus_idx = 1.5 assert_raises_regex( TypeError, - '^usecols must be.*%s' % type(bogus_idx).__name__, + f'^usecols must be.*{type(bogus_idx).__name__}', np.loadtxt, c, usecols=bogus_idx ) assert_raises_regex( TypeError, - '^usecols must be.*%s' % type(bogus_idx).__name__, + f'^usecols must be.*{type(bogus_idx).__name__}', np.loadtxt, c, usecols=[0, bogus_idx, 0] ) @@ -1031,10 +1044,8 @@ def test_from_float_hex(self): c.seek(0) res = np.loadtxt( c, dtype=dt, converters=float.fromhex, encoding="latin1") - assert_equal(res, tgt, err_msg="%s" % dt) + assert_equal(res, tgt, err_msg=f"{dt}") - @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_default_float_converter_no_default_hex_conversion(self): """ Ensure that fromhex is only used for values with the correct prefix and @@ -1045,8 +1056,6 @@ def test_default_float_converter_no_default_hex_conversion(self): match=".*convert string 'a' to float64 at row 0, column 1"): np.loadtxt(c) - @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_default_float_converter_exception(self): """ Ensure that the exception message raised during failed floating point @@ -1158,7 +1167,7 @@ def test_ndmin_keyword(self): def test_generator_source(self): def count(): for i in range(10): - yield "%d" % i + 
yield f"{i}" res = np.loadtxt(count()) assert_array_equal(res, np.arange(10)) @@ -1233,7 +1242,7 @@ def test_max_rows_with_read_continuation(self): assert_array_equal(x, a) # test continuation x = np.loadtxt(c, dtype=int, delimiter=',') - a = np.array([2,1,4,5], int) + a = np.array([2, 1, 4, 5], int) assert_array_equal(x, a) def test_max_rows_larger(self): @@ -1250,16 +1259,20 @@ def test_max_rows_larger(self): (1, ["ignored\n", "1,2\n", "\n", "3,4\n"]), # "Bad" lines that do not end in newlines: (1, ["ignored", "1,2", "", "3,4"]), - (1, StringIO("ignored\n1,2\n\n3,4")), + (1, lambda: StringIO("ignored\n1,2\n\n3,4")), # Same as above, but do not skip any lines: (0, ["-1,0\n", "1,2\n", "\n", "3,4\n"]), (0, ["-1,0", "1,2", "", "3,4"]), - (0, StringIO("-1,0\n1,2\n\n3,4"))]) + (0, lambda: StringIO("-1,0\n1,2\n\n3,4"))]) def test_max_rows_empty_lines(self, skip, data): + # gh-26718 re-instantiate StringIO objects each time + if callable(data): + data = data() + with pytest.warns(UserWarning, - match=f"Input line 3.*max_rows={3-skip}"): + match=f"Input line 3.*max_rows={3 - skip}"): res = np.loadtxt(data, dtype=int, skiprows=skip, delimiter=",", - max_rows=3-skip) + max_rows=3 - skip) assert_array_equal(res, [[-1, 0], [1, 2], [3, 4]][skip:]) if isinstance(data, StringIO): @@ -1269,7 +1282,7 @@ def test_max_rows_empty_lines(self, skip, data): warnings.simplefilter("error", UserWarning) with pytest.raises(UserWarning): np.loadtxt(data, dtype=int, skiprows=skip, delimiter=",", - max_rows=3-skip) + max_rows=3 - skip) class Testfromregex: def test_record(self): @@ -1323,7 +1336,7 @@ def test_record_unicode(self, path_type): assert_array_equal(x, a) def test_compiled_bytes(self): - regexp = re.compile(b'(\\d)') + regexp = re.compile(br'(\d)') c = BytesIO(b'123') dt = [('num', np.float64)] a = np.array([1, 2, 3], dtype=dt) @@ -1331,7 +1344,7 @@ def test_compiled_bytes(self): assert_array_equal(x, a) def test_bad_dtype_not_structured(self): - regexp = re.compile(b'(\\d)') + 
regexp = re.compile(br'(\d)') c = BytesIO(b'123') with pytest.raises(TypeError, match='structured datatype'): np.fromregex(c, regexp, dtype=np.float64) @@ -1397,7 +1410,7 @@ def test_comments(self): def test_skiprows(self): # Test row skipping control = np.array([1, 2, 3, 5], int) - kwargs = dict(dtype=int, delimiter=',') + kwargs = {"dtype": int, "delimiter": ','} # data = TextIO('comment\n1,2,3,5\n') test = np.genfromtxt(data, skip_header=1, **kwargs) @@ -1408,19 +1421,19 @@ def test_skiprows(self): assert_equal(test, control) def test_skip_footer(self): - data = ["# %i" % i for i in range(1, 6)] + data = [f"# {i}" for i in range(1, 6)] data.append("A, B, C") - data.extend(["%i,%3.1f,%03s" % (i, i, i) for i in range(51)]) + data.extend([f"{i},{i:3.1f},{i:03d}" for i in range(51)]) data[-1] = "99,99" - kwargs = dict(delimiter=",", names=True, skip_header=5, skip_footer=10) + kwargs = {"delimiter": ",", "names": True, "skip_header": 5, "skip_footer": 10} test = np.genfromtxt(TextIO("\n".join(data)), **kwargs) - ctrl = np.array([("%f" % i, "%f" % i, "%f" % i) for i in range(41)], + ctrl = np.array([(f"{i:f}", f"{i:f}", f"{i:f}") for i in range(41)], dtype=[(_, float) for _ in "ABC"]) assert_equal(test, ctrl) def test_skip_footer_with_invalid(self): - with suppress_warnings() as sup: - sup.filter(ConversionWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', ConversionWarning) basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n' # Footer too small to get rid of all invalid values assert_raises(ValueError, np.genfromtxt, @@ -1471,7 +1484,7 @@ def test_auto_dtype(self): np.array([True, False]), ] assert_equal(test.dtype.names, ['f0', 'f1', 'f2', 'f3', 'f4']) for (i, ctrl) in enumerate(control): - assert_equal(test['f%i' % i], ctrl) + assert_equal(test[f'f{i}'], ctrl) def test_auto_dtype_uniform(self): # Tests whether the output dtype can be uniformized @@ -1625,15 +1638,15 @@ def test_unused_converter(self): def test_invalid_converter(self): strip_rand 
= lambda x: float((b'r' in x.lower() and x.split()[-1]) or - (b'r' not in x.lower() and x.strip() or 0.0)) + ((b'r' not in x.lower() and x.strip()) or 0.0)) strip_per = lambda x: float((b'%' in x.lower() and x.split()[0]) or - (b'%' not in x.lower() and x.strip() or 0.0)) + ((b'%' not in x.lower() and x.strip()) or 0.0)) s = TextIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n" "L24U05,12/5/2003, 2 %,1,300, 150.5\r\n" "D02N03,10/10/2004,R 1,,7,145.55") - kwargs = dict( - converters={2: strip_per, 3: strip_rand}, delimiter=",", - dtype=None, encoding="bytes") + kwargs = { + "converters": {2: strip_per, 3: strip_rand}, "delimiter": ",", + "dtype": None, "encoding": "bytes"} assert_raises(ConverterError, np.genfromtxt, s, **kwargs) def test_tricky_converter_bug1666(self): @@ -1656,21 +1669,21 @@ def test_dtype_with_converters(self): control = np.array([2009., 23., 46],) assert_equal(test, control) - @pytest.mark.filterwarnings("ignore:.*recfromcsv.*:DeprecationWarning") def test_dtype_with_converters_and_usecols(self): dstr = "1,5,-1,1:1\n2,8,-1,1:n\n3,3,-2,m:n\n" - dmap = {'1:1':0, '1:n':1, 'm:1':2, 'm:n':3} - dtyp = [('e1','i4'),('e2','i4'),('e3','i2'),('n', 'i1')] + dmap = {'1:1': 0, '1:n': 1, 'm:1': 2, 'm:n': 3} + dtyp = [('e1', 'i4'), ('e2', 'i4'), ('e3', 'i2'), ('n', 'i1')] conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]} - test = recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', - names=None, converters=conv, encoding="bytes") - control = np.rec.array([(1,5,-1,0), (2,8,-1,1), (3,3,-2,3)], dtype=dtyp) + test = np.genfromtxt(TextIO(dstr,), dtype=dtyp, delimiter=',', + names=None, converters=conv, encoding="bytes") + control = np.rec.array([(1, 5, -1, 0), (2, 8, -1, 1), (3, 3, -2, 3)], + dtype=dtyp) assert_equal(test, control) dtyp = [('e1', 'i4'), ('e2', 'i4'), ('n', 'i1')] - test = recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', - usecols=(0, 1, 3), names=None, converters=conv, - encoding="bytes") - control = np.rec.array([(1,5,0), (2,8,1), 
(3,3,3)], dtype=dtyp) + test = np.genfromtxt(TextIO(dstr,), dtype=dtyp, delimiter=',', + usecols=(0, 1, 3), names=None, converters=conv, + encoding="bytes") + control = np.rec.array([(1, 5, 0), (2, 8, 1), (3, 3, 3)], dtype=dtyp) assert_equal(test, control) def test_dtype_with_object(self): @@ -1808,7 +1821,7 @@ def test_usecols_with_named_columns(self): # Test usecols with named columns ctrl = np.array([(1, 3), (4, 6)], dtype=[('a', float), ('c', float)]) data = "1 2 3\n4 5 6" - kwargs = dict(names="a, b, c") + kwargs = {"names": "a, b, c"} test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs) assert_equal(test, ctrl) test = np.genfromtxt(TextIO(data), @@ -1817,8 +1830,8 @@ def test_usecols_with_named_columns(self): def test_empty_file(self): # Test that an empty file raises the proper warning. - with suppress_warnings() as sup: - sup.filter(message="genfromtxt: Empty input file:") + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', message="genfromtxt: Empty input file:") data = TextIO() test = np.genfromtxt(data) assert_equal(test, np.array([])) @@ -1846,7 +1859,7 @@ def test_shaped_dtype(self): def test_withmissing(self): data = TextIO('A,B\n0,1\n2,N/A') - kwargs = dict(delimiter=",", missing_values="N/A", names=True) + kwargs = {"delimiter": ",", "missing_values": "N/A", "names": True} test = np.genfromtxt(data, dtype=None, usemask=True, **kwargs) control = ma.array([(0, 1), (2, -1)], mask=[(False, False), (False, True)], @@ -1864,7 +1877,7 @@ def test_withmissing(self): def test_user_missing_values(self): data = "A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j" - basekwargs = dict(dtype=None, delimiter=",", names=True,) + basekwargs = {"dtype": None, "delimiter": ",", "names": True} mdtype = [('A', int), ('B', float), ('C', complex)] # test = np.genfromtxt(TextIO(data), missing_values="N/A", @@ -1877,7 +1890,8 @@ def test_user_missing_values(self): # basekwargs['dtype'] = mdtype test = np.genfromtxt(TextIO(data), - 
missing_values={0: -9, 1: -99, 2: -999j}, usemask=True, **basekwargs) + missing_values={0: -9, 1: -99, 2: -999j}, + usemask=True, **basekwargs) control = ma.array([(0, 0.0, 0j), (1, -999, 1j), (-9, 2.2, -999j), (3, -99, 3j)], mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)], @@ -1898,11 +1912,11 @@ def test_user_filling_values(self): # Test with missing and filling values ctrl = np.array([(0, 3), (4, -999)], dtype=[('a', int), ('b', int)]) data = "N/A, 2, 3\n4, ,???" - kwargs = dict(delimiter=",", - dtype=int, - names="a,b,c", - missing_values={0: "N/A", 'b': " ", 2: "???"}, - filling_values={0: 0, 'b': 0, 2: -999}) + kwargs = {"delimiter": ",", + "dtype": int, + "names": "a,b,c", + "missing_values": {0: "N/A", 'b': " ", 2: "???"}, + "filling_values": {0: 0, 'b': 0, 2: -999}} test = np.genfromtxt(TextIO(data), **kwargs) ctrl = np.array([(0, 2, 3), (4, 0, -999)], dtype=[(_, int) for _ in "abc"]) @@ -1958,10 +1972,11 @@ def test_invalid_raise(self): data.insert(0, "a, b, c, d, e") mdata = TextIO("\n".join(data)) - kwargs = dict(delimiter=",", dtype=None, names=True) + kwargs = {"delimiter": ",", "dtype": None, "names": True} + def f(): return np.genfromtxt(mdata, invalid_raise=False, **kwargs) - mtest = assert_warns(ConversionWarning, f) + mtest = pytest.warns(ConversionWarning, f) assert_equal(len(mtest), 45) assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde'])) # @@ -1977,11 +1992,12 @@ def test_invalid_raise_with_usecols(self): data.insert(0, "a, b, c, d, e") mdata = TextIO("\n".join(data)) - kwargs = dict(delimiter=",", dtype=None, names=True, - invalid_raise=False) + kwargs = {"delimiter": ",", "dtype": None, "names": True, + "invalid_raise": False} + def f(): return np.genfromtxt(mdata, usecols=(0, 4), **kwargs) - mtest = assert_warns(ConversionWarning, f) + mtest = pytest.warns(ConversionWarning, f) assert_equal(len(mtest), 45) assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae'])) # @@ -1997,9 +2013,9 @@ def 
test_inconsistent_dtype(self): data = ["1, 1, 1, 1, -1.1"] * 50 mdata = TextIO("\n".join(data)) - converters = {4: lambda x: "(%s)" % x.decode()} - kwargs = dict(delimiter=",", converters=converters, - dtype=[(_, int) for _ in 'abcde'], encoding="bytes") + converters = {4: lambda x: f"({x.decode()})"} + kwargs = {"delimiter": ",", "converters": converters, + "dtype": [(_, int) for _ in 'abcde'], "encoding": "bytes"} assert_raises(ValueError, np.genfromtxt, mdata, **kwargs) def test_default_field_format(self): @@ -2049,7 +2065,7 @@ def test_easy_structured_dtype(self): def test_autostrip(self): # Test autostrip data = "01/01/2003 , 1.3, abcde" - kwargs = dict(delimiter=",", dtype=None, encoding="bytes") + kwargs = {"delimiter": ",", "dtype": None, "encoding": "bytes"} with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', VisibleDeprecationWarning) mtest = np.genfromtxt(TextIO(data), **kwargs) @@ -2116,7 +2132,7 @@ def test_replace_space_known_dtype(self): def test_incomplete_names(self): # Test w/ incomplete names data = "A,,C\n0,1,2\n3,4,5" - kwargs = dict(delimiter=",", names=True) + kwargs = {"delimiter": ",", "names": True} # w/ dtype=None ctrl = np.array([(0, 1, 2), (3, 4, 5)], dtype=[(_, int) for _ in ('A', 'f0', 'C')]) @@ -2158,13 +2174,13 @@ def test_names_with_usecols_bug1636(self): def test_fixed_width_names(self): # Test fix-width w/ names data = " A B C\n 0 1 2.3\n 45 67 9." 
- kwargs = dict(delimiter=(5, 5, 4), names=True, dtype=None) + kwargs = {"delimiter": (5, 5, 4), "names": True, "dtype": None} ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)], dtype=[('A', int), ('B', int), ('C', float)]) test = np.genfromtxt(TextIO(data), **kwargs) assert_equal(test, ctrl) # - kwargs = dict(delimiter=5, names=True, dtype=None) + kwargs = {"delimiter": 5, "names": True, "dtype": None} ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)], dtype=[('A', int), ('B', int), ('C', float)]) test = np.genfromtxt(TextIO(data), **kwargs) @@ -2173,7 +2189,7 @@ def test_fixed_width_names(self): def test_filling_values(self): # Test missing values data = b"1, 2, 3\n1, , 5\n0, 6, \n" - kwargs = dict(delimiter=",", dtype=None, filling_values=-999) + kwargs = {"delimiter": ",", "dtype": None, "filling_values": -999} ctrl = np.array([[1, 2, 3], [1, -999, 5], [0, 6, -999]], dtype=int) test = np.genfromtxt(TextIO(data), **kwargs) assert_equal(test, ctrl) @@ -2303,69 +2319,6 @@ def test_utf8_file_nodtype_unicode(self): dtype=np.str_) assert_array_equal(test, ctl) - @pytest.mark.filterwarnings("ignore:.*recfromtxt.*:DeprecationWarning") - def test_recfromtxt(self): - # - data = TextIO('A,B\n0,1\n2,3') - kwargs = dict(delimiter=",", missing_values="N/A", names=True) - test = recfromtxt(data, **kwargs) - control = np.array([(0, 1), (2, 3)], - dtype=[('A', int), ('B', int)]) - assert_(isinstance(test, np.recarray)) - assert_equal(test, control) - # - data = TextIO('A,B\n0,1\n2,N/A') - test = recfromtxt(data, dtype=None, usemask=True, **kwargs) - control = ma.array([(0, 1), (2, -1)], - mask=[(False, False), (False, True)], - dtype=[('A', int), ('B', int)]) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - assert_equal(test.A, [0, 2]) - - @pytest.mark.filterwarnings("ignore:.*recfromcsv.*:DeprecationWarning") - def test_recfromcsv(self): - # - data = TextIO('A,B\n0,1\n2,3') - kwargs = dict(missing_values="N/A", names=True, case_sensitive=True, - encoding="bytes") - 
test = recfromcsv(data, dtype=None, **kwargs) - control = np.array([(0, 1), (2, 3)], - dtype=[('A', int), ('B', int)]) - assert_(isinstance(test, np.recarray)) - assert_equal(test, control) - # - data = TextIO('A,B\n0,1\n2,N/A') - test = recfromcsv(data, dtype=None, usemask=True, **kwargs) - control = ma.array([(0, 1), (2, -1)], - mask=[(False, False), (False, True)], - dtype=[('A', int), ('B', int)]) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - assert_equal(test.A, [0, 2]) - # - data = TextIO('A,B\n0,1\n2,3') - test = recfromcsv(data, missing_values='N/A',) - control = np.array([(0, 1), (2, 3)], - dtype=[('a', int), ('b', int)]) - assert_(isinstance(test, np.recarray)) - assert_equal(test, control) - # - data = TextIO('A,B\n0,1\n2,3') - dtype = [('a', int), ('b', float)] - test = recfromcsv(data, missing_values='N/A', dtype=dtype) - control = np.array([(0, 1), (2, 3)], - dtype=dtype) - assert_(isinstance(test, np.recarray)) - assert_equal(test, control) - - #gh-10394 - data = TextIO('color\n"red"\n"blue"') - test = recfromcsv(data, converters={0: lambda x: x.strip('\"')}) - control = np.array([('red',), ('blue',)], dtype=[('color', (str, 4))]) - assert_equal(test.dtype, control.dtype) - assert_equal(test, control) - def test_max_rows(self): # Test the `max_rows` keyword argument. data = '1 2\n3 4\n5 6\n7 8\n9 10\n' @@ -2393,8 +2346,8 @@ def test_max_rows(self): assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=4) # Test with invalid not raise - with suppress_warnings() as sup: - sup.filter(ConversionWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', ConversionWarning) test = np.genfromtxt(TextIO(data), max_rows=4, invalid_raise=False) control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]) @@ -2454,7 +2407,7 @@ def test_gft_using_generator(self): # gft doesn't work with unicode. 
def count(): for i in range(10): - yield asbytes("%d" % i) + yield asbytes(f"{i}") res = np.genfromtxt(count()) assert_array_equal(res, np.arange(10)) @@ -2476,9 +2429,9 @@ def test_auto_dtype_largeint(self): assert_equal(test.dtype.names, ['f0', 'f1', 'f2']) - assert_(test.dtype['f0'] == float) - assert_(test.dtype['f1'] == np.int64) - assert_(test.dtype['f2'] == np.int_) + assert_(test.dtype['f0'].type is np.float64) + assert_(test.dtype['f1'].type is np.int64) + assert_(test.dtype['f2'].type is np.int_) assert_allclose(test['f0'], 73786976294838206464.) assert_equal(test['f1'], 17179869184) @@ -2538,7 +2491,7 @@ def test_squeeze_scalar(self): @pytest.mark.parametrize("ndim", [0, 1, 2]) def test_ndmin_keyword(self, ndim: int): - # lets have the same behaviour of ndmin as loadtxt + # let's have the same behaviour of ndmin as loadtxt # as they should be the same for non-missing values txt = "42" @@ -2577,9 +2530,6 @@ def test_save_load_memmap(self): assert_array_equal(data, a) # close the mem-mapped file del data - if IS_PYPY: - break_cycles() - break_cycles() @pytest.mark.xfail(IS_WASM, reason="memmap doesn't work correctly") @pytest.mark.parametrize("filename_type", [Path, str]) @@ -2592,9 +2542,6 @@ def test_save_load_memmap_readwrite(self, filename_type): a[0][0] = 5 b[0][0] = 5 del b # closes the file - if IS_PYPY: - break_cycles() - break_cycles() data = np.load(path) assert_array_equal(data, a) @@ -2624,38 +2571,6 @@ def test_genfromtxt(self, filename_type): data = np.genfromtxt(path) assert_array_equal(a, data) - @pytest.mark.parametrize("filename_type", [Path, str]) - @pytest.mark.filterwarnings("ignore:.*recfromtxt.*:DeprecationWarning") - def test_recfromtxt(self, filename_type): - with temppath(suffix='.txt') as path: - path = filename_type(path) - with open(path, 'w') as f: - f.write('A,B\n0,1\n2,3') - - kwargs = dict(delimiter=",", missing_values="N/A", names=True) - test = recfromtxt(path, **kwargs) - control = np.array([(0, 1), (2, 3)], - 
dtype=[('A', int), ('B', int)]) - assert_(isinstance(test, np.recarray)) - assert_equal(test, control) - - @pytest.mark.parametrize("filename_type", [Path, str]) - @pytest.mark.filterwarnings("ignore:.*recfromcsv.*:DeprecationWarning") - def test_recfromcsv(self, filename_type): - with temppath(suffix='.txt') as path: - path = filename_type(path) - with open(path, 'w') as f: - f.write('A,B\n0,1\n2,3') - - kwargs = dict( - missing_values="N/A", names=True, case_sensitive=True - ) - test = recfromcsv(path, dtype=None, **kwargs) - control = np.array([(0, 1), (2, 3)], - dtype=[('A', int), ('B', int)]) - assert_(isinstance(test, np.recarray)) - assert_equal(test, control) - def test_gzip_load(): a = np.random.random((5, 5)) @@ -2708,7 +2623,6 @@ def test_ducktyping(): assert_array_equal(np.load(f), a) - def test_gzip_loadtxt(): # Thanks to another windows brokenness, we can't use # NamedTemporaryFile: a file created from this function cannot be @@ -2773,6 +2687,7 @@ def test_npzfile_dict(): @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +@pytest.mark.thread_unsafe(reason="garbage collector is global state") def test_load_refcount(): # Check that objects returned by np.load are directly freed based on # their refcount, rather than needing the gc to collect them. 
@@ -2790,12 +2705,32 @@ def test_load_refcount(): x = np.loadtxt(TextIO("0 1 2 3"), dtype=dt) assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt)) + def test_load_multiple_arrays_until_eof(): f = BytesIO() np.save(f, 1) np.save(f, 2) f.seek(0) - assert np.load(f) == 1 - assert np.load(f) == 2 + out1 = np.load(f) + assert out1 == 1 + out2 = np.load(f) + assert out2 == 2 with pytest.raises(EOFError): np.load(f) + + +def test_savez_nopickle(): + obj_array = np.array([1, 'hello'], dtype=object) + with temppath(suffix='.npz') as tmp: + np.savez(tmp, obj_array) + + with temppath(suffix='.npz') as tmp: + with pytest.raises(ValueError, match="Object arrays cannot be saved when.*"): + np.savez(tmp, obj_array, allow_pickle=False) + + with temppath(suffix='.npz') as tmp: + np.savez_compressed(tmp, obj_array) + + with temppath(suffix='.npz') as tmp: + with pytest.raises(ValueError, match="Object arrays cannot be saved when.*"): + np.savez_compressed(tmp, obj_array, allow_pickle=False) diff --git a/numpy/lib/tests/test_loadtxt.py b/numpy/lib/tests/test_loadtxt.py index 78c84e491c08..b50478209520 100644 --- a/numpy/lib/tests/test_loadtxt.py +++ b/numpy/lib/tests/test_loadtxt.py @@ -4,26 +4,27 @@ These tests complement those found in `test_io.py`. 
""" -import sys import os -import pytest -from tempfile import NamedTemporaryFile, mkstemp +import sys from io import StringIO +from tempfile import NamedTemporaryFile, mkstemp + +import pytest import numpy as np from numpy.ma.testutils import assert_equal -from numpy.testing import assert_array_equal, HAS_REFCOUNT, IS_PYPY +from numpy.testing import HAS_REFCOUNT, assert_array_equal def test_scientific_notation(): """Test that both 'e' and 'E' are parsed correctly.""" data = StringIO( - ( + "1.0e-1,2.0E1,3.0\n" "4.0e-2,5.0E-1,6.0\n" "7.0e-3,8.0E1,9.0\n" "0.0e-4,1.0E-1,2.0" - ) + ) expected = np.array( [[0.1, 20., 3.0], [0.04, 0.5, 6], [0.007, 80., 9], [0, 0.1, 2]] @@ -39,21 +40,20 @@ def test_comment_multiple_chars(comment): assert_equal(a, [[1.5, 2.5], [3.0, 4.0], [5.5, 6.0]]) -@pytest.fixture def mixed_types_structured(): """ - Fixture providing hetergeneous input data with a structured dtype, along + Function providing heterogeneous input data with a structured dtype, along with the associated structured array. 
""" data = StringIO( - ( + "1000;2.4;alpha;-34\n" "2000;3.1;beta;29\n" "3500;9.9;gamma;120\n" "4090;8.1;delta;0\n" "5001;4.4;epsilon;-99\n" "6543;7.8;omega;-1\n" - ) + ) dtype = np.dtype( [('f0', np.uint16), ('f1', np.float64), ('f2', 'S7'), ('f3', np.int8)] @@ -73,15 +73,14 @@ def mixed_types_structured(): @pytest.mark.parametrize('skiprows', [0, 1, 2, 3]) -def test_structured_dtype_and_skiprows_no_empty_lines( - skiprows, mixed_types_structured): - data, dtype, expected = mixed_types_structured +def test_structured_dtype_and_skiprows_no_empty_lines(skiprows): + data, dtype, expected = mixed_types_structured() a = np.loadtxt(data, dtype=dtype, delimiter=";", skiprows=skiprows) assert_array_equal(a, expected[skiprows:]) -def test_unpack_structured(mixed_types_structured): - data, dtype, expected = mixed_types_structured +def test_unpack_structured(): + data, dtype, expected = mixed_types_structured() a, b, c, d = np.loadtxt(data, dtype=dtype, delimiter=";", unpack=True) assert_array_equal(a, expected["f0"]) @@ -205,8 +204,6 @@ def test_maxrows_no_blank_lines(dtype): assert_equal(res, np.array([["1.5", "2.5"], ["3.0", "4.0"]], dtype=dtype)) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") @pytest.mark.parametrize("dtype", (np.dtype("f8"), np.dtype("i2"))) def test_exception_message_bad_values(dtype): txt = StringIO("1,2\n3,XXX\n5,6") @@ -300,7 +297,7 @@ def test_unicode_with_converter(): def test_converter_with_structured_dtype(): txt = StringIO('1.5,2.5,Abc\n3.0,4.0,dEf\n5.5,6.0,ghI\n') dt = np.dtype([('m', np.int32), ('r', np.float32), ('code', 'U8')]) - conv = {0: lambda s: int(10*float(s)), -1: lambda s: s.upper()} + conv = {0: lambda s: int(10 * float(s)), -1: lambda s: s.upper()} res = np.loadtxt(txt, dtype=dt, delimiter=",", converters=conv) expected = np.array( [(15, 2.5, 'ABC'), (30, 4.0, 'DEF'), (55, 6.0, 'GHI')], dtype=dt @@ -394,8 +391,6 @@ def test_bool(): 
assert_array_equal(res.view(np.uint8), [[1, 0], [1, 1]]) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) @pytest.mark.filterwarnings("error:.*integer via a float.*:DeprecationWarning") def test_integer_signs(dtype): @@ -412,8 +407,6 @@ def test_integer_signs(dtype): np.loadtxt([f"{sign}2\n"], dtype=dtype) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) @pytest.mark.filterwarnings("error:.*integer via a float.*:DeprecationWarning") def test_implicit_cast_float_to_int_fails(dtype): @@ -430,7 +423,7 @@ def test_complex_parsing(dtype, with_parens): res = np.loadtxt(StringIO(s), dtype=dtype, delimiter=",") expected = np.array( - [[1.0-2.5j, 3.75, 7-5j], [4.0, -1900j, 0]], dtype=dtype + [[1.0 - 2.5j, 3.75, 7 - 5j], [4.0, -1900j, 0]], dtype=dtype ) assert_equal(res, expected) @@ -438,7 +431,7 @@ def test_complex_parsing(dtype, with_parens): def test_read_from_generator(): def gen(): for i in range(4): - yield f"{i},{2*i},{i**2}" + yield f"{i},{2 * i},{i**2}" res = np.loadtxt(gen(), dtype=int, delimiter=",") expected = np.array([[0, 0, 0], [1, 2, 1], [2, 4, 4], [3, 6, 9]]) @@ -484,8 +477,6 @@ def conv(x): assert sys.getrefcount(sentinel) == 2 -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_character_not_bytes_compatible(): """Test exception when a character cannot be encoded as 'S'.""" data = StringIO("–") # == \u2013 @@ -503,8 +494,6 @@ def test_invalid_converter(conv): np.loadtxt(StringIO("1 2\n3 4"), converters=conv) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_converters_dict_raises_non_integer_key(): with pytest.raises(TypeError, match="keys of the converters 
dict"): np.loadtxt(StringIO("1 2\n3 4"), converters={"a": int}) @@ -570,8 +559,6 @@ def test_quote_support_default(): assert_array_equal(res, expected) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_quotechar_multichar_error(): txt = StringIO("1,2\n3,4") msg = r".*must be a single unicode character or None" @@ -597,14 +584,14 @@ def test_comment_multichar_error_with_quote(): def test_structured_dtype_with_quotes(): data = StringIO( - ( + "1000;2.4;'alpha';-34\n" "2000;3.1;'beta';29\n" "3500;9.9;'gamma';120\n" "4090;8.1;'delta';0\n" "5001;4.4;'epsilon';-99\n" "6543;7.8;'omega';-1\n" - ) + ) dtype = np.dtype( [('f0', np.uint16), ('f1', np.float64), ('f2', 'S7'), ('f3', np.int8)] @@ -683,11 +670,9 @@ def test_warn_on_skipped_data(skiprows): ("i8", 0x0001020304050607), ("u8", 0x0001020304050607), # The following values are constructed to lead to unique bytes: ("float16", 3.07e-05), - ("float32", 9.2557e-41), ("complex64", 9.2557e-41+2.8622554e-29j), + ("float32", 9.2557e-41), ("complex64", 9.2557e-41 + 2.8622554e-29j), ("float64", -1.758571353180402e-24), - # Here and below, the repr side-steps a small loss of precision in - # complex `str` in PyPy (which is probably fine, as repr works): - ("complex128", repr(5.406409232372729e-29-1.758571353180402e-24j)), + ("complex128", 5.406409232372729e-29 - 1.758571353180402e-24j), # Use integer values that fit into double. Everything else leads to # problems due to longdoubles going via double and decimal strings # causing rounding errors. 
@@ -698,7 +683,7 @@ def test_warn_on_skipped_data(skiprows): def test_byteswapping_and_unaligned(dtype, value, swap): # Try to create "interesting" values within the valid unicode range: dtype = np.dtype(dtype) - data = [f"x,{value}\n"] # repr as PyPy `str` truncates some + data = [f"x,{value}\n"] if swap: dtype = dtype.newbyteorder() full_dt = np.dtype([("a", "S1"), ("b", dtype)], align=False) @@ -728,11 +713,9 @@ def test_unicode_whitespace_stripping_complex(dtype): line = " 1 , 2+3j , ( 4+5j ), ( 6+-7j ) , 8j , ( 9j ) \n" data = [line, line.replace(" ", "\u202F")] res = np.loadtxt(data, dtype=dtype, delimiter=',') - assert_array_equal(res, np.array([[1, 2+3j, 4+5j, 6-7j, 8j, 9j]] * 2)) + assert_array_equal(res, np.array([[1, 2 + 3j, 4 + 5j, 6 - 7j, 8j, 9j]] * 2)) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") @pytest.mark.parametrize("dtype", "FD") @pytest.mark.parametrize("field", ["1 +2j", "1+ 2j", "1+2 j", "1+-+3", "(1j", "(1", "(1+2j", "1+2j)"]) @@ -741,8 +724,6 @@ def test_bad_complex(dtype, field): np.loadtxt([field + "\n"], dtype=dtype, delimiter=",") -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"] + "efgdFDG" + "?") def test_nul_character_error(dtype): @@ -754,8 +735,6 @@ def test_nul_character_error(dtype): np.loadtxt(["1\000"], dtype=dtype, delimiter=",", quotechar='"') -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"] + "efgdFDG" + "?") def test_no_thousands_support(dtype): @@ -826,7 +805,7 @@ def __len__(self): def __getitem__(self, item): if item == 50: raise RuntimeError("Bad things happened!") - return f"{item}, {item+1}" + return f"{item}, {item + 1}" with pytest.raises(RuntimeError, match="Bad things happened!"): 
np.loadtxt(BadSequence(), dtype=int, delimiter=",") @@ -970,12 +949,15 @@ def test_parametric_unit_discovery( """Check that the correct unit (e.g. month, day, second) is discovered from the data when a user specifies a unitless datetime.""" # Unit should be "D" (days) due to last entry - data = [generic_data] * 50000 + [long_datum] + data = [generic_data] * nrows + [long_datum] expected = np.array(data, dtype=expected_dtype) + assert len(data) == nrows + 1 + assert len(data) == len(expected) # file-like path txt = StringIO("\n".join(data)) a = np.loadtxt(txt, dtype=unitless_dtype) + assert len(a) == len(expected) assert a.dtype == expected.dtype assert_equal(a, expected) @@ -983,11 +965,17 @@ def test_parametric_unit_discovery( fd, fname = mkstemp() os.close(fd) with open(fname, "w") as fh: - fh.write("\n".join(data)) + fh.write("\n".join(data) + "\n") + # loading the full file... a = np.loadtxt(fname, dtype=unitless_dtype) - os.remove(fname) + assert len(a) == len(expected) assert a.dtype == expected.dtype assert_equal(a, expected) + # loading half of the file... 
+ a = np.loadtxt(fname, dtype=unitless_dtype, max_rows=int(nrows / 2)) + os.remove(fname) + assert len(a) == int(nrows / 2) + assert_equal(a, expected[:int(nrows / 2)]) def test_str_dtype_unit_discovery_with_converter(): @@ -995,7 +983,7 @@ def test_str_dtype_unit_discovery_with_converter(): expected = np.array( ["spam-a-lot"] * 60000 + ["tis_but_a_scratch"], dtype="U17" ) - conv = lambda s: s.strip("XXX") + conv = lambda s: s.removeprefix("XXX") # file-like path txt = StringIO("\n".join(data)) @@ -1014,8 +1002,6 @@ def test_str_dtype_unit_discovery_with_converter(): assert_equal(a, expected) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_control_character_empty(): with pytest.raises(TypeError, match="Text reading control character must"): np.loadtxt(StringIO("1 2 3"), delimiter="") @@ -1041,5 +1027,51 @@ def test_field_growing_cases(): assert len(res) == 0 for i in range(1, 1024): - res = np.loadtxt(["," * i], delimiter=",", dtype=bytes) - assert len(res) == i+1 + res = np.loadtxt(["," * i], delimiter=",", dtype=bytes, max_rows=10) + assert len(res) == i + 1 + +@pytest.mark.parametrize("nmax", (10000, 50000, 55000, 60000)) +def test_maxrows_exceeding_chunksize(nmax): + # tries to read all of the file, + # or less, equal, greater than _loadtxt_chunksize + file_length = 60000 + + # file-like path + data = ["a 0.5 1"] * file_length + txt = StringIO("\n".join(data)) + res = np.loadtxt(txt, dtype=str, delimiter=" ", max_rows=nmax) + assert len(res) == nmax + + # file-obj path + fd, fname = mkstemp() + os.close(fd) + with open(fname, "w") as fh: + fh.write("\n".join(data)) + res = np.loadtxt(fname, dtype=str, delimiter=" ", max_rows=nmax) + os.remove(fname) + assert len(res) == nmax + +@pytest.mark.parametrize("nskip", (0, 10000, 12345, 50000, 67891, 100000)) +def test_skiprow_exceeding_maxrows_exceeding_chunksize(tmpdir, nskip): + # tries to read a file in chunks by skipping a variable amount 
of lines, + # less, equal, greater than max_rows + file_length = 110000 + data = "\n".join(f"{i} a 0.5 1" for i in range(1, file_length + 1)) + expected_length = min(60000, file_length - nskip) + expected = np.arange(nskip + 1, nskip + 1 + expected_length).astype(str) + + # file-like path + txt = StringIO(data) + res = np.loadtxt(txt, dtype='str', delimiter=" ", skiprows=nskip, max_rows=60000) + assert len(res) == expected_length + # are the right lines read in res? + assert_array_equal(expected, res[:, 0]) + + # file-obj path + tmp_file = tmpdir / "test_data.txt" + tmp_file.write(data) + fname = str(tmp_file) + res = np.loadtxt(fname, dtype='str', delimiter=" ", skiprows=nskip, max_rows=60000) + assert len(res) == expected_length + # are the right lines read in res? + assert_array_equal(expected, res[:, 0]) diff --git a/numpy/lib/tests/test_mixins.py b/numpy/lib/tests/test_mixins.py index 632058763b7d..f0aec156d0ee 100644 --- a/numpy/lib/tests/test_mixins.py +++ b/numpy/lib/tests/test_mixins.py @@ -4,7 +4,6 @@ import numpy as np from numpy.testing import assert_, assert_equal, assert_raises - # NOTE: This class should be kept as an exact copy of the example from the # docstring for NDArrayOperatorsMixin. 
@@ -46,7 +45,7 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): return type(self)(result) def __repr__(self): - return '%s(%r)' % (type(self).__name__, self.value) + return f'{type(self).__name__}({self.value!r})' def wrap_array_like(result): @@ -182,14 +181,14 @@ def test_forward_binary_methods(self): for op in _ALL_BINARY_OPERATORS: expected = wrap_array_like(op(array, 1)) actual = op(array_like, 1) - err_msg = 'failed for operator {}'.format(op) + err_msg = f'failed for operator {op}' _assert_equal_type_and_value(expected, actual, err_msg=err_msg) def test_reflected_binary_methods(self): for op in _ALL_BINARY_OPERATORS: expected = wrap_array_like(op(2, 1)) actual = op(2, ArrayLike(1)) - err_msg = 'failed for operator {}'.format(op) + err_msg = f'failed for operator {op}' _assert_equal_type_and_value(expected, actual, err_msg=err_msg) def test_matmul(self): diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py index da3ee0f2a3dc..6ef86bf84ee0 100644 --- a/numpy/lib/tests/test_nanfunctions.py +++ b/numpy/lib/tests/test_nanfunctions.py @@ -1,17 +1,21 @@ -import warnings -import pytest import inspect +import warnings from functools import partial +import pytest + import numpy as np from numpy._core.numeric import normalize_axis_tuple from numpy.exceptions import AxisError, ComplexWarning from numpy.lib._nanfunctions_impl import _nan_mask, _replace_nan from numpy.testing import ( - assert_, assert_equal, assert_almost_equal, assert_raises, - assert_raises_regex, assert_array_equal, suppress_warnings - ) - + assert_, + assert_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) # Test data _ndat = np.array([[0.6244, np.nan, 0.2692, 0.0116, np.nan, 0.1170], @@ -142,7 +146,7 @@ def test_result_values(self): ], ids=["0d", "2d"]) def test_allnans(self, axis, dtype, array): if axis is not None and array.ndim == 0: - pytest.skip(f"`axis != None` not supported for 0d arrays") + 
pytest.skip("`axis != None` not supported for 0d arrays") array = array.astype(dtype) match = "All-NaN slice encountered" @@ -276,8 +280,9 @@ def test_mutation(self): def test_result_values(self): for f, fcmp in zip(self.nanfuncs, [np.greater, np.less]): for row in _ndat: - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "invalid value encountered in", RuntimeWarning) ind = f(row) val = row[ind] # comparing with NaN is tricky as the result @@ -294,7 +299,7 @@ def test_result_values(self): ], ids=["0d", "2d"]) def test_allnans(self, axis, dtype, array): if axis is not None and array.ndim == 0: - pytest.skip(f"`axis != None` not supported for 0d arrays") + pytest.skip("`axis != None` not supported for 0d arrays") array = array.astype(dtype) for func in self.nanfuncs: @@ -358,7 +363,6 @@ def test_out(self, dtype): assert ret == reference - _TEST_ARRAYS = { "0d": np.array(5), "1d": np.array([127, 39, 93, 87, 46]) @@ -487,10 +491,10 @@ def test_dtype_from_dtype(self): codes = 'efdgFDG' for nf, rf in zip(self.nanfuncs, self.stdfuncs): for c in codes: - with suppress_warnings() as sup: + with warnings.catch_warnings(): if nf in {np.nanstd, np.nanvar} and c in 'FDG': # Giving the warning is a small bug, see gh-8000 - sup.filter(ComplexWarning) + warnings.simplefilter('ignore', ComplexWarning) tgt = rf(mat, dtype=np.dtype(c), axis=1).dtype.type res = nf(mat, dtype=np.dtype(c), axis=1).dtype.type assert_(res is tgt) @@ -504,10 +508,10 @@ def test_dtype_from_char(self): codes = 'efdgFDG' for nf, rf in zip(self.nanfuncs, self.stdfuncs): for c in codes: - with suppress_warnings() as sup: + with warnings.catch_warnings(): if nf in {np.nanstd, np.nanvar} and c in 'FDG': # Giving the warning is a small bug, see gh-8000 - sup.filter(ComplexWarning) + warnings.simplefilter('ignore', ComplexWarning) tgt = rf(mat, dtype=c, axis=1).dtype.type res = nf(mat, dtype=c, 
axis=1).dtype.type assert_(res is tgt) @@ -523,7 +527,7 @@ def test_dtype_from_input(self): mat = np.eye(3, dtype=c) tgt = rf(mat, axis=1).dtype.type res = nf(mat, axis=1).dtype.type - assert_(res is tgt, "res %s, tgt %s" % (res, tgt)) + assert_(res is tgt, f"res {res}, tgt {tgt}") # scalar case tgt = rf(mat, axis=None).dtype.type res = nf(mat, axis=None).dtype.type @@ -575,7 +579,7 @@ class TestNanFunctions_SumProd(SharedNanFunctionsTestsMixin): ], ids=["0d", "2d"]) def test_allnans(self, axis, dtype, array): if axis is not None and array.ndim == 0: - pytest.skip(f"`axis != None` not supported for 0d arrays") + pytest.skip("`axis != None` not supported for 0d arrays") array = array.astype(dtype) for func, identity in zip(self.nanfuncs, [0, 1]): @@ -586,7 +590,7 @@ def test_allnans(self, axis, dtype, array): def test_empty(self): for f, tgt_value in zip([np.nansum, np.nanprod], [0, 1]): mat = np.zeros((0, 3)) - tgt = [tgt_value]*3 + tgt = [tgt_value] * 3 res = f(mat, axis=0) assert_equal(res, tgt) tgt = [] @@ -634,7 +638,7 @@ class TestNanFunctions_CumSumProd(SharedNanFunctionsTestsMixin): ], ids=["0d", "2d"]) def test_allnans(self, axis, dtype, array): if axis is not None and array.ndim == 0: - pytest.skip(f"`axis != None` not supported for 0d arrays") + pytest.skip("`axis != None` not supported for 0d arrays") array = array.astype(dtype) for func, identity in zip(self.nanfuncs, [0, 1]): @@ -645,13 +649,13 @@ def test_allnans(self, axis, dtype, array): def test_empty(self): for f, tgt_value in zip(self.nanfuncs, [0, 1]): mat = np.zeros((0, 3)) - tgt = tgt_value*np.ones((0, 3)) + tgt = tgt_value * np.ones((0, 3)) res = f(mat, axis=0) assert_equal(res, tgt) tgt = mat res = f(mat, axis=1) assert_equal(res, tgt) - tgt = np.zeros((0)) + tgt = np.zeros(0) res = f(mat, axis=None) assert_equal(res, tgt) @@ -679,7 +683,7 @@ def test_result_values(self): tgt = np.cumprod(_ndat_ones, axis=axis) res = np.nancumprod(_ndat, axis=axis) assert_almost_equal(res, tgt) - tgt = 
np.cumsum(_ndat_zeros,axis=axis) + tgt = np.cumsum(_ndat_zeros, axis=axis) res = np.nancumsum(_ndat, axis=axis) assert_almost_equal(res, tgt) @@ -725,16 +729,16 @@ def test_ddof_too_big(self): dsize = [len(d) for d in _rdat] for nf, rf in zip(nanfuncs, stdfuncs): for ddof in range(5): - with suppress_warnings() as sup: - sup.record(RuntimeWarning) - sup.filter(ComplexWarning) + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + warnings.simplefilter('ignore', ComplexWarning) tgt = [ddof >= d for d in dsize] res = nf(_ndat, axis=1, ddof=ddof) assert_equal(np.isnan(res), tgt) - if any(tgt): - assert_(len(sup.log) == 1) - else: - assert_(len(sup.log) == 0) + if any(tgt): + assert_(len(w) == 1) + else: + assert_(len(w) == 0) @pytest.mark.parametrize("axis", [None, 0, 1]) @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) @@ -744,7 +748,7 @@ def test_ddof_too_big(self): ], ids=["0d", "2d"]) def test_allnans(self, axis, dtype, array): if axis is not None and array.ndim == 0: - pytest.skip(f"`axis != None` not supported for 0d arrays") + pytest.skip("`axis != None` not supported for 0d arrays") array = array.astype(dtype) match = "(Degrees of freedom <= 0 for slice.)|(Mean of empty slice)" @@ -826,6 +830,7 @@ def test_nanstd_with_mean_keyword(self): assert std_old.shape == mean.shape assert_almost_equal(std, std_old) + _TIME_UNITS = ( "Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as" ) @@ -855,8 +860,8 @@ def test_keepdims(self): w = np.random.random((4, 200)) * np.array(d.shape)[:, None] w = w.astype(np.intp) d[tuple(w)] = np.nan - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) res = np.nanmedian(d, axis=None, keepdims=True) assert_equal(res.shape, (1, 1, 1, 1)) res = np.nanmedian(d, axis=(0, 1), keepdims=True) @@ -923,7 +928,7 @@ def test_small_large(self): # Randomly set some elements to NaN: w = 
np.random.randint(0, d.size, size=d.size // 5) d.ravel()[w] = np.nan - d[:,0] = 1. # ensure at least one good value + d[:, 0] = 1. # ensure at least one good value # use normal median without nans to compare tgt = [] for x in d: @@ -933,25 +938,23 @@ def test_small_large(self): assert_array_equal(np.nanmedian(d, axis=-1), tgt) def test_result_values(self): - tgt = [np.median(d) for d in _rdat] - res = np.nanmedian(_ndat, axis=1) - assert_almost_equal(res, tgt) + tgt = [np.median(d) for d in _rdat] + res = np.nanmedian(_ndat, axis=1) + assert_almost_equal(res, tgt) @pytest.mark.parametrize("axis", [None, 0, 1]) @pytest.mark.parametrize("dtype", _TYPE_CODES) def test_allnans(self, dtype, axis): mat = np.full((3, 3), np.nan).astype(dtype) - with suppress_warnings() as sup: - sup.record(RuntimeWarning) - + with pytest.warns(RuntimeWarning) as r: output = np.nanmedian(mat, axis=axis) assert output.dtype == mat.dtype assert np.isnan(output).all() if axis is None: - assert_(len(sup.log) == 1) + assert_(len(r) == 1) else: - assert_(len(sup.log) == 3) + assert_(len(r) == 3) # Check scalar scalar = np.array(np.nan).astype(dtype)[()] @@ -960,9 +963,9 @@ def test_allnans(self, dtype, axis): assert np.isnan(output_scalar) if axis is None: - assert_(len(sup.log) == 2) + assert_(len(r) == 2) else: - assert_(len(sup.log) == 4) + assert_(len(r) == 4) def test_empty(self): mat = np.zeros((0, 3)) @@ -990,8 +993,8 @@ def test_extended_axis_invalid(self): assert_raises(ValueError, np.nanmedian, d, axis=(1, 1)) def test_float_special(self): - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) for inf in [np.inf, -np.inf]: a = np.array([[inf, np.nan], [np.nan, np.nan]]) assert_equal(np.nanmedian(a, axis=0), [inf, np.nan]) @@ -1021,7 +1024,7 @@ def test_float_special(self): assert_equal(np.nanmedian(a), -2.5) assert_equal(np.nanmedian(a, axis=-1), [-1., -2.5, inf]) - for i in range(0, 10): + for 
i in range(10): for j in range(1, 10): a = np.array([([np.nan] * i) + ([inf] * j)] * 2) assert_equal(np.nanmedian(a), inf) @@ -1058,8 +1061,8 @@ def test_keepdims(self): w = np.random.random((4, 200)) * np.array(d.shape)[:, None] w = w.astype(np.intp) d[tuple(w)] = np.nan - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) res = np.nanpercentile(d, 90, axis=None, keepdims=True) assert_equal(res.shape, (1, 1, 1, 1)) res = np.nanpercentile(d, 90, axis=(0, 1), keepdims=True) @@ -1115,8 +1118,8 @@ def test_out(self, weighted): "weights": np.ones_like(nan_mat), "method": "inverted_cdf" } else: - w_args = dict() - nan_w_args = dict() + w_args = {} + nan_w_args = {} tgt = np.percentile(mat, 42, axis=1, **w_args) res = np.nanpercentile(nan_mat, 42, axis=1, out=resout, **nan_w_args) assert_almost_equal(res, resout) @@ -1136,15 +1139,16 @@ def test_out(self, weighted): assert_almost_equal(res, tgt) def test_complex(self): - arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='G') + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='G') assert_raises(TypeError, np.nanpercentile, arr_c, 0.5) - arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='D') + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='D') assert_raises(TypeError, np.nanpercentile, arr_c, 0.5) - arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='F') + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='F') assert_raises(TypeError, np.nanpercentile, arr_c, 0.5) @pytest.mark.parametrize("weighted", [False, True]) - def test_result_values(self, weighted): + @pytest.mark.parametrize("use_out", [False, True]) + def test_result_values(self, weighted, use_out): if weighted: percentile = partial(np.percentile, method="inverted_cdf") nanpercentile = partial(np.nanpercentile, method="inverted_cdf") @@ -1160,13 +1164,16 @@ def gen_weights(d): return None tgt = [percentile(d, 28, 
weights=gen_weights(d)) for d in _rdat] - res = nanpercentile(_ndat, 28, axis=1, weights=gen_weights(_ndat)) + out = np.empty_like(tgt) if use_out else None + res = nanpercentile(_ndat, 28, axis=1, + weights=gen_weights(_ndat), out=out) assert_almost_equal(res, tgt) # Transpose the array to fit the output convention of numpy.percentile tgt = np.transpose([percentile(d, (28, 98), weights=gen_weights(d)) for d in _rdat]) + out = np.empty_like(tgt) if use_out else None res = nanpercentile(_ndat, (28, 98), axis=1, - weights=gen_weights(_ndat)) + weights=gen_weights(_ndat), out=out) assert_almost_equal(res, tgt) @pytest.mark.parametrize("axis", [None, 0, 1]) @@ -1177,7 +1184,7 @@ def gen_weights(d): ], ids=["0d", "2d"]) def test_allnans(self, axis, dtype, array): if axis is not None and array.ndim == 0: - pytest.skip(f"`axis != None` not supported for 0d arrays") + pytest.skip("`axis != None` not supported for 0d arrays") array = array.astype(dtype) with pytest.warns(RuntimeWarning, match="All-NaN slice encountered"): @@ -1224,8 +1231,9 @@ def test_multiple_percentiles(self): large_mat[:, :, 3:] *= 2 for axis in [None, 0, 1]: for keepdim in [False, True]: - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "All-NaN slice encountered") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "All-NaN slice encountered", RuntimeWarning) val = np.percentile(mat, perc, axis=axis, keepdims=keepdim) nan_val = np.nanpercentile(nan_mat, perc, axis=axis, keepdims=keepdim) @@ -1242,6 +1250,58 @@ def test_multiple_percentiles(self): np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6) ) + @pytest.mark.parametrize("nan_weight", [0, 1, 2, 3, 1e200]) + def test_nan_value_with_weight(self, nan_weight): + x = [1, np.nan, 2, 3] + result = np.float64(2.0) + q_unweighted = np.nanpercentile(x, 50, method="inverted_cdf") + assert_equal(q_unweighted, result) + + # The weight value at the nan position should not matter. 
+ w = [1.0, nan_weight, 1.0, 1.0] + q_weighted = np.nanpercentile(x, 50, weights=w, method="inverted_cdf") + assert_equal(q_weighted, result) + + @pytest.mark.parametrize("axis", [0, 1, 2]) + def test_nan_value_with_weight_ndim(self, axis): + # Create a multi-dimensional array to test + np.random.seed(1) + x_no_nan = np.random.random(size=(100, 99, 2)) + # Set some places to NaN (not particularly smart) so there is always + # some non-Nan. + x = x_no_nan.copy() + x[np.arange(99), np.arange(99), 0] = np.nan + + p = np.array([[20., 50., 30], [70, 33, 80]]) + + # We just use ones as weights, but replace it with 0 or 1e200 at the + # NaN positions below. + weights = np.ones_like(x) + + # For comparison use weighted normal percentile with nan weights at + # 0 (and no NaNs); not sure this is strictly identical but should be + # sufficiently so (if a percentile lies exactly on a 0 value). + weights[np.isnan(x)] = 0 + p_expected = np.percentile( + x_no_nan, p, axis=axis, weights=weights, method="inverted_cdf") + + p_unweighted = np.nanpercentile( + x, p, axis=axis, method="inverted_cdf") + # The normal and unweighted versions should be identical: + assert_equal(p_unweighted, p_expected) + + weights[np.isnan(x)] = 1e200 # huge value, shouldn't matter + p_weighted = np.nanpercentile( + x, p, axis=axis, weights=weights, method="inverted_cdf") + assert_equal(p_weighted, p_expected) + # Also check with out passed: + out = np.empty_like(p_weighted) + res = np.nanpercentile( + x, p, axis=axis, weights=weights, out=out, method="inverted_cdf") + + assert res is out + assert_equal(out, p_expected) + class TestNanFunctions_Quantile: # most of this is already tested by TestPercentile @@ -1253,7 +1313,7 @@ def test_regression(self, weighted): if weighted: w_args = {"weights": np.ones_like(ar), "method": "inverted_cdf"} else: - w_args = dict() + w_args = {} assert_equal(np.nanquantile(ar, q=0.5, **w_args), np.nanpercentile(ar, q=50, **w_args)) @@ -1273,11 +1333,11 @@ def 
test_basic(self): assert_equal(np.nanquantile(x, 0.5), 1.75) def test_complex(self): - arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='G') + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='G') assert_raises(TypeError, np.nanquantile, arr_c, 0.5) - arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='D') + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='D') assert_raises(TypeError, np.nanquantile, arr_c, 0.5) - arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='F') + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='F') assert_raises(TypeError, np.nanquantile, arr_c, 0.5) def test_no_p_overwrite(self): @@ -1300,7 +1360,7 @@ def test_no_p_overwrite(self): ], ids=["0d", "2d"]) def test_allnans(self, axis, dtype, array): if axis is not None and array.ndim == 0: - pytest.skip(f"`axis != None` not supported for 0d arrays") + pytest.skip("`axis != None` not supported for 0d arrays") array = array.astype(dtype) with pytest.warns(RuntimeWarning, match="All-NaN slice encountered"): @@ -1360,3 +1420,19 @@ def test__replace_nan(): assert result_nan is not arr_nan assert_equal(result_nan, np.array([0, 1, 2])) assert np.isnan(arr_nan[-1]) + + +@pytest.mark.thread_unsafe(reason="memmap is thread-unsafe (gh-29126)") +def test_memmap_takes_fast_route(tmpdir): + # We want memory mapped arrays to take the fast route through nanmax, + # which avoids creating a mask by using fmax.reduce (see gh-28721). So we + # check that on bad input, the error is from fmax (rather than maximum). + a = np.arange(10., dtype=float) + with open(tmpdir.join("data.bin"), "w+b") as fh: + fh.write(a.tobytes()) + mm = np.memmap(fh, dtype=a.dtype, shape=a.shape) + with pytest.raises(ValueError, match="reduction operation fmax"): + np.nanmax(mm, out=np.zeros(2)) + # For completeness, same for nanmin. 
+ with pytest.raises(ValueError, match="reduction operation fmin"): + np.nanmin(mm, out=np.zeros(2)) diff --git a/numpy/lib/tests/test_packbits.py b/numpy/lib/tests/test_packbits.py index 5b07f41c6260..0b0e9d1857c8 100644 --- a/numpy/lib/tests/test_packbits.py +++ b/numpy/lib/tests/test_packbits.py @@ -1,7 +1,10 @@ +from itertools import chain + +import pytest + import numpy as np from numpy.testing import assert_array_equal, assert_equal, assert_raises -import pytest -from itertools import chain + def test_packbits(): # Copied from the docstring. @@ -90,7 +93,6 @@ def test_packbits_large(bitorder): assert_array_equal(b, [128, 128, 128, 31, 30, 28, 24, 16, 0, 0, 0, 199, 198, 196, 192]) - arr = arr.reshape(36, 25) b = np.packbits(arr, axis=0) assert_equal(b.dtype, np.uint8) @@ -196,7 +198,6 @@ def test_packbits_large(bitorder): [ 74, 90, 131, 170, 192], [ 88, 18, 163, 168, 128]]) - # result is the same if input is multiplied with a nonzero value for dtype in 'bBhHiIlLqQ': arr = np.array(a, dtype=dtype) @@ -237,13 +238,12 @@ def test_pack_unpack_order(): b_big = np.unpackbits(a, axis=1, bitorder='big') assert_array_equal(b, b_big) assert_array_equal(a, np.packbits(b_little, axis=1, bitorder='little')) - assert_array_equal(b[:,::-1], b_little) + assert_array_equal(b[:, ::-1], b_little) assert_array_equal(a, np.packbits(b_big, axis=1, bitorder='big')) assert_raises(ValueError, np.unpackbits, a, bitorder='r') assert_raises(TypeError, np.unpackbits, a, bitorder=10) - def test_unpackbits_empty(): a = np.empty((0,), dtype=np.uint8) b = np.unpackbits(a) @@ -282,7 +282,7 @@ def test_unpackbits_large(): assert_array_equal(np.packbits(np.unpackbits(d, axis=0), axis=0), d) -class TestCount(): +class TestCount: x = np.array([ [1, 0, 1, 0, 0, 1, 0], [0, 1, 1, 1, 0, 0, 0], @@ -345,9 +345,9 @@ def test_roundtrip_axis(self, bitorder, count): @pytest.mark.parametrize('kwargs', [ {}, {'count': None}, - {'bitorder' : 'little'}, + {'bitorder': 'little'}, {'bitorder': 'little', 'count': 
None}, - {'bitorder' : 'big'}, + {'bitorder': 'big'}, {'bitorder': 'big', 'count': None}, ]) def test_axis_count(self, kwargs): diff --git a/numpy/lib/tests/test_polynomial.py b/numpy/lib/tests/test_polynomial.py index 5fface63c7d5..a388ab7bace5 100644 --- a/numpy/lib/tests/test_polynomial.py +++ b/numpy/lib/tests/test_polynomial.py @@ -1,10 +1,16 @@ +import pytest + import numpy as np +import numpy.polynomial.polynomial as poly from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_almost_equal, - assert_array_almost_equal, assert_raises, assert_allclose - ) - -import pytest + assert_, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, +) # `poly1d` has some support for `np.bool` and `np.timedelta64`, # but it is limited and they are therefore excluded here @@ -46,16 +52,17 @@ def test_poly1d_math(self): # here we use some simple coeffs to make calculations easier p = np.poly1d([1., 2, 4]) q = np.poly1d([4., 2, 1]) - assert_equal(p/q, (np.poly1d([0.25]), np.poly1d([1.5, 3.75]))) - assert_equal(p.integ(), np.poly1d([1/3, 1., 4., 0.])) - assert_equal(p.integ(1), np.poly1d([1/3, 1., 4., 0.])) + assert_equal(p / q, (np.poly1d([0.25]), np.poly1d([1.5, 3.75]))) + assert_equal(p.integ(), np.poly1d([1 / 3, 1., 4., 0.])) + assert_equal(p.integ(1), np.poly1d([1 / 3, 1., 4., 0.])) p = np.poly1d([1., 2, 3]) q = np.poly1d([3., 2, 1]) assert_equal(p * q, np.poly1d([3., 8., 14., 8., 3.])) assert_equal(p + q, np.poly1d([4., 4., 4.])) assert_equal(p - q, np.poly1d([-2., 0., 2.])) - assert_equal(p ** 4, np.poly1d([1., 8., 36., 104., 214., 312., 324., 216., 81.])) + assert_equal(p ** 4, np.poly1d([1., 8., 36., 104., 214., + 312., 324., 216., 81.])) assert_equal(p(q), np.poly1d([9., 12., 16., 8., 6.])) assert_equal(q(p), np.poly1d([3., 12., 32., 40., 34.])) assert_equal(p.deriv(), np.poly1d([2., 2.])) @@ -104,10 +111,10 @@ def test_poly(self): # Should produce real output for perfect 
conjugates assert_(np.isrealobj(np.poly([+1.082j, +2.613j, -2.613j, -1.082j]))) - assert_(np.isrealobj(np.poly([0+1j, -0+-1j, 1+2j, - 1-2j, 1.+3.5j, 1-3.5j]))) - assert_(np.isrealobj(np.poly([1j, -1j, 1+2j, 1-2j, 1+3j, 1-3.j]))) - assert_(np.isrealobj(np.poly([1j, -1j, 1+2j, 1-2j]))) + assert_(np.isrealobj(np.poly([0 + 1j, -0 + -1j, 1 + 2j, + 1 - 2j, 1. + 3.5j, 1 - 3.5j]))) + assert_(np.isrealobj(np.poly([1j, -1j, 1 + 2j, 1 - 2j, 1 + 3j, 1 - 3.j]))) + assert_(np.isrealobj(np.poly([1j, -1j, 1 + 2j, 1 - 2j]))) assert_(np.isrealobj(np.poly([1j, -1j, 2j, -2j]))) assert_(np.isrealobj(np.poly([1j, -1j]))) assert_(np.isrealobj(np.poly([1, -1]))) @@ -115,12 +122,35 @@ def test_poly(self): assert_(np.iscomplexobj(np.poly([1j, -1.0000001j]))) np.random.seed(42) - a = np.random.randn(100) + 1j*np.random.randn(100) + a = np.random.randn(100) + 1j * np.random.randn(100) assert_(np.isrealobj(np.poly(np.concatenate((a, np.conjugate(a)))))) def test_roots(self): assert_array_equal(np.roots([1, 0, 0]), [0, 0]) + # Testing for larger root values + for i in np.logspace(10, 25, num=1000, base=10): + tgt = np.array([-1, 1, i]) + res = np.sort(np.roots(poly.polyfromroots(tgt)[::-1])) + # Adapting the expected precision according to the root value, + # to take into account numerical calculation error + assert_almost_equal(res, tgt, 14 - int(np.log10(i))) + + for i in np.logspace(10, 25, num=1000, base=10): + tgt = np.array([-1, 1.01, i]) + res = np.sort(np.roots(poly.polyfromroots(tgt)[::-1])) + # Adapting the expected precision according to the root value, + # to take into account numerical calculation error + assert_almost_equal(res, tgt, 14 - int(np.log10(i))) + + @pytest.mark.parametrize("dtyp", [int, np.float32, np.float64]) + def test_roots_dtype(self, dtyp): + coef = np.asarray([1, 0, -1], dtype=dtyp) # x**2 - 1 + r = np.roots(coef) + r.sort() + assert_allclose(r, np.asarray([-1, 1])) + assert r.dtype == {int: np.float64}.get(dtyp, dtyp) + def test_str_leading_zeros(self): p = 
np.poly1d([4, 3, 2, 1]) p[3] = 0 @@ -138,7 +168,7 @@ def test_polyfit(self): x = np.linspace(0, 2, 7) y = np.polyval(c, x) err = [1, -1, 1, -1, 1, -1, 1] - weights = np.arange(8, 1, -1)**2/7.0 + weights = np.arange(8, 1, -1)**2 / 7.0 # Check exception when too few points for variance estimate. Note that # the estimate requires the number of data points to exceed @@ -147,25 +177,25 @@ def test_polyfit(self): [1], [1], deg=0, cov=True) # check 1D case - m, cov = np.polyfit(x, y+err, 2, cov=True) + m, cov = np.polyfit(x, y + err, 2, cov=True) est = [3.8571, 0.2857, 1.619] assert_almost_equal(est, m, decimal=4) val0 = [[ 1.4694, -2.9388, 0.8163], [-2.9388, 6.3673, -2.1224], - [ 0.8163, -2.1224, 1.161 ]] + [ 0.8163, -2.1224, 1.161 ]] # noqa: E202 assert_almost_equal(val0, cov, decimal=4) - m2, cov2 = np.polyfit(x, y+err, 2, w=weights, cov=True) + m2, cov2 = np.polyfit(x, y + err, 2, w=weights, cov=True) assert_almost_equal([4.8927, -1.0177, 1.7768], m2, decimal=4) val = [[ 4.3964, -5.0052, 0.4878], [-5.0052, 6.8067, -0.9089], [ 0.4878, -0.9089, 0.3337]] assert_almost_equal(val, cov2, decimal=4) - m3, cov3 = np.polyfit(x, y+err, 2, w=weights, cov="unscaled") + m3, cov3 = np.polyfit(x, y + err, 2, w=weights, cov="unscaled") assert_almost_equal([4.8927, -1.0177, 1.7768], m3, decimal=4) val = [[ 0.1473, -0.1677, 0.0163], - [-0.1677, 0.228 , -0.0304], + [-0.1677, 0.228 , -0.0304], # noqa: E203 [ 0.0163, -0.0304, 0.0112]] assert_almost_equal(val, cov3, decimal=4) @@ -197,7 +227,7 @@ def test_polyfit(self): assert_allclose(mean.std(), 0.5, atol=0.01) assert_almost_equal(np.sqrt(cov.mean()), 0.5) # If we estimate our errors wrong, no change with scaling: - w = np.full(y.shape[0], 1./0.5) + w = np.full(y.shape[0], 1. 
/ 0.5) mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=w, deg=0, cov=True) assert_allclose(mean.std(), 0.5, atol=0.01) assert_allclose(np.sqrt(cov.mean()), 0.5, atol=0.01) @@ -232,8 +262,8 @@ def test_complex(self): def test_integ_coeffs(self): p = np.poly1d([3, 2, 1]) p2 = p.integ(3, k=[9, 7, 6]) - assert_( - (p2.coeffs == [1/4./5., 1/3./4., 1/2./3., 9/1./2., 7, 6]).all()) + expected = [1 / 4 / 5, 1 / 3 / 4, 1 / 2 / 3, 9 / 1 / 2, 7, 6] + assert_((p2.coeffs == expected).all()) def test_zero_dims(self): try: @@ -265,19 +295,19 @@ def test_zero_poly_dtype(self): def test_poly_eq(self): p = np.poly1d([1, 2, 3]) p2 = np.poly1d([1, 2, 4]) - assert_equal(p == None, False) - assert_equal(p != None, True) + assert_equal(p == None, False) # noqa: E711 + assert_equal(p != None, True) # noqa: E711 assert_equal(p == p, True) assert_equal(p == p2, False) assert_equal(p != p2, True) def test_polydiv(self): b = np.poly1d([2, 6, 6, 1]) - a = np.poly1d([-1j, (1+2j), -(2+1j), 1]) + a = np.poly1d([-1j, (1 + 2j), -(2 + 1j), 1]) q, r = np.polydiv(b, a) assert_equal(q.coeffs.dtype, np.complex128) assert_equal(r.coeffs.dtype, np.complex128) - assert_equal(q*a + r, b) + assert_equal(q * a + r, b) c = [1, 2, 3] d = np.poly1d([1, 2, 3]) diff --git a/numpy/lib/tests/test_recfunctions.py b/numpy/lib/tests/test_recfunctions.py index 98860dfdab77..b9cc266a9363 100644 --- a/numpy/lib/tests/test_recfunctions.py +++ b/numpy/lib/tests/test_recfunctions.py @@ -2,14 +2,27 @@ import numpy as np import numpy.ma as ma +from numpy.lib.recfunctions import ( + append_fields, + apply_along_fields, + assign_fields_by_name, + drop_fields, + find_duplicates, + get_fieldstructure, + join_by, + merge_arrays, + recursive_fill_fields, + rename_fields, + repack_fields, + require_fields, + stack_arrays, + structured_to_unstructured, + unstructured_to_structured, +) from numpy.ma.mrecords import MaskedRecords from numpy.ma.testutils import assert_equal from numpy.testing import assert_, assert_raises -from 
numpy.lib.recfunctions import ( - drop_fields, rename_fields, get_fieldstructure, recursive_fill_fields, - find_duplicates, merge_arrays, append_fields, stack_arrays, join_by, - repack_fields, unstructured_to_structured, structured_to_unstructured, - apply_along_fields, require_fields, assign_fields_by_name) + get_fieldspec = np.lib.recfunctions._get_fieldspec get_names = np.lib.recfunctions.get_names get_names_flat = np.lib.recfunctions.get_names_flat @@ -19,19 +32,14 @@ class TestRecFunctions: # Misc tests - - def setup_method(self): + def test_zip_descr(self): + # Test zip_descr x = np.array([1, 2, ]) y = np.array([10, 20, 30]) z = np.array([('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) - self.data = (w, x, y, z) - - def test_zip_descr(self): - # Test zip_descr - (w, x, y, z) = self.data # Std array test = zip_descr((x, x), flatten=True) @@ -228,19 +236,20 @@ def test_repack_fields(self): dt = np.dtype((np.record, dt)) assert_(repack_fields(dt).type is np.record) + @pytest.mark.thread_unsafe(reason="memmap is thread-unsafe (gh-29126)") def test_structured_to_unstructured(self, tmp_path): a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)]) out = structured_to_unstructured(a) - assert_equal(out, np.zeros((4,5), dtype='f8')) + assert_equal(out, np.zeros((4, 5), dtype='f8')) - b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)], + b = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)], dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')]) out = np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1) - assert_equal(out, np.array([ 3. , 5.5, 9. , 11. ])) + assert_equal(out, np.array([3., 5.5, 9., 11.])) out = np.mean(structured_to_unstructured(b[['x']]), axis=-1) - assert_equal(out, np.array([ 1. , 4. , 7. , 10. ])) + assert_equal(out, np.array([1., 4. 
, 7., 10.])) # noqa: E203 - c = np.arange(20).reshape((4,5)) + c = np.arange(20).reshape((4, 5)) out = unstructured_to_structured(c, a.dtype) want = np.array([( 0, ( 1., 2), [ 3., 4.]), ( 5, ( 6., 7), [ 8., 9.]), @@ -251,15 +260,15 @@ def test_structured_to_unstructured(self, tmp_path): ('c', 'f4', (2,))]) assert_equal(out, want) - d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)], + d = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)], dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')]) assert_equal(apply_along_fields(np.mean, d), - np.array([ 8.0/3, 16.0/3, 26.0/3, 11. ])) + np.array([ 8.0 / 3, 16.0 / 3, 26.0 / 3, 11.])) assert_equal(apply_along_fields(np.mean, d[['x', 'z']]), - np.array([ 3. , 5.5, 9. , 11. ])) + np.array([ 3., 5.5, 9., 11.])) # check that for uniform field dtypes we get a view, not a copy: - d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)], + d = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)], dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'i4')]) dd = structured_to_unstructured(d) ddd = unstructured_to_structured(dd, d.dtype) @@ -310,13 +319,12 @@ def test_structured_to_unstructured(self, tmp_path): res = structured_to_unstructured(arr, dtype=int) assert_equal(res, np.zeros((10, 6), dtype=int)) - # test nested combinations of subarrays and structured arrays, gh-13333 def subarray(dt, shape): return np.dtype((dt, shape)) def structured(*dts): - return np.dtype([('x{}'.format(i), dt) for i, dt in enumerate(dts)]) + return np.dtype([(f'x{i}', dt) for i, dt in enumerate(dts)]) def inspect(dt, dtype=None): arr = np.zeros((), dt) @@ -343,7 +351,7 @@ def inspect(dt, dtype=None): assert_raises(NotImplementedError, structured_to_unstructured, np.zeros(3, dt), dtype=np.int32) assert_raises(NotImplementedError, unstructured_to_structured, - np.zeros((3,0), dtype=np.int32)) + np.zeros((3, 0), dtype=np.int32)) # test supported ndarray subclasses d_plain = np.array([(1, 2), (3, 4)], dtype=[('a', 'i4'), ('b', 'i4')]) @@ 
-389,11 +397,11 @@ def test_field_assignment_by_name(self): assert_equal(require_fields(a, newdt), np.ones(2, newdt)) - b = np.array([(1,2), (3,4)], dtype=newdt) + b = np.array([(1, 2), (3, 4)], dtype=newdt) assign_fields_by_name(a, b, zero_unassigned=False) - assert_equal(a, np.array([(1,1,2),(1,3,4)], dtype=a.dtype)) + assert_equal(a, np.array([(1, 1, 2), (1, 3, 4)], dtype=a.dtype)) assign_fields_by_name(a, b) - assert_equal(a, np.array([(0,1,2),(0,3,4)], dtype=a.dtype)) + assert_equal(a, np.array([(0, 1, 2), (0, 3, 4)], dtype=a.dtype)) # test nested fields a = np.ones(2, dtype=[('a', [('b', 'f8'), ('c', 'u1')])]) @@ -401,9 +409,9 @@ def test_field_assignment_by_name(self): assert_equal(require_fields(a, newdt), np.ones(2, newdt)) b = np.array([((2,),), ((3,),)], dtype=newdt) assign_fields_by_name(a, b, zero_unassigned=False) - assert_equal(a, np.array([((1,2),), ((1,3),)], dtype=a.dtype)) + assert_equal(a, np.array([((1, 2),), ((1, 3),)], dtype=a.dtype)) assign_fields_by_name(a, b) - assert_equal(a, np.array([((0,2),), ((0,3),)], dtype=a.dtype)) + assert_equal(a, np.array([((0, 2),), ((0, 3),)], dtype=a.dtype)) # test unstructured code path for 0d arrays a, b = np.array(3), np.array(0) @@ -437,7 +445,7 @@ def test_masked_flexible(self): class TestMergeArrays: # Test merge_arrays - def setup_method(self): + def _create_arrays(self): x = np.array([1, 2, ]) y = np.array([10, 20, 30]) z = np.array( @@ -445,11 +453,11 @@ def setup_method(self): w = np.array( [(1, (2, 3.0, ())), (4, (5, 6.0, ()))], dtype=[('a', int), ('b', [('ba', float), ('bb', int), ('bc', [])])]) - self.data = (w, x, y, z) + return w, x, y, z def test_solo(self): # Test merge_arrays on a single array. 
- (_, x, _, z) = self.data + _, x, _, z = self._create_arrays() test = merge_arrays(x) control = np.array([(1,), (2,)], dtype=[('f0', int)]) @@ -464,7 +472,7 @@ def test_solo(self): def test_solo_w_flatten(self): # Test merge_arrays on a single array w & w/o flattening - w = self.data[0] + w = self._create_arrays()[0] test = merge_arrays(w, flatten=False) assert_equal(test, w) @@ -476,7 +484,7 @@ def test_solo_w_flatten(self): def test_standard(self): # Test standard & standard # Test merge arrays - (_, x, y, _) = self.data + _, x, y, _ = self._create_arrays() test = merge_arrays((x, y), usemask=False) control = np.array([(1, 10), (2, 20), (-1, 30)], dtype=[('f0', int), ('f1', int)]) @@ -491,7 +499,7 @@ def test_standard(self): def test_flatten(self): # Test standard & flexible - (_, x, _, z) = self.data + _, x, _, z = self._create_arrays() test = merge_arrays((x, z), flatten=True) control = np.array([(1, 'A', 1.), (2, 'B', 2.)], dtype=[('f0', int), ('A', '|S3'), ('B', float)]) @@ -505,7 +513,7 @@ def test_flatten(self): def test_flatten_wflexible(self): # Test flatten standard & nested - (w, x, _, _) = self.data + w, x, _, _ = self._create_arrays() test = merge_arrays((x, w), flatten=True) control = np.array([(1, 1, 2, 3.0), (2, 4, 5, 6.0)], dtype=[('f0', int), @@ -513,16 +521,15 @@ def test_flatten_wflexible(self): assert_equal(test, control) test = merge_arrays((x, w), flatten=False) - controldtype = [('f0', int), - ('f1', [('a', int), - ('b', [('ba', float), ('bb', int), ('bc', [])])])] + f1_descr = [('a', int), ('b', [('ba', float), ('bb', int), ('bc', [])])] + controldtype = [('f0', int), ('f1', f1_descr)] control = np.array([(1., (1, (2, 3.0, ()))), (2, (4, (5, 6.0, ())))], dtype=controldtype) assert_equal(test, control) def test_wmasked_arrays(self): # Test merge_arrays masked arrays - (_, x, _, _) = self.data + x = self._create_arrays()[1] mx = ma.array([1, 2, 3], mask=[1, 0, 0]) test = merge_arrays((x, mx), usemask=True) control = ma.array([(1, 1), (2, 
2), (-1, 3)], @@ -544,7 +551,7 @@ def test_w_singlefield(self): def test_w_shorter_flex(self): # Test merge_arrays w/ a shorter flexndarray. - z = self.data[-1] + z = self._create_arrays()[-1] # Fixme, this test looks incomplete and broken #test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)]))) @@ -552,13 +559,12 @@ def test_w_shorter_flex(self): # dtype=[('A', '|S3'), ('B', float), ('C', int)]) #assert_equal(test, control) - # Hack to avoid pyflakes warnings about unused variables merge_arrays((z, np.array([10, 20, 30]).view([('C', int)]))) np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)], dtype=[('A', '|S3'), ('B', float), ('C', int)]) def test_singlerecord(self): - (_, x, y, z) = self.data + _, x, y, z = self._create_arrays() test = merge_arrays((x[0], y[0], z[0]), usemask=False) control = np.array([(1, 10, ('A', 1))], dtype=[('f0', int), @@ -570,18 +576,18 @@ def test_singlerecord(self): class TestAppendFields: # Test append_fields - def setup_method(self): + def _create_arrays(self): x = np.array([1, 2, ]) y = np.array([10, 20, 30]) z = np.array( [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) - self.data = (w, x, y, z) + return w, x, y, z def test_append_single(self): # Test simple case - (_, x, _, _) = self.data + x = self._create_arrays()[1] test = append_fields(x, 'A', data=[10, 20, 30]) control = ma.array([(1, 10), (2, 20), (-1, 30)], mask=[(0, 0), (0, 0), (1, 0)], @@ -590,7 +596,7 @@ def test_append_single(self): def test_append_double(self): # Test simple case - (_, x, _, _) = self.data + x = self._create_arrays()[1] test = append_fields(x, ('A', 'B'), data=[[10, 20, 30], [100, 200]]) control = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)], mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)], @@ -599,7 +605,7 @@ def test_append_double(self): def test_append_on_flex(self): # Test append_fields on flexible type arrays - z = 
self.data[-1] + z = self._create_arrays()[-1] test = append_fields(z, 'C', data=[10, 20, 30]) control = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)], mask=[(0, 0, 0), (0, 0, 0), (1, 1, 0)], @@ -608,7 +614,7 @@ def test_append_on_flex(self): def test_append_on_nested(self): # Test append_fields on nested fields - w = self.data[0] + w = self._create_arrays()[0] test = append_fields(w, 'C', data=[10, 20, 30]) control = ma.array([(1, (2, 3.0), 10), (4, (5, 6.0), 20), @@ -623,18 +629,18 @@ def test_append_on_nested(self): class TestStackArrays: # Test stack_arrays - def setup_method(self): + def _create_arrays(self): x = np.array([1, 2, ]) y = np.array([10, 20, 30]) z = np.array( [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) - self.data = (w, x, y, z) + return w, x, y, z def test_solo(self): # Test stack_arrays on single arrays - (_, x, _, _) = self.data + x = self._create_arrays()[1] test = stack_arrays((x,)) assert_equal(test, x) assert_(test is x) @@ -645,7 +651,7 @@ def test_solo(self): def test_unnamed_fields(self): # Tests combinations of arrays w/o named fields - (_, x, y, _) = self.data + _, x, y, _ = self._create_arrays() test = stack_arrays((x, x), usemask=False) control = np.array([1, 2, 1, 2]) @@ -661,7 +667,7 @@ def test_unnamed_fields(self): def test_unnamed_and_named_fields(self): # Test combination of arrays w/ & w/o named fields - (_, x, _, z) = self.data + _, x, _, z = self._create_arrays() test = stack_arrays((x, z)) control = ma.array([(1, -1, -1), (2, -1, -1), @@ -693,7 +699,7 @@ def test_unnamed_and_named_fields(self): def test_matching_named_fields(self): # Test combination of arrays w/ matching field names - (_, x, _, z) = self.data + _, x, _, z = self._create_arrays() zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], dtype=[('A', '|S3'), ('B', float), ('C', float)]) test = stack_arrays((z, zz)) @@ 
-721,7 +727,7 @@ def test_matching_named_fields(self): def test_defaults(self): # Test defaults: no exception raised if keys of defaults are not fields. - (_, _, _, z) = self.data + z = self._create_arrays()[-1] zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], dtype=[('A', '|S3'), ('B', float), ('C', float)]) defaults = {'A': '???', 'B': -999., 'C': -9999., 'D': -99999.} @@ -779,8 +785,8 @@ def test_subdtype(self): (b'b', [20.0], 200.0), (b'c', [30.0], 300.0)], mask=[ - (False, [False], True), - (False, [False], True), + (False, [False], True), + (False, [False], True), (False, [False], False), (False, [False], False), (False, [False], False) @@ -793,18 +799,18 @@ def test_subdtype(self): class TestJoinBy: - def setup_method(self): - self.a = np.array(list(zip(np.arange(10), np.arange(50, 60), + def _create_arrays(self): + a = np.array(list(zip(np.arange(10), np.arange(50, 60), np.arange(100, 110))), dtype=[('a', int), ('b', int), ('c', int)]) - self.b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75), + b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75), np.arange(100, 110))), dtype=[('a', int), ('b', int), ('d', int)]) + return a, b def test_inner_join(self): # Basic test of join_by - a, b = self.a, self.b - + a, b = self._create_arrays() test = join_by('a', a, b, jointype='inner') control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101), (7, 57, 67, 107, 102), (8, 58, 68, 108, 103), @@ -814,8 +820,7 @@ def test_inner_join(self): assert_equal(test, control) def test_join(self): - a, b = self.a, self.b - + a, b = self._create_arrays() # Fixme, this test is broken #test = join_by(('a', 'b'), a, b) #control = np.array([(5, 55, 105, 100), (6, 56, 106, 101), @@ -824,8 +829,6 @@ def test_join(self): # dtype=[('a', int), ('b', int), # ('c', int), ('d', int)]) #assert_equal(test, control) - - # Hack to avoid pyflakes unused variable warnings join_by(('a', 'b'), a, b) np.array([(5, 55, 105, 100), (6, 56, 106, 101), (7, 57, 107, 
102), (8, 58, 108, 103), @@ -837,14 +840,13 @@ def test_join_subdtype(self): # tests the bug in https://stackoverflow.com/q/44769632/102441 foo = np.array([(1,)], dtype=[('key', int)]) - bar = np.array([(1, np.array([1,2,3]))], + bar = np.array([(1, np.array([1, 2, 3]))], dtype=[('key', int), ('value', 'uint16', 3)]) res = join_by('key', foo, bar) assert_equal(res, bar.view(ma.MaskedArray)) def test_outer_join(self): - a, b = self.a, self.b - + a, b = self._create_arrays() test = join_by(('a', 'b'), a, b, 'outer') control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), (2, 52, 102, -1), (3, 53, 103, -1), @@ -871,8 +873,7 @@ def test_outer_join(self): assert_equal(test, control) def test_leftouter_join(self): - a, b = self.a, self.b - + a, b = self._create_arrays() test = join_by(('a', 'b'), a, b, 'leftouter') control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), (2, 52, 102, -1), (3, 53, 103, -1), @@ -1021,19 +1022,17 @@ def test_two_keys_two_vars(self): assert_equal(test.dtype, control.dtype) assert_equal(test, control) + class TestAppendFieldsObj: """ Test append_fields with arrays containing objects """ # https://github.com/numpy/numpy/issues/2346 - def setup_method(self): - from datetime import date - self.data = dict(obj=date(2000, 1, 1)) - def test_append_to_objects(self): "Test append_fields when the base array contains objects" - obj = self.data['obj'] + from datetime import date + obj = date(2000, 1, 1) x = np.array([(obj, 1.), (obj, 2.)], dtype=[('A', object), ('B', float)]) y = np.array([10, 20], dtype=int) diff --git a/numpy/lib/tests/test_regression.py b/numpy/lib/tests/test_regression.py index 07b80904b917..8839ed53c506 100644 --- a/numpy/lib/tests/test_regression.py +++ b/numpy/lib/tests/test_regression.py @@ -2,10 +2,13 @@ import numpy as np from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_array_almost_equal, - assert_raises, _assert_valid_refcount, - ) -import pytest + _assert_valid_refcount, + assert_, + 
assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, +) class TestRegression: @@ -70,7 +73,7 @@ def test_poly_div(self): u = np.poly1d([1, 2, 3]) v = np.poly1d([1, 2, 3, 4, 5]) q, r = np.polydiv(u, v) - assert_equal(q*v + r, u) + assert_equal(q * v + r, u) def test_poly_eq(self): # Ticket #554 @@ -132,17 +135,17 @@ def test_ndenumerate_crash(self): def test_large_fancy_indexing(self): # Large enough to fail on 64-bit. nbits = np.dtype(np.intp).itemsize * 8 - thesize = int((2**nbits)**(1.0/5.0)+1) + thesize = int((2**nbits)**(1.0 / 5.0) + 1) def dp(): n = 3 - a = np.ones((n,)*5) + a = np.ones((n,) * 5) i = np.random.randint(0, n, size=thesize) a[np.ix_(i, i, i, i, i)] = 0 def dp2(): n = 3 - a = np.ones((n,)*5) + a = np.ones((n,) * 5) i = np.random.randint(0, n, size=thesize) a[np.ix_(i, i, i, i, i)] @@ -181,7 +184,7 @@ def test_append_fields_dtype_list(self): try: append_fields(base, names, data, dlist) except Exception: - raise AssertionError() + raise AssertionError def test_loadtxt_fields_subarrays(self): # For ticket #1936 @@ -210,12 +213,12 @@ def test_nansum_with_boolean(self): try: np.nansum(a) except Exception: - raise AssertionError() + raise AssertionError def test_py3_compat(self): # gh-2561 # Test if the oldstyle class test is bypassed in python3 - class C(): + class C: """Old-style class in python2, normal class in python3""" pass @@ -223,6 +226,6 @@ class C(): try: np.info(C(), output=out) except AttributeError: - raise AssertionError() + raise AssertionError finally: out.close() diff --git a/numpy/lib/tests/test_shape_base.py b/numpy/lib/tests/test_shape_base.py index 609b77720c86..b0b68dda773c 100644 --- a/numpy/lib/tests/test_shape_base.py +++ b/numpy/lib/tests/test_shape_base.py @@ -1,18 +1,27 @@ -import numpy as np import functools import sys + import pytest +import numpy as np from numpy import ( - apply_along_axis, apply_over_axes, array_split, split, hsplit, dsplit, - vsplit, dstack, column_stack, kron, tile, 
expand_dims, take_along_axis, - put_along_axis - ) + apply_along_axis, + apply_over_axes, + array_split, + column_stack, + dsplit, + dstack, + expand_dims, + hsplit, + kron, + put_along_axis, + split, + take_along_axis, + tile, + vsplit, +) from numpy.exceptions import AxisError -from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_raises, assert_warns - ) - +from numpy.testing import assert_, assert_array_equal, assert_equal, assert_raises IS_64BIT = sys.maxsize > 2**32 @@ -35,9 +44,9 @@ def test_argequivalent(self): a = rand(3, 4, 5) funcs = [ - (np.sort, np.argsort, dict()), - (_add_keepdims(np.min), _add_keepdims(np.argmin), dict()), - (_add_keepdims(np.max), _add_keepdims(np.argmax), dict()), + (np.sort, np.argsort, {}), + (_add_keepdims(np.min), _add_keepdims(np.argmin), {}), + (_add_keepdims(np.max), _add_keepdims(np.argmax), {}), #(np.partition, np.argpartition, dict(kth=2)), ] @@ -63,10 +72,12 @@ def test_invalid(self): assert_raises(IndexError, take_along_axis, a, ai.astype(float), axis=1) # invalid axis assert_raises(AxisError, take_along_axis, a, ai, axis=10) + # invalid indices + assert_raises(ValueError, take_along_axis, a, ai, axis=None) def test_empty(self): """ Test everything is ok with empty results, even with inserted dims """ - a = np.ones((3, 4, 5)) + a = np.ones((3, 4, 5)) ai = np.ones((3, 0, 5), dtype=np.intp) actual = take_along_axis(a, ai, axis=1) @@ -74,7 +85,7 @@ def test_empty(self): def test_broadcast(self): """ Test that non-indexing dimensions are broadcast in both directions """ - a = np.ones((3, 4, 1)) + a = np.ones((3, 4, 1)) ai = np.ones((1, 2, 5), dtype=np.intp) actual = take_along_axis(a, ai, axis=1) assert_equal(actual.shape, (3, 2, 5)) @@ -99,22 +110,39 @@ def test_replace_max(self): def test_broadcast(self): """ Test that non-indexing dimensions are broadcast in both directions """ - a = np.ones((3, 4, 1)) + a = np.ones((3, 4, 1)) ai = np.arange(10, dtype=np.intp).reshape((1, 2, 5)) % 4 
put_along_axis(a, ai, 20, axis=1) assert_equal(take_along_axis(a, ai, axis=1), 20) + def test_invalid(self): + """ Test invalid inputs """ + a_base = np.array([[10, 30, 20], [60, 40, 50]]) + indices = np.array([[0], [1]]) + values = np.array([[2], [1]]) + + # sanity check + a = a_base.copy() + put_along_axis(a, indices, values, axis=0) + assert np.all(a == [[2, 2, 2], [1, 1, 1]]) + + # invalid indices + a = a_base.copy() + with assert_raises(ValueError) as exc: + put_along_axis(a, indices, values, axis=None) + assert "single dimension" in str(exc.exception) + class TestApplyAlongAxis: def test_simple(self): a = np.ones((20, 10), 'd') assert_array_equal( - apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1])) + apply_along_axis(len, 0, a), len(a) * np.ones(a.shape[1])) def test_simple101(self): a = np.ones((10, 101), 'd') assert_array_equal( - apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1])) + apply_along_axis(len, 0, a), len(a) * np.ones(a.shape[1])) def test_3d(self): a = np.arange(27).reshape((3, 3, 3)) @@ -176,14 +204,14 @@ def test_axis_insertion(self, cls=np.ndarray): def f1to2(x): """produces an asymmetric non-square matrix from x""" assert_equal(x.ndim, 1) - return (x[::-1] * x[1:,None]).view(cls) + return (x[::-1] * x[1:, None]).view(cls) - a2d = np.arange(6*3).reshape((6, 3)) + a2d = np.arange(6 * 3).reshape((6, 3)) # 2d insertion along first axis actual = apply_along_axis(f1to2, 0, a2d) expected = np.stack([ - f1to2(a2d[:,i]) for i in range(a2d.shape[1]) + f1to2(a2d[:, i]) for i in range(a2d.shape[1]) ], axis=-1).view(cls) assert_equal(type(actual), type(expected)) assert_equal(actual, expected) @@ -191,18 +219,18 @@ def f1to2(x): # 2d insertion along last axis actual = apply_along_axis(f1to2, 1, a2d) expected = np.stack([ - f1to2(a2d[i,:]) for i in range(a2d.shape[0]) + f1to2(a2d[i, :]) for i in range(a2d.shape[0]) ], axis=0).view(cls) assert_equal(type(actual), type(expected)) assert_equal(actual, expected) # 3d insertion along middle axis - 
a3d = np.arange(6*5*3).reshape((6, 5, 3)) + a3d = np.arange(6 * 5 * 3).reshape((6, 5, 3)) actual = apply_along_axis(f1to2, 1, a3d) expected = np.stack([ np.stack([ - f1to2(a3d[i,:,j]) for i in range(a3d.shape[0]) + f1to2(a3d[i, :, j]) for i in range(a3d.shape[0]) ], axis=0) for j in range(a3d.shape[2]) ], axis=-1).view(cls) @@ -220,15 +248,15 @@ def test_axis_insertion_ma(self): def f1to2(x): """produces an asymmetric non-square matrix from x""" assert_equal(x.ndim, 1) - res = x[::-1] * x[1:,None] - return np.ma.masked_where(res%5==0, res) - a = np.arange(6*3).reshape((6, 3)) + res = x[::-1] * x[1:, None] + return np.ma.masked_where(res % 5 == 0, res) + a = np.arange(6 * 3).reshape((6, 3)) res = apply_along_axis(f1to2, 0, a) assert_(isinstance(res, np.ma.masked_array)) assert_equal(res.ndim, 3) - assert_array_equal(res[:,:,0].mask, f1to2(a[:,0]).mask) - assert_array_equal(res[:,:,1].mask, f1to2(a[:,1]).mask) - assert_array_equal(res[:,:,2].mask, f1to2(a[:,2]).mask) + assert_array_equal(res[:, :, 0].mask, f1to2(a[:, 0]).mask) + assert_array_equal(res[:, :, 1].mask, f1to2(a[:, 1]).mask) + assert_array_equal(res[:, :, 2].mask, f1to2(a[:, 2]).mask) def test_tuple_func1d(self): def sample_1d(x): @@ -239,7 +267,7 @@ def sample_1d(x): def test_empty(self): # can't apply_along_axis when there's no chance to call the function def never_call(x): - assert_(False) # should never be reached + assert_(False) # should never be reached a = np.empty((0, 0)) assert_raises(ValueError, np.apply_along_axis, never_call, 0, a) @@ -310,7 +338,7 @@ def test_repeated_axis(self): def test_subclasses(self): a = np.arange(10).reshape((2, 5)) - a = np.ma.array(a, mask=a%3 == 0) + a = np.ma.array(a, mask=a % 3 == 0) expanded = np.expand_dims(a, axis=1) assert_(isinstance(expanded, np.ma.MaskedArray)) @@ -494,7 +522,7 @@ def test_2D_arrays(self): def test_generator(self): with pytest.raises(TypeError, match="arrays to stack must be"): - column_stack((np.arange(3) for _ in range(2))) + 
column_stack(np.arange(3) for _ in range(2)) class TestDstack: @@ -531,7 +559,7 @@ def test_2D_array2(self): def test_generator(self): with pytest.raises(TypeError, match="arrays to stack must be"): - dstack((np.arange(3) for _ in range(2))) + dstack(np.arange(3) for _ in range(2)) # array_split has more comprehensive test of splitting. @@ -714,8 +742,8 @@ def test_kron_ma(self): def test_kron_shape(self, shape_a, shape_b): a = np.ones(shape_a) b = np.ones(shape_b) - normalised_shape_a = (1,) * max(0, len(shape_b)-len(shape_a)) + shape_a - normalised_shape_b = (1,) * max(0, len(shape_a)-len(shape_b)) + shape_b + normalised_shape_a = (1,) * max(0, len(shape_b) - len(shape_a)) + shape_a + normalised_shape_b = (1,) * max(0, len(shape_a) - len(shape_b)) + shape_b expected_shape = np.multiply(normalised_shape_a, normalised_shape_b) k = np.kron(a, b) @@ -781,8 +809,5 @@ def test_basic(self): # Utility def compare_results(res, desired): """Compare lists of arrays.""" - if len(res) != len(desired): - raise ValueError("Iterables have different lengths") - # See also PEP 618 for Python 3.10 - for x, y in zip(res, desired): + for x, y in zip(res, desired, strict=False): assert_array_equal(x, y) diff --git a/numpy/lib/tests/test_stride_tricks.py b/numpy/lib/tests/test_stride_tricks.py index 3cbebbdd552e..c9a475b392c3 100644 --- a/numpy/lib/tests/test_stride_tricks.py +++ b/numpy/lib/tests/test_stride_tricks.py @@ -1,14 +1,22 @@ +import pytest + import numpy as np from numpy._core._rational_tests import rational -from numpy.testing import ( - assert_equal, assert_array_equal, assert_raises, assert_, - assert_raises_regex, assert_warns, - ) from numpy.lib._stride_tricks_impl import ( - as_strided, broadcast_arrays, _broadcast_shape, broadcast_to, - broadcast_shapes, sliding_window_view, - ) -import pytest + _broadcast_shape, + as_strided, + broadcast_arrays, + broadcast_shapes, + broadcast_to, + sliding_window_view, +) +from numpy.testing import ( + assert_, + 
assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) def assert_shapes_correct(input_shapes, expected_shape): @@ -219,7 +227,7 @@ def test_same_as_ufunc(): ] for input_shapes, expected_shape in data: assert_same_as_ufunc(input_shapes[0], input_shapes[1], - "Shapes: %s %s" % (input_shapes[0], input_shapes[1])) + f"Shapes: {input_shapes[0]} {input_shapes[1]}") # Reverse the input shapes since broadcasting should be symmetric. assert_same_as_ufunc(input_shapes[1], input_shapes[0]) # Try them transposed, too. @@ -373,7 +381,7 @@ def test_as_strided(): a['num'] = np.arange(1, 5) a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) expected_num = [[1, 2, 3, 4]] * 3 - expected_obj = [[None]*4]*3 + expected_obj = [[None] * 4] * 3 assert_equal(a_view.dtype, dt) assert_array_equal(expected_num, a_view['num']) assert_array_equal(expected_obj, a_view['obj']) @@ -409,7 +417,7 @@ def test_1d(self): def test_2d(self): i, j = np.ogrid[:3, :4] - arr = 10*i + j + arr = 10 * i + j shape = (2, 2) arr_view = sliding_window_view(arr, shape) expected = np.array([[[[0, 1], [10, 11]], @@ -422,7 +430,7 @@ def test_2d(self): def test_2d_with_axis(self): i, j = np.ogrid[:3, :4] - arr = 10*i + j + arr = 10 * i + j arr_view = sliding_window_view(arr, 3, 0) expected = np.array([[[0, 10, 20], [1, 11, 21], @@ -432,7 +440,7 @@ def test_2d_with_axis(self): def test_2d_repeated_axis(self): i, j = np.ogrid[:3, :4] - arr = 10*i + j + arr = 10 * i + j arr_view = sliding_window_view(arr, (2, 3), (1, 1)) expected = np.array([[[[0, 1, 2], [1, 2, 3]]], @@ -444,7 +452,7 @@ def test_2d_repeated_axis(self): def test_2d_without_axis(self): i, j = np.ogrid[:4, :4] - arr = 10*i + j + arr = 10 * i + j shape = (2, 3) arr_view = sliding_window_view(arr, shape) expected = np.array([[[[0, 1, 2], [10, 11, 12]], @@ -457,7 +465,7 @@ def test_2d_without_axis(self): def test_errors(self): i, j = np.ogrid[:4, :4] - arr = 10*i + j + arr = 10 * i + j with pytest.raises(ValueError, 
match='cannot contain negative values'): sliding_window_view(arr, (-1, 3)) with pytest.raises( @@ -584,9 +592,9 @@ def test_writeable(): for array_is_broadcast, result in zip(is_broadcast, results): # This will change to False in a future version if array_is_broadcast: - with assert_warns(FutureWarning): + with pytest.warns(FutureWarning): assert_equal(result.flags.writeable, True) - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): result[:] = 0 # Warning not emitted, writing to the array resets it assert_equal(result.flags.writeable, True) @@ -645,3 +653,231 @@ def test_reference_types(): actual, _ = broadcast_arrays(input_array, np.ones(3)) assert_array_equal(expected, actual) + + +@pytest.mark.parametrize( + "dtype", + [ + np.int8, + np.int16, + np.int32, + np.int64, + np.uint8, + np.uint16, + np.uint32, + np.uint64, + np.float32, + np.float64, + np.complex64, + np.complex128, + ], +) +def test_as_strided_checked_different_dtypes(dtype): + """Test as_strided with check_bounds=True with different dtypes.""" + x = np.arange(10, dtype=dtype) + y = as_strided(x, shape=(5,), strides=(x.itemsize * 2,), check_bounds=True) + assert y.shape == (5,) + assert y.dtype == dtype + + +@pytest.mark.parametrize( + "size,view_size,stride_mult", + [ + (10, 5, 1), # Contiguous view + (10, 5, 2), # Every other element + (20, 10, 2), # Every other element + (100, 10, 10), # Every 10th element + ], +) +def test_as_strided_checked_1d_positive_strides(size, view_size, stride_mult): + """Test 1D arrays with positive strides.""" + x = np.arange(size, dtype=np.int64) + itemsize = x.itemsize + y = as_strided( + x, shape=(view_size,), strides=(itemsize * stride_mult,), check_bounds=True + ) + assert y.shape == (view_size,) + # Verify data correctness + expected = x[::stride_mult][:view_size] + assert_array_equal(y, expected) + + +@pytest.mark.parametrize( + "shape,window_shape", + [ + ((10,), (3,)), + ((20,), (5,)), + ((100,), (10,)), + ], +) +def 
test_as_strided_checked_sliding_window_1d(shape, window_shape): + """Test sliding window views in 1D.""" + x = np.arange(shape[0], dtype=np.int64) + itemsize = x.itemsize + n_windows = shape[0] - window_shape[0] + 1 + view_shape = (n_windows, window_shape[0]) + view_strides = (itemsize, itemsize) + + y = as_strided(x, shape=view_shape, strides=view_strides, check_bounds=True) + assert y.shape == view_shape + # Check first and last windows + assert_array_equal(y[0], x[: window_shape[0]]) + assert_array_equal(y[-1], x[-window_shape[0] :]) + + +@pytest.mark.parametrize( + "shape", + [ + (3, 4), + (5, 6), + (10, 10), + ], +) +def test_as_strided_checked_2d_default_strides(shape): + """Test 2D arrays with default strides.""" + x = np.arange(np.prod(shape), dtype=np.int64).reshape(shape) + y = as_strided(x, check_bounds=True) # Should use default shape and strides + assert_array_equal(y, x) + + +@pytest.mark.parametrize("size", [0, 1, 2, 10, 100]) +def test_as_strided_checked_zero_stride_broadcasting(size): + """Test zero strides (broadcasting a single value).""" + x = np.array([42], dtype=np.int64) + y = as_strided(x, shape=(size,), strides=(0,), check_bounds=True) + assert y.shape == (size,) + if size > 0: + assert_(np.all(y == 42)) + + +@pytest.mark.parametrize( + "size,shape,strides", + [ + # Strides too large + (10, (5,), (32,)), + (10, (10,), (16,)), + (20, (15,), (16,)), + # Shape too large for strides + (10, (20,), (8,)), + (10, (100,), (8,)), + # 2D out of bounds cases + (20, (5, 5), (80, 8)), + (20, (3, 10), (64, 8)), + # Negative strides that go before array start + (10, (5,), (-8,)), + (10, (10,), (-8,)), + (20, (5,), (-16,)), + # ND negative strides + (10, (2, 3, 4), (96, 32, -8)), + (20, (3, 4), (64, -8)), + (30, (2, 3, 4), (-96, 32, 8)), + ], +) +def test_as_strided_checked_out_of_bounds_positive_strides(size, shape, strides): + """Test that out-of-bounds positive strides raise ValueError.""" + x = np.arange(size, dtype=np.int64) + with 
pytest.raises(ValueError, match="out of bounds"): + as_strided(x, shape=shape, strides=strides, check_bounds=True) + + +def test_as_strided_checked_view_of_larger_array(): + """Test as_strided + + - with check_bounds=True + - considers the base array bounds, not just the view. + + """ + a = np.arange(1000, dtype=np.int64) + + b = a[:2] + + # This should succeed because the underlying array has enough memory + y = as_strided(b, shape=(2,), strides=(400,), check_bounds=True) + assert_equal(y.shape, (2,)) + assert_equal(y[0], 0) + assert_equal(y[1], 50) + + +def test_as_strided_checked_view_with_offset(): + """Test as_strided + + - with check_bounds=True + - on a view that doesn't start at the beginning. + """ + a = np.arange(1000, dtype=np.int64) + + b = a[100:102] + + y = as_strided(b, shape=(2,), strides=(80,), check_bounds=True) + assert_equal(y.shape, (2,)) + assert_equal(y[0], 100) + assert_equal(y[1], 110) + + +def test_as_strided_checked_view_out_of_bounds_negative(): + """Test that negative strides on a view correctly detect out of bounds.""" + a = np.arange(1000, dtype=np.int64) + + b = a[5:7] + + with pytest.raises(ValueError, match="out of bounds"): + as_strided(b, shape=(2,), strides=(-48,), check_bounds=True) + + +def test_as_strided_checked_view_out_of_bounds_positive(): + """Test that positive strides on a view correctly detect out of bounds.""" + a = np.arange(100, dtype=np.int64) + + b = a[95:97] + + with pytest.raises(ValueError, match="out of bounds"): + as_strided(b, shape=(2,), strides=(200,), check_bounds=True) + + +def test_as_strided_checked_nested_views(): + """Test as_strided with check_bounds=True on a view of a view.""" + a = np.arange(1000, dtype=np.int64) + b = a[10:100] + c = b[5:10] + + y = as_strided(c, shape=(2,), strides=(160,), check_bounds=True) + assert_equal(y.shape, (2,)) + assert_equal(y[0], 15) + assert_equal(y[1], 35) + + +def test_as_strided_checked_sliced_array(): + """Test various slicing scenarios.""" + a = 
np.arange(200, dtype=np.int64) + + b = a[10:20] + y = as_strided(b, shape=(5,), strides=(16,), check_bounds=True) + assert_equal(y.shape, (5,)) + + c = a[::2] + y = as_strided(c, shape=(10,), strides=(16,), check_bounds=True) + assert_equal(y.shape, (10,)) + + +@pytest.mark.parametrize( + "start,stop,stride_bytes,should_pass", + [ + (0, 10, 552, True), + (0, 10, 552 + 1, True), + (90, 95, 72, True), + (90, 95, 72 + 1, False), + (5, 7, -40, True), + (5, 7, -40 - 1, False), + ], +) +def test_as_strided_checked_view_parametrized(start, stop, stride_bytes, should_pass): + """Parametrized test for various view and stride combinations.""" + a = np.arange(100, dtype=np.int64) + b = a[start:stop] + + if should_pass: + y = as_strided(b, shape=(2,), strides=(stride_bytes,), check_bounds=True) + assert_equal(y.shape, (2,)) + else: + with pytest.raises(ValueError, match="out of bounds"): + as_strided(b, shape=(2,), strides=(stride_bytes,), check_bounds=True) diff --git a/numpy/lib/tests/test_twodim_base.py b/numpy/lib/tests/test_twodim_base.py index eb008c6002c8..eb6aa69a443c 100644 --- a/numpy/lib/tests/test_twodim_base.py +++ b/numpy/lib/tests/test_twodim_base.py @@ -1,18 +1,36 @@ """Test functions for matrix module """ -from numpy.testing import ( - assert_equal, assert_array_equal, assert_array_max_ulp, - assert_array_almost_equal, assert_raises, assert_ -) +import pytest + +import numpy as np from numpy import ( - arange, add, fliplr, flipud, zeros, ones, eye, array, diag, histogram2d, - tri, mask_indices, triu_indices, triu_indices_from, tril_indices, - tril_indices_from, vander, + add, + arange, + array, + diag, + eye, + fliplr, + flipud, + histogram2d, + mask_indices, + ones, + tri, + tril_indices, + tril_indices_from, + triu_indices, + triu_indices_from, + vander, + zeros, +) +from numpy.testing import ( + assert_, + assert_array_almost_equal, + assert_array_equal, + assert_array_max_ulp, + assert_equal, + assert_raises, ) -import numpy as np - -import pytest def 
get_mat(n): @@ -220,7 +238,7 @@ def test_asym(self): [1, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 1]]) - assert_array_almost_equal(H, answer/8., 3) + assert_array_almost_equal(H, answer / 8., 3) assert_array_equal(xed, np.linspace(0, 6, 7)) assert_array_equal(yed, np.linspace(0, 5, 6)) @@ -231,7 +249,7 @@ def test_density(self): x, y, [[1, 2, 3, 5], [1, 2, 3, 5]], density=True) answer = array([[1, 1, .5], [1, 1, .5], - [.5, .5, .25]])/9. + [.5, .5, .25]]) / 9. assert_array_almost_equal(H, answer, 3) def test_all_outliers(self): @@ -290,12 +308,12 @@ def __array_function__(self, function, types, args, kwargs): r = histogram2d(xy, s_d) assert_(r == ((ShouldDispatch,), (xy, s_d), {})) r = histogram2d(xy, xy, bins=s_d) - assert_(r, ((ShouldDispatch,), (xy, xy), dict(bins=s_d))) + assert_(r, ((ShouldDispatch,), (xy, xy), {'bins': s_d})) r = histogram2d(xy, xy, bins=[s_d, 5]) - assert_(r, ((ShouldDispatch,), (xy, xy), dict(bins=[s_d, 5]))) + assert_(r, ((ShouldDispatch,), (xy, xy), {'bins': [s_d, 5]})) assert_raises(Exception, histogram2d, xy, xy, bins=[s_d]) r = histogram2d(xy, xy, weights=s_d) - assert_(r, ((ShouldDispatch,), (xy, xy), dict(weights=s_d))) + assert_(r, ((ShouldDispatch,), (xy, xy), {'weights': s_d})) @pytest.mark.parametrize(("x_len", "y_len"), [(10, 11), (20, 19)]) def test_bad_length(self, x_len, y_len): @@ -521,7 +539,7 @@ def test_basic(self): m = powers.shape[1] for n in range(6): v = vander(c, N=n) - assert_array_equal(v, powers[:, m-n:m]) + assert_array_equal(v, powers[:, m - n:m]) def test_dtypes(self): c = array([11, -12, 13], dtype=np.int8) @@ -531,10 +549,10 @@ def test_dtypes(self): [169, 13, 1]]) assert_array_equal(v, expected) - c = array([1.0+1j, 1.0-1j]) + c = array([1.0 + 1j, 1.0 - 1j]) v = vander(c, N=3) - expected = np.array([[2j, 1+1j, 1], - [-2j, 1-1j, 1]]) + expected = np.array([[2j, 1 + 1j, 1], + [-2j, 1 - 1j, 1]]) # The data is floating point, but the values are small integers, # so assert_array_equal *should* be safe here 
(rather than, say, # assert_array_almost_equal). diff --git a/numpy/lib/tests/test_type_check.py b/numpy/lib/tests/test_type_check.py index e8e11c4257c3..447c2c36c192 100644 --- a/numpy/lib/tests/test_type_check.py +++ b/numpy/lib/tests/test_type_check.py @@ -1,11 +1,17 @@ import numpy as np from numpy import ( - common_type, mintypecode, isreal, iscomplex, isposinf, isneginf, - nan_to_num, isrealobj, iscomplexobj, real_if_close - ) -from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_raises - ) + common_type, + iscomplex, + iscomplexobj, + isneginf, + isposinf, + isreal, + isrealobj, + mintypecode, + nan_to_num, + real_if_close, +) +from numpy.testing import assert_, assert_array_equal, assert_equal def assert_all(x): @@ -18,8 +24,8 @@ def test_basic(self): af16 = np.array([[1, 2], [3, 4]], dtype=np.float16) af32 = np.array([[1, 2], [3, 4]], dtype=np.float32) af64 = np.array([[1, 2], [3, 4]], dtype=np.float64) - acs = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.complex64) - acd = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.complex128) + acs = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.complex64) + acd = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.complex128) assert_(common_type(ai32) == np.float64) assert_(common_type(af16) == np.float16) assert_(common_type(af32) == np.float32) @@ -40,10 +46,10 @@ def test_default_1(self): def test_default_2(self): for itype in '1bcsuwil': - assert_equal(mintypecode(itype+'f'), 'f') - assert_equal(mintypecode(itype+'d'), 'd') - assert_equal(mintypecode(itype+'F'), 'F') - assert_equal(mintypecode(itype+'D'), 'D') + assert_equal(mintypecode(itype + 'f'), 'f') + assert_equal(mintypecode(itype + 'd'), 'd') + assert_equal(mintypecode(itype + 'F'), 'F') + assert_equal(mintypecode(itype + 'D'), 'D') assert_equal(mintypecode('ff'), 'f') assert_equal(mintypecode('fd'), 'd') assert_equal(mintypecode('fF'), 'F') @@ -105,7 +111,7 @@ def test_real(self): assert_(not isinstance(out, 
np.ndarray)) def test_cmplx(self): - y = np.random.rand(10,)+1j*np.random.rand(10,) + y = np.random.rand(10,) + 1j * np.random.rand(10,) assert_array_equal(y.real, np.real(y)) y = np.array(1 + 1j) @@ -136,7 +142,7 @@ def test_real(self): assert_(not isinstance(out, np.ndarray)) def test_cmplx(self): - y = np.random.rand(10,)+1j*np.random.rand(10,) + y = np.random.rand(10,) + 1j * np.random.rand(10,) assert_array_equal(y.imag, np.imag(y)) y = np.array(1 + 1j) @@ -186,10 +192,10 @@ def test_basic(self): def test_scalar(self): assert_(not iscomplexobj(1.0)) - assert_(iscomplexobj(1+0j)) + assert_(iscomplexobj(1 + 0j)) def test_list(self): - assert_(iscomplexobj([3, 1+0j, True])) + assert_(iscomplexobj([3, 1 + 0j, True])) assert_(not iscomplexobj([3, 1, True])) def test_duck(self): @@ -205,6 +211,7 @@ def test_pandas_duck(self): # (pandas.core.dtypes) class PdComplex(np.complex128): pass + class PdDtype: name = 'category' names = None @@ -212,6 +219,7 @@ class PdDtype: kind = 'c' str = ' 1e10) and assert_all(np.isfinite(vals[2])) assert_equal(type(vals), np.ndarray) - + # perform the same tests but with nan, posinf and neginf keywords with np.errstate(divide='ignore', invalid='ignore'): - vals = nan_to_num(np.array((-1., 0, 1))/0., + vals = nan_to_num(np.array((-1., 0, 1)) / 0., nan=10, posinf=20, neginf=30) assert_equal(vals, [30, 10, 20]) assert_all(np.isfinite(vals[[0, 2]])) @@ -367,7 +375,7 @@ def test_generic(self): # perform the same test but in-place with np.errstate(divide='ignore', invalid='ignore'): - vals = np.array((-1., 0, 1))/0. + vals = np.array((-1., 0, 1)) / 0. result = nan_to_num(vals, copy=False) assert_(result is vals) @@ -375,10 +383,10 @@ def test_generic(self): assert_(vals[1] == 0) assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2])) assert_equal(type(vals), np.ndarray) - + # perform the same test but in-place with np.errstate(divide='ignore', invalid='ignore'): - vals = np.array((-1., 0, 1))/0. + vals = np.array((-1., 0, 1)) / 0. 
result = nan_to_num(vals, copy=False, nan=10, posinf=20, neginf=30) assert_(result is vals) @@ -411,17 +419,17 @@ def test_float(self): assert_equal(type(vals), np.float64) def test_complex_good(self): - vals = nan_to_num(1+1j) - assert_all(vals == 1+1j) + vals = nan_to_num(1 + 1j) + assert_all(vals == 1 + 1j) assert_equal(type(vals), np.complex128) - vals = nan_to_num(1+1j, nan=10, posinf=20, neginf=30) - assert_all(vals == 1+1j) + vals = nan_to_num(1 + 1j, nan=10, posinf=20, neginf=30) + assert_all(vals == 1 + 1j) assert_equal(type(vals), np.complex128) def test_complex_bad(self): with np.errstate(divide='ignore', invalid='ignore'): v = 1 + 1j - v += np.array(0+1.j)/0. + v += np.array(0 + 1.j) / 0. vals = nan_to_num(v) # !! This is actually (unexpectedly) zero assert_all(np.isfinite(vals)) @@ -430,7 +438,7 @@ def test_complex_bad(self): def test_complex_bad2(self): with np.errstate(divide='ignore', invalid='ignore'): v = 1 + 1j - v += np.array(-1+1.j)/0. + v += np.array(-1 + 1.j) / 0. vals = nan_to_num(v) assert_all(np.isfinite(vals)) assert_equal(type(vals), np.complex128) @@ -440,12 +448,12 @@ def test_complex_bad2(self): # !! inf. Comment out for now, and see if it # !! changes #assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals)) - + def test_do_not_rewrite_previous_keyword(self): - # This is done to test that when, for instance, nan=np.inf then these + # This is done to test that when, for instance, nan=np.inf then these # values are not rewritten by posinf keyword to the posinf value. 
with np.errstate(divide='ignore', invalid='ignore'): - vals = nan_to_num(np.array((-1., 0, 1))/0., nan=np.inf, posinf=999) + vals = nan_to_num(np.array((-1., 0, 1)) / 0., nan=np.inf, posinf=999) assert_all(np.isfinite(vals[[0, 2]])) assert_all(vals[0] < -1e10) assert_equal(vals[[1, 2]], [np.inf, 999]) @@ -456,10 +464,10 @@ class TestRealIfClose: def test_basic(self): a = np.random.rand(10) - b = real_if_close(a+1e-15j) + b = real_if_close(a + 1e-15j) assert_all(isrealobj(b)) assert_array_equal(a, b) - b = real_if_close(a+1e-7j) + b = real_if_close(a + 1e-7j) assert_all(iscomplexobj(b)) - b = real_if_close(a+1e-7j, tol=1e-6) + b = real_if_close(a + 1e-7j, tol=1e-6) assert_all(isrealobj(b)) diff --git a/numpy/lib/tests/test_ufunclike.py b/numpy/lib/tests/test_ufunclike.py index 4b5d11010e0f..8452a913c98c 100644 --- a/numpy/lib/tests/test_ufunclike.py +++ b/numpy/lib/tests/test_ufunclike.py @@ -1,9 +1,8 @@ -import numpy as np +import pytest -from numpy import fix, isposinf, isneginf -from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_raises -) +import numpy as np +from numpy import fix, isneginf, isposinf +from numpy.testing import assert_, assert_array_equal, assert_equal, assert_raises class TestUfunclike: @@ -38,6 +37,7 @@ def test_isneginf(self): with assert_raises(TypeError): isneginf(a) + @pytest.mark.filterwarnings("ignore:numpy.fix is deprecated:DeprecationWarning") def test_fix(self): a = np.array([[1.0, 1.1, 1.5, 1.8], [-1.0, -1.1, -1.5, -1.8]]) out = np.zeros(a.shape, float) @@ -50,6 +50,7 @@ def test_fix(self): assert_equal(out, tgt) assert_equal(fix(3.14), 3) + @pytest.mark.filterwarnings("ignore:numpy.fix is deprecated:DeprecationWarning") def test_fix_with_subclass(self): class MyArray(np.ndarray): def __new__(cls, data, metadata=None): @@ -76,12 +77,13 @@ def __array_finalize__(self, obj): assert_equal(f.metadata, 'foo') # check 0d arrays don't decay to scalars - m0d = m[0,...] + m0d = m[0, ...] 
m0d.metadata = 'bar' f0d = fix(m0d) assert_(isinstance(f0d, MyArray)) assert_equal(f0d.metadata, 'bar') + @pytest.mark.filterwarnings("ignore:numpy.fix is deprecated:DeprecationWarning") def test_scalar(self): x = np.inf actual = np.isposinf(x) @@ -98,3 +100,22 @@ def test_scalar(self): out = np.array(0.0) actual = np.fix(x, out=out) assert_(actual is out) + + +class TestFixDeprecation: + """Test that numpy.fix emits a DeprecationWarning.""" + + def test_fix_emits_deprecation_warning(self): + a = np.array([1.5, 2.7, -1.5, -2.7]) + with pytest.warns(DeprecationWarning, match="numpy.fix is deprecated"): + fix(a) + + def test_fix_scalar_emits_deprecation_warning(self): + with pytest.warns(DeprecationWarning, match="numpy.fix is deprecated"): + fix(3.14) + + def test_fix_with_out_emits_deprecation_warning(self): + a = np.array([1.5, 2.7]) + out = np.zeros(a.shape) + with pytest.warns(DeprecationWarning, match="numpy.fix is deprecated"): + fix(a, out=out) diff --git a/numpy/lib/tests/test_utils.py b/numpy/lib/tests/test_utils.py index e2f72ac90c92..0106ee0d8414 100644 --- a/numpy/lib/tests/test_utils.py +++ b/numpy/lib/tests/test_utils.py @@ -1,10 +1,10 @@ +from io import StringIO + import pytest import numpy as np -from numpy.testing import assert_raises_regex import numpy.lib._utils_impl as _utils_impl - -from io import StringIO +from numpy.testing import assert_raises_regex def test_assert_raises_regex_context_manager(): @@ -43,7 +43,7 @@ def _compare_dtypes(dt1, dt2): assert dt_m.metadata is None assert dt_m['l1'].metadata is None assert dt_m['l1']['l2'].metadata is None - + # alignment dt = np.dtype([('x', '>> a = np.eye(2*3*4) - >>> a.shape = (2*3, 4, 2, 3, 4) + >>> import numpy as np + >>> a = np.eye(2*3*4).reshape((2*3, 4, 2, 3, 4)) >>> rng = np.random.default_rng() >>> b = rng.normal(size=(2*3, 4)) >>> x = np.linalg.tensorsolve(a, b) @@ -288,13 +345,13 @@ def tensorsolve(a, b, axes=None): an = a.ndim if axes is not None: - allaxes = list(range(0, an)) + allaxes 
= list(range(an)) for k in axes: allaxes.remove(k) allaxes.insert(an, k) a = a.transpose(allaxes) - oldshape = a.shape[-(an-b.ndim):] + oldshape = a.shape[-(an - b.ndim):] prod = 1 for k in oldshape: prod *= k @@ -308,8 +365,7 @@ def tensorsolve(a, b, axes=None): a = a.reshape(prod, prod) b = b.ravel() res = wrap(solve(a, b)) - res.shape = oldshape - return res + return res.reshape(oldshape) def _solve_dispatcher(a, b): @@ -349,9 +405,6 @@ def solve(a, b): Notes ----- - - .. versionadded:: 1.8.0 - Broadcasting rules apply, see the `numpy.linalg` documentation for details. @@ -380,6 +433,7 @@ def solve(a, b): ``x0 + 2 * x1 = 1`` and ``3 * x0 + 5 * x1 = 2``: + >>> import numpy as np >>> a = np.array([[1, 2], [3, 5]]) >>> b = np.array([1, 2]) >>> x = np.linalg.solve(a, b) @@ -393,7 +447,6 @@ def solve(a, b): """ a, _ = _makearray(a) - _assert_stacked_2d(a) _assert_stacked_square(a) b, wrap = _makearray(b) t, result_t = _commonType(a, b) @@ -452,8 +505,8 @@ def tensorinv(a, ind=2): Examples -------- - >>> a = np.eye(4*6) - >>> a.shape = (4, 6, 8, 3) + >>> import numpy as np + >>> a = np.eye(4*6).reshape((4, 6, 8, 3)) >>> ainv = np.linalg.tensorinv(a, ind=2) >>> ainv.shape (8, 3, 4, 6) @@ -462,8 +515,7 @@ def tensorinv(a, ind=2): >>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b)) True - >>> a = np.eye(4*6) - >>> a.shape = (24, 8, 3) + >>> a = np.eye(4*6).reshape((24, 8, 3)) >>> ainv = np.linalg.tensorinv(a, ind=1) >>> ainv.shape (8, 3, 24) @@ -524,9 +576,6 @@ def inv(a): Notes ----- - - .. versionadded:: 1.8.0 - Broadcasting rules apply, see the `numpy.linalg` documentation for details. 
@@ -541,6 +590,7 @@ def inv(a): Examples -------- + >>> import numpy as np >>> from numpy.linalg import inv >>> a = np.array([[1., 2.], [3., 4.]]) >>> ainv = inv(a) @@ -601,7 +651,6 @@ def inv(a): """ a, wrap = _makearray(a) - _assert_stacked_2d(a) _assert_stacked_square(a) t, result_t = _commonType(a) @@ -652,6 +701,7 @@ def matrix_power(a, n): Examples -------- + >>> import numpy as np >>> from numpy.linalg import matrix_power >>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit >>> matrix_power(i, 3) # should = -i @@ -682,7 +732,6 @@ def matrix_power(a, n): """ a = asanyarray(a) - _assert_stacked_2d(a) _assert_stacked_square(a) try: @@ -784,9 +833,6 @@ def cholesky(a, /, *, upper=False): Notes ----- - - .. versionadded:: 1.8.0 - Broadcasting rules apply, see the `numpy.linalg` documentation for details. @@ -806,6 +852,7 @@ def cholesky(a, /, *, upper=False): Examples -------- + >>> import numpy as np >>> A = np.array([[1,-2j],[2j,5]]) >>> A array([[ 1.+0.j, -0.-2.j], @@ -833,7 +880,6 @@ def cholesky(a, /, *, upper=False): """ gufunc = _umath_linalg.cholesky_up if upper else _umath_linalg.cholesky_lo a, wrap = _makearray(a) - _assert_stacked_2d(a) _assert_stacked_square(a) t, result_t = _commonType(a) signature = 'D->D' if isComplexType(t) else 'd->d' @@ -876,6 +922,40 @@ def outer(x1, x2, /): -------- outer + Examples + -------- + Make a (*very* coarse) grid for computing a Mandelbrot set: + + >>> rl = np.linalg.outer(np.ones((5,)), np.linspace(-2, 2, 5)) + >>> rl + array([[-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.]]) + >>> im = np.linalg.outer(1j*np.linspace(2, -2, 5), np.ones((5,))) + >>> im + array([[0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j], + [0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j], + [0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], + [0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j], + [0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]]) + >>> grid = rl + im + >>> grid + 
array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j], + [-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j], + [-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j], + [-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j], + [-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]]) + + An example using a "vector" of letters: + + >>> x = np.array(['a', 'b', 'c'], dtype=np.object_) + >>> np.linalg.outer(x, [1, 2, 3]) + array([['a', 'aa', 'aaa'], + ['b', 'bb', 'bbb'], + ['c', 'cc', 'ccc']], dtype=object) + """ x1 = asanyarray(x1) x2 = asanyarray(x2) @@ -927,9 +1007,6 @@ def qr(a, mode='reduced'): Returns ------- - When mode is 'reduced' or 'complete', the result will be a namedtuple with - the attributes `Q` and `R`. - Q : ndarray of float or complex, optional A matrix with orthonormal columns. When mode = 'complete' the result is an orthogonal/unitary matrix depending on whether or not @@ -958,6 +1035,9 @@ def qr(a, mode='reduced'): Notes ----- + When mode is 'reduced' or 'complete', the result will be a namedtuple with + the attributes ``Q`` and ``R``. + This is an interface to the LAPACK routines ``dgeqrf``, ``zgeqrf``, ``dorgqr``, and ``zungqr``. @@ -981,6 +1061,7 @@ def qr(a, mode='reduced'): Examples -------- + >>> import numpy as np >>> rng = np.random.default_rng() >>> a = rng.normal(size=(9, 6)) >>> Q, R = np.linalg.qr(a) @@ -1030,9 +1111,10 @@ def qr(a, mode='reduced'): if mode not in ('reduced', 'complete', 'r', 'raw'): if mode in ('f', 'full'): # 2013-04-01, 1.8 - msg = "".join(( - "The 'full' option is deprecated in favor of 'reduced'.\n", - "For backward compatibility let mode default.")) + msg = ( + "The 'full' option is deprecated in favor of 'reduced'.\n" + "For backward compatibility let mode default." 
+ ) warnings.warn(msg, DeprecationWarning, stacklevel=2) mode = 'reduced' elif mode in ('e', 'economic'): @@ -1051,15 +1133,10 @@ def qr(a, mode='reduced'): a = _to_native_byte_order(a) mn = min(m, n) - if m <= n: - gufunc = _umath_linalg.qr_r_raw_m - else: - gufunc = _umath_linalg.qr_r_raw_n - signature = 'D->D' if isComplexType(t) else 'd->d' with errstate(call=_raise_linalgerror_qr, invalid='call', over='ignore', divide='ignore', under='ignore'): - tau = gufunc(a, signature=signature) + tau = _umath_linalg.qr_r_raw(a, signature=signature) # handle modes that don't return q if mode == 'r': @@ -1138,9 +1215,6 @@ def eigvals(a): Notes ----- - - .. versionadded:: 1.8.0 - Broadcasting rules apply, see the `numpy.linalg` documentation for details. @@ -1156,6 +1230,7 @@ def eigvals(a): if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as ``A``: + >>> import numpy as np >>> from numpy import linalg as LA >>> x = np.random.random() >>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]]) @@ -1167,15 +1242,14 @@ def eigvals(a): >>> D = np.diag((-1,1)) >>> LA.eigvals(D) - array([-1., 1.]) + array([-1. + 0.j, 1. + 0.j]) >>> A = np.dot(Q, D) >>> A = np.dot(A, Q.T) >>> LA.eigvals(A) - array([ 1., -1.]) # random + array([ 1., -1.]) # random """ a, wrap = _makearray(a) - _assert_stacked_2d(a) _assert_stacked_square(a) _assert_finite(a) t, result_t = _commonType(a) @@ -1186,14 +1260,7 @@ def eigvals(a): under='ignore'): w = _umath_linalg.eigvals(a, signature=signature) - if not isComplexType(t): - if all(w.imag == 0): - w = w.real - result_t = _realType(result_t) - else: - result_t = _complexType(result_t) - - return w.astype(result_t, copy=False) + return w.astype(_complexType(result_t), copy=False) def _eigvalsh_dispatcher(a, UPLO=None): @@ -1242,9 +1309,6 @@ def eigvalsh(a, UPLO='L'): Notes ----- - - .. versionadded:: 1.8.0 - Broadcasting rules apply, see the `numpy.linalg` documentation for details. 
@@ -1252,6 +1316,7 @@ def eigvalsh(a, UPLO='L'): Examples -------- + >>> import numpy as np >>> from numpy import linalg as LA >>> a = np.array([[1, -2j], [2j, 5]]) >>> LA.eigvalsh(a) @@ -1270,8 +1335,9 @@ def eigvalsh(a, UPLO='L'): [0.+2.j, 2.+0.j]]) >>> wa = LA.eigvalsh(a) >>> wb = LA.eigvals(b) - >>> wa; wb + >>> wa array([1., 6.]) + >>> wb array([6.+0.j, 1.+0.j]) """ @@ -1285,7 +1351,6 @@ def eigvalsh(a, UPLO='L'): gufunc = _umath_linalg.eigvalsh_up a, wrap = _makearray(a) - _assert_stacked_2d(a) _assert_stacked_square(a) t, result_t = _commonType(a) signature = 'D->d' if isComplexType(t) else 'd->d' @@ -1295,11 +1360,6 @@ def eigvalsh(a, UPLO='L'): w = gufunc(a, signature=signature) return w.astype(_realType(result_t), copy=False) -def _convertarray(a): - t, result_t = _commonType(a) - a = a.astype(t).T.copy() - return a, t, result_t - # Eigenvectors @@ -1351,9 +1411,6 @@ def eig(a): Notes ----- - - .. versionadded:: 1.8.0 - Broadcasting rules apply, see the `numpy.linalg` documentation for details. @@ -1392,14 +1449,15 @@ def eig(a): Examples -------- + >>> import numpy as np >>> from numpy import linalg as LA (Almost) trivial example with real eigenvalues and eigenvectors. >>> eigenvalues, eigenvectors = LA.eig(np.diag((1, 2, 3))) >>> eigenvalues - array([1., 2., 3.]) - >>> eigenvectors + array([1. + 0j, 2. + 0j, 3. + 0j]) + >>> eigenvectors.real array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]) @@ -1431,14 +1489,13 @@ def eig(a): >>> # Theor. 
eigenvalues are 1 +/- 1e-9 >>> eigenvalues, eigenvectors = LA.eig(a) >>> eigenvalues - array([1., 1.]) - >>> eigenvectors + array([1.+0j, 1.+0j]) + >>> eigenvectors.real array([[1., 0.], [0., 1.]]) """ a, wrap = _makearray(a) - _assert_stacked_2d(a) _assert_stacked_square(a) _assert_finite(a) t, result_t = _commonType(a) @@ -1449,15 +1506,9 @@ def eig(a): under='ignore'): w, vt = _umath_linalg.eig(a, signature=signature) - if not isComplexType(t) and all(w.imag == 0.0): - w = w.real - vt = vt.real - result_t = _realType(result_t) - else: - result_t = _complexType(result_t) - - vt = vt.astype(result_t, copy=False) - return EigResult(w.astype(result_t, copy=False), wrap(vt)) + w = w.astype(_complexType(result_t), copy=False) + vt = vt.astype(_complexType(result_t), copy=False) + return EigResult(w, wrap(vt)) @array_function_dispatch(_eigvalsh_dispatcher) @@ -1511,9 +1562,6 @@ def eigh(a, UPLO='L'): Notes ----- - - .. versionadded:: 1.8.0 - Broadcasting rules apply, see the `numpy.linalg` documentation for details. @@ -1532,6 +1580,7 @@ def eigh(a, UPLO='L'): Examples -------- + >>> import numpy as np >>> from numpy import linalg as LA >>> a = np.array([[1, -2j], [2j, 5]]) >>> a @@ -1574,12 +1623,14 @@ def eigh(a, UPLO='L'): [0.+2.j, 2.+0.j]]) >>> wa, va = LA.eigh(a) >>> wb, vb = LA.eig(b) - >>> wa; wb + >>> wa array([1., 6.]) + >>> wb array([6.+0.j, 1.+0.j]) - >>> va; vb + >>> va array([[-0.4472136 +0.j , -0.89442719+0.j ], # may vary [ 0. +0.89442719j, 0. -0.4472136j ]]) + >>> vb array([[ 0.89442719+0.j , -0. +0.4472136j], [-0. +0.4472136j, 0.89442719+0.j ]]) @@ -1589,7 +1640,6 @@ def eigh(a, UPLO='L'): raise ValueError("UPLO argument must be 'L' or 'U'") a, wrap = _makearray(a) - _assert_stacked_2d(a) _assert_stacked_square(a) t, result_t = _commonType(a) @@ -1643,13 +1693,8 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False): enabling a more efficient method for finding singular values. Defaults to False. - .. 
versionadded:: 1.17.0 - Returns ------- - When `compute_uv` is True, the result is a namedtuple with the following - attribute names: - U : { (..., M, M), (..., M, K) } array Unitary array(s). The first ``a.ndim - 2`` dimensions have the same size as those of the input `a`. The size of the last two dimensions @@ -1677,10 +1722,8 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False): Notes ----- - - .. versionchanged:: 1.8.0 - Broadcasting rules apply, see the `numpy.linalg` documentation for - details. + When `compute_uv` is True, the result is a namedtuple with the following + attribute names: `U`, `S`, and `Vh`. The decomposition is performed using LAPACK routine ``_gesdd``. @@ -1707,6 +1750,7 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False): Examples -------- + >>> import numpy as np >>> rng = np.random.default_rng() >>> a = rng.normal(size=(9, 6)) + 1j*rng.normal(size=(9, 6)) >>> b = rng.normal(size=(2, 7, 8, 3)) + 1j*rng.normal(size=(2, 7, 8, 3)) @@ -1719,7 +1763,7 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False): ((9, 9), (6,), (6, 6)) >>> np.allclose(a, np.dot(U[:, :6] * S, Vh)) True - >>> smat = np.zeros((9, 6), dtype=complex) + >>> smat = np.zeros((9, 6), dtype=np.complex128) >>> smat[:6, :6] = np.diag(S) >>> np.allclose(a, np.dot(U, np.dot(smat, Vh))) True @@ -1756,7 +1800,7 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False): True """ - import numpy as _nx + import numpy as np a, wrap = _makearray(a) if hermitian: @@ -1768,9 +1812,9 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False): sgn = sign(s) s = abs(s) sidx = argsort(s)[..., ::-1] - sgn = _nx.take_along_axis(sgn, sidx, axis=-1) - s = _nx.take_along_axis(s, sidx, axis=-1) - u = _nx.take_along_axis(u, sidx[..., None, :], axis=-1) + sgn = np.take_along_axis(sgn, sidx, axis=-1) + s = np.take_along_axis(s, sidx, axis=-1) + u = np.take_along_axis(u, sidx[..., None, :], axis=-1) # singular values are unsigned, move the 
sign into v vt = transpose(u * sgn[..., None, :]).conjugate() return SVDResult(wrap(u), s, wrap(vt)) @@ -1785,15 +1829,9 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False): m, n = a.shape[-2:] if compute_uv: if full_matrices: - if m < n: - gufunc = _umath_linalg.svd_m_f - else: - gufunc = _umath_linalg.svd_n_f + gufunc = _umath_linalg.svd_f else: - if m < n: - gufunc = _umath_linalg.svd_m_s - else: - gufunc = _umath_linalg.svd_n_s + gufunc = _umath_linalg.svd_s signature = 'D->DdD' if isComplexType(t) else 'd->ddd' with errstate(call=_raise_linalgerror_svd_nonconvergence, @@ -1805,16 +1843,11 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False): vh = vh.astype(result_t, copy=False) return SVDResult(wrap(u), s, wrap(vh)) else: - if m < n: - gufunc = _umath_linalg.svd_m - else: - gufunc = _umath_linalg.svd_n - signature = 'D->d' if isComplexType(t) else 'd->d' with errstate(call=_raise_linalgerror_svd_nonconvergence, invalid='call', over='ignore', divide='ignore', under='ignore'): - s = gufunc(a, signature=signature) + s = _umath_linalg.svd(a, signature=signature) s = s.astype(_realType(result_t), copy=False) return s @@ -1852,6 +1885,23 @@ def svdvals(x, /): -------- scipy.linalg.svdvals : Compute singular values of a matrix. + Examples + -------- + + >>> np.linalg.svdvals([[1, 2, 3, 4, 5], + ... [1, 4, 9, 16, 25], + ... [1, 8, 27, 64, 125]]) + array([146.68862757, 5.57510612, 0.60393245]) + + Determine the rank of a matrix using singular values: + + >>> s = np.linalg.svdvals([[1, 2, 3], + ... [2, 4, 6], + ... 
[-1, 1, -1]]); s + array([8.38434191e+00, 1.64402274e+00, 2.31534378e-16]) + >>> np.count_nonzero(s > 1e-10) # Matrix of rank 2 + 2 + """ return svd(x, compute_uv=False, hermitian=False) @@ -1914,6 +1964,7 @@ def cond(x, p=None): Examples -------- + >>> import numpy as np >>> from numpy import linalg as LA >>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]]) >>> a @@ -1944,7 +1995,7 @@ def cond(x, p=None): x = asarray(x) # in case we have a matrix if _is_empty_2d(x): raise LinAlgError("cond is not defined on empty arrays") - if p is None or p == 2 or p == -2: + if p is None or p in {2, -2}: s = svd(x, compute_uv=False) with errstate(all='ignore'): if p == -2: @@ -1954,9 +2005,9 @@ def cond(x, p=None): else: # Call inv(x) ignoring errors. The result array will # contain nans in the entries where inversion failed. - _assert_stacked_2d(x) _assert_stacked_square(x) t, result_t = _commonType(x) + result_t = _realType(result_t) # condition number is always real signature = 'D->D' if isComplexType(t) else 'd->d' with errstate(all='ignore'): invx = _umath_linalg.inv(x, signature=signature) @@ -1964,18 +2015,14 @@ def cond(x, p=None): r = r.astype(result_t, copy=False) # Convert nans to infs unless the original array had nan entries - r = asarray(r) nan_mask = isnan(r) if nan_mask.any(): nan_mask &= ~isnan(x).any(axis=(-2, -1)) if r.ndim > 0: r[nan_mask] = inf elif nan_mask: - r[()] = inf - - # Convention is to return scalars instead of 0d arrays - if r.ndim == 0: - r = r[()] + # Convention is to return scalars instead of 0d arrays. + r = r.dtype.type(inf) return r @@ -1992,9 +2039,6 @@ def matrix_rank(A, tol=None, hermitian=False, *, rtol=None): Rank of the array is the number of singular values of the array that are greater than `tol`. - .. 
versionchanged:: 1.14 - Can now operate on stacks of matrices - Parameters ---------- A : {(M,), (..., M, N)} array_like @@ -2004,15 +2048,10 @@ def matrix_rank(A, tol=None, hermitian=False, *, rtol=None): None, and ``S`` is an array with singular values for `M`, and ``eps`` is the epsilon value for datatype of ``S``, then `tol` is set to ``S.max() * max(M, N) * eps``. - - .. versionchanged:: 1.14 - Broadcasted against the stack of matrices hermitian : bool, optional If True, `A` is assumed to be Hermitian (symmetric if real-valued), enabling a more efficient method for finding singular values. Defaults to False. - - .. versionadded:: 1.14 rtol : (...) array_like, float, optional Parameter for the relative tolerance component. Only ``tol`` or ``rtol`` can be set at a time. Defaults to ``max(M, N) * eps``. @@ -2029,9 +2068,9 @@ def matrix_rank(A, tol=None, hermitian=False, *, rtol=None): The default threshold to detect rank deficiency is a test on the magnitude of the singular values of `A`. By default, we identify singular values less than ``S.max() * max(M, N) * eps`` as indicating rank deficiency - (with the symbols defined above). This is the algorithm MATLAB uses [1]. + (with the symbols defined above). This is the algorithm MATLAB uses [1]_. It also appears in *Numerical recipes* in the discussion of SVD solutions - for linear least squares [2]. + for linear least squares [2]_. This default threshold is designed to detect rank deficiency accounting for the numerical errors of the SVD computation. 
Imagine that there @@ -2072,6 +2111,7 @@ def matrix_rank(A, tol=None, hermitian=False, *, rtol=None): Examples -------- + >>> import numpy as np >>> from numpy.linalg import matrix_rank >>> matrix_rank(np.eye(4)) # Full rank matrix 4 @@ -2089,6 +2129,7 @@ def matrix_rank(A, tol=None, hermitian=False, *, rtol=None): A = asarray(A) if A.ndim < 2: return int(not all(A == 0)) + S = svd(A, compute_uv=False, hermitian=hermitian) if tol is None: @@ -2096,7 +2137,7 @@ def matrix_rank(A, tol=None, hermitian=False, *, rtol=None): rtol = max(A.shape[-2:]) * finfo(S.dtype).eps else: rtol = asarray(rtol)[..., newaxis] - tol = S.max(axis=-1, keepdims=True) * rtol + tol = S.max(axis=-1, keepdims=True, initial=0) * rtol else: tol = asarray(tol)[..., newaxis] @@ -2118,9 +2159,6 @@ def pinv(a, rcond=None, hermitian=False, *, rtol=_NoValue): singular-value decomposition (SVD) and including all *large* singular values. - .. versionchanged:: 1.14 - Can now operate on stacks of matrices - Parameters ---------- a : (..., M, N) array_like @@ -2134,8 +2172,6 @@ def pinv(a, rcond=None, hermitian=False, *, rtol=_NoValue): If True, `a` is assumed to be Hermitian (symmetric if real-valued), enabling a more efficient method for finding singular values. Defaults to False. - - .. versionadded:: 1.17.0 rtol : (...) array_like of float, optional Same as `rcond`, but it's an Array API compatible parameter name. Only `rcond` or `rtol` can be set at a time. If none of them are @@ -2187,6 +2223,7 @@ def pinv(a, rcond=None, hermitian=False, *, rtol=_NoValue): The following example checks that ``a * a+ * a == a`` and ``a+ * a * a+ == a+``: + >>> import numpy as np >>> rng = np.random.default_rng() >>> a = rng.normal(size=(9, 6)) >>> B = np.linalg.pinv(a) @@ -2267,22 +2304,17 @@ def slogdet(a): Notes ----- - - .. versionadded:: 1.8.0 - Broadcasting rules apply, see the `numpy.linalg` documentation for details. - .. 
versionadded:: 1.6.0 - The determinant is computed via LU factorization using the LAPACK routine ``z/dgetrf``. - Examples -------- The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``: + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> (sign, logabsdet) = np.linalg.slogdet(a) >>> (sign, logabsdet) @@ -2310,7 +2342,6 @@ def slogdet(a): """ a = asarray(a) - _assert_stacked_2d(a) _assert_stacked_square(a) t, result_t = _commonType(a) real_t = _realType(result_t) @@ -2344,9 +2375,6 @@ def det(a): Notes ----- - - .. versionadded:: 1.8.0 - Broadcasting rules apply, see the `numpy.linalg` documentation for details. @@ -2357,6 +2385,7 @@ def det(a): -------- The determinant of a 2-D array [[a, b], [c, d]] is ad - bc: + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> np.linalg.det(a) -2.0 # may vary @@ -2371,7 +2400,6 @@ def det(a): """ a = asarray(a) - _assert_stacked_2d(a) _assert_stacked_square(a) t, result_t = _commonType(a) signature = 'D->D' if isComplexType(t) else 'd->d' @@ -2453,6 +2481,7 @@ def lstsq(a, b, rcond=None): -------- Fit a line, ``y = mx + c``, through some noisy data-points: + >>> import numpy as np >>> x = np.array([0, 1, 2, 3]) >>> y = np.array([-1, 0.2, 0.9, 2.1]) @@ -2499,11 +2528,6 @@ def lstsq(a, b, rcond=None): if rcond is None: rcond = finfo(t).eps * max(n, m) - if m <= n: - gufunc = _umath_linalg.lstsq_m - else: - gufunc = _umath_linalg.lstsq_n - signature = 'DDd->Ddid' if isComplexType(t) else 'ddd->ddid' if n_rhs == 0: # lapack can't handle n_rhs = 0 - so allocate @@ -2512,7 +2536,8 @@ def lstsq(a, b, rcond=None): with errstate(call=_raise_linalgerror_lstsq, invalid='call', over='ignore', divide='ignore', under='ignore'): - x, resids, rank, s = gufunc(a, b, rcond, signature=signature) + x, resids, rank, s = _umath_linalg.lstsq(a, b, rcond, + signature=signature) if m == 0: x[...] 
= 0 if n_rhs == 0: @@ -2538,7 +2563,7 @@ def lstsq(a, b, rcond=None): return wrap(x), wrap(resids), rank, s -def _multi_svd_norm(x, row_axis, col_axis, op): +def _multi_svd_norm(x, row_axis, col_axis, op, initial=None): """Compute a function of the singular values of the 2-D matrices in `x`. This is a private utility function used by `numpy.linalg.norm()`. @@ -2562,7 +2587,7 @@ def _multi_svd_norm(x, row_axis, col_axis, op): """ y = moveaxis(x, (row_axis, col_axis), (-2, -1)) - result = op(svd(y, compute_uv=False), axis=-1) + result = op(svd(y, compute_uv=False), axis=-1, initial=initial) return result @@ -2585,8 +2610,9 @@ def norm(x, ord=None, axis=None, keepdims=False): Input array. If `axis` is None, `x` must be 1-D or 2-D, unless `ord` is None. If both `axis` and `ord` are None, the 2-norm of ``x.ravel`` will be returned. - ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional - Order of the norm (see table under ``Notes``). inf means numpy's + ord : {int, float, inf, -inf, 'fro', 'nuc'}, optional + Order of the norm (see table under ``Notes`` for what values are + supported for matrices and vectors respectively). inf means numpy's `inf` object. The default is None. axis : {None, int, 2-tuple of ints}, optional. If `axis` is an integer, it specifies the axis of `x` along which to @@ -2596,15 +2622,11 @@ def norm(x, ord=None, axis=None, keepdims=False): is 1-D) or a matrix norm (when `x` is 2-D) is returned. The default is None. - .. versionadded:: 1.8.0 - keepdims : bool, optional If this is set to True, the axes which are normed over are left in the result as dimensions with size one. With this option the result will broadcast correctly against the original `x`. - .. 
versionadded:: 1.10.0 - Returns ------- n : float or ndarray @@ -2654,6 +2676,8 @@ def norm(x, ord=None, axis=None, keepdims=False): Examples -------- + + >>> import numpy as np >>> from numpy import linalg as LA >>> a = np.arange(9) - 4 >>> a @@ -2743,7 +2767,7 @@ def norm(x, ord=None, axis=None, keepdims=False): sqnorm = x.dot(x) ret = sqrt(sqnorm) if keepdims: - ret = ret.reshape(ndim*[1]) + ret = ret.reshape(ndim * [1]) return ret # Normalize the `axis` argument to a tuple. @@ -2761,7 +2785,7 @@ def norm(x, ord=None, axis=None, keepdims=False): if len(axis) == 1: if ord == inf: - return abs(x).max(axis=axis, keepdims=keepdims) + return abs(x).max(axis=axis, keepdims=keepdims, initial=0) elif ord == -inf: return abs(x).min(axis=axis, keepdims=keepdims) elif ord == 0: @@ -2795,17 +2819,17 @@ def norm(x, ord=None, axis=None, keepdims=False): if row_axis == col_axis: raise ValueError('Duplicate axes given.') if ord == 2: - ret = _multi_svd_norm(x, row_axis, col_axis, amax) + ret = _multi_svd_norm(x, row_axis, col_axis, amax, 0) elif ord == -2: ret = _multi_svd_norm(x, row_axis, col_axis, amin) elif ord == 1: if col_axis > row_axis: col_axis -= 1 - ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis) + ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis, initial=0) elif ord == inf: if row_axis > col_axis: row_axis -= 1 - ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis) + ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis, initial=0) elif ord == -1: if col_axis > row_axis: col_axis -= 1 @@ -2817,7 +2841,7 @@ def norm(x, ord=None, axis=None, keepdims=False): elif ord in [None, 'fro', 'f']: ret = sqrt(add.reduce((x.conj() * x).real, axis=axis)) elif ord == 'nuc': - ret = _multi_svd_norm(x, row_axis, col_axis, sum) + ret = _multi_svd_norm(x, row_axis, col_axis, sum, 0) else: raise ValueError("Invalid norm order for matrices.") if keepdims: @@ -2870,8 +2894,6 @@ def multi_dot(arrays): return functools.reduce(np.dot, arrays) conditions are not 
met, an exception is raised, instead of attempting to be flexible. - .. versionadded:: 1.19.0 - Returns ------- output : ndarray @@ -2891,6 +2913,7 @@ def multi_dot(arrays): return functools.reduce(np.dot, arrays) -------- `multi_dot` allows you to write:: + >>> import numpy as np >>> from numpy.linalg import multi_dot >>> # Prepare some data >>> A = np.random.random((10000, 100)) @@ -2915,7 +2938,7 @@ def cost(A, B): return A.shape[0] * A.shape[1] * B.shape[1] Assume we have three matrices - :math:`A_{10x100}, B_{100x5}, C_{5x50}`. + :math:`A_{10 \\times 100}, B_{100 \\times 5}, C_{5 \\times 50}`. The costs for the two different parenthesizations are as follows:: @@ -2981,7 +3004,7 @@ def _multi_dot_three(A, B, C, out=None): def _multi_dot_matrix_chain_order(arrays, return_costs=False): """ - Return a np.array that encodes the optimal order of mutiplications. + Return a np.array that encodes the optimal order of multiplications. The optimal order array is then used by `_multi_dot()` to do the multiplication. 
@@ -3012,7 +3035,7 @@ def _multi_dot_matrix_chain_order(arrays, return_costs=False): j = i + l m[i, j] = inf for k in range(i, j): - q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1] + q = m[i, k] + m[k + 1, j] + p[i] * p[k + 1] * p[j + 1] if q < m[i, j]: m[i, j] = q s[i, j] = k # Note that Cormen uses 1-based index @@ -3073,6 +3096,58 @@ def diagonal(x, /, *, offset=0): -------- numpy.diagonal + Examples + -------- + >>> a = np.arange(4).reshape(2, 2); a + array([[0, 1], + [2, 3]]) + >>> np.linalg.diagonal(a) + array([0, 3]) + + A 3-D example: + + >>> a = np.arange(8).reshape(2, 2, 2); a + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + >>> np.linalg.diagonal(a) + array([[0, 3], + [4, 7]]) + + Diagonals adjacent to the main diagonal can be obtained by using the + `offset` argument: + + >>> a = np.arange(9).reshape(3, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> np.linalg.diagonal(a, offset=1) # First superdiagonal + array([1, 5]) + >>> np.linalg.diagonal(a, offset=2) # Second superdiagonal + array([2]) + >>> np.linalg.diagonal(a, offset=-1) # First subdiagonal + array([3, 7]) + >>> np.linalg.diagonal(a, offset=-2) # Second subdiagonal + array([6]) + + The anti-diagonal can be obtained by reversing the order of elements + using either `numpy.flipud` or `numpy.fliplr`. + + >>> a = np.arange(9).reshape(3, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> np.linalg.diagonal(np.fliplr(a)) # Horizontal flip + array([2, 4, 6]) + >>> np.linalg.diagonal(np.flipud(a)) # Vertical flip + array([6, 4, 2]) + + Note that the order in which the diagonal is retrieved varies depending + on the flip function. 
+ """ return _core_diagonal(x, offset, axis1=-2, axis2=-1) @@ -3126,6 +3201,38 @@ def trace(x, /, *, offset=0, dtype=None): -------- numpy.trace + Examples + -------- + >>> np.linalg.trace(np.eye(3)) + 3.0 + >>> a = np.arange(8).reshape((2, 2, 2)) + >>> np.linalg.trace(a) + array([3, 11]) + + Trace is computed with the last two axes as the 2-d sub-arrays. + This behavior differs from :py:func:`numpy.trace` which uses the first two + axes by default. + + >>> a = np.arange(24).reshape((3, 2, 2, 2)) + >>> np.linalg.trace(a).shape + (3, 2) + + Traces adjacent to the main diagonal can be obtained by using the + `offset` argument: + + >>> a = np.arange(9).reshape((3, 3)); a + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> np.linalg.trace(a, offset=1) # First superdiagonal + 6 + >>> np.linalg.trace(a, offset=2) # Second superdiagonal + 2 + >>> np.linalg.trace(a, offset=-1) # First subdiagonal + 10 + >>> np.linalg.trace(a, offset=-2) # Second subdiagonal + 6 + """ return _core_trace(x, offset, axis1=-2, axis2=-1, dtype=dtype) @@ -3170,14 +3277,34 @@ def cross(x1, x2, /, *, axis=-1): -------- numpy.cross - """ - if x1.shape[axis] != 3 or x2.shape[axis] != 3: - raise ValueError( - "Both input arrays must be (arrays of) 3-dimensional vectors, " - f"but they are {x1.shape[axis]} and {x2.shape[axis]} " - "dimensional instead." - ) + Examples + -------- + Vector cross-product. + + >>> x = np.array([1, 2, 3]) + >>> y = np.array([4, 5, 6]) + >>> np.linalg.cross(x, y) + array([-3, 6, -3]) + + Multiple vector cross-products. Note that the direction of the cross + product vector is defined by the *right-hand rule*. 
+ >>> x = np.array([[1,2,3], [4,5,6]]) + >>> y = np.array([[4,5,6], [1,2,3]]) + >>> np.linalg.cross(x, y) + array([[-3, 6, -3], + [ 3, -6, 3]]) + + >>> x = np.array([[1, 2], [3, 4], [5, 6]]) + >>> y = np.array([[4, 5], [6, 1], [2, 3]]) + >>> np.linalg.cross(x, y, axis=0) + array([[-24, 6], + [ 18, 24], + [-6, -18]]) + + """ + x1 = asanyarray(x1) + x2 = asanyarray(x2) return _core_cross(x1, x2, axis=axis) @@ -3220,6 +3347,53 @@ def matmul(x1, x2, /): -------- numpy.matmul + Examples + -------- + For 2-D arrays it is the matrix product: + + >>> a = np.array([[1, 0], + ... [0, 1]]) + >>> b = np.array([[4, 1], + ... [2, 2]]) + >>> np.linalg.matmul(a, b) + array([[4, 1], + [2, 2]]) + + For 2-D mixed with 1-D, the result is the usual. + + >>> a = np.array([[1, 0], + ... [0, 1]]) + >>> b = np.array([1, 2]) + >>> np.linalg.matmul(a, b) + array([1, 2]) + >>> np.linalg.matmul(b, a) + array([1, 2]) + + + Broadcasting is conventional for stacks of arrays + + >>> a = np.arange(2 * 2 * 4).reshape((2, 2, 4)) + >>> b = np.arange(2 * 2 * 4).reshape((2, 4, 2)) + >>> np.linalg.matmul(a,b).shape + (2, 2, 2) + >>> np.linalg.matmul(a, b)[0, 1, 1] + 98 + >>> sum(a[0, 1, :] * b[0 , :, 1]) + 98 + + Vector, vector returns the scalar inner product, but neither argument + is complex-conjugated: + + >>> np.linalg.matmul([2j, 3j], [2j, 3j]) + (-13+0j) + + Scalar multiplication raises an error. + + >>> np.linalg.matmul([1,2], 3) + Traceback (most recent call last): + ... + ValueError: matmul: Input operand 1 does not have enough dimensions ... + """ return _core_matmul(x1, x2) @@ -3248,7 +3422,12 @@ def matrix_transpose(x, /): return _core_matrix_transpose(x) -matrix_transpose.__doc__ = _core_matrix_transpose.__doc__ +matrix_transpose.__doc__ = f"""{_core_matrix_transpose.__doc__} + + Notes + ----- + This function is an alias of `numpy.matrix_transpose`. 
+""" # matrix_norm @@ -3279,6 +3458,36 @@ def matrix_norm(x, /, *, keepdims=False, ord="fro"): -------- numpy.linalg.norm : Generic norm function + Examples + -------- + >>> from numpy import linalg as LA + >>> a = np.arange(9) - 4 + >>> a + array([-4, -3, -2, ..., 2, 3, 4]) + >>> b = a.reshape((3, 3)) + >>> b + array([[-4, -3, -2], + [-1, 0, 1], + [ 2, 3, 4]]) + + >>> LA.matrix_norm(b) + 7.745966692414834 + >>> LA.matrix_norm(b, ord='fro') + 7.745966692414834 + >>> LA.matrix_norm(b, ord=np.inf) + 9.0 + >>> LA.matrix_norm(b, ord=-np.inf) + 2.0 + + >>> LA.matrix_norm(b, ord=1) + 7.0 + >>> LA.matrix_norm(b, ord=-1) + 6.0 + >>> LA.matrix_norm(b, ord=2) + 7.3484692283495345 + >>> LA.matrix_norm(b, ord=-2) + 1.8570331885190563e-016 # may vary + """ x = asanyarray(x) return norm(x, axis=(-2, -1), keepdims=keepdims, ord=ord) @@ -3310,7 +3519,7 @@ def vector_norm(x, /, *, axis=None, keepdims=False, ord=2): keepdims : bool, optional If this is set to True, the axes which are normed over are left in the result as dimensions with size one. Default: False. - ord : {1, -1, 2, -2, inf, -inf, 'fro', 'nuc'}, optional + ord : {int, float, inf, -inf}, optional The order of the norm. For details see the table under ``Notes`` in `numpy.linalg.norm`. 
@@ -3318,6 +3527,36 @@ def vector_norm(x, /, *, axis=None, keepdims=False, ord=2): -------- numpy.linalg.norm : Generic norm function + Examples + -------- + >>> from numpy import linalg as LA + >>> a = np.arange(9) + 1 + >>> a + array([1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> b = a.reshape((3, 3)) + >>> b + array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + + >>> LA.vector_norm(b) + 16.881943016134134 + >>> LA.vector_norm(b, ord=np.inf) + 9.0 + >>> LA.vector_norm(b, ord=-np.inf) + 1.0 + + >>> LA.vector_norm(b, ord=0) + 9.0 + >>> LA.vector_norm(b, ord=1) + 45.0 + >>> LA.vector_norm(b, ord=-1) + 0.3534857623790153 + >>> LA.vector_norm(b, ord=2) + 16.881943016134134 + >>> LA.vector_norm(b, ord=-2) + 0.8058837395885292 + """ x = asanyarray(x) shape = list(x.shape) @@ -3397,5 +3636,14 @@ def vecdot(x1, x2, /, *, axis=-1): -------- numpy.vecdot + Examples + -------- + Get the projected size along a given normal for an array of vectors. + + >>> v = np.array([[0., 5., 0.], [0., 0., 10.], [0., 6., 8.]]) + >>> n = np.array([0., 0.6, 0.8]) + >>> np.linalg.vecdot(v, n) + array([ 3., 8., 10.]) + """ return _core_vecdot(x1, x2, axis=axis) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index e9f00e226a94..8eb3e57cf1e2 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -1,426 +1,1224 @@ -from collections.abc import Iterable +from collections.abc import Iterable, Sequence from typing import ( - Literal as L, - overload, - TypeVar, Any, - SupportsIndex, - SupportsInt, - NamedTuple, Generic, + Literal as L, + NamedTuple, + Never, + Protocol, + SupportsIndex, + overload, + type_check_only, ) +from typing_extensions import TypeVar import numpy as np -from numpy import ( - generic, - floating, - complexfloating, - signedinteger, - unsignedinteger, - timedelta64, - object_, - int32, - float64, - complex128, -) - -from numpy.linalg import LinAlgError as LinAlgError - +from numpy import vecdot +from numpy._core.fromnumeric import matrix_transpose +from 
numpy._globals import _NoValue, _NoValueType from numpy._typing import ( - NDArray, ArrayLike, - _ArrayLikeUnknown, + DTypeLike, + NDArray, + _AnyShape, + _ArrayLike, _ArrayLikeBool_co, - _ArrayLikeInt_co, - _ArrayLikeUInt_co, - _ArrayLikeFloat_co, _ArrayLikeComplex_co, - _ArrayLikeTD64_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, _ArrayLikeObject_co, - DTypeLike, + _ArrayLikeTD64_co, + _DTypeLike, + _NestedSequence, + _Shape, + _ShapeLike, ) +from numpy.linalg import LinAlgError -_T = TypeVar("_T") -_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) -_SCT = TypeVar("_SCT", bound=generic, covariant=True) -_SCT2 = TypeVar("_SCT2", bound=generic, covariant=True) +__all__ = [ + "matrix_power", + "solve", + "tensorsolve", + "tensorinv", + "inv", + "cholesky", + "eigvals", + "eigvalsh", + "pinv", + "slogdet", + "det", + "svd", + "svdvals", + "eig", + "eigh", + "lstsq", + "norm", + "qr", + "cond", + "matrix_rank", + "LinAlgError", + "multi_dot", + "trace", + "diagonal", + "cross", + "outer", + "tensordot", + "matmul", + "matrix_transpose", + "matrix_norm", + "vector_norm", + "vecdot", +] -_2Tuple = tuple[_T, _T] -_ModeKind = L["reduced", "complete", "r", "raw"] +type _AtMost1D = tuple[()] | tuple[int] +type _AtLeast1D = tuple[int, *tuple[int, ...]] +type _AtLeast2D = tuple[int, int, *tuple[int, ...]] +type _AtLeast3D = tuple[int, int, int, *tuple[int, ...]] +type _AtLeast4D = tuple[int, int, int, int, *tuple[int, ...]] +type _JustAnyShape = tuple[Never, ...] 
# workaround for microsoft/pyright#10232 -__all__: list[str] +type _tuple2[T] = tuple[T, T] +type _Ax2 = SupportsIndex | _tuple2[SupportsIndex] -class EigResult(NamedTuple): - eigenvalues: NDArray[Any] - eigenvectors: NDArray[Any] +type _inexact32 = np.float32 | np.complex64 +type _inexact80 = np.longdouble | np.clongdouble +type _to_integer = np.integer | np.bool +type _to_timedelta64 = np.timedelta64 | _to_integer +type _to_float64 = np.float64 | _to_integer +type _to_inexact64 = np.complex128 | _to_float64 +type _to_inexact64_unsafe = _to_inexact64 | np.datetime64 | np.timedelta64 | np.character +type _to_complex = np.number | np.bool +type _to_float64_co = np.float64 | np.float32 | np.float16 | _to_integer +type _to_complex128_co = np.complex128 | np.complex64 | _to_float64_co -class EighResult(NamedTuple): - eigenvalues: NDArray[Any] - eigenvectors: NDArray[Any] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] +type _Array3ND[ScalarT: np.generic] = np.ndarray[_AtLeast3D, np.dtype[ScalarT]] -class QRResult(NamedTuple): - Q: NDArray[Any] - R: NDArray[Any] +type _Sequence2D[T] = Sequence[Sequence[T]] +type _Sequence3D[T] = Sequence[_Sequence2D[T]] +type _Sequence2ND[T] = _NestedSequence[Sequence[T]] +type _Sequence3ND[T] = _NestedSequence[_Sequence2D[T]] +type _Sequence4ND[T] = _NestedSequence[_Sequence3D[T]] +type _Sequence0D1D[T] = T | Sequence[T] +type _Sequence1D2D[T] = Sequence[T] | _Sequence2D[T] -class SlogdetResult(NamedTuple): - # TODO: `sign` and `logabsdet` are scalars for input 2D arrays and - # a `(x.ndim - 2)`` dimensionl arrays otherwise - sign: Any - logabsdet: Any +type _ArrayLike1D[ScalarT: np.generic] = _SupportsArray[tuple[int], np.dtype[ScalarT]] | Sequence[ScalarT] # ==1d +type _ArrayLike2D[ScalarT: np.generic] = _SupportsArray[tuple[int, int], np.dtype[ScalarT]] | _Sequence2D[ScalarT] # ==2d +type _ArrayLike1D2D[ScalarT: 
np.generic] = ( # 1d or 2d + _SupportsArray[tuple[int] | tuple[int, int], np.dtype[ScalarT]] | _Sequence1D2D[ScalarT] +) +type _ArrayLike3D[ScalarT: np.generic] = _SupportsArray[tuple[int, int, int], np.dtype[ScalarT]] | _Sequence3D[ScalarT] # ==3d +type _ArrayLike1ND[ScalarT: np.generic] = _SupportsArray[_AtLeast1D, np.dtype[ScalarT]] | _NestedSequence[ScalarT] # >=1d +type _ArrayLike2ND[ScalarT: np.generic] = _SupportsArray[_AtLeast2D, np.dtype[ScalarT]] | _Sequence2ND[ScalarT] # >=2d +type _ArrayLike3ND[ScalarT: np.generic] = _SupportsArray[_AtLeast3D, np.dtype[ScalarT]] | _Sequence3ND[ScalarT] # >=3d +type _ArrayLike4ND[ScalarT: np.generic] = _SupportsArray[_AtLeast4D, np.dtype[ScalarT]] | _Sequence4ND[ScalarT] # >=3d -class SVDResult(NamedTuple): - U: NDArray[Any] - S: NDArray[Any] - Vh: NDArray[Any] +# safe-castable array-likes +type _ToArrayBool_1d = _ArrayLike1D[np.bool_] | Sequence[bool] +type _ToArrayBool_1nd = _ArrayLike1ND[np.bool_] | _NestedSequence[bool] +type _ToArrayBool_2nd = _ArrayLike2ND[np.bool_] | _Sequence2ND[bool] +type _ToArrayInt_1d = _ArrayLike1D[_to_integer] | Sequence[int] +type _ToArrayInt_1nd = _ArrayLike1ND[_to_integer] | _NestedSequence[int] +type _ToArrayInt_2nd = _ArrayLike2ND[_to_integer] | _Sequence2ND[int] +type _ToArrayF64 = _ArrayLike[_to_float64] | _NestedSequence[float] +type _ToArrayF64_1d = _ArrayLike1D[_to_float64_co] | Sequence[float] +type _ToArrayF64_1nd = _ArrayLike1ND[_to_float64_co] | _NestedSequence[float] +type _ToArrayF64_2nd = _ArrayLike2ND[_to_float64_co] | _Sequence2ND[float] +type _ToArrayC128 = _ArrayLike[_to_inexact64] | _NestedSequence[complex] +type _ToArrayC128_1d = _ArrayLike1D[_to_complex128_co] | Sequence[complex] +type _ToArrayC128_1nd = _ArrayLike1ND[_to_complex128_co] | _NestedSequence[complex] +type _ToArrayC128_2nd = _ArrayLike2ND[_to_complex128_co] | _Sequence2ND[complex] +type _ToArrayComplex_1d = _ArrayLike1D[_to_complex] | Sequence[complex] +type _ToArrayComplex_2d = _ArrayLike2D[_to_complex] 
| _Sequence2D[complex] +type _ToArrayComplex_3d = _ArrayLike3D[_to_complex] | _Sequence3D[complex] +type _ToArrayComplex_1nd = _ArrayLike1ND[_to_complex] | _NestedSequence[complex] +type _ToArrayComplex_2nd = _ArrayLike2ND[_to_complex] | _Sequence2ND[complex] +# the invariant `list` type avoids overlap with bool, int, etc +type _AsArrayI64 = _ArrayLike[np.int64] | list[int] | _NestedSequence[list[int]] +type _AsArrayI64_1d = _ArrayLike1D[np.int64] | list[int] +type _AsArrayI64_1nd = _ArrayLike1ND[np.int64] | list[int] | _NestedSequence[list[int]] +type _AsArrayI64_2nd = _ArrayLike2ND[np.int64] | _NestedSequence[list[int]] +type _AsArrayF64 = _ArrayLike[np.float64] | list[float] | _NestedSequence[list[float]] +type _AsArrayF64_1d = _ArrayLike1D[np.float64] | list[float] +type _AsArrayF64_1nd = _ArrayLike1ND[np.float64] | list[float] | _NestedSequence[list[float]] +type _AsArrayF64_2nd = _ArrayLike2ND[np.float64] | _NestedSequence[list[float]] +type _AsArrayC128 = _ArrayLike[np.complex128] | list[complex] | _NestedSequence[list[complex]] +type _AsArrayC128_1d = _ArrayLike1D[np.complex128] | list[complex] +type _AsArrayC128_2d = _ArrayLike2D[np.complex128] | Sequence[list[complex]] +type _AsArrayC128_1nd = _ArrayLike1ND[np.complex128] | list[complex] | _NestedSequence[list[complex]] +type _AsArrayC128_2nd = _ArrayLike2ND[np.complex128] | _NestedSequence[list[complex]] +type _AsArrayC128_3nd = _ArrayLike3ND[np.complex128] | _Sequence2ND[list[complex]] -@overload +type _OrderKind = L[1, -1, 2, -2, "fro", "nuc"] | float # only accepts `-inf` and `inf` as `float` +type _SideKind = L["L", "U", "l", "u"] +type _NonNegInt = L[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] +type _NegInt = L[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16] + +type _LstSqResult[ShapeT: _Shape, InexactT: np.inexact, FloatingT: np.floating] = tuple[ + np.ndarray[ShapeT, np.dtype[InexactT]], # least-squares solution + _Array1D[FloatingT], # residuals + np.int32, # 
rank + _Array1D[FloatingT], # singular values +] + +_FloatingT_co = TypeVar("_FloatingT_co", bound=np.floating, default=Any, covariant=True) +_FloatingOrArrayT_co = TypeVar("_FloatingOrArrayT_co", bound=np.floating | NDArray[np.floating], default=Any, covariant=True) +_InexactT_co = TypeVar("_InexactT_co", bound=np.inexact, default=Any, covariant=True) +_InexactOrArrayT_co = TypeVar("_InexactOrArrayT_co", bound=np.inexact | NDArray[np.inexact], default=Any, covariant=True) + +# shape-typed variant of numpy._typing._SupportsArray +@type_check_only +class _SupportsArray[ShapeT: _Shape, DTypeT: np.dtype](Protocol): + def __array__(self, /) -> np.ndarray[ShapeT, DTypeT]: ... + +### + +fortran_int = np.intc + +# NOTE: These named tuple types are only generic when `typing.TYPE_CHECKING` + +class EigResult(NamedTuple, Generic[_InexactT_co]): + eigenvalues: NDArray[_InexactT_co] + eigenvectors: NDArray[_InexactT_co] + +class EighResult(NamedTuple, Generic[_FloatingT_co, _InexactT_co]): + eigenvalues: NDArray[_FloatingT_co] + eigenvectors: NDArray[_InexactT_co] + +class QRResult(NamedTuple, Generic[_InexactT_co]): + Q: NDArray[_InexactT_co] + R: NDArray[_InexactT_co] + +class SVDResult(NamedTuple, Generic[_FloatingT_co, _InexactT_co]): + U: NDArray[_InexactT_co] + S: NDArray[_FloatingT_co] + Vh: NDArray[_InexactT_co] + +class SlogdetResult(NamedTuple, Generic[_FloatingOrArrayT_co, _InexactOrArrayT_co]): + sign: _FloatingOrArrayT_co + logabsdet: _InexactOrArrayT_co + +# keep in sync with `solve` +@overload # ~float64, +float64 +def tensorsolve(a: _ToArrayF64, b: _ArrayLikeFloat_co, axes: Iterable[int] | None = None) -> NDArray[np.float64]: ... +@overload # +float64, ~float64 +def tensorsolve(a: _ArrayLikeFloat_co, b: _ToArrayF64, axes: Iterable[int] | None = None) -> NDArray[np.float64]: ... +@overload # ~float32, ~float32 def tensorsolve( - a: _ArrayLikeInt_co, - b: _ArrayLikeInt_co, - axes: None | Iterable[int] =..., -) -> NDArray[float64]: ... 
-@overload + a: _ArrayLike[np.float32], b: _ArrayLike[np.float32], axes: Iterable[int] | None = None +) -> NDArray[np.float32]: ... +@overload # +float, +float +def tensorsolve(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, axes: Iterable[int] | None = None) -> NDArray[np.float64 | Any]: ... +@overload # ~complex128, +complex128 +def tensorsolve(a: _AsArrayC128, b: _ArrayLikeComplex_co, axes: Iterable[int] | None = None) -> NDArray[np.complex128]: ... +@overload # +complex128, ~complex128 +def tensorsolve(a: _ArrayLikeComplex_co, b: _AsArrayC128, axes: Iterable[int] | None = None) -> NDArray[np.complex128]: ... +@overload # ~complex64, +complex64 def tensorsolve( - a: _ArrayLikeFloat_co, - b: _ArrayLikeFloat_co, - axes: None | Iterable[int] =..., -) -> NDArray[floating[Any]]: ... -@overload + a: _ArrayLike[np.complex64], b: _ArrayLike[_inexact32], axes: Iterable[int] | None = None +) -> NDArray[np.complex64]: ... +@overload # +complex64, ~complex64 def tensorsolve( - a: _ArrayLikeComplex_co, - b: _ArrayLikeComplex_co, - axes: None | Iterable[int] =..., -) -> NDArray[complexfloating[Any, Any]]: ... + a: _ArrayLike[_inexact32], b: _ArrayLike[np.complex64], axes: Iterable[int] | None = None +) -> NDArray[np.complex64]: ... +@overload # +complex, +complex +def tensorsolve( + a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, axes: Iterable[int] | None = None +) -> NDArray[np.complex128 | Any]: ... -@overload -def solve( - a: _ArrayLikeInt_co, - b: _ArrayLikeInt_co, -) -> NDArray[float64]: ... -@overload -def solve( - a: _ArrayLikeFloat_co, - b: _ArrayLikeFloat_co, -) -> NDArray[floating[Any]]: ... -@overload -def solve( - a: _ArrayLikeComplex_co, - b: _ArrayLikeComplex_co, -) -> NDArray[complexfloating[Any, Any]]: ... +# keep in sync with `tensorsolve` +@overload # ~float64, +float64 +def solve(a: _ToArrayF64, b: _ArrayLikeFloat_co) -> NDArray[np.float64]: ... +@overload # +float64, ~float64 +def solve(a: _ArrayLikeFloat_co, b: _ToArrayF64) -> NDArray[np.float64]: ... 
+@overload # ~float32, ~float32 +def solve(a: _ArrayLike[np.float32], b: _ArrayLike[np.float32]) -> NDArray[np.float32]: ... +@overload # +float, +float +def solve(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[np.float64 | Any]: ... +@overload # ~complex128, +complex128 +def solve(a: _AsArrayC128, b: _ArrayLikeComplex_co) -> NDArray[np.complex128]: ... +@overload # +complex128, ~complex128 +def solve(a: _ArrayLikeComplex_co, b: _AsArrayC128) -> NDArray[np.complex128]: ... +@overload # ~complex64, +complex64 +def solve(a: _ArrayLike[np.complex64], b: _ArrayLike[_inexact32]) -> NDArray[np.complex64]: ... +@overload # +complex64, ~complex64 +def solve(a: _ArrayLike[_inexact32], b: _ArrayLike[np.complex64]) -> NDArray[np.complex64]: ... +@overload # +complex, +complex +def solve(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> NDArray[np.complex128 | Any]: ... -@overload -def tensorinv( - a: _ArrayLikeInt_co, - ind: int = ..., -) -> NDArray[float64]: ... -@overload -def tensorinv( - a: _ArrayLikeFloat_co, - ind: int = ..., -) -> NDArray[floating[Any]]: ... -@overload -def tensorinv( - a: _ArrayLikeComplex_co, - ind: int = ..., -) -> NDArray[complexfloating[Any, Any]]: ... +# keep in sync with the other inverse functions and cholesky +@overload # inexact32 +def tensorinv[ScalarT: _inexact32](a: _ArrayLike[ScalarT], ind: int = 2) -> NDArray[ScalarT]: ... +@overload # +float64 +def tensorinv(a: _ToArrayF64, ind: int = 2) -> NDArray[np.float64]: ... +@overload # ~complex128 +def tensorinv(a: _AsArrayC128, ind: int = 2) -> NDArray[np.complex128]: ... +@overload # fallback +def tensorinv(a: _ArrayLikeComplex_co, ind: int = 2) -> np.ndarray: ... -@overload -def inv(a: _ArrayLikeInt_co) -> NDArray[float64]: ... -@overload -def inv(a: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... -@overload -def inv(a: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... 
+# keep in sync with the other inverse functions and cholesky +@overload # inexact32 +def inv[ScalarT: _inexact32](a: _ArrayLike[ScalarT]) -> NDArray[ScalarT]: ... +@overload # +float64 +def inv(a: _ToArrayF64) -> NDArray[np.float64]: ... +@overload # ~complex128 +def inv(a: _AsArrayC128) -> NDArray[np.complex128]: ... +@overload # fallback +def inv(a: _ArrayLikeComplex_co) -> np.ndarray: ... -# TODO: The supported input and output dtypes are dependent on the value of `n`. -# For example: `n < 0` always casts integer types to float64 -def matrix_power( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - n: SupportsIndex, +# keep in sync with the other inverse functions and cholesky +@overload # inexact32 +def pinv[ScalarT: _inexact32]( + a: _ArrayLike[ScalarT], + rcond: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | _NoValueType = _NoValue, +) -> NDArray[ScalarT]: ... +@overload # +float64 +def pinv( + a: _ToArrayF64, + rcond: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | _NoValueType = _NoValue, +) -> NDArray[np.float64]: ... +@overload # ~complex128 +def pinv( + a: _AsArrayC128, + rcond: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | _NoValueType = _NoValue, +) -> NDArray[np.complex128]: ... +@overload # fallback +def pinv( + a: _ArrayLikeComplex_co, + rcond: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | _NoValueType = _NoValue, ) -> NDArray[Any]: ... -@overload -def cholesky(a: _ArrayLikeInt_co) -> NDArray[float64]: ... -@overload -def cholesky(a: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... -@overload -def cholesky(a: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... +# keep in sync with the inverse functions +@overload # inexact32 +def cholesky[ScalarT: _inexact32](a: _ArrayLike[ScalarT], /, *, upper: bool = False) -> NDArray[ScalarT]: ... 
+@overload # +float64 +def cholesky(a: _ToArrayF64, /, *, upper: bool = False) -> NDArray[np.float64]: ... +@overload # ~complex128 +def cholesky(a: _AsArrayC128, /, *, upper: bool = False) -> NDArray[np.complex128]: ... +@overload # fallback +def cholesky(a: _ArrayLikeComplex_co, /, *, upper: bool = False) -> np.ndarray: ... -@overload -def outer(x1: _ArrayLikeUnknown, x2: _ArrayLikeUnknown) -> NDArray[Any]: ... -@overload -def outer(x1: _ArrayLikeBool_co, x2: _ArrayLikeBool_co) -> NDArray[np.bool]: ... -@overload -def outer(x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... -@overload -def outer(x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... -@overload -def outer(x1: _ArrayLikeFloat_co, x2: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... -@overload -def outer( - x1: _ArrayLikeComplex_co, - x2: _ArrayLikeComplex_co, -) -> NDArray[complexfloating[Any, Any]]: ... -@overload -def outer( - x1: _ArrayLikeTD64_co, - x2: _ArrayLikeTD64_co, - out: None = ..., -) -> NDArray[timedelta64]: ... -@overload -def outer(x1: _ArrayLikeObject_co, x2: _ArrayLikeObject_co) -> NDArray[object_]: ... -@overload -def outer( - x1: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, - x2: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, -) -> _ArrayType: ... +# NOTE: Technically this also accepts boolean array-likes, but that case is not very useful, so we skip it. +# If you have a use case for it, please open an issue. +@overload # +int, n â‰Ĩ 0 +def matrix_power(a: _NestedSequence[int], n: _NonNegInt) -> NDArray[np.int_]: ... +@overload # +integer | ~object, n â‰Ĩ 0 +def matrix_power[ScalarT: np.integer | np.object_](a: _ArrayLike[ScalarT], n: _NonNegInt) -> NDArray[ScalarT]: ... +@overload # +float64, n < 0 +def matrix_power(a: _ToArrayF64, n: _NegInt) -> NDArray[np.float64]: ... +@overload # ~float64 +def matrix_power(a: _AsArrayF64, n: SupportsIndex) -> NDArray[np.float64]: ... 
+@overload # ~complex128 +def matrix_power(a: _AsArrayC128, n: SupportsIndex) -> NDArray[np.complex128]: ... +@overload # ~inexact32 +def matrix_power[ScalarT: _inexact32](a: _ArrayLike[ScalarT], n: SupportsIndex) -> NDArray[ScalarT]: ... +@overload # fallback +def matrix_power(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, n: SupportsIndex) -> np.ndarray: ... -@overload -def qr(a: _ArrayLikeInt_co, mode: _ModeKind = ...) -> QRResult: ... -@overload -def qr(a: _ArrayLikeFloat_co, mode: _ModeKind = ...) -> QRResult: ... -@overload -def qr(a: _ArrayLikeComplex_co, mode: _ModeKind = ...) -> QRResult: ... - -@overload -def eigvals(a: _ArrayLikeInt_co) -> NDArray[float64] | NDArray[complex128]: ... -@overload -def eigvals(a: _ArrayLikeFloat_co) -> NDArray[floating[Any]] | NDArray[complexfloating[Any, Any]]: ... -@overload -def eigvals(a: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... - -@overload -def eigvalsh(a: _ArrayLikeInt_co, UPLO: L["L", "U", "l", "u"] = ...) -> NDArray[float64]: ... -@overload -def eigvalsh(a: _ArrayLikeComplex_co, UPLO: L["L", "U", "l", "u"] = ...) -> NDArray[floating[Any]]: ... - -@overload -def eig(a: _ArrayLikeInt_co) -> EigResult: ... -@overload -def eig(a: _ArrayLikeFloat_co) -> EigResult: ... -@overload +# NOTE: for real input the output dtype (floating/complexfloating) depends on the specific values +@overload # abstract `inexact` and `floating` (excluding concrete types) +def eig(a: NDArray[np.inexact[Never]]) -> EigResult: ... +@overload # ~complex128 +def eig(a: _AsArrayC128) -> EigResult[np.complex128]: ... +@overload # +float64 +def eig(a: _ToArrayF64) -> EigResult[np.complex128]: ... +@overload # ~complex64 +def eig(a: _ArrayLike[np.complex64]) -> EigResult[np.complex64]: ... +@overload # ~float32 +def eig(a: _ArrayLike[np.float32]) -> EigResult[np.complex64]: ... +@overload # fallback def eig(a: _ArrayLikeComplex_co) -> EigResult: ... 
-@overload -def eigh( - a: _ArrayLikeInt_co, - UPLO: L["L", "U", "l", "u"] = ..., -) -> EighResult: ... -@overload -def eigh( - a: _ArrayLikeFloat_co, - UPLO: L["L", "U", "l", "u"] = ..., -) -> EighResult: ... -@overload -def eigh( - a: _ArrayLikeComplex_co, - UPLO: L["L", "U", "l", "u"] = ..., -) -> EighResult: ... +# +@overload # workaround for microsoft/pyright#10232 +def eigh(a: NDArray[Never], UPLO: _SideKind = "L") -> EighResult: ... +@overload # ~inexact32 +def eigh[ScalarT: _inexact32](a: _ArrayLike[ScalarT], UPLO: _SideKind = "L") -> EighResult[np.float32, ScalarT]: ... +@overload # +float64 +def eigh(a: _ToArrayF64, UPLO: _SideKind = "L") -> EighResult[np.float64, np.float64]: ... +@overload # ~complex128 +def eigh(a: _AsArrayC128, UPLO: _SideKind = "L") -> EighResult[np.float64, np.complex128]: ... +@overload # fallback +def eigh(a: _ArrayLikeComplex_co, UPLO: _SideKind = "L") -> EighResult: ... -@overload +# +@overload # ~inexact32, reduced|complete +def qr[ScalarT: _inexact32](a: _ArrayLike[ScalarT], mode: L["reduced", "complete"] = "reduced") -> QRResult[ScalarT]: ... +@overload # ~inexact32, r +def qr[ScalarT: _inexact32](a: _ArrayLike[ScalarT], mode: L["r"]) -> NDArray[ScalarT]: ... +@overload # ~inexact32, raw +def qr[ScalarT: _inexact32](a: _ArrayLike[ScalarT], mode: L["raw"]) -> _tuple2[NDArray[ScalarT]]: ... +@overload # +float64, reduced|complete +def qr(a: _ToArrayF64, mode: L["reduced", "complete"] = "reduced") -> QRResult[np.float64]: ... +@overload # +float64, r +def qr(a: _ToArrayF64, mode: L["r"]) -> NDArray[np.float64]: ... +@overload # +float64, raw +def qr(a: _ToArrayF64, mode: L["raw"]) -> _tuple2[NDArray[np.float64]]: ... +@overload # ~complex128, reduced|complete +def qr(a: _AsArrayC128, mode: L["reduced", "complete"] = "reduced") -> QRResult[np.complex128]: ... +@overload # ~complex128, r +def qr(a: _AsArrayC128, mode: L["r"]) -> NDArray[np.complex128]: ... 
+@overload # ~complex128, raw +def qr(a: _AsArrayC128, mode: L["raw"]) -> _tuple2[NDArray[np.complex128]]: ... +@overload # fallback, reduced|complete +def qr(a: _ArrayLikeComplex_co, mode: L["reduced", "complete"] = "reduced") -> QRResult: ... +@overload # fallback, r +def qr(a: _ArrayLikeComplex_co, mode: L["r"]) -> np.ndarray: ... +@overload # fallback, raw +def qr(a: _ArrayLikeComplex_co, mode: L["raw"]) -> _tuple2[np.ndarray]: ... + +# +@overload # workaround for microsoft/pyright#10232, compute_uv=True (default) +def svd(a: NDArray[Never], full_matrices: bool = True, compute_uv: L[True] = True, hermitian: bool = False) -> SVDResult: ... +@overload # workaround for microsoft/pyright#10232, compute_uv=False (positional) +def svd(a: NDArray[Never], full_matrices: bool, compute_uv: L[False], hermitian: bool = False) -> np.ndarray: ... +@overload # workaround for microsoft/pyright#10232, compute_uv=False (keyword) +def svd(a: NDArray[Never], full_matrices: bool = True, *, compute_uv: L[False], hermitian: bool = False) -> np.ndarray: ... +@overload # ~inexact32, compute_uv=True (default) +def svd[ScalarT: _inexact32]( + a: _ArrayLike[ScalarT], full_matrices: bool = True, compute_uv: L[True] = True, hermitian: bool = False +) -> SVDResult[np.float32, ScalarT]: ... +@overload # ~inexact32, compute_uv=False (positional) +def svd(a: _ArrayLike[_inexact32], full_matrices: bool, compute_uv: L[False], hermitian: bool = False) -> NDArray[np.float32]: ... +@overload # ~inexact32, compute_uv=False (keyword) def svd( - a: _ArrayLikeInt_co, - full_matrices: bool = ..., - compute_uv: L[True] = ..., - hermitian: bool = ..., -) -> SVDResult: ... -@overload + a: _ArrayLike[_inexact32], full_matrices: bool = True, *, compute_uv: L[False], hermitian: bool = False +) -> NDArray[np.float32]: ... +@overload # +float64, compute_uv=True (default) def svd( - a: _ArrayLikeFloat_co, - full_matrices: bool = ..., - compute_uv: L[True] = ..., - hermitian: bool = ..., -) -> SVDResult: ... 
-@overload + a: _ToArrayF64, full_matrices: bool = True, compute_uv: L[True] = True, hermitian: bool = False +) -> SVDResult[np.float64, np.float64]: ... +@overload # ~complex128, compute_uv=True (default) def svd( - a: _ArrayLikeComplex_co, - full_matrices: bool = ..., - compute_uv: L[True] = ..., - hermitian: bool = ..., -) -> SVDResult: ... -@overload -def svd( - a: _ArrayLikeInt_co, - full_matrices: bool = ..., - compute_uv: L[False] = ..., - hermitian: bool = ..., -) -> NDArray[float64]: ... -@overload + a: _AsArrayC128, full_matrices: bool = True, compute_uv: L[True] = True, hermitian: bool = False +) -> SVDResult[np.float64, np.complex128]: ... +@overload # +float64 | ~complex128, compute_uv=False (positional) +def svd(a: _ToArrayC128, full_matrices: bool, compute_uv: L[False], hermitian: bool = False) -> NDArray[np.float64]: ... +@overload # +float64 | ~complex128, compute_uv=False (keyword) +def svd(a: _ToArrayC128, full_matrices: bool = True, *, compute_uv: L[False], hermitian: bool = False) -> NDArray[np.float64]: ... +@overload # fallback, compute_uv=True (default) def svd( - a: _ArrayLikeComplex_co, - full_matrices: bool = ..., - compute_uv: L[False] = ..., - hermitian: bool = ..., -) -> NDArray[floating[Any]]: ... + a: _ArrayLikeComplex_co, full_matrices: bool = True, compute_uv: L[True] = True, hermitian: bool = False +) -> SVDResult: ... +@overload # fallback, compute_uv=False (positional) +def svd(a: _ArrayLikeComplex_co, full_matrices: bool, compute_uv: L[False], hermitian: bool = False) -> np.ndarray: ... +@overload # fallback, compute_uv=False (keyword) +def svd(a: _ArrayLikeComplex_co, full_matrices: bool = True, *, compute_uv: L[False], hermitian: bool = False) -> np.ndarray: ... + +# NOTE: for real input the output dtype (floating/complexfloating) depends on the specific values +@overload # abstract `inexact` and `floating` (excluding concrete types) +def eigvals(a: NDArray[np.inexact[Never]]) -> np.ndarray: ... 
+@overload # ~complex128 +def eigvals(a: _AsArrayC128) -> NDArray[np.complex128]: ... +@overload # +float64 +def eigvals(a: _ToArrayF64) -> NDArray[np.complex128] | NDArray[np.float64]: ... +@overload # ~complex64 +def eigvals(a: _ArrayLike[np.complex64]) -> NDArray[np.complex64]: ... +@overload # ~float32 +def eigvals(a: _ArrayLike[np.float32]) -> NDArray[np.complex64] | NDArray[np.float32]: ... +@overload # fallback +def eigvals(a: _ArrayLikeComplex_co) -> np.ndarray: ... -def svdvals( - x: _ArrayLikeInt_co | _ArrayLikeFloat_co | _ArrayLikeComplex_co -) -> NDArray[floating[Any]]: ... +# keep in sync with svdvals +@overload # abstract `inexact` (excluding concrete types) +def eigvalsh(a: NDArray[np.inexact[Never]], UPLO: _SideKind = "L") -> NDArray[np.floating]: ... +@overload # ~inexact32 +def eigvalsh(a: _ArrayLike[_inexact32], UPLO: _SideKind = "L") -> NDArray[np.float32]: ... +@overload # +complex128 +def eigvalsh(a: _ToArrayC128, UPLO: _SideKind = "L") -> NDArray[np.float64]: ... +@overload # fallback +def eigvalsh(a: _ArrayLikeComplex_co, UPLO: _SideKind = "L") -> NDArray[np.floating]: ... -# TODO: Returns a scalar for 2D arrays and -# a `(x.ndim - 2)`` dimensionl array otherwise -def cond(x: _ArrayLikeComplex_co, p: None | float | L["fro", "nuc"] = ...) -> Any: ... +# keep in sync with eigvalsh +@overload # abstract `inexact` (excluding concrete types) +def svdvals(a: NDArray[np.inexact[Never]], /) -> NDArray[np.floating]: ... +@overload # ~inexact32 +def svdvals(a: _ArrayLike[_inexact32], /) -> NDArray[np.float32]: ... +@overload # +complex128 +def svdvals(a: _ToArrayC128, /) -> NDArray[np.float64]: ... +@overload # fallback +def svdvals(a: _ArrayLikeComplex_co, /) -> NDArray[np.floating]: ... 
-# TODO: Returns `int` for <2D arrays and `intp` otherwise +# +@overload # workaround for microsoft/pyright#10232 +def matrix_rank( + A: np.ndarray[_JustAnyShape, np.dtype[_to_complex]], + tol: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | None = None, +) -> Any: ... +@overload # <2d +def matrix_rank( + A: _SupportsArray[_AtMost1D, np.dtype[_to_complex]] | Sequence[complex | _to_complex] | complex | _to_complex, + tol: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | None = None, +) -> L[0, 1]: ... +@overload # =2d +def matrix_rank( + A: _ToArrayComplex_2d, + tol: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | None = None, +) -> np.int_: ... +@overload # =3d +def matrix_rank( + A: _ToArrayComplex_3d, + tol: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | None = None, +) -> _Array1D[np.int_]: ... +@overload # â‰Ĩ4d +def matrix_rank( + A: _ArrayLike4ND[_to_complex] | _Sequence4ND[complex], + tol: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.int_]: ... +@overload # ?d def matrix_rank( A: _ArrayLikeComplex_co, - tol: None | _ArrayLikeFloat_co = ..., - hermitian: bool = ..., + tol: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, *, - rtol: None | _ArrayLikeFloat_co = ..., + rtol: _ArrayLikeFloat_co | None = None, ) -> Any: ... -@overload -def pinv( - a: _ArrayLikeInt_co, - rcond: _ArrayLikeFloat_co = ..., - hermitian: bool = ..., -) -> NDArray[float64]: ... -@overload -def pinv( - a: _ArrayLikeFloat_co, - rcond: _ArrayLikeFloat_co = ..., - hermitian: bool = ..., -) -> NDArray[floating[Any]]: ... -@overload -def pinv( - a: _ArrayLikeComplex_co, - rcond: _ArrayLikeFloat_co = ..., - hermitian: bool = ..., -) -> NDArray[complexfloating[Any, Any]]: ... 
+# +@overload # workaround for microsoft/pyright#10232 +def cond(x: np.ndarray[_JustAnyShape, np.dtype[_to_complex]], p: _OrderKind | None = None) -> Any: ... +@overload # 2d ~inexact32 +def cond(x: _ArrayLike2D[_inexact32], p: _OrderKind | None = None) -> np.float32: ... +@overload # 2d +inexact64 +def cond(x: _ArrayLike2D[_to_inexact64] | _Sequence2D[complex], p: _OrderKind | None = None) -> np.float64: ... +@overload # 2d ~number +def cond(x: _ArrayLike2D[_to_complex], p: _OrderKind | None = None) -> np.floating: ... +@overload # >2d ~inexact32 +def cond(x: _ArrayLike3ND[_inexact32], p: _OrderKind | None = None) -> NDArray[np.float32]: ... +@overload # >2d +inexact64 +def cond(x: _ArrayLike3ND[_to_inexact64] | _Sequence3ND[complex], p: _OrderKind | None = None) -> NDArray[np.float64]: ... +@overload # >2d ~number +def cond(x: _ArrayLike3ND[_to_complex], p: _OrderKind | None = None) -> NDArray[np.floating]: ... +@overload # fallback +def cond(x: _ArrayLikeComplex_co, p: _OrderKind | None = None) -> Any: ... -# TODO: Returns a 2-tuple of scalars for 2D arrays and -# a 2-tuple of `(a.ndim - 2)`` dimensionl arrays otherwise +# keep in sync with `det` +@overload # workaround for microsoft/pyright#10232 +def slogdet(a: np.ndarray[_JustAnyShape, np.dtype[_to_complex]]) -> SlogdetResult: ... +@overload # 2d ~inexact32 +def slogdet[ScalarT: _inexact32](a: _ArrayLike2D[ScalarT]) -> SlogdetResult[np.float32, ScalarT]: ... +@overload # >2d ~inexact32 +def slogdet[ScalarT: _inexact32](a: _ArrayLike3ND[ScalarT]) -> SlogdetResult[NDArray[np.float32], NDArray[ScalarT]]: ... +@overload # 2d +float64 +def slogdet(a: _ArrayLike2D[_to_float64]) -> SlogdetResult[np.float64, np.float64]: ... +@overload # >2d +float64 +def slogdet(a: _ArrayLike3ND[_to_float64]) -> SlogdetResult[NDArray[np.float64], NDArray[np.float64]]: ... +@overload # 2d ~complex128 +def slogdet(a: _AsArrayC128_2d) -> SlogdetResult[np.float64, np.complex128]: ... 
+@overload # >2d ~complex128 +def slogdet(a: _AsArrayC128_3nd) -> SlogdetResult[NDArray[np.float64], NDArray[np.complex128]]: ... +@overload # fallback def slogdet(a: _ArrayLikeComplex_co) -> SlogdetResult: ... -# TODO: Returns a 2-tuple of scalars for 2D arrays and -# a 2-tuple of `(a.ndim - 2)`` dimensionl arrays otherwise +# keep in sync with `slogdet` +@overload # workaround for microsoft/pyright#10232 +def det(a: np.ndarray[_JustAnyShape, np.dtype[_to_complex]]) -> Any: ... +@overload # 2d ~inexact32 +def det[ScalarT: _inexact32](a: _ArrayLike2D[ScalarT]) -> ScalarT: ... +@overload # >2d ~inexact32 +def det[ScalarT: _inexact32](a: _ArrayLike3ND[ScalarT]) -> NDArray[ScalarT]: ... +@overload # 2d +float64 +def det(a: _ArrayLike2D[_to_float64]) -> np.float64: ... +@overload # >2d +float64 +def det(a: _ArrayLike3ND[_to_float64]) -> NDArray[np.float64]: ... +@overload # 2d ~complex128 +def det(a: _AsArrayC128_2d) -> np.complex128: ... +@overload # >2d ~complex128 +def det(a: _AsArrayC128_3nd) -> NDArray[np.complex128]: ... +@overload # fallback def det(a: _ArrayLikeComplex_co) -> Any: ... -@overload -def lstsq(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, rcond: None | float = ...) -> tuple[ - NDArray[float64], - NDArray[float64], - int32, - NDArray[float64], -]: ... -@overload -def lstsq(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, rcond: None | float = ...) -> tuple[ - NDArray[floating[Any]], - NDArray[floating[Any]], - int32, - NDArray[floating[Any]], -]: ... -@overload -def lstsq(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, rcond: None | float = ...) -> tuple[ - NDArray[complexfloating[Any, Any]], - NDArray[floating[Any]], - int32, - NDArray[floating[Any]], -]: ... 
+# +@overload # +float64, ~float64, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _ArrayLike2D[_to_float64] | _Sequence2D[float], + b: _SupportsArray[ShapeT, np.dtype[np.floating | _to_integer]], + rcond: float | None = None, +) -> _LstSqResult[ShapeT, np.float64, np.float64]: ... +@overload # ~float64, +float64, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _ArrayLike2D[np.floating | _to_integer] | _Sequence2D[float], + b: _SupportsArray[ShapeT, np.dtype[_to_float64]], + rcond: float | None = None, +) -> _LstSqResult[ShapeT, np.float64, np.float64]: ... +@overload # +complex128, ~complex128, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _ToArrayComplex_2d, b: _SupportsArray[ShapeT, np.dtype[np.complex128]], rcond: float | None = None +) -> _LstSqResult[ShapeT, np.complex128, np.float64]: ... +@overload # ~complex128, +complex128, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _AsArrayC128_2d, b: _SupportsArray[ShapeT, np.dtype[_to_complex]], rcond: float | None = None +) -> _LstSqResult[ShapeT, np.complex128, np.float64]: ... +@overload # ~float32, ~float32, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _ArrayLike2D[np.float32], b: _SupportsArray[ShapeT, np.dtype[np.float32]], rcond: float | None = None +) -> _LstSqResult[ShapeT, np.float32, np.float32]: ... +@overload # +complex64, ~complex64, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _ArrayLike2D[_inexact32], b: _SupportsArray[ShapeT, np.dtype[np.complex64]], rcond: float | None = None +) -> _LstSqResult[ShapeT, np.complex64, np.float32]: ... +@overload # ~complex64, +complex64, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _ArrayLike2D[np.complex64], b: _SupportsArray[ShapeT, np.dtype[_inexact32]], rcond: float | None = None +) -> _LstSqResult[ShapeT, np.complex64, np.float32]: ... 
+@overload # +float64, +float64, unknown shape +def lstsq( + a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, rcond: float | None = None +) -> _LstSqResult[_AnyShape, np.float64 | Any, np.float64 | Any]: ... +@overload # +complex128, +complex128, unknown shape +def lstsq( + a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, rcond: float | None = None +) -> _LstSqResult[_AnyShape, np.complex128 | Any, np.float64 | Any]: ... -@overload +# NOTE: This assumes that `axis` is only passed if `x` is >1d, and that `keepdims` is never passed positionally. +# keep in sync with `vector_norm` +@overload # +inexact64 (unsafe casting), axis=None, keepdims=False def norm( - x: ArrayLike, - ord: None | float | L["fro", "nuc"] = ..., - axis: None = ..., - keepdims: bool = ..., -) -> floating[Any]: ... -@overload + x: _ArrayLike[_to_inexact64_unsafe] | _NestedSequence[complex], + ord: _OrderKind | None = None, + axis: None = None, + keepdims: L[False] = False, +) -> np.float64: ... +@overload # +inexact64 (unsafe casting), axis= (positional), keepdims=False def norm( - x: ArrayLike, - ord: None | float | L["fro", "nuc"] = ..., - axis: SupportsInt | SupportsIndex | tuple[int, ...] = ..., - keepdims: bool = ..., -) -> Any: ... + x: _ArrayLike[_to_inexact64_unsafe] | _NestedSequence[complex], + ord: _OrderKind | None, + axis: _Ax2, + keepdims: L[False] = False, +) -> NDArray[np.float64]: ... +@overload # +inexact64 (unsafe casting), axis= (keyword), keepdims=False +def norm( + x: _ArrayLike[_to_inexact64_unsafe] | _NestedSequence[complex], + ord: _OrderKind | None = None, + *, + axis: _Ax2, + keepdims: L[False] = False, +) -> NDArray[np.float64]: ... +@overload # +inexact64 (unsafe casting), shape known, keepdims=True +def norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[_to_inexact64_unsafe]], + ord: _OrderKind | None = None, + axis: _Ax2 | None = None, + *, + keepdims: L[True], +) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... 
+@overload # +inexact64 (unsafe casting), shape unknown, keepdims=True +def norm( + x: _ArrayLike[_to_inexact64_unsafe] | _NestedSequence[complex], + ord: _OrderKind | None = None, + axis: _Ax2 | None = None, + *, + keepdims: L[True], +) -> NDArray[np.float64]: ... +@overload # ~float16, axis=None, keepdims=False +def norm( + x: _ArrayLike[np.float16], ord: _OrderKind | None = None, axis: None = None, keepdims: L[False] = False +) -> np.float16: ... +@overload # ~float16, axis= (positional), keepdims=False +def norm(x: _ArrayLike[np.float16], ord: _OrderKind | None, axis: _Ax2, keepdims: L[False] = False) -> NDArray[np.float16]: ... +@overload # ~float16, axis= (keyword), keepdims=False +def norm( + x: _ArrayLike[np.float16], ord: _OrderKind | None = None, *, axis: _Ax2, keepdims: L[False] = False +) -> NDArray[np.float16]: ... +@overload # ~float16, shape known, keepdims=True +def norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[np.float16]], ord: _OrderKind | None = None, axis: _Ax2 | None = None, *, keepdims: L[True] +) -> np.ndarray[ShapeT, np.dtype[np.float16]]: ... +@overload # ~float16, shape unknown, keepdims=True +def norm( + x: _ArrayLike[np.float16], ord: _OrderKind | None = None, axis: _Ax2 | None = None, *, keepdims: L[True] +) -> NDArray[np.float16]: ... +@overload # ~inexact32, axis=None, keepdims=False +def norm( + x: _ArrayLike[_inexact32], ord: _OrderKind | None = None, axis: None = None, keepdims: L[False] = False +) -> np.float32: ... +@overload # ~inexact32, axis= (positional), keepdims=False +def norm(x: _ArrayLike[_inexact32], ord: _OrderKind | None, axis: _Ax2, keepdims: L[False] = False) -> NDArray[np.float32]: ... +@overload # ~inexact32, axis= (keyword), keepdims=False +def norm( + x: _ArrayLike[_inexact32], ord: _OrderKind | None = None, *, axis: _Ax2, keepdims: L[False] = False +) -> NDArray[np.float32]: ... 
+@overload # ~inexact32, shape known, keepdims=True +def norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[_inexact32]], ord: _OrderKind | None = None, axis: _Ax2 | None = None, *, keepdims: L[True] +) -> np.ndarray[ShapeT, np.dtype[np.float32]]: ... +@overload # ~inexact32, shape unknown, keepdims=True +def norm( + x: _ArrayLike[_inexact32], ord: _OrderKind | None = None, axis: _Ax2 | None = None, *, keepdims: L[True] +) -> NDArray[np.float32]: ... +@overload # ~inexact80, axis=None, keepdims=False +def norm( + x: _ArrayLike[_inexact80], ord: _OrderKind | None = None, axis: None = None, keepdims: L[False] = False +) -> np.longdouble: ... +@overload # ~inexact80, axis= (positional), keepdims=False +def norm(x: _ArrayLike[_inexact80], ord: _OrderKind | None, axis: _Ax2, keepdims: L[False] = False) -> NDArray[np.longdouble]: ... +@overload # ~inexact80, axis= (keyword), keepdims=False +def norm( + x: _ArrayLike[_inexact80], ord: _OrderKind | None = None, *, axis: _Ax2, keepdims: L[False] = False +) -> NDArray[np.longdouble]: ... +@overload # ~inexact80, shape known, keepdims=True +def norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[_inexact80]], ord: _OrderKind | None = None, axis: _Ax2 | None = None, *, keepdims: L[True] +) -> np.ndarray[ShapeT, np.dtype[np.longdouble]]: ... +@overload # ~inexact80, shape unknown, keepdims=True +def norm( + x: _ArrayLike[_inexact80], ord: _OrderKind | None = None, axis: _Ax2 | None = None, *, keepdims: L[True] +) -> NDArray[np.longdouble]: ... +@overload # fallback +def norm(x: ArrayLike, ord: _OrderKind | None = None, axis: _Ax2 | None = None, keepdims: bool = False) -> Any: ... -@overload +# +@overload # +inexact64 (unsafe casting), ?d, keepdims=False def matrix_norm( - x: ArrayLike, - ord: None | float | L["fro", "nuc"] = ..., - keepdims: bool = ..., -) -> floating[Any]: ... 
-@overload + x: _SupportsArray[_JustAnyShape, np.dtype[_to_inexact64_unsafe]], + /, + *, + ord: _OrderKind | None = "fro", + keepdims: L[False] = False, +) -> NDArray[np.float64] | Any: ... +@overload # +inexact64 (unsafe casting), 2d, keepdims=False def matrix_norm( - x: ArrayLike, - ord: None | float | L["fro", "nuc"] = ..., - keepdims: bool = ..., -) -> Any: ... + x: _ArrayLike2D[_to_inexact64_unsafe] | _Sequence2D[complex], + /, + *, + ord: _OrderKind | None = "fro", + keepdims: L[False] = False, +) -> np.float64: ... +@overload # +inexact64 (unsafe casting), >2d, keepdims=False +def matrix_norm( + x: _ArrayLike3ND[_to_inexact64_unsafe] | _Sequence3D[complex], + /, + *, + ord: _OrderKind | None = "fro", + keepdims: L[False] = False, +) -> NDArray[np.float64]: ... +@overload # +inexact64 (unsafe casting), shape known, keepdims=True +def matrix_norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[_to_inexact64_unsafe]], + /, + *, + ord: _OrderKind | None = "fro", + keepdims: L[True], +) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... +@overload # +inexact64 (unsafe casting), ?d, keepdims=True +def matrix_norm( + x: _ArrayLike2ND[_to_inexact64_unsafe] | _Sequence2ND[complex], /, *, ord: _OrderKind | None = "fro", keepdims: L[True] +) -> NDArray[np.float64]: ... +@overload # ~float16, ?d, keepdims=False +def matrix_norm( + x: _SupportsArray[_JustAnyShape, np.dtype[np.float16]], /, *, ord: _OrderKind | None = "fro", keepdims: L[False] = False +) -> NDArray[np.float16] | Any: ... +@overload # ~float16, 2d, keepdims=False +def matrix_norm(x: _ArrayLike2D[np.float16], /, *, ord: _OrderKind | None = "fro", keepdims: L[False] = False) -> np.float16: ... +@overload # ~float16, >2d, keepdims=False +def matrix_norm( + x: _ArrayLike3ND[np.float16], /, *, ord: _OrderKind | None = "fro", keepdims: L[False] = False +) -> NDArray[np.float16]: ... 
+@overload # ~float16, shape known, keepdims=True +def matrix_norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[np.float16]], /, *, ord: _OrderKind | None = "fro", keepdims: L[True] +) -> np.ndarray[ShapeT, np.dtype[np.float16]]: ... +@overload # ~float16, ?d, keepdims=True +def matrix_norm(x: _ArrayLike2ND[np.float16], /, *, ord: _OrderKind | None = "fro", keepdims: L[True]) -> NDArray[np.float16]: ... +@overload # ~inexact32, ?d, keepdims=False +def matrix_norm( + x: _SupportsArray[_JustAnyShape, np.dtype[_inexact32]], /, *, ord: _OrderKind | None = "fro", keepdims: L[False] = False +) -> NDArray[np.float32] | Any: ... +@overload # ~inexact32, 2d, keepdims=False +def matrix_norm(x: _ArrayLike2D[_inexact32], /, *, ord: _OrderKind | None = "fro", keepdims: L[False] = False) -> np.float32: ... +@overload # ~inexact32, >2d, keepdims=False +def matrix_norm( + x: _ArrayLike3ND[_inexact32], /, *, ord: _OrderKind | None = "fro", keepdims: L[False] = False +) -> NDArray[np.float32]: ... +@overload # ~inexact32, shape known, keepdims=True +def matrix_norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[_inexact32]], /, *, ord: _OrderKind | None = "fro", keepdims: L[True] +) -> np.ndarray[ShapeT, np.dtype[np.float32]]: ... +@overload # ~inexact32, ?d, keepdims=True +def matrix_norm(x: _ArrayLike2ND[_inexact32], /, *, ord: _OrderKind | None = "fro", keepdims: L[True]) -> NDArray[np.float32]: ... +@overload # ~inexact80, ?d, keepdims=False +def matrix_norm( + x: _SupportsArray[_JustAnyShape, np.dtype[_inexact80]], /, *, ord: _OrderKind | None = "fro", keepdims: L[False] = False +) -> NDArray[np.longdouble] | Any: ... +@overload # ~inexact80, 2d, keepdims=False +def matrix_norm( + x: _ArrayLike2D[_inexact80], /, *, ord: _OrderKind | None = "fro", keepdims: L[False] = False +) -> np.longdouble: ... 
+@overload # ~inexact80, >2d, keepdims=False +def matrix_norm( + x: _ArrayLike3ND[_inexact80], /, *, ord: _OrderKind | None = "fro", keepdims: L[False] = False +) -> NDArray[np.longdouble]: ... +@overload # ~inexact80, shape known, keepdims=True +def matrix_norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[_inexact80]], /, *, ord: _OrderKind | None = "fro", keepdims: L[True] +) -> np.ndarray[ShapeT, np.dtype[np.longdouble]]: ... +@overload # ~inexact80, ?d, keepdims=True +def matrix_norm( + x: _ArrayLike2ND[_inexact80], /, *, ord: _OrderKind | None = "fro", keepdims: L[True] +) -> NDArray[np.longdouble]: ... +@overload # fallback +def matrix_norm(x: ArrayLike, /, *, ord: _OrderKind | None = "fro", keepdims: bool = False) -> Any: ... -@overload +# keep in sync with `norm` +@overload # +inexact64 (unsafe casting), axis=None, keepdims=False def vector_norm( - x: ArrayLike, - axis: None = ..., - ord: None | float = ..., - keepdims: bool = ..., -) -> floating[Any]: ... -@overload + x: _ArrayLike[_to_inexact64_unsafe] | _NestedSequence[complex], + /, + *, + keepdims: L[False] = False, + axis: None = None, + ord: float | None = 2, +) -> np.float64: ... +@overload # +inexact64 (unsafe casting), axis=, keepdims=False def vector_norm( - x: ArrayLike, - axis: SupportsInt | SupportsIndex | tuple[int, ...] = ..., - ord: None | float = ..., - keepdims: bool = ..., -) -> Any: ... - -# TODO: Returns a scalar or array -def multi_dot( - arrays: Iterable[_ArrayLikeComplex_co | _ArrayLikeObject_co | _ArrayLikeTD64_co], + x: _ArrayLike[_to_inexact64_unsafe] | _NestedSequence[complex], + /, *, - out: None | NDArray[Any] = ..., -) -> Any: ... + axis: _Ax2, + keepdims: L[False] = False, + ord: float | None = 2, +) -> NDArray[np.float64]: ... 
+@overload # +inexact64 (unsafe casting), shape known, keepdims=True +def vector_norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[_to_inexact64_unsafe]], + /, + *, + axis: _Ax2 | None = None, + keepdims: L[True], + ord: float | None = 2, +) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... +@overload # +inexact64 (unsafe casting), shape unknown, keepdims=True +def vector_norm( + x: _ArrayLike[_to_inexact64_unsafe] | _NestedSequence[complex], + /, + *, + axis: _Ax2 | None = None, + keepdims: L[True], + ord: float | None = 2, +) -> NDArray[np.float64]: ... +@overload # ~float16, axis=None, keepdims=False +def vector_norm( + x: _ArrayLike[np.float16], /, *, axis: None = None, keepdims: L[False] = False, ord: float | None = 2 +) -> np.float16: ... +@overload # ~float16, axis= keepdims=False +def vector_norm( + x: _ArrayLike[np.float16], /, *, axis: _Ax2, keepdims: L[False] = False, ord: float | None = 2 +) -> NDArray[np.float16]: ... +@overload # ~float16, shape known, keepdims=True +def vector_norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[np.float16]], /, *, axis: _Ax2 | None = None, keepdims: L[True], ord: float | None = 2 +) -> np.ndarray[ShapeT, np.dtype[np.float16]]: ... +@overload # ~float16, shape unknown, keepdims=True +def vector_norm( + x: _ArrayLike[np.float16], /, *, axis: _Ax2 | None = None, keepdims: L[True], ord: float | None = 2 +) -> NDArray[np.float16]: ... +@overload # ~inexact32, axis=None, keepdims=False +def vector_norm( + x: _ArrayLike[_inexact32], /, *, axis: None = None, keepdims: L[False] = False, ord: float | None = 2 +) -> np.float32: ... +@overload # ~inexact32, axis= keepdims=False +def vector_norm( + x: _ArrayLike[_inexact32], /, *, axis: _Ax2, keepdims: L[False] = False, ord: float | None = 2 +) -> NDArray[np.float32]: ... 
+@overload # ~inexact32, shape known, keepdims=True +def vector_norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[_inexact32]], /, *, axis: _Ax2 | None = None, keepdims: L[True], ord: float | None = 2 +) -> np.ndarray[ShapeT, np.dtype[np.float32]]: ... +@overload # ~inexact32, shape unknown, keepdims=True +def vector_norm( + x: _ArrayLike[_inexact32], /, *, axis: _Ax2 | None = None, keepdims: L[True], ord: float | None = 2 +) -> NDArray[np.float32]: ... +@overload # ~inexact80, axis=None, keepdims=False +def vector_norm( + x: _ArrayLike[_inexact80], /, *, axis: None = None, keepdims: L[False] = False, ord: float | None = 2 +) -> np.longdouble: ... +@overload # ~inexact80, axis=, keepdims=False +def vector_norm( + x: _ArrayLike[_inexact80], /, *, axis: _Ax2, keepdims: L[False] = False, ord: float | None = 2 +) -> NDArray[np.longdouble]: ... +@overload # ~inexact80, shape known, keepdims=True +def vector_norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[_inexact80]], /, *, axis: _Ax2 | None = None, keepdims: L[True], ord: float | None = 2 +) -> np.ndarray[ShapeT, np.dtype[np.longdouble]]: ... +@overload # ~inexact80, shape unknown, keepdims=True +def vector_norm( + x: _ArrayLike[_inexact80], /, *, axis: _Ax2 | None = None, keepdims: L[True], ord: float | None = 2 +) -> NDArray[np.longdouble]: ... +@overload # fallback +def vector_norm(x: ArrayLike, /, *, axis: _Ax2 | None = None, keepdims: bool = False, ord: float | None = 2) -> Any: ... -def diagonal( - x: ArrayLike, # >= 2D array - offset: SupportsIndex = ..., -) -> NDArray[Any]: ... +# keep in sync with numpy._core.numeric.tensordot (ignoring `/, *`) +@overload +def tensordot[ScalarT: np.number | np.timedelta64 | np.object_]( + a: _ArrayLike[ScalarT], b: _ArrayLike[ScalarT], /, *, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 +) -> NDArray[ScalarT]: ... 
+@overload +def tensordot( + a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, /, *, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 +) -> NDArray[np.bool]: ... +@overload +def tensordot( + a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, /, *, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 +) -> NDArray[np.int_ | Any]: ... +@overload +def tensordot( + a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, /, *, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 +) -> NDArray[np.float64 | Any]: ... +@overload +def tensordot( + a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, /, *, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 +) -> NDArray[np.complex128 | Any]: ... + +# +@overload +def multi_dot[ArrayT: np.ndarray]( + arrays: Iterable[_ArrayLikeComplex_co | _ArrayLikeObject_co | _ArrayLikeTD64_co], *, out: ArrayT, +) -> ArrayT: ... +@overload +def multi_dot[ + AnyScalarT: ( + np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.int64, np.uint64, + np.float16, np.float32, np.float64, np.longdouble, np.complex64, np.complex128, np.clongdouble, + np.object_, np.timedelta64, + ), +](arrays: Sequence[_ArrayLike[AnyScalarT]], *, out: None = None) -> NDArray[AnyScalarT]: ... +@overload +def multi_dot(arrays: Sequence[_ArrayLikeBool_co], *, out: None = None) -> NDArray[np.bool]: ... +@overload +def multi_dot(arrays: Sequence[_ArrayLikeInt_co], *, out: None = None) -> NDArray[np.int64 | Any]: ... +@overload +def multi_dot(arrays: Sequence[_ArrayLikeFloat_co], *, out: None = None) -> NDArray[np.float64 | Any]: ... +@overload +def multi_dot(arrays: Sequence[_ArrayLikeComplex_co], *, out: None = None) -> NDArray[np.complex128 | Any]: ... +@overload +def multi_dot(arrays: Sequence[_ArrayLikeTD64_co], *, out: None = None) -> NDArray[np.timedelta64 | Any]: ... +@overload +def multi_dot[ScalarT: np.number | np.object_ | np.timedelta64]( + arrays: Sequence[_ArrayLike[ScalarT]], *, out: None = None +) -> NDArray[ScalarT]: ... 
+# +@overload # workaround for microsoft/pyright#10232 +def diagonal[DTypeT: np.dtype]( + x: _SupportsArray[_JustAnyShape, DTypeT], /, *, offset: SupportsIndex = 0 +) -> np.ndarray[_AnyShape, DTypeT]: ... +@overload # 2d, known dtype +def diagonal[DTypeT: np.dtype]( + x: _SupportsArray[tuple[int, int], DTypeT], /, *, offset: SupportsIndex = 0 +) -> np.ndarray[tuple[int], DTypeT]: ... +@overload # 3d, known dtype +def diagonal[DTypeT: np.dtype]( + x: _SupportsArray[tuple[int, int, int], DTypeT], /, *, offset: SupportsIndex = 0 +) -> np.ndarray[tuple[int, int], DTypeT]: ... +@overload # 4d, known dtype +def diagonal[DTypeT: np.dtype]( + x: _SupportsArray[tuple[int, int, int, int], DTypeT], /, *, offset: SupportsIndex = 0 +) -> np.ndarray[tuple[int, int, int], DTypeT]: ... +@overload # nd like ~bool +def diagonal(x: _NestedSequence[list[bool]], /, *, offset: SupportsIndex = 0) -> NDArray[np.bool]: ... +@overload # nd like ~int +def diagonal(x: _NestedSequence[list[int]], /, *, offset: SupportsIndex = 0) -> NDArray[np.int_]: ... +@overload # nd like ~float +def diagonal(x: _NestedSequence[list[float]], /, *, offset: SupportsIndex = 0) -> NDArray[np.float64]: ... +@overload # nd like ~complex +def diagonal(x: _NestedSequence[list[complex]], /, *, offset: SupportsIndex = 0) -> NDArray[np.complex128]: ... +@overload # nd like ~bytes +def diagonal(x: _NestedSequence[list[bytes]], /, *, offset: SupportsIndex = 0) -> NDArray[np.bytes_]: ... +@overload # nd like ~str +def diagonal(x: _NestedSequence[list[str]], /, *, offset: SupportsIndex = 0) -> NDArray[np.str_]: ... +@overload # fallback +def diagonal(x: ArrayLike, /, *, offset: SupportsIndex = 0) -> np.ndarray: ... + +# +@overload # workaround for microsoft/pyright#10232 def trace( - x: ArrayLike, # >= 2D array - offset: SupportsIndex = ..., - dtype: DTypeLike = ..., + x: _SupportsArray[_JustAnyShape, np.dtype[_to_complex]], /, *, offset: SupportsIndex = 0, dtype: DTypeLike | None = None ) -> Any: ... 
+@overload # 2d known dtype, dtype=None +def trace[ScalarT: _to_complex](x: _ArrayLike2D[ScalarT], /, *, offset: SupportsIndex = 0, dtype: None = None) -> ScalarT: ... +@overload # 2d, dtype= +def trace[ScalarT: _to_complex]( + x: _ToArrayComplex_2d, /, *, offset: SupportsIndex = 0, dtype: _DTypeLike[ScalarT] +) -> ScalarT: ... +@overload # 2d bool +def trace(x: _Sequence2D[bool], /, *, offset: SupportsIndex = 0, dtype: None = None) -> np.bool: ... +@overload # 2d int +def trace(x: Sequence[list[int]], /, *, offset: SupportsIndex = 0, dtype: None = None) -> np.int_: ... +@overload # 2d float +def trace(x: Sequence[list[float]], /, *, offset: SupportsIndex = 0, dtype: None = None) -> np.float64: ... +@overload # 2d complex +def trace(x: Sequence[list[complex]], /, *, offset: SupportsIndex = 0, dtype: None = None) -> np.complex128: ... +@overload # 3d known dtype, dtype=None +def trace[DTypeT: np.dtype[_to_complex]]( + x: _SupportsArray[tuple[int, int, int], DTypeT], /, *, offset: SupportsIndex = 0, dtype: None = None +) -> np.ndarray[tuple[int], DTypeT]: ... +@overload # 3d, dtype= +def trace[ScalarT: _to_complex]( + x: _ToArrayComplex_3d, /, *, offset: SupportsIndex = 0, dtype: _DTypeLike[ScalarT] +) -> _Array1D[ScalarT]: ... +@overload # 3d+ known dtype, dtype=None +def trace[DTypeT: np.dtype[_to_complex]]( + x: _SupportsArray[_AtLeast3D, DTypeT], /, *, offset: SupportsIndex = 0, dtype: None = None +) -> np.ndarray[tuple[int, *tuple[Any, ...]], DTypeT]: ... +@overload # 3d+, dtype= +def trace[ScalarT: _to_complex]( + x: _ArrayLike3ND[_to_complex] | _Sequence3ND[complex], /, *, offset: SupportsIndex = 0, dtype: _DTypeLike[ScalarT] +) -> np.ndarray[tuple[int, *tuple[Any, ...]], np.dtype[ScalarT]]: ... +@overload # 3d+ bool +def trace(x: _Sequence3ND[bool], /, *, offset: SupportsIndex = 0, dtype: None = None) -> NDArray[np.bool]: ... 
+@overload # 3d+ int +def trace(x: _Sequence2ND[list[int]], /, *, offset: SupportsIndex = 0, dtype: None = None) -> NDArray[np.int_]: ... +@overload # 3d+ float +def trace(x: _Sequence2ND[list[float]], /, *, offset: SupportsIndex = 0, dtype: None = None) -> NDArray[np.float64]: ... +@overload # 3d+ complex +def trace(x: _Sequence2ND[list[complex]], /, *, offset: SupportsIndex = 0, dtype: None = None) -> NDArray[np.complex128]: ... +@overload # fallback +def trace(x: _ArrayLikeComplex_co, /, *, offset: SupportsIndex = 0, dtype: DTypeLike | None = None) -> Any: ... -@overload +# +@overload # workaround for microsoft/pyright#10232 +def outer(x1: NDArray[Never], x2: NDArray[Never], /) -> _Array2D[Any]: ... +@overload # +bool, +bool +def outer(x1: _ToArrayBool_1d, x2: _ToArrayBool_1d, /) -> _Array2D[np.bool]: ... +@overload # ~int64, +int64 +def outer(x1: _AsArrayI64_1d, x2: _ToArrayInt_1d, /) -> _Array2D[np.int64]: ... +@overload # +int64, ~int64 +def outer(x1: _ToArrayInt_1d, x2: _AsArrayI64_1d, /) -> _Array2D[np.int64]: ... +@overload # ~timedelta64, +timedelta64 +def outer(x1: _ArrayLike1D[np.timedelta64], x2: _ArrayLike1D[_to_timedelta64], /) -> _Array2D[np.timedelta64]: ... +@overload # +timedelta64, ~timedelta64 +def outer(x1: _ArrayLike1D[_to_timedelta64], x2: _ArrayLike1D[np.timedelta64], /) -> _Array2D[np.timedelta64]: ... +@overload # ~float64, +float64 +def outer(x1: _AsArrayF64_1d, x2: _ToArrayF64_1d, /) -> _Array2D[np.float64]: ... +@overload # +float64, ~float64 +def outer(x1: _ToArrayF64_1d, x2: _AsArrayF64_1d, /) -> _Array2D[np.float64]: ... +@overload # ~complex128, +complex128 +def outer(x1: _AsArrayC128_1d, x2: _ToArrayComplex_1d, /) -> _Array2D[np.complex128]: ... +@overload # +complex128, ~complex128 +def outer(x1: _ToArrayComplex_1d, x2: _AsArrayC128_1d, /) -> _Array2D[np.complex128]: ... +@overload # ~ScalarT, ~ScalarT +def outer[ScalarT: np.number | np.object_](x1: _ArrayLike1D[ScalarT], x2: _ArrayLike1D[ScalarT], /) -> _Array2D[ScalarT]: ... 
+@overload # fallback +def outer(x1: _ToArrayComplex_1d, x2: _ToArrayComplex_1d, /) -> _Array2D[Any]: ... + +# note that this doesn't include bool, int_, float64, and complex128, as those require special-casing overloads +_AnyScalarT = TypeVar( + "_AnyScalarT", + np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.uint64, + np.float16, np.float32, np.longdouble, np.complex64, np.clongdouble, +) # fmt: skip + +# NOTE: we ignore UP047 because inlining `_AnyScalarT` would result in a lot of code duplication + +# +@overload # ~T, ~T (we use constraints instead of a `: np.number` bound to prevent joins/unions) +def cross( # noqa: UP047 + x1: _ArrayLike1D2D[_AnyScalarT], + x2: _ArrayLike1D2D[_AnyScalarT], + /, + *, + axis: SupportsIndex = -1, +) -> NDArray[_AnyScalarT]: ... # fmt: skip +@overload # ~int64, +int64 def cross( - a: _ArrayLikeUInt_co, - b: _ArrayLikeUInt_co, - axis: int = ..., -) -> NDArray[unsignedinteger[Any]]: ... -@overload + x1: _ArrayLike1D2D[np.int64] | _Sequence1D2D[int], + x2: _ArrayLike1D2D[np.integer] | _Sequence1D2D[int], + /, + *, + axis: SupportsIndex = -1, +) -> NDArray[np.int64]: ... +@overload # +int64, ~int64 def cross( - a: _ArrayLikeInt_co, - b: _ArrayLikeInt_co, - axis: int = ..., -) -> NDArray[signedinteger[Any]]: ... -@overload + x1: _ArrayLike1D2D[np.integer], + x2: _ArrayLike1D2D[np.int64], + /, + *, + axis: SupportsIndex = -1, +) -> NDArray[np.int64]: ... +@overload # ~float64, +float64 def cross( - a: _ArrayLikeFloat_co, - b: _ArrayLikeFloat_co, - axis: int = ..., -) -> NDArray[floating[Any]]: ... -@overload + x1: _ArrayLike1D2D[np.float64] | _Sequence0D1D[list[float]], + x2: _ArrayLike1D2D[np.floating | np.integer] | _Sequence1D2D[float], + /, + *, + axis: SupportsIndex = -1, +) -> NDArray[np.float64]: ... +@overload # +float64, ~float64 def cross( - a: _ArrayLikeComplex_co, - b: _ArrayLikeComplex_co, - axis: int = ..., -) -> NDArray[complexfloating[Any, Any]]: ... 
+ x1: _ArrayLike1D2D[np.floating | np.integer] | _Sequence1D2D[float], + x2: _ArrayLike1D2D[np.float64] | _Sequence0D1D[list[float]], + /, + *, + axis: SupportsIndex = -1, +) -> NDArray[np.float64]: ... +@overload # ~complex128, +complex128 +def cross( + x1: _ArrayLike1D2D[np.complex128] | _Sequence0D1D[list[complex]], + x2: _ArrayLike1D2D[np.number] | _Sequence1D2D[complex], + /, + *, + axis: SupportsIndex = -1, +) -> NDArray[np.complex128]: ... +@overload # +complex128, ~complex128 +def cross( + x1: _ArrayLike1D2D[np.number] | _Sequence1D2D[complex], + x2: _ArrayLike1D2D[np.complex128] | _Sequence0D1D[list[complex]], + /, + *, + axis: SupportsIndex = -1, +) -> NDArray[np.complex128]: ... +@overload # ~object_, +object_ +def cross( + x1: _SupportsArray[tuple[int] | tuple[int, int], np.dtype[np.object_]], + x2: _ArrayLike1D2D[np.number | np.object_] | _Sequence1D2D[complex], + /, + *, + axis: SupportsIndex = -1, +) -> NDArray[np.object_]: ... +@overload # +object_, ~object_ +def cross( + x1: _ArrayLike1D2D[np.number | np.object_] | _Sequence1D2D[complex], + x2: _SupportsArray[tuple[int] | tuple[int, int], np.dtype[np.object_]], + /, + *, + axis: SupportsIndex = -1, +) -> NDArray[np.object_]: ... +@overload # fallback +def cross[ScalarT: np.number]( + x1: _ArrayLike1D2D[ScalarT], + x2: _ArrayLike1D2D[ScalarT], + /, + *, + axis: SupportsIndex = -1, +) -> NDArray[ScalarT]: ... -@overload +# These overloads can be grouped into three parts: +# - 16 overloads as workaround for microsoft/pyright#10232 +# - 9 overloads for the scalar cases (both args 1d) +# - 18 overloads for the non-scalar cases (at least one arg >1d) +@overload # ?d ~T, 1d ~T +def matmul( # noqa: UP047 + x1: _SupportsArray[_JustAnyShape, np.dtype[_AnyScalarT]], x2: _ArrayLike1D[_AnyScalarT], / +) -> NDArray[_AnyScalarT] | Any: ... 
+@overload # 1d ~T, ?d ~T +def matmul( # noqa: UP047 + x1: _ArrayLike1D[_AnyScalarT], x2: _SupportsArray[_JustAnyShape, np.dtype[_AnyScalarT]], / +) -> NDArray[_AnyScalarT] | Any: ... +@overload # ?d bool, 1d bool +def matmul(x1: _SupportsArray[_JustAnyShape, np.dtype[np.bool]], x2: _ToArrayBool_1d, /) -> NDArray[np.bool] | Any: ... +@overload # 1d bool, ?d bool +def matmul(x1: _ToArrayBool_1d, x2: _SupportsArray[_JustAnyShape, np.dtype[np.bool]], /) -> NDArray[np.bool] | Any: ... +@overload # ?d ~int, 1d +int +def matmul(x1: _SupportsArray[_JustAnyShape, np.dtype[np.int64]], x2: _ToArrayInt_1d, /) -> NDArray[np.int64] | Any: ... +@overload # 1d +int, ?d ~int +def matmul(x1: _ToArrayInt_1d, x2: _SupportsArray[_JustAnyShape, np.dtype[np.int64]], /) -> NDArray[np.int64] | Any: ... +@overload # ?d +int, 1d ~int +def matmul(x1: _SupportsArray[_JustAnyShape, np.dtype[_to_integer]], x2: _AsArrayI64_1d, /) -> NDArray[np.int64] | Any: ... +@overload # 1d ~int, ?d +int +def matmul(x1: _AsArrayI64_1d, x2: _SupportsArray[_JustAnyShape, np.dtype[_to_integer]], /) -> NDArray[np.int64] | Any: ... +@overload # ?d ~float64, 1d +float64 +def matmul(x1: _SupportsArray[_JustAnyShape, np.dtype[np.float64]], x2: _ToArrayF64_1d, /) -> NDArray[np.float64] | Any: ... +@overload # 1d +float64, ?d ~float64 +def matmul(x1: _ToArrayF64_1d, x2: _SupportsArray[_JustAnyShape, np.dtype[np.float64]], /) -> NDArray[np.float64] | Any: ... +@overload # ?d +float64, 1d ~float64 +def matmul(x1: _SupportsArray[_JustAnyShape, np.dtype[_to_float64]], x2: _AsArrayF64_1d, /) -> NDArray[np.float64] | Any: ... +@overload # 1d ~float64, ?d +float64 +def matmul(x1: _AsArrayF64_1d, x2: _SupportsArray[_JustAnyShape, np.dtype[_to_float64]], /) -> NDArray[np.float64] | Any: ... +@overload # ?d ~complex128, 1d +complex128 def matmul( - x1: _ArrayLikeInt_co, - x2: _ArrayLikeInt_co, -) -> NDArray[signedinteger[Any]]: ... 
-@overload + x1: _SupportsArray[_JustAnyShape, np.dtype[np.complex128]], x2: _ToArrayC128_1d, / +) -> NDArray[np.complex128] | Any: ... +@overload # 1d +complex128, ?d ~complex128 def matmul( - x1: _ArrayLikeUInt_co, - x2: _ArrayLikeUInt_co, -) -> NDArray[unsignedinteger[Any]]: ... -@overload + x1: _ToArrayC128_1d, x2: _SupportsArray[_JustAnyShape, np.dtype[np.complex128]], / +) -> NDArray[np.complex128] | Any: ... +@overload # ?d +complex128, 1d ~complex128 def matmul( - x1: _ArrayLikeFloat_co, - x2: _ArrayLikeFloat_co, -) -> NDArray[floating[Any]]: ... -@overload + x1: _SupportsArray[_JustAnyShape, np.dtype[_to_complex128_co]], x2: _AsArrayC128_1d, / +) -> NDArray[np.complex128] | Any: ... +@overload # 1d ~complex128, ?d +complex128 def matmul( - x1: _ArrayLikeComplex_co, - x2: _ArrayLikeComplex_co, -) -> NDArray[complexfloating[Any, Any]]: ... + x1: _AsArrayC128_1d, x2: _SupportsArray[_JustAnyShape, np.dtype[_to_complex128_co]], / +) -> NDArray[np.complex128] | Any: ... # end workaround +@overload # 1d ~T, 1d ~T +def matmul(x1: _ArrayLike1D[_AnyScalarT], x2: _ArrayLike1D[_AnyScalarT], /) -> _AnyScalarT: ... # noqa: UP047 +@overload # 1d +bool, 1d +bool +def matmul(x1: _ToArrayBool_1d, x2: _ToArrayBool_1d, /) -> np.bool: ... +@overload # 1d ~int, 1d +int +def matmul(x1: _AsArrayI64_1d, x2: _ToArrayInt_1d, /) -> np.int64: ... +@overload # 1d +int, 1d ~int +def matmul(x1: _ToArrayInt_1d, x2: _AsArrayI64_1d, /) -> np.int64: ... +@overload # 1d ~float64, 1d +float64 +def matmul(x1: _AsArrayF64_1d, x2: _ToArrayF64_1d, /) -> np.float64: ... +@overload # 1d +float64, 1d ~float64 +def matmul(x1: _ToArrayF64_1d, x2: _AsArrayF64_1d, /) -> np.float64: ... +@overload # 1d ~complex128, 1d +complex128 +def matmul(x1: _AsArrayC128_1d, x2: _ToArrayComplex_1d, /) -> np.complex128: ... +@overload # 1d +complex128, 1d ~complex128 +def matmul(x1: _ToArrayComplex_1d, x2: _AsArrayC128_1d, /) -> np.complex128: ... 
+@overload # 1d fallback, 1d fallback +def matmul(x1: _ToArrayComplex_1d, x2: _ToArrayComplex_1d, /) -> Any: ... # end 1d x 1d +@overload # >=1d ~T, >=2d ~T +def matmul(x1: _ArrayLike1ND[_AnyScalarT], x2: _ArrayLike2ND[_AnyScalarT], /) -> NDArray[_AnyScalarT]: ... # noqa: UP047 +@overload # >=2d ~T, >=1d ~T +def matmul(x1: _ArrayLike2ND[_AnyScalarT], x2: _ArrayLike1ND[_AnyScalarT], /) -> NDArray[_AnyScalarT]: ... # noqa: UP047 +@overload # >=1d +bool, >=2d +bool +def matmul(x1: _ToArrayBool_1nd, x2: _ToArrayBool_2nd, /) -> NDArray[np.bool]: ... +@overload # >=2d +bool, >=1d +bool +def matmul(x1: _ToArrayBool_2nd, x2: _ToArrayBool_1nd, /) -> NDArray[np.bool]: ... +@overload # >=1d ~int, >=2d +int +def matmul(x1: _AsArrayI64_1nd, x2: _ToArrayInt_2nd, /) -> NDArray[np.int64]: ... +@overload # >=2d ~int, >=1d +int +def matmul(x1: _AsArrayI64_2nd, x2: _ToArrayInt_1nd, /) -> NDArray[np.int64]: ... +@overload # >=1d +int, >=2d ~int +def matmul(x1: _ToArrayInt_1nd, x2: _AsArrayI64_2nd, /) -> NDArray[np.int64]: ... +@overload # >=2d +int, >=1d ~int +def matmul(x1: _ToArrayInt_2nd, x2: _AsArrayI64_1nd, /) -> NDArray[np.int64]: ... +@overload # >=1d ~float64, >=2d +float64 +def matmul(x1: _AsArrayF64_1nd, x2: _ToArrayF64_2nd, /) -> NDArray[np.float64]: ... +@overload # >=2d ~float64, >=1d +float64 +def matmul(x1: _AsArrayF64_2nd, x2: _ToArrayF64_1nd, /) -> NDArray[np.float64]: ... +@overload # >=1d +float64, >=2d ~float64 +def matmul(x1: _ToArrayF64_1nd, x2: _AsArrayF64_2nd, /) -> NDArray[np.float64]: ... +@overload # >=2d +float64, >=1d ~float64 +def matmul(x1: _ToArrayF64_2nd, x2: _AsArrayF64_1nd, /) -> NDArray[np.float64]: ... +@overload # >=1d ~complex128, >=2d +complex128 +def matmul(x1: _AsArrayC128_1nd, x2: _ToArrayC128_2nd, /) -> NDArray[np.complex128]: ... +@overload # >=2d ~complex128, >=1d +complex128 +def matmul(x1: _AsArrayC128_2nd, x2: _ToArrayC128_1nd, /) -> NDArray[np.complex128]: ... 
+@overload # >=1d +complex128, >=2d ~complex128 +def matmul(x1: _ToArrayC128_1nd, x2: _AsArrayC128_2nd, /) -> NDArray[np.complex128]: ... +@overload # >=2d +complex128, >=1d ~complex128 +def matmul(x1: _ToArrayC128_2nd, x2: _AsArrayC128_1nd, /) -> NDArray[np.complex128]: ... +@overload # >=1d fallback, >=2d fallback +def matmul(x1: _ToArrayComplex_1nd, x2: _ToArrayComplex_2nd, /) -> NDArray[Any]: ... +@overload # >=2d fallback, >=1d fallback +def matmul(x1: _ToArrayComplex_2nd, x2: _ToArrayComplex_1nd, /) -> NDArray[Any]: ... diff --git a/numpy/linalg/_umath_linalg.pyi b/numpy/linalg/_umath_linalg.pyi new file mode 100644 index 000000000000..f90706a7b159 --- /dev/null +++ b/numpy/linalg/_umath_linalg.pyi @@ -0,0 +1,60 @@ +from typing import Final, Literal as L + +import numpy as np +from numpy._typing._ufunc import _GUFunc_Nin2_Nout1 + +__version__: Final[str] = ... +_ilp64: Final[bool] = ... + +### +# 1 -> 1 + +# (m,m) -> () +det: Final[np.ufunc] = ... +# (m,m) -> (m) +cholesky_lo: Final[np.ufunc] = ... +cholesky_up: Final[np.ufunc] = ... +eigvals: Final[np.ufunc] = ... +eigvalsh_lo: Final[np.ufunc] = ... +eigvalsh_up: Final[np.ufunc] = ... +# (m,m) -> (m,m) +inv: Final[np.ufunc] = ... +# (m,n) -> (p) +qr_r_raw: Final[np.ufunc] = ... +svd: Final[np.ufunc] = ... + +### +# 1 -> 2 + +# (m,m) -> (), () +slogdet: Final[np.ufunc] = ... +# (m,m) -> (m), (m,m) +eig: Final[np.ufunc] = ... +eigh_lo: Final[np.ufunc] = ... +eigh_up: Final[np.ufunc] = ... + +### +# 2 -> 1 + +# (m,n), (n) -> (m,m) +qr_complete: Final[_GUFunc_Nin2_Nout1[L["qr_complete"], L[2], None, L["(m,n),(n)->(m,m)"]]] = ... +# (m,n), (k) -> (m,k) +qr_reduced: Final[_GUFunc_Nin2_Nout1[L["qr_reduced"], L[2], None, L["(m,n),(k)->(m,k)"]]] = ... +# (m,m), (m,n) -> (m,n) +solve: Final[_GUFunc_Nin2_Nout1[L["solve"], L[4], None, L["(m,m),(m,n)->(m,n)"]]] = ... +# (m,m), (m) -> (m) +solve1: Final[_GUFunc_Nin2_Nout1[L["solve1"], L[4], None, L["(m,m),(m)->(m)"]]] = ... 
+ +### +# 1 -> 3 + +# (m,n) -> (m,m), (p), (n,n) +svd_f: Final[np.ufunc] = ... +# (m,n) -> (m,p), (p), (p,n) +svd_s: Final[np.ufunc] = ... + +### +# 3 -> 4 + +# (m,n), (m,k), () -> (n,k), (k), (), (p) +lstsq: Final[np.ufunc] = ... diff --git a/numpy/linalg/lapack_lite.pyi b/numpy/linalg/lapack_lite.pyi new file mode 100644 index 000000000000..3ec3919bfa3b --- /dev/null +++ b/numpy/linalg/lapack_lite.pyi @@ -0,0 +1,143 @@ +from typing import Final, TypedDict, type_check_only + +import numpy as np +from numpy._typing import NDArray + +from ._linalg import fortran_int + +### + +@type_check_only +class _GELSD(TypedDict): + m: int + n: int + nrhs: int + lda: int + ldb: int + rank: int + lwork: int + info: int + +@type_check_only +class _DGELSD(_GELSD): + dgelsd_: int + rcond: float + +@type_check_only +class _ZGELSD(_GELSD): + zgelsd_: int + +@type_check_only +class _GEQRF(TypedDict): + m: int + n: int + lda: int + lwork: int + info: int + +@type_check_only +class _DGEQRF(_GEQRF): + dgeqrf_: int + +@type_check_only +class _ZGEQRF(_GEQRF): + zgeqrf_: int + +@type_check_only +class _DORGQR(TypedDict): + dorgqr_: int + info: int + +@type_check_only +class _ZUNGQR(TypedDict): + zungqr_: int + info: int + +### + +_ilp64: Final[bool] = ... + +class LapackError(Exception): ... + +def dgelsd( + m: int, + n: int, + nrhs: int, + a: NDArray[np.float64], + lda: int, + b: NDArray[np.float64], + ldb: int, + s: NDArray[np.float64], + rcond: float, + rank: int, + work: NDArray[np.float64], + lwork: int, + iwork: NDArray[fortran_int], + info: int, +) -> _DGELSD: ... +def zgelsd( + m: int, + n: int, + nrhs: int, + a: NDArray[np.complex128], + lda: int, + b: NDArray[np.complex128], + ldb: int, + s: NDArray[np.float64], + rcond: float, + rank: int, + work: NDArray[np.complex128], + lwork: int, + rwork: NDArray[np.float64], + iwork: NDArray[fortran_int], + info: int, +) -> _ZGELSD: ... 
+ +# +def dgeqrf( + m: int, + n: int, + a: NDArray[np.float64], # in/out, shape: (lda, n) + lda: int, + tau: NDArray[np.float64], # out, shape: (min(m, n),) + work: NDArray[np.float64], # out, shape: (max(1, lwork),) + lwork: int, + info: int, # out +) -> _DGEQRF: ... +def zgeqrf( + m: int, + n: int, + a: NDArray[np.complex128], # in/out, shape: (lda, n) + lda: int, + tau: NDArray[np.complex128], # out, shape: (min(m, n),) + work: NDArray[np.complex128], # out, shape: (max(1, lwork),) + lwork: int, + info: int, # out +) -> _ZGEQRF: ... + +# +def dorgqr( + m: int, # >=0 + n: int, # m >= n >= 0 + k: int, # n >= k >= 0 + a: NDArray[np.float64], # in/out, shape: (lda, n) + lda: int, # >= max(1, m) + tau: NDArray[np.float64], # in, shape: (k,) + work: NDArray[np.float64], # out, shape: (max(1, lwork),) + lwork: int, + info: int, # out +) -> _DORGQR: ... +def zungqr( + m: int, + n: int, + k: int, + a: NDArray[np.complex128], + lda: int, + tau: NDArray[np.complex128], + work: NDArray[np.complex128], + lwork: int, + info: int, +) -> _ZUNGQR: ... + +# +def xerbla(srname: object, info: int) -> None: ... 
diff --git a/numpy/linalg/lapack_lite/clapack_scrub.py b/numpy/linalg/lapack_lite/clapack_scrub.py index aeb6139b3a56..fea0d6a77ad4 100644 --- a/numpy/linalg/lapack_lite/clapack_scrub.py +++ b/numpy/linalg/lapack_lite/clapack_scrub.py @@ -4,7 +4,7 @@ import re import sys -from plex import Scanner, Str, Lexicon, Opt, Bol, State, AnyChar, TEXT, IGNORE +from plex import IGNORE, TEXT, AnyChar, Bol, Lexicon, Opt, Scanner, State, Str from plex.traditional import re as Re try: @@ -66,7 +66,7 @@ def endArgs(self, text): digits = Re('[0-9]+') iofun = Re(r'\([^;]*;') - decl = Re(r'\([^)]*\)[,;'+'\n]') + decl = Re(r'\([^)]*\)[,;' + '\n]') any = Re('[.]*') S = Re('[ \t\n]*') cS = Str(',') + S @@ -79,19 +79,19 @@ def endArgs(self, text): keep_ftnlen = (Str('ilaenv_') | Str('iparmq_') | Str('s_rnge')) + Str('(') lexicon = Lexicon([ - (iofunctions, TEXT), - (keep_ftnlen, beginArgs), + (iofunctions, TEXT), + (keep_ftnlen, beginArgs), State('args', [ (Str(')'), endArgs), (Str('('), beginArgs), (AnyChar, TEXT), ]), - (cS+Re(r'[1-9][0-9]*L'), IGNORE), - (cS+Str('ftnlen')+Opt(S+len_), IGNORE), - (cS+sep_seq(['(', 'ftnlen', ')'], S)+S+digits, IGNORE), - (Bol+Str('ftnlen ')+len_+Str(';\n'), IGNORE), - (cS+len_, TEXT), - (AnyChar, TEXT), + (cS + Re(r'[1-9][0-9]*L'), IGNORE), + (cS + Str('ftnlen') + Opt(S + len_), IGNORE), + (cS + sep_seq(['(', 'ftnlen', ')'], S) + S + digits, IGNORE), + (Bol + Str('ftnlen ') + len_ + Str(';\n'), IGNORE), + (cS + len_, TEXT), + (AnyChar, TEXT), ]) def scrubFtnlen(source): @@ -155,10 +155,12 @@ def flushTo(self, other_queue): def cleanComments(source): lines = LineQueue() comments = CommentQueue() + def isCommentLine(line): return line.startswith('/*') and line.endswith('*/\n') blanks = LineQueue() + def isBlank(line): return line.strip() == '' @@ -169,6 +171,7 @@ def SourceLines(line): else: lines.add(line) return SourceLines + def HaveCommentLines(line): if isBlank(line): blanks.add('\n') @@ -180,6 +183,7 @@ def HaveCommentLines(line): 
comments.flushTo(lines) lines.add(line) return SourceLines + def HaveBlankLines(line): if isBlank(line): blanks.add('\n') @@ -210,11 +214,13 @@ def LookingForHeader(line): else: lines.add(line) return LookingForHeader + def InHeader(line): if line.startswith('*/'): return OutOfHeader else: return InHeader + def OutOfHeader(line): if line.startswith('#include "f2c.h"'): pass @@ -243,6 +249,7 @@ def removeSubroutinePrototypes(source): def removeBuiltinFunctions(source): lines = LineQueue() + def LookingForBuiltinFunctions(line): if line.strip() == '/* Builtin functions */': return InBuiltInFunctions @@ -265,8 +272,8 @@ def replaceDlamch(source): """Replace dlamch_ calls with appropriate macros""" def repl(m): s = m.group(1) - return dict(E='EPSILON', P='PRECISION', S='SAFEMINIMUM', - B='BASE')[s[0]] + return {'E': 'EPSILON', 'P': 'PRECISION', 'S': 'SAFEMINIMUM', + 'B': 'BASE'}[s[0]] source = re.sub(r'dlamch_\("(.*?)"\)', repl, source) source = re.sub(r'^\s+extern.*? dlamch_.*?;$(?m)', '', source) return source @@ -294,6 +301,7 @@ def scrubSource(source, nsteps=None, verbose=False): return source + if __name__ == '__main__': filename = sys.argv[1] outfilename = os.path.join(sys.argv[2], os.path.basename(filename)) @@ -305,7 +313,7 @@ def scrubSource(source, nsteps=None, verbose=False): else: nsteps = None - source = scrub_source(source, nsteps, verbose=True) + source = scrubSource(source, nsteps, verbose=True) with open(outfilename, 'w') as writefo: writefo.write(source) diff --git a/numpy/linalg/lapack_lite/f2c.c b/numpy/linalg/lapack_lite/f2c.c index 47e4d5729b83..9afac89e61d1 100644 --- a/numpy/linalg/lapack_lite/f2c.c +++ b/numpy/linalg/lapack_lite/f2c.c @@ -191,7 +191,7 @@ integer i_dnnt(x) doublereal *x; integer i_dnnt(doublereal *x) #endif { -return( (*x)>=0 ? +return (integer)( (*x)>=0 ? 
floor(*x + .5) : -floor(.5 - *x) ); } diff --git a/numpy/linalg/lapack_lite/fortran.py b/numpy/linalg/lapack_lite/fortran.py index 2a5c9c05ee23..22eb666ef26f 100644 --- a/numpy/linalg/lapack_lite/fortran.py +++ b/numpy/linalg/lapack_lite/fortran.py @@ -1,6 +1,7 @@ # WARNING! This a Python 2 script. Read README.rst for rationale. -import re import itertools +import re + def isBlank(line): return not line @@ -11,6 +12,7 @@ def isComment(line): def isContinuation(line): return line[5] != ' ' + COMMENT, STATEMENT, CONTINUATION = 0, 1, 2 def lineType(line): """Return the type of a line of Fortran code.""" diff --git a/numpy/linalg/lapack_lite/make_lite.py b/numpy/linalg/lapack_lite/make_lite.py index 3c1783448a1f..d5bb1e01cc7f 100755 --- a/numpy/linalg/lapack_lite/make_lite.py +++ b/numpy/linalg/lapack_lite/make_lite.py @@ -12,14 +12,14 @@ * patch """ -import sys import os import re -import subprocess import shutil +import subprocess +import sys -import fortran import clapack_scrub +import fortran try: from distutils.spawn import find_executable as which # Python 2 @@ -70,6 +70,7 @@ class FortranRoutine: """Wrapper for a Fortran routine in a file. """ type = 'generic' + def __init__(self, name=None, filename=None): self.filename = filename if name is None: @@ -85,14 +86,14 @@ def dependencies(self): return self._dependencies def __repr__(self): - return "FortranRoutine({!r}, filename={!r})".format(self.name, - self.filename) + return f"FortranRoutine({self.name!r}, filename={self.filename!r})" class UnknownFortranRoutine(FortranRoutine): """Wrapper for a Fortran routine for which the corresponding file is not known. 
""" type = 'unknown' + def __init__(self, name): FortranRoutine.__init__(self, name=name, filename='') @@ -198,7 +199,7 @@ def allRoutinesByType(self, typename): def printRoutineNames(desc, routines): print(desc) for r in routines: - print('\t%s' % r.name) + print(f'\t{r.name}') def getLapackRoutines(wrapped_routines, ignores, lapack_dir): blas_src_dir = os.path.join(lapack_dir, 'BLAS', 'SRC') @@ -239,6 +240,7 @@ def getWrappedRoutineNames(wrapped_routines_file): routines.append(line) return routines, ignores + types = {'blas', 'lapack', 'd_lapack', 's_lapack', 'z_lapack', 'c_lapack', 'config'} def dumpRoutineNames(library, output_dir): @@ -248,7 +250,7 @@ def dumpRoutineNames(library, output_dir): with open(filename, 'w') as fo: for r in routines: deps = r.dependencies() - fo.write('%s: %s\n' % (r.name, ' '.join(deps))) + fo.write(f"{r.name}: {' '.join(deps)}\n") def concatenateRoutines(routines, output_file): with open(output_file, 'w') as output_fo: @@ -289,7 +291,7 @@ def create_name_header(output_dir): extern_re = re.compile(r'^extern [a-z]+ ([a-z0-9_]+)\(.*$') # BLAS/LAPACK symbols - symbols = set(['xerbla']) + symbols = {'xerbla'} for fn in os.listdir(output_dir): fn = os.path.join(output_dir, fn) @@ -321,13 +323,13 @@ def create_name_header(output_dir): # Rename BLAS/LAPACK symbols for name in sorted(symbols): - f.write("#define %s_ BLAS_FUNC(%s)\n" % (name, name)) + f.write(f"#define {name}_ BLAS_FUNC({name})\n") # Rename also symbols that f2c exports itself f.write("\n" "/* Symbols exported by f2c.c */\n") for name in sorted(f2c_symbols): - f.write("#define %s numpy_lapack_lite_%s\n" % (name, name)) + f.write(f"#define {name} numpy_lapack_lite_{name}\n") def main(): if len(sys.argv) != 3: @@ -350,9 +352,9 @@ def main(): dumpRoutineNames(library, output_dir) for typename in types: - fortran_file = os.path.join(output_dir, 'f2c_%s.f' % typename) + fortran_file = os.path.join(output_dir, f'f2c_{typename}.f') c_file = fortran_file[:-2] + '.c' - 
print('creating %s ...' % c_file) + print(f'creating {c_file} ...') routines = library.allRoutinesByType(typename) concatenateRoutines(routines, fortran_file) @@ -360,11 +362,11 @@ def main(): patch_file = os.path.basename(fortran_file) + '.patch' if os.path.exists(patch_file): subprocess.check_call(['patch', '-u', fortran_file, patch_file]) - print("Patched {}".format(fortran_file)) + print(f"Patched {fortran_file}") try: runF2C(fortran_file, output_dir) except F2CError: - print('f2c failed on %s' % fortran_file) + print(f'f2c failed on {fortran_file}') break scrubF2CSource(c_file) diff --git a/numpy/linalg/lapack_litemodule.c b/numpy/linalg/lapack_litemodule.c index 766dfa9527b1..cad5f3f92f09 100644 --- a/numpy/linalg/lapack_litemodule.c +++ b/numpy/linalg/lapack_litemodule.c @@ -377,30 +377,27 @@ static struct PyMethodDef lapack_lite_module_methods[] = { { NULL,NULL,0, NULL} }; +static int module_loaded = 0; -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "lapack_lite", - NULL, - -1, - lapack_lite_module_methods, - NULL, - NULL, - NULL, - NULL -}; - -/* Initialization function for the module */ -PyMODINIT_FUNC PyInit_lapack_lite(void) +static int +lapack_lite_exec(PyObject *m) { - PyObject *m,*d; - m = PyModule_Create(&moduledef); - if (m == NULL) { - return NULL; + PyObject *d; + + // https://docs.python.org/3/howto/isolating-extensions.html#opt-out-limiting-to-one-module-object-per-process + if (module_loaded) { + PyErr_SetString(PyExc_ImportError, + "cannot load module more than once per process"); + return -1; + } + module_loaded = 1; + + if (PyArray_ImportNumPyAPI() < 0) { + return -1; } - import_array(); + d = PyModule_GetDict(m); - LapackError = PyErr_NewException("lapack_lite.LapackError", NULL, NULL); + LapackError = PyErr_NewException("numpy.linalg.lapack_lite.LapackError", NULL, NULL); PyDict_SetItemString(d, "LapackError", LapackError); #ifdef HAVE_BLAS_ILP64 @@ -409,5 +406,29 @@ PyMODINIT_FUNC PyInit_lapack_lite(void) 
PyDict_SetItemString(d, "_ilp64", Py_False); #endif - return m; + return 0; +} + +static struct PyModuleDef_Slot lapack_lite_slots[] = { + {Py_mod_exec, lapack_lite_exec}, +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED}, +#endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, +#endif + {0, NULL}, +}; + +static struct PyModuleDef moduledef = { + .m_base = PyModuleDef_HEAD_INIT, + .m_name = "lapack_lite", + .m_size = 0, + .m_methods = lapack_lite_module_methods, + .m_slots = lapack_lite_slots, +}; + +PyMODINIT_FUNC PyInit_lapack_lite(void) { + return PyModuleDef_Init(&moduledef); } diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py deleted file mode 100644 index d75b07342b58..000000000000 --- a/numpy/linalg/linalg.py +++ /dev/null @@ -1,16 +0,0 @@ -def __getattr__(attr_name): - import warnings - from numpy.linalg import _linalg - ret = getattr(_linalg, attr_name, None) - if ret is None: - raise AttributeError( - f"module 'numpy.linalg.linalg' has no attribute {attr_name}") - warnings.warn( - "The numpy.linalg.linalg has been made private and renamed to " - "numpy.linalg._linalg. All public functions exported by it are " - f"available from numpy.linalg. 
Please use numpy.linalg.{attr_name} " - "instead.", - DeprecationWarning, - stacklevel=3 - ) - return ret diff --git a/numpy/linalg/meson.build b/numpy/linalg/meson.build index 740c9f56c6fa..1d3297286317 100644 --- a/numpy/linalg/meson.build +++ b/numpy/linalg/meson.build @@ -45,7 +45,8 @@ py.install_sources( '__init__.pyi', '_linalg.py', '_linalg.pyi', - 'linalg.py', + '_umath_linalg.pyi', + 'lapack_lite.pyi', ], subdir: 'numpy/linalg' ) diff --git a/numpy/linalg/tests/test_deprecations.py b/numpy/linalg/tests/test_deprecations.py index cd4c10832e7e..7fb5008f1ff8 100644 --- a/numpy/linalg/tests/test_deprecations.py +++ b/numpy/linalg/tests/test_deprecations.py @@ -1,8 +1,9 @@ """Test deprecation and future warnings. """ +import pytest + import numpy as np -from numpy.testing import assert_warns def test_qr_mode_full_future_warning(): @@ -14,7 +15,7 @@ def test_qr_mode_full_future_warning(): """ a = np.eye(2) - assert_warns(DeprecationWarning, np.linalg.qr, a, mode='full') - assert_warns(DeprecationWarning, np.linalg.qr, a, mode='f') - assert_warns(DeprecationWarning, np.linalg.qr, a, mode='economic') - assert_warns(DeprecationWarning, np.linalg.qr, a, mode='e') + pytest.warns(DeprecationWarning, np.linalg.qr, a, mode='full') + pytest.warns(DeprecationWarning, np.linalg.qr, a, mode='f') + pytest.warns(DeprecationWarning, np.linalg.qr, a, mode='economic') + pytest.warns(DeprecationWarning, np.linalg.qr, a, mode='e') diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index 02e94354399d..cd93acaf79c0 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -1,27 +1,50 @@ """ Test functions for linalg module """ +import itertools import os +import subprocess import sys -import itertools -import traceback import textwrap -import subprocess +import threading +import traceback +import warnings + import pytest import numpy as np -from numpy import array, single, double, csingle, cdouble, dot, identity, matmul 
+from numpy import ( + array, + asarray, + atleast_2d, + cdouble, + csingle, + dot, + double, + identity, + inf, + linalg, + matmul, + multiply, + single, +) from numpy._core import swapaxes from numpy.exceptions import AxisError -from numpy import multiply, atleast_2d, inf, asarray -from numpy import linalg -from numpy.linalg import matrix_power, norm, matrix_rank, multi_dot, LinAlgError +from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm from numpy.linalg._linalg import _multi_dot_matrix_chain_order from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_array_equal, - assert_almost_equal, assert_allclose, suppress_warnings, - assert_raises_regex, HAS_LAPACK64, IS_WASM - ) + HAS_LAPACK64, + IS_WASM, + NOGIL_BUILD, + assert_, + assert_allclose, + assert_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) + try: import numpy.linalg.lapack_lite except ImportError: @@ -70,7 +93,7 @@ def get_rtol(dtype): # used to categorize tests all_tags = { 'square', 'nonsquare', 'hermitian', # mutually exclusive - 'generalized', 'size-0', 'strided' # optional additions + 'generalized', 'size-0', 'strided' # optional additions } @@ -297,7 +320,7 @@ def _stride_comb_iter(x): for repeats in itertools.product(*tuple(stride_set)): new_shape = [abs(a * b) for a, b in zip(x.shape, repeats)] - slices = tuple([slice(None, None, repeat) for repeat in repeats]) + slices = tuple(slice(None, None, repeat) for repeat in repeats) # new array with different strides, but same data xi = np.empty(new_shape, dtype=x.dtype) @@ -581,7 +604,7 @@ class TestEigvals(EigvalsCases): @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) def test_types(self, dtype): x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) - assert_equal(linalg.eigvals(x).dtype, dtype) + assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype)) x = np.array([[1, 0.5], [-1, 1]], dtype=dtype) 
assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype)) @@ -591,7 +614,7 @@ class ArraySubclass(np.ndarray): pass a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) res = linalg.eigvals(a) - assert_(res.dtype.type is np.float64) + assert_(res.dtype.type is np.complex128) assert_equal((0, 1), res.shape) # This is just for documentation, it might make sense to change: assert_(isinstance(res, np.ndarray)) @@ -620,8 +643,8 @@ class TestEig(EigCases): def test_types(self, dtype): x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) w, v = np.linalg.eig(x) - assert_equal(w.dtype, dtype) - assert_equal(v.dtype, dtype) + assert_equal(w.dtype, get_complex_dtype(dtype)) + assert_equal(v.dtype, get_complex_dtype(dtype)) x = np.array([[1, 0.5], [-1, 1]], dtype=dtype) w, v = np.linalg.eig(x) @@ -634,8 +657,8 @@ class ArraySubclass(np.ndarray): pass a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) res, res_v = linalg.eig(a) - assert_(res_v.dtype.type is np.float64) - assert_(res.dtype.type is np.float64) + assert_(res_v.dtype.type is np.complex128) + assert_(res.dtype.type is np.complex128) assert_equal(a.shape, res_v.shape) assert_equal((0, 1), res.shape) # This is just for documentation, it might make sense to change: @@ -706,6 +729,7 @@ def do(self, a, b, tags): assert_allclose(a, matmul(np.asarray(u) * np.asarray(s)[..., None, :], np.asarray(vt)), rtol=get_rtol(u.dtype)) + def hermitian(mat): axes = list(range(mat.ndim)) axes[-1], axes[-2] = axes[-2], axes[-1] @@ -769,15 +793,28 @@ def do(self, a, b, tags): class TestCond(CondCases): - def test_basic_nonsvd(self): + @pytest.mark.parametrize('is_complex', [False, True]) + def test_basic_nonsvd(self, is_complex): # Smoketest the non-svd norms A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]]) + if is_complex: + # Since A is linearly scaled, the condition number should not change + A = A * (1 + 1j) assert_almost_equal(linalg.cond(A, inf), 4) - assert_almost_equal(linalg.cond(A, -inf), 2/3) + 
assert_almost_equal(linalg.cond(A, -inf), 2 / 3) assert_almost_equal(linalg.cond(A, 1), 4) assert_almost_equal(linalg.cond(A, -1), 0.5) assert_almost_equal(linalg.cond(A, 'fro'), np.sqrt(265 / 12)) + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + @pytest.mark.parametrize('norm_ord', [1, -1, 2, -2, 'fro', np.inf, -np.inf]) + def test_cond_dtypes(self, dtype, norm_ord): + # Check that the condition number is computed in the same dtype + # as the input matrix + A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]], dtype=dtype) + out_type = get_real_dtype(dtype) + assert_equal(linalg.cond(A, p=norm_ord).dtype, out_type) + def test_singular(self): # Singular matrices have infinite condition number for # positive norms, and negative norms shouldn't raise @@ -801,14 +838,14 @@ def test_nan(self): p_pos = [None, 1, 2, 'fro'] A = np.ones((2, 2)) - A[0,1] = np.nan + A[0, 1] = np.nan for p in ps: c = linalg.cond(A, p) assert_(isinstance(c, np.float64)) assert_(np.isnan(c)) A = np.ones((3, 2, 2)) - A[1,0,1] = np.nan + A[1, 0, 1] = np.nan for p in ps: c = linalg.cond(A, p) assert_(np.isnan(c[1])) @@ -824,15 +861,15 @@ def test_stacked_singular(self): # singular np.random.seed(1234) A = np.random.rand(2, 2, 2, 2) - A[0,0] = 0 - A[1,1] = 0 + A[0, 0] = 0 + A[1, 1] = 0 for p in (None, 1, 2, 'fro', -1, -2): c = linalg.cond(A, p) - assert_equal(c[0,0], np.inf) - assert_equal(c[1,1], np.inf) - assert_(np.isfinite(c[0,1])) - assert_(np.isfinite(c[1,0])) + assert_equal(c[0, 0], np.inf) + assert_equal(c[1, 1], np.inf) + assert_(np.isfinite(c[0, 1])) + assert_(np.isfinite(c[1, 0])) class PinvCases(LinalgSquareTestCase, @@ -963,8 +1000,8 @@ def do(self, a, b, tags): np.asarray(abs(np.dot(a, x) - b)) ** 2).sum(axis=0) expect_resids = np.asarray(expect_resids) if np.asarray(b).ndim == 1: - expect_resids.shape = (1,) - assert_equal(residuals.shape, expect_resids.shape) + expect_resids = expect_resids.reshape((1,)) + assert_equal(residuals.shape, expect_resids.shape) else: 
expect_resids = np.array([]).view(type(x)) assert_almost_equal(residuals, expect_resids) @@ -1031,8 +1068,8 @@ class TestMatrixPower: rshft_3 = rshft_0[[1, 2, 3, 0]] rshft_all = [rshft_0, rshft_1, rshft_2, rshft_3] noninv = array([[1, 0], [0, 0]]) - stacked = np.block([[[rshft_0]]]*2) - #FIXME the 'e' dtype might work in future + stacked = np.block([[[rshft_0]]] * 2) + # FIXME the 'e' dtype might work in future dtnoinv = [object, np.dtype('e'), np.dtype('g'), np.dtype('G')] def test_large_power(self, dt): @@ -1054,7 +1091,7 @@ def tz(M): for mat in self.rshft_all: tz(mat.astype(dt)) - if dt != object: + if np.dtype(dt).type is not np.object_: tz(self.stacked.astype(dt)) def test_power_is_one(self, dt): @@ -1065,7 +1102,7 @@ def tz(mat): for mat in self.rshft_all: tz(mat.astype(dt)) - if dt != object: + if np.dtype(dt).type is not np.object_: tz(self.stacked.astype(dt)) def test_power_is_two(self, dt): @@ -1077,7 +1114,7 @@ def tz(mat): for mat in self.rshft_all: tz(mat.astype(dt)) - if dt != object: + if np.dtype(dt).type is not np.object_: tz(self.stacked.astype(dt)) def test_power_is_minus_one(self, dt): @@ -1294,8 +1331,9 @@ def test_vector_return_type(self): self.check_dtype(at, an) assert_almost_equal(an, 0.0) - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "divide by zero encountered") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "divide by zero encountered", RuntimeWarning) an = norm(at, -1) self.check_dtype(at, an) assert_almost_equal(an, 0.0) @@ -1310,11 +1348,11 @@ def test_vector_return_type(self): an = norm(at, 2) self.check_dtype(at, an) - assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0/2.0)) + assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0 / 2.0)) an = norm(at, 4) self.check_dtype(at, an) - assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0/4.0)) + assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0 / 4.0)) an = norm(at, np.inf) self.check_dtype(at, an) @@ 
-1457,8 +1495,9 @@ def test_matrix_return_type(self): self.check_dtype(at, an) assert_almost_equal(an, 2.0) - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "divide by zero encountered") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "divide by zero encountered", RuntimeWarning) an = norm(at, -1) self.check_dtype(at, an) assert_almost_equal(an, 1.0) @@ -1469,7 +1508,7 @@ def test_matrix_return_type(self): an = norm(at, 2) self.check_dtype(at, an) - assert_almost_equal(an, 3.0**(1.0/2.0)) + assert_almost_equal(an, 3.0**(1.0 / 2.0)) an = norm(at, -2) self.check_dtype(at, an) @@ -1626,7 +1665,7 @@ def test_matrix_rank(self): # accepts array-like assert_equal(matrix_rank([1]), 1) # greater than 2 dimensions treated as stacked matrices - ms = np.array([I, np.eye(4), np.zeros((4,4))]) + ms = np.array([I, np.eye(4), np.zeros((4, 4))]) assert_equal(matrix_rank(ms), np.array([3, 4, 0])) # works on scalar assert_equal(matrix_rank(1), 1) @@ -1706,7 +1745,6 @@ def check_qr(self, a): assert_(isinstance(r2, a_type)) assert_almost_equal(r2, r1) - @pytest.mark.parametrize(["m", "n"], [ (3, 0), (0, 3), @@ -1782,7 +1820,7 @@ def check_qr_stacked(self, a): assert_almost_equal(matmul(q, r), a) I_mat = np.identity(q.shape[-1]) stack_I_mat = np.broadcast_to(I_mat, - q.shape[:-2] + (q.shape[-1],)*2) + q.shape[:-2] + (q.shape[-1],) * 2) assert_almost_equal(matmul(swapaxes(q, -1, -2).conj(), q), stack_I_mat) assert_almost_equal(np.triu(r[..., :, :]), r) @@ -1797,7 +1835,7 @@ def check_qr_stacked(self, a): assert_almost_equal(matmul(q1, r1), a) I_mat = np.identity(q1.shape[-1]) stack_I_mat = np.broadcast_to(I_mat, - q1.shape[:-2] + (q1.shape[-1],)*2) + q1.shape[:-2] + (q1.shape[-1],) * 2) assert_almost_equal(matmul(swapaxes(q1, -1, -2).conj(), q1), stack_I_mat) assert_almost_equal(np.triu(r1[..., :, :]), r1) @@ -1822,7 +1860,7 @@ def test_stacked_inputs(self, outer_size, size, dt): A = rng.normal(size=outer_size + size).astype(dt) B = 
rng.normal(size=outer_size + size).astype(dt) self.check_qr_stacked(A) - self.check_qr_stacked(A + 1.j*B) + self.check_qr_stacked(A + 1.j * B) class TestCholesky: @@ -1839,7 +1877,7 @@ def test_basic_property(self, shape, dtype, upper): np.random.seed(1) a = np.random.randn(*shape) if np.issubdtype(dtype, np.complexfloating): - a = a + 1j*np.random.randn(*shape) + a = a + 1j * np.random.randn(*shape) t = list(range(len(shape))) t[-2:] = -1, -2 @@ -1854,8 +1892,8 @@ def test_basic_property(self, shape, dtype, upper): b = np.matmul(c.transpose(t).conj(), c) else: b = np.matmul(c, c.transpose(t).conj()) - with np._no_nep50_warning(): - atol = 500 * a.shape[0] * np.finfo(dtype).eps + + atol = 500 * a.shape[0] * np.finfo(dtype).eps assert_allclose(b, a, atol=atol, err_msg=f'{shape} {dtype}\n{a}\n{c}') # Check diag(L or U) is real and positive @@ -1881,7 +1919,7 @@ class ArraySubclass(np.ndarray): def test_upper_lower_arg(self): # Explicit test of upper argument that also checks the default. - a = np.array([[1+0j, 0-2j], [0+2j, 5+0j]]) + a = np.array([[1 + 0j, 0 - 2j], [0 + 2j, 5 + 0j]]) assert_equal(linalg.cholesky(a), linalg.cholesky(a, upper=False)) @@ -1944,6 +1982,12 @@ def test_generalized_raise_multiloop(): assert_raises(np.linalg.LinAlgError, np.linalg.inv, x) +@pytest.mark.skipif( + threading.active_count() > 1, + reason="skipping test that uses fork because there are multiple threads") +@pytest.mark.skipif( + NOGIL_BUILD, + reason="Cannot safely use fork in tests on the free-threaded build") def test_xerbla_override(): # Check that our xerbla has been successfully linked in. 
If it is not, # the default xerbla routine is called, which prints a message to stdout @@ -2174,8 +2218,7 @@ def test_non_square_handling(self, arr, ind): ((24, 8, 3), 1), ]) def test_tensorinv_shape(self, shape, ind): - a = np.eye(24) - a.shape = shape + a = np.eye(24).reshape(shape) ainv = linalg.tensorinv(a=a, ind=ind) expected = a.shape[ind:] + a.shape[:ind] actual = ainv.shape @@ -2185,15 +2228,13 @@ def test_tensorinv_shape(self, shape, ind): 0, -2, ]) def test_tensorinv_ind_limit(self, ind): - a = np.eye(24) - a.shape = (4, 6, 8, 3) + a = np.eye(24).reshape((4, 6, 8, 3)) with assert_raises(ValueError): linalg.tensorinv(a=a, ind=ind) def test_tensorinv_result(self): # mimic a docstring example - a = np.eye(24) - a.shape = (24, 8, 3) + a = np.eye(24).reshape((24, 8, 3)) ainv = linalg.tensorinv(a, ind=1) b = np.ones(24) assert_allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) @@ -2236,9 +2277,9 @@ def test_blas64_dot(): n = 2**32 a = np.zeros([1, n], dtype=np.float32) b = np.ones([1, 1], dtype=np.float32) - a[0,-1] = 1 + a[0, -1] = 1 c = np.dot(b, a) - assert_equal(c[0,-1], 1) + assert_equal(c[0, -1], 1) @pytest.mark.xfail(not HAS_LAPACK64, @@ -2307,6 +2348,14 @@ def test_cross(): assert_equal(actual, expected) + # We test that lists are converted to arrays. 
+ u = [1, 2, 3] + v = [4, 5, 6] + actual = np.linalg.cross(u, v) + expected = array([-3, 6, -3]) + + assert_equal(actual, expected) + with assert_raises_regex( ValueError, r"input arrays must be \(arrays of\) 3-dimensional vectors" @@ -2357,6 +2406,16 @@ def test_matrix_norm(): assert_almost_equal(actual, np.array([[14.2828]]), double_decimal=3) +def test_matrix_norm_empty(): + for shape in [(0, 2), (2, 0), (0, 0)]: + for dtype in [np.float64, np.float32, np.int32]: + x = np.zeros(shape, dtype) + assert_equal(np.linalg.matrix_norm(x, ord="fro"), 0) + assert_equal(np.linalg.matrix_norm(x, ord="nuc"), 0) + assert_equal(np.linalg.matrix_norm(x, ord=1), 0) + assert_equal(np.linalg.matrix_norm(x, ord=2), 0) + assert_equal(np.linalg.matrix_norm(x, ord=np.inf), 0) + def test_vector_norm(): x = np.arange(9).reshape((3, 3)) actual = np.linalg.vector_norm(x) @@ -2373,3 +2432,30 @@ def test_vector_norm(): expected = np.full((1, 1), 14.2828, dtype='float64') assert_equal(actual.shape, expected.shape) assert_almost_equal(actual, expected, double_decimal=3) + + +def test_vector_norm_empty(): + for dtype in [np.float64, np.float32, np.int32]: + x = np.zeros(0, dtype) + assert_equal(np.linalg.vector_norm(x, ord=1), 0) + assert_equal(np.linalg.vector_norm(x, ord=2), 0) + assert_equal(np.linalg.vector_norm(x, ord=np.inf), 0) + +def test_empty_matrix_rank(): + assert_equal(matrix_rank(np.zeros((0, 0))), 0) + assert_equal(matrix_rank(np.zeros((0, 5))), 0) + assert_equal(matrix_rank(np.zeros((5, 0))), 0) + + result = matrix_rank(np.zeros((0, 5, 5))) + assert_equal(result.shape, (0,)) + assert_equal(result.dtype, np.intp) + + result = matrix_rank(np.zeros((3, 0, 5))) + assert_equal(result, np.array([0, 0, 0])) + + result = matrix_rank(np.zeros((2, 5, 0))) + assert_equal(result, np.array([0, 0])) + + result = matrix_rank(np.zeros((2, 3, 0, 4))) + assert_equal(result.shape, (2, 3)) + assert_equal(result, np.zeros((2, 3), dtype=np.intp)) diff --git a/numpy/linalg/tests/test_regression.py 
b/numpy/linalg/tests/test_regression.py index 8cac195aa864..053e7130da63 100644 --- a/numpy/linalg/tests/test_regression.py +++ b/numpy/linalg/tests/test_regression.py @@ -1,14 +1,17 @@ """ Test functions for linalg module """ -import warnings import pytest import numpy as np -from numpy import linalg, arange, float64, array, dot, transpose +from numpy import arange, array, dot, float64, linalg, transpose from numpy.testing import ( - assert_, assert_raises, assert_equal, assert_array_equal, - assert_array_almost_equal, assert_array_less + assert_, + assert_array_almost_equal, + assert_array_equal, + assert_array_less, + assert_equal, + assert_raises, ) @@ -30,7 +33,7 @@ def test_eig_build(self): 1.51971555e-15 + 0.j, -1.51308713e-15 + 0.j]) a = arange(13 * 13, dtype=float64) - a.shape = (13, 13) + a = a.reshape((13, 13)) a = a % 17 va, ve = linalg.eig(a) va.sort() @@ -41,9 +44,9 @@ def test_eigh_build(self): # Ticket 662. rvals = [68.60568999, 89.57756725, 106.67185574] - cov = array([[77.70273908, 3.51489954, 15.64602427], - [3.51489954, 88.97013878, -1.07431931], - [15.64602427, -1.07431931, 98.18223512]]) + cov = array([[77.70273908, 3.51489954, 15.64602427], + [ 3.51489954, 88.97013878, -1.07431931], + [15.64602427, -1.07431931, 98.18223512]]) vals, vecs = linalg.eigh(cov) assert_array_almost_equal(vals, rvals) @@ -65,8 +68,8 @@ def test_norm_vector_badarg(self): def test_lapack_endian(self): # For bug #1482 - a = array([[5.7998084, -2.1825367], - [-2.1825367, 9.85910595]], dtype='>f8') + a = array([[ 5.7998084, -2.1825367], + [-2.1825367, 9.85910595]], dtype='>f8') b = array(a, dtype=' +#define NPY_TARGET_VERSION NPY_2_1_API_VERSION #define NPY_NO_DEPRECATED_API NPY_API_VERSION #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" @@ -27,6 +28,15 @@ static const char* umath_linalg_version_string = "0.1.5"; +// global lock to serialize calls into lapack_lite +#if !HAVE_EXTERNAL_LAPACK +#if PY_VERSION_HEX < 0x30d00b3 +static PyThread_type_lock 
lapack_lite_lock; +#else +static PyMutex lapack_lite_lock = {0}; +#endif +#endif + /* **************************************************************************** * Debugging support * @@ -400,6 +410,18 @@ FNAME(zgemm)(char *transa, char *transb, #define LAPACK(FUNC) \ FNAME(FUNC) +#ifdef HAVE_EXTERNAL_LAPACK + #define LOCK_LAPACK_LITE + #define UNLOCK_LAPACK_LITE +#else +#if PY_VERSION_HEX < 0x30d00b3 + #define LOCK_LAPACK_LITE PyThread_acquire_lock(lapack_lite_lock, WAIT_LOCK) + #define UNLOCK_LAPACK_LITE PyThread_release_lock(lapack_lite_lock) +#else + #define LOCK_LAPACK_LITE PyMutex_Lock(&lapack_lite_lock) + #define UNLOCK_LAPACK_LITE PyMutex_Unlock(&lapack_lite_lock) +#endif +#endif /* ***************************************************************************** @@ -426,6 +448,15 @@ set_fp_invalid_or_clear(int error_occurred) } } +static inline void +report_no_memory() +{ + NPY_ALLOW_C_API_DEF + NPY_ALLOW_C_API; + PyErr_NoMemory(); + NPY_DISABLE_C_API; +} + /* ***************************************************************************** ** Some handy constants ** @@ -1110,7 +1141,9 @@ using ftyp = fortran_type_t; fortran_int lda = fortran_int_max(m, 1); int i; /* note: done in place */ + LOCK_LAPACK_LITE; getrf(&m, &m, (ftyp*)src, &lda, pivots, &info); + UNLOCK_LAPACK_LITE; if (info == 0) { int change_sign = 0; @@ -1175,10 +1208,7 @@ slogdet(char **args, } else { /* TODO: Requires use of new ufunc API to indicate error return */ - NPY_ALLOW_C_API_DEF - NPY_ALLOW_C_API; - PyErr_NoMemory(); - NPY_DISABLE_C_API; + report_no_memory(); } } @@ -1231,10 +1261,7 @@ det(char **args, } else { /* TODO: Requires use of new ufunc API to indicate error return */ - NPY_ALLOW_C_API_DEF - NPY_ALLOW_C_API; - PyErr_NoMemory(); - NPY_DISABLE_C_API; + report_no_memory(); } } @@ -1262,22 +1289,26 @@ static inline fortran_int call_evd(EIGH_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(ssyevd)(¶ms->JOBZ, ¶ms->UPLO, ¶ms->N, params->A, ¶ms->LDA, params->W, 
params->WORK, ¶ms->LWORK, params->IWORK, ¶ms->LIWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } static inline fortran_int call_evd(EIGH_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(dsyevd)(¶ms->JOBZ, ¶ms->UPLO, ¶ms->N, params->A, ¶ms->LDA, params->W, params->WORK, ¶ms->LWORK, params->IWORK, ¶ms->LIWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -1303,7 +1334,7 @@ init_evd(EIGH_PARAMS_t* params, char JOBZ, char UPLO, mem_buff = (npy_uint8 *)malloc(alloc_size); if (!mem_buff) { - goto error; + goto no_memory; } a = mem_buff; w = mem_buff + safe_N * safe_N * sizeof(typ); @@ -1337,7 +1368,7 @@ init_evd(EIGH_PARAMS_t* params, char JOBZ, char UPLO, mem_buff2 = (npy_uint8 *)malloc(lwork*sizeof(typ) + liwork*sizeof(fortran_int)); if (!mem_buff2) { - goto error; + goto no_memory; } work = mem_buff2; @@ -1350,6 +1381,9 @@ init_evd(EIGH_PARAMS_t* params, char JOBZ, char UPLO, return 1; + no_memory: + report_no_memory(); + error: /* something failed */ memset(params, 0, sizeof(*params)); @@ -1364,12 +1398,14 @@ static inline fortran_int call_evd(EIGH_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(cheevd)(¶ms->JOBZ, ¶ms->UPLO, ¶ms->N, (fortran_type_t*)params->A, ¶ms->LDA, params->W, (fortran_type_t*)params->WORK, ¶ms->LWORK, params->RWORK, ¶ms->LRWORK, params->IWORK, ¶ms->LIWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -1377,12 +1413,14 @@ static inline fortran_int call_evd(EIGH_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(zheevd)(¶ms->JOBZ, ¶ms->UPLO, ¶ms->N, (fortran_type_t*)params->A, ¶ms->LDA, params->W, (fortran_type_t*)params->WORK, ¶ms->LWORK, params->RWORK, ¶ms->LRWORK, params->IWORK, ¶ms->LIWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -1408,7 +1446,7 @@ using fbasetyp = fortran_type_t; mem_buff = (npy_uint8 *)malloc(safe_N * safe_N * sizeof(typ) + safe_N * sizeof(basetyp)); if (!mem_buff) { - goto error; + goto no_memory; } a = mem_buff; w = mem_buff + safe_N * safe_N * sizeof(typ); @@ -1446,7 +1484,7 @@ 
using fbasetyp = fortran_type_t; lrwork*sizeof(basetyp) + liwork*sizeof(fortran_int)); if (!mem_buff2) { - goto error; + goto no_memory; } work = mem_buff2; @@ -1463,6 +1501,8 @@ using fbasetyp = fortran_type_t; return 1; /* something failed */ +no_memory: + report_no_memory(); error: memset(params, 0, sizeof(*params)); free(mem_buff2); @@ -1616,11 +1656,13 @@ static inline fortran_int call_gesv(GESV_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(sgesv)(¶ms->N, ¶ms->NRHS, params->A, ¶ms->LDA, params->IPIV, params->B, ¶ms->LDB, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -1628,11 +1670,13 @@ static inline fortran_int call_gesv(GESV_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(dgesv)(¶ms->N, ¶ms->NRHS, params->A, ¶ms->LDA, params->IPIV, params->B, ¶ms->LDB, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -1640,11 +1684,13 @@ static inline fortran_int call_gesv(GESV_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(cgesv)(¶ms->N, ¶ms->NRHS, params->A, ¶ms->LDA, params->IPIV, params->B, ¶ms->LDB, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -1652,11 +1698,13 @@ static inline fortran_int call_gesv(GESV_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(zgesv)(¶ms->N, ¶ms->NRHS, params->A, ¶ms->LDA, params->IPIV, params->B, ¶ms->LDB, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -1693,7 +1741,10 @@ init_gesv(GESV_PARAMS_t *params, fortran_int N, fortran_int NRHS) params->LDB = ld; return 1; + error: + report_no_memory(); + free(mem_buff); memset(params, 0, sizeof(*params)); @@ -1870,9 +1921,11 @@ static inline fortran_int call_potrf(POTR_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(spotrf)(¶ms->UPLO, ¶ms->N, params->A, ¶ms->LDA, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -1880,9 +1933,11 @@ static inline fortran_int call_potrf(POTR_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(dpotrf)(¶ms->UPLO, ¶ms->N, params->A, ¶ms->LDA, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -1890,9 +1945,11 
@@ static inline fortran_int call_potrf(POTR_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(cpotrf)(¶ms->UPLO, ¶ms->N, params->A, ¶ms->LDA, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -1900,9 +1957,11 @@ static inline fortran_int call_potrf(POTR_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(zpotrf)(¶ms->UPLO, ¶ms->N, params->A, ¶ms->LDA, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -1929,6 +1988,8 @@ init_potrf(POTR_PARAMS_t *params, char UPLO, fortran_int N) return 1; error: + report_no_memory(); + free(mem_buff); memset(params, 0, sizeof(*params)); @@ -2073,6 +2134,7 @@ static inline fortran_int call_geev(GEEV_PARAMS_t* params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(sgeev)(¶ms->JOBVL, ¶ms->JOBVR, ¶ms->N, params->A, ¶ms->LDA, params->WR, params->WI, @@ -2080,6 +2142,7 @@ call_geev(GEEV_PARAMS_t* params) params->VRR, ¶ms->LDVR, params->WORK, ¶ms->LWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -2087,6 +2150,7 @@ static inline fortran_int call_geev(GEEV_PARAMS_t* params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(dgeev)(¶ms->JOBVL, ¶ms->JOBVR, ¶ms->N, params->A, ¶ms->LDA, params->WR, params->WI, @@ -2094,6 +2158,7 @@ call_geev(GEEV_PARAMS_t* params) params->VRR, ¶ms->LDVR, params->WORK, ¶ms->LWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -2123,7 +2188,7 @@ scalar_trait) vlr_size + vrr_size + w_size + vl_size + vr_size); if (!mem_buff) { - goto error; + goto no_memory; } a = mem_buff; @@ -2166,7 +2231,7 @@ scalar_trait) mem_buff2 = (npy_uint8 *)malloc(work_count*sizeof(typ)); if (!mem_buff2) { - goto error; + goto no_memory; } work = mem_buff2; @@ -2174,6 +2239,10 @@ scalar_trait) params->WORK = (typ*)work; return 1; + + no_memory: + report_no_memory(); + error: free(mem_buff2); free(mem_buff); @@ -2285,6 +2354,7 @@ call_geev(GEEV_PARAMS_t* params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(cgeev)(¶ms->JOBVL, ¶ms->JOBVR, ¶ms->N, params->A, ¶ms->LDA, params->W, @@ -2293,6 +2363,7 @@ call_geev(GEEV_PARAMS_t* params) 
params->WORK, ¶ms->LWORK, params->WR, /* actually RWORK */ &rv); + UNLOCK_LAPACK_LITE; return rv; } #endif @@ -2302,6 +2373,7 @@ call_geev(GEEV_PARAMS_t* params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(zgeev)(¶ms->JOBVL, ¶ms->JOBVR, ¶ms->N, params->A, ¶ms->LDA, params->W, @@ -2310,6 +2382,7 @@ call_geev(GEEV_PARAMS_t* params) params->WORK, ¶ms->LWORK, params->WR, /* actually RWORK */ &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -2336,7 +2409,7 @@ using realtyp = basetype_t; mem_buff = (npy_uint8 *)malloc(total_size); if (!mem_buff) { - goto error; + goto no_memory; } a = mem_buff; @@ -2378,7 +2451,7 @@ using realtyp = basetype_t; mem_buff2 = (npy_uint8 *)malloc(work_count*sizeof(ftyp)); if (!mem_buff2) { - goto error; + goto no_memory; } work = mem_buff2; @@ -2387,6 +2460,9 @@ using realtyp = basetype_t; params->WORK = (ftyp*)work; return 1; + + no_memory: + report_no_memory(); error: free(mem_buff2); free(mem_buff); @@ -2632,6 +2708,7 @@ static inline fortran_int call_gesdd(GESDD_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(sgesdd)(¶ms->JOBZ, ¶ms->M, ¶ms->N, params->A, ¶ms->LDA, params->S, @@ -2640,12 +2717,14 @@ call_gesdd(GESDD_PARAMS_t *params) params->WORK, ¶ms->LWORK, (fortran_int*)params->IWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } static inline fortran_int call_gesdd(GESDD_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(dgesdd)(¶ms->JOBZ, ¶ms->M, ¶ms->N, params->A, ¶ms->LDA, params->S, @@ -2654,6 +2733,7 @@ call_gesdd(GESDD_PARAMS_t *params) params->WORK, ¶ms->LWORK, (fortran_int*)params->IWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -2694,7 +2774,7 @@ init_gesdd(GESDD_PARAMS_t *params, mem_buff = (npy_uint8 *)malloc(a_size + s_size + u_size + vt_size + iwork_size); if (!mem_buff) { - goto error; + goto no_memory; } a = mem_buff; @@ -2738,7 +2818,7 @@ init_gesdd(GESDD_PARAMS_t *params, mem_buff2 = (npy_uint8 *)malloc(work_size); if (!mem_buff2) { - goto error; + goto no_memory; } work = mem_buff2; @@ -2747,6 
+2827,9 @@ init_gesdd(GESDD_PARAMS_t *params, params->WORK = (ftyp*)work; return 1; + + no_memory: + report_no_memory(); error: TRACE_TXT("%s failed init\n", __FUNCTION__); free(mem_buff); @@ -2760,6 +2843,7 @@ static inline fortran_int call_gesdd(GESDD_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(cgesdd)(¶ms->JOBZ, ¶ms->M, ¶ms->N, params->A, ¶ms->LDA, params->S, @@ -2769,12 +2853,14 @@ call_gesdd(GESDD_PARAMS_t *params) params->RWORK, params->IWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } static inline fortran_int call_gesdd(GESDD_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(zgesdd)(¶ms->JOBZ, ¶ms->M, ¶ms->N, params->A, ¶ms->LDA, params->S, @@ -2784,6 +2870,7 @@ call_gesdd(GESDD_PARAMS_t *params) params->RWORK, params->IWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -2830,7 +2917,7 @@ using frealtyp = basetype_t; rwork_size + iwork_size); if (!mem_buff) { - goto error; + goto no_memory; } a = mem_buff; @@ -2875,7 +2962,7 @@ using frealtyp = basetype_t; mem_buff2 = (npy_uint8 *)malloc(work_size); if (!mem_buff2) { - goto error; + goto no_memory; } work = mem_buff2; @@ -2884,6 +2971,10 @@ using frealtyp = basetype_t; params->WORK = (ftyp*)work; return 1; + + no_memory: + report_no_memory(); + error: TRACE_TXT("%s failed init\n", __FUNCTION__); free(mem_buff2); @@ -3074,22 +3165,26 @@ static inline fortran_int call_geqrf(GEQRF_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(dgeqrf)(¶ms->M, ¶ms->N, params->A, ¶ms->LDA, params->TAU, params->WORK, ¶ms->LWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } static inline fortran_int call_geqrf(GEQRF_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(zgeqrf)(¶ms->M, ¶ms->N, params->A, ¶ms->LDA, params->TAU, params->WORK, ¶ms->LWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -3118,7 +3213,7 @@ using ftyp = fortran_doublereal; mem_buff = (npy_uint8 *)malloc(a_size + tau_size); if (!mem_buff) - goto error; + goto no_memory; a = mem_buff; tau = a + a_size; @@
-3151,13 +3246,17 @@ using ftyp = fortran_doublereal; work_size = (size_t) params->LWORK * sizeof(ftyp); mem_buff2 = (npy_uint8 *)malloc(work_size); if (!mem_buff2) - goto error; + goto no_memory; work = mem_buff2; params->WORK = (ftyp*)work; return 1; + + no_memory: + report_no_memory(); + error: TRACE_TXT("%s failed init\n", __FUNCTION__); free(mem_buff); @@ -3192,7 +3291,7 @@ using ftyp = fortran_doublecomplex; mem_buff = (npy_uint8 *)malloc(a_size + tau_size); if (!mem_buff) - goto error; + goto no_memory; a = mem_buff; tau = a + a_size; @@ -3227,13 +3326,17 @@ using ftyp = fortran_doublecomplex; mem_buff2 = (npy_uint8 *)malloc(work_size); if (!mem_buff2) - goto error; + goto no_memory; work = mem_buff2; params->WORK = (ftyp*)work; return 1; + + no_memory: + report_no_memory(); + error: TRACE_TXT("%s failed init\n", __FUNCTION__); free(mem_buff); @@ -3316,22 +3419,26 @@ static inline fortran_int call_gqr(GQR_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(dorgqr)(¶ms->M, ¶ms->MC, ¶ms->MN, params->Q, ¶ms->LDA, params->TAU, params->WORK, ¶ms->LWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } static inline fortran_int call_gqr(GQR_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(zungqr)(¶ms->M, ¶ms->MC, ¶ms->MN, params->Q, ¶ms->LDA, params->TAU, params->WORK, ¶ms->LWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -3361,7 +3468,7 @@ using ftyp = fortran_doublereal; mem_buff = (npy_uint8 *)malloc(q_size + tau_size + a_size); if (!mem_buff) - goto error; + goto no_memory; q = mem_buff; tau = q + q_size; @@ -3396,13 +3503,17 @@ using ftyp = fortran_doublereal; mem_buff2 = (npy_uint8 *)malloc(work_size); if (!mem_buff2) - goto error; + goto no_memory; work = mem_buff2; params->WORK = (ftyp*)work; return 1; + + no_memory: + report_no_memory(); + error: TRACE_TXT("%s failed init\n", __FUNCTION__); free(mem_buff); @@ -3440,7 +3551,7 @@ using ftyp=fortran_doublecomplex; mem_buff = (npy_uint8 *)malloc(q_size + tau_size + a_size); if (!mem_buff) - 
goto error; + goto no_memory; q = mem_buff; tau = q + q_size; @@ -3476,7 +3587,7 @@ using ftyp=fortran_doublecomplex; mem_buff2 = (npy_uint8 *)malloc(work_size); if (!mem_buff2) - goto error; + goto no_memory; work = mem_buff2; @@ -3484,6 +3595,10 @@ using ftyp=fortran_doublecomplex; params->LWORK = work_count; return 1; + + no_memory: + report_no_memory(); + error: TRACE_TXT("%s failed init\n", __FUNCTION__); free(mem_buff); @@ -3712,6 +3827,7 @@ static inline fortran_int call_gelsd(GELSD_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(sgelsd)(¶ms->M, ¶ms->N, ¶ms->NRHS, params->A, ¶ms->LDA, params->B, ¶ms->LDB, @@ -3720,6 +3836,7 @@ call_gelsd(GELSD_PARAMS_t *params) params->WORK, ¶ms->LWORK, params->IWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -3728,6 +3845,7 @@ static inline fortran_int call_gelsd(GELSD_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(dgelsd)(¶ms->M, ¶ms->N, ¶ms->NRHS, params->A, ¶ms->LDA, params->B, ¶ms->LDB, @@ -3736,6 +3854,7 @@ call_gelsd(GELSD_PARAMS_t *params) params->WORK, ¶ms->LWORK, params->IWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -3822,10 +3941,7 @@ scalar_trait) return 1; no_memory: - NPY_ALLOW_C_API_DEF - NPY_ALLOW_C_API; - PyErr_NoMemory(); - NPY_DISABLE_C_API; + report_no_memory(); error: TRACE_TXT("%s failed init\n", __FUNCTION__); @@ -3839,6 +3955,7 @@ static inline fortran_int call_gelsd(GELSD_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(cgelsd)(¶ms->M, ¶ms->N, ¶ms->NRHS, params->A, ¶ms->LDA, params->B, ¶ms->LDB, @@ -3847,6 +3964,7 @@ call_gelsd(GELSD_PARAMS_t *params) params->WORK, ¶ms->LWORK, params->RWORK, (fortran_int*)params->IWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -3854,6 +3972,7 @@ static inline fortran_int call_gelsd(GELSD_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(zgelsd)(¶ms->M, ¶ms->N, ¶ms->NRHS, params->A, ¶ms->LDA, params->B, ¶ms->LDB, @@ -3862,6 +3981,7 @@ call_gelsd(GELSD_PARAMS_t *params) params->WORK, ¶ms->LWORK, 
params->RWORK, (fortran_int*)params->IWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -3954,10 +4074,7 @@ using frealtyp = basetype_t; return 1; no_memory: - NPY_ALLOW_C_API_DEF - NPY_ALLOW_C_API; - PyErr_NoMemory(); - NPY_DISABLE_C_API; + report_no_memory(); error: TRACE_TXT("%s failed init\n", __FUNCTION__); @@ -4273,6 +4390,60 @@ static const char lstsq_types[] = { NPY_CDOUBLE, NPY_CDOUBLE, NPY_DOUBLE, NPY_CDOUBLE, NPY_DOUBLE, NPY_INT, NPY_DOUBLE, }; +/* + * Function to process core dimensions of a gufunc with two input core + * dimensions m and n, and one output core dimension p which must be + * min(m, n). The parameters m_index, n_index and p_index indicate + * the locations of the core dimensions in core_dims[]. + */ +static int +mnp_min_indexed_process_core_dims(PyUFuncObject *gufunc, + npy_intp core_dims[], + npy_intp m_index, + npy_intp n_index, + npy_intp p_index) +{ + npy_intp m = core_dims[m_index]; + npy_intp n = core_dims[n_index]; + npy_intp p = core_dims[p_index]; + npy_intp required_p = m > n ? n : m; /* min(m, n) */ + if (p == -1) { + core_dims[p_index] = required_p; + return 0; + } + if (p != required_p) { + PyErr_Format(PyExc_ValueError, + "core output dimension p must be min(m, n), where " + "m and n are the core dimensions of the inputs. Got " + "m=%zd and n=%zd, so p must be %zd, but got p=%zd.", + m, n, required_p, p); + return -1; + } + return 0; +} + +/* + * Function to process core dimensions of a gufunc with two input core + * dimensions m and n, and one output core dimension p which must be + * min(m, n). There can be only those three core dimensions in the + * gufunc shape signature. + */ +static int +mnp_min_process_core_dims(PyUFuncObject *gufunc, npy_intp core_dims[]) +{ + return mnp_min_indexed_process_core_dims(gufunc, core_dims, 0, 1, 2); +} + +/* + * Process the core dimensions for the lstsq gufunc. 
+ */ +static int +lstsq_process_core_dims(PyUFuncObject *gufunc, npy_intp core_dims[]) +{ + return mnp_min_indexed_process_core_dims(gufunc, core_dims, 0, 1, 3); +} + + typedef struct gufunc_descriptor_struct { const char *name; const char *signature; @@ -4282,6 +4453,7 @@ typedef struct gufunc_descriptor_struct { int nout; PyUFuncGenericFunction *funcs; const char *types; + PyUFunc_ProcessCoreDimsFunc *process_core_dims_func; } GUFUNC_DESCRIPTOR_t; GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { @@ -4294,7 +4466,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m)->(),()\" \n", 4, 1, 2, FUNC_ARRAY_NAME(slogdet), - slogdet_types + slogdet_types, + nullptr }, { "det", @@ -4303,7 +4476,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m)->()\" \n", 4, 1, 1, FUNC_ARRAY_NAME(det), - equal_2_types + equal_2_types, + nullptr }, { "eigh_lo", @@ -4315,7 +4489,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m)->(m),(m,m)\" \n", 4, 1, 2, FUNC_ARRAY_NAME(eighlo), - eigh_types + eigh_types, + nullptr }, { "eigh_up", @@ -4327,7 +4502,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m)->(m),(m,m)\" \n", 4, 1, 2, FUNC_ARRAY_NAME(eighup), - eigh_types + eigh_types, + nullptr }, { "eigvalsh_lo", @@ -4339,7 +4515,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m)->(m)\" \n", 4, 1, 1, FUNC_ARRAY_NAME(eigvalshlo), - eighvals_types + eighvals_types, + nullptr }, { "eigvalsh_up", @@ -4351,7 +4528,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m)->(m)\" \n", 4, 1, 1, FUNC_ARRAY_NAME(eigvalshup), - eighvals_types + eighvals_types, + nullptr }, { "solve", @@ -4362,7 +4540,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m),(m,n)->(m,n)\" \n", 4, 2, 1, FUNC_ARRAY_NAME(solve), - equal_3_types + equal_3_types, + nullptr }, { "solve1", @@ -4373,7 +4552,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m),(m)->(m)\" \n", 4, 2, 1, FUNC_ARRAY_NAME(solve1), - equal_3_types + equal_3_types, + nullptr }, { "inv", @@ -4384,7 
+4564,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m)->(m,m)\" \n", 4, 1, 1, FUNC_ARRAY_NAME(inv), - equal_2_types + equal_2_types, + nullptr }, { "cholesky_lo", @@ -4394,7 +4575,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m)->(m,m)\"\n", 4, 1, 1, FUNC_ARRAY_NAME(cholesky_lo), - equal_2_types + equal_2_types, + nullptr }, { "cholesky_up", @@ -4404,55 +4586,36 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m)->(m,m)\"\n", 4, 1, 1, FUNC_ARRAY_NAME(cholesky_up), - equal_2_types + equal_2_types, + nullptr }, { - "svd_m", - "(m,n)->(m)", - "svd when n>=m. ", + "svd", + "(m,n)->(p)", + "Singular values of array with shape (m, n).\n" + "Return value is 1-d array with shape (min(m, n),).", 4, 1, 1, FUNC_ARRAY_NAME(svd_N), - svd_1_1_types + svd_1_1_types, + mnp_min_process_core_dims }, { - "svd_n", - "(m,n)->(n)", - "svd when n<=m", - 4, 1, 1, - FUNC_ARRAY_NAME(svd_N), - svd_1_1_types - }, - { - "svd_m_s", - "(m,n)->(m,m),(m),(m,n)", - "svd when m<=n", - 4, 1, 3, - FUNC_ARRAY_NAME(svd_S), - svd_1_3_types - }, - { - "svd_n_s", - "(m,n)->(m,n),(n),(n,n)", - "svd when m>=n", + "svd_s", + "(m,n)->(m,p),(p),(p,n)", + "svd (full_matrices=False)", 4, 1, 3, FUNC_ARRAY_NAME(svd_S), - svd_1_3_types + svd_1_3_types, + mnp_min_process_core_dims }, { - "svd_m_f", - "(m,n)->(m,m),(m),(n,n)", - "svd when m<=n", + "svd_f", + "(m,n)->(m,m),(p),(n,n)", + "svd (full_matrices=True)", 4, 1, 3, FUNC_ARRAY_NAME(svd_A), - svd_1_3_types - }, - { - "svd_n_f", - "(m,n)->(m,m),(n),(n,n)", - "svd when m>=n", - 4, 1, 3, - FUNC_ARRAY_NAME(svd_A), - svd_1_3_types + svd_1_3_types, + mnp_min_process_core_dims }, { "eig", @@ -4463,7 +4626,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m)->(m),(m,m)\" \n", 3, 1, 2, FUNC_ARRAY_NAME(eig), - eig_types + eig_types, + nullptr }, { "eigvals", @@ -4472,25 +4636,18 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { "Results in a vector of eigenvalues. 
\n", 3, 1, 1, FUNC_ARRAY_NAME(eigvals), - eigvals_types - }, - { - "qr_r_raw_m", - "(m,n)->(m)", - "Compute TAU vector for the last two dimensions \n"\ - "and broadcast to the rest. For m <= n. \n", - 2, 1, 1, - FUNC_ARRAY_NAME(qr_r_raw), - qr_r_raw_types + eigvals_types, + nullptr }, { - "qr_r_raw_n", - "(m,n)->(n)", + "qr_r_raw", + "(m,n)->(p)", "Compute TAU vector for the last two dimensions \n"\ - "and broadcast to the rest. For m > n. \n", + "and broadcast to the rest. \n", 2, 1, 1, FUNC_ARRAY_NAME(qr_r_raw), - qr_r_raw_types + qr_r_raw_types, + mnp_min_process_core_dims }, { "qr_reduced", @@ -4499,7 +4656,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { "and broadcast to the rest. \n", 2, 2, 1, FUNC_ARRAY_NAME(qr_reduced), - qr_reduced_types + qr_reduced_types, + nullptr }, { "qr_complete", @@ -4508,37 +4666,30 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { "and broadcast to the rest. For m > n. \n", 2, 2, 1, FUNC_ARRAY_NAME(qr_complete), - qr_complete_types - }, - { - "lstsq_m", - "(m,n),(m,nrhs),()->(n,nrhs),(nrhs),(),(m)", - "least squares on the last two dimensions and broadcast to the rest. \n"\ - "For m <= n. \n", - 4, 3, 4, - FUNC_ARRAY_NAME(lstsq), - lstsq_types + qr_complete_types, + nullptr }, { - "lstsq_n", - "(m,n),(m,nrhs),()->(n,nrhs),(nrhs),(),(n)", - "least squares on the last two dimensions and broadcast to the rest. \n"\ - "For m >= n, meaning that residuals are produced. 
\n", + "lstsq", + "(m,n),(m,nrhs),()->(n,nrhs),(nrhs),(),(p)", + "least squares on the last two dimensions and broadcast to the rest.", 4, 3, 4, FUNC_ARRAY_NAME(lstsq), - lstsq_types + lstsq_types, + lstsq_process_core_dims } }; static int addUfuncs(PyObject *dictionary) { - PyObject *f; + PyUFuncObject *f; int i; const int gufunc_count = sizeof(gufunc_descriptors)/ sizeof(gufunc_descriptors[0]); for (i = 0; i < gufunc_count; i++) { GUFUNC_DESCRIPTOR_t* d = &gufunc_descriptors[i]; - f = PyUFunc_FromFuncAndDataAndSignature(d->funcs, + f = (PyUFuncObject *) PyUFunc_FromFuncAndDataAndSignature( + d->funcs, array_of_nulls, d->types, d->ntypes, @@ -4552,10 +4703,11 @@ addUfuncs(PyObject *dictionary) { if (f == NULL) { return -1; } + f->process_core_dims_func = d->process_core_dims_func; #if _UMATH_LINALG_DEBUG dump_ufunc_object((PyUFuncObject*) f); #endif - int ret = PyDict_SetItemString(dictionary, d->name, f); + int ret = PyDict_SetItemString(dictionary, d->name, (PyObject *)f); Py_DECREF(f); if (ret < 0) { return -1; @@ -4567,63 +4719,93 @@ addUfuncs(PyObject *dictionary) { /* -------------------------------------------------------------------------- */ - /* Module initialization stuff */ + /* Module initialization and state */ static PyMethodDef UMath_LinAlgMethods[] = { {NULL, NULL, 0, NULL} /* Sentinel */ }; -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - UMATH_LINALG_MODULE_NAME, - NULL, - -1, - UMath_LinAlgMethods, - NULL, - NULL, - NULL, - NULL -}; +static int module_loaded = 0; -PyMODINIT_FUNC PyInit__umath_linalg(void) +static int +_umath_linalg_exec(PyObject *m) { - PyObject *m; PyObject *d; PyObject *version; - m = PyModule_Create(&moduledef); - if (m == NULL) { - return NULL; + // https://docs.python.org/3/howto/isolating-extensions.html#opt-out-limiting-to-one-module-object-per-process + if (module_loaded) { + PyErr_SetString(PyExc_ImportError, + "cannot load module more than once per process"); + return -1; } + module_loaded = 1; - 
import_array(); - import_ufunc(); + if (PyArray_ImportNumPyAPI() < 0) { + return -1; + } + if (PyUFunc_ImportUFuncAPI() < 0) { + return -1; + } d = PyModule_GetDict(m); if (d == NULL) { - return NULL; + return -1; } version = PyUnicode_FromString(umath_linalg_version_string); if (version == NULL) { - return NULL; + return -1; } int ret = PyDict_SetItemString(d, "__version__", version); Py_DECREF(version); if (ret < 0) { - return NULL; + return -1; } /* Load the ufunc operators into the module's namespace */ if (addUfuncs(d) < 0) { - return NULL; + return -1; } +#if PY_VERSION_HEX < 0x30d00b3 && !HAVE_EXTERNAL_LAPACK + lapack_lite_lock = PyThread_allocate_lock(); + if (lapack_lite_lock == NULL) { + PyErr_NoMemory(); + return -1; + } +#endif + #ifdef HAVE_BLAS_ILP64 PyDict_SetItemString(d, "_ilp64", Py_True); #else PyDict_SetItemString(d, "_ilp64", Py_False); #endif - return m; + return 0; +} + +static struct PyModuleDef_Slot _umath_linalg_slots[] = { + {Py_mod_exec, (void*)_umath_linalg_exec}, +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED}, +#endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, +#endif + {0, NULL}, +}; + +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, /* m_base */ + "_umath_linalg", /* m_name */ + NULL, /* m_doc */ + 0, /* m_size */ + UMath_LinAlgMethods, /* m_methods */ + _umath_linalg_slots, /* m_slots */ +}; + +PyMODINIT_FUNC PyInit__umath_linalg(void) { + return PyModuleDef_Init(&moduledef); } diff --git a/numpy/ma/__init__.py b/numpy/ma/__init__.py index 870cc4ef2daa..e2a742e9b64a 100644 --- a/numpy/ma/__init__.py +++ b/numpy/ma/__init__.py @@ -22,8 +22,8 @@ >>> m = np.ma.masked_array(x, np.isnan(x)) >>> m -masked_array(data = [2.0 1.0 3.0 -- 5.0 2.0 3.0 --], - mask = [False False False True False False False True], +masked_array(data=[2.0, 
1.0, 3.0, --, 5.0, 2.0, 3.0, --], + mask=[False, False, False, True, False, False, False, True], fill_value=1e+20) Here, we construct a masked array that suppress all ``NaN`` values. We @@ -39,10 +39,8 @@ .. moduleauthor:: Jarrod Millman """ -from . import core +from . import core, extras from .core import * - -from . import extras from .extras import * __all__ = ['core', 'extras'] @@ -50,5 +48,6 @@ __all__ += extras.__all__ from numpy._pytesttester import PytestTester + test = PytestTester(__name__) del PytestTester diff --git a/numpy/ma/__init__.pyi b/numpy/ma/__init__.pyi index 805842a892e5..176e929a8228 100644 --- a/numpy/ma/__init__.pyi +++ b/numpy/ma/__init__.pyi @@ -1,233 +1,458 @@ -from numpy._pytesttester import PytestTester - -from numpy.ma import extras as extras - -from numpy.ma.core import ( - MAError as MAError, - MaskError as MaskError, - MaskType as MaskType, - MaskedArray as MaskedArray, - abs as abs, - absolute as absolute, - add as add, - all as all, - allclose as allclose, - allequal as allequal, - alltrue as alltrue, - amax as amax, - amin as amin, - angle as angle, - anom as anom, - anomalies as anomalies, - any as any, - append as append, - arange as arange, - arccos as arccos, - arccosh as arccosh, - arcsin as arcsin, - arcsinh as arcsinh, - arctan as arctan, - arctan2 as arctan2, - arctanh as arctanh, - argmax as argmax, - argmin as argmin, - argsort as argsort, - around as around, - array as array, - asanyarray as asanyarray, - asarray as asarray, - bitwise_and as bitwise_and, - bitwise_or as bitwise_or, - bitwise_xor as bitwise_xor, - bool as bool, - ceil as ceil, - choose as choose, - clip as clip, - common_fill_value as common_fill_value, - compress as compress, - compressed as compressed, - concatenate as concatenate, - conjugate as conjugate, - convolve as convolve, - copy as copy, - correlate as correlate, - cos as cos, - cosh as cosh, - count as count, - cumprod as cumprod, - cumsum as cumsum, - default_fill_value as 
default_fill_value, - diag as diag, - diagonal as diagonal, - diff as diff, - divide as divide, - empty as empty, - empty_like as empty_like, - equal as equal, - exp as exp, - expand_dims as expand_dims, - fabs as fabs, - filled as filled, - fix_invalid as fix_invalid, - flatten_mask as flatten_mask, - flatten_structured_array as flatten_structured_array, - floor as floor, - floor_divide as floor_divide, - fmod as fmod, - frombuffer as frombuffer, - fromflex as fromflex, - fromfunction as fromfunction, - getdata as getdata, - getmask as getmask, - getmaskarray as getmaskarray, - greater as greater, - greater_equal as greater_equal, - harden_mask as harden_mask, - hypot as hypot, - identity as identity, - ids as ids, - indices as indices, - inner as inner, - innerproduct as innerproduct, - isMA as isMA, - isMaskedArray as isMaskedArray, - is_mask as is_mask, - is_masked as is_masked, - isarray as isarray, - left_shift as left_shift, - less as less, - less_equal as less_equal, - log as log, - log10 as log10, - log2 as log2, - logical_and as logical_and, - logical_not as logical_not, - logical_or as logical_or, - logical_xor as logical_xor, - make_mask as make_mask, - make_mask_descr as make_mask_descr, - make_mask_none as make_mask_none, - mask_or as mask_or, - masked as masked, - masked_array as masked_array, - masked_equal as masked_equal, - masked_greater as masked_greater, - masked_greater_equal as masked_greater_equal, - masked_inside as masked_inside, - masked_invalid as masked_invalid, - masked_less as masked_less, - masked_less_equal as masked_less_equal, - masked_not_equal as masked_not_equal, - masked_object as masked_object, - masked_outside as masked_outside, - masked_print_option as masked_print_option, - masked_singleton as masked_singleton, - masked_values as masked_values, - masked_where as masked_where, - max as max, - maximum as maximum, - maximum_fill_value as maximum_fill_value, - mean as mean, - min as min, - minimum as minimum, - 
minimum_fill_value as minimum_fill_value, - mod as mod, - multiply as multiply, - mvoid as mvoid, - ndim as ndim, - negative as negative, - nomask as nomask, - nonzero as nonzero, - not_equal as not_equal, - ones as ones, - outer as outer, - outerproduct as outerproduct, - power as power, - prod as prod, - product as product, - ptp as ptp, - put as put, - putmask as putmask, - ravel as ravel, - remainder as remainder, - repeat as repeat, - reshape as reshape, - resize as resize, - right_shift as right_shift, - round as round, - set_fill_value as set_fill_value, - shape as shape, - sin as sin, - sinh as sinh, - size as size, - soften_mask as soften_mask, - sometrue as sometrue, - sort as sort, - sqrt as sqrt, - squeeze as squeeze, - std as std, - subtract as subtract, - sum as sum, - swapaxes as swapaxes, - take as take, - tan as tan, - tanh as tanh, - trace as trace, - transpose as transpose, - true_divide as true_divide, - var as var, - where as where, - zeros as zeros, +from . import core, extras +from .core import ( + MAError, + MaskedArray, + MaskError, + MaskType, + abs, + absolute, + add, + all, + allclose, + allequal, + alltrue, + amax, + amin, + angle, + anom, + anomalies, + any, + append, + arange, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + argmax, + argmin, + argsort, + around, + array, + asanyarray, + asarray, + bitwise_and, + bitwise_or, + bitwise_xor, + bool_, + ceil, + choose, + clip, + common_fill_value, + compress, + compressed, + concatenate, + conjugate, + convolve, + copy, + correlate, + cos, + cosh, + count, + cumprod, + cumsum, + default_fill_value, + diag, + diagonal, + diff, + divide, + empty, + empty_like, + equal, + exp, + expand_dims, + fabs, + filled, + fix_invalid, + flatten_mask, + flatten_structured_array, + floor, + floor_divide, + fmod, + frombuffer, + fromflex, + fromfunction, + getdata, + getmask, + getmaskarray, + greater, + greater_equal, + harden_mask, + hypot, + identity, + ids, + indices, + 
inner, + innerproduct, + is_mask, + is_masked, + isarray, + isMA, + isMaskedArray, + left_shift, + less, + less_equal, + log, + log2, + log10, + logical_and, + logical_not, + logical_or, + logical_xor, + make_mask, + make_mask_descr, + make_mask_none, + mask_or, + masked, + masked_array, + masked_equal, + masked_greater, + masked_greater_equal, + masked_inside, + masked_invalid, + masked_less, + masked_less_equal, + masked_not_equal, + masked_object, + masked_outside, + masked_print_option, + masked_singleton, + masked_values, + masked_where, + max, + maximum, + maximum_fill_value, + mean, + min, + minimum, + minimum_fill_value, + mod, + multiply, + mvoid, + ndim, + negative, + nomask, + nonzero, + not_equal, + ones, + ones_like, + outer, + outerproduct, + power, + prod, + product, + ptp, + put, + putmask, + ravel, + remainder, + repeat, + reshape, + resize, + right_shift, + round, + round_, + set_fill_value, + shape, + sin, + sinh, + size, + soften_mask, + sometrue, + sort, + sqrt, + squeeze, + std, + subtract, + sum, + swapaxes, + take, + tan, + tanh, + trace, + transpose, + true_divide, + var, + where, + zeros, + zeros_like, ) - -from numpy.ma.extras import ( - apply_along_axis as apply_along_axis, - apply_over_axes as apply_over_axes, - atleast_1d as atleast_1d, - atleast_2d as atleast_2d, - atleast_3d as atleast_3d, - average as average, - clump_masked as clump_masked, - clump_unmasked as clump_unmasked, - column_stack as column_stack, - compress_cols as compress_cols, - compress_nd as compress_nd, - compress_rowcols as compress_rowcols, - compress_rows as compress_rows, - count_masked as count_masked, - corrcoef as corrcoef, - cov as cov, - diagflat as diagflat, - dot as dot, - dstack as dstack, - ediff1d as ediff1d, - flatnotmasked_contiguous as flatnotmasked_contiguous, - flatnotmasked_edges as flatnotmasked_edges, - hsplit as hsplit, - hstack as hstack, - isin as isin, - in1d as in1d, - intersect1d as intersect1d, - mask_cols as mask_cols, - mask_rowcols 
as mask_rowcols, - mask_rows as mask_rows, - masked_all as masked_all, - masked_all_like as masked_all_like, - median as median, - mr_ as mr_, - ndenumerate as ndenumerate, - notmasked_contiguous as notmasked_contiguous, - notmasked_edges as notmasked_edges, - polyfit as polyfit, - row_stack as row_stack, - setdiff1d as setdiff1d, - setxor1d as setxor1d, - stack as stack, - unique as unique, - union1d as union1d, - vander as vander, - vstack as vstack, +from .extras import ( + apply_along_axis, + apply_over_axes, + atleast_1d, + atleast_2d, + atleast_3d, + average, + clump_masked, + clump_unmasked, + column_stack, + compress_cols, + compress_nd, + compress_rowcols, + compress_rows, + corrcoef, + count_masked, + cov, + diagflat, + dot, + dstack, + ediff1d, + flatnotmasked_contiguous, + flatnotmasked_edges, + hsplit, + hstack, + in1d, + intersect1d, + isin, + mask_cols, + mask_rowcols, + mask_rows, + masked_all, + masked_all_like, + median, + mr_, + ndenumerate, + notmasked_contiguous, + notmasked_edges, + polyfit, + row_stack, + setdiff1d, + setxor1d, + stack, + union1d, + unique, + vander, + vstack, ) -__all__: list[str] -test: PytestTester +__all__ = [ + "core", + "extras", + "MAError", + "MaskError", + "MaskType", + "MaskedArray", + "abs", + "absolute", + "add", + "all", + "allclose", + "allequal", + "alltrue", + "amax", + "amin", + "angle", + "anom", + "anomalies", + "any", + "append", + "arange", + "arccos", + "arccosh", + "arcsin", + "arcsinh", + "arctan", + "arctan2", + "arctanh", + "argmax", + "argmin", + "argsort", + "around", + "array", + "asanyarray", + "asarray", + "bitwise_and", + "bitwise_or", + "bitwise_xor", + "bool_", + "ceil", + "choose", + "clip", + "common_fill_value", + "compress", + "compressed", + "concatenate", + "conjugate", + "convolve", + "copy", + "correlate", + "cos", + "cosh", + "count", + "cumprod", + "cumsum", + "default_fill_value", + "diag", + "diagonal", + "diff", + "divide", + "empty", + "empty_like", + "equal", + "exp", + 
"expand_dims", + "fabs", + "filled", + "fix_invalid", + "flatten_mask", + "flatten_structured_array", + "floor", + "floor_divide", + "fmod", + "frombuffer", + "fromflex", + "fromfunction", + "getdata", + "getmask", + "getmaskarray", + "greater", + "greater_equal", + "harden_mask", + "hypot", + "identity", + "ids", + "indices", + "inner", + "innerproduct", + "isMA", + "isMaskedArray", + "is_mask", + "is_masked", + "isarray", + "left_shift", + "less", + "less_equal", + "log", + "log10", + "log2", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "make_mask", + "make_mask_descr", + "make_mask_none", + "mask_or", + "masked", + "masked_array", + "masked_equal", + "masked_greater", + "masked_greater_equal", + "masked_inside", + "masked_invalid", + "masked_less", + "masked_less_equal", + "masked_not_equal", + "masked_object", + "masked_outside", + "masked_print_option", + "masked_singleton", + "masked_values", + "masked_where", + "max", + "maximum", + "maximum_fill_value", + "mean", + "min", + "minimum", + "minimum_fill_value", + "mod", + "multiply", + "mvoid", + "ndim", + "negative", + "nomask", + "nonzero", + "not_equal", + "ones", + "ones_like", + "outer", + "outerproduct", + "power", + "prod", + "product", + "ptp", + "put", + "putmask", + "ravel", + "remainder", + "repeat", + "reshape", + "resize", + "right_shift", + "round", + "round_", + "set_fill_value", + "shape", + "sin", + "sinh", + "size", + "soften_mask", + "sometrue", + "sort", + "sqrt", + "squeeze", + "std", + "subtract", + "sum", + "swapaxes", + "take", + "tan", + "tanh", + "trace", + "transpose", + "true_divide", + "var", + "where", + "zeros", + "zeros_like", + "apply_along_axis", + "apply_over_axes", + "atleast_1d", + "atleast_2d", + "atleast_3d", + "average", + "clump_masked", + "clump_unmasked", + "column_stack", + "compress_cols", + "compress_nd", + "compress_rowcols", + "compress_rows", + "count_masked", + "corrcoef", + "cov", + "diagflat", + "dot", + "dstack", + "ediff1d", + 
"flatnotmasked_contiguous", + "flatnotmasked_edges", + "hsplit", + "hstack", + "isin", + "in1d", + "intersect1d", + "mask_cols", + "mask_rowcols", + "mask_rows", + "masked_all", + "masked_all_like", + "median", + "mr_", + "ndenumerate", + "notmasked_contiguous", + "notmasked_edges", + "polyfit", + "row_stack", + "setdiff1d", + "setxor1d", + "stack", + "unique", + "union1d", + "vander", + "vstack", +] diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 453c63614d2e..6e3b3a54c7c8 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -19,27 +19,35 @@ .. moduleauthor:: Pierre Gerard-Marchant """ -# pylint: disable-msg=E1002 import builtins +import datetime as dt +import functools import inspect import operator -import warnings -import textwrap import re -from functools import reduce -from typing import Dict +import textwrap +import warnings import numpy as np -import numpy._core.umath as umath import numpy._core.numerictypes as ntypes +import numpy._core.umath as umath +from numpy import ( + _NoValue, + amax, + amin, + angle, + array as narray, + bool_, + expand_dims, + finfo, # noqa: F401 + iinfo, # noqa: F401 + iscomplexobj, + ndarray, +) from numpy._core import multiarray as mu -from numpy import ndarray, amax, amin, iscomplexobj, bool_, _NoValue, angle -from numpy import array as narray, expand_dims, iinfo, finfo from numpy._core.numeric import normalize_axis_tuple -from numpy._utils._inspect import getargspec, formatargspec from numpy._utils import set_module - __all__ = [ 'MAError', 'MaskError', 'MaskType', 'MaskedArray', 'abs', 'absolute', 'add', 'all', 'allclose', 'allequal', 'alltrue', 'amax', 'amin', @@ -121,23 +129,11 @@ def doc_note(initialdoc, note): return initialdoc notesplit = re.split(r'\n\s*?Notes\n\s*?-----', inspect.cleandoc(initialdoc)) - notedoc = "\n\nNotes\n-----\n%s\n" % inspect.cleandoc(note) + notedoc = f"\n\nNotes\n-----\n{inspect.cleandoc(note)}\n" return ''.join(notesplit[:1] + [notedoc] + notesplit[1:]) -def 
get_object_signature(obj): - """ - Get the signature from obj - - """ - try: - sig = formatargspec(*getargspec(obj)) - except TypeError: - sig = '' - return sig - - ############################################################################### # Exceptions # ############################################################################### @@ -173,7 +169,8 @@ class MaskError(MAError): 'S': b'N/A', 'u': 999999, 'V': b'???', - 'U': 'N/A' + 'U': 'N/A', + 'T': 'N/A' } # Add datetime64 and timedelta64 types @@ -185,15 +182,15 @@ class MaskError(MAError): float_types_list = [np.half, np.single, np.double, np.longdouble, np.csingle, np.cdouble, np.clongdouble] -_minvals: Dict[type, int] = {} -_maxvals: Dict[type, int] = {} +_minvals: dict[type, int] = {} +_maxvals: dict[type, int] = {} for sctype in ntypes.sctypeDict.values(): scalar_dtype = np.dtype(sctype) if scalar_dtype.kind in "Mm": info = np.iinfo(np.int64) - min_val, max_val = info.min, info.max + min_val, max_val = info.min + 1, info.max elif np.issubdtype(scalar_dtype, np.integer): info = np.iinfo(sctype) min_val, max_val = info.min, info.max @@ -213,7 +210,7 @@ class MaskError(MAError): max_filler.update([(k, complex(-np.inf, -np.inf)) for k in float_types_list[-3:]]) min_filler = _maxvals -min_filler.update([(k, +np.inf) for k in float_types_list[:4]]) +min_filler.update([(k, +np.inf) for k in float_types_list[:4]]) min_filler.update([(k, complex(+np.inf, +np.inf)) for k in float_types_list[-3:]]) del float_types_list @@ -226,11 +223,22 @@ def _recursive_fill_value(dtype, f): # We wrap into `array` here, which ensures we use NumPy cast rules # for integer casts, this allows the use of 99999 as a fill value # for int8. - # TODO: This is probably a mess, but should best preserve behavior? 
- vals = tuple( - np.array(_recursive_fill_value(dtype[name], f)) - for name in dtype.names) - return np.array(vals, dtype=dtype)[()] # decay to void scalar from 0d + vals = [] + for name in dtype.names: + field_dtype = dtype[name] + val = _recursive_fill_value(field_dtype, f) + if np.issubdtype(field_dtype, np.datetime64): + if isinstance(val, dt.date): + val = np.datetime64(val) + val = np.array(val) + elif isinstance(val, (int, np.integer)): + val = np.array(val).astype(field_dtype) + else: + val = np.array(val) + else: + val = np.array(val) + vals.append(val) + return np.array(tuple(vals), dtype=dtype)[()] # decay to void scalar from 0d elif dtype.subdtype: subtype, shape = dtype.subdtype subval = _recursive_fill_value(subtype, f) @@ -256,16 +264,17 @@ def default_fill_value(obj): The default filling value depends on the datatype of the input array or the type of the input scalar: - ======== ======== - datatype default - ======== ======== - bool True - int 999999 - float 1.e20 - complex 1.e20+0j - object '?' - string 'N/A' - ======== ======== + =========== ======== + datatype default + =========== ======== + bool True + int 999999 + float 1.e20 + complex 1.e20+0j + object '?' + string 'N/A' + StringDType 'N/A' + =========== ======== For structured types, a structured scalar is returned, with each field the default fill value for its type. 
@@ -286,6 +295,7 @@ def default_fill_value(obj): Examples -------- + >>> import numpy as np >>> np.ma.default_fill_value(1) 999999 >>> np.ma.default_fill_value(np.array([1.1, 2., np.pi])) @@ -348,6 +358,7 @@ def minimum_fill_value(obj): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.int8() >>> ma.minimum_fill_value(a) @@ -399,6 +410,7 @@ def maximum_fill_value(obj): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.int8() >>> ma.maximum_fill_value(a) @@ -466,6 +478,16 @@ def _check_fill_value(fill_value, ndtype): ndtype = np.dtype(ndtype) if fill_value is None: fill_value = default_fill_value(ndtype) + # TODO: It seems better to always store a valid fill_value, the oddity + # about is that `_fill_value = None` would behave even more + # different then. + # (e.g. this allows arr_uint8.astype(int64) to have the default + # fill value again...) + # The one thing that changed in 2.0/2.1 around cast safety is that the + # default `int(99...)` is not a same-kind cast anymore, so if we + # have a uint, use the default uint. + if ndtype.kind == "u": + fill_value = np.uint(fill_value) elif ndtype.names is not None: if isinstance(fill_value, (ndarray, np.void)): try: @@ -477,22 +499,21 @@ def _check_fill_value(fill_value, ndtype): fill_value = np.asarray(fill_value, dtype=object) fill_value = np.array(_recursive_set_fill_value(fill_value, ndtype), dtype=ndtype) + elif isinstance(fill_value, str) and (ndtype.char not in 'OSTVU'): + # Note this check doesn't work if fill_value is not a scalar + err_msg = "Cannot set fill value of string with array of dtype %s" + raise TypeError(err_msg % ndtype) else: - if isinstance(fill_value, str) and (ndtype.char not in 'OSVU'): - # Note this check doesn't work if fill_value is not a scalar - err_msg = "Cannot set fill value of string with array of dtype %s" - raise TypeError(err_msg % ndtype) - else: - # In case we want to convert 1e20 to int. 
- # Also in case of converting string arrays. - try: - fill_value = np.asarray(fill_value, dtype=ndtype) - except (OverflowError, ValueError) as e: - # Raise TypeError instead of OverflowError or ValueError. - # OverflowError is seldom used, and the real problem here is - # that the passed fill_value is not compatible with the ndtype. - err_msg = "Cannot convert fill_value %s to dtype %s" - raise TypeError(err_msg % (fill_value, ndtype)) from e + # In case we want to convert 1e20 to int. + # Also in case of converting string arrays. + try: + fill_value = np.asarray(fill_value, dtype=ndtype) + except (OverflowError, ValueError) as e: + # Raise TypeError instead of OverflowError or ValueError. + # OverflowError is seldom used, and the real problem here is + # that the passed fill_value is not compatible with the ndtype. + err_msg = "Cannot convert fill_value %s to dtype %s" + raise TypeError(err_msg % (fill_value, ndtype)) from e return np.array(fill_value) @@ -525,6 +546,7 @@ def set_fill_value(a, fill_value): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(5) >>> a @@ -558,7 +580,6 @@ def set_fill_value(a, fill_value): """ if isinstance(a, MaskedArray): a.set_fill_value(fill_value) - return def get_fill_value(a): @@ -593,6 +614,7 @@ def common_fill_value(a, b): Examples -------- + >>> import numpy as np >>> x = np.ma.array([0, 1.], fill_value=3) >>> y = np.ma.array([0, 1.], fill_value=3) >>> np.ma.common_fill_value(x, y) @@ -637,6 +659,7 @@ def filled(a, fill_value=None): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], ... [1, 0, 0], @@ -699,12 +722,12 @@ def getdata(a, subok=True): Return the data of a masked array as an ndarray. Return the data of `a` (if any) as an ndarray if `a` is a ``MaskedArray``, - else return `a` as a ndarray or subclass (depending on `subok`) if not. 
+ else return `a` as an ndarray or subclass (depending on `subok`) if not. Parameters ---------- a : array_like - Input ``MaskedArray``, alternatively a ndarray or a subclass thereof. + Input ``MaskedArray``, alternatively an ndarray or a subclass thereof. subok : bool Whether to force the output to be a `pure` ndarray (False) or to return a subclass of ndarray if appropriate (True, default). @@ -716,6 +739,7 @@ def getdata(a, subok=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = ma.masked_equal([[1,2],[3,4]], 2) >>> a @@ -779,6 +803,7 @@ def fix_invalid(a, mask=nomask, copy=True, fill_value=None): Examples -------- + >>> import numpy as np >>> x = np.ma.array([1., -1, np.nan, np.inf], mask=[1] + [0]*3) >>> x masked_array(data=[--, -1.0, nan, inf], @@ -921,6 +946,7 @@ def __init__(self, ufunc): self.f = ufunc self.__doc__ = ufunc.__doc__ self.__name__ = ufunc.__name__ + self.__qualname__ = ufunc.__qualname__ def __str__(self): return f"Masked version of {self.f}" @@ -1091,8 +1117,7 @@ def reduce(self, target, axis=0, dtype=None): if t.shape == (): t = t.reshape(1) if m is not nomask: - m = make_mask(m, copy=True) - m.shape = (1,) + m = make_mask(m, copy=True).reshape((1,)) if m is nomask: tr = self.f.reduce(t, axis) @@ -1147,7 +1172,6 @@ def accumulate(self, target, axis=0): return masked_result - class _DomainedBinaryOperation(_MaskedUFunc): """ Define binary operations that have a domain, like divide. @@ -1291,15 +1315,13 @@ def __call__(self, a, b, *args, **kwargs): # Domained binary ufuncs divide = _DomainedBinaryOperation(umath.divide, _DomainSafeDivide(), 0, 1) -true_divide = _DomainedBinaryOperation(umath.true_divide, - _DomainSafeDivide(), 0, 1) +true_divide = divide # Just an alias for divide. 
floor_divide = _DomainedBinaryOperation(umath.floor_divide, _DomainSafeDivide(), 0, 1) remainder = _DomainedBinaryOperation(umath.remainder, _DomainSafeDivide(), 0, 1) fmod = _DomainedBinaryOperation(umath.fmod, _DomainSafeDivide(), 0, 1) -mod = _DomainedBinaryOperation(umath.mod, _DomainSafeDivide(), 0, 1) - +mod = remainder ############################################################################### # Mask creation functions # @@ -1371,6 +1393,7 @@ def make_mask_descr(ndtype): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> dtype = np.dtype({'names':['foo', 'bar'], ... 'formats':[np.float32, np.int64]}) @@ -1405,6 +1428,7 @@ def getmask(a): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = ma.masked_equal([[1,2],[3,4]], 2) >>> a @@ -1467,6 +1491,7 @@ def getmaskarray(arr): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = ma.masked_equal([[1,2],[3,4]], 2) >>> a @@ -1524,6 +1549,7 @@ def is_mask(m): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> m = ma.masked_equal([0, 1, 0, 2, 3], 0) >>> m @@ -1608,6 +1634,7 @@ def make_mask(m, copy=False, shrink=True, dtype=MaskType): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> m = [True, False, True, True] >>> ma.make_mask(m) @@ -1696,6 +1723,7 @@ def make_mask_none(newshape, dtype=None): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> ma.make_mask_none((3,)) array([False, False, False]) @@ -1758,6 +1786,7 @@ def mask_or(m1, m2, copy=False, shrink=True): Examples -------- + >>> import numpy as np >>> m1 = np.ma.make_mask([0, 1, 1, 0]) >>> m2 = np.ma.make_mask([1, 0, 0, 0]) >>> np.ma.mask_or(m1, m2) @@ -1772,10 +1801,10 @@ def mask_or(m1, m2, copy=False, shrink=True): dtype = getattr(m1, 'dtype', MaskType) return make_mask(m1, copy=copy, shrink=shrink, dtype=dtype) if m1 is m2 and is_mask(m1): - return m1 + return _shrink_mask(m1) if shrink else m1 (dtype1, dtype2) = 
(getattr(m1, 'dtype', None), getattr(m2, 'dtype', None)) if dtype1 != dtype2: - raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2)) + raise ValueError(f"Incompatible dtypes '{dtype1}'<>'{dtype2}'") if dtype1.names is not None: # Allocate an output mask array with the properly broadcast shape. newmask = np.empty(np.broadcast(m1, m2).shape, dtype1) @@ -1801,6 +1830,7 @@ def flatten_mask(mask): Examples -------- + >>> import numpy as np >>> mask = np.array([0, 0, 1]) >>> np.ma.flatten_mask(mask) array([False, False, True]) @@ -1837,7 +1867,7 @@ def _flatsequence(sequence): mask = np.asarray(mask) flattened = _flatsequence(_flatmask(mask)) - return np.array([_ for _ in flattened], dtype=bool) + return np.array(list(flattened), dtype=bool) def _check_mask_axis(mask, axis, keepdims=np._NoValue): @@ -1890,6 +1920,7 @@ def masked_where(condition, a, copy=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(4) >>> a @@ -1987,6 +2018,7 @@ def masked_greater(x, value, copy=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(4) >>> a @@ -2013,6 +2045,7 @@ def masked_greater_equal(x, value, copy=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(4) >>> a @@ -2039,6 +2072,7 @@ def masked_less(x, value, copy=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(4) >>> a @@ -2065,6 +2099,7 @@ def masked_less_equal(x, value, copy=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(4) >>> a @@ -2091,6 +2126,7 @@ def masked_not_equal(x, value, copy=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(4) >>> a @@ -2121,6 +2157,7 @@ def masked_equal(x, value, copy=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(4) >>> a @@ -2154,6 +2191,7 @@ def masked_inside(x, v1, v2, 
copy=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] >>> ma.masked_inside(x, -0.3, 0.3) @@ -2194,6 +2232,7 @@ def masked_outside(x, v1, v2, copy=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] >>> ma.masked_outside(x, -0.3, 0.3) @@ -2247,8 +2286,9 @@ def masked_object(x, value, copy=True, shrink=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma - >>> food = np.array(['green_eggs', 'ham'], dtype=object) + >>> food = np.array(['green_eggs', 'ham'], dtype=np.object_) >>> # don't eat spoiled food >>> eat = ma.masked_object(food, 'green_eggs') >>> eat @@ -2257,7 +2297,7 @@ def masked_object(x, value, copy=True, shrink=True): fill_value='green_eggs', dtype=object) >>> # plain ol` ham is boring - >>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=object) + >>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=np.object_) >>> eat = ma.masked_object(fresh_food, 'green_eggs') >>> eat masked_array(data=['cheese', 'ham', 'pineapple'], @@ -2323,6 +2363,7 @@ def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = np.array([1, 1.1, 2, 1.1, 3]) >>> ma.masked_values(x, 1.1) @@ -2371,8 +2412,9 @@ def masked_invalid(a, copy=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma - >>> a = np.arange(5, dtype=float) + >>> a = np.arange(5, dtype=np.float64) >>> a[2] = np.nan >>> a[3] = np.inf >>> a @@ -2443,6 +2485,7 @@ def __str__(self): __repr__ = __str__ + # if you single index into a masked location you get this object. 
masked_print_option = _MaskedPrintOption('--') @@ -2462,18 +2505,18 @@ def _recursive_printoption(result, mask, printopt): _recursive_printoption(curdata, curmask, printopt) else: np.copyto(result, printopt, where=mask) - return + # For better or worse, these end in a newline -_legacy_print_templates = dict( - long_std=textwrap.dedent("""\ +_legacy_print_templates = { + 'long_std': textwrap.dedent("""\ masked_%(name)s(data = %(data)s, %(nlen)s mask = %(mask)s, %(nlen)s fill_value = %(fill)s) """), - long_flx=textwrap.dedent("""\ + 'long_flx': textwrap.dedent("""\ masked_%(name)s(data = %(data)s, %(nlen)s mask = @@ -2481,18 +2524,18 @@ def _recursive_printoption(result, mask, printopt): %(nlen)s fill_value = %(fill)s, %(nlen)s dtype = %(dtype)s) """), - short_std=textwrap.dedent("""\ + 'short_std': textwrap.dedent("""\ masked_%(name)s(data = %(data)s, %(nlen)s mask = %(mask)s, %(nlen)s fill_value = %(fill)s) """), - short_flx=textwrap.dedent("""\ + 'short_flx': textwrap.dedent("""\ masked_%(name)s(data = %(data)s, %(nlen)s mask = %(mask)s, %(nlen)s fill_value = %(fill)s, %(nlen)s dtype = %(dtype)s) """) -) +} ############################################################################### # MaskedArray class # @@ -2532,6 +2575,7 @@ def flatten_structured_array(a): Examples -------- + >>> import numpy as np >>> ndtype = [('a', int), ('b', float)] >>> a = np.array([(1, 1), (2, 2)], dtype=ndtype) >>> np.ma.flatten_structured_array(a) @@ -2546,7 +2590,7 @@ def flatten_sequence(iterable): """ for elm in iter(iterable): - if hasattr(elm, '__iter__'): + if hasattr(elm, "__iter__") and not isinstance(elm, (str, bytes)): yield from flatten_sequence(elm) else: yield elm @@ -2564,7 +2608,7 @@ def flatten_sequence(iterable): if len(inishape) > 1: newshape = list(out.shape) newshape[0] = inishape - out.shape = tuple(flatten_sequence(newshape)) + out = out.reshape(tuple(flatten_sequence(newshape))) return out @@ -2637,6 +2681,7 @@ class MaskedIterator: Examples -------- + >>> 
import numpy as np >>> x = np.ma.array(arange(6).reshape(2, 3)) >>> fl = x.flat >>> type(fl) @@ -2679,8 +2724,7 @@ def __getitem__(self, indx): _mask = self.maskiter.__getitem__(indx) if isinstance(_mask, ndarray): # set shape to match that of data; this is needed for matrices - _mask.shape = result.shape - result._mask = _mask + result._mask = _mask.reshape(result.shape) elif isinstance(_mask, np.void): return mvoid(result, mask=_mask, hardmask=self.ma._hardmask) elif _mask: # Just a scalar, masked @@ -2699,6 +2743,7 @@ def __next__(self): Examples -------- + >>> import numpy as np >>> x = np.ma.array([3, 2], mask=[0, 1]) >>> fl = x.flat >>> next(fl) @@ -2777,6 +2822,7 @@ class MaskedArray(ndarray): Examples -------- + >>> import numpy as np The ``mask`` can be initialized with an array of boolean values with the same shape as ``data``. @@ -2905,7 +2951,7 @@ def __new__(cls, data=None, mask=nomask, dtype=None, copy=False, # the shapes were the same, so we can at least # avoid that path if data._mask.shape != data.shape: - data._mask.shape = data.shape + data._mask = data._mask.reshape(data.shape) else: # Case 2. : With a mask in input. # If mask is boolean, create an array of True or False @@ -2935,33 +2981,32 @@ def __new__(cls, data=None, mask=nomask, dtype=None, copy=False, elif nm == nd: mask = np.reshape(mask, _data.shape) else: - msg = "Mask and data not compatible: data size is %i, " + \ - "mask size is %i." 
- raise MaskError(msg % (nd, nm)) + msg = (f"Mask and data not compatible:" + f" data size is {nd}, mask size is {nm}.") + raise MaskError(msg) copy = True # Set the mask to the new value if _data._mask is nomask: _data._mask = mask _data._sharedmask = not copy + elif not keep_mask: + _data._mask = mask + _data._sharedmask = not copy else: - if not keep_mask: - _data._mask = mask - _data._sharedmask = not copy + if _data.dtype.names is not None: + def _recursive_or(a, b): + "do a|=b on each field of a, recursively" + for name in a.dtype.names: + (af, bf) = (a[name], b[name]) + if af.dtype.names is not None: + _recursive_or(af, bf) + else: + af |= bf + + _recursive_or(_data._mask, mask) else: - if _data.dtype.names is not None: - def _recursive_or(a, b): - "do a|=b on each field of a, recursively" - for name in a.dtype.names: - (af, bf) = (a[name], b[name]) - if af.dtype.names is not None: - _recursive_or(af, bf) - else: - af |= bf - - _recursive_or(_data._mask, mask) - else: - _data._mask = np.logical_or(mask, _data._mask) - _data._sharedmask = False + _data._mask = np.logical_or(mask, _data._mask) + _data._sharedmask = False # Update fill_value. if fill_value is None: @@ -2977,7 +3022,6 @@ def _recursive_or(a, b): _data._baseclass = _baseclass return _data - def _update_from(self, obj): """ Copies some attributes of obj to self. 
@@ -2993,16 +3037,15 @@ def _update_from(self, obj): _optinfo.update(getattr(obj, '_basedict', {})) if not isinstance(obj, MaskedArray): _optinfo.update(getattr(obj, '__dict__', {})) - _dict = dict(_fill_value=getattr(obj, '_fill_value', None), - _hardmask=getattr(obj, '_hardmask', False), - _sharedmask=getattr(obj, '_sharedmask', False), - _isfield=getattr(obj, '_isfield', False), - _baseclass=getattr(obj, '_baseclass', _baseclass), - _optinfo=_optinfo, - _basedict=_optinfo) + _dict = {'_fill_value': getattr(obj, '_fill_value', None), + '_hardmask': getattr(obj, '_hardmask', False), + '_sharedmask': getattr(obj, '_sharedmask', False), + '_isfield': getattr(obj, '_isfield', False), + '_baseclass': getattr(obj, '_baseclass', _baseclass), + '_optinfo': _optinfo, + '_basedict': _optinfo} self.__dict__.update(_dict) self.__dict__.update(_optinfo) - return def __array_finalize__(self, obj): """ @@ -3083,7 +3126,7 @@ def __array_finalize__(self, obj): # Finalize the mask if self._mask is not nomask: try: - self._mask.shape = self.shape + self._mask = self._mask.reshape(self.shape) except ValueError: self._mask = nomask except (TypeError, AttributeError): @@ -3115,13 +3158,17 @@ def __array_wrap__(self, obj, context=None, return_scalar=False): func, args, out_i = context # args sometimes contains outputs (gh-10459), which we don't want input_args = args[:func.nin] - m = reduce(mask_or, [getmaskarray(arg) for arg in input_args]) + m = functools.reduce(mask_or, [getmaskarray(arg) for arg in input_args]) # Get the domain mask - domain = ufunc_domain.get(func, None) + domain = ufunc_domain.get(func) if domain is not None: - # Take the domain, and make sure it's a ndarray + # Take the domain, and make sure it's an ndarray with np.errstate(divide='ignore', invalid='ignore'): - d = filled(domain(*input_args), True) + # The result may be masked for two (unary) domains. + # That can't really be right as some domains drop + # the mask and some don't behaving differently here. 
+ d = domain(*input_args).astype(bool, copy=False) + d = filled(d, True) if d.any(): # Fill the result where the domain is wrong @@ -3319,11 +3366,10 @@ def _scalar_heuristic(arr, elem): return dout # Just a scalar + elif mout: + return masked else: - if mout: - return masked - else: - return dout + return dout else: # Force dout to MA dout = dout.view(type(self)) @@ -3455,7 +3501,7 @@ def dtype(self, dtype): # Try to reset the shape of the mask (if we don't have a void). # This raises a ValueError if the dtype change won't work. try: - self._mask.shape = self.shape + self._mask = self._mask.reshape(self.shape) except (AttributeError, TypeError): pass @@ -3469,7 +3515,7 @@ def shape(self, shape): # Cannot use self._mask, since it may not (yet) exist when a # masked matrix sets the shape. if getmask(self) is not nomask: - self._mask.shape = self.shape + self._mask = self._mask.reshape(self.shape) def __setmask__(self, mask, copy=False): """ @@ -3516,7 +3562,7 @@ def __setmask__(self, mask, copy=False): mask = mask.astype(mdtype) # Mask is a sequence else: - # Make sure the new mask is a ndarray with the proper dtype + # Make sure the new mask is an ndarray with the proper dtype try: copy = None if not copy else True mask = np.array(mask, copy=copy, dtype=mdtype) @@ -3538,7 +3584,7 @@ def __setmask__(self, mask, copy=False): current_mask.flat = mask # Reshape if needed if current_mask.shape: - current_mask.shape = self.shape + self._mask = current_mask.reshape(self.shape) return _set_mask = __setmask__ @@ -3561,7 +3607,7 @@ def mask(self, value): def recordmask(self): """ Get or set the mask of the array if it has no named fields. 
For - structured arrays, returns a ndarray of booleans where entries are + structured arrays, returns an ndarray of booleans where entries are ``True`` if **all** the fields are masked, ``False`` otherwise: >>> x = np.ma.array([(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)], @@ -3632,6 +3678,7 @@ def hardmask(self): Examples -------- + >>> import numpy as np >>> x = np.arange(10) >>> m = np.ma.masked_array(x, x>5) >>> assert not m.hardmask @@ -3693,10 +3740,12 @@ def shrink_mask(self): Returns ------- - None + result : MaskedArray + A :class:`~ma.MaskedArray` object. Examples -------- + >>> import numpy as np >>> x = np.ma.array([[1,2 ], [3, 4]], mask=[0]*4) >>> x.mask array([[False, False], @@ -3757,6 +3806,7 @@ def fill_value(self): Examples -------- + >>> import numpy as np >>> for dt in [np.int32, np.int64, np.float64, np.complex128]: ... np.ma.array([0, 1], dtype=dt).get_fill_value() ... @@ -3841,6 +3891,7 @@ def filled(self, fill_value=None): Examples -------- + >>> import numpy as np >>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999) >>> x.filled() array([ 1, 2, -999, 4, -999]) @@ -3908,6 +3959,7 @@ def compressed(self): Examples -------- + >>> import numpy as np >>> x = np.ma.array(np.arange(5), mask=[0]*2 + [1]*3) >>> x.compressed() array([0, 1]) @@ -3961,6 +4013,7 @@ def compress(self, condition, axis=None, out=None): Examples -------- + >>> import numpy as np >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> x masked_array( @@ -4044,18 +4097,17 @@ def __repr__(self): else: name = self._baseclass.__name__ - # 2016-11-19: Demoted to legacy format if np._core.arrayprint._get_legacy_print_mode() <= 113: is_long = self.ndim > 1 - parameters = dict( - name=name, - nlen=" " * len(name), - data=str(self), - mask=str(self._mask), - fill=str(self.fill_value), - dtype=str(self.dtype) - ) + parameters = { + 'name': name, + 'nlen': " " * len(name), + 'data': str(self), + 'mask': str(self._mask), + 'fill': str(self.fill_value), + 
'dtype': str(self.dtype) + } is_structured = bool(self.dtype.names) key = '{}_{}'.format( 'long' if is_long else 'short', @@ -4092,7 +4144,7 @@ def __repr__(self): prefix = '' # absorbed into the first indent else: # each key on its own line, indented by two spaces - indents = {k: ' ' * min_indent for k in keys} + indents = dict.fromkeys(keys, ' ' * min_indent) prefix = prefix + '\n' # first key on the next line # format the field values @@ -4109,7 +4161,7 @@ def __repr__(self): suffix=',') if self._fill_value is None: - self.fill_value # initialize fill_value + self.fill_value # initialize fill_value # noqa: B018 if (self._fill_value.dtype.kind in ("S", "U") and self.dtype.kind == self._fill_value.dtype.kind): @@ -4128,7 +4180,7 @@ def __repr__(self): # join keys with values and indentations result = ',\n'.join( - '{}{}={}'.format(indents[k], k, reprs[k]) + f'{indents[k]}{k}={reprs[k]}' for k in keys ) return prefix + result + ')' @@ -4309,15 +4361,6 @@ def __rmul__(self, other): # we get here from `other * self`. return multiply(other, self) - def __div__(self, other): - """ - Divide other into self, and return a new masked array. - - """ - if self._delegate_binop(other): - return NotImplemented - return divide(self, other) - def __truediv__(self, other): """ Divide other into self, and return a new masked array. @@ -4376,9 +4419,8 @@ def __iadd__(self, other): if m is not nomask and m.any(): self._mask = make_mask_none(self.shape, self.dtype) self._mask += m - else: - if m is not nomask: - self._mask += m + elif m is not nomask: + self._mask += m other_data = getdata(other) other_data = np.where(self._mask, other_data.dtype.type(0), other_data) self._data.__iadd__(other_data) @@ -4418,25 +4460,6 @@ def __imul__(self, other): self._data.__imul__(other_data) return self - def __idiv__(self, other): - """ - Divide self by other in-place. 
- - """ - other_data = getdata(other) - dom_mask = _DomainSafeDivide().__call__(self._data, other_data) - other_mask = getmask(other) - new_mask = mask_or(other_mask, dom_mask) - # The following 4 lines control the domain filling - if dom_mask.any(): - (_, fval) = ufunc_fills[np.divide] - other_data = np.where( - dom_mask, other_data.dtype.type(fval), other_data) - self._mask |= new_mask - other_data = np.where(self._mask, other_data.dtype.type(1), other_data) - self._data.__idiv__(other_data) - return self - def __ifloordiv__(self, other): """ Floor divide self by other in-place. @@ -4534,6 +4557,7 @@ def imag(self): Examples -------- + >>> import numpy as np >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) >>> x.imag masked_array(data=[1.0, --, 1.6], @@ -4561,6 +4585,7 @@ def real(self): Examples -------- + >>> import numpy as np >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) >>> x.real masked_array(data=[1.0, --, 3.45], @@ -4586,9 +4611,6 @@ def count(self, axis=None, keepdims=np._NoValue): The default, None, performs the count over all the dimensions of the input array. `axis` may be negative, in which case it counts from the last to the first axis. - - .. versionadded:: 1.10.0 - If this is a tuple of ints, the count is performed on multiple axes, instead of a single axis or all the axes as before. 
keepdims : bool, optional @@ -4649,7 +4671,7 @@ def count(self, axis=None, keepdims=np._NoValue): raise np.exceptions.AxisError(axis=axis, ndim=self.ndim) return 1 elif axis is None: - if kwargs.get('keepdims', False): + if kwargs.get('keepdims'): return np.array(self.size, dtype=np.intp, ndmin=self.ndim) return self.size @@ -4658,7 +4680,7 @@ def count(self, axis=None, keepdims=np._NoValue): for ax in axes: items *= self.shape[ax] - if kwargs.get('keepdims', False): + if kwargs.get('keepdims'): out_dims = list(self.shape) for a in axes: out_dims[a] = 1 @@ -4703,6 +4725,7 @@ def ravel(self, order='C'): Examples -------- + >>> import numpy as np >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> x masked_array( @@ -4735,7 +4758,6 @@ def ravel(self, order='C'): r._mask = nomask return r - def reshape(self, *s, **kwargs): """ Give a new shape to the array without changing its data. @@ -4767,11 +4789,13 @@ def reshape(self, *s, **kwargs): Notes ----- - The reshaping operation cannot guarantee that a copy will not be made, - to modify the shape in place, use ``a.shape = s`` + By default, the reshaping operation will make a copy if a view + with different strides is not possible. To ensure a view, + pass ``copy=False``. 
Examples -------- + >>> import numpy as np >>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1]) >>> x masked_array( @@ -4794,7 +4818,6 @@ def reshape(self, *s, **kwargs): fill_value=999999) """ - kwargs.update(order=kwargs.get('order', 'C')) result = self._data.reshape(*s, **kwargs).view(type(self)) result._update_from(self) mask = self._mask @@ -4847,6 +4870,7 @@ def put(self, indices, values, mode='raise'): Examples -------- + >>> import numpy as np >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> x masked_array( @@ -4915,6 +4939,7 @@ def ids(self): Examples -------- + >>> import numpy as np >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1]) >>> x.ids() (166670640, 166659832) # may vary @@ -4941,6 +4966,7 @@ def iscontiguous(self): Examples -------- + >>> import numpy as np >>> x = np.ma.array([1, 2, 3]) >>> x.iscontiguous() True @@ -4975,6 +5001,7 @@ def all(self, axis=None, out=None, keepdims=np._NoValue): Examples -------- + >>> import numpy as np >>> np.ma.array([1,2,3]).all() True >>> a = np.ma.array([1,2,3], mask=True) @@ -5069,6 +5096,7 @@ def nonzero(self): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = ma.array(np.eye(3)) >>> x @@ -5130,7 +5158,7 @@ def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None): """ (this docstring should be overwritten) """ - #!!!: implement out + test! + # !!!: implement out + test! m = self._mask if m is nomask: result = super().trace(offset=offset, axis1=axis1, axis2=axis2, @@ -5151,8 +5179,6 @@ def dot(self, b, out=None, strict=False): recommended that the optional arguments be treated as keyword only. At some point that may be mandatory. - .. versionadded:: 1.10.0 - Parameters ---------- b : masked_array_like @@ -5171,8 +5197,6 @@ def dot(self, b, out=None, strict=False): means that if a masked value appears in a row or column, the whole row or column is considered masked. - .. 
versionadded:: 1.10.2 - See Also -------- numpy.ma.dot : equivalent function @@ -5195,6 +5219,7 @@ def sum(self, axis=None, dtype=None, out=None, keepdims=np._NoValue): Examples -------- + >>> import numpy as np >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> x masked_array( @@ -5266,6 +5291,7 @@ def cumsum(self, axis=None, dtype=None, out=None): Examples -------- + >>> import numpy as np >>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0]) >>> marr.cumsum() masked_array(data=[0, 1, 3, --, --, --, 9, 16, 24, 33], @@ -5373,6 +5399,7 @@ def mean(self, axis=None, dtype=None, out=None, keepdims=np._NoValue): Examples -------- + >>> import numpy as np >>> a = np.ma.array([1,2,3], mask=[False, False, True]) >>> a masked_array(data=[1, 2, --], @@ -5426,8 +5453,8 @@ def anom(self, axis=None, dtype=None): The default is to use the mean of the flattened array as reference. dtype : dtype, optional Type to use in computing the variance. For arrays of integer type - the default is float32; for arrays of float types it is the same as - the array type. + the default is float32; for arrays of float types it is the same as + the array type. See Also -------- @@ -5435,6 +5462,7 @@ def anom(self, axis=None, dtype=None): Examples -------- + >>> import numpy as np >>> a = np.ma.array([1,2,3]) >>> a.anom() masked_array(data=[-1., 0., 1.], @@ -5561,6 +5589,7 @@ def round(self, decimals=0, out=None): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = ma.array([1.35, 2.5, 1.5, 1.75, 2.25, 2.75], ... mask=[0, 0, 0, 1, 0, 0]) @@ -5596,16 +5625,9 @@ def argsort(self, axis=np._NoValue, kind=None, order=None, endwith=True, axis : int, optional Axis along which to sort. If None, the default, the flattened array is used. - - .. versionchanged:: 1.13.0 - Previously, the default was documented to be -1, but that was - in error. At some future date, the default will change to -1, as - originally intended. 
- Until then, the axis should be given explicitly when - ``arr.ndim > 1``, to avoid a FutureWarning. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional The sorting algorithm used. - order : list, optional + order : str or list of str, optional When `a` is an array with fields defined, this argument specifies which fields to compare first, second, etc. Not all fields need be specified. @@ -5639,6 +5661,7 @@ def argsort(self, axis=np._NoValue, kind=None, order=None, endwith=True, Examples -------- + >>> import numpy as np >>> a = np.ma.array([3,2,1], mask=[False, False, True]) >>> a masked_array(data=[3, 2, --], @@ -5696,8 +5719,9 @@ def argmin(self, axis=None, fill_value=None, out=None, *, Examples -------- + >>> import numpy as np >>> x = np.ma.array(np.arange(4), mask=[1,1,0,0]) - >>> x.shape = (2,2) + >>> x = x.reshape((2,2)) >>> x masked_array( data=[[--, --], @@ -5741,6 +5765,7 @@ def argmax(self, axis=None, fill_value=None, out=None, *, Examples -------- + >>> import numpy as np >>> a = np.arange(6).reshape(2,3) >>> a.argmax() 5 @@ -5786,11 +5811,6 @@ def sort(self, axis=-1, kind=None, order=None, endwith=True, stable : bool, optional Only for compatibility with ``np.sort``. Ignored. - Returns - ------- - sorted_array : ndarray - Array of the same type and shape as `a`. - See Also -------- numpy.ndarray.sort : Method to sort an array in-place. @@ -5804,6 +5824,7 @@ def sort(self, axis=-1, kind=None, order=None, endwith=True, Examples -------- + >>> import numpy as np >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) >>> # Default >>> a.sort() @@ -5855,7 +5876,6 @@ def min(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue): axis : None or int or tuple of ints, optional Axis along which to operate. By default, ``axis`` is None and the flattened input is used. - .. versionadded:: 1.7.0 If this is a tuple of ints, the minimum is selected over multiple axes, instead of a single axis or all the axes as before. 
out : array_like, optional @@ -5931,7 +5951,7 @@ def min(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue): result = masked return result # Explicit output - result = self.filled(fill_value).min(axis=axis, out=out, **kwargs) + self.filled(fill_value).min(axis=axis, out=out, **kwargs) if isinstance(out, MaskedArray): outmask = getmask(out) if outmask is nomask: @@ -5954,7 +5974,6 @@ def max(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue): axis : None or int or tuple of ints, optional Axis along which to operate. By default, ``axis`` is None and the flattened input is used. - .. versionadded:: 1.7.0 If this is a tuple of ints, the maximum is selected over multiple axes, instead of a single axis or all the axes as before. out : array_like, optional @@ -6037,7 +6056,7 @@ def max(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue): result = masked return result # Explicit output - result = self.filled(fill_value).max(axis=axis, out=out, **kwargs) + self.filled(fill_value).max(axis=axis, out=out, **kwargs) if isinstance(out, MaskedArray): outmask = getmask(out) if outmask is nomask: @@ -6089,6 +6108,7 @@ def ptp(self, axis=None, out=None, fill_value=None, keepdims=False): Examples -------- + >>> import numpy as np >>> x = np.ma.MaskedArray([[4, 9, 2, 10], ... [6, 9, 7, 12]]) @@ -6124,7 +6144,7 @@ def ptp(self, axis=None, out=None, fill_value=None, keepdims=False): >>> y.ptp(axis=1).view(np.uint8) masked_array(data=[126, 127, 128, 129], mask=False, - fill_value=np.int64(999999), + fill_value=np.uint64(999999), dtype=uint8) """ if out is None: @@ -6154,6 +6174,71 @@ def argpartition(self, *args, **kwargs): def take(self, indices, axis=None, out=None, mode='raise'): """ + Take elements from a masked array along an axis. + + This function does the same thing as "fancy" indexing (indexing arrays + using arrays) for masked arrays. It can be easier to use if you need + elements along a given axis. 
+ + Parameters + ---------- + a : masked_array + The source masked array. + indices : array_like + The indices of the values to extract. Also allow scalars for indices. + axis : int, optional + The axis over which to select values. By default, the flattened + input array is used. + out : MaskedArray, optional + If provided, the result will be placed in this array. It should + be of the appropriate shape and dtype. Note that `out` is always + buffered if `mode='raise'`; use other modes for better performance. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices will behave. + + * 'raise' -- raise an error (default) + * 'wrap' -- wrap around + * 'clip' -- clip to the range + + 'clip' mode means that all indices that are too large are replaced + by the index that addresses the last element along that axis. Note + that this disables indexing with negative numbers. + + Returns + ------- + out : MaskedArray + The returned array has the same type as `a`. + + See Also + -------- + numpy.take : Equivalent function for ndarrays. + compress : Take elements using a boolean mask. + take_along_axis : Take elements by matching the array and the index arrays. + + Notes + ----- + This function behaves similarly to `numpy.take`, but it handles masked + values. The mask is retained in the output array, and masked values + in the input array remain masked in the output. 
+ + Examples + -------- + >>> import numpy as np + >>> a = np.ma.array([4, 3, 5, 7, 6, 8], mask=[0, 0, 1, 0, 1, 0]) + >>> indices = [0, 1, 4] + >>> np.ma.take(a, indices) + masked_array(data=[4, 3, --], + mask=[False, False, True], + fill_value=999999) + + When `indices` is not one-dimensional, the output also has these dimensions: + + >>> np.ma.take(a, [[0, 1], [2, 3]]) + masked_array(data=[[4, 3], + [--, 7]], + mask=[[False, False], + [ True, False]], + fill_value=999999) """ (_data, _mask) = (self._data, self._mask) cls = type(self) @@ -6222,7 +6307,6 @@ def mT(self): else: return masked_array(data=self.data.mT, mask=self.mask.mT) - def tolist(self, fill_value=None): """ Return the data portion of the masked array as a hierarchical Python list. @@ -6243,6 +6327,7 @@ def tolist(self, fill_value=None): Examples -------- + >>> import numpy as np >>> x = np.ma.array([[1,2,3], [4,5,6], [7,8,9]], mask=[0] + [1,0]*4) >>> x.tolist() [[1, None, 3], [None, 5, None], [7, None, 9]] @@ -6271,32 +6356,15 @@ def tolist(self, fill_value=None): inishape = self.shape result = np.array(self._data.ravel(), dtype=object) result[_mask.ravel()] = None - result.shape = inishape + result = result.reshape(inishape) return result.tolist() - def tostring(self, fill_value=None, order='C'): - r""" - A compatibility alias for `tobytes`, with exactly the same behavior. - - Despite its name, it returns `bytes` not `str`\ s. - - .. deprecated:: 1.19.0 - """ - # 2020-03-30, Numpy 1.19.0 - warnings.warn( - "tostring() is deprecated. Use tobytes() instead.", - DeprecationWarning, stacklevel=2) - - return self.tobytes(fill_value, order=order) - def tobytes(self, fill_value=None, order='C'): """ Return the array data as a string containing the raw bytes in the array. The array is filled with a fill value before the string conversion. - .. 
versionadded:: 1.9.0 - Parameters ---------- fill_value : scalar, optional @@ -6322,6 +6390,7 @@ def tobytes(self, fill_value=None, order='C'): Examples -------- + >>> import numpy as np >>> x = np.ma.array(np.array([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]]) >>> x.tobytes() b'\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x00' @@ -6371,6 +6440,7 @@ def toflex(self): Examples -------- + >>> import numpy as np >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> x masked_array( @@ -6471,11 +6541,11 @@ class mvoid(MaskedArray): Fake a 'void' object to use for masked array with structured dtypes. """ - def __new__(self, data, mask=nomask, dtype=None, fill_value=None, + def __new__(cls, data, mask=nomask, dtype=None, fill_value=None, hardmask=False, copy=False, subok=True): copy = None if not copy else True _data = np.array(data, copy=copy, subok=subok, dtype=dtype) - _data = _data.view(self) + _data = _data.view(cls) _data._hardmask = hardmask if mask is not nomask: if isinstance(mask, np.void): @@ -6580,14 +6650,14 @@ def filled(self, fill_value=None): def tolist(self): """ - Transforms the mvoid object into a tuple. + Transforms the mvoid object into a tuple. - Masked fields are replaced by None. + Masked fields are replaced by None. - Returns - ------- - returned_tuple - Tuple of fields + Returns + ------- + returned_tuple + Tuple of fields """ _mask = self._mask if _mask is nomask: @@ -6631,6 +6701,7 @@ def isMaskedArray(x): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.eye(3, 3) >>> a @@ -6718,16 +6789,17 @@ def __repr__(self): return object.__repr__(self) def __format__(self, format_spec): - # Replace ndarray.__format__ with the default, which supports no format characters. - # Supporting format characters is unwise here, because we do not know what type - # the user was expecting - better to not guess. 
+ # Replace ndarray.__format__ with the default, which supports no + # format characters. + # Supporting format characters is unwise here, because we do not know + # what type the user was expecting - better to not guess. try: return object.__format__(self, format_spec) except TypeError: # 2020-03-23, NumPy 1.19.0 warnings.warn( - "Format strings passed to MaskedConstant are ignored, but in future may " - "error or produce different behavior", + "Format strings passed to MaskedConstant are ignored," + " but in future may error or produce different behavior", FutureWarning, stacklevel=2 ) return object.__format__(self, "") @@ -6793,6 +6865,8 @@ def array(data, dtype=None, copy=False, order=None, subok=subok, keep_mask=keep_mask, hard_mask=hard_mask, fill_value=fill_value, ndmin=ndmin, shrink=shrink, order=order) + + array.__doc__ = masked_array.__doc__ @@ -6815,6 +6889,7 @@ def is_masked(x): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = ma.masked_equal([0, 1, 0, 2, 3], 0) >>> x @@ -6879,18 +6954,19 @@ def reduce(self, target, axis=np._NoValue): m = getmask(target) if axis is np._NoValue and target.ndim > 1: + name = self.__name__ # 2017-05-06, Numpy 1.13.0: warn on axis default warnings.warn( - f"In the future the default for ma.{self.__name__}.reduce will be axis=0, " - f"not the current None, to match np.{self.__name__}.reduce. " + f"In the future the default for ma.{name}.reduce will be axis=0, " + f"not the current None, to match np.{name}.reduce. 
" "Explicitly pass 0 or None to silence this warning.", MaskedArrayFutureWarning, stacklevel=2) axis = None if axis is not np._NoValue: - kwargs = dict(axis=axis) + kwargs = {'axis': axis} else: - kwargs = dict() + kwargs = {} if m is nomask: t = self.f.reduce(target, **kwargs) @@ -6931,6 +7007,8 @@ def min(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): # fill_value argument return asanyarray(obj).min(axis=axis, fill_value=fill_value, out=out, **kwargs) + + min.__doc__ = MaskedArray.min.__doc__ def max(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): @@ -6943,6 +7021,8 @@ def max(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): # fill_value argument return asanyarray(obj).max(axis=axis, fill_value=fill_value, out=out, **kwargs) + + max.__doc__ = MaskedArray.max.__doc__ @@ -6955,6 +7035,8 @@ def ptp(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): # a fill_value argument return asanyarray(obj).ptp(axis=axis, fill_value=fill_value, out=out, **kwargs) + + ptp.__doc__ = MaskedArray.ptp.__doc__ @@ -6963,7 +7045,7 @@ def ptp(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): ############################################################################## -class _frommethod: +def _frommethod(methodname: str, reversed: bool = False): """ Define functions from existing MaskedArray methods. @@ -6971,43 +7053,47 @@ class _frommethod: ---------- methodname : str Name of the method to transform. - + reversed : bool, optional + Whether to reverse the first two arguments of the method. Default is False. 
""" + method = getattr(MaskedArray, methodname) + assert callable(method) + + signature = inspect.signature(method) + params = list(signature.parameters.values()) + params[0] = params[0].replace(name="a") # rename 'self' to 'a' - def __init__(self, methodname, reversed=False): - self.__name__ = methodname - self.__doc__ = self.getdoc() - self.reversed = reversed + if reversed: + assert len(params) >= 2 + params[0], params[1] = params[1], params[0] - def getdoc(self): - "Return the doc of the function (from the doc of the method)." - meth = getattr(MaskedArray, self.__name__, None) or\ - getattr(np, self.__name__, None) - signature = self.__name__ + get_object_signature(meth) - if meth is not None: - doc = """ %s\n%s""" % ( - signature, getattr(meth, '__doc__', None)) - return doc + def wrapper(a, b, *args, **params): + return getattr(asanyarray(b), methodname)(a, *args, **params) + + else: + def wrapper(a, *args, **params): + return getattr(asanyarray(a), methodname)(*args, **params) - def __call__(self, a, *args, **params): - if self.reversed: - args = list(args) - a, args[0] = args[0], a + wrapper.__signature__ = signature.replace(parameters=params) + wrapper.__name__ = wrapper.__qualname__ = methodname - marr = asanyarray(a) - method_name = self.__name__ - method = getattr(type(marr), method_name, None) - if method is None: - # use the corresponding np function - method = getattr(np, method_name) + # __doc__ is None when using `python -OO ...` + if method.__doc__ is not None: + str_signature = f"{methodname}{signature}" + # TODO: For methods with a docstring "Parameters" section, that do not already + # mention `a` (see e.g. `MaskedArray.var.__doc__`), it should be inserted there. 
+ wrapper.__doc__ = f" {str_signature}\n{method.__doc__}" - return method(marr, *args, **params) + return wrapper all = _frommethod('all') anomalies = anom = _frommethod('anom') any = _frommethod('any') +argmax = _frommethod('argmax') +argmin = _frommethod('argmin') compress = _frommethod('compress', reversed=True) +count = _frommethod('count') cumprod = _frommethod('cumprod') cumsum = _frommethod('cumsum') copy = _frommethod('copy') @@ -7019,7 +7105,7 @@ def __call__(self, a, *args, **params): minimum = _extrema_operation(umath.minimum, less, minimum_fill_value) nonzero = _frommethod('nonzero') prod = _frommethod('prod') -product = _frommethod('prod') +product = _frommethod('product') ravel = _frommethod('ravel') repeat = _frommethod('repeat') shrink_mask = _frommethod('shrink_mask') @@ -7031,10 +7117,10 @@ def __call__(self, a, *args, **params): trace = _frommethod('trace') var = _frommethod('var') -count = _frommethod('count') def take(a, indices, axis=None, out=None, mode='raise'): """ + """ a = masked_array(a) return a.take(indices, axis=axis, out=out, mode=mode) @@ -7058,6 +7144,7 @@ def power(a, b, third=None): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = [11.2, -3.973, 0.801, -1.41] >>> mask = [0, 0, 0, 1] @@ -7117,8 +7204,6 @@ def power(a, b, third=None): result._data[invalid] = result.fill_value return result -argmin = _frommethod('argmin') -argmax = _frommethod('argmax') def argsort(a, axis=np._NoValue, kind=None, order=None, endwith=True, fill_value=None, *, stable=None): @@ -7134,6 +7219,8 @@ def argsort(a, axis=np._NoValue, kind=None, order=None, endwith=True, fill_value=fill_value, stable=None) else: return a.argsort(axis=axis, kind=kind, order=order, stable=None) + + argsort.__doc__ = MaskedArray.argsort.__doc__ def sort(a, axis=-1, kind=None, order=None, endwith=True, fill_value=None, *, @@ -7152,6 +7239,7 @@ def sort(a, axis=-1, kind=None, order=None, endwith=True, fill_value=None, *, Examples -------- + >>> import 
numpy as np >>> import numpy.ma as ma >>> x = [11.2, -3.973, 0.801, -1.41] >>> mask = [0, 0, 0, 1] @@ -7191,6 +7279,7 @@ def compressed(x): Examples -------- + >>> import numpy as np Create an array with negative values masked: @@ -7239,6 +7328,7 @@ def concatenate(arrays, axis=0): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = ma.arange(3) >>> a[1] = ma.masked @@ -7289,6 +7379,7 @@ def diag(v, k=0): Examples -------- + >>> import numpy as np Create an array with negative values masked: @@ -7337,6 +7428,33 @@ def left_shift(a, n): -------- numpy.left_shift + Examples + -------- + Shift with a masked array: + + >>> arr = np.ma.array([10, 20, 30], mask=[False, True, False]) + >>> np.ma.left_shift(arr, 1) + masked_array(data=[20, --, 60], + mask=[False, True, False], + fill_value=999999) + + Large shift: + + >>> np.ma.left_shift(10, 10) + masked_array(data=10240, + mask=False, + fill_value=999999) + + Shift with a scalar and an array: + + >>> scalar = 10 + >>> arr = np.ma.array([1, 2, 3], mask=[False, True, False]) + >>> np.ma.left_shift(scalar, arr) + masked_array(data=[20, --, 80], + mask=[False, True, False], + fill_value=999999) + + """ m = getmask(a) if m is nomask: @@ -7360,6 +7478,7 @@ def right_shift(a, n): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = [11, 3, 8, 1] >>> mask = [0, 0, 0, 1] @@ -7394,6 +7513,28 @@ def put(a, indices, values, mode='raise'): -------- MaskedArray.put + Examples + -------- + Putting values in a masked array: + + >>> a = np.ma.array([1, 2, 3, 4], mask=[False, True, False, False]) + >>> np.ma.put(a, [1, 3], [10, 30]) + >>> a + masked_array(data=[ 1, 10, 3, 30], + mask=False, + fill_value=999999) + + Using put with a 2D array: + + >>> b = np.ma.array([[1, 2], [3, 4]], mask=[[False, True], [False, False]]) + >>> np.ma.put(b, [[0, 1], [1, 0]], [[10, 20], [30, 40]]) + >>> b + masked_array( + data=[[40, 30], + [ 3, 4]], + mask=False, + fill_value=999999) + """ # We can't use 
'frommethod', the order of arguments is different try: @@ -7420,6 +7561,7 @@ def putmask(a, mask, values): # , mode='raise'): Examples -------- + >>> import numpy as np >>> arr = [[1, 2], [3, 4]] >>> mask = [[1, 0], [0, 0]] >>> x = np.ma.array(arr, mask=mask) @@ -7455,7 +7597,6 @@ def putmask(a, mask, values): # , mode='raise'): valmask = getmaskarray(values) np.copyto(a._mask, valmask, where=mask) np.copyto(a._data, valdata, where=mask) - return def transpose(a, axes=None): @@ -7470,6 +7611,7 @@ def transpose(a, axes=None): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = ma.arange(4).reshape((2,2)) >>> x[1, 1] = ma.masked @@ -7506,6 +7648,37 @@ def reshape(a, new_shape, order='C'): -------- MaskedArray.reshape : equivalent function + Examples + -------- + Reshaping a 1-D array: + + >>> a = np.ma.array([1, 2, 3, 4]) + >>> np.ma.reshape(a, (2, 2)) + masked_array( + data=[[1, 2], + [3, 4]], + mask=False, + fill_value=999999) + + Reshaping a 2-D array: + + >>> b = np.ma.array([[1, 2], [3, 4]]) + >>> np.ma.reshape(b, (1, 4)) + masked_array(data=[[1, 2, 3, 4]], + mask=False, + fill_value=999999) + + Reshaping a 1-D array with a mask: + + >>> c = np.ma.array([1, 2, 3, 4], mask=[False, True, False, False]) + >>> np.ma.reshape(c, (2, 2)) + masked_array( + data=[[1, --], + [3, 4]], + mask=[[False, True], + [False, False]], + fill_value=999999) + """ # We can't use 'frommethod', it whine about some parameters. Dmmit. try: @@ -7530,6 +7703,7 @@ def resize(x, new_shape): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = ma.array([[1, 2] ,[3, 4]]) >>> a[0, 1] = ma.masked @@ -7586,18 +7760,23 @@ def ndim(obj): """ return np.ndim(getdata(obj)) + ndim.__doc__ = np.ndim.__doc__ def shape(obj): "maskedarray version of the numpy function." return np.shape(getdata(obj)) + + shape.__doc__ = np.shape.__doc__ def size(obj, axis=None): "maskedarray version of the numpy function." 
return np.size(getdata(obj), axis) + + size.__doc__ = np.size.__doc__ @@ -7654,10 +7833,10 @@ def diff(a, /, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue): >>> np.ma.diff(u8_arr) masked_array(data=[255], mask=False, - fill_value=np.int64(999999), + fill_value=np.uint64(999999), dtype=uint8) >>> u8_arr[1,...] - u8_arr[0,...] - 255 + np.uint8(255) If this is not desirable, then the array should be cast to a larger integer type first: @@ -7671,6 +7850,7 @@ def diff(a, /, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue): Examples -------- + >>> import numpy as np >>> a = np.array([1, 2, 3, 4, 7, 0, 2, 3]) >>> x = np.ma.masked_where(a < 2, a) >>> np.ma.diff(x) @@ -7772,6 +7952,7 @@ def where(condition, x=_NoValue, y=_NoValue): Examples -------- + >>> import numpy as np >>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0], ... [1, 0, 1], ... [0, 1, 0]]) @@ -7869,6 +8050,7 @@ def choose(indices, choices, out=None, mode='raise'): Examples -------- + >>> import numpy as np >>> choice = np.array([[1,1,1], [2,2,2], [3,3,3]]) >>> a = np.array([2, 1, 0]) >>> np.ma.choose(a, choice) @@ -7907,7 +8089,7 @@ def nmask(x): return d -def round_(a, decimals=0, out=None): +def round(a, decimals=0, out=None): """ Return a copy of a, rounded to 'decimals' places. 
@@ -7932,6 +8114,7 @@ def round_(a, decimals=0, out=None): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = [11.2, -3.973, 0.801, -1.41] >>> mask = [0, 0, 0, 1] @@ -7940,7 +8123,7 @@ def round_(a, decimals=0, out=None): masked_array(data=[11.2, -3.973, 0.801, --], mask=[False, False, False, True], fill_value=1e+20) - >>> ma.round_(masked_x) + >>> ma.round(masked_x) masked_array(data=[11.0, -4.0, 1.0, --], mask=[False, False, False, True], fill_value=1e+20) @@ -7948,7 +8131,7 @@ def round_(a, decimals=0, out=None): masked_array(data=[11.2, -4.0, 0.8, --], mask=[False, False, False, True], fill_value=1e+20) - >>> ma.round_(masked_x, decimals=-1) + >>> ma.round(masked_x, decimals=-1) masked_array(data=[10.0, -0.0, 0.0, --], mask=[False, False, False, True], fill_value=1e+20) @@ -7960,9 +8143,65 @@ def round_(a, decimals=0, out=None): if hasattr(out, '_mask'): out._mask = getmask(a) return out -round = round_ +def round_(a, decimals=0, out=None): + """ + Return a copy of a, rounded to 'decimals' places. + + .. deprecated:: 2.5 + `numpy.ma.round_` is deprecated. Use `numpy.ma.round` instead. + + When 'decimals' is negative, it specifies the number of positions + to the left of the decimal point. The real and imaginary parts of + complex numbers are rounded separately. Nothing is done if the + array is not of float type and 'decimals' is greater than or equal + to 0. + + Parameters + ---------- + decimals : int + Number of decimals to round to. May be negative. + out : array_like + Existing array to use for output. + If not given, returns a default copy of a. + + Notes + ----- + If out is given and does not have a mask attribute, the mask of a + is lost! 
+ + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = [11.2, -3.973, 0.801, -1.41] + >>> mask = [0, 0, 0, 1] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array(data=[11.2, -3.973, 0.801, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.round_(masked_x) + masked_array(data=[11.0, -4.0, 1.0, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.round(masked_x, decimals=1) + masked_array(data=[11.2, -4.0, 0.8, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.round_(masked_x, decimals=-1) + masked_array(data=[10.0, -0.0, 0.0, --], + mask=[False, False, False, True], + fill_value=1e+20) + """ + warnings.warn( + "numpy.ma.round_ is deprecated. Use numpy.ma.round instead.", + DeprecationWarning, + stacklevel=2, + ) + return round(a, decimals, out) + def _mask_propagate(a, axis): """ Mask whole 1-d vectors of an array that contain masked values. @@ -8008,14 +8247,13 @@ def dot(a, b, strict=False, out=None): conditions are not met, an exception is raised, instead of attempting to be flexible. - .. versionadded:: 1.10.2 - See Also -------- numpy.dot : Equivalent function for ndarrays. 
Examples -------- + >>> import numpy as np >>> a = np.ma.array([[1, 2, 3], [4, 5, 6]], mask=[[1, 0, 0], [0, 0, 0]]) >>> b = np.ma.array([[1, 2], [3, 4], [5, 6]], mask=[[1, 0], [0, 0], [0, 0]]) >>> np.ma.dot(a, b) @@ -8074,10 +8312,12 @@ def inner(a, b): fa = filled(a, 0) fb = filled(b, 0) if fa.ndim == 0: - fa.shape = (1,) + fa = fa.reshape((1,)) if fb.ndim == 0: - fb.shape = (1,) + fb = fb.reshape((1,)) return np.inner(fa, fb).view(MaskedArray) + + inner.__doc__ = doc_note(np.inner.__doc__, "Masked values are replaced by 0.") innerproduct = inner @@ -8096,6 +8336,8 @@ def outer(a, b): mb = getmaskarray(b) m = make_mask(1 - np.outer(1 - ma, 1 - mb), copy=False) return masked_array(d, mask=m) + + outer.__doc__ = doc_note(np.outer.__doc__, "Masked values are replaced by 0.") outerproduct = outer @@ -8132,9 +8374,9 @@ def correlate(a, v, mode='valid', propagate_mask=True): Refer to the `np.convolve` docstring. Note that the default is 'valid', unlike `convolve`, which uses 'full'. propagate_mask : bool - If True, then a result element is masked if any masked element contributes towards it. - If False, then a result element is only masked if no non-masked element - contribute towards it + If True, then a result element is masked if any masked element contributes + towards it. If False, then a result element is only masked if no non-masked + element contribute towards it Returns ------- @@ -8144,6 +8386,37 @@ def correlate(a, v, mode='valid', propagate_mask=True): See Also -------- numpy.correlate : Equivalent function in the top-level NumPy module. 
+ + Examples + -------- + Basic correlation: + + >>> a = np.ma.array([1, 2, 3]) + >>> v = np.ma.array([0, 1, 0]) + >>> np.ma.correlate(a, v, mode='valid') + masked_array(data=[2], + mask=[False], + fill_value=999999) + + Correlation with masked elements: + + >>> a = np.ma.array([1, 2, 3], mask=[False, True, False]) + >>> v = np.ma.array([0, 1, 0]) + >>> np.ma.correlate(a, v, mode='valid', propagate_mask=True) + masked_array(data=[--], + mask=[ True], + fill_value=999999, + dtype=int64) + + Correlation with different modes and mixed array types: + + >>> a = np.ma.array([1, 2, 3]) + >>> v = np.ma.array([0, 1, 0]) + >>> np.ma.correlate(a, v, mode='full') + masked_array(data=[0, 1, 2, 3, 0], + mask=[False, False, False, False, False], + fill_value=999999) + """ return _convolve_or_correlate(np.correlate, a, v, mode, propagate_mask) @@ -8203,6 +8476,7 @@ def allequal(a, b, fill_value=True): Examples -------- + >>> import numpy as np >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) >>> a masked_array(data=[10000000000.0, 1e-07, --], @@ -8280,6 +8554,7 @@ def allclose(a, b, masked_equal=True, rtol=1e-5, atol=1e-8): Examples -------- + >>> import numpy as np >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) >>> a masked_array(data=[10000000000.0, 1e-07, --], @@ -8372,6 +8647,7 @@ def asarray(a, dtype=None, order=None): Examples -------- + >>> import numpy as np >>> x = np.arange(10.).reshape(2, 5) >>> x array([[0., 1., 2., 3., 4.], @@ -8391,7 +8667,7 @@ def asarray(a, dtype=None, order=None): subok=False, order=order) -def asanyarray(a, dtype=None): +def asanyarray(a, dtype=None, order=None): """ Convert the input to a masked array, conserving subclasses. @@ -8404,9 +8680,13 @@ def asanyarray(a, dtype=None): Input data, in any form that can be converted to an array. dtype : dtype, optional By default, the data-type is inferred from the input data. 
- order : {'C', 'F'}, optional - Whether to use row-major ('C') or column-major ('FORTRAN') memory - representation. Default is 'C'. + order : {'C', 'F', 'A', 'K'}, optional + Memory layout. 'A' and 'K' depend on the order of input array ``a``. + 'C' row-major (C-style), + 'F' column-major (Fortran-style) memory representation. + 'A' (any) means 'F' if ``a`` is Fortran contiguous, 'C' otherwise + 'K' (keep) preserve input order + Defaults to 'K'. Returns ------- @@ -8419,6 +8699,7 @@ def asanyarray(a, dtype=None): Examples -------- + >>> import numpy as np >>> x = np.arange(10.).reshape(2, 5) >>> x array([[0., 1., 2., 3., 4.], @@ -8435,9 +8716,18 @@ def asanyarray(a, dtype=None): """ # workaround for #8666, to preserve identity. Ideally the bottom line # would handle this for us. - if isinstance(a, MaskedArray) and (dtype is None or dtype == a.dtype): + if ( + isinstance(a, MaskedArray) + and (dtype is None or dtype == a.dtype) + and ( + order in {None, 'A', 'K'} + or order == 'C' and a.flags.carray + or order == 'F' and a.flags.f_contiguous + ) + ): return a - return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True) + return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True, + order=order) ############################################################################## @@ -8474,6 +8764,7 @@ def fromflex(fxarray): Examples -------- + >>> import numpy as np >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[0] + [1, 0] * 4) >>> rec = x.toflex() >>> rec @@ -8514,95 +8805,93 @@ def fromflex(fxarray): return masked_array(fxarray['_data'], mask=fxarray['_mask']) -class _convert2ma: +def _convert2ma(funcname: str, np_ret: str, np_ma_ret: str, + params: dict[str, str] | None = None): + """Convert function from numpy to numpy.ma.""" + func = getattr(np, funcname) + params = params or {} - """ - Convert functions from numpy to numpy.ma. 
+ @functools.wraps(func, assigned=set(functools.WRAPPER_ASSIGNMENTS) - {"__module__"}) + def wrapper(*args, **kwargs): + common_params = kwargs.keys() & params.keys() + extras = params | {p: kwargs.pop(p) for p in common_params} - Parameters - ---------- - _methodname : string - Name of the method to transform. + result = func.__call__(*args, **kwargs).view(MaskedArray) - """ - __doc__ = None + if "fill_value" in common_params: + result.fill_value = extras["fill_value"] + if "hardmask" in common_params: + result._hardmask = bool(extras["hardmask"]) - def __init__(self, funcname, np_ret, np_ma_ret, params=None): - self._func = getattr(np, funcname) - self.__doc__ = self.getdoc(np_ret, np_ma_ret) - self._extras = params or {} + return result - def getdoc(self, np_ret, np_ma_ret): - "Return the doc of the function (from the doc of the method)." - doc = getattr(self._func, '__doc__', None) - sig = get_object_signature(self._func) - if doc: - doc = self._replace_return_type(doc, np_ret, np_ma_ret) - # Add the signature of the function at the beginning of the doc - if sig: - sig = "%s%s\n" % (self._func.__name__, sig) - doc = sig + doc - return doc + # workaround for a doctest bug in Python 3.11 that incorrectly assumes `__code__` + # exists on wrapped functions + del wrapper.__wrapped__ - def _replace_return_type(self, doc, np_ret, np_ma_ret): - """ - Replace documentation of ``np`` function's return type. 
+ # `arange`, `empty`, `empty_like`, `frombuffer`, and `zeros` have no signature + try: + signature = inspect.signature(func) + except ValueError: + signature = inspect.Signature([ + inspect.Parameter('args', inspect.Parameter.VAR_POSITIONAL), + inspect.Parameter('kwargs', inspect.Parameter.VAR_KEYWORD), + ]) + + if params: + sig_params = list(signature.parameters.values()) + + # pop `**kwargs` if present + sig_kwargs = None + if sig_params[-1].kind is inspect.Parameter.VAR_KEYWORD: + sig_kwargs = sig_params.pop() + + # add new keyword-only parameters + for param_name, default in params.items(): + new_param = inspect.Parameter( + param_name, + inspect.Parameter.KEYWORD_ONLY, + default=default, + ) + sig_params.append(new_param) - Replaces it with the proper type for the ``np.ma`` function. + # re-append `**kwargs` if it was present + if sig_kwargs: + sig_params.append(sig_kwargs) - Parameters - ---------- - doc : str - The documentation of the ``np`` method. - np_ret : str - The return type string of the ``np`` method that we want to - replace. (e.g. "out : ndarray") - np_ma_ret : str - The return type string of the ``np.ma`` method. - (e.g. "out : MaskedArray") - """ - if np_ret not in doc: - raise RuntimeError( - f"Failed to replace `{np_ret}` with `{np_ma_ret}`. " - f"The documentation string for return type, {np_ret}, is not " - f"found in the docstring for `np.{self._func.__name__}`. " - f"Fix the docstring for `np.{self._func.__name__}` or " - "update the expected string for return type." 
- ) + signature = signature.replace(parameters=sig_params) - return doc.replace(np_ret, np_ma_ret) + wrapper.__signature__ = signature - def __call__(self, *args, **params): - # Find the common parameters to the call and the definition - _extras = self._extras - common_params = set(params).intersection(_extras) - # Drop the common parameters from the call - for p in common_params: - _extras[p] = params.pop(p) - # Get the result - result = self._func.__call__(*args, **params).view(MaskedArray) - if "fill_value" in common_params: - result.fill_value = _extras.get("fill_value", None) - if "hardmask" in common_params: - result._hardmask = bool(_extras.get("hard_mask", False)) - return result + # __doc__ is None when using `python -OO ...` + if func.__doc__ is not None: + assert np_ret in func.__doc__, ( + f"Failed to replace `{np_ret}` with `{np_ma_ret}`. " + f"The documentation string for return type, {np_ret}, is not " + f"found in the docstring for `np.{func.__name__}`. " + f"Fix the docstring for `np.{func.__name__}` or " + "update the expected string for return type." 
+ ) + wrapper.__doc__ = inspect.cleandoc(func.__doc__).replace(np_ret, np_ma_ret) + + return wrapper arange = _convert2ma( 'arange', - params=dict(fill_value=None, hardmask=False), + params={'fill_value': None, 'hardmask': False}, np_ret='arange : ndarray', np_ma_ret='arange : MaskedArray', ) clip = _convert2ma( 'clip', - params=dict(fill_value=None, hardmask=False), + params={'fill_value': None, 'hardmask': False}, np_ret='clipped_array : ndarray', np_ma_ret='clipped_array : MaskedArray', ) empty = _convert2ma( 'empty', - params=dict(fill_value=None, hardmask=False), + params={'fill_value': None, 'hardmask': False}, np_ret='out : ndarray', np_ma_ret='out : MaskedArray', ) @@ -8623,19 +8912,19 @@ def __call__(self, *args, **params): ) identity = _convert2ma( 'identity', - params=dict(fill_value=None, hardmask=False), + params={'fill_value': None, 'hardmask': False}, np_ret='out : ndarray', np_ma_ret='out : MaskedArray', ) indices = _convert2ma( 'indices', - params=dict(fill_value=None, hardmask=False), + params={'fill_value': None, 'hardmask': False}, np_ret='grid : one ndarray or tuple of ndarrays', np_ma_ret='grid : one MaskedArray or tuple of MaskedArrays', ) ones = _convert2ma( 'ones', - params=dict(fill_value=None, hardmask=False), + params={'fill_value': None, 'hardmask': False}, np_ret='out : ndarray', np_ma_ret='out : MaskedArray', ) @@ -8646,13 +8935,13 @@ def __call__(self, *args, **params): ) squeeze = _convert2ma( 'squeeze', - params=dict(fill_value=None, hardmask=False), + params={'fill_value': None, 'hardmask': False}, np_ret='squeezed : ndarray', np_ma_ret='squeezed : MaskedArray', ) zeros = _convert2ma( 'zeros', - params=dict(fill_value=None, hardmask=False), + params={'fill_value': None, 'hardmask': False}, np_ret='out : ndarray', np_ma_ret='out : MaskedArray', ) @@ -8666,8 +8955,6 @@ def __call__(self, *args, **params): def append(a, b, axis=None): """Append values to the end of an array. - .. 
versionadded:: 1.9.0 - Parameters ---------- a : array_like @@ -8694,6 +8981,7 @@ def append(a, b, axis=None): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = ma.masked_values([1, 2, 3], 2) >>> b = ma.masked_values([[4, 5, 6], [7, 8, 9]], 7) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index d6cc0a782c23..d7b92e12065b 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1,469 +1,4521 @@ -from collections.abc import Callable -from typing import Any, TypeVar -from numpy import ndarray, dtype, float64 +# pyright: reportIncompatibleMethodOverride=false +import datetime as dt +import types +from _typeshed import Incomplete, SupportsLenAndGetItem +from collections.abc import Buffer, Callable, Iterator, Sequence +from typing import ( + Any, + Concatenate, + Final, + Generic, + Literal, + Never, + NoReturn, + Protocol, + Self, + SupportsComplex, + SupportsFloat, + SupportsIndex, + SupportsInt, + Unpack, + final, + overload, + override, + type_check_only, +) +from typing_extensions import TypeIs, TypeVar, deprecated + +import numpy as np from numpy import ( - amax as amax, - amin as amin, - bool as bool, - expand_dims as expand_dims, - clip as clip, - indices as indices, - ones_like as ones_like, - squeeze as squeeze, - zeros_like as zeros_like, - angle as angle + _HasDType, + _HasDTypeWithRealAndImag, + _ModeKind, + _OrderACF, + _OrderCF, + _OrderKACF, + _PartitionKind, + _SortKind, + _ToIndices, + amax, + amin, + bool_, + bytes_, + complex128, + complexfloating, + datetime64, + dtype, + expand_dims, + float64, + floating, + generic, + inexact, + int8, + int64, + int_, + integer, + intp, + ndarray, + number, + object_, + signedinteger, + str_, + timedelta64, + unsignedinteger, +) +from numpy._core.fromnumeric import _UFuncKwargs # type-check only +from numpy._globals import _NoValueType +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _32Bit, + _64Bit, + _AnyShape, + _ArrayLike, + _ArrayLikeBool_co, + 
_ArrayLikeBytes_co, + _ArrayLikeComplex128_co, + _ArrayLikeComplex_co, + _ArrayLikeDT64_co, + _ArrayLikeFloat64_co, + _ArrayLikeFloat_co, + _ArrayLikeInt, + _ArrayLikeInt_co, + _ArrayLikeNumber_co, + _ArrayLikeObject_co, + _ArrayLikeStr_co, + _ArrayLikeString_co, + _ArrayLikeTD64_co, + _ArrayLikeUInt_co, + _CharLike_co, + _DT64Codes, + _DTypeLike, + _DTypeLikeBool, + _DTypeLikeVoid, + _FloatLike_co, + _IntLike_co, + _NestedSequence, + _ScalarLike_co, + _Shape, + _ShapeLike, + _SupportsArrayFunc, + _SupportsDType, + _TD64Like_co, ) +from numpy._typing._dtype_like import _VoidDTypeLike + +__all__ = [ + "MAError", + "MaskError", + "MaskType", + "MaskedArray", + "abs", + "absolute", + "add", + "all", + "allclose", + "allequal", + "alltrue", + "amax", + "amin", + "angle", + "anom", + "anomalies", + "any", + "append", + "arange", + "arccos", + "arccosh", + "arcsin", + "arcsinh", + "arctan", + "arctan2", + "arctanh", + "argmax", + "argmin", + "argsort", + "around", + "array", + "asanyarray", + "asarray", + "bitwise_and", + "bitwise_or", + "bitwise_xor", + "bool_", + "ceil", + "choose", + "clip", + "common_fill_value", + "compress", + "compressed", + "concatenate", + "conjugate", + "convolve", + "copy", + "correlate", + "cos", + "cosh", + "count", + "cumprod", + "cumsum", + "default_fill_value", + "diag", + "diagonal", + "diff", + "divide", + "empty", + "empty_like", + "equal", + "exp", + "expand_dims", + "fabs", + "filled", + "fix_invalid", + "flatten_mask", + "flatten_structured_array", + "floor", + "floor_divide", + "fmod", + "frombuffer", + "fromflex", + "fromfunction", + "getdata", + "getmask", + "getmaskarray", + "greater", + "greater_equal", + "harden_mask", + "hypot", + "identity", + "ids", + "indices", + "inner", + "innerproduct", + "isMA", + "isMaskedArray", + "is_mask", + "is_masked", + "isarray", + "left_shift", + "less", + "less_equal", + "log", + "log2", + "log10", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "make_mask", + 
"make_mask_descr", + "make_mask_none", + "mask_or", + "masked", + "masked_array", + "masked_equal", + "masked_greater", + "masked_greater_equal", + "masked_inside", + "masked_invalid", + "masked_less", + "masked_less_equal", + "masked_not_equal", + "masked_object", + "masked_outside", + "masked_print_option", + "masked_singleton", + "masked_values", + "masked_where", + "max", + "maximum", + "maximum_fill_value", + "mean", + "min", + "minimum", + "minimum_fill_value", + "mod", + "multiply", + "mvoid", + "ndim", + "negative", + "nomask", + "nonzero", + "not_equal", + "ones", + "ones_like", + "outer", + "outerproduct", + "power", + "prod", + "product", + "ptp", + "put", + "putmask", + "ravel", + "remainder", + "repeat", + "reshape", + "resize", + "right_shift", + "round", + "round_", + "set_fill_value", + "shape", + "sin", + "sinh", + "size", + "soften_mask", + "sometrue", + "sort", + "sqrt", + "squeeze", + "std", + "subtract", + "sum", + "swapaxes", + "take", + "tan", + "tanh", + "trace", + "transpose", + "true_divide", + "var", + "where", + "zeros", + "zeros_like", +] + +_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) +# the additional `Callable[...]` bound simplifies self-binding to the ufunc's callable signature +_UFuncT_co = TypeVar("_UFuncT_co", bound=np.ufunc | Callable[..., object], default=np.ufunc, covariant=True) + +_AnyNumericScalarT = TypeVar( + "_AnyNumericScalarT", + np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64, + np.float16, np.float32, np.float64, np.longdouble, + np.complex64, np.complex128, np.clongdouble, + np.timedelta64, + np.object_, +) # fmt: skip + +type _RealNumber = np.floating | np.integer + +type _Ignored = object + +# A subset of `MaskedArray` that can be parametrized w.r.t. 
`np.generic` +type _MaskedArray[ScalarT: np.generic] = MaskedArray[_AnyShape, np.dtype[ScalarT]] +type _Masked1D[ScalarT: np.generic] = MaskedArray[tuple[int], np.dtype[ScalarT]] +type _Masked2D[ScalarT: np.generic] = MaskedArray[tuple[int, int], np.dtype[ScalarT]] +type _Masked3D[ScalarT: np.generic] = MaskedArray[tuple[int, int, int], np.dtype[ScalarT]] + +type _MaskedArrayUInt_co = _MaskedArray[np.unsignedinteger | np.bool] +type _MaskedArrayInt_co = _MaskedArray[np.integer | np.bool] +type _MaskedArrayFloat64_co = _MaskedArray[np.floating[_64Bit] | np.float32 | np.float16 | np.integer | np.bool] +type _MaskedArrayFloat_co = _MaskedArray[np.floating | np.integer | np.bool] +type _MaskedArrayComplex128_co = _MaskedArray[np.number[_64Bit] | np.number[_32Bit] | np.float16 | np.integer | np.bool] +type _MaskedArrayComplex_co = _MaskedArray[np.inexact | np.integer | np.bool] +type _MaskedArrayNumber_co = _MaskedArray[np.number | np.bool] +type _MaskedArrayTD64_co = _MaskedArray[np.timedelta64 | np.integer | np.bool] -# TODO: Set the `bound` to something more suitable once we -# have proper shape support -_ShapeType = TypeVar("_ShapeType", bound=Any) -_DType_co = TypeVar("_DType_co", bound=dtype[Any], covariant=True) +type _ArrayInt_co = NDArray[np.integer | np.bool] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] +# Workaround for https://github.com/microsoft/pyright/issues/10232 +type _ArrayNoD[ScalarT: np.generic] = np.ndarray[tuple[Never] | tuple[Never, Never], np.dtype[ScalarT]] -__all__: list[str] +type _ConvertibleToInt = SupportsInt | SupportsIndex | _CharLike_co +type _ConvertibleToFloat = SupportsFloat | SupportsIndex | _CharLike_co +type _ConvertibleToComplex = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co +type _ConvertibleToTD64 = dt.timedelta | int | _CharLike_co | np.character | np.number | np.timedelta64 | np.bool | None 
+type _ConvertibleToDT64 = dt.date | int | _CharLike_co | np.character | np.number | np.datetime64 | np.bool | None +type _ArangeScalar = _RealNumber | np.datetime64 | np.timedelta64 -MaskType = bool -nomask: bool +type _NoMaskType = np.bool_[Literal[False]] # type of `np.False_` +type _MaskArray[ShapeT: _Shape] = np.ndarray[ShapeT, np.dtype[np.bool]] + +type _FillValue = complex | None # int | float | complex | None +type _FillValueCallable = Callable[[np.dtype | ArrayLike], _FillValue] +type _DomainCallable = Callable[..., NDArray[np.bool]] + +type _PyArray[T] = list[T] | tuple[T, ...] +type _PyScalar = complex | bytes | str + +type _Seq2D[T] = Sequence[Sequence[T]] +type _Seq3D[T] = Sequence[_Seq2D[T]] + +type _CorrelateMode = Literal["valid", "same", "full"] + +@type_check_only +class _HasShape[ShapeT_co: _Shape](Protocol): + @property + def shape(self, /) -> ShapeT_co: ... + +### + +MaskType = np.bool_ + +nomask: Final[_NoMaskType] = ... class MaskedArrayFutureWarning(FutureWarning): ... class MAError(Exception): ... class MaskError(MAError): ... -def default_fill_value(obj): ... -def minimum_fill_value(obj): ... -def maximum_fill_value(obj): ... -def set_fill_value(a, fill_value): ... -def common_fill_value(a, b): ... -def filled(a, fill_value=...): ... -def getdata(a, subok=...): ... +# not generic at runtime +class _MaskedUFunc(Generic[_UFuncT_co]): + f: _UFuncT_co # readonly + def __init__(self, /, ufunc: _UFuncT_co) -> None: ... + +# not generic at runtime +class _MaskedUnaryOperation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): + fill: Final[_FillValue] + domain: Final[_DomainCallable | None] + + def __init__(self, /, mufunc: _UFuncT_co, fill: _FillValue = 0, domain: _DomainCallable | None = None) -> None: ... 
+ + # NOTE: This might not work with overloaded callable signatures might not work on + # pyright, which is a long-standing issue, and is unique to pyright: + # https://github.com/microsoft/pyright/issues/9663 + # https://github.com/microsoft/pyright/issues/10849 + # https://github.com/microsoft/pyright/issues/10899 + # https://github.com/microsoft/pyright/issues/11049 + def __call__[**Tss, T]( + self: _MaskedUnaryOperation[Callable[Concatenate[Any, Tss], T]], + /, + a: ArrayLike, + *args: Tss.args, + **kwargs: Tss.kwargs, + ) -> T: ... + +# not generic at runtime +class _MaskedBinaryOperation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): + fillx: Final[_FillValue] + filly: Final[_FillValue] + + def __init__(self, /, mbfunc: _UFuncT_co, fillx: _FillValue = 0, filly: _FillValue = 0) -> None: ... + + # NOTE: See the comment in `_MaskedUnaryOperation.__call__` + def __call__[**Tss, T]( + self: _MaskedBinaryOperation[Callable[Concatenate[Any, Any, Tss], T]], + /, + a: ArrayLike, + b: ArrayLike, + *args: Tss.args, + **kwargs: Tss.kwargs, + ) -> T: ... + + # NOTE: We cannot meaningfully annotate the return (d)types of these methods until + # the signatures of the corresponding `numpy.ufunc` methods are specified. + def reduce(self, /, target: ArrayLike, axis: SupportsIndex = 0, dtype: DTypeLike | None = None) -> Incomplete: ... + def outer(self, /, a: ArrayLike, b: ArrayLike) -> _MaskedArray[Incomplete]: ... + def accumulate(self, /, target: ArrayLike, axis: SupportsIndex = 0) -> _MaskedArray[Incomplete]: ... + +# not generic at runtime +class _DomainedBinaryOperation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): + domain: Final[_DomainCallable] + fillx: Final[_FillValue] + filly: Final[_FillValue] + + def __init__( + self, + /, + dbfunc: _UFuncT_co, + domain: _DomainCallable, + fillx: _FillValue = 0, + filly: _FillValue = 0, + ) -> None: ... 
+ + # NOTE: See the comment in `_MaskedUnaryOperation.__call__` + def __call__[**Tss, T]( + self: _DomainedBinaryOperation[Callable[Concatenate[Any, Any, Tss], T]], + /, + a: ArrayLike, + b: ArrayLike, + *args: Tss.args, + **kwargs: Tss.kwargs, + ) -> T: ... + +# not generic at runtime +class _extrema_operation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): + compare: Final[_MaskedBinaryOperation] + fill_value_func: Final[_FillValueCallable] + + def __init__( + self, + /, + ufunc: _UFuncT_co, + compare: _MaskedBinaryOperation, + fill_value: _FillValueCallable, + ) -> None: ... + + # NOTE: This class is only used internally for `maximum` and `minimum`, so we are + # able to annotate the `__call__` method specifically for those two functions. + @overload + def __call__[ScalarT: np.generic](self, /, a: _ArrayLike[ScalarT], b: _ArrayLike[ScalarT]) -> _MaskedArray[ScalarT]: ... + @overload + def __call__(self, /, a: ArrayLike, b: ArrayLike) -> _MaskedArray[Incomplete]: ... + + # NOTE: We cannot meaningfully annotate the return (d)types of these methods until + # the signatures of the corresponding `numpy.ufunc` methods are specified. + def reduce(self, /, target: ArrayLike, axis: SupportsIndex | _NoValueType = ...) -> Incomplete: ... + def outer(self, /, a: ArrayLike, b: ArrayLike) -> _MaskedArray[Incomplete]: ... + +@final +class _MaskedPrintOption: + _display: str + _enabled: bool | Literal[0, 1] + def __init__(self, /, display: str) -> None: ... + def display(self, /) -> str: ... + def set_display(self, /, s: str) -> None: ... + def enabled(self, /) -> bool: ... + def enable(self, /, shrink: bool | Literal[0, 1] = 1) -> None: ... + +masked_print_option: Final[_MaskedPrintOption] = ... + +exp: _MaskedUnaryOperation = ... +conjugate: _MaskedUnaryOperation = ... +sin: _MaskedUnaryOperation = ... +cos: _MaskedUnaryOperation = ... +arctan: _MaskedUnaryOperation = ... +arcsinh: _MaskedUnaryOperation = ... +sinh: _MaskedUnaryOperation = ... 
+cosh: _MaskedUnaryOperation = ... +tanh: _MaskedUnaryOperation = ... +abs: _MaskedUnaryOperation = ... +absolute: _MaskedUnaryOperation = ... +angle: _MaskedUnaryOperation = ... +fabs: _MaskedUnaryOperation = ... +negative: _MaskedUnaryOperation = ... +floor: _MaskedUnaryOperation = ... +ceil: _MaskedUnaryOperation = ... +around: _MaskedUnaryOperation = ... +logical_not: _MaskedUnaryOperation = ... +sqrt: _MaskedUnaryOperation = ... +log: _MaskedUnaryOperation = ... +log2: _MaskedUnaryOperation = ... +log10: _MaskedUnaryOperation = ... +tan: _MaskedUnaryOperation = ... +arcsin: _MaskedUnaryOperation = ... +arccos: _MaskedUnaryOperation = ... +arccosh: _MaskedUnaryOperation = ... +arctanh: _MaskedUnaryOperation = ... + +add: _MaskedBinaryOperation = ... +subtract: _MaskedBinaryOperation = ... +multiply: _MaskedBinaryOperation = ... +arctan2: _MaskedBinaryOperation = ... +equal: _MaskedBinaryOperation = ... +not_equal: _MaskedBinaryOperation = ... +less_equal: _MaskedBinaryOperation = ... +greater_equal: _MaskedBinaryOperation = ... +less: _MaskedBinaryOperation = ... +greater: _MaskedBinaryOperation = ... +logical_and: _MaskedBinaryOperation = ... +def alltrue(target: ArrayLike, axis: SupportsIndex | None = 0, dtype: _DTypeLikeBool | None = None) -> Incomplete: ... +logical_or: _MaskedBinaryOperation = ... +def sometrue(target: ArrayLike, axis: SupportsIndex | None = 0, dtype: _DTypeLikeBool | None = None) -> Incomplete: ... +logical_xor: _MaskedBinaryOperation = ... +bitwise_and: _MaskedBinaryOperation = ... +bitwise_or: _MaskedBinaryOperation = ... +bitwise_xor: _MaskedBinaryOperation = ... +hypot: _MaskedBinaryOperation = ... + +divide: _DomainedBinaryOperation = ... +true_divide: _DomainedBinaryOperation = ... +floor_divide: _DomainedBinaryOperation = ... +remainder: _DomainedBinaryOperation = ... +fmod: _DomainedBinaryOperation = ... +mod: _DomainedBinaryOperation = ... 
+ +# `obj` can be anything (even `object()`), and is too "flexible", so we can't +# meaningfully annotate it, or its return type. +def default_fill_value(obj: object) -> Any: ... +def minimum_fill_value(obj: object) -> Any: ... +def maximum_fill_value(obj: object) -> Any: ... + +# +@overload # returns `a.fill_value` if `a` is a `MaskedArray` +def get_fill_value[ScalarT: np.generic](a: _MaskedArray[ScalarT]) -> ScalarT: ... +@overload # otherwise returns `default_fill_value(a)` +def get_fill_value(a: object) -> Any: ... + +# this is a noop if `a` isn't a `MaskedArray`, so we only accept `MaskedArray` input +def set_fill_value(a: MaskedArray, fill_value: _ScalarLike_co) -> None: ... + +# the return type depends on the *values* of `a` and `b` (which cannot be known +# statically), which is why we need to return an awkward `_ | None` +@overload +def common_fill_value[ScalarT: np.generic](a: _MaskedArray[ScalarT], b: MaskedArray) -> ScalarT | None: ... +@overload +def common_fill_value(a: object, b: object) -> Any: ... + +# keep in sync with `fix_invalid`, but return `ndarray` instead of `MaskedArray` +@overload +def filled[ShapeT: _Shape, DTypeT: np.dtype]( + a: ndarray[ShapeT, DTypeT], + fill_value: _ScalarLike_co | None = None, +) -> ndarray[ShapeT, DTypeT]: ... +@overload +def filled[ScalarT: np.generic](a: _ArrayLike[ScalarT], fill_value: _ScalarLike_co | None = None) -> NDArray[ScalarT]: ... +@overload +def filled(a: ArrayLike, fill_value: _ScalarLike_co | None = None) -> NDArray[Incomplete]: ... + +# keep in sync with `filled`, but return `MaskedArray` instead of `ndarray` +@overload +def fix_invalid[ShapeT: _Shape, DTypeT: np.dtype]( + a: np.ndarray[ShapeT, DTypeT], + mask: _ArrayLikeBool_co = nomask, + copy: bool = True, + fill_value: _ScalarLike_co | None = None, +) -> MaskedArray[ShapeT, DTypeT]: ... 
+@overload +def fix_invalid[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + mask: _ArrayLikeBool_co = nomask, + copy: bool = True, + fill_value: _ScalarLike_co | None = None, +) -> _MaskedArray[ScalarT]: ... +@overload +def fix_invalid( + a: ArrayLike, + mask: _ArrayLikeBool_co = nomask, + copy: bool = True, + fill_value: _ScalarLike_co | None = None, +) -> _MaskedArray[Incomplete]: ... + +# +def get_masked_subclass(*arrays: object) -> type[MaskedArray]: ... + +# +@overload +def getdata[ShapeT: _Shape, DTypeT: np.dtype]( + a: np.ndarray[ShapeT, DTypeT], + subok: bool = True, +) -> np.ndarray[ShapeT, DTypeT]: ... +@overload +def getdata[ScalarT: np.generic](a: _ArrayLike[ScalarT], subok: bool = True) -> NDArray[ScalarT]: ... +@overload +def getdata(a: ArrayLike, subok: bool = True) -> NDArray[Incomplete]: ... + get_data = getdata -def fix_invalid(a, mask=..., copy=..., fill_value=...): ... - -class _MaskedUFunc: - f: Any - __doc__: Any - __name__: Any - def __init__(self, ufunc): ... - -class _MaskedUnaryOperation(_MaskedUFunc): - fill: Any - domain: Any - def __init__(self, mufunc, fill=..., domain=...): ... - def __call__(self, a, *args, **kwargs): ... - -class _MaskedBinaryOperation(_MaskedUFunc): - fillx: Any - filly: Any - def __init__(self, mbfunc, fillx=..., filly=...): ... - def __call__(self, a, b, *args, **kwargs): ... - def reduce(self, target, axis=..., dtype=...): ... - def outer(self, a, b): ... - def accumulate(self, target, axis=...): ... - -class _DomainedBinaryOperation(_MaskedUFunc): - domain: Any - fillx: Any - filly: Any - def __init__(self, dbfunc, domain, fillx=..., filly=...): ... - def __call__(self, a, b, *args, **kwargs): ... 
- -exp: _MaskedUnaryOperation -conjugate: _MaskedUnaryOperation -sin: _MaskedUnaryOperation -cos: _MaskedUnaryOperation -arctan: _MaskedUnaryOperation -arcsinh: _MaskedUnaryOperation -sinh: _MaskedUnaryOperation -cosh: _MaskedUnaryOperation -tanh: _MaskedUnaryOperation -abs: _MaskedUnaryOperation -absolute: _MaskedUnaryOperation -fabs: _MaskedUnaryOperation -negative: _MaskedUnaryOperation -floor: _MaskedUnaryOperation -ceil: _MaskedUnaryOperation -around: _MaskedUnaryOperation -logical_not: _MaskedUnaryOperation -sqrt: _MaskedUnaryOperation -log: _MaskedUnaryOperation -log2: _MaskedUnaryOperation -log10: _MaskedUnaryOperation -tan: _MaskedUnaryOperation -arcsin: _MaskedUnaryOperation -arccos: _MaskedUnaryOperation -arccosh: _MaskedUnaryOperation -arctanh: _MaskedUnaryOperation - -add: _MaskedBinaryOperation -subtract: _MaskedBinaryOperation -multiply: _MaskedBinaryOperation -arctan2: _MaskedBinaryOperation -equal: _MaskedBinaryOperation -not_equal: _MaskedBinaryOperation -less_equal: _MaskedBinaryOperation -greater_equal: _MaskedBinaryOperation -less: _MaskedBinaryOperation -greater: _MaskedBinaryOperation -logical_and: _MaskedBinaryOperation -alltrue: _MaskedBinaryOperation -logical_or: _MaskedBinaryOperation -sometrue: Callable[..., Any] -logical_xor: _MaskedBinaryOperation -bitwise_and: _MaskedBinaryOperation -bitwise_or: _MaskedBinaryOperation -bitwise_xor: _MaskedBinaryOperation -hypot: _MaskedBinaryOperation -divide: _MaskedBinaryOperation -true_divide: _MaskedBinaryOperation -floor_divide: _MaskedBinaryOperation -remainder: _MaskedBinaryOperation -fmod: _MaskedBinaryOperation -mod: _MaskedBinaryOperation - -def make_mask_descr(ndtype): ... -def getmask(a): ... +# +@overload +def getmask(a: _ScalarLike_co) -> _NoMaskType: ... +@overload +def getmask[ShapeT: _Shape](a: MaskedArray[ShapeT, Any]) -> _MaskArray[ShapeT] | _NoMaskType: ... +@overload +def getmask(a: ArrayLike) -> _MaskArray[_AnyShape] | _NoMaskType: ... 
+ get_mask = getmask -def getmaskarray(arr): ... -def is_mask(m): ... -def make_mask(m, copy=..., shrink=..., dtype=...): ... -def make_mask_none(newshape, dtype=...): ... -def mask_or(m1, m2, copy=..., shrink=...): ... -def flatten_mask(mask): ... -def masked_where(condition, a, copy=...): ... -def masked_greater(x, value, copy=...): ... -def masked_greater_equal(x, value, copy=...): ... -def masked_less(x, value, copy=...): ... -def masked_less_equal(x, value, copy=...): ... -def masked_not_equal(x, value, copy=...): ... -def masked_equal(x, value, copy=...): ... -def masked_inside(x, v1, v2, copy=...): ... -def masked_outside(x, v1, v2, copy=...): ... -def masked_object(x, value, copy=..., shrink=...): ... -def masked_values(x, value, rtol=..., atol=..., copy=..., shrink=...): ... -def masked_invalid(a, copy=...): ... +# like `getmask`, but instead of `nomask` returns `make_mask_none(arr, arr.dtype?)` +@overload +def getmaskarray(arr: _ScalarLike_co) -> _MaskArray[tuple[()]]: ... +@overload +def getmaskarray[ShapeT: _Shape](arr: np.ndarray[ShapeT, Any]) -> _MaskArray[ShapeT]: ... + +# It's sufficient for `m` to have dtype with type: `type[np.bool_]`, +# which isn't necessarily an ndarray. Please open an issue if this causes issues. +def is_mask(m: object) -> TypeIs[NDArray[bool_]]: ... + +# +@overload +def make_mask_descr(ndtype: _VoidDTypeLike) -> np.dtype[np.void]: ... +@overload +def make_mask_descr(ndtype: _DTypeLike[np.generic] | str | type) -> np.dtype[np.bool_]: ... + +# +@overload # m is nomask +def make_mask( + m: _NoMaskType, + copy: bool = False, + shrink: bool = True, + dtype: _DTypeLikeBool = ..., +) -> _NoMaskType: ... +@overload # m: ndarray, shrink=True (default), dtype: bool-like (default) +def make_mask[ShapeT: _Shape]( + m: np.ndarray[ShapeT], + copy: bool = False, + shrink: Literal[True] = True, + dtype: _DTypeLikeBool = ..., +) -> _MaskArray[ShapeT] | _NoMaskType: ... 
+@overload # m: ndarray, shrink=False (kwarg), dtype: bool-like (default) +def make_mask[ShapeT: _Shape]( + m: np.ndarray[ShapeT], + copy: bool = False, + *, + shrink: Literal[False], + dtype: _DTypeLikeBool = ..., +) -> _MaskArray[ShapeT]: ... +@overload # m: ndarray, dtype: void-like +def make_mask[ShapeT: _Shape]( + m: np.ndarray[ShapeT], + copy: bool = False, + shrink: bool = True, + *, + dtype: _DTypeLikeVoid, +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... +@overload # m: array-like, shrink=True (default), dtype: bool-like (default) +def make_mask( + m: ArrayLike, + copy: bool = False, + shrink: Literal[True] = True, + dtype: _DTypeLikeBool = ..., +) -> _MaskArray[_AnyShape] | _NoMaskType: ... +@overload # m: array-like, shrink=False (kwarg), dtype: bool-like (default) +def make_mask( + m: ArrayLike, + copy: bool = False, + *, + shrink: Literal[False], + dtype: _DTypeLikeBool = ..., +) -> _MaskArray[_AnyShape]: ... +@overload # m: array-like, dtype: void-like +def make_mask( + m: ArrayLike, + copy: bool = False, + shrink: bool = True, + *, + dtype: _DTypeLikeVoid, +) -> NDArray[np.void]: ... +@overload # fallback +def make_mask( + m: ArrayLike, + copy: bool = False, + shrink: bool = True, + *, + dtype: DTypeLike = ..., +) -> NDArray[Incomplete] | _NoMaskType: ... + +# +@overload # known shape, dtype: unstructured (default) +def make_mask_none[ShapeT: _Shape](newshape: ShapeT, dtype: np.dtype | type | str | None = None) -> _MaskArray[ShapeT]: ... +@overload # known shape, dtype: structured +def make_mask_none[ShapeT: _Shape](newshape: ShapeT, dtype: _VoidDTypeLike) -> np.ndarray[ShapeT, dtype[np.void]]: ... +@overload # unknown shape, dtype: unstructured (default) +def make_mask_none(newshape: _ShapeLike, dtype: np.dtype | type | str | None = None) -> _MaskArray[_AnyShape]: ... +@overload # unknown shape, dtype: structured +def make_mask_none(newshape: _ShapeLike, dtype: _VoidDTypeLike) -> NDArray[np.void]: ... 
+ +# +@overload # nomask, scalar-like, shrink=True (default) +def mask_or( + m1: _NoMaskType | Literal[False], + m2: _ScalarLike_co, + copy: bool = False, + shrink: Literal[True] = True, +) -> _NoMaskType: ... +@overload # nomask, scalar-like, shrink=False (kwarg) +def mask_or( + m1: _NoMaskType | Literal[False], + m2: _ScalarLike_co, + copy: bool = False, + *, + shrink: Literal[False], +) -> _MaskArray[tuple[()]]: ... +@overload # scalar-like, nomask, shrink=True (default) +def mask_or( + m1: _ScalarLike_co, + m2: _NoMaskType | Literal[False], + copy: bool = False, + shrink: Literal[True] = True, +) -> _NoMaskType: ... +@overload # scalar-like, nomask, shrink=False (kwarg) +def mask_or( + m1: _ScalarLike_co, + m2: _NoMaskType | Literal[False], + copy: bool = False, + *, + shrink: Literal[False], +) -> _MaskArray[tuple[()]]: ... +@overload # ndarray, ndarray | nomask, shrink=True (default) +def mask_or[ShapeT: _Shape, ScalarT: np.generic]( + m1: np.ndarray[ShapeT, np.dtype[ScalarT]], + m2: np.ndarray[ShapeT, np.dtype[ScalarT]] | _NoMaskType | Literal[False], + copy: bool = False, + shrink: Literal[True] = True, +) -> _MaskArray[ShapeT] | _NoMaskType: ... +@overload # ndarray, ndarray | nomask, shrink=False (kwarg) +def mask_or[ShapeT: _Shape, ScalarT: np.generic]( + m1: np.ndarray[ShapeT, np.dtype[ScalarT]], + m2: np.ndarray[ShapeT, np.dtype[ScalarT]] | _NoMaskType | Literal[False], + copy: bool = False, + *, + shrink: Literal[False], +) -> _MaskArray[ShapeT]: ... +@overload # ndarray | nomask, ndarray, shrink=True (default) +def mask_or[ShapeT: _Shape, ScalarT: np.generic]( + m1: np.ndarray[ShapeT, np.dtype[ScalarT]] | _NoMaskType | Literal[False], + m2: np.ndarray[ShapeT, np.dtype[ScalarT]], + copy: bool = False, + shrink: Literal[True] = True, +) -> _MaskArray[ShapeT] | _NoMaskType: ... 
+@overload # ndarray | nomask, ndarray, shrink=False (kwarg)
+def mask_or[ShapeT: _Shape, ScalarT: np.generic](
+    m1: np.ndarray[ShapeT, np.dtype[ScalarT]] | _NoMaskType | Literal[False],
+    m2: np.ndarray[ShapeT, np.dtype[ScalarT]],
+    copy: bool = False,
+    *,
+    shrink: Literal[False],
+) -> _MaskArray[ShapeT]: ...
+
+#
+@overload
+def flatten_mask[ShapeT: _Shape](mask: np.ndarray[ShapeT]) -> _MaskArray[ShapeT]: ...
+@overload
+def flatten_mask(mask: ArrayLike) -> _MaskArray[_AnyShape]: ...
+
+# NOTE: we currently don't know the field types of `void` dtypes, so it's not possible
+# to know the output dtype of the returned array.
+@overload
+def flatten_structured_array[ShapeT: _Shape](a: MaskedArray[ShapeT, np.dtype[np.void]]) -> MaskedArray[ShapeT]: ...
+@overload
+def flatten_structured_array[ShapeT: _Shape](a: np.ndarray[ShapeT, np.dtype[np.void]]) -> np.ndarray[ShapeT]: ...
+@overload # for some reason this accepts unstructured array-likes, hence this fallback overload
+def flatten_structured_array(a: ArrayLike) -> np.ndarray: ...
+
+# keep in sync with the other `masked_*` functions
+@overload # known array with known shape and dtype
+def masked_invalid[ShapeT: _Shape, DTypeT: np.dtype](
+    a: ndarray[ShapeT, DTypeT],
+    copy: bool = True,
+) -> MaskedArray[ShapeT, DTypeT]: ...
+@overload # array-like of known scalar-type
+def masked_invalid[ScalarT: np.generic](a: _ArrayLike[ScalarT], copy: bool = True) -> _MaskedArray[ScalarT]: ...
+@overload # unknown array-like
+def masked_invalid(a: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ...
+
+# keep in sync with the other `masked_*` functions
+@overload # known array with known shape and dtype
+def masked_where[ShapeT: _Shape, DTypeT: np.dtype](
+    condition: _ArrayLikeBool_co,
+    a: ndarray[ShapeT, DTypeT],
+    copy: bool = True,
+) -> MaskedArray[ShapeT, DTypeT]: ...
+@overload # array-like of known scalar-type
+def masked_where[ScalarT: np.generic](
+    condition: _ArrayLikeBool_co,
+    a: _ArrayLike[ScalarT],
+    copy: bool = True,
+) -> _MaskedArray[ScalarT]: ...
+@overload # unknown array-like
+def masked_where(condition: _ArrayLikeBool_co, a: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ...
+
+# keep in sync with the other `masked_*` functions
+@overload # known array with known shape and dtype
+def masked_greater[ShapeT: _Shape, DTypeT: np.dtype](
+    x: ndarray[ShapeT, DTypeT],
+    value: ArrayLike,
+    copy: bool = True,
+) -> MaskedArray[ShapeT, DTypeT]: ...
+@overload # array-like of known scalar-type
+def masked_greater[ScalarT: np.generic](x: _ArrayLike[ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[ScalarT]: ...
+@overload # unknown array-like
+def masked_greater(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ...
+
+# keep in sync with the other `masked_*` functions
+@overload # known array with known shape and dtype
+def masked_greater_equal[ShapeT: _Shape, DTypeT: np.dtype](
+    x: ndarray[ShapeT, DTypeT],
+    value: ArrayLike,
+    copy: bool = True,
+) -> MaskedArray[ShapeT, DTypeT]: ...
+@overload # array-like of known scalar-type
+def masked_greater_equal[ScalarT: np.generic](
+    x: _ArrayLike[ScalarT],
+    value: ArrayLike,
+    copy: bool = True,
+) -> _MaskedArray[ScalarT]: ...
+@overload # unknown array-like
+def masked_greater_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ...
+
+# keep in sync with the other `masked_*` functions
+@overload # known array with known shape and dtype
+def masked_less[ShapeT: _Shape, DTypeT: np.dtype](
+    x: ndarray[ShapeT, DTypeT],
+    value: ArrayLike,
+    copy: bool = True,
+) -> MaskedArray[ShapeT, DTypeT]: ...
+@overload # array-like of known scalar-type
+def masked_less[ScalarT: np.generic](x: _ArrayLike[ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[ScalarT]: ...
+@overload # unknown array-like
+def masked_less(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ...
+
+# keep in sync with the other `masked_*` functions
+@overload # known array with known shape and dtype
+def masked_less_equal[ShapeT: _Shape, DTypeT: np.dtype](
+    x: ndarray[ShapeT, DTypeT],
+    value: ArrayLike,
+    copy: bool = True,
+) -> MaskedArray[ShapeT, DTypeT]: ...
+@overload # array-like of known scalar-type
+def masked_less_equal[ScalarT: np.generic](
+    x: _ArrayLike[ScalarT],
+    value: ArrayLike,
+    copy: bool = True,
+) -> _MaskedArray[ScalarT]: ...
+@overload # unknown array-like
+def masked_less_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ...
+
+# keep in sync with the other `masked_*` functions
+@overload # known array with known shape and dtype
+def masked_not_equal[ShapeT: _Shape, DTypeT: np.dtype](
+    x: ndarray[ShapeT, DTypeT],
+    value: ArrayLike,
+    copy: bool = True,
+) -> MaskedArray[ShapeT, DTypeT]: ...
+@overload # array-like of known scalar-type
+def masked_not_equal[ScalarT: np.generic](
+    x: _ArrayLike[ScalarT],
+    value: ArrayLike,
+    copy: bool = True,
+) -> _MaskedArray[ScalarT]: ...
+@overload # unknown array-like
+def masked_not_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ...
+
+# keep in sync with the other `masked_*` functions
+@overload # known array with known shape and dtype
+def masked_equal[ShapeT: _Shape, DTypeT: np.dtype](
+    x: ndarray[ShapeT, DTypeT],
+    value: ArrayLike,
+    copy: bool = True,
+) -> MaskedArray[ShapeT, DTypeT]: ...
+@overload # array-like of known scalar-type
+def masked_equal[ScalarT: np.generic](x: _ArrayLike[ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[ScalarT]: ...
+@overload # unknown array-like
+def masked_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ...
+
+# keep in sync with the other `masked_*` functions
+@overload # known array with known shape and dtype
+def masked_inside[ShapeT: _Shape, DTypeT: np.dtype](
+    x: ndarray[ShapeT, DTypeT],
+    v1: ArrayLike,
+    v2: ArrayLike,
+    copy: bool = True,
+) -> MaskedArray[ShapeT, DTypeT]: ...
+@overload # array-like of known scalar-type
+def masked_inside[ScalarT: np.generic](
+    x: _ArrayLike[ScalarT],
+    v1: ArrayLike,
+    v2: ArrayLike,
+    copy: bool = True,
+) -> _MaskedArray[ScalarT]: ...
+@overload # unknown array-like
+def masked_inside(x: ArrayLike, v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ...
+
+# keep in sync with the other `masked_*` functions
+@overload # known array with known shape and dtype
+def masked_outside[ShapeT: _Shape, DTypeT: np.dtype](
+    x: ndarray[ShapeT, DTypeT],
+    v1: ArrayLike,
+    v2: ArrayLike,
+    copy: bool = True,
+) -> MaskedArray[ShapeT, DTypeT]: ...
+@overload # array-like of known scalar-type
+def masked_outside[ScalarT: np.generic](
+    x: _ArrayLike[ScalarT],
+    v1: ArrayLike,
+    v2: ArrayLike,
+    copy: bool = True,
+) -> _MaskedArray[ScalarT]: ...
+@overload # unknown array-like
+def masked_outside(x: ArrayLike, v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ...
+
+# only intended for object arrays, so we assume that's how it's always used in practice
+@overload
+def masked_object[ShapeT: _Shape](
+    x: np.ndarray[ShapeT, np.dtype[np.object_]],
+    value: object,
+    copy: bool = True,
+    shrink: bool = True,
+) -> MaskedArray[ShapeT, np.dtype[np.object_]]: ...
+@overload
+def masked_object(
+    x: _ArrayLikeObject_co,
+    value: object,
+    copy: bool = True,
+    shrink: bool = True,
+) -> _MaskedArray[np.object_]: ...
+ +# keep roughly in sync with `filled` +@overload +def masked_values[ShapeT: _Shape, DTypeT: np.dtype]( + x: np.ndarray[ShapeT, DTypeT], + value: _ScalarLike_co, + rtol: float = 1e-5, + atol: float = 1e-8, + copy: bool = True, + shrink: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... +@overload +def masked_values[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], + value: _ScalarLike_co, + rtol: float = 1e-5, + atol: float = 1e-8, + copy: bool = True, + shrink: bool = True, +) -> _MaskedArray[ScalarT]: ... +@overload +def masked_values( + x: ArrayLike, + value: _ScalarLike_co, + rtol: float = 1e-5, + atol: float = 1e-8, + copy: bool = True, + shrink: bool = True, +) -> _MaskedArray[Incomplete]: ... + +# TODO: Support non-boolean mask dtypes, such as `np.void`. This will require adding an +# additional generic type parameter to (at least) `MaskedArray` and `MaskedIterator` to +# hold the dtype of the mask. + +class MaskedIterator(Generic[_ShapeT_co, _DTypeT_co]): + ma: MaskedArray[_ShapeT_co, _DTypeT_co] # readonly + dataiter: np.flatiter[ndarray[_ShapeT_co, _DTypeT_co]] # readonly + maskiter: Final[np.flatiter[NDArray[np.bool]]] + + def __init__(self, ma: MaskedArray[_ShapeT_co, _DTypeT_co]) -> None: ... + def __iter__(self) -> Self: ... + + # Similar to `MaskedArray.__getitem__` but without the `void` case. + @overload + def __getitem__(self, indx: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + @overload + def __getitem__(self, indx: SupportsIndex | tuple[SupportsIndex, ...], /) -> Incomplete: ... + @overload + def __getitem__(self, indx: _ToIndices, /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + + # Similar to `ndarray.__setitem__` but without the `void` case. + @overload # flexible | object_ | bool + def __setitem__( + self: MaskedIterator[Any, dtype[np.flexible | object_ | np.bool] | np.dtypes.StringDType], + index: _ToIndices, + value: object, + /, + ) -> None: ... 
+ @overload # integer + def __setitem__( + self: MaskedIterator[Any, dtype[integer]], + index: _ToIndices, + value: _ConvertibleToInt | _NestedSequence[_ConvertibleToInt] | _ArrayLikeInt_co, + /, + ) -> None: ... + @overload # floating + def __setitem__( + self: MaskedIterator[Any, dtype[floating]], + index: _ToIndices, + value: _ConvertibleToFloat | _NestedSequence[_ConvertibleToFloat | None] | _ArrayLikeFloat_co | None, + /, + ) -> None: ... + @overload # complexfloating + def __setitem__( + self: MaskedIterator[Any, dtype[complexfloating]], + index: _ToIndices, + value: _ConvertibleToComplex | _NestedSequence[_ConvertibleToComplex | None] | _ArrayLikeNumber_co | None, + /, + ) -> None: ... + @overload # timedelta64 + def __setitem__( + self: MaskedIterator[Any, dtype[timedelta64]], + index: _ToIndices, + value: _ConvertibleToTD64 | _NestedSequence[_ConvertibleToTD64], + /, + ) -> None: ... + @overload # datetime64 + def __setitem__( + self: MaskedIterator[Any, dtype[datetime64]], + index: _ToIndices, + value: _ConvertibleToDT64 | _NestedSequence[_ConvertibleToDT64], + /, + ) -> None: ... + @overload # catch-all + def __setitem__(self, index: _ToIndices, value: ArrayLike, /) -> None: ... + + # TODO: Returns `mvoid[(), _DTypeT_co]` for masks with `np.void` dtype. + def __next__[ScalarT: np.generic](self: MaskedIterator[Any, np.dtype[ScalarT]]) -> ScalarT: ... + +class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): + __array_priority__: Final[Literal[15]] = 15 + + @overload + def __new__[ScalarT: np.generic]( + cls, + data: _ArrayLike[ScalarT], + mask: _ArrayLikeBool_co = nomask, + dtype: None = None, + copy: bool = False, + subok: bool = True, + ndmin: int = 0, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool | None = None, + shrink: bool = True, + order: _OrderKACF | None = None, + ) -> _MaskedArray[ScalarT]: ... 
+ @overload + def __new__[ScalarT: np.generic]( + cls, + data: object, + mask: _ArrayLikeBool_co, + dtype: _DTypeLike[ScalarT], + copy: bool = False, + subok: bool = True, + ndmin: int = 0, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool | None = None, + shrink: bool = True, + order: _OrderKACF | None = None, + ) -> _MaskedArray[ScalarT]: ... + @overload + def __new__[ScalarT: np.generic]( + cls, + data: object, + mask: _ArrayLikeBool_co = nomask, + *, + dtype: _DTypeLike[ScalarT], + copy: bool = False, + subok: bool = True, + ndmin: int = 0, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool | None = None, + shrink: bool = True, + order: _OrderKACF | None = None, + ) -> _MaskedArray[ScalarT]: ... + @overload + def __new__( + cls, + data: object = None, + mask: _ArrayLikeBool_co = nomask, + dtype: DTypeLike | None = None, + copy: bool = False, + subok: bool = True, + ndmin: int = 0, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool | None = None, + shrink: bool = True, + order: _OrderKACF | None = None, + ) -> _MaskedArray[Any]: ... + + def __array_wrap__[ShapeT: _Shape, DTypeT: np.dtype]( + self, + obj: ndarray[ShapeT, DTypeT], + context: tuple[np.ufunc, tuple[Any, ...], int] | None = None, + return_scalar: bool = False, + ) -> MaskedArray[ShapeT, DTypeT]: ... + + @overload # type: ignore[override] # () + def view(self, /, dtype: None = None, type: None = None, fill_value: _ScalarLike_co | None = None) -> Self: ... + @overload # (dtype: DTypeT) + def view[DTypeT: np.dtype]( + self, + /, + dtype: DTypeT | _HasDType[DTypeT], + type: None = None, + fill_value: _ScalarLike_co | None = None, + ) -> MaskedArray[_ShapeT_co, DTypeT]: ... 
+ @overload # (dtype: dtype[ScalarT]) + def view[ScalarT: np.generic]( + self, + /, + dtype: _DTypeLike[ScalarT], + type: None = None, + fill_value: _ScalarLike_co | None = None, + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... + @overload # ([dtype: _, ]*, type: ArrayT) + def view[ArrayT: np.ndarray]( + self, + /, + dtype: DTypeLike | None = None, + *, + type: type[ArrayT], + fill_value: _ScalarLike_co | None = None, + ) -> ArrayT: ... + @overload # (dtype: _, type: ArrayT) + def view[ArrayT: np.ndarray]( + self, + /, + dtype: DTypeLike | None, + type: type[ArrayT], + fill_value: _ScalarLike_co | None = None, + ) -> ArrayT: ... + @overload # (dtype: ArrayT, /) + def view[ArrayT: np.ndarray]( + self, + /, + dtype: type[ArrayT], + type: None = None, + fill_value: _ScalarLike_co | None = None, + ) -> ArrayT: ... + @overload # (dtype: ?) + def view( + self, + /, + # `_VoidDTypeLike | str | None` is like `DTypeLike` but without `_DTypeLike[Any]` to avoid + # overlaps with previous overloads. + dtype: _VoidDTypeLike | str | None, + type: None = None, + fill_value: _ScalarLike_co | None = None, + ) -> MaskedArray[_ShapeT_co, np.dtype]: ... + + # Keep in sync with `ndarray.__getitem__` + @overload + def __getitem__(self, key: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + @overload + def __getitem__(self, key: SupportsIndex | tuple[SupportsIndex, ...], /) -> Any: ... + @overload + def __getitem__(self, key: _ToIndices, /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + @overload + def __getitem__(self: _MaskedArray[np.void], indx: str, /) -> MaskedArray[_ShapeT_co]: ... + @overload + def __getitem__(self: _MaskedArray[np.void], indx: list[str], /) -> MaskedArray[_ShapeT_co, np.dtype[np.void]]: ... -class _MaskedPrintOption: - def __init__(self, display): ... - def display(self): ... - def set_display(self, s): ... - def enabled(self): ... - def enable(self, shrink=...): ... 
- -masked_print_option: _MaskedPrintOption - -def flatten_structured_array(a): ... - -class MaskedIterator: - ma: Any - dataiter: Any - maskiter: Any - def __init__(self, ma): ... - def __iter__(self): ... - def __getitem__(self, indx): ... - def __setitem__(self, index, value): ... - def __next__(self): ... - -class MaskedArray(ndarray[_ShapeType, _DType_co]): - __array_priority__: Any - def __new__(cls, data=..., mask=..., dtype=..., copy=..., subok=..., ndmin=..., fill_value=..., keep_mask=..., hard_mask=..., shrink=..., order=...): ... - def __array_finalize__(self, obj): ... - def __array_wrap__(self, obj, context=..., return_scalar=...): ... - def view(self, dtype=..., type=..., fill_value=...): ... - def __getitem__(self, indx): ... - def __setitem__(self, indx, value): ... - @property - def dtype(self): ... - @dtype.setter - def dtype(self, dtype): ... @property - def shape(self): ... - @shape.setter - def shape(self, shape): ... - def __setmask__(self, mask, copy=...): ... + def shape(self) -> _ShapeT_co: ... + @shape.setter # type: ignore[override] + def shape[ShapeT: _Shape](self: MaskedArray[ShapeT, Any], shape: ShapeT, /) -> None: ... + + def __setmask__(self, mask: _ArrayLikeBool_co, copy: bool = False) -> None: ... @property - def mask(self): ... + def mask(self) -> np.ndarray[_ShapeT_co, np.dtype[MaskType]] | MaskType: ... @mask.setter - def mask(self, value): ... + def mask(self, value: _ArrayLikeBool_co, /) -> None: ... @property - def recordmask(self): ... + def recordmask(self) -> np.ndarray[_ShapeT_co, np.dtype[MaskType]] | MaskType: ... @recordmask.setter - def recordmask(self, mask): ... - def harden_mask(self): ... - def soften_mask(self): ... + def recordmask(self, mask: Never, /) -> NoReturn: ... + def harden_mask(self) -> Self: ... + def soften_mask(self) -> Self: ... @property - def hardmask(self): ... - def unshare_mask(self): ... + def hardmask(self) -> bool: ... + def unshare_mask(self) -> Self: ... 
@property - def sharedmask(self): ... - def shrink_mask(self): ... + def sharedmask(self) -> bool: ... + def shrink_mask(self) -> Self: ... + + @property + def baseclass(self) -> type[ndarray]: ... + @property - def baseclass(self): ... - data: Any + def _data(self) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @property - def flat(self): ... + def data(self) -> ndarray[_ShapeT_co, _DTypeT_co]: ... # type: ignore[override] + + @property # type: ignore[override] + def flat(self) -> MaskedIterator[_ShapeT_co, _DTypeT_co]: ... @flat.setter - def flat(self, value): ... + def flat(self, value: ArrayLike, /) -> None: ... + @property - def fill_value(self): ... + def fill_value[ScalarT: np.generic](self: _MaskedArray[ScalarT]) -> ScalarT: ... @fill_value.setter - def fill_value(self, value=...): ... - get_fill_value: Any - set_fill_value: Any - def filled(self, fill_value=...): ... - def compressed(self): ... - def compress(self, condition, axis=..., out=...): ... - def __eq__(self, other): ... - def __ne__(self, other): ... - def __ge__(self, other): ... - def __gt__(self, other): ... - def __le__(self, other): ... - def __lt__(self, other): ... - def __add__(self, other): ... - def __radd__(self, other): ... - def __sub__(self, other): ... - def __rsub__(self, other): ... - def __mul__(self, other): ... - def __rmul__(self, other): ... - def __div__(self, other): ... - def __truediv__(self, other): ... - def __rtruediv__(self, other): ... - def __floordiv__(self, other): ... - def __rfloordiv__(self, other): ... - def __pow__(self, other): ... - def __rpow__(self, other): ... - def __iadd__(self, other): ... - def __isub__(self, other): ... - def __imul__(self, other): ... - def __idiv__(self, other): ... - def __ifloordiv__(self, other): ... - def __itruediv__(self, other): ... - def __ipow__(self, other): ... - def __float__(self): ... - def __int__(self): ... - @property # type: ignore[misc] - def imag(self): ... 
- get_imag: Any + def fill_value(self, value: _ScalarLike_co | None = None, /) -> None: ... + + def get_fill_value[ScalarT: np.generic](self: _MaskedArray[ScalarT]) -> ScalarT: ... + def set_fill_value(self, /, value: _ScalarLike_co | None = None) -> None: ... + + def filled(self, /, fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def compressed(self) -> ndarray[tuple[int], _DTypeT_co]: ... + + # keep roughly in sync with `ma.core.compress`, but swap the first two arguments + @overload # type: ignore[override] + def compress[ArrayT: np.ndarray]( + self, + condition: _ArrayLikeBool_co, + axis: _ShapeLike | None, + out: ArrayT, + ) -> ArrayT: ... + @overload + def compress[ArrayT: np.ndarray]( + self, + condition: _ArrayLikeBool_co, + axis: _ShapeLike | None = None, + *, + out: ArrayT, + ) -> ArrayT: ... + @overload + def compress( + self, + condition: _ArrayLikeBool_co, + axis: None = None, + out: None = None, + ) -> MaskedArray[tuple[int], _DTypeT_co]: ... + @overload + def compress( + self, + condition: _ArrayLikeBool_co, + axis: _ShapeLike | None = None, + out: None = None, + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + + # TODO: How to deal with the non-commutative nature of `==` and `!=`? + # xref numpy/numpy#17368 + def __eq__(self, other: Incomplete, /) -> Incomplete: ... + def __ne__(self, other: Incomplete, /) -> Incomplete: ... + + def __ge__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] + def __gt__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] + def __le__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] + def __lt__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] + + # Keep in sync with `ndarray.__add__` + @overload # type: ignore[override] + def __add__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... 
+ @overload + def __add__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... + @overload + def __add__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... + @overload + def __add__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... + @overload + def __add__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __add__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __add__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __add__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __add__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... + @overload + def __add__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... + @overload + def __add__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... + @overload + def __add__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... + @overload + def __add__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __add__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __add__(self: _MaskedArrayTD64_co, other: _ArrayLikeDT64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __add__(self: _MaskedArray[datetime64], other: _ArrayLikeTD64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __add__(self: _MaskedArray[bytes_], other: _ArrayLikeBytes_co, /) -> _MaskedArray[bytes_]: ... 
+ @overload + def __add__(self: _MaskedArray[str_], other: _ArrayLikeStr_co, /) -> _MaskedArray[str_]: ... + @overload + def __add__( + self: MaskedArray[Any, np.dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, + /, + ) -> MaskedArray[_AnyShape, np.dtypes.StringDType]: ... + @overload + def __add__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __add__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__radd__` + @overload # type: ignore[override] # signature equivalent to __add__ + def __radd__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... + @overload + def __radd__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... + @overload + def __radd__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... + @overload + def __radd__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... + @overload + def __radd__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __radd__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __radd__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __radd__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __radd__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... + @overload + def __radd__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... + @overload + def __radd__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... 
+ @overload + def __radd__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... + @overload + def __radd__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __radd__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __radd__(self: _MaskedArrayTD64_co, other: _ArrayLikeDT64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __radd__(self: _MaskedArray[datetime64], other: _ArrayLikeTD64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __radd__(self: _MaskedArray[bytes_], other: _ArrayLikeBytes_co, /) -> _MaskedArray[bytes_]: ... + @overload + def __radd__(self: _MaskedArray[str_], other: _ArrayLikeStr_co, /) -> _MaskedArray[str_]: ... + @overload + def __radd__( + self: MaskedArray[Any, np.dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, + /, + ) -> MaskedArray[_AnyShape, np.dtypes.StringDType]: ... + @overload + def __radd__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __radd__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__sub__` + @overload # type: ignore[override] + def __sub__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... + @overload + def __sub__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... + @overload + def __sub__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __sub__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... + @overload + def __sub__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... 
+ @overload + def __sub__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __sub__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __sub__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __sub__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... + @overload + def __sub__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... + @overload + def __sub__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... + @overload + def __sub__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... + @overload + def __sub__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __sub__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __sub__(self: _MaskedArray[datetime64], other: _ArrayLikeTD64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __sub__(self: _MaskedArray[datetime64], other: _ArrayLikeDT64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __sub__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __sub__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__rsub__` + @overload # type: ignore[override] + def __rsub__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... + @overload + def __rsub__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... + @overload + def __rsub__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... 
+ @overload + def __rsub__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... + @overload + def __rsub__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __rsub__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __rsub__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __rsub__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __rsub__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... + @overload + def __rsub__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... + @overload + def __rsub__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... + @overload + def __rsub__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... + @overload + def __rsub__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __rsub__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __rsub__(self: _MaskedArrayTD64_co, other: _ArrayLikeDT64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __rsub__(self: _MaskedArray[datetime64], other: _ArrayLikeDT64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __rsub__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __rsub__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... 
+ + # Keep in sync with `ndarray.__mul__` + @overload # type: ignore[override] + def __mul__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... + @overload + def __mul__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... + @overload + def __mul__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... + @overload + def __mul__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... + @overload + def __mul__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __mul__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __mul__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __mul__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __mul__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... + @overload + def __mul__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... + @overload + def __mul__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... + @overload + def __mul__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... + @overload + def __mul__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __mul__(self: _MaskedArray[timedelta64], other: _ArrayLikeFloat_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __mul__(self: _MaskedArrayFloat_co, other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... 
+ @overload + def __mul__( + self: MaskedArray[Any, np.dtype[np.character] | np.dtypes.StringDType], + other: _ArrayLikeInt, + /, + ) -> MaskedArray[tuple[Any, ...], _DTypeT_co]: ... + @overload + def __mul__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __mul__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__rmul__` + @overload # type: ignore[override] # signature equivalent to __mul__ + def __rmul__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... + @overload + def __rmul__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... + @overload + def __rmul__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... + @overload + def __rmul__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... + @overload + def __rmul__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __rmul__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __rmul__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __rmul__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __rmul__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... + @overload + def __rmul__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... + @overload + def __rmul__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... 
+ @overload + def __rmul__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... + @overload + def __rmul__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __rmul__(self: _MaskedArray[timedelta64], other: _ArrayLikeFloat_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __rmul__(self: _MaskedArrayFloat_co, other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... + @overload + def __rmul__( + self: MaskedArray[Any, np.dtype[np.character] | np.dtypes.StringDType], + other: _ArrayLikeInt, + /, + ) -> MaskedArray[tuple[Any, ...], _DTypeT_co]: ... + @overload + def __rmul__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __rmul__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__truediv__` + @overload # type: ignore[override] + def __truediv__(self: _MaskedArrayInt_co | _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __truediv__(self: _MaskedArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __truediv__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __truediv__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __truediv__(self: _MaskedArray[floating], other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... + @overload + def __truediv__(self: _MaskedArrayFloat_co, other: _ArrayLike[floating], /) -> _MaskedArray[floating]: ... + @overload + def __truediv__(self: _MaskedArray[complexfloating], other: _ArrayLikeNumber_co, /) -> _MaskedArray[complexfloating]: ... 
+ @overload + def __truediv__(self: _MaskedArrayNumber_co, other: _ArrayLike[complexfloating], /) -> _MaskedArray[complexfloating]: ... + @overload + def __truediv__(self: _MaskedArray[inexact], other: _ArrayLikeNumber_co, /) -> _MaskedArray[inexact]: ... + @overload + def __truediv__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __truediv__(self: _MaskedArray[timedelta64], other: _ArrayLike[timedelta64], /) -> _MaskedArray[float64]: ... + @overload + def __truediv__(self: _MaskedArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __truediv__(self: _MaskedArray[timedelta64], other: _ArrayLikeFloat_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __truediv__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __truediv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__rtruediv__` + @overload # type: ignore[override] + def __rtruediv__(self: _MaskedArrayInt_co | _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __rtruediv__(self: _MaskedArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __rtruediv__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __rtruediv__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __rtruediv__(self: _MaskedArray[floating], other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... + @overload + def __rtruediv__(self: _MaskedArrayFloat_co, other: _ArrayLike[floating], /) -> _MaskedArray[floating]: ... + @overload + def __rtruediv__(self: _MaskedArray[complexfloating], other: _ArrayLikeNumber_co, /) -> _MaskedArray[complexfloating]: ... 
+ @overload + def __rtruediv__(self: _MaskedArrayNumber_co, other: _ArrayLike[complexfloating], /) -> _MaskedArray[complexfloating]: ... + @overload + def __rtruediv__(self: _MaskedArray[inexact], other: _ArrayLikeNumber_co, /) -> _MaskedArray[inexact]: ... + @overload + def __rtruediv__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __rtruediv__(self: _MaskedArray[timedelta64], other: _ArrayLike[timedelta64], /) -> _MaskedArray[float64]: ... + @overload + def __rtruediv__(self: _MaskedArray[integer | floating], other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... + @overload + def __rtruediv__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __rtruediv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__floordiv__` + @overload # type: ignore[override] + def __floordiv__[ScalarT: _RealNumber]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... + @overload + def __floordiv__[ScalarT: _RealNumber](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... + @overload + def __floordiv__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... + @overload + def __floordiv__[ScalarT: _RealNumber]( + self: _MaskedArray[np.bool], + other: _ArrayLike[ScalarT], + /, + ) -> _MaskedArray[ScalarT]: ... + @overload + def __floordiv__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __floordiv__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __floordiv__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... + @overload + def __floordiv__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... 
+ @overload + def __floordiv__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... + @overload + def __floordiv__(self: _MaskedArray[timedelta64], other: _ArrayLike[timedelta64], /) -> _MaskedArray[int64]: ... + @overload + def __floordiv__(self: _MaskedArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __floordiv__(self: _MaskedArray[timedelta64], other: _ArrayLikeFloat_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __floordiv__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __floordiv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__rfloordiv__` + @overload # type: ignore[override] + def __rfloordiv__[ScalarT: _RealNumber]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... + @overload + def __rfloordiv__[ScalarT: _RealNumber]( + self: _MaskedArray[ScalarT], + other: _ArrayLikeBool_co, + /, + ) -> _MaskedArray[ScalarT]: ... + @overload + def __rfloordiv__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... + @overload + def __rfloordiv__[ScalarT: _RealNumber]( + self: _MaskedArray[np.bool], + other: _ArrayLike[ScalarT], + /, + ) -> _MaskedArray[ScalarT]: ... + @overload + def __rfloordiv__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __rfloordiv__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __rfloordiv__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... + @overload + def __rfloordiv__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... + @overload + def __rfloordiv__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... 
+ @overload + def __rfloordiv__(self: _MaskedArray[timedelta64], other: _ArrayLike[timedelta64], /) -> _MaskedArray[int64]: ... + @overload + def __rfloordiv__(self: _MaskedArray[floating | integer], other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... + @overload + def __rfloordiv__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __rfloordiv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__pow__` (minus the `mod` parameter) + @overload # type: ignore[override] + def __pow__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... + @overload + def __pow__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... + @overload + def __pow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... + @overload + def __pow__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... + @overload + def __pow__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __pow__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __pow__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __pow__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __pow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... + @overload + def __pow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... + @overload + def __pow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... 
+ @overload + def __pow__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... + @overload + def __pow__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __pow__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __pow__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__rpow__` (minus the `mod` parameter) + @overload # type: ignore[override] + def __rpow__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... + @overload + def __rpow__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... + @overload + def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... + @overload + def __rpow__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... + @overload + def __rpow__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __rpow__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __rpow__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __rpow__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __rpow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... + @overload + def __rpow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... + @overload + def __rpow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... 
+ @overload + def __rpow__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... + @overload + def __rpow__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __rpow__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __rpow__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # @property # type: ignore[misc] - def real(self): ... - get_real: Any - def count(self, axis=..., keepdims=...): ... - def ravel(self, order=...): ... - def reshape(self, *s, **kwargs): ... - def resize(self, newshape, refcheck=..., order=...): ... - def put(self, indices, values, mode=...): ... - def ids(self): ... - def iscontiguous(self): ... - def all(self, axis=..., out=..., keepdims=...): ... - def any(self, axis=..., out=..., keepdims=...): ... - def nonzero(self): ... - def trace(self, offset=..., axis1=..., axis2=..., dtype=..., out=...): ... - def dot(self, b, out=..., strict=...): ... - def sum(self, axis=..., dtype=..., out=..., keepdims=...): ... - def cumsum(self, axis=..., dtype=..., out=...): ... - def prod(self, axis=..., dtype=..., out=..., keepdims=...): ... - product: Any - def cumprod(self, axis=..., dtype=..., out=...): ... - def mean(self, axis=..., dtype=..., out=..., keepdims=...): ... - def anom(self, axis=..., dtype=...): ... - def var(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... - def std(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... - def round(self, decimals=..., out=...): ... - def argsort(self, axis=..., kind=..., order=..., endwith=..., fill_value=..., stable=...): ... - def argmin(self, axis=..., fill_value=..., out=..., *, keepdims=...): ... - def argmax(self, axis=..., fill_value=..., out=..., *, keepdims=...): ... - def sort(self, axis=..., kind=..., order=..., endwith=..., fill_value=..., stable=...): ... 
- def min(self, axis=..., out=..., fill_value=..., keepdims=...): ... - # NOTE: deprecated - # def tostring(self, fill_value=..., order=...): ... - def max(self, axis=..., out=..., fill_value=..., keepdims=...): ... - def ptp(self, axis=..., out=..., fill_value=..., keepdims=...): ... - def partition(self, *args, **kwargs): ... - def argpartition(self, *args, **kwargs): ... - def take(self, indices, axis=..., out=..., mode=...): ... - copy: Any - diagonal: Any - flatten: Any - repeat: Any - squeeze: Any - swapaxes: Any - T: Any - transpose: Any + def imag[ScalarT: np.generic]( # type: ignore[override] + self: _HasDTypeWithRealAndImag[object, ScalarT], + /, + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... + def get_imag[ScalarT: np.generic]( + self: _HasDTypeWithRealAndImag[object, ScalarT], + /, + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... + + # @property # type: ignore[misc] - def mT(self): ... - def tolist(self, fill_value=...): ... - def tobytes(self, fill_value=..., order=...): ... - def tofile(self, fid, sep=..., format=...): ... - def toflex(self): ... - torecords: Any - def __reduce__(self): ... - def __deepcopy__(self, memo=...): ... - -class mvoid(MaskedArray[_ShapeType, _DType_co]): - def __new__( + def real[ScalarT: np.generic]( # type: ignore[override] + self: _HasDTypeWithRealAndImag[ScalarT, object], + /, + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... + def get_real[ScalarT: np.generic]( + self: _HasDTypeWithRealAndImag[ScalarT, object], + /, + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... + + # keep in sync with `np.ma.count` + @overload + def count(self, axis: None = None, keepdims: Literal[False] | _NoValueType = ...) -> int: ... + @overload + def count(self, axis: _ShapeLike, keepdims: bool | _NoValueType = ...) -> NDArray[int_]: ... + @overload + def count(self, axis: _ShapeLike | None = None, *, keepdims: Literal[True]) -> NDArray[int_]: ... 
+ @overload + def count(self, axis: _ShapeLike | None, keepdims: Literal[True]) -> NDArray[int_]: ... + + # Keep in sync with `ndarray.reshape` + # NOTE: reshape also accepts negative integers, so we can't use integer literals + @overload # (None) + def reshape(self, shape: None, /, *, order: _OrderACF = "C", copy: bool | None = None) -> Self: ... + @overload # (empty_sequence) + def reshape( + self, + shape: Sequence[Never], + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[()], _DTypeT_co]: ... + @overload # (() | (int) | (int, int) | ....) # up to 8-d + def reshape[ShapeT: _Shape]( + self, + shape: ShapeT, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[ShapeT, _DTypeT_co]: ... + @overload # (index) + def reshape( + self, + size1: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[int], _DTypeT_co]: ... + @overload # (index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[int, int], _DTypeT_co]: ... + @overload # (index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[int, int, int], _DTypeT_co]: ... + @overload # (index, index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + size4: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[int, int, int, int], _DTypeT_co]: ... + @overload # (int, *(index, ...)) + def reshape( + self, + size0: SupportsIndex, + /, + *shape: SupportsIndex, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... 
+ @overload # (sequence[index]) + def reshape( + self, + shape: Sequence[SupportsIndex], + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + + def resize(self, newshape: Never, refcheck: bool = True, order: bool = False) -> NoReturn: ... # type: ignore[override] + def put(self, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... + def ids(self) -> tuple[int, int]: ... + def iscontiguous(self) -> bool: ... + + # Keep in sync with `ma.core.all` + @overload # type: ignore[override] + def all( + self, + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + ) -> bool_: ... + @overload + def all( + self, + axis: _ShapeLike | None = None, + out: None = None, + *, + keepdims: Literal[True], + ) -> _MaskedArray[bool_]: ... + @overload + def all( + self, + axis: _ShapeLike | None, + out: None, + keepdims: Literal[True], + ) -> _MaskedArray[bool_]: ... + @overload + def all( + self, + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> bool_ | _MaskedArray[bool_]: ... + @overload + def all[ArrayT: np.ndarray]( + self, + axis: _ShapeLike | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> ArrayT: ... + @overload + def all[ArrayT: np.ndarray]( + self, + axis: _ShapeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> ArrayT: ... + + # Keep in sync with `ma.core.any` + @overload # type: ignore[override] + def any( + self, + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + ) -> bool_: ... + @overload + def any( + self, + axis: _ShapeLike | None = None, + out: None = None, + *, + keepdims: Literal[True], + ) -> _MaskedArray[bool_]: ... + @overload + def any( + self, + axis: _ShapeLike | None, + out: None, + keepdims: Literal[True], + ) -> _MaskedArray[bool_]: ... 
+ @overload + def any( + self, + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> bool_ | _MaskedArray[bool_]: ... + @overload + def any[ArrayT: np.ndarray]( + self, + axis: _ShapeLike | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> ArrayT: ... + @overload + def any[ArrayT: np.ndarray]( + self, + axis: _ShapeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> ArrayT: ... + + # Keep in sync with `ndarray.trace` and `ma.core.trace` + @overload + def trace( + self, # >= 2D MaskedArray + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + out: None = None, + ) -> Any: ... + @overload + def trace[ArrayT: np.ndarray]( + self, # >= 2D MaskedArray + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ) -> ArrayT: ... + @overload + def trace[ArrayT: np.ndarray]( + self, # >= 2D MaskedArray + offset: SupportsIndex, + axis1: SupportsIndex, + axis2: SupportsIndex, + dtype: DTypeLike | None, + out: ArrayT, + ) -> ArrayT: ... + + # This differs from `ndarray.dot`, in that 1D dot 1D returns a 0D array. + @overload + def dot(self, b: ArrayLike, out: None = None, strict: bool = False) -> _MaskedArray[Any]: ... + @overload + def dot[ArrayT: np.ndarray](self, b: ArrayLike, out: ArrayT, strict: bool = False) -> ArrayT: ... + + # Keep in sync with `ma.core.sum` + @overload # type: ignore[override] + def sum( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> Any: ... + @overload + def sum[ArrayT: np.ndarray]( + self, + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> ArrayT: ... 
+ @overload + def sum[ArrayT: np.ndarray]( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> ArrayT: ... + + # Keep in sync with `ndarray.cumsum` and `ma.core.cumsum` + @overload # out: None (default) + def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> MaskedArray: ... + @overload # out: ndarray + def cumsum[ArrayT: np.ndarray](self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: ArrayT) -> ArrayT: ... + @overload + def cumsum[ArrayT: np.ndarray]( self, - data, - mask=..., - dtype=..., - fill_value=..., - hardmask=..., - copy=..., - subok=..., - ): ... - def __getitem__(self, indx): ... - def __setitem__(self, indx, value): ... - def __iter__(self): ... - def __len__(self): ... - def filled(self, fill_value=...): ... - def tolist(self): ... - -def isMaskedArray(x): ... -isarray = isMaskedArray -isMA = isMaskedArray + /, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ) -> ArrayT: ... + + # Keep in sync with `ma.core.prod` + @overload # type: ignore[override] + def prod( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> Any: ... + @overload + def prod[ArrayT: np.ndarray]( + self, + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> ArrayT: ... + @overload + def prod[ArrayT: np.ndarray]( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> ArrayT: ... + + product = prod + + # Keep in sync with `ndarray.cumprod` and `ma.core.cumprod` + @overload # out: None (default) + def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> MaskedArray: ... 
+ @overload # out: ndarray + def cumprod[ArrayT: np.ndarray](self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: ArrayT) -> ArrayT: ... + @overload + def cumprod[ArrayT: np.ndarray]( + self, + /, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ) -> ArrayT: ... + + # Keep in sync with `ma.core.mean` + @overload # type: ignore[override] + def mean( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> Any: ... + @overload + def mean[ArrayT: np.ndarray]( + self, + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> ArrayT: ... + @overload + def mean[ArrayT: np.ndarray]( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> ArrayT: ... + + # keep roughly in sync with `ma.core.anom` + @overload + def anom(self, axis: SupportsIndex | None = None, dtype: None = None) -> Self: ... + @overload + def anom(self, axis: SupportsIndex | None = None, *, dtype: DTypeLike) -> MaskedArray[_ShapeT_co, np.dtype]: ... + @overload + def anom(self, axis: SupportsIndex | None, dtype: DTypeLike) -> MaskedArray[_ShapeT_co, np.dtype]: ... + + # keep in sync with `std` and `ma.core.var` + @overload # type: ignore[override] + def var( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> Any: ... + @overload + def var[ArrayT: np.ndarray]( + self, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> ArrayT: ... 
+ @overload + def var[ArrayT: np.ndarray]( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> ArrayT: ... + + # keep in sync with `var` and `ma.core.std` + @overload # type: ignore[override] + def std( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> Any: ... + @overload + def std[ArrayT: np.ndarray]( + self, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> ArrayT: ... + @overload + def std[ArrayT: np.ndarray]( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> ArrayT: ... + + # Keep in sync with `ndarray.round` + @overload # out=None (default) + def round(self, /, decimals: SupportsIndex = 0, out: None = None) -> Self: ... + @overload # out=ndarray + def round[ArrayT: np.ndarray](self, /, decimals: SupportsIndex, out: ArrayT) -> ArrayT: ... + @overload + def round[ArrayT: np.ndarray](self, /, decimals: SupportsIndex = 0, *, out: ArrayT) -> ArrayT: ... + + def argsort( # type: ignore[override] + self, + axis: SupportsIndex | _NoValueType = ..., + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: bool = False, + ) -> _MaskedArray[intp]: ... 
+ + # Keep in-sync with np.ma.argmin + @overload # type: ignore[override] + def argmin( # pyrefly: ignore[bad-param-name-override] + self, + axis: None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: Literal[False] | _NoValueType = ..., + ) -> intp: ... + @overload + def argmin( + self, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: bool | _NoValueType = ..., + ) -> Any: ... + @overload + def argmin[ArrayT: np.ndarray]( + self, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> ArrayT: ... + @overload + def argmin[ArrayT: np.ndarray]( + self, + axis: SupportsIndex | None, + fill_value: _ScalarLike_co | None, + out: ArrayT, + *, + keepdims: bool | _NoValueType = ..., + ) -> ArrayT: ... + + # Keep in-sync with np.ma.argmax + @overload # type: ignore[override] + def argmax( # pyrefly: ignore[bad-param-name-override] + self, + axis: None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: Literal[False] | _NoValueType = ..., + ) -> intp: ... + @overload + def argmax( + self, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: bool | _NoValueType = ..., + ) -> Any: ... + @overload + def argmax[ArrayT: np.ndarray]( + self, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> ArrayT: ... + @overload + def argmax[ArrayT: np.ndarray]( + self, + axis: SupportsIndex | None, + fill_value: _ScalarLike_co | None, + out: ArrayT, + *, + keepdims: bool | _NoValueType = ..., + ) -> ArrayT: ... 
+ + # + def sort( # type: ignore[override] + self, + axis: SupportsIndex = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool | None = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: Literal[False] | None = False, + ) -> None: ... + + # + @overload # type: ignore[override] + def min[ScalarT: np.generic]( + self: _MaskedArray[ScalarT], + axis: None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: Literal[False] | _NoValueType = ..., + ) -> ScalarT: ... + @overload + def min( + self, + axis: _ShapeLike | None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ... + ) -> Any: ... + @overload + def min[ArrayT: np.ndarray]( + self, + axis: _ShapeLike | None, + out: ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., + ) -> ArrayT: ... + @overload + def min[ArrayT: np.ndarray]( + self, + axis: _ShapeLike | None = None, + *, + out: ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., + ) -> ArrayT: ... + + # + @overload # type: ignore[override] + def max[ScalarT: np.generic]( + self: _MaskedArray[ScalarT], + axis: None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: Literal[False] | _NoValueType = ..., + ) -> ScalarT: ... + @overload + def max( + self, + axis: _ShapeLike | None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ... + ) -> Any: ... + @overload + def max[ArrayT: np.ndarray]( + self, + axis: _ShapeLike | None, + out: ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., + ) -> ArrayT: ... + @overload + def max[ArrayT: np.ndarray]( + self, + axis: _ShapeLike | None = None, + *, + out: ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., + ) -> ArrayT: ... 
+ + # + @overload + def ptp[ScalarT: np.generic]( + self: _MaskedArray[ScalarT], + axis: None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: Literal[False] = False, + ) -> ScalarT: ... + @overload + def ptp( + self, + axis: _ShapeLike | None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: bool = False, + ) -> Any: ... + @overload + def ptp[ArrayT: np.ndarray]( + self, + axis: _ShapeLike | None, + out: ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool = False, + ) -> ArrayT: ... + @overload + def ptp[ArrayT: np.ndarray]( + self, + axis: _ShapeLike | None = None, + *, + out: ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool = False, + ) -> ArrayT: ... + + # + @overload + def partition( + self, + /, + kth: _ArrayLikeInt, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: None = None + ) -> None: ... + @overload + def partition( + self: _MaskedArray[np.void], + /, + kth: _ArrayLikeInt, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, + ) -> None: ... + + # keep in sync with ndarray.argpartition + @override + @overload # axis: None + def argpartition( + self, + kth: _ArrayLikeInt, + /, + axis: None, + kind: _PartitionKind = "introselect", + order: None = None, + ) -> MaskedArray[tuple[int], np.dtype[intp]]: ... + @overload # axis: index (default) + def argpartition( + self, + kth: _ArrayLikeInt, + /, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: None = None, + ) -> MaskedArray[_ShapeT_co, np.dtype[intp]]: ... + @overload # void, axis: None + def argpartition( + self: _MaskedArray[np.void], + kth: _ArrayLikeInt, + /, + axis: None, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, + ) -> MaskedArray[tuple[int], np.dtype[intp]]: ... 
+ @overload # void, axis: index (default) + def argpartition( + self: _MaskedArray[np.void], + kth: _ArrayLikeInt, + /, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, + ) -> MaskedArray[_ShapeT_co, np.dtype[intp]]: ... + + # Keep in-sync with np.ma.take + @overload # type: ignore[override] + def take[ScalarT: np.generic]( + self: _MaskedArray[ScalarT], + indices: _IntLike_co, + axis: None = None, + out: None = None, + mode: _ModeKind = "raise" + ) -> ScalarT: ... + @overload + def take[ScalarT: np.generic]( + self: _MaskedArray[ScalarT], + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", + ) -> _MaskedArray[ScalarT]: ... + @overload + def take[ArrayT: np.ndarray]( + self, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None, + out: ArrayT, + mode: _ModeKind = "raise", + ) -> ArrayT: ... + @overload + def take[ArrayT: np.ndarray]( + self, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = None, + *, + out: ArrayT, + mode: _ModeKind = "raise", + ) -> ArrayT: ... + + # keep in sync with `ndarray.diagonal` + @override + def diagonal( + self, + /, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + + # keep in sync with `ndarray.repeat` + @override + @overload + def repeat( + self, + /, + repeats: _ArrayLikeInt_co, + axis: None = None, + ) -> MaskedArray[tuple[int], _DTypeT_co]: ... + @overload + def repeat( + self, + /, + repeats: _ArrayLikeInt_co, + axis: SupportsIndex, + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + + # keep in sync with `ndarray.flatten` and `ndarray.ravel` + @override + def flatten(self, /, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... + @override + def ravel(self, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... 
+ + # keep in sync with `ndarray.squeeze` + @override + def squeeze( + self, + /, + axis: SupportsIndex | tuple[SupportsIndex, ...] | None = None, + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + + # + def toflex(self) -> MaskedArray[_ShapeT_co, np.dtype[np.void]]: ... + def torecords(self) -> MaskedArray[_ShapeT_co, np.dtype[np.void]]: ... + + # + @override + def tobytes(self, /, fill_value: Incomplete | None = None, order: _OrderKACF = "C") -> bytes: ... # type: ignore[override] + + # keep in sync with `ndarray.tolist` + @override + @overload + def tolist[T](self: MaskedArray[tuple[Never], np.dtype[generic[T]]], /, fill_value: _ScalarLike_co | None = None) -> Any: ... + @overload + def tolist[T](self: MaskedArray[tuple[()], np.dtype[generic[T]]], /, fill_value: _ScalarLike_co | None = None) -> T: ... + @overload + def tolist[T](self: _Masked1D[np.generic[T]], /, fill_value: _ScalarLike_co | None = None) -> list[T]: ... + @overload + def tolist[T]( + self: MaskedArray[tuple[int, int], np.dtype[generic[T]]], + /, + fill_value: _ScalarLike_co | None = None, + ) -> list[list[T]]: ... + @overload + def tolist[T]( + self: MaskedArray[tuple[int, int, int], np.dtype[generic[T]]], + /, + fill_value: _ScalarLike_co | None = None, + ) -> list[list[list[T]]]: ... + @overload + def tolist(self, /, fill_value: _ScalarLike_co | None = None) -> Any: ... + + # NOTE: will raise `NotImplementedError` + @override + def tofile(self, /, fid: Never, sep: str = "", format: str = "%s") -> NoReturn: ... # type: ignore[override] + + # + @override + def __deepcopy__(self, memo: dict[int, Any] | None = None) -> Self: ... + + # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` + @property + def dtype(self) -> _DTypeT_co: ... + @dtype.setter + def dtype[DTypeT: np.dtype](self: MaskedArray[_AnyShape, DTypeT], dtype: DTypeT, /) -> None: ... 
+ +class mvoid(MaskedArray[_ShapeT_co, _DTypeT_co]): + def __new__( + cls, + /, + data: ArrayLike, + mask: _ArrayLikeBool_co = nomask, + dtype: DTypeLike | None = None, + fill_value: _FillValue = None, + hardmask: bool = False, + copy: bool = False, + subok: bool = True, + ) -> Self: ... + @override + def __getitem__(self, indx: _ToIndices, /) -> Incomplete: ... # type: ignore[override] + @override + def __setitem__(self, indx: _ToIndices, value: ArrayLike, /) -> None: ... # type: ignore[override] + @override + def __iter__[ScalarT: np.generic](self: mvoid[Any, np.dtype[ScalarT]], /) -> Iterator[MaskedConstant | ScalarT]: ... + @override + def __len__(self, /) -> int: ... + @override + def filled(self, /, fill_value: _ScalarLike_co | None = None) -> Self | np.void: ... # type: ignore[override] + @override # list or tuple + def tolist(self) -> Sequence[Incomplete]: ... # type: ignore[override] + +def isMaskedArray(x: object) -> TypeIs[MaskedArray]: ... +def isarray(x: object) -> TypeIs[MaskedArray]: ... # alias to isMaskedArray +def isMA(x: object) -> TypeIs[MaskedArray]: ... # alias to isMaskedArray # 0D float64 array -class MaskedConstant(MaskedArray[Any, dtype[float64]]): - def __new__(cls): ... - __class__: Any - def __array_finalize__(self, obj): ... - def __array_wrap__(self, obj, context=..., return_scalar=...): ... - def __format__(self, format_spec): ... - def __reduce__(self): ... - def __iop__(self, other): ... - __iadd__: Any - __isub__: Any - __imul__: Any - __ifloordiv__: Any - __itruediv__: Any - __ipow__: Any - def copy(self, *args, **kwargs): ... - def __copy__(self): ... - def __deepcopy__(self, memo): ... - def __setattr__(self, attr, value): ... - -masked: MaskedConstant -masked_singleton: MaskedConstant -masked_array = MaskedArray - -def array( - data, - dtype=..., - copy=..., - order=..., - mask=..., - fill_value=..., - keep_mask=..., - hard_mask=..., - shrink=..., - subok=..., - ndmin=..., -): ... -def is_masked(x): ... 
- -class _extrema_operation(_MaskedUFunc): - compare: Any - fill_value_func: Any - def __init__(self, ufunc, compare, fill_value): ... - # NOTE: in practice `b` has a default value, but users should - # explicitly provide a value here as the default is deprecated - def __call__(self, a, b): ... - def reduce(self, target, axis=...): ... - def outer(self, a, b): ... - -def min(obj, axis=..., out=..., fill_value=..., keepdims=...): ... -def max(obj, axis=..., out=..., fill_value=..., keepdims=...): ... -def ptp(obj, axis=..., out=..., fill_value=..., keepdims=...): ... - -class _frommethod: - __name__: Any - __doc__: Any - reversed: Any - def __init__(self, methodname, reversed=...): ... - def getdoc(self): ... - def __call__(self, a, *args, **params): ... - -all: _frommethod -anomalies: _frommethod -anom: _frommethod -any: _frommethod -compress: _frommethod -cumprod: _frommethod -cumsum: _frommethod -copy: _frommethod -diagonal: _frommethod -harden_mask: _frommethod -ids: _frommethod -mean: _frommethod -nonzero: _frommethod -prod: _frommethod -product: _frommethod -ravel: _frommethod -repeat: _frommethod -soften_mask: _frommethod -std: _frommethod -sum: _frommethod -swapaxes: _frommethod -trace: _frommethod -var: _frommethod -count: _frommethod -argmin: _frommethod -argmax: _frommethod - -minimum: _extrema_operation -maximum: _extrema_operation - -def take(a, indices, axis=..., out=..., mode=...): ... -def power(a, b, third=...): ... -def argsort(a, axis=..., kind=..., order=..., endwith=..., fill_value=..., stable=...): ... -def sort(a, axis=..., kind=..., order=..., endwith=..., fill_value=..., stable=...): ... -def compressed(x): ... -def concatenate(arrays, axis=...): ... -def diag(v, k=...): ... -def left_shift(a, n): ... -def right_shift(a, n): ... -def put(a, indices, values, mode=...): ... -def putmask(a, mask, values): ... -def transpose(a, axes=...): ... -def reshape(a, new_shape, order=...): ... -def resize(x, new_shape): ... -def ndim(obj): ... 
-def shape(obj): ... -def size(obj, axis=...): ... -def diff(a, /, n=..., axis=..., prepend=..., append=...): ... -def where(condition, x=..., y=...): ... -def choose(indices, choices, out=..., mode=...): ... -def round(a, decimals=..., out=...): ... - -def inner(a, b): ... +class MaskedConstant(MaskedArray[tuple[()], dtype[float64]]): + def __new__(cls) -> Self: ... + + # these overrides are no-ops + @override + def __iadd__(self, other: _Ignored, /) -> Self: ... # type: ignore[override] + @override + def __isub__(self, other: _Ignored, /) -> Self: ... # type: ignore[override] + @override + def __imul__(self, other: _Ignored, /) -> Self: ... # type: ignore[override] + @override + def __ifloordiv__(self, other: _Ignored, /) -> Self: ... + @override + def __itruediv__(self, other: _Ignored, /) -> Self: ... # type: ignore[override] + @override + def __ipow__(self, other: _Ignored, /) -> Self: ... # type: ignore[override] + @override + def __deepcopy__(self, /, memo: _Ignored) -> Self: ... # type: ignore[override] + @override + def copy(self, /, *args: _Ignored, **kwargs: _Ignored) -> Self: ... + +masked: Final[MaskedConstant] = ... +masked_singleton: Final[MaskedConstant] = ... + +type masked_array = MaskedArray + +# keep in sync with `MaskedArray.__new__` +@overload +def array[ScalarT: np.generic]( + data: _ArrayLike[ScalarT], + dtype: None = None, + copy: bool = False, + order: _OrderKACF | None = None, + mask: _ArrayLikeBool_co = nomask, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool = False, + shrink: bool = True, + subok: bool = True, + ndmin: int = 0, +) -> _MaskedArray[ScalarT]: ... 
+@overload +def array[ScalarT: np.generic]( + data: object, + dtype: _DTypeLike[ScalarT], + copy: bool = False, + order: _OrderKACF | None = None, + mask: _ArrayLikeBool_co = nomask, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool = False, + shrink: bool = True, + subok: bool = True, + ndmin: int = 0, +) -> _MaskedArray[ScalarT]: ... +@overload +def array[ScalarT: np.generic]( + data: object, + dtype: DTypeLike | None = None, + copy: bool = False, + order: _OrderKACF | None = None, + mask: _ArrayLikeBool_co = nomask, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool = False, + shrink: bool = True, + subok: bool = True, + ndmin: int = 0, +) -> _MaskedArray[ScalarT]: ... + +# keep in sync with `array` +@overload +def asarray[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + dtype: None = None, + order: _OrderKACF | None = None, +) -> _MaskedArray[ScalarT]: ... +@overload +def asarray[ScalarT: np.generic]( + a: object, + dtype: _DTypeLike[ScalarT], + order: _OrderKACF | None = None, +) -> _MaskedArray[ScalarT]: ... +@overload +def asarray[ScalarT: np.generic]( + a: object, + dtype: DTypeLike | None = None, + order: _OrderKACF | None = None, +) -> _MaskedArray[ScalarT]: ... + +# keep in sync with `asarray` (but note the additional first overload) +@overload +def asanyarray[MArrayT: MaskedArray](a: MArrayT, dtype: None = None, order: _OrderKACF | None = None) -> MArrayT: ... +@overload +def asanyarray[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + dtype: None = None, + order: _OrderKACF | None = None, +) -> _MaskedArray[ScalarT]: ... +@overload +def asanyarray[ScalarT: np.generic]( + a: object, + dtype: _DTypeLike[ScalarT], + order: _OrderKACF | None = None, +) -> _MaskedArray[ScalarT]: ... +@overload +def asanyarray[ScalarT: np.generic]( + a: object, + dtype: DTypeLike | None = None, + order: _OrderKACF | None = None, +) -> _MaskedArray[ScalarT]: ... 
+ +# +def is_masked(x: object) -> bool: ... + +@overload +def min[ScalarT: np.generic]( + obj: _ArrayLike[ScalarT], + axis: None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> ScalarT: ... +@overload +def min( + obj: ArrayLike, + axis: _ShapeLike | None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ... +) -> Any: ... +@overload +def min[ArrayT: np.ndarray]( + obj: ArrayLike, + axis: _ShapeLike | None, + out: ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... +@overload +def min[ArrayT: np.ndarray]( + obj: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... + +@overload +def max[ScalarT: np.generic]( + obj: _ArrayLike[ScalarT], + axis: None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> ScalarT: ... +@overload +def max( + obj: ArrayLike, + axis: _ShapeLike | None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ... +) -> Any: ... +@overload +def max[ArrayT: np.ndarray]( + obj: ArrayLike, + axis: _ShapeLike | None, + out: ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... +@overload +def max[ArrayT: np.ndarray]( + obj: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... + +@overload +def ptp[ScalarT: np.generic]( + obj: _ArrayLike[ScalarT], + axis: None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> ScalarT: ... 
+@overload +def ptp( + obj: ArrayLike, + axis: _ShapeLike | None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ... +) -> Any: ... +@overload +def ptp[ArrayT: np.ndarray]( + obj: ArrayLike, + axis: _ShapeLike | None, + out: ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... +@overload +def ptp[ArrayT: np.ndarray]( + obj: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... + +# we cannot meaningfully annotate `frommethod` further, because the callable signature +# of the return type fully depends on the *value* of `methodname` and `reversed` in +# a way that cannot be expressed in the Python type system. +def _frommethod(methodname: str, reversed: bool = False) -> types.FunctionType: ... + +# NOTE: The following `*_mask` functions will accept any array-like input runtime, but +# since their use-cases are specific to masks, they only accept `MaskedArray` inputs. + +# keep in sync with `MaskedArray.harden_mask` +def harden_mask[MArrayT: MaskedArray](a: MArrayT) -> MArrayT: ... +# keep in sync with `MaskedArray.soften_mask` +def soften_mask[MArrayT: MaskedArray](a: MArrayT) -> MArrayT: ... +# keep in sync with `MaskedArray.shrink_mask` +def shrink_mask[MArrayT: MaskedArray](a: MArrayT) -> MArrayT: ... + +# keep in sync with `MaskedArray.ids` +def ids(a: ArrayLike) -> tuple[int, int]: ... + +# keep in sync with `ndarray.nonzero` +def nonzero(a: ArrayLike) -> tuple[_Array1D[np.intp], ...]: ... + +# keep first overload in sync with `MaskedArray.ravel` +@overload +def ravel[DTypeT: np.dtype](a: np.ndarray[Any, DTypeT], order: _OrderKACF = "C") -> MaskedArray[tuple[int], DTypeT]: ... +@overload +def ravel[ScalarT: np.generic](a: _ArrayLike[ScalarT], order: _OrderKACF = "C") -> _Masked1D[ScalarT]: ... 
+@overload +def ravel(a: ArrayLike, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... + +# keep roughly in sync with `lib._function_base_impl.copy` +@overload +def copy[MArrayT: MaskedArray](a: MArrayT, order: _OrderKACF = "C") -> MArrayT: ... +@overload +def copy[ShapeT: _Shape, DTypeT: np.dtype]( + a: np.ndarray[ShapeT, DTypeT], + order: _OrderKACF = "C", +) -> MaskedArray[ShapeT, DTypeT]: ... +@overload +def copy[ScalarT: np.generic](a: _ArrayLike[ScalarT], order: _OrderKACF = "C") -> _MaskedArray[ScalarT]: ... +@overload +def copy(a: ArrayLike, order: _OrderKACF = "C") -> _MaskedArray[Incomplete]: ... + +# keep in sync with `_core.fromnumeric.diagonal` +@overload +def diagonal[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, +) -> NDArray[ScalarT]: ... +@overload +def diagonal( + a: ArrayLike, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, +) -> NDArray[Incomplete]: ... + +# keep in sync with `_core.fromnumeric.repeat` +@overload +def repeat[ScalarT: np.generic](a: _ArrayLike[ScalarT], repeats: _ArrayLikeInt_co, axis: None = None) -> _Masked1D[ScalarT]: ... +@overload +def repeat[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + repeats: _ArrayLikeInt_co, + axis: SupportsIndex, +) -> _MaskedArray[ScalarT]: ... +@overload +def repeat(a: ArrayLike, repeats: _ArrayLikeInt_co, axis: None = None) -> _Masked1D[Incomplete]: ... +@overload +def repeat(a: ArrayLike, repeats: _ArrayLikeInt_co, axis: SupportsIndex) -> _MaskedArray[Incomplete]: ... + +# keep in sync with `_core.fromnumeric.swapaxes` +@overload +def swapaxes[MArrayT: MaskedArray](a: MArrayT, axis1: SupportsIndex, axis2: SupportsIndex) -> MArrayT: ... +@overload +def swapaxes[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axis1: SupportsIndex, + axis2: SupportsIndex, +) -> _MaskedArray[ScalarT]: ... 
+@overload +def swapaxes(a: ArrayLike, axis1: SupportsIndex, axis2: SupportsIndex) -> _MaskedArray[Incomplete]: ... + +# NOTE: The `MaskedArray.anom` definition is specific to `MaskedArray`, so we need +# additional overloads to cover the array-like input here. +@overload # a: MaskedArray, dtype=None +def anom[MArrayT: MaskedArray](a: MArrayT, axis: SupportsIndex | None = None, dtype: None = None) -> MArrayT: ... +@overload # a: array-like, dtype=None +def anom[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axis: SupportsIndex | None = None, + dtype: None = None, +) -> _MaskedArray[ScalarT]: ... +@overload # a: unknown array-like, dtype: dtype-like (positional) +def anom[ScalarT: np.generic](a: ArrayLike, axis: SupportsIndex | None, dtype: _DTypeLike[ScalarT]) -> _MaskedArray[ScalarT]: ... +@overload # a: unknown array-like, dtype: dtype-like (keyword) +def anom[ScalarT: np.generic]( + a: ArrayLike, + axis: SupportsIndex | None = None, + *, + dtype: _DTypeLike[ScalarT], +) -> _MaskedArray[ScalarT]: ... +@overload # a: unknown array-like, dtype: unknown dtype-like (positional) +def anom(a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike) -> _MaskedArray[Incomplete]: ... +@overload # a: unknown array-like, dtype: unknown dtype-like (keyword) +def anom(a: ArrayLike, axis: SupportsIndex | None = None, *, dtype: DTypeLike) -> _MaskedArray[Incomplete]: ... + +anomalies = anom + +# Keep in sync with `any` and `MaskedArray.all` +@overload +def all(a: ArrayLike, axis: None = None, out: None = None, keepdims: Literal[False] | _NoValueType = ...) -> np.bool: ... +@overload +def all(a: ArrayLike, axis: _ShapeLike | None, out: None, keepdims: Literal[True]) -> _MaskedArray[np.bool]: ... +@overload +def all(a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, *, keepdims: Literal[True]) -> _MaskedArray[np.bool]: ... 
+@overload +def all( + a: ArrayLike, + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., +) -> np.bool | _MaskedArray[np.bool]: ... +@overload +def all[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... +@overload +def all[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... + +# Keep in sync with `all` and `MaskedArray.any` +@overload +def any(a: ArrayLike, axis: None = None, out: None = None, keepdims: Literal[False] | _NoValueType = ...) -> np.bool: ... +@overload +def any(a: ArrayLike, axis: _ShapeLike | None, out: None, keepdims: Literal[True]) -> _MaskedArray[np.bool]: ... +@overload +def any(a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, *, keepdims: Literal[True]) -> _MaskedArray[np.bool]: ... +@overload +def any( + a: ArrayLike, + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., +) -> np.bool | _MaskedArray[np.bool]: ... +@overload +def any[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... +@overload +def any[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: ArrayT, keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... + +# NOTE: The `MaskedArray.compress` definition uses its `DTypeT_co` type parameter, +# which wouldn't work here for array-like inputs, so we need additional overloads. +@overload +def compress[ScalarT: np.generic]( + condition: _ArrayLikeBool_co, + a: _ArrayLike[ScalarT], + axis: None = None, + out: None = None, +) -> _Masked1D[ScalarT]: ... +@overload +def compress[ScalarT: np.generic]( + condition: _ArrayLikeBool_co, + a: _ArrayLike[ScalarT], + axis: _ShapeLike | None = None, + out: None = None, +) -> _MaskedArray[ScalarT]: ... 
+@overload
+def compress(condition: _ArrayLikeBool_co, a: ArrayLike, axis: None = None, out: None = None) -> _Masked1D[Incomplete]: ...
+@overload
+def compress(
+    condition: _ArrayLikeBool_co,
+    a: ArrayLike,
+    axis: _ShapeLike | None = None,
+    out: None = None,
+) -> _MaskedArray[Incomplete]: ...
+@overload
+def compress[ArrayT: np.ndarray](condition: _ArrayLikeBool_co, a: ArrayLike, axis: _ShapeLike | None, out: ArrayT) -> ArrayT: ...
+@overload
+def compress[ArrayT: np.ndarray](
+    condition: _ArrayLikeBool_co,
+    a: ArrayLike,
+    axis: _ShapeLike | None = None,
+    *,
+    out: ArrayT,
+) -> ArrayT: ...
+
+# Keep in sync with `cumprod` and `MaskedArray.cumsum`
+@overload  # out: None (default)
+def cumsum(
+    a: ArrayLike,
+    axis: SupportsIndex | None = None,
+    dtype: DTypeLike | None = None,
+    out: None = None,
+) -> _MaskedArray[Incomplete]: ...
+@overload  # out: ndarray (positional)
+def cumsum[ArrayT: np.ndarray](a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike | None, out: ArrayT) -> ArrayT: ...
+@overload  # out: ndarray (kwarg)
+def cumsum[ArrayT: np.ndarray](
+    a: ArrayLike,
+    axis: SupportsIndex | None = None,
+    dtype: DTypeLike | None = None,
+    *,
+    out: ArrayT,
+) -> ArrayT: ...
+
+# Keep in sync with `cumsum` and `MaskedArray.cumprod`
+@overload  # out: None (default)
+def cumprod(
+    a: ArrayLike,
+    axis: SupportsIndex | None = None,
+    dtype: DTypeLike | None = None,
+    out: None = None,
+) -> _MaskedArray[Incomplete]: ...
+@overload  # out: ndarray (positional)
+def cumprod[ArrayT: np.ndarray](a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike | None, out: ArrayT) -> ArrayT: ...
+@overload  # out: ndarray (kwarg)
+def cumprod[ArrayT: np.ndarray](
+    a: ArrayLike,
+    axis: SupportsIndex | None = None,
+    dtype: DTypeLike | None = None,
+    *,
+    out: ArrayT,
+) -> ArrayT: ...
+ +# Keep in sync with `sum`, `prod`, `product`, and `MaskedArray.mean` +@overload +def mean( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., +) -> Incomplete: ... +@overload +def mean[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... +@overload +def mean[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... + +# Keep in sync with `mean`, `prod`, `product`, and `MaskedArray.sum` +@overload +def sum( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., +) -> Incomplete: ... +@overload +def sum[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... +@overload +def sum[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... + +# Keep in sync with `product` and `MaskedArray.prod` +@overload +def prod( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., +) -> Incomplete: ... +@overload +def prod[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... +@overload +def prod[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... 
+
+# Keep in sync with `prod` and `MaskedArray.prod`
+@overload
+def product(
+    a: ArrayLike,
+    axis: _ShapeLike | None = None,
+    dtype: DTypeLike | None = None,
+    out: None = None,
+    keepdims: bool | _NoValueType = ...,
+) -> Incomplete: ...
+@overload
+def product[ArrayT: np.ndarray](
+    a: ArrayLike,
+    axis: _ShapeLike | None,
+    dtype: DTypeLike | None,
+    out: ArrayT,
+    keepdims: bool | _NoValueType = ...,
+) -> ArrayT: ...
+@overload
+def product[ArrayT: np.ndarray](
+    a: ArrayLike,
+    axis: _ShapeLike | None = None,
+    dtype: DTypeLike | None = None,
+    *,
+    out: ArrayT,
+    keepdims: bool | _NoValueType = ...,
+) -> ArrayT: ...
+
+# Keep in sync with `MaskedArray.trace` and `_core.fromnumeric.trace`
+@overload
+def trace(
+    a: ArrayLike,
+    offset: SupportsIndex = 0,
+    axis1: SupportsIndex = 0,
+    axis2: SupportsIndex = 1,
+    dtype: DTypeLike | None = None,
+    out: None = None,
+) -> Incomplete: ...
+@overload
+def trace[ArrayT: np.ndarray](
+    a: ArrayLike,
+    offset: SupportsIndex,
+    axis1: SupportsIndex,
+    axis2: SupportsIndex,
+    dtype: DTypeLike | None,
+    out: ArrayT,
+) -> ArrayT: ...
+@overload
+def trace[ArrayT: np.ndarray](
+    a: ArrayLike,
+    offset: SupportsIndex = 0,
+    axis1: SupportsIndex = 0,
+    axis2: SupportsIndex = 1,
+    dtype: DTypeLike | None = None,
+    *,
+    out: ArrayT,
+) -> ArrayT: ...
+
+# keep in sync with `var` and `MaskedArray.std`
+@overload
+def std(
+    a: ArrayLike,
+    axis: _ShapeLike | None = None,
+    dtype: DTypeLike | None = None,
+    out: None = None,
+    ddof: float = 0,
+    keepdims: bool | _NoValueType = ...,
+    mean: _ArrayLikeNumber_co | _NoValueType = ...,
+) -> Incomplete: ...
+@overload
+def std[ArrayT: np.ndarray](
+    a: ArrayLike,
+    axis: _ShapeLike | None,
+    dtype: DTypeLike | None,
+    out: ArrayT,
+    ddof: float = 0,
+    keepdims: bool | _NoValueType = ...,
+    mean: _ArrayLikeNumber_co | _NoValueType = ...,
+) -> ArrayT: ...
+@overload +def std[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> ArrayT: ... + +# keep in sync with `std` and `MaskedArray.var` +@overload +def var( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> Incomplete: ... +@overload +def var[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> ArrayT: ... +@overload +def var[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> ArrayT: ... + +# (a, b) +minimum: _extrema_operation = ... +maximum: _extrema_operation = ... + +# NOTE: this is a `_frommethod` instance at runtime +@overload +def count(a: ArrayLike, axis: None = None, keepdims: Literal[False] | _NoValueType = ...) -> int: ... +@overload +def count(a: ArrayLike, axis: _ShapeLike, keepdims: bool | _NoValueType = ...) -> NDArray[int_]: ... +@overload +def count(a: ArrayLike, axis: _ShapeLike | None = None, *, keepdims: Literal[True]) -> NDArray[int_]: ... +@overload +def count(a: ArrayLike, axis: _ShapeLike | None, keepdims: Literal[True]) -> NDArray[int_]: ... + +# NOTE: this is a `_frommethod` instance at runtime +@overload +def argmin( + a: ArrayLike, + axis: None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: Literal[False] | _NoValueType = ..., +) -> intp: ... 
+@overload +def argmin( + a: ArrayLike, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: bool | _NoValueType = ..., +) -> Any: ... +@overload +def argmin[ArrayT: np.ndarray]( + a: ArrayLike, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... +@overload +def argmin[ArrayT: np.ndarray]( + a: ArrayLike, + axis: SupportsIndex | None, + fill_value: _ScalarLike_co | None, + out: ArrayT, + *, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... + +# keep in sync with `argmin` +@overload +def argmax( + a: ArrayLike, + axis: None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: Literal[False] | _NoValueType = ..., +) -> intp: ... +@overload +def argmax( + a: ArrayLike, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: bool | _NoValueType = ..., +) -> Any: ... +@overload +def argmax[ArrayT: np.ndarray]( + a: ArrayLike, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... +@overload +def argmax[ArrayT: np.ndarray]( + a: ArrayLike, + axis: SupportsIndex | None, + fill_value: _ScalarLike_co | None, + out: ArrayT, + *, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... + +@overload +def take[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + indices: _IntLike_co, + axis: None = None, + out: None = None, + mode: _ModeKind = "raise", +) -> ScalarT: ... +@overload +def take[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", +) -> _MaskedArray[ScalarT]: ... 
+@overload +def take( + a: ArrayLike, + indices: _IntLike_co, + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", +) -> Any: ... +@overload +def take( + a: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", +) -> _MaskedArray[Any]: ... +@overload +def take[ArrayT: np.ndarray]( + a: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None, + out: ArrayT, + mode: _ModeKind = "raise", +) -> ArrayT: ... +@overload +def take[ArrayT: np.ndarray]( + a: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = None, + *, + out: ArrayT, + mode: _ModeKind = "raise", +) -> ArrayT: ... + +# +def power(a: ArrayLike, b: ArrayLike, third: None = None) -> _MaskedArray[Incomplete]: ... + +# +@overload # axis: (deprecated) +@deprecated( + "In the future the default for argsort will be axis=-1, not the current None, to match its documentation and np.argsort. " + "Explicitly pass -1 or None to silence this warning.", + category=MaskedArrayFutureWarning, + stacklevel=2, +) +def argsort( + a: ArrayLike, + axis: _NoValueType = ..., + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool | None = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: bool | None = None, +) -> _Array1D[np.intp]: ... +@overload # MaskedArray, axis: None +def argsort( + a: MaskedArray, + axis: None, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool | None = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: bool | None = None, +) -> _Masked1D[np.intp]: ... +@overload # MaskedArray, axis: int-like +def argsort( + a: MaskedArray, + axis: SupportsIndex, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool | None = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: bool | None = None, +) -> _MaskedArray[np.intp]: ... 
+@overload # array-like, axis: None +def argsort( + a: ArrayLike, + axis: None, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool | None = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: bool | None = None, +) -> _Array1D[np.intp]: ... +@overload # array-like, axis: int-like +def argsort( + a: ArrayLike, + axis: SupportsIndex, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool | None = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: bool | None = None, +) -> NDArray[np.intp]: ... + +# +@overload +def sort[ArrayT: np.ndarray]( + a: ArrayT, + axis: SupportsIndex = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool | None = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: Literal[False] | None = None, +) -> ArrayT: ... +@overload +def sort( + a: ArrayLike, + axis: SupportsIndex | None = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool | None = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: Literal[False] | None = None, +) -> NDArray[Any]: ... + +# +@overload +def compressed[ScalarT: np.generic](x: _ArrayLike[ScalarT]) -> _Array1D[ScalarT]: ... +@overload +def compressed(x: ArrayLike) -> _Array1D[Any]: ... + +# +@overload +def concatenate[ScalarT: np.generic](arrays: _ArrayLike[ScalarT], axis: SupportsIndex | None = 0) -> _MaskedArray[ScalarT]: ... +@overload +def concatenate(arrays: SupportsLenAndGetItem[ArrayLike], axis: SupportsIndex | None = 0) -> _MaskedArray[Incomplete]: ... + +# keep in sync with `diag` and `lib._twodim_base_impl.diag` +@overload +def diag[ScalarT: np.generic](v: _ArrayNoD[ScalarT] | Sequence[Sequence[ScalarT]], k: int = 0) -> _MaskedArray[ScalarT]: ... +@overload +def diag[ScalarT: np.generic](v: _Array2D[ScalarT] | Sequence[Sequence[ScalarT]], k: int = 0) -> _Masked1D[ScalarT]: ... 
+@overload +def diag[ScalarT: np.generic](v: _Array1D[ScalarT] | Sequence[ScalarT], k: int = 0) -> _Masked2D[ScalarT]: ... +@overload +def diag(v: Sequence[Sequence[_ScalarLike_co]], k: int = 0) -> _Masked1D[Incomplete]: ... +@overload +def diag(v: Sequence[_ScalarLike_co], k: int = 0) -> _Masked2D[Incomplete]: ... +@overload +def diag[ScalarT: np.generic](v: _ArrayLike[ScalarT], k: int = 0) -> _MaskedArray[ScalarT]: ... +@overload +def diag(v: ArrayLike, k: int = 0) -> _MaskedArray[Incomplete]: ... + +# keep in sync with `right_shift` +@overload +def left_shift[ShapeT: _Shape, ScalarT: np.bool | np.integer | np.object_]( + a: ndarray[ShapeT, np.dtype[ScalarT]], n: int +) -> MaskedArray[ShapeT, np.dtype[ScalarT]]: ... +@overload +def left_shift[ScalarT: np.bool | np.integer | np.object_](a: _ArrayLike[ScalarT], n: int) -> _MaskedArray[ScalarT]: ... +@overload +def left_shift(a: _ArrayLikeInt_co, n: int) -> _MaskedArray[Incomplete]: ... + +# keep in sync with `left_shift` +@overload +def right_shift[ShapeT: _Shape, ScalarT: np.bool | np.integer | np.object_]( + a: ndarray[ShapeT, np.dtype[ScalarT]], n: int +) -> MaskedArray[ShapeT, np.dtype[ScalarT]]: ... +@overload +def right_shift[ScalarT: np.bool | np.integer | np.object_](a: _ArrayLike[ScalarT], n: int) -> _MaskedArray[ScalarT]: ... +@overload +def right_shift(a: _ArrayLikeInt_co, n: int) -> _MaskedArray[Incomplete]: ... + +# keep in sync with `_core.fromnumeric.put` +def put(a: np.ndarray, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... + +# +def putmask(a: np.ndarray, mask: _ArrayLikeBool_co, values: ArrayLike) -> None: ... + +# keep in sync with `_core.fromnumeric.transpose` +@overload +def transpose[ScalarT: np.generic](a: _ArrayLike[ScalarT], axes: _ShapeLike | None = None) -> _MaskedArray[ScalarT]: ... +@overload +def transpose(a: ArrayLike, axes: _ShapeLike | None = None) -> _MaskedArray[Incomplete]: ... 
+ +# keep in sync with `_core.fromnumeric.reshape` +@overload # shape: index +def reshape[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], new_shape: SupportsIndex, order: _OrderACF = "C" +) -> _Masked1D[ScalarT]: ... +@overload # shape: ~ShapeT +def reshape[ScalarT: np.generic, ShapeT: _Shape]( + a: _ArrayLike[ScalarT], new_shape: ShapeT, order: _OrderACF = "C" +) -> MaskedArray[ShapeT, np.dtype[ScalarT]]: ... +@overload # shape: Sequence[index] +def reshape[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], new_shape: Sequence[SupportsIndex], order: _OrderACF = "C" +) -> _MaskedArray[ScalarT]: ... +@overload # shape: index +def reshape(a: ArrayLike, new_shape: SupportsIndex, order: _OrderACF = "C") -> _Masked1D[Incomplete]: ... +@overload # shape: ~ShapeT +def reshape[ShapeT: _Shape](a: ArrayLike, new_shape: ShapeT, order: _OrderACF = "C") -> MaskedArray[ShapeT]: ... +@overload # shape: Sequence[index] +def reshape(a: ArrayLike, new_shape: Sequence[SupportsIndex], order: _OrderACF = "C") -> _MaskedArray[Incomplete]: ... + +# keep in sync with `_core.fromnumeric.resize` +@overload +def resize[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], new_shape: SupportsIndex | tuple[SupportsIndex] +) -> _Masked1D[ScalarT]: ... +@overload +def resize[ScalarT: np.generic, ShapeT: _Shape]( + x: _ArrayLike[ScalarT], new_shape: ShapeT +) -> MaskedArray[ShapeT, np.dtype[ScalarT]]: ... +@overload +def resize[ScalarT: np.generic](x: _ArrayLike[ScalarT], new_shape: _ShapeLike) -> _MaskedArray[ScalarT]: ... +@overload +def resize(x: ArrayLike, new_shape: SupportsIndex | tuple[SupportsIndex]) -> _Masked1D[Incomplete]: ... +@overload +def resize[ShapeT: _Shape](x: ArrayLike, new_shape: ShapeT) -> MaskedArray[ShapeT]: ... +@overload +def resize(x: ArrayLike, new_shape: _ShapeLike) -> _MaskedArray[Incomplete]: ... + +# +def ndim(obj: ArrayLike) -> int: ... 
+ +# keep in sync with `_core.fromnumeric.shape` +@overload # this prevents `Any` from being returned with Pyright +def shape(obj: _HasShape[Never]) -> _AnyShape: ... +@overload +def shape[ShapeT: _Shape](obj: _HasShape[ShapeT]) -> ShapeT: ... +@overload +def shape(obj: _PyScalar) -> tuple[()]: ... +@overload # `collections.abc.Sequence` can't be used because `bytes` and `str` are assignable to it +def shape(obj: _PyArray[_PyScalar]) -> tuple[int]: ... +@overload +def shape(obj: _PyArray[_PyArray[_PyScalar]]) -> tuple[int, int]: ... +@overload # requires PEP 688 support +def shape(obj: memoryview | bytearray) -> tuple[int]: ... +@overload +def shape(obj: ArrayLike) -> _AnyShape: ... + +# +def size(obj: ArrayLike, axis: SupportsIndex | None = None) -> int: ... + +# keep in sync with `lib._function_base_impl.diff` +@overload # known array-type +def diff[MArrayT: _MaskedArray[np.inexact | np.timedelta64 | np.object_]]( + a: MArrayT, + /, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> MArrayT: ... +@overload # known shape, datetime64 +def diff[ShapeT: _Shape]( + a: MaskedArray[ShapeT, np.dtype[np.datetime64]], + /, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> MaskedArray[ShapeT, np.dtype[np.timedelta64]]: ... +@overload # unknown shape, known scalar-type +def diff[ScalarT: np.inexact | np.timedelta64 | np.object_]( + a: _ArrayLike[ScalarT], + /, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _MaskedArray[ScalarT]: ... +@overload # unknown shape, datetime64 +def diff( + a: _ArrayLike[np.datetime64], + /, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _MaskedArray[np.timedelta64]: ... 
+@overload # 1d int +def diff( + a: Sequence[int], + /, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Masked1D[np.int_]: ... +@overload # 2d int +def diff( + a: Sequence[Sequence[int]], + /, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Masked2D[np.int_]: ... +@overload # 1d float (the `list` avoids overlap with the `int` overloads) +def diff( + a: list[float], + /, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Masked1D[np.float64]: ... +@overload # 2d float +def diff( + a: Sequence[list[float]], + /, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Masked2D[np.float64]: ... +@overload # 1d complex (the `list` avoids overlap with the `int` overloads) +def diff( + a: list[complex], + /, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Masked1D[np.complex128]: ... +@overload # 2d complex +def diff( + a: Sequence[list[complex]], + /, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Masked2D[np.complex128]: ... +@overload # unknown shape, unknown scalar-type +def diff( + a: ArrayLike, + /, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _MaskedArray[Incomplete]: ... + +# keep in sync with `_core.multiarray.where` +@overload +def where(condition: ArrayLike, x: _NoValueType = ..., y: _NoValueType = ...) -> tuple[_MaskedArray[np.intp], ...]: ... +@overload +def where(condition: ArrayLike, x: ArrayLike, y: ArrayLike) -> _MaskedArray[Incomplete]: ... 
+ +# keep in sync with `_core.fromnumeric.choose` +@overload +def choose( + indices: _IntLike_co, + choices: ArrayLike, + out: None = None, + mode: _ModeKind = "raise", +) -> Any: ... +@overload +def choose[ScalarT: np.generic]( + indices: _ArrayLikeInt_co, + choices: _ArrayLike[ScalarT], + out: None = None, + mode: _ModeKind = "raise", +) -> _MaskedArray[ScalarT]: ... +@overload +def choose( + indices: _ArrayLikeInt_co, + choices: ArrayLike, + out: None = None, + mode: _ModeKind = "raise", +) -> _MaskedArray[Incomplete]: ... +@overload +def choose[ArrayT: np.ndarray]( + indices: _ArrayLikeInt_co, + choices: ArrayLike, + out: ArrayT, + mode: _ModeKind = "raise", +) -> ArrayT: ... + +# +@overload # a: masked_array, out: None (default) +def round[MArray: MaskedArray](a: MArray, decimals: int = 0, out: None = None) -> MArray: ... +@overload # a: known array-like, out: None (default) +def round[ScalarT: np.number](a: _ArrayLike[ScalarT], decimals: int = 0, out: None = None) -> _MaskedArray[ScalarT]: ... +@overload # a: unknown array-like, out: None (default) +def round(a: _ArrayLikeNumber_co, decimals: int = 0, out: None = None) -> _MaskedArray[Incomplete]: ... +@overload # out: ndarray (positional) +def round[ArrayT: np.ndarray](a: ArrayLike, decimals: int, out: ArrayT) -> ArrayT: ... +@overload # out: ndarray (keyword) +def round[ArrayT: np.ndarray](a: ArrayLike, decimals: int = 0, *, out: ArrayT) -> ArrayT: ... + +# +@overload # a: masked_array, out: None (default) +@deprecated("numpy.ma.round_ is deprecated. Use numpy.ma.round instead.") +def round_[MArray: MaskedArray](a: MArray, decimals: int = 0, out: None = None) -> MArray: ... +@overload # a: known array-like, out: None (default) +@deprecated("numpy.ma.round_ is deprecated. Use numpy.ma.round instead.") +def round_[ScalarT: np.number](a: _ArrayLike[ScalarT], decimals: int = 0, out: None = None) -> _MaskedArray[ScalarT]: ... 
+@overload # a: unknown array-like, out: None (default) +@deprecated("numpy.ma.round_ is deprecated. Use numpy.ma.round instead.") +def round_(a: _ArrayLikeNumber_co, decimals: int = 0, out: None = None) -> _MaskedArray[Incomplete]: ... +@overload # out: ndarray (positional) +@deprecated("numpy.ma.round_ is deprecated. Use numpy.ma.round instead.") +def round_[ArrayT: np.ndarray](a: ArrayLike, decimals: int, out: ArrayT) -> ArrayT: ... +@overload # out: ndarray (keyword) +@deprecated("numpy.ma.round_ is deprecated. Use numpy.ma.round instead.") +def round_[ArrayT: np.ndarray](a: ArrayLike, decimals: int = 0, *, out: ArrayT) -> ArrayT: ... + +# keep in sync with `_core.multiarray.inner` +def inner(a: ArrayLike, b: ArrayLike) -> Incomplete: ... + innerproduct = inner -def outer(a, b): ... +# NOTE: we ignore UP047 because inlining `_AnyScalarT` would result in a lot of code duplication + +# keep in sync with `_core.numeric.outer` +@overload +def outer(a: _ArrayLike[_AnyNumericScalarT], b: _ArrayLike[_AnyNumericScalarT]) -> _Masked2D[_AnyNumericScalarT]: ... # noqa: UP047 +@overload +def outer(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co) -> _Masked2D[np.bool]: ... +@overload +def outer(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co) -> _Masked2D[np.int_ | Any]: ... +@overload +def outer(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> _Masked2D[np.float64 | Any]: ... +@overload +def outer(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> _Masked2D[np.complex128 | Any]: ... +@overload +def outer(a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co) -> _Masked2D[np.timedelta64 | Any]: ... + outerproduct = outer -def correlate(a, v, mode=..., propagate_mask=...): ... -def convolve(a, v, mode=..., propagate_mask=...): ... -def allequal(a, b, fill_value=...): ... -def allclose(a, b, masked_equal=..., rtol=..., atol=...): ... -def asarray(a, dtype=..., order=...): ... -def asanyarray(a, dtype=...): ... -def fromflex(fxarray): ... 
- -class _convert2ma: - __doc__: Any - def __init__(self, funcname, params=...): ... - def getdoc(self): ... - def __call__(self, *args, **params): ... - -arange: _convert2ma -empty: _convert2ma -empty_like: _convert2ma -frombuffer: _convert2ma -fromfunction: _convert2ma -identity: _convert2ma -ones: _convert2ma -zeros: _convert2ma - -def append(a, b, axis=...): ... -def dot(a, b, strict=..., out=...): ... -def mask_rowcols(a, axis=...): ... +# keep in sync with `convolve` and `_core.numeric.correlate` +@overload +def correlate( # noqa: UP047 + a: _ArrayLike[_AnyNumericScalarT], + v: _ArrayLike[_AnyNumericScalarT], + mode: _CorrelateMode = "valid", + propagate_mask: bool = True, +) -> _Masked1D[_AnyNumericScalarT]: ... +@overload +def correlate( + a: _ArrayLikeBool_co, + v: _ArrayLikeBool_co, + mode: _CorrelateMode = "valid", + propagate_mask: bool = True, +) -> _Masked1D[np.bool]: ... +@overload +def correlate( + a: _ArrayLikeInt_co, + v: _ArrayLikeInt_co, + mode: _CorrelateMode = "valid", + propagate_mask: bool = True, +) -> _Masked1D[np.int_ | Any]: ... +@overload +def correlate( + a: _ArrayLikeFloat_co, + v: _ArrayLikeFloat_co, + mode: _CorrelateMode = "valid", + propagate_mask: bool = True, +) -> _Masked1D[np.float64 | Any]: ... +@overload +def correlate( + a: _ArrayLikeNumber_co, + v: _ArrayLikeNumber_co, + mode: _CorrelateMode = "valid", + propagate_mask: bool = True, +) -> _Masked1D[np.complex128 | Any]: ... +@overload +def correlate( + a: _ArrayLikeTD64_co, + v: _ArrayLikeTD64_co, + mode: _CorrelateMode = "valid", + propagate_mask: bool = True, +) -> _Masked1D[np.timedelta64 | Any]: ... + +# keep in sync with `correlate` and `_core.numeric.convolve` +@overload +def convolve( # noqa: UP047 + a: _ArrayLike[_AnyNumericScalarT], + v: _ArrayLike[_AnyNumericScalarT], + mode: _CorrelateMode = "full", + propagate_mask: bool = True, +) -> _Masked1D[_AnyNumericScalarT]: ... 
+@overload +def convolve( + a: _ArrayLikeBool_co, + v: _ArrayLikeBool_co, + mode: _CorrelateMode = "full", + propagate_mask: bool = True, +) -> _Masked1D[np.bool]: ... +@overload +def convolve( + a: _ArrayLikeInt_co, + v: _ArrayLikeInt_co, + mode: _CorrelateMode = "full", + propagate_mask: bool = True, +) -> _Masked1D[np.int_ | Any]: ... +@overload +def convolve( + a: _ArrayLikeFloat_co, + v: _ArrayLikeFloat_co, + mode: _CorrelateMode = "full", + propagate_mask: bool = True, +) -> _Masked1D[np.float64 | Any]: ... +@overload +def convolve( + a: _ArrayLikeNumber_co, + v: _ArrayLikeNumber_co, + mode: _CorrelateMode = "full", + propagate_mask: bool = True, +) -> _Masked1D[np.complex128 | Any]: ... +@overload +def convolve( + a: _ArrayLikeTD64_co, + v: _ArrayLikeTD64_co, + mode: _CorrelateMode = "full", + propagate_mask: bool = True, +) -> _Masked1D[np.timedelta64 | Any]: ... + +# +def allequal(a: ArrayLike, b: ArrayLike, fill_value: bool = True) -> bool: ... +def allclose(a: ArrayLike, b: ArrayLike, masked_equal: bool = True, rtol: float = 1e-5, atol: float = 1e-8) -> bool: ... + +# +def fromflex[ShapeT: _Shape](fxarray: np.ndarray[ShapeT, np.dtype[np.void]]) -> MaskedArray[ShapeT, np.dtype[Incomplete]]: ... + +# keep in sync with `lib._function_base_impl.append` +@overload # known array type, axis specified +def append[MArrayT: MaskedArray]( + a: MArrayT, + b: MArrayT, + axis: SupportsIndex, +) -> MArrayT: ... +@overload # 1d, known scalar type, axis specified +def append[ScalarT: np.generic]( + a: Sequence[ScalarT], + b: Sequence[ScalarT], + axis: SupportsIndex, +) -> _Masked1D[ScalarT]: ... +@overload # 2d, known scalar type, axis specified +def append[ScalarT: np.generic]( + a: _Seq2D[ScalarT], + b: _Seq2D[ScalarT], + axis: SupportsIndex, +) -> _Masked2D[ScalarT]: ... +@overload # 3d, known scalar type, axis specified +def append[ScalarT: np.generic]( + a: _Seq3D[ScalarT], + b: _Seq3D[ScalarT], + axis: SupportsIndex, +) -> _Masked3D[ScalarT]: ... 
+@overload # ?d, known scalar type, axis specified +def append[ScalarT: np.generic]( + a: _NestedSequence[ScalarT], + b: _NestedSequence[ScalarT], + axis: SupportsIndex, +) -> _MaskedArray[ScalarT]: ... +@overload # ?d, unknown scalar type, axis specified +def append( + a: np.ndarray | _NestedSequence[_ScalarLike_co], + b: _NestedSequence[_ScalarLike_co], + axis: SupportsIndex, +) -> _MaskedArray[Incomplete]: ... +@overload # known scalar type, axis=None +def append[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + b: _ArrayLike[ScalarT], + axis: None = None, +) -> _Masked1D[ScalarT]: ... +@overload # unknown scalar type, axis=None +def append( + a: ArrayLike, + b: ArrayLike, + axis: None = None, +) -> _Masked1D[Incomplete]: ... + +# keep in sync with `_core.multiarray.dot` +@overload +def dot(a: ArrayLike, b: ArrayLike, strict: bool = False, out: None = None) -> Incomplete: ... +@overload +def dot[OutT: np.ndarray](a: ArrayLike, b: ArrayLike, strict: bool = False, *, out: OutT) -> OutT: ... + +# internal wrapper functions for the functions below +def _convert2ma( + funcname: str, + np_ret: str, + np_ma_ret: str, + params: dict[str, Any] | None = None, +) -> Callable[..., Any]: ... + +# keep in sync with `_core.multiarray.arange` +@overload # dtype= +def arange[ScalarT: _ArangeScalar]( + start_or_stop: _ArangeScalar | float, + /, + stop: _ArangeScalar | float | None = None, + step: _ArangeScalar | float | None = 1, + *, + dtype: _DTypeLike[ScalarT], + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[ScalarT]: ... +@overload # (int-like, int-like?, int-like?) 
+def arange( + start_or_stop: _IntLike_co, + /, + stop: _IntLike_co | None = None, + step: _IntLike_co | None = 1, + *, + dtype: type[int] | _DTypeLike[np.int_] | None = None, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[np.int_]: ... +@overload # (float, float-like?, float-like?) +def arange( + start_or_stop: float | floating, + /, + stop: _FloatLike_co | None = None, + step: _FloatLike_co | None = 1, + *, + dtype: type[float] | _DTypeLike[np.float64] | None = None, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[np.float64 | Any]: ... +@overload # (float-like, float, float-like?) +def arange( + start_or_stop: _FloatLike_co, + /, + stop: float | floating, + step: _FloatLike_co | None = 1, + *, + dtype: type[float] | _DTypeLike[np.float64] | None = None, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[np.float64 | Any]: ... +@overload # (timedelta, timedelta-like?, timedelta-like?) +def arange( + start_or_stop: np.timedelta64, + /, + stop: _TD64Like_co | None = None, + step: _TD64Like_co | None = 1, + *, + dtype: _DTypeLike[np.timedelta64] | None = None, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[np.timedelta64[Incomplete]]: ... +@overload # (timedelta-like, timedelta, timedelta-like?) 
+def arange( + start_or_stop: _TD64Like_co, + /, + stop: np.timedelta64, + step: _TD64Like_co | None = 1, + *, + dtype: _DTypeLike[np.timedelta64] | None = None, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[np.timedelta64[Incomplete]]: ... +@overload # (datetime, datetime, timedelta-like) (requires both start and stop) +def arange( + start_or_stop: np.datetime64, + /, + stop: np.datetime64, + step: _TD64Like_co | None = 1, + *, + dtype: _DTypeLike[np.datetime64] | None = None, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[np.datetime64[Incomplete]]: ... +@overload # (str, str, timedelta-like, dtype=dt64-like) (requires both start and stop) +def arange( + start_or_stop: str, + /, + stop: str, + step: _TD64Like_co | None = 1, + *, + dtype: _DTypeLike[np.datetime64] | _DT64Codes, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[np.datetime64[Incomplete]]: ... +@overload # dtype= +def arange( + start_or_stop: _ArangeScalar | float | str, + /, + stop: _ArangeScalar | float | str | None = None, + step: _ArangeScalar | float | None = 1, + *, + dtype: DTypeLike | None = None, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[Incomplete]: ... 
+ +# based on `_core.fromnumeric.clip` +@overload +def clip[ScalarT: np.generic]( + a: ScalarT, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + fill_value: _FillValue | None = None, + hardmask: bool = False, + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> ScalarT: ... +@overload +def clip[ScalarT: np.generic]( + a: NDArray[ScalarT], + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + fill_value: _FillValue | None = None, + hardmask: bool = False, + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> _MaskedArray[ScalarT]: ... +@overload +def clip[MArrayT: MaskedArray]( + a: ArrayLike, + a_min: ArrayLike | None, + a_max: ArrayLike | None, + out: MArrayT, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + fill_value: _FillValue | None = None, + hardmask: bool = False, + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> MArrayT: ... +@overload +def clip[MArrayT: MaskedArray]( + a: ArrayLike, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + *, + out: MArrayT, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + fill_value: _FillValue | None = None, + hardmask: bool = False, + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> MArrayT: ... 
+@overload +def clip( + a: ArrayLike, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + fill_value: _FillValue | None = None, + hardmask: bool = False, + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> Incomplete: ... + +# keep in sync with `_core.multiarray.ones` +@overload +def empty( + shape: SupportsIndex, + dtype: None = None, + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[np.float64]: ... +@overload +def empty[DTypeT: np.dtype]( + shape: SupportsIndex, + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int], DTypeT]: ... +@overload +def empty[ScalarT: np.generic]( + shape: SupportsIndex, + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[ScalarT]: ... +@overload +def empty( + shape: SupportsIndex, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[Any]: ... +@overload # known shape +def empty[ShapeT: _Shape]( + shape: ShapeT, + dtype: None = None, + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[ShapeT, np.dtype[np.float64]]: ... 
+@overload +def empty[ShapeT: _Shape, DTypeT: np.dtype]( + shape: ShapeT, + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[ShapeT, DTypeT]: ... +@overload +def empty[ShapeT: _Shape, ScalarT: np.generic]( + shape: ShapeT, + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[ShapeT, np.dtype[ScalarT]]: ... +@overload +def empty[ShapeT: _Shape]( + shape: ShapeT, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[ShapeT]: ... +@overload # unknown shape +def empty[ShapeT: _Shape]( + shape: _ShapeLike, + dtype: None = None, + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[np.float64]: ... +@overload +def empty[DTypeT: np.dtype]( + shape: _ShapeLike, + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[_AnyShape, DTypeT]: ... +@overload +def empty[ScalarT: np.generic]( + shape: _ShapeLike, + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[ScalarT]: ... 
+@overload +def empty( + shape: _ShapeLike, + dtype: DTypeLike | None = None, + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray: ... + +# keep in sync with `_core.multiarray.empty_like` +@overload +def empty_like[MArrayT: MaskedArray]( + a: MArrayT, + /, + dtype: None = None, + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, + *, + device: Literal["cpu"] | None = None, +) -> MArrayT: ... +@overload +def empty_like[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + /, + dtype: None = None, + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, + *, + device: Literal["cpu"] | None = None, +) -> _MaskedArray[ScalarT]: ... +@overload +def empty_like[ScalarT: np.generic]( + a: Incomplete, + /, + dtype: _DTypeLike[ScalarT], + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, + *, + device: Literal["cpu"] | None = None, +) -> _MaskedArray[ScalarT]: ... +@overload +def empty_like( + a: Incomplete, + /, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, + *, + device: Literal["cpu"] | None = None, +) -> _MaskedArray[Incomplete]: ... + +# This is a bit of a hack to avoid having to duplicate all those `empty` overloads for +# `ones` and `zeros`, that relies on the fact that empty/zeros/ones have identical +# type signatures, but may cause some type-checkers to report incorrect names in case +# of user errors. Mypy and Pyright seem to handle this just fine. +ones = empty +ones_like = empty_like +zeros = empty +zeros_like = empty_like + +# keep in sync with `_core.multiarray.frombuffer` +@overload +def frombuffer( + buffer: Buffer, + *, + count: SupportsIndex = -1, + offset: SupportsIndex = 0, + like: _SupportsArrayFunc | None = None, +) -> _MaskedArray[np.float64]: ... 
+@overload +def frombuffer[ScalarT: np.generic]( + buffer: Buffer, + dtype: _DTypeLike[ScalarT], + count: SupportsIndex = -1, + offset: SupportsIndex = 0, + *, + like: _SupportsArrayFunc | None = None, +) -> _MaskedArray[ScalarT]: ... +@overload +def frombuffer( + buffer: Buffer, + dtype: DTypeLike | None = float, + count: SupportsIndex = -1, + offset: SupportsIndex = 0, + *, + like: _SupportsArrayFunc | None = None, +) -> _MaskedArray[Incomplete]: ... + +# keep roughly in sync with `_core.numeric.fromfunction` +def fromfunction[ShapeT: _Shape, DTypeT: np.dtype]( + function: Callable[..., np.ndarray[ShapeT, DTypeT]], + shape: Sequence[int], + *, + dtype: DTypeLike | None = float, + like: _SupportsArrayFunc | None = None, + **kwargs: object, +) -> MaskedArray[ShapeT, DTypeT]: ... + +# keep roughly in sync with `_core.numeric.identity` +@overload +def identity( + n: int, + dtype: None = None, + *, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int, int], np.dtype[np.float64]]: ... +@overload +def identity[ScalarT: np.generic]( + n: int, + dtype: _DTypeLike[ScalarT], + *, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int, int], np.dtype[ScalarT]]: ... +@overload +def identity( + n: int, + dtype: DTypeLike | None = None, + *, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int, int], np.dtype[Incomplete]]: ... + +# keep roughly in sync with `_core.numeric.indices` +@overload +def indices( + dimensions: Sequence[int], + dtype: type[int] = int, + sparse: Literal[False] = False, + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[np.intp]: ... 
+@overload +def indices( + dimensions: Sequence[int], + dtype: type[int], + sparse: Literal[True], + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> tuple[_MaskedArray[np.intp], ...]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: type[int] = int, + *, + sparse: Literal[True], + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> tuple[_MaskedArray[np.intp], ...]: ... +@overload +def indices[ScalarT: np.generic]( + dimensions: Sequence[int], + dtype: _DTypeLike[ScalarT], + sparse: Literal[False] = False, + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[ScalarT]: ... +@overload +def indices[ScalarT: np.generic]( + dimensions: Sequence[int], + dtype: _DTypeLike[ScalarT], + sparse: Literal[True], + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> tuple[_MaskedArray[ScalarT], ...]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: DTypeLike | None = int, + sparse: Literal[False] = False, + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[Incomplete]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: DTypeLike | None, + sparse: Literal[True], + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> tuple[_MaskedArray[Incomplete], ...]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: DTypeLike | None = int, + *, + sparse: Literal[True], + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> tuple[_MaskedArray[Incomplete], ...]: ... + +# keep roughly in sync with `_core.fromnumeric.squeeze` +@overload +def squeeze[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axis: _ShapeLike | None = None, + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[ScalarT]: ... 
+@overload +def squeeze( + a: ArrayLike, + axis: _ShapeLike | None = None, + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[Incomplete]: ... diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index dc1f8658f82a..769c38fdc900 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -5,7 +5,6 @@ :author: Pierre Gerard-Marchant :contact: pierregm_at_uga_dot_edu -:version: $Id: extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $ """ __all__ = [ @@ -20,22 +19,40 @@ 'setdiff1d', 'setxor1d', 'stack', 'unique', 'union1d', 'vander', 'vstack', ] +import functools import itertools import warnings -from . import core as ma -from .core import ( - MaskedArray, MAError, add, array, asarray, concatenate, filled, count, - getmask, getmaskarray, make_mask_descr, masked, masked_array, mask_or, - nomask, ones, sort, zeros, getdata, get_masked_subclass, dot - ) - import numpy as np -from numpy import ndarray, array as nxarray -from numpy.lib.array_utils import normalize_axis_index, normalize_axis_tuple +from numpy import array as nxarray, ndarray from numpy.lib._function_base_impl import _ureduce from numpy.lib._index_tricks_impl import AxisConcatenator -from numpy._core.numeric import normalize_axis_tuple +from numpy.lib.array_utils import normalize_axis_index, normalize_axis_tuple + +from . 
import core as ma +from .core import ( # noqa: F401 + MAError, + MaskedArray, + add, + array, + asarray, + concatenate, + count, + dot, + filled, + get_masked_subclass, + getdata, + getmask, + getmaskarray, + make_mask_descr, + mask_or, + masked, + masked_array, + nomask, + ones, + sort, + zeros, +) def issequence(seq): @@ -70,6 +87,7 @@ def count_masked(arr, axis=None): Examples -------- + >>> import numpy as np >>> a = np.arange(9).reshape((3,3)) >>> a = np.ma.array(a) >>> a[1, 0] = np.ma.masked @@ -133,6 +151,7 @@ def masked_all(shape, dtype=float): Examples -------- + >>> import numpy as np >>> np.ma.masked_all((3, 3)) masked_array( data=[[--, --, --], @@ -196,6 +215,7 @@ def masked_all_like(arr): Examples -------- + >>> import numpy as np >>> arr = np.zeros((2, 3), dtype=np.float32) >>> arr array([[0., 0., 0.], @@ -225,150 +245,93 @@ def masked_all_like(arr): #####-------------------------------------------------------------------------- #---- --- Standard functions --- #####-------------------------------------------------------------------------- -class _fromnxfunction: - """ - Defines a wrapper to adapt NumPy functions to masked arrays. - - An instance of `_fromnxfunction` can be called with the same parameters - as the wrapped NumPy function. The docstring of `newfunc` is adapted from - the wrapped function as well, see `getdoc`. - - This class should not be used directly. Instead, one of its extensions that - provides support for a specific type of input should be used. +def _fromnxfunction_function(_fromnxfunction): + """ + Decorator to wrap a "_fromnxfunction" function, wrapping a numpy function as a + masked array function, with proper docstring and name. Parameters ---------- - funcname : str - The name of the function to be adapted. The function should be - in the NumPy namespace (i.e. ``np.funcname``). 
- - """ + _fromnxfunction : ({params}) -> ndarray, {params}) -> masked_array + Wrapper function that calls the wrapped numpy function - def __init__(self, funcname): - self.__name__ = funcname - self.__doc__ = self.getdoc() + Returns + ------- + decorator : (f: ({params}) -> ndarray) -> ({params}) -> masked_array + Function that accepts a numpy function and returns a masked array function - def getdoc(self): - """ - Retrieve the docstring and signature from the function. + """ + def decorator(npfunc, /): + def wrapper(*args, **kwargs): + return _fromnxfunction(npfunc, *args, **kwargs) - The ``__doc__`` attribute of the function is used as the docstring for - the new masked array version of the function. A note on application - of the function to the mask is appended. + functools.update_wrapper(wrapper, npfunc, assigned=("__name__", "__qualname__")) + wrapper.__doc__ = ma.doc_note( + npfunc.__doc__, + "The function is applied to both the ``_data`` and the ``_mask``, if any.", + ) + return wrapper - Parameters - ---------- - None + return decorator - """ - npfunc = getattr(np, self.__name__, None) - doc = getattr(npfunc, '__doc__', None) - if doc: - sig = ma.get_object_signature(npfunc) - doc = ma.doc_note(doc, "The function is applied to both the _data " - "and the _mask, if any.") - if sig: - sig = self.__name__ + sig + "\n\n" - return sig + doc - return - def __call__(self, *args, **params): - pass +@_fromnxfunction_function +def _fromnxfunction_single(npfunc, a, /, *args, **kwargs): + """ + Wraps a NumPy function that can be called with a single array argument followed by + auxiliary args that are passed verbatim for both the data and mask calls. 
+ """ + return masked_array( + data=npfunc(np.asarray(a), *args, **kwargs), + mask=npfunc(getmaskarray(a), *args, **kwargs), + ) -class _fromnxfunction_single(_fromnxfunction): +@_fromnxfunction_function +def _fromnxfunction_seq(npfunc, arys, /, *args, **kwargs): """ - A version of `_fromnxfunction` that is called with a single array - argument followed by auxiliary args that are passed verbatim for - both the data and mask calls. + Wraps a NumPy function that can be called with a single sequence of arrays followed + by auxiliary args that are passed verbatim for both the data and mask calls. """ - def __call__(self, x, *args, **params): - func = getattr(np, self.__name__) - if isinstance(x, ndarray): - _d = func(x.__array__(), *args, **params) - _m = func(getmaskarray(x), *args, **params) - return masked_array(_d, mask=_m) - else: - _d = func(np.asarray(x), *args, **params) - _m = func(getmaskarray(x), *args, **params) - return masked_array(_d, mask=_m) - - -class _fromnxfunction_seq(_fromnxfunction): - """ - A version of `_fromnxfunction` that is called with a single sequence - of arrays followed by auxiliary args that are passed verbatim for - both the data and mask calls. - """ - def __call__(self, x, *args, **params): - func = getattr(np, self.__name__) - _d = func(tuple([np.asarray(a) for a in x]), *args, **params) - _m = func(tuple([getmaskarray(a) for a in x]), *args, **params) - return masked_array(_d, mask=_m) - - -class _fromnxfunction_args(_fromnxfunction): - """ - A version of `_fromnxfunction` that is called with multiple array - arguments. The first non-array-like input marks the beginning of the - arguments that are passed verbatim for both the data and mask calls. - Array arguments are processed independently and the results are - returned in a list. If only one array is found, the return value is - just the processed array instead of a list. 
- """ - def __call__(self, *args, **params): - func = getattr(np, self.__name__) - arrays = [] - args = list(args) - while len(args) > 0 and issequence(args[0]): - arrays.append(args.pop(0)) - res = [] - for x in arrays: - _d = func(np.asarray(x), *args, **params) - _m = func(getmaskarray(x), *args, **params) - res.append(masked_array(_d, mask=_m)) - if len(arrays) == 1: - return res[0] - return res - - -class _fromnxfunction_allargs(_fromnxfunction): - """ - A version of `_fromnxfunction` that is called with multiple array - arguments. Similar to `_fromnxfunction_args` except that all args - are converted to arrays even if they are not so already. This makes - it possible to process scalars as 1-D arrays. Only keyword arguments - are passed through verbatim for the data and mask calls. Arrays - arguments are processed independently and the results are returned - in a list. If only one arg is present, the return value is just the - processed array instead of a list. - """ - def __call__(self, *args, **params): - func = getattr(np, self.__name__) - res = [] - for x in args: - _d = func(np.asarray(x), **params) - _m = func(getmaskarray(x), **params) - res.append(masked_array(_d, mask=_m)) - if len(args) == 1: - return res[0] - return res - - -atleast_1d = _fromnxfunction_allargs('atleast_1d') -atleast_2d = _fromnxfunction_allargs('atleast_2d') -atleast_3d = _fromnxfunction_allargs('atleast_3d') - -vstack = row_stack = _fromnxfunction_seq('vstack') -hstack = _fromnxfunction_seq('hstack') -column_stack = _fromnxfunction_seq('column_stack') -dstack = _fromnxfunction_seq('dstack') -stack = _fromnxfunction_seq('stack') - -hsplit = _fromnxfunction_single('hsplit') - -diagflat = _fromnxfunction_single('diagflat') + return masked_array( + data=npfunc(tuple(np.asarray(a) for a in arys), *args, **kwargs), + mask=npfunc(tuple(getmaskarray(a) for a in arys), *args, **kwargs), + ) + +@_fromnxfunction_function +def _fromnxfunction_allargs(npfunc, /, *arys, **kwargs): + """ + Wraps 
a NumPy function that can be called with multiple array arguments. + All args are converted to arrays even if they are not so already. + This makes it possible to process scalars as 1-D arrays. + Only keyword arguments are passed through verbatim for the data and mask calls. + Arrays arguments are processed independently and the results are returned in a list. + If only one arg is present, the return value is just the processed array instead of + a list. + """ + out = tuple( + masked_array( + data=npfunc(np.asarray(a), **kwargs), + mask=npfunc(getmaskarray(a), **kwargs), + ) + for a in arys + ) + return out[0] if len(out) == 1 else out + + +atleast_1d = _fromnxfunction_allargs(np.atleast_1d) +atleast_2d = _fromnxfunction_allargs(np.atleast_2d) +atleast_3d = _fromnxfunction_allargs(np.atleast_3d) + +vstack = row_stack = _fromnxfunction_seq(np.vstack) +hstack = _fromnxfunction_seq(np.hstack) +column_stack = _fromnxfunction_seq(np.column_stack) +dstack = _fromnxfunction_seq(np.dstack) +stack = _fromnxfunction_seq(np.stack) + +hsplit = _fromnxfunction_single(np.hsplit) +diagflat = _fromnxfunction_single(np.diagflat) #####-------------------------------------------------------------------------- @@ -464,6 +427,8 @@ def apply_along_axis(func1d, axis, arr, *args, **kwargs): result = asarray(outarr, dtype=max_dtypes) result.fill_value = ma.default_fill_value(result) return result + + apply_along_axis.__doc__ = np.apply_along_axis.__doc__ @@ -499,6 +464,7 @@ def apply_over_axes(func, a, axes): Examples -------- + >>> import numpy as np >>> a = np.ma.arange(24).reshape(2,3,4) >>> a[:,0,1] = np.ma.masked >>> a[:,1,:] = np.ma.masked @@ -608,6 +574,7 @@ def average(a, axis=None, weights=None, returned=False, *, Examples -------- + >>> import numpy as np >>> a = np.ma.array([1., 2., 3., 4.], mask=[False, False, True, True]) >>> np.ma.average(a, weights=[3, 1, 0, 0]) 1.25 @@ -693,7 +660,7 @@ def average(a, axis=None, weights=None, returned=False, *, for ax, s in 
enumerate(a.shape))) if m is not nomask: - wgt = wgt*(~a.mask) + wgt = wgt * (~a.mask) wgt.mask |= a.mask scl = wgt.sum(axis=axis, dtype=result_dtype, **keepdims_kw) @@ -738,8 +705,6 @@ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): in the result as dimensions with size one. With this option, the result will broadcast correctly against the input array. - .. versionadded:: 1.10.0 - Returns ------- median : ndarray @@ -761,6 +726,7 @@ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): Examples -------- + >>> import numpy as np >>> x = np.ma.array(np.arange(8), mask=[0]*4 + [1]*4) >>> np.ma.median(x) 1.5 @@ -841,9 +807,9 @@ def _median(a, axis=None, out=None, overwrite_input=False): # duplicate high if odd number of elements so mean does nothing odd = counts % 2 == 1 - l = np.where(odd, h, h-1) + l = np.where(odd, h, h - 1) - lh = np.concatenate([l,h], axis=axis) + lh = np.concatenate([l, h], axis=axis) # get low and high median low_high = np.take_along_axis(asorted, lh, axis=axis) @@ -895,6 +861,7 @@ def compress_nd(x, axis=None): Examples -------- + >>> import numpy as np >>> arr = [[1, 2], [3, 4]] >>> mask = [[0, 1], [0, 0]] >>> x = np.ma.array(arr, mask=mask) @@ -925,7 +892,7 @@ def compress_nd(x, axis=None): data = x._data for ax in axis: axes = tuple(list(range(ax)) + list(range(ax + 1, x.ndim))) - data = data[(slice(None),)*ax + (~m.any(axis=axes),)] + data = data[(slice(None),) * ax + (~m.any(axis=axes),)] return data @@ -956,6 +923,7 @@ def compress_rowcols(x, axis=None): Examples -------- + >>> import numpy as np >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], ... [1, 0, 0], ... [0, 0, 0]]) @@ -1009,12 +977,13 @@ def compress_rows(a): Examples -------- + >>> import numpy as np >>> a = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], ... [1, 0, 0], ... 
[0, 0, 0]]) >>> np.ma.compress_rows(a) array([[6, 7, 8]]) - + """ a = asarray(a) if a.ndim != 2: @@ -1047,6 +1016,7 @@ def compress_cols(a): Examples -------- + >>> import numpy as np >>> a = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], ... [1, 0, 0], ... [0, 0, 0]]) @@ -1107,7 +1077,8 @@ def mask_rowcols(a, axis=None): Examples -------- - >>> a = np.zeros((3, 3), dtype=int) + >>> import numpy as np + >>> a = np.zeros((3, 3), dtype=np.int_) >>> a[1, 1] = 1 >>> a array([[0, 0, 0], @@ -1163,7 +1134,8 @@ def mask_rows(a, axis=np._NoValue): Examples -------- - >>> a = np.zeros((3, 3), dtype=int) + >>> import numpy as np + >>> a = np.zeros((3, 3), dtype=np.int_) >>> a[1, 1] = 1 >>> a array([[0, 0, 0], @@ -1213,7 +1185,8 @@ def mask_cols(a, axis=np._NoValue): Examples -------- - >>> a = np.zeros((3, 3), dtype=int) + >>> import numpy as np + >>> a = np.zeros((3, 3), dtype=np.int_) >>> a[1, 1] = 1 >>> a array([[0, 0, 0], @@ -1266,6 +1239,7 @@ def ediff1d(arr, to_end=None, to_begin=None): Examples -------- + >>> import numpy as np >>> arr = np.ma.array([1, 2, 4, 7, 0]) >>> np.ma.ediff1d(arr) masked_array(data=[ 1, 2, 3, -7], @@ -1303,6 +1277,7 @@ def unique(ar1, return_index=False, return_inverse=False): Examples -------- + >>> import numpy as np >>> a = [1, 2, 1000, 2, 3] >>> mask = [0, 0, 1, 0, 0] >>> masked_a = np.ma.masked_array(a, mask) @@ -1354,6 +1329,7 @@ def intersect1d(ar1, ar2, assume_unique=False): Examples -------- + >>> import numpy as np >>> x = np.ma.array([1, 3, 3, 3], mask=[0, 0, 0, 1]) >>> y = np.ma.array([3, 1, 1, 1], mask=[0, 0, 0, 1]) >>> np.ma.intersect1d(x, y) @@ -1383,11 +1359,12 @@ def setxor1d(ar1, ar2, assume_unique=False): Examples -------- + >>> import numpy as np >>> ar1 = np.ma.array([1, 2, 3, 2, 4]) >>> ar2 = np.ma.array([2, 3, 5, 7, 5]) >>> np.ma.setxor1d(ar1, ar2) masked_array(data=[1, 4, 5, 7], - mask=False, + mask=False, fill_value=999999) """ @@ -1412,21 +1389,17 @@ def in1d(ar1, ar2, assume_unique=False, invert=False): Test 
whether each element of an array is also present in a second array. - The output is always a masked array. See `numpy.in1d` for more details. + The output is always a masked array. We recommend using :func:`isin` instead of `in1d` for new code. See Also -------- isin : Version of this function that preserves the shape of ar1. - numpy.in1d : Equivalent function for ndarrays. - - Notes - ----- - .. versionadded:: 1.4.0 Examples -------- + >>> import numpy as np >>> ar1 = np.ma.array([0, 1, 2, 5, 0]) >>> ar2 = [0, 2] >>> np.ma.in1d(ar1, ar2) @@ -1471,12 +1444,9 @@ def isin(element, test_elements, assume_unique=False, invert=False): in1d : Flattened version of this function. numpy.isin : Equivalent function for ndarrays. - Notes - ----- - .. versionadded:: 1.13.0 - Examples -------- + >>> import numpy as np >>> element = np.ma.array([1, 2, 3, 4, 5, 6]) >>> test_elements = [0, 2] >>> np.ma.isin(element, test_elements) @@ -1502,6 +1472,7 @@ def union1d(ar1, ar2): Examples -------- + >>> import numpy as np >>> ar1 = np.ma.array([1, 2, 3, 4]) >>> ar2 = np.ma.array([3, 4, 5, 6]) >>> np.ma.union1d(ar1, ar2) @@ -1526,6 +1497,7 @@ def setdiff1d(ar1, ar2, assume_unique=False): Examples -------- + >>> import numpy as np >>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1]) >>> np.ma.setdiff1d(x, [1, 2]) masked_array(data=[3, --], @@ -1569,8 +1541,8 @@ def _covhelper(x, y=None, rowvar=True, allow_masked=True): tup = (None, slice(None)) # if y is None: - # Check if we can guarantee that the integers in the (N - ddof) - # normalisation can be accurately represented with single-precision + # Check if we can guarantee that the integers in the (N - ddof) + # normalisation can be accurately represented with single-precision # before computing the dot product. 
if x.shape[0] > 2 ** 24 or x.shape[1] > 2 ** 24: xnm_dtype = np.float64 @@ -1591,8 +1563,8 @@ def _covhelper(x, y=None, rowvar=True, allow_masked=True): x._sharedmask = False y._sharedmask = False x = ma.concatenate((x, y), axis) - # Check if we can guarantee that the integers in the (N - ddof) - # normalisation can be accurately represented with single-precision + # Check if we can guarantee that the integers in the (N - ddof) + # normalisation can be accurately represented with single-precision # before computing the dot product. if x.shape[0] > 2 ** 24 or x.shape[1] > 2 ** 24: xnm_dtype = np.float64 @@ -1646,8 +1618,6 @@ def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): the number of observations; this overrides the value implied by ``bias``. The default value is ``None``. - .. versionadded:: 1.5 - Raises ------ ValueError @@ -1659,6 +1629,7 @@ def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): Examples -------- + >>> import numpy as np >>> x = np.ma.array([[0, 1], [1, 1]], mask=[0, 1, 0, 1]) >>> y = np.ma.array([[1, 0], [0, 1]], mask=[0, 0, 1, 1]) >>> np.ma.cov(x, y) @@ -1673,7 +1644,7 @@ def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): [ True, True, True, True]], fill_value=1e+20, dtype=float64) - + """ # Check inputs if ddof is not None and ddof != int(ddof): @@ -1701,8 +1672,8 @@ def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): return result -def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True, - ddof=np._NoValue): +def corrcoef(x, y=None, rowvar=True, allow_masked=True, + ): """ Return Pearson product-moment correlation coefficients. @@ -1723,34 +1694,20 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True, variable, with observations in the columns. Otherwise, the relationship is transposed: each column represents a variable, while the rows contain observations. 
- bias : _NoValue, optional - Has no effect, do not use. - - .. deprecated:: 1.10.0 allow_masked : bool, optional If True, masked values are propagated pair-wise: if a value is masked in `x`, the corresponding value is masked in `y`. If False, raises an exception. Because `bias` is deprecated, this argument needs to be treated as keyword only to avoid a warning. - ddof : _NoValue, optional - Has no effect, do not use. - - .. deprecated:: 1.10.0 See Also -------- numpy.corrcoef : Equivalent function in top-level NumPy module. cov : Estimate the covariance matrix. - Notes - ----- - This function accepts but discards arguments `bias` and `ddof`. This is - for backwards compatibility with previous versions of this function. These - arguments had no effect on the return values of the function and can be - safely ignored in this and previous versions of numpy. - Examples -------- + >>> import numpy as np >>> x = np.ma.array([[0, 1], [1, 1]], mask=[0, 1, 0, 1]) >>> np.ma.corrcoef(x) masked_array( @@ -1762,10 +1719,6 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True, dtype=float64) """ - msg = 'bias and ddof have no effect and are deprecated' - if bias is not np._NoValue or ddof is not np._NoValue: - # 2015-03-15, 1.10 - warnings.warn(msg, DeprecationWarning, stacklevel=2) # Estimate the covariance matrix. corr = cov(x, y, rowvar, allow_masked=allow_masked) # The non-masked version returns a masked value for a scalar. 
@@ -1791,6 +1744,8 @@ class MAxisConcatenator(AxisConcatenator): mr_class """ + __slots__ = () + concatenate = staticmethod(concatenate) @classmethod @@ -1822,15 +1777,19 @@ class mr_class(MAxisConcatenator): Examples -------- + >>> import numpy as np >>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])] masked_array(data=[1, 2, 3, ..., 4, 5, 6], mask=False, fill_value=999999) """ + __slots__ = () + def __init__(self): MAxisConcatenator.__init__(self, 0) + mr_ = mr_class() @@ -1865,6 +1824,7 @@ def ndenumerate(a, compressed=True): Examples -------- + >>> import numpy as np >>> a = np.ma.arange(9).reshape((3, 3)) >>> a[1, 0] = np.ma.masked >>> a[1, 2] = np.ma.masked @@ -1934,6 +1894,7 @@ def flatnotmasked_edges(a): Examples -------- + >>> import numpy as np >>> a = np.ma.arange(10) >>> np.ma.flatnotmasked_edges(a) array([0, 9]) @@ -1991,6 +1952,7 @@ def notmasked_edges(a, axis=None): Examples -------- + >>> import numpy as np >>> a = np.arange(9).reshape((3, 3)) >>> m = np.zeros_like(a) >>> m[1:, 1:] = 1 @@ -2008,8 +1970,8 @@ def notmasked_edges(a, axis=None): return flatnotmasked_edges(a) m = getmaskarray(a) idx = array(np.indices(a.shape), mask=np.asarray([m] * a.ndim)) - return [tuple([idx[i].min(axis).compressed() for i in range(a.ndim)]), - tuple([idx[i].max(axis).compressed() for i in range(a.ndim)]), ] + return [tuple(idx[i].min(axis).compressed() for i in range(a.ndim)), + tuple(idx[i].max(axis).compressed() for i in range(a.ndim)), ] def flatnotmasked_contiguous(a): @@ -2026,9 +1988,6 @@ def flatnotmasked_contiguous(a): slice_list : list A sorted sequence of `slice` objects (start index, end index). - .. 
versionchanged:: 1.15.0 - Now returns an empty list instead of None for a fully masked array - See Also -------- flatnotmasked_edges, notmasked_contiguous, notmasked_edges @@ -2040,6 +1999,7 @@ def flatnotmasked_contiguous(a): Examples -------- + >>> import numpy as np >>> a = np.ma.arange(10) >>> np.ma.flatnotmasked_contiguous(a) [slice(0, 10, None)] @@ -2101,6 +2061,7 @@ def notmasked_contiguous(a, axis=None): Examples -------- + >>> import numpy as np >>> a = np.arange(12).reshape((3, 4)) >>> mask = np.zeros_like(a) >>> mask[1:, :-1] = 1; mask[0, 1] = 1; mask[-1, 0] = 0 @@ -2126,7 +2087,7 @@ def notmasked_contiguous(a, axis=None): >>> np.ma.notmasked_contiguous(ma, axis=1) [[slice(0, 1, None), slice(2, 4, None)], [slice(3, 4, None)], [slice(0, 1, None), slice(3, 4, None)]] - """ + """ # noqa: E501 a = asarray(a) nd = a.ndim if nd > 2: @@ -2191,10 +2152,6 @@ def clump_unmasked(a): The list of slices, one for each continuous region of unmasked elements in `a`. - Notes - ----- - .. versionadded:: 1.4.0 - See Also -------- flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges @@ -2202,6 +2159,7 @@ def clump_unmasked(a): Examples -------- + >>> import numpy as np >>> a = np.ma.masked_array(np.arange(10)) >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked >>> np.ma.clump_unmasked(a) @@ -2230,10 +2188,6 @@ def clump_masked(a): The list of slices, one for each continuous region of masked elements in `a`. - Notes - ----- - .. 
versionadded:: 1.4.0 - See Also -------- flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges @@ -2241,6 +2195,7 @@ def clump_masked(a): Examples -------- + >>> import numpy as np >>> a = np.ma.masked_array(np.arange(10)) >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked >>> np.ma.clump_masked(a) @@ -2269,6 +2224,7 @@ def vander(x, n=None): _vander[m] = 0 return _vander + vander.__doc__ = ma.doc_note(np.vander.__doc__, vander.__doc__) @@ -2306,4 +2262,5 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): else: return np.polyfit(x, y, deg, rcond, full, w, cov) + polyfit.__doc__ = ma.doc_note(np.polyfit.__doc__, polyfit.__doc__) diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 56228b927080..232d040360ea 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -1,85 +1,767 @@ -from typing import Any -from numpy.lib.index_tricks import AxisConcatenator +from _typeshed import Incomplete, SupportsLenAndGetItem +from collections.abc import Callable, Iterator, Sequence +from typing import ( + Any, + Concatenate, + Final, + Literal as L, + SupportsIndex, + TypeVar, + overload, + override, +) -from numpy.ma.core import ( - dot as dot, - mask_rowcols as mask_rowcols, +import numpy as np +from numpy import _CastingKind +from numpy._globals import _NoValueType +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _AnyShape, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _DTypeLike, + _NestedSequence, + _NumberLike_co, + _Shape, + _ShapeLike, ) +from numpy.lib._function_base_impl import average +from numpy.lib._index_tricks_impl import AxisConcatenator + +from .core import MaskedArray, dot + +__all__ = [ + "apply_along_axis", + "apply_over_axes", + "atleast_1d", + "atleast_2d", + "atleast_3d", + "average", + "clump_masked", + "clump_unmasked", + "column_stack", + "compress_cols", + "compress_nd", + "compress_rowcols", + "compress_rows", + "corrcoef", + "count_masked", + 
"cov", + "diagflat", + "dot", + "dstack", + "ediff1d", + "flatnotmasked_contiguous", + "flatnotmasked_edges", + "hsplit", + "hstack", + "in1d", + "intersect1d", + "isin", + "mask_cols", + "mask_rowcols", + "mask_rows", + "masked_all", + "masked_all_like", + "median", + "mr_", + "ndenumerate", + "notmasked_contiguous", + "notmasked_edges", + "polyfit", + "row_stack", + "setdiff1d", + "setxor1d", + "stack", + "union1d", + "unique", + "vander", + "vstack", +] + +type _MArray[ScalarT: np.generic] = MaskedArray[_AnyShape, np.dtype[ScalarT]] +type _MArray1D[ScalarT: np.generic] = MaskedArray[tuple[int], np.dtype[ScalarT]] +type _MArray2D[ScalarT: np.generic] = MaskedArray[tuple[int, int], np.dtype[ScalarT]] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] + +type _IntArray = NDArray[np.intp] +type _ScalarNumeric = np.inexact | np.timedelta64 | np.object_ +type _InexactDouble = np.float64 | np.longdouble | np.complex128 | np.clongdouble +type _ListSeqND[T] = list[T] | _NestedSequence[list[T]] + +# helper aliases for polyfit +type _2Tup[T] = tuple[T, T] +type _5Tup[T] = tuple[T, NDArray[np.float64], NDArray[np.int32], NDArray[np.float64], NDArray[np.float64]] + +# Explicitly set all allowed values to prevent accidental castings to +# abstract dtypes (their common super-type). +# Only relevant if two or more arguments are parametrized, (e.g. 
`setdiff1d`) +# which could result in, for example, `int64` and `float64` producing a +# `number[_64Bit]` array +_AnyScalarT = TypeVar( + "_AnyScalarT", + np.bool, + np.int8, np.int16, np.int32, np.int64, np.intp, + np.uint8, np.uint16, np.uint32, np.uint64, np.uintp, + np.float16, np.float32, np.float64, np.longdouble, + np.complex64, np.complex128, np.clongdouble, + np.timedelta64, np.datetime64, + np.bytes_, np.str_, np.void, np.object_, + np.integer, np.floating, np.complexfloating, np.character, +) # fmt: skip + +### + +# keep in sync with `numpy._core.shape_base.atleast_1d` +@overload +def atleast_1d[ScalarT: np.generic](a0: _ArrayLike[ScalarT], /) -> _MArray[ScalarT]: ... +@overload +def atleast_1d[ScalarT1: np.generic, ScalarT2: np.generic]( + a0: _ArrayLike[ScalarT1], a1: _ArrayLike[ScalarT2], / +) -> tuple[_MArray[ScalarT1], _MArray[ScalarT2]]: ... +@overload +def atleast_1d[ScalarT: np.generic]( + a0: _ArrayLike[ScalarT], a1: _ArrayLike[ScalarT], /, *arys: _ArrayLike[ScalarT] +) -> tuple[_MArray[ScalarT], ...]: ... +@overload +def atleast_1d(a0: ArrayLike, /) -> _MArray[Incomplete]: ... +@overload +def atleast_1d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[_MArray[Incomplete], _MArray[Incomplete]]: ... +@overload +def atleast_1d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[_MArray[Incomplete], ...]: ... + +# keep in sync with `numpy._core.shape_base.atleast_2d` +@overload +def atleast_2d[ScalarT: np.generic](a0: _ArrayLike[ScalarT], /) -> _MArray[ScalarT]: ... +@overload +def atleast_2d[ScalarT1: np.generic, ScalarT2: np.generic]( + a0: _ArrayLike[ScalarT1], a1: _ArrayLike[ScalarT2], / +) -> tuple[_MArray[ScalarT1], _MArray[ScalarT2]]: ... +@overload +def atleast_2d[ScalarT: np.generic]( + a0: _ArrayLike[ScalarT], a1: _ArrayLike[ScalarT], /, *arys: _ArrayLike[ScalarT] +) -> tuple[_MArray[ScalarT], ...]: ... +@overload +def atleast_2d(a0: ArrayLike, /) -> _MArray[Incomplete]: ... 
+@overload +def atleast_2d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[_MArray[Incomplete], _MArray[Incomplete]]: ... +@overload +def atleast_2d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[_MArray[Incomplete], ...]: ... + +# keep in sync with `numpy._core.shape_base.atleast_2d` +@overload +def atleast_3d[ScalarT: np.generic](a0: _ArrayLike[ScalarT], /) -> _MArray[ScalarT]: ... +@overload +def atleast_3d[ScalarT1: np.generic, ScalarT2: np.generic]( + a0: _ArrayLike[ScalarT1], a1: _ArrayLike[ScalarT2], / +) -> tuple[_MArray[ScalarT1], _MArray[ScalarT2]]: ... +@overload +def atleast_3d[ScalarT: np.generic]( + a0: _ArrayLike[ScalarT], a1: _ArrayLike[ScalarT], /, *arys: _ArrayLike[ScalarT] +) -> tuple[_MArray[ScalarT], ...]: ... +@overload +def atleast_3d(a0: ArrayLike, /) -> _MArray[Incomplete]: ... +@overload +def atleast_3d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[_MArray[Incomplete], _MArray[Incomplete]]: ... +@overload +def atleast_3d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[_MArray[Incomplete], ...]: ... + +# keep in sync with `numpy._core.shape_base.vstack` +@overload +def vstack[ScalarT: np.generic]( + tup: Sequence[_ArrayLike[ScalarT]], + *, + dtype: None = None, + casting: _CastingKind = "same_kind" +) -> _MArray[ScalarT]: ... +@overload +def vstack[ScalarT: np.generic]( + tup: Sequence[ArrayLike], + *, + dtype: _DTypeLike[ScalarT], + casting: _CastingKind = "same_kind" +) -> _MArray[ScalarT]: ... +@overload +def vstack( + tup: Sequence[ArrayLike], + *, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind" +) -> _MArray[Incomplete]: ... + +row_stack = vstack + +# keep in sync with `numpy._core.shape_base.hstack` +@overload +def hstack[ScalarT: np.generic]( + tup: Sequence[_ArrayLike[ScalarT]], + *, + dtype: None = None, + casting: _CastingKind = "same_kind" +) -> _MArray[ScalarT]: ... 
+@overload +def hstack[ScalarT: np.generic]( + tup: Sequence[ArrayLike], + *, + dtype: _DTypeLike[ScalarT], + casting: _CastingKind = "same_kind" +) -> _MArray[ScalarT]: ... +@overload +def hstack( + tup: Sequence[ArrayLike], + *, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind" +) -> _MArray[Incomplete]: ... + +# keep in sync with `numpy._core.shape_base_impl.column_stack` +@overload +def column_stack[ScalarT: np.generic](tup: Sequence[_ArrayLike[ScalarT]]) -> _MArray[ScalarT]: ... +@overload +def column_stack(tup: Sequence[ArrayLike]) -> _MArray[Incomplete]: ... + +# keep in sync with `numpy._core.shape_base_impl.dstack` +@overload +def dstack[ScalarT: np.generic](tup: Sequence[_ArrayLike[ScalarT]]) -> _MArray[ScalarT]: ... +@overload +def dstack(tup: Sequence[ArrayLike]) -> _MArray[Incomplete]: ... + +# keep in sync with `numpy._core.shape_base.stack` +@overload +def stack[ScalarT: np.generic]( + arrays: Sequence[_ArrayLike[ScalarT]], + axis: SupportsIndex = 0, + out: None = None, + *, + dtype: None = None, + casting: _CastingKind = "same_kind" +) -> _MArray[ScalarT]: ... +@overload +def stack[ScalarT: np.generic]( + arrays: Sequence[ArrayLike], + axis: SupportsIndex = 0, + out: None = None, + *, + dtype: _DTypeLike[ScalarT], + casting: _CastingKind = "same_kind" +) -> _MArray[ScalarT]: ... +@overload +def stack( + arrays: Sequence[ArrayLike], + axis: SupportsIndex = 0, + out: None = None, + *, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind" +) -> _MArray[Incomplete]: ... +@overload +def stack[MArrayT: MaskedArray]( + arrays: Sequence[ArrayLike], + axis: SupportsIndex, + out: MArrayT, + *, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind", +) -> MArrayT: ... +@overload +def stack[MArrayT: MaskedArray]( + arrays: Sequence[ArrayLike], + axis: SupportsIndex = 0, + *, + out: MArrayT, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind", +) -> MArrayT: ... 
+ +# keep in sync with `numpy._core.shape_base_impl.hsplit` +@overload +def hsplit[ScalarT: np.generic](ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike) -> list[_MArray[ScalarT]]: ... +@overload +def hsplit(ary: ArrayLike, indices_or_sections: _ShapeLike) -> list[_MArray[Incomplete]]: ... + +# keep in sync with `numpy._core.twodim_base_impl.hsplit` +@overload +def diagflat[ScalarT: np.generic](v: _ArrayLike[ScalarT], k: int = 0) -> _MArray[ScalarT]: ... +@overload +def diagflat(v: ArrayLike, k: int = 0) -> _MArray[Incomplete]: ... + +# +def count_masked(arr: ArrayLike, axis: SupportsIndex | None = None) -> NDArray[np.intp]: ... + +# +@overload +def masked_all[ScalarT: np.generic](shape: _ShapeLike, dtype: _DTypeLike[ScalarT]) -> _MArray[ScalarT]: ... +@overload +def masked_all(shape: _ShapeLike, dtype: DTypeLike = float) -> _MArray[Incomplete]: ... + +# +@overload +def masked_all_like[ScalarT: np.generic](arr: _ArrayLike[ScalarT]) -> _MArray[ScalarT]: ... +@overload +def masked_all_like(arr: ArrayLike) -> _MArray[Incomplete]: ... + +# +def apply_along_axis[**Tss]( + func1d: Callable[Concatenate[MaskedArray, Tss], ArrayLike], + axis: SupportsIndex, + arr: ArrayLike, + *args: Tss.args, + **kwargs: Tss.kwargs, +) -> _MArray[Incomplete]: ... + +# +@overload +def apply_over_axes[ScalarT: np.generic]( + func: Callable[[MaskedArray, int], _ArrayLike[ScalarT]], + a: np.ndarray, + axes: _ShapeLike, +) -> _MArray[ScalarT]: ... +@overload +def apply_over_axes( + func: Callable[[MaskedArray, int], ArrayLike], + a: np.ndarray, + axes: _ShapeLike, +) -> _MArray[Incomplete]: ... + +# keep in sync with `lib._function_base_impl.median` +@overload # known scalar-type, keepdims=False (default) +def median[ScalarT: np.inexact | np.timedelta64]( + a: _ArrayLike[ScalarT], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: L[False] = False, +) -> ScalarT: ... 
+@overload # float array-like, keepdims=False (default) +def median( + a: _ArrayLikeInt_co | _NestedSequence[float] | float, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: L[False] = False, +) -> np.float64: ... +@overload # complex array-like, keepdims=False (default) +def median( + a: _ListSeqND[complex], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: L[False] = False, +) -> np.complex128: ... +@overload # complex scalar, keepdims=False (default) +def median( + a: complex, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: L[False] = False, +) -> np.complex128 | Any: ... +@overload # known array-type, keepdims=True +def median[ArrayT: _MArray[_ScalarNumeric]]( + a: ArrayT, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + *, + keepdims: L[True], +) -> ArrayT: ... +@overload # known scalar-type, keepdims=True +def median[ScalarT: _ScalarNumeric]( + a: _ArrayLike[ScalarT], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + *, + keepdims: L[True], +) -> _MArray[ScalarT]: ... +@overload # known scalar-type, axis= +def median[ScalarT: _ScalarNumeric]( + a: _ArrayLike[ScalarT], + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + keepdims: bool = False, +) -> _MArray[ScalarT]: ... +@overload # float array-like, keepdims=True +def median( + a: _NestedSequence[float], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + *, + keepdims: L[True], +) -> _MArray[np.float64]: ... +@overload # float array-like, axis= +def median( + a: _NestedSequence[float], + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + keepdims: bool = False, +) -> _MArray[np.float64]: ... 
+@overload # complex array-like, keepdims=True +def median( + a: _ListSeqND[complex], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + *, + keepdims: L[True], +) -> _MArray[np.complex128]: ... +@overload # complex array-like, axis= +def median( + a: _ListSeqND[complex], + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + keepdims: bool = False, +) -> _MArray[np.complex128]: ... +@overload # out= (keyword) +def median[ArrayT: np.ndarray]( + a: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], + axis: _ShapeLike | None = None, + *, + out: ArrayT, + overwrite_input: bool = False, + keepdims: bool = False, +) -> ArrayT: ... +@overload # out= (positional) +def median[ArrayT: np.ndarray]( + a: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], + axis: _ShapeLike | None, + out: ArrayT, + overwrite_input: bool = False, + keepdims: bool = False, +) -> ArrayT: ... +@overload # fallback +def median( + a: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: bool = False, +) -> Incomplete: ... + +# +@overload +def compress_nd[ScalarT: np.generic](x: _ArrayLike[ScalarT], axis: _ShapeLike | None = None) -> NDArray[ScalarT]: ... +@overload +def compress_nd(x: ArrayLike, axis: _ShapeLike | None = None) -> NDArray[Incomplete]: ... -__all__: list[str] - -def count_masked(arr, axis=...): ... -def masked_all(shape, dtype = ...): ... -def masked_all_like(arr): ... - -class _fromnxfunction: - __name__: Any - __doc__: Any - def __init__(self, funcname): ... - def getdoc(self): ... - def __call__(self, *args, **params): ... - -class _fromnxfunction_single(_fromnxfunction): - def __call__(self, x, *args, **params): ... - -class _fromnxfunction_seq(_fromnxfunction): - def __call__(self, x, *args, **params): ... 
- -class _fromnxfunction_allargs(_fromnxfunction): - def __call__(self, *args, **params): ... - -atleast_1d: _fromnxfunction_allargs -atleast_2d: _fromnxfunction_allargs -atleast_3d: _fromnxfunction_allargs - -vstack: _fromnxfunction_seq -row_stack: _fromnxfunction_seq -hstack: _fromnxfunction_seq -column_stack: _fromnxfunction_seq -dstack: _fromnxfunction_seq -stack: _fromnxfunction_seq - -hsplit: _fromnxfunction_single -diagflat: _fromnxfunction_single - -def apply_along_axis(func1d, axis, arr, *args, **kwargs): ... -def apply_over_axes(func, a, axes): ... -def average(a, axis=..., weights=..., returned=..., keepdims=...): ... -def median(a, axis=..., out=..., overwrite_input=..., keepdims=...): ... -def compress_nd(x, axis=...): ... -def compress_rowcols(x, axis=...): ... -def compress_rows(a): ... -def compress_cols(a): ... -def mask_rows(a, axis = ...): ... -def mask_cols(a, axis = ...): ... -def ediff1d(arr, to_end=..., to_begin=...): ... -def unique(ar1, return_index=..., return_inverse=...): ... -def intersect1d(ar1, ar2, assume_unique=...): ... -def setxor1d(ar1, ar2, assume_unique=...): ... -def in1d(ar1, ar2, assume_unique=..., invert=...): ... -def isin(element, test_elements, assume_unique=..., invert=...): ... -def union1d(ar1, ar2): ... -def setdiff1d(ar1, ar2, assume_unique=...): ... -def cov(x, y=..., rowvar=..., bias=..., allow_masked=..., ddof=...): ... -def corrcoef(x, y=..., rowvar=..., bias = ..., allow_masked=..., ddof = ...): ... +# +@overload +def compress_rowcols[ScalarT: np.generic](x: _ArrayLike[ScalarT], axis: int | None = None) -> _Array2D[ScalarT]: ... +@overload +def compress_rowcols(x: ArrayLike, axis: int | None = None) -> _Array2D[Incomplete]: ... + +# +@overload +def compress_rows[ScalarT: np.generic](a: _ArrayLike[ScalarT]) -> _Array2D[ScalarT]: ... +@overload +def compress_rows(a: ArrayLike) -> _Array2D[Incomplete]: ... + +# +@overload +def compress_cols[ScalarT: np.generic](a: _ArrayLike[ScalarT]) -> _Array2D[ScalarT]: ... 
+@overload +def compress_cols(a: ArrayLike) -> _Array2D[Incomplete]: ... + +# +def mask_rowcols(a: ArrayLike, axis: SupportsIndex | None = None) -> _MArray[Incomplete]: ... +def mask_rows(a: ArrayLike, axis: _NoValueType = ...) -> _MArray[Incomplete]: ... +def mask_cols(a: ArrayLike, axis: _NoValueType = ...) -> _MArray[Incomplete]: ... + +# keep in sync with `lib._arraysetops_impl.ediff1d` +@overload +def ediff1d( + arr: _ArrayLikeBool_co, + to_end: ArrayLike | None = None, + to_begin: ArrayLike | None = None, +) -> _MArray1D[np.int8]: ... +@overload +def ediff1d[NumericT: _ScalarNumeric]( + arr: _ArrayLike[NumericT], + to_end: ArrayLike | None = None, + to_begin: ArrayLike | None = None, +) -> _MArray1D[NumericT]: ... +@overload +def ediff1d( + arr: _ArrayLike[np.datetime64[Any]], + to_end: ArrayLike | None = None, + to_begin: ArrayLike | None = None, +) -> _MArray1D[np.timedelta64]: ... +@overload +def ediff1d( + arr: _ArrayLikeComplex_co, + to_end: ArrayLike | None = None, + to_begin: ArrayLike | None = None, +) -> _MArray1D[Incomplete]: ... + +# keep in sync with `lib._arraysetops_impl.unique`, minus `return_counts` +@overload # known scalar-type, FF +def unique[ScalarT: np.generic]( + ar1: _ArrayLike[ScalarT], + return_index: L[False] = False, + return_inverse: L[False] = False, +) -> _MArray[ScalarT]: ... +@overload # unknown scalar-type, FF +def unique( + ar1: ArrayLike, + return_index: L[False] = False, + return_inverse: L[False] = False, +) -> _MArray[Incomplete]: ... +@overload # known scalar-type, TF +def unique[ScalarT: np.generic]( + ar1: _ArrayLike[ScalarT], + return_index: L[True], + return_inverse: L[False] = False, +) -> tuple[_MArray[ScalarT], _IntArray]: ... +@overload # unknown scalar-type, TFF +def unique( + ar1: ArrayLike, + return_index: L[True], + return_inverse: L[False] = False, +) -> tuple[_MArray[Incomplete], _IntArray]: ... 
+@overload # known scalar-type, FT (positional) +def unique[ScalarT: np.generic]( + ar1: _ArrayLike[ScalarT], + return_index: L[False], + return_inverse: L[True], +) -> tuple[_MArray[ScalarT], _IntArray]: ... +@overload # known scalar-type, FT (keyword) +def unique[ScalarT: np.generic]( + ar1: _ArrayLike[ScalarT], + return_index: L[False] = False, + *, + return_inverse: L[True], +) -> tuple[_MArray[ScalarT], _IntArray]: ... +@overload # unknown scalar-type, FT (positional) +def unique( + ar1: ArrayLike, + return_index: L[False], + return_inverse: L[True], +) -> tuple[_MArray[Incomplete], _IntArray]: ... +@overload # unknown scalar-type, FT (keyword) +def unique( + ar1: ArrayLike, + return_index: L[False] = False, + *, + return_inverse: L[True], +) -> tuple[_MArray[Incomplete], _IntArray]: ... +@overload # known scalar-type, TT +def unique[ScalarT: np.generic]( + ar1: _ArrayLike[ScalarT], + return_index: L[True], + return_inverse: L[True], +) -> tuple[_MArray[ScalarT], _IntArray, _IntArray]: ... +@overload # unknown scalar-type, TT +def unique( + ar1: ArrayLike, + return_index: L[True], + return_inverse: L[True], +) -> tuple[_MArray[Incomplete], _IntArray, _IntArray]: ... + +# NOTE: we ignore UP047 because inlining `_AnyScalarT` would result in a lot of code duplication + +# keep in sync with `lib._arraysetops_impl.intersect1d` +@overload # known scalar-type, return_indices=False (default) +def intersect1d( # noqa: UP047 + ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False +) -> _MArray1D[_AnyScalarT]: ... +@overload # unknown scalar-type, return_indices=False (default) +def intersect1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _MArray1D[Incomplete]: ... + +# keep in sync with `lib._arraysetops_impl.setxor1d` +@overload +def setxor1d( # noqa: UP047 + ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False +) -> _MArray1D[_AnyScalarT]: ... 
+@overload +def setxor1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _MArray1D[Incomplete]: ... + +# keep in sync with `lib._arraysetops_impl.union1d` +@overload +def union1d(ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT]) -> _MArray1D[_AnyScalarT]: ... # noqa: UP047 +@overload +def union1d(ar1: ArrayLike, ar2: ArrayLike) -> _MArray1D[Incomplete]: ... + +# keep in sync with `lib._arraysetops_impl.setdiff1d` +@overload +def setdiff1d( # noqa: UP047 + ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False +) -> _MArray1D[_AnyScalarT]: ... +@overload +def setdiff1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _MArray1D[Incomplete]: ... + +# +def in1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False, invert: bool = False) -> _MArray1D[np.bool]: ... + +# keep in sync with `lib._arraysetops_impl.isin` +def isin( + element: ArrayLike, test_elements: ArrayLike, assume_unique: bool = False, invert: bool = False +) -> _MArray[np.bool]: ... + +# keep in sync with `corrcoef` +def cov( + x: ArrayLike, + y: ArrayLike | None = None, + rowvar: bool = True, + bias: bool = False, + allow_masked: bool = True, + ddof: int | None = None +) -> _MArray[Incomplete]: ... + +# keep in sync with `cov` +def corrcoef(x: ArrayLike, y: ArrayLike | None = None, rowvar: bool = True, allow_masked: bool = True) -> _MArray[Incomplete]: ... class MAxisConcatenator(AxisConcatenator): - concatenate: Any + __slots__ = () + + # keep in sync with `ma.core.concatenate` + @override # type: ignore[override] + @overload + @staticmethod + def concatenate[ScalarT: np.generic](arrays: _ArrayLike[ScalarT], axis: SupportsIndex | None = 0) -> _MArray[ScalarT]: ... # pyrefly: ignore[bad-override] + @overload + @staticmethod + def concatenate(arrays: SupportsLenAndGetItem[ArrayLike], axis: SupportsIndex | None = 0) -> _MArray[Incomplete]: ... 
# pyright: ignore[reportIncompatibleMethodOverride] + + # + @override @classmethod - def makemat(cls, arr): ... - def __getitem__(self, key): ... + def makemat(cls, /, arr: ArrayLike) -> _MArray[Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleVariableOverride] class mr_class(MAxisConcatenator): - def __init__(self): ... - -mr_: mr_class - -def ndenumerate(a, compressed=...): ... -def flatnotmasked_edges(a): ... -def notmasked_edges(a, axis=...): ... -def flatnotmasked_contiguous(a): ... -def notmasked_contiguous(a, axis=...): ... -def clump_unmasked(a): ... -def clump_masked(a): ... -def vander(x, n=...): ... -def polyfit(x, y, deg, rcond=..., full=..., w=..., cov=...): ... + __slots__ = () + def __init__(self) -> None: ... + +mr_: Final[mr_class] = ... + +# +@overload +def ndenumerate[ShapeT: _Shape, ScalarT: np.generic]( + a: np.ndarray[ShapeT, np.dtype[ScalarT]], + compressed: bool = True, +) -> Iterator[tuple[ShapeT, ScalarT]]: ... +@overload +def ndenumerate[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + compressed: bool = True, +) -> Iterator[tuple[_AnyShape, ScalarT]]: ... +@overload +def ndenumerate( + a: ArrayLike, + compressed: bool = True, +) -> Iterator[tuple[_AnyShape, Incomplete]]: ... + +# +@overload +def flatnotmasked_edges[ScalarT: np.generic](a: _ArrayLike[ScalarT]) -> _Array1D[ScalarT] | None: ... +@overload +def flatnotmasked_edges(a: ArrayLike) -> _Array1D[Incomplete] | None: ... + +# +@overload +def notmasked_edges[ScalarT: np.generic](a: _ArrayLike[ScalarT], axis: None = None) -> _Array1D[ScalarT] | None: ... +@overload +def notmasked_edges(a: ArrayLike, axis: None = None) -> _Array1D[Incomplete] | None: ... +@overload +def notmasked_edges(a: ArrayLike, axis: SupportsIndex) -> Incomplete: ... + +# +def flatnotmasked_contiguous(a: ArrayLike) -> list[slice[int, int, None]]: ... + +# +@overload +def notmasked_contiguous(a: ArrayLike, axis: None = None) -> list[slice[int, int, None]]: ... 
+@overload +def notmasked_contiguous(a: ArrayLike, axis: SupportsIndex) -> list[Incomplete]: ... + +# +def _ezclump(mask: np.ndarray) -> list[slice[int, int, None]]: ... # undocumented +def clump_unmasked(a: np.ndarray) -> list[slice[int, int, None]]: ... +def clump_masked(a: np.ndarray) -> list[slice[int, int, None]]: ... + +# keep in sync with `lib._twodim_base_impl.vander` +@overload +def vander[ScalarT: np.number | np.object_](x: _ArrayLike[ScalarT], n: int | None = None) -> _Array2D[ScalarT]: ... +@overload +def vander(x: _ArrayLike[np.bool] | list[int], n: int | None = None) -> _Array2D[np.int_]: ... +@overload +def vander(x: list[float], n: int | None = None) -> _Array2D[np.float64]: ... +@overload +def vander(x: list[complex], n: int | None = None) -> _Array2D[np.complex128]: ... +@overload # fallback +def vander(x: Sequence[_NumberLike_co], n: int | None = None) -> _Array2D[Any]: ... + +# keep roughly in sync with `lib._polynomial_impl.polyfit` +@overload # float dtype, cov: False (default) +def polyfit( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: int, + rcond: bool | None = None, + full: L[False] = False, + w: _ArrayLikeFloat_co | None = None, + cov: L[False] = False +) -> NDArray[np.float64]: ... +@overload # float dtype, cov: True | "unscaled" (keyword) +def polyfit( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: int, + rcond: bool | None = None, + full: L[False] = False, + w: _ArrayLikeFloat_co | None = None, + *, + cov: L[True, "unscaled"], +) -> _2Tup[NDArray[np.float64]]: ... +@overload # float dtype, full: True (keyword) +def polyfit( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: int, + rcond: bool | None = None, + *, + full: L[True], + w: _ArrayLikeFloat_co | None = None, + cov: bool | L["unscaled"] = False, +) -> _5Tup[NDArray[np.float64]]: ... 
+@overload # complex dtype, cov: False (default) +def polyfit( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: int, + rcond: bool | None = None, + full: L[False] = False, + w: _ArrayLikeFloat_co | None = None, + cov: L[False] = False +) -> NDArray[Incomplete]: ... +@overload # complex dtype, cov: True | "unscaled" (keyword) +def polyfit( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: int, + rcond: bool | None = None, + full: L[False] = False, + w: _ArrayLikeFloat_co | None = None, + *, + cov: L[True, "unscaled"], +) -> _2Tup[NDArray[np.complex128 | Any]]: ... +@overload # complex dtype, full: True (keyword) +def polyfit( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: int, + rcond: bool | None = None, + *, + full: L[True], + w: _ArrayLikeFloat_co | None = None, + cov: bool | L["unscaled"] = False, +) -> _5Tup[NDArray[np.complex128 | Any]]: ... diff --git a/numpy/ma/mrecords.py b/numpy/ma/mrecords.py index 4eb92b6bd7b0..d35bb9b79925 100644 --- a/numpy/ma/mrecords.py +++ b/numpy/ma/mrecords.py @@ -13,19 +13,10 @@ # first place, and then rename the invalid fields with a trailing # underscore. Maybe we could just overload the parser function ? 
-from numpy.ma import ( - MAError, MaskedArray, masked, nomask, masked_array, getdata, - getmaskarray, filled -) -import numpy.ma as ma import warnings import numpy as np -from numpy import dtype, ndarray, array as narray - -from numpy._core.records import ( - recarray, fromarrays as recfromarrays, fromrecords as recfromrecords -) +import numpy.ma as ma _byteorderconv = np._core.records._byteorderconv @@ -50,7 +41,7 @@ def _checknames(descr, names=None): """ ndescr = len(descr) - default_names = ['f%i' % i for i in range(ndescr)] + default_names = [f'f{i}' for i in range(ndescr)] if names is None: new_names = default_names else: @@ -82,7 +73,7 @@ def _get_fieldmask(self): return fdmask -class MaskedRecords(MaskedArray): +class MaskedRecords(ma.MaskedArray): """ Attributes @@ -103,17 +94,17 @@ class MaskedRecords(MaskedArray): def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None, formats=None, names=None, titles=None, byteorder=None, aligned=False, - mask=nomask, hard_mask=False, fill_value=None, keep_mask=True, + mask=ma.nomask, hard_mask=False, fill_value=None, keep_mask=True, copy=False, **options): - self = recarray.__new__(cls, shape, dtype=dtype, buf=buf, offset=offset, - strides=strides, formats=formats, names=names, - titles=titles, byteorder=byteorder, - aligned=aligned,) + self = np.recarray.__new__(cls, shape, dtype=dtype, buf=buf, offset=offset, + strides=strides, formats=formats, names=names, + titles=titles, byteorder=byteorder, + aligned=aligned,) mdtype = ma.make_mask_descr(self.dtype) - if mask is nomask or not np.size(mask): + if mask is ma.nomask or not np.size(mask): if not keep_mask: self._mask = tuple([False] * len(mdtype)) else: @@ -125,9 +116,9 @@ def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None, elif nm == nd: mask = np.reshape(mask, self.shape) else: - msg = "Mask and data not compatible: data size is %i, " + \ - "mask size is %i." 
-            raise MAError(msg % (nd, nm))
+            msg = (f"Mask and data not compatible: data size is {nd},"
+                   f" mask size is {nm}.")
+            raise ma.MAError(msg)
         if not keep_mask:
             self.__setmask__(mask)
             self._sharedmask = True
@@ -144,21 +135,20 @@ def __array_finalize__(self, obj):
         # Make sure we have a _fieldmask by default
         _mask = getattr(obj, '_mask', None)
         if _mask is None:
-            objmask = getattr(obj, '_mask', nomask)
-            _dtype = ndarray.__getattribute__(self, 'dtype')
-            if objmask is nomask:
+            objmask = getattr(obj, '_mask', ma.nomask)
+            _dtype = np.ndarray.__getattribute__(self, 'dtype')
+            if objmask is ma.nomask:
                 _mask = ma.make_mask_none(self.shape, dtype=_dtype)
             else:
                 mdescr = ma.make_mask_descr(_dtype)
-                _mask = narray([tuple([m] * len(mdescr)) for m in objmask],
-                               dtype=mdescr).view(recarray)
+                _mask = np.array([tuple([m] * len(mdescr)) for m in objmask],
+                                 dtype=mdescr).view(np.recarray)
         # Update some of the attributes
         _dict = self.__dict__
         _dict.update(_mask=_mask)
         self._update_from(obj)
-        if _dict['_baseclass'] == ndarray:
-            _dict['_baseclass'] = recarray
-        return
+        if _dict['_baseclass'] == np.ndarray:
+            _dict['_baseclass'] = np.recarray

     @property
     def _data(self):
@@ -166,7 +156,7 @@ def _data(self):
         Returns the data as a recarray.
""" - return ndarray.view(self, recarray) + return np.ndarray.view(self, np.recarray) @property def _fieldmask(self): @@ -193,15 +183,15 @@ def __getattribute__(self, attr): except AttributeError: # attr must be a fieldname pass - fielddict = ndarray.__getattribute__(self, 'dtype').fields + fielddict = np.ndarray.__getattribute__(self, 'dtype').fields try: res = fielddict[attr][:2] except (TypeError, KeyError) as e: raise AttributeError( f'record array has no attribute {attr}') from e # So far, so good - _localdict = ndarray.__getattribute__(self, '__dict__') - _data = ndarray.view(self, _localdict['_baseclass']) + _localdict = np.ndarray.__getattribute__(self, '__dict__') + _data = np.ndarray.view(self, _localdict['_baseclass']) obj = _data.getfield(*res) if obj.dtype.names is not None: raise NotImplementedError("MaskedRecords is currently limited to" @@ -219,8 +209,8 @@ def __getattribute__(self, attr): tp_len = len(_mask.dtype) hasmasked = _mask.view((bool, ((tp_len,) if tp_len else ()))).any() if (obj.shape or hasmasked): - obj = obj.view(MaskedArray) - obj._baseclass = ndarray + obj = obj.view(ma.MaskedArray) + obj._baseclass = np.ndarray obj._isfield = True obj._mask = _mask # Reset the field values @@ -252,13 +242,13 @@ def __setattr__(self, attr, val): ret = object.__setattr__(self, attr, val) except Exception: # Not a generic attribute: exit if it's not a valid field - fielddict = ndarray.__getattribute__(self, 'dtype').fields or {} - optinfo = ndarray.__getattribute__(self, '_optinfo') or {} + fielddict = np.ndarray.__getattribute__(self, 'dtype').fields or {} + optinfo = np.ndarray.__getattribute__(self, '_optinfo') or {} if not (attr in fielddict or attr in optinfo): raise else: # Get the list of names - fielddict = ndarray.__getattribute__(self, 'dtype').fields or {} + fielddict = np.ndarray.__getattribute__(self, 'dtype').fields or {} # Check the attribute if attr not in fielddict: return ret @@ -276,7 +266,7 @@ def __setattr__(self, attr, val): raise 
AttributeError( f'record array has no attribute {attr}') from e - if val is masked: + if val is ma.masked: _fill_value = _localdict['_fill_value'] if _fill_value is not None: dval = _localdict['_fill_value'][attr] @@ -284,9 +274,9 @@ def __setattr__(self, attr, val): dval = val mval = True else: - dval = filled(val) - mval = getmaskarray(val) - obj = ndarray.__getattribute__(self, '_data').setfield(dval, *res) + dval = ma.filled(val) + mval = ma.getmaskarray(val) + obj = np.ndarray.__getattribute__(self, '_data').setfield(dval, *res) _localdict['_mask'].__setitem__(attr, mval) return obj @@ -298,15 +288,15 @@ def __getitem__(self, indx): """ _localdict = self.__dict__ - _mask = ndarray.__getattribute__(self, '_mask') - _data = ndarray.view(self, _localdict['_baseclass']) + _mask = np.ndarray.__getattribute__(self, '_mask') + _data = np.ndarray.view(self, _localdict['_baseclass']) # We want a field if isinstance(indx, str): # Make sure _sharedmask is True to propagate back to _fieldmask # Don't use _set_mask, there are some copies being made that # break propagation Don't force the mask to nomask, that wreaks # easy masking - obj = _data[indx].view(MaskedArray) + obj = _data[indx].view(ma.MaskedArray) obj._mask = _mask[indx] obj._sharedmask = True fval = _localdict['_fill_value'] @@ -314,12 +304,12 @@ def __getitem__(self, indx): obj._fill_value = fval[indx] # Force to masked if the mask is True if not obj.ndim and obj._mask: - return masked + return ma.masked return obj # We want some elements. # First, the data. obj = np.asarray(_data[indx]).view(mrecarray) - obj._mask = np.asarray(_mask[indx]).view(recarray) + obj._mask = np.asarray(_mask[indx]).view(np.recarray) return obj def __setitem__(self, indx, value): @@ -327,7 +317,7 @@ def __setitem__(self, indx, value): Sets the given record to value. 
""" - MaskedArray.__setitem__(self, indx, value) + ma.MaskedArray.__setitem__(self, indx, value) if isinstance(indx, str): self._mask[indx] = ma.getmaskarray(value) @@ -351,7 +341,7 @@ def __repr__(self): """ _names = self.dtype.names - fmt = "%%%is : %%s" % (max([len(n) for n in _names]) + 4,) + fmt = f"%{max(len(n) for n in _names) + 4}s : %s" reprstr = [fmt % (f, getattr(self, f)) for f in self.dtype.names] reprstr.insert(0, 'masked_records(') reprstr.extend([fmt % (' fill_value', self.fill_value), @@ -366,16 +356,16 @@ def view(self, dtype=None, type=None): # OK, basic copy-paste from MaskedArray.view. if dtype is None: if type is None: - output = ndarray.view(self) + output = np.ndarray.view(self) else: - output = ndarray.view(self, type) + output = np.ndarray.view(self, type) # Here again. elif type is None: try: - if issubclass(dtype, ndarray): - output = ndarray.view(self, dtype) + if issubclass(dtype, np.ndarray): + output = np.ndarray.view(self, dtype) else: - output = ndarray.view(self, dtype) + output = np.ndarray.view(self, dtype) # OK, there's the change except TypeError: dtype = np.dtype(dtype) @@ -387,15 +377,15 @@ def view(self, dtype=None, type=None): output = self.__array__().view(dtype, basetype) output._update_from(self) else: - output = ndarray.view(self, dtype) + output = np.ndarray.view(self, dtype) output._fill_value = None else: - output = ndarray.view(self, dtype, type) + output = np.ndarray.view(self, dtype, type) # Update the mask, just like in MaskedArray.view - if (getattr(output, '_mask', nomask) is not nomask): + if (getattr(output, '_mask', ma.nomask) is not ma.nomask): mdtype = ma.make_mask_descr(output.dtype) - output._mask = self._mask.view(mdtype, ndarray) - output._mask.shape = output.shape + output._mask = self._mask.view(mdtype, np.ndarray) + output._mask = output._mask.reshape(output.shape) return output def harden_mask(self): @@ -432,8 +422,8 @@ def tolist(self, fill_value=None): """ if fill_value is not None: return 
self.filled(fill_value).tolist() - result = narray(self.filled().tolist(), dtype=object) - mask = narray(self._mask.tolist()) + result = np.array(self.filled().tolist(), dtype=object) + mask = np.array(self._mask.tolist()) result[mask] = None return result.tolist() @@ -468,8 +458,8 @@ def __setstate__(self, state): """ (ver, shp, typ, isf, raw, msk, flv) = state - ndarray.__setstate__(self, (shp, typ, isf, raw)) - mdtype = dtype([(k, np.bool) for (k, _) in self.dtype.descr]) + np.ndarray.__setstate__(self, (shp, typ, isf, raw)) + mdtype = np.dtype([(k, np.bool) for (k, _) in self.dtype.descr]) self.__dict__['_mask'].__setstate__((shp, mdtype, isf, msk)) self.fill_value = flv @@ -488,10 +478,11 @@ def _mrreconstruct(subtype, baseclass, baseshape, basetype,): Build a new MaskedArray from the information stored in a pickle. """ - _data = ndarray.__new__(baseclass, baseshape, basetype).view(subtype) - _mask = ndarray.__new__(ndarray, baseshape, 'b1') + _data = np.ndarray.__new__(baseclass, baseshape, basetype).view(subtype) + _mask = np.ndarray.__new__(np.ndarray, baseshape, 'b1') return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,) + mrecarray = MaskedRecords @@ -531,12 +522,12 @@ def fromarrays(arraylist, dtype=None, shape=None, formats=None, Lists of tuples should be preferred over lists of lists for faster processing. 
""" - datalist = [getdata(x) for x in arraylist] - masklist = [np.atleast_1d(getmaskarray(x)) for x in arraylist] - _array = recfromarrays(datalist, - dtype=dtype, shape=shape, formats=formats, - names=names, titles=titles, aligned=aligned, - byteorder=byteorder).view(mrecarray) + datalist = [ma.getdata(x) for x in arraylist] + masklist = [np.atleast_1d(ma.getmaskarray(x)) for x in arraylist] + _array = np.rec.fromarrays(datalist, + dtype=dtype, shape=shape, formats=formats, + names=names, titles=titles, aligned=aligned, + byteorder=byteorder).view(mrecarray) _array._mask.flat = list(zip(*masklist)) if fill_value is not None: _array.fill_value = fill_value @@ -545,7 +536,7 @@ def fromarrays(arraylist, dtype=None, shape=None, formats=None, def fromrecords(reclist, dtype=None, shape=None, formats=None, names=None, titles=None, aligned=False, byteorder=None, - fill_value=None, mask=nomask): + fill_value=None, mask=ma.nomask): """ Creates a MaskedRecords from a list of records. @@ -579,22 +570,22 @@ def fromrecords(reclist, dtype=None, shape=None, formats=None, names=None, # Grab the initial _fieldmask, if needed: _mask = getattr(reclist, '_mask', None) # Get the list of records. 
- if isinstance(reclist, ndarray): + if isinstance(reclist, np.ndarray): # Make sure we don't have some hidden mask - if isinstance(reclist, MaskedArray): - reclist = reclist.filled().view(ndarray) + if isinstance(reclist, ma.MaskedArray): + reclist = reclist.filled().view(np.ndarray) # Grab the initial dtype, just in case if dtype is None: dtype = reclist.dtype reclist = reclist.tolist() - mrec = recfromrecords(reclist, dtype=dtype, shape=shape, formats=formats, + mrec = np.rec.fromrecords(reclist, dtype=dtype, shape=shape, formats=formats, names=names, titles=titles, aligned=aligned, byteorder=byteorder).view(mrecarray) # Set the fill_value if needed if fill_value is not None: mrec.fill_value = fill_value # Now, let's deal w/ the mask - if mask is not nomask: + if mask is not ma.nomask: mask = np.asarray(mask) maskrecordlength = len(mask.dtype) if maskrecordlength: @@ -666,8 +657,7 @@ def openfile(fname): def fromtextfile(fname, delimiter=None, commentchar='#', missingchar='', - varnames=None, vartypes=None, - *, delimitor=np._NoValue): # backwards compatibility + varnames=None, vartypes=None): """ Creates a mrecarray from data stored in the file `filename`. @@ -691,16 +681,6 @@ def fromtextfile(fname, delimiter=None, commentchar='#', missingchar='', Ultra simple: the varnames are in the header, one line""" - if delimitor is not np._NoValue: - if delimiter is not None: - raise TypeError("fromtextfile() got multiple values for argument " - "'delimiter'") - # NumPy 1.22.0, 2021-09-23 - warnings.warn("The 'delimitor' keyword argument of " - "numpy.ma.mrecords.fromtextfile() is deprecated " - "since NumPy 1.22.0, use 'delimiter' instead.", - DeprecationWarning, stacklevel=2) - delimiter = delimitor # Try to open the file. ftext = openfile(fname) @@ -716,8 +696,8 @@ def fromtextfile(fname, delimiter=None, commentchar='#', missingchar='', varnames = _varnames # Get the data. 
- _variables = masked_array([line.strip().split(delimiter) for line in ftext - if line[0] != commentchar and len(line) > 1]) + _variables = ma.masked_array([line.strip().split(delimiter) for line in ftext + if line[0] != commentchar and len(line) > 1]) (_, nfields) = _variables.shape ftext.close() @@ -727,19 +707,19 @@ def fromtextfile(fname, delimiter=None, commentchar='#', missingchar='', else: vartypes = [np.dtype(v) for v in vartypes] if len(vartypes) != nfields: - msg = "Attempting to %i dtypes for %i fields!" + msg = f"Attempting to {len(vartypes)} dtypes for {nfields} fields!" msg += " Reverting to default." - warnings.warn(msg % (len(vartypes), nfields), stacklevel=2) + warnings.warn(msg, stacklevel=2) vartypes = _guessvartypes(_variables[0]) # Construct the descriptor. - mdescr = [(n, f) for (n, f) in zip(varnames, vartypes)] + mdescr = list(zip(varnames, vartypes)) mfillv = [ma.default_fill_value(f) for f in vartypes] # Get the data and the mask. # We just need a list of masked_arrays. It's easier to create it like that: _mask = (_variables.T == missingchar) - _datalist = [masked_array(a, mask=m, dtype=t, fill_value=f) + _datalist = [ma.masked_array(a, mask=m, dtype=t, fill_value=f) for (a, m, t, f) in zip(_variables.T, _mask, vartypes, mfillv)] return fromarrays(_datalist, dtype=mdescr) @@ -756,12 +736,12 @@ def addfield(mrecord, newfield, newfieldname=None): _data = mrecord._data _mask = mrecord._mask if newfieldname is None or newfieldname in reserved_fields: - newfieldname = 'f%i' % len(_data.dtype) + newfieldname = f'f{len(_data.dtype)}' newfield = ma.array(newfield) # Get the new data. 
# Create a new empty recarray newdtype = np.dtype(_data.dtype.descr + [(newfieldname, newfield.dtype)]) - newdata = recarray(_data.shape, newdtype) + newdata = np.recarray(_data.shape, newdtype) # Add the existing field [newdata.setfield(_data.getfield(*f), *f) for f in _data.dtype.fields.values()] @@ -771,12 +751,12 @@ def addfield(mrecord, newfield, newfieldname=None): # Get the new mask # Create a new empty recarray newmdtype = np.dtype([(n, np.bool) for n in newdtype.names]) - newmask = recarray(_data.shape, newmdtype) + newmask = np.recarray(_data.shape, newmdtype) # Add the old masks [newmask.setfield(_mask.getfield(*f), *f) for f in _mask.dtype.fields.values()] # Add the mask of the new field - newmask.setfield(getmaskarray(newfield), + newmask.setfield(ma.getmaskarray(newfield), *newmask.dtype.fields[newfieldname]) newdata._mask = newmask return newdata diff --git a/numpy/ma/mrecords.pyi b/numpy/ma/mrecords.pyi index 264807e05d57..f6b5d6424044 100644 --- a/numpy/ma/mrecords.pyi +++ b/numpy/ma/mrecords.pyi @@ -1,90 +1,309 @@ -from typing import Any, TypeVar +from _typeshed import Incomplete, StrPath, SupportsReadline +from collections.abc import Buffer, Sequence +from typing import IO, Any, Generic, Self, SupportsIndex, overload, override +from typing_extensions import TypeVar -from numpy import dtype -from numpy.ma import MaskedArray +import numpy as np +from numpy import _ByteOrder, _ToIndices +from numpy._typing import ( + ArrayLike, + DTypeLike, + _AnyShape, + _ArrayLikeBool_co, + _DTypeLike, + _HasDType, + _ScalarLike_co, + _Shape, + _ShapeLike, + _VoidDTypeLike, +) -__all__: list[str] +from .core import MaskedArray -# TODO: Set the `bound` to something more suitable once we -# have proper shape support -_ShapeType = TypeVar("_ShapeType", bound=Any) -_DType_co = TypeVar("_DType_co", bound=dtype[Any], covariant=True) +__all__ = ["MaskedRecords", "mrecarray", "fromarrays", "fromrecords", "fromtextfile", "addfield"] -class 
MaskedRecords(MaskedArray[_ShapeType, _DType_co]): - def __new__( - cls, - shape, - dtype=..., - buf=..., - offset=..., - strides=..., - formats=..., - names=..., - titles=..., - byteorder=..., - aligned=..., - mask=..., - hard_mask=..., - fill_value=..., - keep_mask=..., - copy=..., - **options, - ): ... +### + +_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) + +type _Ignored = object +type _Names = str | Sequence[str] + +### +# mypy: disable-error-code=no-untyped-def + +class MaskedRecords(MaskedArray[_ShapeT_co, _DTypeT_co], Generic[_ShapeT_co, _DTypeT_co]): _mask: Any _fill_value: Any + + def __new__( + cls, + shape: _ShapeLike, + dtype: DTypeLike | None = None, + buf: Buffer | None = None, + offset: SupportsIndex = 0, + strides: _ShapeLike | None = None, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + byteorder: _ByteOrder | None = None, + aligned: bool = False, + mask: _ArrayLikeBool_co = ..., + hard_mask: bool = False, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + copy: bool = False, + **options: _Ignored, + ) -> Self: ... + + # @property - def _data(self): ... + @override + def _data(self, /) -> np.recarray[_ShapeT_co, _DTypeT_co]: ... @property - def _fieldmask(self): ... - def __array_finalize__(self, obj): ... - def __len__(self): ... - def __getattribute__(self, attr): ... - def __setattr__(self, attr, val): ... - def __getitem__(self, indx): ... - def __setitem__(self, indx, value): ... - def view(self, dtype=..., type=...): ... - def harden_mask(self): ... - def soften_mask(self): ... - def copy(self): ... - def tolist(self, fill_value=...): ... - def __reduce__(self): ... + def _fieldmask(self, /) -> np.ndarray[_ShapeT_co, np.dtype[np.bool]] | np.bool: ... + + # + @override + def __array_finalize__(self, obj: np.ndarray) -> None: ... 
# type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + @override + def __getitem__(self, indx: str | _ToIndices, /) -> Incomplete: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + @override + def __setitem__(self, indx: str | _ToIndices, value: Incomplete, /) -> None: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + + # unlike `MaskedArray`, these two methods don't return `Self` + @override + def harden_mask(self) -> None: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + @override + def soften_mask(self) -> None: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `MaskedArray.view`, but without the `fill_value` + @override # type: ignore[override] + @overload # () + def view(self, /, dtype: None = None, type: None = None) -> Self: ... # pyrefly: ignore[bad-override] + @overload # (dtype: DTypeT) + def view[DTypeT: np.dtype]( + self, /, dtype: DTypeT | _HasDType[DTypeT], type: None = None + ) -> MaskedRecords[_ShapeT_co, DTypeT]: ... + @overload # (dtype: dtype[ScalarT]) + def view[ScalarT: np.generic]( + self, /, dtype: _DTypeLike[ScalarT], type: None = None + ) -> MaskedRecords[_ShapeT_co, np.dtype[ScalarT]]: ... + @overload # ([dtype: _, ]*, type: ArrayT) + def view[ArrayT: np.ndarray](self, /, dtype: DTypeLike | None = None, *, type: type[ArrayT]) -> ArrayT: ... + @overload # (dtype: _, type: ArrayT) + def view[ArrayT: np.ndarray](self, /, dtype: DTypeLike | None, type: type[ArrayT]) -> ArrayT: ... + @overload # (dtype: ArrayT, /) + def view[ArrayT: np.ndarray](self, /, dtype: type[ArrayT], type: None = None) -> ArrayT: ... + @overload # (dtype: ) + def view(self, /, dtype: _VoidDTypeLike | str | None, type: None = None) -> MaskedRecords[_ShapeT_co, np.dtype]: ... 
# pyright: ignore[reportIncompatibleMethodOverride] + + # unlike `MaskedArray` and `ndarray`, this `copy` method has no `order` parameter + @override + def copy(self, /) -> Self: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] mrecarray = MaskedRecords +@overload # known dtype, known shape +def fromarrays[DTypeT: np.dtype, ShapeT: _Shape]( + arraylist: Sequence[ArrayLike], + dtype: DTypeT | _HasDType[DTypeT], + shape: ShapeT, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, +) -> MaskedRecords[ShapeT, DTypeT]: ... +@overload # known dtype, unknown shape +def fromarrays[DTypeT: np.dtype]( + arraylist: Sequence[ArrayLike], + dtype: DTypeT | _HasDType[DTypeT], + shape: _ShapeLike | None = None, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, +) -> MaskedRecords[_AnyShape, DTypeT]: ... +@overload # known scalar-type, known shape +def fromarrays[ScalarT: np.generic, ShapeT: _Shape]( + arraylist: Sequence[ArrayLike], + dtype: _DTypeLike[ScalarT], + shape: ShapeT, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, +) -> MaskedRecords[ShapeT, np.dtype[ScalarT]]: ... 
+@overload # known scalar-type, unknown shape +def fromarrays[ScalarT: np.generic]( + arraylist: Sequence[ArrayLike], + dtype: _DTypeLike[ScalarT], + shape: _ShapeLike | None = None, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, +) -> MaskedRecords[_AnyShape, np.dtype[ScalarT]]: ... +@overload # unknown dtype, known shape (positional) +def fromarrays[ShapeT: _Shape]( + arraylist: Sequence[ArrayLike], + dtype: DTypeLike | None, + shape: ShapeT, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, +) -> MaskedRecords[ShapeT]: ... +@overload # unknown dtype, known shape (keyword) +def fromarrays[ShapeT: _Shape]( + arraylist: Sequence[ArrayLike], + dtype: DTypeLike | None = None, + *, + shape: ShapeT, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, +) -> MaskedRecords[ShapeT]: ... +@overload # unknown dtype, unknown shape def fromarrays( - arraylist, - dtype=..., - shape=..., - formats=..., - names=..., - titles=..., - aligned=..., - byteorder=..., - fill_value=..., -): ... + arraylist: Sequence[ArrayLike], + dtype: DTypeLike | None = None, + shape: _ShapeLike | None = None, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, +) -> MaskedRecords: ... 
+# +@overload # known dtype, known shape +def fromrecords[DTypeT: np.dtype, ShapeT: _Shape]( + reclist: ArrayLike, + dtype: DTypeT, + shape: ShapeT, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, + mask: _ArrayLikeBool_co = ..., +) -> MaskedRecords[ShapeT, DTypeT]: ... +@overload # known dtype, unknown shape +def fromrecords[DTypeT: np.dtype]( + reclist: ArrayLike, + dtype: DTypeT, + shape: _ShapeLike | None = None, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, + mask: _ArrayLikeBool_co = ..., +) -> MaskedRecords[_AnyShape, DTypeT]: ... +@overload # known scalar-type, known shape +def fromrecords[ScalarT: np.generic, ShapeT: _Shape]( + reclist: ArrayLike, + dtype: _DTypeLike[ScalarT], + shape: ShapeT, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, + mask: _ArrayLikeBool_co = ..., +) -> MaskedRecords[ShapeT, np.dtype[ScalarT]]: ... +@overload # known scalar-type, unknown shape +def fromrecords[ScalarT: np.generic]( + reclist: ArrayLike, + dtype: _DTypeLike[ScalarT], + shape: _ShapeLike | None = None, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, + mask: _ArrayLikeBool_co = ..., +) -> MaskedRecords[_AnyShape, np.dtype[ScalarT]]: ... 
+@overload # unknown dtype, known shape (positional) +def fromrecords[ShapeT: _Shape]( + reclist: ArrayLike, + dtype: DTypeLike | None, + shape: ShapeT, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, + mask: _ArrayLikeBool_co = ..., +) -> MaskedRecords[ShapeT, np.dtype[Incomplete]]: ... +@overload # unknown dtype, known shape (keyword) +def fromrecords[ShapeT: _Shape]( + reclist: ArrayLike, + dtype: DTypeLike | None = None, + *, + shape: ShapeT, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, + mask: _ArrayLikeBool_co = ..., +) -> MaskedRecords[ShapeT, np.dtype[Incomplete]]: ... +@overload # unknown dtype, unknown shape def fromrecords( - reclist, - dtype=..., - shape=..., - formats=..., - names=..., - titles=..., - aligned=..., - byteorder=..., - fill_value=..., - mask=..., -): ... + reclist: ArrayLike, + dtype: DTypeLike | None = None, + shape: _ShapeLike | None = None, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, + mask: _ArrayLikeBool_co = ..., +) -> MaskedRecords[_AnyShape, np.dtype[Incomplete]]: ... +# undocumented +@overload +def openfile(fname: StrPath) -> IO[str]: ... +@overload +def openfile[FileT: SupportsReadline[str]](fname: FileT) -> FileT: ... + +# def fromtextfile( - fname, - delimiter=..., - commentchar=..., - missingchar=..., - varnames=..., - vartypes=..., - # NOTE: deprecated: NumPy 1.22.0, 2021-09-23 - # delimitor=..., -): ... - -def addfield(mrecord, newfield, newfieldname=...): ... 
+ fname: StrPath | SupportsReadline[str], + delimiter: str | None = None, + commentchar: str = "#", + missingchar: str = "", + varnames: Sequence[str] | None = None, + vartypes: Sequence[DTypeLike] | None = None, +) -> MaskedRecords[tuple[int], np.dtype[np.void]]: ... + +# +def addfield[ShapeT: _Shape]( + mrecord: MaskedRecords[ShapeT], + newfield: ArrayLike, + newfieldname: str | None = None, +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 58d787226e84..2dc7fe5a9b17 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -1,4 +1,3 @@ -# pylint: disable-msg=W0400,W0511,W0611,W0612,W0614,R0201,E1102 """Tests suite for MaskedArray & subclassing. :author: Pierre Gerard-Marchant @@ -6,71 +5,162 @@ """ __author__ = "Pierre GF Gerard-Marchant" -import sys -import warnings import copy -import operator +import datetime as dt +import inspect import itertools -import textwrap +import operator import pickle +import sys +import textwrap +import warnings from functools import reduce import pytest import numpy as np -import numpy.ma.core import numpy._core.fromnumeric as fromnumeric import numpy._core.umath as umath -from numpy.exceptions import AxisError -from numpy.testing import ( - assert_raises, assert_warns, suppress_warnings, IS_WASM - ) -from numpy.testing._private.utils import requires_memory +import numpy.ma.core from numpy import ndarray from numpy._utils import asbytes -from numpy.ma.testutils import ( - assert_, assert_array_equal, assert_equal, assert_almost_equal, - assert_equal_records, fail_if_equal, assert_not_equal, - assert_mask_equal - ) +from numpy.exceptions import AxisError from numpy.ma.core import ( - MAError, MaskError, MaskType, MaskedArray, abs, absolute, add, all, - allclose, allequal, alltrue, angle, anom, arange, arccos, arccosh, arctan2, - arcsin, arctan, argsort, array, asarray, choose, concatenate, - conjugate, cos, cosh, count, 
default_fill_value, diag, divide, doc_note, - empty, empty_like, equal, exp, flatten_mask, filled, fix_invalid, - flatten_structured_array, fromflex, getmask, getmaskarray, greater, - greater_equal, identity, inner, isMaskedArray, less, less_equal, log, - log10, make_mask, make_mask_descr, mask_or, masked, masked_array, - masked_equal, masked_greater, masked_greater_equal, masked_inside, - masked_less, masked_less_equal, masked_not_equal, masked_outside, - masked_print_option, masked_values, masked_where, max, maximum, - maximum_fill_value, min, minimum, minimum_fill_value, mod, multiply, - mvoid, nomask, not_equal, ones, ones_like, outer, power, product, put, - putmask, ravel, repeat, reshape, resize, shape, sin, sinh, sometrue, sort, - sqrt, subtract, sum, take, tan, tanh, transpose, where, zeros, zeros_like, - ) + MAError, + MaskedArray, + MaskError, + MaskType, + abs, + absolute, + add, + all, + allclose, + allequal, + alltrue, + angle, + anom, + arange, + arccos, + arccosh, + arcsin, + arctan, + arctan2, + argsort, + array, + asarray, + choose, + concatenate, + conjugate, + cos, + cosh, + count, + default_fill_value, + diag, + divide, + empty, + empty_like, + equal, + exp, + filled, + fix_invalid, + flatten_mask, + flatten_structured_array, + fromflex, + getmask, + getmaskarray, + greater, + greater_equal, + identity, + inner, + isMaskedArray, + less, + less_equal, + log, + log10, + make_mask, + make_mask_descr, + mask_or, + masked, + masked_array, + masked_equal, + masked_greater, + masked_greater_equal, + masked_inside, + masked_less, + masked_less_equal, + masked_not_equal, + masked_outside, + masked_print_option, + masked_values, + masked_where, + max, + maximum, + maximum_fill_value, + min, + minimum, + minimum_fill_value, + mod, + multiply, + mvoid, + nomask, + not_equal, + ones, + ones_like, + outer, + power, + product, + put, + putmask, + ravel, + repeat, + reshape, + resize, + shape, + sin, + sinh, + sometrue, + sort, + sqrt, + subtract, + sum, + 
take, + tan, + tanh, + transpose, + where, + zeros, + zeros_like, +) +from numpy.ma.testutils import ( + assert_, + assert_almost_equal, + assert_array_equal, + assert_equal, + assert_equal_records, + assert_mask_equal, + assert_not_equal, + fail_if_equal, +) +from numpy.testing import IS_WASM, assert_raises, temppath +from numpy.testing._private.utils import requires_memory pi = np.pi -suppress_copy_mask_on_assignment = suppress_warnings() -suppress_copy_mask_on_assignment.filter( - numpy.ma.core.MaskedArrayFutureWarning, - "setting an item on a masked array which has a shared mask will not copy") - - # For parametrized numeric testing num_dts = [np.dtype(dt_) for dt_ in '?bhilqBHILQefdgFD'] num_ids = [dt_.char for dt_ in num_dts] - +WARNING_MESSAGE = ("setting an item on a masked array which has a shared " + "mask will not copy") +WARNING_MARK_SPEC = f"ignore:.*{WARNING_MESSAGE}:numpy.ma.core.MaskedArrayFutureWarning" class TestMaskedArray: # Base test class for MaskedArrays. - def setup_method(self): + # message for warning filters + def _create_data(self): # Base data definition. - x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) a10 = 10. m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] @@ -81,7 +171,7 @@ def setup_method(self): zm = masked_array(z, mask=[0, 1, 0, 0]) xf = np.where(m1, 1e+20, x) xm.set_fill_value(1e+20) - self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf) + return x, y, a10, m1, m2, xm, ym, z, zm, xf def test_basicattributes(self): # Tests some basic array attributes. @@ -107,7 +197,7 @@ def test_basic0d(self): def test_basic1d(self): # Test of basic array creation and properties in 1 dimension. 
- (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, _, _, m1, _, xm, ym, z, zm, xf = self._create_data() assert_(not isMaskedArray(x)) assert_(isMaskedArray(xm)) assert_((xm - ym).filled(0).any()) @@ -117,35 +207,35 @@ def test_basic1d(self): assert_equal(xm.shape, s) assert_equal(xm.dtype, x.dtype) assert_equal(zm.dtype, z.dtype) - assert_equal(xm.size, reduce(lambda x, y:x * y, s)) - assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1)) + assert_equal(xm.size, reduce(lambda x, y: x * y, s)) + assert_equal(count(xm), len(m1) - reduce(lambda x, y: x + y, m1)) assert_array_equal(xm, xf) assert_array_equal(filled(xm, 1.e20), xf) assert_array_equal(x, xm) def test_basic2d(self): # Test of basic array creation and properties in 2 dimensions. - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, y, _, m1, _, xm, ym, _, _, xf = self._create_data() for s in [(4, 3), (6, 2)]: - x.shape = s - y.shape = s - xm.shape = s - ym.shape = s - xf.shape = s + x = x.reshape(s) + y = y.reshape(s) + xm = xm.reshape(s) + ym = ym.reshape(s) + xf = xf.reshape(s) assert_(not isMaskedArray(x)) assert_(isMaskedArray(xm)) assert_equal(shape(xm), s) assert_equal(xm.shape, s) - assert_equal(xm.size, reduce(lambda x, y:x * y, s)) - assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1)) + assert_equal(xm.size, reduce(lambda x, y: x * y, s)) + assert_equal(count(xm), len(m1) - reduce(lambda x, y: x + y, m1)) assert_equal(xm, xf) assert_equal(filled(xm, 1.e20), xf) assert_equal(x, xm) def test_concatenate_basic(self): # Tests concatenations. - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, y, _, _, _, xm, ym, _, _, _ = self._create_data() # basic concatenation assert_equal(np.concatenate((x, y)), concatenate((xm, ym))) assert_equal(np.concatenate((x, y)), concatenate((x, y))) @@ -154,10 +244,15 @@ def test_concatenate_basic(self): def test_concatenate_alongaxis(self): # Tests concatenations. 
- (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, y, _, m1, m2, xm, ym, z, _, xf = self._create_data() # Concatenation along an axis s = (3, 4) - x.shape = y.shape = xm.shape = ym.shape = s + x = x.reshape(s) + y = y.reshape(s) + xm = xm.reshape(s) + ym = ym.reshape(s) + xf = xf.reshape(s) + assert_equal(xm.mask, np.reshape(m1, s)) assert_equal(ym.mask, np.reshape(m2, s)) xmym = concatenate((xm, ym), 1) @@ -251,7 +346,7 @@ def test_creation_with_list_of_maskedarrays_no_bool_cast(self): # The above only failed due a long chain of oddity, try also with # an object array that cannot be converted to bool always: - class NotBool(): + class NotBool: def __bool__(self): raise ValueError("not a bool!") masked_obj = np.ma.masked_array([NotBool(), 'b'], mask=[True, False]) @@ -263,9 +358,9 @@ def __bool__(self): assert_array_equal(res.mask, [[True, False], [False, False]]) def test_creation_from_ndarray_with_padding(self): - x = np.array([('A', 0)], dtype={'names':['f0','f1'], - 'formats':['S4','i8'], - 'offsets':[0,8]}) + x = np.array([('A', 0)], dtype={'names': ['f0', 'f1'], + 'formats': ['S4', 'i8'], + 'offsets': [0, 8]}) array(x) # used to fail due to 'V' padding field in x.dtype.descr def test_unknown_keyword_parameter(self): @@ -273,7 +368,7 @@ def test_unknown_keyword_parameter(self): MaskedArray([1, 2, 3], maks=[0, 1, 0]) # `mask` is misspelled. def test_asarray(self): - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + xm = self._create_data()[5] xm.fill_value = -9999 xm._hardmask = True xmm = asarray(xm) @@ -395,7 +490,7 @@ def test_setitem_no_warning(self): x[...] = value x[[0, 1, 2]] = value - @suppress_copy_mask_on_assignment + @pytest.mark.filterwarnings(WARNING_MARK_SPEC) def test_copy(self): # Tests of some subtle points of copying and sizing. n = [0, 0, 1, 0, 0] @@ -414,9 +509,9 @@ def test_copy(self): y1a = array(y1) # Default for masked array is not to copy; see gh-10318. 
assert_(y1a._data.__array_interface__ == - y1._data.__array_interface__) + y1._data.__array_interface__) assert_(y1a._mask.__array_interface__ == - y1._mask.__array_interface__) + y1._mask.__array_interface__) y2 = array(x1, mask=m3) assert_(y2._data.__array_interface__ == x1.__array_interface__) @@ -476,8 +571,8 @@ def test_copy_0d(self): def test_copy_on_python_builtins(self): # Tests copy works on python builtins (issue#8019) - assert_(isMaskedArray(np.ma.copy([1,2,3]))) - assert_(isMaskedArray(np.ma.copy((1,2,3)))) + assert_(isMaskedArray(np.ma.copy([1, 2, 3]))) + assert_(isMaskedArray(np.ma.copy((1, 2, 3)))) def test_copy_immutable(self): # Tests that the copy method is immutable, GitHub issue #5247 @@ -515,7 +610,7 @@ def test_format(self): # assert_equal(format(masked, " <5"), "-- ") # Expect a FutureWarning for using format_spec with MaskedElement - with assert_warns(FutureWarning): + with pytest.warns(FutureWarning): with_format_string = format(masked, " >5") assert_equal(with_format_string, "--") @@ -554,7 +649,7 @@ def test_str_repr(self): # 2d arrays cause wrapping a = array([[1, 2, 3], [4, 5, 6]], dtype=np.int8) - a[1,1] = np.ma.masked + a[1, 1] = np.ma.masked assert_equal( repr(a), textwrap.dedent(f'''\ @@ -673,8 +768,7 @@ def test_pickling_wstructured(self): def test_pickling_keepalignment(self): # Tests pickling w/ F_CONTIGUOUS arrays - a = arange(10) - a.shape = (-1, 2) + a = arange(10).reshape( (-1, 2)) b = a.T for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): test = pickle.loads(pickle.dumps(b, protocol=proto)) @@ -696,8 +790,9 @@ def test_topython(self): assert_equal(1.0, float(array([[1]]))) assert_raises(TypeError, float, array([1, 1])) - with suppress_warnings() as sup: - sup.filter(UserWarning, 'Warning: converting a masked element') + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', 'Warning: converting a masked element', UserWarning) assert_(np.isnan(float(array([1], mask=[1])))) a = array([1, 2, 3], mask=[1, 0, 0]) 
@@ -706,7 +801,7 @@ def test_topython(self): assert_(np.isnan(float(a[0]))) assert_raises(TypeError, int, a) assert_equal(int(a[-1]), 3) - assert_raises(MAError, lambda:int(a[0])) + assert_raises(MAError, lambda: int(a[0])) def test_oddfeatures_1(self): # Test of other odd features @@ -750,14 +845,17 @@ def test_oddfeatures_2(self): assert_(z[1] is not masked) assert_(z[2] is masked) - @suppress_copy_mask_on_assignment def test_oddfeatures_3(self): - # Tests some generic features - atest = array([10], mask=True) - btest = array([20]) - idx = atest.mask - atest[idx] = btest[idx] - assert_equal(atest, [20]) + msg = "setting an item on a masked array which has a shared mask will not copy" + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', msg, numpy.ma.core.MaskedArrayFutureWarning) + # Tests some generic features + atest = array([10], mask=True) + btest = array([20]) + idx = atest.mask + atest[idx] = btest[idx] + assert_equal(atest, [20]) def test_filled_with_object_dtype(self): a = np.ma.masked_all(1, dtype='O') @@ -804,7 +902,7 @@ def test_filled_with_nested_dtype(self): assert_equal(test, control) # test if mask gets set correctly (see #6760) - Z = numpy.ma.zeros(2, numpy.dtype([("A", "(2,2)i1,(2,2)i1", (2,2))])) + Z = numpy.ma.zeros(2, numpy.dtype([("A", "(2,2)i1,(2,2)i1", (2, 2))])) assert_equal(Z.data.dtype, numpy.dtype([('A', [('f0', 'i1', (2, 2)), ('f1', 'i1', (2, 2))], (2, 2))])) assert_equal(Z.mask.dtype, numpy.dtype([('A', [('f0', '?', (2, 2)), @@ -828,7 +926,7 @@ def test_optinfo_propagation(self): assert_equal(x._optinfo['info'], '???') def test_optinfo_forward_propagation(self): - a = array([1,2,2,4]) + a = array([1, 2, 2, 4]) a._optinfo["key"] = "value" assert_equal(a._optinfo["key"], (a == 2)._optinfo["key"]) assert_equal(a._optinfo["key"], (a != 2)._optinfo["key"]) @@ -840,7 +938,7 @@ def test_optinfo_forward_propagation(self): assert_equal(a._optinfo["key"], (a * 2)._optinfo["key"]) assert_equal(a._optinfo["key"], (a / 
2)._optinfo["key"]) assert_equal(a._optinfo["key"], a[:2]._optinfo["key"]) - assert_equal(a._optinfo["key"], a[[0,0,2]]._optinfo["key"]) + assert_equal(a._optinfo["key"], a[[0, 0, 2]]._optinfo["key"]) assert_equal(a._optinfo["key"], np.exp(a)._optinfo["key"]) assert_equal(a._optinfo["key"], np.abs(a)._optinfo["key"]) assert_equal(a._optinfo["key"], array(a, copy=True)._optinfo["key"]) @@ -856,13 +954,13 @@ def test_fancy_printoptions(self): assert_equal(str(test), control) # Test 0-d array with multi-dimensional dtype - t_2d0 = masked_array(data = (0, [[0.0, 0.0, 0.0], - [0.0, 0.0, 0.0]], - 0.0), - mask = (False, [[True, False, True], - [False, False, True]], - False), - dtype = "int, (2,3)float, float") + t_2d0 = masked_array(data=(0, [[0.0, 0.0, 0.0], + [0.0, 0.0, 0.0]], + 0.0), + mask=(False, [[True, False, True], + [False, False, True]], + False), + dtype="int, (2,3)float, float") control = "(0, [[--, 0.0, --], [0.0, 0.0, --]], 0.0)" assert_equal(str(t_2d0), control) @@ -900,6 +998,13 @@ def test_flatten_structured_array(self): control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=float) assert_equal(test, control) assert_equal(test.dtype, control.dtype) + # for strings + ndtype = [('a', 'U5'), ('b', [('c', 'U5')])] + arr = np.array([('NumPy', ('array',)), ('array', ('numpy',))], dtype=ndtype) + test = flatten_structured_array(arr) + control = np.array([['NumPy', 'array'], ['array', 'numpy']], dtype='U5') + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) def test_void0d(self): # Test creating a mvoid object @@ -933,7 +1038,7 @@ def test_mvoid_getitem(self): assert_equal(f[1], 4) # exotic dtype - A = masked_array(data=[([0,1],)], + A = masked_array(data=[([0, 1],)], mask=[([True, False],)], dtype=[("A", ">i2", (2,))]) assert_equal(A[0]["A"], A["A"][0]) @@ -950,6 +1055,7 @@ def test_mvoid_iter(self): # w/ mask assert_equal(list(a[1]), [masked, 4]) + @pytest.mark.thread_unsafe(reason="masked_print_option.set_display global state") def 
test_mvoid_print(self): # Test printing a mvoid mx = array([(1, 1), (2, 2)], dtype=[('a', int), ('b', int)]) @@ -967,39 +1073,40 @@ def test_mvoid_print(self): mx = array([(1,), (2,)], dtype=[('a', 'O')]) assert_equal(str(mx[0]), "(1,)") + @pytest.mark.thread_unsafe(reason="masked_print_option global state") def test_mvoid_multidim_print(self): # regression test for gh-6019 - t_ma = masked_array(data = [([1, 2, 3],)], - mask = [([False, True, False],)], - fill_value = ([999999, 999999, 999999],), - dtype = [('a', ' 1: assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1)) assert_equal(np.add.reduce(x, 1), add.reduce(x, 1)) @@ -1467,7 +1625,7 @@ def test_noshink_on_creation(self): def test_mod(self): # Tests mod - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, y, _, _, _, xm, ym, _, _, _ = self._create_data() assert_equal(mod(x, y), mod(xm, ym)) test = mod(ym, xm) assert_equal(test, np.mod(ym, xm)) @@ -1665,7 +1823,7 @@ def test_eq_ne_structured_extra(self): el_by_el = [m1[name] != m2[name] for name in dt.names] assert_equal(array(el_by_el, dtype=bool).any(), ne_expected) - @pytest.mark.parametrize('dt', ['S', 'U']) + @pytest.mark.parametrize('dt', ['S', 'U', 'T']) @pytest.mark.parametrize('fill', [None, 'A']) def test_eq_for_strings(self, dt, fill): # Test the equality of structured arrays @@ -1697,7 +1855,7 @@ def test_eq_for_strings(self, dt, fill): assert_equal(test.mask, [True, False]) assert_(test.fill_value == True) - @pytest.mark.parametrize('dt', ['S', 'U']) + @pytest.mark.parametrize('dt', ['S', 'U', 'T']) @pytest.mark.parametrize('fill', [None, 'A']) def test_ne_for_strings(self, dt, fill): # Test the equality of structured arrays @@ -1847,17 +2005,26 @@ def test_comparisons_for_numeric(self, op, dt1, dt2, fill): assert_equal(test.mask, [True, False]) assert_(test.fill_value == True) + @pytest.mark.parametrize('dt', ['S', 'U', 'T']) @pytest.mark.parametrize('op', [operator.le, operator.lt, operator.ge, operator.gt]) 
@pytest.mark.parametrize('fill', [None, "N/A"]) - def test_comparisons_strings(self, op, fill): + def test_comparisons_strings(self, dt, op, fill): # See gh-21770, mask propagation is broken for strings (and some other # cases) so we explicitly test strings here. # In principle only == and != may need special handling... - ma1 = masked_array(["a", "b", "cde"], mask=[0, 1, 0], fill_value=fill) - ma2 = masked_array(["cde", "b", "a"], mask=[0, 1, 0], fill_value=fill) + ma1 = masked_array(["a", "b", "cde"], mask=[0, 1, 0], fill_value=fill, dtype=dt) + ma2 = masked_array(["cde", "b", "a"], mask=[0, 1, 0], fill_value=fill, dtype=dt) assert_equal(op(ma1, ma2)._data, op(ma1._data, ma2._data)) + if isinstance(fill, str): + fill = np.array(fill, dtype=dt) + + ma1 = masked_array(["a", "b", "cde"], mask=[0, 1, 0], fill_value=fill, dtype=dt) + ma2 = masked_array(["cde", "b", "a"], mask=[0, 1, 0], fill_value=fill, dtype=dt) + assert_equal(op(ma1, ma2)._data, op(ma1._data, ma2._data)) + + @pytest.mark.filterwarnings("ignore:.*Comparison to `None`.*:FutureWarning") def test_eq_with_None(self): # Really, comparisons with None should not be done, but check them # anyway. Note that pep8 will flag these tests. @@ -1865,23 +2032,21 @@ def test_eq_with_None(self): # test will fail (and have to be changed accordingly). 
# With partial mask - with suppress_warnings() as sup: - sup.filter(FutureWarning, "Comparison to `None`") - a = array([None, 1], mask=[0, 1]) - assert_equal(a == None, array([True, False], mask=[0, 1])) - assert_equal(a.data == None, [True, False]) - assert_equal(a != None, array([False, True], mask=[0, 1])) - # With nomask - a = array([None, 1], mask=False) - assert_equal(a == None, [True, False]) - assert_equal(a != None, [False, True]) - # With complete mask - a = array([None, 2], mask=True) - assert_equal(a == None, array([False, True], mask=True)) - assert_equal(a != None, array([True, False], mask=True)) - # Fully masked, even comparison to None should return "masked" - a = masked - assert_equal(a == None, masked) + a = array([None, 1], mask=[0, 1]) + assert_equal(a == None, array([True, False], mask=[0, 1])) # noqa: E711 + assert_equal(a.data == None, [True, False]) # noqa: E711 + assert_equal(a != None, array([False, True], mask=[0, 1])) # noqa: E711 + # With nomask + a = array([None, 1], mask=False) + assert_equal(a == None, [True, False]) # noqa: E711 + assert_equal(a != None, [False, True]) # noqa: E711 + # With complete mask + a = array([None, 2], mask=True) + assert_equal(a == None, array([False, True], mask=True)) # noqa: E711 + assert_equal(a != None, array([True, False], mask=True)) # noqa: E711 + # Fully masked, even comparison to None should return "masked" + a = masked + assert_equal(a == None, masked) # noqa: E711 def test_eq_with_scalar(self): a = array(1) @@ -2117,6 +2282,32 @@ def test_check_on_scalar(self): assert_raises(TypeError, _check_fill_value, 1e+20, int) assert_raises(TypeError, _check_fill_value, 'stuff', int) + def test_fill_value_datetime_structured(self): + # gh-29818 + rec = np.array([(dt.date(2025, 4, 1),)], dtype=[('foo', '= len(a))) # No mask test = take(a, mindices, mode='clip') @@ -3735,7 +3943,7 @@ def test_tolist(self): assert_(xlist[1] is None) assert_(xlist[-2] is None) # ... 
on 2D - x.shape = (3, 4) + x = x.reshape((3, 4)) xlist = x.tolist() ctrl = [[0, None, 2, 3], [4, 5, 6, 7], [8, 9, None, 11]] assert_equal(xlist[0], [0, None, 2, 3]) @@ -3789,9 +3997,9 @@ def test_toflex(self): assert_equal(record['_mask'], data._mask) ndtype = [('i', int), ('s', '|S3'), ('f', float)] - data = array([(i, s, f) for (i, s, f) in zip(np.arange(10), - 'ABCDEFGHIJKLM', - np.random.rand(10))], + data = array(list(zip(np.arange(10), + 'ABCDEFGHIJKLM', + np.random.rand(10))), dtype=ndtype) data[[0, 1, 2, -1]] = masked record = data.toflex() @@ -3799,9 +4007,9 @@ def test_toflex(self): assert_equal(record['_mask'], data._mask) ndtype = np.dtype("int, (2,3)float, float") - data = array([(i, f, ff) for (i, f, ff) in zip(np.arange(10), - np.random.rand(10), - np.random.rand(10))], + data = array(list(zip(np.arange(10), + np.random.rand(10), + np.random.rand(10))), dtype=ndtype) data[[0, 1, 2, -1]] = masked record = data.toflex() @@ -3844,29 +4052,28 @@ def test_arraymethod_0d(self): def test_transpose_view(self): x = np.ma.array([[1, 2, 3], [4, 5, 6]]) - x[0,1] = np.ma.masked + x[0, 1] = np.ma.masked xt = x.T - xt[1,0] = 10 - xt[0,1] = np.ma.masked + xt[1, 0] = 10 + xt[0, 1] = np.ma.masked assert_equal(x.data, xt.T.data) assert_equal(x.mask, xt.T.mask) def test_diagonal_view(self): - x = np.ma.zeros((3,3)) - x[0,0] = 10 - x[1,1] = np.ma.masked - x[2,2] = 20 + x = np.ma.zeros((3, 3)) + x[0, 0] = 10 + x[1, 1] = np.ma.masked + x[2, 2] = 20 xd = x.diagonal() - x[1,1] = 15 + x[1, 1] = 15 assert_equal(xd.mask, x.diagonal().mask) assert_equal(xd.data, x.diagonal().data) class TestMaskedArrayMathMethods: - - def setup_method(self): + def _create_data(self): # Base data definition. 
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, @@ -3896,11 +4103,11 @@ def setup_method(self): m2x = array(data=x, mask=m2) m2X = array(data=X, mask=m2.reshape(X.shape)) m2XX = array(data=XX, mask=m2.reshape(XX.shape)) - self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + return x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX def test_cumsumprod(self): # Tests cumsum & cumprod on MaskedArrays. - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + mX = self._create_data()[5] mXcp = mX.cumsum(0) assert_equal(mXcp._data, mX.filled(0).cumsum(0)) mXcp = mX.cumsum(1) @@ -3934,7 +4141,7 @@ def test_cumsumprod_with_output(self): def test_ptp(self): # Tests ptp on MaskedArrays. - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + _, X, _, m, mx, mX, _, _, _, _ = self._create_data() (n, m) = X.shape assert_equal(mx.ptp(), np.ptp(mx.compressed())) rows = np.zeros(n, float) @@ -3998,7 +4205,7 @@ def test_anom(self): def test_trace(self): # Tests trace on MaskedArrays. - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + _, X, _, _, _, mX, _, _, _, _ = self._create_data() mXdiag = mX.diagonal() assert_equal(mX.trace(), mX.diagonal().compressed().sum()) assert_almost_equal(mX.trace(), @@ -4007,13 +4214,13 @@ def test_trace(self): assert_equal(np.trace(mX), mX.trace()) # gh-5560 - arr = np.arange(2*4*4).reshape(2,4,4) + arr = np.arange(2 * 4 * 4).reshape(2, 4, 4) m_arr = np.ma.masked_array(arr, False) assert_equal(arr.trace(axis1=1, axis2=2), m_arr.trace(axis1=1, axis2=2)) def test_dot(self): # Tests dot on MaskedArrays. 
- (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + _, _, _, _, mx, mX, mXX, _, _, _ = self._create_data() fx = mx.filled(0) r = mx.dot(mx) assert_almost_equal(r.filled(0), fx.dot(fx)) @@ -4022,7 +4229,7 @@ def test_dot(self): fX = mX.filled(0) r = mX.dot(mX) assert_almost_equal(r.filled(0), fX.dot(fX)) - assert_(r.mask[1,3]) + assert_(r.mask[1, 3]) r1 = empty_like(r) mX.dot(mX, out=r1) assert_almost_equal(r, r1) @@ -4037,23 +4244,23 @@ def test_dot(self): def test_dot_shape_mismatch(self): # regression test - x = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]]) - y = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]]) - z = masked_array([[0,1],[3,3]]) + x = masked_array([[1, 2], [3, 4]], mask=[[0, 1], [0, 0]]) + y = masked_array([[1, 2], [3, 4]], mask=[[0, 1], [0, 0]]) + z = masked_array([[0, 1], [3, 3]]) x.dot(y, out=z) assert_almost_equal(z.filled(0), [[1, 0], [15, 16]]) assert_almost_equal(z.mask, [[0, 1], [0, 0]]) def test_varmean_nomask(self): # gh-5769 - foo = array([1,2,3,4], dtype='f8') - bar = array([1,2,3,4], dtype='f8') + foo = array([1, 2, 3, 4], dtype='f8') + bar = array([1, 2, 3, 4], dtype='f8') assert_equal(type(foo.mean()), np.float64) assert_equal(type(foo.var()), np.float64) - assert((foo.mean() == bar.mean()) is np.bool(True)) + assert (foo.mean() == bar.mean()) is np.bool(True) # check array type is preserved and out works - foo = array(np.arange(16).reshape((4,4)), dtype='f8') + foo = array(np.arange(16).reshape((4, 4)), dtype='f8') bar = empty(4, dtype='f4') assert_equal(type(foo.mean(axis=1)), MaskedArray) assert_equal(type(foo.var(axis=1)), MaskedArray) @@ -4062,7 +4269,7 @@ def test_varmean_nomask(self): def test_varstd(self): # Tests var & std on MaskedArrays. 
- (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + _, X, XX, _, _, mX, mXX, _, _, _ = self._create_data() assert_almost_equal(mX.var(axis=None), mX.compressed().var()) assert_almost_equal(mX.std(axis=None), mX.compressed().std()) assert_almost_equal(mX.std(axis=None, ddof=1), @@ -4082,7 +4289,7 @@ def test_varstd(self): assert_almost_equal(np.sqrt(mXvar0[k]), mX[:, k].compressed().std()) - @suppress_copy_mask_on_assignment + @pytest.mark.filterwarnings(WARNING_MARK_SPEC) def test_varstd_specialcases(self): # Test a special case for var nout = np.array(-1, dtype=float) @@ -4163,6 +4370,7 @@ def test_axis_methods_nomask(self): assert_equal(a.max(-1), [3, 6]) assert_equal(a.max(1), [3, 6]) + @pytest.mark.thread_unsafe(reason="crashes with low memory") @requires_memory(free_bytes=2 * 10000 * 1000 * 2) def test_mean_overflow(self): # Test overflow in masked arrays @@ -4213,7 +4421,7 @@ def test_diff_with_n_0(self): class TestMaskedArrayMathMethodsComplex: # Test class for miscellaneous MaskedArrays methods. - def setup_method(self): + def _create_data(self): # Base data definition. x = np.array([8.375j, 7.545j, 8.828j, 8.5j, 1.757j, 5.928, 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, @@ -4243,11 +4451,11 @@ def setup_method(self): m2x = array(data=x, mask=m2) m2X = array(data=X, mask=m2.reshape(X.shape)) m2XX = array(data=XX, mask=m2.reshape(XX.shape)) - self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + return x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX def test_varstd(self): # Tests var & std on MaskedArrays. - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + _, X, XX, _, _, mX, mXX, _, _, _ = self._create_data() assert_almost_equal(mX.var(axis=None), mX.compressed().var()) assert_almost_equal(mX.std(axis=None), mX.compressed().std()) assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape) @@ -4266,17 +4474,6 @@ def test_varstd(self): class TestMaskedArrayFunctions: # Test class for miscellaneous functions. 
- - def setup_method(self): - x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) - y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) - m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] - xm = masked_array(x, mask=m1) - ym = masked_array(y, mask=m2) - xm.set_fill_value(1e+20) - self.info = (xm, ym) - def test_masked_where_bool(self): x = [1, 2] y = masked_where(False, x) @@ -4490,7 +4687,7 @@ def test_power_with_broadcasting(self): @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") def test_where(self): # Test the where function - x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] @@ -4631,7 +4828,7 @@ def test_masked_invalid_pandas(self): # getdata() used to be bad for pandas series due to its _data # attribute. This test is a regression test mainly and may be # removed if getdata() is adjusted. 
- class Series(): + class Series: _data = "nonsense" def __array__(self, dtype=None, copy=None): @@ -4814,9 +5011,9 @@ def test_make_mask(self): bools = [True, False] dtypes = [MaskType, float] msgformat = 'copy=%s, shrink=%s, dtype=%s' - for cpy, shr, dt in itertools.product(bools, bools, dtypes): - res = make_mask(nomask, copy=cpy, shrink=shr, dtype=dt) - assert_(res is nomask, msgformat % (cpy, shr, dt)) + for cpy, shr, dtype in itertools.product(bools, bools, dtypes): + res = make_mask(nomask, copy=cpy, shrink=shr, dtype=dtype) + assert_(res is nomask, msgformat % (cpy, shr, dtype)) def test_mask_or(self): # Initialize @@ -4849,6 +5046,26 @@ def test_mask_or(self): cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype) assert_equal(mask_or(amask, bmask), cntrl) + a = np.array([False, False]) + assert mask_or(a, a) is nomask # gh-27360 + + def test_allequal(self): + x = array([1, 2, 3], mask=[0, 0, 0]) + y = array([1, 2, 3], mask=[1, 0, 0]) + z = array([[1, 2, 3], [4, 5, 6]], mask=[[0, 0, 0], [1, 1, 1]]) + + assert allequal(x, y) + assert not allequal(x, y, fill_value=False) + assert allequal(x, z) + + # test allequal for the same input, with mask=nomask, this test is for + # the scenario raised in https://github.com/numpy/numpy/issues/27201 + assert allequal(x, x) + assert allequal(x, x, fill_value=False) + + assert allequal(y, y) + assert not allequal(y, y, fill_value=False) + def test_flatten_mask(self): # Tests flatten mask # Standard dtype @@ -4879,8 +5096,7 @@ def test_on_ndarray(self): def test_compress(self): # Test compress function on ndarray and masked array # Address Github #2495. 
- arr = np.arange(8) - arr.shape = 4, 2 + arr = np.arange(8).reshape(4, 2) cond = np.array([True, False, True, True]) control = arr[[0, 2, 3]] test = np.ma.compress(cond, arr, axis=0) @@ -4905,7 +5121,7 @@ class A(np.ndarray): assert_(type(test) is A) # Test that compress flattens - test = np.ma.compressed([[1],[2]]) + test = np.ma.compressed([[1], [2]]) assert_equal(test.ndim, 1) test = np.ma.compressed([[[[[1]]]]]) assert_equal(test.ndim, 1) @@ -4961,8 +5177,7 @@ def test_convolve(self): class TestMaskedFields: - - def setup_method(self): + def _create_data(self): ilist = [1, 2, 3, 4, 5] flist = [1.1, 2.2, 3.3, 4.4, 5.5] slist = ['one', 'two', 'three', 'four', 'five'] @@ -4970,11 +5185,12 @@ def setup_method(self): mdtype = [('a', bool), ('b', bool), ('c', bool)] mask = [0, 1, 0, 0, 1] base = array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype) - self.data = dict(base=base, mask=mask, ddtype=ddtype, mdtype=mdtype) + return {"base": base, "mask": mask, "ddtype": ddtype, "mdtype": mdtype} def test_set_records_masks(self): - base = self.data['base'] - mdtype = self.data['mdtype'] + data = self._create_data() + base = data['base'] + mdtype = data['mdtype'] # Set w/ nomask or masked base.mask = nomask assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype)) @@ -4993,7 +5209,7 @@ def test_set_records_masks(self): def test_set_record_element(self): # Check setting an element of a record) - base = self.data['base'] + base = self._create_data()['base'] (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) base[0] = (pi, pi, 'pi') @@ -5008,7 +5224,7 @@ def test_set_record_element(self): [b'pi', b'two', b'three', b'four', b'five']) def test_set_record_slice(self): - base = self.data['base'] + base = self._create_data()['base'] (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) base[:3] = (pi, pi, 'pi') @@ -5024,7 +5240,7 @@ def test_set_record_slice(self): def test_mask_element(self): "Check record access" - base = self.data['base'] + 
base = self._create_data()['base'] base[0] = masked for n in ('a', 'b', 'c'): @@ -5072,8 +5288,8 @@ def _test_index(i): assert_equal_records(a[i]._mask, a._mask[i]) assert_equal(type(a[i, ...]), MaskedArray) - assert_equal_records(a[i,...]._data, a._data[i,...]) - assert_equal_records(a[i,...]._mask, a._mask[i,...]) + assert_equal_records(a[i, ...]._data, a._data[i, ...]) + assert_equal_records(a[i, ...]._mask, a._mask[i, ...]) _test_index(1) # No mask _test_index(0) # One element masked @@ -5117,47 +5333,48 @@ def test_setitem_scalar(self): assert_array_equal(arr.mask, [True, False, False]) def test_element_len(self): + data = self._create_data() # check that len() works for mvoid (Github issue #576) - for rec in self.data['base']: - assert_equal(len(rec), len(self.data['ddtype'])) + for rec in data['base']: + assert_equal(len(rec), len(data['ddtype'])) class TestMaskedObjectArray: def test_getitem(self): arr = np.ma.array([None, None]) - for dt in [float, object]: - a0 = np.eye(2).astype(dt) - a1 = np.eye(3).astype(dt) + for dtype in [float, object]: + a0 = np.eye(2).astype(dtype) + a1 = np.eye(3).astype(dtype) arr[0] = a0 arr[1] = a1 assert_(arr[0] is a0) assert_(arr[1] is a1) - assert_(isinstance(arr[0,...], MaskedArray)) - assert_(isinstance(arr[1,...], MaskedArray)) - assert_(arr[0,...][()] is a0) - assert_(arr[1,...][()] is a1) + assert_(isinstance(arr[0, ...], MaskedArray)) + assert_(isinstance(arr[1, ...], MaskedArray)) + assert_(arr[0, ...][()] is a0) + assert_(arr[1, ...][()] is a1) arr[0] = np.ma.masked assert_(arr[1] is a1) - assert_(isinstance(arr[0,...], MaskedArray)) - assert_(isinstance(arr[1,...], MaskedArray)) - assert_equal(arr[0,...].mask, True) - assert_(arr[1,...][()] is a1) + assert_(isinstance(arr[0, ...], MaskedArray)) + assert_(isinstance(arr[1, ...], MaskedArray)) + assert_equal(arr[0, ...].mask, True) + assert_(arr[1, ...][()] is a1) # gh-5962 - object arrays of arrays do something special assert_equal(arr[0].data, a0) 
assert_equal(arr[0].mask, True) - assert_equal(arr[0,...][()].data, a0) - assert_equal(arr[0,...][()].mask, True) + assert_equal(arr[0, ...][()].data, a0) + assert_equal(arr[0, ...][()].mask, True) def test_nested_ma(self): arr = np.ma.array([None, None]) # set the first object to be an unmasked masked constant. A little fiddly - arr[0,...] = np.array([np.ma.masked], object)[0,...] + arr[0, ...] = np.array([np.ma.masked], object)[0, ...] # check the above line did what we were aiming for assert_(arr.data[0] is np.ma.masked) @@ -5171,31 +5388,30 @@ def test_nested_ma(self): class TestMaskedView: - - def setup_method(self): + def _create_data(self): iterator = list(zip(np.arange(10), np.random.rand(10))) data = np.array(iterator) a = array(iterator, dtype=[('a', float), ('b', float)]) a.mask[0] = (1, 0) controlmask = np.array([1] + 19 * [0], dtype=bool) - self.data = (data, a, controlmask) + return data, a, controlmask def test_view_to_nothing(self): - (data, a, controlmask) = self.data + a = self._create_data()[1] test = a.view() assert_(isinstance(test, MaskedArray)) assert_equal(test._data, a._data) assert_equal(test._mask, a._mask) def test_view_to_type(self): - (data, a, controlmask) = self.data + data, a, _ = self._create_data() test = a.view(np.ndarray) assert_(not isinstance(test, MaskedArray)) assert_equal(test, a._data) assert_equal_records(test, data.view(a.dtype).squeeze()) def test_view_to_simple_dtype(self): - (data, a, controlmask) = self.data + data, a, controlmask = self._create_data() # View globally test = a.view(float) assert_(isinstance(test, MaskedArray)) @@ -5203,7 +5419,7 @@ def test_view_to_simple_dtype(self): assert_equal(test.mask, controlmask) def test_view_to_flexible_dtype(self): - (data, a, controlmask) = self.data + a = self._create_data()[1] test = a.view([('A', float), ('B', float)]) assert_equal(test.mask.dtype.names, ('A', 'B')) @@ -5223,7 +5439,7 @@ def test_view_to_flexible_dtype(self): assert_equal(test['B'], a['b'][-1]) def 
test_view_to_subdtype(self): - (data, a, controlmask) = self.data + data, a, controlmask = self._create_data() # View globally test = a.view((float, 2)) assert_(isinstance(test, MaskedArray)) @@ -5240,7 +5456,7 @@ def test_view_to_subdtype(self): assert_equal(test, data[-1]) def test_view_to_dtype_and_type(self): - (data, a, controlmask) = self.data + data, a, _ = self._create_data() test = a.view((float, 2), np.recarray) assert_equal(test, data) @@ -5252,10 +5468,10 @@ class TestOptionalArgs: def test_ndarrayfuncs(self): # test axis arg behaves the same as ndarray (including multiple axes) - d = np.arange(24.0).reshape((2,3,4)) - m = np.zeros(24, dtype=bool).reshape((2,3,4)) + d = np.arange(24.0).reshape((2, 3, 4)) + m = np.zeros(24, dtype=bool).reshape((2, 3, 4)) # mask out last element of last dimension - m[:,:,-1] = True + m[:, :, -1] = True a = np.ma.array(d, mask=m) def testaxis(f, a, d): @@ -5263,9 +5479,9 @@ def testaxis(f, a, d): ma_f = np.ma.__getattribute__(f) # test axis arg - assert_equal(ma_f(a, axis=1)[...,:-1], numpy_f(d[...,:-1], axis=1)) - assert_equal(ma_f(a, axis=(0,1))[...,:-1], - numpy_f(d[...,:-1], axis=(0,1))) + assert_equal(ma_f(a, axis=1)[..., :-1], numpy_f(d[..., :-1], axis=1)) + assert_equal(ma_f(a, axis=(0, 1))[..., :-1], + numpy_f(d[..., :-1], axis=(0, 1))) def testkeepdims(f, a, d): numpy_f = numpy.__getattribute__(f) @@ -5278,10 +5494,10 @@ def testkeepdims(f, a, d): numpy_f(d, keepdims=False).shape) # test both at once - assert_equal(ma_f(a, axis=1, keepdims=True)[...,:-1], - numpy_f(d[...,:-1], axis=1, keepdims=True)) - assert_equal(ma_f(a, axis=(0,1), keepdims=True)[...,:-1], - numpy_f(d[...,:-1], axis=(0,1), keepdims=True)) + assert_equal(ma_f(a, axis=1, keepdims=True)[..., :-1], + numpy_f(d[..., :-1], axis=1, keepdims=True)) + assert_equal(ma_f(a, axis=(0, 1), keepdims=True)[..., :-1], + numpy_f(d[..., :-1], axis=(0, 1), keepdims=True)) for f in ['sum', 'prod', 'mean', 'var', 'std']: testaxis(f, a, d) @@ -5290,7 +5506,7 @@ def 
testkeepdims(f, a, d): for f in ['min', 'max']: testaxis(f, a, d) - d = (np.arange(24).reshape((2,3,4))%2 == 0) + d = (np.arange(24).reshape((2, 3, 4)) % 2 == 0) a = np.ma.array(d, mask=m) for f in ['all', 'any']: testaxis(f, a, d) @@ -5299,33 +5515,33 @@ def testkeepdims(f, a, d): def test_count(self): # test np.ma.count specially - d = np.arange(24.0).reshape((2,3,4)) - m = np.zeros(24, dtype=bool).reshape((2,3,4)) - m[:,0,:] = True + d = np.arange(24.0).reshape((2, 3, 4)) + m = np.zeros(24, dtype=bool).reshape((2, 3, 4)) + m[:, 0, :] = True a = np.ma.array(d, mask=m) assert_equal(count(a), 16) - assert_equal(count(a, axis=1), 2*ones((2,4))) - assert_equal(count(a, axis=(0,1)), 4*ones((4,))) - assert_equal(count(a, keepdims=True), 16*ones((1,1,1))) - assert_equal(count(a, axis=1, keepdims=True), 2*ones((2,1,4))) - assert_equal(count(a, axis=(0,1), keepdims=True), 4*ones((1,1,4))) - assert_equal(count(a, axis=-2), 2*ones((2,4))) - assert_raises(ValueError, count, a, axis=(1,1)) + assert_equal(count(a, axis=1), 2 * ones((2, 4))) + assert_equal(count(a, axis=(0, 1)), 4 * ones((4,))) + assert_equal(count(a, keepdims=True), 16 * ones((1, 1, 1))) + assert_equal(count(a, axis=1, keepdims=True), 2 * ones((2, 1, 4))) + assert_equal(count(a, axis=(0, 1), keepdims=True), 4 * ones((1, 1, 4))) + assert_equal(count(a, axis=-2), 2 * ones((2, 4))) + assert_raises(ValueError, count, a, axis=(1, 1)) assert_raises(AxisError, count, a, axis=3) # check the 'nomask' path a = np.ma.array(d, mask=nomask) assert_equal(count(a), 24) - assert_equal(count(a, axis=1), 3*ones((2,4))) - assert_equal(count(a, axis=(0,1)), 6*ones((4,))) - assert_equal(count(a, keepdims=True), 24*ones((1,1,1))) + assert_equal(count(a, axis=1), 3 * ones((2, 4))) + assert_equal(count(a, axis=(0, 1)), 6 * ones((4,))) + assert_equal(count(a, keepdims=True), 24 * ones((1, 1, 1))) assert_equal(np.ndim(count(a, keepdims=True)), 3) - assert_equal(count(a, axis=1, keepdims=True), 3*ones((2,1,4))) - assert_equal(count(a, 
axis=(0,1), keepdims=True), 6*ones((1,1,4))) - assert_equal(count(a, axis=-2), 3*ones((2,4))) - assert_raises(ValueError, count, a, axis=(1,1)) + assert_equal(count(a, axis=1, keepdims=True), 3 * ones((2, 1, 4))) + assert_equal(count(a, axis=(0, 1), keepdims=True), 6 * ones((1, 1, 4))) + assert_equal(count(a, axis=-2), 3 * ones((2, 4))) + assert_raises(ValueError, count, a, axis=(1, 1)) assert_raises(AxisError, count, a, axis=3) # check the 'masked' singleton @@ -5403,7 +5619,7 @@ def test_deepcopy(self): def test_immutable(self): orig = np.ma.masked assert_raises(np.ma.core.MaskError, operator.setitem, orig, (), 1) - assert_raises(ValueError,operator.setitem, orig.data, (), 1) + assert_raises(ValueError, operator.setitem, orig.data, (), 1) assert_raises(ValueError, operator.setitem, orig.mask, (), False) view = np.ma.masked.view(np.ma.MaskedArray) @@ -5418,7 +5634,7 @@ def test_coercion_int(self): def test_coercion_float(self): a_f = np.zeros((), float) - assert_warns(UserWarning, operator.setitem, a_f, (), np.ma.masked) + pytest.warns(UserWarning, operator.setitem, a_f, (), np.ma.masked) assert_(np.isnan(a_f[()])) @pytest.mark.xfail(reason="See gh-9750") @@ -5435,7 +5651,8 @@ def test_coercion_bytes(self): def test_subclass(self): # https://github.com/astropy/astropy/issues/6645 - class Sub(type(np.ma.masked)): pass + class Sub(type(np.ma.masked)): + pass a = Sub() assert_(a is Sub()) @@ -5472,6 +5689,7 @@ def test_masked_array(): a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0]) assert_equal(np.argwhere(a), [[1], [3]]) + def test_masked_array_no_copy(): # check nomask array is updated in place a = np.ma.array([1, 2, 3, 4]) @@ -5486,9 +5704,10 @@ def test_masked_array_no_copy(): _ = np.ma.masked_invalid(a, copy=False) assert_array_equal(a.mask, [True, False, False, False, False]) + def test_append_masked_array(): - a = np.ma.masked_equal([1,2,3], value=2) - b = np.ma.masked_equal([4,3,2], value=2) + a = np.ma.masked_equal([1, 2, 3], value=2) + b = 
np.ma.masked_equal([4, 3, 2], value=2) result = np.ma.append(a, b) expected_data = [1, 2, 3, 4, 3, 2] @@ -5496,8 +5715,8 @@ def test_append_masked_array(): assert_array_equal(result.data, expected_data) assert_array_equal(result.mask, expected_mask) - a = np.ma.masked_all((2,2)) - b = np.ma.ones((3,1)) + a = np.ma.masked_all((2, 2)) + b = np.ma.ones((3, 1)) result = np.ma.append(a, b) expected_data = [1] * 3 @@ -5511,24 +5730,84 @@ def test_append_masked_array(): def test_append_masked_array_along_axis(): - a = np.ma.masked_equal([1,2,3], value=2) + a = np.ma.masked_equal([1, 2, 3], value=2) b = np.ma.masked_values([[4, 5, 6], [7, 8, 9]], 7) # When `axis` is specified, `values` must have the correct shape. assert_raises(ValueError, np.ma.append, a, b, axis=0) - result = np.ma.append(a[np.newaxis,:], b, axis=0) + result = np.ma.append(a[np.newaxis, :], b, axis=0) expected = np.ma.arange(1, 10) expected[[1, 6]] = np.ma.masked - expected = expected.reshape((3,3)) + expected = expected.reshape((3, 3)) assert_array_equal(result.data, expected.data) assert_array_equal(result.mask, expected.mask) + def test_default_fill_value_complex(): # regression test for Python 3, where 'unicode' was not defined assert_(default_fill_value(1 + 1j) == 1.e20 + 0.0j) +def test_string_dtype_fill_value_on_construction(): + # Regression test for gh-29421: allow string fill_value on StringDType masked arrays + dt = np.dtypes.StringDType() + data = np.array(["A", "test", "variable", ""], dtype=dt) + mask = [True, False, True, True] + # Prior to the fix, this would TypeError; now it should succeed + arr = np.ma.MaskedArray(data, mask=mask, fill_value="FILL", dtype=dt) + assert isinstance(arr.fill_value, str) + assert arr.fill_value == "FILL" + filled = arr.filled() + # Masked positions should be replaced by 'FILL' + assert filled.tolist() == ["FILL", "test", "FILL", "FILL"] + + +def test_string_dtype_default_fill_value(): + # Regression test for gh-29421: default fill_value for StringDType is 
'N/A' + dt = np.dtypes.StringDType() + data = np.array(['x', 'y', 'z'], dtype=dt) + # no fill_value passed → uses default_fill_value internally + arr = np.ma.MaskedArray(data, mask=[True, False, True], dtype=dt) + # ensure it’s stored as a Python str and equals the expected default + assert isinstance(arr.fill_value, str) + assert arr.fill_value == 'N/A' + # masked slots should be replaced by that default + assert arr.filled().tolist() == ['N/A', 'y', 'N/A'] + + +def test_string_dtype_fill_value_persists_through_slice(): + # Regression test for gh-29421: .fill_value survives slicing/viewing + dt = np.dtypes.StringDType() + arr = np.ma.MaskedArray( + ['a', 'b', 'c'], + mask=[True, False, True], + dtype=dt + ) + arr.fill_value = 'Z' + # slice triggers __array_finalize__ + sub = arr[1:] + # the slice should carry the same fill_value and behavior + assert isinstance(sub.fill_value, str) + assert sub.fill_value == 'Z' + assert sub.filled().tolist() == ['b', 'Z'] + + +def test_setting_fill_value_attribute(): + # Regression test for gh-29421: setting .fill_value post-construction works too + dt = np.dtypes.StringDType() + arr = np.ma.MaskedArray( + ["x", "longstring", "mid"], mask=[False, True, False], dtype=dt + ) + # Setting the attribute should not raise + arr.fill_value = "Z" + assert arr.fill_value == "Z" + # And filled() should use the new fill_value + assert arr.filled()[0] == "x" + assert arr.filled()[1] == "Z" + assert arr.filled()[2] == "mid" + + def test_ufunc_with_output(): # check that giving an output argument always returns that output. # Regression test for gh-8416. 
@@ -5540,9 +5819,9 @@ def test_ufunc_with_output(): def test_ufunc_with_out_varied(): """ Test that masked arrays are immune to gh-10459 """ # the mask of the output should not affect the result, however it is passed - a = array([ 1, 2, 3], mask=[1, 0, 0]) - b = array([10, 20, 30], mask=[1, 0, 0]) - out = array([ 0, 0, 0], mask=[0, 0, 1]) + a = array([ 1, 2, 3], mask=[1, 0, 0]) + b = array([10, 20, 30], mask=[1, 0, 0]) + out = array([ 0, 0, 0], mask=[0, 0, 1]) expected = array([11, 22, 33], mask=[1, 0, 0]) out_pos = out.copy() @@ -5631,11 +5910,13 @@ def test_fieldless_void(): def test_mask_shape_assignment_does_not_break_masked(): a = np.ma.masked b = np.ma.array(1, mask=a.mask) - b.shape = (1,) + with pytest.warns(DeprecationWarning): # gh-29492 + b.shape = (1,) assert_equal(a.mask.shape, ()) + @pytest.mark.skipif(sys.flags.optimize > 1, - reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1") + reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1") # noqa: E501 def test_doc_note(): def method(self): """This docstring @@ -5707,3 +5988,102 @@ def test_deepcopy_0d_obj(): deepcopy[...] 
= 17 assert_equal(source, 0) assert_equal(deepcopy, 17) + + +def test_uint_fill_value_and_filled(): + # See also gh-27269 + a = np.ma.MaskedArray([1, 1], [True, False], dtype="uint16") + # the fill value should likely not be 99999, but for now guarantee it: + assert a.fill_value == 999999 + # However, it's type is uint: + assert a.fill_value.dtype.kind == "u" + # And this ensures things like filled work: + np.testing.assert_array_equal( + a.filled(), np.array([999999, 1]).astype("uint16"), strict=True) + + +@pytest.mark.parametrize( + ('fn', 'signature'), + [ + (np.ma.nonzero, "(a)"), + (np.ma.anomalies, "(a, axis=None, dtype=None)"), + (np.ma.cumsum, "(a, axis=None, dtype=None, out=None)"), + (np.ma.compress, "(condition, a, axis=None, out=None)"), + ] +) +def test_frommethod_signature(fn, signature): + assert str(inspect.signature(fn)) == signature + + +@pytest.mark.parametrize( + ('fn', 'signature'), + [ + ( + np.ma.empty, + ( + "(shape, dtype=None, order='C', *, device=None, like=None, " + "fill_value=None, hardmask=False)" + ), + ), + ( + np.ma.empty_like, + ( + "(prototype, /, dtype=None, order='K', subok=True, shape=None, *, " + "device=None)" + ), + ), + (np.ma.squeeze, "(a, axis=None, *, fill_value=None, hardmask=False)"), + ( + np.ma.identity, + "(n, dtype=None, *, like=None, fill_value=None, hardmask=False)", + ), + ] +) +def test_convert2ma_signature(fn, signature): + assert str(inspect.signature(fn)) == signature + assert fn.__module__ == 'numpy.ma.core' + + +class TestPatternMatching: + """Tests for structural pattern matching support (PEP 634).""" + + def test_match_sequence_pattern_1d(self): + arr = array([1, 2, 3], mask=[0, 1, 0]) + match arr: + case [a, b, c]: + assert a == 1 + assert b is masked + assert c == 3 + case _: + raise AssertionError("1D MaskedArray did not match sequence pattern") + + def test_match_sequence_pattern_2d(self): + arr = array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]]) + match arr: + case [row1, row2]: + 
assert_array_equal(row1, array([1, 2], mask=[0, 1])) + assert_array_equal(row2, array([3, 4], mask=[1, 0])) + case _: + raise AssertionError("2D MaskedArray did not match sequence pattern") + + def test_match_sequence_pattern_3d(self): + arr = array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], + mask=[[[0, 1], [1, 0]], [[1, 0], [0, 1]]]) + # outer matching + match arr: + case [plane1, plane2]: + assert_array_equal(plane1, array([[1, 2], [3, 4]], + mask=[[0, 1], [1, 0]])) + assert_array_equal(plane2, array([[5, 6], [7, 8]], + mask=[[1, 0], [0, 1]])) + case _: + raise AssertionError("3D MaskedArray did not match sequence pattern") + # inner matching + match arr: + case [[row1, row2], [row3, row4]]: + assert_array_equal(row1, array([1, 2], mask=[0, 1])) + assert_array_equal(row2, array([3, 4], mask=[1, 0])) + assert_array_equal(row3, array([5, 6], mask=[1, 0])) + assert_array_equal(row4, array([7, 8], mask=[0, 1])) + case _: + raise AssertionError("3D MaskedArray did not match sequence pattern") diff --git a/numpy/ma/tests/test_deprecations.py b/numpy/ma/tests/test_deprecations.py index 40c8418f5c18..07120b198bea 100644 --- a/numpy/ma/tests/test_deprecations.py +++ b/numpy/ma/tests/test_deprecations.py @@ -2,12 +2,11 @@ """ import pytest + import numpy as np -from numpy.testing import assert_warns -from numpy.ma.testutils import assert_equal from numpy.ma.core import MaskedArrayFutureWarning -import io -import textwrap +from numpy.ma.testutils import assert_equal + class TestArgsort: """ gh-8701 """ @@ -20,7 +19,7 @@ def _test_base(self, argsort, cls): # argsort has a bad default for >1d arrays arr_2d = np.array([[1, 2], [3, 4]]).view(cls) - result = assert_warns( + result = pytest.warns( np.ma.core.MaskedArrayFutureWarning, argsort, arr_2d) assert_equal(result, argsort(arr_2d, axis=None)) @@ -50,10 +49,10 @@ def test_axis_default(self): ma_max = np.ma.maximum.reduce # check that the default axis is still None, but warns on 2d arrays - result = 
assert_warns(MaskedArrayFutureWarning, ma_max, data2d) + result = pytest.warns(MaskedArrayFutureWarning, ma_max, data2d) assert_equal(result, ma_max(data2d, axis=None)) - result = assert_warns(MaskedArrayFutureWarning, ma_min, data2d) + result = pytest.warns(MaskedArrayFutureWarning, ma_min, data2d) assert_equal(result, ma_min(data2d, axis=None)) # no warnings on 1d, as both new and old defaults are equivalent @@ -64,21 +63,3 @@ def test_axis_default(self): result = ma_max(data1d) assert_equal(result, ma_max(data1d, axis=None)) assert_equal(result, ma_max(data1d, axis=0)) - - -class TestFromtextfile: - def test_fromtextfile_delimitor(self): - # NumPy 1.22.0, 2021-09-23 - - textfile = io.StringIO(textwrap.dedent( - """ - A,B,C,D - 'string 1';1;1.0;'mixed column' - 'string 2';2;2.0; - 'string 3';3;3.0;123 - 'string 4';4;4.0;3.14 - """ - )) - - with pytest.warns(DeprecationWarning): - result = np.ma.mrecords.fromtextfile(textfile, delimitor=';') diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py index daf376b766d5..1993ffe3e90d 100644 --- a/numpy/ma/tests/test_extras.py +++ b/numpy/ma/tests/test_extras.py @@ -1,36 +1,73 @@ -# pylint: disable-msg=W0611, W0612, W0511 """Tests suite for MaskedArray. 
Adapted from the original test_ma by Pierre Gerard-Marchant :author: Pierre Gerard-Marchant :contact: pierregm_at_uga_dot_edu -:version: $Id: test_extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $ """ -import warnings +import inspect import itertools + import pytest import numpy as np from numpy._core.numeric import normalize_axis_tuple -from numpy.testing import ( - assert_warns, suppress_warnings - ) -from numpy.ma.testutils import ( - assert_, assert_array_equal, assert_equal, assert_almost_equal - ) from numpy.ma.core import ( - array, arange, masked, MaskedArray, masked_array, getmaskarray, shape, - nomask, ones, zeros, count - ) + MaskedArray, + arange, + array, + count, + getmaskarray, + masked, + masked_array, + nomask, + ones, + shape, + zeros, +) from numpy.ma.extras import ( - atleast_1d, atleast_2d, atleast_3d, mr_, dot, polyfit, cov, corrcoef, - median, average, unique, setxor1d, setdiff1d, union1d, intersect1d, in1d, - ediff1d, apply_over_axes, apply_along_axis, compress_nd, compress_rowcols, - mask_rowcols, clump_masked, clump_unmasked, flatnotmasked_contiguous, - notmasked_contiguous, notmasked_edges, masked_all, masked_all_like, isin, - diagflat, ndenumerate, stack, vstack, _covhelper - ) + _covhelper, + apply_along_axis, + apply_over_axes, + atleast_1d, + atleast_2d, + atleast_3d, + average, + clump_masked, + clump_unmasked, + compress_nd, + compress_rowcols, + corrcoef, + cov, + diagflat, + dot, + ediff1d, + flatnotmasked_contiguous, + in1d, + intersect1d, + isin, + mask_rowcols, + masked_all, + masked_all_like, + median, + mr_, + ndenumerate, + notmasked_contiguous, + notmasked_edges, + polyfit, + setdiff1d, + setxor1d, + stack, + union1d, + unique, + vstack, +) +from numpy.ma.testutils import ( + assert_, + assert_almost_equal, + assert_array_equal, + assert_equal, +) class TestGeneric: @@ -314,8 +351,8 @@ def test_complex(self): # (Regression test for https://github.com/numpy/numpy/issues/2684) mask = np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 
0]], dtype=bool) - a = masked_array([[0, 1+2j, 3+4j, 5+6j, 7+8j], - [9j, 0+1j, 2+3j, 4+5j, 7+7j]], + a = masked_array([[0, 1 + 2j, 3 + 4j, 5 + 6j, 7 + 8j], + [9j, 0 + 1j, 2 + 3j, 4 + 5j, 7 + 7j]], mask=mask) av = average(a) @@ -324,12 +361,12 @@ def test_complex(self): assert_almost_equal(av.imag, expected.imag) av0 = average(a, axis=0) - expected0 = average(a.real, axis=0) + average(a.imag, axis=0)*1j + expected0 = average(a.real, axis=0) + average(a.imag, axis=0) * 1j assert_almost_equal(av0.real, expected0.real) assert_almost_equal(av0.imag, expected0.imag) av1 = average(a, axis=1) - expected1 = average(a.real, axis=1) + average(a.imag, axis=1)*1j + expected1 = average(a.real, axis=1) + average(a.imag, axis=1) * 1j assert_almost_equal(av1.real, expected1.real) assert_almost_equal(av1.imag, expected1.imag) @@ -343,13 +380,13 @@ def test_complex(self): wav0 = average(a, weights=wts, axis=0) expected0 = (average(a.real, weights=wts, axis=0) + - average(a.imag, weights=wts, axis=0)*1j) + average(a.imag, weights=wts, axis=0) * 1j) assert_almost_equal(wav0.real, expected0.real) assert_almost_equal(wav0.imag, expected0.imag) wav1 = average(a, weights=wts, axis=1) expected1 = (average(a.real, weights=wts, axis=1) + - average(a.imag, weights=wts, axis=1)*1j) + average(a.imag, weights=wts, axis=1) * 1j) assert_almost_equal(wav1.real, expected1.real) assert_almost_equal(wav1.imag, expected1.imag) @@ -450,8 +487,8 @@ def test_2d(self): assert_array_equal(d.mask, np.r_['1', m_1, m_2]) d = mr_[b_1, b_2] assert_(d.shape == (10, 5)) - assert_array_equal(d[:5,:], b_1) - assert_array_equal(d[5:,:], b_2) + assert_array_equal(d[:5, :], b_1) + assert_array_equal(d[5:, :], b_2) assert_array_equal(d.mask, np.r_[m_1, m_2]) def test_masked_constant(self): @@ -538,9 +575,9 @@ class TestCompressFunctions: def test_compress_nd(self): # Tests compress_nd - x = np.array(list(range(3*4*5))).reshape(3, 4, 5) - m = np.zeros((3,4,5)).astype(bool) - m[1,1,1] = True + x = np.array(list(range(3 * 4 
* 5))).reshape(3, 4, 5) + m = np.zeros((3, 4, 5)).astype(bool) + m[1, 1, 1] = True x = array(x, mask=m) # axis=None @@ -708,7 +745,7 @@ def test_mask_row_cols_axis_deprecation(self, axis, func, rowcols_axis): x = array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): res = func(x, axis=axis) assert_equal(res, mask_rowcols(x, rowcols_axis)) @@ -856,7 +893,7 @@ def test_3d_kwargs(self): a = arange(12).reshape(2, 2, 3) def myfunc(b, offset=0): - return b[1+offset] + return b[1 + offset] xa = apply_along_axis(myfunc, 2, a, offset=1) assert_equal(xa, [[2, 5], [8, 11]]) @@ -921,11 +958,11 @@ def test_non_masked(self): def test_docstring_examples(self): "test the examples given in the docstring of ma.median" - x = array(np.arange(8), mask=[0]*4 + [1]*4) + x = array(np.arange(8), mask=[0] * 4 + [1] * 4) assert_equal(np.ma.median(x), 1.5) assert_equal(np.ma.median(x).shape, (), "shape mismatch") assert_(type(np.ma.median(x)) is not MaskedArray) - x = array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4) + x = array(np.arange(10).reshape(2, 5), mask=[0] * 6 + [1] * 4) assert_equal(np.ma.median(x), 2.5) assert_equal(np.ma.median(x).shape, (), "shape mismatch") assert_(type(np.ma.median(x)) is not MaskedArray) @@ -974,38 +1011,38 @@ def test_masked_1d(self): assert_equal(np.ma.median(x), 2.) assert_equal(np.ma.median(x).shape, (), "shape mismatch") assert_(type(np.ma.median(x)) is not MaskedArray) - x = array(np.arange(5), mask=[0,1,0,0,0]) + x = array(np.arange(5), mask=[0, 1, 0, 0, 0]) assert_equal(np.ma.median(x), 2.5) assert_equal(np.ma.median(x).shape, (), "shape mismatch") assert_(type(np.ma.median(x)) is not MaskedArray) - x = array(np.arange(5), mask=[0,1,1,1,1]) + x = array(np.arange(5), mask=[0, 1, 1, 1, 1]) assert_equal(np.ma.median(x), 0.) 
assert_equal(np.ma.median(x).shape, (), "shape mismatch") assert_(type(np.ma.median(x)) is not MaskedArray) # integer - x = array(np.arange(5), mask=[0,1,1,0,0]) + x = array(np.arange(5), mask=[0, 1, 1, 0, 0]) assert_equal(np.ma.median(x), 3.) assert_equal(np.ma.median(x).shape, (), "shape mismatch") assert_(type(np.ma.median(x)) is not MaskedArray) # float - x = array(np.arange(5.), mask=[0,1,1,0,0]) + x = array(np.arange(5.), mask=[0, 1, 1, 0, 0]) assert_equal(np.ma.median(x), 3.) assert_equal(np.ma.median(x).shape, (), "shape mismatch") assert_(type(np.ma.median(x)) is not MaskedArray) # integer - x = array(np.arange(6), mask=[0,1,1,1,1,0]) + x = array(np.arange(6), mask=[0, 1, 1, 1, 1, 0]) assert_equal(np.ma.median(x), 2.5) assert_equal(np.ma.median(x).shape, (), "shape mismatch") assert_(type(np.ma.median(x)) is not MaskedArray) # float - x = array(np.arange(6.), mask=[0,1,1,1,1,0]) + x = array(np.arange(6.), mask=[0, 1, 1, 1, 1, 0]) assert_equal(np.ma.median(x), 2.5) assert_equal(np.ma.median(x).shape, (), "shape mismatch") assert_(type(np.ma.median(x)) is not MaskedArray) def test_1d_shape_consistency(self): - assert_equal(np.ma.median(array([1,2,3],mask=[0,0,0])).shape, - np.ma.median(array([1,2,3],mask=[0,1,0])).shape ) + assert_equal(np.ma.median(array([1, 2, 3], mask=[0, 0, 0])).shape, + np.ma.median(array([1, 2, 3], mask=[0, 1, 0])).shape) def test_2d(self): # Tests median w/ 2D @@ -1040,7 +1077,7 @@ def test_3d(self): x = np.ma.arange(24).reshape(3, 4, 2) x[x % 3 == 0] = masked assert_equal(median(x, 0), [[12, 9], [6, 15], [12, 9], [18, 15]]) - x.shape = (4, 3, 2) + x = x.reshape((4, 3, 2)) assert_equal(median(x, 0), [[99, 10], [11, 99], [13, 14]]) x = np.ma.arange(24).reshape(4, 3, 2) x[x % 5 == 0] = masked @@ -1073,11 +1110,11 @@ def test_out(self): out = masked_array(np.ones(10)) r = median(x, axis=1, out=out) if v == 30: - e = masked_array([0.]*3 + [10, 13, 16, 19] + [0.]*3, + e = masked_array([0.] * 3 + [10, 13, 16, 19] + [0.] 
* 3, mask=[True] * 3 + [False] * 4 + [True] * 3) else: - e = masked_array([0.]*3 + [13.5, 17.5, 21.5, 25.5] + [0.]*3, - mask=[True]*3 + [False]*4 + [True]*3) + e = masked_array([0.] * 3 + [13.5, 17.5, 21.5, 25.5] + [0.] * 3, + mask=[True] * 3 + [False] * 4 + [True] * 3) assert_equal(r, e) assert_(r is out) assert_(type(r) is MaskedArray) @@ -1205,10 +1242,10 @@ def test_ambigous_fill(self): def test_special(self): for inf in [np.inf, -np.inf]: - a = np.array([[inf, np.nan], [np.nan, np.nan]]) + a = np.array([[inf, np.nan], [np.nan, np.nan]]) a = np.ma.masked_array(a, mask=np.isnan(a)) - assert_equal(np.ma.median(a, axis=0), [inf, np.nan]) - assert_equal(np.ma.median(a, axis=1), [inf, np.nan]) + assert_equal(np.ma.median(a, axis=0), [inf, np.nan]) + assert_equal(np.ma.median(a, axis=1), [inf, np.nan]) assert_equal(np.ma.median(a), inf) a = np.array([[np.nan, np.nan, inf], [np.nan, np.nan, inf]]) @@ -1237,7 +1274,7 @@ def test_special(self): assert_equal(np.ma.median(a), -2.5) assert_equal(np.ma.median(a, axis=1), [-1., -2.5, inf]) - for i in range(0, 10): + for i in range(10): for j in range(1, 10): a = np.array([([np.nan] * i) + ([inf] * j)] * 2) a = np.ma.masked_array(a, mask=np.isnan(a)) @@ -1249,19 +1286,14 @@ def test_special(self): def test_empty(self): # empty arrays a = np.ma.masked_array(np.array([], dtype=float)) - with suppress_warnings() as w: - w.record(RuntimeWarning) + with pytest.warns(RuntimeWarning): assert_array_equal(np.ma.median(a), np.nan) - assert_(w.log[0].category is RuntimeWarning) # multiple dimensions a = np.ma.masked_array(np.array([], dtype=float, ndmin=3)) # no axis - with suppress_warnings() as w: - w.record(RuntimeWarning) - warnings.filterwarnings('always', '', RuntimeWarning) + with pytest.warns(RuntimeWarning): assert_array_equal(np.ma.median(a), np.nan) - assert_(w.log[0].category is RuntimeWarning) # axis 0 and 1 b = np.ma.masked_array(np.array([], dtype=float, ndmin=2)) @@ -1270,10 +1302,8 @@ def test_empty(self): # axis 2 b = 
np.ma.masked_array(np.array(np.nan, dtype=float, ndmin=2)) - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) + with pytest.warns(RuntimeWarning): assert_equal(np.ma.median(a, axis=2), b) - assert_(w[0].category is RuntimeWarning) def test_object(self): o = np.ma.masked_array(np.arange(7.)) @@ -1284,11 +1314,11 @@ def test_object(self): class TestCov: - def setup_method(self): - self.data = array(np.random.rand(12)) + def _create_data(self): + return array(np.random.rand(12)) def test_covhelper(self): - x = self.data + x = self._create_data() # Test not mask output type is a float. assert_(_covhelper(x, rowvar=True)[1].dtype, np.float32) assert_(_covhelper(x, y=x, rowvar=False)[1].dtype, np.float32) @@ -1309,7 +1339,7 @@ def test_covhelper(self): def test_1d_without_missing(self): # Test cov on 1D variable w/o missing values - x = self.data + x = self._create_data() assert_almost_equal(np.cov(x), cov(x)) assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False)) assert_almost_equal(np.cov(x, rowvar=False, bias=True), @@ -1317,7 +1347,7 @@ def test_1d_without_missing(self): def test_2d_without_missing(self): # Test cov on 1 2D variable w/o missing values - x = self.data.reshape(3, 4) + x = self._create_data().reshape(3, 4) assert_almost_equal(np.cov(x), cov(x)) assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False)) assert_almost_equal(np.cov(x, rowvar=False, bias=True), @@ -1325,7 +1355,7 @@ def test_2d_without_missing(self): def test_1d_with_missing(self): # Test cov 1 1D variable w/missing values - x = self.data + x = self._create_data() x[-1] = masked x -= x.mean() nx = x.compressed() @@ -1349,7 +1379,7 @@ def test_1d_with_missing(self): def test_2d_with_missing(self): # Test cov on 2D variable w/ missing value - x = self.data + x = self._create_data() x[-1] = masked x = x.reshape(3, 4) valid = np.logical_not(getmaskarray(x)).astype(int) @@ -1371,74 +1401,33 @@ def 
test_2d_with_missing(self): class TestCorrcoef: - def setup_method(self): - self.data = array(np.random.rand(12)) - self.data2 = array(np.random.rand(12)) - - def test_ddof(self): - # ddof raises DeprecationWarning - x, y = self.data, self.data2 - expected = np.corrcoef(x) - expected2 = np.corrcoef(x, y) - with suppress_warnings() as sup: - warnings.simplefilter("always") - assert_warns(DeprecationWarning, corrcoef, x, ddof=-1) - sup.filter(DeprecationWarning, "bias and ddof have no effect") - # ddof has no or negligible effect on the function - assert_almost_equal(np.corrcoef(x, ddof=0), corrcoef(x, ddof=0)) - assert_almost_equal(corrcoef(x, ddof=-1), expected) - assert_almost_equal(corrcoef(x, y, ddof=-1), expected2) - assert_almost_equal(corrcoef(x, ddof=3), expected) - assert_almost_equal(corrcoef(x, y, ddof=3), expected2) - - def test_bias(self): - x, y = self.data, self.data2 - expected = np.corrcoef(x) - # bias raises DeprecationWarning - with suppress_warnings() as sup: - warnings.simplefilter("always") - assert_warns(DeprecationWarning, corrcoef, x, y, True, False) - assert_warns(DeprecationWarning, corrcoef, x, y, True, True) - assert_warns(DeprecationWarning, corrcoef, x, bias=False) - sup.filter(DeprecationWarning, "bias and ddof have no effect") - # bias has no or negligible effect on the function - assert_almost_equal(corrcoef(x, bias=1), expected) + def _create_data(self): + data = array(np.random.rand(12)) + data2 = array(np.random.rand(12)) + return data, data2 def test_1d_without_missing(self): # Test cov on 1D variable w/o missing values - x = self.data + x = self._create_data()[0] assert_almost_equal(np.corrcoef(x), corrcoef(x)) assert_almost_equal(np.corrcoef(x, rowvar=False), corrcoef(x, rowvar=False)) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "bias and ddof have no effect") - assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), - corrcoef(x, rowvar=False, bias=True)) def test_2d_without_missing(self): # Test 
corrcoef on 1 2D variable w/o missing values - x = self.data.reshape(3, 4) + x = self._create_data()[0].reshape(3, 4) assert_almost_equal(np.corrcoef(x), corrcoef(x)) assert_almost_equal(np.corrcoef(x, rowvar=False), corrcoef(x, rowvar=False)) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "bias and ddof have no effect") - assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), - corrcoef(x, rowvar=False, bias=True)) def test_1d_with_missing(self): # Test corrcoef 1 1D variable w/missing values - x = self.data + x = self._create_data()[0] x[-1] = masked x -= x.mean() nx = x.compressed() - assert_almost_equal(np.corrcoef(nx), corrcoef(x)) assert_almost_equal(np.corrcoef(nx, rowvar=False), corrcoef(x, rowvar=False)) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "bias and ddof have no effect") - assert_almost_equal(np.corrcoef(nx, rowvar=False, bias=True), - corrcoef(x, rowvar=False, bias=True)) try: corrcoef(x, allow_masked=False) except ValueError: @@ -1448,36 +1437,20 @@ def test_1d_with_missing(self): assert_almost_equal(np.corrcoef(nx, nx[::-1]), corrcoef(x, x[::-1])) assert_almost_equal(np.corrcoef(nx, nx[::-1], rowvar=False), corrcoef(x, x[::-1], rowvar=False)) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "bias and ddof have no effect") - # ddof and bias have no or negligible effect on the function - assert_almost_equal(np.corrcoef(nx, nx[::-1]), - corrcoef(x, x[::-1], bias=1)) - assert_almost_equal(np.corrcoef(nx, nx[::-1]), - corrcoef(x, x[::-1], ddof=2)) def test_2d_with_missing(self): # Test corrcoef on 2D variable w/ missing value - x = self.data + x = self._create_data()[0] x[-1] = masked x = x.reshape(3, 4) test = corrcoef(x) control = np.corrcoef(x) assert_almost_equal(test[:-1, :-1], control[:-1, :-1]) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "bias and ddof have no effect") - # ddof and bias have no or negligible effect on the function - 
assert_almost_equal(corrcoef(x, ddof=-2)[:-1, :-1], - control[:-1, :-1]) - assert_almost_equal(corrcoef(x, ddof=3)[:-1, :-1], - control[:-1, :-1]) - assert_almost_equal(corrcoef(x, bias=1)[:-1, :-1], - control[:-1, :-1]) class TestPolynomial: - # + def test_polyfit(self): # Tests polyfit # On ndarrays @@ -1502,7 +1475,7 @@ def test_polyfit(self): assert_almost_equal(a, a_) # (C, R, K, S, D) = polyfit(x, y, 3, full=True) - (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1,:], 3, full=True) + (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1, :], 3, full=True) for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): assert_almost_equal(a, a_) # @@ -1522,14 +1495,14 @@ def test_polyfit_with_masked_NaNs(self): y = np.random.rand(20).reshape(-1, 2) x[0] = np.nan - y[-1,-1] = np.nan + y[-1, -1] = np.nan x = x.view(MaskedArray) y = y.view(MaskedArray) x[0] = masked - y[-1,-1] = masked + y[-1, -1] = masked (C, R, K, S, D) = polyfit(x, y, 3, full=True) - (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1,:], 3, full=True) + (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1, :], 3, full=True) for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): assert_almost_equal(a, a_) @@ -1717,7 +1690,7 @@ def test_isin(self): c = isin(a, b) assert_(isinstance(c, MaskedArray)) assert_array_equal(c, ec) - #compare results of np.isin to ma.isin + # compare results of np.isin to ma.isin d = np.isin(a, b[~b.mask]) & ~a.mask assert_array_equal(c, d) @@ -1839,6 +1812,18 @@ def test_shape_scalar(self): assert_equal(b.shape, (1, 1)) assert_equal(b.mask.shape, b.data.shape) + @pytest.mark.parametrize("fn", [atleast_1d, vstack, diagflat]) + def test_inspect_signature(self, fn): + name = fn.__name__ + assert getattr(np.ma, name) is fn + + assert fn.__module__ == "numpy.ma.extras" + + wrapped = getattr(np, fn.__name__) + sig_wrapped = inspect.signature(wrapped) + sig = inspect.signature(fn) + assert sig == sig_wrapped + class TestNDEnumerate: diff --git a/numpy/ma/tests/test_mrecords.py b/numpy/ma/tests/test_mrecords.py 
index dc2c561b888c..b4070df0f9a3 100644 --- a/numpy/ma/tests/test_mrecords.py +++ b/numpy/ma/tests/test_mrecords.py @@ -1,4 +1,3 @@ -# pylint: disable-msg=W0611, W0612, W0511,R0201 """Tests suite for mrecords. :author: Pierre Gerard-Marchant @@ -9,19 +8,22 @@ import numpy as np import numpy.ma as ma -from numpy.ma import masked, nomask -from numpy.testing import temppath from numpy._core.records import ( - recarray, fromrecords as recfromrecords, fromarrays as recfromarrays - ) + fromarrays as recfromarrays, + fromrecords as recfromrecords, + recarray, +) +from numpy.ma import masked, nomask from numpy.ma.mrecords import ( - MaskedRecords, mrecarray, fromarrays, fromtextfile, fromrecords, - addfield - ) -from numpy.ma.testutils import ( - assert_, assert_equal, - assert_equal_records, - ) + MaskedRecords, + addfield, + fromarrays, + fromrecords, + fromtextfile, + mrecarray, +) +from numpy.ma.testutils import assert_, assert_equal, assert_equal_records +from numpy.testing import temppath class TestMRecords: @@ -70,7 +72,7 @@ def test_get(self): assert_equal(mbase_last.recordmask, True) assert_equal(mbase_last._mask.item(), (True, True, True)) assert_equal(mbase_last['a'], mbase['a'][-1]) - assert_((mbase_last['a'] is masked)) + assert_(mbase_last['a'] is masked) # as slice .......... mbase_sl = mbase[:2] assert_(isinstance(mbase_sl, mrecarray)) @@ -97,10 +99,10 @@ def test_set_fields(self): assert_equal(mbase['a']._mask, [0, 1, 0, 0, 1]) # Change the elements, and the mask will follow mbase.a = 1 - assert_equal(mbase['a']._data, [1]*5) - assert_equal(ma.getmaskarray(mbase['a']), [0]*5) + assert_equal(mbase['a']._data, [1] * 5) + assert_equal(ma.getmaskarray(mbase['a']), [0] * 5) # Use to be _mask, now it's recordmask - assert_equal(mbase.recordmask, [False]*5) + assert_equal(mbase.recordmask, [False] * 5) assert_equal(mbase._mask.tolist(), np.array([(0, 0, 0), (0, 1, 1), @@ -111,10 +113,10 @@ def test_set_fields(self): # Set a field to mask ........................ 
mbase.c = masked # Use to be mask, and now it's still mask ! - assert_equal(mbase.c.mask, [1]*5) - assert_equal(mbase.c.recordmask, [1]*5) - assert_equal(ma.getmaskarray(mbase['c']), [1]*5) - assert_equal(ma.getdata(mbase['c']), [b'N/A']*5) + assert_equal(mbase.c.mask, [1] * 5) + assert_equal(mbase.c.recordmask, [1] * 5) + assert_equal(ma.getmaskarray(mbase['c']), [1] * 5) + assert_equal(ma.getdata(mbase['c']), [b'N/A'] * 5) assert_equal(mbase._mask.tolist(), np.array([(0, 0, 1), (0, 1, 1), @@ -160,16 +162,16 @@ def test_set_mask(self): mbase = base.view(mrecarray) # Set the mask to True ....................... mbase.mask = masked - assert_equal(ma.getmaskarray(mbase['b']), [1]*5) + assert_equal(ma.getmaskarray(mbase['b']), [1] * 5) assert_equal(mbase['a']._mask, mbase['b']._mask) assert_equal(mbase['a']._mask, mbase['c']._mask) assert_equal(mbase._mask.tolist(), - np.array([(1, 1, 1)]*5, dtype=bool)) + np.array([(1, 1, 1)] * 5, dtype=bool)) # Delete the mask ............................ 
mbase.mask = nomask - assert_equal(ma.getmaskarray(mbase['c']), [0]*5) + assert_equal(ma.getmaskarray(mbase['c']), [0] * 5) assert_equal(mbase._mask.tolist(), - np.array([(0, 0, 0)]*5, dtype=bool)) + np.array([(0, 0, 0)] * 5, dtype=bool)) def test_set_mask_fromarray(self): base = self.base.copy() @@ -348,24 +350,24 @@ def test_exotic_formats(self): class TestView: - def setup_method(self): - (a, b) = (np.arange(10), np.random.rand(10)) + def _create_data(self): + a, b = (np.arange(10), np.random.rand(10)) ndtype = [('a', float), ('b', float)] arr = np.array(list(zip(a, b)), dtype=ndtype) mrec = fromarrays([a, b], dtype=ndtype, fill_value=(-9., -99.)) mrec.mask[3] = (False, True) - self.data = (mrec, a, b, arr) + return mrec, a, b, arr def test_view_by_itself(self): - (mrec, a, b, arr) = self.data + mrec = self._create_data()[0] test = mrec.view() assert_(isinstance(test, MaskedRecords)) assert_equal_records(test, mrec) assert_equal_records(test._mask, mrec._mask) def test_view_simple_dtype(self): - (mrec, a, b, arr) = self.data + mrec, a, b, _ = self._create_data() ntype = (float, 2) test = mrec.view(ntype) assert_(isinstance(test, ma.MaskedArray)) @@ -373,7 +375,7 @@ def test_view_simple_dtype(self): assert_(test[3, 1] is ma.masked) def test_view_flexible_type(self): - (mrec, a, b, arr) = self.data + mrec, _, _, arr = self._create_data() alttype = [('A', float), ('B', float)] test = mrec.view(alttype) assert_(isinstance(test, MaskedRecords)) @@ -411,14 +413,14 @@ def test_fromarrays(self): def test_fromrecords(self): # Test construction from records. (mrec, nrec, ddtype) = self.data - #...... + # ...... palist = [(1, 'abc', 3.7000002861022949, 0), (2, 'xy', 6.6999998092651367, 1), (0, ' ', 0.40000000596046448, 0)] pa = recfromrecords(palist, names='c1, c2, c3, c4') mpa = fromrecords(palist, names='c1, c2, c3, c4') assert_equal_records(pa, mpa) - #..... + # ..... 
_mrec = fromrecords(nrec) assert_equal(_mrec.dtype, mrec.dtype) for field in _mrec.dtype.names: diff --git a/numpy/ma/tests/test_old_ma.py b/numpy/ma/tests/test_old_ma.py index 1aa2026c58a8..2866cc0d6fb0 100644 --- a/numpy/ma/tests/test_old_ma.py +++ b/numpy/ma/tests/test_old_ma.py @@ -1,27 +1,89 @@ -from functools import reduce import pickle +from functools import reduce import pytest import numpy as np -import numpy._core.umath as umath import numpy._core.fromnumeric as fromnumeric -from numpy.testing import ( - assert_, assert_raises, assert_equal, - ) +import numpy._core.umath as umath from numpy.ma import ( - MaskType, MaskedArray, absolute, add, all, allclose, allequal, alltrue, - arange, arccos, arcsin, arctan, arctan2, array, average, choose, - concatenate, conjugate, cos, cosh, count, divide, equal, exp, filled, - getmask, greater, greater_equal, inner, isMaskedArray, less, - less_equal, log, log10, make_mask, masked, masked_array, masked_equal, - masked_greater, masked_greater_equal, masked_inside, masked_less, - masked_less_equal, masked_not_equal, masked_outside, - masked_print_option, masked_values, masked_where, maximum, minimum, - multiply, nomask, nonzero, not_equal, ones, outer, product, put, ravel, - repeat, resize, shape, sin, sinh, sometrue, sort, sqrt, subtract, sum, - take, tan, tanh, transpose, where, zeros, - ) + MaskedArray, + MaskType, + absolute, + add, + all, + allclose, + allequal, + alltrue, + arange, + arccos, + arcsin, + arctan, + arctan2, + array, + average, + choose, + concatenate, + conjugate, + cos, + cosh, + count, + divide, + equal, + exp, + filled, + getmask, + greater, + greater_equal, + inner, + isMaskedArray, + less, + less_equal, + log, + log10, + make_mask, + masked, + masked_array, + masked_equal, + masked_greater, + masked_greater_equal, + masked_inside, + masked_less, + masked_less_equal, + masked_not_equal, + masked_outside, + masked_print_option, + masked_values, + masked_where, + maximum, + minimum, + multiply, + 
nomask, + nonzero, + not_equal, + ones, + outer, + product, + put, + ravel, + repeat, + resize, + shape, + sin, + sinh, + sometrue, + sort, + sqrt, + subtract, + sum, + take, + tan, + tanh, + transpose, + where, + zeros, +) +from numpy.testing import assert_, assert_equal, assert_raises pi = np.pi @@ -35,8 +97,8 @@ def eq(v, w, msg=''): class TestMa: - def setup_method(self): - x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + def _create_data(self): + x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) a10 = 10. m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] @@ -48,18 +110,18 @@ def setup_method(self): xf = np.where(m1, 1e+20, x) s = x.shape xm.set_fill_value(1e+20) - self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) + return x, y, a10, m1, m2, xm, ym, z, zm, xf, s def test_testBasic1d(self): # Test of basic array creation and properties in 1 dimension. - (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + x, _, _, m1, _, xm, _, _, _, xf, s = self._create_data() assert_(not isMaskedArray(x)) assert_(isMaskedArray(xm)) assert_equal(shape(xm), s) assert_equal(xm.shape, s) assert_equal(xm.dtype, x.dtype) - assert_equal(xm.size, reduce(lambda x, y:x * y, s)) - assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1)) + assert_equal(xm.size, reduce(lambda x, y: x * y, s)) + assert_equal(count(xm), len(m1) - reduce(lambda x, y: x + y, m1)) assert_(eq(xm, xf)) assert_(eq(filled(xm, 1.e20), xf)) assert_(eq(x, xm)) @@ -67,12 +129,12 @@ def test_testBasic1d(self): @pytest.mark.parametrize("s", [(4, 3), (6, 2)]) def test_testBasic2d(self, s): # Test of basic array creation and properties in 2 dimensions. 
- (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d - x.shape = s - y.shape = s - xm.shape = s - ym.shape = s - xf.shape = s + x, y, _, m1, _, xm, ym, _, _, xf, s = self._create_data() + x = x.reshape(s) + y = y.reshape(s) + xm = xm.reshape(s) + ym = ym.reshape(s) + xf = xf.reshape(s) assert_(not isMaskedArray(x)) assert_(isMaskedArray(xm)) @@ -86,7 +148,7 @@ def test_testBasic2d(self, s): def test_testArithmetic(self): # Test of basic arithmetic. - (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + x, y, a10, _, _, xm, ym, _, _, xf, s = self._create_data() a2d = array([[1, 2], [0, 4]]) a2dm = masked_array(a2d, [[0, 0], [1, 0]]) assert_(eq(a2d * a2d, a2d * a2dm)) @@ -130,7 +192,7 @@ def test_testMixedArithmetic(self): def test_testUfuncs1(self): # Test various functions such as sin, cos. - (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + x, y, _, _, _, xm, ym, z, zm, _, _ = self._create_data() assert_(eq(np.cos(x), cos(xm))) assert_(eq(np.cosh(x), cosh(xm))) assert_(eq(np.sin(x), sin(xm))) @@ -176,7 +238,7 @@ def test_xtestCount(self): def test_testMinMax(self): # Test minimum and maximum. - (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + x, _, _, _, _, xm, _, _, _, _, _ = self._create_data() xr = np.ravel(x) # max doesn't work if shaped xmr = ravel(xm) @@ -186,7 +248,7 @@ def test_testMinMax(self): def test_testAddSumProd(self): # Test add, sum, product. 
- (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + x, y, _, _, _, xm, ym, _, _, _, s = self._create_data() assert_(eq(np.add.reduce(x), add.reduce(x))) assert_(eq(np.add.accumulate(x), add.accumulate(x))) assert_(eq(4, sum(array(4), axis=0))) @@ -355,7 +417,7 @@ def test_testPut2(self): assert_(eq(x, [0, 1, 10, 40, 4])) def test_testMaPut(self): - (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + _, _, _, _, _, _, ym, _, zm, _, _ = self._create_data() m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1] i = np.nonzero(m)[0] put(ym, i, zm) @@ -594,12 +656,12 @@ def test_testAverage2(self): np.add.reduce(np.arange(6)) * 3. / 12.)) assert_(allclose(average(y, axis=0), np.arange(6) * 3. / 2.)) assert_(allclose(average(y, axis=1), - [average(x, axis=0), average(x, axis=0)*2.0])) + [average(x, axis=0), average(x, axis=0) * 2.0])) assert_(allclose(average(y, None, weights=w2), 20. / 6.)) assert_(allclose(average(y, axis=0, weights=w2), [0., 1., 2., 3., 4., 10.])) assert_(allclose(average(y, axis=1), - [average(x, axis=0), average(x, axis=0)*2.0])) + [average(x, axis=0), average(x, axis=0) * 2.0])) m1 = zeros(6) m2 = [0, 0, 1, 1, 0, 0] m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] @@ -651,7 +713,7 @@ def test_testToPython(self): def test_testScalarArithmetic(self): xm = array(0, mask=1) - #TODO FIXME: Find out what the following raises a warning in r8247 + # TODO FIXME: Find out what the following raises a warning in r8247 with np.errstate(divide='ignore'): assert_((1 / array(0)).mask) assert_((1 + xm).mask) @@ -715,8 +777,9 @@ def test_assignment_by_condition_2(self): class TestUfuncs: - def setup_method(self): - self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6), + + def _create_data(self): + return (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6), array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),) def test_testUfuncRegression(self): @@ -745,7 +808,7 @@ def test_testUfuncRegression(self): except AttributeError: uf = getattr(fromnumeric, f) mf = 
getattr(np.ma, f) - args = self.d[:uf.nin] + args = self._create_data()[:uf.nin] with np.errstate(): if f in f_invalid_ignore: np.seterr(invalid='ignore') @@ -757,7 +820,7 @@ def test_testUfuncRegression(self): assert_(eqmask(ur.mask, mr.mask)) def test_reduce(self): - a = self.d[0] + a = self._create_data()[0] assert_(not alltrue(a, axis=0)) assert_(sometrue(a, axis=0)) assert_equal(sum(a[:3], axis=0), 0) @@ -781,7 +844,7 @@ def test_nonzero(self): class TestArrayMethods: - def setup_method(self): + def _create_data(self): x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, @@ -801,10 +864,10 @@ def setup_method(self): mX = array(data=X, mask=m.reshape(X.shape)) mXX = array(data=XX, mask=m.reshape(XX.shape)) - self.d = (x, X, XX, m, mx, mX, mXX) + return x, X, XX, m, mx, mX, mXX def test_trace(self): - (x, X, XX, m, mx, mX, mXX,) = self.d + _, X, _, _, _, mX, _ = self._create_data() mXdiag = mX.diagonal() assert_equal(mX.trace(), mX.diagonal().compressed().sum()) assert_(eq(mX.trace(), @@ -812,15 +875,15 @@ def test_trace(self): axis=0))) def test_clip(self): - (x, X, XX, m, mx, mX, mXX,) = self.d + x, _, _, _, mx, _, _ = self._create_data() clipped = mx.clip(2, 8) assert_(eq(clipped.mask, mx.mask)) assert_(eq(clipped._data, x.clip(2, 8))) assert_(eq(clipped._data, mx._data.clip(2, 8))) def test_ptp(self): - (x, X, XX, m, mx, mX, mXX,) = self.d - (n, m) = X.shape + _, X, _, m, mx, mX, _ = self._create_data() + n, m = X.shape # print(type(mx), mx.compressed()) # raise Exception() assert_equal(mx.ptp(), np.ptp(mx.compressed())) @@ -834,28 +897,28 @@ def test_ptp(self): assert_(eq(mX.ptp(1), rows)) def test_swapaxes(self): - (x, X, XX, m, mx, mX, mXX,) = self.d + _, _, _, _, _, mX, mXX = self._create_data() mXswapped = mX.swapaxes(0, 1) assert_(eq(mXswapped[-1], mX[:, -1])) mXXswapped = mXX.swapaxes(0, 2) assert_equal(mXXswapped.shape, (2, 2, 3, 3)) def test_cumprod(self): - (x, X, 
XX, m, mx, mX, mXX,) = self.d + mX = self._create_data()[5] mXcp = mX.cumprod(0) assert_(eq(mXcp._data, mX.filled(1).cumprod(0))) mXcp = mX.cumprod(1) assert_(eq(mXcp._data, mX.filled(1).cumprod(1))) def test_cumsum(self): - (x, X, XX, m, mx, mX, mXX,) = self.d + mX = self._create_data()[5] mXcp = mX.cumsum(0) assert_(eq(mXcp._data, mX.filled(0).cumsum(0))) mXcp = mX.cumsum(1) assert_(eq(mXcp._data, mX.filled(0).cumsum(1))) def test_varstd(self): - (x, X, XX, m, mx, mX, mXX,) = self.d + _, X, XX, _, _, mX, mXX = self._create_data() assert_(eq(mX.var(axis=None), mX.compressed().var())) assert_(eq(mX.std(axis=None), mX.compressed().std())) assert_(eq(mXX.var(axis=3).shape, XX.var(axis=3).shape)) diff --git a/numpy/ma/tests/test_regression.py b/numpy/ma/tests/test_regression.py index f4f32cc7a98b..4e40a3f8ee75 100644 --- a/numpy/ma/tests/test_regression.py +++ b/numpy/ma/tests/test_regression.py @@ -1,7 +1,5 @@ import numpy as np -from numpy.testing import ( - assert_, assert_array_equal, assert_allclose, suppress_warnings - ) +from numpy.testing import assert_, assert_array_equal class TestRegression: @@ -17,19 +15,19 @@ def test_masked_array(self): def test_mem_masked_where(self): # Ticket #62 - from numpy.ma import masked_where, MaskType + from numpy.ma import MaskType, masked_where a = np.zeros((1, 1)) b = np.zeros(a.shape, MaskType) c = masked_where(b, a) - a-c + a - c def test_masked_array_multiply(self): # Ticket #254 a = np.ma.zeros((4, 1)) a[2, 0] = np.ma.masked b = np.zeros((4, 2)) - a*b - b*a + a * b + b * a def test_masked_array_repeat(self): # Ticket #271 @@ -59,18 +57,6 @@ def test_var_sets_maskedarray_scalar(self): a.var(out=mout) assert_(mout._data == 0) - def test_ddof_corrcoef(self): - # See gh-3336 - x = np.ma.masked_equal([1, 2, 3, 4, 5], 4) - y = np.array([2, 2.5, 3.1, 3, 5]) - # this test can be removed after deprecation. 
- with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "bias and ddof have no effect") - r0 = np.ma.corrcoef(x, y, ddof=0) - r1 = np.ma.corrcoef(x, y, ddof=1) - # ddof should not have an effect (it gets cancelled out) - assert_allclose(r0.data, r1.data) - def test_mask_not_backmangled(self): # See gh-10314. Test case taken from gh-3140. a = np.ma.MaskedArray([1., 2.], mask=[False, False]) @@ -87,7 +73,7 @@ def test_empty_list_on_structured(self): assert_array_equal(ma[[]], ma[:0]) def test_masked_array_tobytes_fortran(self): - ma = np.ma.arange(4).reshape((2,2)) + ma = np.ma.arange(4).reshape((2, 2)) assert_array_equal(ma.tobytes(order='F'), ma.T.tobytes()) def test_structured_array(self): diff --git a/numpy/ma/tests/test_subclassing.py b/numpy/ma/tests/test_subclassing.py index a627245ffbb3..22bece987cb7 100644 --- a/numpy/ma/tests/test_subclassing.py +++ b/numpy/ma/tests/test_subclassing.py @@ -1,19 +1,28 @@ -# pylint: disable-msg=W0611, W0612, W0511,R0201 """Tests suite for MaskedArray & subclassing. :author: Pierre Gerard-Marchant :contact: pierregm_at_uga_dot_edu -:version: $Id: test_subclassing.py 3473 2007-10-29 15:18:13Z jarrod.millman $ """ import numpy as np from numpy.lib.mixins import NDArrayOperatorsMixin -from numpy.testing import assert_, assert_raises -from numpy.ma.testutils import assert_equal from numpy.ma.core import ( - array, arange, masked, MaskedArray, masked_array, log, add, hypot, - divide, asarray, asanyarray, nomask - ) + MaskedArray, + add, + arange, + array, + asanyarray, + asarray, + divide, + hypot, + log, + masked, + masked_array, + nomask, +) +from numpy.ma.testutils import assert_equal +from numpy.testing import assert_, assert_raises + # from numpy.ma.core import ( def assert_startswith(a, b): @@ -23,7 +32,7 @@ def assert_startswith(a, b): class SubArray(np.ndarray): # Defines a generic np.ndarray subclass, that stores some metadata # in the dictionary `info`. 
- def __new__(cls,arr,info={}): + def __new__(cls, arr, info={}): x = np.asanyarray(arr).view(cls) x.info = info.copy() return x @@ -31,7 +40,6 @@ def __new__(cls,arr,info={}): def __array_finalize__(self, obj): super().__array_finalize__(obj) self.info = getattr(obj, 'info', {}).copy() - return def __add__(self, other): result = super().__add__(other) @@ -69,6 +77,7 @@ def _series(self): _view._sharedmask = False return _view + msubarray = MSubArray @@ -179,10 +188,10 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): class TestSubclassing: # Test suite for masked subclasses of ndarray. - def setup_method(self): + def _create_data(self): x = np.arange(5, dtype='float') mx = msubarray(x, mask=[0, 1, 0, 0, 0]) - self.data = (x, mx) + return x, mx def test_data_subclassing(self): # Tests whether the subclass is kept. @@ -196,24 +205,24 @@ def test_data_subclassing(self): def test_maskedarray_subclassing(self): # Tests subclassing MaskedArray - (x, mx) = self.data + mx = self._create_data()[1] assert_(isinstance(mx._data, subarray)) def test_masked_unary_operations(self): # Tests masked_unary_operation - (x, mx) = self.data + x, mx = self._create_data() with np.errstate(divide='ignore'): assert_(isinstance(log(mx), msubarray)) assert_equal(log(x), np.log(x)) def test_masked_binary_operations(self): # Tests masked_binary_operation - (x, mx) = self.data + x, mx = self._create_data() # Result should be a msubarray assert_(isinstance(add(mx, mx), msubarray)) assert_(isinstance(add(mx, x), msubarray)) # Result should work - assert_equal(add(mx, x), mx+x) + assert_equal(add(mx, x), mx + x) assert_(isinstance(add(mx, mx)._data, subarray)) assert_(isinstance(add.outer(mx, mx), msubarray)) assert_(isinstance(hypot(mx, mx), msubarray)) @@ -221,24 +230,24 @@ def test_masked_binary_operations(self): def test_masked_binary_operations2(self): # Tests domained_masked_binary_operation - (x, mx) = self.data + x, mx = self._create_data() xmx = 
masked_array(mx.data.__array__(), mask=mx.mask) assert_(isinstance(divide(mx, mx), msubarray)) assert_(isinstance(divide(mx, x), msubarray)) assert_equal(divide(mx, mx), divide(xmx, xmx)) def test_attributepropagation(self): - x = array(arange(5), mask=[0]+[1]*4) + x = array(arange(5), mask=[0] + [1] * 4) my = masked_array(subarray(x)) ym = msubarray(x) # - z = (my+1) + z = (my + 1) assert_(isinstance(z, MaskedArray)) assert_(not isinstance(z, MSubArray)) assert_(isinstance(z._data, SubArray)) assert_equal(z._data.info, {}) # - z = (ym+1) + z = (ym + 1) assert_(isinstance(z, MaskedArray)) assert_(isinstance(z, MSubArray)) assert_(isinstance(z._data, SubArray)) @@ -255,7 +264,7 @@ def test_attributepropagation(self): ym._series._set_mask([0, 0, 0, 0, 1]) assert_equal(ym._mask, [0, 0, 0, 0, 1]) # - xsub = subarray(x, info={'name':'x'}) + xsub = subarray(x, info={'name': 'x'}) mxsub = masked_array(xsub) assert_(hasattr(mxsub, 'info')) assert_equal(mxsub.info, xsub.info) @@ -264,8 +273,8 @@ def test_subclasspreservation(self): # Checks that masked_array(...,subok=True) preserves the class. 
x = np.arange(5) m = [0, 0, 1, 0, 0] - xinfo = [(i, j) for (i, j) in zip(x, m)] - xsub = MSubArray(x, mask=m, info={'xsub':xinfo}) + xinfo = list(zip(x, m)) + xsub = MSubArray(x, mask=m, info={'xsub': xinfo}) # mxsub = masked_array(xsub, subok=False) assert_(not isinstance(mxsub, MSubArray)) @@ -295,14 +304,14 @@ def test_subclass_items(self): # getter should return a ComplicatedSubArray, even for single item # first check we wrote ComplicatedSubArray correctly assert_(isinstance(xcsub[1], ComplicatedSubArray)) - assert_(isinstance(xcsub[1,...], ComplicatedSubArray)) + assert_(isinstance(xcsub[1, ...], ComplicatedSubArray)) assert_(isinstance(xcsub[1:4], ComplicatedSubArray)) # now that it propagates inside the MaskedArray assert_(isinstance(mxcsub[1], ComplicatedSubArray)) - assert_(isinstance(mxcsub[1,...].data, ComplicatedSubArray)) + assert_(isinstance(mxcsub[1, ...].data, ComplicatedSubArray)) assert_(mxcsub[0] is masked) - assert_(isinstance(mxcsub[0,...].data, ComplicatedSubArray)) + assert_(isinstance(mxcsub[0, ...].data, ComplicatedSubArray)) assert_(isinstance(mxcsub[1:4].data, ComplicatedSubArray)) # also for flattened version (which goes via MaskedIterator) @@ -329,8 +338,8 @@ def test_subclass_nomask_items(self): xcsub = ComplicatedSubArray(x) mxcsub_nomask = masked_array(xcsub) - assert_(isinstance(mxcsub_nomask[1,...].data, ComplicatedSubArray)) - assert_(isinstance(mxcsub_nomask[0,...].data, ComplicatedSubArray)) + assert_(isinstance(mxcsub_nomask[1, ...].data, ComplicatedSubArray)) + assert_(isinstance(mxcsub_nomask[0, ...].data, ComplicatedSubArray)) assert_(isinstance(mxcsub_nomask[1], ComplicatedSubArray)) assert_(isinstance(mxcsub_nomask[0], ComplicatedSubArray)) @@ -363,8 +372,8 @@ def test_subclass_str(self): def test_pure_subclass_info_preservation(self): # Test that ufuncs and methods conserve extra information consistently; # see gh-7122. 
- arr1 = SubMaskedArray('test', data=[1,2,3,4,5,6]) - arr2 = SubMaskedArray(data=[0,1,2,3,4,5]) + arr1 = SubMaskedArray('test', data=[1, 2, 3, 4, 5, 6]) + arr2 = SubMaskedArray(data=[0, 1, 2, 3, 4, 5]) diff1 = np.subtract(arr1, arr2) assert_('info' in diff1._optinfo) assert_(diff1._optinfo['info'] == 'test') @@ -418,20 +427,20 @@ def test_array_no_inheritance(): class TestClassWrapping: # Test suite for classes that wrap MaskedArrays - def setup_method(self): + def _create_data(self): m = np.ma.masked_array([1, 3, 5], mask=[False, True, False]) wm = WrappedArray(m) - self.data = (m, wm) + return m, wm def test_masked_unary_operations(self): # Tests masked_unary_operation - (m, wm) = self.data + wm = self._create_data()[1] with np.errstate(divide='ignore'): assert_(isinstance(np.log(wm), WrappedArray)) def test_masked_binary_operations(self): # Tests masked_binary_operation - (m, wm) = self.data + m, wm = self._create_data() # Result should be a WrappedArray assert_(isinstance(np.add(wm, wm), WrappedArray)) assert_(isinstance(np.add(m, wm), WrappedArray)) diff --git a/numpy/ma/testutils.py b/numpy/ma/testutils.py index c51256047c27..0df3b1757fd6 100644 --- a/numpy/ma/testutils.py +++ b/numpy/ma/testutils.py @@ -2,20 +2,23 @@ :author: Pierre Gerard-Marchant :contact: pierregm_at_uga_dot_edu -:version: $Id: testutils.py 3529 2007-11-13 08:01:14Z jarrod.millman $ """ import operator import numpy as np -from numpy import ndarray import numpy._core.umath as umath import numpy.testing -from numpy.testing import ( - assert_, assert_allclose, assert_array_almost_equal_nulp, - assert_raises, build_err_msg - ) -from .core import mask_or, getmask, masked_array, nomask, masked, filled +from numpy import ndarray +from numpy.testing import ( # noqa: F401 + assert_, + assert_allclose, + assert_array_almost_equal_nulp, + assert_raises, + build_err_msg, +) + +from .core import filled, getmask, mask_or, masked, masked_array, nomask __all__masked = [ 'almost', 'approx', 
'assert_almost_equal', 'assert_array_almost_equal', @@ -29,13 +32,14 @@ # have mistakenly included them from this file. SciPy is one. That is # unfortunate, as some of these functions are not intended to work with # masked arrays. But there was no way to tell before. -from unittest import TestCase +from unittest import TestCase # noqa: F401 + __some__from_testing = [ 'TestCase', 'assert_', 'assert_allclose', 'assert_array_almost_equal_nulp', 'assert_raises' ] -__all__ = __all__masked + __some__from_testing +__all__ = __all__masked + __some__from_testing # noqa: PLE0605 def approx(a, b, fill_value=True, rtol=1e-5, atol=1e-8): @@ -91,7 +95,6 @@ def _assert_equal_on_sequences(actual, desired, err_msg=''): assert_equal(len(actual), len(desired), err_msg) for k in range(len(desired)): assert_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}') - return def assert_equal_records(a, b): @@ -106,7 +109,6 @@ def assert_equal_records(a, b): (af, bf) = (operator.getitem(a, f), operator.getitem(b, f)) if not (af is masked) and not (bf is masked): assert_equal(operator.getitem(a, f), operator.getitem(b, f)) - return def assert_equal(actual, desired, err_msg=''): @@ -119,7 +121,7 @@ def assert_equal(actual, desired, err_msg=''): if not isinstance(actual, dict): raise AssertionError(repr(type(actual))) assert_equal(len(actual), len(desired), err_msg) - for k, i in desired.items(): + for k in desired: if k not in actual: raise AssertionError(f"{k} not in {actual}") assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}') @@ -157,7 +159,7 @@ def fail_if_equal(actual, desired, err_msg='',): if not isinstance(actual, dict): raise AssertionError(repr(type(actual))) fail_if_equal(len(actual), len(desired), err_msg) - for k, i in desired.items(): + for k in desired: if k not in actual: raise AssertionError(repr(k)) fail_if_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}') diff --git a/numpy/ma/testutils.pyi b/numpy/ma/testutils.pyi new file mode 100644 index 
000000000000..92b843b93a43 --- /dev/null +++ b/numpy/ma/testutils.pyi @@ -0,0 +1,69 @@ +import numpy as np +from numpy._typing import NDArray +from numpy.testing import ( + TestCase, + assert_, + assert_allclose, + assert_array_almost_equal_nulp, + assert_raises, +) +from numpy.testing._private.utils import _ComparisonFunc + +__all__ = [ + "TestCase", + "almost", + "approx", + "assert_", + "assert_allclose", + "assert_almost_equal", + "assert_array_almost_equal", + "assert_array_almost_equal_nulp", + "assert_array_approx_equal", + "assert_array_compare", + "assert_array_equal", + "assert_array_less", + "assert_close", + "assert_equal", + "assert_equal_records", + "assert_mask_equal", + "assert_not_equal", + "assert_raises", + "fail_if_array_equal", +] + +def approx( + a: object, b: object, fill_value: bool = True, rtol: float = 1e-5, atol: float = 1e-8 +) -> np.ndarray[tuple[int], np.dtype[np.bool]]: ... +def almost(a: object, b: object, decimal: int = 6, fill_value: bool = True) -> np.ndarray[tuple[int], np.dtype[np.bool]]: ... + +# +def assert_equal_records(a: NDArray[np.void], b: NDArray[np.void]) -> None: ... +def assert_equal(actual: object, desired: object, err_msg: str = "") -> None: ... +def fail_if_equal(actual: object, desired: object, err_msg: str = "") -> None: ... +def assert_almost_equal( + actual: object, desired: object, decimal: int = 7, err_msg: str = "", verbose: bool = True +) -> None: ... + +# +def assert_array_compare( + comparison: _ComparisonFunc, + x: object, + y: object, + err_msg: str = "", + verbose: bool = True, + header: str = "", + fill_value: bool = True, +) -> None: ... +def assert_array_equal(x: object, y: object, err_msg: str = "", verbose: bool = True) -> None: ... +def fail_if_array_equal(x: object, y: object, err_msg: str = "", verbose: bool = True) -> None: ... +def assert_array_approx_equal( + x: object, y: object, decimal: int = 6, err_msg: str = "", verbose: bool = True +) -> None: ... 
+def assert_array_almost_equal( + x: object, y: object, decimal: int = 6, err_msg: str = "", verbose: bool = True +) -> None: ... +def assert_array_less(x: object, y: object, err_msg: str = "", verbose: bool = True) -> None: ... +def assert_mask_equal(m1: object, m2: object, err_msg: str = "") -> None: ... + +assert_not_equal = fail_if_equal +assert_close = assert_almost_equal diff --git a/numpy/ma/timer_comparison.py b/numpy/ma/timer_comparison.py deleted file mode 100644 index 9ae4c63c8e9a..000000000000 --- a/numpy/ma/timer_comparison.py +++ /dev/null @@ -1,442 +0,0 @@ -import timeit -from functools import reduce - -import numpy as np -import numpy._core.fromnumeric as fromnumeric - -from numpy.testing import build_err_msg - - -pi = np.pi - -class ModuleTester: - def __init__(self, module): - self.module = module - self.allequal = module.allequal - self.arange = module.arange - self.array = module.array - self.concatenate = module.concatenate - self.count = module.count - self.equal = module.equal - self.filled = module.filled - self.getmask = module.getmask - self.getmaskarray = module.getmaskarray - self.id = id - self.inner = module.inner - self.make_mask = module.make_mask - self.masked = module.masked - self.masked_array = module.masked_array - self.masked_values = module.masked_values - self.mask_or = module.mask_or - self.nomask = module.nomask - self.ones = module.ones - self.outer = module.outer - self.repeat = module.repeat - self.resize = module.resize - self.sort = module.sort - self.take = module.take - self.transpose = module.transpose - self.zeros = module.zeros - self.MaskType = module.MaskType - try: - self.umath = module.umath - except AttributeError: - self.umath = module.core.umath - self.testnames = [] - - def assert_array_compare(self, comparison, x, y, err_msg='', header='', - fill_value=True): - """ - Assert that a comparison of two masked arrays is satisfied elementwise. 
- - """ - xf = self.filled(x) - yf = self.filled(y) - m = self.mask_or(self.getmask(x), self.getmask(y)) - - x = self.filled(self.masked_array(xf, mask=m), fill_value) - y = self.filled(self.masked_array(yf, mask=m), fill_value) - if (x.dtype.char != "O"): - x = x.astype(np.float64) - if isinstance(x, np.ndarray) and x.size > 1: - x[np.isnan(x)] = 0 - elif np.isnan(x): - x = 0 - if (y.dtype.char != "O"): - y = y.astype(np.float64) - if isinstance(y, np.ndarray) and y.size > 1: - y[np.isnan(y)] = 0 - elif np.isnan(y): - y = 0 - try: - cond = (x.shape == () or y.shape == ()) or x.shape == y.shape - if not cond: - msg = build_err_msg([x, y], - err_msg - + f'\n(shapes {x.shape}, {y.shape} mismatch)', - header=header, - names=('x', 'y')) - assert cond, msg - val = comparison(x, y) - if m is not self.nomask and fill_value: - val = self.masked_array(val, mask=m) - if isinstance(val, bool): - cond = val - reduced = [0] - else: - reduced = val.ravel() - cond = reduced.all() - reduced = reduced.tolist() - if not cond: - match = 100-100.0*reduced.count(1)/len(reduced) - msg = build_err_msg([x, y], - err_msg - + '\n(mismatch %s%%)' % (match,), - header=header, - names=('x', 'y')) - assert cond, msg - except ValueError as e: - msg = build_err_msg([x, y], err_msg, header=header, names=('x', 'y')) - raise ValueError(msg) from e - - def assert_array_equal(self, x, y, err_msg=''): - """ - Checks the elementwise equality of two masked arrays. 
- - """ - self.assert_array_compare(self.equal, x, y, err_msg=err_msg, - header='Arrays are not equal') - - @np.errstate(all='ignore') - def test_0(self): - """ - Tests creation - - """ - x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) - m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - xm = self.masked_array(x, mask=m) - xm[0] - - @np.errstate(all='ignore') - def test_1(self): - """ - Tests creation - - """ - x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) - y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) - m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] - xm = self.masked_array(x, mask=m1) - ym = self.masked_array(y, mask=m2) - xf = np.where(m1, 1.e+20, x) - xm.set_fill_value(1.e+20) - - assert((xm-ym).filled(0).any()) - s = x.shape - assert(xm.size == reduce(lambda x, y:x*y, s)) - assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1)) - - for s in [(4, 3), (6, 2)]: - x.shape = s - y.shape = s - xm.shape = s - ym.shape = s - xf.shape = s - assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1)) - - @np.errstate(all='ignore') - def test_2(self): - """ - Tests conversions and indexing. - - """ - x1 = np.array([1, 2, 4, 3]) - x2 = self.array(x1, mask=[1, 0, 0, 0]) - x3 = self.array(x1, mask=[0, 1, 0, 1]) - x4 = self.array(x1) - # test conversion to strings, no errors - str(x2) - repr(x2) - # tests of indexing - assert type(x2[1]) is type(x1[1]) - assert x1[1] == x2[1] - x1[2] = 9 - x2[2] = 9 - self.assert_array_equal(x1, x2) - x1[1:3] = 99 - x2[1:3] = 99 - x2[1] = self.masked - x2[1:3] = self.masked - x2[:] = x1 - x2[1] = self.masked - x3[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0]) - x4[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0]) - x1 = np.arange(5)*1.0 - x2 = self.masked_values(x1, 3.0) - x1 = self.array([1, 'hello', 2, 3], object) - x2 = np.array([1, 'hello', 2, 3], object) - # check that no error occurs. 
- x1[1] - x2[1] - assert x1[1:1].shape == (0,) - # Tests copy-size - n = [0, 0, 1, 0, 0] - m = self.make_mask(n) - m2 = self.make_mask(m) - assert(m is m2) - m3 = self.make_mask(m, copy=1) - assert(m is not m3) - - @np.errstate(all='ignore') - def test_3(self): - """ - Tests resize/repeat - - """ - x4 = self.arange(4) - x4[2] = self.masked - y4 = self.resize(x4, (8,)) - assert self.allequal(self.concatenate([x4, x4]), y4) - assert self.allequal(self.getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]) - y5 = self.repeat(x4, (2, 2, 2, 2), axis=0) - self.assert_array_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3]) - y6 = self.repeat(x4, 2, axis=0) - assert self.allequal(y5, y6) - y7 = x4.repeat((2, 2, 2, 2), axis=0) - assert self.allequal(y5, y7) - y8 = x4.repeat(2, 0) - assert self.allequal(y5, y8) - - @np.errstate(all='ignore') - def test_4(self): - """ - Test of take, transpose, inner, outer products. - - """ - x = self.arange(24) - y = np.arange(24) - x[5:6] = self.masked - x = x.reshape(2, 3, 4) - y = y.reshape(2, 3, 4) - assert self.allequal(np.transpose(y, (2, 0, 1)), self.transpose(x, (2, 0, 1))) - assert self.allequal(np.take(y, (2, 0, 1), 1), self.take(x, (2, 0, 1), 1)) - assert self.allequal(np.inner(self.filled(x, 0), self.filled(y, 0)), - self.inner(x, y)) - assert self.allequal(np.outer(self.filled(x, 0), self.filled(y, 0)), - self.outer(x, y)) - y = self.array(['abc', 1, 'def', 2, 3], object) - y[2] = self.masked - t = self.take(y, [0, 3, 4]) - assert t[0] == 'abc' - assert t[1] == 2 - assert t[2] == 3 - - @np.errstate(all='ignore') - def test_5(self): - """ - Tests inplace w/ scalar - - """ - x = self.arange(10) - y = self.arange(10) - xm = self.arange(10) - xm[2] = self.masked - x += 1 - assert self.allequal(x, y+1) - xm += 1 - assert self.allequal(xm, y+1) - - x = self.arange(10) - xm = self.arange(10) - xm[2] = self.masked - x -= 1 - assert self.allequal(x, y-1) - xm -= 1 - assert self.allequal(xm, y-1) - - x = self.arange(10)*1.0 - xm = self.arange(10)*1.0 - xm[2] = 
self.masked - x *= 2.0 - assert self.allequal(x, y*2) - xm *= 2.0 - assert self.allequal(xm, y*2) - - x = self.arange(10)*2 - xm = self.arange(10)*2 - xm[2] = self.masked - x /= 2 - assert self.allequal(x, y) - xm /= 2 - assert self.allequal(xm, y) - - x = self.arange(10)*1.0 - xm = self.arange(10)*1.0 - xm[2] = self.masked - x /= 2.0 - assert self.allequal(x, y/2.0) - xm /= self.arange(10) - self.assert_array_equal(xm, self.ones((10,))) - - x = self.arange(10).astype(np.float64) - xm = self.arange(10) - xm[2] = self.masked - x += 1. - assert self.allequal(x, y + 1.) - - @np.errstate(all='ignore') - def test_6(self): - """ - Tests inplace w/ array - - """ - x = self.arange(10, dtype=np.float64) - y = self.arange(10) - xm = self.arange(10, dtype=np.float64) - xm[2] = self.masked - m = xm.mask - a = self.arange(10, dtype=np.float64) - a[-1] = self.masked - x += a - xm += a - assert self.allequal(x, y+a) - assert self.allequal(xm, y+a) - assert self.allequal(xm.mask, self.mask_or(m, a.mask)) - - x = self.arange(10, dtype=np.float64) - xm = self.arange(10, dtype=np.float64) - xm[2] = self.masked - m = xm.mask - a = self.arange(10, dtype=np.float64) - a[-1] = self.masked - x -= a - xm -= a - assert self.allequal(x, y-a) - assert self.allequal(xm, y-a) - assert self.allequal(xm.mask, self.mask_or(m, a.mask)) - - x = self.arange(10, dtype=np.float64) - xm = self.arange(10, dtype=np.float64) - xm[2] = self.masked - m = xm.mask - a = self.arange(10, dtype=np.float64) - a[-1] = self.masked - x *= a - xm *= a - assert self.allequal(x, y*a) - assert self.allequal(xm, y*a) - assert self.allequal(xm.mask, self.mask_or(m, a.mask)) - - x = self.arange(10, dtype=np.float64) - xm = self.arange(10, dtype=np.float64) - xm[2] = self.masked - m = xm.mask - a = self.arange(10, dtype=np.float64) - a[-1] = self.masked - x /= a - xm /= a - - @np.errstate(all='ignore') - def test_7(self): - "Tests ufunc" - d = (self.array([1.0, 0, -1, pi/2]*2, mask=[0, 1]+[0]*6), - self.array([1.0, 0, -1, 
pi/2]*2, mask=[1, 0]+[0]*6),) - for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate', -# 'sin', 'cos', 'tan', -# 'arcsin', 'arccos', 'arctan', -# 'sinh', 'cosh', 'tanh', -# 'arcsinh', -# 'arccosh', -# 'arctanh', -# 'absolute', 'fabs', 'negative', -# # 'nonzero', 'around', -# 'floor', 'ceil', -# # 'sometrue', 'alltrue', -# 'logical_not', -# 'add', 'subtract', 'multiply', -# 'divide', 'true_divide', 'floor_divide', -# 'remainder', 'fmod', 'hypot', 'arctan2', -# 'equal', 'not_equal', 'less_equal', 'greater_equal', -# 'less', 'greater', -# 'logical_and', 'logical_or', 'logical_xor', - ]: - try: - uf = getattr(self.umath, f) - except AttributeError: - uf = getattr(fromnumeric, f) - mf = getattr(self.module, f) - args = d[:uf.nin] - ur = uf(*args) - mr = mf(*args) - self.assert_array_equal(ur.filled(0), mr.filled(0), f) - self.assert_array_equal(ur._mask, mr._mask) - - @np.errstate(all='ignore') - def test_99(self): - # test average - ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) - self.assert_array_equal(2.0, self.average(ott, axis=0)) - self.assert_array_equal(2.0, self.average(ott, weights=[1., 1., 2., 1.])) - result, wts = self.average(ott, weights=[1., 1., 2., 1.], returned=1) - self.assert_array_equal(2.0, result) - assert(wts == 4.0) - ott[:] = self.masked - assert(self.average(ott, axis=0) is self.masked) - ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) - ott = ott.reshape(2, 2) - ott[:, 1] = self.masked - self.assert_array_equal(self.average(ott, axis=0), [2.0, 0.0]) - assert(self.average(ott, axis=1)[0] is self.masked) - self.assert_array_equal([2., 0.], self.average(ott, axis=0)) - result, wts = self.average(ott, axis=0, returned=1) - self.assert_array_equal(wts, [1., 0.]) - w1 = [0, 1, 1, 1, 1, 0] - w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] - x = self.arange(6) - self.assert_array_equal(self.average(x, axis=0), 2.5) - self.assert_array_equal(self.average(x, axis=0, weights=w1), 2.5) - y = self.array([self.arange(6), 2.0*self.arange(6)]) - 
self.assert_array_equal(self.average(y, None), np.add.reduce(np.arange(6))*3./12.) - self.assert_array_equal(self.average(y, axis=0), np.arange(6) * 3./2.) - self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0]) - self.assert_array_equal(self.average(y, None, weights=w2), 20./6.) - self.assert_array_equal(self.average(y, axis=0, weights=w2), [0., 1., 2., 3., 4., 10.]) - self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0]) - m1 = self.zeros(6) - m2 = [0, 0, 1, 1, 0, 0] - m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] - m4 = self.ones(6) - m5 = [0, 1, 1, 1, 1, 1] - self.assert_array_equal(self.average(self.masked_array(x, m1), axis=0), 2.5) - self.assert_array_equal(self.average(self.masked_array(x, m2), axis=0), 2.5) - self.assert_array_equal(self.average(self.masked_array(x, m5), axis=0), 0.0) - self.assert_array_equal(self.count(self.average(self.masked_array(x, m4), axis=0)), 0) - z = self.masked_array(y, m3) - self.assert_array_equal(self.average(z, None), 20./6.) 
- self.assert_array_equal(self.average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5]) - self.assert_array_equal(self.average(z, axis=1), [2.5, 5.0]) - self.assert_array_equal(self.average(z, axis=0, weights=w2), [0., 1., 99., 99., 4.0, 10.0]) - - @np.errstate(all='ignore') - def test_A(self): - x = self.arange(24) - x[5:6] = self.masked - x = x.reshape(2, 3, 4) - - -if __name__ == '__main__': - setup_base = ("from __main__ import ModuleTester \n" - "import numpy\n" - "tester = ModuleTester(module)\n") - setup_cur = "import numpy.ma.core as module\n" + setup_base - (nrepeat, nloop) = (10, 10) - - for i in range(1, 8): - func = 'tester.test_%i()' % i - cur = timeit.Timer(func, setup_cur).repeat(nrepeat, nloop*10) - cur = np.sort(cur) - print("#%i" % i + 50*'.') - print(eval("ModuleTester.test_%i.__doc__" % i)) - print(f'core_current : {cur[0]:.3f} - {cur[1]:.3f}') diff --git a/numpy/matlib.py b/numpy/matlib.py index 95f573ab7400..a19a4ed57e21 100644 --- a/numpy/matlib.py +++ b/numpy/matlib.py @@ -10,16 +10,17 @@ PendingDeprecationWarning, stacklevel=2) import numpy as np -from numpy.matrixlib.defmatrix import matrix, asmatrix + # Matlib.py contains all functions in the numpy namespace with a few # replacements. See doc/source/reference/routines.matlib.rst for details. # Need * as we're copying the numpy namespace. -from numpy import * # noqa: F403 +from numpy import * +from numpy.matrixlib.defmatrix import asmatrix, matrix __version__ = np.__version__ -__all__ = np.__all__[:] # copy numpy namespace -__all__ += ['rand', 'randn', 'repmat'] +__all__ = ['rand', 'randn', 'repmat'] +__all__ += np.__all__ def empty(shape, dtype=None, order='C'): """Return a new matrix of given shape and type, without initializing entries. 
@@ -55,7 +56,7 @@ def empty(shape, dtype=None, order='C'): >>> np.matlib.empty((2, 2)) # filled with random data matrix([[ 6.76425276e-320, 9.79033856e-307], # random [ 7.39337286e-309, 3.22135945e-309]]) - >>> np.matlib.empty((2, 2), dtype=int) + >>> np.matlib.empty((2, 2), dtype=np.int_) matrix([[ 6600475, 0], # random [ 6586976, 22740995]]) @@ -151,7 +152,7 @@ def zeros(shape, dtype=None, order='C'): a.fill(0) return a -def identity(n,dtype=None): +def identity(n, dtype=None): """ Returns the square identity matrix of given size. @@ -176,18 +177,18 @@ def identity(n,dtype=None): Examples -------- >>> import numpy.matlib - >>> np.matlib.identity(3, dtype=int) + >>> np.matlib.identity(3, dtype=np.int_) matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) """ - a = array([1]+n*[0], dtype=dtype) + a = array([1] + n * [0], dtype=dtype) b = empty((n, n), dtype=dtype) b.flat = a return b -def eye(n,M=None, k=0, dtype=float, order='C'): +def eye(n, M=None, k=0, dtype=float, order='C'): """ Return a matrix with ones on the diagonal and zeros elsewhere. @@ -207,8 +208,6 @@ def eye(n,M=None, k=0, dtype=float, order='C'): Whether the output should be stored in row-major (C-style) or column-major (Fortran-style) order in memory. - .. 
versionadded:: 1.14.0 - Returns ------- I : matrix @@ -223,7 +222,7 @@ def eye(n,M=None, k=0, dtype=float, order='C'): Examples -------- >>> import numpy.matlib - >>> np.matlib.eye(3, k=1, dtype=float) + >>> np.matlib.eye(3, k=1, dtype=np.float64) matrix([[0., 1., 0.], [0., 0., 1.], [0., 0., 0.]]) diff --git a/numpy/matlib.pyi b/numpy/matlib.pyi new file mode 100644 index 000000000000..0904c2744015 --- /dev/null +++ b/numpy/matlib.pyi @@ -0,0 +1,580 @@ +from typing import Any, Literal, overload + +import numpy as np +import numpy.typing as npt +from numpy import ( # type: ignore[deprecated] # noqa: F401 + False_, + ScalarType, + True_, + __array_namespace_info__, + __version__, + abs, + absolute, + acos, + acosh, + add, + all, + allclose, + amax, + amin, + angle, + any, + append, + apply_along_axis, + apply_over_axes, + arange, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + argmax, + argmin, + argpartition, + argsort, + argwhere, + around, + array, + array2string, + array_equal, + array_equiv, + array_repr, + array_split, + array_str, + asanyarray, + asarray, + asarray_chkfinite, + ascontiguousarray, + asfortranarray, + asin, + asinh, + asmatrix, + astype, + atan, + atan2, + atanh, + atleast_1d, + atleast_2d, + atleast_3d, + average, + bartlett, + base_repr, + binary_repr, + bincount, + bitwise_and, + bitwise_count, + bitwise_invert, + bitwise_left_shift, + bitwise_not, + bitwise_or, + bitwise_right_shift, + bitwise_xor, + blackman, + block, + bmat, + bool, + bool_, + broadcast, + broadcast_arrays, + broadcast_shapes, + broadcast_to, + busday_count, + busday_offset, + busdaycalendar, + byte, + bytes_, + c_, + can_cast, + cbrt, + cdouble, + ceil, + char, + character, + choose, + clip, + clongdouble, + column_stack, + common_type, + complex64, + complex128, + complex192, + complex256, + complexfloating, + compress, + concat, + concatenate, + conj, + conjugate, + convolve, + copy, + copysign, + copyto, + core, + corrcoef, + correlate, + 
cos, + cosh, + count_nonzero, + cov, + cross, + csingle, + ctypeslib, + cumprod, + cumsum, + cumulative_prod, + cumulative_sum, + datetime64, + datetime_as_string, + datetime_data, + deg2rad, + degrees, + delete, + diag, + diag_indices, + diag_indices_from, + diagflat, + diagonal, + diff, + digitize, + divide, + divmod, + dot, + double, + dsplit, + dstack, + dtype, + dtypes, + e, + ediff1d, + einsum, + einsum_path, + emath, + empty_like, + equal, + errstate, + euler_gamma, + exceptions, + exp, + exp2, + expand_dims, + expm1, + extract, + f2py, + fabs, + fft, + fill_diagonal, + finfo, + fix, + flatiter, + flatnonzero, + flexible, + flip, + fliplr, + flipud, + float16, + float32, + float64, + float96, + float128, + float_power, + floating, + floor, + floor_divide, + fmax, + fmin, + fmod, + format_float_positional, + format_float_scientific, + frexp, + from_dlpack, + frombuffer, + fromfile, + fromfunction, + fromiter, + frompyfunc, + fromregex, + fromstring, + full, + full_like, + gcd, + generic, + genfromtxt, + geomspace, + get_include, + get_printoptions, + getbufsize, + geterr, + geterrcall, + gradient, + greater, + greater_equal, + half, + hamming, + hanning, + heaviside, + histogram, + histogram2d, + histogram_bin_edges, + histogramdd, + hsplit, + hstack, + hypot, + i0, + iinfo, + imag, + index_exp, + indices, + inexact, + inf, + info, + inner, + insert, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + interp, + intersect1d, + intp, + invert, + is_busday, + isclose, + iscomplex, + iscomplexobj, + isdtype, + isfinite, + isfortran, + isin, + isinf, + isnan, + isnat, + isneginf, + isposinf, + isreal, + isrealobj, + isscalar, + issubdtype, + iterable, + ix_, + kaiser, + kron, + lcm, + ldexp, + left_shift, + less, + less_equal, + lexsort, + lib, + linalg, + linspace, + little_endian, + load, + loadtxt, + log, + log1p, + log2, + log10, + logaddexp, + logaddexp2, + logical_and, + logical_not, + logical_or, + logical_xor, + logspace, + long, + longdouble, 
+ longlong, + ma, + mask_indices, + matmul, + matrix, + matrix_transpose, + matvec, + max, + maximum, + may_share_memory, + mean, + median, + memmap, + meshgrid, + mgrid, + min, + min_scalar_type, + minimum, + mintypecode, + mod, + modf, + moveaxis, + multiply, + nan, + nan_to_num, + nanargmax, + nanargmin, + nancumprod, + nancumsum, + nanmax, + nanmean, + nanmedian, + nanmin, + nanpercentile, + nanprod, + nanquantile, + nanstd, + nansum, + nanvar, + ndarray, + ndenumerate, + ndim, + ndindex, + nditer, + negative, + nested_iters, + newaxis, + nextafter, + nonzero, + not_equal, + number, + object_, + ogrid, + ones_like, + outer, + packbits, + pad, + partition, + percentile, + permute_dims, + pi, + piecewise, + place, + poly, + poly1d, + polyadd, + polyder, + polydiv, + polyfit, + polyint, + polymul, + polynomial, + polysub, + polyval, + positive, + pow, + power, + printoptions, + prod, + promote_types, + ptp, + put, + put_along_axis, + putmask, + quantile, + r_, + rad2deg, + radians, + random, + ravel, + ravel_multi_index, + real, + real_if_close, + rec, + recarray, + reciprocal, + record, + remainder, + repeat, + require, + reshape, + resize, + result_type, + right_shift, + rint, + roll, + rollaxis, + roots, + rot90, + round, + s_, + save, + savetxt, + savez, + savez_compressed, + sctypeDict, + searchsorted, + select, + set_printoptions, + setbufsize, + setdiff1d, + seterr, + seterrcall, + setxor1d, + shape, + shares_memory, + short, + show_config, + show_runtime, + sign, + signbit, + signedinteger, + sin, + sinc, + single, + sinh, + size, + sort, + sort_complex, + spacing, + split, + sqrt, + square, + squeeze, + stack, + std, + str_, + strings, + subtract, + sum, + swapaxes, + take, + take_along_axis, + tan, + tanh, + tensordot, + test, + testing, + tile, + timedelta64, + trace, + transpose, + trapezoid, + tri, + tril, + tril_indices, + tril_indices_from, + trim_zeros, + triu, + triu_indices, + triu_indices_from, + true_divide, + trunc, + typecodes, + typename, + 
typing, + ubyte, + ufunc, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + union1d, + unique, + unique_all, + unique_counts, + unique_inverse, + unique_values, + unpackbits, + unravel_index, + unsignedinteger, + unstack, + unwrap, + ushort, + vander, + var, + vdot, + vecdot, + vecmat, + vectorize, + void, + vsplit, + vstack, + where, + zeros_like, +) +from numpy._typing import _ArrayLike, _DTypeLike + +__all__ = ["rand", "randn", "repmat"] +__all__ += np.__all__ + +### + +type _Matrix[ScalarT: np.generic] = np.matrix[tuple[int, int], np.dtype[ScalarT]] +type _Order = Literal["C", "F"] + +### + +# +@overload +def empty(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... +@overload +def empty[ScalarT: np.generic](shape: int | tuple[int, int], dtype: _DTypeLike[ScalarT], order: _Order = "C") -> _Matrix[ScalarT]: ... +@overload +def empty(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... + +# +@overload +def ones(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... +@overload +def ones[ScalarT: np.generic](shape: int | tuple[int, int], dtype: _DTypeLike[ScalarT], order: _Order = "C") -> _Matrix[ScalarT]: ... +@overload +def ones(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... + +# +@overload +def zeros(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... +@overload +def zeros[ScalarT: np.generic](shape: int | tuple[int, int], dtype: _DTypeLike[ScalarT], order: _Order = "C") -> _Matrix[ScalarT]: ... +@overload +def zeros(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... + +# +@overload +def identity(n: int, dtype: None = None) -> _Matrix[np.float64]: ... +@overload +def identity[ScalarT: np.generic](n: int, dtype: _DTypeLike[ScalarT]) -> _Matrix[ScalarT]: ... 
+@overload +def identity(n: int, dtype: npt.DTypeLike | None = None) -> _Matrix[Any]: ... + +# +@overload +def eye( + n: int, + M: int | None = None, + k: int = 0, + dtype: type[np.float64] | None = ..., + order: _Order = "C", +) -> _Matrix[np.float64]: ... +@overload +def eye[ScalarT: np.generic](n: int, M: int | None, k: int, dtype: _DTypeLike[ScalarT], order: _Order = "C") -> _Matrix[ScalarT]: ... +@overload +def eye[ScalarT: np.generic](n: int, M: int | None = None, k: int = 0, *, dtype: _DTypeLike[ScalarT], order: _Order = "C") -> _Matrix[ScalarT]: ... +@overload +def eye(n: int, M: int | None = None, k: int = 0, dtype: npt.DTypeLike | None = ..., order: _Order = "C") -> _Matrix[Any]: ... + +# +@overload +def rand(arg: int | tuple[()] | tuple[int] | tuple[int, int], /) -> _Matrix[np.float64]: ... +@overload +def rand(arg: int, /, *args: int) -> _Matrix[np.float64]: ... + +# +@overload +def randn(arg: int | tuple[()] | tuple[int] | tuple[int, int], /) -> _Matrix[np.float64]: ... +@overload +def randn(arg: int, /, *args: int) -> _Matrix[np.float64]: ... + +# +@overload +def repmat[ScalarT: np.generic](a: _Matrix[ScalarT], m: int, n: int) -> _Matrix[ScalarT]: ... +@overload +def repmat[ScalarT: np.generic](a: _ArrayLike[ScalarT], m: int, n: int) -> npt.NDArray[ScalarT]: ... +@overload +def repmat(a: npt.ArrayLike, m: int, n: int) -> npt.NDArray[Any]: ... 
diff --git a/numpy/matrixlib/__init__.py b/numpy/matrixlib/__init__.py index 8a7597d30387..1ff5cb58cc96 100644 --- a/numpy/matrixlib/__init__.py +++ b/numpy/matrixlib/__init__.py @@ -7,5 +7,6 @@ __all__ = defmatrix.__all__ from numpy._pytesttester import PytestTester + test = PytestTester(__name__) del PytestTester diff --git a/numpy/matrixlib/__init__.pyi b/numpy/matrixlib/__init__.pyi index a7efab5844af..ad4091d98d06 100644 --- a/numpy/matrixlib/__init__.pyi +++ b/numpy/matrixlib/__init__.pyi @@ -1,14 +1,3 @@ -from numpy._pytesttester import PytestTester +from .defmatrix import asmatrix, bmat, matrix -from numpy import ( - matrix as matrix, -) - -from numpy.matrixlib.defmatrix import ( - bmat as bmat, - mat as mat, - asmatrix as asmatrix, -) - -__all__: list[str] -test: PytestTester +__all__ = ["matrix", "bmat", "asmatrix"] diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py index 866f867c8eaa..2e63d50cb3a6 100644 --- a/numpy/matrixlib/defmatrix.py +++ b/numpy/matrixlib/defmatrix.py @@ -1,12 +1,13 @@ __all__ = ['matrix', 'bmat', 'asmatrix'] +import ast import sys import warnings -import ast -from .._utils import set_module import numpy._core.numeric as N from numpy._core.numeric import concatenate, isscalar +from numpy._utils import set_module + # While not in __all__, matrix_power used to be defined here, so we import # it for backward compatibility. 
from numpy.linalg import matrix_power @@ -18,8 +19,7 @@ def _convert_from_string(data): rows = data.split(';') newdata = [] - count = 0 - for row in rows: + for count, row in enumerate(rows): trow = row.split(',') newrow = [] for col in trow: @@ -29,7 +29,6 @@ def _convert_from_string(data): Ncols = len(newrow) elif len(newrow) != Ncols: raise ValueError("Rows not the same size.") - count += 1 newdata.append(newrow) return newdata @@ -56,6 +55,7 @@ def asmatrix(data, dtype=None): Examples -------- + >>> import numpy as np >>> x = np.array([[1, 2], [3, 4]]) >>> m = np.asmatrix(x) @@ -103,6 +103,7 @@ class matrix(N.ndarray): Examples -------- + >>> import numpy as np >>> a = np.matrix('1 2; 3 4') >>> a matrix([[1, 2], @@ -114,7 +115,8 @@ class matrix(N.ndarray): """ __array_priority__ = 10.0 - def __new__(subtype, data, dtype=None, copy=True): + + def __new__(cls, data, dtype=None, copy=True): warnings.warn('the matrix subclass is not the recommended way to ' 'represent matrices or deal with linear algebra (see ' 'https://docs.scipy.org/doc/numpy/user/' @@ -134,11 +136,13 @@ def __new__(subtype, data, dtype=None, copy=True): intype = data.dtype else: intype = N.dtype(dtype) - new = data.view(subtype) + new = data.view(cls) if intype != data.dtype: return new.astype(intype) - if copy: return new.copy() - else: return new + if copy: + return new.copy() + else: + return new if isinstance(data, str): data = _convert_from_string(data) @@ -162,31 +166,30 @@ def __new__(subtype, data, dtype=None, copy=True): if not (order or arr.flags.contiguous): arr = arr.copy() - ret = N.ndarray.__new__(subtype, shape, arr.dtype, - buffer=arr, - order=order) + ret = N.ndarray.__new__(cls, shape, arr.dtype, buffer=arr, order=order) return ret def __array_finalize__(self, obj): self._getitem = False - if (isinstance(obj, matrix) and obj._getitem): return + if (isinstance(obj, matrix) and obj._getitem): + return ndim = self.ndim if (ndim == 2): return if (ndim > 2): - newshape = tuple([x 
for x in self.shape if x > 1]) + newshape = tuple(x for x in self.shape if x > 1) ndim = len(newshape) if ndim == 2: - self.shape = newshape + self._set_shape(newshape) return elif (ndim > 2): raise ValueError("shape too large to be a matrix.") else: newshape = self.shape if ndim == 0: - self.shape = (1, 1) + self._set_shape((1, 1)) elif ndim == 1: - self.shape = (1, newshape[0]) + self._set_shape((1, newshape[0])) return def __getitem__(self, index): @@ -210,16 +213,16 @@ def __getitem__(self, index): except Exception: n = 0 if n > 1 and isscalar(index[1]): - out.shape = (sh, 1) + out = out.reshape((sh, 1)) else: - out.shape = (1, sh) + out = out.reshape((1, sh)) return out def __mul__(self, other): - if isinstance(other, (N.ndarray, list, tuple)) : + if isinstance(other, (N.ndarray, list, tuple)): # This promotes 1-D vectors to row vectors return N.dot(self, asmatrix(other)) - if isscalar(other) or not hasattr(other, '__rmul__') : + if isscalar(other) or not hasattr(other, '__rmul__'): return N.dot(self, other) return NotImplemented @@ -246,9 +249,9 @@ def _align(self, axis): """ if axis is None: return self[0, 0] - elif axis==0: + elif axis == 0: return self - elif axis==1: + elif axis == 1: return self.transpose() else: raise ValueError("unsupported axis") @@ -310,18 +313,17 @@ def sum(self, axis=None, dtype=None, out=None): >>> x.sum(axis=1) matrix([[3], [7]]) - >>> x.sum(axis=1, dtype='float') + >>> x.sum(axis=1, dtype=np.float64) matrix([[3.], [7.]]) - >>> out = np.zeros((2, 1), dtype='float') - >>> x.sum(axis=1, dtype='float', out=np.asmatrix(out)) + >>> out = np.zeros((2, 1), dtype=np.float64) + >>> x.sum(axis=1, dtype=np.float64, out=np.asmatrix(out)) matrix([[3.], [7.]]) """ return N.ndarray.sum(self, axis, dtype, out, keepdims=True)._collapse(axis) - # To update docstring from array to matrix... 
def squeeze(self, axis=None): """ @@ -374,7 +376,6 @@ def squeeze(self, axis=None): """ return N.ndarray.squeeze(self, axis=axis) - # To update docstring from array to matrix... def flatten(self, order='C'): """ @@ -479,7 +480,8 @@ def std(self, axis=None, dtype=None, out=None, ddof=0): [ 1.11803399]]) """ - return N.ndarray.std(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis) + return N.ndarray.std(self, axis, dtype, out, ddof, + keepdims=True)._collapse(axis) def var(self, axis=None, dtype=None, out=None, ddof=0): """ @@ -513,7 +515,8 @@ def var(self, axis=None, dtype=None, out=None, ddof=0): [1.25]]) """ - return N.ndarray.var(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis) + return N.ndarray.var(self, axis, dtype, out, ddof, + keepdims=True)._collapse(axis) def prod(self, axis=None, dtype=None, out=None): """ @@ -793,7 +796,7 @@ def ptp(self, axis=None, out=None): return N.ptp(self, axis, out)._align(axis) @property - def I(self): + def I(self): # noqa: E743 """ Returns the (multiplicative) inverse of invertible `self`. @@ -896,7 +899,6 @@ def A1(self): """ return self.__array__().ravel() - def ravel(self, order='C'): """ Return a flattened matrix. 
@@ -1065,6 +1067,7 @@ def bmat(obj, ldict=None, gdict=None): Examples -------- + >>> import numpy as np >>> A = np.asmatrix('1 1; 1 1') >>> B = np.asmatrix('2 2; 2 2') >>> C = np.asmatrix('3 4; 5 6') diff --git a/numpy/matrixlib/defmatrix.pyi b/numpy/matrixlib/defmatrix.pyi index 9d0d1ee50b66..5ec4b6687755 100644 --- a/numpy/matrixlib/defmatrix.pyi +++ b/numpy/matrixlib/defmatrix.pyi @@ -1,16 +1,211 @@ -from collections.abc import Sequence, Mapping -from typing import Any -from numpy import matrix as matrix -from numpy._typing import ArrayLike, DTypeLike, NDArray +from _typeshed import Incomplete +from collections.abc import Mapping, Sequence +from types import EllipsisType +from typing import Any, ClassVar, Literal as L, Self, SupportsIndex, overload +from typing_extensions import TypeVar -__all__: list[str] +import numpy as np +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _AnyShape, + _ArrayLikeInt_co, + _NestedSequence, + _ShapeLike, +) + +__all__ = ["asmatrix", "bmat", "matrix"] + +_ShapeT_co = TypeVar("_ShapeT_co", bound=_2D, default=_2D, covariant=True) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) + +type _2D = tuple[int, int] +type _Matrix[ScalarT: np.generic] = matrix[_2D, np.dtype[ScalarT]] +type _ToIndex1 = slice | EllipsisType | NDArray[np.integer | np.bool] | _NestedSequence[int] | None +type _ToIndex2 = tuple[_ToIndex1, _ToIndex1 | SupportsIndex] | tuple[_ToIndex1 | SupportsIndex, _ToIndex1] + +### + +class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): + __array_priority__: ClassVar[float] = 10.0 # pyright: ignore[reportIncompatibleMethodOverride] + + def __new__(cls, data: ArrayLike, dtype: DTypeLike | None = None, copy: bool = True) -> _Matrix[Incomplete]: ... + + # + @overload # type: ignore[override] + def __getitem__( + self, key: SupportsIndex | _ArrayLikeInt_co | tuple[SupportsIndex | _ArrayLikeInt_co, ...], / + ) -> Incomplete: ... 
+ @overload + def __getitem__(self, key: _ToIndex1 | _ToIndex2, /) -> matrix[_2D, _DTypeT_co]: ... + @overload + def __getitem__(self: _Matrix[np.void], key: str, /) -> _Matrix[Incomplete]: ... + @overload + def __getitem__(self: _Matrix[np.void], key: list[str], /) -> matrix[_2D, _DTypeT_co]: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # + def __mul__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __rmul__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + + # + def __pow__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __rpow__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ... # type: ignore[override] + + # keep in sync with `prod` and `mean` + @overload # type: ignore[override] + def sum(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ... + @overload + def sum(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> _Matrix[Incomplete]: ... + @overload + def sum[OutT: np.ndarray](self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: OutT) -> OutT: ... + @overload + def sum[OutT: np.ndarray](self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `sum` and `mean` + @overload # type: ignore[override] + def prod(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ... + @overload + def prod(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> _Matrix[Incomplete]: ... + @overload + def prod[OutT: np.ndarray](self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: OutT) -> OutT: ... 
+ @overload + def prod[OutT: np.ndarray](self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `sum` and `prod` + @overload # type: ignore[override] + def mean(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ... + @overload + def mean(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> _Matrix[Incomplete]: ... + @overload + def mean[OutT: np.ndarray](self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: OutT) -> OutT: ... + @overload + def mean[OutT: np.ndarray](self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `var` + @overload # type: ignore[override] + def std(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> Incomplete: ... + @overload + def std(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> _Matrix[Incomplete]: ... + @overload + def std[OutT: np.ndarray](self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: OutT, ddof: float = 0) -> OutT: ... + @overload + def std[OutT: np.ndarray]( # pyright: ignore[reportIncompatibleMethodOverride] + self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: OutT, ddof: float = 0 + ) -> OutT: ... + + # keep in sync with `std` + @overload # type: ignore[override] + def var(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> Incomplete: ... + @overload + def var(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> _Matrix[Incomplete]: ... + @overload + def var[OutT: np.ndarray](self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: OutT, ddof: float = 0) -> OutT: ... 
+ @overload + def var[OutT: np.ndarray]( # pyright: ignore[reportIncompatibleMethodOverride] + self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: OutT, ddof: float = 0 + ) -> OutT: ... + + # keep in sync with `all` + @overload # type: ignore[override] + def any(self, axis: None = None, out: None = None) -> np.bool: ... + @overload + def any(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.bool]: ... + @overload + def any[OutT: np.ndarray](self, axis: _ShapeLike | None, out: OutT) -> OutT: ... + @overload + def any[OutT: np.ndarray](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `any` + @overload # type: ignore[override] + def all(self, axis: None = None, out: None = None) -> np.bool: ... + @overload + def all(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.bool]: ... + @overload + def all[OutT: np.ndarray](self, axis: _ShapeLike | None, out: OutT) -> OutT: ... + @overload + def all[OutT: np.ndarray](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `min` and `ptp` + @overload # type: ignore[override] + def max[ScalarT: np.generic](self: NDArray[ScalarT], axis: None = None, out: None = None) -> ScalarT: ... + @overload + def max(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... + @overload + def max[OutT: np.ndarray](self, axis: _ShapeLike | None, out: OutT) -> OutT: ... + @overload + def max[OutT: np.ndarray](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `max` and `ptp` + @overload # type: ignore[override] + def min[ScalarT: np.generic](self: NDArray[ScalarT], axis: None = None, out: None = None) -> ScalarT: ... + @overload + def min(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... 
+ @overload + def min[OutT: np.ndarray](self, axis: _ShapeLike | None, out: OutT) -> OutT: ... + @overload + def min[OutT: np.ndarray](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `max` and `min` + @overload + def ptp[ScalarT: np.generic](self: NDArray[ScalarT], axis: None = None, out: None = None) -> ScalarT: ... + @overload + def ptp(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... + @overload + def ptp[OutT: np.ndarray](self, axis: _ShapeLike | None, out: OutT) -> OutT: ... + @overload + def ptp[OutT: np.ndarray](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `argmin` + @overload # type: ignore[override] + def argmax[ScalarT: np.generic](self: NDArray[ScalarT], axis: None = None, out: None = None) -> np.intp: ... + @overload + def argmax(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.intp]: ... + @overload + def argmax[OutT: NDArray[np.integer | np.bool]](self, axis: _ShapeLike | None, out: OutT) -> OutT: ... + @overload + def argmax[OutT: NDArray[np.integer | np.bool]](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `argmax` + @overload # type: ignore[override] + def argmin[ScalarT: np.generic](self: NDArray[ScalarT], axis: None = None, out: None = None) -> np.intp: ... + @overload + def argmin(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.intp]: ... + @overload + def argmin[OutT: NDArray[np.integer | np.bool]](self, axis: _ShapeLike | None, out: OutT) -> OutT: ... + @overload + def argmin[OutT: NDArray[np.integer | np.bool]](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ... 
# pyright: ignore[reportIncompatibleMethodOverride] + + # the second overload handles the (rare) case that the matrix is not 2-d + @overload + def tolist[T](self: _Matrix[np.generic[T]]) -> list[list[T]]: ... # pyright: ignore[reportIncompatibleMethodOverride] + @overload + def tolist(self) -> Incomplete: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # these three methods will at least return a `2-d` array of shape (1, n) + def squeeze(self, /, axis: _ShapeLike | None = None) -> matrix[_2D, _DTypeT_co]: ... + def ravel(self, /, order: L["K", "A", "C", "F"] | None = "C") -> matrix[_2D, _DTypeT_co]: ... # type: ignore[override] + def flatten(self, /, order: L["K", "A", "C", "F"] | None = "C") -> matrix[_2D, _DTypeT_co]: ... # type: ignore[override] + + # matrix.T is inherited from _ScalarOrArrayCommon + def getT(self) -> Self: ... + @property + def I(self) -> _Matrix[Incomplete]: ... # noqa: E743 + def getI(self) -> _Matrix[Incomplete]: ... + @property + def A(self) -> np.ndarray[_2D, _DTypeT_co]: ... + def getA(self) -> np.ndarray[_2D, _DTypeT_co]: ... + @property + def A1(self) -> np.ndarray[_AnyShape, _DTypeT_co]: ... + def getA1(self) -> np.ndarray[_AnyShape, _DTypeT_co]: ... + @property + def H(self) -> matrix[_2D, _DTypeT_co]: ... + def getH(self) -> matrix[_2D, _DTypeT_co]: ... def bmat( obj: str | Sequence[ArrayLike] | NDArray[Any], - ldict: None | Mapping[str, Any] = ..., - gdict: None | Mapping[str, Any] = ..., -) -> matrix[Any, Any]: ... - -def asmatrix(data: ArrayLike, dtype: DTypeLike = ...) -> matrix[Any, Any]: ... + ldict: Mapping[str, Any] | None = None, + gdict: Mapping[str, Any] | None = None, +) -> _Matrix[Incomplete]: ... -mat = asmatrix +def asmatrix(data: ArrayLike, dtype: DTypeLike | None = None) -> _Matrix[Incomplete]: ... 
diff --git a/numpy/matrixlib/tests/test_defmatrix.py b/numpy/matrixlib/tests/test_defmatrix.py index 81d955e86fa8..93154c3c2207 100644 --- a/numpy/matrixlib/tests/test_defmatrix.py +++ b/numpy/matrixlib/tests/test_defmatrix.py @@ -1,12 +1,17 @@ import collections.abc import numpy as np -from numpy import matrix, asmatrix, bmat -from numpy.testing import ( - assert_, assert_equal, assert_almost_equal, assert_array_equal, - assert_array_almost_equal, assert_raises - ) +from numpy import asmatrix, bmat, matrix from numpy.linalg import matrix_power +from numpy.testing import ( + assert_, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, +) + class TestCtor: def test_basic(self): @@ -47,11 +52,11 @@ def test_bmat_nondefault_str(self): [5, 6, 1, 2], [7, 8, 3, 4]]) assert_(np.all(bmat("A,A;A,A") == Aresult)) - assert_(np.all(bmat("A,A;A,A", ldict={'A':B}) == Aresult)) - assert_raises(TypeError, bmat, "A,A;A,A", gdict={'A':B}) + assert_(np.all(bmat("A,A;A,A", ldict={'A': B}) == Aresult)) + assert_raises(TypeError, bmat, "A,A;A,A", gdict={'A': B}) assert_( - np.all(bmat("A,A;A,A", ldict={'A':A}, gdict={'A':B}) == Aresult)) - b2 = bmat("A,B;C,D", ldict={'A':A,'B':B}, gdict={'C':B,'D':A}) + np.all(bmat("A,A;A,A", ldict={'A': A}, gdict={'A': B}) == Aresult)) + b2 = bmat("A,B;C,D", ldict={'A': A, 'B': B}, gdict={'C': B, 'D': A}) assert_(np.all(b2 == mixresult)) @@ -132,7 +137,7 @@ def test_basic(self): assert_(np.all(np.array(np.transpose(A) == mA.H))) assert_(np.all(A == mA.A)) - B = A + 2j*A + B = A + 2j * A mB = matrix(B) assert_(np.allclose(linalg.inv(B), mB.I)) assert_(np.all(np.array(np.transpose(B) == mB.T))) @@ -149,9 +154,9 @@ def test_comparisons(self): A = np.arange(100).reshape(10, 10) mA = matrix(A) mB = matrix(A) + 0.1 - assert_(np.all(mB == A+0.1)) - assert_(np.all(mB == matrix(A+0.1))) - assert_(not np.any(mB == matrix(A-0.1))) + assert_(np.all(mB == A + 0.1)) + assert_(np.all(mB == matrix(A + 0.1))) + 
assert_(not np.any(mB == matrix(A - 0.1))) assert_(np.all(mA < mB)) assert_(np.all(mA <= mB)) assert_(np.all(mA <= mA)) @@ -199,7 +204,7 @@ def test_basic(self): mB = mB + O assert_(mB.dtype.type == np.float64) assert_(np.all(mA != mB)) - assert_(np.all(mB == mA+0.1)) + assert_(np.all(mB == mA + 0.1)) mC = mA.copy() O = np.ones((10, 10), np.complex128) @@ -228,11 +233,11 @@ def test_basic(self): assert_(np.allclose((mA * mA).A, np.dot(A, A))) assert_(np.allclose((mA + mA).A, (A + A))) - assert_(np.allclose((3*mA).A, (3*A))) + assert_(np.allclose((3 * mA).A, (3 * A))) mA2 = matrix(A) mA2 *= 3 - assert_(np.allclose(mA2.A, 3*A)) + assert_(np.allclose(mA2.A, 3 * A)) def test_pow(self): """Test raising a matrix to an integer power works as expected.""" @@ -264,7 +269,7 @@ def test_notimplemented(self): # __mul__ with something not a list, ndarray, tuple, or scalar with assert_raises(TypeError): - A*object() + A * object() class TestMatrixReturn: @@ -283,10 +288,10 @@ def test_instance_methods(self): 'argmin', 'choose', 'dump', 'dumps', 'fill', 'getfield', 'getA', 'getA1', 'item', 'nonzero', 'put', 'putmask', 'resize', 'searchsorted', 'setflags', 'setfield', 'sort', - 'partition', 'argpartition', 'newbyteorder', 'to_device', - 'take', 'tofile', 'tolist', 'tostring', 'tobytes', 'all', 'any', + 'partition', 'argpartition', 'to_device', + 'take', 'tofile', 'tolist', 'tobytes', 'all', 'any', 'sum', 'argmax', 'argmin', 'min', 'max', 'mean', 'var', 'ptp', - 'prod', 'std', 'ctypes', 'itemset', 'bitwise_count', + 'prod', 'std', 'ctypes', 'bitwise_count', ] for attrib in dir(a): if attrib.startswith('_') or attrib in excluded_methods: @@ -296,12 +301,9 @@ def test_instance_methods(self): # reset contents of a a.astype('f8') a.fill(1.0) - if attrib in methodargs: - args = methodargs[attrib] - else: - args = () + args = methodargs.get(attrib, ()) b = f(*args) - assert_(type(b) is matrix, "%s" % attrib) + assert_(type(b) is matrix, f"{attrib}") assert_(type(a.real) is matrix) 
assert_(type(a.imag) is matrix) c, d = matrix([0.0]).nonzero() @@ -342,10 +344,10 @@ def test_fancy_indexing(self): assert_equal(x, matrix([[3, 4, 3]])) x = a[[1, 0]] assert_(isinstance(x, matrix)) - assert_equal(x, matrix([[3, 4], [1, 2]])) + assert_equal(x, matrix([[3, 4], [1, 2]])) x = a[[[1], [0]], [[1, 0], [0, 1]]] assert_(isinstance(x, matrix)) - assert_equal(x, matrix([[4, 3], [1, 2]])) + assert_equal(x, matrix([[4, 3], [1, 2]])) def test_matrix_element(self): x = matrix([[1, 2, 3], [4, 5, 6]]) @@ -365,24 +367,22 @@ def test_scalar_indexing(self): def test_row_column_indexing(self): x = asmatrix(np.eye(2)) - assert_array_equal(x[0,:], [[1, 0]]) - assert_array_equal(x[1,:], [[0, 1]]) + assert_array_equal(x[0, :], [[1, 0]]) + assert_array_equal(x[1, :], [[0, 1]]) assert_array_equal(x[:, 0], [[1], [0]]) assert_array_equal(x[:, 1], [[0], [1]]) def test_boolean_indexing(self): - A = np.arange(6) - A.shape = (3, 2) + A = np.arange(6).reshape((3, 2)) x = asmatrix(A) assert_array_equal(x[:, np.array([True, False])], x[:, 0]) - assert_array_equal(x[np.array([True, False, False]),:], x[0,:]) + assert_array_equal(x[np.array([True, False, False]), :], x[0, :]) def test_list_indexing(self): - A = np.arange(6) - A.shape = (3, 2) + A = np.arange(6).reshape((3, 2)) x = asmatrix(A) assert_array_equal(x[:, [1, 0]], x[:, ::-1]) - assert_array_equal(x[[2, 1, 0],:], x[::-1,:]) + assert_array_equal(x[[2, 1, 0], :], x[::-1, :]) class TestPower: @@ -451,3 +451,25 @@ def test_expand_dims_matrix(self): expanded = np.expand_dims(a, axis=1) assert_equal(expanded.ndim, 3) assert_(not isinstance(expanded, np.matrix)) + + +class TestPatternMatching: + """Tests for structural pattern matching support (PEP 634).""" + + def test_match_sequence_pattern_2d(self): + # matrix is always 2D, so rows are (1, N) matrices not 1D arrays + arr = matrix([[1, 2], [3, 4]]) + # outer matching + match arr: + case [row1, row2]: + assert_array_equal(row1, [[1, 2]]) + assert_array_equal(row2, [[3, 4]]) + case 
_: + raise AssertionError("2D matrix did not match sequence pattern") + # inner matching - rows are still 2D matrices, not scalars + match arr: + case [[a], [b]]: + assert_array_equal(a, [[1, 2]]) + assert_array_equal(b, [[3, 4]]) + case _: + raise AssertionError("2D matrix did not match sequence pattern") diff --git a/numpy/matrixlib/tests/test_interaction.py b/numpy/matrixlib/tests/test_interaction.py index 0c6bf210e46e..87d133a2c586 100644 --- a/numpy/matrixlib/tests/test_interaction.py +++ b/numpy/matrixlib/tests/test_interaction.py @@ -2,15 +2,21 @@ Note that tests with MaskedArray and linalg are done in separate files. """ -import pytest - import textwrap import warnings +import pytest + import numpy as np -from numpy.testing import (assert_, assert_equal, assert_raises, - assert_raises_regex, assert_array_equal, - assert_almost_equal, assert_array_almost_equal) +from numpy.testing import ( + assert_, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) def test_fancy_indexing(): @@ -225,7 +231,7 @@ def test_nanfunctions_matrices_general(): assert_(res.shape == (3, 3)) res = f(mat) assert_(isinstance(res, np.matrix)) - assert_(res.shape == (1, 3*3)) + assert_(res.shape == (1, 3 * 3)) def test_average_matrix(): @@ -238,7 +244,7 @@ def test_average_matrix(): r = np.average(a, axis=0, weights=w) assert_equal(type(r), np.matrix) - assert_equal(r, [[2.5, 10.0/3]]) + assert_equal(r, [[2.5, 10.0 / 3]]) def test_dot_matrix(): @@ -255,8 +261,8 @@ def test_dot_matrix(): def test_ediff1d_matrix(): # 2018-04-29: moved here from core.tests.test_arraysetops. 
- assert(isinstance(np.ediff1d(np.matrix(1)), np.matrix)) - assert(isinstance(np.ediff1d(np.matrix(1), to_begin=1), np.matrix)) + assert isinstance(np.ediff1d(np.matrix(1)), np.matrix) + assert isinstance(np.ediff1d(np.matrix(1), to_begin=1), np.matrix) def test_apply_along_axis_matrix(): diff --git a/numpy/matrixlib/tests/test_masked_matrix.py b/numpy/matrixlib/tests/test_masked_matrix.py index 5303e6ce723f..ee3dc96b9ac5 100644 --- a/numpy/matrixlib/tests/test_masked_matrix.py +++ b/numpy/matrixlib/tests/test_masked_matrix.py @@ -1,13 +1,22 @@ import pickle import numpy as np -from numpy.testing import assert_warns -from numpy.ma.testutils import (assert_, assert_equal, assert_raises, - assert_array_equal) -from numpy.ma.core import (masked_array, masked_values, masked, allequal, - MaskType, getmask, MaskedArray, nomask, - log, add, hypot, divide) +from numpy.ma.core import ( + MaskedArray, + MaskType, + add, + allequal, + divide, + getmask, + hypot, + log, + masked, + masked_array, + masked_values, + nomask, +) from numpy.ma.extras import mr_ +from numpy.ma.testutils import assert_, assert_array_equal, assert_equal, assert_raises class MMatrix(MaskedArray, np.matrix,): @@ -20,7 +29,6 @@ def __new__(cls, data, mask=nomask): def __array_finalize__(self, obj): np.matrix.__array_finalize__(self, obj) MaskedArray.__array_finalize__(self, obj) - return @property def _series(self): @@ -108,7 +116,7 @@ def test_flat(self): # Test setting test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) testflat = test.flat - testflat[:] = testflat[[2, 1, 0]] + testflat[:] = testflat[np.array([2, 1, 0])] assert_equal(test, control) testflat[0] = 9 # test that matrices keep the correct shape (#4615) @@ -174,40 +182,40 @@ def test_view(self): class TestSubclassing: # Test suite for masked subclasses of ndarray. 
- def setup_method(self): + def _create_data(self): x = np.arange(5, dtype='float') mx = MMatrix(x, mask=[0, 1, 0, 0, 0]) - self.data = (x, mx) + return x, mx def test_maskedarray_subclassing(self): # Tests subclassing MaskedArray - (x, mx) = self.data + mx = self._create_data()[1] assert_(isinstance(mx._data, np.matrix)) def test_masked_unary_operations(self): # Tests masked_unary_operation - (x, mx) = self.data + x, mx = self._create_data() with np.errstate(divide='ignore'): assert_(isinstance(log(mx), MMatrix)) assert_equal(log(x), np.log(x)) def test_masked_binary_operations(self): # Tests masked_binary_operation - (x, mx) = self.data + x, mx = self._create_data() # Result should be a MMatrix assert_(isinstance(add(mx, mx), MMatrix)) assert_(isinstance(add(mx, x), MMatrix)) # Result should work - assert_equal(add(mx, x), mx+x) + assert_equal(add(mx, x), mx + x) assert_(isinstance(add(mx, mx)._data, np.matrix)) - with assert_warns(DeprecationWarning): - assert_(isinstance(add.outer(mx, mx), MMatrix)) + with assert_raises(TypeError): + add.outer(mx, mx) assert_(isinstance(hypot(mx, mx), MMatrix)) assert_(isinstance(hypot(mx, x), MMatrix)) def test_masked_binary_operations2(self): # Tests domained_masked_binary_operation - (x, mx) = self.data + x, mx = self._create_data() xmx = masked_array(mx.data.__array__(), mask=mx.mask) assert_(isinstance(divide(mx, mx), MMatrix)) assert_(isinstance(divide(mx, x), MMatrix)) diff --git a/numpy/matrixlib/tests/test_matrix_linalg.py b/numpy/matrixlib/tests/test_matrix_linalg.py index 106c2e38217a..99acf32adc49 100644 --- a/numpy/matrixlib/tests/test_matrix_linalg.py +++ b/numpy/matrixlib/tests/test_matrix_linalg.py @@ -1,12 +1,26 @@ """ Test functions for linalg module using the matrix class.""" -import numpy as np +import pytest +import numpy as np from numpy.linalg.tests.test_linalg import ( - LinalgCase, apply_tag, TestQR as _TestQR, LinalgTestCase, - _TestNorm2D, _TestNormDoubleBase, _TestNormSingleBase, _TestNormInt64Base, 
- SolveCases, InvCases, EigvalsCases, EigCases, SVDCases, CondCases, - PinvCases, DetCases, LstsqCases) - + CondCases, + DetCases, + EigCases, + EigvalsCases, + InvCases, + LinalgCase, + LinalgTestCase, + LstsqCases, + PinvCases, + SolveCases, + SVDCases, + TestQR as _TestQR, + _TestNorm2D, + _TestNormDoubleBase, + _TestNormInt64Base, + _TestNormSingleBase, + apply_tag, +) CASES = [] @@ -69,6 +83,9 @@ class TestDetMatrix(DetCases, MatrixTestCase): pass +@pytest.mark.thread_unsafe( + reason="residuals not calculated properly for square tests (gh-29851)" +) class TestLstsqMatrix(LstsqCases, MatrixTestCase): pass diff --git a/numpy/matrixlib/tests/test_multiarray.py b/numpy/matrixlib/tests/test_multiarray.py index 638d0d1534de..2d9d1f8efe41 100644 --- a/numpy/matrixlib/tests/test_multiarray.py +++ b/numpy/matrixlib/tests/test_multiarray.py @@ -1,5 +1,6 @@ import numpy as np -from numpy.testing import assert_, assert_equal, assert_array_equal +from numpy.testing import assert_, assert_array_equal, assert_equal + class TestView: def test_type(self): diff --git a/numpy/matrixlib/tests/test_numeric.py b/numpy/matrixlib/tests/test_numeric.py index a772bb388847..f2c259f2fb97 100644 --- a/numpy/matrixlib/tests/test_numeric.py +++ b/numpy/matrixlib/tests/test_numeric.py @@ -1,14 +1,15 @@ import numpy as np from numpy.testing import assert_equal + class TestDot: def test_matscalar(self): b1 = np.matrix(np.ones((3, 3), dtype=complex)) - assert_equal(b1*1.0, b1) + assert_equal(b1 * 1.0, b1) def test_diagonal(): - b1 = np.matrix([[1,2],[3,4]]) + b1 = np.matrix([[1, 2], [3, 4]]) diag_b1 = np.matrix([[1, 4]]) array_b1 = np.array([1, 4]) diff --git a/numpy/matrixlib/tests/test_regression.py b/numpy/matrixlib/tests/test_regression.py index 27ab63058da7..a78bf74cbb15 100644 --- a/numpy/matrixlib/tests/test_regression.py +++ b/numpy/matrixlib/tests/test_regression.py @@ -20,7 +20,7 @@ def test_matrix_properties(self): def test_matrix_multiply_by_1d_vector(self): # Ticket #473 def 
mul(): - np.asmatrix(np.eye(2))*np.ones(2) + np.asmatrix(np.eye(2)) * np.ones(2) assert_raises(ValueError, mul) diff --git a/numpy/meson.build b/numpy/meson.build index 7e9ec5244cc9..45d5a2b52eb8 100644 --- a/numpy/meson.build +++ b/numpy/meson.build @@ -32,7 +32,7 @@ endif # than a `.a` file extension in order not to break including them in a # distutils-based build (see gh-23981 and # https://mesonbuild.com/FAQ.html#why-does-building-my-project-with-msvc-output-static-libraries-called-libfooa) -if is_windows and cc.get_id() == 'msvc' +if is_windows and cc.get_id() in ['msvc', 'clang-cl'] name_prefix_staticlib = '' name_suffix_staticlib = 'lib' else @@ -214,6 +214,52 @@ else lapack_dep = declare_dependency(dependencies: [lapack, blas_dep]) endif +# Determine whether it is necessary to link libatomic with gcc. This +# could be the case on 32-bit platforms when atomic operations are used +# on 64-bit types or on RISC-V using 8-bit atomics, so we explicitly +# check for both 64 bit and 8 bit operations. The check is adapted from +# SciPy, who copied it from Mesa. +null_dep = dependency('', required : false) +atomic_dep = null_dep +code_non_lockfree = ''' + #include + #include + int main() { + struct { + void *p; + uint8_t u8v; + } x; + x.p = NULL; + x.u8v = 0; + uint8_t res = __atomic_load_n(&x.u8v, __ATOMIC_SEQ_CST); + __atomic_store_n(&x.u8v, 1, __ATOMIC_SEQ_CST); + void *p = __atomic_load_n((void **)x.p, __ATOMIC_SEQ_CST); + __atomic_store_n((void **)x.p, NULL, __ATOMIC_SEQ_CST); + return 0; + } +''' +if cc.get_id() != 'msvc' + if not cc.links( + code_non_lockfree, + name : 'Check atomic builtins without -latomic' + ) + atomic_dep = cc.find_library('atomic', required: false) + if atomic_dep.found() + # We're not sure that with `-latomic` things will work for all compilers, + # so verify and only keep libatomic as a dependency if this works. It is + # possible the build will fail later otherwise - unclear under what + # circumstances (compilers, runtimes, etc.) 
exactly and this may need to + # be extended when support is added for new CPUs + if not cc.links( + code_non_lockfree, + dependencies: atomic_dep, + name : 'Check atomic builtins with -latomic' + ) + atomic_dep = null_dep + endif + endif + endif +endif # Copy the main __init__.py|pxd files to the build dir (needed for Cython) __init__py = fs.copyfile('__init__.py') @@ -226,22 +272,26 @@ python_sources = [ '__init__.pxd', '__init__.py', '__init__.pyi', + '__config__.pyi', '_array_api_info.py', '_array_api_info.pyi', '_configtool.py', + '_configtool.pyi', '_distributor_init.py', + '_distributor_init.pyi', '_globals.py', + '_globals.pyi', '_pytesttester.py', '_pytesttester.pyi', '_expired_attrs_2_0.py', + '_expired_attrs_2_0.pyi', 'conftest.py', - 'ctypeslib.py', - 'ctypeslib.pyi', 'exceptions.py', 'exceptions.pyi', 'dtypes.py', 'dtypes.pyi', 'matlib.py', + 'matlib.pyi', 'py.typed', 'version.pyi', ] @@ -267,7 +317,7 @@ pure_subdirs = [ '_pyinstaller', '_typing', '_utils', - 'compat', + 'ctypeslib', 'doc', 'f2py', 'lib', @@ -324,7 +374,7 @@ install_subdir('tests', install_dir: np_dir, install_tag: 'tests') compilers = { 'C': cc, 'CPP': cpp, - 'CYTHON': meson.get_compiler('cython') + 'CYTHON': cy, } machines = { diff --git a/numpy/polynomial/__init__.py b/numpy/polynomial/__init__.py index 2a31e52f2aa4..ed1ad5a2fdd3 100644 --- a/numpy/polynomial/__init__.py +++ b/numpy/polynomial/__init__.py @@ -41,6 +41,8 @@ `~chebyshev.Chebyshev.fit` class method:: >>> from numpy.polynomial import Chebyshev + >>> xdata = [1, 2, 3, 4] + >>> ydata = [1, 4, 9, 16] >>> c = Chebyshev.fit(xdata, ydata, deg=1) is preferred over the `chebyshev.chebfit` function from the @@ -67,7 +69,6 @@ - ``Poly.window`` -- Default window - ``Poly.basis_name`` -- String used to represent the basis - ``Poly.maxpower`` -- Maximum value ``n`` such that ``p**n`` is allowed -- ``Poly.nickname`` -- String used in printing Creation -------- @@ -113,14 +114,14 @@ - ``p.truncate(size)`` -- Truncate ``p`` to given 
size """ -from .polynomial import Polynomial from .chebyshev import Chebyshev -from .legendre import Legendre from .hermite import Hermite from .hermite_e import HermiteE from .laguerre import Laguerre +from .legendre import Legendre +from .polynomial import Polynomial -__all__ = [ +__all__ = [ # noqa: F822 "set_default_printstyle", "polynomial", "Polynomial", "chebyshev", "Chebyshev", @@ -181,5 +182,6 @@ def set_default_printstyle(style): from numpy._pytesttester import PytestTester + test = PytestTester(__name__) del PytestTester diff --git a/numpy/polynomial/__init__.pyi b/numpy/polynomial/__init__.pyi index 0fc5ef0f53e4..ad005a2dbe38 100644 --- a/numpy/polynomial/__init__.pyi +++ b/numpy/polynomial/__init__.pyi @@ -1,21 +1,31 @@ -from numpy._pytesttester import PytestTester +from typing import Final, Literal -from numpy.polynomial import ( - chebyshev as chebyshev, - hermite as hermite, - hermite_e as hermite_e, - laguerre as laguerre, - legendre as legendre, - polynomial as polynomial, -) -from numpy.polynomial.chebyshev import Chebyshev as Chebyshev -from numpy.polynomial.hermite import Hermite as Hermite -from numpy.polynomial.hermite_e import HermiteE as HermiteE -from numpy.polynomial.laguerre import Laguerre as Laguerre -from numpy.polynomial.legendre import Legendre as Legendre -from numpy.polynomial.polynomial import Polynomial as Polynomial +from . import chebyshev, hermite, hermite_e, laguerre, legendre, polynomial +from .chebyshev import Chebyshev +from .hermite import Hermite +from .hermite_e import HermiteE +from .laguerre import Laguerre +from .legendre import Legendre +from .polynomial import Polynomial -__all__: list[str] -test: PytestTester +__all__ = [ + "set_default_printstyle", + "polynomial", + "Polynomial", + "chebyshev", + "Chebyshev", + "legendre", + "Legendre", + "hermite", + "Hermite", + "hermite_e", + "HermiteE", + "laguerre", + "Laguerre", +] -def set_default_printstyle(style): ... 
+def set_default_printstyle(style: Literal["ascii", "unicode"]) -> None: ... + +from numpy._pytesttester import PytestTester as _PytestTester + +test: Final[_PytestTester] diff --git a/numpy/polynomial/_polybase.py b/numpy/polynomial/_polybase.py index 9c345553eedd..f89343340931 100644 --- a/numpy/polynomial/_polybase.py +++ b/numpy/polynomial/_polybase.py @@ -6,12 +6,13 @@ abc module from the stdlib, hence it is only available for Python >= 2.6. """ -import os import abc import numbers -from typing import Callable +import os +from collections.abc import Callable import numpy as np + from . import polyutils as pu __all__ = ['ABCPolyBase'] @@ -23,8 +24,6 @@ class ABCPolyBase(abc.ABC): '+', '-', '*', '//', '%', 'divmod', '**', and '()' along with the methods listed below. - .. versionadded:: 1.9.0 - Parameters ---------- coef : array_like @@ -39,7 +38,7 @@ class ABCPolyBase(abc.ABC): Window, see domain for its use. The default value is the derived class window. symbol : str, optional - Symbol used to represent the independent variable in string + Symbol used to represent the independent variable in string representations of the polynomial expression, e.g. for printing. The symbol must be a valid Python identifier. Default value is 'x'. @@ -190,8 +189,6 @@ def _fromroots(r): def has_samecoef(self, other): """Check if coefficients match. - .. versionadded:: 1.6.0 - Parameters ---------- other : class instance @@ -203,18 +200,14 @@ def has_samecoef(self, other): True if the coefficients are the same, False otherwise. """ - if len(self.coef) != len(other.coef): - return False - elif not np.all(self.coef == other.coef): - return False - else: - return True + return ( + len(self.coef) == len(other.coef) + and np.all(self.coef == other.coef) + ) def has_samedomain(self, other): """Check if domains match. - .. 
versionadded:: 1.6.0 - Parameters ---------- other : class instance @@ -231,8 +224,6 @@ def has_samedomain(self, other): def has_samewindow(self, other): """Check if windows match. - .. versionadded:: 1.6.0 - Parameters ---------- other : class instance @@ -249,8 +240,6 @@ def has_samewindow(self, other): def has_sametype(self, other): """Check if types match. - .. versionadded:: 1.7.0 - Parameters ---------- other : object @@ -271,8 +260,6 @@ def _get_coefficients(self, other): class as self with identical domain and window. If so, return its coefficients, otherwise return `other`. - .. versionadded:: 1.9.0 - Parameters ---------- other : anything @@ -444,7 +431,7 @@ def _repr_latex_term(cls, i, arg_str, needs_parens): def _repr_latex_scalar(x, parens=False): # TODO: we're stuck with disabling math formatting until we handle # exponents in this function - return r'\text{{{}}}'.format(pu.format_float(x, parens=parens)) + return fr'\text{{{pu.format_float(x, parens=parens)}}}' def _format_term(self, scalar_format: Callable, off: float, scale: float): """ Format a single term in the expansion """ @@ -464,7 +451,7 @@ def _format_term(self, scalar_format: Callable, off: float, scale: float): ) needs_parens = True return term, needs_parens - + def _repr_latex_(self): # get the scaled argument string to the basis functions off, scale = self.mapparms() @@ -505,8 +492,6 @@ def _repr_latex_(self): return rf"${self.symbol} \mapsto {body}$" - - # Pickle and copy def __getstate__(self): @@ -627,10 +612,6 @@ def __rmul__(self, other): return NotImplemented return self.__class__(coef, self.domain, self.window, self.symbol) - def __rdiv__(self, other): - # set to __floordiv__ /. - return self.__rfloordiv__(other) - def __rtruediv__(self, other): # An instance of ABCPolyBase is not considered a # Number. @@ -689,8 +670,6 @@ def copy(self): def degree(self): """The degree of the series. - .. 
versionadded:: 1.5.0 - Returns ------- degree : int @@ -701,6 +680,7 @@ def degree(self): Create a polynomial object for ``1 + 7*x + 4*x**2``: + >>> np.polynomial.set_default_printstyle("unicode") >>> poly = np.polynomial.Polynomial([1, 7, 4]) >>> print(poly) 1.0 + 7.0·x + 4.0·x² @@ -730,8 +710,6 @@ def cutdeg(self, deg): squares where the coefficients of the high degree terms may be very small. - .. versionadded:: 1.5.0 - Parameters ---------- deg : non-negative int @@ -893,8 +871,8 @@ def integ(self, m=1, k=[], lbnd=None): if lbnd is None: lbnd = 0 else: - lbnd = off + scl*lbnd - coef = self._int(self.coef, m, k, lbnd, 1./scl) + lbnd = off + scl * lbnd + coef = self._int(self.coef, m, k, lbnd, 1. / scl) return self.__class__(coef, self.domain, self.window, self.symbol) def deriv(self, m=1): @@ -942,8 +920,6 @@ def linspace(self, n=100, domain=None): default the domain is the same as that of the series instance. This method is intended mostly as a plotting aid. - .. versionadded:: 1.5.0 - Parameters ---------- n : int, optional @@ -1010,13 +986,9 @@ class domain in NumPy 1.4 and ``None`` in later versions. chosen so that the errors of the products ``w[i]*y[i]`` all have the same variance. When using inverse-variance weighting, use ``w[i] = 1/sigma(y[i])``. The default value is None. - - .. versionadded:: 1.5.0 window : {[beg, end]}, optional Window to use for the returned series. The default value is the default class domain - - .. versionadded:: 1.6.0 symbol : str, optional Symbol representing the independent variable. Default is 'x'. @@ -1041,7 +1013,10 @@ class domain in NumPy 1.4 and ``None`` in later versions. 
""" if domain is None: domain = pu.getdomain(x) - elif type(domain) is list and len(domain) == 0: + if domain[0] == domain[1]: + domain[0] -= 1 + domain[1] += 1 + elif isinstance(domain, list) and len(domain) == 0: domain = cls.domain if window is None: @@ -1089,7 +1064,7 @@ def fromroots(cls, roots, domain=[], window=None, symbol='x'): [roots] = pu.as_series([roots], trim=False) if domain is None: domain = pu.getdomain(roots) - elif type(domain) is list and len(domain) == 0: + elif isinstance(domain, list) and len(domain) == 0: domain = cls.domain if window is None: @@ -1097,7 +1072,7 @@ def fromroots(cls, roots, domain=[], window=None, symbol='x'): deg = len(roots) off, scl = pu.mapparms(domain, window) - rnew = off + scl*roots + rnew = off + scl * roots coef = cls._fromroots(rnew) / scl**deg return cls(coef, domain=domain, window=window, symbol=symbol) @@ -1142,8 +1117,6 @@ def basis(cls, deg, domain=None, window=None, symbol='x'): Returns the series representing the basis polynomial of degree `deg`. - .. versionadded:: 1.7.0 - Parameters ---------- deg : int @@ -1175,7 +1148,7 @@ def basis(cls, deg, domain=None, window=None, symbol='x'): if ideg != deg or ideg < 0: raise ValueError("deg must be non-negative integer") - return cls([0]*ideg + [1], domain, window, symbol) + return cls([0] * ideg + [1], domain, window, symbol) @classmethod def cast(cls, series, domain=None, window=None): @@ -1186,8 +1159,6 @@ def cast(cls, series, domain=None, window=None): module, but could be some other class that supports the convert method. - .. 
versionadded:: 1.7.0 - Parameters ---------- series : series diff --git a/numpy/polynomial/_polybase.pyi b/numpy/polynomial/_polybase.pyi index 25c740dbedd0..2fdfd24db7a9 100644 --- a/numpy/polynomial/_polybase.pyi +++ b/numpy/polynomial/_polybase.pyi @@ -1,71 +1,255 @@ import abc -from typing import Any, ClassVar +import decimal +from collections.abc import Iterator, Sequence +from typing import Any, ClassVar, Generic, Literal, Self, SupportsIndex, overload +from typing_extensions import TypeIs, TypeVar -__all__: list[str] +import numpy as np +import numpy.typing as npt +from numpy._typing import ( + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _FloatLike_co, + _NumberLike_co, +) -class ABCPolyBase(abc.ABC): - __hash__: ClassVar[None] # type: ignore[assignment] - __array_ufunc__: ClassVar[None] - maxpower: ClassVar[int] - coef: Any +from ._polytypes import ( + _AnyInt, + _Array2, + _ArrayLikeCoef_co, + _ArrayLikeCoefObject_co, + _CoefLike_co, + _CoefSeries, + _Series, + _SeriesLikeCoef_co, + _SeriesLikeInt_co, + _Tuple2, +) + +__all__ = ["ABCPolyBase"] + +_NameT_co = TypeVar("_NameT_co", bound=str | None, default=str | None, covariant=True) + +type _AnyOther = ABCPolyBase | _CoefLike_co | _SeriesLikeCoef_co + +### + +class ABCPolyBase(Generic[_NameT_co], abc.ABC): # noqa: UP046 + __hash__: ClassVar[None] = None # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] + __array_ufunc__: ClassVar[None] = None + maxpower: ClassVar[Literal[100]] = 100 + + _superscript_mapping: ClassVar[dict[int, str]] = ... + _subscript_mapping: ClassVar[dict[int, str]] = ... + _use_unicode: ClassVar[bool] = ... + + _symbol: str @property - def symbol(self) -> str: ... + def symbol(self, /) -> str: ... @property @abc.abstractmethod - def domain(self): ... + def domain(self) -> _Array2[np.float64 | Any]: ... @property @abc.abstractmethod - def window(self): ... + def window(self) -> _Array2[np.float64 | Any]: ... 
@property @abc.abstractmethod - def basis_name(self): ... - def has_samecoef(self, other): ... - def has_samedomain(self, other): ... - def has_samewindow(self, other): ... - def has_sametype(self, other): ... - def __init__(self, coef, domain=..., window=..., symbol: str = ...) -> None: ... - def __format__(self, fmt_str): ... - def __call__(self, arg): ... - def __iter__(self): ... - def __len__(self): ... - def __neg__(self): ... - def __pos__(self): ... - def __add__(self, other): ... - def __sub__(self, other): ... - def __mul__(self, other): ... - def __truediv__(self, other): ... - def __floordiv__(self, other): ... - def __mod__(self, other): ... - def __divmod__(self, other): ... - def __pow__(self, other): ... - def __radd__(self, other): ... - def __rsub__(self, other): ... - def __rmul__(self, other): ... - def __rdiv__(self, other): ... - def __rtruediv__(self, other): ... - def __rfloordiv__(self, other): ... - def __rmod__(self, other): ... - def __rdivmod__(self, other): ... - def __eq__(self, other): ... - def __ne__(self, other): ... - def copy(self): ... - def degree(self): ... - def cutdeg(self, deg): ... - def trim(self, tol=...): ... - def truncate(self, size): ... - def convert(self, domain=..., kind=..., window=...): ... - def mapparms(self): ... - def integ(self, m=..., k = ..., lbnd=...): ... - def deriv(self, m=...): ... - def roots(self): ... - def linspace(self, n=..., domain=...): ... + def basis_name(self) -> _NameT_co: ... + + coef: _CoefSeries + + def __init__( + self, + /, + coef: _SeriesLikeCoef_co, + domain: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", + ) -> None: ... + + # + @overload + def __call__[PolyT: ABCPolyBase](self, /, arg: PolyT) -> PolyT: ... + @overload + def __call__(self, /, arg: _FloatLike_co | decimal.Decimal) -> np.float64 | Any: ... + @overload + def __call__(self, /, arg: _NumberLike_co) -> np.complex128 | Any: ... 
+ @overload + def __call__(self, /, arg: _ArrayLikeFloat_co) -> npt.NDArray[np.float64 | Any]: ... + @overload + def __call__(self, /, arg: _ArrayLikeComplex_co) -> npt.NDArray[np.complex128 | Any]: ... + @overload + def __call__(self, /, arg: _ArrayLikeCoefObject_co) -> npt.NDArray[np.object_]: ... + + # unary ops + def __neg__(self, /) -> Self: ... + def __pos__(self, /) -> Self: ... + + # binary ops + def __add__(self, x: _AnyOther, /) -> Self: ... + def __sub__(self, x: _AnyOther, /) -> Self: ... + def __mul__(self, x: _AnyOther, /) -> Self: ... + def __pow__(self, x: _AnyOther, /) -> Self: ... + def __truediv__(self, x: _AnyOther, /) -> Self: ... + def __floordiv__(self, x: _AnyOther, /) -> Self: ... + def __mod__(self, x: _AnyOther, /) -> Self: ... + def __divmod__(self, x: _AnyOther, /) -> _Tuple2[Self]: ... + + # reflected binary ops + def __radd__(self, x: _AnyOther, /) -> Self: ... + def __rsub__(self, x: _AnyOther, /) -> Self: ... + def __rmul__(self, x: _AnyOther, /) -> Self: ... + def __rtruediv__(self, x: _AnyOther, /) -> Self: ... + def __rfloordiv__(self, x: _AnyOther, /) -> Self: ... + def __rmod__(self, x: _AnyOther, /) -> Self: ... + def __rdivmod__(self, x: _AnyOther, /) -> _Tuple2[Self]: ... + + # iterable and sized + def __len__(self, /) -> int: ... + def __iter__(self, /) -> Iterator[np.float64 | Any]: ... + + # pickling + def __getstate__(self, /) -> dict[str, Any]: ... + def __setstate__(self, dict: dict[str, Any], /) -> None: ... + + # + def has_samecoef(self, /, other: ABCPolyBase) -> bool: ... + def has_samedomain(self, /, other: ABCPolyBase) -> bool: ... + def has_samewindow(self, /, other: ABCPolyBase) -> bool: ... + def has_sametype(self, /, other: object) -> TypeIs[Self]: ... + + # + def copy(self, /) -> Self: ... + def degree(self, /) -> int: ... + def cutdeg(self, /, deg: int) -> Self: ... + def trim(self, /, tol: _FloatLike_co = 0) -> Self: ... + def truncate(self, /, size: _AnyInt) -> Self: ... 
+ + # + @overload + def convert[PolyT: ABCPolyBase]( + self, + /, + domain: _SeriesLikeCoef_co | None, + kind: type[PolyT], + window: _SeriesLikeCoef_co | None = None, + ) -> PolyT: ... + @overload + def convert[PolyT: ABCPolyBase]( + self, + /, + domain: _SeriesLikeCoef_co | None = None, + *, + kind: type[PolyT], + window: _SeriesLikeCoef_co | None = None, + ) -> PolyT: ... + @overload + def convert( + self, + /, + domain: _SeriesLikeCoef_co | None = None, + kind: None = None, + window: _SeriesLikeCoef_co | None = None, + ) -> Self: ... + + # + def mapparms(self, /) -> _Tuple2[Any]: ... + def integ( + self, + /, + m: SupportsIndex = 1, + k: _CoefLike_co | _SeriesLikeCoef_co = [], + lbnd: _CoefLike_co | None = None, + ) -> Self: ... + def deriv(self, /, m: SupportsIndex = 1) -> Self: ... + def roots(self, /) -> _CoefSeries: ... + def linspace( + self, + /, + n: SupportsIndex = 100, + domain: _SeriesLikeCoef_co | None = None, + ) -> _Tuple2[_Series[np.float64 | np.complex128]]: ... + + # + @overload + @classmethod + def fit( + cls, + x: _SeriesLikeCoef_co, + y: _SeriesLikeCoef_co, + deg: int | _SeriesLikeInt_co, + domain: _SeriesLikeCoef_co | None = None, + rcond: _FloatLike_co | None = None, + full: Literal[False] = False, + w: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", + ) -> Self: ... + @overload + @classmethod + def fit( + cls, + x: _SeriesLikeCoef_co, + y: _SeriesLikeCoef_co, + deg: int | _SeriesLikeInt_co, + domain: _SeriesLikeCoef_co | None = None, + rcond: _FloatLike_co | None = None, + *, + full: Literal[True], + w: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", + ) -> tuple[Self, Sequence[np.inexact | np.int32]]: ... 
+ @overload + @classmethod + def fit( + cls, + x: _SeriesLikeCoef_co, + y: _SeriesLikeCoef_co, + deg: int | _SeriesLikeInt_co, + domain: _SeriesLikeCoef_co | None, + rcond: _FloatLike_co, + full: Literal[True], + /, + w: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", + ) -> tuple[Self, Sequence[np.inexact | np.int32]]: ... + + # + @classmethod + def fromroots( + cls, + roots: _ArrayLikeCoef_co, + domain: _SeriesLikeCoef_co | None = [], + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", + ) -> Self: ... + @classmethod + def identity( + cls, + domain: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", + ) -> Self: ... @classmethod - def fit(cls, x, y, deg, domain=..., rcond=..., full=..., w=..., window=...): ... + def basis( + cls, + deg: _AnyInt, + domain: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", + ) -> Self: ... @classmethod - def fromroots(cls, roots, domain = ..., window=...): ... + def cast( + cls, + series: ABCPolyBase, + domain: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, + ) -> Self: ... @classmethod - def identity(cls, domain=..., window=...): ... + def _str_term_unicode(cls, /, i: str, arg_str: str) -> str: ... @classmethod - def basis(cls, deg, domain=..., window=...): ... + def _str_term_ascii(cls, /, i: str, arg_str: str) -> str: ... @classmethod - def cast(cls, series, domain=..., window=...): ... + def _repr_latex_term(cls, /, i: str, arg_str: str, needs_parens: bool) -> str: ... 
diff --git a/numpy/polynomial/_polytypes.pyi b/numpy/polynomial/_polytypes.pyi new file mode 100644 index 000000000000..46d17ac6353c --- /dev/null +++ b/numpy/polynomial/_polytypes.pyi @@ -0,0 +1,491 @@ +from collections.abc import Sequence +from typing import ( + Any, + Literal, + NoReturn, + Protocol, + Self, + SupportsIndex, + SupportsInt, + overload, + type_check_only, +) + +import numpy as np +import numpy.typing as npt +from numpy._typing import ( + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeNumber_co, + _ComplexLike_co, + _FloatLike_co, + _IntLike_co, + _NestedSequence, + _NumberLike_co, + _SupportsArray, +) + +# compatible with e.g. int, float, complex, Decimal, Fraction, and ABCPolyBase +@type_check_only +class _SupportsCoefOps[T](Protocol): + def __eq__(self, x: object, /) -> bool: ... + def __ne__(self, x: object, /) -> bool: ... + def __neg__(self, /) -> Self: ... + def __pos__(self, /) -> Self: ... + def __add__(self, x: T, /) -> Self: ... + def __sub__(self, x: T, /) -> Self: ... + def __mul__(self, x: T, /) -> Self: ... + def __pow__(self, x: T, /) -> Self | float: ... + def __radd__(self, x: T, /) -> Self: ... + def __rsub__(self, x: T, /) -> Self: ... + def __rmul__(self, x: T, /) -> Self: ... 
+ +type _PolyScalar = np.bool | np.number | np.object_ + +type _Series[ScalarT: _PolyScalar] = np.ndarray[tuple[int], np.dtype[ScalarT]] + +type _FloatSeries = _Series[np.floating] +type _ComplexSeries = _Series[np.complexfloating] +type _ObjectSeries = _Series[np.object_] +type _CoefSeries = _Series[np.inexact | np.object_] + +type _FloatArray = npt.NDArray[np.floating] +type _ComplexArray = npt.NDArray[np.complexfloating] +type _ObjectArray = npt.NDArray[np.object_] +type _CoefArray = npt.NDArray[np.inexact | np.object_] + +type _Tuple2[_T] = tuple[_T, _T] +type _Array1[ScalarT: _PolyScalar] = np.ndarray[tuple[Literal[1]], np.dtype[ScalarT]] +type _Array2[ScalarT: _PolyScalar] = np.ndarray[tuple[Literal[2]], np.dtype[ScalarT]] + +type _AnyInt = SupportsInt | SupportsIndex + +type _CoefObjectLike_co = np.object_ | _SupportsCoefOps[Any] +type _CoefLike_co = _NumberLike_co | _CoefObjectLike_co + +# The term "series" is used here to refer to 1-d arrays of numeric scalars. +type _SeriesLikeBool_co = _SupportsArray[np.dtype[np.bool]] | Sequence[bool | np.bool] +type _SeriesLikeInt_co = _SupportsArray[np.dtype[np.integer | np.bool]] | Sequence[_IntLike_co] +type _SeriesLikeFloat_co = _SupportsArray[np.dtype[np.floating | np.integer | np.bool]] | Sequence[_FloatLike_co] +type _SeriesLikeComplex_co = _SupportsArray[np.dtype[np.number | np.bool]] | Sequence[_ComplexLike_co] +type _SeriesLikeObject_co = _SupportsArray[np.dtype[np.object_]] | Sequence[_CoefObjectLike_co] +type _SeriesLikeCoef_co = _SupportsArray[np.dtype[_PolyScalar]] | Sequence[_CoefLike_co] + +type _ArrayLikeCoefObject_co = _CoefObjectLike_co | _SeriesLikeObject_co | _NestedSequence[_SeriesLikeObject_co] +type _ArrayLikeCoef_co = npt.NDArray[_PolyScalar] | _ArrayLikeNumber_co | _ArrayLikeCoefObject_co + +type _Line[ScalarT: _PolyScalar] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Companion[ScalarT: _PolyScalar] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] + +type _AnyDegrees = 
Sequence[SupportsIndex] +type _FullFitResult = Sequence[np.inexact | np.int32] + +@type_check_only +class _FuncLine(Protocol): + @overload + def __call__[ScalarT: _PolyScalar](self, /, off: ScalarT, scl: ScalarT) -> _Line[ScalarT]: ... + @overload + def __call__(self, /, off: int, scl: int) -> _Line[np.int_]: ... + @overload + def __call__(self, /, off: float, scl: float) -> _Line[np.float64]: ... + @overload + def __call__(self, /, off: complex, scl: complex) -> _Line[np.complex128]: ... + @overload + def __call__(self, /, off: _SupportsCoefOps[Any], scl: _SupportsCoefOps[Any]) -> _Line[np.object_]: ... + +@type_check_only +class _FuncFromRoots(Protocol): + @overload + def __call__(self, /, roots: _SeriesLikeFloat_co) -> _FloatSeries: ... + @overload + def __call__(self, /, roots: _SeriesLikeComplex_co) -> _ComplexSeries: ... + @overload + def __call__(self, /, roots: _SeriesLikeCoef_co) -> _ObjectSeries: ... + +@type_check_only +class _FuncBinOp(Protocol): + @overload + def __call__(self, /, c1: _SeriesLikeBool_co, c2: _SeriesLikeBool_co) -> NoReturn: ... + @overload + def __call__(self, /, c1: _SeriesLikeFloat_co, c2: _SeriesLikeFloat_co) -> _FloatSeries: ... + @overload + def __call__(self, /, c1: _SeriesLikeComplex_co, c2: _SeriesLikeComplex_co) -> _ComplexSeries: ... + @overload + def __call__(self, /, c1: _SeriesLikeCoef_co, c2: _SeriesLikeCoef_co) -> _ObjectSeries: ... + +@type_check_only +class _FuncUnOp(Protocol): + @overload + def __call__(self, /, c: _SeriesLikeFloat_co) -> _FloatSeries: ... + @overload + def __call__(self, /, c: _SeriesLikeComplex_co) -> _ComplexSeries: ... + @overload + def __call__(self, /, c: _SeriesLikeCoef_co) -> _ObjectSeries: ... + +@type_check_only +class _FuncPoly2Ortho(Protocol): + @overload + def __call__(self, /, pol: _SeriesLikeFloat_co) -> _FloatSeries: ... + @overload + def __call__(self, /, pol: _SeriesLikeComplex_co) -> _ComplexSeries: ... 
+ @overload + def __call__(self, /, pol: _SeriesLikeCoef_co) -> _ObjectSeries: ... + +@type_check_only +class _FuncPow(Protocol): + @overload + def __call__(self, /, c: _SeriesLikeFloat_co, pow: _IntLike_co, maxpower: _IntLike_co | None = ...) -> _FloatSeries: ... + @overload + def __call__(self, /, c: _SeriesLikeComplex_co, pow: _IntLike_co, maxpower: _IntLike_co | None = ...) -> _ComplexSeries: ... + @overload + def __call__(self, /, c: _SeriesLikeCoef_co, pow: _IntLike_co, maxpower: _IntLike_co | None = ...) -> _ObjectSeries: ... + +@type_check_only +class _FuncDer(Protocol): + @overload + def __call__( + self, + /, + c: _ArrayLikeFloat_co, + m: SupportsIndex = 1, + scl: _FloatLike_co = 1, + axis: SupportsIndex = 0, + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + c: _ArrayLikeComplex_co, + m: SupportsIndex = 1, + scl: _ComplexLike_co = 1, + axis: SupportsIndex = 0, + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + c: _ArrayLikeCoef_co, + m: SupportsIndex = 1, + scl: _CoefLike_co = 1, + axis: SupportsIndex = 0, + ) -> _ObjectArray: ... + +@type_check_only +class _FuncInteg(Protocol): + @overload + def __call__( + self, + /, + c: _ArrayLikeFloat_co, + m: SupportsIndex = 1, + k: _FloatLike_co | _SeriesLikeFloat_co = [], + lbnd: _FloatLike_co = 0, + scl: _FloatLike_co = 1, + axis: SupportsIndex = 0, + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + c: _ArrayLikeComplex_co, + m: SupportsIndex = 1, + k: _ComplexLike_co | _SeriesLikeComplex_co = [], + lbnd: _ComplexLike_co = 0, + scl: _ComplexLike_co = 1, + axis: SupportsIndex = 0, + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + c: _ArrayLikeCoef_co, + m: SupportsIndex = 1, + k: _CoefLike_co | _SeriesLikeCoef_co = [], + lbnd: _CoefLike_co = 0, + scl: _CoefLike_co = 1, + axis: SupportsIndex = 0, + ) -> _ObjectArray: ... 
+ +@type_check_only +class _FuncVal(Protocol): + @overload + def __call__(self, /, x: _FloatLike_co, c: _SeriesLikeFloat_co, tensor: bool = True) -> np.floating: ... + @overload + def __call__(self, /, x: _NumberLike_co, c: _SeriesLikeComplex_co, tensor: bool = True) -> np.complexfloating: ... + @overload + def __call__(self, /, x: _ArrayLikeFloat_co, c: _ArrayLikeFloat_co, tensor: bool = True) -> _FloatArray: ... + @overload + def __call__(self, /, x: _ArrayLikeComplex_co, c: _ArrayLikeComplex_co, tensor: bool = True) -> _ComplexArray: ... + @overload + def __call__(self, /, x: _ArrayLikeCoef_co, c: _ArrayLikeCoef_co, tensor: bool = True) -> _ObjectArray: ... + @overload + def __call__(self, /, x: _CoefLike_co, c: _SeriesLikeObject_co, tensor: bool = True) -> _SupportsCoefOps[Any]: ... + +@type_check_only +class _FuncVal2D(Protocol): + @overload + def __call__(self, /, x: _FloatLike_co, y: _FloatLike_co, c: _SeriesLikeFloat_co) -> np.floating: ... + @overload + def __call__(self, /, x: _NumberLike_co, y: _NumberLike_co, c: _SeriesLikeComplex_co) -> np.complexfloating: ... + @overload + def __call__(self, /, x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co, c: _ArrayLikeFloat_co) -> _FloatArray: ... + @overload + def __call__(self, /, x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, c: _ArrayLikeComplex_co) -> _ComplexArray: ... + @overload + def __call__(self, /, x: _ArrayLikeCoef_co, y: _ArrayLikeCoef_co, c: _ArrayLikeCoef_co) -> _ObjectArray: ... + @overload + def __call__(self, /, x: _CoefLike_co, y: _CoefLike_co, c: _SeriesLikeCoef_co) -> _SupportsCoefOps[Any]: ... + +@type_check_only +class _FuncVal3D(Protocol): + @overload + def __call__( + self, + /, + x: _FloatLike_co, + y: _FloatLike_co, + z: _FloatLike_co, + c: _SeriesLikeFloat_co, + ) -> np.floating: ... + @overload + def __call__( + self, + /, + x: _NumberLike_co, + y: _NumberLike_co, + z: _NumberLike_co, + c: _SeriesLikeComplex_co, + ) -> np.complexfloating: ... 
+ @overload + def __call__( + self, + /, + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + z: _ArrayLikeFloat_co, + c: _ArrayLikeFloat_co, + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + z: _ArrayLikeComplex_co, + c: _ArrayLikeComplex_co, + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeCoef_co, + y: _ArrayLikeCoef_co, + z: _ArrayLikeCoef_co, + c: _ArrayLikeCoef_co, + ) -> _ObjectArray: ... + @overload + def __call__( + self, + /, + x: _CoefLike_co, + y: _CoefLike_co, + z: _CoefLike_co, + c: _SeriesLikeCoef_co, + ) -> _SupportsCoefOps[Any]: ... + +@type_check_only +class _FuncVander(Protocol): + @overload + def __call__(self, /, x: _ArrayLikeFloat_co, deg: SupportsIndex) -> _FloatArray: ... + @overload + def __call__(self, /, x: _ArrayLikeComplex_co, deg: SupportsIndex) -> _ComplexArray: ... + @overload + def __call__(self, /, x: _ArrayLikeCoef_co, deg: SupportsIndex) -> _ObjectArray: ... + @overload + def __call__(self, /, x: npt.ArrayLike, deg: SupportsIndex) -> _CoefArray: ... + +@type_check_only +class _FuncVander2D(Protocol): + @overload + def __call__(self, /, x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co, deg: _AnyDegrees) -> _FloatArray: ... + @overload + def __call__(self, /, x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, deg: _AnyDegrees) -> _ComplexArray: ... + @overload + def __call__(self, /, x: _ArrayLikeCoef_co, y: _ArrayLikeCoef_co, deg: _AnyDegrees) -> _ObjectArray: ... + @overload + def __call__(self, /, x: npt.ArrayLike, y: npt.ArrayLike, deg: _AnyDegrees) -> _CoefArray: ... + +@type_check_only +class _FuncVander3D(Protocol): + @overload + def __call__( + self, + /, + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + z: _ArrayLikeFloat_co, + deg: _AnyDegrees, + ) -> _FloatArray: ... 
+ @overload + def __call__( + self, + /, + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + z: _ArrayLikeComplex_co, + deg: _AnyDegrees, + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeCoef_co, + y: _ArrayLikeCoef_co, + z: _ArrayLikeCoef_co, + deg: _AnyDegrees, + ) -> _ObjectArray: ... + @overload + def __call__( + self, + /, + x: npt.ArrayLike, + y: npt.ArrayLike, + z: npt.ArrayLike, + deg: _AnyDegrees, + ) -> _CoefArray: ... + +@type_check_only +class _FuncFit(Protocol): + @overload + def __call__( + self, + /, + x: _SeriesLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None = ..., + full: Literal[False] = False, + w: _SeriesLikeFloat_co | None = ..., + ) -> _FloatArray: ... + @overload + def __call__( + self, + x: _SeriesLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None, + full: Literal[True], + /, + w: _SeriesLikeFloat_co | None = ..., + ) -> tuple[_FloatArray, _FullFitResult]: ... + @overload + def __call__( + self, + /, + x: _SeriesLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None = ..., + *, + full: Literal[True], + w: _SeriesLikeFloat_co | None = ..., + ) -> tuple[_FloatArray, _FullFitResult]: ... + @overload + def __call__( + self, + /, + x: _SeriesLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None = ..., + full: Literal[False] = False, + w: _SeriesLikeFloat_co | None = ..., + ) -> _ComplexArray: ... + @overload + def __call__( + self, + x: _SeriesLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None, + full: Literal[True], + /, + w: _SeriesLikeFloat_co | None = ..., + ) -> tuple[_ComplexArray, _FullFitResult]: ... 
+ @overload + def __call__( + self, + /, + x: _SeriesLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None = ..., + *, + full: Literal[True], + w: _SeriesLikeFloat_co | None = ..., + ) -> tuple[_ComplexArray, _FullFitResult]: ... + @overload + def __call__( + self, + /, + x: _SeriesLikeComplex_co, + y: _ArrayLikeCoef_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None = ..., + full: Literal[False] = False, + w: _SeriesLikeFloat_co | None = ..., + ) -> _ObjectArray: ... + @overload + def __call__( + self, + x: _SeriesLikeComplex_co, + y: _ArrayLikeCoef_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None, + full: Literal[True], + /, + w: _SeriesLikeFloat_co | None = ..., + ) -> tuple[_ObjectArray, _FullFitResult]: ... + @overload + def __call__( + self, + /, + x: _SeriesLikeComplex_co, + y: _ArrayLikeCoef_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None = ..., + *, + full: Literal[True], + w: _SeriesLikeFloat_co | None = ..., + ) -> tuple[_ObjectArray, _FullFitResult]: ... + +@type_check_only +class _FuncRoots(Protocol): + @overload + def __call__(self, /, c: _SeriesLikeFloat_co) -> _Series[np.float64]: ... + @overload + def __call__(self, /, c: _SeriesLikeComplex_co) -> _Series[np.complex128]: ... + @overload + def __call__(self, /, c: _SeriesLikeCoef_co) -> _ObjectSeries: ... + +@type_check_only +class _FuncCompanion(Protocol): + @overload + def __call__(self, /, c: _SeriesLikeFloat_co) -> _Companion[np.float64]: ... + @overload + def __call__(self, /, c: _SeriesLikeComplex_co) -> _Companion[np.complex128]: ... + @overload + def __call__(self, /, c: _SeriesLikeCoef_co) -> _Companion[np.object_]: ... + +@type_check_only +class _FuncGauss(Protocol): + def __call__(self, /, deg: SupportsIndex) -> _Tuple2[_Series[np.float64]]: ... + +@type_check_only +class _FuncWeight(Protocol): + @overload + def __call__(self, /, x: _ArrayLikeFloat_co) -> npt.NDArray[np.float64]: ... 
+ @overload + def __call__(self, /, x: _ArrayLikeComplex_co) -> npt.NDArray[np.complex128]: ... + @overload + def __call__(self, /, x: _ArrayLikeCoef_co) -> _ObjectArray: ... diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py index 20ee10c9980d..4dd2a85e15d7 100644 --- a/numpy/polynomial/chebyshev.py +++ b/numpy/polynomial/chebyshev.py @@ -106,10 +106,8 @@ Polynomials," *Journal of Statistical Planning and Inference 14*, 2008 (https://web.archive.org/web/20080221202153/https://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 4) -""" +""" # noqa: E501 import numpy as np -import numpy.linalg as la -from numpy.lib.array_utils import normalize_axis_index from . import polyutils as pu from ._polybase import ABCPolyBase @@ -150,8 +148,8 @@ def _cseries_to_zseries(c): """ n = c.size - zs = np.zeros(2*n-1, dtype=c.dtype) - zs[n-1:] = c/2 + zs = np.zeros(2 * n - 1, dtype=c.dtype) + zs[n - 1:] = c / 2 return zs + zs[::-1] @@ -174,8 +172,8 @@ def _zseries_to_cseries(zs): Chebyshev coefficients, ordered from low to high. """ - n = (zs.size + 1)//2 - c = zs[n-1:].copy() + n = (zs.size + 1) // 2 + c = zs[n - 1:].copy() c[1:n] *= 2 return c @@ -246,9 +244,9 @@ def _zseries_div(z1, z2): lc2 = len(z2) if lc2 == 1: z1 /= z2 - return z1, z1[:1]*0 + return z1, z1[:1] * 0 elif lc1 < lc2: - return z1[:1]*0, z1 + return z1[:1] * 0, z1 else: dlen = lc1 - lc2 scl = z2[0] @@ -260,17 +258,17 @@ def _zseries_div(z1, z2): r = z1[i] quo[i] = z1[i] quo[dlen - i] = r - tmp = r*z2 - z1[i:i+lc2] -= tmp - z1[j:j+lc2] -= tmp + tmp = r * z2 + z1[i:i + lc2] -= tmp + z1[j:j + lc2] -= tmp i += 1 j -= 1 r = z1[i] quo[i] = r - tmp = r*z2 - z1[i:i+lc2] -= tmp + tmp = r * z2 + z1[i:i + lc2] -= tmp quo /= scl - rem = z1[i+1:i-1+lc2].copy() + rem = z1[i + 1:i - 1 + lc2].copy() return quo, rem @@ -299,9 +297,9 @@ def _zseries_der(zs): division. 
""" - n = len(zs)//2 + n = len(zs) // 2 ns = np.array([-1, 0, 1], dtype=zs.dtype) - zs *= np.arange(-n, n+1)*2 + zs *= np.arange(-n, n + 1) * 2 d, r = _zseries_div(zs, ns) return d @@ -330,12 +328,12 @@ def _zseries_int(zs): dividing the resulting zs by two. """ - n = 1 + len(zs)//2 + n = 1 + len(zs) // 2 ns = np.array([-1, 0, 1], dtype=zs.dtype) zs = _zseries_mul(zs, ns) - div = np.arange(-n, n+1)*2 + div = np.arange(-n, n + 1) * 2 zs[:n] /= div[:n] - zs[n+1:] /= div[n+1:] + zs[n + 1:] /= div[n + 1:] zs[n] = 0 return zs @@ -438,7 +436,7 @@ def cheb2poly(c): array([-2., -8., 4., 12.]) """ - from .polynomial import polyadd, polysub, polymulx + from .polynomial import polyadd, polymulx, polysub [c] = pu.as_series([c]) n = len(c) @@ -451,7 +449,7 @@ def cheb2poly(c): for i in range(n - 1, 1, -1): tmp = c0 c0 = polysub(c[i - 2], c1) - c1 = polyadd(tmp, polymulx(c1)*2) + c1 = polyadd(tmp, polymulx(c1) * 2) return polyadd(c0, polymulx(c1)) @@ -670,10 +668,9 @@ def chebmulx(c): out : ndarray Array representing the result of the multiplication. - Notes - ----- - - .. 
versionadded:: 1.5.0 + See Also + -------- + chebadd, chebsub, chebmul, chebdiv, chebpow Examples -------- @@ -689,10 +686,10 @@ def chebmulx(c): return c prd = np.empty(len(c) + 1, dtype=c.dtype) - prd[0] = c[0]*0 + prd[0] = c[0] * 0 prd[1] = c[0] if len(c) > 1: - tmp = c[1:]/2 + tmp = c[1:] / 2 prd[2:] = tmp prd[0:-2] += tmp return prd @@ -796,15 +793,15 @@ def chebdiv(c1, c2): # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) if c2[-1] == 0: - raise ZeroDivisionError() + raise ZeroDivisionError # FIXME: add message with details to exception # note: this is more efficient than `pu._div(chebmul, c1, c2)` lc1 = len(c1) lc2 = len(c2) if lc1 < lc2: - return c1[:1]*0, c1 + return c1[:1] * 0, c1 elif lc2 == 1: - return c1/c2[-1], c1[:1]*0 + return c1 / c2[-1], c1[:1] * 0 else: z1 = _cseries_to_zseries(c1) z2 = _cseries_to_zseries(c2) @@ -900,8 +897,6 @@ def chebder(c, m=1, scl=1, axis=0): axis : int, optional Axis over which the derivative is taken. (Default: 0). - .. versionadded:: 1.7.0 - Returns ------- der : ndarray @@ -939,7 +934,7 @@ def chebder(c, m=1, scl=1, axis=0): iaxis = pu._as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -947,17 +942,17 @@ def chebder(c, m=1, scl=1, axis=0): c = np.moveaxis(c, iaxis, 0) n = len(c) if cnt >= n: - c = c[:1]*0 + c = c[:1] * 0 else: for i in range(cnt): n = n - 1 c *= scl der = np.empty((n,) + c.shape[1:], dtype=c.dtype) for j in range(n, 2, -1): - der[j - 1] = (2*j)*c[j] - c[j - 2] += (j*c[j])/(j - 2) + der[j - 1] = (2 * j) * c[j] + c[j - 2] += (j * c[j]) / (j - 2) if n > 1: - der[1] = 4*c[2] + der[1] = 4 * c[2] der[0] = c[1] c = der c = np.moveaxis(c, 0, iaxis) @@ -1002,8 +997,6 @@ def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0): axis : int, optional Axis over which the integral is taken. (Default: 0). - .. 
versionadded:: 1.7.0 - Returns ------- S : ndarray @@ -1064,13 +1057,13 @@ def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c c = np.moveaxis(c, iaxis, 0) - k = list(k) + [0]*(cnt - len(k)) + k = list(k) + [0] * (cnt - len(k)) for i in range(cnt): n = len(c) c *= scl @@ -1078,13 +1071,13 @@ def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0): c[0] += k[i] else: tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) - tmp[0] = c[0]*0 + tmp[0] = c[0] * 0 tmp[1] = c[0] if n > 1: - tmp[2] = c[1]/4 + tmp[2] = c[1] / 4 for j in range(2, n): - tmp[j + 1] = c[j]/(2*(j + 1)) - tmp[j - 1] -= c[j]/(2*(j - 1)) + tmp[j + 1] = c[j] / (2 * (j + 1)) + tmp[j - 1] -= c[j] / (2 * (j - 1)) tmp[0] += k[i] - chebval(lbnd, tmp) c = tmp c = np.moveaxis(c, 0, iaxis) @@ -1134,8 +1127,6 @@ def chebval(x, c, tensor=True): over the columns of `c` for the evaluation. This keyword is useful when `c` is multidimensional. The default value is True. - .. versionadded:: 1.7.0 - Returns ------- values : ndarray, algebra_like @@ -1156,7 +1147,7 @@ def chebval(x, c, tensor=True): if isinstance(x, (tuple, list)): x = np.asarray(x) if isinstance(x, np.ndarray) and tensor: - c = c.reshape(c.shape + (1,)*x.ndim) + c = c.reshape(c.shape + (1,) * x.ndim) if len(c) == 1: c0 = c[0] @@ -1165,14 +1156,14 @@ def chebval(x, c, tensor=True): c0 = c[0] c1 = c[1] else: - x2 = 2*x + x2 = 2 * x c0 = c[-2] c1 = c[-1] for i in range(3, len(c) + 1): tmp = c0 c0 = c[-i] - c1 - c1 = tmp + c1*x2 - return c0 + c1*x + c1 = tmp + c1 * x2 + return c0 + c1 * x def chebval2d(x, y, c): @@ -1214,12 +1205,6 @@ def chebval2d(x, y, c): See Also -------- chebval, chebgrid2d, chebval3d, chebgrid3d - - Notes - ----- - - .. 
versionadded:: 1.7.0 - """ return pu._valnd(chebval, c, x, y) @@ -1267,12 +1252,6 @@ def chebgrid2d(x, y, c): See Also -------- chebval, chebval2d, chebval3d, chebgrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - """ return pu._gridnd(chebval, c, x, y) @@ -1318,12 +1297,6 @@ def chebval3d(x, y, z, c): See Also -------- chebval, chebval2d, chebgrid2d, chebgrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - """ return pu._valnd(chebval, c, x, y, z) @@ -1374,12 +1347,6 @@ def chebgrid3d(x, y, z, c): See Also -------- chebval, chebval2d, chebgrid2d, chebval3d - - Notes - ----- - - .. versionadded:: 1.7.0 - """ return pu._gridnd(chebval, c, x, y, z) @@ -1428,12 +1395,12 @@ def chebvander(x, deg): dtyp = x.dtype v = np.empty(dims, dtype=dtyp) # Use forward recursion to generate the entries. - v[0] = x*0 + 1 + v[0] = x * 0 + 1 if ideg > 0: - x2 = 2*x + x2 = 2 * x v[1] = x for i in range(2, ideg + 1): - v[i] = v[i-1]*x2 - v[i-2] + v[i] = v[i - 1] * x2 - v[i - 2] return np.moveaxis(v, 0, -1) @@ -1453,7 +1420,7 @@ def chebvander2d(x, y, deg): correspond to the elements of a 2-D coefficient array `c` of shape (xdeg + 1, ydeg + 1) in the order - .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + .. math:: c_{00}, c_{01}, c_{02}, ... , c_{10}, c_{11}, c_{12}, ... and ``np.dot(V, c.flat)`` and ``chebval2d(x, y, c)`` will be the same up to roundoff. This equivalence is useful both for least squares @@ -1480,12 +1447,6 @@ def chebvander2d(x, y, deg): See Also -------- chebvander, chebvander3d, chebval2d, chebval3d - - Notes - ----- - - .. versionadded:: 1.7.0 - """ return pu._vander_nd_flat((chebvander, chebvander), (x, y), deg) @@ -1534,12 +1495,6 @@ def chebvander3d(x, y, z, deg): See Also -------- chebvander, chebvander3d, chebval2d, chebval3d - - Notes - ----- - - .. 
versionadded:: 1.7.0 - """ return pu._vander_nd_flat((chebvander, chebvander, chebvander), (x, y, z), deg) @@ -1588,8 +1543,6 @@ def chebfit(x, y, deg, rcond=None, full=False, w=None): same variance. When using inverse-variance weighting, use ``w[i] = 1/sigma(y[i])``. The default value is None. - .. versionadded:: 1.5.0 - Returns ------- coef : ndarray, shape (M,) or (M, K) @@ -1690,29 +1643,23 @@ def chebcompanion(c): ------- mat : ndarray Scaled companion matrix of dimensions (deg, deg). - - Notes - ----- - - .. versionadded:: 1.7.0 - """ # c is a trimmed copy [c] = pu.as_series([c]) if len(c) < 2: raise ValueError('Series must have maximum degree of at least 1.') if len(c) == 2: - return np.array([[-c[0]/c[1]]]) + return np.array([[-c[0] / c[1]]]) n = len(c) - 1 mat = np.zeros((n, n), dtype=c.dtype) - scl = np.array([1.] + [np.sqrt(.5)]*(n-1)) - top = mat.reshape(-1)[1::n+1] - bot = mat.reshape(-1)[n::n+1] + scl = np.array([1.] + [np.sqrt(.5)] * (n - 1)) + top = mat.reshape(-1)[1::n + 1] + bot = mat.reshape(-1)[n::n + 1] top[0] = np.sqrt(.5) - top[1:] = 1/2 + top[1:] = 1 / 2 bot[...] = top - mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*.5 + mat[:, -1] -= (c[:-1] / c[-1]) * (scl / scl[-1]) * .5 return mat @@ -1768,11 +1715,11 @@ def chebroots(c): if len(c) < 2: return np.array([], dtype=c.dtype) if len(c) == 2: - return np.array([-c[0]/c[1]]) + return np.array([-c[0] / c[1]]) # rotated companion matrix reduces error - m = chebcompanion(c)[::-1,::-1] - r = la.eigvals(m) + m = chebcompanion(c)[::-1, ::-1] + r = np.linalg.eigvals(m) r.sort() return r @@ -1785,8 +1732,6 @@ def chebinterpolate(func, deg, args=()): series tends to a minmax approximation to `func` with increasing `deg` if the function is continuous in the interval. - .. 
versionadded:: 1.14.0 - Parameters ---------- func : function @@ -1815,7 +1760,6 @@ def chebinterpolate(func, deg, args=()): Notes ----- - The Chebyshev polynomials used in the interpolation are orthogonal when sampled at the Chebyshev points of the first kind. If it is desired to constrain some of the coefficients they can simply be set to the desired @@ -1839,7 +1783,7 @@ def chebinterpolate(func, deg, args=()): m = chebvander(xcheb, deg) c = np.dot(m.T, yfunc) c[0] /= order - c[1:] /= 0.5*order + c[1:] /= 0.5 * order return c @@ -1867,9 +1811,6 @@ def chebgauss(deg): Notes ----- - - .. versionadded:: 1.7.0 - The results have only been tested up to degree 100, higher degrees may be problematic. For Gauss-Chebyshev there are closed form solutions for the sample points and weights. If n = `deg`, then @@ -1883,8 +1824,8 @@ def chebgauss(deg): if ideg <= 0: raise ValueError("deg must be a positive integer") - x = np.cos(np.pi * np.arange(1, 2*ideg, 2) / (2.0*ideg)) - w = np.ones(ideg)*(np.pi/ideg) + x = np.cos(np.pi * np.arange(1, 2 * ideg, 2) / (2.0 * ideg)) + w = np.ones(ideg) * (np.pi / ideg) return x, w @@ -1906,14 +1847,8 @@ def chebweight(x): ------- w : ndarray The weight function at `x`. - - Notes - ----- - - .. versionadded:: 1.7.0 - """ - w = 1./(np.sqrt(1. + x) * np.sqrt(1. - x)) + w = 1. / (np.sqrt(1. + x) * np.sqrt(1. - x)) return w @@ -1937,12 +1872,6 @@ def chebpts1(npts): See Also -------- chebpts2 - - Notes - ----- - - .. versionadded:: 1.5.0 - """ _npts = int(npts) if _npts != npts: @@ -1950,7 +1879,7 @@ def chebpts1(npts): if _npts < 1: raise ValueError("npts must be >= 1") - x = 0.5 * np.pi / _npts * np.arange(-_npts+1, _npts+1, 2) + x = 0.5 * np.pi / _npts * np.arange(-_npts + 1, _npts + 1, 2) return np.sin(x) @@ -1971,12 +1900,6 @@ def chebpts2(npts): ------- pts : ndarray The Chebyshev points of the second kind. - - Notes - ----- - - .. 
versionadded:: 1.5.0 - """ _npts = int(npts) if _npts != npts: @@ -2007,11 +1930,9 @@ class Chebyshev(ABCPolyBase): domain : (2,) array_like, optional Domain to use. The interval ``[domain[0], domain[1]]`` is mapped to the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is [-1, 1]. + The default value is [-1., 1.]. window : (2,) array_like, optional - Window, see `domain` for its use. The default value is [-1, 1]. - - .. versionadded:: 1.6.0 + Window, see `domain` for its use. The default value is [-1., 1.]. symbol : str, optional Symbol used to represent the independent variable in string representations of the polynomial expression, e.g. for printing. @@ -2043,8 +1964,6 @@ def interpolate(cls, func, deg, domain=None, args=()): tends to a minmax approximation of `func` when the function is continuous in the domain. - .. versionadded:: 1.14.0 - Parameters ---------- func : function diff --git a/numpy/polynomial/chebyshev.pyi b/numpy/polynomial/chebyshev.pyi index f8cbacfc2f96..857ce8f6f377 100644 --- a/numpy/polynomial/chebyshev.pyi +++ b/numpy/polynomial/chebyshev.pyi @@ -1,52 +1,170 @@ -from typing import Any - -from numpy import int_ -from numpy.typing import NDArray -from numpy.polynomial._polybase import ABCPolyBase -from numpy.polynomial.polyutils import trimcoef - -__all__: list[str] - -chebtrim = trimcoef - -def poly2cheb(pol): ... -def cheb2poly(c): ... - -chebdomain: NDArray[int_] -chebzero: NDArray[int_] -chebone: NDArray[int_] -chebx: NDArray[int_] - -def chebline(off, scl): ... -def chebfromroots(roots): ... -def chebadd(c1, c2): ... -def chebsub(c1, c2): ... -def chebmulx(c): ... -def chebmul(c1, c2): ... -def chebdiv(c1, c2): ... -def chebpow(c, pow, maxpower=...): ... -def chebder(c, m=..., scl=..., axis=...): ... -def chebint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ... -def chebval(x, c, tensor=...): ... -def chebval2d(x, y, c): ... -def chebgrid2d(x, y, c): ... -def chebval3d(x, y, z, c): ... 
-def chebgrid3d(x, y, z, c): ... -def chebvander(x, deg): ... -def chebvander2d(x, y, deg): ... -def chebvander3d(x, y, z, deg): ... -def chebfit(x, y, deg, rcond=..., full=..., w=...): ... -def chebcompanion(c): ... -def chebroots(c): ... -def chebinterpolate(func, deg, args = ...): ... -def chebgauss(deg): ... -def chebweight(x): ... -def chebpts1(npts): ... -def chebpts2(npts): ... - -class Chebyshev(ABCPolyBase): +from _typeshed import ConvertibleToInt +from collections.abc import Callable, Iterable +from typing import Any, ClassVar, Concatenate, Final, Literal as L, Self, overload + +import numpy as np +import numpy.typing as npt +from numpy._typing import _IntLike_co + +from ._polybase import ABCPolyBase +from ._polytypes import ( + _Array1, + _Array2, + _CoefSeries, + _FuncBinOp, + _FuncCompanion, + _FuncDer, + _FuncFit, + _FuncFromRoots, + _FuncGauss, + _FuncInteg, + _FuncLine, + _FuncPoly2Ortho, + _FuncPow, + _FuncRoots, + _FuncUnOp, + _FuncVal, + _FuncVal2D, + _FuncVal3D, + _FuncVander, + _FuncVander2D, + _FuncVander3D, + _FuncWeight, + _Series, + _SeriesLikeCoef_co, +) +from .polyutils import trimcoef as chebtrim + +__all__ = [ + "chebzero", + "chebone", + "chebx", + "chebdomain", + "chebline", + "chebadd", + "chebsub", + "chebmulx", + "chebmul", + "chebdiv", + "chebpow", + "chebval", + "chebder", + "chebint", + "cheb2poly", + "poly2cheb", + "chebfromroots", + "chebvander", + "chebfit", + "chebtrim", + "chebroots", + "chebpts1", + "chebpts2", + "Chebyshev", + "chebval2d", + "chebval3d", + "chebgrid2d", + "chebgrid3d", + "chebvander2d", + "chebvander3d", + "chebcompanion", + "chebgauss", + "chebweight", + "chebinterpolate", +] + +### + +def _cseries_to_zseries[ScalarT: np.number | np.object_](c: npt.NDArray[ScalarT]) -> _Series[ScalarT]: ... +def _zseries_to_cseries[ScalarT: np.number | np.object_](zs: npt.NDArray[ScalarT]) -> _Series[ScalarT]: ... 
+def _zseries_mul[ScalarT: np.number | np.object_](z1: npt.NDArray[ScalarT], z2: npt.NDArray[ScalarT]) -> _Series[ScalarT]: ... +def _zseries_div[ScalarT: np.number | np.object_](z1: npt.NDArray[ScalarT], z2: npt.NDArray[ScalarT]) -> _Series[ScalarT]: ... +def _zseries_der[ScalarT: np.number | np.object_](zs: npt.NDArray[ScalarT]) -> _Series[ScalarT]: ... +def _zseries_int[ScalarT: np.number | np.object_](zs: npt.NDArray[ScalarT]) -> _Series[ScalarT]: ... + +poly2cheb: Final[_FuncPoly2Ortho] = ... +cheb2poly: Final[_FuncUnOp] = ... + +chebdomain: Final[_Array2[np.float64]] = ... +chebzero: Final[_Array1[np.int_]] = ... +chebone: Final[_Array1[np.int_]] = ... +chebx: Final[_Array2[np.int_]] = ... + +chebline: Final[_FuncLine] = ... +chebfromroots: Final[_FuncFromRoots] = ... +chebadd: Final[_FuncBinOp] = ... +chebsub: Final[_FuncBinOp] = ... +chebmulx: Final[_FuncUnOp] = ... +chebmul: Final[_FuncBinOp] = ... +chebdiv: Final[_FuncBinOp] = ... +chebpow: Final[_FuncPow] = ... +chebder: Final[_FuncDer] = ... +chebint: Final[_FuncInteg] = ... +chebval: Final[_FuncVal] = ... +chebval2d: Final[_FuncVal2D] = ... +chebval3d: Final[_FuncVal3D] = ... +chebgrid2d: Final[_FuncVal2D] = ... +chebgrid3d: Final[_FuncVal3D] = ... +chebvander: Final[_FuncVander] = ... +chebvander2d: Final[_FuncVander2D] = ... +chebvander3d: Final[_FuncVander3D] = ... +chebfit: Final[_FuncFit] = ... +chebcompanion: Final[_FuncCompanion] = ... +chebroots: Final[_FuncRoots] = ... +chebgauss: Final[_FuncGauss] = ... +chebweight: Final[_FuncWeight] = ... +def chebpts1(npts: ConvertibleToInt) -> np.ndarray[tuple[int], np.dtype[np.float64]]: ... +def chebpts2(npts: ConvertibleToInt) -> np.ndarray[tuple[int], np.dtype[np.float64]]: ... + +# keep in sync with `Chebyshev.interpolate` (minus `domain` parameter) +@overload +def chebinterpolate( + func: np.ufunc, + deg: _IntLike_co, + args: tuple[()] = (), +) -> npt.NDArray[np.float64 | np.complex128 | np.object_]: ... 
+@overload +def chebinterpolate[CoefScalarT: np.number | np.bool | np.object_]( + func: Callable[[npt.NDArray[np.float64]], CoefScalarT], + deg: _IntLike_co, + args: tuple[()] = (), +) -> npt.NDArray[CoefScalarT]: ... +@overload +def chebinterpolate[CoefScalarT: np.number | np.bool | np.object_]( + func: Callable[Concatenate[npt.NDArray[np.float64], ...], CoefScalarT], + deg: _IntLike_co, + args: Iterable[Any], +) -> npt.NDArray[CoefScalarT]: ... + +class Chebyshev(ABCPolyBase[L["T"]]): + basis_name: ClassVar[L["T"]] = "T" # pyright: ignore[reportIncompatibleMethodOverride] # pyrefly: ignore[bad-override] + domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + + @overload + @classmethod + def interpolate( + cls, + func: Callable[[npt.NDArray[np.float64]], _CoefSeries], + deg: _IntLike_co, + domain: _SeriesLikeCoef_co | None = None, + args: tuple[()] = (), + ) -> Self: ... + @overload + @classmethod + def interpolate( + cls, + func: Callable[Concatenate[npt.NDArray[np.float64], ...], _CoefSeries], + deg: _IntLike_co, + domain: _SeriesLikeCoef_co | None = None, + *, + args: Iterable[Any], + ) -> Self: ... + @overload @classmethod - def interpolate(cls, func, deg, domain=..., args = ...): ... - domain: Any - window: Any - basis_name: Any + def interpolate( + cls, + func: Callable[Concatenate[npt.NDArray[np.float64], ...], _CoefSeries], + deg: _IntLike_co, + domain: _SeriesLikeCoef_co | None, + args: Iterable[Any], + ) -> Self: ... diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py index 58d18cb0d88c..b2970c914957 100644 --- a/numpy/polynomial/hermite.py +++ b/numpy/polynomial/hermite.py @@ -76,8 +76,6 @@ """ import numpy as np -import numpy.linalg as la -from numpy.lib.array_utils import normalize_axis_index from . 
import polyutils as pu from ._polybase import ABCPolyBase @@ -177,7 +175,7 @@ def herm2poly(c): array([0., 1., 2., 3.]) """ - from .polynomial import polyadd, polysub, polymulx + from .polynomial import polyadd, polymulx, polysub [c] = pu.as_series([c]) n = len(c) @@ -192,9 +190,9 @@ def herm2poly(c): # i is the current degree of c1 for i in range(n - 1, 1, -1): tmp = c0 - c0 = polysub(c[i - 2], c1*(2*(i - 1))) - c1 = polyadd(tmp, polymulx(c1)*2) - return polyadd(c0, polymulx(c1)*2) + c0 = polysub(c[i - 2], c1 * (2 * (i - 1))) + c1 = polyadd(tmp, polymulx(c1) * 2) + return polyadd(c0, polymulx(c1) * 2) # @@ -212,7 +210,7 @@ def herm2poly(c): hermone = np.array([1]) # Hermite coefficients representing the identity x. -hermx = np.array([0, 1/2]) +hermx = np.array([0, 1 / 2]) def hermline(off, scl): @@ -250,7 +248,7 @@ def hermline(off, scl): """ if scl != 0: - return np.array([off, scl/2]) + return np.array([off, scl / 2]) else: return np.array([off]) @@ -436,11 +434,11 @@ def hermmulx(c): return c prd = np.empty(len(c) + 1, dtype=c.dtype) - prd[0] = c[0]*0 - prd[1] = c[0]/2 + prd[0] = c[0] * 0 + prd[1] = c[0] / 2 for i in range(1, len(c)): - prd[i + 1] = c[i]/2 - prd[i - 1] += c[i]*i + prd[i + 1] = c[i] / 2 + prd[i - 1] += c[i] * i return prd @@ -493,21 +491,21 @@ def hermmul(c1, c2): xs = c2 if len(c) == 1: - c0 = c[0]*xs + c0 = c[0] * xs c1 = 0 elif len(c) == 2: - c0 = c[0]*xs - c1 = c[1]*xs + c0 = c[0] * xs + c1 = c[1] * xs else: nd = len(c) - c0 = c[-2]*xs - c1 = c[-1]*xs + c0 = c[-2] * xs + c1 = c[-1] * xs for i in range(3, len(c) + 1): tmp = c0 nd = nd - 1 - c0 = hermsub(c[-i]*xs, c1*(2*(nd - 1))) - c1 = hermadd(tmp, hermmulx(c1)*2) - return hermadd(c0, hermmulx(c1)*2) + c0 = hermsub(c[-i] * xs, c1 * (2 * (nd - 1))) + c1 = hermadd(tmp, hermmulx(c1) * 2) + return hermadd(c0, hermmulx(c1) * 2) def hermdiv(c1, c2): @@ -623,8 +621,6 @@ def hermder(c, m=1, scl=1, axis=0): axis : int, optional Axis over which the derivative is taken. (Default: 0). - .. 
versionadded:: 1.7.0 - Returns ------- der : ndarray @@ -657,7 +653,7 @@ def hermder(c, m=1, scl=1, axis=0): iaxis = pu._as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -665,14 +661,14 @@ def hermder(c, m=1, scl=1, axis=0): c = np.moveaxis(c, iaxis, 0) n = len(c) if cnt >= n: - c = c[:1]*0 + c = c[:1] * 0 else: for i in range(cnt): n = n - 1 c *= scl der = np.empty((n,) + c.shape[1:], dtype=c.dtype) for j in range(n, 0, -1): - der[j - 1] = (2*j)*c[j] + der[j - 1] = (2 * j) * c[j] c = der c = np.moveaxis(c, 0, iaxis) return c @@ -716,8 +712,6 @@ def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0): axis : int, optional Axis over which the integral is taken. (Default: 0). - .. versionadded:: 1.7.0 - Returns ------- S : ndarray @@ -776,13 +770,13 @@ def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c c = np.moveaxis(c, iaxis, 0) - k = list(k) + [0]*(cnt - len(k)) + k = list(k) + [0] * (cnt - len(k)) for i in range(cnt): n = len(c) c *= scl @@ -790,10 +784,10 @@ def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0): c[0] += k[i] else: tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) - tmp[0] = c[0]*0 - tmp[1] = c[0]/2 + tmp[0] = c[0] * 0 + tmp[1] = c[0] / 2 for j in range(1, n): - tmp[j + 1] = c[j]/(2*(j + 1)) + tmp[j + 1] = c[j] / (2 * (j + 1)) tmp[0] += k[i] - hermval(lbnd, tmp) c = tmp c = np.moveaxis(c, 0, iaxis) @@ -802,7 +796,7 @@ def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0): def hermval(x, c, tensor=True): """ - Evaluate an Hermite series at points x. + Evaluate a Hermite series at points x. 
If `c` is of length ``n + 1``, this function returns the value: @@ -843,8 +837,6 @@ def hermval(x, c, tensor=True): over the columns of `c` for the evaluation. This keyword is useful when `c` is multidimensional. The default value is True. - .. versionadded:: 1.7.0 - Returns ------- values : ndarray, algebra_like @@ -875,9 +867,9 @@ def hermval(x, c, tensor=True): if isinstance(x, (tuple, list)): x = np.asarray(x) if isinstance(x, np.ndarray) and tensor: - c = c.reshape(c.shape + (1,)*x.ndim) + c = c.reshape(c.shape + (1,) * x.ndim) - x2 = x*2 + x2 = x * 2 if len(c) == 1: c0 = c[0] c1 = 0 @@ -891,9 +883,9 @@ def hermval(x, c, tensor=True): for i in range(3, len(c) + 1): tmp = c0 nd = nd - 1 - c0 = c[-i] - c1*(2*(nd - 1)) - c1 = tmp + c1*x2 - return c0 + c1*x2 + c0 = c[-i] - c1 * (2 * (nd - 1)) + c1 = tmp + c1 * x2 + return c0 + c1 * x2 def hermval2d(x, y, c): @@ -936,11 +928,6 @@ def hermval2d(x, y, c): -------- hermval, hermgrid2d, hermval3d, hermgrid3d - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial.hermite import hermval2d @@ -948,7 +935,7 @@ def hermval2d(x, y, c): >>> y = [4, 5] >>> c = [[1, 2, 3], [4, 5, 6]] >>> hermval2d(x, y, c) - array ([1035., 2883.]) + array([1035., 2883.]) """ return pu._valnd(hermval, c, x, y) @@ -998,11 +985,6 @@ def hermgrid2d(x, y, c): -------- hermval, hermval2d, hermval3d, hermgrid3d - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial.hermite import hermgrid2d @@ -1060,11 +1042,6 @@ def hermval3d(x, y, z, c): -------- hermval, hermval2d, hermgrid2d, hermgrid3d - Notes - ----- - - .. 
versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial.hermite import hermval3d @@ -1074,7 +1051,7 @@ def hermval3d(x, y, z, c): >>> c = [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]] >>> hermval3d(x, y, z, c) array([ 40077., 120131.]) - + """ return pu._valnd(hermval, c, x, y, z) @@ -1126,11 +1103,6 @@ def hermgrid3d(x, y, z, c): -------- hermval, hermval2d, hermgrid2d, hermval3d - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial.hermite import hermgrid3d @@ -1184,6 +1156,7 @@ def hermvander(x, deg): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.hermite import hermvander >>> x = np.array([-1, 0, 1]) >>> hermvander(x, 3) @@ -1200,12 +1173,12 @@ def hermvander(x, deg): dims = (ideg + 1,) + x.shape dtyp = x.dtype v = np.empty(dims, dtype=dtyp) - v[0] = x*0 + 1 + v[0] = x * 0 + 1 if ideg > 0: - x2 = x*2 + x2 = x * 2 v[1] = x2 for i in range(2, ideg + 1): - v[i] = (v[i-1]*x2 - v[i-2]*(2*(i - 1))) + v[i] = (v[i - 1] * x2 - v[i - 2] * (2 * (i - 1))) return np.moveaxis(v, 0, -1) @@ -1225,7 +1198,7 @@ def hermvander2d(x, y, deg): correspond to the elements of a 2-D coefficient array `c` of shape (xdeg + 1, ydeg + 1) in the order - .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + .. math:: c_{00}, c_{01}, c_{02}, ... , c_{10}, c_{11}, c_{12}, ... and ``np.dot(V, c.flat)`` and ``hermval2d(x, y, c)`` will be the same up to roundoff. This equivalence is useful both for least squares @@ -1253,13 +1226,9 @@ def hermvander2d(x, y, deg): -------- hermvander, hermvander3d, hermval2d, hermval3d - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- + >>> import numpy as np >>> from numpy.polynomial.hermite import hermvander2d >>> x = np.array([-1, 0, 1]) >>> y = np.array([-1, 0, 1]) @@ -1317,11 +1286,6 @@ def hermvander3d(x, y, z, deg): -------- hermvander, hermvander3d, hermval2d, hermval3d - Notes - ----- - - .. 
versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial.hermite import hermvander3d @@ -1332,7 +1296,7 @@ def hermvander3d(x, y, z, deg): array([[ 1., -2., 2., -2., 4., -4.], [ 1., 0., -2., 0., 0., -0.], [ 1., 2., 2., 2., 4., 4.]]) - + """ return pu._vander_nd_flat((hermvander, hermvander, hermvander), (x, y, z), deg) @@ -1458,6 +1422,7 @@ def hermfit(x, y, deg, rcond=None, full=False, w=None): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.hermite import hermfit, hermval >>> x = np.linspace(-10, 10) >>> rng = np.random.default_rng() @@ -1474,7 +1439,7 @@ def hermcompanion(c): """Return the scaled companion matrix of c. The basis polynomials are scaled so that the companion matrix is - symmetric when `c` is an Hermite basis polynomial. This provides + symmetric when `c` is a Hermite basis polynomial. This provides better eigenvalue estimates than the unscaled case and for basis polynomials the eigenvalues are guaranteed to be real if `numpy.linalg.eigvalsh` is used to obtain them. @@ -1490,11 +1455,6 @@ def hermcompanion(c): mat : ndarray Scaled companion matrix of dimensions (deg, deg). - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial.hermite import hermcompanion @@ -1508,17 +1468,17 @@ def hermcompanion(c): if len(c) < 2: raise ValueError('Series must have maximum degree of at least 1.') if len(c) == 2: - return np.array([[-.5*c[0]/c[1]]]) + return np.array([[-.5 * c[0] / c[1]]]) n = len(c) - 1 mat = np.zeros((n, n), dtype=c.dtype) - scl = np.hstack((1., 1./np.sqrt(2.*np.arange(n - 1, 0, -1)))) + scl = np.hstack((1., 1. / np.sqrt(2. * np.arange(n - 1, 0, -1)))) scl = np.multiply.accumulate(scl)[::-1] - top = mat.reshape(-1)[1::n+1] - bot = mat.reshape(-1)[n::n+1] - top[...] = np.sqrt(.5*np.arange(1, n)) + top = mat.reshape(-1)[1::n + 1] + bot = mat.reshape(-1)[n::n + 1] + top[...] = np.sqrt(.5 * np.arange(1, n)) bot[...] 
= top - mat[:, -1] -= scl*c[:-1]/(2.0*c[-1]) + mat[:, -1] -= scl * c[:-1] / (2.0 * c[-1]) return mat @@ -1577,12 +1537,16 @@ def hermroots(c): if len(c) <= 1: return np.array([], dtype=c.dtype) if len(c) == 2: - return np.array([-.5*c[0]/c[1]]) + return np.array([-.5 * c[0] / c[1]]) # rotated companion matrix reduces error - m = hermcompanion(c)[::-1,::-1] - r = la.eigvals(m) + m = hermcompanion(c)[::-1, ::-1] + r = np.linalg.eigvals(m) r.sort() + + # backwards compat: return real values if possible + from numpy.linalg._linalg import _to_real_if_imag_zero + r = _to_real_if_imag_zero(r, m) return r @@ -1608,25 +1572,23 @@ def _normed_hermite_n(x, n): Notes ----- - .. versionadded:: 1.10.0 - This function is needed for finding the Gauss points and integration weights for high degrees. The values of the standard Hermite functions overflow when n >= 207. """ if n == 0: - return np.full(x.shape, 1/np.sqrt(np.sqrt(np.pi))) + return np.full(x.shape, 1 / np.sqrt(np.sqrt(np.pi))) c0 = 0. - c1 = 1./np.sqrt(np.sqrt(np.pi)) + c1 = 1. / np.sqrt(np.sqrt(np.pi)) nd = float(n) for i in range(n - 1): tmp = c0 - c0 = -c1*np.sqrt((nd - 1.)/nd) - c1 = tmp + c1*x*np.sqrt(2./nd) + c0 = -c1 * np.sqrt((nd - 1.) / nd) + c1 = tmp + c1 * x * np.sqrt(2. / nd) nd = nd - 1.0 - return c0 + c1*x*np.sqrt(2) + return c0 + c1 * x * np.sqrt(2) def hermgauss(deg): @@ -1652,9 +1614,6 @@ def hermgauss(deg): Notes ----- - - .. versionadded:: 1.7.0 - The results have only been tested up to degree 100, higher degrees may be problematic. The weights are determined by using the fact that @@ -1677,24 +1636,24 @@ def hermgauss(deg): # first approximation of roots. We use the fact that the companion # matrix is symmetric in this case in order to obtain better zeros. 
- c = np.array([0]*deg + [1], dtype=np.float64) + c = np.array([0] * deg + [1], dtype=np.float64) m = hermcompanion(c) - x = la.eigvalsh(m) + x = np.linalg.eigvalsh(m) # improve roots by one application of Newton dy = _normed_hermite_n(x, ideg) - df = _normed_hermite_n(x, ideg - 1) * np.sqrt(2*ideg) - x -= dy/df + df = _normed_hermite_n(x, ideg - 1) * np.sqrt(2 * ideg) + x -= dy / df # compute the weights. We scale the factor to avoid possible numerical # overflow. fm = _normed_hermite_n(x, ideg - 1) fm /= np.abs(fm).max() - w = 1/(fm * fm) + w = 1 / (fm * fm) # for Hermite we can also symmetrize - w = (w + w[::-1])/2 - x = (x - x[::-1])/2 + w = (w + w[::-1]) / 2 + x = (x - x[::-1]) / 2 # scale w to get the right value w *= np.sqrt(np.pi) / w.sum() @@ -1720,13 +1679,9 @@ def hermweight(x): w : ndarray The weight function at `x`. - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- + >>> import numpy as np >>> from numpy.polynomial.hermite import hermweight >>> x = np.arange(-2, 2) >>> hermweight(x) @@ -1742,7 +1697,7 @@ def hermweight(x): # class Hermite(ABCPolyBase): - """An Hermite series class. + """A Hermite series class. The Hermite class provides the standard Python numerical methods '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the @@ -1756,11 +1711,9 @@ class Hermite(ABCPolyBase): domain : (2,) array_like, optional Domain to use. The interval ``[domain[0], domain[1]]`` is mapped to the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is [-1, 1]. + The default value is [-1., 1.]. window : (2,) array_like, optional - Window, see `domain` for its use. The default value is [-1, 1]. - - .. versionadded:: 1.6.0 + Window, see `domain` for its use. The default value is [-1., 1.]. symbol : str, optional Symbol used to represent the independent variable in string representations of the polynomial expression, e.g. for printing. 
diff --git a/numpy/polynomial/hermite.pyi b/numpy/polynomial/hermite.pyi index 0a1628ab39c1..17375c9210c4 100644 --- a/numpy/polynomial/hermite.pyi +++ b/numpy/polynomial/hermite.pyi @@ -1,47 +1,107 @@ -from typing import Any - -from numpy import int_, float64 -from numpy.typing import NDArray -from numpy.polynomial._polybase import ABCPolyBase -from numpy.polynomial.polyutils import trimcoef - -__all__: list[str] - -hermtrim = trimcoef - -def poly2herm(pol): ... -def herm2poly(c): ... - -hermdomain: NDArray[int_] -hermzero: NDArray[int_] -hermone: NDArray[int_] -hermx: NDArray[float64] - -def hermline(off, scl): ... -def hermfromroots(roots): ... -def hermadd(c1, c2): ... -def hermsub(c1, c2): ... -def hermmulx(c): ... -def hermmul(c1, c2): ... -def hermdiv(c1, c2): ... -def hermpow(c, pow, maxpower=...): ... -def hermder(c, m=..., scl=..., axis=...): ... -def hermint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ... -def hermval(x, c, tensor=...): ... -def hermval2d(x, y, c): ... -def hermgrid2d(x, y, c): ... -def hermval3d(x, y, z, c): ... -def hermgrid3d(x, y, z, c): ... -def hermvander(x, deg): ... -def hermvander2d(x, y, deg): ... -def hermvander3d(x, y, z, deg): ... -def hermfit(x, y, deg, rcond=..., full=..., w=...): ... -def hermcompanion(c): ... -def hermroots(c): ... -def hermgauss(deg): ... -def hermweight(x): ... 
- -class Hermite(ABCPolyBase): - domain: Any - window: Any - basis_name: Any +from typing import Any, ClassVar, Final, Literal as L + +import numpy as np +from numpy._typing import _Shape + +from ._polybase import ABCPolyBase +from ._polytypes import ( + _Array1, + _Array2, + _FuncBinOp, + _FuncCompanion, + _FuncDer, + _FuncFit, + _FuncFromRoots, + _FuncGauss, + _FuncInteg, + _FuncLine, + _FuncPoly2Ortho, + _FuncPow, + _FuncRoots, + _FuncUnOp, + _FuncVal, + _FuncVal2D, + _FuncVal3D, + _FuncVander, + _FuncVander2D, + _FuncVander3D, + _FuncWeight, +) +from .polyutils import trimcoef as hermtrim + +__all__ = [ + "hermzero", + "hermone", + "hermx", + "hermdomain", + "hermline", + "hermadd", + "hermsub", + "hermmulx", + "hermmul", + "hermdiv", + "hermpow", + "hermval", + "hermder", + "hermint", + "herm2poly", + "poly2herm", + "hermfromroots", + "hermvander", + "hermfit", + "hermtrim", + "hermroots", + "Hermite", + "hermval2d", + "hermval3d", + "hermgrid2d", + "hermgrid3d", + "hermvander2d", + "hermvander3d", + "hermcompanion", + "hermgauss", + "hermweight", +] + +poly2herm: Final[_FuncPoly2Ortho] = ... +herm2poly: Final[_FuncUnOp] = ... + +hermdomain: Final[_Array2[np.float64]] = ... +hermzero: Final[_Array1[np.int_]] = ... +hermone: Final[_Array1[np.int_]] = ... +hermx: Final[_Array2[np.int_]] = ... + +hermline: Final[_FuncLine] = ... +hermfromroots: Final[_FuncFromRoots] = ... +hermadd: Final[_FuncBinOp] = ... +hermsub: Final[_FuncBinOp] = ... +hermmulx: Final[_FuncUnOp] = ... +hermmul: Final[_FuncBinOp] = ... +hermdiv: Final[_FuncBinOp] = ... +hermpow: Final[_FuncPow] = ... +hermder: Final[_FuncDer] = ... +hermint: Final[_FuncInteg] = ... +hermval: Final[_FuncVal] = ... +hermval2d: Final[_FuncVal2D] = ... +hermval3d: Final[_FuncVal3D] = ... +hermgrid2d: Final[_FuncVal2D] = ... +hermgrid3d: Final[_FuncVal3D] = ... +hermvander: Final[_FuncVander] = ... +hermvander2d: Final[_FuncVander2D] = ... +hermvander3d: Final[_FuncVander3D] = ... +hermfit: Final[_FuncFit] = ... 
+hermcompanion: Final[_FuncCompanion] = ... +hermroots: Final[_FuncRoots] = ... + +def _normed_hermite_n[ShapeT: _Shape]( + x: np.ndarray[ShapeT, np.dtype[np.float64]], + n: int, +) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... + +hermgauss: Final[_FuncGauss] = ... +hermweight: Final[_FuncWeight] = ... + +class Hermite(ABCPolyBase[L["H"]]): + basis_name: ClassVar[L["H"]] = "H" # pyright: ignore[reportIncompatibleMethodOverride] # pyrefly: ignore[bad-override] + domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py index 0aaf2a78c768..730b60804e9a 100644 --- a/numpy/polynomial/hermite_e.py +++ b/numpy/polynomial/hermite_e.py @@ -76,8 +76,6 @@ """ import numpy as np -import numpy.linalg as la -from numpy.lib.array_utils import normalize_axis_index from . import polyutils as pu from ._polybase import ABCPolyBase @@ -127,6 +125,7 @@ def poly2herme(pol): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.hermite_e import poly2herme >>> poly2herme(np.arange(4)) array([ 2., 10., 2., 3.]) @@ -178,7 +177,7 @@ def herme2poly(c): array([0., 1., 2., 3.]) """ - from .polynomial import polyadd, polysub, polymulx + from .polynomial import polyadd, polymulx, polysub [c] = pu.as_series([c]) n = len(c) @@ -192,7 +191,7 @@ def herme2poly(c): # i is the current degree of c1 for i in range(n - 1, 1, -1): tmp = c0 - c0 = polysub(c[i - 2], c1*(i - 1)) + c0 = polysub(c[i - 2], c1 * (i - 1)) c1 = polyadd(tmp, polymulx(c1)) return polyadd(c0, polymulx(c1)) @@ -408,6 +407,10 @@ def hermemulx(c): out : ndarray Array representing the result of the multiplication. 
+ See Also + -------- + hermeadd, hermesub, hermemul, hermediv, hermepow + Notes ----- The multiplication uses the recursion relationship for Hermite @@ -431,11 +434,11 @@ def hermemulx(c): return c prd = np.empty(len(c) + 1, dtype=c.dtype) - prd[0] = c[0]*0 + prd[0] = c[0] * 0 prd[1] = c[0] for i in range(1, len(c)): prd[i + 1] = c[i] - prd[i - 1] += c[i]*i + prd[i - 1] += c[i] * i return prd @@ -488,19 +491,19 @@ def hermemul(c1, c2): xs = c2 if len(c) == 1: - c0 = c[0]*xs + c0 = c[0] * xs c1 = 0 elif len(c) == 2: - c0 = c[0]*xs - c1 = c[1]*xs + c0 = c[0] * xs + c1 = c[1] * xs else: nd = len(c) - c0 = c[-2]*xs - c1 = c[-1]*xs + c0 = c[-2] * xs + c1 = c[-1] * xs for i in range(3, len(c) + 1): tmp = c0 nd = nd - 1 - c0 = hermesub(c[-i]*xs, c1*(nd - 1)) + c0 = hermesub(c[-i] * xs, c1 * (nd - 1)) c1 = hermeadd(tmp, hermemulx(c1)) return hermeadd(c0, hermemulx(c1)) @@ -616,8 +619,6 @@ def hermeder(c, m=1, scl=1, axis=0): axis : int, optional Axis over which the derivative is taken. (Default: 0). - .. versionadded:: 1.7.0 - Returns ------- der : ndarray @@ -650,7 +651,7 @@ def hermeder(c, m=1, scl=1, axis=0): iaxis = pu._as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -658,14 +659,14 @@ def hermeder(c, m=1, scl=1, axis=0): c = np.moveaxis(c, iaxis, 0) n = len(c) if cnt >= n: - return c[:1]*0 + return c[:1] * 0 else: for i in range(cnt): n = n - 1 c *= scl der = np.empty((n,) + c.shape[1:], dtype=c.dtype) for j in range(n, 0, -1): - der[j - 1] = j*c[j] + der[j - 1] = j * c[j] c = der c = np.moveaxis(c, 0, iaxis) return c @@ -709,8 +710,6 @@ def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0): axis : int, optional Axis over which the integral is taken. (Default: 0). - .. 
versionadded:: 1.7.0 - Returns ------- S : ndarray @@ -769,13 +768,13 @@ def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c c = np.moveaxis(c, iaxis, 0) - k = list(k) + [0]*(cnt - len(k)) + k = list(k) + [0] * (cnt - len(k)) for i in range(cnt): n = len(c) c *= scl @@ -783,10 +782,10 @@ def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0): c[0] += k[i] else: tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) - tmp[0] = c[0]*0 + tmp[0] = c[0] * 0 tmp[1] = c[0] for j in range(1, n): - tmp[j + 1] = c[j]/(j + 1) + tmp[j + 1] = c[j] / (j + 1) tmp[0] += k[i] - hermeval(lbnd, tmp) c = tmp c = np.moveaxis(c, 0, iaxis) @@ -795,7 +794,7 @@ def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0): def hermeval(x, c, tensor=True): """ - Evaluate an HermiteE series at points x. + Evaluate a HermiteE series at points x. If `c` is of length ``n + 1``, this function returns the value: @@ -836,8 +835,6 @@ def hermeval(x, c, tensor=True): over the columns of `c` for the evaluation. This keyword is useful when `c` is multidimensional. The default value is True. - .. 
versionadded:: 1.7.0 - Returns ------- values : ndarray, algebra_like @@ -868,7 +865,7 @@ def hermeval(x, c, tensor=True): if isinstance(x, (tuple, list)): x = np.asarray(x) if isinstance(x, np.ndarray) and tensor: - c = c.reshape(c.shape + (1,)*x.ndim) + c = c.reshape(c.shape + (1,) * x.ndim) if len(c) == 1: c0 = c[0] @@ -883,9 +880,9 @@ def hermeval(x, c, tensor=True): for i in range(3, len(c) + 1): tmp = c0 nd = nd - 1 - c0 = c[-i] - c1*(nd - 1) - c1 = tmp + c1*x - return c0 + c1*x + c0 = c[-i] - c1 * (nd - 1) + c1 = tmp + c1 * x + return c0 + c1 * x def hermeval2d(x, y, c): @@ -927,12 +924,6 @@ def hermeval2d(x, y, c): See Also -------- hermeval, hermegrid2d, hermeval3d, hermegrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - """ return pu._valnd(hermeval, c, x, y) @@ -980,12 +971,6 @@ def hermegrid2d(x, y, c): See Also -------- hermeval, hermeval2d, hermeval3d, hermegrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - """ return pu._gridnd(hermeval, c, x, y) @@ -1031,12 +1016,6 @@ def hermeval3d(x, y, z, c): See Also -------- hermeval, hermeval2d, hermegrid2d, hermegrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - """ return pu._valnd(hermeval, c, x, y, z) @@ -1087,12 +1066,6 @@ def hermegrid3d(x, y, z, c): See Also -------- hermeval, hermeval2d, hermegrid2d, hermeval3d - - Notes - ----- - - .. 
versionadded:: 1.7.0 - """ return pu._gridnd(hermeval, c, x, y, z) @@ -1133,6 +1106,7 @@ def hermevander(x, deg): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.hermite_e import hermevander >>> x = np.array([-1, 0, 1]) >>> hermevander(x, 3) @@ -1149,11 +1123,11 @@ def hermevander(x, deg): dims = (ideg + 1,) + x.shape dtyp = x.dtype v = np.empty(dims, dtype=dtyp) - v[0] = x*0 + 1 + v[0] = x * 0 + 1 if ideg > 0: v[1] = x for i in range(2, ideg + 1): - v[i] = (v[i-1]*x - v[i-2]*(i - 1)) + v[i] = (v[i - 1] * x - v[i - 2] * (i - 1)) return np.moveaxis(v, 0, -1) @@ -1173,7 +1147,7 @@ def hermevander2d(x, y, deg): correspond to the elements of a 2-D coefficient array `c` of shape (xdeg + 1, ydeg + 1) in the order - .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + .. math:: c_{00}, c_{01}, c_{02}, ... , c_{10}, c_{11}, c_{12}, ... and ``np.dot(V, c.flat)`` and ``hermeval2d(x, y, c)`` will be the same up to roundoff. This equivalence is useful both for least squares @@ -1200,12 +1174,6 @@ def hermevander2d(x, y, deg): See Also -------- hermevander, hermevander3d, hermeval2d, hermeval3d - - Notes - ----- - - .. versionadded:: 1.7.0 - """ return pu._vander_nd_flat((hermevander, hermevander), (x, y), deg) @@ -1254,12 +1222,6 @@ def hermevander3d(x, y, z, deg): See Also -------- hermevander, hermevander3d, hermeval2d, hermeval3d - - Notes - ----- - - .. versionadded:: 1.7.0 - """ return pu._vander_nd_flat((hermevander, hermevander, hermevander), (x, y, z), deg) @@ -1385,6 +1347,7 @@ def hermefit(x, y, deg, rcond=None, full=False, w=None): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.hermite_e import hermefit, hermeval >>> x = np.linspace(-10, 10) >>> rng = np.random.default_rng() @@ -1402,7 +1365,7 @@ def hermecompanion(c): Return the scaled companion matrix of c. The basis polynomials are scaled so that the companion matrix is - symmetric when `c` is an HermiteE basis polynomial. 
This provides + symmetric when `c` is a HermiteE basis polynomial. This provides better eigenvalue estimates than the unscaled case and for basis polynomials the eigenvalues are guaranteed to be real if `numpy.linalg.eigvalsh` is used to obtain them. @@ -1417,29 +1380,23 @@ def hermecompanion(c): ------- mat : ndarray Scaled companion matrix of dimensions (deg, deg). - - Notes - ----- - - .. versionadded:: 1.7.0 - """ # c is a trimmed copy [c] = pu.as_series([c]) if len(c) < 2: raise ValueError('Series must have maximum degree of at least 1.') if len(c) == 2: - return np.array([[-c[0]/c[1]]]) + return np.array([[-c[0] / c[1]]]) n = len(c) - 1 mat = np.zeros((n, n), dtype=c.dtype) - scl = np.hstack((1., 1./np.sqrt(np.arange(n - 1, 0, -1)))) + scl = np.hstack((1., 1. / np.sqrt(np.arange(n - 1, 0, -1)))) scl = np.multiply.accumulate(scl)[::-1] - top = mat.reshape(-1)[1::n+1] - bot = mat.reshape(-1)[n::n+1] + top = mat.reshape(-1)[1::n + 1] + bot = mat.reshape(-1)[n::n + 1] top[...] = np.sqrt(np.arange(1, n)) bot[...] = top - mat[:, -1] -= scl*c[:-1]/c[-1] + mat[:, -1] -= scl * c[:-1] / c[-1] return mat @@ -1498,11 +1455,11 @@ def hermeroots(c): if len(c) <= 1: return np.array([], dtype=c.dtype) if len(c) == 2: - return np.array([-c[0]/c[1]]) + return np.array([-c[0] / c[1]]) # rotated companion matrix reduces error - m = hermecompanion(c)[::-1,::-1] - r = la.eigvals(m) + m = hermecompanion(c)[::-1, ::-1] + r = np.linalg.eigvals(m) r.sort() return r @@ -1529,25 +1486,23 @@ def _normed_hermite_e_n(x, n): Notes ----- - .. versionadded:: 1.10.0 - This function is needed for finding the Gauss points and integration weights for high degrees. The values of the standard HermiteE functions overflow when n >= 207. """ if n == 0: - return np.full(x.shape, 1/np.sqrt(np.sqrt(2*np.pi))) + return np.full(x.shape, 1 / np.sqrt(np.sqrt(2 * np.pi))) c0 = 0. - c1 = 1./np.sqrt(np.sqrt(2*np.pi)) + c1 = 1. 
/ np.sqrt(np.sqrt(2 * np.pi)) nd = float(n) for i in range(n - 1): tmp = c0 - c0 = -c1*np.sqrt((nd - 1.)/nd) - c1 = tmp + c1*x*np.sqrt(1./nd) + c0 = -c1 * np.sqrt((nd - 1.) / nd) + c1 = tmp + c1 * x * np.sqrt(1. / nd) nd = nd - 1.0 - return c0 + c1*x + return c0 + c1 * x def hermegauss(deg): @@ -1573,9 +1528,6 @@ def hermegauss(deg): Notes ----- - - .. versionadded:: 1.7.0 - The results have only been tested up to degree 100, higher degrees may be problematic. The weights are determined by using the fact that @@ -1592,27 +1544,27 @@ def hermegauss(deg): # first approximation of roots. We use the fact that the companion # matrix is symmetric in this case in order to obtain better zeros. - c = np.array([0]*deg + [1]) + c = np.array([0] * deg + [1]) m = hermecompanion(c) - x = la.eigvalsh(m) + x = np.linalg.eigvalsh(m) # improve roots by one application of Newton dy = _normed_hermite_e_n(x, ideg) df = _normed_hermite_e_n(x, ideg - 1) * np.sqrt(ideg) - x -= dy/df + x -= dy / df # compute the weights. We scale the factor to avoid possible numerical # overflow. fm = _normed_hermite_e_n(x, ideg - 1) fm /= np.abs(fm).max() - w = 1/(fm * fm) + w = 1 / (fm * fm) # for Hermite_e we can also symmetrize - w = (w + w[::-1])/2 - x = (x - x[::-1])/2 + w = (w + w[::-1]) / 2 + x = (x - x[::-1]) / 2 # scale w to get the right value - w *= np.sqrt(2*np.pi) / w.sum() + w *= np.sqrt(2 * np.pi) / w.sum() return x, w @@ -1633,14 +1585,8 @@ def hermeweight(x): ------- w : ndarray The weight function at `x`. - - Notes - ----- - - .. versionadded:: 1.7.0 - """ - w = np.exp(-.5*x**2) + w = np.exp(-.5 * x**2) return w @@ -1649,7 +1595,7 @@ def hermeweight(x): # class HermiteE(ABCPolyBase): - """An HermiteE series class. + """A HermiteE series class. The HermiteE class provides the standard Python numerical methods '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the @@ -1663,11 +1609,9 @@ class HermiteE(ABCPolyBase): domain : (2,) array_like, optional Domain to use. 
The interval ``[domain[0], domain[1]]`` is mapped to the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is [-1, 1]. + The default value is [-1., 1.]. window : (2,) array_like, optional - Window, see `domain` for its use. The default value is [-1, 1]. - - .. versionadded:: 1.6.0 + Window, see `domain` for its use. The default value is [-1., 1.]. symbol : str, optional Symbol used to represent the independent variable in string representations of the polynomial expression, e.g. for printing. diff --git a/numpy/polynomial/hermite_e.pyi b/numpy/polynomial/hermite_e.pyi index cca0dd636785..f1ebf9066a4f 100644 --- a/numpy/polynomial/hermite_e.pyi +++ b/numpy/polynomial/hermite_e.pyi @@ -1,47 +1,107 @@ -from typing import Any - -from numpy import int_ -from numpy.typing import NDArray -from numpy.polynomial._polybase import ABCPolyBase -from numpy.polynomial.polyutils import trimcoef - -__all__: list[str] - -hermetrim = trimcoef - -def poly2herme(pol): ... -def herme2poly(c): ... - -hermedomain: NDArray[int_] -hermezero: NDArray[int_] -hermeone: NDArray[int_] -hermex: NDArray[int_] - -def hermeline(off, scl): ... -def hermefromroots(roots): ... -def hermeadd(c1, c2): ... -def hermesub(c1, c2): ... -def hermemulx(c): ... -def hermemul(c1, c2): ... -def hermediv(c1, c2): ... -def hermepow(c, pow, maxpower=...): ... -def hermeder(c, m=..., scl=..., axis=...): ... -def hermeint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ... -def hermeval(x, c, tensor=...): ... -def hermeval2d(x, y, c): ... -def hermegrid2d(x, y, c): ... -def hermeval3d(x, y, z, c): ... -def hermegrid3d(x, y, z, c): ... -def hermevander(x, deg): ... -def hermevander2d(x, y, deg): ... -def hermevander3d(x, y, z, deg): ... -def hermefit(x, y, deg, rcond=..., full=..., w=...): ... -def hermecompanion(c): ... -def hermeroots(c): ... -def hermegauss(deg): ... -def hermeweight(x): ... 
- -class HermiteE(ABCPolyBase): - domain: Any - window: Any - basis_name: Any +from typing import Any, ClassVar, Final, Literal as L + +import numpy as np +from numpy._typing import _Shape + +from ._polybase import ABCPolyBase +from ._polytypes import ( + _Array1, + _Array2, + _FuncBinOp, + _FuncCompanion, + _FuncDer, + _FuncFit, + _FuncFromRoots, + _FuncGauss, + _FuncInteg, + _FuncLine, + _FuncPoly2Ortho, + _FuncPow, + _FuncRoots, + _FuncUnOp, + _FuncVal, + _FuncVal2D, + _FuncVal3D, + _FuncVander, + _FuncVander2D, + _FuncVander3D, + _FuncWeight, +) +from .polyutils import trimcoef as hermetrim + +__all__ = [ + "hermezero", + "hermeone", + "hermex", + "hermedomain", + "hermeline", + "hermeadd", + "hermesub", + "hermemulx", + "hermemul", + "hermediv", + "hermepow", + "hermeval", + "hermeder", + "hermeint", + "herme2poly", + "poly2herme", + "hermefromroots", + "hermevander", + "hermefit", + "hermetrim", + "hermeroots", + "HermiteE", + "hermeval2d", + "hermeval3d", + "hermegrid2d", + "hermegrid3d", + "hermevander2d", + "hermevander3d", + "hermecompanion", + "hermegauss", + "hermeweight", +] + +poly2herme: Final[_FuncPoly2Ortho] = ... +herme2poly: Final[_FuncUnOp] = ... + +hermedomain: Final[_Array2[np.float64]] = ... +hermezero: Final[_Array1[np.int_]] = ... +hermeone: Final[_Array1[np.int_]] = ... +hermex: Final[_Array2[np.int_]] = ... + +hermeline: Final[_FuncLine] = ... +hermefromroots: Final[_FuncFromRoots] = ... +hermeadd: Final[_FuncBinOp] = ... +hermesub: Final[_FuncBinOp] = ... +hermemulx: Final[_FuncUnOp] = ... +hermemul: Final[_FuncBinOp] = ... +hermediv: Final[_FuncBinOp] = ... +hermepow: Final[_FuncPow] = ... +hermeder: Final[_FuncDer] = ... +hermeint: Final[_FuncInteg] = ... +hermeval: Final[_FuncVal] = ... +hermeval2d: Final[_FuncVal2D] = ... +hermeval3d: Final[_FuncVal3D] = ... +hermegrid2d: Final[_FuncVal2D] = ... +hermegrid3d: Final[_FuncVal3D] = ... +hermevander: Final[_FuncVander] = ... +hermevander2d: Final[_FuncVander2D] = ... 
+hermevander3d: Final[_FuncVander3D] = ... +hermefit: Final[_FuncFit] = ... +hermecompanion: Final[_FuncCompanion] = ... +hermeroots: Final[_FuncRoots] = ... + +def _normed_hermite_e_n[ShapeT: _Shape]( + x: np.ndarray[ShapeT, np.dtype[np.float64]], + n: int, +) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... + +hermegauss: Final[_FuncGauss] = ... +hermeweight: Final[_FuncWeight] = ... + +class HermiteE(ABCPolyBase[L["He"]]): + basis_name: ClassVar[L["He"]] = "He" # pyright: ignore[reportIncompatibleMethodOverride] # pyrefly: ignore[bad-override] + domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py index b0de7d9bce35..eb34cbef17ca 100644 --- a/numpy/polynomial/laguerre.py +++ b/numpy/polynomial/laguerre.py @@ -76,8 +76,6 @@ """ import numpy as np -import numpy.linalg as la -from numpy.lib.array_utils import normalize_axis_index from . 
import polyutils as pu from ._polybase import ABCPolyBase @@ -126,6 +124,7 @@ def poly2lag(pol): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.laguerre import poly2lag >>> poly2lag(np.arange(4)) array([ 23., -63., 58., -18.]) @@ -176,7 +175,7 @@ def lag2poly(c): array([0., 1., 2., 3.]) """ - from .polynomial import polyadd, polysub, polymulx + from .polynomial import polyadd, polymulx, polysub [c] = pu.as_series([c]) n = len(c) @@ -188,8 +187,8 @@ def lag2poly(c): # i is the current degree of c1 for i in range(n - 1, 1, -1): tmp = c0 - c0 = polysub(c[i - 2], (c1*(i - 1))/i) - c1 = polyadd(tmp, polysub((2*i - 1)*c1, polymulx(c1))/i) + c0 = polysub(c[i - 2], (c1 * (i - 1)) / i) + c1 = polyadd(tmp, polysub((2 * i - 1) * c1, polymulx(c1)) / i) return polyadd(c0, polysub(c1, polymulx(c1))) @@ -433,9 +432,9 @@ def lagmulx(c): prd[0] = c[0] prd[1] = -c[0] for i in range(1, len(c)): - prd[i + 1] = -c[i]*(i + 1) - prd[i] += c[i]*(2*i + 1) - prd[i - 1] -= c[i]*i + prd[i + 1] = -c[i] * (i + 1) + prd[i] += c[i] * (2 * i + 1) + prd[i - 1] -= c[i] * i return prd @@ -488,20 +487,20 @@ def lagmul(c1, c2): xs = c2 if len(c) == 1: - c0 = c[0]*xs + c0 = c[0] * xs c1 = 0 elif len(c) == 2: - c0 = c[0]*xs - c1 = c[1]*xs + c0 = c[0] * xs + c1 = c[1] * xs else: nd = len(c) - c0 = c[-2]*xs - c1 = c[-1]*xs + c0 = c[-2] * xs + c1 = c[-1] * xs for i in range(3, len(c) + 1): tmp = c0 nd = nd - 1 - c0 = lagsub(c[-i]*xs, (c1*(nd - 1))/nd) - c1 = lagadd(tmp, lagsub((2*nd - 1)*c1, lagmulx(c1))/nd) + c0 = lagsub(c[-i] * xs, (c1 * (nd - 1)) / nd) + c1 = lagadd(tmp, lagsub((2 * nd - 1) * c1, lagmulx(c1)) / nd) return lagadd(c0, lagsub(c1, lagmulx(c1))) @@ -616,8 +615,6 @@ def lagder(c, m=1, scl=1, axis=0): axis : int, optional Axis over which the derivative is taken. (Default: 0). - .. 
versionadded:: 1.7.0 - Returns ------- der : ndarray @@ -651,7 +648,7 @@ def lagder(c, m=1, scl=1, axis=0): iaxis = pu._as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -659,7 +656,7 @@ def lagder(c, m=1, scl=1, axis=0): c = np.moveaxis(c, iaxis, 0) n = len(c) if cnt >= n: - c = c[:1]*0 + c = c[:1] * 0 else: for i in range(cnt): n = n - 1 @@ -713,8 +710,6 @@ def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0): axis : int, optional Axis over which the integral is taken. (Default: 0). - .. versionadded:: 1.7.0 - Returns ------- S : ndarray @@ -773,13 +768,13 @@ def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c c = np.moveaxis(c, iaxis, 0) - k = list(k) + [0]*(cnt - len(k)) + k = list(k) + [0] * (cnt - len(k)) for i in range(cnt): n = len(c) c *= scl @@ -841,8 +836,6 @@ def lagval(x, c, tensor=True): over the columns of `c` for the evaluation. This keyword is useful when `c` is multidimensional. The default value is True. - .. 
versionadded:: 1.7.0 - Returns ------- values : ndarray, algebra_like @@ -873,7 +866,7 @@ def lagval(x, c, tensor=True): if isinstance(x, (tuple, list)): x = np.asarray(x) if isinstance(x, np.ndarray) and tensor: - c = c.reshape(c.shape + (1,)*x.ndim) + c = c.reshape(c.shape + (1,) * x.ndim) if len(c) == 1: c0 = c[0] @@ -888,9 +881,9 @@ def lagval(x, c, tensor=True): for i in range(3, len(c) + 1): tmp = c0 nd = nd - 1 - c0 = c[-i] - (c1*(nd - 1))/nd - c1 = tmp + (c1*((2*nd - 1) - x))/nd - return c0 + c1*(1 - x) + c0 = c[-i] - (c1 * (nd - 1)) / nd + c1 = tmp + (c1 * ((2 * nd - 1) - x)) / nd + return c0 + c1 * (1 - x) def lagval2d(x, y, c): @@ -933,11 +926,6 @@ def lagval2d(x, y, c): -------- lagval, laggrid2d, lagval3d, laggrid3d - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial.laguerre import lagval2d @@ -992,11 +980,6 @@ def laggrid2d(x, y, c): -------- lagval, lagval2d, lagval3d, laggrid3d - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial.laguerre import laggrid2d @@ -1051,18 +1034,13 @@ def lagval3d(x, y, z, c): -------- lagval, lagval2d, laggrid2d, laggrid3d - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial.laguerre import lagval3d >>> c = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] >>> lagval3d(1, 1, 2, c) -1.0 - + """ return pu._valnd(lagval, c, x, y, z) @@ -1114,11 +1092,6 @@ def laggrid3d(x, y, z, c): -------- lagval, lagval2d, laggrid2d, lagval3d - Notes - ----- - - .. 
versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial.laguerre import laggrid3d @@ -1128,7 +1101,7 @@ def laggrid3d(x, y, z, c): [ -2., -18.]], [[ -2., -14.], [ -1., -5.]]]) - + """ return pu._gridnd(lagval, c, x, y, z) @@ -1169,6 +1142,7 @@ def lagvander(x, deg): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.laguerre import lagvander >>> x = np.array([0, 1, 2]) >>> lagvander(x, 3) @@ -1185,11 +1159,11 @@ def lagvander(x, deg): dims = (ideg + 1,) + x.shape dtyp = x.dtype v = np.empty(dims, dtype=dtyp) - v[0] = x*0 + 1 + v[0] = x * 0 + 1 if ideg > 0: v[1] = 1 - x for i in range(2, ideg + 1): - v[i] = (v[i-1]*(2*i - 1 - x) - v[i-2]*(i - 1))/i + v[i] = (v[i - 1] * (2 * i - 1 - x) - v[i - 2] * (i - 1)) / i return np.moveaxis(v, 0, -1) @@ -1209,7 +1183,7 @@ def lagvander2d(x, y, deg): correspond to the elements of a 2-D coefficient array `c` of shape (xdeg + 1, ydeg + 1) in the order - .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + .. math:: c_{00}, c_{01}, c_{02}, ... , c_{10}, c_{11}, c_{12}, ... and ``np.dot(V, c.flat)`` and ``lagval2d(x, y, c)`` will be the same up to roundoff. This equivalence is useful both for least squares @@ -1237,19 +1211,15 @@ def lagvander2d(x, y, deg): -------- lagvander, lagvander3d, lagval2d, lagval3d - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- + >>> import numpy as np >>> from numpy.polynomial.laguerre import lagvander2d >>> x = np.array([0]) >>> y = np.array([2]) >>> lagvander2d(x, y, [2, 1]) array([[ 1., -1., 1., -1., 1., -1.]]) - + """ return pu._vander_nd_flat((lagvander, lagvander), (x, y), deg) @@ -1299,13 +1269,9 @@ def lagvander3d(x, y, z, deg): -------- lagvander, lagvander3d, lagval2d, lagval3d - Notes - ----- - - .. 
versionadded:: 1.7.0 - Examples -------- + >>> import numpy as np >>> from numpy.polynomial.laguerre import lagvander3d >>> x = np.array([0]) >>> y = np.array([2]) @@ -1439,6 +1405,7 @@ def lagfit(x, y, deg, rcond=None, full=False, w=None): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.laguerre import lagfit, lagval >>> x = np.linspace(0, 10) >>> rng = np.random.default_rng() @@ -1470,35 +1437,30 @@ def lagcompanion(c): mat : ndarray Companion matrix of dimensions (deg, deg). - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial.laguerre import lagcompanion >>> lagcompanion([1, 2, 3]) array([[ 1. , -0.33333333], [-1. , 4.33333333]]) - + """ # c is a trimmed copy [c] = pu.as_series([c]) if len(c) < 2: raise ValueError('Series must have maximum degree of at least 1.') if len(c) == 2: - return np.array([[1 + c[0]/c[1]]]) + return np.array([[1 + c[0] / c[1]]]) n = len(c) - 1 mat = np.zeros((n, n), dtype=c.dtype) - top = mat.reshape(-1)[1::n+1] - mid = mat.reshape(-1)[0::n+1] - bot = mat.reshape(-1)[n::n+1] + top = mat.reshape(-1)[1::n + 1] + mid = mat.reshape(-1)[0::n + 1] + bot = mat.reshape(-1)[n::n + 1] top[...] = -np.arange(1, n) - mid[...] = 2.*np.arange(n) + 1. + mid[...] = 2. * np.arange(n) + 1. bot[...] = top - mat[:, -1] += (c[:-1]/c[-1])*n + mat[:, -1] += (c[:-1] / c[-1]) * n return mat @@ -1557,12 +1519,16 @@ def lagroots(c): if len(c) <= 1: return np.array([], dtype=c.dtype) if len(c) == 2: - return np.array([1 + c[0]/c[1]]) + return np.array([1 + c[0] / c[1]]) # rotated companion matrix reduces error - m = lagcompanion(c)[::-1,::-1] - r = la.eigvals(m) + m = lagcompanion(c)[::-1, ::-1] + r = np.linalg.eigvals(m) r.sort() + + # backwards compat: return real values if possible + from numpy.linalg._linalg import _to_real_if_imag_zero + r = _to_real_if_imag_zero(r, m) return r @@ -1589,9 +1555,6 @@ def laggauss(deg): Notes ----- - - .. 
versionadded:: 1.7.0 - The results have only been tested up to degree 100 higher degrees may be problematic. The weights are determined by using the fact that @@ -1614,21 +1577,21 @@ def laggauss(deg): # first approximation of roots. We use the fact that the companion # matrix is symmetric in this case in order to obtain better zeros. - c = np.array([0]*deg + [1]) + c = np.array([0] * deg + [1]) m = lagcompanion(c) - x = la.eigvalsh(m) + x = np.linalg.eigvalsh(m) # improve roots by one application of Newton dy = lagval(x, c) df = lagval(x, lagder(c)) - x -= dy/df + x -= dy / df # compute the weights. We scale the factor to avoid possible numerical # overflow. fm = lagval(x, c[1:]) fm /= np.abs(fm).max() df /= np.abs(df).max() - w = 1/(fm * df) + w = 1 / (fm * df) # scale w to get the right value, 1 in this case w /= w.sum() @@ -1653,11 +1616,6 @@ def lagweight(x): w : ndarray The weight function at `x`. - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial.laguerre import lagweight @@ -1688,11 +1646,9 @@ class Laguerre(ABCPolyBase): domain : (2,) array_like, optional Domain to use. The interval ``[domain[0], domain[1]]`` is mapped to the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is [0, 1]. + The default value is [0., 1.]. window : (2,) array_like, optional - Window, see `domain` for its use. The default value is [0, 1]. - - .. versionadded:: 1.6.0 + Window, see `domain` for its use. The default value is [0., 1.]. symbol : str, optional Symbol used to represent the independent variable in string representations of the polynomial expression, e.g. for printing. 
diff --git a/numpy/polynomial/laguerre.pyi b/numpy/polynomial/laguerre.pyi index 541d3911832f..48fecfd07efe 100644 --- a/numpy/polynomial/laguerre.pyi +++ b/numpy/polynomial/laguerre.pyi @@ -1,47 +1,100 @@ -from typing import Any +from typing import Any, ClassVar, Final, Literal as L -from numpy import int_ -from numpy.typing import NDArray -from numpy.polynomial._polybase import ABCPolyBase -from numpy.polynomial.polyutils import trimcoef +import numpy as np -__all__: list[str] +from ._polybase import ABCPolyBase +from ._polytypes import ( + _Array1, + _Array2, + _FuncBinOp, + _FuncCompanion, + _FuncDer, + _FuncFit, + _FuncFromRoots, + _FuncGauss, + _FuncInteg, + _FuncLine, + _FuncPoly2Ortho, + _FuncPow, + _FuncRoots, + _FuncUnOp, + _FuncVal, + _FuncVal2D, + _FuncVal3D, + _FuncVander, + _FuncVander2D, + _FuncVander3D, + _FuncWeight, +) +from .polyutils import trimcoef as lagtrim -lagtrim = trimcoef +__all__ = [ + "lagzero", + "lagone", + "lagx", + "lagdomain", + "lagline", + "lagadd", + "lagsub", + "lagmulx", + "lagmul", + "lagdiv", + "lagpow", + "lagval", + "lagder", + "lagint", + "lag2poly", + "poly2lag", + "lagfromroots", + "lagvander", + "lagfit", + "lagtrim", + "lagroots", + "Laguerre", + "lagval2d", + "lagval3d", + "laggrid2d", + "laggrid3d", + "lagvander2d", + "lagvander3d", + "lagcompanion", + "laggauss", + "lagweight", +] -def poly2lag(pol): ... -def lag2poly(c): ... +poly2lag: Final[_FuncPoly2Ortho] = ... +lag2poly: Final[_FuncUnOp] = ... -lagdomain: NDArray[int_] -lagzero: NDArray[int_] -lagone: NDArray[int_] -lagx: NDArray[int_] +lagdomain: Final[_Array2[np.float64]] = ... +lagzero: Final[_Array1[np.int_]] = ... +lagone: Final[_Array1[np.int_]] = ... +lagx: Final[_Array2[np.int_]] = ... -def lagline(off, scl): ... -def lagfromroots(roots): ... -def lagadd(c1, c2): ... -def lagsub(c1, c2): ... -def lagmulx(c): ... -def lagmul(c1, c2): ... -def lagdiv(c1, c2): ... -def lagpow(c, pow, maxpower=...): ... -def lagder(c, m=..., scl=..., axis=...): ... 
-def lagint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ... -def lagval(x, c, tensor=...): ... -def lagval2d(x, y, c): ... -def laggrid2d(x, y, c): ... -def lagval3d(x, y, z, c): ... -def laggrid3d(x, y, z, c): ... -def lagvander(x, deg): ... -def lagvander2d(x, y, deg): ... -def lagvander3d(x, y, z, deg): ... -def lagfit(x, y, deg, rcond=..., full=..., w=...): ... -def lagcompanion(c): ... -def lagroots(c): ... -def laggauss(deg): ... -def lagweight(x): ... +lagline: Final[_FuncLine] = ... +lagfromroots: Final[_FuncFromRoots] = ... +lagadd: Final[_FuncBinOp] = ... +lagsub: Final[_FuncBinOp] = ... +lagmulx: Final[_FuncUnOp] = ... +lagmul: Final[_FuncBinOp] = ... +lagdiv: Final[_FuncBinOp] = ... +lagpow: Final[_FuncPow] = ... +lagder: Final[_FuncDer] = ... +lagint: Final[_FuncInteg] = ... +lagval: Final[_FuncVal] = ... +lagval2d: Final[_FuncVal2D] = ... +lagval3d: Final[_FuncVal3D] = ... +laggrid2d: Final[_FuncVal2D] = ... +laggrid3d: Final[_FuncVal3D] = ... +lagvander: Final[_FuncVander] = ... +lagvander2d: Final[_FuncVander2D] = ... +lagvander3d: Final[_FuncVander3D] = ... +lagfit: Final[_FuncFit] = ... +lagcompanion: Final[_FuncCompanion] = ... +lagroots: Final[_FuncRoots] = ... +laggauss: Final[_FuncGauss] = ... +lagweight: Final[_FuncWeight] = ... -class Laguerre(ABCPolyBase): - domain: Any - window: Any - basis_name: Any +class Laguerre(ABCPolyBase[L["L"]]): + basis_name: ClassVar[L["L"]] = "L" # pyright: ignore[reportIncompatibleMethodOverride] # pyrefly: ignore[bad-override] + domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... 
# pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py index cfbf1486d486..b611aed844e7 100644 --- a/numpy/polynomial/legendre.py +++ b/numpy/polynomial/legendre.py @@ -80,8 +80,6 @@ """ import numpy as np -import numpy.linalg as la -from numpy.lib.array_utils import normalize_axis_index from . import polyutils as pu from ._polybase import ABCPolyBase @@ -128,6 +126,7 @@ def poly2leg(pol): Examples -------- + >>> import numpy as np >>> from numpy import polynomial as P >>> p = P.Polynomial(np.arange(4)) >>> p @@ -190,7 +189,7 @@ def leg2poly(c): """ - from .polynomial import polyadd, polysub, polymulx + from .polynomial import polyadd, polymulx, polysub [c] = pu.as_series([c]) n = len(c) @@ -202,8 +201,8 @@ def leg2poly(c): # i is the current degree of c1 for i in range(n - 1, 1, -1): tmp = c0 - c0 = polysub(c[i - 2], (c1*(i - 1))/i) - c1 = polyadd(tmp, (polymulx(c1)*(2*i - 1))/i) + c0 = polysub(c[i - 2], (c1 * (i - 1)) / i) + c1 = polyadd(tmp, (polymulx(c1) * (2 * i - 1)) / i) return polyadd(c0, polymulx(c1)) @@ -426,7 +425,7 @@ def legmulx(c): See Also -------- - legadd, legmul, legdiv, legpow + legadd, legsub, legmul, legdiv, legpow Notes ----- @@ -451,14 +450,14 @@ def legmulx(c): return c prd = np.empty(len(c) + 1, dtype=c.dtype) - prd[0] = c[0]*0 + prd[0] = c[0] * 0 prd[1] = c[0] for i in range(1, len(c)): j = i + 1 k = i - 1 s = i + j - prd[j] = (c[i]*j)/s - prd[k] += (c[i]*i)/s + prd[j] = (c[i] * j) / s + prd[k] += (c[i] * i) / s return prd @@ -513,20 +512,20 @@ def legmul(c1, c2): xs = c2 if len(c) == 1: - c0 = c[0]*xs + c0 = c[0] * xs c1 = 0 elif len(c) == 2: - c0 = c[0]*xs - c1 = c[1]*xs + c0 = c[0] * xs + c1 = c[1] * xs else: nd = len(c) - c0 = c[-2]*xs - c1 = c[-1]*xs + c0 = c[-2] * xs + c1 = c[-1] * xs for i in range(3, len(c) + 1): tmp = c0 nd = nd - 1 - c0 = legsub(c[-i]*xs, (c1*(nd - 1))/nd) - c1 = legadd(tmp, (legmulx(c1)*(2*nd - 1))/nd) + c0 = legsub(c[-i] * xs, (c1 * (nd - 1)) 
/ nd) + c1 = legadd(tmp, (legmulx(c1) * (2 * nd - 1)) / nd) return legadd(c0, legmulx(c1)) @@ -638,8 +637,6 @@ def legder(c, m=1, scl=1, axis=0): axis : int, optional Axis over which the derivative is taken. (Default: 0). - .. versionadded:: 1.7.0 - Returns ------- der : ndarray @@ -677,7 +674,7 @@ def legder(c, m=1, scl=1, axis=0): iaxis = pu._as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -685,17 +682,17 @@ def legder(c, m=1, scl=1, axis=0): c = np.moveaxis(c, iaxis, 0) n = len(c) if cnt >= n: - c = c[:1]*0 + c = c[:1] * 0 else: for i in range(cnt): n = n - 1 c *= scl der = np.empty((n,) + c.shape[1:], dtype=c.dtype) for j in range(n, 2, -1): - der[j - 1] = (2*j - 1)*c[j] + der[j - 1] = (2 * j - 1) * c[j] c[j - 2] += c[j] if n > 1: - der[1] = 3*c[2] + der[1] = 3 * c[2] der[0] = c[1] c = der c = np.moveaxis(c, 0, iaxis) @@ -740,8 +737,6 @@ def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0): axis : int, optional Axis over which the integral is taken. (Default: 0). - .. 
versionadded:: 1.7.0 - Returns ------- S : ndarray @@ -802,13 +797,13 @@ def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c c = np.moveaxis(c, iaxis, 0) - k = list(k) + [0]*(cnt - len(k)) + k = list(k) + [0] * (cnt - len(k)) for i in range(cnt): n = len(c) c *= scl @@ -816,12 +811,12 @@ def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0): c[0] += k[i] else: tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) - tmp[0] = c[0]*0 + tmp[0] = c[0] * 0 tmp[1] = c[0] if n > 1: - tmp[2] = c[1]/3 + tmp[2] = c[1] / 3 for j in range(2, n): - t = c[j]/(2*j + 1) + t = c[j] / (2 * j + 1) tmp[j + 1] = t tmp[j - 1] -= t tmp[0] += k[i] - legval(lbnd, tmp) @@ -873,8 +868,6 @@ def legval(x, c, tensor=True): over the columns of `c` for the evaluation. This keyword is useful when `c` is multidimensional. The default value is True. - .. versionadded:: 1.7.0 - Returns ------- values : ndarray, algebra_like @@ -895,7 +888,7 @@ def legval(x, c, tensor=True): if isinstance(x, (tuple, list)): x = np.asarray(x) if isinstance(x, np.ndarray) and tensor: - c = c.reshape(c.shape + (1,)*x.ndim) + c = c.reshape(c.shape + (1,) * x.ndim) if len(c) == 1: c0 = c[0] @@ -910,9 +903,9 @@ def legval(x, c, tensor=True): for i in range(3, len(c) + 1): tmp = c0 nd = nd - 1 - c0 = c[-i] - (c1*(nd - 1))/nd - c1 = tmp + (c1*x*(2*nd - 1))/nd - return c0 + c1*x + c0 = c[-i] - c1 * ((nd - 1) / nd) + c1 = tmp + c1 * x * ((2 * nd - 1) / nd) + return c0 + c1 * x def legval2d(x, y, c): @@ -954,12 +947,6 @@ def legval2d(x, y, c): See Also -------- legval, leggrid2d, legval3d, leggrid3d - - Notes - ----- - - .. 
versionadded:: 1.7.0 - """ return pu._valnd(legval, c, x, y) @@ -1007,12 +994,6 @@ def leggrid2d(x, y, c): See Also -------- legval, legval2d, legval3d, leggrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - """ return pu._gridnd(legval, c, x, y) @@ -1058,12 +1039,6 @@ def legval3d(x, y, z, c): See Also -------- legval, legval2d, leggrid2d, leggrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - """ return pu._valnd(legval, c, x, y, z) @@ -1114,12 +1089,6 @@ def leggrid3d(x, y, z, c): See Also -------- legval, legval2d, leggrid2d, legval3d - - Notes - ----- - - .. versionadded:: 1.7.0 - """ return pu._gridnd(legval, c, x, y, z) @@ -1169,11 +1138,11 @@ def legvander(x, deg): v = np.empty(dims, dtype=dtyp) # Use forward recursion to generate the entries. This is not as accurate # as reverse recursion in this application but it is more efficient. - v[0] = x*0 + 1 + v[0] = x * 0 + 1 if ideg > 0: v[1] = x for i in range(2, ideg + 1): - v[i] = (v[i-1]*x*(2*i - 1) - v[i-2]*(i - 1))/i + v[i] = (v[i - 1] * x * (2 * i - 1) - v[i - 2] * (i - 1)) / i return np.moveaxis(v, 0, -1) @@ -1193,7 +1162,7 @@ def legvander2d(x, y, deg): correspond to the elements of a 2-D coefficient array `c` of shape (xdeg + 1, ydeg + 1) in the order - .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + .. math:: c_{00}, c_{01}, c_{02}, ... , c_{10}, c_{11}, c_{12}, ... and ``np.dot(V, c.flat)`` and ``legval2d(x, y, c)`` will be the same up to roundoff. This equivalence is useful both for least squares @@ -1220,12 +1189,6 @@ def legvander2d(x, y, deg): See Also -------- legvander, legvander3d, legval2d, legval3d - - Notes - ----- - - .. versionadded:: 1.7.0 - """ return pu._vander_nd_flat((legvander, legvander), (x, y), deg) @@ -1274,12 +1237,6 @@ def legvander3d(x, y, z, deg): See Also -------- legvander, legvander3d, legval2d, legval3d - - Notes - ----- - - .. 
versionadded:: 1.7.0 - """ return pu._vander_nd_flat((legvander, legvander, legvander), (x, y, z), deg) @@ -1328,8 +1285,6 @@ def legfit(x, y, deg, rcond=None, full=False, w=None): same variance. When using inverse-variance weighting, use ``w[i] = 1/sigma(y[i])``. The default value is None. - .. versionadded:: 1.5.0 - Returns ------- coef : ndarray, shape (M,) or (M, K) @@ -1417,7 +1372,7 @@ def legcompanion(c): """Return the scaled companion matrix of c. The basis polynomials are scaled so that the companion matrix is - symmetric when `c` is an Legendre basis polynomial. This provides + symmetric when `c` is a Legendre basis polynomial. This provides better eigenvalue estimates than the unscaled case and for basis polynomials the eigenvalues are guaranteed to be real if `numpy.linalg.eigvalsh` is used to obtain them. @@ -1432,28 +1387,22 @@ def legcompanion(c): ------- mat : ndarray Scaled companion matrix of dimensions (deg, deg). - - Notes - ----- - - .. versionadded:: 1.7.0 - """ # c is a trimmed copy [c] = pu.as_series([c]) if len(c) < 2: raise ValueError('Series must have maximum degree of at least 1.') if len(c) == 2: - return np.array([[-c[0]/c[1]]]) + return np.array([[-c[0] / c[1]]]) n = len(c) - 1 mat = np.zeros((n, n), dtype=c.dtype) - scl = 1./np.sqrt(2*np.arange(n) + 1) - top = mat.reshape(-1)[1::n+1] - bot = mat.reshape(-1)[n::n+1] - top[...] = np.arange(1, n)*scl[:n-1]*scl[1:n] + scl = 1. / np.sqrt(2 * np.arange(n) + 1) + top = mat.reshape(-1)[1::n + 1] + bot = mat.reshape(-1)[n::n + 1] + top[...] = np.arange(1, n) * scl[:n - 1] * scl[1:n] bot[...] 
= top - mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*(n/(2*n - 1)) + mat[:, -1] -= (c[:-1] / c[-1]) * (scl / scl[-1]) * (n / (2 * n - 1)) return mat @@ -1509,11 +1458,11 @@ def legroots(c): if len(c) < 2: return np.array([], dtype=c.dtype) if len(c) == 2: - return np.array([-c[0]/c[1]]) + return np.array([-c[0] / c[1]]) # rotated companion matrix reduces error - m = legcompanion(c)[::-1,::-1] - r = la.eigvals(m) + m = legcompanion(c)[::-1, ::-1] + r = np.linalg.eigvals(m) r.sort() return r @@ -1541,9 +1490,6 @@ def leggauss(deg): Notes ----- - - .. versionadded:: 1.7.0 - The results have only been tested up to degree 100, higher degrees may be problematic. The weights are determined by using the fact that @@ -1560,25 +1506,25 @@ def leggauss(deg): # first approximation of roots. We use the fact that the companion # matrix is symmetric in this case in order to obtain better zeros. - c = np.array([0]*deg + [1]) + c = np.array([0] * deg + [1]) m = legcompanion(c) - x = la.eigvalsh(m) + x = np.linalg.eigvalsh(m) # improve roots by one application of Newton dy = legval(x, c) df = legval(x, legder(c)) - x -= dy/df + x -= dy / df # compute the weights. We scale the factor to avoid possible numerical # overflow. fm = legval(x, c[1:]) fm /= np.abs(fm).max() df /= np.abs(df).max() - w = 1/(fm * df) + w = 1 / (fm * df) # for Legendre we can also symmetrize - w = (w + w[::-1])/2 - x = (x - x[::-1])/2 + w = (w + w[::-1]) / 2 + x = (x - x[::-1]) / 2 # scale w to get the right value w *= 2. / w.sum() @@ -1603,14 +1549,8 @@ def legweight(x): ------- w : ndarray The weight function at `x`. - - Notes - ----- - - .. versionadded:: 1.7.0 - """ - w = x*0.0 + 1.0 + w = x * 0.0 + 1.0 return w # @@ -1632,11 +1572,9 @@ class Legendre(ABCPolyBase): domain : (2,) array_like, optional Domain to use. The interval ``[domain[0], domain[1]]`` is mapped to the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is [-1, 1]. + The default value is [-1., 1.]. 
window : (2,) array_like, optional - Window, see `domain` for its use. The default value is [-1, 1]. - - .. versionadded:: 1.6.0 + Window, see `domain` for its use. The default value is [-1., 1.]. symbol : str, optional Symbol used to represent the independent variable in string representations of the polynomial expression, e.g. for printing. diff --git a/numpy/polynomial/legendre.pyi b/numpy/polynomial/legendre.pyi index 97c6478f80f8..75fa47b44d3e 100644 --- a/numpy/polynomial/legendre.pyi +++ b/numpy/polynomial/legendre.pyi @@ -1,47 +1,100 @@ -from typing import Any +from typing import Any, ClassVar, Final, Literal as L -from numpy import int_ -from numpy.typing import NDArray -from numpy.polynomial._polybase import ABCPolyBase -from numpy.polynomial.polyutils import trimcoef +import numpy as np -__all__: list[str] +from ._polybase import ABCPolyBase +from ._polytypes import ( + _Array1, + _Array2, + _FuncBinOp, + _FuncCompanion, + _FuncDer, + _FuncFit, + _FuncFromRoots, + _FuncGauss, + _FuncInteg, + _FuncLine, + _FuncPoly2Ortho, + _FuncPow, + _FuncRoots, + _FuncUnOp, + _FuncVal, + _FuncVal2D, + _FuncVal3D, + _FuncVander, + _FuncVander2D, + _FuncVander3D, + _FuncWeight, +) +from .polyutils import trimcoef as legtrim -legtrim = trimcoef +__all__ = [ + "legzero", + "legone", + "legx", + "legdomain", + "legline", + "legadd", + "legsub", + "legmulx", + "legmul", + "legdiv", + "legpow", + "legval", + "legder", + "legint", + "leg2poly", + "poly2leg", + "legfromroots", + "legvander", + "legfit", + "legtrim", + "legroots", + "Legendre", + "legval2d", + "legval3d", + "leggrid2d", + "leggrid3d", + "legvander2d", + "legvander3d", + "legcompanion", + "leggauss", + "legweight", +] -def poly2leg(pol): ... -def leg2poly(c): ... +poly2leg: Final[_FuncPoly2Ortho] = ... +leg2poly: Final[_FuncUnOp] = ... -legdomain: NDArray[int_] -legzero: NDArray[int_] -legone: NDArray[int_] -legx: NDArray[int_] +legdomain: Final[_Array2[np.float64]] = ... +legzero: Final[_Array1[np.int_]] = ... 
+legone: Final[_Array1[np.int_]] = ... +legx: Final[_Array2[np.int_]] = ... -def legline(off, scl): ... -def legfromroots(roots): ... -def legadd(c1, c2): ... -def legsub(c1, c2): ... -def legmulx(c): ... -def legmul(c1, c2): ... -def legdiv(c1, c2): ... -def legpow(c, pow, maxpower=...): ... -def legder(c, m=..., scl=..., axis=...): ... -def legint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ... -def legval(x, c, tensor=...): ... -def legval2d(x, y, c): ... -def leggrid2d(x, y, c): ... -def legval3d(x, y, z, c): ... -def leggrid3d(x, y, z, c): ... -def legvander(x, deg): ... -def legvander2d(x, y, deg): ... -def legvander3d(x, y, z, deg): ... -def legfit(x, y, deg, rcond=..., full=..., w=...): ... -def legcompanion(c): ... -def legroots(c): ... -def leggauss(deg): ... -def legweight(x): ... +legline: Final[_FuncLine] = ... +legfromroots: Final[_FuncFromRoots] = ... +legadd: Final[_FuncBinOp] = ... +legsub: Final[_FuncBinOp] = ... +legmulx: Final[_FuncUnOp] = ... +legmul: Final[_FuncBinOp] = ... +legdiv: Final[_FuncBinOp] = ... +legpow: Final[_FuncPow] = ... +legder: Final[_FuncDer] = ... +legint: Final[_FuncInteg] = ... +legval: Final[_FuncVal] = ... +legval2d: Final[_FuncVal2D] = ... +legval3d: Final[_FuncVal3D] = ... +leggrid2d: Final[_FuncVal2D] = ... +leggrid3d: Final[_FuncVal3D] = ... +legvander: Final[_FuncVander] = ... +legvander2d: Final[_FuncVander2D] = ... +legvander3d: Final[_FuncVander3D] = ... +legfit: Final[_FuncFit] = ... +legcompanion: Final[_FuncCompanion] = ... +legroots: Final[_FuncRoots] = ... +leggauss: Final[_FuncGauss] = ... +legweight: Final[_FuncWeight] = ... -class Legendre(ABCPolyBase): - domain: Any - window: Any - basis_name: Any +class Legendre(ABCPolyBase[L["P"]]): + basis_name: ClassVar[L["P"]] = "P" # pyright: ignore[reportIncompatibleMethodOverride] # pyrefly: ignore[bad-override] + domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... 
# pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index 7b78005fa396..45abd5009c70 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -81,8 +81,7 @@ 'polycompanion'] import numpy as np -import numpy.linalg as la -from numpy.lib.array_utils import normalize_axis_index +from numpy._core.overrides import array_function_dispatch as _array_function_dispatch from . import polyutils as pu from ._polybase import ABCPolyBase @@ -308,11 +307,6 @@ def polymulx(c): -------- polyadd, polysub, polymul, polydiv, polypow - Notes - ----- - - .. versionadded:: 1.5.0 - Examples -------- >>> from numpy.polynomial import polynomial as P @@ -328,7 +322,7 @@ def polymulx(c): return c prd = np.empty(len(c) + 1, dtype=c.dtype) - prd[0] = c[0]*0 + prd[0] = c[0] * 0 prd[1:] = c return prd @@ -407,26 +401,26 @@ def polydiv(c1, c2): # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) if c2[-1] == 0: - raise ZeroDivisionError() + raise ZeroDivisionError # FIXME: add message with details to exception # note: this is more efficient than `pu._div(polymul, c1, c2)` lc1 = len(c1) lc2 = len(c2) if lc1 < lc2: - return c1[:1]*0, c1 + return c1[:1] * 0, c1 elif lc2 == 1: - return c1/c2[-1], c1[:1]*0 + return c1 / c2[-1], c1[:1] * 0 else: dlen = lc1 - lc2 scl = c2[-1] - c2 = c2[:-1]/scl + c2 = c2[:-1] / scl i = dlen j = lc1 - 1 while i >= 0: - c1[i:j] -= c2*c1[j] + c1[i:j] -= c2 * c1[j] i -= 1 j -= 1 - return c1[j+1:]/scl, pu.trimseq(c1[:j+1]) + return c1[j + 1:] / scl, pu.trimseq(c1[:j + 1]) def polypow(c, pow, maxpower=None): @@ -495,8 +489,6 @@ def polyder(c, m=1, scl=1, axis=0): axis : int, optional Axis over which the derivative is taken. (Default: 0). - .. 
versionadded:: 1.7.0 - Returns ------- der : ndarray @@ -529,7 +521,7 @@ def polyder(c, m=1, scl=1, axis=0): iaxis = pu._as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -537,14 +529,14 @@ def polyder(c, m=1, scl=1, axis=0): c = np.moveaxis(c, iaxis, 0) n = len(c) if cnt >= n: - c = c[:1]*0 + c = c[:1] * 0 else: for i in range(cnt): n = n - 1 c *= scl der = np.empty((n,) + c.shape[1:], dtype=cdt) for j in range(n, 0, -1): - der[j - 1] = j*c[j] + der[j - 1] = j * c[j] c = der c = np.moveaxis(c, 0, iaxis) return c @@ -586,8 +578,6 @@ def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0): axis : int, optional Axis over which the integral is taken. (Default: 0). - .. versionadded:: 1.7.0 - Returns ------- S : ndarray @@ -645,12 +635,12 @@ def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c - k = list(k) + [0]*(cnt - len(k)) + k = list(k) + [0] * (cnt - len(k)) c = np.moveaxis(c, iaxis, 0) for i in range(cnt): n = len(c) @@ -659,10 +649,10 @@ def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0): c[0] += k[i] else: tmp = np.empty((n + 1,) + c.shape[1:], dtype=cdt) - tmp[0] = c[0]*0 + tmp[0] = c[0] * 0 tmp[1] = c[0] for j in range(1, n): - tmp[j + 1] = c[j]/(j + 1) + tmp[j + 1] = c[j] / (j + 1) tmp[0] += k[i] - polyval(lbnd, tmp) c = tmp c = np.moveaxis(c, 0, iaxis) @@ -712,8 +702,6 @@ def polyval(x, c, tensor=True): over the columns of `c` for the evaluation. This keyword is useful when `c` is multidimensional. The default value is True. - .. 
versionadded:: 1.7.0 - Returns ------- values : ndarray, compatible object @@ -727,8 +715,13 @@ def polyval(x, c, tensor=True): ----- The evaluation uses Horner's method. + When using coefficients from polynomials created with ``Polynomial.fit()``, + use ``p(x)`` or ``polyval(x, p.convert().coef)`` to handle domain/window + scaling correctly, not ``polyval(x, p.coef)``. + Examples -------- + >>> import numpy as np >>> from numpy.polynomial.polynomial import polyval >>> polyval(1, [1,2,3]) 6.0 @@ -757,11 +750,11 @@ def polyval(x, c, tensor=True): if isinstance(x, (tuple, list)): x = np.asarray(x) if isinstance(x, np.ndarray) and tensor: - c = c.reshape(c.shape + (1,)*x.ndim) + c = c.reshape(c.shape + (1,) * x.ndim) - c0 = c[-1] + x*0 + c0 = c[-1] + x * 0 for i in range(2, len(c) + 1): - c0 = c[-i] + c0*x + c0 = c[-i] + c0 * x return c0 @@ -786,8 +779,6 @@ def polyvalfromroots(x, r, tensor=True): evaluated only for the corresponding broadcast value of `x`. Note that scalars have shape (,). - .. versionadded:: 1.12 - Parameters ---------- x : array_like, compatible object @@ -848,12 +839,18 @@ def polyvalfromroots(x, r, tensor=True): x = np.asarray(x) if isinstance(x, np.ndarray): if tensor: - r = r.reshape(r.shape + (1,)*x.ndim) + r = r.reshape(r.shape + (1,) * x.ndim) elif x.ndim >= r.ndim: raise ValueError("x.ndim must be < r.ndim when tensor == False") return np.prod(x - r, axis=0) +def _polyval2d_dispatcher(x, y, c): + return (x, y, c) +def _polygrid2d_dispatcher(x, y, c): + return (x, y, c) + +@_array_function_dispatch(_polyval2d_dispatcher) def polyval2d(x, y, c): """ Evaluate a 2-D polynomial at points (x, y). @@ -895,22 +892,17 @@ def polyval2d(x, y, c): -------- polyval, polygrid2d, polyval3d, polygrid3d - Notes - ----- - - .. 
versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial import polynomial as P >>> c = ((1, 2, 3), (4, 5, 6)) - >>> P.polyval2d(1, 1, c) + >>> P.polyval2d(1, 1, c) 21.0 """ return pu._valnd(polyval, c, x, y) - +@_array_function_dispatch(_polygrid2d_dispatcher) def polygrid2d(x, y, c): """ Evaluate a 2-D polynomial on the Cartesian product of x and y. @@ -955,11 +947,6 @@ def polygrid2d(x, y, c): -------- polyval, polyval2d, polyval3d, polygrid3d - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial import polynomial as P @@ -1014,11 +1001,6 @@ def polyval3d(x, y, z, c): -------- polyval, polyval2d, polygrid2d, polygrid3d - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial import polynomial as P @@ -1077,11 +1059,6 @@ def polygrid3d(x, y, z, c): -------- polyval, polyval2d, polygrid2d, polyval3d - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial import polynomial as P @@ -1134,7 +1111,7 @@ def polyvander(x, deg): Examples -------- The Vandermonde matrix of degree ``deg = 5`` and sample points - ``x = [-1, 2, 3]`` contains the element-wise powers of `x` + ``x = [-1, 2, 3]`` contains the element-wise powers of `x` from 0 to 5 as its columns. >>> from numpy.polynomial import polynomial as P @@ -1153,11 +1130,11 @@ def polyvander(x, deg): dims = (ideg + 1,) + x.shape dtyp = x.dtype v = np.empty(dims, dtype=dtyp) - v[0] = x*0 + 1 + v[0] = x * 0 + 1 if ideg > 0: v[1] = x for i in range(2, ideg + 1): - v[i] = v[i-1]*x + v[i] = v[i - 1] * x return np.moveaxis(v, 0, -1) @@ -1177,7 +1154,7 @@ def polyvander2d(x, y, deg): correspond to the elements of a 2-D coefficient array `c` of shape (xdeg + 1, ydeg + 1) in the order - .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + .. math:: c_{00}, c_{01}, c_{02}, ... , c_{10}, c_{11}, c_{12}, ... and ``np.dot(V, c.flat)`` and ``polyval2d(x, y, c)`` will be the same up to roundoff. 
This equivalence is useful both for least squares @@ -1207,6 +1184,8 @@ def polyvander2d(x, y, deg): Examples -------- + >>> import numpy as np + The 2-D pseudo-Vandermonde matrix of degree ``[1, 2]`` and sample points ``x = [-1, 2]`` and ``y = [1, 3]`` is as follows: @@ -1233,7 +1212,7 @@ def polyvander2d(x, y, deg): >>> P.polyvander2d(x=x, y=0*x, deg=(m, 0)) == P.polyvander(x=x, deg=m) array([[ True, True], [ True, True]]) - + """ return pu._vander_nd_flat((polyvander, polyvander), (x, y), deg) @@ -1283,13 +1262,9 @@ def polyvander3d(x, y, z, deg): -------- polyvander, polyvander3d, polyval2d, polyval3d - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- + >>> import numpy as np >>> from numpy.polynomial import polynomial as P >>> x = np.asarray([-1, 2, 1]) >>> y = np.asarray([1, -2, -3]) @@ -1304,7 +1279,7 @@ def polyvander3d(x, y, z, deg): -8., 8., 16., 4., 8., -8., -16., 16., 32.], [ 1., 5., -3., -15., 9., 45., 1., 5., -3., -15., 9., 45., 1., 5., -3., -15., 9., 45.]]) - + We can verify the columns for any ``0 <= i <= l``, ``0 <= j <= m``, and ``0 <= k <= n`` @@ -1363,8 +1338,6 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None): same variance. When using inverse-variance weighting, use ``w[i] = 1/sigma(y[i])``. The default value is None. - .. 
versionadded:: 1.5.0 - Returns ------- coef : ndarray, shape (`deg` + 1,) or (`deg` + 1, `K`) @@ -1441,6 +1414,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None): Examples -------- + >>> import numpy as np >>> from numpy.polynomial import polynomial as P >>> x = np.linspace(-1,1,51) # x "data": [-1, -0.96, ..., 0.96, 1] >>> rng = np.random.default_rng() @@ -1463,7 +1437,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None): array([-6.73496154e-17, -1.00000000e+00, 0.00000000e+00, 1.00000000e+00]) >>> stats # note the minuscule SSR [array([8.79579319e-31]), - 4, + np.int32(4), array([1.38446749, 1.32119158, 0.50443316, 0.28853036]), 1.1324274851176597e-14] @@ -1490,11 +1464,6 @@ def polycompanion(c): mat : ndarray Companion matrix of dimensions (deg, deg). - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial import polynomial as P @@ -1509,13 +1478,13 @@ def polycompanion(c): if len(c) < 2: raise ValueError('Series must have maximum degree of at least 1.') if len(c) == 2: - return np.array([[-c[0]/c[1]]]) + return np.array([[-c[0] / c[1]]]) n = len(c) - 1 mat = np.zeros((n, n), dtype=c.dtype) - bot = mat.reshape(-1)[n::n+1] + bot = mat.reshape(-1)[n::n + 1] bot[...] = 1 - mat[:, -1] -= c[:-1]/c[-1] + mat[:, -1] -= c[:-1] / c[-1] return mat @@ -1573,12 +1542,15 @@ def polyroots(c): if len(c) < 2: return np.array([], dtype=c.dtype) if len(c) == 2: - return np.array([-c[0]/c[1]]) + return np.array([-c[0] / c[1]]) - # rotated companion matrix reduces error - m = polycompanion(c)[::-1,::-1] - r = la.eigvals(m) + m = polycompanion(c) + r = np.linalg.eigvals(m) r.sort() + + # backwards compat: return real values if possible + from numpy.linalg._linalg import _to_real_if_imag_zero + r = _to_real_if_imag_zero(r, m) return r @@ -1601,11 +1573,9 @@ class Polynomial(ABCPolyBase): domain : (2,) array_like, optional Domain to use. 
The interval ``[domain[0], domain[1]]`` is mapped to the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is [-1, 1]. + The default value is [-1., 1.]. window : (2,) array_like, optional - Window, see `domain` for its use. The default value is [-1, 1]. - - .. versionadded:: 1.6.0 + Window, see `domain` for its use. The default value is [-1., 1.]. symbol : str, optional Symbol used to represent the independent variable in string representations of the polynomial expression, e.g. for printing. diff --git a/numpy/polynomial/polynomial.pyi b/numpy/polynomial/polynomial.pyi index f8b62e529f23..c394d56affed 100644 --- a/numpy/polynomial/polynomial.pyi +++ b/numpy/polynomial/polynomial.pyi @@ -1,42 +1,109 @@ -from typing import Any - -from numpy import int_ -from numpy.typing import NDArray -from numpy.polynomial._polybase import ABCPolyBase -from numpy.polynomial.polyutils import trimcoef - -__all__: list[str] - -polytrim = trimcoef - -polydomain: NDArray[int_] -polyzero: NDArray[int_] -polyone: NDArray[int_] -polyx: NDArray[int_] - -def polyline(off, scl): ... -def polyfromroots(roots): ... -def polyadd(c1, c2): ... -def polysub(c1, c2): ... -def polymulx(c): ... -def polymul(c1, c2): ... -def polydiv(c1, c2): ... -def polypow(c, pow, maxpower=...): ... -def polyder(c, m=..., scl=..., axis=...): ... -def polyint(c, m=..., k=..., lbnd=..., scl=..., axis=...): ... -def polyval(x, c, tensor=...): ... -def polyvalfromroots(x, r, tensor=...): ... -def polyval2d(x, y, c): ... -def polygrid2d(x, y, c): ... -def polyval3d(x, y, z, c): ... -def polygrid3d(x, y, z, c): ... -def polyvander(x, deg): ... -def polyvander2d(x, y, deg): ... -def polyvander3d(x, y, z, deg): ... -def polyfit(x, y, deg, rcond=..., full=..., w=...): ... -def polyroots(c): ... 
- -class Polynomial(ABCPolyBase): - domain: Any - window: Any - basis_name: Any +from typing import Any, ClassVar, Final, overload + +import numpy as np +import numpy.typing as npt +from numpy._typing import ( + _ArrayLikeFloat_co, + _ArrayLikeNumber_co, + _FloatLike_co, + _NumberLike_co, +) + +from ._polybase import ABCPolyBase +from ._polytypes import ( + _Array1, + _Array2, + _ArrayLikeCoef_co, + _FuncBinOp, + _FuncCompanion, + _FuncDer, + _FuncFit, + _FuncFromRoots, + _FuncInteg, + _FuncLine, + _FuncPow, + _FuncRoots, + _FuncUnOp, + _FuncVal, + _FuncVal2D, + _FuncVal3D, + _FuncVander, + _FuncVander2D, + _FuncVander3D, +) +from .polyutils import trimcoef as polytrim + +__all__ = [ + "polyzero", + "polyone", + "polyx", + "polydomain", + "polyline", + "polyadd", + "polysub", + "polymulx", + "polymul", + "polydiv", + "polypow", + "polyval", + "polyvalfromroots", + "polyder", + "polyint", + "polyfromroots", + "polyvander", + "polyfit", + "polytrim", + "polyroots", + "Polynomial", + "polyval2d", + "polyval3d", + "polygrid2d", + "polygrid3d", + "polyvander2d", + "polyvander3d", + "polycompanion", +] + +polydomain: Final[_Array2[np.float64]] = ... +polyzero: Final[_Array1[np.int_]] = ... +polyone: Final[_Array1[np.int_]] = ... +polyx: Final[_Array2[np.int_]] = ... + +polyline: Final[_FuncLine] = ... +polyfromroots: Final[_FuncFromRoots] = ... +polyadd: Final[_FuncBinOp] = ... +polysub: Final[_FuncBinOp] = ... +polymulx: Final[_FuncUnOp] = ... +polymul: Final[_FuncBinOp] = ... +polydiv: Final[_FuncBinOp] = ... +polypow: Final[_FuncPow] = ... +polyder: Final[_FuncDer] = ... +polyint: Final[_FuncInteg] = ... +polyval: Final[_FuncVal] = ... +polyval2d: Final[_FuncVal2D] = ... +polyval3d: Final[_FuncVal3D] = ... + +@overload +def polyvalfromroots(x: _FloatLike_co, r: _FloatLike_co, tensor: bool = True) -> np.float64 | Any: ... +@overload +def polyvalfromroots(x: _NumberLike_co, r: _NumberLike_co, tensor: bool = True) -> np.complex128 | Any: ... 
+@overload +def polyvalfromroots(x: _ArrayLikeFloat_co, r: _ArrayLikeFloat_co, tensor: bool = True) -> npt.NDArray[np.float64 | Any]: ... +@overload +def polyvalfromroots(x: _ArrayLikeNumber_co, r: _ArrayLikeNumber_co, tensor: bool = True) -> npt.NDArray[np.complex128 | Any]: ... +@overload +def polyvalfromroots(x: _ArrayLikeCoef_co, r: _ArrayLikeCoef_co, tensor: bool = True) -> npt.NDArray[np.object_ | Any]: ... + +polygrid2d: Final[_FuncVal2D] = ... +polygrid3d: Final[_FuncVal3D] = ... +polyvander: Final[_FuncVander] = ... +polyvander2d: Final[_FuncVander2D] = ... +polyvander3d: Final[_FuncVander3D] = ... +polyfit: Final[_FuncFit] = ... +polycompanion: Final[_FuncCompanion] = ... +polyroots: Final[_FuncRoots] = ... + +class Polynomial(ABCPolyBase[None]): + basis_name: ClassVar[None] = None # pyright: ignore[reportIncompatibleMethodOverride] # pyrefly: ignore[bad-override] + domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/polynomial/polyutils.py b/numpy/polynomial/polyutils.py index 54ffe5937e8c..5e0e1af973ae 100644 --- a/numpy/polynomial/polyutils.py +++ b/numpy/polynomial/polyutils.py @@ -18,15 +18,12 @@ mapparms parameters of the linear map between domains. 
""" -import operator import functools +import operator import warnings import numpy as np -from numpy._core.multiarray import dragon4_positional, dragon4_scientific -from numpy.exceptions import RankWarning - __all__ = [ 'as_series', 'trimseq', 'trimcoef', 'getdomain', 'mapdomain', 'mapparms', 'format_float'] @@ -60,7 +57,7 @@ def trimseq(seq): for i in range(len(seq) - 1, -1, -1): if seq[i] != 0: break - return seq[:i+1] + return seq[:i + 1] def as_series(alist, trim=True): @@ -95,6 +92,7 @@ def as_series(alist, trim=True): Examples -------- + >>> import numpy as np >>> from numpy.polynomial import polyutils as pu >>> a = np.arange(4) >>> pu.as_series(a) @@ -117,25 +115,28 @@ def as_series(alist, trim=True): for a in arrays: if a.size == 0: raise ValueError("Coefficient array is empty") - if any(a.ndim != 1 for a in arrays): - raise ValueError("Coefficient array is not 1-d") + if a.ndim != 1: + raise ValueError("Coefficient array is not 1-d") if trim: arrays = [trimseq(a) for a in arrays] - if any(a.dtype == np.dtype(object) for a in arrays): + try: + dtype = np.common_type(*arrays) + except Exception as e: + object_dtype = np.dtypes.ObjectDType() + has_one_object_type = False ret = [] for a in arrays: - if a.dtype != np.dtype(object): - tmp = np.empty(len(a), dtype=np.dtype(object)) + if a.dtype != object_dtype: + tmp = np.empty(len(a), dtype=object_dtype) tmp[:] = a[:] ret.append(tmp) else: + has_one_object_type = True ret.append(a.copy()) - else: - try: - dtype = np.common_type(*arrays) - except Exception as e: + if not has_one_object_type: raise ValueError("Coefficient arrays have no common type") from e + else: ret = [np.array(a, copy=True, dtype=dtype) for a in arrays] return ret @@ -186,7 +187,7 @@ def trimcoef(c, tol=0): [c] = as_series([c]) [ind] = np.nonzero(np.abs(c) > tol) if len(ind) == 0: - return c[:1]*0 + return c[:1] * 0 else: return c[:ind[-1] + 1].copy() @@ -218,6 +219,7 @@ def getdomain(x): Examples -------- + >>> import numpy as np >>> from 
numpy.polynomial import polyutils as pu >>> points = np.arange(4)**2 - 5; points array([-5, -4, -1, 4]) @@ -279,8 +281,8 @@ def mapparms(old, new): """ oldlen = old[1] - old[0] newlen = new[1] - new[0] - off = (old[1]*new[0] - old[0]*new[1])/oldlen - scl = newlen/oldlen + off = (old[1] * new[0] - old[0] * new[1]) / oldlen + scl = newlen / oldlen return off, scl def mapdomain(x, old, new): @@ -323,6 +325,7 @@ def mapdomain(x, old, new): Examples -------- + >>> import numpy as np >>> from numpy.polynomial import polyutils as pu >>> old_domain = (-1,1) >>> new_domain = (0,2*np.pi) @@ -346,9 +349,10 @@ def mapdomain(x, old, new): array([-1.0+1.j , -0.6+0.6j, -0.2+0.2j, 0.2-0.2j, 0.6-0.6j, 1.0-1.j ]) # may vary """ - x = np.asanyarray(x) + if type(x) not in (int, float, complex) and not isinstance(x, np.generic): + x = np.asanyarray(x) off, scl = mapparms(old, new) - return off + scl*x + return off + scl * x def _nth_slice(i, ndim): @@ -401,7 +405,7 @@ def _vander_nd(vander_fs, points, degrees): ------- vander_nd : ndarray An array of shape ``points[0].shape + tuple(d + 1 for d in degrees)``. 
- """ + """ # noqa: E501 n_dims = len(vander_fs) if n_dims != len(points): raise ValueError( @@ -458,7 +462,7 @@ def _fromroots(line_f, mul_f, roots): n = len(p) while n > 1: m, r = divmod(n, 2) - tmp = [mul_f(p[i], p[i+m]) for i in range(m)] + tmp = [mul_f(p[i], p[i + m]) for i in range(m)] if r: tmp[0] = mul_f(tmp[0], p[-1]) p = tmp @@ -479,7 +483,7 @@ def _valnd(val_f, c, *args): """ args = [np.asanyarray(a) for a in args] shape0 = args[0].shape - if not all((a.shape == shape0 for a in args[1:])): + if not all(a.shape == shape0 for a in args[1:]): if len(args) == 3: raise ValueError('x, y, z are incompatible') elif len(args) == 2: @@ -529,21 +533,21 @@ def _div(mul_f, c1, c2): # c1, c2 are trimmed copies [c1, c2] = as_series([c1, c2]) if c2[-1] == 0: - raise ZeroDivisionError() + raise ZeroDivisionError # FIXME: add message with details to exception lc1 = len(c1) lc2 = len(c2) if lc1 < lc2: - return c1[:1]*0, c1 + return c1[:1] * 0, c1 elif lc2 == 1: - return c1/c2[-1], c1[:1]*0 + return c1 / c2[-1], c1[:1] * 0 else: quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) rem = c1 for i in range(lc1 - lc2, - 1, -1): - p = mul_f([0]*i + [1], c2) - q = rem[-1]/p[-1] - rem = rem[:-1] - q*p[:-1] + p = mul_f([0] * i + [1], c2) + q = rem[-1] / p[-1] + rem = rem[:-1] - q * p[:-1] quo[i] = q return quo, trimseq(rem) @@ -630,7 +634,7 @@ def _fit(vander_f, x, y, deg, rcond=None, full=False, w=None): # set rcond if rcond is None: - rcond = len(x)*np.finfo(x.dtype).eps + rcond = len(x) * np.finfo(x.dtype).eps # Determine the norms of the design matrix columns. if issubclass(lhs.dtype.type, np.complexfloating): @@ -640,22 +644,22 @@ def _fit(vander_f, x, y, deg, rcond=None, full=False, w=None): scl[scl == 0] = 1 # Solve the least squares problem. 
- c, resids, rank, s = np.linalg.lstsq(lhs.T/scl, rhs.T, rcond) - c = (c.T/scl).T + c, resids, rank, s = np.linalg.lstsq(lhs.T / scl, rhs.T, rcond) + c = (c.T / scl).T # Expand c to include non-fitted coefficients which are set to zero if deg.ndim > 0: if c.ndim == 2: - cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype) + cc = np.zeros((lmax + 1, c.shape[1]), dtype=c.dtype) else: - cc = np.zeros(lmax+1, dtype=c.dtype) + cc = np.zeros(lmax + 1, dtype=c.dtype) cc[deg] = c c = cc # warn on rank reduction if rank != order and not full: msg = "The fit may be poorly conditioned" - warnings.warn(msg, RankWarning, stacklevel=2) + warnings.warn(msg, np.exceptions.RankWarning, stacklevel=2) if full: return c, [resids, rank, s, rcond] @@ -698,7 +702,7 @@ def _pow(mul_f, c, pow, maxpower): def _as_int(x, desc): """ - Like `operator.index`, but emits a custom exception when passed an + Like `operator.index`, but emits a custom exception when passed an incorrect type Parameters @@ -719,6 +723,8 @@ def _as_int(x, desc): def format_float(x, parens=False): + from numpy._core.multiarray import dragon4_positional, dragon4_scientific + if not np.issubdtype(type(x), np.floating): return str(x) @@ -732,7 +738,7 @@ def format_float(x, parens=False): exp_format = False if x != 0: a = np.abs(x) - if a >= 1.e8 or a < 10**min(0, -(opts['precision']-1)//2): + if a >= 1.e8 or a < 10**min(0, -(opts['precision'] - 1) // 2): exp_format = True trim, unique = '0', True @@ -741,7 +747,7 @@ def format_float(x, parens=False): if exp_format: s = dragon4_scientific(x, precision=opts['precision'], - unique=unique, trim=trim, + unique=unique, trim=trim, sign=opts['sign'] == '+') if parens: s = '(' + s + ')' diff --git a/numpy/polynomial/polyutils.pyi b/numpy/polynomial/polyutils.pyi index 0eccd6cdc2a4..fbaaf7d22880 100644 --- a/numpy/polynomial/polyutils.pyi +++ b/numpy/polynomial/polyutils.pyi @@ -1,9 +1,295 @@ -__all__: list[str] - -def trimseq(seq): ... -def as_series(alist, trim=...): ... 
-def trimcoef(c, tol=...): ... -def getdomain(x): ... -def mapparms(old, new): ... -def mapdomain(x, old, new): ... -def format_float(x, parens=...): ... +from collections.abc import Callable, Iterable, Sequence +from typing import Final, Literal, Protocol, SupportsIndex, overload, type_check_only + +import numpy as np +import numpy.typing as npt +from numpy._typing import ( + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeObject_co, + _FloatLike_co, + _NumberLike_co, +) + +from ._polytypes import ( + _AnyInt, + _Array2, + _ArrayLikeCoef_co, + _CoefArray, + _CoefLike_co, + _CoefSeries, + _ComplexArray, + _ComplexSeries, + _FloatArray, + _FloatSeries, + _FuncBinOp, + _ObjectArray, + _ObjectSeries, + _SeriesLikeCoef_co, + _SeriesLikeComplex_co, + _SeriesLikeFloat_co, + _SeriesLikeInt_co, + _SeriesLikeObject_co, + _Tuple2, +) + +__all__ = ["as_series", "format_float", "getdomain", "mapdomain", "mapparms", "trimcoef", "trimseq"] + +type _AnyLineF = Callable[[float, float], _CoefArray] +type _AnyMulF = Callable[[np.ndarray | list[int], np.ndarray], _CoefArray] +type _AnyVanderF = Callable[[np.ndarray, int], _CoefArray] + +@type_check_only +class _ValFunc[T](Protocol): + def __call__(self, x: np.ndarray, c: T, /, *, tensor: bool = True) -> T: ... + +### + +@overload +def as_series(alist: npt.NDArray[np.integer] | _FloatArray, trim: bool = True) -> list[_FloatSeries]: ... +@overload +def as_series(alist: _ComplexArray, trim: bool = True) -> list[_ComplexSeries]: ... +@overload +def as_series(alist: _ObjectArray, trim: bool = True) -> list[_ObjectSeries]: ... +@overload +def as_series(alist: Iterable[_FloatArray | npt.NDArray[np.integer]], trim: bool = True) -> list[_FloatSeries]: ... +@overload +def as_series(alist: Iterable[_ComplexArray], trim: bool = True) -> list[_ComplexSeries]: ... +@overload +def as_series(alist: Iterable[_ObjectArray], trim: bool = True) -> list[_ObjectSeries]: ... 
+@overload +def as_series(alist: Iterable[_SeriesLikeFloat_co | float], trim: bool = True) -> list[_FloatSeries]: ... +@overload +def as_series(alist: Iterable[_SeriesLikeComplex_co | complex], trim: bool = True) -> list[_ComplexSeries]: ... +@overload +def as_series(alist: Iterable[_SeriesLikeCoef_co | object], trim: bool = True) -> list[_ObjectSeries]: ... + +# +def trimseq[SeqT: _CoefArray | Sequence[_CoefLike_co]](seq: SeqT) -> SeqT: ... + +# +@overload +def trimcoef(c: npt.NDArray[np.integer] | _FloatArray, tol: _FloatLike_co = 0) -> _FloatSeries: ... +@overload +def trimcoef(c: _ComplexArray, tol: _FloatLike_co = 0) -> _ComplexSeries: ... +@overload +def trimcoef(c: _ObjectArray, tol: _FloatLike_co = 0) -> _ObjectSeries: ... +@overload +def trimcoef(c: _SeriesLikeFloat_co | float, tol: _FloatLike_co = 0) -> _FloatSeries: ... +@overload +def trimcoef(c: _SeriesLikeComplex_co | complex, tol: _FloatLike_co = 0) -> _ComplexSeries: ... +@overload +def trimcoef(c: _SeriesLikeCoef_co | object, tol: _FloatLike_co = 0) -> _ObjectSeries: ... + +# +@overload +def getdomain(x: _FloatArray | npt.NDArray[np.integer]) -> _Array2[np.float64]: ... +@overload +def getdomain(x: _ComplexArray) -> _Array2[np.complex128]: ... +@overload +def getdomain(x: _ObjectArray) -> _Array2[np.object_]: ... +@overload +def getdomain(x: _SeriesLikeFloat_co | float) -> _Array2[np.float64]: ... +@overload +def getdomain(x: _SeriesLikeComplex_co | complex) -> _Array2[np.complex128]: ... +@overload +def getdomain(x: _SeriesLikeCoef_co | object) -> _Array2[np.object_]: ... + +# +@overload +def mapparms(old: npt.NDArray[np.floating | np.integer], new: npt.NDArray[np.floating | np.integer]) -> _Tuple2[np.floating]: ... +@overload +def mapparms(old: npt.NDArray[np.number], new: npt.NDArray[np.number]) -> _Tuple2[np.complexfloating]: ... +@overload +def mapparms(old: npt.NDArray[np.object_ | np.number], new: npt.NDArray[np.object_ | np.number]) -> _Tuple2[object]: ... 
+@overload +def mapparms(old: Sequence[float], new: Sequence[float]) -> _Tuple2[float]: ... +@overload +def mapparms(old: Sequence[complex], new: Sequence[complex]) -> _Tuple2[complex]: ... +@overload +def mapparms(old: _SeriesLikeFloat_co, new: _SeriesLikeFloat_co) -> _Tuple2[np.floating]: ... +@overload +def mapparms(old: _SeriesLikeComplex_co, new: _SeriesLikeComplex_co) -> _Tuple2[np.complexfloating]: ... +@overload +def mapparms(old: _SeriesLikeCoef_co, new: _SeriesLikeCoef_co) -> _Tuple2[object]: ... + +# +@overload +def mapdomain(x: _FloatLike_co, old: _SeriesLikeFloat_co, new: _SeriesLikeFloat_co) -> np.floating: ... +@overload +def mapdomain(x: _NumberLike_co, old: _SeriesLikeComplex_co, new: _SeriesLikeComplex_co) -> np.complexfloating: ... +@overload +def mapdomain( + x: npt.NDArray[np.floating | np.integer], + old: npt.NDArray[np.floating | np.integer], + new: npt.NDArray[np.floating | np.integer], +) -> _FloatSeries: ... +@overload +def mapdomain(x: npt.NDArray[np.number], old: npt.NDArray[np.number], new: npt.NDArray[np.number]) -> _ComplexSeries: ... +@overload +def mapdomain( + x: npt.NDArray[np.object_ | np.number], + old: npt.NDArray[np.object_ | np.number], + new: npt.NDArray[np.object_ | np.number], +) -> _ObjectSeries: ... +@overload +def mapdomain(x: _SeriesLikeFloat_co, old: _SeriesLikeFloat_co, new: _SeriesLikeFloat_co) -> _FloatSeries: ... +@overload +def mapdomain(x: _SeriesLikeComplex_co, old: _SeriesLikeComplex_co, new: _SeriesLikeComplex_co) -> _ComplexSeries: ... +@overload +def mapdomain(x: _SeriesLikeCoef_co, old: _SeriesLikeCoef_co, new: _SeriesLikeCoef_co) -> _ObjectSeries: ... +@overload +def mapdomain(x: _CoefLike_co, old: _SeriesLikeCoef_co, new: _SeriesLikeCoef_co) -> object: ... + +# +def _nth_slice(i: SupportsIndex, ndim: SupportsIndex) -> tuple[slice | None, ...]: ... 
+ +# keep in sync with `vander_nd_flat` +@overload +def _vander_nd( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[_ArrayLikeFloat_co], + degrees: Sequence[SupportsIndex], +) -> _FloatArray: ... +@overload +def _vander_nd( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[_ArrayLikeComplex_co], + degrees: Sequence[SupportsIndex], +) -> _ComplexArray: ... +@overload +def _vander_nd( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[_ArrayLikeObject_co | _ArrayLikeComplex_co], + degrees: Sequence[SupportsIndex], +) -> _ObjectArray: ... +@overload +def _vander_nd( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[npt.ArrayLike], + degrees: Sequence[SupportsIndex], +) -> _CoefArray: ... + +# keep in sync with `vander_nd` +@overload +def _vander_nd_flat( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[_ArrayLikeFloat_co], + degrees: Sequence[SupportsIndex], +) -> _FloatArray: ... +@overload +def _vander_nd_flat( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[_ArrayLikeComplex_co], + degrees: Sequence[SupportsIndex], +) -> _ComplexArray: ... +@overload +def _vander_nd_flat( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[_ArrayLikeObject_co | _ArrayLikeComplex_co], + degrees: Sequence[SupportsIndex], +) -> _ObjectArray: ... +@overload +def _vander_nd_flat( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[npt.ArrayLike], + degrees: Sequence[SupportsIndex], +) -> _CoefArray: ... + +# keep in sync with `._polytypes._FuncFromRoots` +@overload +def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeFloat_co) -> _FloatSeries: ... +@overload +def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeComplex_co) -> _ComplexSeries: ... +@overload +def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeObject_co) -> _ObjectSeries: ... +@overload +def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeCoef_co) -> _CoefSeries: ... 
+ +# keep in sync with `_gridnd` +def _valnd[T](val_f: _ValFunc[T], c: T, *args: npt.ArrayLike) -> T: ... + +# keep in sync with `_valnd` +def _gridnd[T](val_f: _ValFunc[T], c: T, *args: npt.ArrayLike) -> T: ... + +# keep in sync with `_polytypes._FuncBinOp` +@overload +def _div(mul_f: _AnyMulF, c1: _SeriesLikeFloat_co, c2: _SeriesLikeFloat_co) -> _Tuple2[_FloatSeries]: ... +@overload +def _div(mul_f: _AnyMulF, c1: _SeriesLikeComplex_co, c2: _SeriesLikeComplex_co) -> _Tuple2[_ComplexSeries]: ... +@overload +def _div(mul_f: _AnyMulF, c1: _SeriesLikeObject_co, c2: _SeriesLikeObject_co) -> _Tuple2[_ObjectSeries]: ... +@overload +def _div(mul_f: _AnyMulF, c1: _SeriesLikeCoef_co, c2: _SeriesLikeCoef_co) -> _Tuple2[_CoefSeries]: ... + +_add: Final[_FuncBinOp] = ... +_sub: Final[_FuncBinOp] = ... + +# keep in sync with `_polytypes._FuncPow` +@overload +def _pow(mul_f: _AnyMulF, c: _SeriesLikeFloat_co, pow: _AnyInt, maxpower: _AnyInt | None) -> _FloatSeries: ... +@overload +def _pow(mul_f: _AnyMulF, c: _SeriesLikeComplex_co, pow: _AnyInt, maxpower: _AnyInt | None) -> _ComplexSeries: ... +@overload +def _pow(mul_f: _AnyMulF, c: _SeriesLikeObject_co, pow: _AnyInt, maxpower: _AnyInt | None) -> _ObjectSeries: ... +@overload +def _pow(mul_f: _AnyMulF, c: _SeriesLikeCoef_co, pow: _AnyInt, maxpower: _AnyInt | None) -> _CoefSeries: ... + +# keep in sync with `_polytypes._FuncFit` +@overload +def _fit( + vander_f: _AnyVanderF, + x: _SeriesLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: _SeriesLikeInt_co, + rcond: _FloatLike_co | None = None, + full: Literal[False] = False, + w: _SeriesLikeFloat_co | None = None, +) -> _FloatArray: ... +@overload +def _fit( + vander_f: _AnyVanderF, + x: _SeriesLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: _SeriesLikeInt_co, + rcond: _FloatLike_co | None = None, + full: Literal[False] = False, + w: _SeriesLikeComplex_co | None = None, +) -> _ComplexArray: ... 
+@overload +def _fit( + vander_f: _AnyVanderF, + x: _SeriesLikeCoef_co, + y: _ArrayLikeCoef_co, + deg: _SeriesLikeInt_co, + rcond: _FloatLike_co | None = None, + full: Literal[False] = False, + w: _SeriesLikeCoef_co | None = None, +) -> _CoefArray: ... +@overload +def _fit( + vander_f: _AnyVanderF, + x: _SeriesLikeCoef_co, + y: _SeriesLikeCoef_co, + deg: _SeriesLikeInt_co, + rcond: _FloatLike_co | None, + full: Literal[True], + w: _SeriesLikeCoef_co | None = None, +) -> tuple[_CoefSeries, Sequence[np.inexact | np.int32]]: ... +@overload +def _fit( + vander_f: _AnyVanderF, + x: _SeriesLikeCoef_co, + y: _SeriesLikeCoef_co, + deg: _SeriesLikeInt_co, + rcond: _FloatLike_co | None = None, + *, + full: Literal[True], + w: _SeriesLikeCoef_co | None = None, +) -> tuple[_CoefSeries, Sequence[np.inexact | np.int32]]: ... + +# +def _as_int(x: SupportsIndex, desc: str) -> int: ... + +# +def format_float(x: _FloatLike_co, parens: bool = False) -> str: ... diff --git a/numpy/polynomial/tests/test_chebyshev.py b/numpy/polynomial/tests/test_chebyshev.py index 2f54bebfdb27..14777ac60375 100644 --- a/numpy/polynomial/tests/test_chebyshev.py +++ b/numpy/polynomial/tests/test_chebyshev.py @@ -6,14 +6,13 @@ import numpy as np import numpy.polynomial.chebyshev as cheb from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - ) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises def trim(x): return cheb.chebtrim(x, tol=1e-6) + T0 = [1] T1 = [0, 1] T2 = [-1, 0, 2] @@ -32,15 +31,15 @@ class TestPrivate: def test__cseries_to_zseries(self): for i in range(5): - inp = np.array([2] + [1]*i, np.double) - tgt = np.array([.5]*i + [2] + [.5]*i, np.double) + inp = np.array([2] + [1] * i, np.double) + tgt = np.array([.5] * i + [2] + [.5] * i, np.double) res = cheb._cseries_to_zseries(inp) assert_equal(res, tgt) def test__zseries_to_cseries(self): for i in range(5): - inp = 
np.array([.5]*i + [2] + [.5]*i, np.double) - tgt = np.array([2] + [1]*i, np.double) + inp = np.array([.5] * i + [2] + [.5] * i, np.double) + tgt = np.array([2] + [1] * i, np.double) res = cheb._zseries_to_cseries(inp) assert_equal(res, tgt) @@ -69,7 +68,7 @@ def test_chebadd(self): tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] += 1 - res = cheb.chebadd([0]*i + [1], [0]*j + [1]) + res = cheb.chebadd([0] * i + [1], [0] * j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_chebsub(self): @@ -79,15 +78,15 @@ def test_chebsub(self): tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] -= 1 - res = cheb.chebsub([0]*i + [1], [0]*j + [1]) + res = cheb.chebsub([0] * i + [1], [0] * j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_chebmulx(self): assert_equal(cheb.chebmulx([0]), [0]) assert_equal(cheb.chebmulx([1]), [0, 1]) for i in range(1, 5): - ser = [0]*i + [1] - tgt = [0]*(i - 1) + [.5, 0, .5] + ser = [0] * i + [1] + tgt = [0] * (i - 1) + [.5, 0, .5] assert_equal(cheb.chebmulx(ser), tgt) def test_chebmul(self): @@ -97,15 +96,15 @@ def test_chebmul(self): tgt = np.zeros(i + j + 1) tgt[i + j] += .5 tgt[abs(i - j)] += .5 - res = cheb.chebmul([0]*i + [1], [0]*j + [1]) + res = cheb.chebmul([0] * i + [1], [0] * j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_chebdiv(self): for i in range(5): for j in range(5): msg = f"At i={i}, j={j}" - ci = [0]*i + [1] - cj = [0]*j + [1] + ci = [0] * i + [1] + cj = [0] * j + [1] tgt = cheb.chebadd(ci, cj) quo, rem = cheb.chebdiv(tgt, ci) res = cheb.chebadd(cheb.chebmul(quo, ci), rem) @@ -116,7 +115,7 @@ def test_chebpow(self): for j in range(5): msg = f"At i={i}, j={j}" c = np.arange(i + 1) - tgt = reduce(cheb.chebmul, [c]*j, np.array([1])) + tgt = reduce(cheb.chebmul, [c] * j, np.array([1])) res = cheb.chebpow(c, j) assert_equal(trim(res), trim(tgt), err_msg=msg) @@ -128,25 +127,25 @@ class TestEvaluation: c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) # some random values in [-1, 1) - x = 
np.random.random((3, 5))*2 - 1 + x = np.random.random((3, 5)) * 2 - 1 y = polyval(x, [1., 2., 3.]) def test_chebval(self): - #check empty input + # check empty input assert_equal(cheb.chebval([], [1]).size, 0) - #check normal input) + # check normal input) x = np.linspace(-1, 1) y = [polyval(x, c) for c in Tlist] for i in range(10): msg = f"At i={i}" tgt = y[i] - res = cheb.chebval(x, [0]*i + [1]) + res = cheb.chebval(x, [0] * i + [1]) assert_almost_equal(res, tgt, err_msg=msg) - #check that shape is preserved + # check that shape is preserved for i in range(3): - dims = [2]*i + dims = [2] * i x = np.zeros(dims) assert_equal(cheb.chebval(x, [1]).shape, dims) assert_equal(cheb.chebval(x, [1, 0]).shape, dims) @@ -156,15 +155,15 @@ def test_chebval2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, cheb.chebval2d, x1, x2[:2], self.c2d) - #test values - tgt = y1*y2 + # test values + tgt = y1 * y2 res = cheb.chebval2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = cheb.chebval2d(z, z, self.c2d) assert_(res.shape == (2, 3)) @@ -173,15 +172,15 @@ def test_chebval3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, cheb.chebval3d, x1, x2, x3[:2], self.c3d) - #test values - tgt = y1*y2*y3 + # test values + tgt = y1 * y2 * y3 res = cheb.chebval3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = cheb.chebval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) @@ -190,29 +189,29 @@ def test_chebgrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j->ij', y1, y2) res = cheb.chebgrid2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = cheb.chebgrid2d(z, z, self.c2d) - assert_(res.shape == (2, 3)*2) + assert_(res.shape == (2, 3) * 2) def 
test_chebgrid3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j,k->ijk', y1, y2, y3) res = cheb.chebgrid3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = cheb.chebgrid3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)*3) + assert_(res.shape == (2, 3) * 3) class TestIntegral: @@ -228,15 +227,15 @@ def test_chebint(self): # test integration of zero polynomial for i in range(2, 5): - k = [0]*(i - 2) + [1] + k = [0] * (i - 2) + [1] res = cheb.chebint([0], m=i, k=k) assert_almost_equal(res, [0, 1]) # check single integration with integration constant for i in range(5): scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [1/scl] + pol = [0] * i + [1] + tgt = [i] + [0] * i + [1 / scl] chebpol = cheb.poly2cheb(pol) chebint = cheb.chebint(chebpol, m=1, k=[i]) res = cheb.cheb2poly(chebint) @@ -245,7 +244,7 @@ def test_chebint(self): # check single integration with integration constant and lbnd for i in range(5): scl = i + 1 - pol = [0]*i + [1] + pol = [0] * i + [1] chebpol = cheb.poly2cheb(pol) chebint = cheb.chebint(chebpol, m=1, k=[i], lbnd=-1) assert_almost_equal(cheb.chebval(-1, chebint), i) @@ -253,8 +252,8 @@ def test_chebint(self): # check single integration with integration constant and scaling for i in range(5): scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [2/scl] + pol = [0] * i + [1] + tgt = [i] + [0] * i + [2 / scl] chebpol = cheb.poly2cheb(pol) chebint = cheb.chebint(chebpol, m=1, k=[i], scl=2) res = cheb.cheb2poly(chebint) @@ -263,7 +262,7 @@ def test_chebint(self): # check multiple integrations with default k for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = cheb.chebint(tgt, m=1) @@ -273,7 +272,7 @@ def test_chebint(self): # check multiple integrations with defined k for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k 
in range(j): tgt = cheb.chebint(tgt, m=1, k=[k]) @@ -283,7 +282,7 @@ def test_chebint(self): # check multiple integrations with lbnd for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = cheb.chebint(tgt, m=1, k=[k], lbnd=-1) @@ -293,7 +292,7 @@ def test_chebint(self): # check multiple integrations with scaling for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = cheb.chebint(tgt, m=1, k=[k], scl=2) @@ -326,21 +325,21 @@ def test_chebder(self): # check that zeroth derivative does nothing for i in range(5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = cheb.chebder(tgt, m=0) assert_equal(trim(res), trim(tgt)) # check that derivation is the inverse of integration for i in range(5): for j in range(2, 5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = cheb.chebder(cheb.chebint(tgt, m=j), m=j) assert_almost_equal(trim(res), trim(tgt)) # check derivation with scaling for i in range(5): for j in range(2, 5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = cheb.chebder(cheb.chebint(tgt, m=j, scl=2), m=j, scl=.5) assert_almost_equal(trim(res), trim(tgt)) @@ -359,7 +358,7 @@ def test_chebder_axis(self): class TestVander: # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 + x = np.random.random((3, 5)) * 2 - 1 def test_chebvander(self): # check for 1d x @@ -367,7 +366,7 @@ def test_chebvander(self): v = cheb.chebvander(x, 3) assert_(v.shape == (3, 4)) for i in range(4): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_almost_equal(v[..., i], cheb.chebval(x, coef)) # check for 2d x @@ -375,7 +374,7 @@ def test_chebvander(self): v = cheb.chebvander(x, 3) assert_(v.shape == (3, 2, 4)) for i in range(4): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_almost_equal(v[..., i], cheb.chebval(x, coef)) def test_chebvander2d(self): @@ -409,7 +408,7 @@ class TestFitting: def test_chebfit(self): def f(x): - return x*(x - 1)*(x - 
2) + return x * (x - 1) * (x - 2) def f2(x): return x**4 + x**2 + 1 @@ -501,8 +500,8 @@ def powx(x, p): return x**p x = np.linspace(-1, 1, 10) - for deg in range(0, 10): - for p in range(0, deg + 1): + for deg in range(10): + for p in range(deg + 1): c = cheb.chebinterpolate(powx, deg, (p,)) assert_almost_equal(cheb.chebval(x, c), powx(x, p), decimal=12) @@ -515,7 +514,7 @@ def test_raises(self): def test_dimensions(self): for i in range(1, 5): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_(cheb.chebcompanion(coef).shape == (i, i)) def test_linear_root(self): @@ -532,7 +531,7 @@ def test_100(self): # functions like Laguerre can be very confusing. v = cheb.chebvander(x, 99) vv = np.dot(v.T * w, v) - vd = 1/np.sqrt(vv.diagonal()) + vd = 1 / np.sqrt(vv.diagonal()) vv = vd[:, None] * vv * vd assert_almost_equal(vv, np.eye(100)) @@ -547,9 +546,9 @@ def test_chebfromroots(self): res = cheb.chebfromroots([]) assert_almost_equal(trim(res), [1]) for i in range(1, 5): - roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) - tgt = [0]*i + [1] - res = cheb.chebfromroots(roots)*2**(i-1) + roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2]) + tgt = [0] * i + [1] + res = cheb.chebfromroots(roots) * 2**(i - 1) assert_almost_equal(trim(res), trim(tgt)) def test_chebroots(self): @@ -576,24 +575,24 @@ def test_chebline(self): def test_cheb2poly(self): for i in range(10): - assert_almost_equal(cheb.cheb2poly([0]*i + [1]), Tlist[i]) + assert_almost_equal(cheb.cheb2poly([0] * i + [1]), Tlist[i]) def test_poly2cheb(self): for i in range(10): - assert_almost_equal(cheb.poly2cheb(Tlist[i]), [0]*i + [1]) + assert_almost_equal(cheb.poly2cheb(Tlist[i]), [0] * i + [1]) def test_weight(self): x = np.linspace(-1, 1, 11)[1:-1] - tgt = 1./(np.sqrt(1 + x) * np.sqrt(1 - x)) + tgt = 1. 
/ (np.sqrt(1 + x) * np.sqrt(1 - x)) res = cheb.chebweight(x) assert_almost_equal(res, tgt) def test_chebpts1(self): - #test exceptions + # test exceptions assert_raises(ValueError, cheb.chebpts1, 1.5) assert_raises(ValueError, cheb.chebpts1, 0) - #test points + # test points tgt = [0] assert_almost_equal(cheb.chebpts1(1), tgt) tgt = [-0.70710678118654746, 0.70710678118654746] @@ -604,11 +603,11 @@ def test_chebpts1(self): assert_almost_equal(cheb.chebpts1(4), tgt) def test_chebpts2(self): - #test exceptions + # test exceptions assert_raises(ValueError, cheb.chebpts2, 1.5) assert_raises(ValueError, cheb.chebpts2, 1) - #test points + # test points tgt = [-1, 1] assert_almost_equal(cheb.chebpts2(2), tgt) tgt = [-1, 0, 1] diff --git a/numpy/polynomial/tests/test_classes.py b/numpy/polynomial/tests/test_classes.py index 75672a148524..156dccf6ea88 100644 --- a/numpy/polynomial/tests/test_classes.py +++ b/numpy/polynomial/tests/test_classes.py @@ -7,13 +7,18 @@ from numbers import Number import pytest + import numpy as np -from numpy.polynomial import ( - Polynomial, Legendre, Chebyshev, Laguerre, Hermite, HermiteE) -from numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - ) from numpy.exceptions import RankWarning +from numpy.polynomial import ( + Chebyshev, + Hermite, + HermiteE, + Laguerre, + Legendre, + Polynomial, +) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises # # fixtures @@ -29,6 +34,7 @@ def Poly(request): return request.param + # # helper functions # @@ -57,12 +63,12 @@ def test_conversion(Poly1, Poly2): x = np.linspace(0, 1, 10) coef = random((3,)) - d1 = Poly1.domain + random((2,))*.25 - w1 = Poly1.window + random((2,))*.25 + d1 = Poly1.domain + random((2,)) * .25 + w1 = Poly1.window + random((2,)) * .25 p1 = Poly1(coef, domain=d1, window=w1) - d2 = Poly2.domain + random((2,))*.25 - w2 = Poly2.window + random((2,))*.25 + d2 = Poly2.domain + random((2,)) * .25 + w2 = Poly2.window + 
random((2,)) * .25 p2 = p1.convert(kind=Poly2, domain=d2, window=w2) assert_almost_equal(p2.domain, d2) @@ -74,12 +80,12 @@ def test_cast(Poly1, Poly2): x = np.linspace(0, 1, 10) coef = random((3,)) - d1 = Poly1.domain + random((2,))*.25 - w1 = Poly1.window + random((2,))*.25 + d1 = Poly1.domain + random((2,)) * .25 + w1 = Poly1.window + random((2,)) * .25 p1 = Poly1(coef, domain=d1, window=w1) - d2 = Poly2.domain + random((2,))*.25 - w2 = Poly2.window + random((2,))*.25 + d2 = Poly2.domain + random((2,)) * .25 + w2 = Poly2.window + random((2,)) * .25 p2 = Poly2.cast(p1, domain=d2, window=w2) assert_almost_equal(p2.domain, d2) @@ -93,8 +99,8 @@ def test_cast(Poly1, Poly2): def test_identity(Poly): - d = Poly.domain + random((2,))*.25 - w = Poly.window + random((2,))*.25 + d = Poly.domain + random((2,)) * .25 + w = Poly.window + random((2,)) * .25 x = np.linspace(d[0], d[1], 11) p = Poly.identity(domain=d, window=w) assert_equal(p.domain, d) @@ -103,19 +109,19 @@ def test_identity(Poly): def test_basis(Poly): - d = Poly.domain + random((2,))*.25 - w = Poly.window + random((2,))*.25 + d = Poly.domain + random((2,)) * .25 + w = Poly.window + random((2,)) * .25 p = Poly.basis(5, domain=d, window=w) assert_equal(p.domain, d) assert_equal(p.window, w) - assert_equal(p.coef, [0]*5 + [1]) + assert_equal(p.coef, [0] * 5 + [1]) def test_fromroots(Poly): # check that requested roots are zeros of a polynomial # of correct degree, domain, and window. 
- d = Poly.domain + random((2,))*.25 - w = Poly.window + random((2,))*.25 + d = Poly.domain + random((2,)) * .25 + w = Poly.window + random((2,)) * .25 r = random((5,)) p1 = Poly.fromroots(r, domain=d, window=w) assert_equal(p1.degree(), len(r)) @@ -144,7 +150,7 @@ def test_bad_conditioned_fit(Poly): def test_fit(Poly): def f(x): - return x*(x - 1)*(x - 2) + return x * (x - 1) * (x - 2) x = np.linspace(0, 3) y = f(x) @@ -155,8 +161,8 @@ def f(x): assert_equal(p.degree(), 3) # check with given domains and window - d = Poly.domain + random((2,))*.25 - w = Poly.window + random((2,))*.25 + d = Poly.domain + random((2,)) * .25 + w = Poly.window + random((2,)) * .25 p = Poly.fit(x, y, 3, domain=d, window=w) assert_almost_equal(p(x), y) assert_almost_equal(p.domain, d) @@ -176,7 +182,7 @@ def f(x): # check that fit accepts weights. w = np.zeros_like(x) - z = y + random(y.shape)*.25 + z = y + random(y.shape) * .25 w[::2] = 1 p1 = Poly.fit(x[::2], z[::2], 3) p2 = Poly.fit(x, z, 3, w=w) @@ -291,7 +297,7 @@ def test_floordiv(Poly): assert_poly_almost_equal(p4 // np.array(c2), p1) assert_poly_almost_equal(np.array(c4) // p2, p1) assert_poly_almost_equal(2 // p2, Poly([0])) - assert_poly_almost_equal(p2 // 2, 0.5*p2) + assert_poly_almost_equal(p2 // 2, 0.5 * p2) assert_raises( TypeError, op.floordiv, p1, Poly([0], domain=Poly.domain + 1)) assert_raises( @@ -305,7 +311,7 @@ def test_floordiv(Poly): def test_truediv(Poly): # true division is valid only if the denominator is a Number and # not a python bool. 
- p1 = Poly([1,2,3]) + p1 = Poly([1, 2, 3]) p2 = p1 * 5 for stype in np.ScalarType: @@ -322,7 +328,7 @@ def test_truediv(Poly): s = stype(5, 0) assert_poly_almost_equal(op.truediv(p2, s), p1) assert_raises(TypeError, op.truediv, s, p2) - for s in [tuple(), list(), dict(), bool(), np.array([1])]: + for s in [(), [], {}, False, np.array([1])]: assert_raises(TypeError, op.truediv, p2, s) assert_raises(TypeError, op.truediv, s, p2) for ptype in classes: @@ -388,7 +394,7 @@ def test_divmod(Poly): assert_poly_almost_equal(quo, p1) assert_poly_almost_equal(rem, p3) quo, rem = divmod(p2, 2) - assert_poly_almost_equal(quo, 0.5*p2) + assert_poly_almost_equal(quo, 0.5 * p2) assert_poly_almost_equal(rem, Poly([0])) quo, rem = divmod(2, p2) assert_poly_almost_equal(quo, Poly([0])) @@ -430,26 +436,26 @@ def test_copy(Poly): def test_integ(Poly): P = Polynomial # Check defaults - p0 = Poly.cast(P([1*2, 2*3, 3*4])) + p0 = Poly.cast(P([1 * 2, 2 * 3, 3 * 4])) p1 = P.cast(p0.integ()) p2 = P.cast(p0.integ(2)) assert_poly_almost_equal(p1, P([0, 2, 3, 4])) assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1])) # Check with k - p0 = Poly.cast(P([1*2, 2*3, 3*4])) + p0 = Poly.cast(P([1 * 2, 2 * 3, 3 * 4])) p1 = P.cast(p0.integ(k=1)) p2 = P.cast(p0.integ(2, k=[1, 1])) assert_poly_almost_equal(p1, P([1, 2, 3, 4])) assert_poly_almost_equal(p2, P([1, 1, 1, 1, 1])) # Check with lbnd - p0 = Poly.cast(P([1*2, 2*3, 3*4])) + p0 = Poly.cast(P([1 * 2, 2 * 3, 3 * 4])) p1 = P.cast(p0.integ(lbnd=1)) p2 = P.cast(p0.integ(2, lbnd=1)) assert_poly_almost_equal(p1, P([-9, 2, 3, 4])) assert_poly_almost_equal(p2, P([6, -9, 1, 1, 1])) # Check scaling - d = 2*Poly.domain - p0 = Poly.cast(P([1*2, 2*3, 3*4]), domain=d) + d = 2 * Poly.domain + p0 = Poly.cast(P([1 * 2, 2 * 3, 3 * 4]), domain=d) p1 = P.cast(p0.integ()) p2 = P.cast(p0.integ(2)) assert_poly_almost_equal(p1, P([0, 2, 3, 4])) @@ -459,8 +465,8 @@ def test_integ(Poly): def test_deriv(Poly): # Check that the derivative is the inverse of integration. 
It is # assumes that the integration has been checked elsewhere. - d = Poly.domain + random((2,))*.25 - w = Poly.window + random((2,))*.25 + d = Poly.domain + random((2,)) * .25 + w = Poly.window + random((2,)) * .25 p1 = Poly([1, 2, 3], domain=d, window=w) p2 = p1.integ(2, k=[1, 2]) p3 = p1.integ(1, k=[1]) @@ -475,8 +481,8 @@ def test_deriv(Poly): def test_linspace(Poly): - d = Poly.domain + random((2,))*.25 - w = Poly.window + random((2,))*.25 + d = Poly.domain + random((2,)) * .25 + w = Poly.window + random((2,)) * .25 p = Poly([1, 2, 3], domain=d, window=w) # check default domain xtgt = np.linspace(d[0], d[1], 20) @@ -493,8 +499,8 @@ def test_linspace(Poly): def test_pow(Poly): - d = Poly.domain + random((2,))*.25 - w = Poly.window + random((2,))*.25 + d = Poly.domain + random((2,)) * .25 + w = Poly.window + random((2,)) * .25 tgt = Poly([1], domain=d, window=w) tst = Poly([1, 2, 3], domain=d, window=w) for i in range(5): @@ -518,7 +524,7 @@ def test_call(Poly): # Check defaults p = Poly.cast(P([1, 2, 3])) - tgt = 1 + x*(2 + 3*x) + tgt = 1 + x * (2 + 3 * x) res = p(x) assert_almost_equal(res, tgt) @@ -565,7 +571,7 @@ def test_mapparms(Poly): p = Poly([1], domain=d, window=w) assert_almost_equal([0, 1], p.mapparms()) # - w = 2*d + 1 + w = 2 * d + 1 p = Poly([1], domain=d, window=w) assert_almost_equal([1, 2], p.mapparms()) @@ -601,7 +607,7 @@ def powx(x, p): return x**p x = np.linspace(0, 2, 10) - for deg in range(0, 10): - for t in range(0, deg + 1): + for deg in range(10): + for t in range(deg + 1): p = Chebyshev.interpolate(powx, deg, domain=[0, 2], args=(t,)) assert_almost_equal(p(x), powx(x, t), decimal=11) diff --git a/numpy/polynomial/tests/test_hermite.py b/numpy/polynomial/tests/test_hermite.py index 53ee0844e3c5..a289ba0b50cc 100644 --- a/numpy/polynomial/tests/test_hermite.py +++ b/numpy/polynomial/tests/test_hermite.py @@ -6,9 +6,7 @@ import numpy as np import numpy.polynomial.hermite as herm from numpy.polynomial.polynomial import polyval -from 
numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - ) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises H0 = np.array([1]) H1 = np.array([0, 2]) @@ -53,7 +51,7 @@ def test_hermadd(self): tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] += 1 - res = herm.hermadd([0]*i + [1], [0]*j + [1]) + res = herm.hermadd([0] * i + [1], [0] * j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_hermsub(self): @@ -63,37 +61,37 @@ def test_hermsub(self): tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] -= 1 - res = herm.hermsub([0]*i + [1], [0]*j + [1]) + res = herm.hermsub([0] * i + [1], [0] * j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_hermmulx(self): assert_equal(herm.hermmulx([0]), [0]) assert_equal(herm.hermmulx([1]), [0, .5]) for i in range(1, 5): - ser = [0]*i + [1] - tgt = [0]*(i - 1) + [i, 0, .5] + ser = [0] * i + [1] + tgt = [0] * (i - 1) + [i, 0, .5] assert_equal(herm.hermmulx(ser), tgt) def test_hermmul(self): # check values of result for i in range(5): - pol1 = [0]*i + [1] + pol1 = [0] * i + [1] val1 = herm.hermval(self.x, pol1) for j in range(5): msg = f"At i={i}, j={j}" - pol2 = [0]*j + [1] + pol2 = [0] * j + [1] val2 = herm.hermval(self.x, pol2) pol3 = herm.hermmul(pol1, pol2) val3 = herm.hermval(self.x, pol3) assert_(len(pol3) == i + j + 1, msg) - assert_almost_equal(val3, val1*val2, err_msg=msg) + assert_almost_equal(val3, val1 * val2, err_msg=msg) def test_hermdiv(self): for i in range(5): for j in range(5): msg = f"At i={i}, j={j}" - ci = [0]*i + [1] - cj = [0]*j + [1] + ci = [0] * i + [1] + cj = [0] * j + [1] tgt = herm.hermadd(ci, cj) quo, rem = herm.hermdiv(tgt, ci) res = herm.hermadd(herm.hermmul(quo, ci), rem) @@ -104,8 +102,8 @@ def test_hermpow(self): for j in range(5): msg = f"At i={i}, j={j}" c = np.arange(i + 1) - tgt = reduce(herm.hermmul, [c]*j, np.array([1])) - res = herm.hermpow(c, j) + tgt = reduce(herm.hermmul, [c] * j, np.array([1])) + res = 
herm.hermpow(c, j) assert_equal(trim(res), trim(tgt), err_msg=msg) @@ -116,25 +114,25 @@ class TestEvaluation: c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 + x = np.random.random((3, 5)) * 2 - 1 y = polyval(x, [1., 2., 3.]) def test_hermval(self): - #check empty input + # check empty input assert_equal(herm.hermval([], [1]).size, 0) - #check normal input) + # check normal input) x = np.linspace(-1, 1) y = [polyval(x, c) for c in Hlist] for i in range(10): msg = f"At i={i}" tgt = y[i] - res = herm.hermval(x, [0]*i + [1]) + res = herm.hermval(x, [0] * i + [1]) assert_almost_equal(res, tgt, err_msg=msg) - #check that shape is preserved + # check that shape is preserved for i in range(3): - dims = [2]*i + dims = [2] * i x = np.zeros(dims) assert_equal(herm.hermval(x, [1]).shape, dims) assert_equal(herm.hermval(x, [1, 0]).shape, dims) @@ -144,15 +142,15 @@ def test_hermval2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, herm.hermval2d, x1, x2[:2], self.c2d) - #test values - tgt = y1*y2 + # test values + tgt = y1 * y2 res = herm.hermval2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = herm.hermval2d(z, z, self.c2d) assert_(res.shape == (2, 3)) @@ -161,15 +159,15 @@ def test_hermval3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, herm.hermval3d, x1, x2, x3[:2], self.c3d) - #test values - tgt = y1*y2*y3 + # test values + tgt = y1 * y2 * y3 res = herm.hermval3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = herm.hermval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) @@ -178,29 +176,29 @@ def test_hermgrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j->ij', y1, y2) res = herm.hermgrid2d(x1, x2, self.c2d) 
assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = herm.hermgrid2d(z, z, self.c2d) - assert_(res.shape == (2, 3)*2) + assert_(res.shape == (2, 3) * 2) def test_hermgrid3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j,k->ijk', y1, y2, y3) res = herm.hermgrid3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = herm.hermgrid3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)*3) + assert_(res.shape == (2, 3) * 3) class TestIntegral: @@ -216,15 +214,15 @@ def test_hermint(self): # test integration of zero polynomial for i in range(2, 5): - k = [0]*(i - 2) + [1] + k = [0] * (i - 2) + [1] res = herm.hermint([0], m=i, k=k) assert_almost_equal(res, [0, .5]) # check single integration with integration constant for i in range(5): scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [1/scl] + pol = [0] * i + [1] + tgt = [i] + [0] * i + [1 / scl] hermpol = herm.poly2herm(pol) hermint = herm.hermint(hermpol, m=1, k=[i]) res = herm.herm2poly(hermint) @@ -233,7 +231,7 @@ def test_hermint(self): # check single integration with integration constant and lbnd for i in range(5): scl = i + 1 - pol = [0]*i + [1] + pol = [0] * i + [1] hermpol = herm.poly2herm(pol) hermint = herm.hermint(hermpol, m=1, k=[i], lbnd=-1) assert_almost_equal(herm.hermval(-1, hermint), i) @@ -241,8 +239,8 @@ def test_hermint(self): # check single integration with integration constant and scaling for i in range(5): scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [2/scl] + pol = [0] * i + [1] + tgt = [i] + [0] * i + [2 / scl] hermpol = herm.poly2herm(pol) hermint = herm.hermint(hermpol, m=1, k=[i], scl=2) res = herm.herm2poly(hermint) @@ -251,7 +249,7 @@ def test_hermint(self): # check multiple integrations with default k for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = herm.hermint(tgt, m=1) @@ 
-261,7 +259,7 @@ def test_hermint(self): # check multiple integrations with defined k for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = herm.hermint(tgt, m=1, k=[k]) @@ -271,7 +269,7 @@ def test_hermint(self): # check multiple integrations with lbnd for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = herm.hermint(tgt, m=1, k=[k], lbnd=-1) @@ -281,7 +279,7 @@ def test_hermint(self): # check multiple integrations with scaling for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = herm.hermint(tgt, m=1, k=[k], scl=2) @@ -314,21 +312,21 @@ def test_hermder(self): # check that zeroth derivative does nothing for i in range(5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = herm.hermder(tgt, m=0) assert_equal(trim(res), trim(tgt)) # check that derivation is the inverse of integration for i in range(5): for j in range(2, 5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = herm.hermder(herm.hermint(tgt, m=j), m=j) assert_almost_equal(trim(res), trim(tgt)) # check derivation with scaling for i in range(5): for j in range(2, 5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = herm.hermder(herm.hermint(tgt, m=j, scl=2), m=j, scl=.5) assert_almost_equal(trim(res), trim(tgt)) @@ -347,7 +345,7 @@ def test_hermder_axis(self): class TestVander: # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 + x = np.random.random((3, 5)) * 2 - 1 def test_hermvander(self): # check for 1d x @@ -355,7 +353,7 @@ def test_hermvander(self): v = herm.hermvander(x, 3) assert_(v.shape == (3, 4)) for i in range(4): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_almost_equal(v[..., i], herm.hermval(x, coef)) # check for 2d x @@ -363,7 +361,7 @@ def test_hermvander(self): v = herm.hermvander(x, 3) assert_(v.shape == (3, 2, 4)) for i in range(4): - coef = [0]*i + [1] + coef = [0] * 
i + [1] assert_almost_equal(v[..., i], herm.hermval(x, coef)) def test_hermvander2d(self): @@ -397,7 +395,7 @@ class TestFitting: def test_hermfit(self): def f(x): - return x*(x - 1)*(x - 2) + return x * (x - 1) * (x - 2) def f2(x): return x**4 + x**2 + 1 @@ -478,7 +476,7 @@ def test_raises(self): def test_dimensions(self): for i in range(1, 5): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_(herm.hermcompanion(coef).shape == (i, i)) def test_linear_root(self): @@ -495,7 +493,7 @@ def test_100(self): # functions like Laguerre can be very confusing. v = herm.hermvander(x, 99) vv = np.dot(v.T * w, v) - vd = 1/np.sqrt(vv.diagonal()) + vd = 1 / np.sqrt(vv.diagonal()) vv = vd[:, None] * vv * vd assert_almost_equal(vv, np.eye(100)) @@ -510,7 +508,7 @@ def test_hermfromroots(self): res = herm.hermfromroots([]) assert_almost_equal(trim(res), [1]) for i in range(1, 5): - roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2]) pol = herm.hermfromroots(roots) res = herm.hermval(roots, pol) tgt = 0 @@ -542,11 +540,11 @@ def test_hermline(self): def test_herm2poly(self): for i in range(10): - assert_almost_equal(herm.herm2poly([0]*i + [1]), Hlist[i]) + assert_almost_equal(herm.herm2poly([0] * i + [1]), Hlist[i]) def test_poly2herm(self): for i in range(10): - assert_almost_equal(herm.poly2herm(Hlist[i]), [0]*i + [1]) + assert_almost_equal(herm.poly2herm(Hlist[i]), [0] * i + [1]) def test_weight(self): x = np.linspace(-5, 5, 11) diff --git a/numpy/polynomial/tests/test_hermite_e.py b/numpy/polynomial/tests/test_hermite_e.py index 2d262a330622..233dfb28254a 100644 --- a/numpy/polynomial/tests/test_hermite_e.py +++ b/numpy/polynomial/tests/test_hermite_e.py @@ -6,9 +6,7 @@ import numpy as np import numpy.polynomial.hermite_e as herme from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - ) +from numpy.testing import assert_, 
assert_almost_equal, assert_equal, assert_raises He0 = np.array([1]) He1 = np.array([0, 1]) @@ -53,7 +51,7 @@ def test_hermeadd(self): tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] += 1 - res = herme.hermeadd([0]*i + [1], [0]*j + [1]) + res = herme.hermeadd([0] * i + [1], [0] * j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_hermesub(self): @@ -63,37 +61,37 @@ def test_hermesub(self): tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] -= 1 - res = herme.hermesub([0]*i + [1], [0]*j + [1]) + res = herme.hermesub([0] * i + [1], [0] * j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_hermemulx(self): assert_equal(herme.hermemulx([0]), [0]) assert_equal(herme.hermemulx([1]), [0, 1]) for i in range(1, 5): - ser = [0]*i + [1] - tgt = [0]*(i - 1) + [i, 0, 1] + ser = [0] * i + [1] + tgt = [0] * (i - 1) + [i, 0, 1] assert_equal(herme.hermemulx(ser), tgt) def test_hermemul(self): # check values of result for i in range(5): - pol1 = [0]*i + [1] + pol1 = [0] * i + [1] val1 = herme.hermeval(self.x, pol1) for j in range(5): msg = f"At i={i}, j={j}" - pol2 = [0]*j + [1] + pol2 = [0] * j + [1] val2 = herme.hermeval(self.x, pol2) pol3 = herme.hermemul(pol1, pol2) val3 = herme.hermeval(self.x, pol3) assert_(len(pol3) == i + j + 1, msg) - assert_almost_equal(val3, val1*val2, err_msg=msg) + assert_almost_equal(val3, val1 * val2, err_msg=msg) def test_hermediv(self): for i in range(5): for j in range(5): msg = f"At i={i}, j={j}" - ci = [0]*i + [1] - cj = [0]*j + [1] + ci = [0] * i + [1] + cj = [0] * j + [1] tgt = herme.hermeadd(ci, cj) quo, rem = herme.hermediv(tgt, ci) res = herme.hermeadd(herme.hermemul(quo, ci), rem) @@ -104,7 +102,7 @@ def test_hermepow(self): for j in range(5): msg = f"At i={i}, j={j}" c = np.arange(i + 1) - tgt = reduce(herme.hermemul, [c]*j, np.array([1])) + tgt = reduce(herme.hermemul, [c] * j, np.array([1])) res = herme.hermepow(c, j) assert_equal(trim(res), trim(tgt), err_msg=msg) @@ -116,25 +114,25 @@ class TestEvaluation: 
c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 + x = np.random.random((3, 5)) * 2 - 1 y = polyval(x, [1., 2., 3.]) def test_hermeval(self): - #check empty input + # check empty input assert_equal(herme.hermeval([], [1]).size, 0) - #check normal input) + # check normal input) x = np.linspace(-1, 1) y = [polyval(x, c) for c in Helist] for i in range(10): msg = f"At i={i}" tgt = y[i] - res = herme.hermeval(x, [0]*i + [1]) + res = herme.hermeval(x, [0] * i + [1]) assert_almost_equal(res, tgt, err_msg=msg) - #check that shape is preserved + # check that shape is preserved for i in range(3): - dims = [2]*i + dims = [2] * i x = np.zeros(dims) assert_equal(herme.hermeval(x, [1]).shape, dims) assert_equal(herme.hermeval(x, [1, 0]).shape, dims) @@ -144,15 +142,15 @@ def test_hermeval2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, herme.hermeval2d, x1, x2[:2], self.c2d) - #test values - tgt = y1*y2 + # test values + tgt = y1 * y2 res = herme.hermeval2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = herme.hermeval2d(z, z, self.c2d) assert_(res.shape == (2, 3)) @@ -161,15 +159,15 @@ def test_hermeval3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, herme.hermeval3d, x1, x2, x3[:2], self.c3d) - #test values - tgt = y1*y2*y3 + # test values + tgt = y1 * y2 * y3 res = herme.hermeval3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = herme.hermeval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) @@ -178,29 +176,29 @@ def test_hermegrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j->ij', y1, y2) res = herme.hermegrid2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = 
herme.hermegrid2d(z, z, self.c2d) - assert_(res.shape == (2, 3)*2) + assert_(res.shape == (2, 3) * 2) def test_hermegrid3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j,k->ijk', y1, y2, y3) res = herme.hermegrid3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = herme.hermegrid3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)*3) + assert_(res.shape == (2, 3) * 3) class TestIntegral: @@ -216,15 +214,15 @@ def test_hermeint(self): # test integration of zero polynomial for i in range(2, 5): - k = [0]*(i - 2) + [1] + k = [0] * (i - 2) + [1] res = herme.hermeint([0], m=i, k=k) assert_almost_equal(res, [0, 1]) # check single integration with integration constant for i in range(5): scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [1/scl] + pol = [0] * i + [1] + tgt = [i] + [0] * i + [1 / scl] hermepol = herme.poly2herme(pol) hermeint = herme.hermeint(hermepol, m=1, k=[i]) res = herme.herme2poly(hermeint) @@ -233,7 +231,7 @@ def test_hermeint(self): # check single integration with integration constant and lbnd for i in range(5): scl = i + 1 - pol = [0]*i + [1] + pol = [0] * i + [1] hermepol = herme.poly2herme(pol) hermeint = herme.hermeint(hermepol, m=1, k=[i], lbnd=-1) assert_almost_equal(herme.hermeval(-1, hermeint), i) @@ -241,8 +239,8 @@ def test_hermeint(self): # check single integration with integration constant and scaling for i in range(5): scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [2/scl] + pol = [0] * i + [1] + tgt = [i] + [0] * i + [2 / scl] hermepol = herme.poly2herme(pol) hermeint = herme.hermeint(hermepol, m=1, k=[i], scl=2) res = herme.herme2poly(hermeint) @@ -251,7 +249,7 @@ def test_hermeint(self): # check multiple integrations with default k for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = herme.hermeint(tgt, m=1) @@ -261,7 +259,7 @@ def 
test_hermeint(self): # check multiple integrations with defined k for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = herme.hermeint(tgt, m=1, k=[k]) @@ -271,7 +269,7 @@ def test_hermeint(self): # check multiple integrations with lbnd for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = herme.hermeint(tgt, m=1, k=[k], lbnd=-1) @@ -281,7 +279,7 @@ def test_hermeint(self): # check multiple integrations with scaling for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = herme.hermeint(tgt, m=1, k=[k], scl=2) @@ -314,21 +312,21 @@ def test_hermeder(self): # check that zeroth derivative does nothing for i in range(5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = herme.hermeder(tgt, m=0) assert_equal(trim(res), trim(tgt)) # check that derivation is the inverse of integration for i in range(5): for j in range(2, 5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = herme.hermeder(herme.hermeint(tgt, m=j), m=j) assert_almost_equal(trim(res), trim(tgt)) # check derivation with scaling for i in range(5): for j in range(2, 5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = herme.hermeder( herme.hermeint(tgt, m=j, scl=2), m=j, scl=.5) assert_almost_equal(trim(res), trim(tgt)) @@ -348,7 +346,7 @@ def test_hermeder_axis(self): class TestVander: # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 + x = np.random.random((3, 5)) * 2 - 1 def test_hermevander(self): # check for 1d x @@ -356,7 +354,7 @@ def test_hermevander(self): v = herme.hermevander(x, 3) assert_(v.shape == (3, 4)) for i in range(4): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_almost_equal(v[..., i], herme.hermeval(x, coef)) # check for 2d x @@ -364,7 +362,7 @@ def test_hermevander(self): v = herme.hermevander(x, 3) assert_(v.shape == (3, 2, 4)) for i in range(4): - coef = [0]*i + [1] + 
coef = [0] * i + [1] assert_almost_equal(v[..., i], herme.hermeval(x, coef)) def test_hermevander2d(self): @@ -398,7 +396,7 @@ class TestFitting: def test_hermefit(self): def f(x): - return x*(x - 1)*(x - 2) + return x * (x - 1) * (x - 2) def f2(x): return x**4 + x**2 + 1 @@ -479,7 +477,7 @@ def test_raises(self): def test_dimensions(self): for i in range(1, 5): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_(herme.hermecompanion(coef).shape == (i, i)) def test_linear_root(self): @@ -496,12 +494,12 @@ def test_100(self): # functions like Laguerre can be very confusing. v = herme.hermevander(x, 99) vv = np.dot(v.T * w, v) - vd = 1/np.sqrt(vv.diagonal()) + vd = 1 / np.sqrt(vv.diagonal()) vv = vd[:, None] * vv * vd assert_almost_equal(vv, np.eye(100)) # check that the integral of 1 is correct - tgt = np.sqrt(2*np.pi) + tgt = np.sqrt(2 * np.pi) assert_almost_equal(w.sum(), tgt) @@ -511,7 +509,7 @@ def test_hermefromroots(self): res = herme.hermefromroots([]) assert_almost_equal(trim(res), [1]) for i in range(1, 5): - roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2]) pol = herme.hermefromroots(roots) res = herme.hermeval(roots, pol) tgt = 0 @@ -543,14 +541,14 @@ def test_hermeline(self): def test_herme2poly(self): for i in range(10): - assert_almost_equal(herme.herme2poly([0]*i + [1]), Helist[i]) + assert_almost_equal(herme.herme2poly([0] * i + [1]), Helist[i]) def test_poly2herme(self): for i in range(10): - assert_almost_equal(herme.poly2herme(Helist[i]), [0]*i + [1]) + assert_almost_equal(herme.poly2herme(Helist[i]), [0] * i + [1]) def test_weight(self): x = np.linspace(-5, 5, 11) - tgt = np.exp(-.5*x**2) + tgt = np.exp(-.5 * x**2) res = herme.hermeweight(x) assert_almost_equal(res, tgt) diff --git a/numpy/polynomial/tests/test_laguerre.py b/numpy/polynomial/tests/test_laguerre.py index 227ef3c5576d..884f15a9fe8f 100644 --- a/numpy/polynomial/tests/test_laguerre.py +++ 
b/numpy/polynomial/tests/test_laguerre.py @@ -6,17 +6,15 @@ import numpy as np import numpy.polynomial.laguerre as lag from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - ) - -L0 = np.array([1])/1 -L1 = np.array([1, -1])/1 -L2 = np.array([2, -4, 1])/2 -L3 = np.array([6, -18, 9, -1])/6 -L4 = np.array([24, -96, 72, -16, 1])/24 -L5 = np.array([120, -600, 600, -200, 25, -1])/120 -L6 = np.array([720, -4320, 5400, -2400, 450, -36, 1])/720 +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises + +L0 = np.array([1]) / 1 +L1 = np.array([1, -1]) / 1 +L2 = np.array([2, -4, 1]) / 2 +L3 = np.array([6, -18, 9, -1]) / 6 +L4 = np.array([24, -96, 72, -16, 1]) / 24 +L5 = np.array([120, -600, 600, -200, 25, -1]) / 120 +L6 = np.array([720, -4320, 5400, -2400, 450, -36, 1]) / 720 Llist = [L0, L1, L2, L3, L4, L5, L6] @@ -50,7 +48,7 @@ def test_lagadd(self): tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] += 1 - res = lag.lagadd([0]*i + [1], [0]*j + [1]) + res = lag.lagadd([0] * i + [1], [0] * j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_lagsub(self): @@ -60,37 +58,37 @@ def test_lagsub(self): tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] -= 1 - res = lag.lagsub([0]*i + [1], [0]*j + [1]) + res = lag.lagsub([0] * i + [1], [0] * j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_lagmulx(self): assert_equal(lag.lagmulx([0]), [0]) assert_equal(lag.lagmulx([1]), [1, -1]) for i in range(1, 5): - ser = [0]*i + [1] - tgt = [0]*(i - 1) + [-i, 2*i + 1, -(i + 1)] + ser = [0] * i + [1] + tgt = [0] * (i - 1) + [-i, 2 * i + 1, -(i + 1)] assert_almost_equal(lag.lagmulx(ser), tgt) def test_lagmul(self): # check values of result for i in range(5): - pol1 = [0]*i + [1] + pol1 = [0] * i + [1] val1 = lag.lagval(self.x, pol1) for j in range(5): msg = f"At i={i}, j={j}" - pol2 = [0]*j + [1] + pol2 = [0] * j + [1] val2 = lag.lagval(self.x, pol2) pol3 = 
lag.lagmul(pol1, pol2) val3 = lag.lagval(self.x, pol3) assert_(len(pol3) == i + j + 1, msg) - assert_almost_equal(val3, val1*val2, err_msg=msg) + assert_almost_equal(val3, val1 * val2, err_msg=msg) def test_lagdiv(self): for i in range(5): for j in range(5): msg = f"At i={i}, j={j}" - ci = [0]*i + [1] - cj = [0]*j + [1] + ci = [0] * i + [1] + cj = [0] * j + [1] tgt = lag.lagadd(ci, cj) quo, rem = lag.lagdiv(tgt, ci) res = lag.lagadd(lag.lagmul(quo, ci), rem) @@ -101,8 +99,8 @@ def test_lagpow(self): for j in range(5): msg = f"At i={i}, j={j}" c = np.arange(i + 1) - tgt = reduce(lag.lagmul, [c]*j, np.array([1])) - res = lag.lagpow(c, j) + tgt = reduce(lag.lagmul, [c] * j, np.array([1])) + res = lag.lagpow(c, j) assert_equal(trim(res), trim(tgt), err_msg=msg) @@ -113,25 +111,25 @@ class TestEvaluation: c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 + x = np.random.random((3, 5)) * 2 - 1 y = polyval(x, [1., 2., 3.]) def test_lagval(self): - #check empty input + # check empty input assert_equal(lag.lagval([], [1]).size, 0) - #check normal input) + # check normal input) x = np.linspace(-1, 1) y = [polyval(x, c) for c in Llist] for i in range(7): msg = f"At i={i}" tgt = y[i] - res = lag.lagval(x, [0]*i + [1]) + res = lag.lagval(x, [0] * i + [1]) assert_almost_equal(res, tgt, err_msg=msg) - #check that shape is preserved + # check that shape is preserved for i in range(3): - dims = [2]*i + dims = [2] * i x = np.zeros(dims) assert_equal(lag.lagval(x, [1]).shape, dims) assert_equal(lag.lagval(x, [1, 0]).shape, dims) @@ -141,15 +139,15 @@ def test_lagval2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, lag.lagval2d, x1, x2[:2], self.c2d) - #test values - tgt = y1*y2 + # test values + tgt = y1 * y2 res = lag.lagval2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = lag.lagval2d(z, z, self.c2d) 
assert_(res.shape == (2, 3)) @@ -158,15 +156,15 @@ def test_lagval3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, lag.lagval3d, x1, x2, x3[:2], self.c3d) - #test values - tgt = y1*y2*y3 + # test values + tgt = y1 * y2 * y3 res = lag.lagval3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = lag.lagval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) @@ -175,29 +173,29 @@ def test_laggrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j->ij', y1, y2) res = lag.laggrid2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = lag.laggrid2d(z, z, self.c2d) - assert_(res.shape == (2, 3)*2) + assert_(res.shape == (2, 3) * 2) def test_laggrid3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j,k->ijk', y1, y2, y3) res = lag.laggrid3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = lag.laggrid3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)*3) + assert_(res.shape == (2, 3) * 3) class TestIntegral: @@ -213,15 +211,15 @@ def test_lagint(self): # test integration of zero polynomial for i in range(2, 5): - k = [0]*(i - 2) + [1] + k = [0] * (i - 2) + [1] res = lag.lagint([0], m=i, k=k) assert_almost_equal(res, [1, -1]) # check single integration with integration constant for i in range(5): scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [1/scl] + pol = [0] * i + [1] + tgt = [i] + [0] * i + [1 / scl] lagpol = lag.poly2lag(pol) lagint = lag.lagint(lagpol, m=1, k=[i]) res = lag.lag2poly(lagint) @@ -230,7 +228,7 @@ def test_lagint(self): # check single integration with integration constant and lbnd for i in range(5): scl = i + 1 - pol = [0]*i + [1] + pol = [0] * i + [1] lagpol = lag.poly2lag(pol) lagint = lag.lagint(lagpol, m=1, k=[i], lbnd=-1) 
assert_almost_equal(lag.lagval(-1, lagint), i) @@ -238,8 +236,8 @@ def test_lagint(self): # check single integration with integration constant and scaling for i in range(5): scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [2/scl] + pol = [0] * i + [1] + tgt = [i] + [0] * i + [2 / scl] lagpol = lag.poly2lag(pol) lagint = lag.lagint(lagpol, m=1, k=[i], scl=2) res = lag.lag2poly(lagint) @@ -248,7 +246,7 @@ def test_lagint(self): # check multiple integrations with default k for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = lag.lagint(tgt, m=1) @@ -258,7 +256,7 @@ def test_lagint(self): # check multiple integrations with defined k for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = lag.lagint(tgt, m=1, k=[k]) @@ -268,7 +266,7 @@ def test_lagint(self): # check multiple integrations with lbnd for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = lag.lagint(tgt, m=1, k=[k], lbnd=-1) @@ -278,7 +276,7 @@ def test_lagint(self): # check multiple integrations with scaling for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = lag.lagint(tgt, m=1, k=[k], scl=2) @@ -311,21 +309,21 @@ def test_lagder(self): # check that zeroth derivative does nothing for i in range(5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = lag.lagder(tgt, m=0) assert_equal(trim(res), trim(tgt)) # check that derivation is the inverse of integration for i in range(5): for j in range(2, 5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = lag.lagder(lag.lagint(tgt, m=j), m=j) assert_almost_equal(trim(res), trim(tgt)) # check derivation with scaling for i in range(5): for j in range(2, 5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = lag.lagder(lag.lagint(tgt, m=j, scl=2), m=j, scl=.5) assert_almost_equal(trim(res), trim(tgt)) @@ 
-344,7 +342,7 @@ def test_lagder_axis(self): class TestVander: # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 + x = np.random.random((3, 5)) * 2 - 1 def test_lagvander(self): # check for 1d x @@ -352,7 +350,7 @@ def test_lagvander(self): v = lag.lagvander(x, 3) assert_(v.shape == (3, 4)) for i in range(4): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_almost_equal(v[..., i], lag.lagval(x, coef)) # check for 2d x @@ -360,7 +358,7 @@ def test_lagvander(self): v = lag.lagvander(x, 3) assert_(v.shape == (3, 2, 4)) for i in range(4): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_almost_equal(v[..., i], lag.lagval(x, coef)) def test_lagvander2d(self): @@ -394,7 +392,7 @@ class TestFitting: def test_lagfit(self): def f(x): - return x*(x - 1)*(x - 2) + return x * (x - 1) * (x - 2) # Test exceptions assert_raises(ValueError, lag.lagfit, [1], [1], -1) @@ -460,7 +458,7 @@ def test_raises(self): def test_dimensions(self): for i in range(1, 5): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_(lag.lagcompanion(coef).shape == (i, i)) def test_linear_root(self): @@ -477,7 +475,7 @@ def test_100(self): # functions like Laguerre can be very confusing. 
v = lag.lagvander(x, 99) vv = np.dot(v.T * w, v) - vd = 1/np.sqrt(vv.diagonal()) + vd = 1 / np.sqrt(vv.diagonal()) vv = vd[:, None] * vv * vd assert_almost_equal(vv, np.eye(100)) @@ -492,7 +490,7 @@ def test_lagfromroots(self): res = lag.lagfromroots([]) assert_almost_equal(trim(res), [1]) for i in range(1, 5): - roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2]) pol = lag.lagfromroots(roots) res = lag.lagval(roots, pol) tgt = 0 @@ -524,11 +522,11 @@ def test_lagline(self): def test_lag2poly(self): for i in range(7): - assert_almost_equal(lag.lag2poly([0]*i + [1]), Llist[i]) + assert_almost_equal(lag.lag2poly([0] * i + [1]), Llist[i]) def test_poly2lag(self): for i in range(7): - assert_almost_equal(lag.poly2lag(Llist[i]), [0]*i + [1]) + assert_almost_equal(lag.poly2lag(Llist[i]), [0] * i + [1]) def test_weight(self): x = np.linspace(0, 10, 11) diff --git a/numpy/polynomial/tests/test_legendre.py b/numpy/polynomial/tests/test_legendre.py index 92399c160ecb..6c87f44ee707 100644 --- a/numpy/polynomial/tests/test_legendre.py +++ b/numpy/polynomial/tests/test_legendre.py @@ -6,20 +6,18 @@ import numpy as np import numpy.polynomial.legendre as leg from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - ) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises L0 = np.array([1]) L1 = np.array([0, 1]) -L2 = np.array([-1, 0, 3])/2 -L3 = np.array([0, -3, 0, 5])/2 -L4 = np.array([3, 0, -30, 0, 35])/8 -L5 = np.array([0, 15, 0, -70, 0, 63])/8 -L6 = np.array([-5, 0, 105, 0, -315, 0, 231])/16 -L7 = np.array([0, -35, 0, 315, 0, -693, 0, 429])/16 -L8 = np.array([35, 0, -1260, 0, 6930, 0, -12012, 0, 6435])/128 -L9 = np.array([0, 315, 0, -4620, 0, 18018, 0, -25740, 0, 12155])/128 +L2 = np.array([-1, 0, 3]) / 2 +L3 = np.array([0, -3, 0, 5]) / 2 +L4 = np.array([3, 0, -30, 0, 35]) / 8 +L5 = np.array([0, 15, 0, -70, 
0, 63]) / 8 +L6 = np.array([-5, 0, 105, 0, -315, 0, 231]) / 16 +L7 = np.array([0, -35, 0, 315, 0, -693, 0, 429]) / 16 +L8 = np.array([35, 0, -1260, 0, 6930, 0, -12012, 0, 6435]) / 128 +L9 = np.array([0, 315, 0, -4620, 0, 18018, 0, -25740, 0, 12155]) / 128 Llist = [L0, L1, L2, L3, L4, L5, L6, L7, L8, L9] @@ -53,7 +51,7 @@ def test_legadd(self): tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] += 1 - res = leg.legadd([0]*i + [1], [0]*j + [1]) + res = leg.legadd([0] * i + [1], [0] * j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_legsub(self): @@ -63,38 +61,38 @@ def test_legsub(self): tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] -= 1 - res = leg.legsub([0]*i + [1], [0]*j + [1]) + res = leg.legsub([0] * i + [1], [0] * j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_legmulx(self): assert_equal(leg.legmulx([0]), [0]) assert_equal(leg.legmulx([1]), [0, 1]) for i in range(1, 5): - tmp = 2*i + 1 - ser = [0]*i + [1] - tgt = [0]*(i - 1) + [i/tmp, 0, (i + 1)/tmp] + tmp = 2 * i + 1 + ser = [0] * i + [1] + tgt = [0] * (i - 1) + [i / tmp, 0, (i + 1) / tmp] assert_equal(leg.legmulx(ser), tgt) def test_legmul(self): # check values of result for i in range(5): - pol1 = [0]*i + [1] + pol1 = [0] * i + [1] val1 = leg.legval(self.x, pol1) for j in range(5): msg = f"At i={i}, j={j}" - pol2 = [0]*j + [1] + pol2 = [0] * j + [1] val2 = leg.legval(self.x, pol2) pol3 = leg.legmul(pol1, pol2) val3 = leg.legval(self.x, pol3) assert_(len(pol3) == i + j + 1, msg) - assert_almost_equal(val3, val1*val2, err_msg=msg) + assert_almost_equal(val3, val1 * val2, err_msg=msg) def test_legdiv(self): for i in range(5): for j in range(5): msg = f"At i={i}, j={j}" - ci = [0]*i + [1] - cj = [0]*j + [1] + ci = [0] * i + [1] + cj = [0] * j + [1] tgt = leg.legadd(ci, cj) quo, rem = leg.legdiv(tgt, ci) res = leg.legadd(leg.legmul(quo, ci), rem) @@ -105,8 +103,8 @@ def test_legpow(self): for j in range(5): msg = f"At i={i}, j={j}" c = np.arange(i + 1) - tgt = 
reduce(leg.legmul, [c]*j, np.array([1])) - res = leg.legpow(c, j) + tgt = reduce(leg.legmul, [c] * j, np.array([1])) + res = leg.legpow(c, j) assert_equal(trim(res), trim(tgt), err_msg=msg) @@ -117,25 +115,25 @@ class TestEvaluation: c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 + x = np.random.random((3, 5)) * 2 - 1 y = polyval(x, [1., 2., 3.]) def test_legval(self): - #check empty input + # check empty input assert_equal(leg.legval([], [1]).size, 0) - #check normal input) + # check normal input) x = np.linspace(-1, 1) y = [polyval(x, c) for c in Llist] for i in range(10): msg = f"At i={i}" tgt = y[i] - res = leg.legval(x, [0]*i + [1]) + res = leg.legval(x, [0] * i + [1]) assert_almost_equal(res, tgt, err_msg=msg) - #check that shape is preserved + # check that shape is preserved for i in range(3): - dims = [2]*i + dims = [2] * i x = np.zeros(dims) assert_equal(leg.legval(x, [1]).shape, dims) assert_equal(leg.legval(x, [1, 0]).shape, dims) @@ -145,15 +143,15 @@ def test_legval2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, leg.legval2d, x1, x2[:2], self.c2d) - #test values - tgt = y1*y2 + # test values + tgt = y1 * y2 res = leg.legval2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = leg.legval2d(z, z, self.c2d) assert_(res.shape == (2, 3)) @@ -162,15 +160,15 @@ def test_legval3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, leg.legval3d, x1, x2, x3[:2], self.c3d) - #test values - tgt = y1*y2*y3 + # test values + tgt = y1 * y2 * y3 res = leg.legval3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = leg.legval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) @@ -179,29 +177,29 @@ def test_leggrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test 
values + # test values tgt = np.einsum('i,j->ij', y1, y2) res = leg.leggrid2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = leg.leggrid2d(z, z, self.c2d) - assert_(res.shape == (2, 3)*2) + assert_(res.shape == (2, 3) * 2) def test_leggrid3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j,k->ijk', y1, y2, y3) res = leg.leggrid3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = leg.leggrid3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)*3) + assert_(res.shape == (2, 3) * 3) class TestIntegral: @@ -217,15 +215,15 @@ def test_legint(self): # test integration of zero polynomial for i in range(2, 5): - k = [0]*(i - 2) + [1] + k = [0] * (i - 2) + [1] res = leg.legint([0], m=i, k=k) assert_almost_equal(res, [0, 1]) # check single integration with integration constant for i in range(5): scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [1/scl] + pol = [0] * i + [1] + tgt = [i] + [0] * i + [1 / scl] legpol = leg.poly2leg(pol) legint = leg.legint(legpol, m=1, k=[i]) res = leg.leg2poly(legint) @@ -234,7 +232,7 @@ def test_legint(self): # check single integration with integration constant and lbnd for i in range(5): scl = i + 1 - pol = [0]*i + [1] + pol = [0] * i + [1] legpol = leg.poly2leg(pol) legint = leg.legint(legpol, m=1, k=[i], lbnd=-1) assert_almost_equal(leg.legval(-1, legint), i) @@ -242,8 +240,8 @@ def test_legint(self): # check single integration with integration constant and scaling for i in range(5): scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [2/scl] + pol = [0] * i + [1] + tgt = [i] + [0] * i + [2 / scl] legpol = leg.poly2leg(pol) legint = leg.legint(legpol, m=1, k=[i], scl=2) res = leg.leg2poly(legint) @@ -252,7 +250,7 @@ def test_legint(self): # check multiple integrations with default k for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = 
pol[:] for k in range(j): tgt = leg.legint(tgt, m=1) @@ -262,7 +260,7 @@ def test_legint(self): # check multiple integrations with defined k for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = leg.legint(tgt, m=1, k=[k]) @@ -272,7 +270,7 @@ def test_legint(self): # check multiple integrations with lbnd for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = leg.legint(tgt, m=1, k=[k], lbnd=-1) @@ -282,7 +280,7 @@ def test_legint(self): # check multiple integrations with scaling for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = leg.legint(tgt, m=1, k=[k], scl=2) @@ -318,21 +316,21 @@ def test_legder(self): # check that zeroth derivative does nothing for i in range(5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = leg.legder(tgt, m=0) assert_equal(trim(res), trim(tgt)) # check that derivation is the inverse of integration for i in range(5): for j in range(2, 5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = leg.legder(leg.legint(tgt, m=j), m=j) assert_almost_equal(trim(res), trim(tgt)) # check derivation with scaling for i in range(5): for j in range(2, 5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = leg.legder(leg.legint(tgt, m=j, scl=2), m=j, scl=.5) assert_almost_equal(trim(res), trim(tgt)) @@ -354,7 +352,7 @@ def test_legder_orderhigherthancoeff(self): class TestVander: # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 + x = np.random.random((3, 5)) * 2 - 1 def test_legvander(self): # check for 1d x @@ -362,7 +360,7 @@ def test_legvander(self): v = leg.legvander(x, 3) assert_(v.shape == (3, 4)) for i in range(4): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_almost_equal(v[..., i], leg.legval(x, coef)) # check for 2d x @@ -370,7 +368,7 @@ def test_legvander(self): v = leg.legvander(x, 3) assert_(v.shape == (3, 2, 4)) for i in 
range(4): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_almost_equal(v[..., i], leg.legval(x, coef)) def test_legvander2d(self): @@ -407,7 +405,7 @@ class TestFitting: def test_legfit(self): def f(x): - return x*(x - 1)*(x - 2) + return x * (x - 1) * (x - 2) def f2(x): return x**4 + x**2 + 1 @@ -488,7 +486,7 @@ def test_raises(self): def test_dimensions(self): for i in range(1, 5): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_(leg.legcompanion(coef).shape == (i, i)) def test_linear_root(self): @@ -505,7 +503,7 @@ def test_100(self): # functions like Laguerre can be very confusing. v = leg.legvander(x, 99) vv = np.dot(v.T * w, v) - vd = 1/np.sqrt(vv.diagonal()) + vd = 1 / np.sqrt(vv.diagonal()) vv = vd[:, None] * vv * vd assert_almost_equal(vv, np.eye(100)) @@ -520,7 +518,7 @@ def test_legfromroots(self): res = leg.legfromroots([]) assert_almost_equal(trim(res), [1]) for i in range(1, 5): - roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2]) pol = leg.legfromroots(roots) res = leg.legval(roots, pol) tgt = 0 @@ -555,11 +553,11 @@ def test_legline_zeroscl(self): def test_leg2poly(self): for i in range(10): - assert_almost_equal(leg.leg2poly([0]*i + [1]), Llist[i]) + assert_almost_equal(leg.leg2poly([0] * i + [1]), Llist[i]) def test_poly2leg(self): for i in range(10): - assert_almost_equal(leg.poly2leg(Llist[i]), [0]*i + [1]) + assert_almost_equal(leg.poly2leg(Llist[i]), [0] * i + [1]) def test_weight(self): x = np.linspace(-1, 1, 11) diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py index b761668a3b82..4c924a758b06 100644 --- a/numpy/polynomial/tests/test_polynomial.py +++ b/numpy/polynomial/tests/test_polynomial.py @@ -1,20 +1,29 @@ """Tests for polynomial module. 
""" -from functools import reduce +import pickle +from copy import deepcopy from fractions import Fraction +from functools import reduce + +import pytest + import numpy as np import numpy.polynomial.polynomial as poly -import pickle -from copy import deepcopy from numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - assert_array_equal, assert_raises_regex) + assert_, + assert_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) def trim(x): return poly.polytrim(x, tol=1e-6) + T0 = [1] T1 = [0, 1] T2 = [-1, 0, 2] @@ -62,7 +71,7 @@ def test_polyadd(self): tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] += 1 - res = poly.polyadd([0]*i + [1], [0]*j + [1]) + res = poly.polyadd([0] * i + [1], [0] * j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_polysub(self): @@ -72,15 +81,15 @@ def test_polysub(self): tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] -= 1 - res = poly.polysub([0]*i + [1], [0]*j + [1]) + res = poly.polysub([0] * i + [1], [0] * j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_polymulx(self): assert_equal(poly.polymulx([0]), [0]) assert_equal(poly.polymulx([1]), [0, 1]) for i in range(1, 5): - ser = [0]*i + [1] - tgt = [0]*(i + 1) + [1] + ser = [0] * i + [1] + tgt = [0] * (i + 1) + [1] assert_equal(poly.polymulx(ser), tgt) def test_polymul(self): @@ -89,7 +98,7 @@ def test_polymul(self): msg = f"At i={i}, j={j}" tgt = np.zeros(i + j + 1) tgt[i + j] += 1 - res = poly.polymul([0]*i + [1], [0]*j + [1]) + res = poly.polymul([0] * i + [1], [0] * j + [1]) assert_equal(trim(res), trim(tgt), err_msg=msg) def test_polydiv(self): @@ -106,8 +115,8 @@ def test_polydiv(self): for i in range(5): for j in range(5): msg = f"At i={i}, j={j}" - ci = [0]*i + [1, 2] - cj = [0]*j + [1, 2] + ci = [0] * i + [1, 2] + cj = [0] * j + [1, 2] tgt = poly.polyadd(ci, cj) quo, rem = poly.polydiv(tgt, ci) res = poly.polyadd(poly.polymul(quo, ci), rem) @@ -118,8 +127,8 @@ def 
test_polypow(self): for j in range(5): msg = f"At i={i}, j={j}" c = np.arange(i + 1) - tgt = reduce(poly.polymul, [c]*j, np.array([1])) - res = poly.polypow(c, j) + tgt = reduce(poly.polymul, [c] * j, np.array([1])) + res = poly.polypow(c, j) assert_equal(trim(res), trim(tgt), err_msg=msg) class TestFraction: @@ -130,7 +139,7 @@ def test_Fraction(self): one = Fraction(1, 1) zero = Fraction(0, 1) p = poly.Polynomial([f, f], domain=[zero, one], window=[zero, one]) - + x = 2 * p + p ** 2 assert_equal(x.coef, np.array([Fraction(16, 9), Fraction(20, 9), Fraction(4, 9)], dtype=object)) @@ -149,39 +158,39 @@ class TestEvaluation: c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 + x = np.random.random((3, 5)) * 2 - 1 y = poly.polyval(x, [1., 2., 3.]) def test_polyval(self): - #check empty input + # check empty input assert_equal(poly.polyval([], [1]).size, 0) - #check normal input) + # check normal input) x = np.linspace(-1, 1) y = [x**i for i in range(5)] for i in range(5): tgt = y[i] - res = poly.polyval(x, [0]*i + [1]) + res = poly.polyval(x, [0] * i + [1]) assert_almost_equal(res, tgt) - tgt = x*(x**2 - 1) + tgt = x * (x**2 - 1) res = poly.polyval(x, [0, -1, 0, 1]) assert_almost_equal(res, tgt) - #check that shape is preserved + # check that shape is preserved for i in range(3): - dims = [2]*i + dims = [2] * i x = np.zeros(dims) assert_equal(poly.polyval(x, [1]).shape, dims) assert_equal(poly.polyval(x, [1, 0]).shape, dims) assert_equal(poly.polyval(x, [1, 0, 0]).shape, dims) - #check masked arrays are processed correctly + # check masked arrays are processed correctly mask = [False, True, False] mx = np.ma.array([1, 2, 3], mask=mask) res = np.polyval([7, 5, 3], mx) assert_array_equal(res.mask, mask) - #check subtypes of ndarray are preserved + # check subtypes of ndarray are preserved class C(np.ndarray): pass @@ -211,15 +220,15 @@ def test_polyvalfromroots(self): y = [x**i for i in range(5)] for i in 
range(1, 5): tgt = y[i] - res = poly.polyvalfromroots(x, [0]*i) + res = poly.polyvalfromroots(x, [0] * i) assert_almost_equal(res, tgt) - tgt = x*(x - 1)*(x + 1) + tgt = x * (x - 1) * (x + 1) res = poly.polyvalfromroots(x, [-1, 0, 1]) assert_almost_equal(res, tgt) # check that shape is preserved for i in range(3): - dims = [2]*i + dims = [2] * i x = np.zeros(dims) assert_equal(poly.polyvalfromroots(x, [1]).shape, dims) assert_equal(poly.polyvalfromroots(x, [1, 0]).shape, dims) @@ -244,7 +253,7 @@ def test_polyvalfromroots(self): assert_equal(res, tgt) # check tensor=True - x = np.vstack([x, 2*x]) + x = np.vstack([x, 2 * x]) res = poly.polyvalfromroots(x, r, tensor=True) tgt = np.empty(r.shape[1:] + x.shape) for ii in range(r.shape[1]): @@ -256,16 +265,16 @@ def test_polyval2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises_regex(ValueError, 'incompatible', poly.polyval2d, x1, x2[:2], self.c2d) - #test values - tgt = y1*y2 + # test values + tgt = y1 * y2 res = poly.polyval2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = poly.polyval2d(z, z, self.c2d) assert_(res.shape == (2, 3)) @@ -274,16 +283,16 @@ def test_polyval3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises_regex(ValueError, 'incompatible', poly.polyval3d, x1, x2, x3[:2], self.c3d) - #test values - tgt = y1*y2*y3 + # test values + tgt = y1 * y2 * y3 res = poly.polyval3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = poly.polyval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) @@ -292,29 +301,29 @@ def test_polygrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j->ij', y1, y2) res = poly.polygrid2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = poly.polygrid2d(z, z, self.c2d) - 
assert_(res.shape == (2, 3)*2) + assert_(res.shape == (2, 3) * 2) def test_polygrid3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j,k->ijk', y1, y2, y3) res = poly.polygrid3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = poly.polygrid3d(z, z, z, self.c3d) - assert_(res.shape == (2, 3)*3) + assert_(res.shape == (2, 3) * 3) class TestIntegral: @@ -331,37 +340,37 @@ def test_polyint(self): # test integration of zero polynomial for i in range(2, 5): - k = [0]*(i - 2) + [1] + k = [0] * (i - 2) + [1] res = poly.polyint([0], m=i, k=k) assert_almost_equal(res, [0, 1]) # check single integration with integration constant for i in range(5): scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [1/scl] + pol = [0] * i + [1] + tgt = [i] + [0] * i + [1 / scl] res = poly.polyint(pol, m=1, k=[i]) assert_almost_equal(trim(res), trim(tgt)) # check single integration with integration constant and lbnd for i in range(5): scl = i + 1 - pol = [0]*i + [1] + pol = [0] * i + [1] res = poly.polyint(pol, m=1, k=[i], lbnd=-1) assert_almost_equal(poly.polyval(-1, res), i) # check single integration with integration constant and scaling for i in range(5): scl = i + 1 - pol = [0]*i + [1] - tgt = [i] + [0]*i + [2/scl] + pol = [0] * i + [1] + tgt = [i] + [0] * i + [2 / scl] res = poly.polyint(pol, m=1, k=[i], scl=2) assert_almost_equal(trim(res), trim(tgt)) # check multiple integrations with default k for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = poly.polyint(tgt, m=1) @@ -371,7 +380,7 @@ def test_polyint(self): # check multiple integrations with defined k for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = poly.polyint(tgt, m=1, k=[k]) @@ -381,7 +390,7 @@ def test_polyint(self): # check multiple integrations with lbnd for i in range(5): for j 
in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = poly.polyint(tgt, m=1, k=[k], lbnd=-1) @@ -391,7 +400,7 @@ def test_polyint(self): # check multiple integrations with scaling for i in range(5): for j in range(2, 5): - pol = [0]*i + [1] + pol = [0] * i + [1] tgt = pol[:] for k in range(j): tgt = poly.polyint(tgt, m=1, k=[k], scl=2) @@ -424,21 +433,21 @@ def test_polyder(self): # check that zeroth derivative does nothing for i in range(5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = poly.polyder(tgt, m=0) assert_equal(trim(res), trim(tgt)) # check that derivation is the inverse of integration for i in range(5): for j in range(2, 5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = poly.polyder(poly.polyint(tgt, m=j), m=j) assert_almost_equal(trim(res), trim(tgt)) # check derivation with scaling for i in range(5): for j in range(2, 5): - tgt = [0]*i + [1] + tgt = [0] * i + [1] res = poly.polyder(poly.polyint(tgt, m=j, scl=2), m=j, scl=.5) assert_almost_equal(trim(res), trim(tgt)) @@ -457,7 +466,7 @@ def test_polyder_axis(self): class TestVander: # some random values in [-1, 1) - x = np.random.random((3, 5))*2 - 1 + x = np.random.random((3, 5)) * 2 - 1 def test_polyvander(self): # check for 1d x @@ -465,7 +474,7 @@ def test_polyvander(self): v = poly.polyvander(x, 3) assert_(v.shape == (3, 4)) for i in range(4): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_almost_equal(v[..., i], poly.polyval(x, coef)) # check for 2d x @@ -473,7 +482,7 @@ def test_polyvander(self): v = poly.polyvander(x, 3) assert_(v.shape == (3, 2, 4)) for i in range(4): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_almost_equal(v[..., i], poly.polyval(x, coef)) def test_polyvander2d(self): @@ -515,7 +524,7 @@ def test_raises(self): def test_dimensions(self): for i in range(1, 5): - coef = [0]*i + [1] + coef = [0] * i + [1] assert_(poly.polycompanion(coef).shape == (i, i)) def test_linear_root(self): @@ -528,9 +537,9 @@ def 
test_polyfromroots(self): res = poly.polyfromroots([]) assert_almost_equal(trim(res), [1]) for i in range(1, 5): - roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2]) tgt = Tlist[i] - res = poly.polyfromroots(roots)*2**(i-1) + res = poly.polyfromroots(roots) * 2**(i - 1) assert_almost_equal(trim(res), trim(tgt)) def test_polyroots(self): @@ -541,9 +550,23 @@ def test_polyroots(self): res = poly.polyroots(poly.polyfromroots(tgt)) assert_almost_equal(trim(res), trim(tgt)) + # Testing for larger root values + for i in np.logspace(10, 25, num=1000, base=10): + tgt = np.array([-1, 1, i]) + res = poly.polyroots(poly.polyfromroots(tgt)) + # Adapting the expected precision according to the root value, + # to take into account numerical calculation error. + assert_almost_equal(res, tgt, 15 - int(np.log10(i))) + for i in np.logspace(10, 25, num=1000, base=10): + tgt = np.array([-1, 1.01, i]) + res = poly.polyroots(poly.polyfromroots(tgt)) + # Adapting the expected precision according to the root value, + # to take into account numerical calculation error. 
+ assert_almost_equal(res, tgt, 14 - int(np.log10(i))) + def test_polyfit(self): def f(x): - return x*(x - 1)*(x - 2) + return x * (x - 1) * (x - 2) def f2(x): return x**4 + x**2 + 1 @@ -627,3 +650,42 @@ def test_polyline(self): def test_polyline_zero(self): assert_equal(poly.polyline(3, 0), [3]) + + def test_fit_degenerate_domain(self): + p = poly.Polynomial.fit([1], [2], deg=0) + assert_equal(p.coef, [2.]) + p = poly.Polynomial.fit([1, 1], [2, 2.1], deg=0) + assert_almost_equal(p.coef, [2.05]) + with pytest.warns(np.exceptions.RankWarning): + p = poly.Polynomial.fit([1, 1], [2, 2.1], deg=1) + + def test_result_type(self): + w = np.array([-1, 1], dtype=np.float32) + p = np.polynomial.Polynomial(w, domain=w, window=w) + v = p(2) + assert_equal(v.dtype, np.float32) + + arr = np.polydiv(1, np.float32(1)) + assert_equal(arr[0].dtype, np.float64) + +class ArrayFunctionInterceptor: + def __init__(self): + self.called = False + + def __array_function__(self, func, types, args, kwargs): + self.called = True + return "intercepted" + +def test_polyval2d_array_function_hook(): + x = ArrayFunctionInterceptor() + y = ArrayFunctionInterceptor() + c = ArrayFunctionInterceptor() + result = np.polynomial.polynomial.polyval2d(x, y, c) + assert result == "intercepted" + +def test_polygrid2d_array_function_hook(): + x = ArrayFunctionInterceptor() + y = ArrayFunctionInterceptor() + c = ArrayFunctionInterceptor() + result = np.polynomial.polynomial.polygrid2d(x, y, c) + assert result == "intercepted" diff --git a/numpy/polynomial/tests/test_polyutils.py b/numpy/polynomial/tests/test_polyutils.py index e5143ed5c3e4..a6f5e3990b6b 100644 --- a/numpy/polynomial/tests/test_polyutils.py +++ b/numpy/polynomial/tests/test_polyutils.py @@ -3,9 +3,7 @@ """ import numpy as np import numpy.polynomial.polyutils as pu -from numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - ) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises class 
TestMisc: diff --git a/numpy/polynomial/tests/test_printing.py b/numpy/polynomial/tests/test_printing.py index 95dec549350c..fe56f9e16bef 100644 --- a/numpy/polynomial/tests/test_printing.py +++ b/numpy/polynomial/tests/test_printing.py @@ -1,12 +1,14 @@ -from math import nan, inf -import pytest -from numpy._core import array, arange, printoptions -import numpy.polynomial as poly -from numpy.testing import assert_equal, assert_ +from decimal import Decimal # For testing polynomial printing with object arrays from fractions import Fraction -from decimal import Decimal +from math import inf, nan + +import pytest + +import numpy.polynomial as poly +from numpy._core import arange, array, printoptions +from numpy.testing import assert_, assert_equal class TestStrUnicodeSuperSubscripts: @@ -244,6 +246,7 @@ def test_linewidth_printoption(self, lw, tgt): assert_(len(line) < lw) +@pytest.mark.thread_unsafe(reason="set_default_printstyle() is global state") def test_set_default_printoptions(): p = poly.Polynomial([1, 2, 3]) c = poly.Chebyshev([1, 2, 3]) @@ -257,9 +260,10 @@ def test_set_default_printoptions(): poly.set_default_printstyle('invalid_input') +@pytest.mark.thread_unsafe(reason="set_default_printstyle() is global state") def test_complex_coefficients(): """Test both numpy and built-in complex.""" - coefs = [0+1j, 1+1j, -2+2j, 3+0j] + coefs = [0 + 1j, 1 + 1j, -2 + 2j, 3 + 0j] # numpy complex p1 = poly.Polynomial(coefs) # Python complex @@ -413,7 +417,7 @@ def test_simple_polynomial(self): # translated input p = poly.Polynomial([1, 2, 3], domain=[-2, 0]) assert_equal(self.as_latex(p), - r'$x \mapsto 1.0 + 2.0\,\left(1.0 + x\right) + 3.0\,\left(1.0 + x\right)^{2}$') + r'$x \mapsto 1.0 + 2.0\,\left(1.0 + x\right) + 3.0\,\left(1.0 + x\right)^{2}$') # noqa: E501 # scaled input p = poly.Polynomial([1, 2, 3], domain=[-0.5, 0.5]) @@ -423,7 +427,7 @@ def test_simple_polynomial(self): # affine input p = poly.Polynomial([1, 2, 3], domain=[-1, 0]) 
assert_equal(self.as_latex(p), - r'$x \mapsto 1.0 + 2.0\,\left(1.0 + 2.0x\right) + 3.0\,\left(1.0 + 2.0x\right)^{2}$') + r'$x \mapsto 1.0 + 2.0\,\left(1.0 + 2.0x\right) + 3.0\,\left(1.0 + 2.0x\right)^{2}$') # noqa: E501 def test_basis_func(self): p = poly.Chebyshev([1, 2, 3]) @@ -432,7 +436,7 @@ def test_basis_func(self): # affine input - check no surplus parens are added p = poly.Chebyshev([1, 2, 3], domain=[-1, 0]) assert_equal(self.as_latex(p), - r'$x \mapsto 1.0\,{T}_{0}(1.0 + 2.0x) + 2.0\,{T}_{1}(1.0 + 2.0x) + 3.0\,{T}_{2}(1.0 + 2.0x)$') + r'$x \mapsto 1.0\,{T}_{0}(1.0 + 2.0x) + 2.0\,{T}_{1}(1.0 + 2.0x) + 3.0\,{T}_{2}(1.0 + 2.0x)$') # noqa: E501 def test_multichar_basis_func(self): p = poly.HermiteE([1, 2, 3]) @@ -480,23 +484,25 @@ def test_numeric_object_coefficients(self): p = poly.Polynomial(coefs) assert_equal(self.as_latex(p), '$x \\mapsto 1/2 + 1\\,x$') + SWITCH_TO_EXP = ( '1.0 + (1.0e-01) x + (1.0e-02) x**2', '1.2 + (1.2e-01) x + (1.2e-02) x**2', '1.23 + 0.12 x + (1.23e-02) x**2 + (1.23e-03) x**3', '1.235 + 0.123 x + (1.235e-02) x**2 + (1.235e-03) x**3', '1.2346 + 0.1235 x + 0.0123 x**2 + (1.2346e-03) x**3 + (1.2346e-04) x**4', - '1.23457 + 0.12346 x + 0.01235 x**2 + (1.23457e-03) x**3 + ' - '(1.23457e-04) x**4', - '1.234568 + 0.123457 x + 0.012346 x**2 + 0.001235 x**3 + ' - '(1.234568e-04) x**4 + (1.234568e-05) x**5', - '1.2345679 + 0.1234568 x + 0.0123457 x**2 + 0.0012346 x**3 + ' - '(1.2345679e-04) x**4 + (1.2345679e-05) x**5') + ('1.23457 + 0.12346 x + 0.01235 x**2 + (1.23457e-03) x**3 + ' + '(1.23457e-04) x**4'), + ('1.234568 + 0.123457 x + 0.012346 x**2 + 0.001235 x**3 + ' + '(1.234568e-04) x**4 + (1.234568e-05) x**5'), + ('1.2345679 + 0.1234568 x + 0.0123457 x**2 + 0.0012346 x**3 + ' + '(1.2345679e-04) x**4 + (1.2345679e-05) x**5') +) class TestPrintOptions: """ Test the output is properly configured via printoptions. 
- The exponential notation is enabled automatically when the values + The exponential notation is enabled automatically when the values are too small or too large. """ @@ -505,7 +511,7 @@ def use_ascii(self): poly.set_default_printstyle('ascii') def test_str(self): - p = poly.Polynomial([1/2, 1/7, 1/7*10**8, 1/7*10**9]) + p = poly.Polynomial([1 / 2, 1 / 7, 1 / 7 * 10**8, 1 / 7 * 10**9]) assert_equal(str(p), '0.5 + 0.14285714 x + 14285714.28571429 x**2 ' '+ (1.42857143e+08) x**3') @@ -514,34 +520,34 @@ def test_str(self): '+ (1.429e+08) x**3') def test_latex(self): - p = poly.Polynomial([1/2, 1/7, 1/7*10**8, 1/7*10**9]) + p = poly.Polynomial([1 / 2, 1 / 7, 1 / 7 * 10**8, 1 / 7 * 10**9]) assert_equal(p._repr_latex_(), r'$x \mapsto \text{0.5} + \text{0.14285714}\,x + ' r'\text{14285714.28571429}\,x^{2} + ' r'\text{(1.42857143e+08)}\,x^{3}$') - + with printoptions(precision=3): assert_equal(p._repr_latex_(), r'$x \mapsto \text{0.5} + \text{0.143}\,x + ' r'\text{14285714.286}\,x^{2} + \text{(1.429e+08)}\,x^{3}$') def test_fixed(self): - p = poly.Polynomial([1/2]) + p = poly.Polynomial([1 / 2]) assert_equal(str(p), '0.5') - + with printoptions(floatmode='fixed'): assert_equal(str(p), '0.50000000') - + with printoptions(floatmode='fixed', precision=4): assert_equal(str(p), '0.5000') def test_switch_to_exp(self): for i, s in enumerate(SWITCH_TO_EXP): with printoptions(precision=i): - p = poly.Polynomial([1.23456789*10**-i - for i in range(i//2+3)]) - assert str(p).replace('\n', ' ') == s - + p = poly.Polynomial([1.23456789 * 10**-i + for i in range(i // 2 + 3)]) + assert str(p).replace('\n', ' ') == s + def test_non_finite(self): p = poly.Polynomial([nan, inf]) assert str(p) == 'nan + inf x' diff --git a/numpy/polynomial/tests/test_symbol.py b/numpy/polynomial/tests/test_symbol.py index f985533f9fe8..3de9e38ced08 100644 --- a/numpy/polynomial/tests/test_symbol.py +++ b/numpy/polynomial/tests/test_symbol.py @@ -3,9 +3,10 @@ """ import pytest + import numpy.polynomial as 
poly from numpy._core import array -from numpy.testing import assert_equal, assert_raises, assert_ +from numpy.testing import assert_, assert_equal, assert_raises class TestInit: @@ -195,7 +196,7 @@ def test_composition(): def test_fit(): - x, y = (range(10),)*2 + x, y = (range(10),) * 2 p = poly.Polynomial.fit(x, y, deg=1, symbol='z') assert_equal(p.symbol, 'z') diff --git a/numpy/random/__init__.py b/numpy/random/__init__.py index 2e8f99fe3045..3e21d598a88e 100644 --- a/numpy/random/__init__.py +++ b/numpy/random/__init__.py @@ -177,16 +177,13 @@ ] # add these for module-freeze analysis (like PyInstaller) -from . import _pickle -from . import _common -from . import _bounded_integers - +from . import _bounded_integers, _common, _pickle from ._generator import Generator, default_rng -from .bit_generator import SeedSequence, BitGenerator from ._mt19937 import MT19937 from ._pcg64 import PCG64, PCG64DXSM from ._philox import Philox from ._sfc64 import SFC64 +from .bit_generator import BitGenerator, SeedSequence from .mtrand import * __all__ += ['Generator', 'RandomState', 'SeedSequence', 'MT19937', @@ -211,5 +208,6 @@ def __RandomState_ctor(): from numpy._pytesttester import PytestTester + test = PytestTester(__name__) del PytestTester diff --git a/numpy/random/__init__.pyi b/numpy/random/__init__.pyi index 26cba3c90502..f949c5aef113 100644 --- a/numpy/random/__init__.pyi +++ b/numpy/random/__init__.pyi @@ -1,71 +1,124 @@ -from numpy._pytesttester import PytestTester - -from numpy.random._generator import Generator as Generator -from numpy.random._generator import default_rng as default_rng -from numpy.random._mt19937 import MT19937 as MT19937 -from numpy.random._pcg64 import ( - PCG64 as PCG64, - PCG64DXSM as PCG64DXSM, -) -from numpy.random._philox import Philox as Philox -from numpy.random._sfc64 import SFC64 as SFC64 -from numpy.random.bit_generator import BitGenerator as BitGenerator -from numpy.random.bit_generator import SeedSequence as SeedSequence -from 
numpy.random.mtrand import ( - RandomState as RandomState, - beta as beta, - binomial as binomial, - bytes as bytes, - chisquare as chisquare, - choice as choice, - dirichlet as dirichlet, - exponential as exponential, - f as f, - gamma as gamma, - geometric as geometric, - get_bit_generator as get_bit_generator, - get_state as get_state, - gumbel as gumbel, - hypergeometric as hypergeometric, - laplace as laplace, - logistic as logistic, - lognormal as lognormal, - logseries as logseries, - multinomial as multinomial, - multivariate_normal as multivariate_normal, - negative_binomial as negative_binomial, - noncentral_chisquare as noncentral_chisquare, - noncentral_f as noncentral_f, - normal as normal, - pareto as pareto, - permutation as permutation, - poisson as poisson, - power as power, - rand as rand, - randint as randint, - randn as randn, - random as random, - random_integers as random_integers, - random_sample as random_sample, - ranf as ranf, - rayleigh as rayleigh, - sample as sample, - seed as seed, - set_bit_generator as set_bit_generator, - set_state as set_state, - shuffle as shuffle, - standard_cauchy as standard_cauchy, - standard_exponential as standard_exponential, - standard_gamma as standard_gamma, - standard_normal as standard_normal, - standard_t as standard_t, - triangular as triangular, - uniform as uniform, - vonmises as vonmises, - wald as wald, - weibull as weibull, - zipf as zipf, +from ._generator import Generator, default_rng +from ._mt19937 import MT19937 +from ._pcg64 import PCG64, PCG64DXSM +from ._philox import Philox +from ._sfc64 import SFC64 +from .bit_generator import BitGenerator, SeedSequence +from .mtrand import ( + RandomState, + beta, + binomial, + bytes, + chisquare, + choice, + dirichlet, + exponential, + f, + gamma, + geometric, + get_bit_generator, + get_state, + gumbel, + hypergeometric, + laplace, + logistic, + lognormal, + logseries, + multinomial, + multivariate_normal, + negative_binomial, + noncentral_chisquare, 
+ noncentral_f, + normal, + pareto, + permutation, + poisson, + power, + rand, + randint, + randn, + random, + random_integers, + random_sample, + ranf, + rayleigh, + sample, + seed, + set_bit_generator, + set_state, + shuffle, + standard_cauchy, + standard_exponential, + standard_gamma, + standard_normal, + standard_t, + triangular, + uniform, + vonmises, + wald, + weibull, + zipf, ) -__all__: list[str] -test: PytestTester +__all__ = [ + "beta", + "binomial", + "bytes", + "chisquare", + "choice", + "dirichlet", + "exponential", + "f", + "gamma", + "geometric", + "get_state", + "gumbel", + "hypergeometric", + "laplace", + "logistic", + "lognormal", + "logseries", + "multinomial", + "multivariate_normal", + "negative_binomial", + "noncentral_chisquare", + "noncentral_f", + "normal", + "pareto", + "permutation", + "poisson", + "power", + "rand", + "randint", + "randn", + "random", + "random_integers", + "random_sample", + "ranf", + "rayleigh", + "sample", + "seed", + "set_state", + "shuffle", + "standard_cauchy", + "standard_exponential", + "standard_gamma", + "standard_normal", + "standard_t", + "triangular", + "uniform", + "vonmises", + "wald", + "weibull", + "zipf", + "Generator", + "RandomState", + "SeedSequence", + "MT19937", + "Philox", + "PCG64", + "PCG64DXSM", + "SFC64", + "default_rng", + "BitGenerator", +] diff --git a/numpy/random/_bounded_integers.pyi b/numpy/random/_bounded_integers.pyi new file mode 100644 index 000000000000..c9c2ef67bd9d --- /dev/null +++ b/numpy/random/_bounded_integers.pyi @@ -0,0 +1 @@ +__all__: list[str] = [] diff --git a/numpy/random/_bounded_integers.pyx.in b/numpy/random/_bounded_integers.pyx.in index a8a2729535be..bbcdcada0110 100644 --- a/numpy/random/_bounded_integers.pyx.in +++ b/numpy/random/_bounded_integers.pyx.in @@ -120,8 +120,8 @@ cdef object _rand_{{nptype}}_broadcast(np.ndarray low, np.ndarray high, object s if np.any(low_high_comp(low_arr, high_arr)): raise ValueError(format_bounds_error(closed, low_arr)) - low_arr 
= np.PyArray_FROM_OTF(low, np.{{npctype}}, np.NPY_ALIGNED | np.NPY_FORCECAST) - high_arr = np.PyArray_FROM_OTF(high, np.{{npctype}}, np.NPY_ALIGNED | np.NPY_FORCECAST) + low_arr = np.PyArray_FROM_OTF(low, np.{{npctype}}, np.NPY_ARRAY_ALIGNED | np.NPY_ARRAY_FORCECAST) + high_arr = np.PyArray_FROM_OTF(high, np.{{npctype}}, np.NPY_ARRAY_ALIGNED | np.NPY_ARRAY_FORCECAST) if size is not None: out_arr = np.empty(size, np.{{otype}}) @@ -192,7 +192,7 @@ cdef object _rand_{{nptype}}_broadcast(object low, object high, object size, # We correct if the interval is not closed in this step if we go the long # route. (Not otherwise, since the -1 could overflow in theory.) if np.can_cast(low_arr_orig, np.{{otype}}): - low_arr = np.PyArray_FROM_OTF(low_arr_orig, np.{{npctype}}, np.NPY_ALIGNED) + low_arr = np.PyArray_FROM_OTF(low_arr_orig, np.{{npctype}}, np.NPY_ARRAY_ALIGNED) else: low_arr = np.empty_like(low_arr_orig, dtype=np.{{otype}}) flat = low_arr_orig.flat @@ -207,7 +207,7 @@ cdef object _rand_{{nptype}}_broadcast(object low, object high, object size, del low_arr_orig if np.can_cast(high_arr_orig, np.{{otype}}): - high_arr = np.PyArray_FROM_OTF(high_arr_orig, np.{{npctype}}, np.NPY_ALIGNED) + high_arr = np.PyArray_FROM_OTF(high_arr_orig, np.{{npctype}}, np.NPY_ARRAY_ALIGNED) else: high_arr = np.empty_like(high_arr_orig, dtype=np.{{otype}}) flat = high_arr_orig.flat diff --git a/numpy/random/_common.pxd b/numpy/random/_common.pxd index 0de4456d778f..7b6ae56bfe12 100644 --- a/numpy/random/_common.pxd +++ b/numpy/random/_common.pxd @@ -26,12 +26,15 @@ cdef enum ConstraintType: LEGACY_CONS_NON_NEGATIVE_INBOUNDS_LONG ctypedef ConstraintType constraint_type +ctypedef fused double_or_int64: + double + int64_t cdef object benchmark(bitgen_t *bitgen, object lock, Py_ssize_t cnt, object method) cdef object random_raw(bitgen_t *bitgen, object lock, object size, object output) cdef object prepare_cffi(bitgen_t *bitgen) cdef object prepare_ctypes(bitgen_t *bitgen) -cdef int 
check_constraint(double val, object name, constraint_type cons) except -1 +cdef int check_constraint(double_or_int64 val, object name, constraint_type cons) except -1 cdef int check_array_constraint(np.ndarray val, object name, constraint_type cons) except -1 cdef extern from "include/aligned_malloc.h": diff --git a/numpy/random/_common.pyi b/numpy/random/_common.pyi new file mode 100644 index 000000000000..417387612014 --- /dev/null +++ b/numpy/random/_common.pyi @@ -0,0 +1,17 @@ +from _typeshed import Incomplete +from collections.abc import Callable +from typing import NamedTuple + +import numpy as np + +__all__ = ["interface"] + +type _CDataVoidPointer = Incomplete # currently not expressible + +class interface(NamedTuple): + state_address: int + state: _CDataVoidPointer + next_uint64: Callable[..., np.uint64] + next_uint32: Callable[..., np.uint32] + next_double: Callable[..., np.float64] + bit_generator: _CDataVoidPointer diff --git a/numpy/random/_common.pyx b/numpy/random/_common.pyx index affa26421095..22e0b028e703 100644 --- a/numpy/random/_common.pyx +++ b/numpy/random/_common.pyx @@ -224,8 +224,7 @@ cdef np.ndarray int_to_array(object value, object name, object bits, object uint value = int(value) upper = int(2)**int(bits) if value < 0 or value >= upper: - raise ValueError('{name} must be positive and ' - 'less than 2**{bits}.'.format(name=name, bits=bits)) + raise ValueError(f'{name} must be positive and less than 2**{bits}.') out = np.empty(len, dtype=dtype) for i in range(len): @@ -234,15 +233,13 @@ cdef np.ndarray int_to_array(object value, object name, object bits, object uint else: out = value.astype(dtype) if out.shape != (len,): - raise ValueError('{name} must have {len} elements when using ' - 'array form'.format(name=name, len=len)) + raise ValueError(f'{name} must have {len} elements when using array form') return out cdef validate_output_shape(iter_shape, np.ndarray output): cdef np.npy_intp *dims cdef np.npy_intp ndim, i - cdef bint error 
dims = np.PyArray_DIMS(output) ndim = np.PyArray_NDIM(output) output_shape = tuple((dims[i] for i in range(ndim))) @@ -283,7 +280,7 @@ cdef check_output(object out, object dtype, object size, bint require_c_array): ) if out_array.dtype != dtype: raise TypeError('Supplied output array has the wrong type. ' - 'Expected {0}, got {1}'.format(np.dtype(dtype), out_array.dtype)) + f'Expected {np.dtype(dtype)}, got {out_array.dtype}') if size is not None: try: tup_size = tuple(size) @@ -298,7 +295,7 @@ cdef object double_fill(void *func, bitgen_t *state, object size, object lock, o cdef double out_val cdef double *out_array_data cdef np.ndarray out_array - cdef np.npy_intp i, n + cdef np.npy_intp n if size is None and out is None: with lock: @@ -322,7 +319,7 @@ cdef object float_fill(void *func, bitgen_t *state, object size, object lock, ob cdef float out_val cdef float *out_array_data cdef np.ndarray out_array - cdef np.npy_intp i, n + cdef np.npy_intp n if size is None and out is None: with lock: @@ -386,90 +383,90 @@ cdef int _check_array_cons_bounded_0_1(np.ndarray val, object name) except -1: cdef int check_array_constraint(np.ndarray val, object name, constraint_type cons) except -1: if cons == CONS_NON_NEGATIVE: if np.any(np.logical_and(np.logical_not(np.isnan(val)), np.signbit(val))): - raise ValueError(name + " < 0") + raise ValueError(f"{name} < 0") elif cons == CONS_POSITIVE or cons == CONS_POSITIVE_NOT_NAN: if cons == CONS_POSITIVE_NOT_NAN and np.any(np.isnan(val)): - raise ValueError(name + " must not be NaN") + raise ValueError(f"{name} must not be NaN") elif np.any(np.less_equal(val, 0)): - raise ValueError(name + " <= 0") + raise ValueError(f"{name} <= 0") elif cons == CONS_BOUNDED_0_1: return _check_array_cons_bounded_0_1(val, name) elif cons == CONS_BOUNDED_GT_0_1: if not np.all(np.greater(val, 0)) or not np.all(np.less_equal(val, 1)): - raise ValueError("{0} <= 0, {0} > 1 or {0} contains NaNs".format(name)) + raise ValueError(f"{name} <= 0, {name} > 1 or 
{name} contains NaNs") elif cons == CONS_BOUNDED_LT_0_1: if not np.all(np.greater_equal(val, 0)) or not np.all(np.less(val, 1)): - raise ValueError("{0} < 0, {0} >= 1 or {0} contains NaNs".format(name)) + raise ValueError(f"{name} < 0, {name} >= 1 or {name} contains NaNs") elif cons == CONS_GT_1: if not np.all(np.greater(val, 1)): - raise ValueError("{0} <= 1 or {0} contains NaNs".format(name)) + raise ValueError(f"{name} <= 1 or {name} contains NaNs") elif cons == CONS_GTE_1: if not np.all(np.greater_equal(val, 1)): - raise ValueError("{0} < 1 or {0} contains NaNs".format(name)) + raise ValueError(f"{name} < 1 or {name} contains NaNs") elif cons == CONS_POISSON: if not np.all(np.less_equal(val, POISSON_LAM_MAX)): - raise ValueError("{0} value too large".format(name)) + raise ValueError(f"{name} value too large") elif not np.all(np.greater_equal(val, 0.0)): - raise ValueError("{0} < 0 or {0} contains NaNs".format(name)) + raise ValueError(f"{name} < 0 or {name} contains NaNs") elif cons == LEGACY_CONS_POISSON: if not np.all(np.less_equal(val, LEGACY_POISSON_LAM_MAX)): - raise ValueError("{0} value too large".format(name)) + raise ValueError(f"{name} value too large") elif not np.all(np.greater_equal(val, 0.0)): - raise ValueError("{0} < 0 or {0} contains NaNs".format(name)) + raise ValueError(f"{name} < 0 or {name} contains NaNs") elif cons == LEGACY_CONS_NON_NEGATIVE_INBOUNDS_LONG: # Note, we assume that array is integral: if not np.all(val >= 0): - raise ValueError(name + " < 0") + raise ValueError(f"{name} < 0") elif not np.all(val <= int(LONG_MAX)): raise ValueError( - name + " is out of bounds for long, consider using " + f"{name} is out of bounds for long, consider using " "the new generator API for 64bit integers.") return 0 -cdef int check_constraint(double val, object name, constraint_type cons) except -1: - cdef bint is_nan +cdef int check_constraint(double_or_int64 val, object name, constraint_type cons) except -1: if cons == CONS_NON_NEGATIVE: - if not 
isnan(val) and signbit(val): - raise ValueError(name + " < 0") + if ((double_or_int64 is double and not isnan(val) and signbit(val)) or + (double_or_int64 is int64_t and val < 0)): + raise ValueError(f"{name} < 0") elif cons == CONS_POSITIVE or cons == CONS_POSITIVE_NOT_NAN: - if cons == CONS_POSITIVE_NOT_NAN and isnan(val): - raise ValueError(name + " must not be NaN") + if cons == CONS_POSITIVE_NOT_NAN and double_or_int64 is double and isnan(val): + raise ValueError(f"{name} must not be NaN") elif val <= 0: - raise ValueError(name + " <= 0") + raise ValueError(f"{name} <= 0") elif cons == CONS_BOUNDED_0_1: if not (val >= 0) or not (val <= 1): - raise ValueError("{0} < 0, {0} > 1 or {0} is NaN".format(name)) + raise ValueError(f"{name} < 0, {name} > 1 or {name} is NaN") elif cons == CONS_BOUNDED_GT_0_1: if not val >0 or not val <= 1: - raise ValueError("{0} <= 0, {0} > 1 or {0} contains NaNs".format(name)) + raise ValueError(f"{name} <= 0, {name} > 1 or {name} contains NaNs") elif cons == CONS_BOUNDED_LT_0_1: if not (val >= 0) or not (val < 1): - raise ValueError("{0} < 0, {0} >= 1 or {0} is NaN".format(name)) + raise ValueError(f"{name} < 0, {name} >= 1 or {name} is NaN") elif cons == CONS_GT_1: if not (val > 1): - raise ValueError("{0} <= 1 or {0} is NaN".format(name)) + raise ValueError(f"{name} <= 1 or {name} is NaN") elif cons == CONS_GTE_1: if not (val >= 1): - raise ValueError("{0} < 1 or {0} is NaN".format(name)) + raise ValueError(f"{name} < 1 or {name} is NaN") elif cons == CONS_POISSON: if not (val >= 0): - raise ValueError("{0} < 0 or {0} is NaN".format(name)) + raise ValueError(f"{name} < 0 or {name} is NaN") elif not (val <= POISSON_LAM_MAX): - raise ValueError(name + " value too large") + raise ValueError(f"{name} value too large") elif cons == LEGACY_CONS_POISSON: if not (val >= 0): - raise ValueError("{0} < 0 or {0} is NaN".format(name)) + raise ValueError(f"{name} < 0 or {name} is NaN") elif not (val <= LEGACY_POISSON_LAM_MAX): - raise 
ValueError(name + " value too large") + raise ValueError(f"{name} value too large") elif cons == LEGACY_CONS_NON_NEGATIVE_INBOUNDS_LONG: # Note: Assume value is integral (double of LONG_MAX should work out) if val < 0: - raise ValueError(name + " < 0") - elif val > LONG_MAX: + raise ValueError(f"{name} < 0") + elif val > LONG_MAX: raise ValueError( - name + " is out of bounds for long, consider using " + f"{name} is out of bounds for long, consider using " "the new generator API for 64bit integers.") return 0 @@ -603,13 +600,13 @@ cdef object cont(void *func, void *state, object size, object lock, int narg, cdef bint is_scalar = True check_output(out, np.float64, size, narg > 0) if narg > 0: - a_arr = np.PyArray_FROM_OTF(a, np.NPY_DOUBLE, np.NPY_ALIGNED) + a_arr = np.PyArray_FROM_OTF(a, np.NPY_DOUBLE, np.NPY_ARRAY_ALIGNED) is_scalar = is_scalar and np.PyArray_NDIM(a_arr) == 0 if narg > 1: - b_arr = np.PyArray_FROM_OTF(b, np.NPY_DOUBLE, np.NPY_ALIGNED) + b_arr = np.PyArray_FROM_OTF(b, np.NPY_DOUBLE, np.NPY_ARRAY_ALIGNED) is_scalar = is_scalar and np.PyArray_NDIM(b_arr) == 0 if narg == 3: - c_arr = np.PyArray_FROM_OTF(c, np.NPY_DOUBLE, np.NPY_ALIGNED) + c_arr = np.PyArray_FROM_OTF(c, np.NPY_DOUBLE, np.NPY_ARRAY_ALIGNED) is_scalar = is_scalar and np.PyArray_NDIM(c_arr) == 0 if not is_scalar: @@ -762,7 +759,6 @@ cdef object discrete_broadcast_di(void *func, void *state, object size, object l np.ndarray a_arr, object a_name, constraint_type a_constraint, np.ndarray b_arr, object b_name, constraint_type b_constraint): cdef np.ndarray randoms - cdef int64_t *randoms_data cdef np.broadcast it cdef random_uint_di f = (func) cdef np.npy_intp i, n @@ -779,7 +775,6 @@ cdef object discrete_broadcast_di(void *func, void *state, object size, object l it = np.PyArray_MultiIterNew2(a_arr, b_arr) randoms = np.empty(it.shape, np.int64) - randoms_data = np.PyArray_DATA(randoms) n = np.PyArray_SIZE(randoms) it = np.PyArray_MultiIterNew3(randoms, a_arr, b_arr) @@ -879,23 +874,23 @@ cdef 
object disc(void *func, void *state, object size, object lock, cdef int64_t _ia = 0, _ib = 0, _ic = 0 cdef bint is_scalar = True if narg_double > 0: - a_arr = np.PyArray_FROM_OTF(a, np.NPY_DOUBLE, np.NPY_ALIGNED) + a_arr = np.PyArray_FROM_OTF(a, np.NPY_DOUBLE, np.NPY_ARRAY_ALIGNED) is_scalar = is_scalar and np.PyArray_NDIM(a_arr) == 0 if narg_double > 1: - b_arr = np.PyArray_FROM_OTF(b, np.NPY_DOUBLE, np.NPY_ALIGNED) + b_arr = np.PyArray_FROM_OTF(b, np.NPY_DOUBLE, np.NPY_ARRAY_ALIGNED) is_scalar = is_scalar and np.PyArray_NDIM(b_arr) == 0 elif narg_int64 == 1: - b_arr = np.PyArray_FROM_OTF(b, np.NPY_INT64, np.NPY_ALIGNED) + b_arr = np.PyArray_FROM_OTF(b, np.NPY_INT64, np.NPY_ARRAY_ALIGNED) is_scalar = is_scalar and np.PyArray_NDIM(b_arr) == 0 else: if narg_int64 > 0: - a_arr = np.PyArray_FROM_OTF(a, np.NPY_INT64, np.NPY_ALIGNED) + a_arr = np.PyArray_FROM_OTF(a, np.NPY_INT64, np.NPY_ARRAY_ALIGNED) is_scalar = is_scalar and np.PyArray_NDIM(a_arr) == 0 if narg_int64 > 1: - b_arr = np.PyArray_FROM_OTF(b, np.NPY_INT64, np.NPY_ALIGNED) + b_arr = np.PyArray_FROM_OTF(b, np.NPY_INT64, np.NPY_ARRAY_ALIGNED) is_scalar = is_scalar and np.PyArray_NDIM(b_arr) == 0 if narg_int64 > 2: - c_arr = np.PyArray_FROM_OTF(c, np.NPY_INT64, np.NPY_ALIGNED) + c_arr = np.PyArray_FROM_OTF(c, np.NPY_INT64, np.NPY_ARRAY_ALIGNED) is_scalar = is_scalar and np.PyArray_NDIM(c_arr) == 0 if not is_scalar: @@ -918,31 +913,33 @@ cdef object disc(void *func, void *state, object size, object lock, else: raise NotImplementedError("No vector path available") + # At this point, we know is_scalar is True. 
+ if narg_double > 0: _da = PyFloat_AsDouble(a) - if a_constraint != CONS_NONE and is_scalar: + if a_constraint != CONS_NONE: check_constraint(_da, a_name, a_constraint) if narg_double > 1: _db = PyFloat_AsDouble(b) - if b_constraint != CONS_NONE and is_scalar: + if b_constraint != CONS_NONE: check_constraint(_db, b_name, b_constraint) elif narg_int64 == 1: _ib = b - if b_constraint != CONS_NONE and is_scalar: + if b_constraint != CONS_NONE: check_constraint(_ib, b_name, b_constraint) else: if narg_int64 > 0: _ia = a - if a_constraint != CONS_NONE and is_scalar: + if a_constraint != CONS_NONE: check_constraint(_ia, a_name, a_constraint) if narg_int64 > 1: _ib = b - if b_constraint != CONS_NONE and is_scalar: + if b_constraint != CONS_NONE: check_constraint(_ib, b_name, b_constraint) if narg_int64 > 2: _ic = c - if c_constraint != CONS_NONE and is_scalar: + if c_constraint != CONS_NONE: check_constraint(_ic, c_name, c_constraint) if size is None: @@ -1047,10 +1044,10 @@ cdef object cont_f(void *func, bitgen_t *state, object size, object lock, object a, object a_name, constraint_type a_constraint, object out): - cdef np.ndarray a_arr, b_arr, c_arr + cdef np.ndarray a_arr cdef float _a cdef bint is_scalar = True - cdef int requirements = np.NPY_ALIGNED | np.NPY_FORCECAST + cdef int requirements = np.NPY_ARRAY_ALIGNED | np.NPY_ARRAY_FORCECAST check_output(out, np.float32, size, True) a_arr = np.PyArray_FROMANY(a, np.NPY_FLOAT32, 0, 0, requirements) is_scalar = np.PyArray_NDIM(a_arr) == 0 diff --git a/numpy/random/_examples/cffi/extending.py b/numpy/random/_examples/cffi/extending.py index 8440d400ea91..ad4c9acbdceb 100644 --- a/numpy/random/_examples/cffi/extending.py +++ b/numpy/random/_examples/cffi/extending.py @@ -2,9 +2,13 @@ Use cffi to access any of the underlying C functions from distributions.h """ import os -import numpy as np + import cffi + +import numpy as np + from .parse import parse_distributions_h + ffi = cffi.FFI() inc_dir = 
os.path.join(np.get_include(), 'numpy') diff --git a/numpy/random/_examples/cffi/parse.py b/numpy/random/_examples/cffi/parse.py index d41c4c2db23d..0f80adb35250 100644 --- a/numpy/random/_examples/cffi/parse.py +++ b/numpy/random/_examples/cffi/parse.py @@ -30,11 +30,11 @@ def parse_distributions_h(ffi, inc_dir): continue if line.strip().startswith('#ifdef __cplusplus'): ignoring = True - + # massage the include file if line.strip().startswith('#'): continue - + # skip any inlined function definition # which starts with 'static inline xxx(...) {' # and ends with a closing '}' @@ -45,10 +45,9 @@ def parse_distributions_h(ffi, inc_dir): in_skip += line.count('{') in_skip -= line.count('}') continue - + # replace defines with their value or remove them line = line.replace('DECLDIR', '') line = line.replace('RAND_INT_TYPE', 'int64_t') s.append(line) ffi.cdef('\n'.join(s)) - diff --git a/numpy/random/_examples/cython/extending.pyx b/numpy/random/_examples/cython/extending.pyx index 30efd7447748..6a0f45e1be9e 100644 --- a/numpy/random/_examples/cython/extending.pyx +++ b/numpy/random/_examples/cython/extending.pyx @@ -1,4 +1,3 @@ -#!/usr/bin/env python3 #cython: language_level=3 from libc.stdint cimport uint32_t diff --git a/numpy/random/_examples/cython/extending_distributions.pyx b/numpy/random/_examples/cython/extending_distributions.pyx index d908e92d01b0..8de722686304 100644 --- a/numpy/random/_examples/cython/extending_distributions.pyx +++ b/numpy/random/_examples/cython/extending_distributions.pyx @@ -1,4 +1,3 @@ -#!/usr/bin/env python3 #cython: language_level=3 """ This file shows how the to use a BitGenerator to create a distribution. 
@@ -13,6 +12,8 @@ from numpy.random import PCG64 from numpy.random.c_distributions cimport ( random_standard_uniform_fill, random_standard_uniform_fill_f) +np.import_array() + @cython.boundscheck(False) @cython.wraparound(False) @@ -91,7 +92,6 @@ def uniforms_ex(bit_generator, Py_ssize_t n, dtype=np.float64): Desired dtype, either 'd' (or 'float64') or 'f' (or 'float32'). The default dtype value is 'd' """ - cdef Py_ssize_t i cdef bitgen_t *rng cdef const char *capsule_name = "BitGenerator" cdef np.ndarray randoms diff --git a/numpy/random/_examples/cython/meson.build b/numpy/random/_examples/cython/meson.build index 1ad754c53691..7aa367d13787 100644 --- a/numpy/random/_examples/cython/meson.build +++ b/numpy/random/_examples/cython/meson.build @@ -11,6 +11,11 @@ if not cy.version().version_compare('>=3.0.6') error('tests requires Cython >= 3.0.6') endif +base_cython_args = [] +if cy.version().version_compare('>=3.1.0') + base_cython_args += ['-Xfreethreading_compatible=True'] +endif + _numpy_abs = run_command(py3, ['-c', 'import os; os.chdir(".."); import numpy; print(os.path.abspath(numpy.get_include() + "../../.."))'], check: true).stdout().strip() @@ -27,6 +32,7 @@ py3.extension_module( install: false, include_directories: [npy_include_path], dependencies: [npyrandom_lib, npymath_lib], + cython_args: base_cython_args, ) py3.extension_module( 'extending', @@ -34,13 +40,14 @@ py3.extension_module( install: false, include_directories: [npy_include_path], dependencies: [npyrandom_lib, npymath_lib], + cython_args: base_cython_args, ) py3.extension_module( 'extending_cpp', 'extending_distributions.pyx', install: false, override_options : ['cython_language=cpp'], - cython_args: ['--module-name', 'extending_cpp'], + cython_args: base_cython_args + ['--module-name', 'extending_cpp'], include_directories: [npy_include_path], dependencies: [npyrandom_lib, npymath_lib], ) diff --git a/numpy/random/_examples/numba/extending.py b/numpy/random/_examples/numba/extending.py 
index f387db69502a..c1d0f4fbd3e3 100644 --- a/numpy/random/_examples/numba/extending.py +++ b/numpy/random/_examples/numba/extending.py @@ -1,8 +1,9 @@ -import numpy as np +from timeit import timeit + import numba as nb +import numpy as np from numpy.random import PCG64 -from timeit import timeit bit_gen = PCG64() next_d = bit_gen.cffi.next_double @@ -24,6 +25,7 @@ def normals(n, state): out[2 * i + 1] = f * x2 return out + # Compile using Numba normalsj = nb.jit(normals, nopython=True) # Must use state address not state with numba @@ -32,11 +34,13 @@ def normals(n, state): def numbacall(): return normalsj(n, state_addr) + rg = np.random.Generator(PCG64()) def numpycall(): return rg.normal(size=n) + # Check that the functions work r1 = numbacall() r2 = numpycall() @@ -80,5 +84,3 @@ def bounded_uints(lb, ub, n, state): bounded_uints(323, 2394691, 10000000, ctypes_state.value) - - diff --git a/numpy/random/_examples/numba/extending_distributions.py b/numpy/random/_examples/numba/extending_distributions.py index 7ef0753d71d1..d0462e73ee0b 100644 --- a/numpy/random/_examples/numba/extending_distributions.py +++ b/numpy/random/_examples/numba/extending_distributions.py @@ -27,9 +27,9 @@ import os import numba as nb -import numpy as np from cffi import FFI +import numpy as np from numpy.random import PCG64 ffi = FFI() diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi index 16a0e5e0ff8d..634aaf68912c 100644 --- a/numpy/random/_generator.pyi +++ b/numpy/random/_generator.pyi @@ -1,784 +1,758 @@ -from collections.abc import Callable -from typing import Any, overload, TypeVar, Literal +# Aliases for builtins shadowed by classes to avoid annotations resolving to class members by ty +from builtins import bytes as py_bytes +from collections.abc import Callable, MutableSequence +from typing import Any, Literal, Self, overload import numpy as np -from numpy import ( - dtype, - float32, - float64, - int8, - int16, - int32, - int64, - int_, - uint, - uint8, - 
uint16, - uint32, - uint64, -) -from numpy.random import BitGenerator, SeedSequence from numpy._typing import ( ArrayLike, + DTypeLike, NDArray, + _ArrayLike, _ArrayLikeFloat_co, _ArrayLikeInt_co, - _DoubleCodes, - _DTypeLikeBool, - _DTypeLikeInt, - _DTypeLikeUInt, + _DTypeLike, _Float32Codes, _Float64Codes, _FloatLike_co, - _Int8Codes, - _Int16Codes, - _Int32Codes, _Int64Codes, - _IntCodes, + _NestedSequence, _ShapeLike, - _SingleCodes, - _SupportsDType, - _UInt8Codes, - _UInt16Codes, - _UInt32Codes, - _UInt64Codes, - _UIntCodes, ) -_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) +from .bit_generator import BitGenerator, SeedSequence +from .mtrand import RandomState -_DTypeLikeFloat32 = ( - dtype[float32] - | _SupportsDType[dtype[float32]] - | type[float32] - | _Float32Codes - | _SingleCodes -) +type _ArrayF32 = NDArray[np.float32] +type _ArrayF64 = NDArray[np.float64] -_DTypeLikeFloat64 = ( - dtype[float64] - | _SupportsDType[dtype[float64]] - | type[float] - | type[float64] - | _Float64Codes - | _DoubleCodes -) +type _DTypeLikeI64 = _DTypeLike[np.int64] | _Int64Codes +type _DTypeLikeF32 = _DTypeLike[np.float32] | _Float32Codes +type _DTypeLikeF64 = type[float] | _DTypeLike[np.float64] | _Float64Codes +# we use `str` to avoid type-checker performance issues because of the many `Literal` variants +type _DTypeLikeFloat = type[float] | _DTypeLike[np.float32 | np.float64] | str + +# Similar to `_ArrayLike{}_co`, but rejects scalars +type _NDArrayLikeInt = NDArray[np.generic[int]] | _NestedSequence[int] +type _NDArrayLikeFloat = NDArray[np.generic[float]] | _NestedSequence[float] + +type _MethodExp = Literal["zig", "inv"] + +### class Generator: def __init__(self, bit_generator: BitGenerator) -> None: ... - def __repr__(self) -> str: ... - def __str__(self) -> str: ... - def __getstate__(self) -> None: ... def __setstate__(self, state: dict[str, Any] | None) -> None: ... 
- def __reduce__(self) -> tuple[ - Callable[[BitGenerator], Generator], - tuple[BitGenerator], - None]: ... + def __reduce__(self) -> tuple[Callable[[BitGenerator], Generator], tuple[BitGenerator], None]: ... + + # @property def bit_generator(self) -> BitGenerator: ... - def spawn(self, n_children: int) -> list[Generator]: ... - def bytes(self, length: int) -> bytes: ... - @overload - def standard_normal( # type: ignore[misc] - self, - size: None = ..., - dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., - out: None = ..., - ) -> float: ... - @overload - def standard_normal( # type: ignore[misc] - self, - size: _ShapeLike = ..., - ) -> NDArray[float64]: ... - @overload - def standard_normal( # type: ignore[misc] - self, - *, - out: NDArray[float64] = ..., - ) -> NDArray[float64]: ... - @overload - def standard_normal( # type: ignore[misc] - self, - size: _ShapeLike = ..., - dtype: _DTypeLikeFloat32 = ..., - out: None | NDArray[float32] = ..., - ) -> NDArray[float32]: ... - @overload - def standard_normal( # type: ignore[misc] - self, - size: _ShapeLike = ..., - dtype: _DTypeLikeFloat64 = ..., - out: None | NDArray[float64] = ..., - ) -> NDArray[float64]: ... - @overload - def permutation(self, x: int, axis: int = ...) -> NDArray[int64]: ... - @overload - def permutation(self, x: ArrayLike, axis: int = ...) -> NDArray[Any]: ... - @overload - def standard_exponential( # type: ignore[misc] - self, - size: None = ..., - dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., - method: Literal["zig", "inv"] = ..., - out: None = ..., - ) -> float: ... + def spawn(self, n_children: int) -> list[Self]: ... + def bytes(self, length: int) -> py_bytes: ... + + # continuous distributions + + # @overload - def standard_exponential( - self, - size: _ShapeLike = ..., - ) -> NDArray[float64]: ... - @overload - def standard_exponential( - self, - *, - out: NDArray[float64] = ..., - ) -> NDArray[float64]: ... + def standard_cauchy(self, size: None = None) -> float: ... 
@overload + def standard_cauchy(self, size: _ShapeLike) -> _ArrayF64: ... + + # + @overload # size=None (default); NOTE: dtype is ignored + def random(self, size: None = None, dtype: _DTypeLikeFloat = ..., out: None = None) -> float: ... + @overload # size=, dtype=f64 (default) + def random(self, size: _ShapeLike, dtype: _DTypeLikeF64 = ..., out: None = None) -> _ArrayF64: ... + @overload # size=, dtype=f32 + def random(self, size: _ShapeLike, dtype: _DTypeLikeF32, out: None = None) -> _ArrayF32: ... + @overload # out: f64 array (keyword) + def random[ArrayT: _ArrayF64](self, size: _ShapeLike | None = None, dtype: _DTypeLikeF64 = ..., *, out: ArrayT) -> ArrayT: ... + @overload # dtype: f32 (keyword), out: f32 array + def random[ArrayT: _ArrayF32](self, size: _ShapeLike | None = None, *, dtype: _DTypeLikeF32, out: ArrayT) -> ArrayT: ... + @overload # out: f64 array (positional) + def random[ArrayT: _ArrayF64](self, size: _ShapeLike | None, dtype: _DTypeLikeF64, out: ArrayT) -> ArrayT: ... + @overload # dtype: f32 (positional), out: f32 array + def random[ArrayT: _ArrayF32](self, size: _ShapeLike | None, dtype: _DTypeLikeF32, out: ArrayT) -> ArrayT: ... + + # + @overload # size=None (default); NOTE: dtype is ignored + def standard_normal(self, size: None = None, dtype: _DTypeLikeFloat = ..., out: None = None) -> float: ... + @overload # size=, dtype: f64 (default) + def standard_normal(self, size: _ShapeLike, dtype: _DTypeLikeF64 = ..., out: None = None) -> _ArrayF64: ... + @overload # size=, dtype: f32 + def standard_normal(self, size: _ShapeLike, dtype: _DTypeLikeF32, *, out: None = None) -> _ArrayF32: ... + @overload # dtype: f64 (default), out: f64 array (keyword) + def standard_normal[ArrayT: _ArrayF64]( + self, size: _ShapeLike | None = None, dtype: _DTypeLikeF64 = ..., *, out: ArrayT + ) -> ArrayT: ... 
+ @overload # dtype: f32 (keyword), out: f32 array + def standard_normal[ArrayT: _ArrayF32]( + self, size: _ShapeLike | None = None, *, dtype: _DTypeLikeF32, out: ArrayT + ) -> ArrayT: ... + @overload # dtype: f32 (positional), out: f32 array + def standard_normal[ArrayT: _ArrayF32](self, size: _ShapeLike | None, dtype: _DTypeLikeF32, out: ArrayT) -> ArrayT: ... + + # + @overload # size=None (default); NOTE: dtype is ignored def standard_exponential( - self, - size: _ShapeLike = ..., - *, - method: Literal["zig", "inv"] = ..., - out: None | NDArray[float64] = ..., - ) -> NDArray[float64]: ... - @overload + self, size: None = None, dtype: _DTypeLikeFloat = ..., method: _MethodExp = "zig", out: None = None + ) -> float: ... + @overload # size=, dtype: f64 (default) def standard_exponential( - self, - size: _ShapeLike = ..., - dtype: _DTypeLikeFloat32 = ..., - method: Literal["zig", "inv"] = ..., - out: None | NDArray[float32] = ..., - ) -> NDArray[float32]: ... - @overload + self, size: _ShapeLike, dtype: _DTypeLikeF64 = ..., method: _MethodExp = "zig", out: None = None + ) -> _ArrayF64: ... + @overload # size=, dtype: f32 def standard_exponential( - self, - size: _ShapeLike = ..., - dtype: _DTypeLikeFloat64 = ..., - method: Literal["zig", "inv"] = ..., - out: None | NDArray[float64] = ..., - ) -> NDArray[float64]: ... - @overload - def random( # type: ignore[misc] - self, - size: None = ..., - dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., - out: None = ..., + self, size: _ShapeLike, dtype: _DTypeLikeF32, method: _MethodExp = "zig", out: None = None + ) -> _ArrayF32: ... + @overload # dtype: f64 (default), out: f64 array (keyword) + def standard_exponential[ArrayT: _ArrayF64]( + self, size: _ShapeLike | None = None, dtype: _DTypeLikeF64 = ..., method: _MethodExp = "zig", *, out: ArrayT + ) -> ArrayT: ... 
+ @overload # dtype: f32 (keyword), out: f32 array + def standard_exponential[ArrayT: _ArrayF32]( + self, size: _ShapeLike | None = None, *, dtype: _DTypeLikeF32, method: _MethodExp = "zig", out: ArrayT + ) -> ArrayT: ... + @overload # dtype: f32 (positional), out: f32 array (keyword) + def standard_exponential[ArrayT: _ArrayF32]( + self, size: _ShapeLike | None, dtype: _DTypeLikeF32, method: _MethodExp = "zig", *, out: ArrayT + ) -> ArrayT: ... + + # + @overload # 0d, size=None (default); NOTE: dtype is ignored + def standard_gamma( + self, shape: _FloatLike_co, size: None = None, dtype: _DTypeLikeFloat = ..., out: None = None ) -> float: ... - @overload - def random( + @overload # >0d, dtype: f64 (default) + def standard_gamma( + self, shape: _NDArrayLikeFloat, size: None = None, dtype: _DTypeLikeF64 = ..., out: None = None + ) -> _ArrayF64: ... + @overload # >0d, dtype: f32 (keyword) + def standard_gamma( + self, shape: _NDArrayLikeFloat, size: None = None, *, dtype: _DTypeLikeF32, out: None = None + ) -> _ArrayF32: ... + @overload # >=0d, dtype: f64 (default) + def standard_gamma( + self, shape: _ArrayLikeFloat_co, size: None = None, dtype: _DTypeLikeF64 = ..., out: None = None + ) -> _ArrayF64 | Any: ... + @overload # >=0d, dtype: f32 (keyword) + def standard_gamma( + self, shape: _ArrayLikeFloat_co, size: None = None, *, dtype: _DTypeLikeF32, out: None = None + ) -> _ArrayF32 | Any: ... + @overload # >=0d, size=, dtype: f64 (default) + def standard_gamma( + self, shape: _ArrayLikeFloat_co, size: _ShapeLike, dtype: _DTypeLikeF64 = ..., out: None = None + ) -> _ArrayF64: ... + @overload # >=0d, size=, dtype: f32 + def standard_gamma( + self, shape: _ArrayLikeFloat_co, size: _ShapeLike, dtype: _DTypeLikeF32, *, out: None = None + ) -> _ArrayF32: ... 
+ @overload # >=0d, dtype: f64 (default), out: f64 array (keyword) + def standard_gamma[ArrayT: _ArrayF64]( + self, shape: _ArrayLikeFloat_co, size: _ShapeLike | None = None, dtype: _DTypeLikeF64 = ..., *, out: ArrayT + ) -> ArrayT: ... + @overload # >=0d, dtype: f32 (keyword), out: f32 array + def standard_gamma[ArrayT: _ArrayF32]( + self, shape: _ArrayLikeFloat_co, size: _ShapeLike | None = None, *, dtype: _DTypeLikeF32, out: ArrayT + ) -> ArrayT: ... + @overload # >=0d, dtype: f32 (positional), out: f32 array + def standard_gamma[ArrayT: _ArrayF32]( + self, shape: _ArrayLikeFloat_co, size: _ShapeLike | None, dtype: _DTypeLikeF32, out: ArrayT + ) -> ArrayT: ... + + # + @overload # 0d + def power(self, /, a: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def power(self, /, a: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def power(self, /, a: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d + def power(self, /, a: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d + def pareto(self, /, a: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def pareto(self, /, a: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def pareto(self, /, a: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d + def pareto(self, /, a: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d + def weibull(self, /, a: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def weibull(self, /, a: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def weibull(self, /, a: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d + def weibull(self, /, a: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d + def standard_t(self, /, df: _FloatLike_co, size: None = None) -> float: ... 
+ @overload # size= + def standard_t(self, /, df: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def standard_t(self, /, df: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d + def standard_t(self, /, df: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d + def chisquare(self, /, df: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def chisquare(self, /, df: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def chisquare(self, /, df: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d + def chisquare(self, /, df: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default) + def exponential(self, /, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (keyword) + def exponential(self, /, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # size= (positional) + def exponential(self, /, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def exponential(self, /, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d + def exponential(self, /, scale: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default) + def rayleigh(self, /, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (keyword) + def rayleigh(self, /, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # size= (positional) + def rayleigh(self, /, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def rayleigh(self, /, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d + def rayleigh(self, /, scale: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... 
+ + # + @overload # 0d, 0d + def noncentral_chisquare(self, /, df: _FloatLike_co, nonc: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def noncentral_chisquare(self, /, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d + def noncentral_chisquare(self, /, df: _ArrayLikeFloat_co, nonc: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def noncentral_chisquare(self, /, df: _NDArrayLikeFloat, nonc: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d + def noncentral_chisquare(self, /, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d + def f(self, /, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def f(self, /, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d + def f(self, /, dfnum: _ArrayLikeFloat_co, dfden: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def f(self, /, dfnum: _NDArrayLikeFloat, dfden: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d (fallback) + def f(self, /, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d + def vonmises(self, /, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def vonmises(self, /, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d + def vonmises(self, /, mu: _ArrayLikeFloat_co, kappa: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def vonmises(self, /, mu: _NDArrayLikeFloat, kappa: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64: ... 
+ @overload # >=0d, >=0d (fallback) + def vonmises(self, /, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d + def wald(self, /, mean: _FloatLike_co, scale: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def wald(self, /, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d + def wald(self, /, mean: _ArrayLikeFloat_co, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def wald(self, /, mean: _NDArrayLikeFloat, scale: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d (fallback) + def wald(self, /, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d + def beta(self, /, a: _FloatLike_co, b: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def beta(self, /, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d + def beta(self, /, a: _ArrayLikeFloat_co, b: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def beta(self, /, a: _NDArrayLikeFloat, b: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d (fallback) + def beta(self, /, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d (default) + def gamma(self, /, shape: _FloatLike_co, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (positional) + def gamma(self, /, shape: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # size= (keyword) + def gamma(self, /, shape: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... 
+ @overload # >=0d, >0d + def gamma(self, /, shape: _ArrayLikeFloat_co, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def gamma(self, /, shape: _NDArrayLikeFloat, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d (fallback) + def gamma(self, /, shape: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default), 0d (default) + def uniform(self, /, low: _FloatLike_co = 0.0, high: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # >=0d, >=0d, size= (positional) + def uniform(self, /, low: _ArrayLikeFloat_co, high: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (positional) + def uniform(self, /, low: _ArrayLikeFloat_co, high: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d, size= (keyword) + def uniform(self, /, low: _ArrayLikeFloat_co = 0.0, high: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (keyword) + def uniform(self, /, low: _ArrayLikeFloat_co = 0.0, *, high: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def uniform(self, /, low: _NDArrayLikeFloat, high: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d (fallback) + def uniform(self, /, low: _ArrayLikeFloat_co = 0.0, high: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default), 0d (default) + def normal(self, /, loc: _FloatLike_co = 0.0, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (positional) + def normal(self, /, loc: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (positional) + def normal(self, /, loc: _ArrayLikeFloat_co, scale: _NDArrayLikeFloat, size: None) -> _ArrayF64: ... 
+ @overload # size= (keyword) + def normal(self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (keyword) + def normal(self, /, loc: _ArrayLikeFloat_co = 0.0, *, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def normal(self, /, loc: _NDArrayLikeFloat, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d + def normal(self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default), 0d (default) + def gumbel(self, /, loc: _FloatLike_co = 0.0, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (positional) + def gumbel(self, /, loc: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (positional) + def gumbel(self, /, loc: _ArrayLikeFloat_co, scale: _NDArrayLikeFloat, size: None) -> _ArrayF64: ... + @overload # size= (keyword) + def gumbel(self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (keyword) + def gumbel(self, /, loc: _ArrayLikeFloat_co = 0.0, *, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def gumbel(self, /, loc: _NDArrayLikeFloat, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d + def gumbel(self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default), 0d (default) + def logistic(self, /, loc: _FloatLike_co = 0.0, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (positional) + def logistic(self, /, loc: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... 
+ @overload # >=0d, >0d (positional) + def logistic(self, /, loc: _ArrayLikeFloat_co, scale: _NDArrayLikeFloat, size: None) -> _ArrayF64: ... + @overload # size= (keyword) + def logistic(self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (keyword) + def logistic(self, /, loc: _ArrayLikeFloat_co = 0.0, *, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def logistic(self, /, loc: _NDArrayLikeFloat, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d + def logistic( + self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, size: None = None + ) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default), 0d (default) + def laplace(self, /, loc: _FloatLike_co = 0.0, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (positional) + def laplace(self, /, loc: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (positional) + def laplace(self, /, loc: _ArrayLikeFloat_co, scale: _NDArrayLikeFloat, size: None) -> _ArrayF64: ... + @overload # size= (keyword) + def laplace(self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (keyword) + def laplace(self, /, loc: _ArrayLikeFloat_co = 0.0, *, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def laplace(self, /, loc: _NDArrayLikeFloat, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d + def laplace( + self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, size: None = None + ) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default), 0d (default) + def lognormal(self, /, mean: _FloatLike_co = 0.0, sigma: _FloatLike_co = 1.0, size: None = None) -> float: ... 
+ @overload # size= (positional) + def lognormal(self, /, mean: _ArrayLikeFloat_co, sigma: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (positional) + def lognormal(self, /, mean: _ArrayLikeFloat_co, sigma: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # size= (keyword) + def lognormal(self, /, mean: _ArrayLikeFloat_co = 0.0, sigma: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (keyword) + def lognormal(self, /, mean: _ArrayLikeFloat_co = 0.0, *, sigma: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def lognormal(self, /, mean: _NDArrayLikeFloat, sigma: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d + def lognormal( + self, /, mean: _ArrayLikeFloat_co = 0.0, sigma: _ArrayLikeFloat_co = 1.0, size: None = None + ) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d, 0d + def triangular(self, /, left: _FloatLike_co, mode: _FloatLike_co, right: _FloatLike_co, size: None = None) -> float: ... + @overload # >=0d, >=0d, >=0d, size= + def triangular( + self, /, left: _ArrayLikeFloat_co, mode: _ArrayLikeFloat_co, right: _ArrayLikeFloat_co, size: _ShapeLike + ) -> _ArrayF64: ... + @overload # >=0d, >=0d, >0d + def triangular( + self, /, left: _ArrayLikeFloat_co, mode: _ArrayLikeFloat_co, right: _NDArrayLikeFloat, size: None = None + ) -> _ArrayF64: ... + @overload # >=0d, >0d, >=0d + def triangular( + self, /, left: _ArrayLikeFloat_co, mode: _NDArrayLikeFloat, right: _ArrayLikeFloat_co, size: None = None + ) -> _ArrayF64: ... + @overload # >0d, >=0d, >=0d + def triangular( + self, /, left: _NDArrayLikeFloat, mode: _ArrayLikeFloat_co, right: _ArrayLikeFloat_co, size: None = None + ) -> _ArrayF64: ... + @overload # >=0d, >=0d, >=0d (fallback) + def triangular( + self, /, left: _ArrayLikeFloat_co, mode: _ArrayLikeFloat_co, right: _ArrayLikeFloat_co, size: None = None + ) -> _ArrayF64 | Any: ... 
+ + # + @overload # 0d, 0d, 0d + def noncentral_f(self, /, dfnum: _FloatLike_co, dfden: _FloatLike_co, nonc: _FloatLike_co, size: None = None) -> float: ... + @overload # >=0d, >=0d, >=0d, size= + def noncentral_f( + self, /, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: _ShapeLike + ) -> _ArrayF64: ... + @overload # >=0d, >=0d, >0d + def noncentral_f( + self, /, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, nonc: _NDArrayLikeFloat, size: None = None + ) -> _ArrayF64: ... + @overload # >=0d, >0d, >=0d + def noncentral_f( + self, /, dfnum: _ArrayLikeFloat_co, dfden: _NDArrayLikeFloat, nonc: _ArrayLikeFloat_co, size: None = None + ) -> _ArrayF64: ... + @overload # >0d, >=0d, >=0d + def noncentral_f( + self, /, dfnum: _NDArrayLikeFloat, dfden: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None = None + ) -> _ArrayF64: ... + @overload # >=0d, >=0d, >=0d (fallback) + def noncentral_f( + self, /, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None = None + ) -> _ArrayF64 | Any: ... + + ### + # discrete + + # + @overload # 0d bool | int + def integers[AnyIntT: (bool, int)]( + self, low: int, high: int | None = None, size: None = None, *, dtype: type[AnyIntT], endpoint: bool = False + ) -> AnyIntT: ... + @overload # 0d integer dtype + def integers[ScalarT: np.integer | np.bool]( + self, low: int, high: int | None = None, size: None = None, *, dtype: _DTypeLike[ScalarT], endpoint: bool = False + ) -> ScalarT: ... + @overload # 0d int64 (default) + def integers( + self, low: int, high: int | None = None, size: None = None, dtype: _DTypeLikeI64 = ..., endpoint: bool = False + ) -> np.int64: ... + @overload # 0d unknown + def integers( + self, low: int, high: int | None = None, size: None = None, dtype: DTypeLike | None = ..., endpoint: bool = False + ) -> Any: ... 
+ @overload # integer dtype, size= + def integers[ScalarT: np.integer | np.bool]( self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, *, - out: NDArray[float64] = ..., - ) -> NDArray[float64]: ... - @overload - def random( + size: _ShapeLike, + dtype: _DTypeLike[ScalarT], + endpoint: bool = False, + ) -> NDArray[ScalarT]: ... + @overload # int64 (default), size= + def integers( self, - size: _ShapeLike = ..., + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, *, - out: None | NDArray[float64] = ..., - ) -> NDArray[float64]: ... - @overload - def random( - self, - size: _ShapeLike = ..., - dtype: _DTypeLikeFloat32 = ..., - out: None | NDArray[float32] = ..., - ) -> NDArray[float32]: ... - @overload - def random( - self, - size: _ShapeLike = ..., - dtype: _DTypeLikeFloat64 = ..., - out: None | NDArray[float64] = ..., - ) -> NDArray[float64]: ... - @overload - def beta( - self, - a: _FloatLike_co, - b: _FloatLike_co, - size: None = ..., - ) -> float: ... # type: ignore[misc] - @overload - def beta( - self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None | _ShapeLike = ... - ) -> NDArray[float64]: ... - @overload - def exponential(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc] - @overload - def exponential( - self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ... - ) -> NDArray[float64]: ... - @overload - def integers( # type: ignore[misc] - self, - low: int, - high: None | int = ..., - size: None = ..., - ) -> int: ... - @overload - def integers( # type: ignore[misc] - self, - low: int, - high: None | int = ..., - size: None = ..., - dtype: type[bool] = ..., - endpoint: bool = ..., - ) -> bool: ... - @overload - def integers( # type: ignore[misc] - self, - low: int, - high: None | int = ..., - size: None = ..., - dtype: type[np.bool] = ..., - endpoint: bool = ..., - ) -> np.bool: ... 
- @overload - def integers( # type: ignore[misc] - self, - low: int, - high: None | int = ..., - size: None = ..., - dtype: type[int] = ..., - endpoint: bool = ..., - ) -> int: ... - @overload - def integers( # type: ignore[misc] - self, - low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., - endpoint: bool = ..., - ) -> uint8: ... - @overload - def integers( # type: ignore[misc] - self, - low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., - endpoint: bool = ..., - ) -> uint16: ... - @overload - def integers( # type: ignore[misc] - self, - low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., - endpoint: bool = ..., - ) -> uint32: ... - @overload - def integers( # type: ignore[misc] - self, - low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., - endpoint: bool = ..., - ) -> uint: ... - @overload - def integers( # type: ignore[misc] - self, - low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., - endpoint: bool = ..., - ) -> uint64: ... - @overload - def integers( # type: ignore[misc] - self, - low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., - endpoint: bool = ..., - ) -> int8: ... - @overload - def integers( # type: ignore[misc] - self, - low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., - endpoint: bool = ..., - ) -> int16: ... 
- @overload - def integers( # type: ignore[misc] - self, - low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., - endpoint: bool = ..., - ) -> int32: ... - @overload - def integers( # type: ignore[misc] - self, - low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[int_] | type[int] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., - endpoint: bool = ..., - ) -> int_: ... - @overload - def integers( # type: ignore[misc] - self, - low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., - endpoint: bool = ..., - ) -> int64: ... - @overload - def integers( # type: ignore[misc] + size: _ShapeLike, + dtype: _DTypeLikeI64 = ..., + endpoint: bool = False, + ) -> NDArray[np.int64]: ... + @overload # unknown, size= + def integers( self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - ) -> NDArray[int64]: ... - @overload - def integers( # type: ignore[misc] + high: _ArrayLikeInt_co | None = None, + *, + size: _ShapeLike, + dtype: DTypeLike | None = ..., + endpoint: bool = False, + ) -> np.ndarray: ... + @overload # >=0d, integer dtype + def integers[ScalarT: np.integer | np.bool]( self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: _DTypeLikeBool = ..., - endpoint: bool = ..., - ) -> NDArray[np.bool]: ... - @overload - def integers( # type: ignore[misc] + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _DTypeLike[ScalarT], + endpoint: bool = False, + ) -> NDArray[ScalarT] | Any: ... 
+ @overload # >=0d, int64 (default) + def integers( self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., - endpoint: bool = ..., - ) -> NDArray[int8]: ... - @overload - def integers( # type: ignore[misc] + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: _DTypeLikeI64 = ..., + endpoint: bool = False, + ) -> NDArray[np.int64] | Any: ... + @overload # >=0d, unknown + def integers( self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., - endpoint: bool = ..., - ) -> NDArray[int16]: ... - @overload - def integers( # type: ignore[misc] + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: DTypeLike | None = ..., + endpoint: bool = False, + ) -> np.ndarray | Any: ... + + # + @overload # 0d + def zipf(self, /, a: _FloatLike_co, size: None = None) -> int: ... + @overload # size= + def zipf(self, /, a: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.int64]: ... + @overload # >0d + def zipf(self, /, a: _NDArrayLikeFloat, size: None = None) -> NDArray[np.int64]: ... + @overload # >=0d + def zipf(self, /, a: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64] | Any: ... + + # + @overload # 0d + def geometric(self, /, p: _FloatLike_co, size: None = None) -> int: ... + @overload # size= + def geometric(self, /, p: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.int64]: ... + @overload # >0d + def geometric(self, /, p: _NDArrayLikeFloat, size: None = None) -> NDArray[np.int64]: ... + @overload # >=0d + def geometric(self, /, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64] | Any: ... + + # + @overload # 0d + def logseries(self, /, p: _FloatLike_co, size: None = None) -> int: ... 
+ @overload # size= + def logseries(self, /, p: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.int64]: ... + @overload # >0d + def logseries(self, /, p: _NDArrayLikeFloat, size: None = None) -> NDArray[np.int64]: ... + @overload # >=0d + def logseries(self, /, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64] | Any: ... + + # + @overload # 0d (default) + def poisson(self, /, lam: _FloatLike_co = 1.0, size: None = None) -> int: ... + @overload # size= (keyword) + def poisson(self, /, lam: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> NDArray[np.int64]: ... + @overload # size= (positional) + def poisson(self, /, lam: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.int64]: ... + @overload # >0d + def poisson(self, /, lam: _NDArrayLikeFloat, size: None = None) -> NDArray[np.int64]: ... + @overload # >=0d + def poisson(self, /, lam: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64] | Any: ... + + # + @overload # 0d, 0d + def binomial(self, /, n: int, p: _FloatLike_co, size: None = None) -> int: ... + @overload # size= + def binomial(self, /, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.int64]: ... + @overload # >=0d, >0d + def binomial(self, /, n: _ArrayLikeInt_co, p: _NDArrayLikeFloat, size: None = None) -> NDArray[np.int64]: ... + @overload # >0d, >=0d + def binomial(self, /, n: _NDArrayLikeInt, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64]: ... + @overload # >=0d, >=0d + def binomial(self, /, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64] | Any: ... + + # + @overload # 0d, 0d + def negative_binomial(self, /, n: _FloatLike_co, p: _FloatLike_co, size: None = None) -> int: ... + @overload # size= + def negative_binomial(self, /, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.int64]: ... 
+ @overload # >=0d, >0d + def negative_binomial(self, /, n: _ArrayLikeFloat_co, p: _NDArrayLikeFloat, size: None = None) -> NDArray[np.int64]: ... + @overload # >0d, >=0d + def negative_binomial(self, /, n: _NDArrayLikeFloat, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64]: ... + @overload # >=0d, >=0d + def negative_binomial( + self, /, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None = None + ) -> NDArray[np.int64] | Any: ... + + # + @overload # 0d, 0d, 0d + def hypergeometric(self, /, ngood: int, nbad: int, nsample: int, size: None = None) -> int: ... + @overload # size= + def hypergeometric( + self, /, ngood: _ArrayLikeInt_co, nbad: _ArrayLikeInt_co, nsample: _ArrayLikeInt_co, size: _ShapeLike + ) -> NDArray[np.int64]: ... + @overload # >=0d, >=0d, >0d + def hypergeometric( + self, /, ngood: _ArrayLikeInt_co, nbad: _ArrayLikeInt_co, nsample: _NDArrayLikeInt, size: None = None + ) -> NDArray[np.int64] | Any: ... + @overload # >=0d, >0d, >=0d + def hypergeometric( + self, /, ngood: _ArrayLikeInt_co, nbad: _NDArrayLikeInt, nsample: _ArrayLikeInt_co, size: None = None + ) -> NDArray[np.int64] | Any: ... + @overload # >0d, >=0d, >=0d + def hypergeometric( + self, /, ngood: _NDArrayLikeInt, nbad: _ArrayLikeInt_co, nsample: _ArrayLikeInt_co, size: None = None + ) -> NDArray[np.int64] | Any: ... + @overload # >=0d, >=0d, >=0d + def hypergeometric( + self, /, ngood: _ArrayLikeInt_co, nbad: _ArrayLikeInt_co, nsample: _ArrayLikeInt_co, size: None = None + ) -> NDArray[np.int64] | Any: ... + + ### + # multivariate + + # + def dirichlet(self, /, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> _ArrayF64: ... + + # + def multivariate_normal( self, - low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., - endpoint: bool = ..., - ) -> NDArray[int32]: ... 
- @overload - def integers( # type: ignore[misc] + /, + mean: _ArrayLikeFloat_co, + cov: _ArrayLikeFloat_co, + size: _ShapeLike | None = None, + check_valid: Literal["warn", "raise", "ignore"] = "warn", + tol: float = 1e-8, + *, + method: Literal["svd", "eigh", "cholesky"] = "svd", + ) -> _ArrayF64: ... + + # + def multinomial( + self, /, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, size: _ShapeLike | None = None + ) -> NDArray[np.int64]: ... + + # + def multivariate_hypergeometric( self, - low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: None | dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., - endpoint: bool = ..., - ) -> NDArray[int64]: ... + /, + colors: _ArrayLikeInt_co, + nsample: int, + size: _ShapeLike | None = None, + method: Literal["marginals", "count"] = "marginals", + ) -> NDArray[np.int64]: ... + + ### + # resampling + + # axis must be 0 for MutableSequence @overload - def integers( # type: ignore[misc] - self, - low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., - endpoint: bool = ..., - ) -> NDArray[uint8]: ... + def shuffle(self, /, x: np.ndarray, axis: int = 0) -> None: ... @overload - def integers( # type: ignore[misc] - self, - low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., - endpoint: bool = ..., - ) -> NDArray[uint16]: ... + def shuffle(self, /, x: MutableSequence[Any], axis: Literal[0] = 0) -> None: ... 
+ + # @overload - def integers( # type: ignore[misc] - self, - low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., - endpoint: bool = ..., - ) -> NDArray[uint32]: ... + def permutation(self, /, x: int, axis: int = 0) -> NDArray[np.int64]: ... @overload - def integers( # type: ignore[misc] - self, - low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., - endpoint: bool = ..., - ) -> NDArray[uint64]: ... + def permutation(self, /, x: ArrayLike, axis: int = 0) -> np.ndarray: ... + + # @overload - def integers( # type: ignore[misc] - self, - low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[int_] | type[int] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., - endpoint: bool = ..., - ) -> NDArray[int_]: ... + def permuted[ArrayT: np.ndarray](self, /, x: ArrayT, *, axis: int | None = None, out: None = None) -> ArrayT: ... @overload - def integers( # type: ignore[misc] - self, - low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., - endpoint: bool = ..., - ) -> NDArray[uint]: ... - # TODO: Use a TypeVar _T here to get away from Any output? Should be int->NDArray[int64], ArrayLike[_T] -> _T | NDArray[Any] + def permuted(self, /, x: ArrayLike, *, axis: int | None = None, out: None = None) -> np.ndarray: ... @overload + def permuted[ArrayT: np.ndarray](self, /, x: ArrayLike, *, axis: int | None = None, out: ArrayT) -> ArrayT: ... 
+ + # + @overload # >=0d int, size=None (default) def choice( self, - a: int, - size: None = ..., - replace: bool = ..., - p: None | _ArrayLikeFloat_co = ..., - axis: int = ..., - shuffle: bool = ..., + /, + a: int | _NestedSequence[int], + size: None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, ) -> int: ... - @overload - def choice( - self, - a: int, - size: _ShapeLike = ..., - replace: bool = ..., - p: None | _ArrayLikeFloat_co = ..., - axis: int = ..., - shuffle: bool = ..., - ) -> NDArray[int64]: ... - @overload + @overload # >=0d known, size=None (default) + def choice[ScalarT: np.generic]( + self, + /, + a: _ArrayLike[ScalarT], + size: None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, + ) -> ScalarT: ... + @overload # >=0d unknown, size=None (default) def choice( self, + /, a: ArrayLike, - size: None = ..., - replace: bool = ..., - p: None | _ArrayLikeFloat_co = ..., - axis: int = ..., - shuffle: bool = ..., + size: None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, ) -> Any: ... - @overload + @overload # >=0d int, size= def choice( self, - a: ArrayLike, - size: _ShapeLike = ..., - replace: bool = ..., - p: None | _ArrayLikeFloat_co = ..., - axis: int = ..., - shuffle: bool = ..., - ) -> NDArray[Any]: ... - @overload - def uniform( - self, - low: _FloatLike_co = ..., - high: _FloatLike_co = ..., - size: None = ..., - ) -> float: ... # type: ignore[misc] - @overload - def uniform( - self, - low: _ArrayLikeFloat_co = ..., - high: _ArrayLikeFloat_co = ..., - size: None | _ShapeLike = ..., - ) -> NDArray[float64]: ... - @overload - def normal( - self, - loc: _FloatLike_co = ..., - scale: _FloatLike_co = ..., - size: None = ..., - ) -> float: ... 
# type: ignore[misc] - @overload - def normal( - self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: None | _ShapeLike = ..., - ) -> NDArray[float64]: ... - @overload - def standard_gamma( # type: ignore[misc] - self, - shape: _FloatLike_co, - size: None = ..., - dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., - out: None = ..., - ) -> float: ... - @overload - def standard_gamma( - self, - shape: _ArrayLikeFloat_co, - size: None | _ShapeLike = ..., - ) -> NDArray[float64]: ... - @overload - def standard_gamma( - self, - shape: _ArrayLikeFloat_co, - *, - out: NDArray[float64] = ..., - ) -> NDArray[float64]: ... - @overload - def standard_gamma( - self, - shape: _ArrayLikeFloat_co, - size: None | _ShapeLike = ..., - dtype: _DTypeLikeFloat32 = ..., - out: None | NDArray[float32] = ..., - ) -> NDArray[float32]: ... - @overload - def standard_gamma( - self, - shape: _ArrayLikeFloat_co, - size: None | _ShapeLike = ..., - dtype: _DTypeLikeFloat64 = ..., - out: None | NDArray[float64] = ..., - ) -> NDArray[float64]: ... - @overload - def gamma(self, shape: _FloatLike_co, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc] - @overload - def gamma( - self, - shape: _ArrayLikeFloat_co, - scale: _ArrayLikeFloat_co = ..., - size: None | _ShapeLike = ..., - ) -> NDArray[float64]: ... - @overload - def f(self, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] - @overload - def f( - self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None | _ShapeLike = ... - ) -> NDArray[float64]: ... - @overload - def noncentral_f(self, dfnum: _FloatLike_co, dfden: _FloatLike_co, nonc: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] - @overload - def noncentral_f( - self, - dfnum: _ArrayLikeFloat_co, - dfden: _ArrayLikeFloat_co, - nonc: _ArrayLikeFloat_co, - size: None | _ShapeLike = ..., - ) -> NDArray[float64]: ... 
- @overload - def chisquare(self, df: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] - @overload - def chisquare( - self, df: _ArrayLikeFloat_co, size: None | _ShapeLike = ... - ) -> NDArray[float64]: ... - @overload - def noncentral_chisquare(self, df: _FloatLike_co, nonc: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] - @overload - def noncentral_chisquare( - self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None | _ShapeLike = ... - ) -> NDArray[float64]: ... - @overload - def standard_t(self, df: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] - @overload - def standard_t( - self, df: _ArrayLikeFloat_co, size: None = ... - ) -> NDArray[float64]: ... - @overload - def standard_t( - self, df: _ArrayLikeFloat_co, size: _ShapeLike = ... - ) -> NDArray[float64]: ... - @overload - def vonmises(self, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] - @overload - def vonmises( - self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None | _ShapeLike = ... - ) -> NDArray[float64]: ... - @overload - def pareto(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] - @overload - def pareto( - self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... - ) -> NDArray[float64]: ... - @overload - def weibull(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] - @overload - def weibull( - self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... - ) -> NDArray[float64]: ... - @overload - def power(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] - @overload - def power( - self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... - ) -> NDArray[float64]: ... - @overload - def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc] - @overload - def standard_cauchy(self, size: _ShapeLike = ...) -> NDArray[float64]: ... 
- @overload - def laplace( - self, - loc: _FloatLike_co = ..., - scale: _FloatLike_co = ..., - size: None = ..., - ) -> float: ... # type: ignore[misc] - @overload - def laplace( - self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: None | _ShapeLike = ..., - ) -> NDArray[float64]: ... - @overload - def gumbel( - self, - loc: _FloatLike_co = ..., - scale: _FloatLike_co = ..., - size: None = ..., - ) -> float: ... # type: ignore[misc] - @overload - def gumbel( - self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: None | _ShapeLike = ..., - ) -> NDArray[float64]: ... - @overload - def logistic( - self, - loc: _FloatLike_co = ..., - scale: _FloatLike_co = ..., - size: None = ..., - ) -> float: ... # type: ignore[misc] - @overload - def logistic( - self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: None | _ShapeLike = ..., - ) -> NDArray[float64]: ... - @overload - def lognormal( - self, - mean: _FloatLike_co = ..., - sigma: _FloatLike_co = ..., - size: None = ..., - ) -> float: ... # type: ignore[misc] - @overload - def lognormal( - self, - mean: _ArrayLikeFloat_co = ..., - sigma: _ArrayLikeFloat_co = ..., - size: None | _ShapeLike = ..., - ) -> NDArray[float64]: ... - @overload - def rayleigh(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc] - @overload - def rayleigh( - self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ... - ) -> NDArray[float64]: ... - @overload - def wald(self, mean: _FloatLike_co, scale: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] - @overload - def wald( - self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None | _ShapeLike = ... - ) -> NDArray[float64]: ... - @overload - def triangular( - self, - left: _FloatLike_co, - mode: _FloatLike_co, - right: _FloatLike_co, - size: None = ..., - ) -> float: ... 
# type: ignore[misc] - @overload - def triangular( - self, - left: _ArrayLikeFloat_co, - mode: _ArrayLikeFloat_co, - right: _ArrayLikeFloat_co, - size: None | _ShapeLike = ..., - ) -> NDArray[float64]: ... - @overload - def binomial(self, n: int, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] - @overload - def binomial( - self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... - ) -> NDArray[int64]: ... - @overload - def negative_binomial(self, n: _FloatLike_co, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] - @overload - def negative_binomial( - self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... - ) -> NDArray[int64]: ... - @overload - def poisson(self, lam: _FloatLike_co = ..., size: None = ...) -> int: ... # type: ignore[misc] - @overload - def poisson( - self, lam: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ... - ) -> NDArray[int64]: ... - @overload - def zipf(self, a: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] - @overload - def zipf( - self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... - ) -> NDArray[int64]: ... - @overload - def geometric(self, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] - @overload - def geometric( - self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... - ) -> NDArray[int64]: ... - @overload - def hypergeometric(self, ngood: int, nbad: int, nsample: int, size: None = ...) -> int: ... # type: ignore[misc] - @overload - def hypergeometric( - self, - ngood: _ArrayLikeInt_co, - nbad: _ArrayLikeInt_co, - nsample: _ArrayLikeInt_co, - size: None | _ShapeLike = ..., - ) -> NDArray[int64]: ... - @overload - def logseries(self, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] - @overload - def logseries( - self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... - ) -> NDArray[int64]: ... 
- def multivariate_normal( - self, - mean: _ArrayLikeFloat_co, - cov: _ArrayLikeFloat_co, - size: None | _ShapeLike = ..., - check_valid: Literal["warn", "raise", "ignore"] = ..., - tol: float = ..., - *, - method: Literal["svd", "eigh", "cholesky"] = ..., - ) -> NDArray[float64]: ... - def multinomial( - self, n: _ArrayLikeInt_co, - pvals: _ArrayLikeFloat_co, - size: None | _ShapeLike = ... - ) -> NDArray[int64]: ... - def multivariate_hypergeometric( + /, + a: int | _NestedSequence[int], + size: _ShapeLike, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, + ) -> NDArray[np.int64]: ... + @overload # >=0d known, size= + def choice[ScalarT: np.generic]( + self, + /, + a: _ArrayLike[ScalarT], + size: _ShapeLike, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, + ) -> NDArray[ScalarT]: ... + @overload # >=0d unknown, size= + def choice( self, - colors: _ArrayLikeInt_co, - nsample: int, - size: None | _ShapeLike = ..., - method: Literal["marginals", "count"] = ..., - ) -> NDArray[int64]: ... - def dirichlet( - self, alpha: _ArrayLikeFloat_co, size: None | _ShapeLike = ... - ) -> NDArray[float64]: ... - def permuted( - self, x: ArrayLike, *, axis: None | int = ..., out: None | NDArray[Any] = ... - ) -> NDArray[Any]: ... - def shuffle(self, x: ArrayLike, axis: int = ...) -> None: ... - -def default_rng( - seed: None | _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator = ... -) -> Generator: ... + /, + a: ArrayLike, + size: _ShapeLike, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, + ) -> np.ndarray: ... + +def default_rng(seed: _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator | RandomState | None = None) -> Generator: ... 
diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index 0d134c823588..6623d347a4cf 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -16,8 +16,7 @@ from numpy.lib.array_utils import normalize_axis_index from .c_distributions cimport * from libc cimport string from libc.math cimport sqrt -from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t, - int32_t, int64_t, INT64_MAX, SIZE_MAX) +from libc.stdint cimport (uint64_t, int64_t, INT64_MAX, SIZE_MAX) from ._bounded_integers cimport (_rand_bool, _rand_int32, _rand_int64, _rand_int16, _rand_int8, _rand_uint64, _rand_uint32, _rand_uint16, _rand_uint8, _gen_mask) @@ -141,8 +140,8 @@ cdef bint _check_bit_generator(object bitgen): cdef class Generator: - """ - Generator(bit_generator) + # the first line is used to populate `__text_signature__` + """Generator(bit_generator)\n-- Container for the BitGenerators. @@ -169,9 +168,10 @@ cdef class Generator: Notes ----- - The Python stdlib module `random` contains pseudo-random number generator - with a number of methods that are similar to the ones available in - `Generator`. It uses Mersenne Twister, and this bit generator can + The Python stdlib module :external+python:mod:`random` contains + pseudo-random number generator with a number of methods that are similar + to the ones available in `Generator`. + It uses Mersenne Twister, and this bit generator can be accessed using `MT19937`. `Generator`, besides being NumPy-aware, has the advantage that it provides a much larger number of probability distributions to choose from. 
@@ -205,12 +205,10 @@ cdef class Generator: self.lock = bit_generator.lock def __repr__(self): - return self.__str__() + ' at 0x{:X}'.format(id(self)) + return f'{self} at 0x{id(self):X}' def __str__(self): - _str = self.__class__.__name__ - _str += '(' + self.bit_generator.__class__.__name__ + ')' - return _str + return f'{self.__class__.__name__}({self.bit_generator.__class__.__name__})' # Pickling support: def __getstate__(self): @@ -296,6 +294,8 @@ cdef class Generator: >>> nested_spawn = child_rng1.spawn(20) """ + if n_children < 0: + raise ValueError("n_children must be non-negative") return [type(self)(g) for g in self._bit_generator.spawn(n_children)] def random(self, size=None, dtype=np.float64, out=None): @@ -352,7 +352,6 @@ cdef class Generator: [-1.23204345, -1.75224494]]) """ - cdef double temp _dtype = np.dtype(dtype) if _dtype == np.float64: return double_fill(&random_standard_uniform_fill, &self._bitgen, size, self.lock, out) @@ -399,8 +398,8 @@ cdef class Generator: Drawn samples from the parameterized beta distribution. Examples - -------- - The beta distribution has mean a/(a+b). If ``a == b`` and both + -------- + The beta distribution has mean a/(a+b). If ``a == b`` and both are > 1, the distribution is symmetric with mean 0.5. >>> rng = np.random.default_rng() @@ -408,11 +407,11 @@ cdef class Generator: >>> sample = rng.beta(a=a, b=b, size=size) >>> np.mean(sample) 0.5047328775385895 # may vary - + Otherwise the distribution is skewed left or right according to whether ``a`` or ``b`` is greater. The distribution is mirror symmetric. See for example: - + >>> a, b, size = 2, 7, 10000 >>> sample_left = rng.beta(a=a, b=b, size=size) >>> sample_right = rng.beta(a=b, b=a, size=size) @@ -425,12 +424,12 @@ cdef class Generator: -0.0003163943736596009 # may vary Display the histogram of the two samples: - + >>> import matplotlib.pyplot as plt - >>> plt.hist([sample_left, sample_right], + >>> plt.hist([sample_left, sample_right], ... 
50, density=True, histtype='bar') >>> plt.show() - + References ---------- .. [1] Wikipedia, "Beta distribution", @@ -480,17 +479,17 @@ cdef class Generator: Examples -------- - Assume a company has 10000 customer support agents and the time - between customer calls is exponentially distributed and that the + Assume a company has 10000 customer support agents and the time + between customer calls is exponentially distributed and that the average time between customer calls is 4 minutes. >>> scale, size = 4, 10000 >>> rng = np.random.default_rng() >>> time_between_calls = rng.exponential(scale=scale, size=size) - What is the probability that a customer will call in the next - 4 to 5 minutes? - + What is the probability that a customer will call in the next + 4 to 5 minutes? + >>> x = ((time_between_calls < 5).sum())/size >>> y = ((time_between_calls < 4).sum())/size >>> x - y @@ -542,7 +541,7 @@ cdef class Generator: Byteorder must be native. The default value is np.float64. method : str, optional Either 'inv' or 'zig'. 'inv' uses the default inverse CDF method. - 'zig' uses the much faster Ziggurat method of Marsaglia and Tsang. + 'zig' uses the much faster Ziggurat method of Marsaglia and Tsang [1]_. out : ndarray, optional Alternative output array in which to place the result. If size is not None, it must have the same shape as the provided size and must match the type of @@ -553,6 +552,12 @@ cdef class Generator: out : float or ndarray Drawn samples. + References + ---------- + .. [1] Marsaglia, G. and Tsang, W. W. (2000). The Ziggurat method for + generating random variables. Journal of Statistical Software, 5, 1-7. + https://doi.org/10.18637/jss.v005.i08 + Examples -------- Output a 3x8000 array: @@ -721,10 +726,10 @@ cdef class Generator: Notes ----- - This function generates random bytes from a discrete uniform - distribution. The generated bytes are independent from the CPU's + This function generates random bytes from a discrete uniform + distribution. 
The generated bytes are independent from the CPU's native endianness. - + Examples -------- >>> rng = np.random.default_rng() @@ -796,7 +801,10 @@ cdef class Generator: than the optimized sampler even if each element of ``p`` is 1 / len(a). ``p`` must sum to 1 when cast to ``float64``. To ensure this, you may wish - to normalize using ``p = p / np.sum(p, dtype=float)``. + to normalize using ``p = p / np.sum(p, dtype=np.float64)``. + + When passing ``a`` as an integer type and ``size`` is not specified, the return + type is a native Python ``int``. Examples -------- @@ -842,7 +850,7 @@ cdef class Generator: """ - cdef int64_t val, t, loc, size_i, pop_size_i + cdef int64_t val, loc, size_i, pop_size_i cdef int64_t *idx_data cdef np.npy_intp j cdef uint64_t set_size, mask @@ -875,7 +883,7 @@ cdef class Generator: atol = max(atol, np.sqrt(np.finfo(p.dtype).eps)) p = np.PyArray_FROM_OTF( - p, np.NPY_DOUBLE, np.NPY_ALIGNED | np.NPY_ARRAY_C_CONTIGUOUS) + p, np.NPY_DOUBLE, np.NPY_ARRAY_ALIGNED | np.NPY_ARRAY_C_CONTIGUOUS) pix = np.PyArray_DATA(p) if p.ndim != 1: @@ -949,7 +957,7 @@ cdef class Generator: cutoff = 20 if pop_size_i > 10000 and (size_i > (pop_size_i // cutoff)): # Tail shuffle size elements - idx = np.PyArray_Arange(0, pop_size_i, 1, np.NPY_INT64) + idx = np.arange(0, pop_size_i, dtype=np.int64) idx_data = (idx).data with self.lock, nogil: _shuffle_int(&self._bitgen, pop_size_i, @@ -982,7 +990,7 @@ cdef class Generator: idx_data[j - pop_size_i + size_i] = j if shuffle: _shuffle_int(&self._bitgen, size_i, 1, idx_data) - idx.shape = shape + idx = idx.reshape(shape) if is_scalar and isinstance(idx, np.ndarray): # In most cases a scalar will have been made an array @@ -1024,9 +1032,9 @@ cdef class Generator: greater than or equal to low. The default value is 0. high : float or array_like of floats Upper boundary of the output interval. All values generated will be - less than high. 
The high limit may be included in the returned array of - floats due to floating-point rounding in the equation - ``low + (high-low) * random_sample()``. high - low must be + less than high. The high limit may be included in the returned array of + floats due to floating-point rounding in the equation + ``low + (high-low) * random_sample()``. high - low must be non-negative. The default value is 1.0. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then @@ -1077,13 +1085,12 @@ cdef class Generator: >>> plt.show() """ - cdef bint is_scalar = True cdef np.ndarray alow, ahigh, arange cdef double _low, _high, rng cdef object temp - alow = np.PyArray_FROM_OTF(low, np.NPY_DOUBLE, np.NPY_ALIGNED) - ahigh = np.PyArray_FROM_OTF(high, np.NPY_DOUBLE, np.NPY_ALIGNED) + alow = np.PyArray_FROM_OTF(low, np.NPY_DOUBLE, np.NPY_ARRAY_ALIGNED) + ahigh = np.PyArray_FROM_OTF(high, np.NPY_DOUBLE, np.NPY_ARRAY_ALIGNED) if np.PyArray_NDIM(alow) == np.PyArray_NDIM(ahigh) == 0: _low = PyFloat_AsDouble(low) @@ -1257,7 +1264,7 @@ cdef class Generator: >>> rng = np.random.default_rng() >>> s = rng.normal(mu, sigma, 1000) - Verify the mean and the variance: + Verify the mean and the standard deviation: >>> abs(mu - np.mean(s)) 0.0 # may vary @@ -1367,7 +1374,6 @@ cdef class Generator: >>> plt.show() """ - cdef void *func _dtype = np.dtype(dtype) if _dtype == np.float64: return cont(&random_standard_gamma, &self._bitgen, size, self.lock, 1, @@ -1518,7 +1524,7 @@ cdef class Generator: Examples -------- - An example from Glantz[1], pp 47-40: + An example from Glantz [1]_, pp 47-40: Two groups, children of diabetics (25 people) and children from people without diabetes (25 controls). Fasting blood glucose was measured, @@ -1543,12 +1549,12 @@ cdef class Generator: So there is about a 1% chance that the F statistic will exceed 7.62, the measured value is 36, so the null hypothesis is rejected at the 1% level. 
- - The corresponding probability density function for ``n = 20`` + + The corresponding probability density function for ``n = 20`` and ``m = 20`` is: - + >>> import matplotlib.pyplot as plt - >>> from scipy import stats # doctest: +SKIP + >>> from scipy import stats >>> dfnum, dfden, size = 20, 20, 10000 >>> s = rng.f(dfnum=dfnum, dfden=dfden, size=size) >>> bins, density, _ = plt.hist(s, 30, density=True) @@ -1556,7 +1562,7 @@ cdef class Generator: >>> plt.plot(x, stats.f.pdf(x, dfnum, dfden)) >>> plt.xlim([0, 5]) >>> plt.show() - + """ return cont(&random_f, &self._bitgen, size, self.lock, 2, dfnum, 'dfnum', CONS_POSITIVE, @@ -1578,9 +1584,6 @@ cdef class Generator: ---------- dfnum : float or array_like of floats Numerator degrees of freedom, must be > 0. - - .. versionchanged:: 1.14.0 - Earlier NumPy versions required dfnum > 1. dfden : float or array_like of floats Denominator degrees of freedom, must be > 0. nonc : float or array_like of floats @@ -1678,7 +1681,7 @@ cdef class Generator: The variable obtained by summing the squares of `df` independent, standard normally distributed random variables: - .. math:: Q = \\sum_{i=0}^{\\mathtt{df}} X^2_i + .. math:: Q = \\sum_{i=1}^{\\mathtt{df}} X^2_i is chi-square distributed, denoted @@ -1706,7 +1709,7 @@ cdef class Generator: The distribution of a chi-square random variable with 20 degrees of freedom looks as follows: - + >>> import matplotlib.pyplot as plt >>> import scipy.stats as stats >>> s = rng.chisquare(20, 10000) @@ -1735,9 +1738,6 @@ cdef class Generator: ---------- df : float or array_like of floats Degrees of freedom, must be > 0. - - .. versionchanged:: 1.10.0 - Earlier NumPy versions required dfnum > 1. nonc : float or array_like of floats Non-centrality, must be non-negative. size : int or tuple of ints, optional @@ -1929,14 +1929,14 @@ cdef class Generator: Does their energy intake deviate systematically from the recommended value of 7725 kJ? 
Our null hypothesis will be the absence of deviation, and the alternate hypothesis will be the presence of an effect that could be - either positive or negative, hence making our test 2-tailed. + either positive or negative, hence making our test 2-tailed. Because we are estimating the mean and we have N=11 values in our sample, - we have N-1=10 degrees of freedom. We set our significance level to 95% and - compute the t statistic using the empirical mean and empirical standard - deviation of our intake. We use a ddof of 1 to base the computation of our + we have N-1=10 degrees of freedom. We set our significance level to 95% and + compute the t statistic using the empirical mean and empirical standard + deviation of our intake. We use a ddof of 1 to base the computation of our empirical standard deviation on an unbiased estimate of the variance (note: - the final estimate is not unbiased due to the concave nature of the square + the final estimate is not unbiased due to the concave nature of the square root). >>> np.mean(intake) @@ -1955,18 +1955,18 @@ cdef class Generator: >>> s = rng.standard_t(10, size=1000000) >>> h = plt.hist(s, bins=100, density=True) - Does our t statistic land in one of the two critical regions found at + Does our t statistic land in one of the two critical regions found at both tails of the distribution? >>> np.sum(np.abs(t) < np.abs(s)) / float(len(s)) 0.018318 #random < 0.05, statistic is in critical region - The probability value for this 2-tailed test is about 1.83%, which is - lower than the 5% pre-determined significance threshold. + The probability value for this 2-tailed test is about 1.83%, which is + lower than the 5% pre-determined significance threshold. Therefore, the probability of observing values as extreme as our intake - conditionally on the null hypothesis being true is too low, and we reject - the null hypothesis of no deviation. 
+ conditionally on the null hypothesis being true is too low, and we reject + the null hypothesis of no deviation. """ return cont(&random_standard_t, &self._bitgen, size, self.lock, 1, @@ -1982,7 +1982,7 @@ cdef class Generator: Draw samples from a von Mises distribution. Samples are drawn from a von Mises distribution with specified mode - (mu) and dispersion (kappa), on the interval [-pi, pi]. + (mu) and concentration (kappa), on the interval [-pi, pi]. The von Mises distribution (also known as the circular normal distribution) is a continuous probability distribution on the unit @@ -1994,7 +1994,7 @@ cdef class Generator: mu : float or array_like of floats Mode ("center") of the distribution. kappa : float or array_like of floats - Dispersion of the distribution, has to be >=0. + Concentration of the distribution, has to be >=0. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then ``m * n * k`` samples are drawn. If size is ``None`` (default), @@ -2017,7 +2017,7 @@ cdef class Generator: .. math:: p(x) = \\frac{e^{\\kappa cos(x-\\mu)}}{2\\pi I_0(\\kappa)}, - where :math:`\\mu` is the mode and :math:`\\kappa` the dispersion, + where :math:`\\mu` is the mode and :math:`\\kappa` the concentration, and :math:`I_0(\\kappa)` is the modified Bessel function of order 0. The von Mises is named for Richard Edler von Mises, who was born in @@ -2038,7 +2038,7 @@ cdef class Generator: -------- Draw samples from the distribution: - >>> mu, kappa = 0.0, 4.0 # mean and dispersion + >>> mu, kappa = 0.0, 4.0 # mean and concentration >>> rng = np.random.default_rng() >>> s = rng.vonmises(mu, kappa, 1000) @@ -2091,7 +2091,7 @@ cdef class Generator: ----- The probability density for the Pareto II distribution is - .. math:: p(x) = \\frac{a}{{x+1}^{a+1}} , x \ge 0 + .. math:: p(x) = \\frac{a}{(x+1)^{a+1}} , x \ge 0 where :math:`a > 0` is the shape. 
@@ -2935,13 +2935,12 @@ cdef class Generator: >>> plt.show() """ - cdef bint is_scalar = True cdef double fleft, fmode, fright cdef np.ndarray oleft, omode, oright - oleft = np.PyArray_FROM_OTF(left, np.NPY_DOUBLE, np.NPY_ALIGNED) - omode = np.PyArray_FROM_OTF(mode, np.NPY_DOUBLE, np.NPY_ALIGNED) - oright = np.PyArray_FROM_OTF(right, np.NPY_DOUBLE, np.NPY_ALIGNED) + oleft = np.PyArray_FROM_OTF(left, np.NPY_DOUBLE, np.NPY_ARRAY_ALIGNED) + omode = np.PyArray_FROM_OTF(mode, np.NPY_DOUBLE, np.NPY_ARRAY_ALIGNED) + oright = np.PyArray_FROM_OTF(right, np.NPY_DOUBLE, np.NPY_ARRAY_ALIGNED) if np.PyArray_NDIM(oleft) == np.PyArray_NDIM(omode) == np.PyArray_NDIM(oright) == 0: fleft = PyFloat_AsDouble(left) @@ -3009,7 +3008,7 @@ cdef class Generator: Notes ----- - The probability density for the binomial distribution is + The probability mass function (PMF) for the binomial distribution is .. math:: P(N) = \\binom{n}{N}p^N(1-p)^{n-N}, @@ -3043,21 +3042,21 @@ cdef class Generator: Draw samples from the distribution: >>> rng = np.random.default_rng() - >>> n, p, size = 10, .5, 10000 + >>> n, p, size = 10, .5, 10000 >>> s = rng.binomial(n, p, 10000) Assume a company drills 9 wild-cat oil exploration wells, each with - an estimated probability of success of ``p=0.1``. All nine wells fail. + an estimated probability of success of ``p=0.1``. All nine wells fail. What is the probability of that happening? 
- Over ``size = 20,000`` trials the probability of this happening + Over ``size = 20,000`` trials the probability of this happening is on average: >>> n, p, size = 9, 0.1, 20000 >>> np.sum(rng.binomial(n=n, p=p, size=size) == 0)/size 0.39015 # may vary - The following can be used to visualize a sample with ``n=100``, + The following can be used to visualize a sample with ``n=100``, ``p=0.4`` and the corresponding probability density function: >>> import matplotlib.pyplot as plt @@ -3080,9 +3079,9 @@ cdef class Generator: cdef np.int64_t *randoms_data cdef np.broadcast it - p_arr = np.PyArray_FROM_OTF(p, np.NPY_DOUBLE, np.NPY_ALIGNED) + p_arr = np.PyArray_FROM_OTF(p, np.NPY_DOUBLE, np.NPY_ARRAY_ALIGNED) is_scalar = is_scalar and np.PyArray_NDIM(p_arr) == 0 - n_arr = np.PyArray_FROM_OTF(n, np.NPY_INT64, np.NPY_ALIGNED) + n_arr = np.PyArray_FROM_OTF(n, np.NPY_INT64, np.NPY_ARRAY_ALIGNED) is_scalar = is_scalar and np.PyArray_NDIM(n_arr) == 0 if not is_scalar: @@ -3176,10 +3175,10 @@ cdef class Generator: appear before the third "1" is a negative binomial distribution. Because this method internally calls ``Generator.poisson`` with an - intermediate random value, a ValueError is raised when the choice of + intermediate random value, a ValueError is raised when the choice of :math:`n` and :math:`p` would result in the mean + 10 sigma of the sampled - intermediate distribution exceeding the max acceptable value of the - ``Generator.poisson`` method. This happens when :math:`p` is too low + intermediate distribution exceeding the max acceptable value of the + ``Generator.poisson`` method. This happens when :math:`p` is too low (a lot of failures happen for every success) and :math:`n` is too big ( a lot of successes are allowed). 
Therefore, the :math:`n` and :math:`p` values must satisfy the constraint: @@ -3222,9 +3221,9 @@ cdef class Generator: cdef double *_dp cdef double _dmax_lam - p_arr = np.PyArray_FROM_OTF(p, np.NPY_DOUBLE, np.NPY_ALIGNED) + p_arr = np.PyArray_FROM_OTF(p, np.NPY_DOUBLE, np.NPY_ARRAY_ALIGNED) is_scalar = is_scalar and np.PyArray_NDIM(p_arr) == 0 - n_arr = np.PyArray_FROM_OTF(n, np.NPY_DOUBLE, np.NPY_ALIGNED) + n_arr = np.PyArray_FROM_OTF(n, np.NPY_DOUBLE, np.NPY_ARRAY_ALIGNED) is_scalar = is_scalar and np.PyArray_NDIM(n_arr) == 0 if not is_scalar: @@ -3281,7 +3280,7 @@ cdef class Generator: Notes ----- - The Poisson distribution + The probability mass function (PMF) of Poisson distribution is .. math:: f(k; \\lambda)=\\frac{\\lambda^k e^{-\\lambda}}{k!} @@ -3311,7 +3310,7 @@ cdef class Generator: >>> s = rng.poisson(lam=lam, size=size) Verify the mean and variance, which should be approximately ``lam``: - + >>> s.mean(), s.var() (4.9917 5.1088311) # may vary @@ -3371,7 +3370,7 @@ cdef class Generator: Notes ----- - The probability density for the Zipf distribution is + The probability mass function (PMF) for the Zipf distribution is .. math:: p(k) = \\frac{k^{-a}}{\\zeta(a)}, @@ -3465,7 +3464,7 @@ cdef class Generator: Examples -------- - Draw 10,000 values from the geometric distribution, with the + Draw 10,000 values from the geometric distribution, with the probability of an individual success equal to ``p = 0.35``: >>> p, size = 0.35, 10000 @@ -3484,7 +3483,7 @@ cdef class Generator: >>> plt.plot(bins, (1-p)**(bins-1)*p) >>> plt.xlim([0, 25]) >>> plt.show() - + """ return disc(&random_geometric, &self._bitgen, size, self.lock, 1, 0, p, 'p', CONS_BOUNDED_GT_0_1, @@ -3536,7 +3535,7 @@ cdef class Generator: Notes ----- - The probability density for the Hypergeometric distribution is + The probability mass function (PMF) for the Hypergeometric distribution is .. 
math:: P(x) = \\frac{\\binom{g}{x}\\binom{b}{n-x}}{\\binom{g+b}{n}}, @@ -3598,13 +3597,12 @@ cdef class Generator: """ cdef double HYPERGEOM_MAX = 10**9 - cdef bint is_scalar = True cdef np.ndarray ongood, onbad, onsample cdef int64_t lngood, lnbad, lnsample - ongood = np.PyArray_FROM_OTF(ngood, np.NPY_INT64, np.NPY_ALIGNED) - onbad = np.PyArray_FROM_OTF(nbad, np.NPY_INT64, np.NPY_ALIGNED) - onsample = np.PyArray_FROM_OTF(nsample, np.NPY_INT64, np.NPY_ALIGNED) + ongood = np.PyArray_FROM_OTF(ngood, np.NPY_INT64, np.NPY_ARRAY_ALIGNED) + onbad = np.PyArray_FROM_OTF(nbad, np.NPY_INT64, np.NPY_ARRAY_ALIGNED) + onsample = np.PyArray_FROM_OTF(nsample, np.NPY_INT64, np.NPY_ARRAY_ALIGNED) if np.PyArray_NDIM(ongood) == np.PyArray_NDIM(onbad) == np.PyArray_NDIM(onsample) == 0: @@ -3673,8 +3671,8 @@ cdef class Generator: The log series distribution is frequently used to represent species richness and occurrence, first proposed by Fisher, Corbet, and - Williams in 1943 [2]. It may also be used to model the numbers of - occupants seen in cars [3]. + Williams in 1943 [2]_. It may also be used to model the numbers of + occupants seen in cars [3]_. References ---------- @@ -3702,7 +3700,7 @@ cdef class Generator: >>> bins = np.arange(-.5, max(s) + .5 ) >>> count, bins, _ = plt.hist(s, bins=bins, label='Sample count') - # plot against distribution + Plot against the distribution: >>> def logseries(k, p): ... return -p**k/(k*np.log(1-p)) @@ -3758,8 +3756,6 @@ cdef class Generator: the slowest method. The method `eigh` uses eigen decomposition to compute A and is faster than svd but slower than cholesky. - .. versionadded:: 1.18.0 - Returns ------- out : ndarray @@ -3778,7 +3774,7 @@ cdef class Generator: Covariance indicates the level to which two variables vary together. From the multivariate normal distribution, we draw N-dimensional - samples, :math:`X = [x_1, x_2, ... x_N]`. The covariance matrix + samples, :math:`X = [x_1, x_2, ..., x_N]`. 
The covariance matrix element :math:`C_{ij}` is the covariance of :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance of :math:`x_i` (i.e. its "spread"). @@ -3796,7 +3792,8 @@ cdef class Generator: >>> mean = [0, 0] >>> cov = [[1, 0], [0, 100]] # diagonal covariance - Diagonal covariance means that points are oriented along x or y-axis: + Diagonal covariance means that the variables are independent, and the + probability density contours have their axes aligned with the coordinate axes: >>> import matplotlib.pyplot as plt >>> rng = np.random.default_rng() @@ -3957,8 +3954,7 @@ cdef class Generator: _factor = u * np.sqrt(s) x = mean + x @ _factor.T - x.shape = tuple(final_shape) - return x + return x.reshape(tuple(final_shape)) def multinomial(self, object n, object pvals, size=None): """ @@ -4008,9 +4004,6 @@ cdef class Generator: Each entry ``out[i,j,...,:]`` is a ``p``-dimensional value drawn from the distribution. - .. versionchanged:: 1.22.0 - Added support for broadcasting `pvals` against `n` - Examples -------- Throw a dice 20 times: @@ -4304,8 +4297,6 @@ cdef class Generator: performance of the algorithm is important, test the two methods with typical inputs to decide which works best. - .. versionadded:: 1.18.0 - Examples -------- >>> colors = [16, 8, 4] @@ -4662,11 +4653,11 @@ cdef class Generator: -------- shuffle permutation - + Notes ----- - An important distinction between methods ``shuffle`` and ``permuted`` is - how they both treat the ``axis`` parameter which can be found at + An important distinction between methods ``shuffle`` and ``permuted`` is + how they both treat the ``axis`` parameter which can be found at :ref:`generator-handling-axis-parameter`. 
Examples @@ -4738,7 +4729,7 @@ cdef class Generator: if axis is None: if x.ndim > 1: if not (np.PyArray_FLAGS(out) & (np.NPY_ARRAY_C_CONTIGUOUS | - np.NPY_ARRAY_F_CONTIGUOUS)): + np.NPY_ARRAY_F_CONTIGUOUS)): flags = (np.NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_WRITEBACKIFCOPY) to_shuffle = PyArray_FromArray(out, @@ -4818,8 +4809,8 @@ cdef class Generator: Notes ----- - An important distinction between methods ``shuffle`` and ``permuted`` is - how they both treat the ``axis`` parameter which can be found at + An important distinction between methods ``shuffle`` and ``permuted`` is + how they both treat the ``axis`` parameter which can be found at :ref:`generator-handling-axis-parameter`. Examples @@ -5002,7 +4993,7 @@ def default_rng(seed=None): Parameters ---------- - seed : {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}, optional + seed : {None, int, array_like[ints], SeedSequence, BitGenerator, Generator, RandomState}, optional A seed to initialize the `BitGenerator`. If None, then fresh, unpredictable entropy will be pulled from the OS. If an ``int`` or ``array_like[ints]`` is passed, then all values must be non-negative and will be @@ -5010,6 +5001,7 @@ def default_rng(seed=None): pass in a `SeedSequence` instance. Additionally, when passed a `BitGenerator`, it will be wrapped by `Generator`. If passed a `Generator`, it will be returned unaltered. + When passed a legacy `RandomState` instance it will be coerced to a `Generator`. Returns ------- @@ -5022,15 +5014,15 @@ def default_rng(seed=None): is instantiated. This function does not manage a default global instance. See :ref:`seeding_and_entropy` for more information about seeding. - + Examples -------- `default_rng` is the recommended constructor for the random number class - `Generator`. Here are several ways we can construct a random - number generator using `default_rng` and the `Generator` class. - + `Generator`. 
Here are several ways we can construct a random + number generator using `default_rng` and the `Generator` class. + Here we use `default_rng` to generate a random float: - + >>> import numpy as np >>> rng = np.random.default_rng(12345) >>> print(rng) @@ -5040,10 +5032,10 @@ def default_rng(seed=None): 0.22733602246716966 >>> type(rfloat) - - Here we use `default_rng` to generate 3 random integers between 0 + + Here we use `default_rng` to generate 3 random integers between 0 (inclusive) and 10 (exclusive): - + >>> import numpy as np >>> rng = np.random.default_rng(12345) >>> rints = rng.integers(low=0, high=10, size=3) @@ -5051,9 +5043,9 @@ def default_rng(seed=None): array([6, 2, 7]) >>> type(rints[0]) - + Here we specify a seed so that we have reproducible results: - + >>> import numpy as np >>> rng = np.random.default_rng(seed=42) >>> print(rng) @@ -5082,6 +5074,13 @@ def default_rng(seed=None): elif isinstance(seed, Generator): # Pass through a Generator. return seed + elif isinstance(seed, np.random.RandomState): + gen = np.random.Generator(seed._bit_generator) + return gen + # Otherwise we need to instantiate a new BitGenerator and Generator as # normal. 
return Generator(PCG64(seed)) + + +default_rng.__module__ = "numpy.random" diff --git a/numpy/random/_mt19937.pyi b/numpy/random/_mt19937.pyi index 600411d5f641..074a8eec5447 100644 --- a/numpy/random/_mt19937.pyi +++ b/numpy/random/_mt19937.pyi @@ -1,23 +1,27 @@ -from typing import TypedDict +from typing import TypedDict, type_check_only from numpy import uint32 -from numpy.typing import NDArray -from numpy.random.bit_generator import BitGenerator, SeedSequence from numpy._typing import _ArrayLikeInt_co +from numpy.random.bit_generator import BitGenerator, SeedSequence +from numpy.typing import NDArray + +__all__ = ["MT19937"] +@type_check_only class _MT19937Internal(TypedDict): key: NDArray[uint32] pos: int +@type_check_only class _MT19937State(TypedDict): bit_generator: str state: _MT19937Internal class MT19937(BitGenerator): - def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ... + def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... def _legacy_seeding(self, seed: _ArrayLikeInt_co) -> None: ... - def jumped(self, jumps: int = ...) -> MT19937: ... - @property - def state(self) -> _MT19937State: ... + def jumped(self, jumps: int = 1) -> MT19937: ... + @property # type: ignore[override] + def state(self) -> _MT19937State: ... # pyrefly: ignore[bad-override] @state.setter def state(self, value: _MT19937State) -> None: ... diff --git a/numpy/random/_mt19937.pyx b/numpy/random/_mt19937.pyx index 826cb8441ef1..c74498356dda 100644 --- a/numpy/random/_mt19937.pyx +++ b/numpy/random/_mt19937.pyx @@ -43,8 +43,8 @@ cdef uint64_t mt19937_raw(void *st) noexcept nogil: return mt19937_next32( st) cdef class MT19937(BitGenerator): - """ - MT19937(seed=None) + # the first line is used to populate `__text_signature__` + """MT19937(seed=None)\n-- Container for the Mersenne Twister pseudo-random number generator. 
@@ -284,8 +284,7 @@ cdef class MT19937(BitGenerator): raise TypeError('state must be a dict') bitgen = value.get('bit_generator', '') if bitgen != self.__class__.__name__: - raise ValueError('state must be for a {0} ' - 'PRNG'.format(self.__class__.__name__)) + raise ValueError(f'state must be for a {self.__class__.__name__} PRNG') key = value['state']['key'] for i in range(624): self.rng_state.key[i] = key[i] diff --git a/numpy/random/_pcg64.pyi b/numpy/random/_pcg64.pyi index 470aee867493..aede210d4026 100644 --- a/numpy/random/_pcg64.pyi +++ b/numpy/random/_pcg64.pyi @@ -1,12 +1,16 @@ -from typing import TypedDict +from typing import TypedDict, type_check_only -from numpy.random.bit_generator import BitGenerator, SeedSequence from numpy._typing import _ArrayLikeInt_co +from numpy.random.bit_generator import BitGenerator, SeedSequence + +__all__ = ["PCG64"] +@type_check_only class _PCG64Internal(TypedDict): state: int inc: int +@type_check_only class _PCG64State(TypedDict): bit_generator: str state: _PCG64Internal @@ -14,29 +18,19 @@ class _PCG64State(TypedDict): uinteger: int class PCG64(BitGenerator): - def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ... - def jumped(self, jumps: int = ...) -> PCG64: ... - @property - def state( - self, - ) -> _PCG64State: ... + def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... + def jumped(self, jumps: int = 1) -> PCG64: ... + @property # type: ignore[override] + def state(self) -> _PCG64State: ... # pyrefly: ignore[bad-override] @state.setter - def state( - self, - value: _PCG64State, - ) -> None: ... + def state(self, value: _PCG64State) -> None: ... def advance(self, delta: int) -> PCG64: ... class PCG64DXSM(BitGenerator): - def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ... - def jumped(self, jumps: int = ...) -> PCG64DXSM: ... - @property - def state( - self, - ) -> _PCG64State: ... 
+ def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... + def jumped(self, jumps: int = 1) -> PCG64DXSM: ... + @property # type: ignore[override] + def state(self) -> _PCG64State: ... # pyrefly: ignore[bad-override] @state.setter - def state( - self, - value: _PCG64State, - ) -> None: ... + def state(self, value: _PCG64State) -> None: ... def advance(self, delta: int) -> PCG64DXSM: ... diff --git a/numpy/random/_pcg64.pyx b/numpy/random/_pcg64.pyx index 250bf967bba2..30a00a11aa1d 100644 --- a/numpy/random/_pcg64.pyx +++ b/numpy/random/_pcg64.pyx @@ -51,8 +51,8 @@ cdef double pcg64_cm_double(void* st) noexcept nogil: return uint64_to_double(pcg64_cm_next64(st)) cdef class PCG64(BitGenerator): - """ - PCG64(seed=None) + # the first line is used to populate `__text_signature__` + """PCG64(seed=None)\n-- BitGenerator for the PCG-64 pseudo-random number generator. @@ -225,8 +225,7 @@ cdef class PCG64(BitGenerator): raise TypeError('state must be a dict') bitgen = value.get('bit_generator', '') if bitgen != self.__class__.__name__: - raise ValueError('state must be for a {0} ' - 'RNG'.format(self.__class__.__name__)) + raise ValueError(f'state must be for a {self.__class__.__name__} RNG') state_vec = np.empty(4, dtype=np.uint64) state_vec[0] = value['state']['state'] // 2 ** 64 state_vec[1] = value['state']['state'] % 2 ** 64 @@ -265,7 +264,7 @@ cdef class PCG64(BitGenerator): * The random values are simulated using a rejection-based method and so, on average, more than one value from the underlying - RNG is required to generate an single draw. + RNG is required to generate a single draw. * The number of bits required to generate a simulated value differs from the number of bits generated by the underlying RNG. 
For example, two 16-bit integer values can be simulated @@ -285,8 +284,8 @@ cdef class PCG64(BitGenerator): cdef class PCG64DXSM(BitGenerator): - """ - PCG64DXSM(seed=None) + # the first line is used to populate `__text_signature__` + """PCG64DXSM(seed=None)\n-- BitGenerator for the PCG-64 DXSM pseudo-random number generator. @@ -460,8 +459,7 @@ cdef class PCG64DXSM(BitGenerator): raise TypeError('state must be a dict') bitgen = value.get('bit_generator', '') if bitgen != self.__class__.__name__: - raise ValueError('state must be for a {0} ' - 'RNG'.format(self.__class__.__name__)) + raise ValueError(f'state must be for a {self.__class__.__name__} RNG') state_vec = np.empty(4, dtype=np.uint64) state_vec[0] = value['state']['state'] // 2 ** 64 state_vec[1] = value['state']['state'] % 2 ** 64 @@ -500,7 +498,7 @@ cdef class PCG64DXSM(BitGenerator): * The random values are simulated using a rejection-based method and so, on average, more than one value from the underlying - RNG is required to generate an single draw. + RNG is required to generate a single draw. * The number of bits required to generate a simulated value differs from the number of bits generated by the underlying RNG. 
For example, two 16-bit integer values can be simulated diff --git a/numpy/random/_philox.pyi b/numpy/random/_philox.pyi index 485f3bc82dec..ea9880ea10e2 100644 --- a/numpy/random/_philox.pyi +++ b/numpy/random/_philox.pyi @@ -1,14 +1,18 @@ -from typing import TypedDict +from typing import TypedDict, type_check_only from numpy import uint64 -from numpy.typing import NDArray -from numpy.random.bit_generator import BitGenerator, SeedSequence from numpy._typing import _ArrayLikeInt_co +from numpy.random.bit_generator import BitGenerator, SeedSequence +from numpy.typing import NDArray +__all__ = ["Philox"] + +@type_check_only class _PhiloxInternal(TypedDict): counter: NDArray[uint64] key: NDArray[uint64] +@type_check_only class _PhiloxState(TypedDict): bit_generator: str state: _PhiloxInternal @@ -20,18 +24,13 @@ class _PhiloxState(TypedDict): class Philox(BitGenerator): def __init__( self, - seed: None | _ArrayLikeInt_co | SeedSequence = ..., - counter: None | _ArrayLikeInt_co = ..., - key: None | _ArrayLikeInt_co = ..., + seed: _ArrayLikeInt_co | SeedSequence | None = ..., + counter: _ArrayLikeInt_co | None = ..., + key: _ArrayLikeInt_co | None = ..., ) -> None: ... - @property - def state( - self, - ) -> _PhiloxState: ... + @property # type: ignore[override] + def state(self) -> _PhiloxState: ... # pyrefly: ignore[bad-override] @state.setter - def state( - self, - value: _PhiloxState, - ) -> None: ... - def jumped(self, jumps: int = ...) -> Philox: ... + def state(self, value: _PhiloxState) -> None: ... + def jumped(self, jumps: int = 1) -> Philox: ... def advance(self, delta: int) -> Philox: ... 
diff --git a/numpy/random/_philox.pyx b/numpy/random/_philox.pyx index a046d9441fae..da47ad21e2de 100644 --- a/numpy/random/_philox.pyx +++ b/numpy/random/_philox.pyx @@ -1,7 +1,5 @@ #cython: binding=True -from cpython.pycapsule cimport PyCapsule_New - import numpy as np cimport numpy as np @@ -54,8 +52,8 @@ cdef double philox_double(void*st) noexcept nogil: return uint64_to_double(philox_next64( st)) cdef class Philox(BitGenerator): - """ - Philox(seed=None, counter=None, key=None) + # the first line is used to populate `__text_signature__` + """Philox(seed=None, counter=None, key=None)\n-- Container for the Philox (4x64) pseudo-random number generator. @@ -196,7 +194,7 @@ cdef class Philox(BitGenerator): cdef _reset_state_variables(self): cdef philox_state *rng_state = &self.rng_state - + rng_state[0].has_uint32 = 0 rng_state[0].uinteger = 0 rng_state[0].buffer_pos = PHILOX_BUFFER_SIZE @@ -238,8 +236,7 @@ cdef class Philox(BitGenerator): raise TypeError('state must be a dict') bitgen = value.get('bit_generator', '') if bitgen != self.__class__.__name__: - raise ValueError('state must be for a {0} ' - 'PRNG'.format(self.__class__.__name__)) + raise ValueError(f'state must be for a {self.__class__.__name__} PRNG') for i in range(4): self.rng_state.ctr.v[i] = value['state']['counter'][i] if i < 2: @@ -318,7 +315,7 @@ cdef class Philox(BitGenerator): * The random values are simulated using a rejection-based method and so, on average, more than one value from the underlying - RNG is required to generate an single draw. + RNG is required to generate a single draw. * The number of bits required to generate a simulated value differs from the number of bits generated by the underlying RNG. 
For example, two 16-bit integer values can be simulated diff --git a/numpy/random/_pickle.py b/numpy/random/_pickle.py index 842bd441a502..05f7232e68de 100644 --- a/numpy/random/_pickle.py +++ b/numpy/random/_pickle.py @@ -1,11 +1,10 @@ -from .bit_generator import BitGenerator -from .mtrand import RandomState -from ._philox import Philox -from ._pcg64 import PCG64, PCG64DXSM -from ._sfc64 import SFC64 - from ._generator import Generator from ._mt19937 import MT19937 +from ._pcg64 import PCG64, PCG64DXSM +from ._philox import Philox +from ._sfc64 import SFC64 +from .bit_generator import BitGenerator +from .mtrand import RandomState BitGenerators = {'MT19937': MT19937, 'PCG64': PCG64, diff --git a/numpy/random/_pickle.pyi b/numpy/random/_pickle.pyi new file mode 100644 index 000000000000..b0aa143801ba --- /dev/null +++ b/numpy/random/_pickle.pyi @@ -0,0 +1,43 @@ +from collections.abc import Callable +from typing import Final, Literal, TypedDict, overload, type_check_only + +from numpy.random._generator import Generator +from numpy.random._mt19937 import MT19937 +from numpy.random._pcg64 import PCG64, PCG64DXSM +from numpy.random._philox import Philox +from numpy.random._sfc64 import SFC64 +from numpy.random.bit_generator import BitGenerator +from numpy.random.mtrand import RandomState + +@type_check_only +class _BitGenerators(TypedDict): + MT19937: type[MT19937] + PCG64: type[PCG64] + PCG64DXSM: type[PCG64DXSM] + Philox: type[Philox] + SFC64: type[SFC64] + +### + +BitGenerators: Final[_BitGenerators] = ... + +@overload +def __bit_generator_ctor(bit_generator: Literal["MT19937"] = "MT19937") -> MT19937: ... +@overload +def __bit_generator_ctor(bit_generator: Literal["PCG64"]) -> PCG64: ... +@overload +def __bit_generator_ctor(bit_generator: Literal["PCG64DXSM"]) -> PCG64DXSM: ... +@overload +def __bit_generator_ctor(bit_generator: Literal["Philox"]) -> Philox: ... +@overload +def __bit_generator_ctor(bit_generator: Literal["SFC64"]) -> SFC64: ... 
+@overload +def __bit_generator_ctor[BitGeneratorT: BitGenerator](bit_generator: type[BitGeneratorT]) -> BitGeneratorT: ... +def __generator_ctor( + bit_generator_name: str | type[BitGenerator] | BitGenerator = "MT19937", + bit_generator_ctor: Callable[[str | type[BitGenerator]], BitGenerator] = ..., +) -> Generator: ... +def __randomstate_ctor( + bit_generator_name: str | type[BitGenerator] | BitGenerator = "MT19937", + bit_generator_ctor: Callable[[str | type[BitGenerator]], BitGenerator] = ..., +) -> RandomState: ... diff --git a/numpy/random/_sfc64.pyi b/numpy/random/_sfc64.pyi index 09ea41139789..5bddaf2b7676 100644 --- a/numpy/random/_sfc64.pyi +++ b/numpy/random/_sfc64.pyi @@ -1,12 +1,16 @@ -from typing import TypedDict +from typing import TypedDict, type_check_only from numpy import uint64 -from numpy.random.bit_generator import BitGenerator, SeedSequence from numpy._typing import NDArray, _ArrayLikeInt_co +from numpy.random.bit_generator import BitGenerator, SeedSequence + +__all__ = ["SFC64"] +@type_check_only class _SFC64Internal(TypedDict): state: NDArray[uint64] +@type_check_only class _SFC64State(TypedDict): bit_generator: str state: _SFC64Internal @@ -14,13 +18,8 @@ class _SFC64State(TypedDict): uinteger: int class SFC64(BitGenerator): - def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ... - @property - def state( - self, - ) -> _SFC64State: ... + def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... + @property # type: ignore[override] + def state(self) -> _SFC64State: ... # pyrefly: ignore[bad-override] @state.setter - def state( - self, - value: _SFC64State, - ) -> None: ... + def state(self, value: _SFC64State) -> None: ... 
diff --git a/numpy/random/_sfc64.pyx b/numpy/random/_sfc64.pyx index 12b48059cef2..81a5fc3d21e5 100644 --- a/numpy/random/_sfc64.pyx +++ b/numpy/random/_sfc64.pyx @@ -34,8 +34,8 @@ cdef double sfc64_double(void* st) noexcept nogil: cdef class SFC64(BitGenerator): - """ - SFC64(seed=None) + # the first line is used to populate `__text_signature__` + """SFC64(seed=None)\n-- BitGenerator for Chris Doty-Humphrey's Small Fast Chaotic PRNG. @@ -135,8 +135,7 @@ cdef class SFC64(BitGenerator): raise TypeError('state must be a dict') bitgen = value.get('bit_generator', '') if bitgen != self.__class__.__name__: - raise ValueError('state must be for a {0} ' - 'RNG'.format(self.__class__.__name__)) + raise ValueError(f'state must be for a {self.__class__.__name__} RNG') state_vec = np.empty(4, dtype=np.uint64) state_vec[:] = value['state']['state'] has_uint32 = value['has_uint32'] diff --git a/numpy/random/bit_generator.pxd b/numpy/random/bit_generator.pxd index dfa7d0a71c08..dbaab4721fec 100644 --- a/numpy/random/bit_generator.pxd +++ b/numpy/random/bit_generator.pxd @@ -31,5 +31,5 @@ cdef class SeedSequence(): np.ndarray[np.npy_uint32, ndim=1] entropy_array) cdef get_assembled_entropy(self) -cdef class SeedlessSequence(): +cdef class SeedlessSeedSequence: pass diff --git a/numpy/random/bit_generator.pyi b/numpy/random/bit_generator.pyi index d99278e861ea..51ee8188e65f 100644 --- a/numpy/random/bit_generator.pyi +++ b/numpy/random/bit_generator.pyi @@ -1,124 +1,122 @@ import abc -from threading import Lock +from _typeshed import Incomplete from collections.abc import Callable, Mapping, Sequence +from threading import Lock from typing import ( Any, + ClassVar, + Literal, NamedTuple, + Self, TypedDict, - TypeVar, overload, - Literal, + type_check_only, ) +from typing_extensions import CapsuleType -from numpy import dtype, uint32, uint64 +import numpy as np from numpy._typing import ( NDArray, _ArrayLikeInt_co, + _DTypeLike, _ShapeLike, - _SupportsDType, _UInt32Codes, 
_UInt64Codes, ) -_T = TypeVar("_T") +__all__ = ["BitGenerator", "SeedSequence"] -_DTypeLikeUint32 = ( - dtype[uint32] - | _SupportsDType[dtype[uint32]] - | type[uint32] - | _UInt32Codes -) -_DTypeLikeUint64 = ( - dtype[uint64] - | _SupportsDType[dtype[uint64]] - | type[uint64] - | _UInt64Codes -) +### + +type _DTypeLikeUint_ = _DTypeLike[np.uint32 | np.uint64] | _UInt32Codes | _UInt64Codes +@type_check_only class _SeedSeqState(TypedDict): - entropy: None | int | Sequence[int] + entropy: int | Sequence[int] | None spawn_key: tuple[int, ...] pool_size: int n_children_spawned: int +@type_check_only class _Interface(NamedTuple): - state_address: Any - state: Any - next_uint64: Any - next_uint32: Any - next_double: Any - bit_generator: Any + state_address: Incomplete + state: Incomplete + next_uint64: Incomplete + next_uint32: Incomplete + next_double: Incomplete + bit_generator: Incomplete + +@type_check_only +class _CythonMixin: + def __setstate_cython__(self, pyx_state: object, /) -> None: ... + def __reduce_cython__(self) -> Any: ... + +@type_check_only +class _GenerateStateMixin(_CythonMixin): + def generate_state(self, /, n_words: int, dtype: _DTypeLikeUint_ = ...) -> NDArray[np.uint32 | np.uint64]: ... + +### class ISeedSequence(abc.ABC): @abc.abstractmethod - def generate_state( - self, n_words: int, dtype: _DTypeLikeUint32 | _DTypeLikeUint64 = ... - ) -> NDArray[uint32 | uint64]: ... + def generate_state(self, /, n_words: int, dtype: _DTypeLikeUint_ = ...) -> NDArray[np.uint32 | np.uint64]: ... -class ISpawnableSeedSequence(ISeedSequence): +class ISpawnableSeedSequence(ISeedSequence, abc.ABC): @abc.abstractmethod - def spawn(self: _T, n_children: int) -> list[_T]: ... + def spawn(self, /, n_children: int) -> list[Self]: ... -class SeedlessSeedSequence(ISpawnableSeedSequence): - def generate_state( - self, n_words: int, dtype: _DTypeLikeUint32 | _DTypeLikeUint64 = ... - ) -> NDArray[uint32 | uint64]: ... - def spawn(self: _T, n_children: int) -> list[_T]: ... 
+class SeedlessSeedSequence(_GenerateStateMixin, ISpawnableSeedSequence): + def spawn(self, /, n_children: int) -> list[Self]: ... -class SeedSequence(ISpawnableSeedSequence): - entropy: None | int | Sequence[int] +class SeedSequence(_GenerateStateMixin, ISpawnableSeedSequence): + __pyx_vtable__: ClassVar[CapsuleType] = ... + + entropy: int | Sequence[int] | None spawn_key: tuple[int, ...] pool_size: int n_children_spawned: int - pool: NDArray[uint32] + pool: NDArray[np.uint32] + def __init__( self, - entropy: None | int | Sequence[int] | _ArrayLikeInt_co = ..., + /, + entropy: _ArrayLikeInt_co | None = None, *, - spawn_key: Sequence[int] = ..., - pool_size: int = ..., + spawn_key: Sequence[int] = (), + pool_size: int = 4, n_children_spawned: int = ..., ) -> None: ... - def __repr__(self) -> str: ... + def spawn(self, /, n_children: int) -> list[Self]: ... @property - def state( - self, - ) -> _SeedSeqState: ... - def generate_state( - self, n_words: int, dtype: _DTypeLikeUint32 | _DTypeLikeUint64 = ... - ) -> NDArray[uint32 | uint64]: ... - def spawn(self, n_children: int) -> list[SeedSequence]: ... + def state(self) -> _SeedSeqState: ... -class BitGenerator(abc.ABC): +class BitGenerator(_CythonMixin, abc.ABC): lock: Lock - def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ... - def __getstate__(self) -> tuple[dict[str, Any], ISeedSequence]: ... - def __setstate__( - self, state_seed_seq: dict[str, Any] | tuple[dict[str, Any], ISeedSequence] - ) -> None: ... - def __reduce__( - self, - ) -> tuple[ - Callable[[str], BitGenerator], - tuple[str], - tuple[dict[str, Any], ISeedSequence] - ]: ... - @abc.abstractmethod @property def state(self) -> Mapping[str, Any]: ... @state.setter - def state(self, value: Mapping[str, Any]) -> None: ... + def state(self, value: Mapping[str, Any], /) -> None: ... @property def seed_seq(self) -> ISeedSequence: ... - def spawn(self, n_children: int) -> list[BitGenerator]: ... 
- @overload - def random_raw(self, size: None = ..., output: Literal[True] = ...) -> int: ... # type: ignore[misc] - @overload - def random_raw(self, size: _ShapeLike = ..., output: Literal[True] = ...) -> NDArray[uint64]: ... # type: ignore[misc] - @overload - def random_raw(self, size: None | _ShapeLike = ..., output: Literal[False] = ...) -> None: ... # type: ignore[misc] - def _benchmark(self, cnt: int, method: str = ...) -> None: ... @property def ctypes(self) -> _Interface: ... @property def cffi(self) -> _Interface: ... + @property + def capsule(self) -> CapsuleType: ... + + # + def __init__(self, /, seed: _ArrayLikeInt_co | SeedSequence | None = None) -> None: ... + def __reduce__(self) -> tuple[Callable[[str], Self], tuple[str], tuple[Mapping[str, Any], ISeedSequence]]: ... + def spawn(self, /, n_children: int) -> list[Self]: ... + def _benchmark(self, /, cnt: int, method: str = "uint64") -> None: ... + + # + @overload + def random_raw(self, /, size: None = None, output: Literal[True] = True) -> int: ... + @overload + def random_raw(self, /, size: _ShapeLike, output: Literal[True] = True) -> NDArray[np.uint64]: ... + @overload + def random_raw(self, /, size: _ShapeLike | None, output: Literal[False]) -> None: ... + @overload + def random_raw(self, /, size: _ShapeLike | None = None, *, output: Literal[False]) -> None: ... diff --git a/numpy/random/bit_generator.pyx b/numpy/random/bit_generator.pyx index c999e6e32794..676f95e5ad70 100644 --- a/numpy/random/bit_generator.pyx +++ b/numpy/random/bit_generator.pyx @@ -34,12 +34,11 @@ SOFTWARE. 
""" import abc -import sys from itertools import cycle import re from secrets import randbits -from threading import Lock +from threading import RLock from cpython.pycapsule cimport PyCapsule_New @@ -227,8 +226,10 @@ class ISpawnableSeedSequence(ISeedSequence): """ -cdef class SeedlessSeedSequence(): - """ +cdef class SeedlessSeedSequence: + # the first line is used to populate `__text_signature__` + """SeedlessSeedSequence()\n-- + A seed sequence for BitGenerators with no need for seed state. See Also @@ -240,6 +241,8 @@ cdef class SeedlessSeedSequence(): raise NotImplementedError('seedless SeedSequences cannot generate state') def spawn(self, n_children): + if n_children < 0: + raise ValueError("n_children must be non-negative") return [self] * n_children @@ -248,9 +251,9 @@ cdef class SeedlessSeedSequence(): ISpawnableSeedSequence.register(SeedlessSeedSequence) -cdef class SeedSequence(): - """ - SeedSequence(entropy=None, *, spawn_key=(), pool_size=4) +cdef class SeedSequence: + # the first line is used to populate `__text_signature__` + """SeedSequence(entropy=None, *, spawn_key=(), pool_size=4, n_children_spawned=0)\n-- SeedSequence mixes sources of entropy in a reproducible way to set the initial state for independent and very probably non-overlapping @@ -305,7 +308,7 @@ cdef class SeedSequence(): elif not isinstance(entropy, (int, np.integer, list, tuple, range, np.ndarray)): raise TypeError('SeedSequence expects int or sequence of ints for ' - 'entropy not {}'.format(entropy)) + f'entropy not {entropy}') self.entropy = entropy self.spawn_key = tuple(spawn_key) self.pool_size = pool_size @@ -475,6 +478,9 @@ cdef class SeedSequence(): """ cdef uint32_t i + if n_children < 0: + raise ValueError("n_children must be non-negative") + seqs = [] for i in range(self.n_children_spawned, self.n_children_spawned + n_children): @@ -490,9 +496,9 @@ cdef class SeedSequence(): ISpawnableSeedSequence.register(SeedSequence) -cdef class BitGenerator(): - """ - 
BitGenerator(seed=None) +cdef class BitGenerator: + # the first line is used to populate `__text_signature__` + """BitGenerator(seed=None)\n-- Base Class for generic BitGenerators, which provide a stream of random bits based on different algorithms. Must be overridden. @@ -521,7 +527,7 @@ cdef class BitGenerator(): """ def __init__(self, seed=None): - self.lock = Lock() + self.lock = RLock() self._bitgen.state = 0 if type(self) is BitGenerator: raise NotImplementedError('BitGenerator is a base class and cannot be instantized') @@ -625,6 +631,8 @@ cdef class BitGenerator(): Equivalent method on the generator and seed sequence. """ + if n_children < 0: + raise ValueError("n_children must be non-negative") if not isinstance(self._seed_seq, ISpawnableSeedSequence): raise TypeError( "The underlying SeedSequence does not implement spawning.") @@ -708,3 +716,8 @@ cdef class BitGenerator(): if self._cffi is None: self._cffi = prepare_cffi(&self._bitgen) return self._cffi + +# NOTE: This has no implementation and should not be used. It purely exists for +# backwards compatibility, see https://github.com/scipy/scipy/issues/24215. 
+cdef class SeedlessSequence: + pass diff --git a/numpy/random/c_distributions.pxd b/numpy/random/c_distributions.pxd index b978d13503ea..da790ca499df 100644 --- a/numpy/random/c_distributions.pxd +++ b/numpy/random/c_distributions.pxd @@ -1,4 +1,3 @@ -#!python #cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3 from numpy cimport npy_intp diff --git a/numpy/random/meson.build b/numpy/random/meson.build index 1c90fb5866f2..16450278c846 100644 --- a/numpy/random/meson.build +++ b/numpy/random/meson.build @@ -52,6 +52,11 @@ if host_machine.system() == 'cygwin' c_args_random += ['-Wl,--export-all-symbols'] endif +cython_args = [] +if cy.version().version_compare('>=3.1.0') + cython_args += ['-Xfreethreading_compatible=True'] +endif + # name, sources, extra c_args, extra static libs to link random_pyx_sources = [ ['_bounded_integers', _bounded_integers_pyx, [], [npyrandom_lib, npymath_lib]], @@ -83,6 +88,7 @@ foreach gen: random_pyx_sources link_with: gen[3], install: true, subdir: 'numpy/random', + cython_args: cython_args, ) endforeach @@ -93,11 +99,14 @@ py.install_sources( '__init__.pxd', '__init__.py', '__init__.pyi', + '_bounded_integers.pyi', '_common.pxd', + '_common.pyi', '_generator.pyi', '_mt19937.pyi', '_pcg64.pyi', '_pickle.py', + '_pickle.pyi', '_philox.pyi', '_sfc64.pyi', 'bit_generator.pxd', diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index dbd3cd609495..066a56545f23 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -1,622 +1,940 @@ -import builtins +from builtins import bytes as py_bytes from collections.abc import Callable -from typing import Any, overload, Literal +from typing import Any, Literal, overload import numpy as np -from numpy import ( - dtype, - float32, - float64, - int8, - int16, - int32, - int64, - int_, - long, - uint8, - uint16, - uint32, - uint64, - uint, - ulong, -) -from numpy.random.bit_generator import BitGenerator from numpy._typing import ( 
ArrayLike, NDArray, _ArrayLikeFloat_co, _ArrayLikeInt_co, - _DoubleCodes, + _BoolCodes, + _DTypeLike, _DTypeLikeBool, _DTypeLikeInt, - _DTypeLikeUInt, - _Float32Codes, - _Float64Codes, _Int8Codes, _Int16Codes, _Int32Codes, _Int64Codes, - _IntCodes, - _LongCodes, + _IntPCodes, _ShapeLike, - _SingleCodes, - _SupportsDType, _UInt8Codes, _UInt16Codes, _UInt32Codes, _UInt64Codes, - _UIntCodes, - _ULongCodes, -) - -_DTypeLikeFloat32 = ( - dtype[float32] - | _SupportsDType[dtype[float32]] - | type[float32] - | _Float32Codes - | _SingleCodes + _UIntPCodes, ) +from numpy.random.bit_generator import BitGenerator -_DTypeLikeFloat64 = ( - dtype[float64] - | _SupportsDType[dtype[float64]] - | type[float] - | type[float64] - | _Float64Codes - | _DoubleCodes -) +__all__ = [ + "RandomState", + "beta", + "binomial", + "bytes", + "chisquare", + "choice", + "dirichlet", + "exponential", + "f", + "gamma", + "geometric", + "get_bit_generator", + "get_state", + "gumbel", + "hypergeometric", + "laplace", + "logistic", + "lognormal", + "logseries", + "multinomial", + "multivariate_normal", + "negative_binomial", + "noncentral_chisquare", + "noncentral_f", + "normal", + "pareto", + "permutation", + "poisson", + "power", + "rand", + "randint", + "randn", + "random", + "random_integers", + "random_sample", + "ranf", + "rayleigh", + "sample", + "seed", + "set_bit_generator", + "set_state", + "shuffle", + "standard_cauchy", + "standard_exponential", + "standard_gamma", + "standard_normal", + "standard_t", + "triangular", + "uniform", + "vonmises", + "wald", + "weibull", + "zipf", +] class RandomState: _bit_generator: BitGenerator - def __init__(self, seed: None | _ArrayLikeInt_co | BitGenerator = ...) -> None: ... + + def __init__(self, seed: _ArrayLikeInt_co | BitGenerator | None = None) -> None: ... def __repr__(self) -> str: ... def __str__(self) -> str: ... def __getstate__(self) -> dict[str, Any]: ... def __setstate__(self, state: dict[str, Any]) -> None: ... 
def __reduce__(self) -> tuple[Callable[[BitGenerator], RandomState], tuple[BitGenerator], dict[str, Any]]: ... - def seed(self, seed: None | _ArrayLikeFloat_co = ...) -> None: ... + + # + def seed(self, seed: _ArrayLikeFloat_co | None = None) -> None: ... + + # @overload - def get_state(self, legacy: Literal[False] = ...) -> dict[str, Any]: ... + def get_state(self, legacy: Literal[False] = False) -> dict[str, Any]: ... @overload - def get_state( - self, legacy: Literal[True] = ... - ) -> dict[str, Any] | tuple[str, NDArray[uint32], int, int, float]: ... - def set_state( - self, state: dict[str, Any] | tuple[str, NDArray[uint32], int, int, float] - ) -> None: ... + def get_state(self, legacy: Literal[True] = True) -> dict[str, Any] | tuple[str, NDArray[np.uint32], int, int, float]: ... + + # + def set_state(self, state: dict[str, Any] | tuple[str, NDArray[np.uint32], int, int, float]) -> None: ... + + # @overload - def random_sample(self, size: None = ...) -> float: ... # type: ignore[misc] + def random_sample(self, size: None = None) -> float: ... @overload - def random_sample(self, size: _ShapeLike) -> NDArray[float64]: ... + def random_sample(self, size: _ShapeLike) -> NDArray[np.float64]: ... + + # @overload - def random(self, size: None = ...) -> float: ... # type: ignore[misc] + def random(self, size: None = None) -> float: ... @overload - def random(self, size: _ShapeLike) -> NDArray[float64]: ... + def random(self, size: _ShapeLike) -> NDArray[np.float64]: ... + + # @overload - def beta(self, a: float, b: float, size: None = ...) -> float: ... # type: ignore[misc] + def beta(self, a: float, b: float, size: None = None) -> float: ... @overload - def beta( - self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None | _ShapeLike = ... - ) -> NDArray[float64]: ... + def beta(self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... @overload - def exponential(self, scale: float = ..., size: None = ...) -> float: ... 
# type: ignore[misc] + def beta(self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.float64] | Any: ... + + # @overload - def exponential( - self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ... - ) -> NDArray[float64]: ... + def exponential(self, scale: float = 1.0, size: None = None) -> float: ... @overload - def standard_exponential(self, size: None = ...) -> float: ... # type: ignore[misc] + def exponential(self, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... @overload - def standard_exponential(self, size: _ShapeLike) -> NDArray[float64]: ... + def exponential(self, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> NDArray[np.float64]: ... @overload - def tomaxint(self, size: None = ...) -> int: ... # type: ignore[misc] + def exponential(self, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> NDArray[np.float64] | Any: ... + + # @overload - # Generates long values, but stores it in a 64bit int: - def tomaxint(self, size: _ShapeLike) -> NDArray[int64]: ... + def standard_exponential(self, size: None = None) -> float: ... @overload - def randint( # type: ignore[misc] - self, - low: int, - high: None | int = ..., - size: None = ..., - ) -> int: ... + def standard_exponential(self, size: _ShapeLike) -> NDArray[np.float64]: ... + + # @overload - def randint( # type: ignore[misc] - self, - low: int, - high: None | int = ..., - size: None = ..., - dtype: type[bool] = ..., - ) -> bool: ... + def tomaxint(self, size: None = None) -> int: ... + @overload # Generates long values, but stores it in a 64bit int: + def tomaxint(self, size: _ShapeLike) -> NDArray[np.int64]: ... + + # @overload - def randint( # type: ignore[misc] + def randint( self, low: int, - high: None | int = ..., - size: None = ..., - dtype: type[np.bool] = ..., - ) -> np.bool: ... + high: int | None = None, + size: None = None, + *, + dtype: type[bool], + ) -> bool: ... 
@overload - def randint( # type: ignore[misc] + def randint( self, low: int, - high: None | int = ..., - size: None = ..., - dtype: type[int] = ..., + high: int | None = None, + size: None = None, + dtype: type[int] = int, ) -> int: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., - ) -> uint8: ... + high: int | None = None, + size: None = None, + *, + dtype: _DTypeLike[np.bool] | _BoolCodes, + ) -> np.bool: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., - ) -> uint16: ... + high: int | None = None, + size: None = None, + *, + dtype: _DTypeLike[np.int8] | _Int8Codes, + ) -> np.int8: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., - ) -> uint32: ... + high: int | None = None, + size: None = None, + *, + dtype: _DTypeLike[np.int16] | _Int16Codes, + ) -> np.int16: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., - ) -> uint: ... + high: int | None = None, + size: None = None, + *, + dtype: _DTypeLike[np.int32] | _Int32Codes, + ) -> np.int32: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ..., - ) -> ulong: ... + high: int | None = None, + size: None = None, + *, + dtype: _DTypeLike[np.int64] | _Int64Codes, + ) -> np.int64: ... 
@overload - def randint( # type: ignore[misc] + def randint( self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., - ) -> uint64: ... + high: int | None = None, + size: None = None, + *, + dtype: _DTypeLike[np.int_] | _IntPCodes, + ) -> np.int_: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., - ) -> int8: ... + high: int | None = None, + size: None = None, + *, + dtype: _DTypeLike[np.uint8] | _UInt8Codes, + ) -> np.uint8: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., - ) -> int16: ... + high: int | None = None, + size: None = None, + *, + dtype: _DTypeLike[np.uint16] | _UInt16Codes, + ) -> np.uint16: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., - ) -> int32: ... + high: int | None = None, + size: None = None, + *, + dtype: _DTypeLike[np.uint32] | _UInt32Codes, + ) -> np.uint32: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[int_] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., - ) -> int_: ... + high: int | None = None, + size: None = None, + *, + dtype: _DTypeLike[np.uint64] | _UInt64Codes, + ) -> np.uint64: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[long] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., - ) -> long: ... 
+ high: int | None = None, + size: None = None, + *, + dtype: _DTypeLike[np.uintp] | _UIntPCodes, + ) -> np.uint: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., - ) -> int64: ... - @overload - def randint( # type: ignore[misc] - self, - low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - ) -> NDArray[long]: ... + high: int | None = None, + size: None = None, + *, + dtype: _DTypeLikeInt, + ) -> np.integer | Any: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: _DTypeLikeBool = ..., - ) -> NDArray[np.bool]: ... + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _DTypeLikeBool, + ) -> NDArray[np.bool] | Any: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., - ) -> NDArray[int8]: ... + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _DTypeLike[np.int8] | _Int8Codes, + ) -> NDArray[np.int8] | Any: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., - ) -> NDArray[int16]: ... + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _DTypeLike[np.int16] | _Int16Codes, + ) -> NDArray[np.int16] | Any: ... 
@overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., - ) -> NDArray[int32]: ... + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _DTypeLike[np.int32] | _Int32Codes, + ) -> NDArray[np.int32] | Any: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: None | dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., - ) -> NDArray[int64]: ... + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _DTypeLike[np.int64] | _Int64Codes, + ) -> NDArray[np.int64] | Any: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., - ) -> NDArray[uint8]: ... + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: type[int] | _DTypeLike[np.int_] | _IntPCodes = int, + ) -> NDArray[np.int_] | Any: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., - ) -> NDArray[uint16]: ... + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _DTypeLike[np.uint8] | _UInt8Codes, + ) -> NDArray[np.uint8] | Any: ... 
@overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., - ) -> NDArray[uint32]: ... + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _DTypeLike[np.uint16] | _UInt16Codes, + ) -> NDArray[np.uint16] | Any: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., - ) -> NDArray[uint64]: ... + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _DTypeLike[np.uint32] | _UInt32Codes, + ) -> NDArray[np.uint32] | Any: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[long] | type[int] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., - ) -> NDArray[long]: ... + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _DTypeLike[np.uint64] | _UInt64Codes, + ) -> NDArray[np.uint64] | Any: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ..., - ) -> NDArray[ulong]: ... - def bytes(self, length: int) -> builtins.bytes: ... + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _DTypeLikeInt, + ) -> NDArray[np.integer] | Any: ... + + # + def bytes(self, length: int) -> py_bytes: ... 
+ + # @overload def choice( self, a: int, - size: None = ..., - replace: bool = ..., - p: None | _ArrayLikeFloat_co = ..., + size: None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, ) -> int: ... @overload def choice( self, a: int, - size: _ShapeLike = ..., - replace: bool = ..., - p: None | _ArrayLikeFloat_co = ..., - ) -> NDArray[long]: ... + size: _ShapeLike, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + ) -> NDArray[np.long]: ... @overload def choice( self, a: ArrayLike, - size: None = ..., - replace: bool = ..., - p: None | _ArrayLikeFloat_co = ..., + size: None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, ) -> Any: ... @overload def choice( self, a: ArrayLike, - size: _ShapeLike = ..., - replace: bool = ..., - p: None | _ArrayLikeFloat_co = ..., + size: _ShapeLike, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, ) -> NDArray[Any]: ... + + # + @overload + def uniform( + self, + low: float = 0.0, + high: float = 1.0, + size: None = None, + ) -> float: ... + @overload + def uniform( + self, + low: _ArrayLikeFloat_co, + high: _ArrayLikeFloat_co, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... @overload - def uniform(self, low: float = ..., high: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + def uniform( + self, + low: _ArrayLikeFloat_co = 0.0, + high: _ArrayLikeFloat_co = 1.0, + *, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... @overload def uniform( self, - low: _ArrayLikeFloat_co = ..., - high: _ArrayLikeFloat_co = ..., - size: None | _ShapeLike = ..., - ) -> NDArray[float64]: ... + low: _ArrayLikeFloat_co = 0.0, + high: _ArrayLikeFloat_co = 1.0, + size: None = None, + ) -> NDArray[np.float64] | Any: ... + + # + @overload + def rand(self, /) -> float: ... @overload - def rand(self) -> float: ... + def rand(self, arg0: int, /, *args: int) -> NDArray[np.float64]: ... + + # + @overload + def randn(self, /) -> float: ... 
@overload - def rand(self, *args: int) -> NDArray[float64]: ... + def randn(self, arg0: int, /, *args: int) -> NDArray[np.float64]: ... + + # @overload - def randn(self) -> float: ... + def random_integers( + self, + low: int, + high: int | None = None, + size: None = None, + ) -> int: ... @overload - def randn(self, *args: int) -> NDArray[float64]: ... + def random_integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None, + size: _ShapeLike, + ) -> NDArray[np.long]: ... @overload - def random_integers(self, low: int, high: None | int = ..., size: None = ...) -> int: ... # type: ignore[misc] + def random_integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + *, + size: _ShapeLike, + ) -> NDArray[np.long]: ... @overload def random_integers( self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - ) -> NDArray[long]: ... + high: _ArrayLikeInt_co | None = None, + size: None = None, + ) -> NDArray[np.long] | Any: ... + + # @overload - def standard_normal(self, size: None = ...) -> float: ... # type: ignore[misc] + def standard_normal(self, size: None = None) -> float: ... @overload - def standard_normal( # type: ignore[misc] - self, size: _ShapeLike = ... - ) -> NDArray[float64]: ... + def standard_normal(self, size: _ShapeLike) -> NDArray[np.float64]: ... + + # @overload - def normal(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + def normal( + self, + loc: float = 0.0, + scale: float = 1.0, + size: None = None, + ) -> float: ... @overload def normal( self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: None | _ShapeLike = ..., - ) -> NDArray[float64]: ... + loc: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... 
@overload - def standard_gamma( # type: ignore[misc] + def normal( + self, + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + *, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... + @overload + def normal( + self, + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: None = None, + ) -> NDArray[np.float64] | Any: ... + + # + @overload + def standard_gamma(self, shape: float, size: None = None) -> float: ... + @overload + def standard_gamma(self, shape: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... + @overload + def standard_gamma(self, shape: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.float64] | Any: ... + + # + @overload + def gamma( self, shape: float, - size: None = ..., + scale: float = 1.0, + size: None = None, ) -> float: ... @overload - def standard_gamma( + def gamma( self, shape: _ArrayLikeFloat_co, - size: None | _ShapeLike = ..., - ) -> NDArray[float64]: ... + scale: _ArrayLikeFloat_co, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... @overload - def gamma(self, shape: float, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + def gamma( + self, + shape: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co = 1.0, + *, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... @overload def gamma( self, shape: _ArrayLikeFloat_co, - scale: _ArrayLikeFloat_co = ..., - size: None | _ShapeLike = ..., - ) -> NDArray[float64]: ... + scale: _ArrayLikeFloat_co = 1.0, + size: None = None, + ) -> NDArray[np.float64] | Any: ... + + # + @overload + def f(self, dfnum: float, dfden: float, size: None = None) -> float: ... + @overload + def f(self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... @overload - def f(self, dfnum: float, dfden: float, size: None = ...) -> float: ... # type: ignore[misc] + def f(self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.float64] | Any: ... 
+ + # @overload - def f( - self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None | _ShapeLike = ... - ) -> NDArray[float64]: ... + def noncentral_f( + self, + dfnum: float, + dfden: float, + nonc: float, + size: None = None, + ) -> float: ... @overload - def noncentral_f(self, dfnum: float, dfden: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc] + def noncentral_f( + self, + dfnum: _ArrayLikeFloat_co, + dfden: _ArrayLikeFloat_co, + nonc: _ArrayLikeFloat_co, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... @overload def noncentral_f( self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, - size: None | _ShapeLike = ..., - ) -> NDArray[float64]: ... + size: None = None, + ) -> NDArray[np.float64] | Any: ... + + # + @overload + def chisquare(self, df: float, size: None = None) -> float: ... + @overload + def chisquare(self, df: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... @overload - def chisquare(self, df: float, size: None = ...) -> float: ... # type: ignore[misc] + def chisquare(self, df: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.float64] | Any: ... + + # @overload - def chisquare( - self, df: _ArrayLikeFloat_co, size: None | _ShapeLike = ... - ) -> NDArray[float64]: ... + def noncentral_chisquare( + self, + df: float, + nonc: float, + size: None = None, + ) -> float: ... @overload - def noncentral_chisquare(self, df: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc] + def noncentral_chisquare( + self, + df: _ArrayLikeFloat_co, + nonc: _ArrayLikeFloat_co, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... @overload def noncentral_chisquare( - self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None | _ShapeLike = ... - ) -> NDArray[float64]: ... + self, + df: _ArrayLikeFloat_co, + nonc: _ArrayLikeFloat_co, + size: None = None, + ) -> NDArray[np.float64] | Any: ... 
+ + # @overload - def standard_t(self, df: float, size: None = ...) -> float: ... # type: ignore[misc] + def standard_t(self, df: float, size: None = None) -> float: ... @overload - def standard_t( - self, df: _ArrayLikeFloat_co, size: None = ... - ) -> NDArray[float64]: ... + def standard_t(self, df: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... @overload - def standard_t( - self, df: _ArrayLikeFloat_co, size: _ShapeLike = ... - ) -> NDArray[float64]: ... + def standard_t(self, df: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.float64] | Any: ... + + # @overload - def vonmises(self, mu: float, kappa: float, size: None = ...) -> float: ... # type: ignore[misc] + def vonmises(self, mu: float, kappa: float, size: None = None) -> float: ... @overload - def vonmises( - self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None | _ShapeLike = ... - ) -> NDArray[float64]: ... + def vonmises(self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... @overload - def pareto(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] + def vonmises(self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.float64] | Any: ... + + # @overload - def pareto( - self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... - ) -> NDArray[float64]: ... + def pareto(self, a: float, size: None = None) -> float: ... @overload - def weibull(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] + def pareto(self, a: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... @overload - def weibull( - self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... - ) -> NDArray[float64]: ... + def pareto(self, a: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.float64] | Any: ... + + # @overload - def power(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] + def weibull(self, a: float, size: None = None) -> float: ... 
@overload - def power( - self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... - ) -> NDArray[float64]: ... + def weibull(self, a: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... @overload - def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc] + def weibull(self, a: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.float64] | Any: ... + + # + @overload + def power(self, a: float, size: None = None) -> float: ... + @overload + def power(self, a: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... @overload - def standard_cauchy(self, size: _ShapeLike = ...) -> NDArray[float64]: ... + def power(self, a: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.float64] | Any: ... + + # + @overload + def standard_cauchy(self, size: None = None) -> float: ... @overload - def laplace(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + def standard_cauchy(self, size: _ShapeLike) -> NDArray[np.float64]: ... + + # @overload def laplace( self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: None | _ShapeLike = ..., - ) -> NDArray[float64]: ... + loc: float = 0.0, + scale: float = 1.0, + size: None = None, + ) -> float: ... + @overload + def laplace( + self, + loc: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... + @overload + def laplace( + self, + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + *, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... @overload - def gumbel(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + def laplace( + self, + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: None = None, + ) -> NDArray[np.float64] | Any: ... 
+ + # @overload def gumbel( self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: None | _ShapeLike = ..., - ) -> NDArray[float64]: ... + loc: float = 0.0, + scale: float = 1.0, + size: None = None, + ) -> float: ... @overload - def logistic(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + def gumbel( + self, + loc: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... + @overload + def gumbel( + self, + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + *, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... + @overload + def gumbel( + self, + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: None = None, + ) -> NDArray[np.float64] | Any: ... + + # + @overload + def logistic( + self, + loc: float = 0.0, + scale: float = 1.0, + size: None = None, + ) -> float: ... @overload def logistic( self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: None | _ShapeLike = ..., - ) -> NDArray[float64]: ... + loc: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... + @overload + def logistic( + self, + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + *, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... + @overload + def logistic( + self, + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: None = None, + ) -> NDArray[np.float64] | Any: ... + + # + @overload + def lognormal( + self, + mean: float = 0.0, + sigma: float = 1.0, + size: None = None, + ) -> float: ... + @overload + def lognormal( + self, + mean: _ArrayLikeFloat_co, + sigma: _ArrayLikeFloat_co, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... @overload - def lognormal(self, mean: float = ..., sigma: float = ..., size: None = ...) -> float: ... 
# type: ignore[misc] + def lognormal( + self, + mean: _ArrayLikeFloat_co = 0.0, + sigma: _ArrayLikeFloat_co = 1.0, + *, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... @overload def lognormal( self, - mean: _ArrayLikeFloat_co = ..., - sigma: _ArrayLikeFloat_co = ..., - size: None | _ShapeLike = ..., - ) -> NDArray[float64]: ... + mean: _ArrayLikeFloat_co = 0.0, + sigma: _ArrayLikeFloat_co = 1.0, + size: None = None, + ) -> NDArray[np.float64] | Any: ... + + # @overload - def rayleigh(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + def rayleigh(self, scale: float = 1.0, size: None = None) -> float: ... @overload - def rayleigh( - self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ... - ) -> NDArray[float64]: ... + def rayleigh(self, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... @overload - def wald(self, mean: float, scale: float, size: None = ...) -> float: ... # type: ignore[misc] + def rayleigh(self, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> NDArray[np.float64]: ... @overload - def wald( - self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None | _ShapeLike = ... - ) -> NDArray[float64]: ... + def rayleigh(self, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> NDArray[np.float64] | Any: ... + + # @overload - def triangular(self, left: float, mode: float, right: float, size: None = ...) -> float: ... # type: ignore[misc] + def wald(self, mean: float, scale: float, size: None = None) -> float: ... + @overload + def wald(self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... + @overload + def wald(self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.float64] | Any: ... + + # + @overload + def triangular( + self, + left: float, + mode: float, + right: float, + size: None = None, + ) -> float: ... 
+ @overload + def triangular( + self, + left: _ArrayLikeFloat_co, + mode: _ArrayLikeFloat_co, + right: _ArrayLikeFloat_co, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... @overload def triangular( self, left: _ArrayLikeFloat_co, mode: _ArrayLikeFloat_co, right: _ArrayLikeFloat_co, - size: None | _ShapeLike = ..., - ) -> NDArray[float64]: ... + size: None = None, + ) -> NDArray[np.float64] | Any: ... + + # + @overload + def binomial(self, n: int, p: float, size: None = None) -> int: ... @overload - def binomial(self, n: int, p: float, size: None = ...) -> int: ... # type: ignore[misc] + def binomial(self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.long]: ... @overload - def binomial( - self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... - ) -> NDArray[long]: ... + def binomial(self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.long] | Any: ... + + # @overload - def negative_binomial(self, n: float, p: float, size: None = ...) -> int: ... # type: ignore[misc] + def negative_binomial(self, n: float, p: float, size: None = None) -> int: ... @overload - def negative_binomial( - self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... - ) -> NDArray[long]: ... + def negative_binomial(self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.long]: ... @overload - def poisson(self, lam: float = ..., size: None = ...) -> int: ... # type: ignore[misc] + def negative_binomial(self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.long] | Any: ... + + # @overload - def poisson( - self, lam: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ... - ) -> NDArray[long]: ... + def poisson(self, lam: float = 1.0, size: None = None) -> int: ... @overload - def zipf(self, a: float, size: None = ...) -> int: ... 
# type: ignore[misc] + def poisson(self, lam: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.long]: ... @overload - def zipf( - self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... - ) -> NDArray[long]: ... + def poisson(self, lam: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> NDArray[np.long]: ... @overload - def geometric(self, p: float, size: None = ...) -> int: ... # type: ignore[misc] + def poisson(self, lam: _ArrayLikeFloat_co = 1.0, size: None = None) -> NDArray[np.long] | Any: ... + + # @overload - def geometric( - self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... - ) -> NDArray[long]: ... + def zipf(self, a: float, size: None = None) -> int: ... @overload - def hypergeometric(self, ngood: int, nbad: int, nsample: int, size: None = ...) -> int: ... # type: ignore[misc] + def zipf(self, a: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.long]: ... + @overload + def zipf(self, a: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.long] | Any: ... + + # + @overload + def geometric(self, p: float, size: None = None) -> int: ... + @overload + def geometric(self, p: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.long]: ... + @overload + def geometric(self, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.long] | Any: ... + + # + @overload + def hypergeometric( + self, + ngood: int, + nbad: int, + nsample: int, + size: None = None, + ) -> int: ... + @overload + def hypergeometric( + self, + ngood: _ArrayLikeInt_co, + nbad: _ArrayLikeInt_co, + nsample: _ArrayLikeInt_co, + size: _ShapeLike, + ) -> NDArray[np.long]: ... @overload def hypergeometric( self, ngood: _ArrayLikeInt_co, nbad: _ArrayLikeInt_co, nsample: _ArrayLikeInt_co, - size: None | _ShapeLike = ..., - ) -> NDArray[long]: ... + size: None = None, + ) -> NDArray[np.long] | Any: ... + + # + @overload + def logseries(self, p: float, size: None = None) -> int: ... @overload - def logseries(self, p: float, size: None = ...) -> int: ... 
# type: ignore[misc] + def logseries(self, p: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.long]: ... @overload - def logseries( - self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... - ) -> NDArray[long]: ... + def logseries(self, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.long] | Any: ... + + # def multivariate_normal( self, mean: _ArrayLikeFloat_co, cov: _ArrayLikeFloat_co, - size: None | _ShapeLike = ..., - check_valid: Literal["warn", "raise", "ignore"] = ..., - tol: float = ..., - ) -> NDArray[float64]: ... - def multinomial( - self, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, size: None | _ShapeLike = ... - ) -> NDArray[long]: ... - def dirichlet( - self, alpha: _ArrayLikeFloat_co, size: None | _ShapeLike = ... - ) -> NDArray[float64]: ... + size: _ShapeLike | None = None, + check_valid: Literal["warn", "raise", "ignore"] = "warn", + tol: float = 1e-8, + ) -> NDArray[np.float64]: ... + + # + def multinomial(self, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.long]: ... + + # + def dirichlet(self, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.float64]: ... + + # def shuffle(self, x: ArrayLike) -> None: ... + + # @overload - def permutation(self, x: int) -> NDArray[long]: ... + def permutation(self, x: int) -> NDArray[np.long]: ... @overload def permutation(self, x: ArrayLike) -> NDArray[Any]: ... @@ -674,8 +992,5 @@ zipf = _rand.zipf sample = _rand.random_sample ranf = _rand.random_sample -def set_bit_generator(bitgen: BitGenerator) -> None: - ... - -def get_bit_generator() -> BitGenerator: - ... +def set_bit_generator(bitgen: BitGenerator) -> None: ... +def get_bit_generator() -> BitGenerator: ... 
diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index b42b0a7764b8..987d3edf159f 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -119,8 +119,8 @@ cdef object int64_to_long(object x): cdef class RandomState: - """ - RandomState(seed=None) + # the first line is used to populate `__text_signature__` + """RandomState(seed=None)\n-- Container for the slow Mersenne Twister pseudo-random number generator. Consider using a different BitGenerator with the Generator container @@ -190,7 +190,7 @@ cdef class RandomState: self._initialize_bit_generator(bit_generator) def __repr__(self): - return self.__str__() + ' at 0x{:X}'.format(id(self)) + return f'{self} at 0x{id(self):X}' def __str__(self): _str = self.__class__.__name__ @@ -222,12 +222,13 @@ cdef class RandomState: "be instantized.") self._bitgen = ( PyCapsule_GetPointer(capsule, name))[0] self._aug_state.bit_generator = &self._bitgen - self._reset_gauss() self.lock = bit_generator.lock + self._reset_gauss() cdef _reset_gauss(self): - self._aug_state.has_gauss = 0 - self._aug_state.gauss = 0.0 + with self.lock: + self._aug_state.has_gauss = 0 + self._aug_state.gauss = 0.0 def seed(self, seed=None): """ @@ -251,8 +252,9 @@ cdef class RandomState: """ if not isinstance(self._bit_generator, _MT19937): raise TypeError('can only re-seed a MT19937 BitGenerator') - self._bit_generator._legacy_seeding(seed) - self._reset_gauss() + with self.lock: + self._bit_generator._legacy_seeding(seed) + self._reset_gauss() def get_state(self, legacy=True): """ @@ -300,11 +302,12 @@ cdef class RandomState: 'MT19937 BitGenerator. 
To silence this warning, ' 'set `legacy` to False.', RuntimeWarning) legacy = False - st['has_gauss'] = self._aug_state.has_gauss - st['gauss'] = self._aug_state.gauss + with self.lock: + st['has_gauss'] = self._aug_state.has_gauss + st['gauss'] = self._aug_state.gauss if legacy and not isinstance(self._bit_generator, _MT19937): raise ValueError( - "legacy can only be True when the underlyign bitgenerator is " + "legacy can only be True when the underlying bitgenerator is " "an instance of MT19937." ) if legacy: @@ -380,11 +383,14 @@ cdef class RandomState: if len(state) > 3: st['has_gauss'] = state[3] st['gauss'] = state[4] - value = st - self._aug_state.gauss = st.get('gauss', 0.0) - self._aug_state.has_gauss = st.get('has_gauss', 0) - self._bit_generator.state = st + cdef double gauss = st.get('gauss', 0.0) + cdef int has_gauss = st.get('has_gauss', 0) + + with self.lock: + self._aug_state.gauss = gauss + self._aug_state.has_gauss = has_gauss + self._bit_generator.state = st def random_sample(self, size=None): """ @@ -437,7 +443,6 @@ cdef class RandomState: [-1.23204345, -1.75224494]]) """ - cdef double temp return double_fill(&random_standard_uniform_fill, &self._bitgen, size, self.lock, None) def random(self, size=None): @@ -544,16 +549,16 @@ cdef class RandomState: Examples -------- - A real world example: Assume a company has 10000 customer support + A real world example: Assume a company has 10000 customer support agents and the average time between customer calls is 4 minutes. >>> n = 10000 >>> time_between_calls = np.random.default_rng().exponential(scale=4, size=n) - What is the probability that a customer will call in the next - 4 to 5 minutes? - - >>> x = ((time_between_calls < 5).sum())/n + What is the probability that a customer will call in the next + 4 to 5 minutes? 
+ + >>> x = ((time_between_calls < 5).sum())/n >>> y = ((time_between_calls < 4).sum())/n >>> x-y 0.08 # may vary @@ -718,8 +723,6 @@ cdef class RandomState: Desired dtype of the result. Byteorder must be native. The default value is long. - .. versionadded:: 1.11.0 - .. warning:: This function defaults to the C-long dtype, which is 32bit on windows and otherwise 64bit on 64bit platforms (and 32bit on 32bit ones). @@ -861,8 +864,6 @@ cdef class RandomState: Generates a random sample from a given 1-D array - .. versionadded:: 1.7.0 - .. note:: New code should use the `~numpy.random.Generator.choice` method of a `~numpy.random.Generator` instance instead; @@ -982,7 +983,7 @@ cdef class RandomState: atol = max(atol, np.sqrt(np.finfo(p.dtype).eps)) p = np.PyArray_FROM_OTF( - p, np.NPY_DOUBLE, np.NPY_ALIGNED | np.NPY_ARRAY_C_CONTIGUOUS) + p, np.NPY_DOUBLE, np.NPY_ARRAY_ALIGNED | np.NPY_ARRAY_C_CONTIGUOUS) pix = np.PyArray_DATA(p) if p.ndim != 1: @@ -1048,7 +1049,7 @@ cdef class RandomState: idx = found else: idx = self.permutation(pop_size)[:size] - idx.shape = shape + idx = idx.reshape(shape) if is_scalar and isinstance(idx, np.ndarray): # In most cases a scalar will have been made an array @@ -1093,9 +1094,9 @@ cdef class RandomState: greater than or equal to low. The default value is 0. high : float or array_like of floats Upper boundary of the output interval. All values generated will be - less than or equal to high. The high limit may be included in the - returned array of floats due to floating-point rounding in the - equation ``low + (high-low) * random_sample()``. The default value + less than or equal to high. The high limit may be included in the + returned array of floats due to floating-point rounding in the + equation ``low + (high-low) * random_sample()``. The default value is 1.0. size : int or tuple of ints, optional Output shape. 
If the given shape is, e.g., ``(m, n, k)``, then @@ -1138,7 +1139,7 @@ cdef class RandomState: >>> x = np.float32(5*0.99999999) >>> x - 5.0 + np.float32(5.0) Examples @@ -1163,13 +1164,12 @@ cdef class RandomState: >>> plt.show() """ - cdef bint is_scalar = True cdef np.ndarray alow, ahigh, arange cdef double _low, _high, range cdef object temp - alow = np.PyArray_FROM_OTF(low, np.NPY_DOUBLE, np.NPY_ALIGNED) - ahigh = np.PyArray_FROM_OTF(high, np.NPY_DOUBLE, np.NPY_ALIGNED) + alow = np.PyArray_FROM_OTF(low, np.NPY_DOUBLE, np.NPY_ARRAY_ALIGNED) + ahigh = np.PyArray_FROM_OTF(high, np.NPY_DOUBLE, np.NPY_ARRAY_ALIGNED) if np.PyArray_NDIM(alow) == np.PyArray_NDIM(ahigh) == 0: _low = PyFloat_AsDouble(low) @@ -1391,15 +1391,14 @@ cdef class RandomState: """ if high is None: warnings.warn(("This function is deprecated. Please call " - "randint(1, {low} + 1) instead".format(low=low)), + f"randint(1, {low} + 1) instead"), DeprecationWarning) high = low low = 1 else: warnings.warn(("This function is deprecated. Please call " - "randint({low}, {high} + 1) " - "instead".format(low=low, high=high)), + f"randint({low}, {high} + 1) instead"), DeprecationWarning) return self.randint(low, int(high) + 1, size=size, dtype='l') @@ -1551,13 +1550,13 @@ cdef class RandomState: >>> mu, sigma = 0, 0.1 # mean and standard deviation >>> s = np.random.normal(mu, sigma, 1000) - Verify the mean and the variance: + Verify the mean and the standard deviation: >>> abs(mu - np.mean(s)) 0.0 # may vary >>> abs(sigma - np.std(s, ddof=1)) - 0.1 # may vary + 0.0 # may vary Display the histogram of the samples, along with the probability density function: @@ -1864,9 +1863,6 @@ cdef class RandomState: ---------- dfnum : float or array_like of floats Numerator degrees of freedom, must be > 0. - - .. versionchanged:: 1.14.0 - Earlier NumPy versions required dfnum > 1. dfden : float or array_like of floats Denominator degrees of freedom, must be > 0. 
nonc : float or array_like of floats @@ -1976,7 +1972,7 @@ cdef class RandomState: The variable obtained by summing the squares of `df` independent, standard normally distributed random variables: - .. math:: Q = \\sum_{i=0}^{\\mathtt{df}} X^2_i + .. math:: Q = \\sum_{i=1}^{\\mathtt{df}} X^2_i is chi-square distributed, denoted @@ -2025,9 +2021,6 @@ cdef class RandomState: ---------- df : float or array_like of floats Degrees of freedom, must be > 0. - - .. versionchanged:: 1.10.0 - Earlier NumPy versions required dfnum > 1. nonc : float or array_like of floats Non-centrality, must be non-negative. size : int or tuple of ints, optional @@ -2240,14 +2233,14 @@ cdef class RandomState: Does their energy intake deviate systematically from the recommended value of 7725 kJ? Our null hypothesis will be the absence of deviation, and the alternate hypothesis will be the presence of an effect that could be - either positive or negative, hence making our test 2-tailed. + either positive or negative, hence making our test 2-tailed. Because we are estimating the mean and we have N=11 values in our sample, - we have N-1=10 degrees of freedom. We set our significance level to 95% and - compute the t statistic using the empirical mean and empirical standard - deviation of our intake. We use a ddof of 1 to base the computation of our + we have N-1=10 degrees of freedom. We set our significance level to 95% and + compute the t statistic using the empirical mean and empirical standard + deviation of our intake. We use a ddof of 1 to base the computation of our empirical standard deviation on an unbiased estimate of the variance (note: - the final estimate is not unbiased due to the concave nature of the square + the final estimate is not unbiased due to the concave nature of the square root). 
>>> np.mean(intake) @@ -2265,18 +2258,18 @@ cdef class RandomState: >>> s = np.random.standard_t(10, size=1000000) >>> h = plt.hist(s, bins=100, density=True) - Does our t statistic land in one of the two critical regions found at + Does our t statistic land in one of the two critical regions found at both tails of the distribution? >>> np.sum(np.abs(t) < np.abs(s)) / float(len(s)) 0.018318 #random < 0.05, statistic is in critical region - The probability value for this 2-tailed test is about 1.83%, which is - lower than the 5% pre-determined significance threshold. + The probability value for this 2-tailed test is about 1.83%, which is + lower than the 5% pre-determined significance threshold. Therefore, the probability of observing values as extreme as our intake - conditionally on the null hypothesis being true is too low, and we reject - the null hypothesis of no deviation. + conditionally on the null hypothesis being true is too low, and we reject + the null hypothesis of no deviation. """ return cont(&legacy_standard_t, &self._aug_state, size, self.lock, 1, @@ -2292,7 +2285,7 @@ cdef class RandomState: Draw samples from a von Mises distribution. Samples are drawn from a von Mises distribution with specified mode - (mu) and dispersion (kappa), on the interval [-pi, pi]. + (mu) and concentration (kappa), on the interval [-pi, pi]. The von Mises distribution (also known as the circular normal distribution) is a continuous probability distribution on the unit @@ -2309,7 +2302,7 @@ cdef class RandomState: mu : float or array_like of floats Mode ("center") of the distribution. kappa : float or array_like of floats - Dispersion of the distribution, has to be >=0. + Concentration of the distribution, has to be >=0. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then ``m * n * k`` samples are drawn. If size is ``None`` (default), @@ -2333,7 +2326,7 @@ cdef class RandomState: .. 
math:: p(x) = \\frac{e^{\\kappa cos(x-\\mu)}}{2\\pi I_0(\\kappa)}, - where :math:`\\mu` is the mode and :math:`\\kappa` the dispersion, + where :math:`\\mu` is the mode and :math:`\\kappa` the concentration, and :math:`I_0(\\kappa)` is the modified Bessel function of order 0. The von Mises is named for Richard Edler von Mises, who was born in @@ -2354,7 +2347,7 @@ cdef class RandomState: -------- Draw samples from the distribution: - >>> mu, kappa = 0.0, 4.0 # mean and dispersion + >>> mu, kappa = 0.0, 4.0 # mean and concentration >>> s = np.random.vonmises(mu, kappa, 1000) Display the histogram of the samples, along with @@ -2428,11 +2421,14 @@ cdef class RandomState: Notes ----- - The probability density for the Pareto distribution is + The probability density for the Pareto II distribution is + + .. math:: p(x) = \\frac{a}{(x+1)^{a+1}} , x \ge 0 - .. math:: p(x) = \\frac{am^a}{x^{a+1}} + where :math:`a > 0` is the shape. - where :math:`a` is the shape and :math:`m` the scale. + The Pareto II distribution is a shifted and scaled version of the + Pareto I distribution, which can be found in `scipy.stats.pareto`. 
The Pareto distribution, named after the Italian economist Vilfredo Pareto, is a power law probability distribution @@ -3336,13 +3332,12 @@ cdef class RandomState: >>> plt.show() """ - cdef bint is_scalar = True cdef double fleft, fmode, fright cdef np.ndarray oleft, omode, oright - oleft = np.PyArray_FROM_OTF(left, np.NPY_DOUBLE, np.NPY_ALIGNED) - omode = np.PyArray_FROM_OTF(mode, np.NPY_DOUBLE, np.NPY_ALIGNED) - oright = np.PyArray_FROM_OTF(right, np.NPY_DOUBLE, np.NPY_ALIGNED) + oleft = np.PyArray_FROM_OTF(left, np.NPY_DOUBLE, np.NPY_ARRAY_ALIGNED) + omode = np.PyArray_FROM_OTF(mode, np.NPY_DOUBLE, np.NPY_ARRAY_ALIGNED) + oright = np.PyArray_FROM_OTF(right, np.NPY_DOUBLE, np.NPY_ARRAY_ALIGNED) if np.PyArray_NDIM(oleft) == np.PyArray_NDIM(omode) == np.PyArray_NDIM(oright) == 0: fleft = PyFloat_AsDouble(left) @@ -3416,7 +3411,7 @@ cdef class RandomState: Notes ----- - The probability density for the binomial distribution is + The probability mass function (PMF) for the binomial distribution is .. math:: P(N) = \\binom{n}{N}p^N(1-p)^{n-N}, @@ -3474,9 +3469,9 @@ cdef class RandomState: cdef long *randoms_data cdef np.broadcast it - p_arr = np.PyArray_FROM_OTF(p, np.NPY_DOUBLE, np.NPY_ALIGNED) + p_arr = np.PyArray_FROM_OTF(p, np.NPY_DOUBLE, np.NPY_ARRAY_ALIGNED) is_scalar = is_scalar and np.PyArray_NDIM(p_arr) == 0 - n_arr = np.PyArray_FROM_OTF(n, np.NPY_INTP, np.NPY_ALIGNED) + n_arr = np.PyArray_FROM_OTF(n, np.NPY_INTP, np.NPY_ARRAY_ALIGNED) is_scalar = is_scalar and np.PyArray_NDIM(n_arr) == 0 if not is_scalar: @@ -3656,7 +3651,7 @@ cdef class RandomState: Notes ----- - The Poisson distribution + The probability mass function (PMF) of Poisson distribution is .. math:: f(k; \\lambda)=\\frac{\\lambda^k e^{-\\lambda}}{k!} @@ -3744,7 +3739,7 @@ cdef class RandomState: Notes ----- - The probability density for the Zipf distribution is + The probability mass function (PMF) for the Zipf distribution is .. 
math:: p(k) = \\frac{k^{-a}}{\\zeta(a)}, @@ -3908,7 +3903,7 @@ cdef class RandomState: Notes ----- - The probability density for the Hypergeometric distribution is + The probability mass function (PMF) for the Hypergeometric distribution is .. math:: P(x) = \\frac{\\binom{g}{x}\\binom{b}{n-x}}{\\binom{g+b}{n}}, @@ -3958,14 +3953,13 @@ cdef class RandomState: # answer = 0.003 ... pretty unlikely! """ - cdef bint is_scalar = True cdef np.ndarray ongood, onbad, onsample cdef int64_t lngood, lnbad, lnsample # This legacy function supports "long" values only (checked below). - ongood = np.PyArray_FROM_OTF(ngood, np.NPY_INT64, np.NPY_ALIGNED) - onbad = np.PyArray_FROM_OTF(nbad, np.NPY_INT64, np.NPY_ALIGNED) - onsample = np.PyArray_FROM_OTF(nsample, np.NPY_INT64, np.NPY_ALIGNED) + ongood = np.PyArray_FROM_OTF(ngood, np.NPY_INT64, np.NPY_ARRAY_ALIGNED) + onbad = np.PyArray_FROM_OTF(nbad, np.NPY_INT64, np.NPY_ARRAY_ALIGNED) + onsample = np.PyArray_FROM_OTF(nsample, np.NPY_INT64, np.NPY_ARRAY_ALIGNED) if np.PyArray_NDIM(ongood) == np.PyArray_NDIM(onbad) == np.PyArray_NDIM(onsample) == 0: lngood = ngood @@ -4158,7 +4152,8 @@ cdef class RandomState: >>> mean = [0, 0] >>> cov = [[1, 0], [0, 100]] # diagonal covariance - Diagonal covariance means that points are oriented along x or y-axis: + Diagonal covariance means that the variables are independent, and the + probability density contours have their axes aligned with the coordinate axes: >>> import matplotlib.pyplot as plt >>> x, y = np.random.multivariate_normal(mean, cov, 5000).T @@ -4258,7 +4253,7 @@ cdef class RandomState: # GH10839, ensure double to make tol meaningful cov = cov.astype(np.double) - (u, s, v) = svd(cov) + (_u, s, v) = svd(cov) if check_valid != 'ignore': if check_valid != 'warn' and check_valid != 'raise': @@ -4276,8 +4271,7 @@ cdef class RandomState: x = np.dot(x, np.sqrt(s)[:, None] * v) x += mean - x.shape = tuple(final_shape) - return x + return x.reshape(tuple(final_shape)) def multinomial(self, long 
n, object pvals, size=None): """ @@ -4912,6 +4906,7 @@ def ranf(*args, **kwargs): return _rand.random_sample(*args, **kwargs) __all__ = [ + 'RandomState', 'beta', 'binomial', 'bytes', @@ -4964,5 +4959,18 @@ __all__ = [ 'wald', 'weibull', 'zipf', - 'RandomState', ] + +seed.__module__ = "numpy.random" +ranf.__module__ = "numpy.random" +sample.__module__ = "numpy.random" +get_bit_generator.__module__ = "numpy.random" +set_bit_generator.__module__ = "numpy.random" + +# The first item in __all__ is 'RandomState', so it can be skipped here. +for method_name in __all__[1:]: + method = getattr(RandomState, method_name, None) + if method is not None: + method.__module__ = "numpy.random" + +del method, method_name diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c index 1241329151a9..79cacb2df4a4 100644 --- a/numpy/random/src/distributions/distributions.c +++ b/numpy/random/src/distributions/distributions.c @@ -436,16 +436,23 @@ double random_beta(bitgen_t *bitgen_state, double a, double b) { XpY = X + Y; /* Reject if both U and V are 0.0, which is approx 1 in 10^106 */ if ((XpY <= 1.0) && (U + V > 0.0)) { - if (XpY > 0) { + if ((X > 0) && (Y > 0)) { return X / XpY; } else { - double logX = log(U) / a; - double logY = log(V) / b; - double logM = logX > logY ? logX : logY; - logX -= logM; - logY -= logM; - - return exp(logX - log(exp(logX) + exp(logY))); + /* + * Either X or Y underflowed to 0, so we lost information in + * U**(1/a) or V**(1/b). We still compute X/(X+Y) here, but we + * work with logarithms as much as we can to avoid the underflow. 
+ */ + double logX = log(U)/a; + double logY = log(V)/b; + double delta = logX - logY; + if (delta > 0) { + return exp(-log1p(exp(-delta))); + } + else { + return exp(delta - log1p(exp(delta))); + } } } } @@ -461,12 +468,15 @@ double random_chisquare(bitgen_t *bitgen_state, double df) { } double random_f(bitgen_t *bitgen_state, double dfnum, double dfden) { - return ((random_chisquare(bitgen_state, dfnum) * dfden) / - (random_chisquare(bitgen_state, dfden) * dfnum)); + double subexpr1 = random_chisquare(bitgen_state, dfnum) * dfden; + double subexpr2 = random_chisquare(bitgen_state, dfden) * dfnum; + return subexpr1 / subexpr2; } double random_standard_cauchy(bitgen_t *bitgen_state) { - return random_standard_normal(bitgen_state) / random_standard_normal(bitgen_state); + double subexpr1 = random_standard_normal(bitgen_state); + double subexpr2 = random_standard_normal(bitgen_state); + return subexpr1 / subexpr2; } double random_pareto(bitgen_t *bitgen_state, double a) { @@ -587,7 +597,7 @@ static RAND_INT_TYPE random_poisson_ptrs(bitgen_t *bitgen_state, double lam) { /* log(V) == log(0.0) ok here */ /* if U==0.0 so that us==0.0, log is ok since always returns */ if ((log(V) + log(invalpha) - log(a / (us * us) + b)) <= - (-lam + k * loglam - random_loggam(k + 1))) { + (-lam + (double)k * loglam - random_loggam((double)k + 1))) { return k; } } @@ -723,10 +733,10 @@ RAND_INT_TYPE random_binomial_btpe(bitgen_t *bitgen_state, RAND_INT_TYPE n, if (A > (t + rho)) goto Step10; - x1 = y + 1; - f1 = m + 1; - z = n + 1 - m; - w = n - y + 1; + x1 = (double)y + 1; + f1 = (double)m + 1; + z = (double)n + 1 - (double)m; + w = (double)n - (double)y + 1; x2 = x1 * x1; f2 = f1 * f1; z2 = z * z; @@ -763,7 +773,7 @@ RAND_INT_TYPE random_binomial_inversion(bitgen_t *bitgen_state, RAND_INT_TYPE n, binomial->psave = p; binomial->has_binomial = 1; binomial->q = q = 1.0 - p; - binomial->r = qn = exp(n * log(q)); + binomial->r = qn = exp(n * log1p(-p)); binomial->c = np = n * p; binomial->m 
= bound = (RAND_INT_TYPE)MIN(n, np + 10.0 * sqrt(np * q + 1)); } else { @@ -838,12 +848,12 @@ double random_noncentral_f(bitgen_t *bitgen_state, double dfnum, double dfden, double random_wald(bitgen_t *bitgen_state, double mean, double scale) { double U, X, Y; - double mu_2l; + double d; - mu_2l = mean / (2 * scale); Y = random_standard_normal(bitgen_state); Y = mean * Y * Y; - X = mean + mu_2l * (Y - sqrt(4 * scale * Y + Y * Y)); + d = 1 + sqrt(1 + 4 * scale / Y); + X = mean * (1 - 2 / d); U = next_double(bitgen_state); if (U <= mean / (mean + X)) { return X; @@ -998,14 +1008,34 @@ int64_t random_geometric(bitgen_t *bitgen_state, double p) { } RAND_INT_TYPE random_zipf(bitgen_t *bitgen_state, double a) { - double am1, b; + double am1, b, Umin; + if (a >= 1025) { + /* + * If a exceeds 1025, the calculation of b will overflow and the loop + * will not terminate. It is safe to simply return 1 here, because the + * probability of generating a value greater than 1 in this case is + * less than 3e-309. + */ + return (RAND_INT_TYPE) 1; + } am1 = a - 1.0; b = pow(2.0, am1); + /* + * In the while loop, X is generated from the uniform distribution (Umin, 1]. + * Values below Umin would result in X being rejected because it is too + * large, so there is no point in including them in the distribution of U. + */ + Umin = pow((double) RAND_INT_MAX, -am1); while (1) { - double T, U, V, X; + double U01, T, U, V, X; - U = 1.0 - next_double(bitgen_state); + /* + * U is sampled from (Umin, 1]. Note that Umin might be 0, and we don't + * want U to be 0. 
+ */ + U01 = next_double(bitgen_state); + U = U01*Umin + (1 - U01); V = next_double(bitgen_state); X = floor(pow(U, -1.0 / am1)); /* diff --git a/numpy/random/src/distributions/logfactorial.c b/numpy/random/src/distributions/logfactorial.c index 1305164699fa..337ec1a98db5 100644 --- a/numpy/random/src/distributions/logfactorial.c +++ b/numpy/random/src/distributions/logfactorial.c @@ -154,5 +154,5 @@ double logfactorial(int64_t k) * was within 2 ULP of the best 64 bit floating point value for * k up to 10000000.) */ - return (k + 0.5)*log(k) - k + (halfln2pi + (1.0/k)*(1/12.0 - 1/(360.0*k*k))); + return (k + 0.5)*log((double)k) - k + (halfln2pi + (1.0/k)*(1/12.0 - 1/(360.0*k*k))); } diff --git a/numpy/random/src/legacy/legacy-distributions.c b/numpy/random/src/legacy/legacy-distributions.c index b518b8a03994..e84bd19fdaee 100644 --- a/numpy/random/src/legacy/legacy-distributions.c +++ b/numpy/random/src/legacy/legacy-distributions.c @@ -228,6 +228,44 @@ double legacy_exponential(aug_bitgen_t *aug_state, double scale) { return scale * legacy_standard_exponential(aug_state); } +static RAND_INT_TYPE legacy_random_binomial_inversion( + bitgen_t *bitgen_state, RAND_INT_TYPE n, double p, binomial_t *binomial +) +{ + double q, qn, np, px, U; + RAND_INT_TYPE X, bound; + + if (!(binomial->has_binomial) || (binomial->nsave != n) || + (binomial->psave != p)) { + binomial->nsave = n; + binomial->psave = p; + binomial->has_binomial = 1; + binomial->q = q = 1.0 - p; + binomial->r = qn = exp(n * log(q)); + binomial->c = np = n * p; + binomial->m = bound = (RAND_INT_TYPE)MIN(n, np + 10.0 * sqrt(np * q + 1)); + } else { + q = binomial->q; + qn = binomial->r; + np = binomial->c; + bound = binomial->m; + } + X = 0; + px = qn; + U = next_double(bitgen_state); + while (U > px) { + X++; + if (X > bound) { + X = 0; + px = qn; + U = next_double(bitgen_state); + } else { + U -= px; + px = ((n - X + 1) * p * px) / (X * q); + } + } + return X; +} static RAND_INT_TYPE 
legacy_random_binomial_original(bitgen_t *bitgen_state, double p, @@ -237,14 +275,14 @@ static RAND_INT_TYPE legacy_random_binomial_original(bitgen_t *bitgen_state, if (p <= 0.5) { if (p * n <= 30.0) { - return random_binomial_inversion(bitgen_state, n, p, binomial); + return legacy_random_binomial_inversion(bitgen_state, n, p, binomial); } else { return random_binomial_btpe(bitgen_state, n, p, binomial); } } else { q = 1.0 - p; if (q * n <= 30.0) { - return n - random_binomial_inversion(bitgen_state, n, q, binomial); + return n - legacy_random_binomial_inversion(bitgen_state, n, q, binomial); } else { return n - random_binomial_btpe(bitgen_state, n, q, binomial); } @@ -388,7 +426,31 @@ int64_t legacy_random_poisson(bitgen_t *bitgen_state, double lam) { } int64_t legacy_random_zipf(bitgen_t *bitgen_state, double a) { - return (int64_t)random_zipf(bitgen_state, a); + double am1, b; + + am1 = a - 1.0; + b = pow(2.0, am1); + while (1) { + double T, U, V, X; + + U = 1.0 - next_double(bitgen_state); + V = next_double(bitgen_state); + X = floor(pow(U, -1.0 / am1)); + /* + * The real result may be above what can be represented in a signed + * long. Since this is a straightforward rejection algorithm, we can + * just reject this value. This function then models a Zipf + * distribution truncated to sys.maxint. 
+ */ + if (X > (double)RAND_INT_MAX || X < 1.0) { + continue; + } + + T = pow(1.0 + 1.0 / X, am1); + if (V * X * (T - 1.0) / (b - 1.0) <= T / b) { + return (RAND_INT_TYPE)X; + } + } } @@ -407,7 +469,7 @@ int64_t legacy_random_geometric(bitgen_t *bitgen_state, double p) { void legacy_random_multinomial(bitgen_t *bitgen_state, RAND_INT_TYPE n, RAND_INT_TYPE *mnix, double *pix, npy_intp d, binomial_t *binomial) { - return random_multinomial(bitgen_state, n, mnix, pix, d, binomial); + random_multinomial(bitgen_state, n, mnix, pix, d, binomial); } double legacy_vonmises(bitgen_t *bitgen_state, double mu, double kappa) { diff --git a/numpy/random/src/mt19937/mt19937-jump.c b/numpy/random/src/mt19937/mt19937-jump.c index 1a83a4c2e23b..14ca818ad218 100644 --- a/numpy/random/src/mt19937/mt19937-jump.c +++ b/numpy/random/src/mt19937/mt19937-jump.c @@ -13,7 +13,7 @@ unsigned long get_coef(unsigned long *pf, unsigned int deg) { void copy_state(mt19937_state *target_state, mt19937_state *state) { int i; - for (i = 0; i < N; i++) + for (i = 0; i < _MT19937_N; i++) target_state->key[i] = state->key[i]; target_state->pos = state->pos; @@ -26,17 +26,17 @@ void gen_next(mt19937_state *state) { static unsigned long mag02[2] = {0x0ul, MATRIX_A}; num = state->pos; - if (num < N - M) { + if (num < _MT19937_N - _MT19937_M) { y = (state->key[num] & UPPER_MASK) | (state->key[num + 1] & LOWER_MASK); - state->key[num] = state->key[num + M] ^ (y >> 1) ^ mag02[y % 2]; + state->key[num] = state->key[num + _MT19937_M] ^ (y >> 1) ^ mag02[y % 2]; state->pos++; - } else if (num < N - 1) { + } else if (num < _MT19937_N - 1) { y = (state->key[num] & UPPER_MASK) | (state->key[num + 1] & LOWER_MASK); - state->key[num] = state->key[num + (M - N)] ^ (y >> 1) ^ mag02[y % 2]; + state->key[num] = state->key[num + (_MT19937_M - _MT19937_N)] ^ (y >> 1) ^ mag02[y % 2]; state->pos++; - } else if (num == N - 1) { - y = (state->key[N - 1] & UPPER_MASK) | (state->key[0] & LOWER_MASK); - state->key[N - 1] = 
state->key[M - 1] ^ (y >> 1) ^ mag02[y % 2]; + } else if (num == _MT19937_N - 1) { + y = (state->key[_MT19937_N - 1] & UPPER_MASK) | (state->key[0] & LOWER_MASK); + state->key[_MT19937_N - 1] = state->key[_MT19937_M - 1] ^ (y >> 1) ^ mag02[y % 2]; state->pos = 0; } } @@ -45,19 +45,19 @@ void add_state(mt19937_state *state1, mt19937_state *state2) { int i, pt1 = state1->pos, pt2 = state2->pos; if (pt2 - pt1 >= 0) { - for (i = 0; i < N - pt2; i++) + for (i = 0; i < _MT19937_N - pt2; i++) state1->key[i + pt1] ^= state2->key[i + pt2]; - for (; i < N - pt1; i++) - state1->key[i + pt1] ^= state2->key[i + (pt2 - N)]; - for (; i < N; i++) - state1->key[i + (pt1 - N)] ^= state2->key[i + (pt2 - N)]; + for (; i < _MT19937_N - pt1; i++) + state1->key[i + pt1] ^= state2->key[i + (pt2 - _MT19937_N)]; + for (; i < _MT19937_N; i++) + state1->key[i + (pt1 - _MT19937_N)] ^= state2->key[i + (pt2 - _MT19937_N)]; } else { - for (i = 0; i < N - pt1; i++) + for (i = 0; i < _MT19937_N - pt1; i++) state1->key[i + pt1] ^= state2->key[i + pt2]; - for (; i < N - pt2; i++) - state1->key[i + (pt1 - N)] ^= state2->key[i + pt2]; - for (; i < N; i++) - state1->key[i + (pt1 - N)] ^= state2->key[i + (pt2 - N)]; + for (; i < _MT19937_N - pt2; i++) + state1->key[i + (pt1 - _MT19937_N)] ^= state2->key[i + pt2]; + for (; i < _MT19937_N; i++) + state1->key[i + (pt1 - _MT19937_N)] ^= state2->key[i + (pt2 - _MT19937_N)]; } } @@ -104,7 +104,7 @@ void mt19937_jump_state(mt19937_state *state) { pf[i] = poly_coef[i]; } - if (state->pos >= N) { + if (state->pos >= _MT19937_N) { state->pos = 0; } diff --git a/numpy/random/src/mt19937/mt19937.c b/numpy/random/src/mt19937/mt19937.c index bec518af8059..d52442858dbe 100644 --- a/numpy/random/src/mt19937/mt19937.c +++ b/numpy/random/src/mt19937/mt19937.c @@ -83,16 +83,16 @@ void mt19937_gen(mt19937_state *state) { uint32_t y; int i; - for (i = 0; i < N - M; i++) { + for (i = 0; i < _MT19937_N - _MT19937_M; i++) { y = (state->key[i] & UPPER_MASK) | (state->key[i + 1] 
& LOWER_MASK); - state->key[i] = state->key[i + M] ^ (y >> 1) ^ (-(y & 1) & MATRIX_A); + state->key[i] = state->key[i + _MT19937_M] ^ (y >> 1) ^ (-(y & 1) & MATRIX_A); } - for (; i < N - 1; i++) { + for (; i < _MT19937_N - 1; i++) { y = (state->key[i] & UPPER_MASK) | (state->key[i + 1] & LOWER_MASK); - state->key[i] = state->key[i + (M - N)] ^ (y >> 1) ^ (-(y & 1) & MATRIX_A); + state->key[i] = state->key[i + (_MT19937_M - _MT19937_N)] ^ (y >> 1) ^ (-(y & 1) & MATRIX_A); } - y = (state->key[N - 1] & UPPER_MASK) | (state->key[0] & LOWER_MASK); - state->key[N - 1] = state->key[M - 1] ^ (y >> 1) ^ (-(y & 1) & MATRIX_A); + y = (state->key[_MT19937_N - 1] & UPPER_MASK) | (state->key[0] & LOWER_MASK); + state->key[_MT19937_N - 1] = state->key[_MT19937_M - 1] ^ (y >> 1) ^ (-(y & 1) & MATRIX_A); state->pos = 0; } diff --git a/numpy/random/src/mt19937/mt19937.h b/numpy/random/src/mt19937/mt19937.h index 83129336a953..d84dc57fb301 100644 --- a/numpy/random/src/mt19937/mt19937.h +++ b/numpy/random/src/mt19937/mt19937.h @@ -8,8 +8,8 @@ #define RK_STATE_LEN 624 -#define N 624 -#define M 397 +#define _MT19937_N 624 +#define _MT19937_M 397 #define MATRIX_A 0x9908b0dfUL #define UPPER_MASK 0x80000000UL #define LOWER_MASK 0x7fffffffUL diff --git a/numpy/random/src/mt19937/randomkit.c b/numpy/random/src/mt19937/randomkit.c index e718c2d06cc8..21d270234c9a 100644 --- a/numpy/random/src/mt19937/randomkit.c +++ b/numpy/random/src/mt19937/randomkit.c @@ -62,6 +62,8 @@ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ +#include + /* static char const rcsid[] = "@(#) $Jeannot: randomkit.c,v 1.28 2005/07/21 22:14:09 js Exp $"; */ @@ -135,7 +137,7 @@ #define RK_DEV_RANDOM "/dev/random" #endif -char *rk_strerror[RK_ERR_MAX] = {"no error", "random device unvavailable"}; +char *rk_strerror[RK_ERR_MAX] = {"no error", "random device unavailable"}; /* static functions */ static unsigned long rk_hash(unsigned long key); diff --git a/numpy/random/tests/test_direct.py b/numpy/random/tests/test_direct.py index 12c2f1d5ab57..bdd2ee7d633e 100644 --- a/numpy/random/tests/test_direct.py +++ b/numpy/random/tests/test_direct.py @@ -1,17 +1,28 @@ import os -from os.path import join import sys +from os.path import join -import numpy as np -from numpy.testing import (assert_equal, assert_allclose, assert_array_equal, - assert_raises) import pytest +import numpy as np from numpy.random import ( - Generator, MT19937, PCG64, PCG64DXSM, Philox, RandomState, SeedSequence, - SFC64, default_rng + MT19937, + PCG64, + PCG64DXSM, + SFC64, + Generator, + Philox, + RandomState, + SeedSequence, + default_rng, ) from numpy.random._common import interface +from numpy.testing import ( + assert_allclose, + assert_array_equal, + assert_equal, + assert_raises, +) try: import cffi # noqa: F401 @@ -130,9 +141,11 @@ def gauss_from_uint(x, n, bits): def test_seedsequence(): - from numpy.random.bit_generator import (ISeedSequence, - ISpawnableSeedSequence, - SeedlessSeedSequence) + from numpy.random.bit_generator import ( + ISeedSequence, + ISpawnableSeedSequence, + SeedlessSeedSequence, + ) s1 = SeedSequence(range(10), spawn_key=(1, 2), pool_size=6) s1.spawn(10) @@ -170,6 +183,31 @@ def test_generator_spawning(): assert new_rngs[0].uniform() != new_rngs[1].uniform() +def test_spawn_negative_n_children(): + """Test that spawn raises ValueError for negative n_children.""" + from numpy.random.bit_generator import SeedlessSeedSequence + + rng = np.random.default_rng(42) + seq = rng.bit_generator.seed_seq + + # Test 
SeedSequence.spawn + with pytest.raises(ValueError, match="n_children must be non-negative"): + seq.spawn(-1) + + # Test SeedlessSeedSequence.spawn + seedless = SeedlessSeedSequence() + with pytest.raises(ValueError, match="n_children must be non-negative"): + seedless.spawn(-1) + + # Test BitGenerator.spawn + with pytest.raises(ValueError, match="n_children must be non-negative"): + rng.bit_generator.spawn(-1) + + # Test Generator.spawn + with pytest.raises(ValueError, match="n_children must be non-negative"): + rng.spawn(-1) + + def test_non_spawnable(): from numpy.random.bit_generator import ISeedSequence @@ -432,7 +470,6 @@ def test_advange_large(self): assert state["state"] == advanced_state - class TestPCG64DXSM(Base): @classmethod def setup_class(cls): @@ -538,7 +575,7 @@ def test_legacy_pickle(self): ) base_path = os.path.split(os.path.abspath(__file__))[0] - pkl_file = os.path.join(base_path, "data", f"sfc64_np126.pkl.gz") + pkl_file = os.path.join(base_path, "data", "sfc64_np126.pkl.gz") with gzip.open(pkl_file) as gz: sfc = pickle.load(gz) @@ -559,3 +596,25 @@ def test_passthrough(self): rg2 = default_rng(rg) assert rg2 is rg assert rg2.bit_generator is bg + + @pytest.mark.thread_unsafe( + reason="np.random.set_bit_generator affects global state" + ) + def test_coercion_RandomState_Generator(self): + # use default_rng to coerce RandomState to Generator + rs = RandomState(1234) + rg = default_rng(rs) + assert isinstance(rg.bit_generator, MT19937) + assert rg.bit_generator is rs._bit_generator + + # RandomState with a non MT19937 bit generator + _original = np.random.get_bit_generator() + bg = PCG64(12342298) + np.random.set_bit_generator(bg) + rs = np.random.mtrand._rand + rg = default_rng(rs) + assert rg.bit_generator is bg + + # vital to get global state back to original, otherwise + # other tests start to fail. 
+ np.random.set_bit_generator(_original) diff --git a/numpy/random/tests/test_extending.py b/numpy/random/tests/test_extending.py index 791fbaba9850..a1e64ecbe343 100644 --- a/numpy/random/tests/test_extending.py +++ b/numpy/random/tests/test_extending.py @@ -1,17 +1,15 @@ -from importlib.util import spec_from_file_location, module_from_spec import os -import pathlib -import pytest import shutil import subprocess import sys import sysconfig -import textwrap import warnings +from importlib.util import module_from_spec, spec_from_file_location -import numpy as np -from numpy.testing import IS_WASM, IS_EDITABLE +import pytest +import numpy as np +from numpy.testing import IS_EDITABLE, IS_WASM try: import cffi @@ -56,7 +54,13 @@ ) @pytest.mark.skipif(IS_WASM, reason="Can't start subprocess") @pytest.mark.skipif(cython is None, reason="requires cython") +@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', + reason='Meson unable to find MSVC linker on win-arm64') @pytest.mark.slow +@pytest.mark.thread_unsafe( + reason="building cython code in a subprocess doesn't make sense to do in many " + "threads and sometimes crashes" +) def test_cython(tmp_path): import glob # build the examples in a temporary directory @@ -65,14 +69,23 @@ def test_cython(tmp_path): build_dir = tmp_path / 'random' / '_examples' / 'cython' target_dir = build_dir / "build" os.makedirs(target_dir, exist_ok=True) + # Ensure we use the correct Python interpreter even when `meson` is + # installed in a different Python environment (see gh-24956) + native_file = str(build_dir / 'interpreter-native-file.ini') + with open(native_file, 'w') as f: + f.write("[binaries]\n") + f.write(f"python = '{sys.executable}'\n") + f.write(f"python3 = '{sys.executable}'") if sys.platform == "win32": subprocess.check_call(["meson", "setup", - "--buildtype=release", - "--vsenv", str(build_dir)], + "--buildtype=release", + "--vsenv", "--native-file", native_file, + str(build_dir)], cwd=target_dir, ) else: - 
subprocess.check_call(["meson", "setup", str(build_dir)], + subprocess.check_call(["meson", "setup", + "--native-file", native_file, str(build_dir)], cwd=target_dir ) subprocess.check_call(["meson", "compile", "-vv"], cwd=target_dir) @@ -83,12 +96,11 @@ def test_cython(tmp_path): g = glob.glob(str(target_dir / "*" / "extending.pyx.c")) with open(g[0]) as fid: txt_to_find = 'NumPy API declarations from "numpy/__init__' - for i, line in enumerate(fid): + for line in fid: if txt_to_find in line: break else: - assert False, ("Could not find '{}' in C file, " - "wrong pxd used".format(txt_to_find)) + assert False, f"Could not find '{txt_to_find}' in C file, wrong pxd used" # import without adding the directory to sys.path suffix = sysconfig.get_config_var('EXT_SUFFIX') diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index 514f9af2ce8c..7d13c49149b3 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -1,18 +1,24 @@ +import hashlib import os.path import sys -import hashlib +import warnings import pytest import numpy as np from numpy.exceptions import AxisError from numpy.linalg import LinAlgError +from numpy.random import MT19937, Generator, RandomState, SeedSequence from numpy.testing import ( - assert_, assert_raises, assert_equal, assert_allclose, - assert_warns, assert_no_warnings, assert_array_equal, - assert_array_almost_equal, suppress_warnings, IS_WASM) - -from numpy.random import Generator, MT19937, SeedSequence, RandomState + IS_WASM, + assert_, + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_no_warnings, + assert_raises, +) random = Generator(MT19937()) @@ -20,20 +26,20 @@ { "seed": 0, "steps": 10, - "initial": {"key_sha256": "bb1636883c2707b51c5b7fc26c6927af4430f2e0785a8c7bc886337f919f9edf", "pos": 9}, - "jumped": {"key_sha256": "ff682ac12bb140f2d72fba8d3506cf4e46817a0db27aae1683867629031d8d55", 
"pos": 598}, + "initial": {"key_sha256": "bb1636883c2707b51c5b7fc26c6927af4430f2e0785a8c7bc886337f919f9edf", "pos": 9}, # noqa: E501 + "jumped": {"key_sha256": "ff682ac12bb140f2d72fba8d3506cf4e46817a0db27aae1683867629031d8d55", "pos": 598}, # noqa: E501 }, { - "seed":384908324, - "steps":312, - "initial": {"key_sha256": "16b791a1e04886ccbbb4d448d6ff791267dc458ae599475d08d5cced29d11614", "pos": 311}, - "jumped": {"key_sha256": "a0110a2cf23b56be0feaed8f787a7fc84bef0cb5623003d75b26bdfa1c18002c", "pos": 276}, + "seed": 384908324, + "steps": 312, + "initial": {"key_sha256": "16b791a1e04886ccbbb4d448d6ff791267dc458ae599475d08d5cced29d11614", "pos": 311}, # noqa: E501 + "jumped": {"key_sha256": "a0110a2cf23b56be0feaed8f787a7fc84bef0cb5623003d75b26bdfa1c18002c", "pos": 276}, # noqa: E501 }, { "seed": [839438204, 980239840, 859048019, 821], "steps": 511, - "initial": {"key_sha256": "d306cf01314d51bd37892d874308200951a35265ede54d200f1e065004c3e9ea", "pos": 510}, - "jumped": {"key_sha256": "0e00ab449f01a5195a83b4aee0dfbc2ce8d46466a640b92e33977d2e42f777f8", "pos": 475}, + "initial": {"key_sha256": "d306cf01314d51bd37892d874308200951a35265ede54d200f1e065004c3e9ea", "pos": 510}, # noqa: E501 + "jumped": {"key_sha256": "0e00ab449f01a5195a83b4aee0dfbc2ce8d46466a640b92e33977d2e42f777f8", "pos": 475}, # noqa: E501 }, ] @@ -93,6 +99,24 @@ def test_p_is_nan(self): # Issue #4571. assert_raises(ValueError, random.binomial, 1, np.nan) + def test_p_extremely_small(self): + n = 50000000000 + p = 5e-17 + sample_size = 20000000 + x = random.binomial(n, p, size=sample_size) + sample_mean = x.mean() + expected_mean = n * p + sigma = np.sqrt(n * p * (1 - p) / sample_size) + # Note: the parameters were chosen so that expected_mean - 6*sigma + # is a positive value. The first `assert` below validates that + # assumption (in case someone edits the parameters in the future). + # The second `assert` is the actual test. 
+ low_bound = expected_mean - 6 * sigma + assert low_bound > 0, "bad test params: 6-sigma lower bound is negative" + test_msg = (f"sample mean {sample_mean} deviates from the expected mean " + f"{expected_mean} by more than 6*sigma") + assert abs(expected_mean - sample_mean) < 6 * sigma, test_msg + class TestMultinomial: def test_basic(self): @@ -151,8 +175,7 @@ def test_multinomial_pvals_float32(self): class TestMultivariateHypergeometric: - def setup_method(self): - self.seed = 8675309 + seed = 8675309 def test_argument_validation(self): # Error cases... @@ -215,7 +238,7 @@ def test_edge_cases(self, method): x = random.multivariate_hypergeometric([3, 4, 5], 12, size=3, method=method) - assert_array_equal(x, [[3, 4, 5]]*3) + assert_array_equal(x, [[3, 4, 5]] * 3) # Cases for nsample: # nsample < 10 @@ -284,37 +307,40 @@ def test_repeatability3(self): class TestSetState: - def setup_method(self): - self.seed = 1234567890 - self.rg = Generator(MT19937(self.seed)) - self.bit_generator = self.rg.bit_generator - self.state = self.bit_generator.state - self.legacy_state = (self.state['bit_generator'], - self.state['state']['key'], - self.state['state']['pos']) + def _create_rng(self): + seed = 1234567890 + rg = Generator(MT19937(seed)) + bit_generator = rg.bit_generator + state = bit_generator.state + legacy_state = (state['bit_generator'], + state['state']['key'], + state['state']['pos']) + return rg, bit_generator, state def test_gaussian_reset(self): # Make sure the cached every-other-Gaussian is reset. - old = self.rg.standard_normal(size=3) - self.bit_generator.state = self.state - new = self.rg.standard_normal(size=3) + rg, bit_generator, state = self._create_rng() + old = rg.standard_normal(size=3) + bit_generator.state = state + new = rg.standard_normal(size=3) assert_(np.all(old == new)) def test_gaussian_reset_in_media_res(self): # When the state is saved with a cached Gaussian, make sure the # cached Gaussian is restored. 
- - self.rg.standard_normal() - state = self.bit_generator.state - old = self.rg.standard_normal(size=3) - self.bit_generator.state = state - new = self.rg.standard_normal(size=3) + rg, bit_generator, state = self._create_rng() + rg.standard_normal() + state = bit_generator.state + old = rg.standard_normal(size=3) + bit_generator.state = state + new = rg.standard_normal(size=3) assert_(np.all(old == new)) def test_negative_binomial(self): # Ensure that the negative binomial results take floating point # arguments without truncation. - self.rg.negative_binomial(0.5, 0.5) + rg, _, _ = self._create_rng() + rg.negative_binomial(0.5, 0.5) class TestIntegers: @@ -349,7 +375,7 @@ def test_bounds_checking(self, endpoint): endpoint=endpoint, dtype=dt) assert_raises(ValueError, self.rfunc, 1, [0], endpoint=endpoint, dtype=dt) - assert_raises(ValueError, self.rfunc, [ubnd+1], [ubnd], + assert_raises(ValueError, self.rfunc, [ubnd + 1], [ubnd], endpoint=endpoint, dtype=dt) def test_bounds_checking_array(self, endpoint): @@ -494,15 +520,15 @@ def test_repeatability(self, endpoint): # We use a sha256 hash of generated sequences of 1000 samples # in the range [0, 6) for all but bool, where the range # is [0, 2). Hashes are for little endian numbers. 
- tgt = {'bool': '053594a9b82d656f967c54869bc6970aa0358cf94ad469c81478459c6a90eee3', - 'int16': '54de9072b6ee9ff7f20b58329556a46a447a8a29d67db51201bf88baa6e4e5d4', - 'int32': 'd3a0d5efb04542b25ac712e50d21f39ac30f312a5052e9bbb1ad3baa791ac84b', - 'int64': '14e224389ac4580bfbdccb5697d6190b496f91227cf67df60989de3d546389b1', - 'int8': '0e203226ff3fbbd1580f15da4621e5f7164d0d8d6b51696dd42d004ece2cbec1', - 'uint16': '54de9072b6ee9ff7f20b58329556a46a447a8a29d67db51201bf88baa6e4e5d4', - 'uint32': 'd3a0d5efb04542b25ac712e50d21f39ac30f312a5052e9bbb1ad3baa791ac84b', - 'uint64': '14e224389ac4580bfbdccb5697d6190b496f91227cf67df60989de3d546389b1', - 'uint8': '0e203226ff3fbbd1580f15da4621e5f7164d0d8d6b51696dd42d004ece2cbec1'} + tgt = {'bool': '053594a9b82d656f967c54869bc6970aa0358cf94ad469c81478459c6a90eee3', # noqa: E501 + 'int16': '54de9072b6ee9ff7f20b58329556a46a447a8a29d67db51201bf88baa6e4e5d4', # noqa: E501 + 'int32': 'd3a0d5efb04542b25ac712e50d21f39ac30f312a5052e9bbb1ad3baa791ac84b', # noqa: E501 + 'int64': '14e224389ac4580bfbdccb5697d6190b496f91227cf67df60989de3d546389b1', # noqa: E501 + 'int8': '0e203226ff3fbbd1580f15da4621e5f7164d0d8d6b51696dd42d004ece2cbec1', # noqa: E501 + 'uint16': '54de9072b6ee9ff7f20b58329556a46a447a8a29d67db51201bf88baa6e4e5d4', # noqa: E501 + 'uint32': 'd3a0d5efb04542b25ac712e50d21f39ac30f312a5052e9bbb1ad3baa791ac84b', # noqa: E501 + 'uint64': '14e224389ac4580bfbdccb5697d6190b496f91227cf67df60989de3d546389b1', # noqa: E501 + 'uint8': '0e203226ff3fbbd1580f15da4621e5f7164d0d8d6b51696dd42d004ece2cbec1'} # noqa: E501 for dt in self.itype[1:]: random = Generator(MT19937(1234)) @@ -589,12 +615,12 @@ def test_repeatability_32bit_boundary_broadcasting(self): def test_int64_uint64_broadcast_exceptions(self, endpoint): configs = {np.uint64: ((0, 2**65), (-1, 2**62), (10, 9), (0, 0)), np.int64: ((0, 2**64), (-(2**64), 2**62), (10, 9), (0, 0), - (-2**63-1, -2**63-1))} + (-2**63 - 1, -2**63 - 1))} for dtype in configs: for config in configs[dtype]: low, high = 
config high = high - endpoint - low_a = np.array([[low]*10]) + low_a = np.array([[low] * 10]) high_a = np.array([high] * 10) assert_raises(ValueError, random.integers, low, high, endpoint=endpoint, dtype=dtype) @@ -605,7 +631,7 @@ def test_int64_uint64_broadcast_exceptions(self, endpoint): assert_raises(ValueError, random.integers, low_a, high_a, endpoint=endpoint, dtype=dtype) - low_o = np.array([[low]*10], dtype=object) + low_o = np.array([[low] * 10], dtype=object) high_o = np.array([high] * 10, dtype=object) assert_raises(ValueError, random.integers, low_o, high, endpoint=endpoint, dtype=dtype) @@ -712,9 +738,7 @@ def test_integers_small_dtype_chisquared(self, sample_size, high, class TestRandomDist: # Make sure the random distribution returns the correct value for a # given seed - - def setup_method(self): - self.seed = 1234567890 + seed = 1234567890 def test_integers(self): random = Generator(MT19937(self.seed)) @@ -733,7 +757,7 @@ def test_integers_masked(self): def test_integers_closed(self): random = Generator(MT19937(self.seed)) actual = random.integers(-99, 99, size=(3, 2), endpoint=True) - desired = np.array([[-80, -56], [ 41, 38], [-83, -15]]) + desired = np.array([[-80, -56], [41, 38], [-83, -15]]) assert_array_equal(actual, desired) def test_integers_max_int(self): @@ -763,7 +787,7 @@ def test_random(self): def test_random_float(self): random = Generator(MT19937(self.seed)) actual = random.random((3, 2)) - desired = np.array([[0.0969992 , 0.70751746], + desired = np.array([[0.0969992 , 0.70751746], # noqa: E203 [0.08436483, 0.76773121], [0.66506902, 0.71548719]]) assert_array_almost_equal(actual, desired, decimal=7) @@ -867,7 +891,7 @@ def test_choice_return_shape(self): assert_(random.choice(arr, replace=True) is a) # Check 0-d array - s = tuple() + s = () assert_(not np.isscalar(random.choice(2, s, replace=True))) assert_(not np.isscalar(random.choice(2, s, replace=False))) assert_(not np.isscalar(random.choice(2, s, replace=True, p=p))) @@ 
-1179,10 +1203,10 @@ def test_dirichlet(self): alpha = np.array([51.72840233779265162, 39.74494232180943953]) actual = random.dirichlet(alpha, size=(3, 2)) desired = np.array([[[0.5439892869558927, 0.45601071304410745], - [0.5588917345860708, 0.4411082654139292 ]], + [0.5588917345860708, 0.4411082654139292 ]], # noqa: E202 [[0.5632074165063435, 0.43679258349365657], [0.54862581112627, 0.45137418887373015]], - [[0.49961831357047226, 0.5003816864295278 ], + [[0.49961831357047226, 0.5003816864295278 ], # noqa: E202 [0.52374806183482, 0.47625193816517997]]]) assert_array_almost_equal(actual, desired, decimal=15) bad_alpha = np.array([5.4e-01, -1.0e-16]) @@ -1242,6 +1266,7 @@ def test_dirichlet_small_alpha(self): assert_array_almost_equal(actual, expected, decimal=15) @pytest.mark.slow + @pytest.mark.thread_unsafe(reason="crashes with low memory") def test_dirichlet_moderately_small_alpha(self): # Use alpha.max() < 0.1 to trigger stick breaking code path alpha = np.array([0.02, 0.04, 0.03]) @@ -1275,7 +1300,7 @@ def test_exponential(self): actual = random.exponential(1.1234, size=(3, 2)) desired = np.array([[0.098845481066258, 1.560752510746964], [0.075730916041636, 1.769098974710777], - [1.488602544592235, 2.49684815275751 ]]) + [1.488602544592235, 2.49684815275751 ]]) # noqa: E202 assert_array_almost_equal(actual, desired, decimal=15) def test_exponential_0(self): @@ -1286,14 +1311,14 @@ def test_f(self): random = Generator(MT19937(self.seed)) actual = random.f(12, 77, size=(3, 2)) desired = np.array([[0.461720027077085, 1.100441958872451], - [1.100337455217484, 0.91421736740018 ], + [1.100337455217484, 0.91421736740018 ], # noqa: E202 [0.500811891303113, 0.826802454552058]]) assert_array_almost_equal(actual, desired, decimal=15) def test_gamma(self): random = Generator(MT19937(self.seed)) actual = random.gamma(5, 3, size=(3, 2)) - desired = np.array([[ 5.03850858902096, 7.9228656732049 ], + desired = np.array([[ 5.03850858902096, 7.9228656732049 ], # noqa: E202 
[18.73983605132985, 19.57961681699238], [18.17897755150825, 18.17653912505234]]) assert_array_almost_equal(actual, desired, decimal=14) @@ -1373,7 +1398,7 @@ def test_logistic(self): random = Generator(MT19937(self.seed)) actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[-4.338584631510999, 1.890171436749954], - [-4.64547787337966 , 2.514545562919217], + [-4.64547787337966 , 2.514545562919217], # noqa: E203 [ 1.495389489198666, 1.967827627577474]]) assert_array_almost_equal(actual, desired, decimal=15) @@ -1433,12 +1458,12 @@ def test_multivariate_normal(self, method): cov = [[1, 0], [0, 1]] size = (3, 2) actual = random.multivariate_normal(mean, cov, size, method=method) - desired = np.array([[[-1.747478062846581, 11.25613495182354 ], - [-0.9967333370066214, 10.342002097029821 ]], - [[ 0.7850019631242964, 11.181113712443013 ], - [ 0.8901349653255224, 8.873825399642492 ]], - [[ 0.7130260107430003, 9.551628690083056 ], - [ 0.7127098726541128, 11.991709234143173 ]]]) + desired = np.array([[[-1.747478062846581, 11.25613495182354 ], # noqa: E202 + [-0.9967333370066214, 10.342002097029821]], + [[ 0.7850019631242964, 11.181113712443013], + [ 0.8901349653255224, 8.873825399642492]], + [[ 0.7130260107430003, 9.551628690083056], + [ 0.7127098726541128, 11.991709234143173]]]) assert_array_almost_equal(actual, desired, decimal=15) @@ -1456,8 +1481,8 @@ def test_multivariate_normal(self, method): # Check that non positive-semidefinite covariance warns with # RuntimeWarning cov = [[1, 2], [2, 1]] - assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov) - assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov, + pytest.warns(RuntimeWarning, random.multivariate_normal, mean, cov) + pytest.warns(RuntimeWarning, random.multivariate_normal, mean, cov, method='eigh') assert_raises(LinAlgError, random.multivariate_normal, mean, cov, method='cholesky') @@ -1484,10 +1509,9 @@ def test_multivariate_normal(self, method): 
method='cholesky') cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32) - with suppress_warnings() as sup: + with warnings.catch_warnings(): + warnings.simplefilter("error") random.multivariate_normal(mean, cov, method=method) - w = sup.record(RuntimeWarning) - assert len(w) == 0 mu = np.zeros(2) cov = np.eye(2) @@ -1500,7 +1524,7 @@ def test_multivariate_normal(self, method): assert_raises(ValueError, random.multivariate_normal, mu, np.eye(3)) - @pytest.mark.parametrize('mean, cov', [([0], [[1+1j]]), ([0j], [[1]])]) + @pytest.mark.parametrize('mean, cov', [([0], [[1 + 1j]]), ([0j], [[1]])]) def test_multivariate_normal_disallow_complex(self, mean, cov): random = Generator(MT19937(self.seed)) with pytest.raises(TypeError, match="must not be complex"): @@ -1550,7 +1574,7 @@ def test_noncentral_chisquare(self): actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) desired = np.array([[ 1.70561552362133, 15.97378184942111], [13.71483425173724, 20.17859633310629], - [11.3615477156643 , 3.67891108738029]]) + [11.3615477156643 , 3.67891108738029]]) # noqa: E203 assert_array_almost_equal(actual, desired, decimal=14) actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) @@ -1570,8 +1594,8 @@ def test_noncentral_f(self): random = Generator(MT19937(self.seed)) actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1, size=(3, 2)) - desired = np.array([[0.060310671139 , 0.23866058175939], - [0.86860246709073, 0.2668510459738 ], + desired = np.array([[0.060310671139 , 0.23866058175939], # noqa: E203 + [0.86860246709073, 0.2668510459738 ], # noqa: E202 [0.23375780078364, 1.88922102885943]]) assert_array_almost_equal(actual, desired, decimal=14) @@ -1668,7 +1692,7 @@ def test_standard_gamma(self): random = Generator(MT19937(self.seed)) actual = random.standard_gamma(shape=3, size=(3, 2)) desired = np.array([[0.62970724056362, 1.22379851271008], - [3.899412530884 , 4.12479964250139], + [3.899412530884 , 4.12479964250139], # noqa: E203 [3.74994102464584, 
3.74929307690815]]) assert_array_almost_equal(actual, desired, decimal=14) @@ -1681,8 +1705,8 @@ def test_standard_gammma_scalar_float(self): def test_standard_gamma_float(self): random = Generator(MT19937(self.seed)) actual = random.standard_gamma(shape=3, size=(3, 2)) - desired = np.array([[0.62971, 1.2238 ], - [3.89941, 4.1248 ], + desired = np.array([[0.62971, 1.2238], + [3.89941, 4.1248], [3.74994, 3.74929]]) assert_array_almost_equal(actual, desired, decimal=5) @@ -1717,7 +1741,7 @@ def test_standard_gamma_0(self): def test_standard_normal(self): random = Generator(MT19937(self.seed)) actual = random.standard_normal(size=(3, 2)) - desired = np.array([[-1.870934851846581, 1.25613495182354 ], + desired = np.array([[-1.870934851846581, 1.25613495182354 ], # noqa: E202 [-1.120190126006621, 0.342002097029821], [ 0.661545174124296, 1.181113712443012]]) assert_array_almost_equal(actual, desired, decimal=15) @@ -1728,7 +1752,7 @@ def test_standard_normal_unsupported_type(self): def test_standard_t(self): random = Generator(MT19937(self.seed)) actual = random.standard_t(df=10, size=(3, 2)) - desired = np.array([[-1.484666193042647, 0.30597891831161 ], + desired = np.array([[-1.484666193042647, 0.30597891831161], [ 1.056684299648085, -0.407312602088507], [ 0.130704414281157, -2.038053410490321]]) assert_array_almost_equal(actual, desired, decimal=15) @@ -1737,7 +1761,7 @@ def test_triangular(self): random = Generator(MT19937(self.seed)) actual = random.triangular(left=5.12, mode=10.23, right=20.34, size=(3, 2)) - desired = np.array([[ 7.86664070590917, 13.6313848513185 ], + desired = np.array([[ 7.86664070590917, 13.6313848513185 ], # noqa: E202 [ 7.68152445215983, 14.36169131136546], [13.16105603911429, 13.72341621856971]]) assert_array_almost_equal(actual, desired, decimal=14) @@ -1745,7 +1769,7 @@ def test_triangular(self): def test_uniform(self): random = Generator(MT19937(self.seed)) actual = random.uniform(low=1.23, high=10.54, size=(3, 2)) - desired = 
np.array([[2.13306255040998 , 7.816987531021207], + desired = np.array([[2.13306255040998 , 7.816987531021207], # noqa: E203 [2.015436610109887, 8.377577533009589], [7.421792588856135, 7.891185744455209]]) assert_array_almost_equal(actual, desired, decimal=15) @@ -1779,7 +1803,7 @@ def test_uniform_neg_range(self): func = random.uniform assert_raises(ValueError, func, 2, 1) assert_raises(ValueError, func, [1, 2], [1, 1]) - assert_raises(ValueError, func, [[0, 1],[2, 3]], 2) + assert_raises(ValueError, func, [[0, 1], [2, 3]], 2) def test_scalar_exception_propagation(self): # Tests that exceptions are correctly propagated in distributions @@ -1846,11 +1870,16 @@ def test_vonmises_large_kappa_range(self, mu, kappa): def test_wald(self): random = Generator(MT19937(self.seed)) actual = random.wald(mean=1.23, scale=1.54, size=(3, 2)) - desired = np.array([[0.26871721804551, 3.2233942732115 ], + desired = np.array([[0.26871721804551, 3.2233942732115 ], # noqa: E202 [2.20328374987066, 2.40958405189353], [2.07093587449261, 0.73073890064369]]) assert_array_almost_equal(actual, desired, decimal=14) + def test_wald_nonnegative(self): + random = Generator(MT19937(self.seed)) + samples = random.wald(mean=1e9, scale=2.25, size=1000) + assert_(np.all(samples >= 0.0)) + def test_weibull(self): random = Generator(MT19937(self.seed)) actual = random.weibull(a=1.23, size=(3, 2)) @@ -1876,8 +1905,7 @@ def test_zipf(self): class TestBroadcast: # tests that functions that broadcast behave # correctly when presented with non-scalar arguments - def setup_method(self): - self.seed = 123456789 + seed = 123456789 def test_uniform(self): random = Generator(MT19937(self.seed)) @@ -1899,7 +1927,7 @@ def test_normal(self): scale = [1] bad_scale = [-1] random = Generator(MT19937(self.seed)) - desired = np.array([-0.38736406738527, 0.79594375042255, 0.0197076236097]) + desired = np.array([-0.38736406738527, 0.79594375042255, 0.0197076236097]) random = Generator(MT19937(self.seed)) actual = 
random.normal(loc * 3, scale) @@ -2094,7 +2122,7 @@ def test_vonmises(self): def test_pareto(self): a = [1] bad_a = [-1] - desired = np.array([0.95905052946317, 0.2383810889437 , 1.04988745750013]) + desired = np.array([0.95905052946317, 0.2383810889437, 1.04988745750013]) random = Generator(MT19937(self.seed)) actual = random.pareto(a * 3) @@ -2367,16 +2395,16 @@ def test_hypergeometric(self): assert_array_equal(actual, desired) assert_raises(ValueError, random.hypergeometric, bad_ngood * 3, nbad, nsample) assert_raises(ValueError, random.hypergeometric, ngood * 3, bad_nbad, nsample) - assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_one) - assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_two) + assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_one) # noqa: E501 + assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_two) # noqa: E501 random = Generator(MT19937(self.seed)) actual = random.hypergeometric(ngood, nbad * 3, nsample) assert_array_equal(actual, desired) assert_raises(ValueError, random.hypergeometric, bad_ngood, nbad * 3, nsample) assert_raises(ValueError, random.hypergeometric, ngood, bad_nbad * 3, nsample) - assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_one) - assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_two) + assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_one) # noqa: E501 + assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_two) # noqa: E501 random = Generator(MT19937(self.seed)) hypergeom = random.hypergeometric @@ -2450,7 +2478,7 @@ def test_multinomial_pval_broadcast(self, n): random = Generator(MT19937(self.seed)) pvals = np.array([1 / 4] * 4) actual = random.multinomial(n, pvals) - n_shape = tuple() if isinstance(n, int) else n.shape + n_shape = () if isinstance(n, int) else n.shape expected_shape = n_shape + (4,) 
assert actual.shape == expected_shape pvals = np.vstack([pvals, pvals]) @@ -2489,8 +2517,7 @@ def test_empty_outputs(self): @pytest.mark.skipif(IS_WASM, reason="can't start thread") class TestThread: # make sure each state produces the same sequence even in threads - def setup_method(self): - self.seeds = range(4) + seeds = range(4) def check_function(self, function, sz): from threading import Thread @@ -2535,13 +2562,11 @@ def gen_random(state, out): # See Issue #4263 class TestSingleEltArrayInput: - def setup_method(self): - self.argOne = np.array([2]) - self.argTwo = np.array([3]) - self.argThree = np.array([4]) - self.tgtShape = (1,) + def _create_arrays(self): + return np.array([2]), np.array([3]), np.array([4]), (1,) def test_one_arg_funcs(self): + argOne, _, _, tgtShape = self._create_arrays() funcs = (random.exponential, random.standard_gamma, random.chisquare, random.standard_t, random.pareto, random.weibull, @@ -2556,11 +2581,12 @@ def test_one_arg_funcs(self): out = func(np.array([0.5])) else: - out = func(self.argOne) + out = func(argOne) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) def test_two_arg_funcs(self): + argOne, argTwo, _, tgtShape = self._create_arrays() funcs = (random.uniform, random.normal, random.beta, random.gamma, random.f, random.noncentral_chisquare, @@ -2576,18 +2602,19 @@ def test_two_arg_funcs(self): argTwo = np.array([0.5]) else: - argTwo = self.argTwo + argTwo = argTwo - out = func(self.argOne, argTwo) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo) + assert_equal(out.shape, tgtShape) - out = func(self.argOne[0], argTwo) - assert_equal(out.shape, self.tgtShape) + out = func(argOne[0], argTwo) + assert_equal(out.shape, tgtShape) - out = func(self.argOne, argTwo[0]) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo[0]) + assert_equal(out.shape, tgtShape) def test_integers(self, endpoint): + _, _, _, tgtShape = self._create_arrays() itype = [np.bool, np.int8, 
np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.int64, np.uint64] func = random.integers @@ -2596,27 +2623,28 @@ def test_integers(self, endpoint): for dt in itype: out = func(low, high, endpoint=endpoint, dtype=dt) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) out = func(low[0], high, endpoint=endpoint, dtype=dt) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) out = func(low, high[0], endpoint=endpoint, dtype=dt) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) def test_three_arg_funcs(self): + argOne, argTwo, argThree, tgtShape = self._create_arrays() funcs = [random.noncentral_f, random.triangular, random.hypergeometric] for func in funcs: - out = func(self.argOne, self.argTwo, self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo, argThree) + assert_equal(out.shape, tgtShape) - out = func(self.argOne[0], self.argTwo, self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne[0], argTwo, argThree) + assert_equal(out.shape, tgtShape) - out = func(self.argOne, self.argTwo[0], self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo[0], argThree) + assert_equal(out.shape, tgtShape) @pytest.mark.parametrize("config", JUMP_TEST_DATA) @@ -2780,8 +2808,8 @@ def test_pickle_preserves_seed_sequence(): @pytest.mark.parametrize("version", [121, 126]) def test_legacy_pickle(version): # Pickling format was changes in 1.22.x and in 2.0.x - import pickle import gzip + import pickle base_path = os.path.split(os.path.abspath(__file__))[0] pkl_file = os.path.join( diff --git a/numpy/random/tests/test_generator_mt19937_regressions.py b/numpy/random/tests/test_generator_mt19937_regressions.py index d451c6acd16d..21093ef73eb6 100644 --- a/numpy/random/tests/test_generator_mt19937_regressions.py +++ b/numpy/random/tests/test_generator_mt19937_regressions.py @@ -1,34 +1,37 @@ -from numpy.testing import (assert_, 
assert_array_equal) -import numpy as np import pytest -from numpy.random import Generator, MT19937 +import numpy as np +from numpy.random import MT19937, Generator +from numpy.testing import assert_, assert_array_equal -class TestRegression: - def setup_method(self): - self.mt19937 = Generator(MT19937(121263137472525314065)) +class TestRegression: + def _create_generator(self): + return Generator(MT19937(121263137472525314065)) def test_vonmises_range(self): # Make sure generated random variables are in [-pi, pi]. # Regression test for ticket #986. + mt19937 = self._create_generator() for mu in np.linspace(-7., 7., 5): - r = self.mt19937.vonmises(mu, 1, 50) + r = mt19937.vonmises(mu, 1, 50) assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) def test_hypergeometric_range(self): # Test for ticket #921 - assert_(np.all(self.mt19937.hypergeometric(3, 18, 11, size=10) < 4)) - assert_(np.all(self.mt19937.hypergeometric(18, 3, 11, size=10) > 0)) + mt19937 = self._create_generator() + assert_(np.all(mt19937.hypergeometric(3, 18, 11, size=10) < 4)) + assert_(np.all(mt19937.hypergeometric(18, 3, 11, size=10) > 0)) # Test for ticket #5623 args = (2**20 - 2, 2**20 - 2, 2**20 - 2) # Check for 32-bit systems - assert_(self.mt19937.hypergeometric(*args) > 0) + assert_(mt19937.hypergeometric(*args) > 0) def test_logseries_convergence(self): # Test for ticket #923 + mt19937 = self._create_generator() N = 1000 - rvsn = self.mt19937.logseries(0.8, size=N) + rvsn = mt19937.logseries(0.8, size=N) # these two frequency counts should be close to theoretical # numbers with this large sample # theoretical large N result is 0.49706795 @@ -59,55 +62,85 @@ def test_call_within_randomstate(self): mt19937 = Generator(MT19937(i)) m = Generator(MT19937(4321)) # If m.state is not honored, the result will change - assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res) + assert_array_equal(m.choice(10, size=10, p=np.ones(10) / 10.), res) def test_multivariate_normal_size_types(self): # 
Test for multivariate_normal issue with 'size' argument. # Check that the multivariate_normal size argument can be a # numpy integer. - self.mt19937.multivariate_normal([0], [[0]], size=1) - self.mt19937.multivariate_normal([0], [[0]], size=np.int_(1)) - self.mt19937.multivariate_normal([0], [[0]], size=np.int64(1)) + mt19937 = self._create_generator() + mt19937.multivariate_normal([0], [[0]], size=1) + mt19937.multivariate_normal([0], [[0]], size=np.int_(1)) + mt19937.multivariate_normal([0], [[0]], size=np.int64(1)) def test_beta_small_parameters(self): # Test that beta with small a and b parameters does not produce # NaNs due to roundoff errors causing 0 / 0, gh-5851 - x = self.mt19937.beta(0.0001, 0.0001, size=100) + mt19937 = self._create_generator() + x = mt19937.beta(0.0001, 0.0001, size=100) assert_(not np.any(np.isnan(x)), 'Nans in mt19937.beta') def test_beta_very_small_parameters(self): # gh-24203: beta would hang with very small parameters. - self.mt19937.beta(1e-49, 1e-40) + mt19937 = self._create_generator() + mt19937.beta(1e-49, 1e-40) def test_beta_ridiculously_small_parameters(self): # gh-24266: beta would generate nan when the parameters # were subnormal or a small multiple of the smallest normal. + mt19937 = self._create_generator() tiny = np.finfo(1.0).tiny - x = self.mt19937.beta(tiny/32, tiny/40, size=50) + x = mt19937.beta(tiny / 32, tiny / 40, size=50) assert not np.any(np.isnan(x)) + def test_beta_expected_zero_frequency(self): + # gh-24475: For small a and b (e.g. a=0.0025, b=0.0025), beta + # would generate too many zeros. 
+ mt19937 = self._create_generator() + a = 0.0025 + b = 0.0025 + n = 1000000 + x = mt19937.beta(a, b, size=n) + nzeros = np.count_nonzero(x == 0) + # beta CDF at x = np.finfo(np.double).smallest_subnormal/2 + # is p = 0.0776169083131899, e.g, + # + # import numpy as np + # from mpmath import mp + # mp.dps = 160 + # x = mp.mpf(np.finfo(np.float64).smallest_subnormal)/2 + # # CDF of the beta distribution at x: + # p = mp.betainc(a, b, x1=0, x2=x, regularized=True) + # n = 1000000 + # exprected_freq = float(n*p) + # + expected_freq = 77616.90831318991 + assert 0.95 * expected_freq < nzeros < 1.05 * expected_freq + def test_choice_sum_of_probs_tolerance(self): # The sum of probs should be 1.0 with some tolerance. # For low precision dtypes the tolerance was too tight. # See numpy github issue 6123. + mt19937 = self._create_generator() a = [1, 2, 3] counts = [4, 4, 2] for dt in np.float16, np.float32, np.float64: probs = np.array(counts, dtype=dt) / sum(counts) - c = self.mt19937.choice(a, p=probs) + c = mt19937.choice(a, p=probs) assert_(c in a) with pytest.raises(ValueError): - self.mt19937.choice(a, p=probs*0.9) + mt19937.choice(a, p=probs * 0.9) def test_shuffle_of_array_of_different_length_strings(self): # Test that permuting an array of different length strings # will not cause a segfault on garbage collection # Tests gh-7710 + mt19937 = self._create_generator() a = np.array(['a', 'a' * 1000]) for _ in range(100): - self.mt19937.shuffle(a) + mt19937.shuffle(a) # Force Garbage Collection - should not segfault. import gc @@ -117,10 +150,11 @@ def test_shuffle_of_array_of_objects(self): # Test that permuting an array of objects will not cause # a segfault on garbage collection. # See gh-7719 + mt19937 = self._create_generator() a = np.array([np.arange(1), np.arange(4)], dtype=object) for _ in range(1000): - self.mt19937.shuffle(a) + mt19937.shuffle(a) # Force Garbage Collection - should not segfault. 
import gc @@ -150,10 +184,11 @@ def __array__(self, dtype=None, copy=None): assert_array_equal(m.__array__(), np.arange(5)) def test_gamma_0(self): - assert self.mt19937.standard_gamma(0.0) == 0.0 - assert_array_equal(self.mt19937.standard_gamma([0.0]), 0.0) + mt19937 = self._create_generator() + assert mt19937.standard_gamma(0.0) == 0.0 + assert_array_equal(mt19937.standard_gamma([0.0]), 0.0) - actual = self.mt19937.standard_gamma([0.0], dtype='float') + actual = mt19937.standard_gamma([0.0], dtype='float') expected = np.array([0.], dtype=np.float32) assert_array_equal(actual, expected) @@ -161,5 +196,26 @@ def test_geometric_tiny_prob(self): # Regression test for gh-17007. # When p = 1e-30, the probability that a sample will exceed 2**63-1 # is 0.9999999999907766, so we expect the result to be all 2**63-1. - assert_array_equal(self.mt19937.geometric(p=1e-30, size=3), + mt19937 = self._create_generator() + assert_array_equal(mt19937.geometric(p=1e-30, size=3), np.iinfo(np.int64).max) + + def test_zipf_large_parameter(self): + # Regression test for part of gh-9829: a call such as rng.zipf(10000) + # would hang. + mt19937 = self._create_generator() + n = 8 + sample = mt19937.zipf(10000, size=n) + assert_array_equal(sample, np.ones(n, dtype=np.int64)) + + def test_zipf_a_near_1(self): + # Regression test for gh-9829: a call such as rng.zipf(1.0000000000001) + # would hang. + mt19937 = self._create_generator() + n = 100000 + sample = mt19937.zipf(1.0000000000001, size=n) + # Not much of a test, but let's do something more than verify that + # it doesn't hang. Certainly for a monotonically decreasing + # discrete distribution truncated to signed 64 bit integers, more + # than half should be less than 2**62. 
+ assert np.count_nonzero(sample < 2**62) > n / 2 diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py index c98584aeda9d..f110aa892b31 100644 --- a/numpy/random/tests/test_random.py +++ b/numpy/random/tests/test_random.py @@ -1,15 +1,19 @@ +import sys import warnings import pytest import numpy as np -from numpy.testing import ( - assert_, assert_raises, assert_equal, assert_warns, - assert_no_warnings, assert_array_equal, assert_array_almost_equal, - suppress_warnings, IS_WASM - ) from numpy import random -import sys +from numpy.testing import ( + IS_WASM, + assert_, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_no_warnings, + assert_raises, +) class TestSeed: @@ -101,101 +105,109 @@ def test_multidimensional_pvals(self): class TestSetState: - def setup_method(self): - self.seed = 1234567890 - self.prng = random.RandomState(self.seed) - self.state = self.prng.get_state() + def _create_rng(self): + seed = 1234567890 + prng = random.RandomState(seed) + state = prng.get_state() + return prng, state def test_basic(self): - old = self.prng.tomaxint(16) - self.prng.set_state(self.state) - new = self.prng.tomaxint(16) + prng, state = self._create_rng() + old = prng.tomaxint(16) + prng.set_state(state) + new = prng.tomaxint(16) assert_(np.all(old == new)) def test_gaussian_reset(self): # Make sure the cached every-other-Gaussian is reset. - old = self.prng.standard_normal(size=3) - self.prng.set_state(self.state) - new = self.prng.standard_normal(size=3) + prng, state = self._create_rng() + old = prng.standard_normal(size=3) + prng.set_state(state) + new = prng.standard_normal(size=3) assert_(np.all(old == new)) def test_gaussian_reset_in_media_res(self): # When the state is saved with a cached Gaussian, make sure the # cached Gaussian is restored. 
- - self.prng.standard_normal() - state = self.prng.get_state() - old = self.prng.standard_normal(size=3) - self.prng.set_state(state) - new = self.prng.standard_normal(size=3) + prng, state = self._create_rng() + prng.standard_normal() + state = prng.get_state() + old = prng.standard_normal(size=3) + prng.set_state(state) + new = prng.standard_normal(size=3) assert_(np.all(old == new)) def test_backwards_compatibility(self): # Make sure we can accept old state tuples that do not have the # cached Gaussian value. - old_state = self.state[:-2] - x1 = self.prng.standard_normal(size=16) - self.prng.set_state(old_state) - x2 = self.prng.standard_normal(size=16) - self.prng.set_state(self.state) - x3 = self.prng.standard_normal(size=16) + prng, state = self._create_rng() + old_state = state[:-2] + x1 = prng.standard_normal(size=16) + prng.set_state(old_state) + x2 = prng.standard_normal(size=16) + prng.set_state(state) + x3 = prng.standard_normal(size=16) assert_(np.all(x1 == x2)) assert_(np.all(x1 == x3)) def test_negative_binomial(self): # Ensure that the negative binomial results take floating point # arguments without truncation. 
- self.prng.negative_binomial(0.5, 0.5) + prng, _ = self._create_rng() + prng.negative_binomial(0.5, 0.5) def test_set_invalid_state(self): # gh-25402 + prng, _ = self._create_rng() with pytest.raises(IndexError): - self.prng.set_state(()) + prng.set_state(()) class TestRandint: - rfunc = np.random.randint - # valid integer/boolean types itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.int64, np.uint64] def test_unsupported_type(self): - assert_raises(TypeError, self.rfunc, 1, dtype=float) + rng = random.RandomState() + assert_raises(TypeError, rng.randint, 1, dtype=float) def test_bounds_checking(self): + rng = random.RandomState() for dt in self.itype: lbnd = 0 if dt is np.bool else np.iinfo(dt).min ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 - assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt) - assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt) - assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt) - assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt) + assert_raises(ValueError, rng.randint, lbnd - 1, ubnd, dtype=dt) + assert_raises(ValueError, rng.randint, lbnd, ubnd + 1, dtype=dt) + assert_raises(ValueError, rng.randint, ubnd, lbnd, dtype=dt) + assert_raises(ValueError, rng.randint, 1, 0, dtype=dt) def test_rng_zero_and_extremes(self): + rng = random.RandomState() for dt in self.itype: lbnd = 0 if dt is np.bool else np.iinfo(dt).min ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 tgt = ubnd - 1 - assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt) tgt = lbnd - assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt) - tgt = (lbnd + ubnd)//2 - assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + tgt = (lbnd + ubnd) // 2 + assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt) def test_full_range(self): 
# Test for ticket #1690 + rng = random.RandomState() for dt in self.itype: lbnd = 0 if dt is np.bool else np.iinfo(dt).min ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 try: - self.rfunc(lbnd, ubnd, dtype=dt) + rng.randint(lbnd, ubnd, dtype=dt) except Exception as e: raise AssertionError("No error should have been raised, " "but one was with the following " @@ -203,15 +215,15 @@ def test_full_range(self): def test_in_bounds_fuzz(self): # Don't use fixed seed - np.random.seed() + rng = random.RandomState() for dt in self.itype[1:]: for ubnd in [4, 8, 16]: - vals = self.rfunc(2, ubnd, size=2**16, dtype=dt) + vals = rng.randint(2, ubnd, size=2**16, dtype=dt) assert_(vals.max() < ubnd) assert_(vals.min() >= 2) - vals = self.rfunc(0, 2, size=2**16, dtype=np.bool) + vals = rng.randint(0, 2, size=2**16, dtype=np.bool) assert_(vals.max() < 2) assert_(vals.min() >= 0) @@ -221,31 +233,31 @@ def test_repeatability(self): # We use a sha256 hash of generated sequences of 1000 samples # in the range [0, 6) for all but bool, where the range # is [0, 2). Hashes are for little endian numbers. 
- tgt = {'bool': '509aea74d792fb931784c4b0135392c65aec64beee12b0cc167548a2c3d31e71', - 'int16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4', - 'int32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f', - 'int64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e', - 'int8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404', - 'uint16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4', - 'uint32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f', - 'uint64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e', - 'uint8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404'} + tgt = {'bool': '509aea74d792fb931784c4b0135392c65aec64beee12b0cc167548a2c3d31e71', # noqa: E501 + 'int16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4', # noqa: E501 + 'int32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f', # noqa: E501 + 'int64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e', # noqa: E501 + 'int8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404', # noqa: E501 + 'uint16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4', # noqa: E501 + 'uint32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f', # noqa: E501 + 'uint64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e', # noqa: E501 + 'uint8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404'} # noqa: E501 for dt in self.itype[1:]: - np.random.seed(1234) + rng = random.RandomState(1234) # view as little endian for hash if sys.byteorder == 'little': - val = self.rfunc(0, 6, size=1000, dtype=dt) + val = rng.randint(0, 6, size=1000, dtype=dt) else: - val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap() + val = rng.randint(0, 6, size=1000, dtype=dt).byteswap() res = hashlib.sha256(val.view(np.int8)).hexdigest() assert_(tgt[np.dtype(dt).name] == 
res) # bools do not depend on endianness - np.random.seed(1234) - val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8) + rng = random.RandomState(1234) + val = rng.randint(0, 2, size=1000, dtype=bool).view(np.int8) res = hashlib.sha256(val).hexdigest() assert_(tgt[np.dtype(bool).name] == res) @@ -274,11 +286,12 @@ def test_int64_uint64_corner_case(self): def test_respect_dtype_singleton(self): # See gh-7203 + rng = random.RandomState() for dt in self.itype: lbnd = 0 if dt is np.bool else np.iinfo(dt).min ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 - sample = self.rfunc(lbnd, ubnd, dtype=dt) + sample = rng.randint(lbnd, ubnd, dtype=dt) assert_equal(sample.dtype, np.dtype(dt)) for dt in (bool, int): @@ -287,7 +300,7 @@ def test_respect_dtype_singleton(self): ubnd = 2 if dt is bool else np.iinfo("long").max + 1 # gh-7284: Ensure that we get Python data types - sample = self.rfunc(lbnd, ubnd, dtype=dt) + sample = rng.randint(lbnd, ubnd, dtype=dt) assert_(not hasattr(sample, 'dtype')) assert_equal(type(sample), dt) @@ -295,40 +308,36 @@ def test_respect_dtype_singleton(self): class TestRandomDist: # Make sure the random distribution returns the correct value for a # given seed - - def setup_method(self): - self.seed = 1234567890 + seed = 1234567890 def test_rand(self): - np.random.seed(self.seed) - actual = np.random.rand(3, 2) + rng = random.RandomState(self.seed) + actual = rng.rand(3, 2) desired = np.array([[0.61879477158567997, 0.59162362775974664], [0.88868358904449662, 0.89165480011560816], [0.4575674820298663, 0.7781880808593471]]) assert_array_almost_equal(actual, desired, decimal=15) def test_randn(self): - np.random.seed(self.seed) - actual = np.random.randn(3, 2) + rng = random.RandomState(self.seed) + actual = rng.randn(3, 2) desired = np.array([[1.34016345771863121, 1.73759122771936081], [1.498988344300628, -0.2286433324536169], [2.031033998682787, 2.17032494605655257]]) assert_array_almost_equal(actual, desired, decimal=15) def 
test_randint(self): - np.random.seed(self.seed) - actual = np.random.randint(-99, 99, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.randint(-99, 99, size=(3, 2)) desired = np.array([[31, 3], [-52, 41], [-48, -66]]) assert_array_equal(actual, desired) def test_random_integers(self): - np.random.seed(self.seed) - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) - actual = np.random.random_integers(-99, 99, size=(3, 2)) - assert_(len(w) == 1) + rng = random.RandomState(self.seed) + with pytest.warns(DeprecationWarning): + actual = rng.random_integers(-99, 99, size=(3, 2)) desired = np.array([[31, 3], [-52, 41], [-48, -66]]) @@ -340,11 +349,9 @@ def test_random_integers_max_int(self): # into a C long. Previous implementations of this # method have thrown an OverflowError when attempting # to generate this integer. - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) + with pytest.warns(DeprecationWarning): actual = np.random.random_integers(np.iinfo('l').max, np.iinfo('l').max) - assert_(len(w) == 1) desired = np.iinfo('l').max assert_equal(actual, desired) @@ -364,41 +371,41 @@ def test_random_integers_deprecated(self): np.iinfo('l').max, np.iinfo('l').max) def test_random(self): - np.random.seed(self.seed) - actual = np.random.random((3, 2)) + rng = random.RandomState(self.seed) + actual = rng.random((3, 2)) desired = np.array([[0.61879477158567997, 0.59162362775974664], [0.88868358904449662, 0.89165480011560816], [0.4575674820298663, 0.7781880808593471]]) assert_array_almost_equal(actual, desired, decimal=15) def test_choice_uniform_replace(self): - np.random.seed(self.seed) - actual = np.random.choice(4, 4) + rng = random.RandomState(self.seed) + actual = rng.choice(4, 4) desired = np.array([2, 3, 2, 3]) assert_array_equal(actual, desired) def test_choice_nonuniform_replace(self): - np.random.seed(self.seed) - actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) + rng = random.RandomState(self.seed) + 
actual = rng.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) desired = np.array([1, 1, 2, 2]) assert_array_equal(actual, desired) def test_choice_uniform_noreplace(self): - np.random.seed(self.seed) - actual = np.random.choice(4, 3, replace=False) + rng = random.RandomState(self.seed) + actual = rng.choice(4, 3, replace=False) desired = np.array([0, 1, 3]) assert_array_equal(actual, desired) def test_choice_nonuniform_noreplace(self): - np.random.seed(self.seed) - actual = np.random.choice(4, 3, replace=False, + rng = random.RandomState(self.seed) + actual = rng.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1]) desired = np.array([2, 3, 1]) assert_array_equal(actual, desired) def test_choice_noninteger(self): - np.random.seed(self.seed) - actual = np.random.choice(['a', 'b', 'c', 'd'], 4) + rng = random.RandomState(self.seed) + actual = rng.choice(['a', 'b', 'c', 'd'], 4) desired = np.array(['c', 'd', 'c', 'd']) assert_array_equal(actual, desired) @@ -436,7 +443,7 @@ def test_choice_return_shape(self): assert_(np.random.choice(arr, replace=True) is a) # Check 0-d array - s = tuple() + s = () assert_(not np.isscalar(np.random.choice(2, s, replace=True))) assert_(not np.isscalar(np.random.choice(2, s, replace=False))) assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p))) @@ -473,8 +480,8 @@ def test_choice_nan_probabilities(self): assert_raises(ValueError, np.random.choice, a, p=p) def test_bytes(self): - np.random.seed(self.seed) - actual = np.random.bytes(10) + rng = random.RandomState(self.seed) + actual = rng.bytes(10) desired = b'\x82Ui\x9e\xff\x97+Wf\xa5' assert_equal(actual, desired) @@ -497,9 +504,9 @@ def test_shuffle(self): # gh-4270 lambda x: np.asarray([(i, i) for i in x], [("a", object), ("b", np.int32)])]: - np.random.seed(self.seed) + rng = random.RandomState(self.seed) alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) - np.random.shuffle(alist) + rng.shuffle(alist) actual = alist desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3]) 
assert_array_equal(actual, desired) @@ -559,11 +566,11 @@ def test_shuffle_memoryview(self): # gh-18273 # allow graceful handling of memoryviews # (treat the same as arrays) - np.random.seed(self.seed) + rng = random.RandomState(self.seed) a = np.arange(5).data - np.random.shuffle(a) + rng.shuffle(a) assert_equal(np.asarray(a), [0, 1, 4, 3, 2]) - rng = np.random.RandomState(self.seed) + rng = random.RandomState(self.seed) rng.shuffle(a) assert_equal(np.asarray(a), [0, 1, 2, 3, 4]) rng = np.random.default_rng(self.seed) @@ -577,8 +584,8 @@ def test_shuffle_not_writeable(self): np.random.shuffle(a) def test_beta(self): - np.random.seed(self.seed) - actual = np.random.beta(.1, .9, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.beta(.1, .9, size=(3, 2)) desired = np.array( [[1.45341850513746058e-02, 5.31297615662868145e-04], [1.85366619058432324e-06, 4.19214516800110563e-03], @@ -586,25 +593,25 @@ def test_beta(self): assert_array_almost_equal(actual, desired, decimal=15) def test_binomial(self): - np.random.seed(self.seed) - actual = np.random.binomial(100, .456, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.binomial(100, .456, size=(3, 2)) desired = np.array([[37, 43], [42, 48], [46, 45]]) assert_array_equal(actual, desired) def test_chisquare(self): - np.random.seed(self.seed) - actual = np.random.chisquare(50, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.chisquare(50, size=(3, 2)) desired = np.array([[63.87858175501090585, 68.68407748911370447], [65.77116116901505904, 47.09686762438974483], [72.3828403199695174, 74.18408615260374006]]) assert_array_almost_equal(actual, desired, decimal=13) def test_dirichlet(self): - np.random.seed(self.seed) + rng = random.RandomState(self.seed) alpha = np.array([51.72840233779265162, 39.74494232180943953]) - actual = np.random.mtrand.dirichlet(alpha, size=(3, 2)) + actual = rng.dirichlet(alpha, size=(3, 2)) desired = np.array([[[0.54539444573611562, 0.45460555426388438], 
[0.62345816822039413, 0.37654183177960598]], [[0.55206000085785778, 0.44793999914214233], @@ -637,8 +644,8 @@ def test_dirichlet_bad_alpha(self): assert_raises(ValueError, random.dirichlet, np.array([[5, 1], [1, 5]])) def test_exponential(self): - np.random.seed(self.seed) - actual = np.random.exponential(1.1234, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.exponential(1.1234, size=(3, 2)) desired = np.array([[1.08342649775011624, 1.00607889924557314], [2.46628830085216721, 2.49668106809923884], [0.68717433461363442, 1.69175666993575979]]) @@ -649,16 +656,16 @@ def test_exponential_0(self): assert_raises(ValueError, np.random.exponential, scale=-0.) def test_f(self): - np.random.seed(self.seed) - actual = np.random.f(12, 77, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.f(12, 77, size=(3, 2)) desired = np.array([[1.21975394418575878, 1.75135759791559775], [1.44803115017146489, 1.22108959480396262], [1.02176975757740629, 1.34431827623300415]]) assert_array_almost_equal(actual, desired, decimal=15) def test_gamma(self): - np.random.seed(self.seed) - actual = np.random.gamma(5, 3, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.gamma(5, 3, size=(3, 2)) desired = np.array([[24.60509188649287182, 28.54993563207210627], [26.13476110204064184, 12.56988482927716078], [31.71863275789960568, 33.30143302795922011]]) @@ -669,16 +676,16 @@ def test_gamma_0(self): assert_raises(ValueError, np.random.gamma, shape=-0., scale=-0.) 
def test_geometric(self): - np.random.seed(self.seed) - actual = np.random.geometric(.123456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.geometric(.123456789, size=(3, 2)) desired = np.array([[8, 7], [17, 17], [5, 12]]) assert_array_equal(actual, desired) def test_gumbel(self): - np.random.seed(self.seed) - actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[0.19591898743416816, 0.34405539668096674], [-1.4492522252274278, -1.47374816298446865], [1.10651090478803416, -0.69535848626236174]]) @@ -689,34 +696,34 @@ def test_gumbel_0(self): assert_raises(ValueError, np.random.gumbel, scale=-0.) def test_hypergeometric(self): - np.random.seed(self.seed) - actual = np.random.hypergeometric(10, 5, 14, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(10, 5, 14, size=(3, 2)) desired = np.array([[10, 10], [10, 10], [9, 9]]) assert_array_equal(actual, desired) # Test nbad = 0 - actual = np.random.hypergeometric(5, 0, 3, size=4) + actual = rng.hypergeometric(5, 0, 3, size=4) desired = np.array([3, 3, 3, 3]) assert_array_equal(actual, desired) - actual = np.random.hypergeometric(15, 0, 12, size=4) + actual = rng.hypergeometric(15, 0, 12, size=4) desired = np.array([12, 12, 12, 12]) assert_array_equal(actual, desired) # Test ngood = 0 - actual = np.random.hypergeometric(0, 5, 3, size=4) + actual = rng.hypergeometric(0, 5, 3, size=4) desired = np.array([0, 0, 0, 0]) assert_array_equal(actual, desired) - actual = np.random.hypergeometric(0, 15, 12, size=4) + actual = rng.hypergeometric(0, 15, 12, size=4) desired = np.array([0, 0, 0, 0]) assert_array_equal(actual, desired) def test_laplace(self): - np.random.seed(self.seed) - actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.laplace(loc=.123456789, scale=2.0, size=(3, 2)) 
desired = np.array([[0.66599721112760157, 0.52829452552221945], [3.12791959514407125, 3.18202813572992005], [-0.05391065675859356, 1.74901336242837324]]) @@ -727,16 +734,16 @@ def test_laplace_0(self): assert_raises(ValueError, np.random.laplace, scale=-0.) def test_logistic(self): - np.random.seed(self.seed) - actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.logistic(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[1.09232835305011444, 0.8648196662399954], [4.27818590694950185, 4.33897006346929714], [-0.21682183359214885, 2.63373365386060332]]) assert_array_almost_equal(actual, desired, decimal=15) def test_lognormal(self): - np.random.seed(self.seed) - actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) desired = np.array([[16.50698631688883822, 36.54846706092654784], [22.67886599981281748, 0.71617561058995771], [65.72798501792723869, 86.84341601437161273]]) @@ -747,16 +754,16 @@ def test_lognormal_0(self): assert_raises(ValueError, np.random.lognormal, sigma=-0.) def test_logseries(self): - np.random.seed(self.seed) - actual = np.random.logseries(p=.923456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.logseries(p=.923456789, size=(3, 2)) desired = np.array([[2, 2], [6, 17], [3, 6]]) assert_array_equal(actual, desired) def test_multinomial(self): - np.random.seed(self.seed) - actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.multinomial(20, [1 / 6.] 
* 6, size=(3, 2)) desired = np.array([[[4, 3, 5, 4, 2, 2], [5, 2, 8, 2, 2, 1]], [[3, 4, 3, 6, 0, 4], @@ -766,11 +773,11 @@ def test_multinomial(self): assert_array_equal(actual, desired) def test_multivariate_normal(self): - np.random.seed(self.seed) + rng = random.RandomState(self.seed) mean = (.123456789, 10) cov = [[1, 0], [0, 1]] size = (3, 2) - actual = np.random.multivariate_normal(mean, cov, size) + actual = rng.multivariate_normal(mean, cov, size) desired = np.array([[[1.463620246718631, 11.73759122771936], [1.622445133300628, 9.771356667546383]], [[2.154490787682787, 12.170324946056553], @@ -781,7 +788,7 @@ def test_multivariate_normal(self): assert_array_almost_equal(actual, desired, decimal=15) # Check for default size, was raising deprecation warning - actual = np.random.multivariate_normal(mean, cov) + actual = rng.multivariate_normal(mean, cov) desired = np.array([0.895289569463708, 9.17180864067987]) assert_array_almost_equal(actual, desired, decimal=15) @@ -789,54 +796,53 @@ def test_multivariate_normal(self): # RuntimeWarning mean = [0, 0] cov = [[1, 2], [2, 1]] - assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov) + pytest.warns(RuntimeWarning, rng.multivariate_normal, mean, cov) # and that it doesn't warn with RuntimeWarning check_valid='ignore' - assert_no_warnings(np.random.multivariate_normal, mean, cov, + assert_no_warnings(rng.multivariate_normal, mean, cov, check_valid='ignore') # and that it raises with RuntimeWarning check_valid='raises' - assert_raises(ValueError, np.random.multivariate_normal, mean, cov, + assert_raises(ValueError, rng.multivariate_normal, mean, cov, check_valid='raise') cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32) - with suppress_warnings() as sup: - np.random.multivariate_normal(mean, cov) - w = sup.record(RuntimeWarning) - assert len(w) == 0 + with warnings.catch_warnings(): + warnings.simplefilter('error') + rng.multivariate_normal(mean, cov) def test_negative_binomial(self): - 
np.random.seed(self.seed) - actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.negative_binomial(n=100, p=.12345, size=(3, 2)) desired = np.array([[848, 841], [892, 611], [779, 647]]) assert_array_equal(actual, desired) def test_noncentral_chisquare(self): - np.random.seed(self.seed) - actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) desired = np.array([[23.91905354498517511, 13.35324692733826346], [31.22452661329736401, 16.60047399466177254], [5.03461598262724586, 17.94973089023519464]]) assert_array_almost_equal(actual, desired, decimal=14) - actual = np.random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) + actual = rng.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) desired = np.array([[1.47145377828516666, 0.15052899268012659], [0.00943803056963588, 1.02647251615666169], [0.332334982684171, 0.15451287602753125]]) assert_array_almost_equal(actual, desired, decimal=14) - np.random.seed(self.seed) - actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) desired = np.array([[9.597154162763948, 11.725484450296079], [10.413711048138335, 3.694475922923986], [13.484222138963087, 14.377255424602957]]) assert_array_almost_equal(actual, desired, decimal=14) def test_noncentral_f(self): - np.random.seed(self.seed) - actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1, + rng = random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum=5, dfden=2, nonc=1, size=(3, 2)) desired = np.array([[1.40598099674926669, 0.34207973179285761], [3.57715069265772545, 7.92632662577829805], @@ -844,8 +850,8 @@ def test_noncentral_f(self): assert_array_almost_equal(actual, desired, decimal=14) def test_normal(self): - np.random.seed(self.seed) - actual = 
np.random.normal(loc=.123456789, scale=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.normal(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[2.80378370443726244, 3.59863924443872163], [3.121433477601256, -0.33382987590723379], [4.18552478636557357, 4.46410668111310471]]) @@ -856,8 +862,8 @@ def test_normal_0(self): assert_raises(ValueError, np.random.normal, scale=-0.) def test_pareto(self): - np.random.seed(self.seed) - actual = np.random.pareto(a=.123456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.pareto(a=.123456789, size=(3, 2)) desired = np.array( [[2.46852460439034849e+03, 1.41286880810518346e+03], [5.28287797029485181e+07, 6.57720981047328785e+07], @@ -871,8 +877,8 @@ def test_pareto(self): np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30) def test_poisson(self): - np.random.seed(self.seed) - actual = np.random.poisson(lam=.123456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.poisson(lam=.123456789, size=(3, 2)) desired = np.array([[0, 0], [1, 0], [0, 0]]) @@ -882,21 +888,21 @@ def test_poisson_exceptions(self): lambig = np.iinfo('l').max lamneg = -1 assert_raises(ValueError, np.random.poisson, lamneg) - assert_raises(ValueError, np.random.poisson, [lamneg]*10) + assert_raises(ValueError, np.random.poisson, [lamneg] * 10) assert_raises(ValueError, np.random.poisson, lambig) - assert_raises(ValueError, np.random.poisson, [lambig]*10) + assert_raises(ValueError, np.random.poisson, [lambig] * 10) def test_power(self): - np.random.seed(self.seed) - actual = np.random.power(a=.123456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.power(a=.123456789, size=(3, 2)) desired = np.array([[0.02048932883240791, 0.01424192241128213], [0.38446073748535298, 0.39499689943484395], [0.00177699707563439, 0.13115505880863756]]) assert_array_almost_equal(actual, desired, decimal=15) def test_rayleigh(self): - np.random.seed(self.seed) - actual = 
np.random.rayleigh(scale=10, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.rayleigh(scale=10, size=(3, 2)) desired = np.array([[13.8882496494248393, 13.383318339044731], [20.95413364294492098, 21.08285015800712614], [11.06066537006854311, 17.35468505778271009]]) @@ -907,24 +913,24 @@ def test_rayleigh_0(self): assert_raises(ValueError, np.random.rayleigh, scale=-0.) def test_standard_cauchy(self): - np.random.seed(self.seed) - actual = np.random.standard_cauchy(size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_cauchy(size=(3, 2)) desired = np.array([[0.77127660196445336, -6.55601161955910605], [0.93582023391158309, -2.07479293013759447], [-4.74601644297011926, 0.18338989290760804]]) assert_array_almost_equal(actual, desired, decimal=15) def test_standard_exponential(self): - np.random.seed(self.seed) - actual = np.random.standard_exponential(size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_exponential(size=(3, 2)) desired = np.array([[0.96441739162374596, 0.89556604882105506], [2.1953785836319808, 2.22243285392490542], [0.6116915921431676, 1.50592546727413201]]) assert_array_almost_equal(actual, desired, decimal=15) def test_standard_gamma(self): - np.random.seed(self.seed) - actual = np.random.standard_gamma(shape=3, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_gamma(shape=3, size=(3, 2)) desired = np.array([[5.50841531318455058, 6.62953470301903103], [5.93988484943779227, 2.31044849402133989], [7.54838614231317084, 8.012756093271868]]) @@ -935,24 +941,24 @@ def test_standard_gamma_0(self): assert_raises(ValueError, np.random.standard_gamma, shape=-0.) 
def test_standard_normal(self): - np.random.seed(self.seed) - actual = np.random.standard_normal(size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_normal(size=(3, 2)) desired = np.array([[1.34016345771863121, 1.73759122771936081], [1.498988344300628, -0.2286433324536169], [2.031033998682787, 2.17032494605655257]]) assert_array_almost_equal(actual, desired, decimal=15) def test_standard_t(self): - np.random.seed(self.seed) - actual = np.random.standard_t(df=10, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_t(df=10, size=(3, 2)) desired = np.array([[0.97140611862659965, -0.08830486548450577], [1.36311143689505321, -0.55317463909867071], [-0.18473749069684214, 0.61181537341755321]]) assert_array_almost_equal(actual, desired, decimal=15) def test_triangular(self): - np.random.seed(self.seed) - actual = np.random.triangular(left=5.12, mode=10.23, right=20.34, + rng = random.RandomState(self.seed) + actual = rng.triangular(left=5.12, mode=10.23, right=20.34, size=(3, 2)) desired = np.array([[12.68117178949215784, 12.4129206149193152], [16.20131377335158263, 16.25692138747600524], @@ -960,8 +966,8 @@ def test_triangular(self): assert_array_almost_equal(actual, desired, decimal=14) def test_uniform(self): - np.random.seed(self.seed) - actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.uniform(low=1.23, high=10.54, size=(3, 2)) desired = np.array([[6.99097932346268003, 6.73801597444323974], [9.50364421400426274, 9.53130618907631089], [5.48995325769805476, 8.47493103280052118]]) @@ -1008,8 +1014,8 @@ def __int__(self): assert_raises(TypeError, np.random.hypergeometric, throwing_int, 1, 1) def test_vonmises(self): - np.random.seed(self.seed) - actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) desired = np.array([[2.28567572673902042, 
2.89163838442285037], [0.38198375564286025, 2.57638023113890746], [1.19153771588353052, 1.83509849681825354]]) @@ -1022,16 +1028,16 @@ def test_vonmises_small(self): np.testing.assert_(np.isfinite(r).all()) def test_wald(self): - np.random.seed(self.seed) - actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.wald(mean=1.23, scale=1.54, size=(3, 2)) desired = np.array([[3.82935265715889983, 5.13125249184285526], [0.35045403618358717, 1.50832396872003538], [0.24124319895843183, 0.22031101461955038]]) assert_array_almost_equal(actual, desired, decimal=14) def test_weibull(self): - np.random.seed(self.seed) - actual = np.random.weibull(a=1.23, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.weibull(a=1.23, size=(3, 2)) desired = np.array([[0.97097342648766727, 0.91422896443565516], [1.89517770034962929, 1.91414357960479564], [0.67057783752390987, 1.39494046635066793]]) @@ -1043,8 +1049,8 @@ def test_weibull_0(self): assert_raises(ValueError, np.random.weibull, a=-0.) 
def test_zipf(self): - np.random.seed(self.seed) - actual = np.random.zipf(a=1.23, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.zipf(a=1.23, size=(3, 2)) desired = np.array([[66, 29], [1, 1], [3, 13]]) @@ -1054,11 +1060,7 @@ def test_zipf(self): class TestBroadcast: # tests that functions that broadcast behave # correctly when presented with non-scalar arguments - def setup_method(self): - self.seed = 123456789 - - def setSeed(self): - np.random.seed(self.seed) + seed = 123456789 # TODO: Include test for randint once it can broadcast # Can steal the test written in PR #6938 @@ -1066,129 +1068,122 @@ def setSeed(self): def test_uniform(self): low = [0] high = [1] - uniform = np.random.uniform desired = np.array([0.53283302478975902, 0.53413660089041659, 0.50955303552646702]) - self.setSeed() - actual = uniform(low * 3, high) + rng = random.RandomState(self.seed) + actual = rng.uniform(low * 3, high) assert_array_almost_equal(actual, desired, decimal=14) - self.setSeed() - actual = uniform(low, high * 3) + rng = random.RandomState(self.seed) + actual = rng.uniform(low, high * 3) assert_array_almost_equal(actual, desired, decimal=14) def test_normal(self): loc = [0] scale = [1] bad_scale = [-1] - normal = np.random.normal desired = np.array([2.2129019979039612, 2.1283977976520019, 1.8417114045748335]) - self.setSeed() - actual = normal(loc * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.normal(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, normal, loc * 3, bad_scale) + assert_raises(ValueError, rng.normal, loc * 3, bad_scale) - self.setSeed() - actual = normal(loc, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.normal(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, normal, loc, bad_scale * 3) + assert_raises(ValueError, rng.normal, loc, bad_scale * 3) def test_beta(self): a = [1] b = [2] bad_a = [-1] bad_b = [-2] 
- beta = np.random.beta desired = np.array([0.19843558305989056, 0.075230336409423643, 0.24976865978980844]) - self.setSeed() - actual = beta(a * 3, b) + rng = random.RandomState(self.seed) + actual = rng.beta(a * 3, b) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, beta, bad_a * 3, b) - assert_raises(ValueError, beta, a * 3, bad_b) + assert_raises(ValueError, rng.beta, bad_a * 3, b) + assert_raises(ValueError, rng.beta, a * 3, bad_b) - self.setSeed() - actual = beta(a, b * 3) + rng = random.RandomState(self.seed) + actual = rng.beta(a, b * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, beta, bad_a, b * 3) - assert_raises(ValueError, beta, a, bad_b * 3) + assert_raises(ValueError, rng.beta, bad_a, b * 3) + assert_raises(ValueError, rng.beta, a, bad_b * 3) def test_exponential(self): scale = [1] bad_scale = [-1] - exponential = np.random.exponential desired = np.array([0.76106853658845242, 0.76386282278691653, 0.71243813125891797]) - self.setSeed() - actual = exponential(scale * 3) + rng = random.RandomState(self.seed) + actual = rng.exponential(scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, exponential, bad_scale * 3) + assert_raises(ValueError, rng.exponential, bad_scale * 3) def test_standard_gamma(self): shape = [1] bad_shape = [-1] - std_gamma = np.random.standard_gamma desired = np.array([0.76106853658845242, 0.76386282278691653, 0.71243813125891797]) - self.setSeed() - actual = std_gamma(shape * 3) + rng = random.RandomState(self.seed) + actual = rng.standard_gamma(shape * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, std_gamma, bad_shape * 3) + assert_raises(ValueError, rng.standard_gamma, bad_shape * 3) def test_gamma(self): shape = [1] scale = [2] bad_shape = [-1] bad_scale = [-2] - gamma = np.random.gamma desired = np.array([1.5221370731769048, 1.5277256455738331, 1.4248762625178359]) - 
self.setSeed() - actual = gamma(shape * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.gamma(shape * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gamma, bad_shape * 3, scale) - assert_raises(ValueError, gamma, shape * 3, bad_scale) + assert_raises(ValueError, rng.gamma, bad_shape * 3, scale) + assert_raises(ValueError, rng.gamma, shape * 3, bad_scale) - self.setSeed() - actual = gamma(shape, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.gamma(shape, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gamma, bad_shape, scale * 3) - assert_raises(ValueError, gamma, shape, bad_scale * 3) + assert_raises(ValueError, rng.gamma, bad_shape, scale * 3) + assert_raises(ValueError, rng.gamma, shape, bad_scale * 3) def test_f(self): dfnum = [1] dfden = [2] bad_dfnum = [-1] bad_dfden = [-2] - f = np.random.f desired = np.array([0.80038951638264799, 0.86768719635363512, 2.7251095168386801]) - self.setSeed() - actual = f(dfnum * 3, dfden) + rng = random.RandomState(self.seed) + actual = rng.f(dfnum * 3, dfden) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, f, bad_dfnum * 3, dfden) - assert_raises(ValueError, f, dfnum * 3, bad_dfden) + assert_raises(ValueError, rng.f, bad_dfnum * 3, dfden) + assert_raises(ValueError, rng.f, dfnum * 3, bad_dfden) - self.setSeed() - actual = f(dfnum, dfden * 3) + rng = random.RandomState(self.seed) + actual = rng.f(dfnum, dfden * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, f, bad_dfnum, dfden * 3) - assert_raises(ValueError, f, dfnum, bad_dfden * 3) + assert_raises(ValueError, rng.f, bad_dfnum, dfden * 3) + assert_raises(ValueError, rng.f, dfnum, bad_dfden * 3) def test_noncentral_f(self): dfnum = [2] @@ -1197,256 +1192,242 @@ def test_noncentral_f(self): bad_dfnum = [0] bad_dfden = [-1] bad_nonc = [-2] - nonc_f = np.random.noncentral_f desired = 
np.array([9.1393943263705211, 13.025456344595602, 8.8018098359100545]) - self.setSeed() - actual = nonc_f(dfnum * 3, dfden, nonc) + rng = random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum * 3, dfden, nonc) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc) - assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc) - assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc) + assert_raises(ValueError, rng.noncentral_f, bad_dfnum * 3, dfden, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum * 3, bad_dfden, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum * 3, dfden, bad_nonc) - self.setSeed() - actual = nonc_f(dfnum, dfden * 3, nonc) + rng = random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum, dfden * 3, nonc) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc) - assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc) - assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc) + assert_raises(ValueError, rng.noncentral_f, bad_dfnum, dfden * 3, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum, bad_dfden * 3, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum, dfden * 3, bad_nonc) - self.setSeed() - actual = nonc_f(dfnum, dfden, nonc * 3) + rng = random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum, dfden, nonc * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3) - assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3) - assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3) + assert_raises(ValueError, rng.noncentral_f, bad_dfnum, dfden, nonc * 3) + assert_raises(ValueError, rng.noncentral_f, dfnum, bad_dfden, nonc * 3) + assert_raises(ValueError, rng.noncentral_f, dfnum, dfden, bad_nonc * 3) def test_noncentral_f_small_df(self): - self.setSeed() + rng = 
random.RandomState(self.seed) desired = np.array([6.869638627492048, 0.785880199263955]) - actual = np.random.noncentral_f(0.9, 0.9, 2, size=2) + actual = rng.noncentral_f(0.9, 0.9, 2, size=2) assert_array_almost_equal(actual, desired, decimal=14) def test_chisquare(self): df = [1] bad_df = [-1] - chisquare = np.random.chisquare desired = np.array([0.57022801133088286, 0.51947702108840776, 0.1320969254923558]) - self.setSeed() - actual = chisquare(df * 3) + rng = random.RandomState(self.seed) + actual = rng.chisquare(df * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, chisquare, bad_df * 3) + assert_raises(ValueError, rng.chisquare, bad_df * 3) def test_noncentral_chisquare(self): df = [1] nonc = [2] bad_df = [-1] bad_nonc = [-2] - nonc_chi = np.random.noncentral_chisquare desired = np.array([9.0015599467913763, 4.5804135049718742, 6.0872302432834564]) - self.setSeed() - actual = nonc_chi(df * 3, nonc) + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df * 3, nonc) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_chi, bad_df * 3, nonc) - assert_raises(ValueError, nonc_chi, df * 3, bad_nonc) + assert_raises(ValueError, rng.noncentral_chisquare, bad_df * 3, nonc) + assert_raises(ValueError, rng.noncentral_chisquare, df * 3, bad_nonc) - self.setSeed() - actual = nonc_chi(df, nonc * 3) + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df, nonc * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_chi, bad_df, nonc * 3) - assert_raises(ValueError, nonc_chi, df, bad_nonc * 3) + assert_raises(ValueError, rng.noncentral_chisquare, bad_df, nonc * 3) + assert_raises(ValueError, rng.noncentral_chisquare, df, bad_nonc * 3) def test_standard_t(self): df = [1] bad_df = [-1] - t = np.random.standard_t desired = np.array([3.0702872575217643, 5.8560725167361607, 1.0274791436474273]) - self.setSeed() - actual = t(df * 3) 
+ rng = random.RandomState(self.seed) + actual = rng.standard_t(df * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, t, bad_df * 3) + assert_raises(ValueError, rng.standard_t, bad_df * 3) def test_vonmises(self): mu = [2] kappa = [1] bad_kappa = [-1] - vonmises = np.random.vonmises desired = np.array([2.9883443664201312, -2.7064099483995943, -1.8672476700665914]) - self.setSeed() - actual = vonmises(mu * 3, kappa) + rng = random.RandomState(self.seed) + actual = rng.vonmises(mu * 3, kappa) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, vonmises, mu * 3, bad_kappa) + assert_raises(ValueError, rng.vonmises, mu * 3, bad_kappa) - self.setSeed() - actual = vonmises(mu, kappa * 3) + rng = random.RandomState(self.seed) + actual = rng.vonmises(mu, kappa * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, vonmises, mu, bad_kappa * 3) + assert_raises(ValueError, rng.vonmises, mu, bad_kappa * 3) def test_pareto(self): a = [1] bad_a = [-1] - pareto = np.random.pareto desired = np.array([1.1405622680198362, 1.1465519762044529, 1.0389564467453547]) - self.setSeed() - actual = pareto(a * 3) + rng = random.RandomState(self.seed) + actual = rng.pareto(a * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, pareto, bad_a * 3) + assert_raises(ValueError, rng.pareto, bad_a * 3) def test_weibull(self): a = [1] bad_a = [-1] - weibull = np.random.weibull desired = np.array([0.76106853658845242, 0.76386282278691653, 0.71243813125891797]) - self.setSeed() - actual = weibull(a * 3) + rng = random.RandomState(self.seed) + actual = rng.weibull(a * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, weibull, bad_a * 3) + assert_raises(ValueError, rng.weibull, bad_a * 3) def test_power(self): a = [1] bad_a = [-1] - power = np.random.power desired = np.array([0.53283302478975902, 0.53413660089041659, 
0.50955303552646702]) - self.setSeed() - actual = power(a * 3) + rng = random.RandomState(self.seed) + actual = rng.power(a * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, power, bad_a * 3) + assert_raises(ValueError, rng.power, bad_a * 3) def test_laplace(self): loc = [0] scale = [1] bad_scale = [-1] - laplace = np.random.laplace desired = np.array([0.067921356028507157, 0.070715642226971326, 0.019290950698972624]) - self.setSeed() - actual = laplace(loc * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.laplace(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, laplace, loc * 3, bad_scale) + assert_raises(ValueError, rng.laplace, loc * 3, bad_scale) - self.setSeed() - actual = laplace(loc, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.laplace(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, laplace, loc, bad_scale * 3) + assert_raises(ValueError, rng.laplace, loc, bad_scale * 3) def test_gumbel(self): loc = [0] scale = [1] bad_scale = [-1] - gumbel = np.random.gumbel desired = np.array([0.2730318639556768, 0.26936705726291116, 0.33906220393037939]) - self.setSeed() - actual = gumbel(loc * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.gumbel(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gumbel, loc * 3, bad_scale) + assert_raises(ValueError, rng.gumbel, loc * 3, bad_scale) - self.setSeed() - actual = gumbel(loc, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.gumbel(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gumbel, loc, bad_scale * 3) + assert_raises(ValueError, rng.gumbel, loc, bad_scale * 3) def test_logistic(self): loc = [0] scale = [1] bad_scale = [-1] - logistic = np.random.logistic desired = np.array([0.13152135837586171, 0.13675915696285773, 
0.038216792802833396]) - self.setSeed() - actual = logistic(loc * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.logistic(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, logistic, loc * 3, bad_scale) + assert_raises(ValueError, rng.logistic, loc * 3, bad_scale) - self.setSeed() - actual = logistic(loc, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.logistic(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, logistic, loc, bad_scale * 3) + assert_raises(ValueError, rng.logistic, loc, bad_scale * 3) def test_lognormal(self): mean = [0] sigma = [1] bad_sigma = [-1] - lognormal = np.random.lognormal desired = np.array([9.1422086044848427, 8.4013952870126261, 6.3073234116578671]) - self.setSeed() - actual = lognormal(mean * 3, sigma) + rng = random.RandomState(self.seed) + actual = rng.lognormal(mean * 3, sigma) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, lognormal, mean * 3, bad_sigma) + assert_raises(ValueError, rng.lognormal, mean * 3, bad_sigma) - self.setSeed() - actual = lognormal(mean, sigma * 3) + rng = random.RandomState(self.seed) + actual = rng.lognormal(mean, sigma * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, lognormal, mean, bad_sigma * 3) + assert_raises(ValueError, rng.lognormal, mean, bad_sigma * 3) def test_rayleigh(self): scale = [1] bad_scale = [-1] - rayleigh = np.random.rayleigh desired = np.array([1.2337491937897689, 1.2360119924878694, 1.1936818095781789]) - self.setSeed() - actual = rayleigh(scale * 3) + rng = random.RandomState(self.seed) + actual = rng.rayleigh(scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, rayleigh, bad_scale * 3) + assert_raises(ValueError, rng.rayleigh, bad_scale * 3) def test_wald(self): mean = [0.5] scale = [1] bad_mean = [0] bad_scale = [-2] - wald = np.random.wald 
desired = np.array([0.11873681120271318, 0.12450084820795027, 0.9096122728408238]) - self.setSeed() - actual = wald(mean * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.wald(mean * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, wald, bad_mean * 3, scale) - assert_raises(ValueError, wald, mean * 3, bad_scale) + assert_raises(ValueError, rng.wald, bad_mean * 3, scale) + assert_raises(ValueError, rng.wald, mean * 3, bad_scale) - self.setSeed() - actual = wald(mean, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.wald(mean, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, wald, bad_mean, scale * 3) - assert_raises(ValueError, wald, mean, bad_scale * 3) - assert_raises(ValueError, wald, 0.0, 1) - assert_raises(ValueError, wald, 0.5, 0.0) + assert_raises(ValueError, rng.wald, bad_mean, scale * 3) + assert_raises(ValueError, rng.wald, mean, bad_scale * 3) + assert_raises(ValueError, rng.wald, 0.0, 1) + assert_raises(ValueError, rng.wald, 0.5, 0.0) def test_triangular(self): left = [1] @@ -1455,33 +1436,32 @@ def test_triangular(self): bad_left_one = [3] bad_mode_one = [4] bad_left_two, bad_mode_two = right * 2 - triangular = np.random.triangular desired = np.array([2.03339048710429, 2.0347400359389356, 2.0095991069536208]) - self.setSeed() - actual = triangular(left * 3, mode, right) + rng = random.RandomState(self.seed) + actual = rng.triangular(left * 3, mode, right) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, triangular, bad_left_one * 3, mode, right) - assert_raises(ValueError, triangular, left * 3, bad_mode_one, right) - assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, + assert_raises(ValueError, rng.triangular, bad_left_one * 3, mode, right) + assert_raises(ValueError, rng.triangular, left * 3, bad_mode_one, right) + assert_raises(ValueError, rng.triangular, bad_left_two * 3, 
bad_mode_two, right) - self.setSeed() - actual = triangular(left, mode * 3, right) + rng = random.RandomState(self.seed) + actual = rng.triangular(left, mode * 3, right) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, triangular, bad_left_one, mode * 3, right) - assert_raises(ValueError, triangular, left, bad_mode_one * 3, right) - assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, + assert_raises(ValueError, rng.triangular, bad_left_one, mode * 3, right) + assert_raises(ValueError, rng.triangular, left, bad_mode_one * 3, right) + assert_raises(ValueError, rng.triangular, bad_left_two, bad_mode_two * 3, right) - self.setSeed() - actual = triangular(left, mode, right * 3) + rng = random.RandomState(self.seed) + actual = rng.triangular(left, mode, right * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, triangular, bad_left_one, mode, right * 3) - assert_raises(ValueError, triangular, left, bad_mode_one, right * 3) - assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, + assert_raises(ValueError, rng.triangular, bad_left_one, mode, right * 3) + assert_raises(ValueError, rng.triangular, left, bad_mode_one, right * 3) + assert_raises(ValueError, rng.triangular, bad_left_two, bad_mode_two, right * 3) def test_binomial(self): @@ -1490,22 +1470,21 @@ def test_binomial(self): bad_n = [-1] bad_p_one = [-1] bad_p_two = [1.5] - binom = np.random.binomial desired = np.array([1, 1, 1]) - self.setSeed() - actual = binom(n * 3, p) + rng = random.RandomState(self.seed) + actual = rng.binomial(n * 3, p) assert_array_equal(actual, desired) - assert_raises(ValueError, binom, bad_n * 3, p) - assert_raises(ValueError, binom, n * 3, bad_p_one) - assert_raises(ValueError, binom, n * 3, bad_p_two) + assert_raises(ValueError, rng.binomial, bad_n * 3, p) + assert_raises(ValueError, rng.binomial, n * 3, bad_p_one) + assert_raises(ValueError, rng.binomial, n * 3, bad_p_two) - self.setSeed() 
- actual = binom(n, p * 3) + rng = random.RandomState(self.seed) + actual = rng.binomial(n, p * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, binom, bad_n, p * 3) - assert_raises(ValueError, binom, n, bad_p_one * 3) - assert_raises(ValueError, binom, n, bad_p_two * 3) + assert_raises(ValueError, rng.binomial, bad_n, p * 3) + assert_raises(ValueError, rng.binomial, n, bad_p_one * 3) + assert_raises(ValueError, rng.binomial, n, bad_p_two * 3) def test_negative_binomial(self): n = [1] @@ -1513,22 +1492,21 @@ def test_negative_binomial(self): bad_n = [-1] bad_p_one = [-1] bad_p_two = [1.5] - neg_binom = np.random.negative_binomial desired = np.array([1, 0, 1]) - self.setSeed() - actual = neg_binom(n * 3, p) + rng = random.RandomState(self.seed) + actual = rng.negative_binomial(n * 3, p) assert_array_equal(actual, desired) - assert_raises(ValueError, neg_binom, bad_n * 3, p) - assert_raises(ValueError, neg_binom, n * 3, bad_p_one) - assert_raises(ValueError, neg_binom, n * 3, bad_p_two) + assert_raises(ValueError, rng.negative_binomial, bad_n * 3, p) + assert_raises(ValueError, rng.negative_binomial, n * 3, bad_p_one) + assert_raises(ValueError, rng.negative_binomial, n * 3, bad_p_two) - self.setSeed() - actual = neg_binom(n, p * 3) + rng = random.RandomState(self.seed) + actual = rng.negative_binomial(n, p * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, neg_binom, bad_n, p * 3) - assert_raises(ValueError, neg_binom, n, bad_p_one * 3) - assert_raises(ValueError, neg_binom, n, bad_p_two * 3) + assert_raises(ValueError, rng.negative_binomial, bad_n, p * 3) + assert_raises(ValueError, rng.negative_binomial, n, bad_p_one * 3) + assert_raises(ValueError, rng.negative_binomial, n, bad_p_two * 3) def test_poisson(self): max_lam = np.random.RandomState()._poisson_lam_max @@ -1536,41 +1514,38 @@ def test_poisson(self): lam = [1] bad_lam_one = [-1] bad_lam_two = [max_lam * 2] - poisson = np.random.poisson desired = np.array([1, 1, 0]) - 
self.setSeed() - actual = poisson(lam * 3) + rng = random.RandomState(self.seed) + actual = rng.poisson(lam * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, poisson, bad_lam_one * 3) - assert_raises(ValueError, poisson, bad_lam_two * 3) + assert_raises(ValueError, rng.poisson, bad_lam_one * 3) + assert_raises(ValueError, rng.poisson, bad_lam_two * 3) def test_zipf(self): a = [2] bad_a = [0] - zipf = np.random.zipf desired = np.array([2, 2, 1]) - self.setSeed() - actual = zipf(a * 3) + rng = random.RandomState(self.seed) + actual = rng.zipf(a * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, zipf, bad_a * 3) + assert_raises(ValueError, rng.zipf, bad_a * 3) with np.errstate(invalid='ignore'): - assert_raises(ValueError, zipf, np.nan) - assert_raises(ValueError, zipf, [0, 0, np.nan]) + assert_raises(ValueError, rng.zipf, np.nan) + assert_raises(ValueError, rng.zipf, [0, 0, np.nan]) def test_geometric(self): p = [0.5] bad_p_one = [-1] bad_p_two = [1.5] - geom = np.random.geometric desired = np.array([2, 2, 2]) - self.setSeed() - actual = geom(p * 3) + rng = random.RandomState(self.seed) + actual = rng.geometric(p * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, geom, bad_p_one * 3) - assert_raises(ValueError, geom, bad_p_two * 3) + assert_raises(ValueError, rng.geometric, bad_p_one * 3) + assert_raises(ValueError, rng.geometric, bad_p_two * 3) def test_hypergeometric(self): ngood = [1] @@ -1580,52 +1555,49 @@ def test_hypergeometric(self): bad_nbad = [-2] bad_nsample_one = [0] bad_nsample_two = [4] - hypergeom = np.random.hypergeometric desired = np.array([1, 1, 1]) - self.setSeed() - actual = hypergeom(ngood * 3, nbad, nsample) + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(ngood * 3, nbad, nsample) assert_array_equal(actual, desired) - assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample) - assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample) - 
assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one) - assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two) + assert_raises(ValueError, rng.hypergeometric, bad_ngood * 3, nbad, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood * 3, bad_nbad, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood * 3, nbad, bad_nsample_one) + assert_raises(ValueError, rng.hypergeometric, ngood * 3, nbad, bad_nsample_two) - self.setSeed() - actual = hypergeom(ngood, nbad * 3, nsample) + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(ngood, nbad * 3, nsample) assert_array_equal(actual, desired) - assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample) - assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample) - assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one) - assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two) + assert_raises(ValueError, rng.hypergeometric, bad_ngood, nbad * 3, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood, bad_nbad * 3, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad * 3, bad_nsample_one) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad * 3, bad_nsample_two) - self.setSeed() - actual = hypergeom(ngood, nbad, nsample * 3) + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(ngood, nbad, nsample * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3) - assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3) - assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3) - assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3) + assert_raises(ValueError, rng.hypergeometric, bad_ngood, nbad, nsample * 3) + assert_raises(ValueError, rng.hypergeometric, ngood, bad_nbad, nsample * 3) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad, bad_nsample_one * 3) + 
assert_raises(ValueError, rng.hypergeometric, ngood, nbad, bad_nsample_two * 3) def test_logseries(self): p = [0.5] bad_p_one = [2] bad_p_two = [-1] - logseries = np.random.logseries desired = np.array([1, 1, 1]) - self.setSeed() - actual = logseries(p * 3) + rng = random.RandomState(self.seed) + actual = rng.logseries(p * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, logseries, bad_p_one * 3) - assert_raises(ValueError, logseries, bad_p_two * 3) + assert_raises(ValueError, rng.logseries, bad_p_one * 3) + assert_raises(ValueError, rng.logseries, bad_p_two * 3) @pytest.mark.skipif(IS_WASM, reason="can't start thread") class TestThread: # make sure each state produces the same sequence even in threads - def setup_method(self): - self.seeds = range(4) + seeds = range(4) def check_function(self, function, sz): from threading import Thread @@ -1661,19 +1633,17 @@ def gen_random(state, out): def test_multinomial(self): def gen_random(state, out): - out[...] = state.multinomial(10, [1/6.]*6, size=10000) + out[...] = state.multinomial(10, [1 / 6.] 
* 6, size=10000) self.check_function(gen_random, sz=(10000, 6)) # See Issue #4263 class TestSingleEltArrayInput: - def setup_method(self): - self.argOne = np.array([2]) - self.argTwo = np.array([3]) - self.argThree = np.array([4]) - self.tgtShape = (1,) + def _create_arrays(self): + return np.array([2]), np.array([3]), np.array([4]), (1,) def test_one_arg_funcs(self): + argOne, _, _, tgtShape = self._create_arrays() funcs = (np.random.exponential, np.random.standard_gamma, np.random.chisquare, np.random.standard_t, np.random.pareto, np.random.weibull, @@ -1688,11 +1658,12 @@ def test_one_arg_funcs(self): out = func(np.array([0.5])) else: - out = func(self.argOne) + out = func(argOne) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) def test_two_arg_funcs(self): + argOne, argTwo, _, tgtShape = self._create_arrays() funcs = (np.random.uniform, np.random.normal, np.random.beta, np.random.gamma, np.random.f, np.random.noncentral_chisquare, @@ -1708,18 +1679,19 @@ def test_two_arg_funcs(self): argTwo = np.array([0.5]) else: - argTwo = self.argTwo + argTwo = argTwo - out = func(self.argOne, argTwo) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo) + assert_equal(out.shape, tgtShape) - out = func(self.argOne[0], argTwo) - assert_equal(out.shape, self.tgtShape) + out = func(argOne[0], argTwo) + assert_equal(out.shape, tgtShape) - out = func(self.argOne, argTwo[0]) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo[0]) + assert_equal(out.shape, tgtShape) def test_randint(self): + _, _, _, tgtShape = self._create_arrays() itype = [bool, np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.int64, np.uint64] func = np.random.randint @@ -1728,24 +1700,25 @@ def test_randint(self): for dt in itype: out = func(low, high, dtype=dt) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) out = func(low[0], high, dtype=dt) - assert_equal(out.shape, self.tgtShape) + 
assert_equal(out.shape, tgtShape) out = func(low, high[0], dtype=dt) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) def test_three_arg_funcs(self): + argOne, argTwo, argThree, tgtShape = self._create_arrays() funcs = [np.random.noncentral_f, np.random.triangular, np.random.hypergeometric] for func in funcs: - out = func(self.argOne, self.argTwo, self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo, argThree) + assert_equal(out.shape, tgtShape) - out = func(self.argOne[0], self.argTwo, self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne[0], argTwo, argThree) + assert_equal(out.shape, tgtShape) - out = func(self.argOne, self.argTwo[0], self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo[0], argThree) + assert_equal(out.shape, tgtShape) diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py index 5121a684f693..63ffb5a86389 100644 --- a/numpy/random/tests/test_randomstate.py +++ b/numpy/random/tests/test_randomstate.py @@ -3,16 +3,20 @@ import sys import warnings -import numpy as np import pytest -from numpy.testing import ( - assert_, assert_raises, assert_equal, assert_warns, - assert_no_warnings, assert_array_equal, assert_array_almost_equal, - suppress_warnings, IS_WASM - ) -from numpy.random import MT19937, PCG64 +import numpy as np from numpy import random +from numpy.random import MT19937, PCG64 +from numpy.testing import ( + IS_WASM, + assert_, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_no_warnings, + assert_raises, +) INT_FUNCS = {'binomial': (100.0, 0.6), 'geometric': (.5,), @@ -26,24 +30,24 @@ if np.iinfo(np.long).max < 2**32: # Windows and some 32-bit platforms, e.g., ARM - INT_FUNC_HASHES = {'binomial': '2fbead005fc63942decb5326d36a1f32fe2c9d32c904ee61e46866b88447c263', - 'logseries': '23ead5dcde35d4cfd4ef2c105e4c3d43304b45dc1b1444b7823b9ee4fa144ebb', - 'geometric': 
'0d764db64f5c3bad48c8c33551c13b4d07a1e7b470f77629bef6c985cac76fcf', - 'hypergeometric': '7b59bf2f1691626c5815cdcd9a49e1dd68697251d4521575219e4d2a1b8b2c67', - 'multinomial': 'd754fa5b92943a38ec07630de92362dd2e02c43577fc147417dc5b9db94ccdd3', - 'negative_binomial': '8eb216f7cb2a63cf55605422845caaff002fddc64a7dc8b2d45acd477a49e824', - 'poisson': '70c891d76104013ebd6f6bcf30d403a9074b886ff62e4e6b8eb605bf1a4673b7', - 'zipf': '01f074f97517cd5d21747148ac6ca4074dde7fcb7acbaec0a936606fecacd93f', + INT_FUNC_HASHES = {'binomial': '2fbead005fc63942decb5326d36a1f32fe2c9d32c904ee61e46866b88447c263', # noqa: E501 + 'logseries': '23ead5dcde35d4cfd4ef2c105e4c3d43304b45dc1b1444b7823b9ee4fa144ebb', # noqa: E501 + 'geometric': '0d764db64f5c3bad48c8c33551c13b4d07a1e7b470f77629bef6c985cac76fcf', # noqa: E501 + 'hypergeometric': '7b59bf2f1691626c5815cdcd9a49e1dd68697251d4521575219e4d2a1b8b2c67', # noqa: E501 + 'multinomial': 'd754fa5b92943a38ec07630de92362dd2e02c43577fc147417dc5b9db94ccdd3', # noqa: E501 + 'negative_binomial': '8eb216f7cb2a63cf55605422845caaff002fddc64a7dc8b2d45acd477a49e824', # noqa: E501 + 'poisson': '70c891d76104013ebd6f6bcf30d403a9074b886ff62e4e6b8eb605bf1a4673b7', # noqa: E501 + 'zipf': '01f074f97517cd5d21747148ac6ca4074dde7fcb7acbaec0a936606fecacd93f', # noqa: E501 } else: - INT_FUNC_HASHES = {'binomial': '8626dd9d052cb608e93d8868de0a7b347258b199493871a1dc56e2a26cacb112', - 'geometric': '8edd53d272e49c4fc8fbbe6c7d08d563d62e482921f3131d0a0e068af30f0db9', - 'hypergeometric': '83496cc4281c77b786c9b7ad88b74d42e01603a55c60577ebab81c3ba8d45657', - 'logseries': '65878a38747c176bc00e930ebafebb69d4e1e16cd3a704e264ea8f5e24f548db', - 'multinomial': '7a984ae6dca26fd25374479e118b22f55db0aedccd5a0f2584ceada33db98605', - 'negative_binomial': 'd636d968e6a24ae92ab52fe11c46ac45b0897e98714426764e820a7d77602a61', - 'poisson': '956552176f77e7c9cb20d0118fc9cf690be488d790ed4b4c4747b965e61b0bb4', - 'zipf': 'f84ba7feffda41e606e20b28dfc0f1ea9964a74574513d4a4cbc98433a8bfa45', + 
INT_FUNC_HASHES = {'binomial': '8626dd9d052cb608e93d8868de0a7b347258b199493871a1dc56e2a26cacb112', # noqa: E501 + 'geometric': '8edd53d272e49c4fc8fbbe6c7d08d563d62e482921f3131d0a0e068af30f0db9', # noqa: E501 + 'hypergeometric': '83496cc4281c77b786c9b7ad88b74d42e01603a55c60577ebab81c3ba8d45657', # noqa: E501 + 'logseries': '65878a38747c176bc00e930ebafebb69d4e1e16cd3a704e264ea8f5e24f548db', # noqa: E501 + 'multinomial': '7a984ae6dca26fd25374479e118b22f55db0aedccd5a0f2584ceada33db98605', # noqa: E501 + 'negative_binomial': 'd636d968e6a24ae92ab52fe11c46ac45b0897e98714426764e820a7d77602a61', # noqa: E501 + 'poisson': '956552176f77e7c9cb20d0118fc9cf690be488d790ed4b4c4747b965e61b0bb4', # noqa: E501 + 'zipf': 'f84ba7feffda41e606e20b28dfc0f1ea9964a74574513d4a4cbc98433a8bfa45', # noqa: E501 } @@ -169,10 +173,10 @@ def test_p_non_contiguous(self): p = np.arange(15.) p /= np.sum(p[1::3]) pvals = p[1::3] - random.seed(1432985819) - non_contig = random.multinomial(100, pvals=pvals) - random.seed(1432985819) - contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals)) + rng = random.RandomState(1432985819) + non_contig = rng.multinomial(100, pvals=pvals) + rng = random.RandomState(1432985819) + contig = rng.multinomial(100, pvals=np.ascontiguousarray(pvals)) assert_array_equal(non_contig, contig) def test_multinomial_pvals_float32(self): @@ -187,136 +191,146 @@ def test_multinomial_n_float(self): # Non-index integer types should gracefully truncate floats random.multinomial(100.5, [0.2, 0.8]) + class TestSetState: - def setup_method(self): - self.seed = 1234567890 - self.random_state = random.RandomState(self.seed) - self.state = self.random_state.get_state() + def _create_state(self): + seed = 1234567890 + random_state = random.RandomState(seed) + state = random_state.get_state() + return random_state, state def test_basic(self): - old = self.random_state.tomaxint(16) - self.random_state.set_state(self.state) - new = self.random_state.tomaxint(16) + random_state, state 
= self._create_state() + old = random_state.tomaxint(16) + random_state.set_state(state) + new = random_state.tomaxint(16) assert_(np.all(old == new)) def test_gaussian_reset(self): # Make sure the cached every-other-Gaussian is reset. - old = self.random_state.standard_normal(size=3) - self.random_state.set_state(self.state) - new = self.random_state.standard_normal(size=3) + random_state, state = self._create_state() + old = random_state.standard_normal(size=3) + random_state.set_state(state) + new = random_state.standard_normal(size=3) assert_(np.all(old == new)) def test_gaussian_reset_in_media_res(self): # When the state is saved with a cached Gaussian, make sure the # cached Gaussian is restored. - - self.random_state.standard_normal() - state = self.random_state.get_state() - old = self.random_state.standard_normal(size=3) - self.random_state.set_state(state) - new = self.random_state.standard_normal(size=3) + random_state, state = self._create_state() + random_state.standard_normal() + state = random_state.get_state() + old = random_state.standard_normal(size=3) + random_state.set_state(state) + new = random_state.standard_normal(size=3) assert_(np.all(old == new)) def test_backwards_compatibility(self): # Make sure we can accept old state tuples that do not have the # cached Gaussian value. 
- old_state = self.state[:-2] - x1 = self.random_state.standard_normal(size=16) - self.random_state.set_state(old_state) - x2 = self.random_state.standard_normal(size=16) - self.random_state.set_state(self.state) - x3 = self.random_state.standard_normal(size=16) + random_state, state = self._create_state() + old_state = state[:-2] + x1 = random_state.standard_normal(size=16) + random_state.set_state(old_state) + x2 = random_state.standard_normal(size=16) + random_state.set_state(state) + x3 = random_state.standard_normal(size=16) assert_(np.all(x1 == x2)) assert_(np.all(x1 == x3)) def test_negative_binomial(self): # Ensure that the negative binomial results take floating point # arguments without truncation. - self.random_state.negative_binomial(0.5, 0.5) + random_state, _ = self._create_state() + random_state.negative_binomial(0.5, 0.5) def test_get_state_warning(self): rs = random.RandomState(PCG64()) - with suppress_warnings() as sup: - w = sup.record(RuntimeWarning) + with pytest.warns(RuntimeWarning): state = rs.get_state() - assert_(len(w) == 1) - assert isinstance(state, dict) - assert state['bit_generator'] == 'PCG64' + assert isinstance(state, dict) + assert state['bit_generator'] == 'PCG64' def test_invalid_legacy_state_setting(self): - state = self.random_state.get_state() + random_state, state = self._create_state() + state = random_state.get_state() new_state = ('Unknown', ) + state[1:] - assert_raises(ValueError, self.random_state.set_state, new_state) - assert_raises(TypeError, self.random_state.set_state, + assert_raises(ValueError, random_state.set_state, new_state) + assert_raises(TypeError, random_state.set_state, np.array(new_state, dtype=object)) - state = self.random_state.get_state(legacy=False) + state = random_state.get_state(legacy=False) del state['bit_generator'] - assert_raises(ValueError, self.random_state.set_state, state) + assert_raises(ValueError, random_state.set_state, state) def test_pickle(self): - self.random_state.seed(0) - 
self.random_state.random_sample(100) - self.random_state.standard_normal() - pickled = self.random_state.get_state(legacy=False) + random_state, _ = self._create_state() + random_state.seed(0) + random_state.random_sample(100) + random_state.standard_normal() + pickled = random_state.get_state(legacy=False) assert_equal(pickled['has_gauss'], 1) - rs_unpick = pickle.loads(pickle.dumps(self.random_state)) + rs_unpick = pickle.loads(pickle.dumps(random_state)) unpickled = rs_unpick.get_state(legacy=False) assert_mt19937_state_equal(pickled, unpickled) def test_state_setting(self): - attr_state = self.random_state.__getstate__() - self.random_state.standard_normal() - self.random_state.__setstate__(attr_state) - state = self.random_state.get_state(legacy=False) + random_state, state = self._create_state() + attr_state = random_state.__getstate__() + random_state.standard_normal() + random_state.__setstate__(attr_state) + state = random_state.get_state(legacy=False) assert_mt19937_state_equal(attr_state, state) def test_repr(self): - assert repr(self.random_state).startswith('RandomState(MT19937)') + random_state, _ = self._create_state() + assert repr(random_state).startswith('RandomState(MT19937)') class TestRandint: - rfunc = random.randint - # valid integer/boolean types itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.int64, np.uint64] def test_unsupported_type(self): - assert_raises(TypeError, self.rfunc, 1, dtype=float) + rng = np.random.RandomState() + assert_raises(TypeError, rng.randint, 1, dtype=float) def test_bounds_checking(self): + rng = np.random.RandomState() for dt in self.itype: lbnd = 0 if dt is np.bool else np.iinfo(dt).min ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 - assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt) - assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt) - assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt) - assert_raises(ValueError, self.rfunc, 1, 0, 
dtype=dt) + assert_raises(ValueError, rng.randint, lbnd - 1, ubnd, dtype=dt) + assert_raises(ValueError, rng.randint, lbnd, ubnd + 1, dtype=dt) + assert_raises(ValueError, rng.randint, ubnd, lbnd, dtype=dt) + assert_raises(ValueError, rng.randint, 1, 0, dtype=dt) def test_rng_zero_and_extremes(self): + rng = np.random.RandomState() for dt in self.itype: lbnd = 0 if dt is np.bool else np.iinfo(dt).min ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 tgt = ubnd - 1 - assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt) tgt = lbnd - assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt) - tgt = (lbnd + ubnd)//2 - assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + tgt = (lbnd + ubnd) // 2 + assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt) def test_full_range(self): # Test for ticket #1690 + rng = np.random.RandomState() for dt in self.itype: lbnd = 0 if dt is np.bool else np.iinfo(dt).min ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 try: - self.rfunc(lbnd, ubnd, dtype=dt) + rng.randint(lbnd, ubnd, dtype=dt) except Exception as e: raise AssertionError("No error should have been raised, " "but one was with the following " @@ -324,15 +338,15 @@ def test_full_range(self): def test_in_bounds_fuzz(self): # Don't use fixed seed - random.seed() + rng = np.random.RandomState() for dt in self.itype[1:]: for ubnd in [4, 8, 16]: - vals = self.rfunc(2, ubnd, size=2**16, dtype=dt) + vals = rng.randint(2, ubnd, size=2**16, dtype=dt) assert_(vals.max() < ubnd) assert_(vals.min() >= 2) - vals = self.rfunc(0, 2, size=2**16, dtype=np.bool) + vals = rng.randint(0, 2, size=2**16, dtype=np.bool) assert_(vals.max() < 2) assert_(vals.min() >= 0) @@ -341,31 +355,31 @@ def test_repeatability(self): # We use a sha256 hash of generated sequences of 1000 samples # in the range [0, 6) for all but 
bool, where the range # is [0, 2). Hashes are for little endian numbers. - tgt = {'bool': '509aea74d792fb931784c4b0135392c65aec64beee12b0cc167548a2c3d31e71', - 'int16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4', - 'int32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f', - 'int64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e', - 'int8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404', - 'uint16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4', - 'uint32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f', - 'uint64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e', - 'uint8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404'} + tgt = {'bool': '509aea74d792fb931784c4b0135392c65aec64beee12b0cc167548a2c3d31e71', # noqa: E501 + 'int16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4', # noqa: E501 + 'int32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f', # noqa: E501 + 'int64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e', # noqa: E501 + 'int8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404', # noqa: E501 + 'uint16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4', # noqa: E501 + 'uint32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f', # noqa: E501 + 'uint64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e', # noqa: E501 + 'uint8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404'} # noqa: E501 for dt in self.itype[1:]: - random.seed(1234) + rng = random.RandomState(1234) # view as little endian for hash if sys.byteorder == 'little': - val = self.rfunc(0, 6, size=1000, dtype=dt) + val = rng.randint(0, 6, size=1000, dtype=dt) else: - val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap() + val = rng.randint(0, 6, size=1000, dtype=dt).byteswap() res = 
hashlib.sha256(val.view(np.int8)).hexdigest() assert_(tgt[np.dtype(dt).name] == res) # bools do not depend on endianness - random.seed(1234) - val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8) + rng = random.RandomState(1234) + val = rng.randint(0, 2, size=1000, dtype=bool).view(np.int8) res = hashlib.sha256(val).hexdigest() assert_(tgt[np.dtype(bool).name] == res) @@ -388,8 +402,8 @@ def test_repeatability_32bit_boundary_broadcasting(self): [2978368172, 764731833, 2282559898], [ 105711276, 720447391, 3596512484]]]) for size in [None, (5, 3, 3)]: - random.seed(12345) - x = self.rfunc([[-1], [0], [1]], [2**32 - 1, 2**32, 2**32 + 1], + rng = random.RandomState(12345) + x = rng.randint([[-1], [0], [1]], [2**32 - 1, 2**32, 2**32 + 1], size=size) assert_array_equal(x, desired if size is not None else desired[0]) @@ -418,11 +432,13 @@ def test_int64_uint64_corner_case(self): def test_respect_dtype_singleton(self): # See gh-7203 + rng = np.random.RandomState() + for dt in self.itype: lbnd = 0 if dt is np.bool else np.iinfo(dt).min ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 - sample = self.rfunc(lbnd, ubnd, dtype=dt) + sample = rng.randint(lbnd, ubnd, dtype=dt) assert_equal(sample.dtype, np.dtype(dt)) for dt in (bool, int): @@ -433,7 +449,7 @@ def test_respect_dtype_singleton(self): lbnd = 0 if dt is bool else np.iinfo(op_dtype).min ubnd = 2 if dt is bool else np.iinfo(op_dtype).max + 1 - sample = self.rfunc(lbnd, ubnd, dtype=dt) + sample = rng.randint(lbnd, ubnd, dtype=dt) assert_(not hasattr(sample, 'dtype')) assert_equal(type(sample), dt) @@ -441,64 +457,57 @@ def test_respect_dtype_singleton(self): class TestRandomDist: # Make sure the random distribution returns the correct value for a # given seed - - def setup_method(self): - self.seed = 1234567890 + seed = 1234567890 def test_rand(self): - random.seed(self.seed) - actual = random.rand(3, 2) + rng = random.RandomState(self.seed) + actual = rng.rand(3, 2) desired = np.array([[0.61879477158567997, 
0.59162362775974664], [0.88868358904449662, 0.89165480011560816], [0.4575674820298663, 0.7781880808593471]]) assert_array_almost_equal(actual, desired, decimal=15) def test_rand_singleton(self): - random.seed(self.seed) - actual = random.rand() + rng = random.RandomState(self.seed) + actual = rng.rand() desired = 0.61879477158567997 assert_array_almost_equal(actual, desired, decimal=15) def test_randn(self): - random.seed(self.seed) - actual = random.randn(3, 2) + rng = random.RandomState(self.seed) + actual = rng.randn(3, 2) desired = np.array([[1.34016345771863121, 1.73759122771936081], [1.498988344300628, -0.2286433324536169], [2.031033998682787, 2.17032494605655257]]) assert_array_almost_equal(actual, desired, decimal=15) - random.seed(self.seed) - actual = random.randn() + rng = random.RandomState(self.seed) + actual = rng.randn() assert_array_almost_equal(actual, desired[0, 0], decimal=15) def test_randint(self): - random.seed(self.seed) - actual = random.randint(-99, 99, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.randint(-99, 99, size=(3, 2)) desired = np.array([[31, 3], [-52, 41], [-48, -66]]) assert_array_equal(actual, desired) def test_random_integers(self): - random.seed(self.seed) - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) - actual = random.random_integers(-99, 99, size=(3, 2)) - assert_(len(w) == 1) + rng = random.RandomState(self.seed) + with pytest.warns(DeprecationWarning): + actual = rng.random_integers(-99, 99, size=(3, 2)) desired = np.array([[31, 3], [-52, 41], [-48, -66]]) assert_array_equal(actual, desired) - random.seed(self.seed) - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) - actual = random.random_integers(198, size=(3, 2)) - assert_(len(w) == 1) + rng = random.RandomState(self.seed) + with pytest.warns(DeprecationWarning): + actual = rng.random_integers(198, size=(3, 2)) assert_array_equal(actual, desired + 100) def test_tomaxint(self): - random.seed(self.seed) 
rs = random.RandomState(self.seed) actual = rs.tomaxint(size=(3, 2)) if np.iinfo(np.long).max == 2147483647: @@ -523,20 +532,16 @@ def test_random_integers_max_int(self): # into a C long. Previous implementations of this # method have thrown an OverflowError when attempting # to generate this integer. - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) + with pytest.warns(DeprecationWarning): actual = random.random_integers(np.iinfo('l').max, np.iinfo('l').max) - assert_(len(w) == 1) desired = np.iinfo('l').max assert_equal(actual, desired) - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) + with pytest.warns(DeprecationWarning): typer = np.dtype('l').type actual = random.random_integers(typer(np.iinfo('l').max), typer(np.iinfo('l').max)) - assert_(len(w) == 1) assert_equal(actual, desired) def test_random_integers_deprecated(self): @@ -554,44 +559,44 @@ def test_random_integers_deprecated(self): np.iinfo('l').max, np.iinfo('l').max) def test_random_sample(self): - random.seed(self.seed) - actual = random.random_sample((3, 2)) + rng = random.RandomState(self.seed) + actual = rng.random_sample((3, 2)) desired = np.array([[0.61879477158567997, 0.59162362775974664], [0.88868358904449662, 0.89165480011560816], [0.4575674820298663, 0.7781880808593471]]) assert_array_almost_equal(actual, desired, decimal=15) - random.seed(self.seed) - actual = random.random_sample() + rng = random.RandomState(self.seed) + actual = rng.random_sample() assert_array_almost_equal(actual, desired[0, 0], decimal=15) def test_choice_uniform_replace(self): - random.seed(self.seed) - actual = random.choice(4, 4) + rng = random.RandomState(self.seed) + actual = rng.choice(4, 4) desired = np.array([2, 3, 2, 3]) assert_array_equal(actual, desired) def test_choice_nonuniform_replace(self): - random.seed(self.seed) - actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) + rng = random.RandomState(self.seed) + actual = rng.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) 
desired = np.array([1, 1, 2, 2]) assert_array_equal(actual, desired) def test_choice_uniform_noreplace(self): - random.seed(self.seed) - actual = random.choice(4, 3, replace=False) + rng = random.RandomState(self.seed) + actual = rng.choice(4, 3, replace=False) desired = np.array([0, 1, 3]) assert_array_equal(actual, desired) def test_choice_nonuniform_noreplace(self): - random.seed(self.seed) - actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1]) + rng = random.RandomState(self.seed) + actual = rng.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1]) desired = np.array([2, 3, 1]) assert_array_equal(actual, desired) def test_choice_noninteger(self): - random.seed(self.seed) - actual = random.choice(['a', 'b', 'c', 'd'], 4) + rng = random.RandomState(self.seed) + actual = rng.choice(['a', 'b', 'c', 'd'], 4) desired = np.array(['c', 'd', 'c', 'd']) assert_array_equal(actual, desired) @@ -629,7 +634,7 @@ def test_choice_return_shape(self): assert_(random.choice(arr, replace=True) is a) # Check 0-d array - s = tuple() + s = () assert_(not np.isscalar(random.choice(2, s, replace=True))) assert_(not np.isscalar(random.choice(2, s, replace=False))) assert_(not np.isscalar(random.choice(2, s, replace=True, p=p))) @@ -668,15 +673,15 @@ def test_choice_nan_probabilities(self): def test_choice_p_non_contiguous(self): p = np.ones(10) / 5 p[1::2] = 3.0 - random.seed(self.seed) - non_contig = random.choice(5, 3, p=p[::2]) - random.seed(self.seed) - contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2])) + rng = random.RandomState(self.seed) + non_contig = rng.choice(5, 3, p=p[::2]) + rng = random.RandomState(self.seed) + contig = rng.choice(5, 3, p=np.ascontiguousarray(p[::2])) assert_array_equal(non_contig, contig) def test_bytes(self): - random.seed(self.seed) - actual = random.bytes(10) + rng = random.RandomState(self.seed) + actual = rng.bytes(10) desired = b'\x82Ui\x9e\xff\x97+Wf\xa5' assert_equal(actual, desired) @@ -700,9 +705,9 @@ def 
test_shuffle(self): lambda x: np.asarray([(i, i) for i in x], [("a", object, (1,)), ("b", np.int32, (1,))])]: - random.seed(self.seed) + rng = random.RandomState(self.seed) alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) - random.shuffle(alist) + rng.shuffle(alist) actual = alist desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3]) assert_array_equal(actual, desired) @@ -726,35 +731,35 @@ def test_shuffle_invalid_objects(self): assert_raises(TypeError, random.shuffle, x) def test_permutation(self): - random.seed(self.seed) + rng = random.RandomState(self.seed) alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0] - actual = random.permutation(alist) + actual = rng.permutation(alist) desired = [0, 1, 9, 6, 2, 4, 5, 8, 7, 3] assert_array_equal(actual, desired) - random.seed(self.seed) + rng = random.RandomState(self.seed) arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T - actual = random.permutation(arr_2d) + actual = rng.permutation(arr_2d) assert_array_equal(actual, np.atleast_2d(desired).T) - random.seed(self.seed) + rng = random.RandomState(self.seed) bad_x_str = "abcd" assert_raises(IndexError, random.permutation, bad_x_str) - random.seed(self.seed) + rng = random.RandomState(self.seed) bad_x_float = 1.2 assert_raises(IndexError, random.permutation, bad_x_float) integer_val = 10 desired = [9, 0, 8, 5, 1, 3, 4, 7, 6, 2] - random.seed(self.seed) - actual = random.permutation(integer_val) + rng = random.RandomState(self.seed) + actual = rng.permutation(integer_val) assert_array_equal(actual, desired) def test_beta(self): - random.seed(self.seed) - actual = random.beta(.1, .9, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.beta(.1, .9, size=(3, 2)) desired = np.array( [[1.45341850513746058e-02, 5.31297615662868145e-04], [1.85366619058432324e-06, 4.19214516800110563e-03], @@ -762,30 +767,30 @@ def test_beta(self): assert_array_almost_equal(actual, desired, decimal=15) def test_binomial(self): - random.seed(self.seed) - actual = random.binomial(100.123, .456, 
size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.binomial(100.123, .456, size=(3, 2)) desired = np.array([[37, 43], [42, 48], [46, 45]]) assert_array_equal(actual, desired) - random.seed(self.seed) - actual = random.binomial(100.123, .456) + rng = random.RandomState(self.seed) + actual = rng.binomial(100.123, .456) desired = 37 assert_array_equal(actual, desired) def test_chisquare(self): - random.seed(self.seed) - actual = random.chisquare(50, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.chisquare(50, size=(3, 2)) desired = np.array([[63.87858175501090585, 68.68407748911370447], [65.77116116901505904, 47.09686762438974483], [72.3828403199695174, 74.18408615260374006]]) assert_array_almost_equal(actual, desired, decimal=13) def test_dirichlet(self): - random.seed(self.seed) + rng = random.RandomState(self.seed) alpha = np.array([51.72840233779265162, 39.74494232180943953]) - actual = random.dirichlet(alpha, size=(3, 2)) + actual = rng.dirichlet(alpha, size=(3, 2)) desired = np.array([[[0.54539444573611562, 0.45460555426388438], [0.62345816822039413, 0.37654183177960598]], [[0.55206000085785778, 0.44793999914214233], @@ -796,9 +801,9 @@ def test_dirichlet(self): bad_alpha = np.array([5.4e-01, -1.0e-16]) assert_raises(ValueError, random.dirichlet, bad_alpha) - random.seed(self.seed) + rng = random.RandomState(self.seed) alpha = np.array([51.72840233779265162, 39.74494232180943953]) - actual = random.dirichlet(alpha) + actual = rng.dirichlet(alpha) assert_array_almost_equal(actual, desired[0, 0], decimal=15) def test_dirichlet_size(self): @@ -821,16 +826,16 @@ def test_dirichlet_bad_alpha(self): def test_dirichlet_alpha_non_contiguous(self): a = np.array([51.72840233779265162, -1.0, 39.74494232180943953]) alpha = a[::2] - random.seed(self.seed) - non_contig = random.dirichlet(alpha, size=(3, 2)) - random.seed(self.seed) - contig = random.dirichlet(np.ascontiguousarray(alpha), + rng = random.RandomState(self.seed) + non_contig = 
rng.dirichlet(alpha, size=(3, 2)) + rng = random.RandomState(self.seed) + contig = rng.dirichlet(np.ascontiguousarray(alpha), size=(3, 2)) assert_array_almost_equal(non_contig, contig) def test_exponential(self): - random.seed(self.seed) - actual = random.exponential(1.1234, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.exponential(1.1234, size=(3, 2)) desired = np.array([[1.08342649775011624, 1.00607889924557314], [2.46628830085216721, 2.49668106809923884], [0.68717433461363442, 1.69175666993575979]]) @@ -841,16 +846,16 @@ def test_exponential_0(self): assert_raises(ValueError, random.exponential, scale=-0.) def test_f(self): - random.seed(self.seed) - actual = random.f(12, 77, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.f(12, 77, size=(3, 2)) desired = np.array([[1.21975394418575878, 1.75135759791559775], [1.44803115017146489, 1.22108959480396262], [1.02176975757740629, 1.34431827623300415]]) assert_array_almost_equal(actual, desired, decimal=15) def test_gamma(self): - random.seed(self.seed) - actual = random.gamma(5, 3, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.gamma(5, 3, size=(3, 2)) desired = np.array([[24.60509188649287182, 28.54993563207210627], [26.13476110204064184, 12.56988482927716078], [31.71863275789960568, 33.30143302795922011]]) @@ -861,8 +866,8 @@ def test_gamma_0(self): assert_raises(ValueError, random.gamma, shape=-0., scale=-0.) 
def test_geometric(self): - random.seed(self.seed) - actual = random.geometric(.123456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.geometric(.123456789, size=(3, 2)) desired = np.array([[8, 7], [17, 17], [5, 12]]) @@ -873,14 +878,14 @@ def test_geometric_exceptions(self): assert_raises(ValueError, random.geometric, [1.1] * 10) assert_raises(ValueError, random.geometric, -0.1) assert_raises(ValueError, random.geometric, [-0.1] * 10) - with suppress_warnings() as sup: - sup.record(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) assert_raises(ValueError, random.geometric, np.nan) assert_raises(ValueError, random.geometric, [np.nan] * 10) def test_gumbel(self): - random.seed(self.seed) - actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[0.19591898743416816, 0.34405539668096674], [-1.4492522252274278, -1.47374816298446865], [1.10651090478803416, -0.69535848626236174]]) @@ -891,34 +896,34 @@ def test_gumbel_0(self): assert_raises(ValueError, random.gumbel, scale=-0.) 
def test_hypergeometric(self): - random.seed(self.seed) - actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(10.1, 5.5, 14, size=(3, 2)) desired = np.array([[10, 10], [10, 10], [9, 9]]) assert_array_equal(actual, desired) # Test nbad = 0 - actual = random.hypergeometric(5, 0, 3, size=4) + actual = rng.hypergeometric(5, 0, 3, size=4) desired = np.array([3, 3, 3, 3]) assert_array_equal(actual, desired) - actual = random.hypergeometric(15, 0, 12, size=4) + actual = rng.hypergeometric(15, 0, 12, size=4) desired = np.array([12, 12, 12, 12]) assert_array_equal(actual, desired) # Test ngood = 0 - actual = random.hypergeometric(0, 5, 3, size=4) + actual = rng.hypergeometric(0, 5, 3, size=4) desired = np.array([0, 0, 0, 0]) assert_array_equal(actual, desired) - actual = random.hypergeometric(0, 15, 12, size=4) + actual = rng.hypergeometric(0, 15, 12, size=4) desired = np.array([0, 0, 0, 0]) assert_array_equal(actual, desired) def test_laplace(self): - random.seed(self.seed) - actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.laplace(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[0.66599721112760157, 0.52829452552221945], [3.12791959514407125, 3.18202813572992005], [-0.05391065675859356, 1.74901336242837324]]) @@ -929,16 +934,16 @@ def test_laplace_0(self): assert_raises(ValueError, random.laplace, scale=-0.) 
def test_logistic(self): - random.seed(self.seed) - actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.logistic(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[1.09232835305011444, 0.8648196662399954], [4.27818590694950185, 4.33897006346929714], [-0.21682183359214885, 2.63373365386060332]]) assert_array_almost_equal(actual, desired, decimal=15) def test_lognormal(self): - random.seed(self.seed) - actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) desired = np.array([[16.50698631688883822, 36.54846706092654784], [22.67886599981281748, 0.71617561058995771], [65.72798501792723869, 86.84341601437161273]]) @@ -949,8 +954,8 @@ def test_lognormal_0(self): assert_raises(ValueError, random.lognormal, sigma=-0.) def test_logseries(self): - random.seed(self.seed) - actual = random.logseries(p=.923456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.logseries(p=.923456789, size=(3, 2)) desired = np.array([[2, 2], [6, 17], [3, 6]]) @@ -972,8 +977,8 @@ def test_logseries_exceptions(self, value): random.logseries(np.array([value] * 10)[::2]) def test_multinomial(self): - random.seed(self.seed) - actual = random.multinomial(20, [1 / 6.] * 6, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.multinomial(20, [1 / 6.] 
* 6, size=(3, 2)) desired = np.array([[[4, 3, 5, 4, 2, 2], [5, 2, 8, 2, 2, 1]], [[3, 4, 3, 6, 0, 4], @@ -983,11 +988,11 @@ def test_multinomial(self): assert_array_equal(actual, desired) def test_multivariate_normal(self): - random.seed(self.seed) + rng = random.RandomState(self.seed) mean = (.123456789, 10) cov = [[1, 0], [0, 1]] size = (3, 2) - actual = random.multivariate_normal(mean, cov, size) + actual = rng.multivariate_normal(mean, cov, size) desired = np.array([[[1.463620246718631, 11.73759122771936], [1.622445133300628, 9.771356667546383]], [[2.154490787682787, 12.170324946056553], @@ -998,7 +1003,7 @@ def test_multivariate_normal(self): assert_array_almost_equal(actual, desired, decimal=15) # Check for default size, was raising deprecation warning - actual = random.multivariate_normal(mean, cov) + actual = rng.multivariate_normal(mean, cov) desired = np.array([0.895289569463708, 9.17180864067987]) assert_array_almost_equal(actual, desired, decimal=15) @@ -1006,72 +1011,71 @@ def test_multivariate_normal(self): # RuntimeWarning mean = [0, 0] cov = [[1, 2], [2, 1]] - assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov) + pytest.warns(RuntimeWarning, rng.multivariate_normal, mean, cov) # and that it doesn't warn with RuntimeWarning check_valid='ignore' - assert_no_warnings(random.multivariate_normal, mean, cov, + assert_no_warnings(rng.multivariate_normal, mean, cov, check_valid='ignore') # and that it raises with RuntimeWarning check_valid='raises' - assert_raises(ValueError, random.multivariate_normal, mean, cov, + assert_raises(ValueError, rng.multivariate_normal, mean, cov, check_valid='raise') cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32) - with suppress_warnings() as sup: - random.multivariate_normal(mean, cov) - w = sup.record(RuntimeWarning) - assert len(w) == 0 + with warnings.catch_warnings(): + warnings.simplefilter('error', RuntimeWarning) + rng.multivariate_normal(mean, cov) mu = np.zeros(2) cov = np.eye(2) - 
assert_raises(ValueError, random.multivariate_normal, mean, cov, + assert_raises(ValueError, rng.multivariate_normal, mean, cov, check_valid='other') - assert_raises(ValueError, random.multivariate_normal, + assert_raises(ValueError, rng.multivariate_normal, np.zeros((2, 1, 1)), cov) - assert_raises(ValueError, random.multivariate_normal, + assert_raises(ValueError, rng.multivariate_normal, mu, np.empty((3, 2))) - assert_raises(ValueError, random.multivariate_normal, + assert_raises(ValueError, rng.multivariate_normal, mu, np.eye(3)) def test_negative_binomial(self): - random.seed(self.seed) - actual = random.negative_binomial(n=100, p=.12345, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.negative_binomial(n=100, p=.12345, size=(3, 2)) desired = np.array([[848, 841], [892, 611], [779, 647]]) assert_array_equal(actual, desired) def test_negative_binomial_exceptions(self): - with suppress_warnings() as sup: - sup.record(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) assert_raises(ValueError, random.negative_binomial, 100, np.nan) assert_raises(ValueError, random.negative_binomial, 100, [np.nan] * 10) def test_noncentral_chisquare(self): - random.seed(self.seed) - actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) desired = np.array([[23.91905354498517511, 13.35324692733826346], [31.22452661329736401, 16.60047399466177254], [5.03461598262724586, 17.94973089023519464]]) assert_array_almost_equal(actual, desired, decimal=14) - actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) + actual = rng.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) desired = np.array([[1.47145377828516666, 0.15052899268012659], [0.00943803056963588, 1.02647251615666169], [0.332334982684171, 0.15451287602753125]]) assert_array_almost_equal(actual, desired, decimal=14) - random.seed(self.seed) - 
actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) desired = np.array([[9.597154162763948, 11.725484450296079], [10.413711048138335, 3.694475922923986], [13.484222138963087, 14.377255424602957]]) assert_array_almost_equal(actual, desired, decimal=14) def test_noncentral_f(self): - random.seed(self.seed) - actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1, + rng = random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum=5, dfden=2, nonc=1, size=(3, 2)) desired = np.array([[1.40598099674926669, 0.34207973179285761], [3.57715069265772545, 7.92632662577829805], @@ -1084,8 +1088,8 @@ def test_noncentral_f_nan(self): assert np.isnan(actual) def test_normal(self): - random.seed(self.seed) - actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.normal(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[2.80378370443726244, 3.59863924443872163], [3.121433477601256, -0.33382987590723379], [4.18552478636557357, 4.46410668111310471]]) @@ -1096,8 +1100,8 @@ def test_normal_0(self): assert_raises(ValueError, random.normal, scale=-0.) 
def test_pareto(self): - random.seed(self.seed) - actual = random.pareto(a=.123456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.pareto(a=.123456789, size=(3, 2)) desired = np.array( [[2.46852460439034849e+03, 1.41286880810518346e+03], [5.28287797029485181e+07, 6.57720981047328785e+07], @@ -1111,8 +1115,8 @@ def test_pareto(self): np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30) def test_poisson(self): - random.seed(self.seed) - actual = random.poisson(lam=.123456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.poisson(lam=.123456789, size=(3, 2)) desired = np.array([[0, 0], [1, 0], [0, 0]]) @@ -1125,22 +1129,22 @@ def test_poisson_exceptions(self): assert_raises(ValueError, random.poisson, [lamneg] * 10) assert_raises(ValueError, random.poisson, lambig) assert_raises(ValueError, random.poisson, [lambig] * 10) - with suppress_warnings() as sup: - sup.record(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) assert_raises(ValueError, random.poisson, np.nan) assert_raises(ValueError, random.poisson, [np.nan] * 10) def test_power(self): - random.seed(self.seed) - actual = random.power(a=.123456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.power(a=.123456789, size=(3, 2)) desired = np.array([[0.02048932883240791, 0.01424192241128213], [0.38446073748535298, 0.39499689943484395], [0.00177699707563439, 0.13115505880863756]]) assert_array_almost_equal(actual, desired, decimal=15) def test_rayleigh(self): - random.seed(self.seed) - actual = random.rayleigh(scale=10, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.rayleigh(scale=10, size=(3, 2)) desired = np.array([[13.8882496494248393, 13.383318339044731], [20.95413364294492098, 21.08285015800712614], [11.06066537006854311, 17.35468505778271009]]) @@ -1151,24 +1155,24 @@ def test_rayleigh_0(self): assert_raises(ValueError, random.rayleigh, scale=-0.) 
def test_standard_cauchy(self): - random.seed(self.seed) - actual = random.standard_cauchy(size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_cauchy(size=(3, 2)) desired = np.array([[0.77127660196445336, -6.55601161955910605], [0.93582023391158309, -2.07479293013759447], [-4.74601644297011926, 0.18338989290760804]]) assert_array_almost_equal(actual, desired, decimal=15) def test_standard_exponential(self): - random.seed(self.seed) - actual = random.standard_exponential(size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_exponential(size=(3, 2)) desired = np.array([[0.96441739162374596, 0.89556604882105506], [2.1953785836319808, 2.22243285392490542], [0.6116915921431676, 1.50592546727413201]]) assert_array_almost_equal(actual, desired, decimal=15) def test_standard_gamma(self): - random.seed(self.seed) - actual = random.standard_gamma(shape=3, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_gamma(shape=3, size=(3, 2)) desired = np.array([[5.50841531318455058, 6.62953470301903103], [5.93988484943779227, 2.31044849402133989], [7.54838614231317084, 8.012756093271868]]) @@ -1179,30 +1183,30 @@ def test_standard_gamma_0(self): assert_raises(ValueError, random.standard_gamma, shape=-0.) 
def test_standard_normal(self): - random.seed(self.seed) - actual = random.standard_normal(size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_normal(size=(3, 2)) desired = np.array([[1.34016345771863121, 1.73759122771936081], [1.498988344300628, -0.2286433324536169], [2.031033998682787, 2.17032494605655257]]) assert_array_almost_equal(actual, desired, decimal=15) def test_randn_singleton(self): - random.seed(self.seed) - actual = random.randn() + rng = random.RandomState(self.seed) + actual = rng.randn() desired = np.array(1.34016345771863121) assert_array_almost_equal(actual, desired, decimal=15) def test_standard_t(self): - random.seed(self.seed) - actual = random.standard_t(df=10, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_t(df=10, size=(3, 2)) desired = np.array([[0.97140611862659965, -0.08830486548450577], [1.36311143689505321, -0.55317463909867071], [-0.18473749069684214, 0.61181537341755321]]) assert_array_almost_equal(actual, desired, decimal=15) def test_triangular(self): - random.seed(self.seed) - actual = random.triangular(left=5.12, mode=10.23, right=20.34, + rng = random.RandomState(self.seed) + actual = rng.triangular(left=5.12, mode=10.23, right=20.34, size=(3, 2)) desired = np.array([[12.68117178949215784, 12.4129206149193152], [16.20131377335158263, 16.25692138747600524], @@ -1210,8 +1214,8 @@ def test_triangular(self): assert_array_almost_equal(actual, desired, decimal=14) def test_uniform(self): - random.seed(self.seed) - actual = random.uniform(low=1.23, high=10.54, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.uniform(low=1.23, high=10.54, size=(3, 2)) desired = np.array([[6.99097932346268003, 6.73801597444323974], [9.50364421400426274, 9.53130618907631089], [5.48995325769805476, 8.47493103280052118]]) @@ -1256,8 +1260,8 @@ def __int__(self): assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1) def test_vonmises(self): - random.seed(self.seed) - actual = 
random.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) desired = np.array([[2.28567572673902042, 2.89163838442285037], [0.38198375564286025, 2.57638023113890746], [1.19153771588353052, 1.83509849681825354]]) @@ -1271,8 +1275,8 @@ def test_vonmises_small(self): def test_vonmises_large(self): # guard against changes in RandomState when Generator is fixed - random.seed(self.seed) - actual = random.vonmises(mu=0., kappa=1e7, size=3) + rng = random.RandomState(self.seed) + actual = rng.vonmises(mu=0., kappa=1e7, size=3) desired = np.array([4.634253748521111e-04, 3.558873596114509e-04, -2.337119622577433e-04]) @@ -1284,16 +1288,16 @@ def test_vonmises_nan(self): assert_(np.isnan(r)) def test_wald(self): - random.seed(self.seed) - actual = random.wald(mean=1.23, scale=1.54, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.wald(mean=1.23, scale=1.54, size=(3, 2)) desired = np.array([[3.82935265715889983, 5.13125249184285526], [0.35045403618358717, 1.50832396872003538], [0.24124319895843183, 0.22031101461955038]]) assert_array_almost_equal(actual, desired, decimal=14) def test_weibull(self): - random.seed(self.seed) - actual = random.weibull(a=1.23, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.weibull(a=1.23, size=(3, 2)) desired = np.array([[0.97097342648766727, 0.91422896443565516], [1.89517770034962929, 1.91414357960479564], [0.67057783752390987, 1.39494046635066793]]) @@ -1305,8 +1309,8 @@ def test_weibull_0(self): assert_raises(ValueError, random.weibull, a=-0.) 
def test_zipf(self): - random.seed(self.seed) - actual = random.zipf(a=1.23, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.zipf(a=1.23, size=(3, 2)) desired = np.array([[66, 29], [1, 1], [3, 13]]) @@ -1316,138 +1320,127 @@ def test_zipf(self): class TestBroadcast: # tests that functions that broadcast behave # correctly when presented with non-scalar arguments - def setup_method(self): - self.seed = 123456789 - - def set_seed(self): - random.seed(self.seed) + seed = 123456789 def test_uniform(self): low = [0] high = [1] - uniform = random.uniform desired = np.array([0.53283302478975902, 0.53413660089041659, 0.50955303552646702]) - self.set_seed() - actual = uniform(low * 3, high) + rng = random.RandomState(self.seed) + actual = rng.uniform(low * 3, high) assert_array_almost_equal(actual, desired, decimal=14) - self.set_seed() - actual = uniform(low, high * 3) + rng = random.RandomState(self.seed) + actual = rng.uniform(low, high * 3) assert_array_almost_equal(actual, desired, decimal=14) def test_normal(self): loc = [0] scale = [1] bad_scale = [-1] - normal = random.normal desired = np.array([2.2129019979039612, 2.1283977976520019, 1.8417114045748335]) - self.set_seed() - actual = normal(loc * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.normal(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, normal, loc * 3, bad_scale) + assert_raises(ValueError, rng.normal, loc * 3, bad_scale) - self.set_seed() - actual = normal(loc, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.normal(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, normal, loc, bad_scale * 3) + assert_raises(ValueError, rng.normal, loc, bad_scale * 3) def test_beta(self): a = [1] b = [2] bad_a = [-1] bad_b = [-2] - beta = random.beta desired = np.array([0.19843558305989056, 0.075230336409423643, 0.24976865978980844]) - self.set_seed() - actual = beta(a * 3, 
b) + rng = random.RandomState(self.seed) + actual = rng.beta(a * 3, b) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, beta, bad_a * 3, b) - assert_raises(ValueError, beta, a * 3, bad_b) + assert_raises(ValueError, rng.beta, bad_a * 3, b) + assert_raises(ValueError, rng.beta, a * 3, bad_b) - self.set_seed() - actual = beta(a, b * 3) + rng = random.RandomState(self.seed) + actual = rng.beta(a, b * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, beta, bad_a, b * 3) - assert_raises(ValueError, beta, a, bad_b * 3) + assert_raises(ValueError, rng.beta, bad_a, b * 3) + assert_raises(ValueError, rng.beta, a, bad_b * 3) def test_exponential(self): scale = [1] bad_scale = [-1] - exponential = random.exponential desired = np.array([0.76106853658845242, 0.76386282278691653, 0.71243813125891797]) - self.set_seed() - actual = exponential(scale * 3) + rng = random.RandomState(self.seed) + actual = rng.exponential(scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, exponential, bad_scale * 3) + assert_raises(ValueError, rng.exponential, bad_scale * 3) def test_standard_gamma(self): shape = [1] bad_shape = [-1] - std_gamma = random.standard_gamma desired = np.array([0.76106853658845242, 0.76386282278691653, 0.71243813125891797]) - self.set_seed() - actual = std_gamma(shape * 3) + rng = random.RandomState(self.seed) + actual = rng.standard_gamma(shape * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, std_gamma, bad_shape * 3) + assert_raises(ValueError, rng.standard_gamma, bad_shape * 3) def test_gamma(self): shape = [1] scale = [2] bad_shape = [-1] bad_scale = [-2] - gamma = random.gamma desired = np.array([1.5221370731769048, 1.5277256455738331, 1.4248762625178359]) - self.set_seed() - actual = gamma(shape * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.gamma(shape * 3, scale) assert_array_almost_equal(actual, 
desired, decimal=14) - assert_raises(ValueError, gamma, bad_shape * 3, scale) - assert_raises(ValueError, gamma, shape * 3, bad_scale) + assert_raises(ValueError, rng.gamma, bad_shape * 3, scale) + assert_raises(ValueError, rng.gamma, shape * 3, bad_scale) - self.set_seed() - actual = gamma(shape, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.gamma(shape, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gamma, bad_shape, scale * 3) - assert_raises(ValueError, gamma, shape, bad_scale * 3) + assert_raises(ValueError, rng.gamma, bad_shape, scale * 3) + assert_raises(ValueError, rng.gamma, shape, bad_scale * 3) def test_f(self): dfnum = [1] dfden = [2] bad_dfnum = [-1] bad_dfden = [-2] - f = random.f desired = np.array([0.80038951638264799, 0.86768719635363512, 2.7251095168386801]) - self.set_seed() - actual = f(dfnum * 3, dfden) + rng = random.RandomState(self.seed) + actual = rng.f(dfnum * 3, dfden) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, f, bad_dfnum * 3, dfden) - assert_raises(ValueError, f, dfnum * 3, bad_dfden) + assert_raises(ValueError, rng.f, bad_dfnum * 3, dfden) + assert_raises(ValueError, rng.f, dfnum * 3, bad_dfden) - self.set_seed() - actual = f(dfnum, dfden * 3) + rng = random.RandomState(self.seed) + actual = rng.f(dfnum, dfden * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, f, bad_dfnum, dfden * 3) - assert_raises(ValueError, f, dfnum, bad_dfden * 3) + assert_raises(ValueError, rng.f, bad_dfnum, dfden * 3) + assert_raises(ValueError, rng.f, dfnum, bad_dfden * 3) def test_noncentral_f(self): dfnum = [2] @@ -1456,267 +1449,253 @@ def test_noncentral_f(self): bad_dfnum = [0] bad_dfden = [-1] bad_nonc = [-2] - nonc_f = random.noncentral_f desired = np.array([9.1393943263705211, 13.025456344595602, 8.8018098359100545]) - self.set_seed() - actual = nonc_f(dfnum * 3, dfden, nonc) + rng = 
random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum * 3, dfden, nonc) assert_array_almost_equal(actual, desired, decimal=14) - assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3))) + assert np.all(np.isnan(rng.noncentral_f(dfnum, dfden, [np.nan] * 3))) - assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc) - assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc) - assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc) + assert_raises(ValueError, rng.noncentral_f, bad_dfnum * 3, dfden, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum * 3, bad_dfden, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum * 3, dfden, bad_nonc) - self.set_seed() - actual = nonc_f(dfnum, dfden * 3, nonc) + rng = random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum, dfden * 3, nonc) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc) - assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc) - assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc) + assert_raises(ValueError, rng.noncentral_f, bad_dfnum, dfden * 3, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum, bad_dfden * 3, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum, dfden * 3, bad_nonc) - self.set_seed() - actual = nonc_f(dfnum, dfden, nonc * 3) + rng = random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum, dfden, nonc * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3) - assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3) - assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3) + assert_raises(ValueError, rng.noncentral_f, bad_dfnum, dfden, nonc * 3) + assert_raises(ValueError, rng.noncentral_f, dfnum, bad_dfden, nonc * 3) + assert_raises(ValueError, rng.noncentral_f, dfnum, dfden, bad_nonc * 3) def test_noncentral_f_small_df(self): - self.set_seed() + rng = 
random.RandomState(self.seed) desired = np.array([6.869638627492048, 0.785880199263955]) - actual = random.noncentral_f(0.9, 0.9, 2, size=2) + actual = rng.noncentral_f(0.9, 0.9, 2, size=2) assert_array_almost_equal(actual, desired, decimal=14) def test_chisquare(self): df = [1] bad_df = [-1] - chisquare = random.chisquare desired = np.array([0.57022801133088286, 0.51947702108840776, 0.1320969254923558]) - self.set_seed() - actual = chisquare(df * 3) + rng = random.RandomState(self.seed) + actual = rng.chisquare(df * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, chisquare, bad_df * 3) + assert_raises(ValueError, rng.chisquare, bad_df * 3) def test_noncentral_chisquare(self): df = [1] nonc = [2] bad_df = [-1] bad_nonc = [-2] - nonc_chi = random.noncentral_chisquare desired = np.array([9.0015599467913763, 4.5804135049718742, 6.0872302432834564]) - self.set_seed() - actual = nonc_chi(df * 3, nonc) + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df * 3, nonc) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_chi, bad_df * 3, nonc) - assert_raises(ValueError, nonc_chi, df * 3, bad_nonc) + assert_raises(ValueError, rng.noncentral_chisquare, bad_df * 3, nonc) + assert_raises(ValueError, rng.noncentral_chisquare, df * 3, bad_nonc) - self.set_seed() - actual = nonc_chi(df, nonc * 3) + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df, nonc * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_chi, bad_df, nonc * 3) - assert_raises(ValueError, nonc_chi, df, bad_nonc * 3) + assert_raises(ValueError, rng.noncentral_chisquare, bad_df, nonc * 3) + assert_raises(ValueError, rng.noncentral_chisquare, df, bad_nonc * 3) def test_standard_t(self): df = [1] bad_df = [-1] - t = random.standard_t desired = np.array([3.0702872575217643, 5.8560725167361607, 1.0274791436474273]) - self.set_seed() - actual = t(df * 3) + rng = 
random.RandomState(self.seed) + actual = rng.standard_t(df * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, t, bad_df * 3) + assert_raises(ValueError, rng.standard_t, bad_df * 3) assert_raises(ValueError, random.standard_t, bad_df * 3) def test_vonmises(self): mu = [2] kappa = [1] bad_kappa = [-1] - vonmises = random.vonmises desired = np.array([2.9883443664201312, -2.7064099483995943, -1.8672476700665914]) - self.set_seed() - actual = vonmises(mu * 3, kappa) + rng = random.RandomState(self.seed) + actual = rng.vonmises(mu * 3, kappa) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, vonmises, mu * 3, bad_kappa) + assert_raises(ValueError, rng.vonmises, mu * 3, bad_kappa) - self.set_seed() - actual = vonmises(mu, kappa * 3) + rng = random.RandomState(self.seed) + actual = rng.vonmises(mu, kappa * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, vonmises, mu, bad_kappa * 3) + assert_raises(ValueError, rng.vonmises, mu, bad_kappa * 3) def test_pareto(self): a = [1] bad_a = [-1] - pareto = random.pareto desired = np.array([1.1405622680198362, 1.1465519762044529, 1.0389564467453547]) - self.set_seed() - actual = pareto(a * 3) + rng = random.RandomState(self.seed) + actual = rng.pareto(a * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, pareto, bad_a * 3) + assert_raises(ValueError, rng.pareto, bad_a * 3) assert_raises(ValueError, random.pareto, bad_a * 3) def test_weibull(self): a = [1] bad_a = [-1] - weibull = random.weibull desired = np.array([0.76106853658845242, 0.76386282278691653, 0.71243813125891797]) - self.set_seed() - actual = weibull(a * 3) + rng = random.RandomState(self.seed) + actual = rng.weibull(a * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, weibull, bad_a * 3) + assert_raises(ValueError, rng.weibull, bad_a * 3) assert_raises(ValueError, random.weibull, bad_a * 3) 
def test_power(self): a = [1] bad_a = [-1] - power = random.power desired = np.array([0.53283302478975902, 0.53413660089041659, 0.50955303552646702]) - self.set_seed() - actual = power(a * 3) + rng = random.RandomState(self.seed) + actual = rng.power(a * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, power, bad_a * 3) + assert_raises(ValueError, rng.power, bad_a * 3) assert_raises(ValueError, random.power, bad_a * 3) def test_laplace(self): loc = [0] scale = [1] bad_scale = [-1] - laplace = random.laplace desired = np.array([0.067921356028507157, 0.070715642226971326, 0.019290950698972624]) - self.set_seed() - actual = laplace(loc * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.laplace(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, laplace, loc * 3, bad_scale) + assert_raises(ValueError, rng.laplace, loc * 3, bad_scale) - self.set_seed() - actual = laplace(loc, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.laplace(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, laplace, loc, bad_scale * 3) + assert_raises(ValueError, rng.laplace, loc, bad_scale * 3) def test_gumbel(self): loc = [0] scale = [1] bad_scale = [-1] - gumbel = random.gumbel desired = np.array([0.2730318639556768, 0.26936705726291116, 0.33906220393037939]) - self.set_seed() - actual = gumbel(loc * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.gumbel(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gumbel, loc * 3, bad_scale) + assert_raises(ValueError, rng.gumbel, loc * 3, bad_scale) - self.set_seed() - actual = gumbel(loc, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.gumbel(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gumbel, loc, bad_scale * 3) + assert_raises(ValueError, rng.gumbel, loc, 
bad_scale * 3) def test_logistic(self): loc = [0] scale = [1] bad_scale = [-1] - logistic = random.logistic desired = np.array([0.13152135837586171, 0.13675915696285773, 0.038216792802833396]) - self.set_seed() - actual = logistic(loc * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.logistic(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, logistic, loc * 3, bad_scale) + assert_raises(ValueError, rng.logistic, loc * 3, bad_scale) - self.set_seed() - actual = logistic(loc, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.logistic(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, logistic, loc, bad_scale * 3) - assert_equal(random.logistic(1.0, 0.0), 1.0) + assert_raises(ValueError, rng.logistic, loc, bad_scale * 3) + assert_equal(rng.logistic(1.0, 0.0), 1.0) def test_lognormal(self): mean = [0] sigma = [1] bad_sigma = [-1] - lognormal = random.lognormal desired = np.array([9.1422086044848427, 8.4013952870126261, 6.3073234116578671]) - self.set_seed() - actual = lognormal(mean * 3, sigma) + rng = random.RandomState(self.seed) + actual = rng.lognormal(mean * 3, sigma) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, lognormal, mean * 3, bad_sigma) + assert_raises(ValueError, rng.lognormal, mean * 3, bad_sigma) assert_raises(ValueError, random.lognormal, mean * 3, bad_sigma) - self.set_seed() - actual = lognormal(mean, sigma * 3) + rng = random.RandomState(self.seed) + actual = rng.lognormal(mean, sigma * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, lognormal, mean, bad_sigma * 3) + assert_raises(ValueError, rng.lognormal, mean, bad_sigma * 3) assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3) def test_rayleigh(self): scale = [1] bad_scale = [-1] - rayleigh = random.rayleigh desired = np.array([1.2337491937897689, 1.2360119924878694, 1.1936818095781789]) 
- self.set_seed() - actual = rayleigh(scale * 3) + rng = random.RandomState(self.seed) + actual = rng.rayleigh(scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, rayleigh, bad_scale * 3) + assert_raises(ValueError, rng.rayleigh, bad_scale * 3) def test_wald(self): mean = [0.5] scale = [1] bad_mean = [0] bad_scale = [-2] - wald = random.wald desired = np.array([0.11873681120271318, 0.12450084820795027, 0.9096122728408238]) - self.set_seed() - actual = wald(mean * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.wald(mean * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, wald, bad_mean * 3, scale) - assert_raises(ValueError, wald, mean * 3, bad_scale) + assert_raises(ValueError, rng.wald, bad_mean * 3, scale) + assert_raises(ValueError, rng.wald, mean * 3, bad_scale) assert_raises(ValueError, random.wald, bad_mean * 3, scale) assert_raises(ValueError, random.wald, mean * 3, bad_scale) - self.set_seed() - actual = wald(mean, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.wald(mean, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, wald, bad_mean, scale * 3) - assert_raises(ValueError, wald, mean, bad_scale * 3) - assert_raises(ValueError, wald, 0.0, 1) - assert_raises(ValueError, wald, 0.5, 0.0) + assert_raises(ValueError, rng.wald, bad_mean, scale * 3) + assert_raises(ValueError, rng.wald, mean, bad_scale * 3) + assert_raises(ValueError, rng.wald, 0.0, 1) + assert_raises(ValueError, rng.wald, 0.5, 0.0) def test_triangular(self): left = [1] @@ -1725,38 +1704,37 @@ def test_triangular(self): bad_left_one = [3] bad_mode_one = [4] bad_left_two, bad_mode_two = right * 2 - triangular = random.triangular desired = np.array([2.03339048710429, 2.0347400359389356, 2.0095991069536208]) - self.set_seed() - actual = triangular(left * 3, mode, right) + rng = random.RandomState(self.seed) + actual = rng.triangular(left * 
3, mode, right) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, triangular, bad_left_one * 3, mode, right) - assert_raises(ValueError, triangular, left * 3, bad_mode_one, right) - assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, + assert_raises(ValueError, rng.triangular, bad_left_one * 3, mode, right) + assert_raises(ValueError, rng.triangular, left * 3, bad_mode_one, right) + assert_raises(ValueError, rng.triangular, bad_left_two * 3, bad_mode_two, right) - self.set_seed() - actual = triangular(left, mode * 3, right) + rng = random.RandomState(self.seed) + actual = rng.triangular(left, mode * 3, right) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, triangular, bad_left_one, mode * 3, right) - assert_raises(ValueError, triangular, left, bad_mode_one * 3, right) - assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, + assert_raises(ValueError, rng.triangular, bad_left_one, mode * 3, right) + assert_raises(ValueError, rng.triangular, left, bad_mode_one * 3, right) + assert_raises(ValueError, rng.triangular, bad_left_two, bad_mode_two * 3, right) - self.set_seed() - actual = triangular(left, mode, right * 3) + rng = random.RandomState(self.seed) + actual = rng.triangular(left, mode, right * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, triangular, bad_left_one, mode, right * 3) - assert_raises(ValueError, triangular, left, bad_mode_one, right * 3) - assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, + assert_raises(ValueError, rng.triangular, bad_left_one, mode, right * 3) + assert_raises(ValueError, rng.triangular, left, bad_mode_one, right * 3) + assert_raises(ValueError, rng.triangular, bad_left_two, bad_mode_two, right * 3) - assert_raises(ValueError, triangular, 10., 0., 20.) - assert_raises(ValueError, triangular, 10., 25., 20.) - assert_raises(ValueError, triangular, 10., 10., 10.) 
+ assert_raises(ValueError, rng.triangular, 10., 0., 20.) + assert_raises(ValueError, rng.triangular, 10., 25., 20.) + assert_raises(ValueError, rng.triangular, 10., 10., 10.) def test_binomial(self): n = [1] @@ -1764,22 +1742,21 @@ def test_binomial(self): bad_n = [-1] bad_p_one = [-1] bad_p_two = [1.5] - binom = random.binomial desired = np.array([1, 1, 1]) - self.set_seed() - actual = binom(n * 3, p) + rng = random.RandomState(self.seed) + actual = rng.binomial(n * 3, p) assert_array_equal(actual, desired) - assert_raises(ValueError, binom, bad_n * 3, p) - assert_raises(ValueError, binom, n * 3, bad_p_one) - assert_raises(ValueError, binom, n * 3, bad_p_two) + assert_raises(ValueError, rng.binomial, bad_n * 3, p) + assert_raises(ValueError, rng.binomial, n * 3, bad_p_one) + assert_raises(ValueError, rng.binomial, n * 3, bad_p_two) - self.set_seed() - actual = binom(n, p * 3) + rng = random.RandomState(self.seed) + actual = rng.binomial(n, p * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, binom, bad_n, p * 3) - assert_raises(ValueError, binom, n, bad_p_one * 3) - assert_raises(ValueError, binom, n, bad_p_two * 3) + assert_raises(ValueError, rng.binomial, bad_n, p * 3) + assert_raises(ValueError, rng.binomial, n, bad_p_one * 3) + assert_raises(ValueError, rng.binomial, n, bad_p_two * 3) def test_negative_binomial(self): n = [1] @@ -1787,22 +1764,21 @@ def test_negative_binomial(self): bad_n = [-1] bad_p_one = [-1] bad_p_two = [1.5] - neg_binom = random.negative_binomial desired = np.array([1, 0, 1]) - self.set_seed() - actual = neg_binom(n * 3, p) + rng = random.RandomState(self.seed) + actual = rng.negative_binomial(n * 3, p) assert_array_equal(actual, desired) - assert_raises(ValueError, neg_binom, bad_n * 3, p) - assert_raises(ValueError, neg_binom, n * 3, bad_p_one) - assert_raises(ValueError, neg_binom, n * 3, bad_p_two) + assert_raises(ValueError, rng.negative_binomial, bad_n * 3, p) + assert_raises(ValueError, rng.negative_binomial, n * 
3, bad_p_one) + assert_raises(ValueError, rng.negative_binomial, n * 3, bad_p_two) - self.set_seed() - actual = neg_binom(n, p * 3) + rng = random.RandomState(self.seed) + actual = rng.negative_binomial(n, p * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, neg_binom, bad_n, p * 3) - assert_raises(ValueError, neg_binom, n, bad_p_one * 3) - assert_raises(ValueError, neg_binom, n, bad_p_two * 3) + assert_raises(ValueError, rng.negative_binomial, bad_n, p * 3) + assert_raises(ValueError, rng.negative_binomial, n, bad_p_one * 3) + assert_raises(ValueError, rng.negative_binomial, n, bad_p_two * 3) def test_poisson(self): max_lam = random.RandomState()._poisson_lam_max @@ -1810,41 +1786,38 @@ def test_poisson(self): lam = [1] bad_lam_one = [-1] bad_lam_two = [max_lam * 2] - poisson = random.poisson desired = np.array([1, 1, 0]) - self.set_seed() - actual = poisson(lam * 3) + rng = random.RandomState(self.seed) + actual = rng.poisson(lam * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, poisson, bad_lam_one * 3) - assert_raises(ValueError, poisson, bad_lam_two * 3) + assert_raises(ValueError, rng.poisson, bad_lam_one * 3) + assert_raises(ValueError, rng.poisson, bad_lam_two * 3) def test_zipf(self): a = [2] bad_a = [0] - zipf = random.zipf desired = np.array([2, 2, 1]) - self.set_seed() - actual = zipf(a * 3) + rng = random.RandomState(self.seed) + actual = rng.zipf(a * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, zipf, bad_a * 3) + assert_raises(ValueError, rng.zipf, bad_a * 3) with np.errstate(invalid='ignore'): - assert_raises(ValueError, zipf, np.nan) - assert_raises(ValueError, zipf, [0, 0, np.nan]) + assert_raises(ValueError, rng.zipf, np.nan) + assert_raises(ValueError, rng.zipf, [0, 0, np.nan]) def test_geometric(self): p = [0.5] bad_p_one = [-1] bad_p_two = [1.5] - geom = random.geometric desired = np.array([2, 2, 2]) - self.set_seed() - actual = geom(p * 3) + rng = random.RandomState(self.seed) + actual 
= rng.geometric(p * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, geom, bad_p_one * 3) - assert_raises(ValueError, geom, bad_p_two * 3) + assert_raises(ValueError, rng.geometric, bad_p_one * 3) + assert_raises(ValueError, rng.geometric, bad_p_two * 3) def test_hypergeometric(self): ngood = [1] @@ -1854,57 +1827,54 @@ def test_hypergeometric(self): bad_nbad = [-2] bad_nsample_one = [0] bad_nsample_two = [4] - hypergeom = random.hypergeometric desired = np.array([1, 1, 1]) - self.set_seed() - actual = hypergeom(ngood * 3, nbad, nsample) + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(ngood * 3, nbad, nsample) assert_array_equal(actual, desired) - assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample) - assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample) - assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one) - assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two) + assert_raises(ValueError, rng.hypergeometric, bad_ngood * 3, nbad, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood * 3, bad_nbad, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood * 3, nbad, bad_nsample_one) + assert_raises(ValueError, rng.hypergeometric, ngood * 3, nbad, bad_nsample_two) - self.set_seed() - actual = hypergeom(ngood, nbad * 3, nsample) + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(ngood, nbad * 3, nsample) assert_array_equal(actual, desired) - assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample) - assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample) - assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one) - assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two) + assert_raises(ValueError, rng.hypergeometric, bad_ngood, nbad * 3, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood, bad_nbad * 3, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad * 3, 
bad_nsample_one) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad * 3, bad_nsample_two) - self.set_seed() - actual = hypergeom(ngood, nbad, nsample * 3) + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(ngood, nbad, nsample * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3) - assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3) - assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3) - assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3) + assert_raises(ValueError, rng.hypergeometric, bad_ngood, nbad, nsample * 3) + assert_raises(ValueError, rng.hypergeometric, ngood, bad_nbad, nsample * 3) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad, bad_nsample_one * 3) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad, bad_nsample_two * 3) - assert_raises(ValueError, hypergeom, -1, 10, 20) - assert_raises(ValueError, hypergeom, 10, -1, 20) - assert_raises(ValueError, hypergeom, 10, 10, 0) - assert_raises(ValueError, hypergeom, 10, 10, 25) + assert_raises(ValueError, rng.hypergeometric, -1, 10, 20) + assert_raises(ValueError, rng.hypergeometric, 10, -1, 20) + assert_raises(ValueError, rng.hypergeometric, 10, 10, 0) + assert_raises(ValueError, rng.hypergeometric, 10, 10, 25) def test_logseries(self): p = [0.5] bad_p_one = [2] bad_p_two = [-1] - logseries = random.logseries desired = np.array([1, 1, 1]) - self.set_seed() - actual = logseries(p * 3) + rng = random.RandomState(self.seed) + actual = rng.logseries(p * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, logseries, bad_p_one * 3) - assert_raises(ValueError, logseries, bad_p_two * 3) + assert_raises(ValueError, rng.logseries, bad_p_one * 3) + assert_raises(ValueError, rng.logseries, bad_p_two * 3) @pytest.mark.skipif(IS_WASM, reason="can't start thread") class TestThread: # make sure each state produces the same sequence even in threads - def 
setup_method(self): - self.seeds = range(4) + seeds = range(4) def check_function(self, function, sz): from threading import Thread @@ -1949,13 +1919,11 @@ def gen_random(state, out): # See Issue #4263 class TestSingleEltArrayInput: - def setup_method(self): - self.argOne = np.array([2]) - self.argTwo = np.array([3]) - self.argThree = np.array([4]) - self.tgtShape = (1,) + def _create_arrays(self): + return np.array([2]), np.array([3]), np.array([4]), (1,) def test_one_arg_funcs(self): + argOne, _, _, tgtShape = self._create_arrays() funcs = (random.exponential, random.standard_gamma, random.chisquare, random.standard_t, random.pareto, random.weibull, @@ -1970,11 +1938,12 @@ def test_one_arg_funcs(self): out = func(np.array([0.5])) else: - out = func(self.argOne) + out = func(argOne) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) def test_two_arg_funcs(self): + argOne, argTwo, _, tgtShape = self._create_arrays() funcs = (random.uniform, random.normal, random.beta, random.gamma, random.f, random.noncentral_chisquare, @@ -1990,30 +1959,31 @@ def test_two_arg_funcs(self): argTwo = np.array([0.5]) else: - argTwo = self.argTwo + argTwo = argTwo - out = func(self.argOne, argTwo) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo) + assert_equal(out.shape, tgtShape) - out = func(self.argOne[0], argTwo) - assert_equal(out.shape, self.tgtShape) + out = func(argOne[0], argTwo) + assert_equal(out.shape, tgtShape) - out = func(self.argOne, argTwo[0]) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo[0]) + assert_equal(out.shape, tgtShape) def test_three_arg_funcs(self): + argOne, argTwo, argThree, tgtShape = self._create_arrays() funcs = [random.noncentral_f, random.triangular, random.hypergeometric] for func in funcs: - out = func(self.argOne, self.argTwo, self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo, argThree) + assert_equal(out.shape, tgtShape) - out = 
func(self.argOne[0], self.argTwo, self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne[0], argTwo, argThree) + assert_equal(out.shape, tgtShape) - out = func(self.argOne, self.argTwo[0], self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo[0], argThree) + assert_equal(out.shape, tgtShape) # Ensure returned array dtype is correct for platform @@ -2026,9 +1996,9 @@ def test_integer_dtype(int_func): def test_integer_repeat(int_func): - random.seed(123456789) + rng = random.RandomState(123456789) fname, args, sha256 = int_func - f = getattr(random, fname) + f = getattr(rng, fname) val = f(*args, size=1000000) if sys.byteorder != 'little': val = val.byteswap() @@ -2064,6 +2034,7 @@ def test_randomstate_ctor_old_style_pickle(): assert_equal(state_a['gauss'], state_b['gauss']) +@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state") def test_hot_swap(restore_singleton_bitgen): # GH 21808 def_bg = np.random.default_rng(0) @@ -2075,6 +2046,7 @@ def test_hot_swap(restore_singleton_bitgen): assert bg is second_bg +@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state") def test_seed_alt_bit_gen(restore_singleton_bitgen): # GH 21808 bg = PCG64(0) @@ -2089,6 +2061,7 @@ def test_seed_alt_bit_gen(restore_singleton_bitgen): assert state["state"]["inc"] != new_state["state"]["inc"] +@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state") def test_state_error_alt_bit_gen(restore_singleton_bitgen): # GH 21808 state = np.random.get_state() @@ -2098,6 +2071,7 @@ def test_state_error_alt_bit_gen(restore_singleton_bitgen): np.random.set_state(state) +@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state") def test_swap_worked(restore_singleton_bitgen): # GH 21808 np.random.seed(98765) @@ -2116,6 +2090,7 @@ def test_swap_worked(restore_singleton_bitgen): assert new_state["state"]["inc"] == new_state["state"]["inc"] 
+@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state") def test_swapped_singleton_against_direct(restore_singleton_bitgen): np.random.set_bit_generator(PCG64(98765)) singleton_vals = np.random.randint(0, 2 ** 30, 10) diff --git a/numpy/random/tests/test_randomstate_regression.py b/numpy/random/tests/test_randomstate_regression.py index 3fd8776c7f96..1c8882d1b672 100644 --- a/numpy/random/tests/test_randomstate_regression.py +++ b/numpy/random/tests/test_randomstate_regression.py @@ -2,12 +2,9 @@ import pytest -from numpy.testing import ( - assert_, assert_array_equal, assert_raises, - ) import numpy as np - from numpy import random +from numpy.testing import assert_, assert_array_equal, assert_raises class TestRegression: @@ -57,9 +54,9 @@ def test_shuffle_mixed_dimension(self): [(1, 1), (2, 2), (3, 3), None], [1, (2, 2), (3, 3), None], [(1, 1), 2, 3, None]]: - random.seed(12345) + rng = random.RandomState(12345) shuffled = list(t) - random.shuffle(shuffled) + rng.shuffle(shuffled) expected = np.array([t[0], t[3], t[1], t[2]], dtype=object) assert_array_equal(np.array(shuffled, dtype=object), expected) @@ -71,7 +68,7 @@ def test_call_within_randomstate(self): random.seed(i) m.seed(4321) # If m.state is not honored, the result will change - assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res) + assert_array_equal(m.choice(10, size=10, p=np.ones(10) / 10.), res) def test_multivariate_normal_size_types(self): # Test for multivariate_normal issue with 'size' argument. 
@@ -99,7 +96,7 @@ def test_choice_sum_of_probs_tolerance(self): probs = np.array(counts, dtype=dt) / sum(counts) c = random.choice(a, p=probs) assert_(c in a) - assert_raises(ValueError, random.choice, a, p=probs*0.9) + assert_raises(ValueError, random.choice, a, p=probs * 0.9) def test_shuffle_of_array_of_different_length_strings(self): # Test that permuting an array of different length strings @@ -134,9 +131,9 @@ def test_permutation_subclass(self): class N(np.ndarray): pass - random.seed(1) + rng = random.RandomState(1) orig = np.arange(3).view(N) - perm = random.permutation(orig) + perm = rng.permutation(orig) assert_array_equal(perm, np.array([0, 2, 1])) assert_array_equal(orig, np.arange(3).view(N)) @@ -146,9 +143,9 @@ class M: def __array__(self, dtype=None, copy=None): return self.a - random.seed(1) + rng = random.RandomState(1) m = M() - perm = random.permutation(m) + perm = rng.permutation(m) assert_array_equal(perm, np.array([2, 1, 4, 0, 3])) assert_array_equal(m.__array__(), np.arange(5)) @@ -166,9 +163,9 @@ def test_named_argument_initialization(self): def test_choice_retun_dtype(self): # GH 9867, now long since the NumPy default changed. 
- c = np.random.choice(10, p=[.1]*10, size=2) + c = np.random.choice(10, p=[.1] * 10, size=2) assert c.dtype == np.dtype(np.long) - c = np.random.choice(10, p=[.1]*10, replace=False, size=2) + c = np.random.choice(10, p=[.1] * 10, replace=False, size=2) assert c.dtype == np.dtype(np.long) c = np.random.choice(10, size=2) assert c.dtype == np.dtype(np.long) @@ -179,27 +176,27 @@ def test_choice_retun_dtype(self): reason='Cannot test with 32-bit C long') def test_randint_117(self): # GH 14189 - random.seed(0) + rng = random.RandomState(0) expected = np.array([2357136044, 2546248239, 3071714933, 3626093760, 2588848963, 3684848379, 2340255427, 3638918503, 1819583497, 2678185683], dtype='int64') - actual = random.randint(2**32, size=10) + actual = rng.randint(2**32, size=10) assert_array_equal(actual, expected) def test_p_zero_stream(self): # Regression test for gh-14522. Ensure that future versions # generate the same variates as version 1.16. - np.random.seed(12345) - assert_array_equal(random.binomial(1, [0, 0.25, 0.5, 0.75, 1]), + rng = random.RandomState(12345) + assert_array_equal(rng.binomial(1, [0, 0.25, 0.5, 0.75, 1]), [0, 0, 0, 1, 1]) def test_n_zero_stream(self): # Regression test for gh-14522. Ensure that future versions # generate the same variates as version 1.16. 
- np.random.seed(8675309) + rng = random.RandomState(8675309) expected = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [3, 4, 2, 3, 3, 1, 5, 3, 1, 3]]) - assert_array_equal(random.binomial([[0], [10]], 0.25, size=(2, 10)), + assert_array_equal(rng.binomial([[0], [10]], 0.25, size=(2, 10)), expected) diff --git a/numpy/random/tests/test_regression.py b/numpy/random/tests/test_regression.py index f7b02dc4f7d7..f63c16650df8 100644 --- a/numpy/random/tests/test_regression.py +++ b/numpy/random/tests/test_regression.py @@ -1,9 +1,11 @@ +import inspect import sys -from numpy.testing import ( - assert_, assert_array_equal, assert_raises, - ) -from numpy import random + +import pytest + import numpy as np +from numpy import random +from numpy.testing import assert_, assert_array_equal, assert_raises class TestRegression: @@ -53,9 +55,9 @@ def test_shuffle_mixed_dimension(self): [(1, 1), (2, 2), (3, 3), None], [1, (2, 2), (3, 3), None], [(1, 1), 2, 3, None]]: - np.random.seed(12345) + rng = np.random.RandomState(12345) shuffled = list(t) - random.shuffle(shuffled) + rng.shuffle(shuffled) expected = np.array([t[0], t[3], t[1], t[2]], dtype=object) assert_array_equal(np.array(shuffled, dtype=object), expected) @@ -67,7 +69,7 @@ def test_call_within_randomstate(self): np.random.seed(i) m.seed(4321) # If m.state is not honored, the result will change - assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res) + assert_array_equal(m.choice(10, size=10, p=np.ones(10) / 10.), res) def test_multivariate_normal_size_types(self): # Test for multivariate_normal issue with 'size' argument. 
@@ -95,7 +97,7 @@ def test_choice_sum_of_probs_tolerance(self): probs = np.array(counts, dtype=dt) / sum(counts) c = np.random.choice(a, p=probs) assert_(c in a) - assert_raises(ValueError, np.random.choice, a, p=probs*0.9) + assert_raises(ValueError, np.random.choice, a, p=probs * 0.9) def test_shuffle_of_array_of_different_length_strings(self): # Test that permuting an array of different length strings @@ -130,9 +132,9 @@ def test_permutation_subclass(self): class N(np.ndarray): pass - np.random.seed(1) + rng = np.random.RandomState(1) orig = np.arange(3).view(N) - perm = np.random.permutation(orig) + perm = rng.permutation(orig) assert_array_equal(perm, np.array([0, 2, 1])) assert_array_equal(orig, np.arange(3).view(N)) @@ -142,8 +144,31 @@ class M: def __array__(self, dtype=None, copy=None): return self.a - np.random.seed(1) + rng = np.random.RandomState(1) m = M() - perm = np.random.permutation(m) + perm = rng.permutation(m) assert_array_equal(perm, np.array([2, 1, 4, 0, 3])) assert_array_equal(m.__array__(), np.arange(5)) + + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + @pytest.mark.parametrize( + "cls", + [ + random.Generator, + random.MT19937, + random.PCG64, + random.PCG64DXSM, + random.Philox, + random.RandomState, + random.SFC64, + random.BitGenerator, + random.SeedSequence, + random.bit_generator.SeedlessSeedSequence, + ], + ) + def test_inspect_signature(self, cls: type) -> None: + assert hasattr(cls, "__text_signature__") + try: + inspect.signature(cls) + except ValueError: + pytest.fail(f"invalid signature: {cls.__module__}.{cls.__qualname__}") diff --git a/numpy/random/tests/test_seed_sequence.py b/numpy/random/tests/test_seed_sequence.py index f08cf80faafa..87ae4ff72139 100644 --- a/numpy/random/tests/test_seed_sequence.py +++ b/numpy/random/tests/test_seed_sequence.py @@ -1,7 +1,6 @@ import numpy as np -from numpy.testing import assert_array_equal, assert_array_compare - from numpy.random import SeedSequence +from 
numpy.testing import assert_array_compare, assert_array_equal def test_reference_data(): diff --git a/numpy/random/tests/test_smoke.py b/numpy/random/tests/test_smoke.py index 7e12561962a9..b30fddabd43e 100644 --- a/numpy/random/tests/test_smoke.py +++ b/numpy/random/tests/test_smoke.py @@ -1,16 +1,15 @@ import pickle +from dataclasses import dataclass from functools import partial -import numpy as np import pytest -from numpy.testing import assert_equal, assert_, assert_array_equal -from numpy.random import (Generator, MT19937, PCG64, PCG64DXSM, Philox, SFC64) -@pytest.fixture(scope='module', - params=(np.bool, np.int8, np.int16, np.int32, np.int64, - np.uint8, np.uint16, np.uint32, np.uint64)) -def dtype(request): - return request.param +import numpy as np +from numpy.random import MT19937, PCG64, PCG64DXSM, SFC64, Generator, Philox +from numpy.testing import assert_, assert_array_equal, assert_equal + +DTYPES_BOOL_INT_UINT = (np.bool, np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64) def params_0(f): @@ -64,15 +63,14 @@ def comp_state(state1, state2): if isinstance(state1, dict): for key in state1: identical &= comp_state(state1[key], state2[key]) - elif type(state1) != type(state2): - identical &= type(state1) == type(state2) + elif type(state1) is not type(state2): + identical &= type(state1) is type(state2) + elif (isinstance(state1, (list, tuple, np.ndarray)) and isinstance( + state2, (list, tuple, np.ndarray))): + for s1, s2 in zip(state1, state2): + identical &= comp_state(s1, s2) else: - if (isinstance(state1, (list, tuple, np.ndarray)) and isinstance( - state2, (list, tuple, np.ndarray))): - for s1, s2 in zip(state1, state2): - identical &= comp_state(s1, s2) - else: - identical &= state1 == state2 + identical &= state1 == state2 return identical @@ -91,403 +89,459 @@ def warmup(rg, n=None): rg.random(n, dtype=np.float32) +@dataclass +class RNGData: + bit_generator: type[np.random.BitGenerator] + advance: int + seed: 
list[int] + rg: Generator + seed_vector_bits: int + + class RNG: @classmethod - def setup_class(cls): + def _create_rng(cls): # Overridden in test classes. Place holder to silence IDE noise - cls.bit_generator = PCG64 - cls.advance = None - cls.seed = [12345] - cls.rg = Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 64 - cls._extra_setup() - - @classmethod - def _extra_setup(cls): - cls.vec_1d = np.arange(2.0, 102.0) - cls.vec_2d = np.arange(2.0, 102.0)[None, :] - cls.mat = np.arange(2.0, 102.0, 0.01).reshape((100, 100)) - cls.seed_error = TypeError - - def _reset_state(self): - self.rg.bit_generator.state = self.initial_state + bit_generator = PCG64 + advance = None + seed = [12345] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 64 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) def test_init(self): - rg = Generator(self.bit_generator()) - state = rg.bit_generator.state - rg.standard_normal(1) - rg.standard_normal(1) - rg.bit_generator.state = state - new_state = rg.bit_generator.state + data = self._create_rng() + data.rg = Generator(data.bit_generator()) + state = data.rg.bit_generator.state + data.rg.standard_normal(1) + data.rg.standard_normal(1) + data.rg.bit_generator.state = state + new_state = data.rg.bit_generator.state assert_(comp_state(state, new_state)) def test_advance(self): - state = self.rg.bit_generator.state - if hasattr(self.rg.bit_generator, 'advance'): - self.rg.bit_generator.advance(self.advance) - assert_(not comp_state(state, self.rg.bit_generator.state)) + data = self._create_rng() + state = data.rg.bit_generator.state + if hasattr(data.rg.bit_generator, 'advance'): + data.rg.bit_generator.advance(data.advance) + assert_(not comp_state(state, data.rg.bit_generator.state)) else: - bitgen_name = self.rg.bit_generator.__class__.__name__ + bitgen_name = data.rg.bit_generator.__class__.__name__ pytest.skip(f'Advance is not supported by 
{bitgen_name}') def test_jump(self): - state = self.rg.bit_generator.state - if hasattr(self.rg.bit_generator, 'jumped'): - bit_gen2 = self.rg.bit_generator.jumped() + rg = self._create_rng().rg + state = rg.bit_generator.state + if hasattr(rg.bit_generator, 'jumped'): + bit_gen2 = rg.bit_generator.jumped() jumped_state = bit_gen2.state assert_(not comp_state(state, jumped_state)) - self.rg.random(2 * 3 * 5 * 7 * 11 * 13 * 17) - self.rg.bit_generator.state = state - bit_gen3 = self.rg.bit_generator.jumped() + rg.random(2 * 3 * 5 * 7 * 11 * 13 * 17) + rg.bit_generator.state = state + bit_gen3 = rg.bit_generator.jumped() rejumped_state = bit_gen3.state assert_(comp_state(jumped_state, rejumped_state)) else: - bitgen_name = self.rg.bit_generator.__class__.__name__ + bitgen_name = rg.bit_generator.__class__.__name__ if bitgen_name not in ('SFC64',): raise AttributeError(f'no "jumped" in {bitgen_name}') pytest.skip(f'Jump is not supported by {bitgen_name}') def test_uniform(self): - r = self.rg.uniform(-1.0, 0.0, size=10) + rg = self._create_rng().rg + r = rg.uniform(-1.0, 0.0, size=10) assert_(len(r) == 10) assert_((r > -1).all()) assert_((r <= 0).all()) def test_uniform_array(self): - r = self.rg.uniform(np.array([-1.0] * 10), 0.0, size=10) + rg = self._create_rng().rg + r = rg.uniform(np.array([-1.0] * 10), 0.0, size=10) assert_(len(r) == 10) assert_((r > -1).all()) assert_((r <= 0).all()) - r = self.rg.uniform(np.array([-1.0] * 10), + r = rg.uniform(np.array([-1.0] * 10), np.array([0.0] * 10), size=10) assert_(len(r) == 10) assert_((r > -1).all()) assert_((r <= 0).all()) - r = self.rg.uniform(-1.0, np.array([0.0] * 10), size=10) + r = rg.uniform(-1.0, np.array([0.0] * 10), size=10) assert_(len(r) == 10) assert_((r > -1).all()) assert_((r <= 0).all()) def test_random(self): - assert_(len(self.rg.random(10)) == 10) - params_0(self.rg.random) + rg = self._create_rng().rg + assert_(len(rg.random(10)) == 10) + params_0(rg.random) def test_standard_normal_zig(self): - 
assert_(len(self.rg.standard_normal(10)) == 10) + rg = self._create_rng().rg + assert_(len(rg.standard_normal(10)) == 10) def test_standard_normal(self): - assert_(len(self.rg.standard_normal(10)) == 10) - params_0(self.rg.standard_normal) + rg = self._create_rng().rg + assert_(len(rg.standard_normal(10)) == 10) + params_0(rg.standard_normal) def test_standard_gamma(self): - assert_(len(self.rg.standard_gamma(10, 10)) == 10) - assert_(len(self.rg.standard_gamma(np.array([10] * 10), 10)) == 10) - params_1(self.rg.standard_gamma) + rg = self._create_rng().rg + assert_(len(rg.standard_gamma(10, 10)) == 10) + assert_(len(rg.standard_gamma(np.array([10] * 10), 10)) == 10) + params_1(rg.standard_gamma) def test_standard_exponential(self): - assert_(len(self.rg.standard_exponential(10)) == 10) - params_0(self.rg.standard_exponential) + rg = self._create_rng().rg + assert_(len(rg.standard_exponential(10)) == 10) + params_0(rg.standard_exponential) def test_standard_exponential_float(self): - randoms = self.rg.standard_exponential(10, dtype='float32') + rg = self._create_rng().rg + randoms = rg.standard_exponential(10, dtype='float32') assert_(len(randoms) == 10) assert randoms.dtype == np.float32 - params_0(partial(self.rg.standard_exponential, dtype='float32')) + params_0(partial(rg.standard_exponential, dtype='float32')) def test_standard_exponential_float_log(self): - randoms = self.rg.standard_exponential(10, dtype='float32', + rg = self._create_rng().rg + randoms = rg.standard_exponential(10, dtype='float32', method='inv') assert_(len(randoms) == 10) assert randoms.dtype == np.float32 - params_0(partial(self.rg.standard_exponential, dtype='float32', + params_0(partial(rg.standard_exponential, dtype='float32', method='inv')) def test_standard_cauchy(self): - assert_(len(self.rg.standard_cauchy(10)) == 10) - params_0(self.rg.standard_cauchy) + rg = self._create_rng().rg + assert_(len(rg.standard_cauchy(10)) == 10) + params_0(rg.standard_cauchy) def 
test_standard_t(self): - assert_(len(self.rg.standard_t(10, 10)) == 10) - params_1(self.rg.standard_t) + rg = self._create_rng().rg + assert_(len(rg.standard_t(10, 10)) == 10) + params_1(rg.standard_t) def test_binomial(self): - assert_(self.rg.binomial(10, .5) >= 0) - assert_(self.rg.binomial(1000, .5) >= 0) + rg = self._create_rng().rg + assert_(rg.binomial(10, .5) >= 0) + assert_(rg.binomial(1000, .5) >= 0) def test_reset_state(self): - state = self.rg.bit_generator.state - int_1 = self.rg.integers(2**31) - self.rg.bit_generator.state = state - int_2 = self.rg.integers(2**31) + rg = self._create_rng().rg + state = rg.bit_generator.state + int_1 = rg.integers(2**31) + rg.bit_generator.state = state + int_2 = rg.integers(2**31) assert_(int_1 == int_2) def test_entropy_init(self): - rg = Generator(self.bit_generator()) - rg2 = Generator(self.bit_generator()) + bit_generator = self._create_rng().bit_generator + rg = Generator(bit_generator()) + rg2 = Generator(bit_generator()) assert_(not comp_state(rg.bit_generator.state, rg2.bit_generator.state)) def test_seed(self): - rg = Generator(self.bit_generator(*self.seed)) - rg2 = Generator(self.bit_generator(*self.seed)) + data = self._create_rng() + rg = Generator(data.bit_generator(*data.seed)) + rg2 = Generator(data.bit_generator(*data.seed)) rg.random() rg2.random() assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) def test_reset_state_gauss(self): - rg = Generator(self.bit_generator(*self.seed)) + data = self._create_rng() + rg = Generator(data.bit_generator(*data.seed)) rg.standard_normal() state = rg.bit_generator.state n1 = rg.standard_normal(size=10) - rg2 = Generator(self.bit_generator()) + rg2 = Generator(data.bit_generator()) rg2.bit_generator.state = state n2 = rg2.standard_normal(size=10) assert_array_equal(n1, n2) def test_reset_state_uint32(self): - rg = Generator(self.bit_generator(*self.seed)) + data = self._create_rng() + rg = Generator(data.bit_generator(*data.seed)) rg.integers(0, 
2 ** 24, 120, dtype=np.uint32) state = rg.bit_generator.state n1 = rg.integers(0, 2 ** 24, 10, dtype=np.uint32) - rg2 = Generator(self.bit_generator()) + rg2 = Generator(data.bit_generator()) rg2.bit_generator.state = state n2 = rg2.integers(0, 2 ** 24, 10, dtype=np.uint32) assert_array_equal(n1, n2) def test_reset_state_float(self): - rg = Generator(self.bit_generator(*self.seed)) + data = self._create_rng() + rg = Generator(data.bit_generator(*data.seed)) rg.random(dtype='float32') state = rg.bit_generator.state n1 = rg.random(size=10, dtype='float32') - rg2 = Generator(self.bit_generator()) + rg2 = Generator(data.bit_generator()) rg2.bit_generator.state = state n2 = rg2.random(size=10, dtype='float32') assert_((n1 == n2).all()) def test_shuffle(self): + rg = self._create_rng().rg original = np.arange(200, 0, -1) - permuted = self.rg.permutation(original) + permuted = rg.permutation(original) assert_((original != permuted).any()) def test_permutation(self): + rg = self._create_rng().rg original = np.arange(200, 0, -1) - permuted = self.rg.permutation(original) + permuted = rg.permutation(original) assert_((original != permuted).any()) def test_beta(self): - vals = self.rg.beta(2.0, 2.0, 10) + rg = self._create_rng().rg + vals = rg.beta(2.0, 2.0, 10) assert_(len(vals) == 10) - vals = self.rg.beta(np.array([2.0] * 10), 2.0) + vals = rg.beta(np.array([2.0] * 10), 2.0) assert_(len(vals) == 10) - vals = self.rg.beta(2.0, np.array([2.0] * 10)) + vals = rg.beta(2.0, np.array([2.0] * 10)) assert_(len(vals) == 10) - vals = self.rg.beta(np.array([2.0] * 10), np.array([2.0] * 10)) + vals = rg.beta(np.array([2.0] * 10), np.array([2.0] * 10)) assert_(len(vals) == 10) - vals = self.rg.beta(np.array([2.0] * 10), np.array([[2.0]] * 10)) + vals = rg.beta(np.array([2.0] * 10), np.array([[2.0]] * 10)) assert_(vals.shape == (10, 10)) def test_bytes(self): - vals = self.rg.bytes(10) + rg = self._create_rng().rg + vals = rg.bytes(10) assert_(len(vals) == 10) def test_chisquare(self): 
- vals = self.rg.chisquare(2.0, 10) + rg = self._create_rng().rg + vals = rg.chisquare(2.0, 10) assert_(len(vals) == 10) - params_1(self.rg.chisquare) + params_1(rg.chisquare) def test_exponential(self): - vals = self.rg.exponential(2.0, 10) + rg = self._create_rng().rg + vals = rg.exponential(2.0, 10) assert_(len(vals) == 10) - params_1(self.rg.exponential) + params_1(rg.exponential) def test_f(self): - vals = self.rg.f(3, 1000, 10) + rg = self._create_rng().rg + vals = rg.f(3, 1000, 10) assert_(len(vals) == 10) def test_gamma(self): - vals = self.rg.gamma(3, 2, 10) + rg = self._create_rng().rg + vals = rg.gamma(3, 2, 10) assert_(len(vals) == 10) def test_geometric(self): - vals = self.rg.geometric(0.5, 10) + rg = self._create_rng().rg + vals = rg.geometric(0.5, 10) assert_(len(vals) == 10) - params_1(self.rg.exponential, bounded=True) + params_1(rg.exponential, bounded=True) def test_gumbel(self): - vals = self.rg.gumbel(2.0, 2.0, 10) + rg = self._create_rng().rg + vals = rg.gumbel(2.0, 2.0, 10) assert_(len(vals) == 10) def test_laplace(self): - vals = self.rg.laplace(2.0, 2.0, 10) + rg = self._create_rng().rg + vals = rg.laplace(2.0, 2.0, 10) assert_(len(vals) == 10) def test_logitic(self): - vals = self.rg.logistic(2.0, 2.0, 10) + rg = self._create_rng().rg + vals = rg.logistic(2.0, 2.0, 10) assert_(len(vals) == 10) def test_logseries(self): - vals = self.rg.logseries(0.5, 10) + rg = self._create_rng().rg + vals = rg.logseries(0.5, 10) assert_(len(vals) == 10) def test_negative_binomial(self): - vals = self.rg.negative_binomial(10, 0.2, 10) + rg = self._create_rng().rg + vals = rg.negative_binomial(10, 0.2, 10) assert_(len(vals) == 10) def test_noncentral_chisquare(self): - vals = self.rg.noncentral_chisquare(10, 2, 10) + rg = self._create_rng().rg + vals = rg.noncentral_chisquare(10, 2, 10) assert_(len(vals) == 10) def test_noncentral_f(self): - vals = self.rg.noncentral_f(3, 1000, 2, 10) + rg = self._create_rng().rg + vals = rg.noncentral_f(3, 1000, 2, 10) 
assert_(len(vals) == 10) - vals = self.rg.noncentral_f(np.array([3] * 10), 1000, 2) + vals = rg.noncentral_f(np.array([3] * 10), 1000, 2) assert_(len(vals) == 10) - vals = self.rg.noncentral_f(3, np.array([1000] * 10), 2) + vals = rg.noncentral_f(3, np.array([1000] * 10), 2) assert_(len(vals) == 10) - vals = self.rg.noncentral_f(3, 1000, np.array([2] * 10)) + vals = rg.noncentral_f(3, 1000, np.array([2] * 10)) assert_(len(vals) == 10) def test_normal(self): - vals = self.rg.normal(10, 0.2, 10) + rg = self._create_rng().rg + vals = rg.normal(10, 0.2, 10) assert_(len(vals) == 10) def test_pareto(self): - vals = self.rg.pareto(3.0, 10) + rg = self._create_rng().rg + vals = rg.pareto(3.0, 10) assert_(len(vals) == 10) def test_poisson(self): - vals = self.rg.poisson(10, 10) + rg = self._create_rng().rg + vals = rg.poisson(10, 10) assert_(len(vals) == 10) - vals = self.rg.poisson(np.array([10] * 10)) + vals = rg.poisson(np.array([10] * 10)) assert_(len(vals) == 10) - params_1(self.rg.poisson) + params_1(rg.poisson) def test_power(self): - vals = self.rg.power(0.2, 10) + rg = self._create_rng().rg + vals = rg.power(0.2, 10) assert_(len(vals) == 10) def test_integers(self): - vals = self.rg.integers(10, 20, 10) + rg = self._create_rng().rg + vals = rg.integers(10, 20, 10) assert_(len(vals) == 10) def test_rayleigh(self): - vals = self.rg.rayleigh(0.2, 10) + rg = self._create_rng().rg + vals = rg.rayleigh(0.2, 10) assert_(len(vals) == 10) - params_1(self.rg.rayleigh, bounded=True) + params_1(rg.rayleigh, bounded=True) def test_vonmises(self): - vals = self.rg.vonmises(10, 0.2, 10) + rg = self._create_rng().rg + vals = rg.vonmises(10, 0.2, 10) assert_(len(vals) == 10) def test_wald(self): - vals = self.rg.wald(1.0, 1.0, 10) + rg = self._create_rng().rg + vals = rg.wald(1.0, 1.0, 10) assert_(len(vals) == 10) def test_weibull(self): - vals = self.rg.weibull(1.0, 10) + rg = self._create_rng().rg + vals = rg.weibull(1.0, 10) assert_(len(vals) == 10) def test_zipf(self): - vals = 
self.rg.zipf(10, 10) + rg = self._create_rng().rg + vec_1d = np.arange(2.0, 102.0) + vec_2d = np.arange(2.0, 102.0)[None, :] + mat = np.arange(2.0, 102.0, 0.01).reshape((100, 100)) + vals = rg.zipf(10, 10) assert_(len(vals) == 10) - vals = self.rg.zipf(self.vec_1d) + vals = rg.zipf(vec_1d) assert_(len(vals) == 100) - vals = self.rg.zipf(self.vec_2d) + vals = rg.zipf(vec_2d) assert_(vals.shape == (1, 100)) - vals = self.rg.zipf(self.mat) + vals = rg.zipf(mat) assert_(vals.shape == (100, 100)) def test_hypergeometric(self): - vals = self.rg.hypergeometric(25, 25, 20) + rg = self._create_rng().rg + vals = rg.hypergeometric(25, 25, 20) assert_(np.isscalar(vals)) - vals = self.rg.hypergeometric(np.array([25] * 10), 25, 20) + vals = rg.hypergeometric(np.array([25] * 10), 25, 20) assert_(vals.shape == (10,)) def test_triangular(self): - vals = self.rg.triangular(-5, 0, 5) + rg = self._create_rng().rg + vals = rg.triangular(-5, 0, 5) assert_(np.isscalar(vals)) - vals = self.rg.triangular(-5, np.array([0] * 10), 5) + vals = rg.triangular(-5, np.array([0] * 10), 5) assert_(vals.shape == (10,)) def test_multivariate_normal(self): + rg = self._create_rng().rg mean = [0, 0] cov = [[1, 0], [0, 100]] # diagonal covariance - x = self.rg.multivariate_normal(mean, cov, 5000) + x = rg.multivariate_normal(mean, cov, 5000) assert_(x.shape == (5000, 2)) - x_zig = self.rg.multivariate_normal(mean, cov, 5000) + x_zig = rg.multivariate_normal(mean, cov, 5000) assert_(x.shape == (5000, 2)) - x_inv = self.rg.multivariate_normal(mean, cov, 5000) + x_inv = rg.multivariate_normal(mean, cov, 5000) assert_(x.shape == (5000, 2)) assert_((x_zig != x_inv).any()) def test_multinomial(self): - vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3]) + rg = self._create_rng().rg + vals = rg.multinomial(100, [1.0 / 3, 2.0 / 3]) assert_(vals.shape == (2,)) - vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3], size=10) + vals = rg.multinomial(100, [1.0 / 3, 2.0 / 3], size=10) assert_(vals.shape == (10, 2)) def 
test_dirichlet(self): - s = self.rg.dirichlet((10, 5, 3), 20) + rg = self._create_rng().rg + s = rg.dirichlet((10, 5, 3), 20) assert_(s.shape == (20, 3)) def test_pickle(self): - pick = pickle.dumps(self.rg) + rg = self._create_rng().rg + pick = pickle.dumps(rg) unpick = pickle.loads(pick) - assert_((type(self.rg) == type(unpick))) - assert_(comp_state(self.rg.bit_generator.state, + assert_(type(rg) is type(unpick)) + assert_(comp_state(rg.bit_generator.state, unpick.bit_generator.state)) - pick = pickle.dumps(self.rg) + pick = pickle.dumps(rg) unpick = pickle.loads(pick) - assert_((type(self.rg) == type(unpick))) - assert_(comp_state(self.rg.bit_generator.state, + assert_(type(rg) is type(unpick)) + assert_(comp_state(rg.bit_generator.state, unpick.bit_generator.state)) def test_seed_array(self): - if self.seed_vector_bits is None: - bitgen_name = self.bit_generator.__name__ + data = self._create_rng() + if data.seed_vector_bits is None: + bitgen_name = data.bit_generator.__name__ pytest.skip(f'Vector seeding is not supported by {bitgen_name}') - if self.seed_vector_bits == 32: + if data.seed_vector_bits == 32: dtype = np.uint32 else: dtype = np.uint64 seed = np.array([1], dtype=dtype) - bg = self.bit_generator(seed) + bg = data.bit_generator(seed) state1 = bg.state - bg = self.bit_generator(1) + bg = data.bit_generator(1) state2 = bg.state assert_(comp_state(state1, state2)) seed = np.arange(4, dtype=dtype) - bg = self.bit_generator(seed) + bg = data.bit_generator(seed) state1 = bg.state - bg = self.bit_generator(seed[0]) + bg = data.bit_generator(seed[0]) state2 = bg.state assert_(not comp_state(state1, state2)) seed = np.arange(1500, dtype=dtype) - bg = self.bit_generator(seed) + bg = data.bit_generator(seed) state1 = bg.state - bg = self.bit_generator(seed[0]) + bg = data.bit_generator(seed[0]) state2 = bg.state assert_(not comp_state(state1, state2)) seed = 2 ** np.mod(np.arange(1500, dtype=dtype), - self.seed_vector_bits - 1) + 1 - bg = 
self.bit_generator(seed) + data.seed_vector_bits - 1) + 1 + bg = data.bit_generator(seed) state1 = bg.state - bg = self.bit_generator(seed[0]) + bg = data.bit_generator(seed[0]) state2 = bg.state assert_(not comp_state(state1, state2)) def test_uniform_float(self): - rg = Generator(self.bit_generator(12345)) + bit_generator = self._create_rng().bit_generator + rg = Generator(bit_generator(12345)) warmup(rg) state = rg.bit_generator.state r1 = rg.random(11, dtype=np.float32) - rg2 = Generator(self.bit_generator()) + rg2 = Generator(bit_generator()) warmup(rg2) rg2.bit_generator.state = state r2 = rg2.random(11, dtype=np.float32) @@ -496,11 +550,12 @@ def test_uniform_float(self): assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) def test_gamma_floats(self): - rg = Generator(self.bit_generator()) + bit_generator = self._create_rng().bit_generator + rg = Generator(bit_generator()) warmup(rg) state = rg.bit_generator.state r1 = rg.standard_gamma(4.0, 11, dtype=np.float32) - rg2 = Generator(self.bit_generator()) + rg2 = Generator(bit_generator()) warmup(rg2) rg2.bit_generator.state = state r2 = rg2.standard_gamma(4.0, 11, dtype=np.float32) @@ -509,11 +564,12 @@ def test_gamma_floats(self): assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) def test_normal_floats(self): - rg = Generator(self.bit_generator()) + bit_generator = self._create_rng().bit_generator + rg = Generator(bit_generator()) warmup(rg) state = rg.bit_generator.state r1 = rg.standard_normal(11, dtype=np.float32) - rg2 = Generator(self.bit_generator()) + rg2 = Generator(bit_generator()) warmup(rg2) rg2.bit_generator.state = state r2 = rg2.standard_normal(11, dtype=np.float32) @@ -522,11 +578,12 @@ def test_normal_floats(self): assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) def test_normal_zig_floats(self): - rg = Generator(self.bit_generator()) + bit_generator = self._create_rng().bit_generator + rg = Generator(bit_generator()) warmup(rg) state = 
rg.bit_generator.state r1 = rg.standard_normal(11, dtype=np.float32) - rg2 = Generator(self.bit_generator()) + rg2 = Generator(bit_generator()) warmup(rg2) rg2.bit_generator.state = state r2 = rg2.standard_normal(11, dtype=np.float32) @@ -535,7 +592,7 @@ def test_normal_zig_floats(self): assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) def test_output_fill(self): - rg = self.rg + rg = self._create_rng().rg state = rg.bit_generator.state size = (31, 7, 97) existing = np.empty(size) @@ -557,7 +614,7 @@ def test_output_fill(self): assert_equal(direct, existing) def test_output_filling_uniform(self): - rg = self.rg + rg = self._create_rng().rg state = rg.bit_generator.state size = (31, 7, 97) existing = np.empty(size) @@ -575,7 +632,7 @@ def test_output_filling_uniform(self): assert_equal(direct, existing) def test_output_filling_exponential(self): - rg = self.rg + rg = self._create_rng().rg state = rg.bit_generator.state size = (31, 7, 97) existing = np.empty(size) @@ -593,7 +650,7 @@ def test_output_filling_exponential(self): assert_equal(direct, existing) def test_output_filling_gamma(self): - rg = self.rg + rg = self._create_rng().rg state = rg.bit_generator.state size = (31, 7, 97) existing = np.zeros(size) @@ -611,7 +668,7 @@ def test_output_filling_gamma(self): assert_equal(direct, existing) def test_output_filling_gamma_broadcast(self): - rg = self.rg + rg = self._create_rng().rg state = rg.bit_generator.state size = (31, 7, 97) mu = np.arange(97.0) + 1.0 @@ -630,7 +687,7 @@ def test_output_filling_gamma_broadcast(self): assert_equal(direct, existing) def test_output_fill_error(self): - rg = self.rg + rg = self._create_rng().rg size = (31, 7, 97) existing = np.empty(size) with pytest.raises(TypeError): @@ -652,7 +709,14 @@ def test_output_fill_error(self): with pytest.raises(ValueError): rg.standard_gamma(1.0, out=existing[::3]) + @pytest.mark.parametrize("dtype", DTYPES_BOOL_INT_UINT) def test_integers_broadcast(self, dtype): + rg = 
self._create_rng().rg + initial_state = rg.bit_generator.state + + def reset_state(rng): + rng.bit_generator.state = initial_state + if dtype == np.bool: upper = 2 lower = 0 @@ -660,45 +724,50 @@ def test_integers_broadcast(self, dtype): info = np.iinfo(dtype) upper = int(info.max) + 1 lower = info.min - self._reset_state() - a = self.rg.integers(lower, [upper] * 10, dtype=dtype) - self._reset_state() - b = self.rg.integers([lower] * 10, upper, dtype=dtype) + reset_state(rg) + rg.bit_generator.state = initial_state + a = rg.integers(lower, [upper] * 10, dtype=dtype) + reset_state(rg) + b = rg.integers([lower] * 10, upper, dtype=dtype) assert_equal(a, b) - self._reset_state() - c = self.rg.integers(lower, upper, size=10, dtype=dtype) + reset_state(rg) + c = rg.integers(lower, upper, size=10, dtype=dtype) assert_equal(a, c) - self._reset_state() - d = self.rg.integers(np.array( + reset_state(rg) + d = rg.integers(np.array( [lower] * 10), np.array([upper], dtype=object), size=10, dtype=dtype) assert_equal(a, d) - self._reset_state() - e = self.rg.integers( + reset_state(rg) + e = rg.integers( np.array([lower] * 10), np.array([upper] * 10), size=10, dtype=dtype) assert_equal(a, e) - self._reset_state() - a = self.rg.integers(0, upper, size=10, dtype=dtype) - self._reset_state() - b = self.rg.integers([upper] * 10, dtype=dtype) + reset_state(rg) + a = rg.integers(0, upper, size=10, dtype=dtype) + reset_state(rg) + b = rg.integers([upper] * 10, dtype=dtype) assert_equal(a, b) + @pytest.mark.parametrize("dtype", DTYPES_BOOL_INT_UINT) def test_integers_numpy(self, dtype): + rg = self._create_rng().rg high = np.array([1]) low = np.array([0]) - out = self.rg.integers(low, high, dtype=dtype) + out = rg.integers(low, high, dtype=dtype) assert out.shape == (1,) - out = self.rg.integers(low[0], high, dtype=dtype) + out = rg.integers(low[0], high, dtype=dtype) assert out.shape == (1,) - out = self.rg.integers(low, high[0], dtype=dtype) + out = rg.integers(low, high[0], 
dtype=dtype) assert out.shape == (1,) + @pytest.mark.parametrize("dtype", DTYPES_BOOL_INT_UINT) def test_integers_broadcast_errors(self, dtype): + rg = self._create_rng().rg if dtype == np.bool: upper = 2 lower = 0 @@ -707,102 +776,97 @@ def test_integers_broadcast_errors(self, dtype): upper = int(info.max) + 1 lower = info.min with pytest.raises(ValueError): - self.rg.integers(lower, [upper + 1] * 10, dtype=dtype) + rg.integers(lower, [upper + 1] * 10, dtype=dtype) with pytest.raises(ValueError): - self.rg.integers(lower - 1, [upper] * 10, dtype=dtype) + rg.integers(lower - 1, [upper] * 10, dtype=dtype) with pytest.raises(ValueError): - self.rg.integers([lower - 1], [upper] * 10, dtype=dtype) + rg.integers([lower - 1], [upper] * 10, dtype=dtype) with pytest.raises(ValueError): - self.rg.integers([0], [0], dtype=dtype) + rg.integers([0], [0], dtype=dtype) class TestMT19937(RNG): @classmethod - def setup_class(cls): - cls.bit_generator = MT19937 - cls.advance = None - cls.seed = [2 ** 21 + 2 ** 16 + 2 ** 5 + 1] - cls.rg = Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 32 - cls._extra_setup() - cls.seed_error = ValueError + def _create_rng(cls): + bit_generator = MT19937 + advance = None + seed = [2 ** 21 + 2 ** 16 + 2 ** 5 + 1] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 32 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) def test_numpy_state(self): + rg = self._create_rng().rg nprg = np.random.RandomState() nprg.standard_normal(99) state = nprg.get_state() - self.rg.bit_generator.state = state - state2 = self.rg.bit_generator.state + rg.bit_generator.state = state + state2 = rg.bit_generator.state assert_((state[1] == state2['state']['key']).all()) - assert_((state[2] == state2['state']['pos'])) + assert_(state[2] == state2['state']['pos']) class TestPhilox(RNG): @classmethod - def setup_class(cls): - cls.bit_generator = Philox - cls.advance = 2**63 + 2**31 + 
2**15 + 1 - cls.seed = [12345] - cls.rg = Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 64 - cls._extra_setup() + def _create_rng(cls): + bit_generator = Philox + advance = 2**63 + 2**31 + 2**15 + 1 + seed = [12345] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 64 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) class TestSFC64(RNG): @classmethod - def setup_class(cls): - cls.bit_generator = SFC64 - cls.advance = None - cls.seed = [12345] - cls.rg = Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 192 - cls._extra_setup() + def _create_rng(cls): + bit_generator = SFC64 + advance = None + seed = [12345] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 192 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) class TestPCG64(RNG): @classmethod - def setup_class(cls): - cls.bit_generator = PCG64 - cls.advance = 2**63 + 2**31 + 2**15 + 1 - cls.seed = [12345] - cls.rg = Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 64 - cls._extra_setup() + def _create_rng(cls): + bit_generator = PCG64 + advance = 2**63 + 2**31 + 2**15 + 1 + seed = [12345] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 64 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) class TestPCG64DXSM(RNG): @classmethod - def setup_class(cls): - cls.bit_generator = PCG64DXSM - cls.advance = 2**63 + 2**31 + 2**15 + 1 - cls.seed = [12345] - cls.rg = Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 64 - cls._extra_setup() + def _create_rng(cls): + bit_generator = PCG64DXSM + advance = 2**63 + 2**31 + 2**15 + 1 + seed = [12345] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 64 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) class 
TestDefaultRNG(RNG): @classmethod - def setup_class(cls): + def _create_rng(cls): # This will duplicate some tests that directly instantiate a fresh # Generator(), but that's okay. - cls.bit_generator = PCG64 - cls.advance = 2**63 + 2**31 + 2**15 + 1 - cls.seed = [12345] - cls.rg = np.random.default_rng(*cls.seed) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 64 - cls._extra_setup() + bit_generator = PCG64 + advance = 2**63 + 2**31 + 2**15 + 1 + seed = [12345] + rg = np.random.default_rng(*seed) + seed_vector_bits = 64 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) def test_default_is_pcg64(self): # In order to change the default BitGenerator, we'll go through # a deprecation cycle to move to a different function. - assert_(isinstance(self.rg.bit_generator, PCG64)) + rg = self._create_rng().rg + assert_(isinstance(rg.bit_generator, PCG64)) def test_seed(self): np.random.default_rng() diff --git a/numpy/rec/__init__.py b/numpy/rec/__init__.py index 1a439ada8c35..420240c8d4d1 100644 --- a/numpy/rec/__init__.py +++ b/numpy/rec/__init__.py @@ -1,2 +1,2 @@ -from numpy._core.records import __all__, __doc__ from numpy._core.records import * +from numpy._core.records import __all__, __doc__ diff --git a/numpy/rec/__init__.pyi b/numpy/rec/__init__.pyi index 776db577cf9c..6a78c66ff2c2 100644 --- a/numpy/rec/__init__.pyi +++ b/numpy/rec/__init__.pyi @@ -1,13 +1,23 @@ from numpy._core.records import ( - record as record, - recarray as recarray, - format_parser as format_parser, - fromarrays as fromarrays, - fromrecords as fromrecords, - fromstring as fromstring, - fromfile as fromfile, - array as array + array, + find_duplicate, + format_parser, + fromarrays, + fromfile, + fromrecords, + fromstring, + recarray, + record, ) -__all__: list[str] -__path__: list[str] +__all__ = [ + "record", + "recarray", + "format_parser", + "fromarrays", + "fromrecords", + "fromstring", + "fromfile", + "array", + "find_duplicate", +] diff --git 
a/numpy/strings/__init__.py b/numpy/strings/__init__.py index f370ba71f296..561dadcf37d0 100644 --- a/numpy/strings/__init__.py +++ b/numpy/strings/__init__.py @@ -1,2 +1,2 @@ -from numpy._core.strings import __all__, __doc__ from numpy._core.strings import * +from numpy._core.strings import __all__, __doc__ diff --git a/numpy/strings/__init__.pyi b/numpy/strings/__init__.pyi index 927b0c9bd415..b2fb363531d4 100644 --- a/numpy/strings/__init__.pyi +++ b/numpy/strings/__init__.pyi @@ -1,53 +1,97 @@ from numpy._core.strings import ( - equal as equal, - not_equal as not_equal, - greater_equal as greater_equal, - less_equal as less_equal, - greater as greater, - less as less, - add as add, - multiply as multiply, - mod as mod, - isalpha as isalpha, - isalnum as isalnum, - isdigit as isdigit, - isspace as isspace, - isnumeric as isnumeric, - isdecimal as isdecimal, - islower as islower, - isupper as isupper, - istitle as istitle, - str_len as str_len, - find as find, - rfind as rfind, - index as index, - rindex as rindex, - count as count, - startswith as startswith, - endswith as endswith, - decode as decode, - encode as encode, - expandtabs as expandtabs, - center as center, - ljust as ljust, - rjust as rjust, - lstrip as lstrip, - rstrip as rstrip, - strip as strip, - zfill as zfill, - upper as upper, - lower as lower, - swapcase as swapcase, - capitalize as capitalize, - title as title, - replace as replace, - join as join, - split as split, - rsplit as rsplit, - splitlines as splitlines, - partition as partition, - rpartition as rpartition, - translate as translate, + add, + capitalize, + center, + count, + decode, + encode, + endswith, + equal, + expandtabs, + find, + greater, + greater_equal, + index, + isalnum, + isalpha, + isdecimal, + isdigit, + islower, + isnumeric, + isspace, + istitle, + isupper, + less, + less_equal, + ljust, + lower, + lstrip, + mod, + multiply, + not_equal, + partition, + replace, + rfind, + rindex, + rjust, + rpartition, + rstrip, + 
slice, + startswith, + str_len, + strip, + swapcase, + title, + translate, + upper, + zfill, ) -__all__: list[str] +__all__ = [ + "equal", + "not_equal", + "less", + "less_equal", + "greater", + "greater_equal", + "add", + "multiply", + "isalpha", + "isdigit", + "isspace", + "isalnum", + "islower", + "isupper", + "istitle", + "isdecimal", + "isnumeric", + "str_len", + "find", + "rfind", + "index", + "rindex", + "count", + "startswith", + "endswith", + "lstrip", + "rstrip", + "strip", + "replace", + "expandtabs", + "center", + "ljust", + "rjust", + "zfill", + "partition", + "rpartition", + "upper", + "lower", + "swapcase", + "capitalize", + "title", + "mod", + "decode", + "encode", + "translate", + "slice", +] diff --git a/numpy/testing/__init__.py b/numpy/testing/__init__.py index 8a34221e4dde..fe0c4f2367f2 100644 --- a/numpy/testing/__init__.py +++ b/numpy/testing/__init__.py @@ -7,16 +7,16 @@ """ from unittest import TestCase -from . import _private -from ._private.utils import * -from ._private.utils import (_assert_valid_refcount, _gen_alignment_data) +from . import _private, overrides from ._private import extbuild -from . import overrides +from ._private.utils import * +from ._private.utils import _assert_valid_refcount, _gen_alignment_data __all__ = ( _private.utils.__all__ + ['TestCase', 'overrides'] ) from numpy._pytesttester import PytestTester + test = PytestTester(__name__) del PytestTester diff --git a/numpy/testing/__init__.pyi b/numpy/testing/__init__.pyi index 2e4f76471b7c..3d6c53b1e638 100644 --- a/numpy/testing/__init__.pyi +++ b/numpy/testing/__init__.pyi @@ -1,49 +1,107 @@ -from numpy._pytesttester import PytestTester +from unittest import TestCase -from unittest import ( - TestCase as TestCase, +from . 
import _private as _private, overrides +from ._private import extbuild as extbuild +from ._private.utils import ( # type: ignore[deprecated] + BLAS_SUPPORTS_FPE, + HAS_LAPACK64, + HAS_REFCOUNT, + IS_64BIT, + IS_EDITABLE, + IS_INSTALLED, + IS_MUSL, + IS_PYPY, + IS_PYSTON, + IS_WASM, + NOGIL_BUILD, + NUMPY_ROOT, + IgnoreException, + KnownFailureException, + SkipTest, + assert_, + assert_allclose, + assert_almost_equal, + assert_approx_equal, + assert_array_almost_equal, + assert_array_almost_equal_nulp, + assert_array_compare, + assert_array_equal, + assert_array_less, + assert_array_max_ulp, + assert_equal, + assert_no_gc_cycles, + assert_no_warnings, + assert_raises, + assert_raises_regex, + assert_string_equal, + assert_warns, + break_cycles, + build_err_msg, + check_support_sve, + clear_and_catch_warnings, + decorate_methods, + jiffies, + measure, + memusage, + print_assert_equal, + run_threaded, + rundocs, + runstring, + suppress_warnings, # pyrefly: ignore[deprecated] + tempdir, + temppath, + verbose, ) -from numpy.testing._private.utils import ( - assert_equal as assert_equal, - assert_almost_equal as assert_almost_equal, - assert_approx_equal as assert_approx_equal, - assert_array_equal as assert_array_equal, - assert_array_less as assert_array_less, - assert_string_equal as assert_string_equal, - assert_array_almost_equal as assert_array_almost_equal, - assert_raises as assert_raises, - build_err_msg as build_err_msg, - decorate_methods as decorate_methods, - jiffies as jiffies, - memusage as memusage, - print_assert_equal as print_assert_equal, - rundocs as rundocs, - runstring as runstring, - verbose as verbose, - measure as measure, - assert_ as assert_, - assert_array_almost_equal_nulp as assert_array_almost_equal_nulp, - assert_raises_regex as assert_raises_regex, - assert_array_max_ulp as assert_array_max_ulp, - assert_warns as assert_warns, - assert_no_warnings as assert_no_warnings, - assert_allclose as assert_allclose, - IgnoreException as 
IgnoreException, - clear_and_catch_warnings as clear_and_catch_warnings, - SkipTest as SkipTest, - KnownFailureException as KnownFailureException, - temppath as temppath, - tempdir as tempdir, - IS_PYPY as IS_PYPY, - IS_PYSTON as IS_PYSTON, - HAS_REFCOUNT as HAS_REFCOUNT, - suppress_warnings as suppress_warnings, - assert_array_compare as assert_array_compare, - assert_no_gc_cycles as assert_no_gc_cycles, - break_cycles as break_cycles, - HAS_LAPACK64 as HAS_LAPACK64, -) - -__all__: list[str] -test: PytestTester +__all__ = [ + "BLAS_SUPPORTS_FPE", + "HAS_LAPACK64", + "HAS_REFCOUNT", + "IS_64BIT", + "IS_EDITABLE", + "IS_INSTALLED", + "IS_MUSL", + "IS_PYPY", + "IS_PYSTON", + "IS_WASM", + "NOGIL_BUILD", + "NUMPY_ROOT", + "IgnoreException", + "KnownFailureException", + "SkipTest", + "TestCase", + "assert_", + "assert_allclose", + "assert_almost_equal", + "assert_approx_equal", + "assert_array_almost_equal", + "assert_array_almost_equal_nulp", + "assert_array_compare", + "assert_array_equal", + "assert_array_less", + "assert_array_max_ulp", + "assert_equal", + "assert_no_gc_cycles", + "assert_no_warnings", + "assert_raises", + "assert_raises_regex", + "assert_string_equal", + "assert_warns", + "break_cycles", + "build_err_msg", + "check_support_sve", + "clear_and_catch_warnings", + "decorate_methods", + "jiffies", + "measure", + "memusage", + "overrides", + "print_assert_equal", + "run_threaded", + "rundocs", + "runstring", + "suppress_warnings", + "tempdir", + "temppath", + "verbose", +] diff --git a/numpy/testing/_private/__init__.pyi b/numpy/testing/_private/__init__.pyi new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/numpy/testing/_private/extbuild.py b/numpy/testing/_private/extbuild.py index 65465ed19760..2a724b73cfc3 100644 --- a/numpy/testing/_private/extbuild.py +++ b/numpy/testing/_private/extbuild.py @@ -16,7 +16,7 @@ def build_and_import_extension( modname, functions, *, prologue="", build_dir=None, - include_dirs=[], more_init=""): + 
include_dirs=None, more_init=""): """ Build and imports a c-extension module `modname` from a list of function fragments `functions`. @@ -53,8 +53,14 @@ def build_and_import_extension( >>> assert not mod.test_bytes('abc') >>> assert mod.test_bytes(b'abc') """ + if include_dirs is None: + include_dirs = [] body = prologue + _make_methods(functions, modname) - init = """PyObject *mod = PyModule_Create(&moduledef); + init = """ + PyObject *mod = PyModule_Create(&moduledef); + #ifdef Py_GIL_DISABLED + PyUnstable_Module_SetGIL(mod, Py_MOD_GIL_NOT_USED); + #endif """ if not build_dir: build_dir = pathlib.Path('.') @@ -64,12 +70,8 @@ def build_and_import_extension( init += more_init init += "\nreturn mod;" source_string = _make_source(modname, init, body) - try: - mod_so = compile_extension_module( - modname, build_dir, include_dirs, source_string) - except Exception as e: - # shorten the exception chain - raise RuntimeError(f"could not compile in {build_dir}:") from e + mod_so = compile_extension_module( + modname, build_dir, include_dirs, source_string) import importlib.util spec = importlib.util.spec_from_file_location(modname, mod_so) foo = importlib.util.module_from_spec(spec) @@ -79,7 +81,7 @@ def build_and_import_extension( def compile_extension_module( name, builddir, include_dirs, - source_string, libraries=[], library_dirs=[]): + source_string, libraries=None, library_dirs=None): """ Build an extension module and return the filename of the resulting native code file. 
@@ -102,11 +104,14 @@ def compile_extension_module( dirname = builddir / name dirname.mkdir(exist_ok=True) cfile = _convert_str_to_file(source_string, dirname) - include_dirs = include_dirs + [sysconfig.get_config_var('INCLUDEPY')] + include_dirs = include_dirs or [] + libraries = libraries or [] + library_dirs = library_dirs or [] return _c_compile( cfile, outputfilename=dirname / modname, - include_dirs=include_dirs, libraries=[], library_dirs=[], + include_dirs=include_dirs, libraries=libraries, + library_dirs=library_dirs, ) @@ -129,19 +134,19 @@ def _make_methods(functions, modname): methods_table = [] codes = [] for funcname, flags, code in functions: - cfuncname = "%s_%s" % (modname, funcname) + cfuncname = f"{modname}_{funcname}" if 'METH_KEYWORDS' in flags: signature = '(PyObject *self, PyObject *args, PyObject *kwargs)' else: signature = '(PyObject *self, PyObject *args)' methods_table.append( "{\"%s\", (PyCFunction)%s, %s}," % (funcname, cfuncname, flags)) - func_code = """ + func_code = f""" static PyObject* {cfuncname}{signature} {{ {code} }} - """.format(cfuncname=cfuncname, signature=signature, code=code) + """ codes.append(func_code) body = "\n".join(codes) + """ @@ -156,7 +161,7 @@ def _make_methods(functions, modname): -1, /* m_size */ methods, /* m_methods */ }; - """ % dict(methods='\n'.join(methods_table), modname=modname) + """ % {'methods': '\n'.join(methods_table), 'modname': modname} return body @@ -172,41 +177,28 @@ def _make_source(name, init, body): PyInit_%(name)s(void) { %(init)s } - """ % dict( - name=name, init=init, body=body, - ) + """ % { + 'name': name, 'init': init, 'body': body, + } return code -def _c_compile(cfile, outputfilename, include_dirs=[], libraries=[], - library_dirs=[]): +def _c_compile(cfile, outputfilename, include_dirs, libraries, + library_dirs): + link_extra = [] if sys.platform == 'win32': compile_extra = ["/we4013"] - link_extra = ["/LIBPATH:" + os.path.join(sys.base_prefix, 'libs')] + 
link_extra.append('/DEBUG') # generate .pdb file elif sys.platform.startswith('linux'): compile_extra = [ "-O0", "-g", "-Werror=implicit-function-declaration", "-fPIC"] - link_extra = [] else: - compile_extra = link_extra = [] - pass - if sys.platform == 'win32': - link_extra = link_extra + ['/DEBUG'] # generate .pdb file - if sys.platform == 'darwin': - # support Fink & Darwinports - for s in ('/sw/', '/opt/local/'): - if (s + 'include' not in include_dirs - and os.path.exists(s + 'include')): - include_dirs.append(s + 'include') - if s + 'lib' not in library_dirs and os.path.exists(s + 'lib'): - library_dirs.append(s + 'lib') - - outputfilename = outputfilename.with_suffix(get_so_suffix()) - build( + compile_extra = [] + + return build( cfile, outputfilename, compile_extra, link_extra, include_dirs, libraries, library_dirs) - return outputfilename def build(cfile, outputfilename, compile_extra, link_extra, @@ -215,33 +207,43 @@ def build(cfile, outputfilename, compile_extra, link_extra, build_dir = cfile.parent / "build" os.makedirs(build_dir, exist_ok=True) - so_name = outputfilename.parts[-1] with open(cfile.parent / "meson.build", "wt") as fid: - includes = ['-I' + d for d in include_dirs] link_dirs = ['-L' + d for d in library_dirs] fid.write(textwrap.dedent(f"""\ project('foo', 'c') - shared_module('{so_name}', '{cfile.parts[-1]}', - c_args: {includes} + {compile_extra}, - link_args: {link_dirs} + {link_extra}, - link_with: {libraries}, - name_prefix: '', - name_suffix: 'dummy', + py = import('python').find_installation(pure: false) + py.extension_module( + '{outputfilename.parts[-1]}', + '{cfile.parts[-1]}', + c_args: {compile_extra}, + link_args: {link_dirs}, + include_directories: {include_dirs}, ) """)) + native_file_name = cfile.parent / ".mesonpy-native-file.ini" + with open(native_file_name, "wt") as fid: + fid.write(textwrap.dedent(f"""\ + [binaries] + python = '{sys.executable}' + """)) if sys.platform == "win32": subprocess.check_call(["meson", 
"setup", - "--buildtype=release", + "--buildtype=release", "--vsenv", ".."], cwd=build_dir, ) else: - subprocess.check_call(["meson", "setup", "--vsenv", ".."], + subprocess.check_call(["meson", "setup", "--vsenv", + "..", f'--native-file={os.fspath(native_file_name)}'], cwd=build_dir ) + + so_name = outputfilename.parts[-1] + get_so_suffix() subprocess.check_call(["meson", "compile"], cwd=build_dir) - os.rename(str(build_dir / so_name) + ".dummy", cfile.parent / so_name) - + os.rename(str(build_dir / so_name), cfile.parent / so_name) + return cfile.parent / so_name + + def get_so_suffix(): ret = sysconfig.get_config_var('EXT_SUFFIX') assert ret diff --git a/numpy/testing/_private/extbuild.pyi b/numpy/testing/_private/extbuild.pyi new file mode 100644 index 000000000000..c1ae507d6a49 --- /dev/null +++ b/numpy/testing/_private/extbuild.pyi @@ -0,0 +1,25 @@ +import pathlib +import types +from collections.abc import Sequence + +__all__ = ["build_and_import_extension", "compile_extension_module"] + +def build_and_import_extension( + modname: str, + functions: Sequence[tuple[str, str, str]], + *, + prologue: str = "", + build_dir: pathlib.Path | None = None, + include_dirs: Sequence[str] | None = None, + more_init: str = "", +) -> types.ModuleType: ... + +# +def compile_extension_module( + name: str, + builddir: pathlib.Path, + include_dirs: Sequence[str], + source_string: str, + libraries: Sequence[str] | None = None, + library_dirs: Sequence[str] | None = None, +) -> pathlib.Path: ... diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index a913c1a69f88..54d040a6ed3f 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -2,30 +2,32 @@ Utility function to facilitate testing. 
""" +import concurrent.futures +import contextlib +import gc +import importlib.metadata +import importlib.util +import operator import os -import sys +import pathlib import platform +import pprint import re -import gc -import operator +import shutil +import sys +import sysconfig +import threading import warnings from functools import partial, wraps -import shutil -import contextlib +from io import StringIO from tempfile import mkdtemp, mkstemp from unittest.case import SkipTest from warnings import WarningMessage -import pprint -import sysconfig import numpy as np -from numpy._core import ( - intp, float32, empty, arange, array_repr, ndarray, isnat, array) -from numpy import isfinite, isnan, isinf import numpy.linalg._umath_linalg -from numpy._utils import _rename_parameter - -from io import StringIO +from numpy import isfinite, isnan +from numpy._core import arange, array, array_repr, empty, float32, intp, isnat, ndarray __all__ = [ 'assert_equal', 'assert_almost_equal', 'assert_approx_equal', @@ -39,8 +41,9 @@ 'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY', 'HAS_REFCOUNT', "IS_WASM", 'suppress_warnings', 'assert_array_compare', 'assert_no_gc_cycles', 'break_cycles', 'HAS_LAPACK64', 'IS_PYSTON', - '_OLD_PROMOTION', 'IS_MUSL', '_SUPPORTS_SVE', 'NOGIL_BUILD', - 'IS_EDITABLE' + 'IS_MUSL', 'check_support_sve', 'NOGIL_BUILD', + 'IS_EDITABLE', 'IS_INSTALLED', 'NUMPY_ROOT', 'run_threaded', 'IS_64BIT', + 'BLAS_SUPPORTS_FPE', ] @@ -52,14 +55,45 @@ class KnownFailureException(Exception): KnownFailureTest = KnownFailureException # backwards compat verbose = 0 +NUMPY_ROOT = pathlib.Path(np.__file__).parent + +try: + np_dist = importlib.metadata.distribution('numpy') +except importlib.metadata.PackageNotFoundError: + IS_INSTALLED = IS_EDITABLE = False +else: + IS_INSTALLED = True + try: + if sys.version_info < (3, 13): + # Backport importlib.metadata.Distribution.origin + import json + import types + origin = json.loads( + 
np_dist.read_text('direct_url.json') or '{}', + object_hook=lambda data: types.SimpleNamespace(**data), + ) + IS_EDITABLE = origin.dir_info.editable + else: + IS_EDITABLE = np_dist.origin.dir_info.editable + except AttributeError: + IS_EDITABLE = False + + # spin installs numpy directly via meson, instead of using meson-python, and + # runs the module by setting PYTHONPATH. This is problematic because the + # resulting installation lacks the Python metadata (.dist-info), and numpy + # might already be installed on the environment, causing us to find its + # metadata, even though we are not actually loading that package. + # Work around this issue by checking if the numpy root matches. + if not IS_EDITABLE and np_dist.locate_file('numpy') != NUMPY_ROOT: + IS_INSTALLED = False + IS_WASM = platform.machine() in ["wasm32", "wasm64"] IS_PYPY = sys.implementation.name == 'pypy' IS_PYSTON = hasattr(sys, "pyston_version_info") -IS_EDITABLE = not bool(np.__path__) or 'editable' in np.__path__[0] HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None and not IS_PYSTON -HAS_LAPACK64 = numpy.linalg._umath_linalg._ilp64 +BLAS_SUPPORTS_FPE = np._core._multiarray_umath._blas_supports_fpe(None) -_OLD_PROMOTION = lambda: np._get_promotion_state() == 'legacy' +HAS_LAPACK64 = numpy.linalg._umath_linalg._ilp64 IS_MUSL = False # alternate way is @@ -71,6 +105,7 @@ class KnownFailureException(Exception): IS_MUSL = True NOGIL_BUILD = bool(sysconfig.get_config_var("Py_GIL_DISABLED")) +IS_64BIT = np.dtype(np.intp).itemsize == 8 def assert_(val, msg=''): """ @@ -101,14 +136,15 @@ def GetPerformanceAttributes(object, counter, instance=None, # thread's CPU usage is either 0 or 100). To read counters like this, # you should copy this function, but keep the counter open, and call # CollectQueryData() each time you need to know. 
- # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp (dead link) + # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp + # (dead link) # My older explanation for this was that the "AddCounter" process # forced the CPU to 100%, but the above makes more sense :) import win32pdh if format is None: format = win32pdh.PDH_FMT_LONG - path = win32pdh.MakeCounterPath( (machine, object, instance, None, - inum, counter)) + path = win32pdh.MakeCounterPath((machine, object, instance, None, + inum, counter)) hq = win32pdh.OpenQuery() try: hc = win32pdh.AddCounter(hq, path) @@ -129,11 +165,12 @@ def memusage(processName="python", instance=0): win32pdh.PDH_FMT_LONG, None) elif sys.platform[:5] == 'linux': - def memusage(_proc_pid_stat=f'/proc/{os.getpid()}/stat'): + def memusage(_proc_pid_stat=None): """ Return virtual memory size in bytes of the running python. """ + _proc_pid_stat = _proc_pid_stat or f'/proc/{os.getpid()}/stat' try: with open(_proc_pid_stat) as f: l = f.readline().split(' ') @@ -150,7 +187,7 @@ def memusage(): if sys.platform[:5] == 'linux': - def jiffies(_proc_pid_stat=f'/proc/{os.getpid()}/stat', _load_time=[]): + def jiffies(_proc_pid_stat=None, _load_time=None): """ Return number of jiffies elapsed. @@ -158,6 +195,8 @@ def jiffies(_proc_pid_stat=f'/proc/{os.getpid()}/stat', _load_time=[]): process has been scheduled in user mode. See man 5 proc. """ + _proc_pid_stat = _proc_pid_stat or f'/proc/{os.getpid()}/stat' + _load_time = _load_time or [] import time if not _load_time: _load_time.append(time.time()) @@ -166,7 +205,7 @@ def jiffies(_proc_pid_stat=f'/proc/{os.getpid()}/stat', _load_time=[]): l = f.readline().split(' ') return int(l[13]) except Exception: - return int(100*(time.time()-_load_time[0])) + return int(100 * (time.time() - _load_time[0])) else: # os.getpid is not in all platforms available. 
# Using time is safe but inaccurate, especially when process @@ -182,7 +221,7 @@ def jiffies(_load_time=[]): import time if not _load_time: _load_time.append(time.time()) - return int(100*(time.time()-_load_time[0])) + return int(100 * (time.time() - _load_time[0])) def build_err_msg(arrays, err_msg, header='Items are not equal:', @@ -190,7 +229,7 @@ def build_err_msg(arrays, err_msg, header='Items are not equal:', msg = ['\n' + header] err_msg = str(err_msg) if err_msg: - if err_msg.find('\n') == -1 and len(err_msg) < 79-len(header): + if err_msg.find('\n') == -1 and len(err_msg) < 79 - len(header): msg = [msg[0] + ' ' + err_msg] else: msg.append(err_msg) @@ -258,9 +297,10 @@ def assert_equal(actual, desired, err_msg='', verbose=True, *, strict=False): Notes ----- - By default, when one of `actual` and `desired` is a scalar and the other is - an array, the function checks that each element of the array is equal to - the scalar. This behaviour can be disabled by setting ``strict==True``. + When one of `actual` and `desired` is a scalar and the other is array_like, the + function checks that each element of the array_like is equal to the scalar. + Note that empty arrays are therefore considered equal to scalars. + This behaviour can be disabled by setting ``strict==True``. 
Examples -------- @@ -318,7 +358,7 @@ def assert_equal(actual, desired, err_msg='', verbose=True, *, strict=False): if not isinstance(actual, dict): raise AssertionError(repr(type(actual))) assert_equal(len(actual), len(desired), err_msg, verbose) - for k, i in desired.items(): + for k in desired: if k not in actual: raise AssertionError(repr(k)) assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}', @@ -330,8 +370,8 @@ def assert_equal(actual, desired, err_msg='', verbose=True, *, strict=False): assert_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}', verbose) return - from numpy._core import ndarray, isscalar, signbit - from numpy import iscomplexobj, real, imag + from numpy import imag, iscomplexobj, real + from numpy._core import isscalar, ndarray, signbit if isinstance(actual, ndarray) or isinstance(desired, ndarray): return assert_array_equal(actual, desired, err_msg, verbose, strict=strict) @@ -465,7 +505,6 @@ def print_assert_equal(test_string, actual, desired): raise AssertionError(msg.getvalue()) -@np._no_nep50_warning() def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True): """ Raises an AssertionError if two items are not equal up to desired @@ -529,6 +568,8 @@ def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True): Arrays are not almost equal to 9 decimals Mismatched elements: 1 / 2 (50%) + Mismatch at index: + [1]: 2.3333333333333 (ACTUAL), 2.33333334 (DESIRED) Max absolute difference among violations: 6.66669964e-09 Max relative difference among violations: 2.85715698e-09 ACTUAL: array([1. 
, 2.333333333]) @@ -536,8 +577,8 @@ def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True): """ __tracebackhide__ = True # Hide traceback for py.test + from numpy import imag, iscomplexobj, real from numpy._core import ndarray - from numpy import iscomplexobj, real, imag # Handle complex numbers: separate into real/imag to handle # nan/inf/negative zero correctly @@ -582,9 +623,8 @@ def _build_err_msg(): if isnan(desired) or isnan(actual): if not (isnan(desired) and isnan(actual)): raise AssertionError(_build_err_msg()) - else: - if not desired == actual: - raise AssertionError(_build_err_msg()) + elif not desired == actual: + raise AssertionError(_build_err_msg()) return except (NotImplementedError, TypeError): pass @@ -592,7 +632,6 @@ def _build_err_msg(): raise AssertionError(_build_err_msg()) -@np._no_nep50_warning() def assert_approx_equal(actual, desired, significant=7, err_msg='', verbose=True): """ @@ -661,14 +700,14 @@ def assert_approx_equal(actual, desired, significant=7, err_msg='', # Normalized the numbers to be in range (-10.0,10.0) # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual)))))) with np.errstate(invalid='ignore'): - scale = 0.5*(np.abs(desired) + np.abs(actual)) + scale = 0.5 * (np.abs(desired) + np.abs(actual)) scale = np.power(10, np.floor(np.log10(scale))) try: - sc_desired = desired/scale + sc_desired = desired / scale except ZeroDivisionError: sc_desired = 0.0 try: - sc_actual = actual/scale + sc_actual = actual / scale except ZeroDivisionError: sc_actual = 0.0 msg = build_err_msg( @@ -683,23 +722,20 @@ def assert_approx_equal(actual, desired, significant=7, err_msg='', if isnan(desired) or isnan(actual): if not (isnan(desired) and isnan(actual)): raise AssertionError(msg) - else: - if not desired == actual: - raise AssertionError(msg) + elif not desired == actual: + raise AssertionError(msg) return except (TypeError, NotImplementedError): pass - if np.abs(sc_desired - sc_actual) >= 
np.power(10., -(significant-1)): + if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant - 1)): raise AssertionError(msg) -@np._no_nep50_warning() def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='', precision=6, equal_nan=True, equal_inf=True, *, strict=False, names=('ACTUAL', 'DESIRED')): __tracebackhide__ = True # Hide traceback for py.test - from numpy._core import (array2string, isnan, inf, errstate, - all, max, object_) + from numpy._core import all, array2string, errstate, inf, isnan, max, object_ x = np.asanyarray(x) y = np.asanyarray(y) @@ -716,6 +752,24 @@ def istime(x): def isvstring(x): return x.dtype.char == "T" + def robust_any_difference(x, y): + # We include work-arounds here to handle three types of slightly + # pathological ndarray subclasses: + # (1) all() on fully masked arrays returns np.ma.masked, so we use != True + # (np.ma.masked != True evaluates as np.ma.masked, which is falsy). + # (2) __eq__ on some ndarray subclasses returns Python booleans + # instead of element-wise comparisons, so we cast to np.bool() in + # that case (or in case __eq__ returns some other value with no + # all() method). + # (3) subclasses with bare-bones __array_function__ implementations may + # not implement np.all(), so favor using the .all() method + # We are not committed to supporting cases (2) and (3), but it's nice to + # support them if possible. + result = x == y + if not hasattr(result, "all") or not callable(result.all): + result = np.bool(result) + return result.all() != True + def func_assert_same_pos(x, y, func=isnan, hasval='nan'): """Handling nan/inf. 
@@ -727,18 +781,7 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): x_id = func(x) y_id = func(y) - # We include work-arounds here to handle three types of slightly - # pathological ndarray subclasses: - # (1) all() on `masked` array scalars can return masked arrays, so we - # use != True - # (2) __eq__ on some ndarray subclasses returns Python booleans - # instead of element-wise comparisons, so we cast to np.bool() and - # use isinstance(..., bool) checks - # (3) subclasses with bare-bones __array_function__ implementations may - # not implement np.all(), so favor using the .all() method - # We are not committed to supporting such subclasses, but it's nice to - # support them if possible. - if np.bool(x_id == y_id).all() != True: + if robust_any_difference(x_id, y_id): msg = build_err_msg( [x, y], err_msg + '\n%s location mismatch:' @@ -748,6 +791,9 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): raise AssertionError(msg) # If there is a scalar, then here we know the array has the same # flag as it everywhere, so we should return the scalar flag. + # np.ma.masked is also handled and converted to np.False_ (even if the other + # array has nans/infs etc.; that's OK given the handling later of fully-masked + # results). 
if isinstance(x_id, bool) or x_id.ndim == 0: return np.bool(x_id) elif isinstance(y_id, bool) or y_id.ndim == 0: @@ -755,6 +801,29 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): else: return y_id + def assert_same_inf_values(x, y, infs_mask): + """ + Verify all inf values match in the two arrays + """ + __tracebackhide__ = True # Hide traceback for py.test + + if not infs_mask.any(): + return + if x.ndim > 0 and y.ndim > 0: + x = x[infs_mask] + y = y[infs_mask] + else: + assert infs_mask.all() + + if robust_any_difference(x, y): + msg = build_err_msg( + [x, y], + err_msg + '\ninf values mismatch:', + verbose=verbose, header=header, + names=names, + precision=precision) + raise AssertionError(msg) + try: if strict: cond = x.shape == y.shape and x.dtype == y.dtype @@ -779,12 +848,15 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): flagged = func_assert_same_pos(x, y, func=isnan, hasval='nan') if equal_inf: - flagged |= func_assert_same_pos(x, y, - func=lambda xy: xy == +inf, - hasval='+inf') - flagged |= func_assert_same_pos(x, y, - func=lambda xy: xy == -inf, - hasval='-inf') + # If equal_nan=True, skip comparing nans below for equality if they are + # also infs (e.g. inf+nanj) since that would always fail. 
+ isinf_func = lambda xy: np.logical_and(np.isinf(xy), np.invert(flagged)) + infs_mask = func_assert_same_pos( + x, y, + func=isinf_func, + hasval='inf') + assert_same_inf_values(x, y, infs_mask) + flagged |= infs_mask elif istime(x) and istime(y): # If one is datetime64 and the other timedelta64 there is no point @@ -833,9 +905,33 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): n_mismatch = reduced.size - reduced.sum(dtype=intp) n_elements = flagged.size if flagged.ndim != 0 else reduced.size percent_mismatch = 100 * n_mismatch / n_elements - remarks = [ - 'Mismatched elements: {} / {} ({:.3g}%)'.format( - n_mismatch, n_elements, percent_mismatch)] + remarks = [(f'Mismatched elements: {n_mismatch} / {n_elements} ' + f'({percent_mismatch:.3g}%)')] + if invalids.ndim != 0: + if flagged.ndim > 0: + positions = np.argwhere(np.asarray(~flagged))[invalids] + else: + positions = np.argwhere(np.asarray(invalids)) + s = "\n".join( + [ + f" {p.tolist()}: {ox if ox.ndim == 0 else ox[tuple(p)]} " + f"({names[0]}), {oy if oy.ndim == 0 else oy[tuple(p)]} " + f"({names[1]})" + for p in positions[:5] + ] + ) + if len(positions) == 1: + remarks.append( + f"Mismatch at index:\n{s}" + ) + elif len(positions) <= 5: + remarks.append( + f"Mismatch at indices:\n{s}" + ) + else: + remarks.append( + f"First 5 mismatches are at indices:\n{s}" + ) with errstate(all='ignore'): # ignore errors for non-numeric types @@ -855,27 +951,27 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): remarks.append( 'Max absolute difference among violations: ' + array2string(max_abs_error)) - + # note: this definition of relative error matches that one # used by assert_allclose (found in np.isclose) # Filter values where the divisor would be zero nonzero = np.bool(y != 0) nonzero_and_invalid = np.logical_and(invalids, nonzero) - + if all(~nonzero_and_invalid): max_rel_error = array(inf) else: nonzero_invalid_error = error[nonzero_and_invalid] broadcasted_y = np.broadcast_to(y, 
error.shape) nonzero_invalid_y = broadcasted_y[nonzero_and_invalid] - max_rel_error = max(nonzero_invalid_error + max_rel_error = max(nonzero_invalid_error / abs(nonzero_invalid_y)) - if getattr(error, 'dtype', object_) == object_: + if getattr(error, 'dtype', object_) == object_: remarks.append( 'Max relative difference among violations: ' + str(max_rel_error)) - else: + else: remarks.append( 'Max relative difference among violations: ' + array2string(max_rel_error)) @@ -896,7 +992,6 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): raise ValueError(msg) -@_rename_parameter(['x', 'y'], ['actual', 'desired'], dep_version='2.0.0') def assert_array_equal(actual, desired, err_msg='', verbose=True, *, strict=False): """ @@ -953,12 +1048,15 @@ def assert_array_equal(actual, desired, err_msg='', verbose=True, *, Notes ----- - When one of `actual` and `desired` is a scalar and the other is array_like, - the function checks that each element of the array_like object is equal to - the scalar. This behaviour can be disabled with the `strict` parameter. + When one of `actual` and `desired` is a scalar and the other is array_like, the + function checks that each element of the array_like is equal to the scalar. + Note that empty arrays are therefore considered equal to scalars. + This behaviour can be disabled by setting ``strict==True``. Examples -------- + >>> import numpy as np + The first assert does not raise an exception: >>> np.testing.assert_array_equal([1.0,2.33333,np.nan], @@ -974,6 +1072,8 @@ def assert_array_equal(actual, desired, err_msg='', verbose=True, *, Arrays are not equal Mismatched elements: 1 / 3 (33.3%) + Mismatch at index: + [1]: 3.141592653589793 (ACTUAL), 3.1415926535897927 (DESIRED) Max absolute difference among violations: 4.4408921e-16 Max relative difference among violations: 1.41357986e-16 ACTUAL: array([1. 
, 3.141593, nan]) @@ -1026,8 +1126,6 @@ def assert_array_equal(actual, desired, err_msg='', verbose=True, *, strict=strict) -@np._no_nep50_warning() -@_rename_parameter(['x', 'y'], ['actual', 'desired'], dep_version='2.0.0') def assert_array_almost_equal(actual, desired, decimal=6, err_msg='', verbose=True): """ @@ -1089,6 +1187,8 @@ def assert_array_almost_equal(actual, desired, decimal=6, err_msg='', Arrays are not almost equal to 5 decimals Mismatched elements: 1 / 3 (33.3%) + Mismatch at index: + [1]: 2.33333 (ACTUAL), 2.33339 (DESIRED) Max absolute difference among violations: 6.e-05 Max relative difference among violations: 2.57136612e-05 ACTUAL: array([1. , 2.33333, nan]) @@ -1109,23 +1209,8 @@ def assert_array_almost_equal(actual, desired, decimal=6, err_msg='', __tracebackhide__ = True # Hide traceback for py.test from numpy._core import number, result_type from numpy._core.numerictypes import issubdtype - from numpy._core.fromnumeric import any as npany def compare(x, y): - try: - if npany(isinf(x)) or npany(isinf(y)): - xinfid = isinf(x) - yinfid = isinf(y) - if not (xinfid == yinfid).all(): - return False - # if one item, x and y is +- inf - if x.size == y.size == 1: - return x == y - x = x[~xinfid] - y = y[~yinfid] - except (TypeError, NotImplementedError): - pass - # make sure y is an inexact type to avoid abs(MIN_INT); will cause # casting of x later. dtype = result_type(y, 1.) @@ -1210,6 +1295,8 @@ def assert_array_less(x, y, err_msg='', verbose=True, *, strict=False): Arrays are not strictly ordered `x < y` Mismatched elements: 1 / 3 (33.3%) + Mismatch at index: + [0]: 1.0 (x), 1.0 (y) Max absolute difference among violations: 0. Max relative difference among violations: 0. 
x: array([ 1., 1., nan]) @@ -1355,20 +1442,22 @@ def rundocs(filename=None, raise_on_error=True): >>> np.lib.test(doctests=True) # doctest: +SKIP """ - from numpy.distutils.misc_util import exec_mod_from_location import doctest + if filename is None: f = sys._getframe(1) filename = f.f_globals['__file__'] name = os.path.splitext(os.path.basename(filename))[0] - m = exec_mod_from_location(name, filename) + spec = importlib.util.spec_from_file_location(name, filename) + m = importlib.util.module_from_spec(spec) + spec.loader.exec_module(m) tests = doctest.DocTestFinder().find(m) runner = doctest.DocTestRunner(verbose=False) msg = [] if raise_on_error: - out = lambda s: msg.append(s) + out = msg.append else: out = None @@ -1379,21 +1468,24 @@ def rundocs(filename=None, raise_on_error=True): raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg)) -def check_support_sve(): +def check_support_sve(__cache=[]): """ gh-22982 """ - + + if __cache: + return __cache[0] + import subprocess cmd = 'lscpu' try: output = subprocess.run(cmd, capture_output=True, text=True) - return 'sve' in output.stdout - except OSError: - return False - + result = 'sve' in output.stdout + except (OSError, subprocess.SubprocessError): + result = False + __cache.append(result) + return __cache[0] -_SUPPORTS_SVE = check_support_sve() # # assert_raises and assert_raises_regex are taken from unittest. @@ -1449,11 +1541,6 @@ def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs): args and keyword arguments kwargs. Alternatively, can be used as a context manager like `assert_raises`. - - Notes - ----- - .. 
versionadded:: 1.9.0 - """ __tracebackhide__ = True # Hide traceback for py.test return _d.assertRaisesRegex(exception_class, expected_regexp, *args, **kwargs) @@ -1503,7 +1590,6 @@ def decorate_methods(cls, decorator, testmatch=None): continue if testmatch.search(funcname) and not funcname.startswith('_'): setattr(cls, funcname, decorator(function)) - return def measure(code_str, times=1, label=None): @@ -1549,7 +1635,7 @@ def measure(code_str, times=1, label=None): i += 1 exec(code, globs, locs) elapsed = jiffies() - elapsed - return 0.01*elapsed + return 0.01 * elapsed def _assert_valid_refcount(op): @@ -1561,9 +1647,10 @@ def _assert_valid_refcount(op): return True import gc + import numpy as np - b = np.arange(100*100).reshape(100, 100) + b = np.arange(100 * 100).reshape(100, 100) c = b i = 1 @@ -1575,7 +1662,6 @@ def _assert_valid_refcount(op): assert_(sys.getrefcount(i) >= rc) finally: gc.enable() - del d # for pyflakes def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True, @@ -1590,11 +1676,10 @@ def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True, contrast to the standard usage in numpy, NaNs are compared like numbers, no assertion is raised if both objects have NaNs in the same positions. - The test is equivalent to ``allclose(actual, desired, rtol, atol)`` (note - that ``allclose`` has different default values). It compares the difference - between `actual` and `desired` to ``atol + rtol * abs(desired)``. - - .. versionadded:: 1.5.0 + The test is equivalent to ``allclose(actual, desired, rtol, atol)``, + except that it is stricter: it doesn't broadcast its operands, and has + tighter default tolerance values. It compares the difference between + `actual` and `desired` to ``atol + rtol * abs(desired)``. Parameters ---------- @@ -1604,7 +1689,7 @@ def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True, Array desired. rtol : float, optional Relative tolerance. 
- atol : float, optional + atol : float | np.timedelta64, optional Absolute tolerance. equal_nan : bool, optional. If True, NaNs will compare equal. @@ -1630,10 +1715,10 @@ def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True, Notes ----- - When one of `actual` and `desired` is a scalar and the other is - array_like, the function performs the comparison as if the scalar were - broadcasted to the shape of the array. - This behaviour can be disabled with the `strict` parameter. + When one of `actual` and `desired` is a scalar and the other is array_like, the + function performs the comparison as if the scalar were broadcasted to the shape + of the array. Note that empty arrays are therefore considered equal to scalars. + This behaviour can be disabled by setting ``strict==True``. Examples -------- @@ -1683,7 +1768,11 @@ def compare(x, y): equal_nan=equal_nan) actual, desired = np.asanyarray(actual), np.asanyarray(desired) - header = f'Not equal to tolerance rtol={rtol:g}, atol={atol:g}' + if isinstance(atol, np.timedelta64): + atol_str = str(atol) + else: + atol_str = f"{atol:g}" + header = f'Not equal to tolerance rtol={rtol:g}, atol={atol_str}' assert_array_compare(compare, actual, desired, err_msg=str(err_msg), verbose=verbose, header=header, equal_nan=equal_nan, strict=strict) @@ -1743,7 +1832,7 @@ def assert_array_almost_equal_nulp(x, y, nulp=1): ax = np.abs(x) ay = np.abs(y) ref = nulp * np.spacing(np.where(ax > ay, ax, ay)) - if not np.all(np.abs(x-y) <= ref): + if not np.all(np.abs(x - y) <= ref): if np.iscomplexobj(x) or np.iscomplexobj(y): msg = f"Arrays are not equal to {nulp} ULP" else: @@ -1855,11 +1944,10 @@ def nulp_diff(x, y, dtype=None): y[np.isnan(y)] = np.nan if not x.shape == y.shape: - raise ValueError("Arrays do not have the same shape: %s - %s" % - (x.shape, y.shape)) + raise ValueError(f"Arrays do not have the same shape: {x.shape} - {y.shape}") def _diff(rx, ry, vdt): - diff = np.asarray(rx-ry, dtype=vdt) + diff = 
np.asarray(rx - ry, dtype=vdt) return np.abs(diff) rx = integer_repr(x) @@ -1875,9 +1963,8 @@ def _integer_repr(x, vdt, comp): rx = x.view(vdt) if not (rx.size == 1): rx[rx < 0] = comp - rx[rx < 0] - else: - if rx < 0: - rx = comp - rx + elif rx < 0: + rx = comp - rx return rx @@ -1899,7 +1986,7 @@ def integer_repr(x): @contextlib.contextmanager def _assert_warns_context(warning_class, name=None): __tracebackhide__ = True # Hide traceback for py.test - with suppress_warnings() as sup: + with suppress_warnings(_warn=False) as sup: l = sup.record(warning_class) yield if not len(l) > 0: @@ -1923,7 +2010,10 @@ def assert_warns(warning_class, *args, **kwargs): The ability to be used as a context manager is new in NumPy v1.11.0. - .. versionadded:: 1.4.0 + .. deprecated:: 2.4 + + This is deprecated. Use `warnings.catch_warnings` or + ``pytest.warns`` instead. Parameters ---------- @@ -1952,6 +2042,11 @@ def assert_warns(warning_class, *args, **kwargs): >>> ret = np.testing.assert_warns(DeprecationWarning, deprecated_func, 4) >>> assert ret == 16 """ + warnings.warn( + "NumPy warning suppression and assertion utilities are deprecated. " + "Use warnings.catch_warnings, warnings.filterwarnings, pytest.warns, " + "or pytest.filterwarnings instead. (Deprecated NumPy 2.4)", + DeprecationWarning, stacklevel=2) if not args and not kwargs: return _assert_warns_context(warning_class) elif len(args) < 1: @@ -1990,8 +2085,6 @@ def assert_no_warnings(*args, **kwargs): The ability to be used as a context manager is new in NumPy v1.11.0. - .. 
versionadded:: 1.7.0 - Parameters ---------- func : callable @@ -2062,7 +2155,7 @@ def _gen_alignment_data(dtype=float32, type='binary', max_size=24): inp1 = lambda: arange(s, dtype=dtype)[o:] inp2 = lambda: arange(s, dtype=dtype)[o:] out = empty((s,), dtype=dtype)[o:] - yield out, inp1(), inp2(), bfmt % \ + yield out, inp1(), inp2(), bfmt % \ (o, o, o, s, dtype, 'out of place') d = inp1() yield d, d, inp2(), bfmt % \ @@ -2142,7 +2235,7 @@ class clear_and_catch_warnings(warnings.catch_warnings): This makes it possible to trigger any warning afresh inside the context manager without disturbing the state of warnings outside. - For compatibility with Python 3.0, please consider all arguments to be + For compatibility with Python, please consider all arguments to be keyword-only. Parameters @@ -2206,6 +2299,11 @@ class suppress_warnings: tests might need to see the warning. Additionally it allows easier specificity for testing warnings and can be nested. + .. deprecated:: 2.4 + + This is deprecated. Use `warnings.filterwarnings` or + ``pytest.filterwarnings`` instead. + Parameters ---------- forwarding_rule : str, optional @@ -2266,7 +2364,13 @@ def some_function(): # do something which causes a warning in np.ma.core pass """ - def __init__(self, forwarding_rule="always"): + def __init__(self, forwarding_rule="always", _warn=True): + if _warn: + warnings.warn( + "NumPy warning suppression and assertion utilities are deprecated. " + "Use warnings.catch_warnings, warnings.filterwarnings, pytest.warns, " + "or pytest.filterwarnings instead. (Deprecated NumPy 2.4)", + DeprecationWarning, stacklevel=2) self._entered = False # Suppressions are either instance or defined inside one with block: @@ -2531,8 +2635,6 @@ def assert_no_gc_cycles(*args, **kwargs): with assert_no_gc_cycles(): do_something() - .. 
versionadded:: 1.15.0 - Parameters ---------- func : callable @@ -2610,7 +2712,7 @@ def check_free_memory(free_bytes): except ValueError as exc: raise ValueError(f'Invalid environment variable {env_var}: {exc}') - msg = (f'{free_bytes/1e9} GB memory required, but environment variable ' + msg = (f'{free_bytes / 1e9} GB memory required, but environment variable ' f'NPY_AVAILABLE_MEM={env_value} set') else: mem_free = _get_mem_available() @@ -2621,7 +2723,9 @@ def check_free_memory(free_bytes): "the test.") mem_free = -1 else: - msg = f'{free_bytes/1e9} GB memory required, but {mem_free/1e9} GB available' + free_bytes_gb = free_bytes / 1e9 + mem_free_gb = mem_free / 1e9 + msg = f'{free_bytes_gb} GB memory required, but {mem_free_gb} GB available' return msg if mem_free < free_bytes else None @@ -2633,8 +2737,9 @@ def _parse_size(size_str): 'kb': 1000, 'mb': 1000**2, 'gb': 1000**3, 'tb': 1000**4, 'kib': 1024, 'mib': 1024**2, 'gib': 1024**3, 'tib': 1024**4} - size_re = re.compile(r'^\s*(\d+|\d+\.\d+)\s*({0})\s*$'.format( - '|'.join(suffixes.keys())), re.I) + pipe_suffixes = "|".join(suffixes.keys()) + + size_re = re.compile(fr'^\s*(\d+|\d+\.\d+)\s*({pipe_suffixes})\s*$', re.I) m = size_re.match(size_str.lower()) if not m or m.group(2) not in suffixes: @@ -2697,3 +2802,66 @@ def _get_glibc_version(): _glibcver = _get_glibc_version() _glibc_older_than = lambda x: (_glibcver != '0.0' and _glibcver < x) + + +def run_threaded(func, max_workers=8, pass_count=False, + pass_barrier=False, outer_iterations=1, + prepare_args=None): + """Runs a function many times in parallel""" + for _ in range(outer_iterations): + with (concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) + as tpe): + if prepare_args is None: + args = [] + else: + args = prepare_args() + if pass_barrier: + barrier = threading.Barrier(max_workers) + args.append(barrier) + if pass_count: + all_args = [(func, i, *args) for i in range(max_workers)] + else: + all_args = [(func, *args) for i in 
range(max_workers)] + try: + futures = [] + for arg in all_args: + futures.append(tpe.submit(*arg)) + except RuntimeError as e: + import pytest + pytest.skip(f"Spawning {max_workers} threads failed with " + f"error {e!r} (likely due to resource limits on the " + "system running the tests)") + finally: + if len(futures) < max_workers and pass_barrier: + barrier.abort() + for f in futures: + f.result() + + +def requires_deep_recursion(func): + """Decorator to skip test if deep recursion is not supported.""" + import pytest + + @wraps(func) + def wrapper(*args, **kwargs): + if IS_PYSTON: + pytest.skip("Pyston disables recursion checking") + if IS_WASM: + pytest.skip("WASM has limited stack size") + if not IS_64BIT: + pytest.skip("32 bit Python has limited stack size") + cflags = sysconfig.get_config_var('CFLAGS') or '' + config_args = sysconfig.get_config_var('CONFIG_ARGS') or '' + address_sanitizer = ( + '-fsanitize=address' in cflags or + '--with-address-sanitizer' in config_args + ) + thread_sanitizer = ( + '-fsanitize=thread' in cflags or + '--with-thread-sanitizer' in config_args + ) + if address_sanitizer or thread_sanitizer: + pytest.skip("AddressSanitizer and ThreadSanitizer do not support " + "deep recursion") + return func(*args, **kwargs) + return wrapper diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index e2272ad2f7d0..f2953298f02f 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -1,99 +1,111 @@ -import os -import sys import ast +import sys import types -import warnings import unittest -import contextlib -from re import Pattern +import warnings +from _typeshed import ConvertibleToFloat, GenericPath, StrOrBytesPath, StrPath from collections.abc import Callable, Iterable, Sequence +from contextlib import _GeneratorContextManager +from pathlib import Path +from re import Pattern from typing import ( - Literal as L, Any, - AnyStr, ClassVar, + Final, + Generic, + Literal as L, NoReturn, + 
Self, + SupportsIndex, overload, type_check_only, - TypeVar, - Final, - SupportsIndex, ) -if sys.version_info >= (3, 10): - from typing import ParamSpec -else: - from typing_extensions import ParamSpec +from typing_extensions import TypeVar, deprecated +from unittest.case import SkipTest import numpy as np -from numpy import number, object_, _FloatValue from numpy._typing import ( - NDArray, ArrayLike, DTypeLike, + NDArray, + _ArrayLikeDT64_co, _ArrayLikeNumber_co, _ArrayLikeObject_co, _ArrayLikeTD64_co, - _ArrayLikeDT64_co, ) -from unittest.case import ( - SkipTest as SkipTest, -) - -_P = ParamSpec("_P") -_T = TypeVar("_T") -_ET = TypeVar("_ET", bound=BaseException) -_FT = TypeVar("_FT", bound=Callable[..., Any]) - -# Must return a bool or an ndarray/generic type -# that is supported by `np.logical_and.reduce` -_ComparisonFunc = Callable[ - [NDArray[Any], NDArray[Any]], - ( - bool - | np.bool - | number[Any] - | NDArray[np.bool | number[Any] | object_] - ) +__all__ = [ + "IS_EDITABLE", + "IS_MUSL", + "IS_PYPY", + "IS_PYSTON", + "IS_WASM", + "IS_INSTALLED", + "IS_64BIT", + "HAS_LAPACK64", + "HAS_REFCOUNT", + "BLAS_SUPPORTS_FPE", + "NOGIL_BUILD", + "NUMPY_ROOT", + "assert_", + "assert_array_almost_equal_nulp", + "assert_raises_regex", + "assert_array_max_ulp", + "assert_warns", + "assert_no_warnings", + "assert_allclose", + "assert_equal", + "assert_almost_equal", + "assert_approx_equal", + "assert_array_equal", + "assert_array_less", + "assert_string_equal", + "assert_array_almost_equal", + "assert_raises", + "build_err_msg", + "decorate_methods", + "jiffies", + "memusage", + "print_assert_equal", + "rundocs", + "runstring", + "verbose", + "measure", + "IgnoreException", + "clear_and_catch_warnings", + "SkipTest", + "KnownFailureException", + "temppath", + "tempdir", + "suppress_warnings", + "assert_array_compare", + "assert_no_gc_cycles", + "break_cycles", + "check_support_sve", + "run_threaded", ] -__all__: list[str] +### -class KnownFailureException(Exception): 
... -class IgnoreException(Exception): ... +_W_co = TypeVar("_W_co", bound=_WarnLog | None, default=_WarnLog | None, covariant=True) -class clear_and_catch_warnings(warnings.catch_warnings): - class_modules: ClassVar[tuple[types.ModuleType, ...]] - modules: set[types.ModuleType] - @overload - def __new__( - cls, - record: L[False] = ..., - modules: Iterable[types.ModuleType] = ..., - ) -> _clear_and_catch_warnings_without_records: ... - @overload - def __new__( - cls, - record: L[True], - modules: Iterable[types.ModuleType] = ..., - ) -> _clear_and_catch_warnings_with_records: ... - @overload - def __new__( - cls, - record: bool, - modules: Iterable[types.ModuleType] = ..., - ) -> clear_and_catch_warnings: ... - def __enter__(self) -> None | list[warnings.WarningMessage]: ... - def __exit__( - self, - __exc_type: None | type[BaseException] = ..., - __exc_val: None | BaseException = ..., - __exc_tb: None | types.TracebackType = ..., - ) -> None: ... +type _StrLike = str | bytes +type _RegexLike = _StrLike | Pattern[Any] +type _NumericArrayLike = _ArrayLikeNumber_co | _ArrayLikeObject_co + +type _ExceptionSpec[ExceptionT: BaseException] = type[ExceptionT] | tuple[type[ExceptionT], ...] +type _WarningSpec = type[Warning] +type _WarnLog = list[warnings.WarningMessage] +type _ToModules = Iterable[types.ModuleType] + +# Must return a bool or an ndarray/generic type that is supported by `np.logical_and.reduce` +type _ComparisonFunc = Callable[ + [np.ndarray, np.ndarray], + bool | np.bool | np.number | NDArray[np.bool | np.number | np.object_], +] # Type-check only `clear_and_catch_warnings` subclasses for both values of the # `record` parameter. Copied from the stdlib `warnings` stubs. - @type_check_only class _clear_and_catch_warnings_with_records(clear_and_catch_warnings): def __enter__(self) -> list[warnings.WarningMessage]: ... 
@@ -102,315 +114,388 @@ class _clear_and_catch_warnings_with_records(clear_and_catch_warnings): class _clear_and_catch_warnings_without_records(clear_and_catch_warnings): def __enter__(self) -> None: ... +### + +verbose: int = 0 +NUMPY_ROOT: Final[Path] = ... +IS_INSTALLED: Final[bool] = ... +IS_EDITABLE: Final[bool] = ... +IS_MUSL: Final[bool] = ... +IS_PYPY: Final[bool] = ... +IS_PYSTON: Final[bool] = ... +IS_WASM: Final[bool] = ... +IS_64BIT: Final[bool] = ... +HAS_REFCOUNT: Final[bool] = ... +HAS_LAPACK64: Final[bool] = ... +BLAS_SUPPORTS_FPE: Final[bool] = ... +NOGIL_BUILD: Final[bool] = ... + +class KnownFailureException(Exception): ... +class IgnoreException(Exception): ... + +class clear_and_catch_warnings(warnings.catch_warnings[_W_co], Generic[_W_co]): + class_modules: ClassVar[tuple[types.ModuleType, ...]] = () + modules: Final[set[types.ModuleType]] + @overload # record: True + def __init__(self: clear_and_catch_warnings[_WarnLog], /, record: L[True], modules: _ToModules = ()) -> None: ... + @overload # record: False (default) + def __init__(self: clear_and_catch_warnings[None], /, record: L[False] = False, modules: _ToModules = ()) -> None: ... + @overload # record; bool + def __init__(self, /, record: bool, modules: _ToModules = ()) -> None: ... + +@deprecated("Please use warnings.filterwarnings or pytest.mark.filterwarnings instead") class suppress_warnings: - log: list[warnings.WarningMessage] - def __init__( - self, - forwarding_rule: L["always", "module", "once", "location"] = ..., - ) -> None: ... - def filter( - self, - category: type[Warning] = ..., - message: str = ..., - module: None | types.ModuleType = ..., - ) -> None: ... - def record( - self, - category: type[Warning] = ..., - message: str = ..., - module: None | types.ModuleType = ..., - ) -> list[warnings.WarningMessage]: ... - def __enter__(self: _T) -> _T: ... 
- def __exit__( - self, - __exc_type: None | type[BaseException] = ..., - __exc_val: None | BaseException = ..., - __exc_tb: None | types.TracebackType = ..., - ) -> None: ... - def __call__(self, func: _FT) -> _FT: ... - -verbose: int -IS_PYPY: Final[bool] -IS_PYSTON: Final[bool] -HAS_REFCOUNT: Final[bool] -HAS_LAPACK64: Final[bool] - -def assert_(val: object, msg: str | Callable[[], str] = ...) -> None: ... + log: Final[_WarnLog] + def __init__(self, /, forwarding_rule: L["always", "module", "once", "location"] = "always") -> None: ... + def __enter__(self) -> Self: ... + def __exit__(self, cls: type[BaseException] | None, exc: BaseException | None, tb: types.TracebackType | None, /) -> None: ... + def __call__[FuncT: Callable[..., Any]](self, /, func: FuncT) -> FuncT: ... + + # + def filter(self, /, category: type[Warning] = ..., message: str = "", module: types.ModuleType | None = None) -> None: ... + def record(self, /, category: type[Warning] = ..., message: str = "", module: types.ModuleType | None = None) -> _WarnLog: ... # Contrary to runtime we can't do `os.name` checks while type checking, # only `sys.platform` checks if sys.platform == "win32" or sys.platform == "cygwin": - def memusage(processName: str = ..., instance: int = ...) -> int: ... + def memusage(processName: str = "python", instance: int = 0) -> int: ... elif sys.platform == "linux": - def memusage(_proc_pid_stat: str | bytes | os.PathLike[Any] = ...) -> None | int: ... + def memusage(_proc_pid_stat: StrOrBytesPath | None = None) -> int | None: ... else: def memusage() -> NoReturn: ... if sys.platform == "linux": - def jiffies( - _proc_pid_stat: str | bytes | os.PathLike[Any] = ..., - _load_time: list[float] = ..., - ) -> int: ... + def jiffies(_proc_pid_stat: StrOrBytesPath | None = None, _load_time: list[float] | None = None) -> int: ... else: - def jiffies(_load_time: list[float] = ...) -> int: ... + def jiffies(_load_time: list[float] = []) -> int: ... 
+# def build_err_msg( arrays: Iterable[object], - err_msg: str, - header: str = ..., - verbose: bool = ..., - names: Sequence[str] = ..., - precision: None | SupportsIndex = ..., + err_msg: object, + header: str = "Items are not equal:", + verbose: bool = True, + names: Sequence[str] = ("ACTUAL", "DESIRED"), # = ('ACTUAL', 'DESIRED') + precision: SupportsIndex | None = 8, ) -> str: ... +# +def print_assert_equal(test_string: str, actual: object, desired: object) -> None: ... + +# +def assert_(val: object, msg: str | Callable[[], str] = "") -> None: ... + +# def assert_equal( actual: object, desired: object, - err_msg: object = ..., - verbose: bool = ..., + err_msg: object = "", + verbose: bool = True, *, - strict: bool = ... -) -> None: ... - -def print_assert_equal( - test_string: str, - actual: object, - desired: object, + strict: bool = False, ) -> None: ... def assert_almost_equal( - actual: _ArrayLikeNumber_co | _ArrayLikeObject_co, - desired: _ArrayLikeNumber_co | _ArrayLikeObject_co, - decimal: int = ..., - err_msg: object = ..., - verbose: bool = ..., + actual: _NumericArrayLike, + desired: _NumericArrayLike, + decimal: int = 7, + err_msg: object = "", + verbose: bool = True, ) -> None: ... -# Anything that can be coerced into `builtins.float` +# def assert_approx_equal( - actual: _FloatValue, - desired: _FloatValue, - significant: int = ..., - err_msg: object = ..., - verbose: bool = ..., + actual: ConvertibleToFloat, + desired: ConvertibleToFloat, + significant: int = 7, + err_msg: object = "", + verbose: bool = True, ) -> None: ... +# def assert_array_compare( comparison: _ComparisonFunc, x: ArrayLike, y: ArrayLike, - err_msg: object = ..., - verbose: bool = ..., - header: str = ..., - precision: SupportsIndex = ..., - equal_nan: bool = ..., - equal_inf: bool = ..., + err_msg: object = "", + verbose: bool = True, + header: str = "", + precision: SupportsIndex = 6, + equal_nan: bool = True, + equal_inf: bool = True, *, - strict: bool = ... 
+ strict: bool = False, + names: tuple[str, str] = ("ACTUAL", "DESIRED"), ) -> None: ... +# def assert_array_equal( - x: ArrayLike, - y: ArrayLike, - /, - err_msg: object = ..., - verbose: bool = ..., + actual: object, + desired: object, + err_msg: object = "", + verbose: bool = True, *, - strict: bool = ... + strict: bool = False, ) -> None: ... +# def assert_array_almost_equal( - x: _ArrayLikeNumber_co | _ArrayLikeObject_co, - y: _ArrayLikeNumber_co | _ArrayLikeObject_co, - /, - decimal: float = ..., - err_msg: object = ..., - verbose: bool = ..., + actual: _NumericArrayLike, + desired: _NumericArrayLike, + decimal: float = 6, + err_msg: object = "", + verbose: bool = True, ) -> None: ... @overload def assert_array_less( - x: _ArrayLikeNumber_co | _ArrayLikeObject_co, - y: _ArrayLikeNumber_co | _ArrayLikeObject_co, - err_msg: object = ..., - verbose: bool = ..., + x: _ArrayLikeDT64_co, + y: _ArrayLikeDT64_co, + err_msg: object = "", + verbose: bool = True, *, - strict: bool = ... + strict: bool = False, ) -> None: ... @overload def assert_array_less( x: _ArrayLikeTD64_co, y: _ArrayLikeTD64_co, - err_msg: object = ..., - verbose: bool = ..., + err_msg: object = "", + verbose: bool = True, *, - strict: bool = ... + strict: bool = False, ) -> None: ... @overload def assert_array_less( - x: _ArrayLikeDT64_co, - y: _ArrayLikeDT64_co, - err_msg: object = ..., - verbose: bool = ..., + x: _NumericArrayLike, + y: _NumericArrayLike, + err_msg: object = "", + verbose: bool = True, *, - strict: bool = ... + strict: bool = False, ) -> None: ... -def runstring( - astr: str | bytes | types.CodeType, - dict: None | dict[str, Any], -) -> Any: ... - +# def assert_string_equal(actual: str, desired: str) -> None: ... -def rundocs( - filename: None | str | os.PathLike[str] = ..., - raise_on_error: bool = ..., -) -> None: ... - -def raises(*args: type[BaseException]) -> Callable[[_FT], _FT]: ... 
- +# @overload -def assert_raises( # type: ignore - expected_exception: type[BaseException] | tuple[type[BaseException], ...], - callable: Callable[_P, Any], +def assert_raises[ExceptionT: BaseException]( + exception_class: _ExceptionSpec[ExceptionT], /, - *args: _P.args, - **kwargs: _P.kwargs, -) -> None: ... -@overload -def assert_raises( - expected_exception: type[_ET] | tuple[type[_ET], ...], *, - msg: None | str = ..., -) -> unittest.case._AssertRaisesContext[_ET]: ... - + msg: str | None = None, +) -> unittest.case._AssertRaisesContext[ExceptionT]: ... @overload -def assert_raises_regex( - expected_exception: type[BaseException] | tuple[type[BaseException], ...], - expected_regex: str | bytes | Pattern[Any], - callable: Callable[_P, Any], +def assert_raises[**Tss]( + exception_class: _ExceptionSpec[BaseException], + callable: Callable[Tss, Any], /, - *args: _P.args, - **kwargs: _P.kwargs, + *args: Tss.args, + **kwargs: Tss.kwargs, ) -> None: ... + +# @overload -def assert_raises_regex( - expected_exception: type[_ET] | tuple[type[_ET], ...], - expected_regex: str | bytes | Pattern[Any], +def assert_raises_regex[ExceptionT: BaseException]( + exception_class: _ExceptionSpec[ExceptionT], + expected_regexp: _RegexLike, *, - msg: None | str = ..., -) -> unittest.case._AssertRaisesContext[_ET]: ... - -def decorate_methods( - cls: type[Any], - decorator: Callable[[Callable[..., Any]], Any], - testmatch: None | str | bytes | Pattern[Any] = ..., + msg: str | None = None, +) -> unittest.case._AssertRaisesContext[ExceptionT]: ... +@overload +def assert_raises_regex[**Tss]( + exception_class: _ExceptionSpec[BaseException], + expected_regexp: _RegexLike, + callable: Callable[Tss, Any], + *args: Tss.args, + **kwargs: Tss.kwargs, ) -> None: ... -def measure( - code_str: str | bytes | ast.mod | ast.AST, - times: int = ..., - label: None | str = ..., -) -> float: ... 
- +# @overload def assert_allclose( - actual: _ArrayLikeNumber_co | _ArrayLikeObject_co, - desired: _ArrayLikeNumber_co | _ArrayLikeObject_co, - rtol: float = ..., - atol: float = ..., - equal_nan: bool = ..., - err_msg: object = ..., - verbose: bool = ..., + actual: _ArrayLikeTD64_co, + desired: _ArrayLikeTD64_co, + rtol: float = 1e-7, + atol: float | np.timedelta64 = 0, + equal_nan: bool = True, + err_msg: object = "", + verbose: bool = True, *, - strict: bool = ... + strict: bool = False, ) -> None: ... @overload def assert_allclose( - actual: _ArrayLikeTD64_co, - desired: _ArrayLikeTD64_co, - rtol: float = ..., - atol: float = ..., - equal_nan: bool = ..., - err_msg: object = ..., - verbose: bool = ..., + actual: _NumericArrayLike, + desired: _NumericArrayLike, + rtol: float = 1e-7, + atol: float = 0, + equal_nan: bool = True, + err_msg: object = "", + verbose: bool = True, *, - strict: bool = ... + strict: bool = False, ) -> None: ... +# def assert_array_almost_equal_nulp( x: _ArrayLikeNumber_co, y: _ArrayLikeNumber_co, - nulp: float = ..., + nulp: float = 1, ) -> None: ... +# def assert_array_max_ulp( a: _ArrayLikeNumber_co, b: _ArrayLikeNumber_co, - maxulp: float = ..., - dtype: DTypeLike = ..., + maxulp: float = 1, + dtype: DTypeLike | None = None, ) -> NDArray[Any]: ... +# @overload -def assert_warns( - warning_class: type[Warning], -) -> contextlib._GeneratorContextManager[None]: ... +@deprecated("Please use warnings.catch_warnings or pytest.warns instead") +def assert_warns(warning_class: _WarningSpec) -> _GeneratorContextManager[None]: ... @overload -def assert_warns( - warning_class: type[Warning], - func: Callable[_P, _T], - /, - *args: _P.args, - **kwargs: _P.kwargs, -) -> _T: ... +@deprecated("Please use warnings.catch_warnings or pytest.warns instead") +def assert_warns[**Tss, ReturnT]( + warning_class: _WarningSpec, + func: Callable[Tss, ReturnT], + *args: Tss.args, + **kwargs: Tss.kwargs, +) -> ReturnT: ... 
+ +# +@overload +def assert_no_warnings() -> _GeneratorContextManager[None]: ... +@overload +def assert_no_warnings[**Tss, ReturnT](func: Callable[Tss, ReturnT], /, *args: Tss.args, **kwargs: Tss.kwargs) -> ReturnT: ... +# @overload -def assert_no_warnings() -> contextlib._GeneratorContextManager[None]: ... +def assert_no_gc_cycles() -> _GeneratorContextManager[None]: ... @overload -def assert_no_warnings( - func: Callable[_P, _T], - /, - *args: _P.args, - **kwargs: _P.kwargs, -) -> _T: ... +def assert_no_gc_cycles[**Tss](func: Callable[Tss, Any], /, *args: Tss.args, **kwargs: Tss.kwargs) -> None: ... + +### +# @overload def tempdir( - suffix: None = ..., - prefix: None = ..., - dir: None = ..., -) -> contextlib._GeneratorContextManager[str]: ... + suffix: None = None, + prefix: None = None, + dir: None = None, +) -> _GeneratorContextManager[str]: ... @overload -def tempdir( - suffix: None | AnyStr = ..., - prefix: None | AnyStr = ..., - dir: None | AnyStr | os.PathLike[AnyStr] = ..., -) -> contextlib._GeneratorContextManager[AnyStr]: ... +def tempdir[AnyStr: (bytes, str)]( + suffix: AnyStr | None = None, + prefix: AnyStr | None = None, + *, + dir: GenericPath[AnyStr], +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def tempdir[AnyStr: (bytes, str)]( + suffix: AnyStr | None = None, + *, + prefix: AnyStr, + dir: GenericPath[AnyStr] | None = None, +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def tempdir[AnyStr: (bytes, str)]( + suffix: AnyStr, + prefix: AnyStr | None = None, + dir: GenericPath[AnyStr] | None = None, +) -> _GeneratorContextManager[AnyStr]: ... +# @overload def temppath( - suffix: None = ..., - prefix: None = ..., - dir: None = ..., - text: bool = ..., -) -> contextlib._GeneratorContextManager[str]: ... + suffix: None = None, + prefix: None = None, + dir: None = None, + text: bool = False, +) -> _GeneratorContextManager[str]: ... 
@overload -def temppath( - suffix: None | AnyStr = ..., - prefix: None | AnyStr = ..., - dir: None | AnyStr | os.PathLike[AnyStr] = ..., - text: bool = ..., -) -> contextlib._GeneratorContextManager[AnyStr]: ... +def temppath[AnyStr: (bytes, str)]( + suffix: AnyStr | None, + prefix: AnyStr | None, + dir: GenericPath[AnyStr], + text: bool = False, +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def temppath[AnyStr: (bytes, str)]( + suffix: AnyStr | None = None, + prefix: AnyStr | None = None, + *, + dir: GenericPath[AnyStr], + text: bool = False, +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def temppath[AnyStr: (bytes, str)]( + suffix: AnyStr | None, + prefix: AnyStr, + dir: GenericPath[AnyStr] | None = None, + text: bool = False, +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def temppath[AnyStr: (bytes, str)]( + suffix: AnyStr | None = None, + *, + prefix: AnyStr, + dir: GenericPath[AnyStr] | None = None, + text: bool = False, +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def temppath[AnyStr: (bytes, str)]( + suffix: AnyStr, + prefix: AnyStr | None = None, + dir: GenericPath[AnyStr] | None = None, + text: bool = False, +) -> _GeneratorContextManager[AnyStr]: ... + +# +def check_support_sve(__cache: list[bool] = ..., /) -> bool: ... # stubdefaulter: ignore[missing-default] + +# +def decorate_methods( + cls: type, + decorator: Callable[[Callable[..., Any]], Any], + testmatch: _RegexLike | None = None, +) -> None: ... +# @overload -def assert_no_gc_cycles() -> contextlib._GeneratorContextManager[None]: ... +def run_threaded( + func: Callable[[], None], + max_workers: int = 8, + pass_count: bool = False, + pass_barrier: bool = False, + outer_iterations: int = 1, + prepare_args: None = None, +) -> None: ... 
@overload -def assert_no_gc_cycles( - func: Callable[_P, Any], - /, - *args: _P.args, - **kwargs: _P.kwargs, +def run_threaded[*Ts]( + func: Callable[[*Ts], None], + max_workers: int, + pass_count: bool, + pass_barrier: bool, + outer_iterations: int, + prepare_args: tuple[*Ts], +) -> None: ... +@overload +def run_threaded[*Ts]( + func: Callable[[*Ts], None], + max_workers: int = 8, + pass_count: bool = False, + pass_barrier: bool = False, + outer_iterations: int = 1, + *, + prepare_args: tuple[*Ts], ) -> None: ... +# +def runstring(astr: _StrLike | types.CodeType, dict: dict[str, Any] | None) -> Any: ... +def rundocs(filename: StrPath | None = None, raise_on_error: bool = True) -> None: ... +def measure(code_str: _StrLike | ast.AST, times: int = 1, label: str | None = None) -> float: ... def break_cycles() -> None: ... diff --git a/numpy/testing/overrides.py b/numpy/testing/overrides.py index 98bed23c4f45..61771c4c0b58 100644 --- a/numpy/testing/overrides.py +++ b/numpy/testing/overrides.py @@ -3,9 +3,10 @@ """ -from numpy._core.overrides import ARRAY_FUNCTIONS as _array_functions -from numpy import ufunc as _ufunc import numpy._core.umath as _umath +from numpy import ufunc as _ufunc +from numpy._core.overrides import ARRAY_FUNCTIONS as _array_functions + def get_overridable_numpy_ufuncs(): """List all numpy ufuncs overridable via `__array_ufunc__` @@ -22,7 +23,7 @@ def get_overridable_numpy_ufuncs(): ufuncs = {obj for obj in _umath.__dict__.values() if isinstance(obj, _ufunc)} return ufuncs - + def allows_array_ufunc_override(func): """Determine if a function can be overridden via `__array_ufunc__` @@ -44,7 +45,7 @@ def allows_array_ufunc_override(func): will work correctly for ufuncs defined outside of Numpy. 
""" - return isinstance(func, np.ufunc) + return isinstance(func, _ufunc) def get_overridable_numpy_array_functions(): @@ -63,7 +64,7 @@ def get_overridable_numpy_array_functions(): """ # 'import numpy' doesn't import recfunctions, so make sure it's imported # so ufuncs defined there show up in the ufunc listing - from numpy.lib import recfunctions + from numpy.lib import recfunctions # noqa: F401 return _array_functions.copy() def allows_array_function_override(func): diff --git a/numpy/testing/overrides.pyi b/numpy/testing/overrides.pyi new file mode 100644 index 000000000000..916154c155b1 --- /dev/null +++ b/numpy/testing/overrides.pyi @@ -0,0 +1,10 @@ +from collections.abc import Callable, Hashable +from typing import Any +from typing_extensions import TypeIs + +import numpy as np + +def get_overridable_numpy_ufuncs() -> set[np.ufunc]: ... +def get_overridable_numpy_array_functions() -> set[Callable[..., Any]]: ... +def allows_array_ufunc_override(func: object) -> TypeIs[np.ufunc]: ... +def allows_array_function_override(func: Hashable) -> bool: ... 
diff --git a/numpy/testing/print_coercion_tables.py b/numpy/testing/print_coercion_tables.py index 649c1cd6bc21..89f0de3932ed 100755 --- a/numpy/testing/print_coercion_tables.py +++ b/numpy/testing/print_coercion_tables.py @@ -2,9 +2,11 @@ """Prints type-coercion tables for the built-in NumPy types """ +from collections import namedtuple + import numpy as np from numpy._core.numerictypes import obj2sctype -from collections import namedtuple + # Generic object that can be added, but doesn't do anything else class GenericObject: @@ -40,7 +42,8 @@ def print_cancast_table(ntypes): print(cast, end=' ') print() -def print_coercion_table(ntypes, inputfirstvalue, inputsecondvalue, firstarray, use_promote_types=False): +def print_coercion_table(ntypes, inputfirstvalue, inputsecondvalue, firstarray, + use_promote_types=False): print('+', end=' ') for char in ntypes: print(char, end=' ') @@ -96,7 +99,7 @@ def print_new_cast_table(*, can_cast=True, legacy=False, flags=False): 4: ".", # unsafe casting } flags_table = { - 0 : "▗", 7: "█", + 0: "▗", 7: "█", 1: "▚", 2: "▐", 4: "▄", 3: "▜", 5: "▙", 6: "▟", @@ -132,6 +135,7 @@ def print_new_cast_table(*, can_cast=True, legacy=False, flags=False): # The np.dtype(x.type) is a bit strange, because dtype classes do # not expose much yet. types = np.typecodes["All"] + def sorter(x): # This is a bit weird hack, to get a table as close as possible to # the one printing all typecodes (but expecting user-dtypes). 
@@ -171,8 +175,10 @@ def print_table(field="can_cast"): if flags: print() - print(f"{flags_table[0]}: no flags, {flags_table[1]}: PyAPI, " - f"{flags_table[2]}: supports unaligned, {flags_table[4]}: no-float-errors") + print(f"{flags_table[0]}: no flags, " + f"{flags_table[1]}: PyAPI, " + f"{flags_table[2]}: supports unaligned, " + f"{flags_table[4]}: no-float-errors") print() print_table("flags") diff --git a/numpy/testing/print_coercion_tables.pyi b/numpy/testing/print_coercion_tables.pyi new file mode 100644 index 000000000000..f463a18c05e4 --- /dev/null +++ b/numpy/testing/print_coercion_tables.pyi @@ -0,0 +1,26 @@ +from collections.abc import Iterable +from typing import ClassVar, Generic, Self +from typing_extensions import TypeVar + +import numpy as np + +_VT_co = TypeVar("_VT_co", default=object, covariant=True) + +# undocumented +class GenericObject(Generic[_VT_co]): + dtype: ClassVar[np.dtype[np.object_]] = ... + v: _VT_co + + def __init__(self, /, v: _VT_co) -> None: ... + def __add__(self, other: object, /) -> Self: ... + def __radd__(self, other: object, /) -> Self: ... + +def print_cancast_table(ntypes: Iterable[str]) -> None: ... +def print_coercion_table( + ntypes: Iterable[str], + inputfirstvalue: int, + inputsecondvalue: int, + firstarray: bool, + use_promote_types: bool = False, +) -> None: ... +def print_new_cast_table(*, can_cast: bool = True, legacy: bool = False, flags: bool = False) -> None: ... 
diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index 247bbeaec6f7..6d43343ef98a 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -1,25 +1,44 @@ -import warnings -import sys -import os import itertools -import pytest -import weakref +import os import re +import sys +import warnings +import weakref + +import pytest import numpy as np import numpy._core._multiarray_umath as ncu from numpy.testing import ( - assert_equal, assert_array_equal, assert_almost_equal, - assert_array_almost_equal, assert_array_less, build_err_msg, - assert_raises, assert_warns, assert_no_warnings, assert_allclose, - assert_approx_equal, assert_array_almost_equal_nulp, assert_array_max_ulp, - clear_and_catch_warnings, suppress_warnings, assert_string_equal, assert_, - tempdir, temppath, assert_no_gc_cycles, HAS_REFCOUNT + HAS_REFCOUNT, + assert_, + assert_allclose, + assert_almost_equal, + assert_approx_equal, + assert_array_almost_equal, + assert_array_almost_equal_nulp, + assert_array_equal, + assert_array_less, + assert_array_max_ulp, + assert_equal, + assert_no_gc_cycles, + assert_no_warnings, + assert_raises, + assert_string_equal, + assert_warns, + build_err_msg, + clear_and_catch_warnings, + suppress_warnings, + tempdir, + temppath, ) class _GenericTest: + def _assert_func(self, *args, **kwargs): + pass + def _test_equal(self, a, b): self._assert_func(a, b) @@ -66,8 +85,8 @@ def test_array_likes(self): class TestArrayEqual(_GenericTest): - def setup_method(self): - self._assert_func = assert_array_equal + def _assert_func(self, *args, **kwargs): + assert_array_equal(*args, **kwargs) def test_generic_rank1(self): """Test rank 1 array for all dtypes.""" @@ -181,6 +200,40 @@ def test_masked_nan_inf(self): self._test_equal(a, b) self._test_equal(b, a) + # Also provides test cases for gh-11121 + def test_masked_scalar(self): + # Test masked scalar vs. 
plain/masked scalar + for a_val, b_val, b_masked in itertools.product( + [3., np.nan, np.inf], + [3., 4., np.nan, np.inf, -np.inf], + [False, True], + ): + a = np.ma.MaskedArray(a_val, mask=True) + b = np.ma.MaskedArray(b_val, mask=True) if b_masked else np.array(b_val) + self._test_equal(a, b) + self._test_equal(b, a) + + # Test masked scalar vs. plain array + for a_val, b_val in itertools.product( + [3., np.nan, -np.inf], + itertools.product([3., 4., np.nan, np.inf, -np.inf], repeat=2), + ): + a = np.ma.MaskedArray(a_val, mask=True) + b = np.array(b_val) + self._test_equal(a, b) + self._test_equal(b, a) + + # Test masked scalar vs. masked array + for a_val, b_val, b_mask in itertools.product( + [3., np.nan, np.inf], + itertools.product([3., 4., np.nan, np.inf, -np.inf], repeat=2), + itertools.product([False, True], repeat=2), + ): + a = np.ma.MaskedArray(a_val, mask=True) + b = np.ma.MaskedArray(b_val, mask=b_mask) + self._test_equal(a, b) + self._test_equal(b, a) + def test_subclass_that_overrides_eq(self): # While we cannot guarantee testing functions will always work for # subclasses, the tests should ideally rely only on subclasses having @@ -249,6 +302,8 @@ def test_array_vs_array_not_equal(self): b = np.array([34986, 545676, 439655, 0]) expected_msg = ('Mismatched elements: 1 / 4 (25%)\n' + 'Mismatch at index:\n' + ' [3]: 563766 (ACTUAL), 0 (DESIRED)\n' 'Max absolute difference among violations: 563766\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -256,6 +311,9 @@ def test_array_vs_array_not_equal(self): a = np.array([34986, 545676, 439655.2, 563766]) expected_msg = ('Mismatched elements: 2 / 4 (50%)\n' + 'Mismatch at indices:\n' + ' [2]: 439655.2 (ACTUAL), 439655 (DESIRED)\n' + ' [3]: 563766.0 (ACTUAL), 0 (DESIRED)\n' 'Max absolute difference among violations: ' '563766.\n' 'Max relative difference among violations: ' @@ -334,8 +392,8 @@ def 
test_build_err_msg_custom_precision(self): class TestEqual(TestArrayEqual): - def setup_method(self): - self._assert_func = assert_equal + def _assert_func(self, *args, **kwargs): + assert_equal(*args, **kwargs) def test_nan_items(self): self._assert_func(np.nan, np.nan) @@ -429,8 +487,8 @@ def test_object(self): class TestArrayAlmostEqual(_GenericTest): - def setup_method(self): - self._assert_func = assert_array_almost_equal + def _assert_func(self, *args, **kwargs): + assert_array_almost_equal(*args, **kwargs) def test_closeness(self): # Note that in the course of time we ended up with @@ -450,6 +508,8 @@ def test_closeness(self): self._assert_func([1.499999], [0.0], decimal=0) expected_msg = ('Mismatched elements: 1 / 1 (100%)\n' + 'Mismatch at index:\n' + ' [0]: 1.5 (ACTUAL), 0.0 (DESIRED)\n' 'Max absolute difference among violations: 1.5\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -458,12 +518,16 @@ def test_closeness(self): a = [1.4999999, 0.00003] b = [1.49999991, 0] expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [1]: 3e-05 (ACTUAL), 0.0 (DESIRED)\n' 'Max absolute difference among violations: 3.e-05\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): self._assert_func(a, b, decimal=7) expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [1]: 0.0 (ACTUAL), 3e-05 (DESIRED)\n' 'Max absolute difference among violations: 3.e-05\n' 'Max relative difference among violations: 1.') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -477,6 +541,8 @@ def test_simple(self): self._assert_func(x, y, decimal=4) expected_msg = ('Mismatched elements: 1 / 1 (100%)\n' + 'Mismatch at index:\n' + ' [0]: 1234.2222 (ACTUAL), 1234.2223 (DESIRED)\n' 'Max absolute difference among violations: ' '1.e-04\n' 'Max relative difference among violations: ' @@ -488,6 
+554,9 @@ def test_array_vs_scalar(self): a = [5498.42354, 849.54345, 0.00] b = 5498.42354 expected_msg = ('Mismatched elements: 2 / 3 (66.7%)\n' + 'Mismatch at indices:\n' + ' [1]: 849.54345 (ACTUAL), 5498.42354 (DESIRED)\n' + ' [2]: 0.0 (ACTUAL), 5498.42354 (DESIRED)\n' 'Max absolute difference among violations: ' '5498.42354\n' 'Max relative difference among violations: 1.') @@ -495,6 +564,9 @@ def test_array_vs_scalar(self): self._assert_func(a, b, decimal=9) expected_msg = ('Mismatched elements: 2 / 3 (66.7%)\n' + 'Mismatch at indices:\n' + ' [1]: 5498.42354 (ACTUAL), 849.54345 (DESIRED)\n' + ' [2]: 5498.42354 (ACTUAL), 0.0 (DESIRED)\n' 'Max absolute difference among violations: ' '5498.42354\n' 'Max relative difference among violations: 5.4722099') @@ -503,6 +575,8 @@ def test_array_vs_scalar(self): a = [5498.42354, 0.00] expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [1]: 5498.42354 (ACTUAL), 0.0 (DESIRED)\n' 'Max absolute difference among violations: ' '5498.42354\n' 'Max relative difference among violations: inf') @@ -511,6 +585,8 @@ def test_array_vs_scalar(self): b = 0 expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [0]: 5498.42354 (ACTUAL), 0 (DESIRED)\n' 'Max absolute difference among violations: ' '5498.42354\n' 'Max relative difference among violations: inf') @@ -539,6 +615,18 @@ def test_inf(self): assert_raises(AssertionError, lambda: self._assert_func(a, b)) + def test_complex_inf(self): + a = np.array([np.inf + 1.j, 2. + 1.j, 3. + 1.j]) + b = a.copy() + self._assert_func(a, b) + b[1] = 3. 
+ 1.j + expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n' + 'Mismatch at index:\n' + ' [1]: (2+1j) (ACTUAL), (3+1j) (DESIRED)\n' + 'Max absolute difference among violations: 1.\n') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(a, b) + def test_subclass(self): a = np.array([[1., 2.], [3., 4.]]) b = np.ma.masked_array([[1., 2.], [0., 4.]], @@ -587,6 +675,8 @@ def all(self, *args, **kwargs): all(z) b = np.array([1., 202]).view(MyArray) expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [1]: 2.0 (ACTUAL), 202.0 (DESIRED)\n' 'Max absolute difference among violations: 200.\n' 'Max relative difference among violations: 0.99009') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -613,8 +703,8 @@ def all(self, *args, **kwargs): class TestAlmostEqual(_GenericTest): - def setup_method(self): - self._assert_func = assert_almost_equal + def _assert_func(self, *args, **kwargs): + assert_almost_equal(*args, **kwargs) def test_closeness(self): # Note that in the course of time we ended up with @@ -677,6 +767,10 @@ def test_error_message(self): # Test with a different amount of decimal digits expected_msg = ('Mismatched elements: 3 / 3 (100%)\n' + 'Mismatch at indices:\n' + ' [0]: 1.00000000001 (ACTUAL), 1.00000000002 (DESIRED)\n' + ' [1]: 2.00000000002 (ACTUAL), 2.00000000003 (DESIRED)\n' + ' [2]: 3.00003 (ACTUAL), 3.00004 (DESIRED)\n' 'Max absolute difference among violations: 1.e-05\n' 'Max relative difference among violations: ' '3.33328889e-06\n' @@ -692,6 +786,8 @@ def test_error_message(self): # differs. Note that we only check for the formatting of the arrays # themselves. 
expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n' + 'Mismatch at index:\n' + ' [2]: 3.00003 (ACTUAL), 3.00004 (DESIRED)\n' 'Max absolute difference among violations: 1.e-05\n' 'Max relative difference among violations: ' '3.33328889e-06\n' @@ -704,6 +800,8 @@ def test_error_message(self): x = np.array([np.inf, 0]) y = np.array([np.inf, 1]) expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [1]: 0.0 (ACTUAL), 1.0 (DESIRED)\n' 'Max absolute difference among violations: 1.\n' 'Max relative difference among violations: 1.\n' ' ACTUAL: array([inf, 0.])\n' @@ -715,6 +813,9 @@ def test_error_message(self): x = np.array([1, 2]) y = np.array([0, 0]) expected_msg = ('Mismatched elements: 2 / 2 (100%)\n' + 'Mismatch at indices:\n' + ' [0]: 1 (ACTUAL), 0 (DESIRED)\n' + ' [1]: 2 (ACTUAL), 0 (DESIRED)\n' 'Max absolute difference among violations: 2\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -726,6 +827,12 @@ def test_error_message_2(self): x = 2 y = np.ones(20) expected_msg = ('Mismatched elements: 20 / 20 (100%)\n' + 'First 5 mismatches are at indices:\n' + ' [0]: 2 (ACTUAL), 1.0 (DESIRED)\n' + ' [1]: 2 (ACTUAL), 1.0 (DESIRED)\n' + ' [2]: 2 (ACTUAL), 1.0 (DESIRED)\n' + ' [3]: 2 (ACTUAL), 1.0 (DESIRED)\n' + ' [4]: 2 (ACTUAL), 1.0 (DESIRED)\n' 'Max absolute difference among violations: 1.\n' 'Max relative difference among violations: 1.') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -734,6 +841,12 @@ def test_error_message_2(self): y = 2 x = np.ones(20) expected_msg = ('Mismatched elements: 20 / 20 (100%)\n' + 'First 5 mismatches are at indices:\n' + ' [0]: 1.0 (ACTUAL), 2 (DESIRED)\n' + ' [1]: 1.0 (ACTUAL), 2 (DESIRED)\n' + ' [2]: 1.0 (ACTUAL), 2 (DESIRED)\n' + ' [3]: 1.0 (ACTUAL), 2 (DESIRED)\n' + ' [4]: 1.0 (ACTUAL), 2 (DESIRED)\n' 'Max absolute difference among violations: 1.\n' 'Max relative difference among violations: 0.5') with 
pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -760,8 +873,8 @@ def all(self, *args, **kwargs): class TestApproxEqual: - def setup_method(self): - self._assert_func = assert_approx_equal + def _assert_func(self, *args, **kwargs): + assert_approx_equal(*args, **kwargs) def test_simple_0d_arrays(self): x = np.array(1234.22) @@ -803,8 +916,8 @@ def test_nan_items(self): class TestArrayAssertLess: - def setup_method(self): - self._assert_func = assert_array_less + def _assert_func(self, *args, **kwargs): + assert_array_less(*args, **kwargs) def test_simple_arrays(self): x = np.array([1.1, 2.2]) @@ -822,6 +935,9 @@ def test_simple_arrays(self): b = np.array([2, 4, 6, 8]) expected_msg = ('Mismatched elements: 2 / 4 (50%)\n' + 'Mismatch at indices:\n' + ' [2]: 6 (x), 6 (y)\n' + ' [3]: 20 (x), 8 (y)\n' 'Max absolute difference among violations: 12\n' 'Max relative difference among violations: 1.5') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -833,6 +949,11 @@ def test_rank2(self): self._assert_func(x, y) expected_msg = ('Mismatched elements: 4 / 4 (100%)\n' + 'Mismatch at indices:\n' + ' [0, 0]: 1.2 (x), 1.1 (y)\n' + ' [0, 1]: 2.3 (x), 2.2 (y)\n' + ' [1, 0]: 3.4 (x), 3.3 (y)\n' + ' [1, 1]: 4.5 (x), 4.4 (y)\n' 'Max absolute difference among violations: 0.1\n' 'Max relative difference among violations: 0.09090909') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -844,13 +965,15 @@ def test_rank2(self): def test_rank3(self): x = np.ones(shape=(2, 2, 2)) - y = np.ones(shape=(2, 2, 2))+1 + y = np.ones(shape=(2, 2, 2)) + 1 self._assert_func(x, y) assert_raises(AssertionError, lambda: self._assert_func(y, x)) y[0, 0, 0] = 0 expected_msg = ('Mismatched elements: 1 / 8 (12.5%)\n' + 'Mismatch at index:\n' + ' [0, 0, 0]: 1.0 (x), 0.0 (y)\n' 'Max absolute difference among violations: 1.\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -894,12 
+1017,20 @@ def test_simple_items_and_array(self): y = 999090.54 expected_msg = ('Mismatched elements: 1 / 12 (8.33%)\n' + 'Mismatch at index:\n' + ' [1, 1]: 999090.54 (x), 999090.54 (y)\n' 'Max absolute difference among violations: 0.\n' 'Max relative difference among violations: 0.') with pytest.raises(AssertionError, match=re.escape(expected_msg)): self._assert_func(x, y) expected_msg = ('Mismatched elements: 12 / 12 (100%)\n' + 'First 5 mismatches are at indices:\n' + ' [0, 0]: 999090.54 (x), 3.4536 (y)\n' + ' [0, 1]: 999090.54 (x), 2390.5436 (y)\n' + ' [0, 2]: 999090.54 (x), 435.54657 (y)\n' + ' [0, 3]: 999090.54 (x), 324525.4535 (y)\n' + ' [1, 0]: 999090.54 (x), 5449.54 (y)\n' 'Max absolute difference among violations: ' '999087.0864\n' 'Max relative difference among violations: ' @@ -912,12 +1043,17 @@ def test_zeroes(self): y = np.array(87654.) expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n' + 'Mismatch at index:\n' + ' [0]: 546456.0 (x), 87654.0 (y)\n' 'Max absolute difference among violations: 458802.\n' 'Max relative difference among violations: 5.23423917') with pytest.raises(AssertionError, match=re.escape(expected_msg)): self._assert_func(x, y) expected_msg = ('Mismatched elements: 2 / 3 (66.7%)\n' + 'Mismatch at indices:\n' + ' [1]: 87654.0 (x), 0.0 (y)\n' + ' [2]: 87654.0 (x), 15.455 (y)\n' 'Max absolute difference among violations: 87654.\n' 'Max relative difference among violations: ' '5670.5626011') @@ -927,12 +1063,18 @@ def test_zeroes(self): y = 0 expected_msg = ('Mismatched elements: 3 / 3 (100%)\n' + 'Mismatch at indices:\n' + ' [0]: 546456.0 (x), 0 (y)\n' + ' [1]: 0.0 (x), 0 (y)\n' + ' [2]: 15.455 (x), 0 (y)\n' 'Max absolute difference among violations: 546456.\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): self._assert_func(x, y) expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n' + 'Mismatch at index:\n' + ' [1]: 0 (x), 0.0 (y)\n' 'Max absolute difference 
among violations: 0.\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -1001,7 +1143,10 @@ def test_strict(self): with pytest.raises(AssertionError): self._assert_func(x, y.astype(np.float32), strict=True) - +@pytest.mark.filterwarnings( + "ignore:.*NumPy warning suppression and assertion utilities are deprecated" + ".*:DeprecationWarning") +@pytest.mark.thread_unsafe(reason="checks global module & deprecated warnings") class TestWarns: def test_warn(self): @@ -1118,12 +1263,16 @@ def test_simple(self): b = np.array([x, y, x, x]) c = np.array([x, y, x, z]) expected_msg = ('Mismatched elements: 1 / 4 (25%)\n' + 'Mismatch at index:\n' + ' [3]: 0.001 (ACTUAL), 0.0 (DESIRED)\n' 'Max absolute difference among violations: 0.001\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): assert_allclose(b, c) expected_msg = ('Mismatched elements: 1 / 4 (25%)\n' + 'Mismatch at index:\n' + ' [3]: 0.0 (ACTUAL), 0.001 (DESIRED)\n' 'Max absolute difference among violations: 0.001\n' 'Max relative difference among violations: 1.') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -1139,6 +1288,8 @@ def test_report_fail_percentage(self): b = np.array([1, 1, 1, 2]) expected_msg = ('Mismatched elements: 1 / 4 (25%)\n' + 'Mismatch at index:\n' + ' [3]: 1 (ACTUAL), 2 (DESIRED)\n' 'Max absolute difference among violations: 1\n' 'Max relative difference among violations: 0.5') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -1150,11 +1301,21 @@ def test_equal_nan(self): # Should not raise: assert_allclose(a, b, equal_nan=True) + a = np.array([complex(np.nan, np.inf)]) + b = np.array([complex(np.nan, np.inf)]) + assert_allclose(a, b, equal_nan=True) + b = np.array([complex(np.nan, -np.inf)]) + assert_allclose(a, b, equal_nan=True) + def test_not_equal_nan(self): a = np.array([np.nan]) b = np.array([np.nan]) 
assert_raises(AssertionError, assert_allclose, a, b, equal_nan=False) + a = np.array([complex(np.nan, np.inf)]) + b = np.array([complex(np.nan, np.inf)]) + assert_raises(AssertionError, assert_allclose, a, b, equal_nan=False) + def test_equal_nan_default(self): # Make sure equal_nan default behavior remains unchanged. (All # of these functions use assert_array_compare under the hood.) @@ -1203,6 +1364,33 @@ def test_strict(self): with pytest.raises(AssertionError): assert_allclose(x, x.astype(np.float32), strict=True) + def test_infs(self): + a = np.array([np.inf]) + b = np.array([np.inf]) + assert_allclose(a, b) + + b = np.array([3.]) + expected_msg = 'inf location mismatch:' + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(a, b) + + b = np.array([-np.inf]) + expected_msg = 'inf values mismatch:' + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(a, b) + b = np.array([complex(np.inf, 1.)]) + expected_msg = 'inf values mismatch:' + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(a, b) + + a = np.array([complex(np.inf, 1.)]) + b = np.array([complex(np.inf, 1.)]) + assert_allclose(a, b) + + b = np.array([complex(np.inf, 2.)]) + expected_msg = 'inf values mismatch:' + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(a, b) class TestArrayAlmostEqualNulp: @@ -1216,12 +1404,12 @@ def test_float64_pass(self): # Addition eps = np.finfo(x.dtype).eps - y = x + x*eps*nulp/2. + y = x + x * eps * nulp / 2. assert_array_almost_equal_nulp(x, y, nulp) # Subtraction epsneg = np.finfo(x.dtype).epsneg - y = x - x*epsneg*nulp/2. + y = x - x * epsneg * nulp / 2. assert_array_almost_equal_nulp(x, y, nulp) def test_float64_fail(self): @@ -1231,12 +1419,12 @@ def test_float64_fail(self): x = np.r_[-x, x] eps = np.finfo(x.dtype).eps - y = x + x*eps*nulp*2. + y = x + x * eps * nulp * 2. 
assert_raises(AssertionError, assert_array_almost_equal_nulp, x, y, nulp) epsneg = np.finfo(x.dtype).epsneg - y = x - x*epsneg*nulp*2. + y = x - x * epsneg * nulp * 2. assert_raises(AssertionError, assert_array_almost_equal_nulp, x, y, nulp) @@ -1258,11 +1446,11 @@ def test_float32_pass(self): x = np.r_[-x, x] eps = np.finfo(x.dtype).eps - y = x + x*eps*nulp/2. + y = x + x * eps * nulp / 2. assert_array_almost_equal_nulp(x, y, nulp) epsneg = np.finfo(x.dtype).epsneg - y = x - x*epsneg*nulp/2. + y = x - x * epsneg * nulp / 2. assert_array_almost_equal_nulp(x, y, nulp) def test_float32_fail(self): @@ -1272,12 +1460,12 @@ def test_float32_fail(self): x = np.r_[-x, x] eps = np.finfo(x.dtype).eps - y = x + x*eps*nulp*2. + y = x + x * eps * nulp * 2. assert_raises(AssertionError, assert_array_almost_equal_nulp, x, y, nulp) epsneg = np.finfo(x.dtype).epsneg - y = x - x*epsneg*nulp*2. + y = x - x * epsneg * nulp * 2. assert_raises(AssertionError, assert_array_almost_equal_nulp, x, y, nulp) @@ -1299,11 +1487,11 @@ def test_float16_pass(self): x = np.r_[-x, x] eps = np.finfo(x.dtype).eps - y = x + x*eps*nulp/2. + y = x + x * eps * nulp / 2. assert_array_almost_equal_nulp(x, y, nulp) epsneg = np.finfo(x.dtype).epsneg - y = x - x*epsneg*nulp/2. + y = x - x * epsneg * nulp / 2. assert_array_almost_equal_nulp(x, y, nulp) def test_float16_fail(self): @@ -1313,12 +1501,12 @@ def test_float16_fail(self): x = np.r_[-x, x] eps = np.finfo(x.dtype).eps - y = x + x*eps*nulp*2. + y = x + x * eps * nulp * 2. assert_raises(AssertionError, assert_array_almost_equal_nulp, x, y, nulp) epsneg = np.finfo(x.dtype).epsneg - y = x - x*epsneg*nulp*2. + y = x - x * epsneg * nulp * 2. assert_raises(AssertionError, assert_array_almost_equal_nulp, x, y, nulp) @@ -1338,100 +1526,100 @@ def test_complex128_pass(self): x = np.linspace(-20, 20, 50, dtype=np.float64) x = 10**x x = np.r_[-x, x] - xi = x + x*1j + xi = x + x * 1j eps = np.finfo(x.dtype).eps - y = x + x*eps*nulp/2. 
- assert_array_almost_equal_nulp(xi, x + y*1j, nulp) - assert_array_almost_equal_nulp(xi, y + x*1j, nulp) + y = x + x * eps * nulp / 2. + assert_array_almost_equal_nulp(xi, x + y * 1j, nulp) + assert_array_almost_equal_nulp(xi, y + x * 1j, nulp) # The test condition needs to be at least a factor of sqrt(2) smaller # because the real and imaginary parts both change - y = x + x*eps*nulp/4. - assert_array_almost_equal_nulp(xi, y + y*1j, nulp) + y = x + x * eps * nulp / 4. + assert_array_almost_equal_nulp(xi, y + y * 1j, nulp) epsneg = np.finfo(x.dtype).epsneg - y = x - x*epsneg*nulp/2. - assert_array_almost_equal_nulp(xi, x + y*1j, nulp) - assert_array_almost_equal_nulp(xi, y + x*1j, nulp) - y = x - x*epsneg*nulp/4. - assert_array_almost_equal_nulp(xi, y + y*1j, nulp) + y = x - x * epsneg * nulp / 2. + assert_array_almost_equal_nulp(xi, x + y * 1j, nulp) + assert_array_almost_equal_nulp(xi, y + x * 1j, nulp) + y = x - x * epsneg * nulp / 4. + assert_array_almost_equal_nulp(xi, y + y * 1j, nulp) def test_complex128_fail(self): nulp = 5 x = np.linspace(-20, 20, 50, dtype=np.float64) x = 10**x x = np.r_[-x, x] - xi = x + x*1j + xi = x + x * 1j eps = np.finfo(x.dtype).eps - y = x + x*eps*nulp*2. + y = x + x * eps * nulp * 2. assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, x + y*1j, nulp) + xi, x + y * 1j, nulp) assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, y + x*1j, nulp) + xi, y + x * 1j, nulp) # The test condition needs to be at least a factor of sqrt(2) smaller # because the real and imaginary parts both change - y = x + x*eps*nulp + y = x + x * eps * nulp assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, y + y*1j, nulp) + xi, y + y * 1j, nulp) epsneg = np.finfo(x.dtype).epsneg - y = x - x*epsneg*nulp*2. + y = x - x * epsneg * nulp * 2. 
assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, x + y*1j, nulp) + xi, x + y * 1j, nulp) assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, y + x*1j, nulp) - y = x - x*epsneg*nulp + xi, y + x * 1j, nulp) + y = x - x * epsneg * nulp assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, y + y*1j, nulp) + xi, y + y * 1j, nulp) def test_complex64_pass(self): nulp = 5 x = np.linspace(-20, 20, 50, dtype=np.float32) x = 10**x x = np.r_[-x, x] - xi = x + x*1j + xi = x + x * 1j eps = np.finfo(x.dtype).eps - y = x + x*eps*nulp/2. - assert_array_almost_equal_nulp(xi, x + y*1j, nulp) - assert_array_almost_equal_nulp(xi, y + x*1j, nulp) - y = x + x*eps*nulp/4. - assert_array_almost_equal_nulp(xi, y + y*1j, nulp) + y = x + x * eps * nulp / 2. + assert_array_almost_equal_nulp(xi, x + y * 1j, nulp) + assert_array_almost_equal_nulp(xi, y + x * 1j, nulp) + y = x + x * eps * nulp / 4. + assert_array_almost_equal_nulp(xi, y + y * 1j, nulp) epsneg = np.finfo(x.dtype).epsneg - y = x - x*epsneg*nulp/2. - assert_array_almost_equal_nulp(xi, x + y*1j, nulp) - assert_array_almost_equal_nulp(xi, y + x*1j, nulp) - y = x - x*epsneg*nulp/4. - assert_array_almost_equal_nulp(xi, y + y*1j, nulp) + y = x - x * epsneg * nulp / 2. + assert_array_almost_equal_nulp(xi, x + y * 1j, nulp) + assert_array_almost_equal_nulp(xi, y + x * 1j, nulp) + y = x - x * epsneg * nulp / 4. + assert_array_almost_equal_nulp(xi, y + y * 1j, nulp) def test_complex64_fail(self): nulp = 5 x = np.linspace(-20, 20, 50, dtype=np.float32) x = 10**x x = np.r_[-x, x] - xi = x + x*1j + xi = x + x * 1j eps = np.finfo(x.dtype).eps - y = x + x*eps*nulp*2. + y = x + x * eps * nulp * 2. 
assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, x + y*1j, nulp) + xi, x + y * 1j, nulp) assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, y + x*1j, nulp) - y = x + x*eps*nulp + xi, y + x * 1j, nulp) + y = x + x * eps * nulp assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, y + y*1j, nulp) + xi, y + y * 1j, nulp) epsneg = np.finfo(x.dtype).epsneg - y = x - x*epsneg*nulp*2. + y = x - x * epsneg * nulp * 2. assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, x + y*1j, nulp) + xi, x + y * 1j, nulp) assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, y + x*1j, nulp) - y = x - x*epsneg*nulp + xi, y + x * 1j, nulp) + y = x - x * epsneg * nulp assert_raises(AssertionError, assert_array_almost_equal_nulp, - xi, y + y*1j, nulp) + xi, y + y * 1j, nulp) class TestULP: @@ -1445,14 +1633,14 @@ def test_single(self): x = np.ones(10).astype(np.float32) x += 0.01 * np.random.randn(10).astype(np.float32) eps = np.finfo(np.float32).eps - assert_array_max_ulp(x, x+eps, maxulp=20) + assert_array_max_ulp(x, x + eps, maxulp=20) def test_double(self): # Generate 1 + small deviation, check that adding eps gives a few UNL x = np.ones(10).astype(np.float64) x += 0.01 * np.random.randn(10).astype(np.float64) eps = np.finfo(np.float64).eps - assert_array_max_ulp(x, x+eps, maxulp=200) + assert_array_max_ulp(x, x + eps, maxulp=200) def test_inf(self): for dt in [np.float32, np.float64]: @@ -1526,7 +1714,7 @@ def assert_warn_len_equal(mod, n_in_context): num_warns = len(mod_warns) if 'version' in mod_warns: - # Python 3 adds a 'version' entry to the registry, + # Python adds a 'version' entry to the registry, # do not count it. 
num_warns -= 1 @@ -1573,6 +1761,7 @@ def _get_fresh_mod(): return my_mod +@pytest.mark.thread_unsafe(reason="checks global module & deprecated warnings") def test_clear_and_catch_warnings(): # Initial state of module, no warnings my_mod = _get_fresh_mod() @@ -1605,6 +1794,10 @@ def test_clear_and_catch_warnings(): assert_warn_len_equal(my_mod, 0) +@pytest.mark.filterwarnings( + "ignore:.*NumPy warning suppression and assertion utilities are deprecated" + ".*:DeprecationWarning") +@pytest.mark.thread_unsafe(reason="checks global module & deprecated warnings") def test_suppress_warnings_module(): # Initial state of module, no warnings my_mod = _get_fresh_mod() @@ -1651,6 +1844,10 @@ def warn(arr): assert_warn_len_equal(my_mod, 0) +@pytest.mark.filterwarnings( + "ignore:.*NumPy warning suppression and assertion utilities are deprecated" + ".*:DeprecationWarning") +@pytest.mark.thread_unsafe(reason="checks global module & deprecated warnings") def test_suppress_warnings_type(): # Initial state of module, no warnings my_mod = _get_fresh_mod() @@ -1679,6 +1876,12 @@ def test_suppress_warnings_type(): assert_warn_len_equal(my_mod, 0) +@pytest.mark.filterwarnings( + "ignore:.*NumPy warning suppression and assertion utilities are deprecated" + ".*:DeprecationWarning") +@pytest.mark.thread_unsafe( + reason="uses deprecated thread-unsafe warnings control utilities" +) def test_suppress_warnings_decorate_no_record(): sup = suppress_warnings() sup.filter(UserWarning) @@ -1689,11 +1892,17 @@ def warn(category): with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") - warn(UserWarning) # should be supppressed + warn(UserWarning) # should be suppressed warn(RuntimeWarning) assert_equal(len(w), 1) +@pytest.mark.filterwarnings( + "ignore:.*NumPy warning suppression and assertion utilities are deprecated" + ".*:DeprecationWarning") +@pytest.mark.thread_unsafe( + reason="uses deprecated thread-unsafe warnings control utilities" +) def 
test_suppress_warnings_record(): sup = suppress_warnings() log1 = sup.record() @@ -1731,9 +1940,16 @@ def test_suppress_warnings_record(): warnings.warn('Some warning') warnings.warn('Some other warning') assert_equal(len(sup2.log), 1) - assert_equal(len(sup.log), 1) + # includes a DeprecationWarning for suppress_warnings + assert_equal(len(sup.log), 2) +@pytest.mark.filterwarnings( + "ignore:.*NumPy warning suppression and assertion utilities are deprecated" + ".*:DeprecationWarning") +@pytest.mark.thread_unsafe( + reason="uses deprecated thread-unsafe warnings control utilities" +) def test_suppress_warnings_forwarding(): def warn_other_module(): # Apply along axis is implemented in python; stacklevel=2 means @@ -1749,7 +1965,8 @@ def warn(arr): for i in range(2): warnings.warn("Some warning") - assert_equal(len(sup.log), 2) + # includes a DeprecationWarning for suppress_warnings + assert_equal(len(sup.log), 3) with suppress_warnings() as sup: sup.record() @@ -1758,7 +1975,8 @@ def warn(arr): warnings.warn("Some warning") warnings.warn("Some warning") - assert_equal(len(sup.log), 2) + # includes a DeprecationWarning for suppress_warnings + assert_equal(len(sup.log), 3) with suppress_warnings() as sup: sup.record() @@ -1768,7 +1986,8 @@ def warn(arr): warnings.warn("Some warning") warn_other_module() - assert_equal(len(sup.log), 2) + # includes a DeprecationWarning for suppress_warnings + assert_equal(len(sup.log), 3) with suppress_warnings() as sup: sup.record() @@ -1778,7 +1997,8 @@ def warn(arr): warnings.warn("Some other warning") warn_other_module() - assert_equal(len(sup.log), 2) + # includes a DeprecationWarning for suppress_warnings + assert_equal(len(sup.log), 3) def test_tempdir(): @@ -1791,7 +2011,7 @@ def test_tempdir(): raised = False try: with tempdir() as tdir: - raise ValueError() + raise ValueError except ValueError: raised = True assert_(raised) @@ -1807,7 +2027,7 @@ def test_temppath(): raised = False try: with temppath() as fpath: - raise 
ValueError() + raise ValueError except ValueError: raised = True assert_(raised) @@ -1819,6 +2039,7 @@ class my_cacw(clear_and_catch_warnings): class_modules = (sys.modules[__name__],) +@pytest.mark.thread_unsafe(reason="checks global module & deprecated warnings") def test_clear_and_catch_warnings_inherit(): # Test can subclass and add default modules my_mod = _get_fresh_mod() @@ -1829,6 +2050,7 @@ def test_clear_and_catch_warnings_inherit(): @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +@pytest.mark.thread_unsafe(reason="garbage collector is global state") class TestAssertNoGcCycles: """ Test assert_no_gc_cycles """ @@ -1879,7 +2101,7 @@ def __del__(self): self.cycle = None if ReferenceCycleInDel.make_cycle: - # but create a new one so that the garbage collector has more + # but create a new one so that the garbage collector (GC) has more # work to do. ReferenceCycleInDel() @@ -1891,7 +2113,7 @@ def __del__(self): assert_no_gc_cycles(lambda: None) except AssertionError: # the above test is only necessary if the GC actually tried to free - # our object anyway, which python 2.7 does not. + # our object anyway. if w() is not None: pytest.skip("GC does not call __del__ on cyclic objects") raise @@ -1899,32 +2121,3 @@ def __del__(self): finally: # make sure that we stop creating reference cycles ReferenceCycleInDel.make_cycle = False - - -@pytest.mark.parametrize('assert_func', [assert_array_equal, - assert_array_almost_equal]) -def test_xy_rename(assert_func): - # Test that keywords `x` and `y` have been renamed to `actual` and - # `desired`, respectively. These tests and use of `_rename_parameter` - # decorator can be removed before the release of NumPy 2.2.0. - assert_func(1, 1) - assert_func(actual=1, desired=1) - - assert_message = "Arrays are not..." 
- with pytest.raises(AssertionError, match=assert_message): - assert_func(1, 2) - with pytest.raises(AssertionError, match=assert_message): - assert_func(actual=1, desired=2) - - dep_message = 'Use of keyword argument...' - with pytest.warns(DeprecationWarning, match=dep_message): - assert_func(x=1, desired=1) - with pytest.warns(DeprecationWarning, match=dep_message): - assert_func(1, y=1) - - type_message = '...got multiple values for argument' - # explicit linebreak to support Python 3.9 - with pytest.warns(DeprecationWarning, match=dep_message), \ - pytest.raises(TypeError, match=type_message): - assert_func(1, x=1) - assert_func(1, 2, y=2) diff --git a/numpy/tests/test__all__.py b/numpy/tests/test__all__.py index e44bda3d58ab..2dc81669d9fb 100644 --- a/numpy/tests/test__all__.py +++ b/numpy/tests/test__all__.py @@ -1,5 +1,6 @@ import collections + import numpy as np diff --git a/numpy/tests/test_configtool.py b/numpy/tests/test_configtool.py index 5215057f644a..917bbf55712f 100644 --- a/numpy/tests/test_configtool.py +++ b/numpy/tests/test_configtool.py @@ -1,43 +1,51 @@ +import importlib.metadata import os +import pathlib import subprocess -import sysconfig import pytest + import numpy as np +import numpy._core.include +import numpy._core.lib.pkgconfig +from numpy.testing import IS_EDITABLE, IS_INSTALLED, IS_WASM, NUMPY_ROOT + +INCLUDE_DIR = NUMPY_ROOT / '_core' / 'include' +PKG_CONFIG_DIR = NUMPY_ROOT / '_core' / 'lib' / 'pkgconfig' -from numpy.testing import IS_WASM +@pytest.mark.skipif(not IS_INSTALLED, + reason="`numpy-config` not expected to be installed") +@pytest.mark.skipif(IS_WASM, + reason="wasm interpreter cannot start subprocess") +class TestNumpyConfig: + def check_numpyconfig(self, arg): + p = subprocess.run(['numpy-config', arg], capture_output=True, text=True) + p.check_returncode() + return p.stdout.strip() -is_editable = not bool(np.__path__) -numpy_in_sitepackages = sysconfig.get_path('platlib') in np.__file__ -# We only expect to have a 
`numpy-config` available if NumPy was installed via -# a build frontend (and not `spin` for example) -if not (numpy_in_sitepackages or is_editable): - pytest.skip("`numpy-config` not expected to be installed", - allow_module_level=True) + def test_configtool_version(self): + stdout = self.check_numpyconfig('--version') + assert stdout == np.__version__ + def test_configtool_cflags(self): + stdout = self.check_numpyconfig('--cflags') + assert f'-I{os.fspath(INCLUDE_DIR)}' in stdout -def check_numpyconfig(arg): - p = subprocess.run(['numpy-config', arg], capture_output=True, text=True) - p.check_returncode() - return p.stdout.strip() + def test_configtool_pkgconfigdir(self): + stdout = self.check_numpyconfig('--pkgconfigdir') + assert pathlib.Path(stdout) == PKG_CONFIG_DIR.resolve() -@pytest.mark.skipif(IS_WASM, reason="wasm interpreter cannot start subprocess") -def test_configtool_version(): - stdout = check_numpyconfig('--version') - assert stdout == np.__version__ -@pytest.mark.skipif(IS_WASM, reason="wasm interpreter cannot start subprocess") -def test_configtool_cflags(): - stdout = check_numpyconfig('--cflags') - assert stdout.endswith(os.path.join('numpy', '_core', 'include')) +@pytest.mark.skipif(not IS_INSTALLED, + reason="numpy must be installed to check its entrypoints") +def test_pkg_config_entrypoint(): + (entrypoint,) = importlib.metadata.entry_points(group='pkg_config', name='numpy') + assert entrypoint.value == numpy._core.lib.pkgconfig.__name__ -@pytest.mark.skipif(IS_WASM, reason="wasm interpreter cannot start subprocess") -def test_configtool_pkgconfigdir(): - stdout = check_numpyconfig('--pkgconfigdir') - assert stdout.endswith(os.path.join('numpy', '_core', 'lib', 'pkgconfig')) - if not is_editable: - # Also check that the .pc file actually exists (unless we're using an - # editable install, then it'll be hiding in the build dir) - assert os.path.exists(os.path.join(stdout, 'numpy.pc')) +@pytest.mark.skipif(not IS_INSTALLED, + reason="numpy.pc 
is only available when numpy is installed") +@pytest.mark.skipif(IS_EDITABLE, reason="editable installs don't have a numpy.pc") +def test_pkg_config_config_exists(): + assert PKG_CONFIG_DIR.joinpath('numpy.pc').is_file() diff --git a/numpy/tests/test_ctypeslib.py b/numpy/tests/test_ctypeslib.py index 2fd0c042f2ca..659c3d639e97 100644 --- a/numpy/tests/test_ctypeslib.py +++ b/numpy/tests/test_ctypeslib.py @@ -6,8 +6,8 @@ import pytest import numpy as np -from numpy.ctypeslib import ndpointer, load_library, as_array -from numpy.testing import assert_, assert_array_equal, assert_raises, assert_equal +from numpy.ctypeslib import as_array, load_library, ndpointer +from numpy.testing import assert_, assert_array_equal, assert_equal, assert_raises try: import ctypes @@ -61,7 +61,7 @@ def test_basic2(self): # (including extension) does not work. try: so_ext = sysconfig.get_config_var('EXT_SUFFIX') - load_library('_multiarray_umath%s' % so_ext, + load_library(f'_multiarray_umath{so_ext}', np._core._multiarray_umath.__file__) except ImportError as e: msg = ("ctypes is not available on this python: skipping the test" @@ -124,6 +124,7 @@ def test_flags(self): assert_(p.from_param(x)) assert_raises(TypeError, p.from_param, np.array([[1, 2], [3, 4]])) + @pytest.mark.thread_unsafe(reason="checks that global ndpointer cache is updating") def test_cache(self): assert_(ndpointer(dtype=np.float64) is ndpointer(dtype=np.float64)) @@ -150,12 +151,12 @@ def test_arguments(self): @pytest.mark.parametrize( 'dt', [ float, - np.dtype(dict( - formats=[' 0: + module = modules_queue.pop() + for member_name in dir(module): + member = getattr(module, member_name) + # first check if we got a module + if ( + inspect.ismodule(member) and # it's a module + "numpy" in member.__name__ and # inside NumPy + not member_name.startswith("_") and # not private + "numpy._core" not in member.__name__ and # outside _core + # not in a skip module list + member_name not in [ + "char", "core", "f2py", "ma", 
"lapack_lite", "mrecords", + "testing", "tests", "polynomial", "typing", "mtrand", + "bit_generator", + ] and + member not in visited_modules # not visited yet + ): + modules_queue.append(member) + visited_modules.add(member) + elif ( + not inspect.ismodule(member) and + hasattr(member, "__name__") and + not member.__name__.startswith("_") and + member.__module__ != module.__name__ and + member not in visited_functions + ): + # skip ufuncs that are exported in np.strings as well + if member.__name__ in ( + "add", "equal", "not_equal", "greater", "greater_equal", + "less", "less_equal", + ) and module.__name__ == "numpy.strings": + continue + + # recarray and record are exported in np and np.rec + if ( + (member.__name__ == "recarray" and module.__name__ == "numpy") or + (member.__name__ == "record" and module.__name__ == "numpy.rec") + ): + continue + + # ctypeslib exports ctypes c_long/c_longlong + if ( + member.__name__ in ("c_long", "c_longlong") and + module.__name__ == "numpy.ctypeslib" + ): + continue + + # skip cdef classes + if member.__name__ in ( + "BitGenerator", "Generator", "MT19937", "PCG64", "PCG64DXSM", + "Philox", "RandomState", "SFC64", "SeedSequence", + ): + continue + + incorrect_entries.append( + { + "Func": member.__name__, + "actual": member.__module__, + "expected": module.__name__, + } + ) + visited_functions.add(member) + + if incorrect_entries: + assert len(incorrect_entries) == 0, incorrect_entries + + +def _check_correct_qualname_and_module(obj) -> bool: + qualname = obj.__qualname__ + name = obj.__name__ + module_name = obj.__module__ + assert name == qualname.split(".")[-1] + + module = sys.modules[module_name] + actual_obj = functools.reduce(getattr, qualname.split("."), module) + return ( + actual_obj is obj or + # `obj` may be a bound method/property of `actual_obj`: + ( + hasattr(actual_obj, "__get__") and hasattr(obj, "__self__") and + actual_obj.__module__ == obj.__module__ and + actual_obj.__qualname__ == qualname + ) + ) + + 
+@pytest.mark.filterwarnings(r"ignore:\w+ chararray \w+:DeprecationWarning") +def test___qualname___and___module___attribute(): + # NumPy messes with module and name/qualname attributes, but any object + # should be discoverable based on its module and qualname, so test that. + # We do this for anything with a name (ensuring qualname is also set). + modules_queue = [np] + visited_modules = {np} + visited_functions = set() + incorrect_entries = [] + + while len(modules_queue) > 0: + module = modules_queue.pop() + for member_name in dir(module): + member = getattr(module, member_name) + # first check if we got a module + if ( + inspect.ismodule(member) and # it's a module + "numpy" in member.__name__ and # inside NumPy + not member_name.startswith("_") and # not private + member_name not in {"tests", "typing"} and # type names don't match + "numpy._core" not in member.__name__ and # outside _core + member not in visited_modules # not visited yet + ): + modules_queue.append(member) + visited_modules.add(member) + elif ( + not inspect.ismodule(member) and + hasattr(member, "__name__") and + not member.__name__.startswith("_") and + not member_name.startswith("_") and + not _check_correct_qualname_and_module(member) and + member not in visited_functions + ): + incorrect_entries.append( + { + "found_at": f"{module.__name__}:{member_name}", + "advertises": f"{member.__module__}:{member.__qualname__}", + } + ) + visited_functions.add(member) + + if incorrect_entries: + assert len(incorrect_entries) == 0, incorrect_entries diff --git a/numpy/tests/test_reloading.py b/numpy/tests/test_reloading.py index 22bff7212e59..aa87ae104318 100644 --- a/numpy/tests/test_reloading.py +++ b/numpy/tests/test_reloading.py @@ -1,21 +1,16 @@ -import sys +import pickle import subprocess +import sys import textwrap from importlib import reload -import pickle import pytest import numpy.exceptions as ex -from numpy.testing import ( - assert_raises, - assert_warns, - assert_, - assert_equal, - 
IS_WASM, -) +from numpy.testing import IS_WASM, assert_, assert_equal, assert_raises +@pytest.mark.thread_unsafe(reason="reloads global module") def test_numpy_reloading(): # gh-7844. Also check that relevant globals retain their identity. import numpy as np @@ -25,14 +20,14 @@ def test_numpy_reloading(): VisibleDeprecationWarning = ex.VisibleDeprecationWarning ModuleDeprecationWarning = ex.ModuleDeprecationWarning - with assert_warns(UserWarning): + with pytest.warns(UserWarning): reload(np) assert_(_NoValue is np._NoValue) assert_(ModuleDeprecationWarning is ex.ModuleDeprecationWarning) assert_(VisibleDeprecationWarning is ex.VisibleDeprecationWarning) assert_raises(RuntimeError, reload, numpy._globals) - with assert_warns(UserWarning): + with pytest.warns(UserWarning): reload(np) assert_(_NoValue is np._NoValue) assert_(ModuleDeprecationWarning is ex.ModuleDeprecationWarning) @@ -48,27 +43,34 @@ def test_novalue(): @pytest.mark.skipif(IS_WASM, reason="can't start subprocess") def test_full_reimport(): - """At the time of writing this, it is *not* truly supported, but - apparently enough users rely on it, for it to be an annoying change - when it started failing previously. - """ + # Reimporting numpy like this is not safe due to use of global C state, + # and has unexpected side effects. Test that an ImportError is raised. + # When all extension modules are isolated, this should test that clearing + # sys.modules and reimporting numpy works without error. + # Test within a new process, to ensure that we do not mess with the # global state during the test run (could lead to cryptic test failures). # This is generally unsafe, especially, since we also reload the C-modules. 
code = textwrap.dedent(r""" import sys - from pytest import warns import numpy as np - for k in list(sys.modules.keys()): - if "numpy" in k: - del sys.modules[k] + for k in [k for k in sys.modules if k.startswith('numpy')]: + del sys.modules[k] - with warns(UserWarning): + try: import numpy as np + except ImportError as err: + if str(err) != "cannot load module more than once per process": + raise SystemExit(f"Unexpected ImportError: {err}") + else: + raise SystemExit("DID NOT RAISE ImportError") """) - p = subprocess.run([sys.executable, '-c', code], capture_output=True) - if p.returncode: - raise AssertionError( - f"Non-zero return code: {p.returncode!r}\n\n{p.stderr.decode()}" - ) + p = subprocess.run( + (sys.executable, '-c', code), + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + encoding='utf-8', + check=False, + ) + assert p.returncode == 0, p.stdout diff --git a/numpy/tests/test_scripts.py b/numpy/tests/test_scripts.py index 892c04eef0be..e5f0a07436c8 100644 --- a/numpy/tests/test_scripts.py +++ b/numpy/tests/test_scripts.py @@ -2,22 +2,21 @@ Test that we can run executable scripts that have been installed with numpy. """ -import sys import os -import pytest -from os.path import join as pathjoin, isfile, dirname import subprocess +import sys +from os.path import dirname -import numpy as np -from numpy.testing import assert_equal, IS_WASM +import pytest -is_inplace = isfile(pathjoin(dirname(np.__file__), '..', 'setup.py')) +import numpy as np +from numpy.testing import IS_WASM, assert_equal def find_f2py_commands(): if sys.platform == 'win32': exe_dir = dirname(sys.executable) - if exe_dir.endswith('Scripts'): # virtualenv + if exe_dir.endswith('Scripts'): # virtualenv return [os.path.join(exe_dir, 'f2py')] else: return [os.path.join(exe_dir, "Scripts", 'f2py')] @@ -32,7 +31,6 @@ def find_f2py_commands(): return ['f2py', 'f2py' + major, 'f2py' + major + '.' 
+ minor] -@pytest.mark.skipif(is_inplace, reason="Cannot test f2py command inplace") @pytest.mark.xfail(reason="Test is unreliable") @pytest.mark.parametrize('f2py_cmd', find_f2py_commands()) def test_f2py(f2py_cmd): diff --git a/numpy/tests/test_warnings.py b/numpy/tests/test_warnings.py index df90fcef8c59..7efa2a1d1896 100644 --- a/numpy/tests/test_warnings.py +++ b/numpy/tests/test_warnings.py @@ -2,13 +2,15 @@ Tests which scan for certain occurrences in the code, they may not find all of these occurrences but should catch almost all. """ -import pytest - -from pathlib import Path import ast import tokenize +from pathlib import Path + +import pytest + import numpy + class ParseCall(ast.NodeVisitor): def __init__(self): self.ls = [] @@ -32,10 +34,11 @@ def visit_Call(self, node): ast.NodeVisitor.generic_visit(self, node) if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings': - if node.args[0].value == "ignore": - raise AssertionError( - "warnings should have an appropriate stacklevel; found in " - "{} on line {}".format(self.__filename, node.lineno)) + if getattr(node.args[0], "value", None) == "ignore": + if not self.__filename.name.startswith("test_"): + raise AssertionError( + "ignore filters should only be used in tests; " + f"found in {self.__filename} on line {node.lineno}") if p.ls[-1] == 'warn' and ( len(p.ls) == 1 or p.ls[-2] == 'warnings'): @@ -51,8 +54,8 @@ def visit_Call(self, node): if "stacklevel" in args: return raise AssertionError( - "warnings should have an appropriate stacklevel; found in " - "{} on line {}".format(self.__filename, node.lineno)) + "warnings should have an appropriate stacklevel; " + f"found in {self.__filename} on line {node.lineno}") @pytest.mark.slow @@ -67,6 +70,8 @@ def test_warning_calls(): continue if path == base / "random" / "__init__.py": continue + if path == base / "conftest.py": + continue # use tokenize to auto-detect encoding on systems where no # default encoding is defined (e.g. 
LANG='C') with tokenize.open(str(path)) as file: diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index b247921818e2..1a90f9e0c212 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -93,7 +93,7 @@ Number precision ~~~~~~~~~~~~~~~~ -The precision of `numpy.number` subclasses is treated as a invariant generic +The precision of `numpy.number` subclasses is treated as an invariant generic parameter (see :class:`~NBitBase`), simplifying the annotating of processes involving precision-based casting. @@ -104,13 +104,45 @@ >>> import numpy.typing as npt >>> T = TypeVar("T", bound=npt.NBitBase) - >>> def func(a: "np.floating[T]", b: "np.floating[T]") -> "np.floating[T]": + >>> def func(a: np.floating[T], b: np.floating[T]) -> np.floating[T]: ... ... Consequently, the likes of `~numpy.float16`, `~numpy.float32` and `~numpy.float64` are still sub-types of `~numpy.floating`, but, contrary to runtime, they're not necessarily considered as sub-classes. +.. deprecated:: 2.3 + The :class:`~numpy.typing.NBitBase` helper is deprecated and will be + removed in a future release. Prefer expressing precision relationships via + ``typing.overload`` or ``TypeVar`` definitions bounded by concrete scalar + classes. For example: + + .. code-block:: python + + from typing import TypeVar + import numpy as np + + S = TypeVar("S", bound=np.floating) + + def func(a: S, b: S) -> S: + ... + + or in the case of different input types mapping to different output types: + + .. code-block:: python + + from typing import overload + import numpy as np + + @overload + def phase(x: np.complex64) -> np.float32: ... + @overload + def phase(x: np.complex128) -> np.float64: ... + @overload + def phase(x: np.clongdouble) -> np.longdouble: ... + def phase(x: np.complexfloating) -> np.floating: + ... + Timedelta64 ~~~~~~~~~~~ @@ -125,7 +157,7 @@ corresponding `~numpy.generic` instance. 
Until the introduction of shape typing (see :pep:`646`) it is unfortunately not possible to make the necessary distinction between 0D and >0D arrays. While thus not strictly -correct, all operations are that can potentially perform a 0D-array -> scalar +correct, all operations that can potentially perform a 0D-array -> scalar cast are currently annotated as exclusively returning an `~numpy.ndarray`. If it is known in advance that an operation *will* perform a @@ -155,15 +187,40 @@ # NOTE: The API section will be appended with additional entries # further down in this file -from numpy._typing import ( - ArrayLike, - DTypeLike, - NBitBase, - NDArray, -) +# pyright: reportDeprecated=false + +from numpy._typing import ArrayLike, DTypeLike, NBitBase, NDArray __all__ = ["ArrayLike", "DTypeLike", "NBitBase", "NDArray"] + +__DIR = __all__ + [k for k in globals() if k.startswith("__") and k.endswith("__")] +__DIR_SET = frozenset(__DIR) + + +def __dir__() -> list[str]: + return __DIR + +def __getattr__(name: str) -> object: + if name == "NBitBase": + import warnings + + # Deprecated in NumPy 2.3, 2025-05-01 + warnings.warn( + "`NBitBase` is deprecated and will be removed from numpy.typing in the " + "future. Use `@typing.overload` or a `TypeVar` with a scalar-type as upper " + "bound, instead. 
(deprecated in NumPy 2.3)", + DeprecationWarning, + stacklevel=2, + ) + return NBitBase + + if name in __DIR_SET: + return globals()[name] + + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") + + if __doc__ is not None: from numpy._typing._add_docstring import _docstrings __doc__ += _docstrings @@ -171,5 +228,6 @@ del _docstrings from numpy._pytesttester import PytestTester + test = PytestTester(__name__) del PytestTester diff --git a/numpy/typing/__init__.pyi b/numpy/typing/__init__.pyi new file mode 100644 index 000000000000..5af10da218d9 --- /dev/null +++ b/numpy/typing/__init__.pyi @@ -0,0 +1,8 @@ +from numpy._typing import ( # type: ignore[deprecated] + ArrayLike, + DTypeLike, + NBitBase, + NDArray, +) + +__all__ = ["ArrayLike", "DTypeLike", "NBitBase", "NDArray"] diff --git a/numpy/typing/mypy_plugin.py b/numpy/typing/mypy_plugin.py index 63f063ccc795..04014a9e867b 100644 --- a/numpy/typing/mypy_plugin.py +++ b/numpy/typing/mypy_plugin.py @@ -17,6 +17,13 @@ .. versionadded:: 1.22 +.. deprecated:: 2.3 + The :mod:`numpy.typing.mypy_plugin` entry-point is deprecated in favor of + platform-agnostic static type inference. Remove + ``numpy.typing.mypy_plugin`` from the ``plugins`` section of your mypy + configuration; if that surfaces new errors, please open an issue with a + minimal reproducer. 
+ Examples -------- To enable the plugin, one must add it to their mypy `configuration file`_: @@ -31,25 +38,11 @@ """ -from __future__ import annotations - -from collections.abc import Iterable -from typing import Final, TYPE_CHECKING, Callable +from collections.abc import Callable, Iterable +from typing import TYPE_CHECKING, Final, cast import numpy as np -try: - import mypy.types - from mypy.types import Type - from mypy.plugin import Plugin, AnalyzeTypeContext - from mypy.nodes import MypyFile, ImportFrom, Statement - from mypy.build import PRI_MED - - _HookFunc = Callable[[AnalyzeTypeContext], Type] - MYPY_EX: None | ModuleNotFoundError = None -except ModuleNotFoundError as ex: - MYPY_EX = ex - __all__: list[str] = [] @@ -68,43 +61,32 @@ def _get_precision_dict() -> dict[str, str]: ("_NBitDouble", np.double), ("_NBitLongDouble", np.longdouble), ] - ret = {} + ret: dict[str, str] = {} for name, typ in names: - n: int = 8 * typ().dtype.itemsize - ret[f'numpy._typing._nbit.{name}'] = f"numpy._{n}Bit" + n = 8 * np.dtype(typ).itemsize + ret[f"{_MODULE}._nbit.{name}"] = f"{_MODULE}._nbit_base._{n}Bit" return ret def _get_extended_precision_list() -> list[str]: extended_names = [ - "uint128", - "uint256", - "int128", - "int256", - "float80", "float96", "float128", - "float256", - "complex160", "complex192", "complex256", - "complex512", ] return [i for i in extended_names if hasattr(np, i)] - def _get_c_intp_name() -> str: # Adapted from `np.core._internal._getintp_ctype` - char = np.dtype('n').char - if char == 'i': - return "c_int" - elif char == 'l': - return "c_long" - elif char == 'q': - return "c_longlong" - else: - return "c_long" + return { + "i": "c_int", + "l": "c_long", + "q": "c_longlong", + }.get(np.dtype("n").char, "c_long") + +_MODULE: Final = "numpy._typing" #: A dictionary mapping type-aliases in `numpy._typing._nbit` to #: concrete `numpy.typing.NBitBase` subclasses. 
@@ -113,19 +95,35 @@ def _get_c_intp_name() -> str: #: A list with the names of all extended precision `np.number` subclasses. _EXTENDED_PRECISION_LIST: Final = _get_extended_precision_list() -#: The name of the ctypes quivalent of `np.intp` +#: The name of the ctypes equivalent of `np.intp` _C_INTP: Final = _get_c_intp_name() -def _hook(ctx: AnalyzeTypeContext) -> Type: - """Replace a type-alias with a concrete ``NBitBase`` subclass.""" - typ, _, api = ctx - name = typ.name.split(".")[-1] - name_new = _PRECISION_DICT[f"numpy._typing._nbit.{name}"] - return api.named_type(name_new) +try: + if TYPE_CHECKING: + from mypy.typeanal import TypeAnalyser + import mypy.types + from mypy.build import PRI_MED + from mypy.nodes import ImportFrom, MypyFile, Statement + from mypy.plugin import AnalyzeTypeContext, Plugin + +except ModuleNotFoundError as _exc: + + def plugin(version: str) -> type: + raise _exc + +else: + + type _HookFunc = Callable[[AnalyzeTypeContext], mypy.types.Type] + + def _hook(ctx: AnalyzeTypeContext) -> mypy.types.Type: + """Replace a type-alias with a concrete ``NBitBase`` subclass.""" + typ, _, api = ctx + name = typ.name.split(".")[-1] + name_new = _PRECISION_DICT[f"{_MODULE}._nbit.{name}"] + return cast("TypeAnalyser", api).named_type(name_new) -if TYPE_CHECKING or MYPY_EX is None: def _index(iterable: Iterable[Statement], id: str) -> int: """Identify the first ``ImportFrom`` instance the specified `id`.""" for i, value in enumerate(iterable): @@ -137,7 +135,7 @@ def _index(iterable: Iterable[Statement], id: str) -> int: def _override_imports( file: MypyFile, module: str, - imports: list[tuple[str, None | str]], + imports: list[tuple[str, str | None]], ) -> None: """Override the first `module`-based import with new `imports`.""" # Construct a new `from module import y` statement @@ -145,14 +143,14 @@ def _override_imports( import_obj.is_top_level = True # Replace the first `module`-based import statement with `import_obj` - for lst in [file.defs, 
file.imports]: # type: list[Statement] + for lst in [file.defs, cast("list[Statement]", file.imports)]: i = _index(lst, module) lst[i] = import_obj class _NumpyPlugin(Plugin): """A mypy plugin for handling versus numpy-specific typing tasks.""" - def get_type_analyze_hook(self, fullname: str) -> None | _HookFunc: + def get_type_analyze_hook(self, fullname: str) -> _HookFunc | None: """Set the precision of platform-specific `numpy.number` subclasses. @@ -168,30 +166,35 @@ def get_additional_deps( """Handle all import-based overrides. * Import platform-specific extended-precision `numpy.number` - subclasses (*e.g.* `numpy.float96`, `numpy.float128` and - `numpy.complex256`). + subclasses (*e.g.* `numpy.float96` and `numpy.float128`). * Import the appropriate `ctypes` equivalent to `numpy.intp`. """ - ret = [(PRI_MED, file.fullname, -1)] - - if file.fullname == "numpy": + fullname = file.fullname + if fullname == "numpy": _override_imports( - file, "numpy._typing._extended_precision", + file, + f"{_MODULE}._extended_precision", imports=[(v, v) for v in _EXTENDED_PRECISION_LIST], ) - elif file.fullname == "numpy.ctypeslib": + elif fullname == "numpy.ctypeslib": _override_imports( - file, "ctypes", + file, + "ctypes", imports=[(_C_INTP, "_c_intp")], ) - return ret + return [(PRI_MED, fullname, -1)] - def plugin(version: str) -> type[_NumpyPlugin]: - """An entry-point for mypy.""" - return _NumpyPlugin + def plugin(version: str) -> type: + import warnings -else: - def plugin(version: str) -> type[_NumpyPlugin]: - """An entry-point for mypy.""" - raise MYPY_EX + plugin = "numpy.typing.mypy_plugin" + # Deprecated 2025-01-10, NumPy 2.3 + warn_msg = ( + f"`{plugin}` is deprecated, and will be removed in a future " + f"release. Please remove `plugins = {plugin}` in your mypy config." 
+ f"(deprecated in NumPy 2.3)" + ) + warnings.warn(warn_msg, DeprecationWarning, stacklevel=3) + + return _NumpyPlugin diff --git a/numpy/typing/tests/data/fail/arithmetic.pyi b/numpy/typing/tests/data/fail/arithmetic.pyi index d6ff59fc4756..a68df2ea53c3 100644 --- a/numpy/typing/tests/data/fail/arithmetic.pyi +++ b/numpy/typing/tests/data/fail/arithmetic.pyi @@ -10,7 +10,7 @@ td = np.timedelta64(0, "D") AR_b: npt.NDArray[np.bool] AR_u: npt.NDArray[np.uint32] AR_i: npt.NDArray[np.int64] -AR_f: npt.NDArray[np.float64] +AR_f: npt.NDArray[np.longdouble] AR_c: npt.NDArray[np.complex128] AR_m: npt.NDArray[np.timedelta64] AR_M: npt.NDArray[np.datetime64] @@ -28,96 +28,99 @@ AR_LIKE_M: list[np.datetime64] # Array subtraction # NOTE: mypys `NoReturn` errors are, unfortunately, not that great -_1 = AR_b - AR_LIKE_b # E: Need type annotation -_2 = AR_LIKE_b - AR_b # E: Need type annotation -AR_i - bytes() # E: No overload variant +_1 = AR_b - AR_LIKE_b # type: ignore[var-annotated] +_2 = AR_LIKE_b - AR_b # type: ignore[var-annotated] +AR_i - b"" # type: ignore[operator] -AR_f - AR_LIKE_m # E: Unsupported operand types -AR_f - AR_LIKE_M # E: Unsupported operand types -AR_c - AR_LIKE_m # E: Unsupported operand types -AR_c - AR_LIKE_M # E: Unsupported operand types +AR_f - AR_LIKE_m # type: ignore[operator] +AR_f - AR_LIKE_M # type: ignore[operator] +AR_c - AR_LIKE_m # type: ignore[operator] +AR_c - AR_LIKE_M # type: ignore[operator] -AR_m - AR_LIKE_f # E: Unsupported operand types -AR_M - AR_LIKE_f # E: Unsupported operand types -AR_m - AR_LIKE_c # E: Unsupported operand types -AR_M - AR_LIKE_c # E: Unsupported operand types +AR_m - AR_LIKE_f # type: ignore[operator] +AR_M - AR_LIKE_f # type: ignore[operator] +AR_m - AR_LIKE_c # type: ignore[operator] +AR_M - AR_LIKE_c # type: ignore[operator] -AR_m - AR_LIKE_M # E: Unsupported operand types -AR_LIKE_m - AR_M # E: Unsupported operand types +AR_m - AR_LIKE_M # type: ignore[operator] +AR_LIKE_m - AR_M # type: ignore[operator] # 
array floor division -AR_M // AR_LIKE_b # E: Unsupported operand types -AR_M // AR_LIKE_u # E: Unsupported operand types -AR_M // AR_LIKE_i # E: Unsupported operand types -AR_M // AR_LIKE_f # E: Unsupported operand types -AR_M // AR_LIKE_c # E: Unsupported operand types -AR_M // AR_LIKE_m # E: Unsupported operand types -AR_M // AR_LIKE_M # E: Unsupported operand types - -AR_b // AR_LIKE_M # E: Unsupported operand types -AR_u // AR_LIKE_M # E: Unsupported operand types -AR_i // AR_LIKE_M # E: Unsupported operand types -AR_f // AR_LIKE_M # E: Unsupported operand types -AR_c // AR_LIKE_M # E: Unsupported operand types -AR_m // AR_LIKE_M # E: Unsupported operand types -AR_M // AR_LIKE_M # E: Unsupported operand types - -_3 = AR_m // AR_LIKE_b # E: Need type annotation -AR_m // AR_LIKE_c # E: Unsupported operand types - -AR_b // AR_LIKE_m # E: Unsupported operand types -AR_u // AR_LIKE_m # E: Unsupported operand types -AR_i // AR_LIKE_m # E: Unsupported operand types -AR_f // AR_LIKE_m # E: Unsupported operand types -AR_c // AR_LIKE_m # E: Unsupported operand types +AR_M // AR_LIKE_b # type: ignore[operator] +AR_M // AR_LIKE_u # type: ignore[operator] +AR_M // AR_LIKE_i # type: ignore[operator] +AR_M // AR_LIKE_f # type: ignore[operator] +AR_M // AR_LIKE_c # type: ignore[operator] +AR_M // AR_LIKE_m # type: ignore[operator] +AR_M // AR_LIKE_M # type: ignore[operator] + +AR_b // AR_LIKE_M # type: ignore[operator] +AR_u // AR_LIKE_M # type: ignore[operator] +AR_i // AR_LIKE_M # type: ignore[operator] +AR_f // AR_LIKE_M # type: ignore[operator] +AR_c // AR_LIKE_M # type: ignore[operator] +AR_m // AR_LIKE_M # type: ignore[operator] +AR_M // AR_LIKE_M # type: ignore[operator] + +_3 = AR_m // AR_LIKE_b # type: ignore[var-annotated] +AR_m // AR_LIKE_c # type: ignore[operator] + +AR_b // AR_LIKE_m # type: ignore[operator] +AR_u // AR_LIKE_m # type: ignore[operator] +AR_i // AR_LIKE_m # type: ignore[operator] +AR_f // AR_LIKE_m # type: ignore[operator] +AR_c // AR_LIKE_m # type: 
ignore[operator] + +# regression tests for https://github.com/numpy/numpy/issues/28957 +AR_c // 2 # type: ignore[operator] +AR_c // AR_i # type: ignore[operator] +AR_c // AR_c # type: ignore[operator] # Array multiplication -AR_b *= AR_LIKE_u # E: incompatible type -AR_b *= AR_LIKE_i # E: incompatible type -AR_b *= AR_LIKE_f # E: incompatible type -AR_b *= AR_LIKE_c # E: incompatible type -AR_b *= AR_LIKE_m # E: incompatible type +AR_b *= AR_LIKE_u # type: ignore[arg-type] +AR_b *= AR_LIKE_i # type: ignore[arg-type] +AR_b *= AR_LIKE_f # type: ignore[arg-type] +AR_b *= AR_LIKE_c # type: ignore[arg-type] +AR_b *= AR_LIKE_m # type: ignore[arg-type] -AR_u *= AR_LIKE_i # E: incompatible type -AR_u *= AR_LIKE_f # E: incompatible type -AR_u *= AR_LIKE_c # E: incompatible type -AR_u *= AR_LIKE_m # E: incompatible type +AR_u *= AR_LIKE_f # type: ignore[arg-type] +AR_u *= AR_LIKE_c # type: ignore[arg-type] +AR_u *= AR_LIKE_m # type: ignore[arg-type] -AR_i *= AR_LIKE_f # E: incompatible type -AR_i *= AR_LIKE_c # E: incompatible type -AR_i *= AR_LIKE_m # E: incompatible type +AR_i *= AR_LIKE_f # type: ignore[arg-type] +AR_i *= AR_LIKE_c # type: ignore[arg-type] +AR_i *= AR_LIKE_m # type: ignore[arg-type] -AR_f *= AR_LIKE_c # E: incompatible type -AR_f *= AR_LIKE_m # E: incompatible type +AR_f *= AR_LIKE_c # type: ignore[arg-type] +AR_f *= AR_LIKE_m # type: ignore[arg-type] # Array power -AR_b **= AR_LIKE_b # E: Invalid self argument -AR_b **= AR_LIKE_u # E: Invalid self argument -AR_b **= AR_LIKE_i # E: Invalid self argument -AR_b **= AR_LIKE_f # E: Invalid self argument -AR_b **= AR_LIKE_c # E: Invalid self argument +AR_b **= AR_LIKE_b # type: ignore[misc] +AR_b **= AR_LIKE_u # type: ignore[misc] +AR_b **= AR_LIKE_i # type: ignore[misc] +AR_b **= AR_LIKE_f # type: ignore[misc] +AR_b **= AR_LIKE_c # type: ignore[misc] -AR_u **= AR_LIKE_i # E: incompatible type -AR_u **= AR_LIKE_f # E: incompatible type -AR_u **= AR_LIKE_c # E: incompatible type +AR_u **= AR_LIKE_f # type: 
ignore[arg-type] +AR_u **= AR_LIKE_c # type: ignore[arg-type] -AR_i **= AR_LIKE_f # E: incompatible type -AR_i **= AR_LIKE_c # E: incompatible type +AR_i **= AR_LIKE_f # type: ignore[arg-type] +AR_i **= AR_LIKE_c # type: ignore[arg-type] -AR_f **= AR_LIKE_c # E: incompatible type +AR_f **= AR_LIKE_c # type: ignore[arg-type] # Scalars -b_ - b_ # E: No overload variant +b_ - b_ # type: ignore[operator] -dt + dt # E: Unsupported operand types -td - dt # E: Unsupported operand types -td % 1 # E: Unsupported operand types -td / dt # E: No overload -td % dt # E: Unsupported operand types +dt + dt # type: ignore[operator] +td - dt # type: ignore[operator] +td % 1 # type: ignore[operator] +td / dt # type: ignore[operator] +td % dt # type: ignore[operator] --b_ # E: Unsupported operand type -+b_ # E: Unsupported operand type +-b_ # type: ignore[operator] ++b_ # type: ignore[operator] diff --git a/numpy/typing/tests/data/fail/array_constructors.pyi b/numpy/typing/tests/data/fail/array_constructors.pyi index 27eefe3c918d..6ed619958c1c 100644 --- a/numpy/typing/tests/data/fail/array_constructors.pyi +++ b/numpy/typing/tests/data/fail/array_constructors.pyi @@ -4,31 +4,31 @@ import numpy.typing as npt a: npt.NDArray[np.float64] generator = (i for i in range(10)) -np.require(a, requirements=1) # E: No overload variant -np.require(a, requirements="TEST") # E: incompatible type +np.require(a, requirements=1) # type: ignore[call-overload] +np.require(a, requirements="TEST") # type: ignore[arg-type] -np.zeros("test") # E: incompatible type -np.zeros() # E: require at least one argument +np.zeros("test") # type: ignore[arg-type] +np.zeros() # type: ignore[call-overload] -np.ones("test") # E: incompatible type -np.ones() # E: require at least one argument +np.ones("test") # type: ignore[arg-type] +np.ones() # type: ignore[call-overload] -np.array(0, float, True) # E: No overload variant +np.array(0, float, True) # type: ignore[call-overload] -np.linspace(None, 'bob') # E: No overload 
variant -np.linspace(0, 2, num=10.0) # E: No overload variant -np.linspace(0, 2, endpoint='True') # E: No overload variant -np.linspace(0, 2, retstep=b'False') # E: No overload variant -np.linspace(0, 2, dtype=0) # E: No overload variant -np.linspace(0, 2, axis=None) # E: No overload variant +np.linspace(None, "bob") # type: ignore[call-overload] +np.linspace(0, 2, num=10.0) # type: ignore[call-overload] +np.linspace(0, 2, endpoint="True") # type: ignore[call-overload] +np.linspace(0, 2, retstep=b"False") # type: ignore[call-overload] +np.linspace(0, 2, dtype=0) # type: ignore[call-overload] +np.linspace(0, 2, axis=None) # type: ignore[call-overload] -np.logspace(None, 'bob') # E: No overload variant -np.logspace(0, 2, base=None) # E: No overload variant +np.logspace(None, "bob") # type: ignore[call-overload] +np.logspace(0, 2, base=None) # type: ignore[call-overload] -np.geomspace(None, 'bob') # E: No overload variant +np.geomspace(None, "bob") # type: ignore[call-overload] -np.stack(generator) # E: No overload variant -np.hstack({1, 2}) # E: No overload variant -np.vstack(1) # E: No overload variant +np.stack(generator) # type: ignore[call-overload] +np.hstack({1, 2}) # type: ignore[call-overload] +np.vstack(1) # type: ignore[call-overload] -np.array([1], like=1) # E: No overload variant +np.array([1], like=1) # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/fail/array_like.pyi b/numpy/typing/tests/data/fail/array_like.pyi index 133b5fd49700..4e37354e8eba 100644 --- a/numpy/typing/tests/data/fail/array_like.pyi +++ b/numpy/typing/tests/data/fail/array_like.pyi @@ -1,16 +1,15 @@ import numpy as np from numpy._typing import ArrayLike +class A: ... 
-class A: - pass - - -x1: ArrayLike = (i for i in range(10)) # E: Incompatible types in assignment -x2: ArrayLike = A() # E: Incompatible types in assignment -x3: ArrayLike = {1: "foo", 2: "bar"} # E: Incompatible types in assignment +x1: ArrayLike = (i for i in range(10)) # type: ignore[assignment] +x2: ArrayLike = A() # type: ignore[assignment] +x3: ArrayLike = {1: "foo", 2: "bar"} # type: ignore[assignment] scalar = np.int64(1) -scalar.__array__(dtype=np.float64) # E: No overload variant +scalar.__array__(dtype=np.float64) # type: ignore[call-overload] array = np.array([1]) -array.__array__(dtype=np.float64) # E: No overload variant +array.__array__(dtype=np.float64) # type: ignore[call-overload] + +array.setfield(np.eye(1), np.int32, (0, 1)) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/fail/array_pad.pyi b/numpy/typing/tests/data/fail/array_pad.pyi index 2be51a87181d..42e61c8d70d6 100644 --- a/numpy/typing/tests/data/fail/array_pad.pyi +++ b/numpy/typing/tests/data/fail/array_pad.pyi @@ -3,4 +3,4 @@ import numpy.typing as npt AR_i8: npt.NDArray[np.int64] -np.pad(AR_i8, 2, mode="bob") # E: No overload variant +np.pad(AR_i8, 2, mode="bob") # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/fail/arrayprint.pyi b/numpy/typing/tests/data/fail/arrayprint.pyi index f8c8a3237816..3c9dc9330a2b 100644 --- a/numpy/typing/tests/data/fail/arrayprint.pyi +++ b/numpy/typing/tests/data/fail/arrayprint.pyi @@ -6,11 +6,10 @@ import numpy.typing as npt AR: npt.NDArray[np.float64] func1: Callable[[Any], str] -func2: Callable[[np.integer[Any]], str] +func2: Callable[[np.integer], str] -np.array2string(AR, style=None) # E: Unexpected keyword argument -np.array2string(AR, legacy="1.14") # E: incompatible type -np.array2string(AR, sign="*") # E: incompatible type -np.array2string(AR, floatmode="default") # E: incompatible type -np.array2string(AR, formatter={"A": func1}) # E: incompatible type -np.array2string(AR, formatter={"float": func2}) # E: 
Incompatible types +np.array2string(AR, legacy="1.14") # type: ignore[arg-type] +np.array2string(AR, sign="*") # type: ignore[arg-type] +np.array2string(AR, floatmode="default") # type: ignore[arg-type] +np.array2string(AR, formatter={"A": func1}) # type: ignore[arg-type] +np.array2string(AR, formatter={"float": func2}) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/fail/arrayterator.pyi b/numpy/typing/tests/data/fail/arrayterator.pyi index 00280b3a6a2c..8d2295a5859f 100644 --- a/numpy/typing/tests/data/fail/arrayterator.pyi +++ b/numpy/typing/tests/data/fail/arrayterator.pyi @@ -4,11 +4,11 @@ import numpy.typing as npt AR_i8: npt.NDArray[np.int64] ar_iter = np.lib.Arrayterator(AR_i8) -np.lib.Arrayterator(np.int64()) # E: incompatible type -ar_iter.shape = (10, 5) # E: is read-only -ar_iter[None] # E: Invalid index type -ar_iter[None, 1] # E: Invalid index type -ar_iter[np.intp()] # E: Invalid index type -ar_iter[np.intp(), ...] # E: Invalid index type -ar_iter[AR_i8] # E: Invalid index type -ar_iter[AR_i8, :] # E: Invalid index type +np.lib.Arrayterator(np.int64()) # type: ignore[arg-type] +ar_iter.shape = (10, 5) # type: ignore[misc] +ar_iter[None] # type: ignore[index] +ar_iter[None, 1] # type: ignore[index] +ar_iter[np.intp()] # type: ignore[index] +ar_iter[np.intp(), ...] 
# type: ignore[index] +ar_iter[AR_i8] # type: ignore[index] +ar_iter[AR_i8, :] # type: ignore[index] diff --git a/numpy/typing/tests/data/fail/bitwise_ops.pyi b/numpy/typing/tests/data/fail/bitwise_ops.pyi index 13b47c485b41..f4de2928ff54 100644 --- a/numpy/typing/tests/data/fail/bitwise_ops.pyi +++ b/numpy/typing/tests/data/fail/bitwise_ops.pyi @@ -4,18 +4,14 @@ i8 = np.int64() i4 = np.int32() u8 = np.uint64() b_ = np.bool() -i = int() +i = 0 f8 = np.float64() -b_ >> f8 # E: No overload variant -i8 << f8 # E: No overload variant -i | f8 # E: Unsupported operand types -i8 ^ f8 # E: No overload variant -u8 & f8 # E: No overload variant -~f8 # E: Unsupported operand type +b_ >> f8 # type: ignore[operator] +i8 << f8 # type: ignore[operator] +i | f8 # type: ignore[operator] +i8 ^ f8 # type: ignore[operator] +u8 & f8 # type: ignore[operator] +~f8 # type: ignore[operator] # TODO: Certain mixes like i4 << u8 go to float and thus should fail - -# mypys' error message for `NoReturn` is unfortunately pretty bad -# TODO: Re-enable this once we add support for numerical precision for `number`s -# a = u8 | 0 # E: Need type annotation diff --git a/numpy/typing/tests/data/fail/char.pyi b/numpy/typing/tests/data/fail/char.pyi index 542a273baef5..3dbe5eda296e 100644 --- a/numpy/typing/tests/data/fail/char.pyi +++ b/numpy/typing/tests/data/fail/char.pyi @@ -4,66 +4,60 @@ import numpy.typing as npt AR_U: npt.NDArray[np.str_] AR_S: npt.NDArray[np.bytes_] -np.char.equal(AR_U, AR_S) # E: incompatible type - -np.char.not_equal(AR_U, AR_S) # E: incompatible type - -np.char.greater_equal(AR_U, AR_S) # E: incompatible type - -np.char.less_equal(AR_U, AR_S) # E: incompatible type - -np.char.greater(AR_U, AR_S) # E: incompatible type - -np.char.less(AR_U, AR_S) # E: incompatible type - -np.char.encode(AR_S) # E: incompatible type -np.char.decode(AR_U) # E: incompatible type - -np.char.join(AR_U, b"_") # E: incompatible type -np.char.join(AR_S, "_") # E: incompatible type - 
-np.char.ljust(AR_U, 5, fillchar=b"a") # E: incompatible type -np.char.ljust(AR_S, 5, fillchar="a") # E: incompatible type -np.char.rjust(AR_U, 5, fillchar=b"a") # E: incompatible type -np.char.rjust(AR_S, 5, fillchar="a") # E: incompatible type - -np.char.lstrip(AR_U, chars=b"a") # E: incompatible type -np.char.lstrip(AR_S, chars="a") # E: incompatible type -np.char.strip(AR_U, chars=b"a") # E: incompatible type -np.char.strip(AR_S, chars="a") # E: incompatible type -np.char.rstrip(AR_U, chars=b"a") # E: incompatible type -np.char.rstrip(AR_S, chars="a") # E: incompatible type - -np.char.partition(AR_U, b"a") # E: incompatible type -np.char.partition(AR_S, "a") # E: incompatible type -np.char.rpartition(AR_U, b"a") # E: incompatible type -np.char.rpartition(AR_S, "a") # E: incompatible type - -np.char.replace(AR_U, b"_", b"-") # E: incompatible type -np.char.replace(AR_S, "_", "-") # E: incompatible type - -np.char.split(AR_U, b"_") # E: incompatible type -np.char.split(AR_S, "_") # E: incompatible type -np.char.rsplit(AR_U, b"_") # E: incompatible type -np.char.rsplit(AR_S, "_") # E: incompatible type - -np.char.count(AR_U, b"a", start=[1, 2, 3]) # E: incompatible type -np.char.count(AR_S, "a", end=9) # E: incompatible type - -np.char.endswith(AR_U, b"a", start=[1, 2, 3]) # E: incompatible type -np.char.endswith(AR_S, "a", end=9) # E: incompatible type -np.char.startswith(AR_U, b"a", start=[1, 2, 3]) # E: incompatible type -np.char.startswith(AR_S, "a", end=9) # E: incompatible type - -np.char.find(AR_U, b"a", start=[1, 2, 3]) # E: incompatible type -np.char.find(AR_S, "a", end=9) # E: incompatible type -np.char.rfind(AR_U, b"a", start=[1, 2, 3]) # E: incompatible type -np.char.rfind(AR_S, "a", end=9) # E: incompatible type - -np.char.index(AR_U, b"a", start=[1, 2, 3]) # E: incompatible type -np.char.index(AR_S, "a", end=9) # E: incompatible type -np.char.rindex(AR_U, b"a", start=[1, 2, 3]) # E: incompatible type -np.char.rindex(AR_S, "a", end=9) # E: 
incompatible type - -np.char.isdecimal(AR_S) # E: incompatible type -np.char.isnumeric(AR_S) # E: incompatible type +np.char.equal(AR_U, AR_S) # type: ignore[arg-type] +np.char.not_equal(AR_U, AR_S) # type: ignore[arg-type] + +np.char.greater_equal(AR_U, AR_S) # type: ignore[arg-type] +np.char.less_equal(AR_U, AR_S) # type: ignore[arg-type] +np.char.greater(AR_U, AR_S) # type: ignore[arg-type] +np.char.less(AR_U, AR_S) # type: ignore[arg-type] + +np.char.encode(AR_S) # type: ignore[arg-type] +np.char.decode(AR_U) # type: ignore[arg-type] + +np.char.join(AR_U, b"_") # type: ignore[arg-type] +np.char.join(AR_S, "_") # type: ignore[arg-type] + +np.char.ljust(AR_U, 5, fillchar=b"a") # type: ignore[arg-type] +np.char.rjust(AR_U, 5, fillchar=b"a") # type: ignore[arg-type] + +np.char.lstrip(AR_U, chars=b"a") # type: ignore[arg-type] +np.char.lstrip(AR_S, chars="a") # type: ignore[arg-type] +np.char.strip(AR_U, chars=b"a") # type: ignore[arg-type] +np.char.strip(AR_S, chars="a") # type: ignore[arg-type] +np.char.rstrip(AR_U, chars=b"a") # type: ignore[arg-type] +np.char.rstrip(AR_S, chars="a") # type: ignore[arg-type] + +np.char.partition(AR_U, b"a") # type: ignore[arg-type] +np.char.partition(AR_S, "a") # type: ignore[arg-type] +np.char.rpartition(AR_U, b"a") # type: ignore[arg-type] +np.char.rpartition(AR_S, "a") # type: ignore[arg-type] + +np.char.replace(AR_U, b"_", b"-") # type: ignore[arg-type] +np.char.replace(AR_S, "_", "-") # type: ignore[arg-type] + +np.char.split(AR_U, b"_") # type: ignore[arg-type] +np.char.split(AR_S, "_") # type: ignore[arg-type] +np.char.rsplit(AR_U, b"_") # type: ignore[arg-type] +np.char.rsplit(AR_S, "_") # type: ignore[arg-type] + +np.char.count(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.char.count(AR_S, "a", end=9) # type: ignore[arg-type] + +np.char.endswith(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.char.endswith(AR_S, "a", end=9) # type: ignore[arg-type] +np.char.startswith(AR_U, b"a", start=[1, 2, 3]) # 
type: ignore[arg-type] +np.char.startswith(AR_S, "a", end=9) # type: ignore[arg-type] + +np.char.find(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.char.find(AR_S, "a", end=9) # type: ignore[arg-type] +np.char.rfind(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.char.rfind(AR_S, "a", end=9) # type: ignore[arg-type] + +np.char.index(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.char.index(AR_S, "a", end=9) # type: ignore[arg-type] +np.char.rindex(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.char.rindex(AR_S, "a", end=9) # type: ignore[arg-type] + +np.char.isdecimal(AR_S) # type: ignore[arg-type] +np.char.isnumeric(AR_S) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/fail/chararray.pyi b/numpy/typing/tests/data/fail/chararray.pyi index d334f689d121..806ec5a0d303 100644 --- a/numpy/typing/tests/data/fail/chararray.pyi +++ b/numpy/typing/tests/data/fail/chararray.pyi @@ -1,62 +1,61 @@ -import numpy as np from typing import Any -AR_U: np.char.chararray[Any, np.dtype[np.str_]] -AR_S: np.char.chararray[Any, np.dtype[np.bytes_]] - -AR_S.encode() # E: Invalid self argument -AR_U.decode() # E: Invalid self argument - -AR_U.join(b"_") # E: incompatible type -AR_S.join("_") # E: incompatible type - -AR_U.ljust(5, fillchar=b"a") # E: incompatible type -AR_S.ljust(5, fillchar="a") # E: incompatible type -AR_U.rjust(5, fillchar=b"a") # E: incompatible type -AR_S.rjust(5, fillchar="a") # E: incompatible type - -AR_U.lstrip(chars=b"a") # E: incompatible type -AR_S.lstrip(chars="a") # E: incompatible type -AR_U.strip(chars=b"a") # E: incompatible type -AR_S.strip(chars="a") # E: incompatible type -AR_U.rstrip(chars=b"a") # E: incompatible type -AR_S.rstrip(chars="a") # E: incompatible type - -AR_U.partition(b"a") # E: incompatible type -AR_S.partition("a") # E: incompatible type -AR_U.rpartition(b"a") # E: incompatible type -AR_S.rpartition("a") # E: incompatible type - -AR_U.replace(b"_", b"-") # E: incompatible type 
-AR_S.replace("_", "-") # E: incompatible type - -AR_U.split(b"_") # E: incompatible type -AR_S.split("_") # E: incompatible type -AR_S.split(1) # E: incompatible type -AR_U.rsplit(b"_") # E: incompatible type -AR_S.rsplit("_") # E: incompatible type - -AR_U.count(b"a", start=[1, 2, 3]) # E: incompatible type -AR_S.count("a", end=9) # E: incompatible type - -AR_U.endswith(b"a", start=[1, 2, 3]) # E: incompatible type -AR_S.endswith("a", end=9) # E: incompatible type -AR_U.startswith(b"a", start=[1, 2, 3]) # E: incompatible type -AR_S.startswith("a", end=9) # E: incompatible type - -AR_U.find(b"a", start=[1, 2, 3]) # E: incompatible type -AR_S.find("a", end=9) # E: incompatible type -AR_U.rfind(b"a", start=[1, 2, 3]) # E: incompatible type -AR_S.rfind("a", end=9) # E: incompatible type - -AR_U.index(b"a", start=[1, 2, 3]) # E: incompatible type -AR_S.index("a", end=9) # E: incompatible type -AR_U.rindex(b"a", start=[1, 2, 3]) # E: incompatible type -AR_S.rindex("a", end=9) # E: incompatible type - -AR_U == AR_S # E: Unsupported operand types -AR_U != AR_S # E: Unsupported operand types -AR_U >= AR_S # E: Unsupported operand types -AR_U <= AR_S # E: Unsupported operand types -AR_U > AR_S # E: Unsupported operand types -AR_U < AR_S # E: Unsupported operand types +import numpy as np + +AR_U: np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] # type: ignore[deprecated] +AR_S: np.char.chararray[tuple[Any, ...], np.dtype[np.bytes_]] # type: ignore[deprecated] + +AR_S.encode() # type: ignore[misc] +AR_U.decode() # type: ignore[misc] + +AR_U.join(b"_") # type: ignore[arg-type] +AR_S.join("_") # type: ignore[arg-type] + +AR_U.ljust(5, fillchar=b"a") # type: ignore[arg-type] +AR_U.rjust(5, fillchar=b"a") # type: ignore[arg-type] + +AR_U.lstrip(chars=b"a") # type: ignore[arg-type] +AR_S.lstrip(chars="a") # type: ignore[arg-type] +AR_U.strip(chars=b"a") # type: ignore[arg-type] +AR_S.strip(chars="a") # type: ignore[arg-type] +AR_U.rstrip(chars=b"a") # type: ignore[arg-type] 
+AR_S.rstrip(chars="a") # type: ignore[arg-type] + +AR_U.partition(b"a") # type: ignore[arg-type] +AR_S.partition("a") # type: ignore[arg-type] +AR_U.rpartition(b"a") # type: ignore[arg-type] +AR_S.rpartition("a") # type: ignore[arg-type] + +AR_U.replace(b"_", b"-") # type: ignore[arg-type] +AR_S.replace("_", "-") # type: ignore[arg-type] + +AR_U.split(b"_") # type: ignore[arg-type] +AR_S.split("_") # type: ignore[arg-type] +AR_S.split(1) # type: ignore[arg-type] +AR_U.rsplit(b"_") # type: ignore[arg-type] +AR_S.rsplit("_") # type: ignore[arg-type] + +AR_U.count(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.count("a", end=9) # type: ignore[arg-type] + +AR_U.endswith(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.endswith("a", end=9) # type: ignore[arg-type] +AR_U.startswith(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.startswith("a", end=9) # type: ignore[arg-type] + +AR_U.find(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.find("a", end=9) # type: ignore[arg-type] +AR_U.rfind(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.rfind("a", end=9) # type: ignore[arg-type] + +AR_U.index(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.index("a", end=9) # type: ignore[arg-type] +AR_U.rindex(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.rindex("a", end=9) # type: ignore[arg-type] + +AR_U == AR_S # type: ignore[operator] +AR_U != AR_S # type: ignore[operator] +AR_U >= AR_S # type: ignore[operator] +AR_U <= AR_S # type: ignore[operator] +AR_U > AR_S # type: ignore[operator] +AR_U < AR_S # type: ignore[operator] diff --git a/numpy/typing/tests/data/fail/comparisons.pyi b/numpy/typing/tests/data/fail/comparisons.pyi index 1ae8149082b6..d2965b5c1a91 100644 --- a/numpy/typing/tests/data/fail/comparisons.pyi +++ b/numpy/typing/tests/data/fail/comparisons.pyi @@ -7,21 +7,21 @@ AR_c: npt.NDArray[np.complex128] AR_m: npt.NDArray[np.timedelta64] AR_M: npt.NDArray[np.datetime64] -AR_f > AR_m # E: Unsupported operand types -AR_c > AR_m # 
E: Unsupported operand types +AR_f > AR_m # type: ignore[operator] +AR_c > AR_m # type: ignore[operator] -AR_m > AR_f # E: Unsupported operand types -AR_m > AR_c # E: Unsupported operand types +AR_m > AR_f # type: ignore[operator] +AR_m > AR_c # type: ignore[operator] -AR_i > AR_M # E: Unsupported operand types -AR_f > AR_M # E: Unsupported operand types -AR_m > AR_M # E: Unsupported operand types +AR_i > AR_M # type: ignore[operator] +AR_f > AR_M # type: ignore[operator] +AR_m > AR_M # type: ignore[operator] -AR_M > AR_i # E: Unsupported operand types -AR_M > AR_f # E: Unsupported operand types -AR_M > AR_m # E: Unsupported operand types +AR_M > AR_i # type: ignore[operator] +AR_M > AR_f # type: ignore[operator] +AR_M > AR_m # type: ignore[operator] -AR_i > str() # E: No overload variant -AR_i > bytes() # E: No overload variant -str() > AR_M # E: Unsupported operand types -bytes() > AR_M # E: Unsupported operand types +AR_i > "" # type: ignore[operator] +AR_i > b"" # type: ignore[operator] +"" > AR_M # type: ignore[operator] +b"" > AR_M # type: ignore[operator] diff --git a/numpy/typing/tests/data/fail/constants.pyi b/numpy/typing/tests/data/fail/constants.pyi index b5d6d27eae46..10717f664e0a 100644 --- a/numpy/typing/tests/data/fail/constants.pyi +++ b/numpy/typing/tests/data/fail/constants.pyi @@ -1,3 +1,3 @@ import numpy as np -np.little_endian = np.little_endian # E: Cannot assign to final +np.little_endian = np.little_endian # type: ignore[misc] diff --git a/numpy/typing/tests/data/fail/datasource.pyi b/numpy/typing/tests/data/fail/datasource.pyi index 44f4fa27307a..4c603cf693a1 100644 --- a/numpy/typing/tests/data/fail/datasource.pyi +++ b/numpy/typing/tests/data/fail/datasource.pyi @@ -1,15 +1,16 @@ from pathlib import Path + import numpy as np path: Path d1: np.lib.npyio.DataSource -d1.abspath(path) # E: incompatible type -d1.abspath(b"...") # E: incompatible type +d1.abspath(path) # type: ignore[arg-type] +d1.abspath(b"...") # type: ignore[arg-type] 
-d1.exists(path) # E: incompatible type -d1.exists(b"...") # E: incompatible type +d1.exists(path) # type: ignore[arg-type] +d1.exists(b"...") # type: ignore[arg-type] -d1.open(path, "r") # E: incompatible type -d1.open(b"...", encoding="utf8") # E: incompatible type -d1.open(None, newline="/n") # E: incompatible type +d1.open(path, "r") # type: ignore[arg-type] +d1.open(b"...", encoding="utf8") # type: ignore[arg-type] +d1.open(None, newline="/n") # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/fail/dtype.pyi b/numpy/typing/tests/data/fail/dtype.pyi index 0f3810f3c014..64a7c3f775e1 100644 --- a/numpy/typing/tests/data/fail/dtype.pyi +++ b/numpy/typing/tests/data/fail/dtype.pyi @@ -1,18 +1,15 @@ import numpy as np - class Test1: not_dtype = np.dtype(float) - class Test2: dtype = float +np.dtype(Test1()) # type: ignore[call-overload] +np.dtype(Test2()) # type: ignore[arg-type] -np.dtype(Test1()) # E: No overload variant of "dtype" matches -np.dtype(Test2()) # E: incompatible type - -np.dtype( # E: No overload variant of "dtype" matches +np.dtype( # type: ignore[call-overload] { "field1": (float, 1), "field2": (int, 3), diff --git a/numpy/typing/tests/data/fail/einsumfunc.pyi b/numpy/typing/tests/data/fail/einsumfunc.pyi index e51f72e47b25..982ad986297f 100644 --- a/numpy/typing/tests/data/fail/einsumfunc.pyi +++ b/numpy/typing/tests/data/fail/einsumfunc.pyi @@ -6,7 +6,7 @@ AR_f: npt.NDArray[np.float64] AR_m: npt.NDArray[np.timedelta64] AR_U: npt.NDArray[np.str_] -np.einsum("i,i->i", AR_i, AR_m) # E: incompatible type -np.einsum("i,i->i", AR_f, AR_f, dtype=np.int32) # E: incompatible type -np.einsum("i,i->i", AR_i, AR_i, out=AR_U) # E: Value of type variable "_ArrayType" of "einsum" cannot be -np.einsum("i,i->i", AR_i, AR_i, out=AR_U, casting="unsafe") # E: No overload variant +np.einsum("i,i->i", AR_i, AR_m) # type: ignore[arg-type] +np.einsum("i,i->i", AR_f, AR_f, dtype=np.int32) # type: ignore[arg-type] +np.einsum("i,i->i", AR_i, AR_i, out=AR_U) # 
type: ignore[type-var] +np.einsum("i,i->i", AR_i, AR_i, out=AR_U, casting="unsafe") # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/fail/false_positives.pyi b/numpy/typing/tests/data/fail/false_positives.pyi deleted file mode 100644 index 7e79230663c2..000000000000 --- a/numpy/typing/tests/data/fail/false_positives.pyi +++ /dev/null @@ -1,11 +0,0 @@ -import numpy as np -import numpy.typing as npt - -AR_f8: npt.NDArray[np.float64] - -# NOTE: Mypy bug presumably due to the special-casing of heterogeneous tuples; -# xref numpy/numpy#20901 -# -# The expected output should be no different than, e.g., when using a -# list instead of a tuple -np.concatenate(([1], AR_f8)) # E: Argument 1 to "concatenate" has incompatible type diff --git a/numpy/typing/tests/data/fail/flatiter.pyi b/numpy/typing/tests/data/fail/flatiter.pyi index b0c3b023f16b..2c6e912bd318 100644 --- a/numpy/typing/tests/data/fail/flatiter.pyi +++ b/numpy/typing/tests/data/fail/flatiter.pyi @@ -1,25 +1,38 @@ from typing import Any import numpy as np -import numpy._typing as npt +import numpy.typing as npt +class _Index: + def __index__(self) -> int: ... -class Index: - def __index__(self) -> int: - ... +class _MyArray: + def __array__(self) -> np.ndarray[tuple[int], np.dtypes.Float64DType]: ... 
+_index: _Index +_my_array: _MyArray +_something: Any +_dtype: np.dtype[np.int8] -a: np.flatiter[npt.NDArray[np.float64]] -supports_array: npt._SupportsArray[np.dtype[np.float64]] +_a_nd: np.flatiter[npt.NDArray[np.float64]] -a.base = Any # E: Property "base" defined in "flatiter" is read-only -a.coords = Any # E: Property "coords" defined in "flatiter" is read-only -a.index = Any # E: Property "index" defined in "flatiter" is read-only -a.copy(order='C') # E: Unexpected keyword argument +### + +_a_nd.base = _something # type: ignore[misc] +_a_nd.coords = _something # type: ignore[misc] +_a_nd.index = _something # type: ignore[misc] + +_a_nd.copy("C") # type: ignore[call-arg] +_a_nd.copy(order="C") # type: ignore[call-arg] # NOTE: Contrary to `ndarray.__getitem__` its counterpart in `flatiter` # does not accept objects with the `__array__` or `__index__` protocols; # boolean indexing is just plain broken (gh-17175) -a[np.bool()] # E: No overload variant of "__getitem__" -a[Index()] # E: No overload variant of "__getitem__" -a[supports_array] # E: No overload variant of "__getitem__" +_a_nd[np.True_] # type: ignore[call-overload] +_a_nd[_index] # type: ignore[call-overload] +_a_nd[_my_array] # type: ignore[call-overload] + +# `dtype` and `copy` are no-ops in `flatiter.__array__` +_a_nd.__array__(_dtype) # type: ignore[arg-type] +_a_nd.__array__(dtype=_dtype) # type: ignore[call-arg] +_a_nd.__array__(copy=True) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/fail/fromnumeric.pyi b/numpy/typing/tests/data/fail/fromnumeric.pyi index accddaf8c3bc..c3f060679089 100644 --- a/numpy/typing/tests/data/fail/fromnumeric.pyi +++ b/numpy/typing/tests/data/fail/fromnumeric.pyi @@ -6,156 +6,144 @@ import numpy.typing as npt A = np.array(True, ndmin=2, dtype=bool) A.setflags(write=False) AR_U: npt.NDArray[np.str_] +AR_M: npt.NDArray[np.datetime64] +AR_f4: npt.NDArray[np.float32] a = np.bool(True) -np.take(a, None) # E: No overload variant -np.take(a, axis=1.0) # E: No 
overload variant -np.take(A, out=1) # E: No overload variant -np.take(A, mode="bob") # E: No overload variant +np.take(a, None) # type: ignore[call-overload] +np.take(a, axis=1.0) # type: ignore[call-overload] +np.take(A, out=1) # type: ignore[call-overload] +np.take(A, mode="bob") # type: ignore[call-overload] -np.reshape(a, None) # E: No overload variant -np.reshape(A, 1, order="bob") # E: No overload variant - -np.choose(a, None) # E: No overload variant -np.choose(a, out=1.0) # E: No overload variant -np.choose(A, mode="bob") # E: No overload variant - -np.repeat(a, None) # E: No overload variant -np.repeat(A, 1, axis=1.0) # E: No overload variant - -np.swapaxes(A, None, 1) # E: No overload variant -np.swapaxes(A, 1, [0]) # E: No overload variant - -np.transpose(A, axes=1.0) # E: No overload variant - -np.partition(a, None) # E: No overload variant -np.partition( # E: No overload variant - a, 0, axis="bob" -) -np.partition( # E: No overload variant - A, 0, kind="bob" -) -np.partition( - A, 0, order=range(5) # E: Argument "order" to "partition" has incompatible type -) - -np.argpartition( - a, None # E: incompatible type -) -np.argpartition( - a, 0, axis="bob" # E: incompatible type -) -np.argpartition( - A, 0, kind="bob" # E: incompatible type -) -np.argpartition( - A, 0, order=range(5) # E: Argument "order" to "argpartition" has incompatible type -) - -np.sort(A, axis="bob") # E: No overload variant -np.sort(A, kind="bob") # E: No overload variant -np.sort(A, order=range(5)) # E: Argument "order" to "sort" has incompatible type - -np.argsort(A, axis="bob") # E: Argument "axis" to "argsort" has incompatible type -np.argsort(A, kind="bob") # E: Argument "kind" to "argsort" has incompatible type -np.argsort(A, order=range(5)) # E: Argument "order" to "argsort" has incompatible type - -np.argmax(A, axis="bob") # E: No overload variant of "argmax" matches argument type -np.argmax(A, kind="bob") # E: No overload variant of "argmax" matches argument type - 
-np.argmin(A, axis="bob") # E: No overload variant of "argmin" matches argument type -np.argmin(A, kind="bob") # E: No overload variant of "argmin" matches argument type - -np.searchsorted( # E: No overload variant of "searchsorted" matches argument type - A[0], 0, side="bob" -) -np.searchsorted( # E: No overload variant of "searchsorted" matches argument type - A[0], 0, sorter=1.0 -) - -np.resize(A, 1.0) # E: No overload variant - -np.squeeze(A, 1.0) # E: No overload variant of "squeeze" matches argument type - -np.diagonal(A, offset=None) # E: No overload variant -np.diagonal(A, axis1="bob") # E: No overload variant -np.diagonal(A, axis2=[]) # E: No overload variant - -np.trace(A, offset=None) # E: No overload variant -np.trace(A, axis1="bob") # E: No overload variant -np.trace(A, axis2=[]) # E: No overload variant - -np.ravel(a, order="bob") # E: No overload variant - -np.compress( # E: No overload variant - [True], A, axis=1.0 -) - -np.clip(a, 1, 2, out=1) # E: No overload variant of "clip" matches argument type - -np.sum(a, axis=1.0) # E: No overload variant -np.sum(a, keepdims=1.0) # E: No overload variant -np.sum(a, initial=[1]) # E: No overload variant - -np.all(a, axis=1.0) # E: No overload variant -np.all(a, keepdims=1.0) # E: No overload variant -np.all(a, out=1.0) # E: No overload variant - -np.any(a, axis=1.0) # E: No overload variant -np.any(a, keepdims=1.0) # E: No overload variant -np.any(a, out=1.0) # E: No overload variant - -np.cumsum(a, axis=1.0) # E: No overload variant -np.cumsum(a, dtype=1.0) # E: No overload variant -np.cumsum(a, out=1.0) # E: No overload variant - -np.ptp(a, axis=1.0) # E: No overload variant -np.ptp(a, keepdims=1.0) # E: No overload variant -np.ptp(a, out=1.0) # E: No overload variant - -np.amax(a, axis=1.0) # E: No overload variant -np.amax(a, keepdims=1.0) # E: No overload variant -np.amax(a, out=1.0) # E: No overload variant -np.amax(a, initial=[1.0]) # E: No overload variant -np.amax(a, where=[1.0]) # E: incompatible 
type - -np.amin(a, axis=1.0) # E: No overload variant -np.amin(a, keepdims=1.0) # E: No overload variant -np.amin(a, out=1.0) # E: No overload variant -np.amin(a, initial=[1.0]) # E: No overload variant -np.amin(a, where=[1.0]) # E: incompatible type - -np.prod(a, axis=1.0) # E: No overload variant -np.prod(a, out=False) # E: No overload variant -np.prod(a, keepdims=1.0) # E: No overload variant -np.prod(a, initial=int) # E: No overload variant -np.prod(a, where=1.0) # E: No overload variant -np.prod(AR_U) # E: incompatible type - -np.cumprod(a, axis=1.0) # E: No overload variant -np.cumprod(a, out=False) # E: No overload variant -np.cumprod(AR_U) # E: incompatible type - -np.size(a, axis=1.0) # E: Argument "axis" to "size" has incompatible type - -np.around(a, decimals=1.0) # E: No overload variant -np.around(a, out=type) # E: No overload variant -np.around(AR_U) # E: incompatible type - -np.mean(a, axis=1.0) # E: No overload variant -np.mean(a, out=False) # E: No overload variant -np.mean(a, keepdims=1.0) # E: No overload variant -np.mean(AR_U) # E: incompatible type - -np.std(a, axis=1.0) # E: No overload variant -np.std(a, out=False) # E: No overload variant -np.std(a, ddof='test') # E: No overload variant -np.std(a, keepdims=1.0) # E: No overload variant -np.std(AR_U) # E: incompatible type - -np.var(a, axis=1.0) # E: No overload variant -np.var(a, out=False) # E: No overload variant -np.var(a, ddof='test') # E: No overload variant -np.var(a, keepdims=1.0) # E: No overload variant -np.var(AR_U) # E: incompatible type +np.reshape(a, None) # type: ignore[call-overload] +np.reshape(A, 1, order="bob") # type: ignore[call-overload] + +np.choose(a, None) # type: ignore[call-overload] +np.choose(a, out=1.0) # type: ignore[call-overload] +np.choose(A, mode="bob") # type: ignore[call-overload] + +np.repeat(a, None) # type: ignore[call-overload] +np.repeat(A, 1, axis=1.0) # type: ignore[call-overload] + +np.swapaxes(A, None, 1) # type: ignore[call-overload] 
+np.swapaxes(A, 1, [0]) # type: ignore[call-overload] + +np.transpose(A, axes=1.0) # type: ignore[call-overload] + +np.partition(a, None) # type: ignore[call-overload] +np.partition(a, 0, axis="bob") # type: ignore[call-overload] +np.partition(A, 0, kind="bob") # type: ignore[call-overload] +np.partition(A, 0, order=range(5)) # type: ignore[arg-type] + +np.argpartition(a, None) # type: ignore[call-overload] +np.argpartition(a, 0, axis="bob") # type: ignore[call-overload] +np.argpartition(A, 0, kind="bob") # type: ignore[call-overload] +np.argpartition(A, 0, order=range(5)) # type: ignore[arg-type] +np.argpartition(AR_f4, 0, order="a") # type: ignore[arg-type] + +np.sort(A, axis="bob") # type: ignore[call-overload] +np.sort(A, kind="bob") # type: ignore[call-overload] +np.sort(A, order=range(5)) # type: ignore[arg-type] + +np.argsort(A, axis="bob") # type: ignore[arg-type] +np.argsort(A, kind="bob") # type: ignore[arg-type] +np.argsort(A, order=range(5)) # type: ignore[arg-type] + +np.argmax(A, axis="bob") # type: ignore[call-overload] +np.argmax(A, kind="bob") # type: ignore[call-overload] +np.argmax(A, out=AR_f4) # type: ignore[type-var] + +np.argmin(A, axis="bob") # type: ignore[call-overload] +np.argmin(A, kind="bob") # type: ignore[call-overload] +np.argmin(A, out=AR_f4) # type: ignore[type-var] + +np.searchsorted(A[0], 0, side="bob") # type: ignore[call-overload] +np.searchsorted(A[0], 0, sorter=1.0) # type: ignore[call-overload] + +np.resize(A, 1.0) # type: ignore[call-overload] + +np.squeeze(A, 1.0) # type: ignore[call-overload] + +np.diagonal(A, offset=None) # type: ignore[call-overload] +np.diagonal(A, axis1="bob") # type: ignore[call-overload] +np.diagonal(A, axis2=[]) # type: ignore[call-overload] + +np.trace(A, offset=None) # type: ignore[call-overload] +np.trace(A, axis1="bob") # type: ignore[call-overload] +np.trace(A, axis2=[]) # type: ignore[call-overload] + +np.ravel(a, order="bob") # type: ignore[call-overload] + +np.nonzero(0) # type: 
ignore[arg-type] + +np.compress([True], A, axis=1.0) # type: ignore[call-overload] + +np.clip(a, 1, 2, out=1) # type: ignore[call-overload] + +np.sum(a, axis=1.0) # type: ignore[call-overload] +np.sum(a, keepdims=1.0) # type: ignore[call-overload] +np.sum(a, initial=[1]) # type: ignore[call-overload] + +np.all(a, axis=1.0) # type: ignore[call-overload] +np.all(a, keepdims=1.0) # type: ignore[call-overload] +np.all(a, out=1.0) # type: ignore[call-overload] + +np.any(a, axis=1.0) # type: ignore[call-overload] +np.any(a, keepdims=1.0) # type: ignore[call-overload] +np.any(a, out=1.0) # type: ignore[call-overload] + +np.cumsum(a, axis=1.0) # type: ignore[call-overload] +np.cumsum(a, dtype=1.0) # type: ignore[call-overload] +np.cumsum(a, out=1.0) # type: ignore[call-overload] + +np.ptp(a, axis=1.0) # type: ignore[call-overload] +np.ptp(a, keepdims=1.0) # type: ignore[call-overload] +np.ptp(a, out=1.0) # type: ignore[call-overload] + +np.amax(a, axis=1.0) # type: ignore[call-overload] +np.amax(a, keepdims=1.0) # type: ignore[call-overload] +np.amax(a, out=1.0) # type: ignore[call-overload] +np.amax(a, initial=[1.0]) # type: ignore[call-overload] +np.amax(a, where=[1.0]) # type: ignore[arg-type] + +np.amin(a, axis=1.0) # type: ignore[call-overload] +np.amin(a, keepdims=1.0) # type: ignore[call-overload] +np.amin(a, out=1.0) # type: ignore[call-overload] +np.amin(a, initial=[1.0]) # type: ignore[call-overload] +np.amin(a, where=[1.0]) # type: ignore[arg-type] + +np.prod(a, axis=1.0) # type: ignore[call-overload] +np.prod(a, out=False) # type: ignore[call-overload] +np.prod(a, keepdims=1.0) # type: ignore[call-overload] +np.prod(a, initial=int) # type: ignore[call-overload] +np.prod(a, where=1.0) # type: ignore[call-overload] +np.prod(AR_U) # type: ignore[arg-type] + +np.cumprod(a, axis=1.0) # type: ignore[call-overload] +np.cumprod(a, out=False) # type: ignore[call-overload] +np.cumprod(AR_U) # type: ignore[arg-type] + +np.size(a, axis=1.0) # type: ignore[arg-type] + 
+np.around(a, decimals=1.0) # type: ignore[call-overload] +np.around(a, out=type) # type: ignore[call-overload] +np.around(AR_U) # type: ignore[arg-type] + +np.mean(a, axis=1.0) # type: ignore[call-overload] +np.mean(a, out=False) # type: ignore[call-overload] +np.mean(a, keepdims=1.0) # type: ignore[call-overload] +np.mean(AR_U) # type: ignore[arg-type] +np.mean(AR_M) # type: ignore[arg-type] + +np.std(a, axis=1.0) # type: ignore[call-overload] +np.std(a, out=False) # type: ignore[call-overload] +np.std(a, ddof="test") # type: ignore[call-overload] +np.std(a, keepdims=1.0) # type: ignore[call-overload] +np.std(AR_U) # type: ignore[arg-type] + +np.var(a, axis=1.0) # type: ignore[call-overload] +np.var(a, out=False) # type: ignore[call-overload] +np.var(a, ddof="test") # type: ignore[call-overload] +np.var(a, keepdims=1.0) # type: ignore[call-overload] +np.var(AR_U) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/fail/histograms.pyi b/numpy/typing/tests/data/fail/histograms.pyi index 22499d39175a..5f7892719eb4 100644 --- a/numpy/typing/tests/data/fail/histograms.pyi +++ b/numpy/typing/tests/data/fail/histograms.pyi @@ -4,9 +4,9 @@ import numpy.typing as npt AR_i8: npt.NDArray[np.int64] AR_f8: npt.NDArray[np.float64] -np.histogram_bin_edges(AR_i8, range=(0, 1, 2)) # E: incompatible type +np.histogram_bin_edges(AR_i8, range=(0, 1, 2)) # type: ignore[arg-type] -np.histogram(AR_i8, range=(0, 1, 2)) # E: incompatible type +np.histogram(AR_i8, range=(0, 1, 2)) # type: ignore[arg-type] -np.histogramdd(AR_i8, range=(0, 1)) # E: incompatible type -np.histogramdd(AR_i8, range=[(0, 1, 2)]) # E: incompatible type +np.histogramdd(AR_i8, range=(0, 1)) # type: ignore[arg-type] +np.histogramdd(AR_i8, range=[(0, 1, 2)]) # type: ignore[list-item] diff --git a/numpy/typing/tests/data/fail/index_tricks.pyi b/numpy/typing/tests/data/fail/index_tricks.pyi index 22f6f4a61e8e..8b7b1ae2b5bf 100644 --- a/numpy/typing/tests/data/fail/index_tricks.pyi +++ 
b/numpy/typing/tests/data/fail/index_tricks.pyi @@ -3,12 +3,12 @@ import numpy as np AR_LIKE_i: list[int] AR_LIKE_f: list[float] -np.ndindex([1, 2, 3]) # E: No overload variant -np.unravel_index(AR_LIKE_f, (1, 2, 3)) # E: incompatible type -np.ravel_multi_index(AR_LIKE_i, (1, 2, 3), mode="bob") # E: No overload variant -np.mgrid[1] # E: Invalid index type -np.mgrid[...] # E: Invalid index type -np.ogrid[1] # E: Invalid index type -np.ogrid[...] # E: Invalid index type -np.fill_diagonal(AR_LIKE_f, 2) # E: incompatible type -np.diag_indices(1.0) # E: incompatible type +np.ndindex([1, 2, 3]) # type: ignore[call-overload] +np.unravel_index(AR_LIKE_f, (1, 2, 3)) # type: ignore[arg-type] +np.ravel_multi_index(AR_LIKE_i, (1, 2, 3), mode="bob") # type: ignore[call-overload] +np.mgrid[1] # type: ignore[index] +np.mgrid[...] # type: ignore[index] +np.ogrid[1] # type: ignore[index] +np.ogrid[...] # type: ignore[index] +np.fill_diagonal(AR_LIKE_f, 2) # type: ignore[arg-type] +np.diag_indices(1.0) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/fail/lib_function_base.pyi b/numpy/typing/tests/data/fail/lib_function_base.pyi index dccb3dbb0632..d7be993bf04c 100644 --- a/numpy/typing/tests/data/fail/lib_function_base.pyi +++ b/numpy/typing/tests/data/fail/lib_function_base.pyi @@ -8,44 +8,53 @@ AR_c16: npt.NDArray[np.complex128] AR_m: npt.NDArray[np.timedelta64] AR_M: npt.NDArray[np.datetime64] AR_O: npt.NDArray[np.object_] +AR_b_list: list[npt.NDArray[np.bool]] -def func(a: int) -> None: ... 
- -np.average(AR_m) # E: incompatible type -np.select(1, [AR_f8]) # E: incompatible type -np.angle(AR_m) # E: incompatible type -np.unwrap(AR_m) # E: incompatible type -np.unwrap(AR_c16) # E: incompatible type -np.trim_zeros(1) # E: incompatible type -np.place(1, [True], 1.5) # E: incompatible type -np.vectorize(1) # E: incompatible type -np.place(AR_f8, slice(None), 5) # E: incompatible type - -np.interp(AR_f8, AR_c16, AR_f8) # E: incompatible type -np.interp(AR_c16, AR_f8, AR_f8) # E: incompatible type -np.interp(AR_f8, AR_f8, AR_f8, period=AR_c16) # E: No overload variant -np.interp(AR_f8, AR_f8, AR_O) # E: incompatible type - -np.cov(AR_m) # E: incompatible type -np.cov(AR_O) # E: incompatible type -np.corrcoef(AR_m) # E: incompatible type -np.corrcoef(AR_O) # E: incompatible type -np.corrcoef(AR_f8, bias=True) # E: No overload variant -np.corrcoef(AR_f8, ddof=2) # E: No overload variant -np.blackman(1j) # E: incompatible type -np.bartlett(1j) # E: incompatible type -np.hanning(1j) # E: incompatible type -np.hamming(1j) # E: incompatible type -np.hamming(AR_c16) # E: incompatible type -np.kaiser(1j, 1) # E: incompatible type -np.sinc(AR_O) # E: incompatible type -np.median(AR_M) # E: incompatible type - -np.percentile(AR_f8, 50j) # E: No overload variant -np.percentile(AR_f8, 50, interpolation="bob") # E: No overload variant -np.quantile(AR_f8, 0.5j) # E: No overload variant -np.quantile(AR_f8, 0.5, interpolation="bob") # E: No overload variant -np.meshgrid(AR_f8, AR_f8, indexing="bob") # E: incompatible type -np.delete(AR_f8, AR_f8) # E: incompatible type -np.insert(AR_f8, AR_f8, 1.5) # E: incompatible type -np.digitize(AR_f8, 1j) # E: No overload variant +def fn_none_i(a: None, /) -> npt.NDArray[Any]: ... +def fn_ar_i(a: npt.NDArray[np.float64], posarg: int, /) -> npt.NDArray[Any]: ... 
+ +np.average(AR_m) # type: ignore[type-var] +np.select(1, [AR_f8]) # type: ignore[call-overload] +np.angle(AR_m) # type: ignore[type-var] +np.unwrap(AR_m) # type: ignore[type-var] +np.unwrap(AR_c16) # type: ignore[type-var] +np.trim_zeros(1) # type: ignore[arg-type] +np.place(1, [True], 1.5) # type: ignore[arg-type] +np.vectorize(1) # type: ignore[arg-type] +np.place(AR_f8, slice(None), 5) # type: ignore[arg-type] + +np.piecewise(AR_f8, True, [fn_ar_i], "wrong") # type: ignore[call-overload] +np.piecewise(AR_f8, AR_b_list, [fn_none_i]) # type: ignore[call-overload] +np.piecewise(AR_f8, AR_b_list, [fn_ar_i]) # type: ignore[call-overload] +np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 3.14) # type: ignore[call-overload] +np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 42, None) # type: ignore[call-overload] +np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 42, _=None) # type: ignore[list-item] + +np.interp(AR_f8, AR_c16, AR_f8) # type: ignore[arg-type] +np.interp(AR_c16, AR_f8, AR_f8) # type: ignore[arg-type] +np.interp(AR_f8, AR_f8, AR_f8, period=AR_c16) # type: ignore[call-overload] +np.interp(AR_f8, AR_f8, AR_O) # type: ignore[arg-type] + +np.cov(AR_m) # type: ignore[type-var] +np.cov(AR_O) # type: ignore[type-var] +np.corrcoef(AR_m) # type: ignore[type-var] +np.corrcoef(AR_O) # type: ignore[type-var] +np.corrcoef(AR_f8, bias=True) # type: ignore[call-overload] +np.corrcoef(AR_f8, ddof=2) # type: ignore[call-overload] +np.blackman(1j) # type: ignore[arg-type] +np.bartlett(1j) # type: ignore[arg-type] +np.hanning(1j) # type: ignore[arg-type] +np.hamming(1j) # type: ignore[arg-type] +np.hamming(AR_c16) # type: ignore[arg-type] +np.kaiser(1j, 1) # type: ignore[arg-type] +np.sinc(AR_O) # type: ignore[type-var] +np.median(AR_M) # type: ignore[type-var] + +np.percentile(AR_f8, 50j) # type: ignore[call-overload] +np.percentile(AR_f8, 50, interpolation="bob") # type: ignore[call-overload] +np.quantile(AR_f8, 0.5j) # type: ignore[call-overload] +np.quantile(AR_f8, 0.5, 
interpolation="bob") # type: ignore[call-overload] +np.meshgrid(AR_f8, AR_f8, indexing="bob") # type: ignore[call-overload] +np.delete(AR_f8, AR_f8) # type: ignore[arg-type] +np.insert(AR_f8, AR_f8, 1.5) # type: ignore[arg-type] +np.digitize(AR_f8, 1j) # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/fail/lib_polynomial.pyi b/numpy/typing/tests/data/fail/lib_polynomial.pyi index e51b6b58e307..727eb7f4b2b1 100644 --- a/numpy/typing/tests/data/fail/lib_polynomial.pyi +++ b/numpy/typing/tests/data/fail/lib_polynomial.pyi @@ -8,22 +8,22 @@ AR_U: npt.NDArray[np.str_] poly_obj: np.poly1d -np.polymul(AR_f8, AR_U) # E: incompatible type -np.polydiv(AR_f8, AR_U) # E: incompatible type +np.polymul(AR_f8, AR_U) # type: ignore[arg-type] +np.polydiv(AR_f8, AR_U) # type: ignore[arg-type] -5**poly_obj # E: No overload variant +5**poly_obj # type: ignore[operator] -np.polyint(AR_U) # E: incompatible type -np.polyint(AR_f8, m=1j) # E: No overload variant +np.polyint(AR_U) # type: ignore[arg-type] +np.polyint(AR_f8, m=1j) # type: ignore[call-overload] -np.polyder(AR_U) # E: incompatible type -np.polyder(AR_f8, m=1j) # E: No overload variant +np.polyder(AR_U) # type: ignore[arg-type] +np.polyder(AR_f8, m=1j) # type: ignore[call-overload] -np.polyfit(AR_O, AR_f8, 1) # E: incompatible type -np.polyfit(AR_f8, AR_f8, 1, rcond=1j) # E: No overload variant -np.polyfit(AR_f8, AR_f8, 1, w=AR_c16) # E: incompatible type -np.polyfit(AR_f8, AR_f8, 1, cov="bob") # E: No overload variant +np.polyfit(AR_O, AR_f8, 1) # type: ignore[arg-type] +np.polyfit(AR_f8, AR_f8, 1, rcond=1j) # type: ignore[call-overload] +np.polyfit(AR_f8, AR_f8, 1, w=AR_c16) # type: ignore[arg-type] +np.polyfit(AR_f8, AR_f8, 1, cov="bob") # type: ignore[call-overload] -np.polyval(AR_f8, AR_U) # E: incompatible type -np.polyadd(AR_f8, AR_U) # E: incompatible type -np.polysub(AR_f8, AR_U) # E: incompatible type +np.polyval(AR_f8, AR_U) # type: ignore[arg-type] +np.polyadd(AR_f8, AR_U) # type: ignore[arg-type] 
+np.polysub(AR_f8, AR_U) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/fail/lib_utils.pyi b/numpy/typing/tests/data/fail/lib_utils.pyi index 8b8482eeff6d..25af32b43297 100644 --- a/numpy/typing/tests/data/fail/lib_utils.pyi +++ b/numpy/typing/tests/data/fail/lib_utils.pyi @@ -1,3 +1,3 @@ import numpy.lib.array_utils as array_utils -array_utils.byte_bounds(1) # E: incompatible type +array_utils.byte_bounds(1) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/fail/lib_version.pyi b/numpy/typing/tests/data/fail/lib_version.pyi index 2758cfe40438..62011a848cc1 100644 --- a/numpy/typing/tests/data/fail/lib_version.pyi +++ b/numpy/typing/tests/data/fail/lib_version.pyi @@ -2,5 +2,5 @@ from numpy.lib import NumpyVersion version: NumpyVersion -NumpyVersion(b"1.8.0") # E: incompatible type -version >= b"1.8.0" # E: Unsupported operand types +NumpyVersion(b"1.8.0") # type: ignore[arg-type] +version >= b"1.8.0" # type: ignore[operator] diff --git a/numpy/typing/tests/data/fail/linalg.pyi b/numpy/typing/tests/data/fail/linalg.pyi index da9390328bd7..c59238eab878 100644 --- a/numpy/typing/tests/data/fail/linalg.pyi +++ b/numpy/typing/tests/data/fail/linalg.pyi @@ -5,44 +5,44 @@ AR_f8: npt.NDArray[np.float64] AR_O: npt.NDArray[np.object_] AR_M: npt.NDArray[np.datetime64] -np.linalg.tensorsolve(AR_O, AR_O) # E: incompatible type +np.linalg.tensorsolve(AR_O, AR_O) # type: ignore[arg-type] -np.linalg.solve(AR_O, AR_O) # E: incompatible type +np.linalg.solve(AR_O, AR_O) # type: ignore[arg-type] -np.linalg.tensorinv(AR_O) # E: incompatible type +np.linalg.tensorinv(AR_O) # type: ignore[type-var] +np.linalg.inv(AR_O) # type: ignore[type-var] +np.linalg.pinv(AR_O) # type: ignore[type-var] +np.linalg.cholesky(AR_O) # type: ignore[type-var] -np.linalg.inv(AR_O) # E: incompatible type +np.linalg.matrix_power(AR_M, 5) # type: ignore[arg-type] -np.linalg.matrix_power(AR_M, 5) # E: incompatible type +np.linalg.eig(AR_O) # type: ignore[arg-type] 
-np.linalg.cholesky(AR_O) # E: incompatible type +np.linalg.eigh(AR_O) # type: ignore[arg-type] +np.linalg.eigh(AR_O, UPLO="bob") # type: ignore[call-overload] -np.linalg.qr(AR_O) # E: incompatible type -np.linalg.qr(AR_f8, mode="bob") # E: No overload variant +np.linalg.qr(AR_O) # type: ignore[type-var] +np.linalg.qr(AR_f8, mode="bob") # type: ignore[call-overload] -np.linalg.eigvals(AR_O) # E: incompatible type +np.linalg.svd(AR_O) # type: ignore[arg-type] -np.linalg.eigvalsh(AR_O) # E: incompatible type -np.linalg.eigvalsh(AR_O, UPLO="bob") # E: No overload variant +np.linalg.eigvals(AR_O) # type: ignore[arg-type] -np.linalg.eig(AR_O) # E: incompatible type +np.linalg.eigvalsh(AR_O) # type: ignore[arg-type] +np.linalg.eigvalsh(AR_O, UPLO="bob") # type: ignore[call-overload] -np.linalg.eigh(AR_O) # E: incompatible type -np.linalg.eigh(AR_O, UPLO="bob") # E: No overload variant +np.linalg.svdvals(AR_O) # type: ignore[arg-type] +np.linalg.svdvals(AR_M) # type: ignore[arg-type] +np.linalg.svdvals(x=AR_f8) # type: ignore[call-overload] -np.linalg.svd(AR_O) # E: incompatible type +np.linalg.matrix_rank(AR_O) # type: ignore[arg-type] -np.linalg.cond(AR_O) # E: incompatible type -np.linalg.cond(AR_f8, p="bob") # E: incompatible type +np.linalg.cond(AR_O) # type: ignore[arg-type] +np.linalg.cond(AR_f8, p="bob") # type: ignore[call-overload] -np.linalg.matrix_rank(AR_O) # E: incompatible type +np.linalg.slogdet(AR_O) # type: ignore[arg-type] +np.linalg.det(AR_O) # type: ignore[arg-type] -np.linalg.pinv(AR_O) # E: incompatible type +np.linalg.norm(AR_f8, ord="bob") # type: ignore[call-overload] -np.linalg.slogdet(AR_O) # E: incompatible type - -np.linalg.det(AR_O) # E: incompatible type - -np.linalg.norm(AR_f8, ord="bob") # E: No overload variant - -np.linalg.multi_dot([AR_M]) # E: incompatible type +np.linalg.multi_dot([AR_M]) # type: ignore[type-var] diff --git a/numpy/typing/tests/data/fail/ma.pyi b/numpy/typing/tests/data/fail/ma.pyi new file mode 100644 index 
000000000000..59698264e7fd --- /dev/null +++ b/numpy/typing/tests/data/fail/ma.pyi @@ -0,0 +1,152 @@ +import numpy as np +import numpy.typing as npt +from numpy._typing import _AnyShape + +type _MArray[ScalarT: np.generic] = np.ma.MaskedArray[_AnyShape, np.dtype[ScalarT]] + +MAR_1d_f8: np.ma.MaskedArray[tuple[int], np.dtype[np.float64]] +MAR_b: _MArray[np.bool] +MAR_c: _MArray[np.complex128] +MAR_td64: _MArray[np.timedelta64] + +AR_b: npt.NDArray[np.bool] + +MAR_1d_f8.shape = (3, 1) # type: ignore[assignment] +MAR_1d_f8.dtype = np.bool # type: ignore[assignment] + +def invalid_recordmask_setter() -> None: + # We make an inner function for this one to avoid the + # `NoReturn` causing an early exit for type checkers. + MAR_1d_f8.recordmask = [True] # type: ignore[assignment] + +np.ma.min(MAR_1d_f8, axis=1.0) # type: ignore[call-overload] +np.ma.min(MAR_1d_f8, keepdims=1.0) # type: ignore[call-overload] +np.ma.min(MAR_1d_f8, out=1.0) # type: ignore[call-overload] +np.ma.min(MAR_1d_f8, fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.min(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.min(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.min(out=1.0) # type: ignore[call-overload] +MAR_1d_f8.min(fill_value=lambda x: 27) # type: ignore[call-overload] + +np.ma.max(MAR_1d_f8, axis=1.0) # type: ignore[call-overload] +np.ma.max(MAR_1d_f8, keepdims=1.0) # type: ignore[call-overload] +np.ma.max(MAR_1d_f8, out=1.0) # type: ignore[call-overload] +np.ma.max(MAR_1d_f8, fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.max(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.max(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.max(out=1.0) # type: ignore[call-overload] +MAR_1d_f8.max(fill_value=lambda x: 27) # type: ignore[call-overload] + +np.ma.ptp(MAR_1d_f8, axis=1.0) # type: ignore[call-overload] +np.ma.ptp(MAR_1d_f8, keepdims=1.0) # type: ignore[call-overload] +np.ma.ptp(MAR_1d_f8, out=1.0) # type: ignore[call-overload] 
+np.ma.ptp(MAR_1d_f8, fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.ptp(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.ptp(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.ptp(out=1.0) # type: ignore[call-overload] +MAR_1d_f8.ptp(fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.argmin(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmin(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmin(out=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmin(fill_value=lambda x: 27) # type: ignore[call-overload] + +np.ma.argmin(MAR_1d_f8, axis=1.0) # type: ignore[call-overload] +np.ma.argmin(MAR_1d_f8, axis=(1,)) # type: ignore[call-overload] +np.ma.argmin(MAR_1d_f8, keepdims=1.0) # type: ignore[call-overload] +np.ma.argmin(MAR_1d_f8, out=1.0) # type: ignore[call-overload] +np.ma.argmin(MAR_1d_f8, fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.argmax(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmax(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmax(out=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmax(fill_value=lambda x: 27) # type: ignore[call-overload] + +np.ma.argmax(MAR_1d_f8, axis=1.0) # type: ignore[call-overload] +np.ma.argmax(MAR_1d_f8, axis=(0,)) # type: ignore[call-overload] +np.ma.argmax(MAR_1d_f8, keepdims=1.0) # type: ignore[call-overload] +np.ma.argmax(MAR_1d_f8, out=1.0) # type: ignore[call-overload] +np.ma.argmax(MAR_1d_f8, fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.all(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.all(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.all(out=1.0) # type: ignore[call-overload] + +MAR_1d_f8.any(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.any(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.any(out=1.0) # type: ignore[call-overload] + +MAR_1d_f8.sort(axis=(0, 1)) # type: ignore[arg-type] +MAR_1d_f8.sort(axis=None) # type: ignore[arg-type] +MAR_1d_f8.sort(kind="cabbage") # 
type: ignore[arg-type] +MAR_1d_f8.sort(order=lambda: "cabbage") # type: ignore[arg-type] +MAR_1d_f8.sort(endwith="cabbage") # type: ignore[arg-type] +MAR_1d_f8.sort(fill_value=lambda: "cabbage") # type: ignore[arg-type] +MAR_1d_f8.sort(stable="cabbage") # type: ignore[arg-type] +MAR_1d_f8.sort(stable=True) # type: ignore[arg-type] + +MAR_1d_f8.take(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.take(out=1) # type: ignore[call-overload] +MAR_1d_f8.take(mode="bob") # type: ignore[call-overload] + +np.ma.take(None) # type: ignore[call-overload] +np.ma.take(axis=1.0) # type: ignore[call-overload] +np.ma.take(out=1) # type: ignore[call-overload] +np.ma.take(mode="bob") # type: ignore[call-overload] + +MAR_1d_f8.partition(["cabbage"]) # type: ignore[arg-type] +MAR_1d_f8.partition(axis=(0, 1)) # type: ignore[arg-type, call-arg] +MAR_1d_f8.partition(kind="cabbage") # type: ignore[arg-type, call-arg] +MAR_1d_f8.partition(order=lambda: "cabbage") # type: ignore[arg-type, call-arg] +MAR_1d_f8.partition(AR_b) # type: ignore[arg-type] + +MAR_1d_f8.argpartition(["cabbage"]) # type: ignore[arg-type] +MAR_1d_f8.argpartition(axis=(0, 1)) # type: ignore[call-overload] +MAR_1d_f8.argpartition(kind="cabbage") # type: ignore[call-overload] +MAR_1d_f8.argpartition(order=lambda: "cabbage") # type: ignore[call-overload] +MAR_1d_f8.argpartition(AR_b) # type: ignore[arg-type] + +np.ma.ndim(lambda: "lambda") # type: ignore[arg-type] + +np.ma.size(AR_b, axis="0") # type: ignore[arg-type] + +MAR_1d_f8 >= (lambda x: "mango") # type: ignore[operator] +MAR_1d_f8 > (lambda x: "mango") # type: ignore[operator] +MAR_1d_f8 <= (lambda x: "mango") # type: ignore[operator] +MAR_1d_f8 < (lambda x: "mango") # type: ignore[operator] + +MAR_1d_f8.count(axis=0.) # type: ignore[call-overload] + +np.ma.count(MAR_1d_f8, axis=0.) 
# type: ignore[call-overload] + +MAR_1d_f8.put(4, 999, mode="flip") # type: ignore[arg-type] + +np.ma.put(MAR_1d_f8, 4, 999, mode="flip") # type: ignore[arg-type] + +np.ma.put([1, 1, 3], 0, 999) # type: ignore[arg-type] + +np.ma.compressed(lambda: "compress me") # type: ignore[call-overload] + +np.ma.allequal(MAR_1d_f8, [1, 2, 3], fill_value=1.5) # type: ignore[arg-type] + +np.ma.allclose(MAR_1d_f8, [1, 2, 3], masked_equal=4.5) # type: ignore[arg-type] +np.ma.allclose(MAR_1d_f8, [1, 2, 3], rtol=".4") # type: ignore[arg-type] +np.ma.allclose(MAR_1d_f8, [1, 2, 3], atol=".5") # type: ignore[arg-type] + +MAR_1d_f8.__setmask__("mask") # type: ignore[arg-type] + +MAR_b *= 2 # type: ignore[arg-type] +MAR_c //= 2 # type: ignore[misc] +MAR_td64 **= 2 # type: ignore[misc] + +MAR_1d_f8.swapaxes(axis1=1, axis2=0) # type: ignore[call-arg] + +MAR_1d_f8.argsort(axis=(1, 0)) # type: ignore[arg-type] + +np.ma.MaskedArray(np.array([1, 2, 3]), keep_mask="yes") # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1, 2, 3]), subok=None) # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1, 2, 3]), ndim=None) # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1, 2, 3]), order="Corinthian") # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/fail/memmap.pyi b/numpy/typing/tests/data/fail/memmap.pyi index 434870b60e41..3a4fc7df0689 100644 --- a/numpy/typing/tests/data/fail/memmap.pyi +++ b/numpy/typing/tests/data/fail/memmap.pyi @@ -1,5 +1,5 @@ import numpy as np with open("file.txt", "r") as f: - np.memmap(f) # E: No overload variant -np.memmap("test.txt", shape=[10, 5]) # E: No overload variant + np.memmap(f) # type: ignore[call-overload] +np.memmap("test.txt", shape=[10, 5]) # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/fail/modules.pyi b/numpy/typing/tests/data/fail/modules.pyi index c86627e0c8ea..c12a182807d3 100644 --- a/numpy/typing/tests/data/fail/modules.pyi +++ b/numpy/typing/tests/data/fail/modules.pyi @@ -1,18 +1,17 @@ 
import numpy as np -np.testing.bob # E: Module has no attribute -np.bob # E: Module has no attribute +np.testing.bob # type: ignore[attr-defined] +np.bob # type: ignore[attr-defined] # Stdlib modules in the namespace by accident -np.warnings # E: Module has no attribute -np.sys # E: Module has no attribute -np.os # E: Module "numpy" does not explicitly export -np.math # E: Module has no attribute +np.warnings # type: ignore[attr-defined] +np.sys # type: ignore[attr-defined] +np.os # type: ignore[attr-defined] +np.math # type: ignore[attr-defined] # Public sub-modules that are not imported to their parent module by default; # e.g. one must first execute `import numpy.lib.recfunctions` -np.lib.recfunctions # E: Module has no attribute +np.lib.recfunctions # type: ignore[attr-defined] -np.__NUMPY_SETUP__ # E: Module has no attribute -np.__deprecated_attrs__ # E: Module has no attribute -np.__expired_functions__ # E: Module has no attribute +np.__deprecated_attrs__ # type: ignore[attr-defined] +np.__expired_functions__ # type: ignore[attr-defined] diff --git a/numpy/typing/tests/data/fail/multiarray.pyi b/numpy/typing/tests/data/fail/multiarray.pyi index 0ee6c11c6dff..51128dfbf6f7 100644 --- a/numpy/typing/tests/data/fail/multiarray.pyi +++ b/numpy/typing/tests/data/fail/multiarray.pyi @@ -15,39 +15,38 @@ AR_LIKE_f: list[float] def func(a: int) -> None: ... 
-np.where(AR_b, 1) # E: No overload variant +np.where(AR_b, 1) # type: ignore[call-overload] -np.can_cast(AR_f8, 1) # E: incompatible type +np.can_cast(AR_f8, 1) # type: ignore[arg-type] -np.vdot(AR_M, AR_M) # E: incompatible type +np.vdot(AR_M, AR_M) # type: ignore[arg-type] -np.copyto(AR_LIKE_f, AR_f8) # E: incompatible type +np.copyto(AR_LIKE_f, AR_f8) # type: ignore[arg-type] -np.putmask(AR_LIKE_f, [True, True, False], 1.5) # E: incompatible type +np.putmask(AR_LIKE_f, [True, True, False], 1.5) # type: ignore[arg-type] -np.packbits(AR_f8) # E: incompatible type -np.packbits(AR_u1, bitorder=">") # E: incompatible type +np.packbits(AR_f8) # type: ignore[arg-type] +np.packbits(AR_u1, bitorder=">") # type: ignore[call-overload] -np.unpackbits(AR_i8) # E: incompatible type -np.unpackbits(AR_u1, bitorder=">") # E: incompatible type +np.unpackbits(AR_i8) # type: ignore[arg-type] +np.unpackbits(AR_u1, bitorder=">") # type: ignore[call-overload] -np.shares_memory(1, 1, max_work=i8) # E: incompatible type -np.may_share_memory(1, 1, max_work=i8) # E: incompatible type +np.shares_memory(1, 1, max_work=i8) # type: ignore[arg-type] +np.may_share_memory(1, 1, max_work=i8) # type: ignore[arg-type] -np.arange(M) # E: No overload variant -np.arange(stop=10) # E: No overload variant +np.arange(stop=10) # type: ignore[call-overload] -np.datetime_data(int) # E: incompatible type +np.datetime_data(int) # type: ignore[arg-type] -np.busday_offset("2012", 10) # E: No overload variant +np.busday_offset("2012", 10) # type: ignore[call-overload] -np.datetime_as_string("2012") # E: No overload variant +np.datetime_as_string("2012") # type: ignore[call-overload] -np.char.compare_chararrays("a", b"a", "==", False) # E: No overload variant +np.char.compare_chararrays("a", b"a", "==", False) # type: ignore[call-overload] -np.nested_iters([AR_i8, AR_i8]) # E: Missing positional argument -np.nested_iters([AR_i8, AR_i8], 0) # E: incompatible type -np.nested_iters([AR_i8, AR_i8], [0]) # E: 
incompatible type -np.nested_iters([AR_i8, AR_i8], [[0], [1]], flags=["test"]) # E: incompatible type -np.nested_iters([AR_i8, AR_i8], [[0], [1]], op_flags=[["test"]]) # E: incompatible type -np.nested_iters([AR_i8, AR_i8], [[0], [1]], buffersize=1.0) # E: incompatible type +np.nested_iters([AR_i8, AR_i8]) # type: ignore[call-arg] +np.nested_iters([AR_i8, AR_i8], 0) # type: ignore[arg-type] +np.nested_iters([AR_i8, AR_i8], [0]) # type: ignore[list-item] +np.nested_iters([AR_i8, AR_i8], [[0], [1]], flags=["test"]) # type: ignore[list-item] +np.nested_iters([AR_i8, AR_i8], [[0], [1]], op_flags=[["test"]]) # type: ignore[list-item] +np.nested_iters([AR_i8, AR_i8], [[0], [1]], buffersize=1.0) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/fail/ndarray.pyi b/numpy/typing/tests/data/fail/ndarray.pyi index 5ecae02e6178..2aeec0883e3f 100644 --- a/numpy/typing/tests/data/fail/ndarray.pyi +++ b/numpy/typing/tests/data/fail/ndarray.pyi @@ -8,4 +8,4 @@ import numpy as np # # for more context. float_array = np.array([1.0]) -float_array.dtype = np.bool # E: Property "dtype" defined in "ndarray" is read-only +float_array.dtype = np.bool # type: ignore[assignment, misc] diff --git a/numpy/typing/tests/data/fail/ndarray_misc.pyi b/numpy/typing/tests/data/fail/ndarray_misc.pyi index 674b378829a0..29418930061c 100644 --- a/numpy/typing/tests/data/fail/ndarray_misc.pyi +++ b/numpy/typing/tests/data/fail/ndarray_misc.pyi @@ -5,6 +5,7 @@ More extensive tests are performed for the methods' function-based counterpart in `../from_numeric.py`. 
 """
+from typing import Never
 
 import numpy as np
 import numpy.typing as npt
@@ -16,28 +17,33 @@ AR_b: npt.NDArray[np.bool]
 
 ctypes_obj = AR_f8.ctypes
 
-reveal_type(ctypes_obj.get_data())  # E: has no attribute
-reveal_type(ctypes_obj.get_shape())  # E: has no attribute
-reveal_type(ctypes_obj.get_strides())  # E: has no attribute
-reveal_type(ctypes_obj.get_as_parameter())  # E: has no attribute
-
-f8.argpartition(0)  # E: has no attribute
-f8.diagonal()  # E: has no attribute
-f8.dot(1)  # E: has no attribute
-f8.nonzero()  # E: has no attribute
-f8.partition(0)  # E: has no attribute
-f8.put(0, 2)  # E: has no attribute
-f8.setfield(2, np.float64)  # E: has no attribute
-f8.sort()  # E: has no attribute
-f8.trace()  # E: has no attribute
-
-AR_M.__int__()  # E: Invalid self argument
-AR_M.__float__()  # E: Invalid self argument
-AR_M.__complex__()  # E: Invalid self argument
-AR_b.__index__()  # E: Invalid self argument
-
-AR_f8[1.5]  # E: No overload variant
-AR_f8["field_a"]  # E: No overload variant
-AR_f8[["field_a", "field_b"]]  # E: Invalid index type
-
-AR_f8.__array_finalize__(object())  # E: incompatible type
+f8.argpartition(0)  # type: ignore[attr-defined]
+f8.partition(0)  # type: ignore[attr-defined]
+f8.dot(1)  # type: ignore[attr-defined]
+
+# NOTE: The following functions return `Never`, causing mypy to stop analysis at that
+# point, which we circumvent by wrapping them in a function.
+ +def f8_diagonal(x: np.float64) -> Never: + return x.diagonal() # type: ignore[misc] + +def f8_nonzero(x: np.float64) -> Never: + return x.nonzero() # type: ignore[misc] + +def f8_setfield(x: np.float64) -> Never: + return x.setfield(2, np.float64) # type: ignore[misc] + +def f8_sort(x: np.float64) -> Never: + return x.sort() # type: ignore[misc] + +def f8_trace(x: np.float64) -> Never: + return x.trace() # type: ignore[misc] + +AR_M.__complex__() # type: ignore[misc] +AR_b.__index__() # type: ignore[misc] + +AR_f8[1.5] # type: ignore[call-overload] +AR_f8["field_a"] # type: ignore[call-overload] +AR_f8[["field_a", "field_b"]] # type: ignore[index] + +AR_f8.__array_finalize__(object()) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/fail/nditer.pyi b/numpy/typing/tests/data/fail/nditer.pyi index 1e8e37ee5fe0..fae728da454e 100644 --- a/numpy/typing/tests/data/fail/nditer.pyi +++ b/numpy/typing/tests/data/fail/nditer.pyi @@ -1,8 +1,8 @@ import numpy as np -class Test(np.nditer): ... # E: Cannot inherit from final class +class Test(np.nditer): ... 
# type: ignore[misc] -np.nditer([0, 1], flags=["test"]) # E: incompatible type -np.nditer([0, 1], op_flags=[["test"]]) # E: incompatible type -np.nditer([0, 1], itershape=(1.0,)) # E: incompatible type -np.nditer([0, 1], buffersize=1.0) # E: incompatible type +np.nditer([0, 1], flags=["test"]) # type: ignore[list-item] +np.nditer([0, 1], op_flags=[["test"]]) # type: ignore[list-item] +np.nditer([0, 1], itershape=(1.0,)) # type: ignore[arg-type] +np.nditer([0, 1], buffersize=1.0) # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/fail/nested_sequence.pyi b/numpy/typing/tests/data/fail/nested_sequence.pyi index 6301e51769fe..1004a36accc7 100644 --- a/numpy/typing/tests/data/fail/nested_sequence.pyi +++ b/numpy/typing/tests/data/fail/nested_sequence.pyi @@ -1,4 +1,5 @@ from collections.abc import Sequence + from numpy._typing import _NestedSequence a: Sequence[float] @@ -7,11 +8,10 @@ c: tuple[str, ...] d: int e: str -def func(a: _NestedSequence[int]) -> None: - ... +def func(a: _NestedSequence[int]) -> None: ... 
-reveal_type(func(a)) # E: incompatible type -reveal_type(func(b)) # E: incompatible type -reveal_type(func(c)) # E: incompatible type -reveal_type(func(d)) # E: incompatible type -reveal_type(func(e)) # E: incompatible type +reveal_type(func(a)) # type: ignore[arg-type, misc] +reveal_type(func(b)) # type: ignore[arg-type, misc] +reveal_type(func(c)) # type: ignore[arg-type, misc] +reveal_type(func(d)) # type: ignore[arg-type, misc] +reveal_type(func(e)) # type: ignore[arg-type, misc] diff --git a/numpy/typing/tests/data/fail/npyio.pyi b/numpy/typing/tests/data/fail/npyio.pyi index 95b6c426697c..e20be3a2a247 100644 --- a/numpy/typing/tests/data/fail/npyio.pyi +++ b/numpy/typing/tests/data/fail/npyio.pyi @@ -1,8 +1,8 @@ import pathlib from typing import IO -import numpy.typing as npt import numpy as np +import numpy.typing as npt str_path: str bytes_path: bytes @@ -10,14 +10,15 @@ pathlib_path: pathlib.Path str_file: IO[str] AR_i8: npt.NDArray[np.int64] -np.load(str_file) # E: incompatible type +np.load(str_file) # type: ignore[arg-type] -np.save(bytes_path, AR_i8) # E: incompatible type +np.save(bytes_path, AR_i8) # type: ignore[arg-type] +np.save(str_path, AR_i8, fix_imports=True) # type: ignore[call-arg] -np.savez(bytes_path, AR_i8) # E: incompatible type +np.savez(bytes_path, AR_i8) # type: ignore[arg-type] -np.savez_compressed(bytes_path, AR_i8) # E: incompatible type +np.savez_compressed(bytes_path, AR_i8) # type: ignore[arg-type] -np.loadtxt(bytes_path) # E: incompatible type +np.loadtxt(bytes_path) # type: ignore[arg-type] -np.fromregex(bytes_path, ".", np.int64) # E: No overload variant +np.fromregex(bytes_path, ".", np.int64) # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/fail/numerictypes.pyi b/numpy/typing/tests/data/fail/numerictypes.pyi index 29a3cf30dd95..a1fd47a6f479 100644 --- a/numpy/typing/tests/data/fail/numerictypes.pyi +++ b/numpy/typing/tests/data/fail/numerictypes.pyi @@ -1,5 +1,5 @@ import numpy as np -np.isdtype(1, 
np.int64) # E: incompatible type +np.isdtype(1, np.int64) # type: ignore[arg-type] -np.issubdtype(1, np.int64) # E: incompatible type +np.issubdtype(1, np.int64) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/fail/random.pyi b/numpy/typing/tests/data/fail/random.pyi index aa1eae4424e2..1abf4b77653c 100644 --- a/numpy/typing/tests/data/fail/random.pyi +++ b/numpy/typing/tests/data/fail/random.pyi @@ -8,55 +8,55 @@ SEED_SEED_SEQ: np.random.SeedSequence = np.random.SeedSequence(0) SEED_STR: str = "String seeding not allowed" # default rng -np.random.default_rng(SEED_FLOAT) # E: incompatible type -np.random.default_rng(SEED_ARR_FLOAT) # E: incompatible type -np.random.default_rng(SEED_ARRLIKE_FLOAT) # E: incompatible type -np.random.default_rng(SEED_STR) # E: incompatible type +np.random.default_rng(SEED_FLOAT) # type: ignore[arg-type] +np.random.default_rng(SEED_ARR_FLOAT) # type: ignore[arg-type] +np.random.default_rng(SEED_ARRLIKE_FLOAT) # type: ignore[arg-type] +np.random.default_rng(SEED_STR) # type: ignore[arg-type] # Seed Sequence -np.random.SeedSequence(SEED_FLOAT) # E: incompatible type -np.random.SeedSequence(SEED_ARR_FLOAT) # E: incompatible type -np.random.SeedSequence(SEED_ARRLIKE_FLOAT) # E: incompatible type -np.random.SeedSequence(SEED_SEED_SEQ) # E: incompatible type -np.random.SeedSequence(SEED_STR) # E: incompatible type +np.random.SeedSequence(SEED_FLOAT) # type: ignore[arg-type] +np.random.SeedSequence(SEED_ARR_FLOAT) # type: ignore[arg-type] +np.random.SeedSequence(SEED_ARRLIKE_FLOAT) # type: ignore[arg-type] +np.random.SeedSequence(SEED_SEED_SEQ) # type: ignore[arg-type] +np.random.SeedSequence(SEED_STR) # type: ignore[arg-type] seed_seq: np.random.bit_generator.SeedSequence = np.random.SeedSequence() -seed_seq.spawn(11.5) # E: incompatible type -seed_seq.generate_state(3.14) # E: incompatible type -seed_seq.generate_state(3, np.uint8) # E: incompatible type -seed_seq.generate_state(3, "uint8") # E: incompatible type 
-seed_seq.generate_state(3, "u1") # E: incompatible type -seed_seq.generate_state(3, np.uint16) # E: incompatible type -seed_seq.generate_state(3, "uint16") # E: incompatible type -seed_seq.generate_state(3, "u2") # E: incompatible type -seed_seq.generate_state(3, np.int32) # E: incompatible type -seed_seq.generate_state(3, "int32") # E: incompatible type -seed_seq.generate_state(3, "i4") # E: incompatible type +seed_seq.spawn(11.5) # type: ignore[arg-type] +seed_seq.generate_state(3.14) # type: ignore[arg-type] +seed_seq.generate_state(3, np.uint8) # type: ignore[arg-type] +seed_seq.generate_state(3, "uint8") # type: ignore[arg-type] +seed_seq.generate_state(3, "u1") # type: ignore[arg-type] +seed_seq.generate_state(3, np.uint16) # type: ignore[arg-type] +seed_seq.generate_state(3, "uint16") # type: ignore[arg-type] +seed_seq.generate_state(3, "u2") # type: ignore[arg-type] +seed_seq.generate_state(3, np.int32) # type: ignore[arg-type] +seed_seq.generate_state(3, "int32") # type: ignore[arg-type] +seed_seq.generate_state(3, "i4") # type: ignore[arg-type] # Bit Generators -np.random.MT19937(SEED_FLOAT) # E: incompatible type -np.random.MT19937(SEED_ARR_FLOAT) # E: incompatible type -np.random.MT19937(SEED_ARRLIKE_FLOAT) # E: incompatible type -np.random.MT19937(SEED_STR) # E: incompatible type - -np.random.PCG64(SEED_FLOAT) # E: incompatible type -np.random.PCG64(SEED_ARR_FLOAT) # E: incompatible type -np.random.PCG64(SEED_ARRLIKE_FLOAT) # E: incompatible type -np.random.PCG64(SEED_STR) # E: incompatible type - -np.random.Philox(SEED_FLOAT) # E: incompatible type -np.random.Philox(SEED_ARR_FLOAT) # E: incompatible type -np.random.Philox(SEED_ARRLIKE_FLOAT) # E: incompatible type -np.random.Philox(SEED_STR) # E: incompatible type - -np.random.SFC64(SEED_FLOAT) # E: incompatible type -np.random.SFC64(SEED_ARR_FLOAT) # E: incompatible type -np.random.SFC64(SEED_ARRLIKE_FLOAT) # E: incompatible type -np.random.SFC64(SEED_STR) # E: incompatible type 
+np.random.MT19937(SEED_FLOAT) # type: ignore[arg-type] +np.random.MT19937(SEED_ARR_FLOAT) # type: ignore[arg-type] +np.random.MT19937(SEED_ARRLIKE_FLOAT) # type: ignore[arg-type] +np.random.MT19937(SEED_STR) # type: ignore[arg-type] + +np.random.PCG64(SEED_FLOAT) # type: ignore[arg-type] +np.random.PCG64(SEED_ARR_FLOAT) # type: ignore[arg-type] +np.random.PCG64(SEED_ARRLIKE_FLOAT) # type: ignore[arg-type] +np.random.PCG64(SEED_STR) # type: ignore[arg-type] + +np.random.Philox(SEED_FLOAT) # type: ignore[arg-type] +np.random.Philox(SEED_ARR_FLOAT) # type: ignore[arg-type] +np.random.Philox(SEED_ARRLIKE_FLOAT) # type: ignore[arg-type] +np.random.Philox(SEED_STR) # type: ignore[arg-type] + +np.random.SFC64(SEED_FLOAT) # type: ignore[arg-type] +np.random.SFC64(SEED_ARR_FLOAT) # type: ignore[arg-type] +np.random.SFC64(SEED_ARRLIKE_FLOAT) # type: ignore[arg-type] +np.random.SFC64(SEED_STR) # type: ignore[arg-type] # Generator -np.random.Generator(None) # E: incompatible type -np.random.Generator(12333283902830213) # E: incompatible type -np.random.Generator("OxFEEDF00D") # E: incompatible type -np.random.Generator([123, 234]) # E: incompatible type -np.random.Generator(np.array([123, 234], dtype="u4")) # E: incompatible type +np.random.Generator(None) # type: ignore[arg-type] +np.random.Generator(12333283902830213) # type: ignore[arg-type] +np.random.Generator("OxFEEDF00D") # type: ignore[arg-type] +np.random.Generator([123, 234]) # type: ignore[arg-type] +np.random.Generator(np.array([123, 234], dtype="u4")) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/fail/rec.pyi b/numpy/typing/tests/data/fail/rec.pyi index a57f1ba27d74..c9d43dd2ff1f 100644 --- a/numpy/typing/tests/data/fail/rec.pyi +++ b/numpy/typing/tests/data/fail/rec.pyi @@ -3,15 +3,15 @@ import numpy.typing as npt AR_i8: npt.NDArray[np.int64] -np.rec.fromarrays(1) # E: No overload variant -np.rec.fromarrays([1, 2, 3], dtype=[("f8", "f8")], formats=["f8", "f8"]) # E: No overload variant 
+np.rec.fromarrays(1) # type: ignore[call-overload] +np.rec.fromarrays([1, 2, 3], dtype=[("f8", "f8")], formats=["f8", "f8"]) # type: ignore[call-overload] -np.rec.fromrecords(AR_i8) # E: incompatible type -np.rec.fromrecords([(1.5,)], dtype=[("f8", "f8")], formats=["f8", "f8"]) # E: No overload variant +np.rec.fromrecords(AR_i8) # type: ignore[arg-type] +np.rec.fromrecords([(1.5,)], dtype=[("f8", "f8")], formats=["f8", "f8"]) # type: ignore[call-overload] -np.rec.fromstring("string", dtype=[("f8", "f8")]) # E: No overload variant -np.rec.fromstring(b"bytes") # E: No overload variant -np.rec.fromstring(b"(1.5,)", dtype=[("f8", "f8")], formats=["f8", "f8"]) # E: No overload variant +np.rec.fromstring("string", dtype=[("f8", "f8")]) # type: ignore[call-overload] +np.rec.fromstring(b"bytes") # type: ignore[call-overload] +np.rec.fromstring(b"(1.5,)", dtype=[("f8", "f8")], formats=["f8", "f8"]) # type: ignore[call-overload] with open("test", "r") as f: - np.rec.fromfile(f, dtype=[("f8", "f8")]) # E: No overload variant + np.rec.fromfile(f, dtype=[("f8", "f8")]) # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/fail/scalars.pyi b/numpy/typing/tests/data/fail/scalars.pyi index e65e111c3a65..018a88e652ae 100644 --- a/numpy/typing/tests/data/fail/scalars.pyi +++ b/numpy/typing/tests/data/fail/scalars.pyi @@ -1,4 +1,3 @@ -import sys import numpy as np f2: np.float16 @@ -7,7 +6,7 @@ c8: np.complex64 # Construction -np.float32(3j) # E: incompatible type +np.float32(3j) # type: ignore[arg-type] # Technically the following examples are valid NumPy code. But they # are not considered a best practice, and people who wish to use the @@ -25,68 +24,63 @@ np.float32(3j) # E: incompatible type # https://github.com/numpy/numpy-stubs/issues/41 # # for more context. 
-np.float32([1.0, 0.0, 0.0]) # E: incompatible type -np.complex64([]) # E: incompatible type +np.float32([1.0, 0.0, 0.0]) # type: ignore[arg-type] +np.complex64([]) # type: ignore[call-overload] -np.complex64(1, 2) # E: Too many arguments # TODO: protocols (can't check for non-existent protocols w/ __getattr__) -np.datetime64(0) # E: No overload variant +np.datetime64(0) # type: ignore[call-overload] class A: - def __float__(self): - return 1.0 - - -np.int8(A()) # E: incompatible type -np.int16(A()) # E: incompatible type -np.int32(A()) # E: incompatible type -np.int64(A()) # E: incompatible type -np.uint8(A()) # E: incompatible type -np.uint16(A()) # E: incompatible type -np.uint32(A()) # E: incompatible type -np.uint64(A()) # E: incompatible type - -np.void("test") # E: No overload variant -np.void("test", dtype=None) # E: No overload variant - -np.generic(1) # E: Cannot instantiate abstract class -np.number(1) # E: Cannot instantiate abstract class -np.integer(1) # E: Cannot instantiate abstract class -np.inexact(1) # E: Cannot instantiate abstract class -np.character("test") # E: Cannot instantiate abstract class -np.flexible(b"test") # E: Cannot instantiate abstract class - -np.float64(value=0.0) # E: Unexpected keyword argument -np.int64(value=0) # E: Unexpected keyword argument -np.uint64(value=0) # E: Unexpected keyword argument -np.complex128(value=0.0j) # E: Unexpected keyword argument -np.str_(value='bob') # E: No overload variant -np.bytes_(value=b'test') # E: No overload variant -np.void(value=b'test') # E: No overload variant -np.bool(value=True) # E: Unexpected keyword argument -np.datetime64(value="2019") # E: No overload variant -np.timedelta64(value=0) # E: Unexpected keyword argument - -np.bytes_(b"hello", encoding='utf-8') # E: No overload variant -np.str_("hello", encoding='utf-8') # E: No overload variant - -f8.item(1) # E: incompatible type -f8.item((0, 1)) # E: incompatible type -f8.squeeze(axis=1) # E: incompatible type -f8.squeeze(axis=(0, 
1)) # E: incompatible type -f8.transpose(1) # E: incompatible type + def __float__(self) -> float: ... + +np.int8(A()) # type: ignore[arg-type] +np.int16(A()) # type: ignore[arg-type] +np.int32(A()) # type: ignore[arg-type] +np.int64(A()) # type: ignore[arg-type] +np.uint8(A()) # type: ignore[arg-type] +np.uint16(A()) # type: ignore[arg-type] +np.uint32(A()) # type: ignore[arg-type] +np.uint64(A()) # type: ignore[arg-type] + +np.void("test") # type: ignore[call-overload] +np.void("test", dtype=None) # type: ignore[call-overload] + +np.generic(1) # type: ignore[abstract] +np.number(1) # type: ignore[abstract] +np.integer(1) # type: ignore[abstract] +np.inexact(1) # type: ignore[abstract] +np.character("test") # type: ignore[abstract] +np.flexible(b"test") # type: ignore[abstract] + +np.float64(value=0.0) # type: ignore[call-arg] +np.int64(value=0) # type: ignore[call-arg] +np.uint64(value=0) # type: ignore[call-arg] +np.complex128(value=0.0j) # type: ignore[call-overload] +np.str_(value="bob") # type: ignore[call-overload] +np.bytes_(value=b"test") # type: ignore[call-overload] +np.void(value=b"test") # type: ignore[call-overload] +np.bool(value=True) # type: ignore[call-overload] +np.datetime64(value="2019") # type: ignore[call-overload] +np.timedelta64(value=0) # type: ignore[call-overload] + +np.bytes_(b"hello", encoding="utf-8") # type: ignore[call-overload] +np.str_("hello", encoding="utf-8") # type: ignore[call-overload] + +f8.item(1) # type: ignore[call-overload] +f8.item((0, 1)) # type: ignore[arg-type] +f8.squeeze(axis=1) # type: ignore[arg-type] +f8.squeeze(axis=(0, 1)) # type: ignore[arg-type] +f8.transpose(1) # type: ignore[arg-type] def func(a: np.float32) -> None: ... 
-func(f2) # E: incompatible type -func(f8) # E: incompatible type +func(f2) # type: ignore[arg-type] +func(f8) # type: ignore[arg-type] -round(c8) # E: No overload variant - -c8.__getnewargs__() # E: Invalid self argument -f2.__getnewargs__() # E: Invalid self argument -f2.hex() # E: Invalid self argument -np.float16.fromhex("0x0.0p+0") # E: Invalid self argument -f2.__trunc__() # E: Invalid self argument -f2.__getformat__("float") # E: Invalid self argument +c8.__getnewargs__() # type: ignore[attr-defined] +f2.__getnewargs__() # type: ignore[attr-defined] +f2.hex() # type: ignore[attr-defined] +np.float16.fromhex("0x0.0p+0") # type: ignore[attr-defined] +f2.__trunc__() # type: ignore[attr-defined] +f2.__getformat__("float") # type: ignore[attr-defined] diff --git a/numpy/typing/tests/data/fail/shape.pyi b/numpy/typing/tests/data/fail/shape.pyi new file mode 100644 index 000000000000..a024d7bda273 --- /dev/null +++ b/numpy/typing/tests/data/fail/shape.pyi @@ -0,0 +1,7 @@ +from typing import Any + +import numpy as np + +# test bounds of _ShapeT_co + +np.ndarray[tuple[str, str], Any] # type: ignore[type-var] diff --git a/numpy/typing/tests/data/fail/shape_base.pyi b/numpy/typing/tests/data/fail/shape_base.pyi index e709741b7935..652b24ba311e 100644 --- a/numpy/typing/tests/data/fail/shape_base.pyi +++ b/numpy/typing/tests/data/fail/shape_base.pyi @@ -5,4 +5,4 @@ class DTypeLike: dtype_like: DTypeLike -np.expand_dims(dtype_like, (5, 10)) # E: No overload variant +np.expand_dims(dtype_like, (5, 10)) # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/fail/stride_tricks.pyi b/numpy/typing/tests/data/fail/stride_tricks.pyi index f2bfba7432a8..7f9a26b96924 100644 --- a/numpy/typing/tests/data/fail/stride_tricks.pyi +++ b/numpy/typing/tests/data/fail/stride_tricks.pyi @@ -3,7 +3,7 @@ import numpy.typing as npt AR_f8: npt.NDArray[np.float64] -np.lib.stride_tricks.as_strided(AR_f8, shape=8) # E: No overload variant -np.lib.stride_tricks.as_strided(AR_f8, 
strides=8) # E: No overload variant +np.lib.stride_tricks.as_strided(AR_f8, shape=8) # type: ignore[call-overload] +np.lib.stride_tricks.as_strided(AR_f8, strides=8) # type: ignore[call-overload] -np.lib.stride_tricks.sliding_window_view(AR_f8, axis=(1,)) # E: No overload variant +np.lib.stride_tricks.sliding_window_view(AR_f8, axis=(1,)) # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/fail/strings.pyi b/numpy/typing/tests/data/fail/strings.pyi index 66fcf6b23f5d..328a521ae679 100644 --- a/numpy/typing/tests/data/fail/strings.pyi +++ b/numpy/typing/tests/data/fail/strings.pyi @@ -4,66 +4,49 @@ import numpy.typing as npt AR_U: npt.NDArray[np.str_] AR_S: npt.NDArray[np.bytes_] -np.strings.equal(AR_U, AR_S) # E: incompatible type - -np.strings.not_equal(AR_U, AR_S) # E: incompatible type - -np.strings.greater_equal(AR_U, AR_S) # E: incompatible type - -np.strings.less_equal(AR_U, AR_S) # E: incompatible type - -np.strings.greater(AR_U, AR_S) # E: incompatible type - -np.strings.less(AR_U, AR_S) # E: incompatible type - -np.strings.encode(AR_S) # E: incompatible type -np.strings.decode(AR_U) # E: incompatible type - -np.strings.join(AR_U, b"_") # E: incompatible type -np.strings.join(AR_S, "_") # E: incompatible type - -np.strings.ljust(AR_U, 5, fillchar=b"a") # E: incompatible type -np.strings.ljust(AR_S, 5, fillchar="a") # E: incompatible type -np.strings.rjust(AR_U, 5, fillchar=b"a") # E: incompatible type -np.strings.rjust(AR_S, 5, fillchar="a") # E: incompatible type - -np.strings.lstrip(AR_U, b"a") # E: incompatible type -np.strings.lstrip(AR_S, "a") # E: incompatible type -np.strings.strip(AR_U, b"a") # E: incompatible type -np.strings.strip(AR_S, "a") # E: incompatible type -np.strings.rstrip(AR_U, b"a") # E: incompatible type -np.strings.rstrip(AR_S, "a") # E: incompatible type - -np.strings.partition(AR_U, b"a") # E: incompatible type -np.strings.partition(AR_S, "a") # E: incompatible type -np.strings.rpartition(AR_U, b"a") # E: 
incompatible type -np.strings.rpartition(AR_S, "a") # E: incompatible type - -np.strings.split(AR_U, b"_") # E: incompatible type -np.strings.split(AR_S, "_") # E: incompatible type -np.strings.rsplit(AR_U, b"_") # E: incompatible type -np.strings.rsplit(AR_S, "_") # E: incompatible type - -np.strings.count(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # E: incompatible type -np.strings.count(AR_S, "a", 0, 9) # E: incompatible type - -np.strings.endswith(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # E: incompatible type -np.strings.endswith(AR_S, "a", 0, 9) # E: incompatible type -np.strings.startswith(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # E: incompatible type -np.strings.startswith(AR_S, "a", 0, 9) # E: incompatible type - -np.strings.find(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # E: incompatible type -np.strings.find(AR_S, "a", 0, 9) # E: incompatible type -np.strings.rfind(AR_U, b"a", [1, 2, 3], [1, 2 , 3]) # E: incompatible type -np.strings.rfind(AR_S, "a", 0, 9) # E: incompatible type - -np.strings.index(AR_U, b"a", start=[1, 2, 3]) # E: incompatible type -np.strings.index(AR_S, "a", end=9) # E: incompatible type -np.strings.rindex(AR_U, b"a", start=[1, 2, 3]) # E: incompatible type -np.strings.rindex(AR_S, "a", end=9) # E: incompatible type - -np.strings.isdecimal(AR_S) # E: incompatible type -np.strings.isnumeric(AR_S) # E: incompatible type - -np.strings.replace(AR_U, b"_", b"-", 10) # E: incompatible type -np.strings.replace(AR_S, "_", "-", 1) # E: incompatible type +np.strings.equal(AR_U, AR_S) # type: ignore[arg-type] +np.strings.not_equal(AR_U, AR_S) # type: ignore[arg-type] + +np.strings.greater_equal(AR_U, AR_S) # type: ignore[arg-type] +np.strings.less_equal(AR_U, AR_S) # type: ignore[arg-type] +np.strings.greater(AR_U, AR_S) # type: ignore[arg-type] +np.strings.less(AR_U, AR_S) # type: ignore[arg-type] + +np.strings.encode(AR_S) # type: ignore[arg-type] +np.strings.decode(AR_U) # type: ignore[arg-type] + +np.strings.lstrip(AR_U, b"a") # type: ignore[arg-type] 
+np.strings.lstrip(AR_S, "a") # type: ignore[arg-type] +np.strings.strip(AR_U, b"a") # type: ignore[arg-type] +np.strings.strip(AR_S, "a") # type: ignore[arg-type] +np.strings.rstrip(AR_U, b"a") # type: ignore[arg-type] +np.strings.rstrip(AR_S, "a") # type: ignore[arg-type] + +np.strings.partition(AR_U, b"a") # type: ignore[arg-type] +np.strings.partition(AR_S, "a") # type: ignore[arg-type] +np.strings.rpartition(AR_U, b"a") # type: ignore[arg-type] +np.strings.rpartition(AR_S, "a") # type: ignore[arg-type] + +np.strings.count(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # type: ignore[arg-type] +np.strings.count(AR_S, "a", 0, 9) # type: ignore[arg-type] + +np.strings.endswith(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # type: ignore[arg-type] +np.strings.endswith(AR_S, "a", 0, 9) # type: ignore[arg-type] +np.strings.startswith(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # type: ignore[arg-type] +np.strings.startswith(AR_S, "a", 0, 9) # type: ignore[arg-type] + +np.strings.find(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # type: ignore[arg-type] +np.strings.find(AR_S, "a", 0, 9) # type: ignore[arg-type] +np.strings.rfind(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # type: ignore[arg-type] +np.strings.rfind(AR_S, "a", 0, 9) # type: ignore[arg-type] + +np.strings.index(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.strings.index(AR_S, "a", end=9) # type: ignore[arg-type] +np.strings.rindex(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.strings.rindex(AR_S, "a", end=9) # type: ignore[arg-type] + +np.strings.isdecimal(AR_S) # type: ignore[arg-type] +np.strings.isnumeric(AR_S) # type: ignore[arg-type] + +np.strings.replace(AR_U, b"_", b"-", 10) # type: ignore[arg-type] +np.strings.replace(AR_S, "_", "-", 1) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/fail/testing.pyi b/numpy/typing/tests/data/fail/testing.pyi index 803870e2fead..517062c4c952 100644 --- a/numpy/typing/tests/data/fail/testing.pyi +++ b/numpy/typing/tests/data/fail/testing.pyi @@ -3,26 +3,26 @@ import numpy.typing 
as npt AR_U: npt.NDArray[np.str_] -def func() -> bool: ... +def func(x: object) -> bool: ... -np.testing.assert_(True, msg=1) # E: incompatible type -np.testing.build_err_msg(1, "test") # E: incompatible type -np.testing.assert_almost_equal(AR_U, AR_U) # E: incompatible type -np.testing.assert_approx_equal([1, 2, 3], [1, 2, 3]) # E: incompatible type -np.testing.assert_array_almost_equal(AR_U, AR_U) # E: incompatible type -np.testing.assert_array_less(AR_U, AR_U) # E: incompatible type -np.testing.assert_string_equal(b"a", b"a") # E: incompatible type +np.testing.assert_(True, msg=1) # type: ignore[arg-type] +np.testing.build_err_msg(1, "test") # type: ignore[arg-type] +np.testing.assert_almost_equal(AR_U, AR_U) # type: ignore[arg-type] +np.testing.assert_approx_equal([1, 2, 3], [1, 2, 3]) # type: ignore[arg-type] +np.testing.assert_array_almost_equal(AR_U, AR_U) # type: ignore[arg-type] +np.testing.assert_array_less(AR_U, AR_U) # type: ignore[arg-type] +np.testing.assert_string_equal(b"a", b"a") # type: ignore[arg-type] -np.testing.assert_raises(expected_exception=TypeError, callable=func) # E: No overload variant -np.testing.assert_raises_regex(expected_exception=TypeError, expected_regex="T", callable=func) # E: No overload variant +np.testing.assert_raises(expected_exception=TypeError, callable=func) # type: ignore[call-overload] +np.testing.assert_raises_regex(expected_exception=TypeError, expected_regex="T", callable=func) # type: ignore[call-overload] -np.testing.assert_allclose(AR_U, AR_U) # E: incompatible type -np.testing.assert_array_almost_equal_nulp(AR_U, AR_U) # E: incompatible type -np.testing.assert_array_max_ulp(AR_U, AR_U) # E: incompatible type +np.testing.assert_allclose(AR_U, AR_U) # type: ignore[arg-type] +np.testing.assert_array_almost_equal_nulp(AR_U, AR_U) # type: ignore[arg-type] +np.testing.assert_array_max_ulp(AR_U, AR_U) # type: ignore[arg-type] -np.testing.assert_warns(warning_class=RuntimeWarning, func=func) # E: No overload variant 
-np.testing.assert_no_warnings(func=func) # E: No overload variant -np.testing.assert_no_warnings(func, None) # E: Too many arguments -np.testing.assert_no_warnings(func, test=None) # E: Unexpected keyword argument +np.testing.assert_warns(RuntimeWarning, func) # type: ignore[call-overload] +np.testing.assert_no_warnings(func=func) # type: ignore[call-overload] +np.testing.assert_no_warnings(func) # type: ignore[call-overload] +np.testing.assert_no_warnings(func, y=None) # type: ignore[call-overload] -np.testing.assert_no_gc_cycles(func=func) # E: No overload variant +np.testing.assert_no_gc_cycles(func=func) # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/fail/twodim_base.pyi b/numpy/typing/tests/data/fail/twodim_base.pyi index 76186285669b..473419cecef1 100644 --- a/numpy/typing/tests/data/fail/twodim_base.pyi +++ b/numpy/typing/tests/data/fail/twodim_base.pyi @@ -1,37 +1,39 @@ -from typing import Any, TypeVar +from typing import type_check_only import numpy as np import numpy.typing as npt +_0d_bool: np.bool +_nd_bool: npt.NDArray[np.bool] +_nd_td64: npt.NDArray[np.timedelta64] +_to_2d_bool: list[list[bool]] -def func1(ar: npt.NDArray[Any], a: int) -> npt.NDArray[np.str_]: - pass +@type_check_only +def func1(ar: np.ndarray, a: int) -> npt.NDArray[np.str_]: ... +@type_check_only +def func2(ar: np.ndarray, a: float) -> float: ... 
+### -def func2(ar: npt.NDArray[Any], a: float) -> float: - pass +np.eye(10, M=20.0) # type: ignore[call-overload] +np.eye(10, k=2.5, dtype=int) # type: ignore[call-overload] +np.diag(_nd_bool, k=0.5) # type: ignore[call-overload] +np.diagflat(_nd_bool, k=0.5) # type: ignore[call-overload] -AR_b: npt.NDArray[np.bool] -AR_m: npt.NDArray[np.timedelta64] +np.tri(10, M=20.0) # type: ignore[call-overload] +np.tri(10, k=2.5, dtype=int) # type: ignore[call-overload] -AR_LIKE_b: list[bool] +np.tril(_nd_bool, k=0.5) # type: ignore[call-overload] +np.triu(_nd_bool, k=0.5) # type: ignore[call-overload] -np.eye(10, M=20.0) # E: No overload variant -np.eye(10, k=2.5, dtype=int) # E: No overload variant +np.vander(_nd_td64) # type: ignore[type-var] -np.diag(AR_b, k=0.5) # E: No overload variant -np.diagflat(AR_b, k=0.5) # E: No overload variant +np.histogram2d(_nd_td64) # type: ignore[call-overload] -np.tri(10, M=20.0) # E: No overload variant -np.tri(10, k=2.5, dtype=int) # E: No overload variant +np.mask_indices(10, func1) # type: ignore[arg-type] +np.mask_indices(10, func2, 10.5) # type: ignore[arg-type] -np.tril(AR_b, k=0.5) # E: No overload variant -np.triu(AR_b, k=0.5) # E: No overload variant +np.tril_indices(3.14) # type: ignore[arg-type] -np.vander(AR_m) # E: incompatible type - -np.histogram2d(AR_m) # E: No overload variant - -np.mask_indices(10, func1) # E: incompatible type -np.mask_indices(10, func2, 10.5) # E: incompatible type +np.tril_indices_from(_to_2d_bool) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/fail/type_check.pyi b/numpy/typing/tests/data/fail/type_check.pyi index 95f52bfbd260..8b68e996304c 100644 --- a/numpy/typing/tests/data/fail/type_check.pyi +++ b/numpy/typing/tests/data/fail/type_check.pyi @@ -1,13 +1,12 @@ import numpy as np -import numpy.typing as npt DTYPE_i8: np.dtype[np.int64] -np.mintypecode(DTYPE_i8) # E: incompatible type -np.iscomplexobj(DTYPE_i8) # E: incompatible type -np.isrealobj(DTYPE_i8) # E: incompatible type 
+np.mintypecode(DTYPE_i8) # type: ignore[arg-type] +np.iscomplexobj(DTYPE_i8) # type: ignore[arg-type] +np.isrealobj(DTYPE_i8) # type: ignore[arg-type] -np.typename(DTYPE_i8) # E: No overload variant -np.typename("invalid") # E: No overload variant +np.typename(DTYPE_i8) # type: ignore[call-overload] +np.typename("invalid") # type: ignore[call-overload] -np.common_type(np.timedelta64()) # E: incompatible type +np.common_type(np.timedelta64()) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/fail/ufunc_config.pyi b/numpy/typing/tests/data/fail/ufunc_config.pyi index b080804b0fcf..c67b6a3acf98 100644 --- a/numpy/typing/tests/data/fail/ufunc_config.pyi +++ b/numpy/typing/tests/data/fail/ufunc_config.pyi @@ -14,8 +14,8 @@ class Write2: class Write3: def write(self, *, a: str) -> None: ... -np.seterrcall(func1) # E: Argument 1 to "seterrcall" has incompatible type -np.seterrcall(func2) # E: Argument 1 to "seterrcall" has incompatible type -np.seterrcall(Write1()) # E: Argument 1 to "seterrcall" has incompatible type -np.seterrcall(Write2()) # E: Argument 1 to "seterrcall" has incompatible type -np.seterrcall(Write3()) # E: Argument 1 to "seterrcall" has incompatible type +np.seterrcall(func1) # type: ignore[arg-type] +np.seterrcall(func2) # type: ignore[arg-type] +np.seterrcall(Write1()) # type: ignore[arg-type] +np.seterrcall(Write2()) # type: ignore[arg-type] +np.seterrcall(Write3()) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/fail/ufunclike.pyi b/numpy/typing/tests/data/fail/ufunclike.pyi index be5e6a1530c2..e556e409ebbc 100644 --- a/numpy/typing/tests/data/fail/ufunclike.pyi +++ b/numpy/typing/tests/data/fail/ufunclike.pyi @@ -6,16 +6,16 @@ AR_m: npt.NDArray[np.timedelta64] AR_M: npt.NDArray[np.datetime64] AR_O: npt.NDArray[np.object_] -np.fix(AR_c) # E: incompatible type -np.fix(AR_m) # E: incompatible type -np.fix(AR_M) # E: incompatible type +np.fix(AR_c) # type: ignore[arg-type] +np.fix(AR_m) # type: ignore[arg-type] +np.fix(AR_M) 
# type: ignore[arg-type] -np.isposinf(AR_c) # E: incompatible type -np.isposinf(AR_m) # E: incompatible type -np.isposinf(AR_M) # E: incompatible type -np.isposinf(AR_O) # E: incompatible type +np.isposinf(AR_c) # type: ignore[arg-type] +np.isposinf(AR_m) # type: ignore[arg-type] +np.isposinf(AR_M) # type: ignore[arg-type] +np.isposinf(AR_O) # type: ignore[arg-type] -np.isneginf(AR_c) # E: incompatible type -np.isneginf(AR_m) # E: incompatible type -np.isneginf(AR_M) # E: incompatible type -np.isneginf(AR_O) # E: incompatible type +np.isneginf(AR_c) # type: ignore[arg-type] +np.isneginf(AR_m) # type: ignore[arg-type] +np.isneginf(AR_M) # type: ignore[arg-type] +np.isneginf(AR_O) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/fail/ufuncs.pyi b/numpy/typing/tests/data/fail/ufuncs.pyi index e827267c6072..1b1628d7da44 100644 --- a/numpy/typing/tests/data/fail/ufuncs.pyi +++ b/numpy/typing/tests/data/fail/ufuncs.pyi @@ -3,39 +3,15 @@ import numpy.typing as npt AR_f8: npt.NDArray[np.float64] -np.sin.nin + "foo" # E: Unsupported operand types -np.sin(1, foo="bar") # E: No overload variant +np.sin.nin + "foo" # type: ignore[operator] +np.sin(1, foo="bar") # type: ignore[call-overload] -np.abs(None) # E: No overload variant +np.abs(None) # type: ignore[call-overload] -np.add(1, 1, 1) # E: No overload variant -np.add(1, 1, axis=0) # E: No overload variant +np.add(1, 1, 1) # type: ignore[call-overload] +np.add(1, 1, axis=0) # type: ignore[call-overload] -np.matmul(AR_f8, AR_f8, where=True) # E: No overload variant +np.matmul(AR_f8, AR_f8, where=True) # type: ignore[call-overload] -np.frexp(AR_f8, out=None) # E: No overload variant -np.frexp(AR_f8, out=AR_f8) # E: No overload variant - -np.absolute.outer() # E: "None" not callable -np.frexp.outer() # E: "None" not callable -np.divmod.outer() # E: "None" not callable -np.matmul.outer() # E: "None" not callable - -np.absolute.reduceat() # E: "None" not callable -np.frexp.reduceat() # E: "None" not callable 
-np.divmod.reduceat() # E: "None" not callable -np.matmul.reduceat() # E: "None" not callable - -np.absolute.reduce() # E: "None" not callable -np.frexp.reduce() # E: "None" not callable -np.divmod.reduce() # E: "None" not callable -np.matmul.reduce() # E: "None" not callable - -np.absolute.accumulate() # E: "None" not callable -np.frexp.accumulate() # E: "None" not callable -np.divmod.accumulate() # E: "None" not callable -np.matmul.accumulate() # E: "None" not callable - -np.frexp.at() # E: "None" not callable -np.divmod.at() # E: "None" not callable -np.matmul.at() # E: "None" not callable +np.frexp(AR_f8, out=None) # type: ignore[call-overload] +np.frexp(AR_f8, out=AR_f8) # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/fail/warnings_and_errors.pyi b/numpy/typing/tests/data/fail/warnings_and_errors.pyi index fae96d6bf016..8ba34f6dfa3e 100644 --- a/numpy/typing/tests/data/fail/warnings_and_errors.pyi +++ b/numpy/typing/tests/data/fail/warnings_and_errors.pyi @@ -1,5 +1,5 @@ import numpy.exceptions as ex -ex.AxisError(1.0) # E: No overload variant -ex.AxisError(1, ndim=2.0) # E: No overload variant -ex.AxisError(2, msg_prefix=404) # E: No overload variant +ex.AxisError(1.0) # type: ignore[call-overload] +ex.AxisError(1, ndim=2.0) # type: ignore[call-overload] +ex.AxisError(2, msg_prefix=404) # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/misc/extended_precision.pyi b/numpy/typing/tests/data/misc/extended_precision.pyi index 78d8d93c6560..7978faf4d5bd 100644 --- a/numpy/typing/tests/data/misc/extended_precision.pyi +++ b/numpy/typing/tests/data/misc/extended_precision.pyi @@ -1,25 +1,9 @@ -import sys +from typing import assert_type import numpy as np -from numpy._typing import _80Bit, _96Bit, _128Bit, _256Bit +from numpy._typing import _96Bit, _128Bit -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - -assert_type(np.uint128(), np.unsignedinteger[_128Bit]) 
-assert_type(np.uint256(), np.unsignedinteger[_256Bit]) - -assert_type(np.int128(), np.signedinteger[_128Bit]) -assert_type(np.int256(), np.signedinteger[_256Bit]) - -assert_type(np.float80(), np.floating[_80Bit]) assert_type(np.float96(), np.floating[_96Bit]) assert_type(np.float128(), np.floating[_128Bit]) -assert_type(np.float256(), np.floating[_256Bit]) - -assert_type(np.complex160(), np.complexfloating[_80Bit, _80Bit]) assert_type(np.complex192(), np.complexfloating[_96Bit, _96Bit]) assert_type(np.complex256(), np.complexfloating[_128Bit, _128Bit]) -assert_type(np.complex512(), np.complexfloating[_256Bit, _256Bit]) diff --git a/numpy/typing/tests/data/mypy.ini b/numpy/typing/tests/data/mypy.ini index 7553012050c7..4aa465ae087b 100644 --- a/numpy/typing/tests/data/mypy.ini +++ b/numpy/typing/tests/data/mypy.ini @@ -1,7 +1,8 @@ [mypy] -plugins = numpy.typing.mypy_plugin +strict = True +enable_error_code = deprecated, ignore-without-code, truthy-bool +disallow_any_unimported = True +allow_redefinition_new = True +local_partial_types = True show_absolute_path = True -implicit_reexport = False pretty = True -disallow_any_unimported = True -disallow_any_generics = True diff --git a/numpy/typing/tests/data/pass/arithmetic.py b/numpy/typing/tests/data/pass/arithmetic.py index 496586821582..e347ec096e21 100644 --- a/numpy/typing/tests/data/pass/arithmetic.py +++ b/numpy/typing/tests/data/pass/arithmetic.py @@ -1,9 +1,12 @@ from __future__ import annotations -from typing import Any, Optional -import numpy as np +from typing import Any, cast + import pytest +import numpy as np +import numpy.typing as npt + c16 = np.complex128(1) f8 = np.float64(1) i8 = np.int64(1) @@ -22,12 +25,12 @@ b = bool(1) c = complex(1) f = float(1) -i = int(1) +i = 1 class Object: - def __array__(self, dtype: Optional[np.typing.DTypeLike] = None, - copy: Optional[bool] = None) -> np.ndarray[Any, np.dtype[np.object_]]: + def __array__(self, dtype: np.typing.DTypeLike | None = None, + copy: bool | 
None = None) -> np.ndarray[Any, np.dtype[np.object_]]: ret = np.empty((), dtype=object) ret[()] = self return ret @@ -57,14 +60,15 @@ def __rpow__(self, value: Any) -> Object: return self -AR_b: np.ndarray[Any, np.dtype[np.bool]] = np.array([True]) -AR_u: np.ndarray[Any, np.dtype[np.uint32]] = np.array([1], dtype=np.uint32) -AR_i: np.ndarray[Any, np.dtype[np.int64]] = np.array([1]) -AR_f: np.ndarray[Any, np.dtype[np.float64]] = np.array([1.0]) -AR_c: np.ndarray[Any, np.dtype[np.complex128]] = np.array([1j]) -AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] = np.array([np.timedelta64(1, "D")]) -AR_M: np.ndarray[Any, np.dtype[np.datetime64]] = np.array([np.datetime64(1, "D")]) -AR_O: np.ndarray[Any, np.dtype[np.object_]] = np.array([Object()]) +AR_b: npt.NDArray[np.bool] = np.array([True]) +AR_u: npt.NDArray[np.uint32] = np.array([1], dtype=np.uint32) +AR_i: npt.NDArray[np.int64] = np.array([1]) +AR_integer: npt.NDArray[np.integer] = cast(npt.NDArray[np.integer], AR_i) +AR_f: npt.NDArray[np.float64] = np.array([1.0]) +AR_c: npt.NDArray[np.complex128] = np.array([1j]) +AR_m: npt.NDArray[np.timedelta64] = np.array([np.timedelta64(1, "D")]) +AR_M: npt.NDArray[np.datetime64] = np.array([np.datetime64(1, "D")]) +AR_O: npt.NDArray[np.object_] = np.array([Object()]) AR_LIKE_b = [True] AR_LIKE_u = [np.uint32(1)] @@ -251,6 +255,13 @@ def __rpow__(self, value: Any) -> Object: AR_LIKE_m // AR_m +AR_m /= f +AR_m //= f +AR_m /= AR_f +AR_m /= AR_LIKE_f +AR_m //= AR_f +AR_m //= AR_LIKE_f + AR_O // AR_LIKE_b AR_O // AR_LIKE_u AR_O // AR_LIKE_i @@ -274,6 +285,10 @@ def __rpow__(self, value: Any) -> Object: AR_i *= AR_LIKE_u AR_i *= AR_LIKE_i +AR_integer *= AR_LIKE_b +AR_integer *= AR_LIKE_u +AR_integer *= AR_LIKE_i + AR_f *= AR_LIKE_b AR_f *= AR_LIKE_u AR_f *= AR_LIKE_i @@ -306,6 +321,10 @@ def __rpow__(self, value: Any) -> Object: AR_i **= AR_LIKE_u AR_i **= AR_LIKE_i +AR_integer **= AR_LIKE_b +AR_integer **= AR_LIKE_u +AR_integer **= AR_LIKE_i + AR_f **= AR_LIKE_b AR_f **= AR_LIKE_u 
AR_f **= AR_LIKE_i diff --git a/numpy/typing/tests/data/pass/array_constructors.py b/numpy/typing/tests/data/pass/array_constructors.py index 17b6fab93ad8..27cbffa06a5c 100644 --- a/numpy/typing/tests/data/pass/array_constructors.py +++ b/numpy/typing/tests/data/pass/array_constructors.py @@ -1,14 +1,14 @@ from typing import Any import numpy as np -import numpy.typing as npt + class Index: def __index__(self) -> int: return 0 -class SubClass(npt.NDArray[np.float64]): +class SubClass(np.ndarray[tuple[Any, ...], np.dtype[np.float64]]): pass diff --git a/numpy/typing/tests/data/pass/array_like.py b/numpy/typing/tests/data/pass/array_like.py index 822e6a1d4bed..f1e09b03a4ec 100644 --- a/numpy/typing/tests/data/pass/array_like.py +++ b/numpy/typing/tests/data/pass/array_like.py @@ -1,9 +1,5 @@ -from __future__ import annotations - -from typing import Any - import numpy as np -from numpy._typing import NDArray, ArrayLike, _SupportsArray +from numpy._typing import ArrayLike, NDArray, _SupportsArray x1: ArrayLike = True x2: ArrayLike = 5 @@ -20,9 +16,7 @@ class A: - def __array__( - self, dtype: None | np.dtype[Any] = None - ) -> NDArray[np.float64]: + def __array__(self, dtype: np.dtype | None = None) -> NDArray[np.float64]: return np.array([1.0, 2.0, 3.0]) diff --git a/numpy/typing/tests/data/pass/arrayterator.py b/numpy/typing/tests/data/pass/arrayterator.py index 572be5e2fe29..a99c09a25231 100644 --- a/numpy/typing/tests/data/pass/arrayterator.py +++ b/numpy/typing/tests/data/pass/arrayterator.py @@ -2,6 +2,7 @@ from __future__ import annotations from typing import Any + import numpy as np AR_i8: np.ndarray[Any, np.dtype[np.int_]] = np.arange(10) diff --git a/numpy/typing/tests/data/pass/bitwise_ops.py b/numpy/typing/tests/data/pass/bitwise_ops.py index 22a245d21809..2d4815b0d940 100644 --- a/numpy/typing/tests/data/pass/bitwise_ops.py +++ b/numpy/typing/tests/data/pass/bitwise_ops.py @@ -9,7 +9,7 @@ b_ = np.bool(1) b = bool(1) -i = int(1) +i = 1 AR = np.array([0, 1, 
2], dtype=np.int32) AR.setflags(write=False) diff --git a/numpy/typing/tests/data/pass/comparisons.py b/numpy/typing/tests/data/pass/comparisons.py index 0babc321b32d..b2e52762c7a8 100644 --- a/numpy/typing/tests/data/pass/comparisons.py +++ b/numpy/typing/tests/data/pass/comparisons.py @@ -1,6 +1,7 @@ from __future__ import annotations -from typing import Any +from typing import Any, cast + import numpy as np c16 = np.complex128() @@ -18,10 +19,10 @@ b_ = np.bool() -b = bool() +b = False c = complex() -f = float() -i = int() +f = 0.0 +i = 0 SEQ = (0, 1, 2, 3, 4) @@ -30,6 +31,9 @@ AR_i: np.ndarray[Any, np.dtype[np.int_]] = np.array([1]) AR_f: np.ndarray[Any, np.dtype[np.float64]] = np.array([1.0]) AR_c: np.ndarray[Any, np.dtype[np.complex128]] = np.array([1.0j]) +AR_S: np.ndarray[Any, np.dtype[np.bytes_]] = np.array([b"a"], "S") +AR_T = cast(np.ndarray[Any, np.dtypes.StringDType], np.array(["a"], "T")) +AR_U: np.ndarray[Any, np.dtype[np.str_]] = np.array(["a"], "U") AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] = np.array([np.timedelta64("1")]) AR_M: np.ndarray[Any, np.dtype[np.datetime64]] = np.array([np.datetime64("1")]) AR_O: np.ndarray[Any, np.dtype[np.object_]] = np.array([1], dtype=object) @@ -66,6 +70,17 @@ AR_c > AR_f AR_c > AR_c +AR_S > AR_S +AR_S > b"" + +AR_T > AR_T +AR_T > AR_U +AR_T > "" + +AR_U > AR_U +AR_U > AR_T +AR_U > "" + AR_m > AR_b AR_m > AR_u AR_m > AR_i diff --git a/numpy/typing/tests/data/pass/flatiter.py b/numpy/typing/tests/data/pass/flatiter.py index 63c839af4b23..70de3a67917d 100644 --- a/numpy/typing/tests/data/pass/flatiter.py +++ b/numpy/typing/tests/data/pass/flatiter.py @@ -9,8 +9,18 @@ iter(a) next(a) a[0] -a[[0, 1, 2]] a[...] a[:] a.__array__() -a.__array__(np.dtype(np.float64)) + +b = np.array([1]).flat +a[b] + +a[0] = "1" +a[:] = "2" +a[...] 
= "3" +a[[]] = "4" +a[[0]] = "5" +a[[[0]]] = "6" +a[[[[[0]]]]] = "7" +a[b] = "8" diff --git a/numpy/typing/tests/data/pass/fromnumeric.py b/numpy/typing/tests/data/pass/fromnumeric.py index 3d7ef2938e20..7cc2bcfd8b50 100644 --- a/numpy/typing/tests/data/pass/fromnumeric.py +++ b/numpy/typing/tests/data/pass/fromnumeric.py @@ -159,6 +159,12 @@ np.cumsum(A) np.cumsum(B) +np.cumulative_sum(a) +np.cumulative_sum(b) +np.cumulative_sum(c) +np.cumulative_sum(A, axis=0) +np.cumulative_sum(B, axis=0) + np.ptp(b) np.ptp(c) np.ptp(B) @@ -205,6 +211,12 @@ np.cumprod(A) np.cumprod(B) +np.cumulative_prod(a) +np.cumulative_prod(b) +np.cumulative_prod(c) +np.cumulative_prod(A, axis=0) +np.cumulative_prod(B, axis=0) + np.ndim(a) np.ndim(b) np.ndim(c) diff --git a/numpy/typing/tests/data/pass/index_tricks.py b/numpy/typing/tests/data/pass/index_tricks.py index 4c4c1195990a..ea98156a8f2e 100644 --- a/numpy/typing/tests/data/pass/index_tricks.py +++ b/numpy/typing/tests/data/pass/index_tricks.py @@ -1,5 +1,7 @@ from __future__ import annotations + from typing import Any + import numpy as np AR_LIKE_b = [[True, True], [True, True]] @@ -13,10 +15,6 @@ np.ndenumerate(AR_LIKE_f) np.ndenumerate(AR_LIKE_U) -np.ndenumerate(AR_i8).iter -np.ndenumerate(AR_LIKE_f).iter -np.ndenumerate(AR_LIKE_U).iter - next(np.ndenumerate(AR_i8)) next(np.ndenumerate(AR_LIKE_f)) next(np.ndenumerate(AR_LIKE_U)) diff --git a/numpy/typing/tests/data/pass/lib_user_array.py b/numpy/typing/tests/data/pass/lib_user_array.py new file mode 100644 index 000000000000..f79dc38af508 --- /dev/null +++ b/numpy/typing/tests/data/pass/lib_user_array.py @@ -0,0 +1,22 @@ +"""Based on the `if __name__ == "__main__"` test code in `lib/_user_array_impl.py`.""" + +from __future__ import annotations + +import numpy as np +from numpy.lib.user_array import container # type: ignore[deprecated] + +N = 10_000 +W = H = int(N**0.5) + +a: np.ndarray[tuple[int, int], np.dtype[np.int32]] +ua: container[tuple[int, int], np.dtype[np.int32]] + +a = 
np.arange(N, dtype=np.int32).reshape(W, H) +ua = container(a) + +ua_small: container[tuple[int, int], np.dtype[np.int32]] = ua[:3, :5] +ua_small[0, 0] = 10 + +ua_bool: container[tuple[int, int], np.dtype[np.bool]] = ua_small > 1 + +# shape: tuple[int, int] = np.shape(ua) diff --git a/numpy/typing/tests/data/pass/literal.py b/numpy/typing/tests/data/pass/literal.py index 5ef8122d1195..c6d86baf7397 100644 --- a/numpy/typing/tests/data/pass/literal.py +++ b/numpy/typing/tests/data/pass/literal.py @@ -1,12 +1,15 @@ from __future__ import annotations -from typing import Any from functools import partial -from collections.abc import Callable +from typing import TYPE_CHECKING, Any import pytest + import numpy as np +if TYPE_CHECKING: + from collections.abc import Callable + AR = np.array(0) AR.setflags(write=False) @@ -15,7 +18,6 @@ CF = frozenset({None, "C", "F"}) order_list: list[tuple[frozenset[str | None], Callable[..., Any]]] = [ - (KACF, partial(np.ndarray, 1)), (KACF, AR.tobytes), (KACF, partial(AR.astype, int)), (KACF, AR.copy), @@ -23,6 +25,8 @@ (KACF, AR.flatten), (KACF, AR.ravel), (KACF, partial(np.array, 1)), + # NOTE: __call__ is needed due to python/mypy#17620 + (KACF, partial(np.ndarray.__call__, 1)), (CF, partial(np.zeros, 1)), (CF, partial(np.ones, 1)), (CF, partial(np.empty, 1)), @@ -31,7 +35,7 @@ (KACF, partial(np.ones_like, AR)), (KACF, partial(np.empty_like, AR)), (KACF, partial(np.full_like, AR, 1)), - (KACF, partial(np.add, 1, 1)), # i.e. np.ufunc.__call__ + (KACF, partial(np.add.__call__, 1, 1)), # i.e. 
np.ufunc.__call__ (ACF, partial(np.reshape, AR, 1)), (KACF, partial(np.ravel, AR)), (KACF, partial(np.asarray, 1)), diff --git a/numpy/typing/tests/data/pass/ma.py b/numpy/typing/tests/data/pass/ma.py new file mode 100644 index 000000000000..72cbc5d9b98e --- /dev/null +++ b/numpy/typing/tests/data/pass/ma.py @@ -0,0 +1,198 @@ +import datetime as dt +from typing import Any, cast + +import numpy as np +import numpy.typing as npt +from numpy._typing import _Shape + +type MaskedArray[ScalarT: np.generic] = np.ma.MaskedArray[_Shape, np.dtype[ScalarT]] + +# mypy: disable-error-code=no-untyped-call + +MAR_b: MaskedArray[np.bool] = np.ma.MaskedArray([True]) +MAR_u: MaskedArray[np.uint32] = np.ma.MaskedArray([1], dtype=np.uint32) +MAR_i: MaskedArray[np.int64] = np.ma.MaskedArray([1]) +MAR_f: MaskedArray[np.float64] = np.ma.MaskedArray([1.0]) +MAR_c: MaskedArray[np.complex128] = np.ma.MaskedArray([1j]) +MAR_td64: MaskedArray[np.timedelta64] = np.ma.MaskedArray([np.timedelta64(1, "D")]) +MAR_dt64: MaskedArray[np.datetime64] = np.ma.MaskedArray([np.datetime64(1, "D")]) +MAR_S: MaskedArray[np.bytes_] = np.ma.MaskedArray([b'foo'], dtype=np.bytes_) +MAR_U: MaskedArray[np.str_] = np.ma.MaskedArray(['foo'], dtype=np.str_) +MAR_T = cast(np.ma.MaskedArray[Any, np.dtypes.StringDType], + np.ma.MaskedArray(["a"], dtype="T")) +MAR_V: MaskedArray[np.void] = np.ma.MaskedArray( + [(1, 1)], + mask=[(False, False)], + dtype=[('a', int), ('b', int)] +) + +AR_b: npt.NDArray[np.bool] = np.array([True, False, True]) + +AR_LIKE_b = [True] +AR_LIKE_u = [np.uint32(1)] +AR_LIKE_i = [1] +AR_LIKE_f = [1.0] +AR_LIKE_c = [1j] +AR_LIKE_m = [np.timedelta64(1, "D")] +AR_LIKE_M = [np.datetime64(1, "D")] + +MAR_f.mask = AR_b +MAR_f.mask = np.False_ + +MAR_i.fill_value = 0 + +MAR_b.flat[MAR_i > 0] = False +MAR_i.flat[:] = 1 +MAR_f.flat[[0]] = AR_LIKE_f +MAR_c.flat[[0, 0]] = [3, 4 + 3j] +MAR_td64.flat[0] = dt.timedelta(1) +MAR_dt64.flat[0] = dt.datetime(2020, 1, 1) + +MAR_b[MAR_i > 0] = False +MAR_i[:] = 1 
+MAR_f[[0]] = AR_LIKE_f +MAR_c[[0, 0]] = [3, 4 + 3j] +MAR_td64[0] = dt.timedelta(1) +MAR_dt64[0] = dt.datetime(2020, 1, 1) +MAR_V['a'] = [2] + +# Inplace addition + +MAR_b += AR_LIKE_b + +MAR_u += AR_LIKE_b +MAR_u += AR_LIKE_u + +MAR_i += AR_LIKE_b +MAR_i += 2 +MAR_i += AR_LIKE_i + +MAR_f += AR_LIKE_b +MAR_f += 2 +MAR_f += AR_LIKE_u +MAR_f += AR_LIKE_i +MAR_f += AR_LIKE_f + +MAR_c += AR_LIKE_b +MAR_c += AR_LIKE_u +MAR_c += AR_LIKE_i +MAR_c += AR_LIKE_f +MAR_c += AR_LIKE_c + +MAR_td64 += AR_LIKE_b +MAR_td64 += AR_LIKE_u +MAR_td64 += AR_LIKE_i +MAR_td64 += AR_LIKE_m +MAR_dt64 += AR_LIKE_b +MAR_dt64 += AR_LIKE_u +MAR_dt64 += AR_LIKE_i +MAR_dt64 += AR_LIKE_m + +MAR_S += b'snakes' +MAR_U += 'snakes' +MAR_T += 'snakes' + +# Inplace subtraction + +MAR_u -= AR_LIKE_b +MAR_u -= AR_LIKE_u + +MAR_i -= AR_LIKE_b +MAR_i -= AR_LIKE_i + +MAR_f -= AR_LIKE_b +MAR_f -= AR_LIKE_u +MAR_f -= AR_LIKE_i +MAR_f -= AR_LIKE_f + +MAR_c -= AR_LIKE_b +MAR_c -= AR_LIKE_u +MAR_c -= AR_LIKE_i +MAR_c -= AR_LIKE_f +MAR_c -= AR_LIKE_c + +MAR_td64 -= AR_LIKE_b +MAR_td64 -= AR_LIKE_u +MAR_td64 -= AR_LIKE_i +MAR_td64 -= AR_LIKE_m +MAR_dt64 -= AR_LIKE_b +MAR_dt64 -= AR_LIKE_u +MAR_dt64 -= AR_LIKE_i +MAR_dt64 -= AR_LIKE_m + +# Inplace floor division + +MAR_f //= AR_LIKE_b +MAR_f //= 2 +MAR_f //= AR_LIKE_u +MAR_f //= AR_LIKE_i +MAR_f //= AR_LIKE_f + +MAR_td64 //= AR_LIKE_i + +# Inplace true division + +MAR_f /= AR_LIKE_b +MAR_f /= 2 +MAR_f /= AR_LIKE_u +MAR_f /= AR_LIKE_i +MAR_f /= AR_LIKE_f + +MAR_c /= AR_LIKE_b +MAR_c /= AR_LIKE_u +MAR_c /= AR_LIKE_i +MAR_c /= AR_LIKE_f +MAR_c /= AR_LIKE_c + +MAR_td64 /= AR_LIKE_i + +# Inplace multiplication + +MAR_b *= AR_LIKE_b + +MAR_u *= AR_LIKE_b +MAR_u *= AR_LIKE_u + +MAR_i *= AR_LIKE_b +MAR_i *= 2 +MAR_i *= AR_LIKE_i + +MAR_f *= AR_LIKE_b +MAR_f *= 2 +MAR_f *= AR_LIKE_u +MAR_f *= AR_LIKE_i +MAR_f *= AR_LIKE_f + +MAR_c *= AR_LIKE_b +MAR_c *= AR_LIKE_u +MAR_c *= AR_LIKE_i +MAR_c *= AR_LIKE_f +MAR_c *= AR_LIKE_c + +MAR_td64 *= AR_LIKE_b +MAR_td64 *= AR_LIKE_u 
+MAR_td64 *= AR_LIKE_i +MAR_td64 *= AR_LIKE_f + +MAR_S *= 2 +MAR_U *= 2 +MAR_T *= 2 + +# Inplace power + +MAR_u **= AR_LIKE_b +MAR_u **= AR_LIKE_u + +MAR_i **= AR_LIKE_b +MAR_i **= AR_LIKE_i + +MAR_f **= AR_LIKE_b +MAR_f **= AR_LIKE_u +MAR_f **= AR_LIKE_i +MAR_f **= AR_LIKE_f + +MAR_c **= AR_LIKE_b +MAR_c **= AR_LIKE_u +MAR_c **= AR_LIKE_i +MAR_c **= AR_LIKE_f +MAR_c **= AR_LIKE_c diff --git a/numpy/typing/tests/data/pass/mod.py b/numpy/typing/tests/data/pass/mod.py index 2b7e6cd85c73..464326486fa2 100644 --- a/numpy/typing/tests/data/pass/mod.py +++ b/numpy/typing/tests/data/pass/mod.py @@ -13,7 +13,7 @@ b = bool(1) f = float(1) -i = int(1) +i = 1 AR = np.array([1], dtype=np.bool) AR.setflags(write=False) diff --git a/numpy/typing/tests/data/pass/multiarray.py b/numpy/typing/tests/data/pass/multiarray.py index 26cedfd77566..3a505590b5d3 100644 --- a/numpy/typing/tests/data/pass/multiarray.py +++ b/numpy/typing/tests/data/pass/multiarray.py @@ -70,7 +70,8 @@ np.unpackbits(AR_u1) np.shares_memory(1, 2) -np.shares_memory(AR_f8, AR_f8, max_work=1) +np.shares_memory(AR_f8, AR_f8, max_work=-1) np.may_share_memory(1, 2) -np.may_share_memory(AR_f8, AR_f8, max_work=1) +np.may_share_memory(AR_f8, AR_f8, max_work=0) +np.may_share_memory(AR_f8, AR_f8, max_work=-1) diff --git a/numpy/typing/tests/data/pass/ndarray_conversion.py b/numpy/typing/tests/data/pass/ndarray_conversion.py index 76da1dadd327..a4e0bcf34bdd 100644 --- a/numpy/typing/tests/data/pass/ndarray_conversion.py +++ b/numpy/typing/tests/data/pass/ndarray_conversion.py @@ -74,14 +74,8 @@ # setflags nd.setflags() - -nd.setflags(True) nd.setflags(write=True) - -nd.setflags(True, True) nd.setflags(write=True, align=True) - -nd.setflags(True, True, False) nd.setflags(write=True, align=True, uic=False) # fill is pretty simple diff --git a/numpy/typing/tests/data/pass/ndarray_misc.py b/numpy/typing/tests/data/pass/ndarray_misc.py index 7b8ebea52a16..0a9302a2a116 100644 --- a/numpy/typing/tests/data/pass/ndarray_misc.py 
+++ b/numpy/typing/tests/data/pass/ndarray_misc.py @@ -9,21 +9,28 @@ from __future__ import annotations import operator -from typing import cast, Any +from collections.abc import Hashable +from typing import Any, cast import numpy as np import numpy.typing as npt -class SubClass(npt.NDArray[np.float64]): ... + +class SubClass(np.ndarray[tuple[Any, ...], np.dtype[np.float64]]): ... +class IntSubClass(np.ndarray[tuple[Any, ...], np.dtype[np.intp]]): ... + i4 = np.int32(1) A: np.ndarray[Any, np.dtype[np.int32]] = np.array([[1]], dtype=np.int32) B0 = np.empty((), dtype=np.int32).view(SubClass) B1 = np.empty((1,), dtype=np.int32).view(SubClass) B2 = np.empty((1, 1), dtype=np.int32).view(SubClass) +B_int0: IntSubClass = np.empty((), dtype=np.intp).view(IntSubClass) C: np.ndarray[Any, np.dtype[np.int32]] = np.array([0, 1, 2], dtype=np.int32) D = np.ones(3).view(SubClass) +ctypes_obj = A.ctypes + i4.all() A.all() A.all(axis=0) @@ -39,15 +46,20 @@ class SubClass(npt.NDArray[np.float64]): ... i4.argmax() A.argmax() A.argmax(axis=0) -A.argmax(out=B0) +A.argmax(out=B_int0) i4.argmin() A.argmin() A.argmin(axis=0) -A.argmin(out=B0) +A.argmin(out=B_int0) i4.argsort() +i4.argsort(stable=True) A.argsort() +A.argsort(stable=True) + +A.sort() +A.sort(stable=True) i4.choose([()]) _choices = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], dtype=np.int32) @@ -115,7 +127,7 @@ class SubClass(npt.NDArray[np.float64]): ... i4.std() A.std() A.std(axis=0) -A.std(keepdims=True) +A.std(keepdims=True, mean=0.) A.std(out=B0.astype(np.float64)) i4.sum() @@ -133,7 +145,7 @@ class SubClass(npt.NDArray[np.float64]): ... i4.var() A.var() A.var(axis=0) -A.var(keepdims=True) +A.var(keepdims=True, mean=0.) A.var(out=B0) A.argpartition([0]) @@ -174,3 +186,14 @@ class SubClass(npt.NDArray[np.float64]): ... 
complex(np.array(1.0, dtype=np.float64)) operator.index(np.array(1, dtype=np.int64)) + +# this fails on numpy 2.2.1 +# https://github.com/scipy/scipy/blob/a755ee77ec47a64849abe42c349936475a6c2f24/scipy/io/arff/tests/test_arffread.py#L41-L44 +A_float = np.array([[1, 5], [2, 4], [np.nan, np.nan]]) +A_void: npt.NDArray[np.void] = np.empty(3, [("yop", float), ("yap", float)]) +A_void["yop"] = A_float[:, 0] +A_void["yap"] = A_float[:, 1] + +# regression test for https://github.com/numpy/numpy/issues/30445 +def f(x: np.generic) -> Hashable: + return x diff --git a/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py b/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py index 0ca3dff392e1..1458339bf6ae 100644 --- a/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py +++ b/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py @@ -11,13 +11,13 @@ nd1.reshape(4, order="C") # resize -nd1.resize() -nd1.resize(4) -nd1.resize(2, 2) -nd1.resize((2, 2)) +nd1.resize() # type: ignore[deprecated] +nd1.resize(4) # type: ignore[deprecated] +nd1.resize(2, 2) # type: ignore[deprecated] +nd1.resize((2, 2)) # type: ignore[deprecated] -nd1.resize((2, 2), refcheck=True) -nd1.resize(4, refcheck=True) +nd1.resize((2, 2), refcheck=True) # type: ignore[deprecated] +nd1.resize(4, refcheck=True) # type: ignore[deprecated] nd2 = np.array([[1, 2], [3, 4]]) diff --git a/numpy/typing/tests/data/pass/nditer.py b/numpy/typing/tests/data/pass/nditer.py new file mode 100644 index 000000000000..25a5b44d7aec --- /dev/null +++ b/numpy/typing/tests/data/pass/nditer.py @@ -0,0 +1,4 @@ +import numpy as np + +arr = np.array([1]) +np.nditer([arr, None]) diff --git a/numpy/typing/tests/data/pass/numeric.py b/numpy/typing/tests/data/pass/numeric.py index 7f8f92973901..825a6dd74f34 100644 --- a/numpy/typing/tests/data/pass/numeric.py +++ b/numpy/typing/tests/data/pass/numeric.py @@ -2,21 +2,20 @@ Tests for :mod:`numpy._core.numeric`. 
Does not include tests which fall under ``array_constructors``. - """ -from __future__ import annotations +from typing import Any import numpy as np -import numpy.typing as npt -class SubClass(npt.NDArray[np.float64]): - ... + +class SubClass(np.ndarray[tuple[Any, ...], np.dtype[np.float64]]): ... + i8 = np.int64(1) A = np.arange(27).reshape(3, 3, 3) -B: list[list[list[int]]] = A.tolist() +B = A.tolist() C = np.empty((27, 27)).view(SubClass) np.count_nonzero(i8) diff --git a/numpy/typing/tests/data/pass/random.py b/numpy/typing/tests/data/pass/random.py index 69afb28c48ec..fd07e378e553 100644 --- a/numpy/typing/tests/data/pass/random.py +++ b/numpy/typing/tests/data/pass/random.py @@ -1,6 +1,7 @@ from __future__ import annotations from typing import Any + import numpy as np SEED_NONE = None @@ -1493,5 +1494,5 @@ random_st.tomaxint(1) random_st.tomaxint((1,)) -np.random.set_bit_generator(SEED_PCG64) -np.random.get_bit_generator() +np.random.mtrand.set_bit_generator(SEED_PCG64) +np.random.mtrand.get_bit_generator() diff --git a/numpy/typing/tests/data/pass/recfunctions.py b/numpy/typing/tests/data/pass/recfunctions.py new file mode 100644 index 000000000000..586f0502b366 --- /dev/null +++ b/numpy/typing/tests/data/pass/recfunctions.py @@ -0,0 +1,164 @@ +"""These tests are based on the doctests from `numpy/lib/recfunctions.py`.""" + +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt +from numpy.lib import recfunctions as rfn + + +def test_recursive_fill_fields() -> None: + a: npt.NDArray[np.void] = np.array( + [(1, 10.0), (2, 20.0)], + dtype=[("A", np.int64), ("B", np.float64)], + ) + b = np.zeros((3,), dtype=a.dtype) + out = rfn.recursive_fill_fields(a, b) + assert_type(out, np.ndarray[tuple[int], np.dtype[np.void]]) + + +def test_get_names() -> None: + names: tuple[str | Any, ...] 
+ names = rfn.get_names(np.empty((1,), dtype=[("A", int)]).dtype) + names = rfn.get_names(np.empty((1,), dtype=[("A", int), ("B", float)]).dtype) + + adtype = np.dtype([("a", int), ("b", [("b_a", int), ("b_b", int)])]) + names = rfn.get_names(adtype) + + +def test_get_names_flat() -> None: + names: tuple[str, ...] + names = rfn.get_names_flat(np.empty((1,), dtype=[("A", int)]).dtype) + names = rfn.get_names_flat(np.empty((1,), dtype=[("A", int), ("B", float)]).dtype) + + adtype = np.dtype([("a", int), ("b", [("b_a", int), ("b_b", int)])]) + names = rfn.get_names_flat(adtype) + + +def test_flatten_descr() -> None: + ndtype = np.dtype([("a", " None: + ndtype = np.dtype([ + ("A", int), + ("B", [("B_A", int), ("B_B", [("B_B_A", int), ("B_B_B", int)])]), + ]) + assert_type(rfn.get_fieldstructure(ndtype), dict[str, list[str]]) + + +def test_merge_arrays() -> None: + assert_type( + rfn.merge_arrays(( + np.ones((2,), np.int_), + np.ones((3,), np.float64), + )), + np.recarray[tuple[int], np.dtype[np.void]], + ) + + +def test_drop_fields() -> None: + ndtype = [("a", np.int64), ("b", [("b_a", np.double), ("b_b", np.int64)])] + a = np.ones((3,), dtype=ndtype) + + assert_type( + rfn.drop_fields(a, "a"), + np.ndarray[tuple[int], np.dtype[np.void]], + ) + assert_type( + rfn.drop_fields(a, "a", asrecarray=True), + np.rec.recarray[tuple[int], np.dtype[np.void]], + ) + assert_type( + rfn.rec_drop_fields(a, "a"), + np.rec.recarray[tuple[int], np.dtype[np.void]], + ) + + +def test_rename_fields() -> None: + ndtype = [("a", np.int64), ("b", [("b_a", np.double), ("b_b", np.int64)])] + a = np.ones((3,), dtype=ndtype) + + assert_type( + rfn.rename_fields(a, {"a": "A", "b_b": "B_B"}), + np.ndarray[tuple[int], np.dtype[np.void]], + ) + + +def test_repack_fields() -> None: + dt: np.dtype[np.void] = np.dtype("u1, None: + a = np.zeros(4, dtype=[("a", "i4"), ("b", "f4,u2"), ("c", "f4", 2)]) + assert_type(rfn.structured_to_unstructured(a), npt.NDArray[Any]) + + +def unstructured_to_structured() 
-> None: + dt: np.dtype[np.void] = np.dtype([("a", "i4"), ("b", "f4,u2"), ("c", "f4", 2)]) + a = np.arange(20, dtype=np.int32).reshape((4, 5)) + assert_type(rfn.unstructured_to_structured(a, dt), npt.NDArray[np.void]) + + +def test_apply_along_fields() -> None: + b = np.ones(4, dtype=[("x", "i4"), ("y", "f4"), ("z", "f8")]) + assert_type( + rfn.apply_along_fields(np.mean, b), + np.ndarray[tuple[int], np.dtype[np.void]], + ) + + +def test_assign_fields_by_name() -> None: + b = np.ones(4, dtype=[("x", "i4"), ("y", "f4"), ("z", "f8")]) + assert_type( + rfn.apply_along_fields(np.mean, b), + np.ndarray[tuple[int], np.dtype[np.void]], + ) + + +def test_require_fields() -> None: + a = np.ones(4, dtype=[("a", "i4"), ("b", "f8"), ("c", "u1")]) + assert_type( + rfn.require_fields(a, [("b", "f4"), ("c", "u1")]), + np.ndarray[tuple[int], np.dtype[np.void]], + ) + + +def test_stack_arrays() -> None: + x = np.zeros((2,), np.int32) + assert_type( + rfn.stack_arrays(x), + np.ndarray[tuple[int], np.dtype[np.int32]], + ) + + z = np.ones((2,), [("A", "|S3"), ("B", float)]) + zz = np.ones((2,), [("A", "|S3"), ("B", np.float64), ("C", np.float64)]) + assert_type( + rfn.stack_arrays((z, zz)), + np.ma.MaskedArray[tuple[Any, ...], np.dtype[np.void]], + ) + + +def test_find_duplicates() -> None: + ndtype = np.dtype([("a", int)]) + + a = np.ma.ones(7).view(ndtype) + assert_type( + rfn.find_duplicates(a), + np.ma.MaskedArray[tuple[int], np.dtype[np.void]], + ) + assert_type( + rfn.find_duplicates(a, ignoremask=True, return_index=True), + tuple[ + np.ma.MaskedArray[tuple[int], np.dtype[np.void]], + np.ndarray[tuple[int], np.dtype[np.int_]], + ], + ) diff --git a/numpy/typing/tests/data/pass/scalars.py b/numpy/typing/tests/data/pass/scalars.py index 53caf7ff817d..133c5627e6e5 100644 --- a/numpy/typing/tests/data/pass/scalars.py +++ b/numpy/typing/tests/data/pass/scalars.py @@ -1,10 +1,10 @@ -import sys import datetime as dt import pytest + import numpy as np -b = np.bool() +b = np.bool() b_ = 
np.bool_() u8 = np.uint64() i8 = np.int64() @@ -90,9 +90,18 @@ def __float__(self) -> float: np.datetime64("2019") np.datetime64(b"2019") np.datetime64("2019", "D") +np.datetime64("2019", "us") +np.datetime64("2019", "as") +np.datetime64(np.datetime64()) np.datetime64(np.datetime64()) np.datetime64(dt.datetime(2000, 5, 3)) +np.datetime64(dt.datetime(2000, 5, 3), "D") +np.datetime64(dt.datetime(2000, 5, 3), "us") +np.datetime64(dt.datetime(2000, 5, 3), "as") np.datetime64(dt.date(2000, 5, 3)) +np.datetime64(dt.date(2000, 5, 3), "D") +np.datetime64(dt.date(2000, 5, 3), "us") +np.datetime64(dt.date(2000, 5, 3), "as") np.datetime64(None) np.datetime64(None, "D") @@ -238,3 +247,16 @@ def __float__(self) -> float: c16.reshape(1) U.reshape(1) S.reshape(1) + +# Indexing scalars with any of {None, ..., tuple[()], tuple[None], tuple[...], +# tuple[None, ...]} should be valid +b[None] +i8[None] +u8[None] +f8[None] +c16[None] +c16[...] +c16[()] +c16[(None,)] +c16[(...,)] +c16[None, None] diff --git a/numpy/typing/tests/data/pass/shape.py b/numpy/typing/tests/data/pass/shape.py new file mode 100644 index 000000000000..e3b497bc0310 --- /dev/null +++ b/numpy/typing/tests/data/pass/shape.py @@ -0,0 +1,19 @@ +from typing import Any, NamedTuple + +import numpy as np + + +# Subtype of tuple[int, int] +class XYGrid(NamedTuple): + x_axis: int + y_axis: int + +# Test variance of _ShapeT_co +def accepts_2d(a: np.ndarray[tuple[int, int], Any]) -> None: + return None + + +accepts_2d(np.empty(XYGrid(2, 2))) +accepts_2d(np.zeros(XYGrid(2, 2), dtype=int)) +accepts_2d(np.ones(XYGrid(2, 2), dtype=int)) +accepts_2d(np.full(XYGrid(2, 2), fill_value=5, dtype=int)) diff --git a/numpy/typing/tests/data/pass/simple.py b/numpy/typing/tests/data/pass/simple.py index 1337bd52860a..003e9ee58bb1 100644 --- a/numpy/typing/tests/data/pass/simple.py +++ b/numpy/typing/tests/data/pass/simple.py @@ -1,9 +1,9 @@ """Simple expression that should pass with mypy.""" import operator +from collections.abc import 
Iterable import numpy as np import numpy.typing as npt -from collections.abc import Iterable # Basic checks array = np.array([1, 2]) @@ -61,7 +61,7 @@ def iterable_func(x: Iterable[object]) -> Iterable[object]: iterable_func(array) -[element for element in array] +list(array) iter(array) zip(array, array) array[1] @@ -71,8 +71,13 @@ def iterable_func(x: Iterable[object]) -> Iterable[object]: array_2d = np.ones((3, 3)) array_2d[:2, :2] -array_2d[..., 0] array_2d[:2, :2] = 0 +array_2d[..., 0] +array_2d[..., 0] = 2 +array_2d[-1, -1] = None + +array_obj = np.zeros(1, dtype=np.object_) +array_obj[0] = slice(None) # Other special methods len(array) @@ -80,8 +85,7 @@ def iterable_func(x: Iterable[object]) -> Iterable[object]: array_scalar = np.array(1) int(array_scalar) float(array_scalar) -# currently does not work due to https://github.com/python/typeshed/issues/1904 -# complex(array_scalar) +complex(array_scalar) bytes(array_scalar) operator.index(array_scalar) bool(array_scalar) @@ -161,4 +165,6 @@ def iterable_func(x: Iterable[object]) -> Iterable[object]: ~array # Other methods -np.array([1, 2]).transpose() +array.transpose() + +array @ array diff --git a/numpy/typing/tests/data/pass/simple_py3.py b/numpy/typing/tests/data/pass/simple_py3.py deleted file mode 100644 index c05a1ce612ac..000000000000 --- a/numpy/typing/tests/data/pass/simple_py3.py +++ /dev/null @@ -1,6 +0,0 @@ -import numpy as np - -array = np.array([1, 2]) - -# The @ operator is not in python 2 -array @ array diff --git a/numpy/typing/tests/data/pass/ufunclike.py b/numpy/typing/tests/data/pass/ufunclike.py index 4baa0334a404..7e556d10bef7 100644 --- a/numpy/typing/tests/data/pass/ufunclike.py +++ b/numpy/typing/tests/data/pass/ufunclike.py @@ -1,5 +1,7 @@ from __future__ import annotations -from typing import Any, Optional + +from typing import Any + import numpy as np @@ -10,11 +12,14 @@ def __ceil__(self) -> Object: def __floor__(self) -> Object: return self + def __trunc__(self) -> Object: + 
return self + def __ge__(self, value: object) -> bool: return True - def __array__(self, dtype: Optional[np.typing.DTypeLike] = None, - copy: Optional[bool] = None) -> np.ndarray[Any, np.dtype[np.object_]]: + def __array__(self, dtype: np.typing.DTypeLike | None = None, + copy: bool | None = None) -> np.ndarray[Any, np.dtype[np.object_]]: ret = np.empty((), dtype=object) ret[()] = self return ret @@ -27,12 +32,12 @@ def __array__(self, dtype: Optional[np.typing.DTypeLike] = None, AR_LIKE_O = [Object(), Object(), Object()] AR_U: np.ndarray[Any, np.dtype[np.str_]] = np.zeros(3, dtype="U5") -np.fix(AR_LIKE_b) -np.fix(AR_LIKE_u) -np.fix(AR_LIKE_i) -np.fix(AR_LIKE_f) -np.fix(AR_LIKE_O) -np.fix(AR_LIKE_f, out=AR_U) +np.fix(AR_LIKE_b) # type: ignore[deprecated] +np.fix(AR_LIKE_u) # type: ignore[deprecated] +np.fix(AR_LIKE_i) # type: ignore[deprecated] +np.fix(AR_LIKE_f) # type: ignore[deprecated] +np.fix(AR_LIKE_O) # type: ignore[deprecated] +np.fix(AR_LIKE_f, out=AR_U) # type: ignore[deprecated] np.isposinf(AR_LIKE_b) np.isposinf(AR_LIKE_u) diff --git a/numpy/typing/tests/data/reveal/arithmetic.pyi b/numpy/typing/tests/data/reveal/arithmetic.pyi index 003affe02385..68fa5b5230a6 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.pyi +++ b/numpy/typing/tests/data/reveal/arithmetic.pyi @@ -1,37 +1,44 @@ -import sys -from typing import Any +import datetime as dt +from typing import Any, assert_type import numpy as np import numpy.typing as npt -from numpy._typing import _32Bit,_64Bit, _128Bit +from numpy._typing import _64Bit, _128Bit -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +b: bool +c: complex +f: float +i: int + +c16: np.complex128 +c8: np.complex64 # Can't directly import `np.float128` as it is not available on all platforms f16: np.floating[_128Bit] +f8: np.float64 +f4: np.float32 -c16 = np.complex128() -f8 = np.float64() -i8 = np.int64() -u8 = np.uint64() +i8: np.int64 +i4: np.int32 -c8 = 
np.complex64() -f4 = np.float32() -i4 = np.int32() -u4 = np.uint32() +u8: np.uint64 +u4: np.uint32 -dt = np.datetime64(0, "D") -td = np.timedelta64(0, "D") +b_: np.bool -b_ = np.bool() +M8: np.datetime64 +M8_none: np.datetime64[None] +M8_date: np.datetime64[dt.date] +M8_time: np.datetime64[dt.datetime] +M8_int: np.datetime64[int] +date: dt.date +time: dt.datetime -b = bool() -c = complex() -f = float() -i = int() +m8: np.timedelta64 +m8_none: np.timedelta64[None] +m8_int: np.timedelta64[int] +m8_delta: np.timedelta64[dt.timedelta] +delta: dt.timedelta AR_b: npt.NDArray[np.bool] AR_u: npt.NDArray[np.uint32] @@ -41,7 +48,12 @@ AR_c: npt.NDArray[np.complex128] AR_m: npt.NDArray[np.timedelta64] AR_M: npt.NDArray[np.datetime64] AR_O: npt.NDArray[np.object_] -AR_number: npt.NDArray[np.number[Any]] +AR_S: npt.NDArray[np.bytes_] +AR_U: npt.NDArray[np.str_] +AR_T: np.ndarray[tuple[Any, ...], np.dtypes.StringDType] +AR_floating: npt.NDArray[np.floating] +AR_number: npt.NDArray[np.number] +AR_Any: npt.NDArray[Any] AR_LIKE_b: list[bool] AR_LIKE_u: list[np.uint32] @@ -54,83 +66,83 @@ AR_LIKE_O: list[np.object_] # Array subtraction -assert_type(AR_number - AR_number, npt.NDArray[np.number[Any]]) +assert_type(AR_number - AR_number, npt.NDArray[np.number]) -assert_type(AR_b - AR_LIKE_u, npt.NDArray[np.unsignedinteger[Any]]) -assert_type(AR_b - AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_b - AR_LIKE_f, npt.NDArray[np.floating[Any]]) -assert_type(AR_b - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_b - AR_LIKE_u, npt.NDArray[np.uint32]) +assert_type(AR_b - AR_LIKE_i, npt.NDArray[np.signedinteger]) +assert_type(AR_b - AR_LIKE_f, npt.NDArray[np.floating]) +assert_type(AR_b - AR_LIKE_c, npt.NDArray[np.complexfloating]) assert_type(AR_b - AR_LIKE_m, npt.NDArray[np.timedelta64]) assert_type(AR_b - AR_LIKE_O, Any) -assert_type(AR_LIKE_u - AR_b, npt.NDArray[np.unsignedinteger[Any]]) -assert_type(AR_LIKE_i - AR_b, 
npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_LIKE_f - AR_b, npt.NDArray[np.floating[Any]]) -assert_type(AR_LIKE_c - AR_b, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_LIKE_u - AR_b, npt.NDArray[np.uint32]) +assert_type(AR_LIKE_i - AR_b, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_f - AR_b, npt.NDArray[np.floating]) +assert_type(AR_LIKE_c - AR_b, npt.NDArray[np.complexfloating]) assert_type(AR_LIKE_m - AR_b, npt.NDArray[np.timedelta64]) assert_type(AR_LIKE_M - AR_b, npt.NDArray[np.datetime64]) assert_type(AR_LIKE_O - AR_b, Any) -assert_type(AR_u - AR_LIKE_b, npt.NDArray[np.unsignedinteger[Any]]) -assert_type(AR_u - AR_LIKE_u, npt.NDArray[np.unsignedinteger[Any]]) -assert_type(AR_u - AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_u - AR_LIKE_f, npt.NDArray[np.floating[Any]]) -assert_type(AR_u - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_u - AR_LIKE_b, npt.NDArray[np.uint32]) +assert_type(AR_u - AR_LIKE_u, npt.NDArray[np.unsignedinteger]) +assert_type(AR_u - AR_LIKE_i, npt.NDArray[np.signedinteger]) +assert_type(AR_u - AR_LIKE_f, npt.NDArray[np.floating]) +assert_type(AR_u - AR_LIKE_c, npt.NDArray[np.complexfloating]) assert_type(AR_u - AR_LIKE_m, npt.NDArray[np.timedelta64]) assert_type(AR_u - AR_LIKE_O, Any) -assert_type(AR_LIKE_b - AR_u, npt.NDArray[np.unsignedinteger[Any]]) -assert_type(AR_LIKE_u - AR_u, npt.NDArray[np.unsignedinteger[Any]]) -assert_type(AR_LIKE_i - AR_u, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_LIKE_f - AR_u, npt.NDArray[np.floating[Any]]) -assert_type(AR_LIKE_c - AR_u, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_LIKE_b - AR_u, npt.NDArray[np.uint32]) +assert_type(AR_LIKE_u - AR_u, npt.NDArray[np.unsignedinteger]) +assert_type(AR_LIKE_i - AR_u, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_f - AR_u, npt.NDArray[np.floating]) +assert_type(AR_LIKE_c - AR_u, npt.NDArray[np.complexfloating]) assert_type(AR_LIKE_m - AR_u, 
npt.NDArray[np.timedelta64]) assert_type(AR_LIKE_M - AR_u, npt.NDArray[np.datetime64]) assert_type(AR_LIKE_O - AR_u, Any) -assert_type(AR_i - AR_LIKE_b, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_i - AR_LIKE_u, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_i - AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_i - AR_LIKE_f, npt.NDArray[np.floating[Any]]) -assert_type(AR_i - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_i - AR_LIKE_b, npt.NDArray[np.int64]) +assert_type(AR_i - AR_LIKE_u, npt.NDArray[np.signedinteger]) +assert_type(AR_i - AR_LIKE_i, npt.NDArray[np.signedinteger]) +assert_type(AR_i - AR_LIKE_f, npt.NDArray[np.floating]) +assert_type(AR_i - AR_LIKE_c, npt.NDArray[np.complexfloating]) assert_type(AR_i - AR_LIKE_m, npt.NDArray[np.timedelta64]) assert_type(AR_i - AR_LIKE_O, Any) -assert_type(AR_LIKE_b - AR_i, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_LIKE_u - AR_i, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_LIKE_i - AR_i, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_LIKE_f - AR_i, npt.NDArray[np.floating[Any]]) -assert_type(AR_LIKE_c - AR_i, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_LIKE_b - AR_i, npt.NDArray[np.int64]) +assert_type(AR_LIKE_u - AR_i, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_i - AR_i, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_f - AR_i, npt.NDArray[np.floating]) +assert_type(AR_LIKE_c - AR_i, npt.NDArray[np.complexfloating]) assert_type(AR_LIKE_m - AR_i, npt.NDArray[np.timedelta64]) assert_type(AR_LIKE_M - AR_i, npt.NDArray[np.datetime64]) assert_type(AR_LIKE_O - AR_i, Any) -assert_type(AR_f - AR_LIKE_b, npt.NDArray[np.floating[Any]]) -assert_type(AR_f - AR_LIKE_u, npt.NDArray[np.floating[Any]]) -assert_type(AR_f - AR_LIKE_i, npt.NDArray[np.floating[Any]]) -assert_type(AR_f - AR_LIKE_f, npt.NDArray[np.floating[Any]]) -assert_type(AR_f - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_f - AR_LIKE_b, 
npt.NDArray[np.float64]) +assert_type(AR_f - AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_f - AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_f - AR_LIKE_f, npt.NDArray[np.float64]) +assert_type(AR_f - AR_LIKE_c, npt.NDArray[np.complexfloating]) assert_type(AR_f - AR_LIKE_O, Any) -assert_type(AR_LIKE_b - AR_f, npt.NDArray[np.floating[Any]]) -assert_type(AR_LIKE_u - AR_f, npt.NDArray[np.floating[Any]]) -assert_type(AR_LIKE_i - AR_f, npt.NDArray[np.floating[Any]]) -assert_type(AR_LIKE_f - AR_f, npt.NDArray[np.floating[Any]]) -assert_type(AR_LIKE_c - AR_f, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_LIKE_b - AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u - AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i - AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f - AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_c - AR_f, npt.NDArray[np.complexfloating]) assert_type(AR_LIKE_O - AR_f, Any) -assert_type(AR_c - AR_LIKE_b, npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(AR_c - AR_LIKE_u, npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(AR_c - AR_LIKE_i, npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(AR_c - AR_LIKE_f, npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(AR_c - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_c - AR_LIKE_b, npt.NDArray[np.complex128]) +assert_type(AR_c - AR_LIKE_u, npt.NDArray[np.complex128]) +assert_type(AR_c - AR_LIKE_i, npt.NDArray[np.complex128]) +assert_type(AR_c - AR_LIKE_f, npt.NDArray[np.complex128]) +assert_type(AR_c - AR_LIKE_c, npt.NDArray[np.complex128]) assert_type(AR_c - AR_LIKE_O, Any) -assert_type(AR_LIKE_b - AR_c, npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(AR_LIKE_u - AR_c, npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(AR_LIKE_i - AR_c, npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(AR_LIKE_f - AR_c, npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(AR_LIKE_c - AR_c, npt.NDArray[np.complexfloating[Any, 
Any]]) +assert_type(AR_LIKE_b - AR_c, npt.NDArray[np.complex128]) +assert_type(AR_LIKE_u - AR_c, npt.NDArray[np.complex128]) +assert_type(AR_LIKE_i - AR_c, npt.NDArray[np.complex128]) +assert_type(AR_LIKE_f - AR_c, npt.NDArray[np.complex128]) +assert_type(AR_LIKE_c - AR_c, npt.NDArray[np.complex128]) assert_type(AR_LIKE_O - AR_c, Any) assert_type(AR_m - AR_LIKE_b, npt.NDArray[np.timedelta64]) @@ -174,56 +186,142 @@ assert_type(AR_LIKE_m - AR_O, Any) assert_type(AR_LIKE_M - AR_O, Any) assert_type(AR_LIKE_O - AR_O, Any) +# Array "true" division + +assert_type(AR_f / b, npt.NDArray[np.float64]) +assert_type(AR_f / i, npt.NDArray[np.float64]) +assert_type(AR_f / f, npt.NDArray[np.float64]) + +assert_type(b / AR_f, npt.NDArray[np.float64]) +assert_type(i / AR_f, npt.NDArray[np.float64]) +assert_type(f / AR_f, npt.NDArray[np.float64]) + +assert_type(AR_b / AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_b / AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_b / AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_b / AR_LIKE_f, npt.NDArray[np.float64]) +assert_type(AR_b / AR_LIKE_O, Any) + +assert_type(AR_LIKE_b / AR_b, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u / AR_b, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i / AR_b, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f / AR_b, npt.NDArray[np.float64]) +assert_type(AR_LIKE_O / AR_b, Any) + +assert_type(AR_u / AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_u / AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_u / AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_u / AR_LIKE_f, npt.NDArray[np.float64]) +assert_type(AR_u / AR_LIKE_O, Any) + +assert_type(AR_LIKE_b / AR_u, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u / AR_u, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i / AR_u, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f / AR_u, npt.NDArray[np.float64]) +assert_type(AR_LIKE_m / AR_u, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O / AR_u, Any) + +assert_type(AR_i / AR_LIKE_b, 
npt.NDArray[np.float64]) +assert_type(AR_i / AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_i / AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_i / AR_LIKE_f, npt.NDArray[np.float64]) +assert_type(AR_i / AR_LIKE_O, Any) + +assert_type(AR_LIKE_b / AR_i, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u / AR_i, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i / AR_i, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f / AR_i, npt.NDArray[np.float64]) +assert_type(AR_LIKE_m / AR_i, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O / AR_i, Any) + +assert_type(AR_f / AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_f / AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_f / AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_f / AR_LIKE_f, npt.NDArray[np.float64]) +assert_type(AR_f / AR_LIKE_O, Any) + +assert_type(AR_LIKE_b / AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u / AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i / AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f / AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_m / AR_f, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O / AR_f, Any) + +assert_type(AR_m / AR_LIKE_u, npt.NDArray[np.timedelta64]) +assert_type(AR_m / AR_LIKE_i, npt.NDArray[np.timedelta64]) +assert_type(AR_m / AR_LIKE_f, npt.NDArray[np.timedelta64]) +assert_type(AR_m / AR_LIKE_m, npt.NDArray[np.float64]) +assert_type(AR_m / AR_LIKE_O, Any) + +assert_type(AR_LIKE_m / AR_m, npt.NDArray[np.float64]) +assert_type(AR_LIKE_O / AR_m, Any) + +assert_type(AR_O / AR_LIKE_b, Any) +assert_type(AR_O / AR_LIKE_u, Any) +assert_type(AR_O / AR_LIKE_i, Any) +assert_type(AR_O / AR_LIKE_f, Any) +assert_type(AR_O / AR_LIKE_m, Any) +assert_type(AR_O / AR_LIKE_M, Any) +assert_type(AR_O / AR_LIKE_O, Any) + +assert_type(AR_LIKE_b / AR_O, Any) +assert_type(AR_LIKE_u / AR_O, Any) +assert_type(AR_LIKE_i / AR_O, Any) +assert_type(AR_LIKE_f / AR_O, Any) +assert_type(AR_LIKE_m / AR_O, Any) +assert_type(AR_LIKE_M / AR_O, Any) +assert_type(AR_LIKE_O / 
AR_O, Any) + # Array floor division assert_type(AR_b // AR_LIKE_b, npt.NDArray[np.int8]) -assert_type(AR_b // AR_LIKE_u, npt.NDArray[np.unsignedinteger[Any]]) -assert_type(AR_b // AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_b // AR_LIKE_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_b // AR_LIKE_u, npt.NDArray[np.uint32]) +assert_type(AR_b // AR_LIKE_i, npt.NDArray[np.signedinteger]) +assert_type(AR_b // AR_LIKE_f, npt.NDArray[np.floating]) assert_type(AR_b // AR_LIKE_O, Any) assert_type(AR_LIKE_b // AR_b, npt.NDArray[np.int8]) -assert_type(AR_LIKE_u // AR_b, npt.NDArray[np.unsignedinteger[Any]]) -assert_type(AR_LIKE_i // AR_b, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_LIKE_f // AR_b, npt.NDArray[np.floating[Any]]) +assert_type(AR_LIKE_u // AR_b, npt.NDArray[np.uint32]) +assert_type(AR_LIKE_i // AR_b, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_f // AR_b, npt.NDArray[np.floating]) assert_type(AR_LIKE_O // AR_b, Any) -assert_type(AR_u // AR_LIKE_b, npt.NDArray[np.unsignedinteger[Any]]) -assert_type(AR_u // AR_LIKE_u, npt.NDArray[np.unsignedinteger[Any]]) -assert_type(AR_u // AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_u // AR_LIKE_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_u // AR_LIKE_b, npt.NDArray[np.uint32]) +assert_type(AR_u // AR_LIKE_u, npt.NDArray[np.unsignedinteger]) +assert_type(AR_u // AR_LIKE_i, npt.NDArray[np.signedinteger]) +assert_type(AR_u // AR_LIKE_f, npt.NDArray[np.floating]) assert_type(AR_u // AR_LIKE_O, Any) -assert_type(AR_LIKE_b // AR_u, npt.NDArray[np.unsignedinteger[Any]]) -assert_type(AR_LIKE_u // AR_u, npt.NDArray[np.unsignedinteger[Any]]) -assert_type(AR_LIKE_i // AR_u, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_LIKE_f // AR_u, npt.NDArray[np.floating[Any]]) +assert_type(AR_LIKE_b // AR_u, npt.NDArray[np.uint32]) +assert_type(AR_LIKE_u // AR_u, npt.NDArray[np.unsignedinteger]) +assert_type(AR_LIKE_i // AR_u, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_f // 
AR_u, npt.NDArray[np.floating]) assert_type(AR_LIKE_m // AR_u, npt.NDArray[np.timedelta64]) assert_type(AR_LIKE_O // AR_u, Any) -assert_type(AR_i // AR_LIKE_b, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_i // AR_LIKE_u, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_i // AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_i // AR_LIKE_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_i // AR_LIKE_b, npt.NDArray[np.int64]) +assert_type(AR_i // AR_LIKE_u, npt.NDArray[np.signedinteger]) +assert_type(AR_i // AR_LIKE_i, npt.NDArray[np.signedinteger]) +assert_type(AR_i // AR_LIKE_f, npt.NDArray[np.floating]) assert_type(AR_i // AR_LIKE_O, Any) -assert_type(AR_LIKE_b // AR_i, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_LIKE_u // AR_i, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_LIKE_i // AR_i, npt.NDArray[np.signedinteger[Any]]) -assert_type(AR_LIKE_f // AR_i, npt.NDArray[np.floating[Any]]) +assert_type(AR_LIKE_b // AR_i, npt.NDArray[np.int64]) +assert_type(AR_LIKE_u // AR_i, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_i // AR_i, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_f // AR_i, npt.NDArray[np.floating]) assert_type(AR_LIKE_m // AR_i, npt.NDArray[np.timedelta64]) assert_type(AR_LIKE_O // AR_i, Any) -assert_type(AR_f // AR_LIKE_b, npt.NDArray[np.floating[Any]]) -assert_type(AR_f // AR_LIKE_u, npt.NDArray[np.floating[Any]]) -assert_type(AR_f // AR_LIKE_i, npt.NDArray[np.floating[Any]]) -assert_type(AR_f // AR_LIKE_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_f // AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_f // AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_f // AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_f // AR_LIKE_f, npt.NDArray[np.float64]) assert_type(AR_f // AR_LIKE_O, Any) -assert_type(AR_LIKE_b // AR_f, npt.NDArray[np.floating[Any]]) -assert_type(AR_LIKE_u // AR_f, npt.NDArray[np.floating[Any]]) -assert_type(AR_LIKE_i // AR_f, npt.NDArray[np.floating[Any]]) -assert_type(AR_LIKE_f 
// AR_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_LIKE_b // AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u // AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i // AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f // AR_f, npt.NDArray[np.float64]) assert_type(AR_LIKE_m // AR_f, npt.NDArray[np.timedelta64]) assert_type(AR_LIKE_O // AR_f, Any) @@ -263,7 +361,10 @@ assert_type(-i8, np.int64) assert_type(-i4, np.int32) assert_type(-u8, np.uint64) assert_type(-u4, np.uint32) -assert_type(-td, np.timedelta64) +assert_type(-m8, np.timedelta64) +assert_type(-m8_none, np.timedelta64[None]) +assert_type(-m8_int, np.timedelta64[int]) +assert_type(-m8_delta, np.timedelta64[dt.timedelta]) assert_type(-AR_f, npt.NDArray[np.float64]) assert_type(+f16, np.floating[_128Bit]) @@ -275,7 +376,9 @@ assert_type(+i8, np.int64) assert_type(+i4, np.int32) assert_type(+u8, np.uint64) assert_type(+u4, np.uint32) -assert_type(+td, np.timedelta64) +assert_type(+m8_none, np.timedelta64[None]) +assert_type(+m8_int, np.timedelta64[int]) +assert_type(+m8_delta, np.timedelta64[dt.timedelta]) assert_type(+AR_f, npt.NDArray[np.float64]) assert_type(abs(f16), np.floating[_128Bit]) @@ -287,33 +390,74 @@ assert_type(abs(i8), np.int64) assert_type(abs(i4), np.int32) assert_type(abs(u8), np.uint64) assert_type(abs(u4), np.uint32) -assert_type(abs(td), np.timedelta64) +assert_type(abs(m8), np.timedelta64) +assert_type(abs(m8_none), np.timedelta64[None]) +assert_type(abs(m8_int), np.timedelta64[int]) +assert_type(abs(m8_delta), np.timedelta64[dt.timedelta]) assert_type(abs(b_), np.bool) +assert_type(abs(AR_O), npt.NDArray[np.object_]) # Time structures -assert_type(dt + td, np.datetime64) -assert_type(dt + i, np.datetime64) -assert_type(dt + i4, np.datetime64) -assert_type(dt + i8, np.datetime64) -assert_type(dt - dt, np.timedelta64) -assert_type(dt - i, np.datetime64) -assert_type(dt - i4, np.datetime64) -assert_type(dt - i8, np.datetime64) - -assert_type(td + td, np.timedelta64) 
-assert_type(td + i, np.timedelta64) -assert_type(td + i4, np.timedelta64) -assert_type(td + i8, np.timedelta64) -assert_type(td - td, np.timedelta64) -assert_type(td - i, np.timedelta64) -assert_type(td - i4, np.timedelta64) -assert_type(td - i8, np.timedelta64) -assert_type(td / f, np.timedelta64) -assert_type(td / f4, np.timedelta64) -assert_type(td / f8, np.timedelta64) -assert_type(td / td, np.float64) -assert_type(td // td, np.int64) +assert_type(M8 + m8, np.datetime64) +assert_type(M8 + i, np.datetime64) +assert_type(M8 + i8, np.datetime64) +assert_type(M8 - M8, np.timedelta64) +assert_type(M8 - i, np.datetime64) +assert_type(M8 - i8, np.datetime64) + +assert_type(M8_none + i, np.datetime64[None]) +assert_type(M8_none - i, np.datetime64[None]) + +assert_type(M8_none + i8, np.datetime64[None]) +assert_type(M8_none - i8, np.datetime64[None]) + +# NOTE: Mypy incorrectly infers `timedelta64[Any]`, but pyright behaves correctly. +assert_type(M8_none + m8, np.datetime64[None]) # type: ignore[assert-type] +assert_type(M8_none - M8, np.timedelta64[None]) # type: ignore[assert-type] +# NOTE: Mypy incorrectly infers `datetime64[Any]`, but pyright behaves correctly. +assert_type(M8_none - m8, np.datetime64[None]) # type: ignore[assert-type] + +assert_type(m8 + m8, np.timedelta64) +assert_type(m8 + i, np.timedelta64) +assert_type(m8 + i8, np.timedelta64) +assert_type(m8 - m8, np.timedelta64) +assert_type(m8 - i, np.timedelta64) +assert_type(m8 - i8, np.timedelta64) +assert_type(m8 * f, np.timedelta64) +assert_type(m8 * f4, np.timedelta64) +assert_type(m8 * np.True_, np.timedelta64) +assert_type(m8 / f, np.timedelta64) +assert_type(m8 / f4, np.timedelta64) +assert_type(m8 / m8, np.float64) +assert_type(m8 // m8, np.int64) +assert_type(m8 % m8, np.timedelta64) +# NOTE: Mypy incorrectly infers `tuple[Any, ...]`, but pyright behaves correctly. 
+assert_type(divmod(m8, m8), tuple[np.int64, np.timedelta64]) # type: ignore[assert-type] + +assert_type(m8_none + m8, np.timedelta64[None]) +assert_type(m8_none + i, np.timedelta64[None]) +assert_type(m8_none + i8, np.timedelta64[None]) +assert_type(m8_none - i, np.timedelta64[None]) +assert_type(m8_none - i8, np.timedelta64[None]) + +assert_type(m8_int + i, np.timedelta64[int]) +assert_type(m8_int + m8_delta, np.timedelta64[int]) +assert_type(m8_int + m8, np.timedelta64) +assert_type(m8_int - i, np.timedelta64[int]) +assert_type(m8_int - m8_delta, np.timedelta64[int]) +assert_type(m8_int - m8_int, np.timedelta64[int]) +assert_type(m8_int - m8_none, np.timedelta64[None]) +assert_type(m8_int - m8, np.timedelta64) + +assert_type(m8_delta + date, dt.date) +assert_type(m8_delta + time, dt.datetime) +assert_type(m8_delta + delta, dt.timedelta) +assert_type(m8_delta - delta, dt.timedelta) +assert_type(m8_delta / delta, float) +assert_type(m8_delta // delta, int) +assert_type(m8_delta % delta, dt.timedelta) +assert_type(divmod(m8_delta, delta), tuple[int, dt.timedelta]) # boolean @@ -349,168 +493,234 @@ assert_type(c8 / b_, np.complex64) # Complex -assert_type(c16 + f16, np.complexfloating[_64Bit | _128Bit, _64Bit | _128Bit]) +assert_type(c16 + f16, np.complexfloating) assert_type(c16 + c16, np.complex128) assert_type(c16 + f8, np.complex128) assert_type(c16 + i8, np.complex128) -assert_type(c16 + c8, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) -assert_type(c16 + f4, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) -assert_type(c16 + i4, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) +assert_type(c16 + c8, np.complex128) +assert_type(c16 + f4, np.complex128) +assert_type(c16 + i4, np.complex128) assert_type(c16 + b_, np.complex128) assert_type(c16 + b, np.complex128) assert_type(c16 + c, np.complex128) assert_type(c16 + f, np.complex128) -assert_type(c16 + AR_f, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(c16 + AR_f, 
npt.NDArray[np.complex128]) -assert_type(f16 + c16, np.complexfloating[_64Bit | _128Bit, _64Bit | _128Bit]) +assert_type(f16 + c16, np.complexfloating) assert_type(c16 + c16, np.complex128) assert_type(f8 + c16, np.complex128) assert_type(i8 + c16, np.complex128) -assert_type(c8 + c16, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) -assert_type(f4 + c16, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) -assert_type(i4 + c16, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) +assert_type(c8 + c16, np.complex128 | np.complex64) +assert_type(f4 + c16, np.complexfloating) +assert_type(i4 + c16, np.complex128) assert_type(b_ + c16, np.complex128) assert_type(b + c16, np.complex128) assert_type(c + c16, np.complex128) assert_type(f + c16, np.complex128) -assert_type(AR_f + c16, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_f + c16, npt.NDArray[np.complex128]) -assert_type(c8 + f16, np.complexfloating[_32Bit | _128Bit, _32Bit | _128Bit]) -assert_type(c8 + c16, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) -assert_type(c8 + f8, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) -assert_type(c8 + i8, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) +assert_type(c8 + f16, np.complex64 | np.complexfloating[_128Bit, _128Bit]) +assert_type(c8 + c16, np.complex64 | np.complex128) +assert_type(c8 + f8, np.complex64 | np.complex128) +assert_type(c8 + i8, np.complex64 | np.complexfloating[_64Bit, _64Bit]) assert_type(c8 + c8, np.complex64) assert_type(c8 + f4, np.complex64) assert_type(c8 + i4, np.complex64) assert_type(c8 + b_, np.complex64) assert_type(c8 + b, np.complex64) -assert_type(c8 + c, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) -assert_type(c8 + f, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) -assert_type(c8 + AR_f, npt.NDArray[np.complexfloating[Any, Any]]) - -assert_type(f16 + c8, np.complexfloating[_32Bit | _128Bit, _32Bit | _128Bit]) -assert_type(c16 + c8, np.complexfloating[_32Bit | _64Bit, _32Bit | 
_64Bit]) -assert_type(f8 + c8, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) -assert_type(i8 + c8, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) +assert_type(c8 + c, np.complex64 | np.complex128) +assert_type(c8 + f, np.complex64 | np.complex128) +assert_type(c8 + AR_f, npt.NDArray[np.complexfloating]) + +assert_type(f16 + c8, np.complexfloating[_128Bit, _128Bit] | np.complex64) +assert_type(c16 + c8, np.complex128) +assert_type(f8 + c8, np.complexfloating[_64Bit, _64Bit]) +assert_type(i8 + c8, np.complexfloating[_64Bit, _64Bit] | np.complex64) assert_type(c8 + c8, np.complex64) assert_type(f4 + c8, np.complex64) assert_type(i4 + c8, np.complex64) assert_type(b_ + c8, np.complex64) assert_type(b + c8, np.complex64) -assert_type(c + c8, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) -assert_type(f + c8, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) -assert_type(AR_f + c8, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(c + c8, np.complex64 | np.complex128) +assert_type(f + c8, np.complex64 | np.complex128) +assert_type(AR_f + c8, npt.NDArray[np.complexfloating]) # Float -assert_type(f8 + f16, np.floating[_64Bit | _128Bit]) +assert_type(f8 + f16, np.floating) assert_type(f8 + f8, np.float64) assert_type(f8 + i8, np.float64) -assert_type(f8 + f4, np.floating[_32Bit | _64Bit]) -assert_type(f8 + i4, np.floating[_32Bit | _64Bit]) +assert_type(f8 + f4, np.float64) +assert_type(f8 + i4, np.float64) assert_type(f8 + b_, np.float64) assert_type(f8 + b, np.float64) -assert_type(f8 + c, np.complex128) +assert_type(f8 + c, np.float64 | np.complex128) assert_type(f8 + f, np.float64) -assert_type(f8 + AR_f, npt.NDArray[np.floating[Any]]) +assert_type(f8 + AR_f, npt.NDArray[np.float64]) -assert_type(f16 + f8, np.floating[_64Bit | _128Bit]) +assert_type(f16 + f8, np.floating) assert_type(f8 + f8, np.float64) assert_type(i8 + f8, np.float64) -assert_type(f4 + f8, np.floating[_32Bit | _64Bit]) -assert_type(i4 + f8, np.floating[_32Bit | _64Bit]) 
+assert_type(f4 + f8, np.floating) +assert_type(i4 + f8, np.float64) assert_type(b_ + f8, np.float64) assert_type(b + f8, np.float64) -assert_type(c + f8, np.complex128) +assert_type(c + f8, np.complex128 | np.float64) assert_type(f + f8, np.float64) -assert_type(AR_f + f8, npt.NDArray[np.floating[Any]]) +assert_type(AR_f + f8, npt.NDArray[np.float64]) -assert_type(f4 + f16, np.floating[_32Bit | _128Bit]) -assert_type(f4 + f8, np.floating[_32Bit | _64Bit]) -assert_type(f4 + i8, np.floating[_32Bit | _64Bit]) +assert_type(f4 + f16, np.floating) +assert_type(f4 + f8, np.floating) +assert_type(f4 + i8, np.floating) assert_type(f4 + f4, np.float32) -assert_type(f4 + i4, np.float32) +assert_type(f4 + i4, np.floating) assert_type(f4 + b_, np.float32) assert_type(f4 + b, np.float32) -assert_type(f4 + c, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) -assert_type(f4 + f, np.floating[_32Bit | _64Bit]) -assert_type(f4 + AR_f, npt.NDArray[np.floating[Any]]) +assert_type(f4 + c, np.complexfloating) +assert_type(f4 + f, np.float32) +assert_type(f4 + AR_f, npt.NDArray[np.float64]) -assert_type(f16 + f4, np.floating[_32Bit | _128Bit]) -assert_type(f8 + f4, np.floating[_32Bit | _64Bit]) -assert_type(i8 + f4, np.floating[_32Bit | _64Bit]) +assert_type(f16 + f4, np.floating) +assert_type(f8 + f4, np.float64) +assert_type(i8 + f4, np.floating) assert_type(f4 + f4, np.float32) -assert_type(i4 + f4, np.float32) +assert_type(i4 + f4, np.floating) assert_type(b_ + f4, np.float32) assert_type(b + f4, np.float32) -assert_type(c + f4, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) -assert_type(f + f4, np.floating[_32Bit | _64Bit]) -assert_type(AR_f + f4, npt.NDArray[np.floating[Any]]) +assert_type(c + f4, np.complexfloating) +assert_type(f + f4, np.float32) +assert_type(AR_f + f4, npt.NDArray[np.float64]) # Int assert_type(i8 + i8, np.int64) assert_type(i8 + u8, Any) -assert_type(i8 + i4, np.signedinteger[_32Bit | _64Bit]) +assert_type(i8 + i4, np.signedinteger) assert_type(i8 
+ u4, Any) assert_type(i8 + b_, np.int64) assert_type(i8 + b, np.int64) assert_type(i8 + c, np.complex128) assert_type(i8 + f, np.float64) -assert_type(i8 + AR_f, npt.NDArray[np.floating[Any]]) +assert_type(i8 + AR_f, npt.NDArray[np.float64]) assert_type(u8 + u8, np.uint64) assert_type(u8 + i4, Any) -assert_type(u8 + u4, np.unsignedinteger[_32Bit | _64Bit]) +assert_type(u8 + u4, np.unsignedinteger) assert_type(u8 + b_, np.uint64) assert_type(u8 + b, np.uint64) assert_type(u8 + c, np.complex128) assert_type(u8 + f, np.float64) -assert_type(u8 + AR_f, npt.NDArray[np.floating[Any]]) +assert_type(u8 + AR_f, npt.NDArray[np.float64]) assert_type(i8 + i8, np.int64) assert_type(u8 + i8, Any) -assert_type(i4 + i8, np.signedinteger[_32Bit | _64Bit]) +assert_type(i4 + i8, np.signedinteger) assert_type(u4 + i8, Any) assert_type(b_ + i8, np.int64) assert_type(b + i8, np.int64) assert_type(c + i8, np.complex128) assert_type(f + i8, np.float64) -assert_type(AR_f + i8, npt.NDArray[np.floating[Any]]) +assert_type(AR_f + i8, npt.NDArray[np.float64]) assert_type(u8 + u8, np.uint64) assert_type(i4 + u8, Any) -assert_type(u4 + u8, np.unsignedinteger[_32Bit | _64Bit]) +assert_type(u4 + u8, np.unsignedinteger) assert_type(b_ + u8, np.uint64) assert_type(b + u8, np.uint64) assert_type(c + u8, np.complex128) assert_type(f + u8, np.float64) -assert_type(AR_f + u8, npt.NDArray[np.floating[Any]]) +assert_type(AR_f + u8, npt.NDArray[np.float64]) -assert_type(i4 + i8, np.signedinteger[_32Bit | _64Bit]) +assert_type(i4 + i8, np.signedinteger) assert_type(i4 + i4, np.int32) assert_type(i4 + b_, np.int32) assert_type(i4 + b, np.int32) -assert_type(i4 + AR_f, npt.NDArray[np.floating[Any]]) +assert_type(i4 + AR_f, npt.NDArray[np.float64]) assert_type(u4 + i8, Any) assert_type(u4 + i4, Any) -assert_type(u4 + u8, np.unsignedinteger[_32Bit | _64Bit]) +assert_type(u4 + u8, np.unsignedinteger) assert_type(u4 + u4, np.uint32) assert_type(u4 + b_, np.uint32) assert_type(u4 + b, np.uint32) -assert_type(u4 + 
AR_f, npt.NDArray[np.floating[Any]]) +assert_type(u4 + AR_f, npt.NDArray[np.float64]) -assert_type(i8 + i4, np.signedinteger[_32Bit | _64Bit]) +assert_type(i8 + i4, np.signedinteger) assert_type(i4 + i4, np.int32) assert_type(b_ + i4, np.int32) assert_type(b + i4, np.int32) -assert_type(AR_f + i4, npt.NDArray[np.floating[Any]]) +assert_type(AR_f + i4, npt.NDArray[np.float64]) assert_type(i8 + u4, Any) assert_type(i4 + u4, Any) -assert_type(u8 + u4, np.unsignedinteger[_32Bit | _64Bit]) +assert_type(u8 + u4, np.unsignedinteger) assert_type(u4 + u4, np.uint32) assert_type(b_ + u4, np.uint32) assert_type(b + u4, np.uint32) -assert_type(AR_f + u4, npt.NDArray[np.floating[Any]]) +assert_type(AR_f + u4, npt.NDArray[np.float64]) + +# Any + +assert_type(AR_Any + 2, npt.NDArray[Any]) + +# regression tests for https://github.com/numpy/numpy/issues/28805 + +assert_type(AR_floating + f, npt.NDArray[np.floating]) +assert_type(AR_floating - f, npt.NDArray[np.floating]) +assert_type(AR_floating * f, npt.NDArray[np.floating]) +assert_type(AR_floating ** f, npt.NDArray[np.floating]) +assert_type(AR_floating / f, npt.NDArray[np.floating]) +assert_type(AR_floating // f, npt.NDArray[np.floating]) +assert_type(AR_floating % f, npt.NDArray[np.floating]) +assert_type(divmod(AR_floating, f), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) + +assert_type(f + AR_floating, npt.NDArray[np.floating]) +assert_type(f - AR_floating, npt.NDArray[np.floating]) +assert_type(f * AR_floating, npt.NDArray[np.floating]) +assert_type(f ** AR_floating, npt.NDArray[np.floating]) +assert_type(f / AR_floating, npt.NDArray[np.floating]) +assert_type(f // AR_floating, npt.NDArray[np.floating]) +assert_type(f % AR_floating, npt.NDArray[np.floating]) +assert_type(divmod(f, AR_floating), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) + +# character-like + +assert_type(AR_S + b"", npt.NDArray[np.bytes_]) +assert_type(AR_S + [b""], npt.NDArray[np.bytes_]) +assert_type([b""] + AR_S, 
npt.NDArray[np.bytes_]) +assert_type(AR_S + AR_S, npt.NDArray[np.bytes_]) + +assert_type(AR_U + "", npt.NDArray[np.str_]) +assert_type(AR_U + [""], npt.NDArray[np.str_]) +assert_type("" + AR_U, npt.NDArray[np.str_]) +assert_type([""] + AR_U, npt.NDArray[np.str_]) +assert_type(AR_U + AR_U, npt.NDArray[np.str_]) + +assert_type(AR_T + "", np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_T + [""], np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type("" + AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type([""] + AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_T + AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_T + AR_U, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_U + AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) + +assert_type(AR_S * i, np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) +assert_type(AR_S * AR_LIKE_i, np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) +assert_type(AR_S * AR_i, np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) +assert_type(i * AR_S, np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) +# mypy incorrectly infers `AR_LIKE_i * AR_S` as `list[int]` +assert_type(AR_i * AR_S, np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) + +assert_type(AR_U * i, np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) +assert_type(AR_U * AR_LIKE_i, np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) +assert_type(AR_U * AR_i, np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) +assert_type(i * AR_U, np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) +# mypy incorrectly infers `AR_LIKE_i * AR_U` as `list[int]` +assert_type(AR_i * AR_U, np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) + +assert_type(AR_T * i, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_T * AR_LIKE_i, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_T * AR_i, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(i * AR_T, 
np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +# mypy incorrectly infers `AR_LIKE_i * AR_T` as `list[int]` +assert_type(AR_i * AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) diff --git a/numpy/typing/tests/data/reveal/array_api_info.pyi b/numpy/typing/tests/data/reveal/array_api_info.pyi index 056547681366..765f9eff5168 100644 --- a/numpy/typing/tests/data/reveal/array_api_info.pyi +++ b/numpy/typing/tests/data/reveal/array_api_info.pyi @@ -1,18 +1,70 @@ -import sys -from typing import List +from typing import Literal, Never, assert_type import numpy as np -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +info = np.__array_namespace_info__() -array_namespace_info = np.__array_namespace_info__() +assert_type(info.__module__, Literal["numpy"]) -assert_type(array_namespace_info.__module__, str) -assert_type(array_namespace_info.capabilities(), np._array_api_info.Capabilities) -assert_type(array_namespace_info.default_device(), str) -assert_type(array_namespace_info.default_dtypes(), np._array_api_info.DefaultDataTypes) -assert_type(array_namespace_info.dtypes(), np._array_api_info.DataTypes) -assert_type(array_namespace_info.devices(), List[str]) +assert_type(info.default_device(), Literal["cpu"]) +assert_type(info.devices()[0], Literal["cpu"]) +assert_type(info.devices()[-1], Literal["cpu"]) + +assert_type(info.capabilities()["boolean indexing"], Literal[True]) +assert_type(info.capabilities()["data-dependent shapes"], Literal[True]) + +assert_type(info.default_dtypes()["real floating"], np.dtype[np.float64]) +assert_type(info.default_dtypes()["complex floating"], np.dtype[np.complex128]) +assert_type(info.default_dtypes()["integral"], np.dtype[np.int_]) +assert_type(info.default_dtypes()["indexing"], np.dtype[np.intp]) + +assert_type(info.dtypes()["bool"], np.dtype[np.bool]) +assert_type(info.dtypes()["int8"], np.dtype[np.int8]) +assert_type(info.dtypes()["uint8"], 
np.dtype[np.uint8]) +assert_type(info.dtypes()["float32"], np.dtype[np.float32]) +assert_type(info.dtypes()["complex64"], np.dtype[np.complex64]) + +assert_type(info.dtypes(kind="bool")["bool"], np.dtype[np.bool]) +assert_type(info.dtypes(kind="signed integer")["int64"], np.dtype[np.int64]) +assert_type(info.dtypes(kind="unsigned integer")["uint64"], np.dtype[np.uint64]) +assert_type(info.dtypes(kind="integral")["int32"], np.dtype[np.int32]) +assert_type(info.dtypes(kind="integral")["uint32"], np.dtype[np.uint32]) +assert_type(info.dtypes(kind="real floating")["float64"], np.dtype[np.float64]) +assert_type(info.dtypes(kind="complex floating")["complex128"], np.dtype[np.complex128]) +assert_type(info.dtypes(kind="numeric")["int16"], np.dtype[np.int16]) +assert_type(info.dtypes(kind="numeric")["uint16"], np.dtype[np.uint16]) +assert_type(info.dtypes(kind="numeric")["float64"], np.dtype[np.float64]) +assert_type(info.dtypes(kind="numeric")["complex128"], np.dtype[np.complex128]) + +assert_type(info.dtypes(kind=()), dict[Never, Never]) + +assert_type(info.dtypes(kind=("bool",))["bool"], np.dtype[np.bool]) +assert_type(info.dtypes(kind=("signed integer",))["int64"], np.dtype[np.int64]) +assert_type(info.dtypes(kind=("integral",))["uint32"], np.dtype[np.uint32]) +assert_type(info.dtypes(kind=("complex floating",))["complex128"], np.dtype[np.complex128]) +assert_type(info.dtypes(kind=("numeric",))["float64"], np.dtype[np.float64]) + +assert_type( + info.dtypes(kind=("signed integer", "unsigned integer"))["int8"], + np.dtype[np.int8], +) +assert_type( + info.dtypes(kind=("signed integer", "unsigned integer"))["uint8"], + np.dtype[np.uint8], +) +assert_type( + info.dtypes(kind=("integral", "real floating", "complex floating"))["int16"], + np.dtype[np.int16], +) +assert_type( + info.dtypes(kind=("integral", "real floating", "complex floating"))["uint16"], + np.dtype[np.uint16], +) +assert_type( + info.dtypes(kind=("integral", "real floating", "complex floating"))["float32"], 
+ np.dtype[np.float32], +) +assert_type( + info.dtypes(kind=("integral", "real floating", "complex floating"))["complex64"], + np.dtype[np.complex64], +) diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index 814da1b9d639..ffe0834e3309 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -1,25 +1,27 @@ -import sys -from typing import Any, TypeVar -from pathlib import Path from collections import deque +from pathlib import Path +from typing import Any, assert_type import numpy as np import numpy.typing as npt +from numpy._typing import _AnyShape -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] -_SCT = TypeVar("_SCT", bound=np.generic, covariant=True) +class SubClass[ScalarT: np.generic](np.ndarray[_AnyShape, np.dtype[ScalarT]]): ... -class SubClass(npt.NDArray[_SCT]): ... +class IntoSubClass[ScalarT: np.generic]: + def __array__(self) -> SubClass[ScalarT]: ... i8: np.int64 A: npt.NDArray[np.float64] B: SubClass[np.float64] C: list[int] +D: SubClass[np.float64 | np.int64] +E: IntoSubClass[np.float64 | np.int64] + +mixed_shape: tuple[int, np.int64] def func(i: int, j: int, **kwargs: Any) -> SubClass[np.float64]: ... 
@@ -27,94 +29,111 @@ assert_type(np.empty_like(A), npt.NDArray[np.float64]) assert_type(np.empty_like(B), SubClass[np.float64]) assert_type(np.empty_like([1, 1.0]), npt.NDArray[Any]) assert_type(np.empty_like(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.empty_like(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.empty_like(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.array(A), npt.NDArray[np.float64]) assert_type(np.array(B), npt.NDArray[np.float64]) -assert_type(np.array(B, subok=True), SubClass[np.float64]) assert_type(np.array([1, 1.0]), npt.NDArray[Any]) assert_type(np.array(deque([1, 2, 3])), npt.NDArray[Any]) assert_type(np.array(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.array(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.array(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.array(A, like=A), npt.NDArray[np.float64]) +assert_type(np.array(A, subok=True), npt.NDArray[np.float64]) +assert_type(np.array(B, subok=True), SubClass[np.float64]) +assert_type(np.array(B, subok=True, ndmin=0), SubClass[np.float64]) +assert_type(np.array(B, subok=True, ndmin=1), SubClass[np.float64]) +assert_type(np.array(D), npt.NDArray[np.float64 | np.int64]) +assert_type(np.array(E, subok=True), SubClass[np.float64 | np.int64]) +# https://github.com/numpy/numpy/issues/29245 +assert_type(np.array([], dtype=np.bool), npt.NDArray[np.bool]) assert_type(np.zeros([1, 5, 6]), npt.NDArray[np.float64]) assert_type(np.zeros([1, 5, 6], dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.zeros([1, 5, 6], dtype='c16'), npt.NDArray[Any]) +assert_type(np.zeros([1, 5, 6], dtype="c16"), npt.NDArray[Any]) +assert_type(np.zeros(mixed_shape), npt.NDArray[np.float64]) assert_type(np.empty([1, 5, 6]), npt.NDArray[np.float64]) assert_type(np.empty([1, 5, 6], dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.empty([1, 5, 6], dtype='c16'), npt.NDArray[Any]) +assert_type(np.empty([1, 5, 6], dtype="c16"), npt.NDArray[Any]) 
+assert_type(np.empty(mixed_shape), npt.NDArray[np.float64]) assert_type(np.concatenate(A), npt.NDArray[np.float64]) -assert_type(np.concatenate([A, A]), Any) +assert_type(np.concatenate([A, A]), npt.NDArray[Any]) # pyright correctly infers this as NDArray[float64] assert_type(np.concatenate([[1], A]), npt.NDArray[Any]) assert_type(np.concatenate([[1], [1]]), npt.NDArray[Any]) assert_type(np.concatenate((A, A)), npt.NDArray[np.float64]) assert_type(np.concatenate(([1], [1])), npt.NDArray[Any]) assert_type(np.concatenate([1, 1.0]), npt.NDArray[Any]) assert_type(np.concatenate(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.concatenate(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.concatenate(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.concatenate([1, 1.0], out=A), npt.NDArray[np.float64]) assert_type(np.asarray(A), npt.NDArray[np.float64]) assert_type(np.asarray(B), npt.NDArray[np.float64]) assert_type(np.asarray([1, 1.0]), npt.NDArray[Any]) assert_type(np.asarray(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.asarray(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.asarray(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.asanyarray(A), npt.NDArray[np.float64]) assert_type(np.asanyarray(B), SubClass[np.float64]) assert_type(np.asanyarray([1, 1.0]), npt.NDArray[Any]) assert_type(np.asanyarray(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.asanyarray(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.asanyarray(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.ascontiguousarray(A), npt.NDArray[np.float64]) assert_type(np.ascontiguousarray(B), npt.NDArray[np.float64]) assert_type(np.ascontiguousarray([1, 1.0]), npt.NDArray[Any]) assert_type(np.ascontiguousarray(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.ascontiguousarray(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.ascontiguousarray(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.asfortranarray(A), npt.NDArray[np.float64]) 
assert_type(np.asfortranarray(B), npt.NDArray[np.float64]) assert_type(np.asfortranarray([1, 1.0]), npt.NDArray[Any]) assert_type(np.asfortranarray(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.asfortranarray(A, dtype='c16'), npt.NDArray[Any]) - -assert_type(np.fromstring("1 1 1", sep=" "), npt.NDArray[np.float64]) -assert_type(np.fromstring(b"1 1 1", sep=" "), npt.NDArray[np.float64]) -assert_type(np.fromstring("1 1 1", dtype=np.int64, sep=" "), npt.NDArray[np.int64]) -assert_type(np.fromstring(b"1 1 1", dtype=np.int64, sep=" "), npt.NDArray[np.int64]) -assert_type(np.fromstring("1 1 1", dtype="c16", sep=" "), npt.NDArray[Any]) -assert_type(np.fromstring(b"1 1 1", dtype="c16", sep=" "), npt.NDArray[Any]) - -assert_type(np.fromfile("test.txt", sep=" "), npt.NDArray[np.float64]) -assert_type(np.fromfile("test.txt", dtype=np.int64, sep=" "), npt.NDArray[np.int64]) -assert_type(np.fromfile("test.txt", dtype="c16", sep=" "), npt.NDArray[Any]) +assert_type(np.asfortranarray(A, dtype="c16"), npt.NDArray[Any]) + +assert_type(np.fromstring("1 1 1", sep=" "), _Array1D[np.float64]) +assert_type(np.fromstring(b"1 1 1", sep=" "), _Array1D[np.float64]) +assert_type(np.fromstring("1 1 1", dtype=np.int64, sep=" "), _Array1D[np.int64]) +assert_type(np.fromstring(b"1 1 1", dtype=np.int64, sep=" "), _Array1D[np.int64]) +assert_type(np.fromstring("1 1 1", dtype="c16", sep=" "), _Array1D[Any]) +assert_type(np.fromstring(b"1 1 1", dtype="c16", sep=" "), _Array1D[Any]) + +assert_type(np.fromfile("test.txt", sep=" "), _Array1D[np.float64]) +assert_type(np.fromfile("test.txt", dtype=np.int64, sep=" "), _Array1D[np.int64]) +assert_type(np.fromfile("test.txt", dtype="c16", sep=" "), _Array1D[Any]) with open("test.txt") as f: - assert_type(np.fromfile(f, sep=" "), npt.NDArray[np.float64]) - assert_type(np.fromfile(b"test.txt", sep=" "), npt.NDArray[np.float64]) - assert_type(np.fromfile(Path("test.txt"), sep=" "), npt.NDArray[np.float64]) + assert_type(np.fromfile(f, sep=" "), 
_Array1D[np.float64]) + assert_type(np.fromfile(b"test.txt", sep=" "), _Array1D[np.float64]) + assert_type(np.fromfile(Path("test.txt"), sep=" "), _Array1D[np.float64]) assert_type(np.fromiter("12345", np.float64), npt.NDArray[np.float64]) assert_type(np.fromiter("12345", float), npt.NDArray[Any]) -assert_type(np.frombuffer(A), npt.NDArray[np.float64]) -assert_type(np.frombuffer(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.frombuffer(A, dtype="c16"), npt.NDArray[Any]) - -assert_type(np.arange(False, True), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.arange(10), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.arange(0, 10, step=2), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.arange(10.0), npt.NDArray[np.floating[Any]]) -assert_type(np.arange(start=0, stop=10.0), npt.NDArray[np.floating[Any]]) -assert_type(np.arange(np.timedelta64(0)), npt.NDArray[np.timedelta64]) -assert_type(np.arange(0, np.timedelta64(10)), npt.NDArray[np.timedelta64]) -assert_type(np.arange(np.datetime64("0"), np.datetime64("10")), npt.NDArray[np.datetime64]) -assert_type(np.arange(10, dtype=np.float64), npt.NDArray[np.float64]) -assert_type(np.arange(0, 10, step=2, dtype=np.int16), npt.NDArray[np.int16]) -assert_type(np.arange(10, dtype=int), npt.NDArray[Any]) -assert_type(np.arange(0, 10, dtype="f8"), npt.NDArray[Any]) +assert_type(np.frombuffer(A), _Array1D[np.float64]) +assert_type(np.frombuffer(A, dtype=np.int64), _Array1D[np.int64]) +assert_type(np.frombuffer(A, dtype="c16"), _Array1D[Any]) + +_x_bool: bool +_x_int: int +_x_float: float +_x_timedelta: np.timedelta64[int] +_x_datetime: np.datetime64[int] + +assert_type(np.arange(False, True), _Array1D[np.int_]) +assert_type(np.arange(10), _Array1D[np.int_]) +assert_type(np.arange(0, 10, step=2), _Array1D[np.int_]) +assert_type(np.arange(10.0), _Array1D[np.float64 | Any]) +assert_type(np.arange(0, stop=10.0), _Array1D[np.float64 | Any]) +assert_type(np.arange(_x_timedelta), _Array1D[np.timedelta64]) 
+assert_type(np.arange(0, _x_timedelta), _Array1D[np.timedelta64]) +assert_type(np.arange(_x_datetime, _x_datetime), _Array1D[np.datetime64]) +assert_type(np.arange(10, dtype=np.float64), _Array1D[np.float64]) +assert_type(np.arange(0, 10, step=2, dtype=np.int16), _Array1D[np.int16]) +assert_type(np.arange(10, dtype=int), _Array1D[np.int_]) +assert_type(np.arange(0, 10, dtype="f8"), _Array1D[Any]) +# https://github.com/numpy/numpy/issues/30628 +assert_type(np.arange("2025-12-20", "2025-12-23", dtype="datetime64[D]"), _Array1D[np.datetime64]) assert_type(np.require(A), npt.NDArray[np.float64]) assert_type(np.require(B), SubClass[np.float64]) @@ -128,23 +147,23 @@ assert_type(np.require(B, requirements="W"), SubClass[np.float64]) assert_type(np.require(B, requirements="A"), SubClass[np.float64]) assert_type(np.require(C), npt.NDArray[Any]) -assert_type(np.linspace(0, 10), npt.NDArray[np.floating[Any]]) -assert_type(np.linspace(0, 10j), npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(np.linspace(0, 10, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.linspace(0, 10), _Array1D[np.float64]) +assert_type(np.linspace(0, 10j), _Array1D[np.complex128 | Any]) +assert_type(np.linspace(0, 10, dtype=np.int64), _Array1D[np.int64]) assert_type(np.linspace(0, 10, dtype=int), npt.NDArray[Any]) -assert_type(np.linspace(0, 10, retstep=True), tuple[npt.NDArray[np.floating[Any]], np.floating[Any]]) -assert_type(np.linspace(0j, 10, retstep=True), tuple[npt.NDArray[np.complexfloating[Any, Any]], np.complexfloating[Any, Any]]) -assert_type(np.linspace(0, 10, retstep=True, dtype=np.int64), tuple[npt.NDArray[np.int64], np.int64]) +assert_type(np.linspace(0, 10, retstep=True), tuple[_Array1D[np.float64], np.float64]) +assert_type(np.linspace(0j, 10, retstep=True), tuple[_Array1D[np.complex128 | Any], np.complex128 | Any]) +assert_type(np.linspace(0, 10, retstep=True, dtype=np.int64), tuple[_Array1D[np.int64], np.int64]) assert_type(np.linspace(0j, 10, retstep=True, dtype=int), 
tuple[npt.NDArray[Any], Any]) -assert_type(np.logspace(0, 10), npt.NDArray[np.floating[Any]]) -assert_type(np.logspace(0, 10j), npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(np.logspace(0, 10, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.logspace(0, 10), _Array1D[np.float64]) +assert_type(np.logspace(0, 10j), _Array1D[np.complex128 | Any]) +assert_type(np.logspace(0, 10, dtype=np.int64), _Array1D[np.int64]) assert_type(np.logspace(0, 10, dtype=int), npt.NDArray[Any]) -assert_type(np.geomspace(0, 10), npt.NDArray[np.floating[Any]]) -assert_type(np.geomspace(0, 10j), npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(np.geomspace(0, 10, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.geomspace(0, 10), _Array1D[np.float64]) +assert_type(np.geomspace(0, 10j), _Array1D[np.complex128 | Any]) +assert_type(np.geomspace(0, 10, dtype=np.int64), _Array1D[np.int64]) assert_type(np.geomspace(0, 10, dtype=int), npt.NDArray[Any]) assert_type(np.zeros_like(A), npt.NDArray[np.float64]) @@ -165,59 +184,95 @@ assert_type(np.full_like(A, i8, dtype=int), npt.NDArray[Any]) assert_type(np.full_like(B, i8), SubClass[np.float64]) assert_type(np.full_like(B, i8, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.ones(1), npt.NDArray[np.float64]) -assert_type(np.ones([1, 1, 1]), npt.NDArray[np.float64]) -assert_type(np.ones(5, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.ones(5, dtype=int), npt.NDArray[Any]) - -assert_type(np.full(1, i8), npt.NDArray[Any]) -assert_type(np.full([1, 1, 1], i8), npt.NDArray[Any]) -assert_type(np.full(1, i8, dtype=np.float64), npt.NDArray[np.float64]) -assert_type(np.full(1, i8, dtype=float), npt.NDArray[Any]) +_size: int +_shape_0d: tuple[()] +_shape_1d: tuple[int] +_shape_2d: tuple[int, int] +_shape_nd: tuple[int, ...] 
+_shape_like: list[int] + +assert_type(np.ones(_shape_0d), np.ndarray[tuple[()], np.dtype[np.float64]]) +assert_type(np.ones(_size), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.ones(_shape_2d), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.ones(_shape_nd), np.ndarray[tuple[int, ...], np.dtype[np.float64]]) +assert_type(np.ones(_shape_1d, dtype=np.int64), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.ones(_shape_like), npt.NDArray[np.float64]) +assert_type( + np.ones(_shape_like, dtype=np.dtypes.Int64DType()), + np.ndarray[tuple[Any, ...], np.dtypes.Int64DType], +) +assert_type(np.ones(_shape_like, dtype=int), npt.NDArray[Any]) +assert_type(np.ones(mixed_shape), npt.NDArray[np.float64]) + +assert_type(np.full(_size, i8), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.full(_shape_2d, i8), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(np.full(_shape_like, i8), npt.NDArray[np.int64]) +assert_type(np.full(_shape_like, 42), npt.NDArray[Any]) +assert_type(np.full(_size, i8, dtype=np.float64), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.full(_size, i8, dtype=float), np.ndarray[tuple[int], np.dtype]) +assert_type(np.full(_shape_like, 42, dtype=float), npt.NDArray[Any]) +assert_type(np.full(_shape_0d, i8, dtype=object), np.ndarray[tuple[()], np.dtype]) assert_type(np.indices([1, 2, 3]), npt.NDArray[np.int_]) assert_type(np.indices([1, 2, 3], sparse=True), tuple[npt.NDArray[np.int_], ...]) assert_type(np.fromfunction(func, (3, 5)), SubClass[np.float64]) -assert_type(np.identity(10), npt.NDArray[np.float64]) -assert_type(np.identity(10, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.identity(10, dtype=int), npt.NDArray[Any]) +assert_type(np.identity(3), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.identity(3, dtype=np.int8), np.ndarray[tuple[int, int], np.dtype[np.int8]]) +assert_type(np.identity(3, dtype=bool), np.ndarray[tuple[int, int], 
np.dtype[np.bool]]) +assert_type(np.identity(3, dtype="bool"), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.identity(3, dtype="b1"), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.identity(3, dtype="?"), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.identity(3, dtype=int), np.ndarray[tuple[int, int], np.dtype[np.int_ | Any]]) +assert_type(np.identity(3, dtype="int"), np.ndarray[tuple[int, int], np.dtype[np.int_ | Any]]) +assert_type(np.identity(3, dtype="n"), np.ndarray[tuple[int, int], np.dtype[np.int_ | Any]]) +assert_type(np.identity(3, dtype=float), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.identity(3, dtype="float"), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.identity(3, dtype="f8"), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.identity(3, dtype="d"), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.identity(3, dtype=complex), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) +assert_type(np.identity(3, dtype="complex"), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) +assert_type(np.identity(3, dtype="c16"), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) +assert_type(np.identity(3, dtype="D"), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) assert_type(np.atleast_1d(A), npt.NDArray[np.float64]) assert_type(np.atleast_1d(C), npt.NDArray[Any]) -assert_type(np.atleast_1d(A, A), tuple[npt.NDArray[Any], ...]) -assert_type(np.atleast_1d(A, C), tuple[npt.NDArray[Any], ...]) -assert_type(np.atleast_1d(C, C), tuple[npt.NDArray[Any], ...]) +assert_type(np.atleast_1d(A, A), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.atleast_1d(A, C), tuple[npt.NDArray[Any], npt.NDArray[Any]]) +assert_type(np.atleast_1d(C, C), tuple[npt.NDArray[Any], npt.NDArray[Any]]) +assert_type(np.atleast_1d(A, A, A), tuple[npt.NDArray[np.float64], ...]) 
+assert_type(np.atleast_1d(C, C, C), tuple[npt.NDArray[Any], ...]) assert_type(np.atleast_2d(A), npt.NDArray[np.float64]) -assert_type(np.atleast_2d(A, A), tuple[npt.NDArray[Any], ...]) +assert_type(np.atleast_2d(A, A), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.atleast_2d(A, A, A), tuple[npt.NDArray[np.float64], ...]) assert_type(np.atleast_3d(A), npt.NDArray[np.float64]) -assert_type(np.atleast_3d(A, A), tuple[npt.NDArray[Any], ...]) +assert_type(np.atleast_3d(A, A), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.atleast_3d(A, A, A), tuple[npt.NDArray[np.float64], ...]) -assert_type(np.vstack([A, A]), np.ndarray[Any, Any]) -assert_type(np.vstack([A, A], dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.vstack([A, A]), npt.NDArray[np.float64]) +assert_type(np.vstack([A, A], dtype=np.float32), npt.NDArray[np.float32]) assert_type(np.vstack([A, C]), npt.NDArray[Any]) assert_type(np.vstack([C, C]), npt.NDArray[Any]) -assert_type(np.hstack([A, A]), np.ndarray[Any, Any]) -assert_type(np.hstack([A, A], dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.hstack([A, A]), npt.NDArray[np.float64]) +assert_type(np.hstack([A, A], dtype=np.float32), npt.NDArray[np.float32]) -assert_type(np.stack([A, A]), Any) -assert_type(np.stack([A, A], dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.stack([A, A]), npt.NDArray[np.float64]) +assert_type(np.stack([A, A], dtype=np.float32), npt.NDArray[np.float32]) assert_type(np.stack([A, C]), npt.NDArray[Any]) assert_type(np.stack([C, C]), npt.NDArray[Any]) -assert_type(np.stack([A, A], axis=0), Any) +assert_type(np.stack([A, A], axis=0), npt.NDArray[np.float64]) assert_type(np.stack([A, A], out=B), SubClass[np.float64]) -assert_type(np.block([[A, A], [A, A]]), npt.NDArray[Any]) +assert_type(np.block([[A, A], [A, A]]), npt.NDArray[Any]) # pyright correctly infers this as NDArray[float64] assert_type(np.block(C), npt.NDArray[Any]) -if sys.version_info >= (3, 
12): - from collections.abc import Buffer +from collections.abc import Buffer - def create_array(obj: npt.ArrayLike) -> npt.NDArray[Any]: ... +def create_array(obj: npt.ArrayLike) -> npt.NDArray[Any]: ... - buffer: Buffer - assert_type(create_array(buffer), npt.NDArray[Any]) +buffer: Buffer +assert_type(create_array(buffer), npt.NDArray[Any]) diff --git a/numpy/typing/tests/data/reveal/arraypad.pyi b/numpy/typing/tests/data/reveal/arraypad.pyi index f53613ba2fd4..3d53d913a770 100644 --- a/numpy/typing/tests/data/reveal/arraypad.pyi +++ b/numpy/typing/tests/data/reveal/arraypad.pyi @@ -1,17 +1,11 @@ -import sys from collections.abc import Mapping -from typing import Any, SupportsIndex +from typing import Any, SupportsIndex, assert_type import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - def mode_func( - ar: npt.NDArray[np.number[Any]], + ar: npt.NDArray[np.number], width: tuple[int, int], iaxis: SupportsIndex, kwargs: Mapping[str, Any], @@ -26,3 +20,8 @@ assert_type(np.pad(AR_LIKE, (2, 3), "constant"), npt.NDArray[Any]) assert_type(np.pad(AR_f8, (2, 3), mode_func), npt.NDArray[np.float64]) assert_type(np.pad(AR_f8, (2, 3), mode_func, a=1, b=2), npt.NDArray[np.float64]) + +assert_type(np.pad(AR_i8, {-1: (2, 3)}), npt.NDArray[np.int64]) +assert_type(np.pad(AR_i8, {-2: 4}), npt.NDArray[np.int64]) +pad_width: dict[int, int | tuple[int, int]] = {-1: (2, 3), -2: 4} +assert_type(np.pad(AR_i8, pad_width), npt.NDArray[np.int64]) diff --git a/numpy/typing/tests/data/reveal/arrayprint.pyi b/numpy/typing/tests/data/reveal/arrayprint.pyi index c4a161959547..17e175edc2b7 100644 --- a/numpy/typing/tests/data/reveal/arrayprint.pyi +++ b/numpy/typing/tests/data/reveal/arrayprint.pyi @@ -1,24 +1,18 @@ -import sys import contextlib from collections.abc import Callable -from typing import Any +from typing import assert_type import numpy as np import numpy.typing as npt from 
numpy._core.arrayprint import _FormatOptions -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - AR: npt.NDArray[np.int64] -func_float: Callable[[np.floating[Any]], str] -func_int: Callable[[np.integer[Any]], str] +func_float: Callable[[np.floating], str] +func_int: Callable[[np.integer], str] assert_type(np.get_printoptions(), _FormatOptions) assert_type( - np.array2string(AR, formatter={'float_kind': func_float, 'int_kind': func_int}), + np.array2string(AR, formatter={"float_kind": func_float, "int_kind": func_int}), str, ) assert_type(np.format_float_scientific(1.0), str) diff --git a/numpy/typing/tests/data/reveal/arraysetops.pyi b/numpy/typing/tests/data/reveal/arraysetops.pyi index 3b0a2448fdbc..ee74eedd61c7 100644 --- a/numpy/typing/tests/data/reveal/arraysetops.pyi +++ b/numpy/typing/tests/data/reveal/arraysetops.pyi @@ -1,16 +1,14 @@ -import sys -from typing import Any +from typing import Any, assert_type import numpy as np import numpy.typing as npt from numpy.lib._arraysetops_impl import ( - UniqueAllResult, UniqueCountsResult, UniqueInverseResult + UniqueAllResult, + UniqueCountsResult, + UniqueInverseResult, ) -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] AR_b: npt.NDArray[np.bool] AR_i8: npt.NDArray[np.int64] @@ -20,33 +18,40 @@ AR_O: npt.NDArray[np.object_] AR_LIKE_f8: list[float] -assert_type(np.ediff1d(AR_b), npt.NDArray[np.int8]) -assert_type(np.ediff1d(AR_i8, to_end=[1, 2, 3]), npt.NDArray[np.int64]) -assert_type(np.ediff1d(AR_M), npt.NDArray[np.timedelta64]) -assert_type(np.ediff1d(AR_O), npt.NDArray[np.object_]) -assert_type(np.ediff1d(AR_LIKE_f8, to_begin=[1, 1.5]), npt.NDArray[Any]) +assert_type(np.ediff1d(AR_b), _Array1D[np.int8]) +assert_type(np.ediff1d(AR_i8, to_end=[1, 2, 3]), _Array1D[np.int64]) 
+assert_type(np.ediff1d(AR_M), _Array1D[np.timedelta64]) +assert_type(np.ediff1d(AR_O), _Array1D[np.object_]) +assert_type(np.ediff1d(AR_LIKE_f8, to_begin=[1, 1.5]), _Array1D[Any]) -assert_type(np.intersect1d(AR_i8, AR_i8), npt.NDArray[np.int64]) -assert_type(np.intersect1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) -assert_type(np.intersect1d(AR_f8, AR_i8), npt.NDArray[Any]) -assert_type(np.intersect1d(AR_f8, AR_f8, return_indices=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp], npt.NDArray[np.intp]]) +assert_type(np.intersect1d(AR_i8, AR_i8), _Array1D[np.int64]) +# NOTE: Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. +assert_type(np.intersect1d(AR_M, AR_M, assume_unique=True), _Array1D[np.datetime64]) # type: ignore[assert-type] +assert_type(np.intersect1d(AR_f8, AR_i8), _Array1D[Any]) +assert_type( + np.intersect1d(AR_f8, AR_f8, return_indices=True), + tuple[_Array1D[np.float64], _Array1D[np.intp], _Array1D[np.intp]], +) -assert_type(np.setxor1d(AR_i8, AR_i8), npt.NDArray[np.int64]) -assert_type(np.setxor1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) -assert_type(np.setxor1d(AR_f8, AR_i8), npt.NDArray[Any]) +assert_type(np.setxor1d(AR_i8, AR_i8), _Array1D[np.int64]) +# NOTE: Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. 
+assert_type(np.setxor1d(AR_M, AR_M, assume_unique=True), _Array1D[np.datetime64]) # type: ignore[assert-type] +assert_type(np.setxor1d(AR_f8, AR_i8), _Array1D[Any]) assert_type(np.isin(AR_i8, AR_i8), npt.NDArray[np.bool]) assert_type(np.isin(AR_M, AR_M, assume_unique=True), npt.NDArray[np.bool]) assert_type(np.isin(AR_f8, AR_i8), npt.NDArray[np.bool]) assert_type(np.isin(AR_f8, AR_LIKE_f8, invert=True), npt.NDArray[np.bool]) -assert_type(np.union1d(AR_i8, AR_i8), npt.NDArray[np.int64]) -assert_type(np.union1d(AR_M, AR_M), npt.NDArray[np.datetime64]) -assert_type(np.union1d(AR_f8, AR_i8), npt.NDArray[Any]) +assert_type(np.union1d(AR_i8, AR_i8), _Array1D[np.int64]) +# NOTE: Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. +assert_type(np.union1d(AR_M, AR_M), _Array1D[np.datetime64]) # type: ignore[assert-type] +assert_type(np.union1d(AR_f8, AR_i8), _Array1D[Any]) -assert_type(np.setdiff1d(AR_i8, AR_i8), npt.NDArray[np.int64]) -assert_type(np.setdiff1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) -assert_type(np.setdiff1d(AR_f8, AR_i8), npt.NDArray[Any]) +assert_type(np.setdiff1d(AR_i8, AR_i8), _Array1D[np.int64]) +# NOTE: Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. 
+assert_type(np.setdiff1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) # type: ignore[assert-type] +assert_type(np.setdiff1d(AR_f8, AR_i8), _Array1D[Any]) assert_type(np.unique(AR_f8), npt.NDArray[np.float64]) assert_type(np.unique(AR_LIKE_f8, axis=0), npt.NDArray[Any]) @@ -71,5 +76,5 @@ assert_type(np.unique_counts(AR_f8), UniqueCountsResult[np.float64]) assert_type(np.unique_counts(AR_LIKE_f8), UniqueCountsResult[Any]) assert_type(np.unique_inverse(AR_f8), UniqueInverseResult[np.float64]) assert_type(np.unique_inverse(AR_LIKE_f8), UniqueInverseResult[Any]) -assert_type(np.unique_values(AR_f8), npt.NDArray[np.float64]) -assert_type(np.unique_values(AR_LIKE_f8), npt.NDArray[Any]) +assert_type(np.unique_values(AR_f8), _Array1D[np.float64]) +assert_type(np.unique_values(AR_LIKE_f8), _Array1D[Any]) diff --git a/numpy/typing/tests/data/reveal/arrayterator.pyi b/numpy/typing/tests/data/reveal/arrayterator.pyi index 5514bf6d773f..85eeff4add08 100644 --- a/numpy/typing/tests/data/reveal/arrayterator.pyi +++ b/numpy/typing/tests/data/reveal/arrayterator.pyi @@ -1,33 +1,27 @@ -import sys -from typing import Any from collections.abc import Generator +from typing import Any, assert_type import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - AR_i8: npt.NDArray[np.int64] ar_iter = np.lib.Arrayterator(AR_i8) assert_type(ar_iter.var, npt.NDArray[np.int64]) -assert_type(ar_iter.buf_size, None | int) +assert_type(ar_iter.buf_size, int | None) assert_type(ar_iter.start, list[int]) assert_type(ar_iter.stop, list[int]) assert_type(ar_iter.step, list[int]) -assert_type(ar_iter.shape, tuple[int, ...]) -assert_type(ar_iter.flat, Generator[np.int64, None, None]) +assert_type(ar_iter.shape, tuple[Any, ...]) +assert_type(ar_iter.flat, Generator[np.int64]) assert_type(ar_iter.__array__(), npt.NDArray[np.int64]) for i in ar_iter: assert_type(i, npt.NDArray[np.int64]) 
-assert_type(ar_iter[0], np.lib.Arrayterator[Any, np.dtype[np.int64]]) -assert_type(ar_iter[...], np.lib.Arrayterator[Any, np.dtype[np.int64]]) -assert_type(ar_iter[:], np.lib.Arrayterator[Any, np.dtype[np.int64]]) -assert_type(ar_iter[0, 0, 0], np.lib.Arrayterator[Any, np.dtype[np.int64]]) -assert_type(ar_iter[..., 0, :], np.lib.Arrayterator[Any, np.dtype[np.int64]]) +assert_type(ar_iter[0], np.lib.Arrayterator[tuple[Any, ...], np.dtype[np.int64]]) +assert_type(ar_iter[...], np.lib.Arrayterator[tuple[Any, ...], np.dtype[np.int64]]) +assert_type(ar_iter[:], np.lib.Arrayterator[tuple[Any, ...], np.dtype[np.int64]]) +assert_type(ar_iter[0, 0, 0], np.lib.Arrayterator[tuple[Any, ...], np.dtype[np.int64]]) +assert_type(ar_iter[..., 0, :], np.lib.Arrayterator[tuple[Any, ...], np.dtype[np.int64]]) diff --git a/numpy/typing/tests/data/reveal/bitwise_ops.pyi b/numpy/typing/tests/data/reveal/bitwise_ops.pyi index 1f04f4b045fe..809f77d9736d 100644 --- a/numpy/typing/tests/data/reveal/bitwise_ops.pyi +++ b/numpy/typing/tests/data/reveal/bitwise_ops.pyi @@ -1,29 +1,28 @@ -import sys -from typing import Any +from typing import Literal as L, assert_type import numpy as np import numpy.typing as npt -from numpy._typing import _64Bit, _32Bit -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +type FalseType = L[False] +type TrueType = L[True] -i8 = np.int64(1) -u8 = np.uint64(1) +i4: np.int32 +i8: np.int64 -i4 = np.int32(1) -u4 = np.uint32(1) +u4: np.uint32 +u8: np.uint64 -b_ = np.bool(1) +b_: np.bool[bool] +b0_: np.bool[FalseType] +b1_: np.bool[TrueType] -b = bool(1) -i = int(1) +b: bool +b0: FalseType +b1: TrueType -AR = np.array([0, 1, 2], dtype=np.int32) -AR.setflags(write=False) +i: int +AR: npt.NDArray[np.int32] assert_type(i8 << i8, np.int64) assert_type(i8 >> i8, np.int64) @@ -31,11 +30,11 @@ assert_type(i8 | i8, np.int64) assert_type(i8 ^ i8, np.int64) assert_type(i8 & i8, np.int64) -assert_type(i8 << AR, 
npt.NDArray[np.signedinteger[Any]]) -assert_type(i8 >> AR, npt.NDArray[np.signedinteger[Any]]) -assert_type(i8 | AR, npt.NDArray[np.signedinteger[Any]]) -assert_type(i8 ^ AR, npt.NDArray[np.signedinteger[Any]]) -assert_type(i8 & AR, npt.NDArray[np.signedinteger[Any]]) +assert_type(i8 << AR, npt.NDArray[np.signedinteger]) +assert_type(i8 >> AR, npt.NDArray[np.signedinteger]) +assert_type(i8 | AR, npt.NDArray[np.signedinteger]) +assert_type(i8 ^ AR, npt.NDArray[np.signedinteger]) +assert_type(i8 & AR, npt.NDArray[np.signedinteger]) assert_type(i4 << i4, np.int32) assert_type(i4 >> i4, np.int32) @@ -43,11 +42,11 @@ assert_type(i4 | i4, np.int32) assert_type(i4 ^ i4, np.int32) assert_type(i4 & i4, np.int32) -assert_type(i8 << i4, np.signedinteger[_32Bit | _64Bit]) -assert_type(i8 >> i4, np.signedinteger[_32Bit | _64Bit]) -assert_type(i8 | i4, np.signedinteger[_32Bit | _64Bit]) -assert_type(i8 ^ i4, np.signedinteger[_32Bit | _64Bit]) -assert_type(i8 & i4, np.signedinteger[_32Bit | _64Bit]) +assert_type(i8 << i4, np.signedinteger) +assert_type(i8 >> i4, np.signedinteger) +assert_type(i8 | i4, np.signedinteger) +assert_type(i8 ^ i4, np.signedinteger) +assert_type(i8 & i4, np.signedinteger) assert_type(i8 << b_, np.int64) assert_type(i8 >> b_, np.int64) @@ -67,11 +66,11 @@ assert_type(u8 | u8, np.uint64) assert_type(u8 ^ u8, np.uint64) assert_type(u8 & u8, np.uint64) -assert_type(u8 << AR, npt.NDArray[np.signedinteger[Any]]) -assert_type(u8 >> AR, npt.NDArray[np.signedinteger[Any]]) -assert_type(u8 | AR, npt.NDArray[np.signedinteger[Any]]) -assert_type(u8 ^ AR, npt.NDArray[np.signedinteger[Any]]) -assert_type(u8 & AR, npt.NDArray[np.signedinteger[Any]]) +assert_type(u8 << AR, npt.NDArray[np.signedinteger]) +assert_type(u8 >> AR, npt.NDArray[np.signedinteger]) +assert_type(u8 | AR, npt.NDArray[np.signedinteger]) +assert_type(u8 ^ AR, npt.NDArray[np.signedinteger]) +assert_type(u8 & AR, npt.NDArray[np.signedinteger]) assert_type(u4 << u4, np.uint32) assert_type(u4 >> u4, 
np.uint32) @@ -79,17 +78,17 @@ assert_type(u4 | u4, np.uint32) assert_type(u4 ^ u4, np.uint32) assert_type(u4 & u4, np.uint32) -assert_type(u4 << i4, np.signedinteger[Any]) -assert_type(u4 >> i4, np.signedinteger[Any]) -assert_type(u4 | i4, np.signedinteger[Any]) -assert_type(u4 ^ i4, np.signedinteger[Any]) -assert_type(u4 & i4, np.signedinteger[Any]) +assert_type(u4 << i4, np.signedinteger) +assert_type(u4 >> i4, np.signedinteger) +assert_type(u4 | i4, np.signedinteger) +assert_type(u4 ^ i4, np.signedinteger) +assert_type(u4 & i4, np.signedinteger) -assert_type(u4 << i, np.signedinteger[Any]) -assert_type(u4 >> i, np.signedinteger[Any]) -assert_type(u4 | i, np.signedinteger[Any]) -assert_type(u4 ^ i, np.signedinteger[Any]) -assert_type(u4 & i, np.signedinteger[Any]) +assert_type(u4 << i, np.uint32) +assert_type(u4 >> i, np.uint32) +assert_type(u4 | i, np.uint32) +assert_type(u4 ^ i, np.uint32) +assert_type(u4 & i, np.uint32) assert_type(u8 << b_, np.uint64) assert_type(u8 >> b_, np.uint64) @@ -109,11 +108,11 @@ assert_type(b_ | b_, np.bool) assert_type(b_ ^ b_, np.bool) assert_type(b_ & b_, np.bool) -assert_type(b_ << AR, npt.NDArray[np.signedinteger[Any]]) -assert_type(b_ >> AR, npt.NDArray[np.signedinteger[Any]]) -assert_type(b_ | AR, npt.NDArray[np.signedinteger[Any]]) -assert_type(b_ ^ AR, npt.NDArray[np.signedinteger[Any]]) -assert_type(b_ & AR, npt.NDArray[np.signedinteger[Any]]) +assert_type(b_ << AR, npt.NDArray[np.signedinteger]) +assert_type(b_ >> AR, npt.NDArray[np.signedinteger]) +assert_type(b_ | AR, npt.NDArray[np.signedinteger]) +assert_type(b_ ^ AR, npt.NDArray[np.signedinteger]) +assert_type(b_ & AR, npt.NDArray[np.signedinteger]) assert_type(b_ << b, np.int8) assert_type(b_ >> b, np.int8) @@ -123,13 +122,45 @@ assert_type(b_ & b, np.bool) assert_type(b_ << i, np.int_) assert_type(b_ >> i, np.int_) -assert_type(b_ | i, np.int_) -assert_type(b_ ^ i, np.int_) -assert_type(b_ & i, np.int_) +assert_type(b_ | i, np.bool | np.int_) +assert_type(b_ ^ i, 
np.bool | np.int_) +assert_type(b_ & i, np.bool | np.int_) assert_type(~i8, np.int64) assert_type(~i4, np.int32) assert_type(~u8, np.uint64) assert_type(~u4, np.uint32) assert_type(~b_, np.bool) +assert_type(~b0_, np.bool[TrueType]) +assert_type(~b1_, np.bool[FalseType]) assert_type(~AR, npt.NDArray[np.int32]) + +assert_type(b_ | b0_, np.bool) +assert_type(b0_ | b_, np.bool) +assert_type(b_ | b1_, np.bool[TrueType]) +assert_type(b1_ | b_, np.bool[TrueType]) + +assert_type(b_ ^ b0_, np.bool) +assert_type(b0_ ^ b_, np.bool) +assert_type(b_ ^ b1_, np.bool) +assert_type(b1_ ^ b_, np.bool) + +assert_type(b_ & b0_, np.bool[FalseType]) +assert_type(b0_ & b_, np.bool[FalseType]) +assert_type(b_ & b1_, np.bool) +assert_type(b1_ & b_, np.bool) + +assert_type(b0_ | b0_, np.bool[FalseType]) +assert_type(b0_ | b1_, np.bool[TrueType]) +assert_type(b1_ | b0_, np.bool[TrueType]) +assert_type(b1_ | b1_, np.bool[TrueType]) + +assert_type(b0_ ^ b0_, np.bool[FalseType]) +assert_type(b0_ ^ b1_, np.bool[TrueType]) +assert_type(b1_ ^ b0_, np.bool[TrueType]) +assert_type(b1_ ^ b1_, np.bool[FalseType]) + +assert_type(b0_ & b0_, np.bool[FalseType]) +assert_type(b0_ & b1_, np.bool[FalseType]) +assert_type(b1_ & b0_, np.bool[FalseType]) +assert_type(b1_ & b1_, np.bool[TrueType]) diff --git a/numpy/typing/tests/data/reveal/char.pyi b/numpy/typing/tests/data/reveal/char.pyi index ab7186fadce4..b6866a6d9f96 100644 --- a/numpy/typing/tests/data/reveal/char.pyi +++ b/numpy/typing/tests/data/reveal/char.pyi @@ -1,152 +1,228 @@ -import sys -from typing import Any +from typing import assert_type import numpy as np +import numpy._typing as np_t import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +type AR_T_alias = np.ndarray[np_t._AnyShape, np.dtypes.StringDType] +type AR_TU_alias = AR_T_alias | npt.NDArray[np.str_] AR_U: npt.NDArray[np.str_] AR_S: npt.NDArray[np.bytes_] +AR_T: AR_T_alias 
assert_type(np.char.equal(AR_U, AR_U), npt.NDArray[np.bool]) assert_type(np.char.equal(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.char.equal(AR_T, AR_T), npt.NDArray[np.bool]) assert_type(np.char.not_equal(AR_U, AR_U), npt.NDArray[np.bool]) assert_type(np.char.not_equal(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.char.not_equal(AR_T, AR_T), npt.NDArray[np.bool]) assert_type(np.char.greater_equal(AR_U, AR_U), npt.NDArray[np.bool]) assert_type(np.char.greater_equal(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.char.greater_equal(AR_T, AR_T), npt.NDArray[np.bool]) assert_type(np.char.less_equal(AR_U, AR_U), npt.NDArray[np.bool]) assert_type(np.char.less_equal(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.char.less_equal(AR_T, AR_T), npt.NDArray[np.bool]) assert_type(np.char.greater(AR_U, AR_U), npt.NDArray[np.bool]) assert_type(np.char.greater(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.char.greater(AR_T, AR_T), npt.NDArray[np.bool]) assert_type(np.char.less(AR_U, AR_U), npt.NDArray[np.bool]) assert_type(np.char.less(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.char.less(AR_T, AR_T), npt.NDArray[np.bool]) assert_type(np.char.multiply(AR_U, 5), npt.NDArray[np.str_]) assert_type(np.char.multiply(AR_S, [5, 4, 3]), npt.NDArray[np.bytes_]) +assert_type(np.char.multiply(AR_T, 5), AR_T_alias) assert_type(np.char.mod(AR_U, "test"), npt.NDArray[np.str_]) assert_type(np.char.mod(AR_S, "test"), npt.NDArray[np.bytes_]) +assert_type(np.char.mod(AR_T, "test"), AR_T_alias) assert_type(np.char.capitalize(AR_U), npt.NDArray[np.str_]) assert_type(np.char.capitalize(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.char.capitalize(AR_T), AR_T_alias) assert_type(np.char.center(AR_U, 5), npt.NDArray[np.str_]) assert_type(np.char.center(AR_S, [2, 3, 4], b"a"), npt.NDArray[np.bytes_]) +assert_type(np.char.center(AR_T, 5), AR_T_alias) assert_type(np.char.encode(AR_U), npt.NDArray[np.bytes_]) +assert_type(np.char.encode(AR_T), npt.NDArray[np.bytes_]) 
assert_type(np.char.decode(AR_S), npt.NDArray[np.str_]) assert_type(np.char.expandtabs(AR_U), npt.NDArray[np.str_]) assert_type(np.char.expandtabs(AR_S, tabsize=4), npt.NDArray[np.bytes_]) +assert_type(np.char.expandtabs(AR_T), AR_T_alias) assert_type(np.char.join(AR_U, "_"), npt.NDArray[np.str_]) assert_type(np.char.join(AR_S, [b"_", b""]), npt.NDArray[np.bytes_]) +assert_type(np.char.join(AR_T, "_"), AR_TU_alias) assert_type(np.char.ljust(AR_U, 5), npt.NDArray[np.str_]) assert_type(np.char.ljust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.char.ljust(AR_S, [4, 3, 1], fillchar="a"), npt.NDArray[np.bytes_]) +assert_type(np.char.ljust(AR_T, 5), AR_T_alias) +assert_type(np.char.ljust(AR_T, [4, 2, 1], fillchar=["a", "b", "c"]), AR_TU_alias) + assert_type(np.char.rjust(AR_U, 5), npt.NDArray[np.str_]) assert_type(np.char.rjust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.char.rjust(AR_T, 5), AR_T_alias) +assert_type(np.char.rjust(AR_T, [4, 2, 1], fillchar=["a", "b", "c"]), AR_TU_alias) assert_type(np.char.lstrip(AR_U), npt.NDArray[np.str_]) -assert_type(np.char.lstrip(AR_S, chars=b"_"), npt.NDArray[np.bytes_]) +assert_type(np.char.lstrip(AR_S, b"_"), npt.NDArray[np.bytes_]) +assert_type(np.char.lstrip(AR_T), AR_T_alias) +assert_type(np.char.lstrip(AR_T, "_"), AR_TU_alias) + assert_type(np.char.rstrip(AR_U), npt.NDArray[np.str_]) -assert_type(np.char.rstrip(AR_S, chars=b"_"), npt.NDArray[np.bytes_]) +assert_type(np.char.rstrip(AR_S, b"_"), npt.NDArray[np.bytes_]) +assert_type(np.char.rstrip(AR_T), AR_T_alias) +assert_type(np.char.rstrip(AR_T, "_"), AR_TU_alias) + assert_type(np.char.strip(AR_U), npt.NDArray[np.str_]) -assert_type(np.char.strip(AR_S, chars=b"_"), npt.NDArray[np.bytes_]) +assert_type(np.char.strip(AR_S, b"_"), npt.NDArray[np.bytes_]) +assert_type(np.char.strip(AR_T), AR_T_alias) +assert_type(np.char.strip(AR_T, "_"), AR_TU_alias) + +assert_type(np.char.count(AR_U, "a", 
start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.char.count(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.char.count(AR_T, AR_T, start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.char.count(AR_T, ["a", "b", "c"], end=9), npt.NDArray[np.int_]) assert_type(np.char.partition(AR_U, "\n"), npt.NDArray[np.str_]) assert_type(np.char.partition(AR_S, [b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.char.partition(AR_T, "\n"), AR_TU_alias) + assert_type(np.char.rpartition(AR_U, "\n"), npt.NDArray[np.str_]) assert_type(np.char.rpartition(AR_S, [b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.char.rpartition(AR_T, "\n"), AR_TU_alias) assert_type(np.char.replace(AR_U, "_", "-"), npt.NDArray[np.str_]) assert_type(np.char.replace(AR_S, [b"_", b""], [b"a", b"b"]), npt.NDArray[np.bytes_]) +assert_type(np.char.replace(AR_T, "_", "_"), AR_TU_alias) assert_type(np.char.split(AR_U, "_"), npt.NDArray[np.object_]) assert_type(np.char.split(AR_S, maxsplit=[1, 2, 3]), npt.NDArray[np.object_]) +assert_type(np.char.split(AR_T, "_"), npt.NDArray[np.object_]) + assert_type(np.char.rsplit(AR_U, "_"), npt.NDArray[np.object_]) assert_type(np.char.rsplit(AR_S, maxsplit=[1, 2, 3]), npt.NDArray[np.object_]) +assert_type(np.char.rsplit(AR_T, "_"), npt.NDArray[np.object_]) assert_type(np.char.splitlines(AR_U), npt.NDArray[np.object_]) assert_type(np.char.splitlines(AR_S, keepends=[True, True, False]), npt.NDArray[np.object_]) +assert_type(np.char.splitlines(AR_T), npt.NDArray[np.object_]) + +assert_type(np.char.lower(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.lower(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.char.lower(AR_T), AR_T_alias) + +assert_type(np.char.upper(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.upper(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.char.upper(AR_T), AR_T_alias) assert_type(np.char.swapcase(AR_U), npt.NDArray[np.str_]) assert_type(np.char.swapcase(AR_S), npt.NDArray[np.bytes_]) 
+assert_type(np.char.swapcase(AR_T), AR_T_alias) assert_type(np.char.title(AR_U), npt.NDArray[np.str_]) assert_type(np.char.title(AR_S), npt.NDArray[np.bytes_]) - -assert_type(np.char.upper(AR_U), npt.NDArray[np.str_]) -assert_type(np.char.upper(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.char.title(AR_T), AR_T_alias) assert_type(np.char.zfill(AR_U, 5), npt.NDArray[np.str_]) assert_type(np.char.zfill(AR_S, [2, 3, 4]), npt.NDArray[np.bytes_]) - -assert_type(np.char.count(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) -assert_type(np.char.count(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.char.zfill(AR_T, 5), AR_T_alias) assert_type(np.char.endswith(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.bool]) assert_type(np.char.endswith(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.bool]) +assert_type(np.char.endswith(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.bool]) + assert_type(np.char.startswith(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.bool]) assert_type(np.char.startswith(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.bool]) +assert_type(np.char.startswith(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.bool]) assert_type(np.char.find(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) assert_type(np.char.find(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.char.find(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) + assert_type(np.char.rfind(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) assert_type(np.char.rfind(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.char.rfind(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) assert_type(np.char.index(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) assert_type(np.char.index(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.char.index(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) + assert_type(np.char.rindex(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) assert_type(np.char.rindex(AR_S, [b"a", b"b", 
b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.char.rindex(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) assert_type(np.char.isalpha(AR_U), npt.NDArray[np.bool]) assert_type(np.char.isalpha(AR_S), npt.NDArray[np.bool]) +assert_type(np.char.isalpha(AR_T), npt.NDArray[np.bool]) assert_type(np.char.isalnum(AR_U), npt.NDArray[np.bool]) assert_type(np.char.isalnum(AR_S), npt.NDArray[np.bool]) +assert_type(np.char.isalnum(AR_T), npt.NDArray[np.bool]) assert_type(np.char.isdecimal(AR_U), npt.NDArray[np.bool]) +assert_type(np.char.isdecimal(AR_T), npt.NDArray[np.bool]) assert_type(np.char.isdigit(AR_U), npt.NDArray[np.bool]) assert_type(np.char.isdigit(AR_S), npt.NDArray[np.bool]) +assert_type(np.char.isdigit(AR_T), npt.NDArray[np.bool]) assert_type(np.char.islower(AR_U), npt.NDArray[np.bool]) assert_type(np.char.islower(AR_S), npt.NDArray[np.bool]) +assert_type(np.char.islower(AR_T), npt.NDArray[np.bool]) assert_type(np.char.isnumeric(AR_U), npt.NDArray[np.bool]) +assert_type(np.char.isnumeric(AR_T), npt.NDArray[np.bool]) assert_type(np.char.isspace(AR_U), npt.NDArray[np.bool]) assert_type(np.char.isspace(AR_S), npt.NDArray[np.bool]) +assert_type(np.char.isspace(AR_T), npt.NDArray[np.bool]) assert_type(np.char.istitle(AR_U), npt.NDArray[np.bool]) assert_type(np.char.istitle(AR_S), npt.NDArray[np.bool]) +assert_type(np.char.istitle(AR_T), npt.NDArray[np.bool]) assert_type(np.char.isupper(AR_U), npt.NDArray[np.bool]) assert_type(np.char.isupper(AR_S), npt.NDArray[np.bool]) +assert_type(np.char.isupper(AR_T), npt.NDArray[np.bool]) assert_type(np.char.str_len(AR_U), npt.NDArray[np.int_]) assert_type(np.char.str_len(AR_S), npt.NDArray[np.int_]) - -assert_type(np.char.array(AR_U), np.char.chararray[Any, np.dtype[np.str_]]) -assert_type(np.char.array(AR_S, order="K"), np.char.chararray[Any, np.dtype[np.bytes_]]) -assert_type(np.char.array("bob", copy=True), np.char.chararray[Any, np.dtype[np.str_]]) -assert_type(np.char.array(b"bob", itemsize=5), 
np.char.chararray[Any, np.dtype[np.bytes_]]) -assert_type(np.char.array(1, unicode=False), np.char.chararray[Any, np.dtype[np.bytes_]]) -assert_type(np.char.array(1, unicode=True), np.char.chararray[Any, np.dtype[np.str_]]) - -assert_type(np.char.asarray(AR_U), np.char.chararray[Any, np.dtype[np.str_]]) -assert_type(np.char.asarray(AR_S, order="K"), np.char.chararray[Any, np.dtype[np.bytes_]]) -assert_type(np.char.asarray("bob"), np.char.chararray[Any, np.dtype[np.str_]]) -assert_type(np.char.asarray(b"bob", itemsize=5), np.char.chararray[Any, np.dtype[np.bytes_]]) -assert_type(np.char.asarray(1, unicode=False), np.char.chararray[Any, np.dtype[np.bytes_]]) -assert_type(np.char.asarray(1, unicode=True), np.char.chararray[Any, np.dtype[np.str_]]) +assert_type(np.char.str_len(AR_T), npt.NDArray[np.int_]) + +assert_type(np.char.translate(AR_U, ""), npt.NDArray[np.str_]) +assert_type(np.char.translate(AR_S, ""), npt.NDArray[np.bytes_]) +assert_type(np.char.translate(AR_T, ""), AR_T_alias) + +# mypy: disable-error-code="deprecated" +# pyright: reportDeprecated=false + +assert_type(np.char.array(AR_U), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.array(AR_S, order="K"), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array("bob", copy=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.array(b"bob", itemsize=5), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array(1, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array(1, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.array(1), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]] | np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array(AR_U, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array(AR_S, unicode=True), 
np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) + +assert_type(np.char.asarray(AR_U), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.asarray(AR_S, order="K"), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray("bob"), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.asarray(b"bob", itemsize=5), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray(1, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray(1, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.asarray(1), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]] | np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray(AR_U, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray(AR_S, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) diff --git a/numpy/typing/tests/data/reveal/chararray.pyi b/numpy/typing/tests/data/reveal/chararray.pyi index 0fb621526288..771276e41110 100644 --- a/numpy/typing/tests/data/reveal/chararray.pyi +++ b/numpy/typing/tests/data/reveal/chararray.pyi @@ -1,16 +1,13 @@ -import sys -from typing import Any +from typing import Any, assert_type import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +type _BytesCharArray = np.char.chararray[tuple[Any, ...], np.dtype[np.bytes_]] # type: ignore[deprecated] +type _StrCharArray = np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] # type: ignore[deprecated] -AR_U: np.char.chararray[Any, np.dtype[np.str_]] -AR_S: np.char.chararray[Any, np.dtype[np.bytes_]] +AR_U: _StrCharArray +AR_S: _BytesCharArray assert_type(AR_U == AR_U, npt.NDArray[np.bool]) assert_type(AR_S == AR_S, npt.NDArray[np.bool]) @@ -30,46 +27,47 @@ assert_type(AR_S > 
AR_S, npt.NDArray[np.bool]) assert_type(AR_U < AR_U, npt.NDArray[np.bool]) assert_type(AR_S < AR_S, npt.NDArray[np.bool]) -assert_type(AR_U * 5, np.char.chararray[Any, np.dtype[np.str_]]) -assert_type(AR_S * [5], np.char.chararray[Any, np.dtype[np.bytes_]]) +assert_type(AR_U * 5, _StrCharArray) +assert_type(AR_S * [5], _BytesCharArray) -assert_type(AR_U % "test", np.char.chararray[Any, np.dtype[np.str_]]) -assert_type(AR_S % b"test", np.char.chararray[Any, np.dtype[np.bytes_]]) +assert_type(AR_U % "test", _StrCharArray) +assert_type(AR_S % b"test", _BytesCharArray) -assert_type(AR_U.capitalize(), np.char.chararray[Any, np.dtype[np.str_]]) -assert_type(AR_S.capitalize(), np.char.chararray[Any, np.dtype[np.bytes_]]) +assert_type(AR_U.capitalize(), _StrCharArray) +assert_type(AR_S.capitalize(), _BytesCharArray) -assert_type(AR_U.center(5), np.char.chararray[Any, np.dtype[np.str_]]) -assert_type(AR_S.center([2, 3, 4], b"a"), np.char.chararray[Any, np.dtype[np.bytes_]]) +assert_type(AR_U.center(5), _StrCharArray) +assert_type(AR_S.center([2, 3, 4], b"a"), _BytesCharArray) -assert_type(AR_U.encode(), np.char.chararray[Any, np.dtype[np.bytes_]]) -assert_type(AR_S.decode(), np.char.chararray[Any, np.dtype[np.str_]]) +assert_type(AR_U.encode(), _BytesCharArray) +assert_type(AR_S.decode(), _StrCharArray) -assert_type(AR_U.expandtabs(), np.char.chararray[Any, np.dtype[np.str_]]) -assert_type(AR_S.expandtabs(tabsize=4), np.char.chararray[Any, np.dtype[np.bytes_]]) +assert_type(AR_U.expandtabs(), _StrCharArray) +assert_type(AR_S.expandtabs(tabsize=4), _BytesCharArray) -assert_type(AR_U.join("_"), np.char.chararray[Any, np.dtype[np.str_]]) -assert_type(AR_S.join([b"_", b""]), np.char.chararray[Any, np.dtype[np.bytes_]]) +assert_type(AR_U.join("_"), _StrCharArray) +assert_type(AR_S.join([b"_", b""]), _BytesCharArray) -assert_type(AR_U.ljust(5), np.char.chararray[Any, np.dtype[np.str_]]) -assert_type(AR_S.ljust([4, 3, 1], fillchar=[b"a", b"b", b"c"]), np.char.chararray[Any, 
np.dtype[np.bytes_]]) -assert_type(AR_U.rjust(5), np.char.chararray[Any, np.dtype[np.str_]]) -assert_type(AR_S.rjust([4, 3, 1], fillchar=[b"a", b"b", b"c"]), np.char.chararray[Any, np.dtype[np.bytes_]]) +assert_type(AR_U.ljust(5), _StrCharArray) +assert_type(AR_S.ljust([4, 3, 1], fillchar=[b"a", b"b", b"c"]), _BytesCharArray) +assert_type(AR_S.ljust([4, 3, 1], fillchar="a"), _BytesCharArray) +assert_type(AR_U.rjust(5), _StrCharArray) +assert_type(AR_S.rjust([4, 3, 1], fillchar=[b"a", b"b", b"c"]), _BytesCharArray) -assert_type(AR_U.lstrip(), np.char.chararray[Any, np.dtype[np.str_]]) -assert_type(AR_S.lstrip(chars=b"_"), np.char.chararray[Any, np.dtype[np.bytes_]]) -assert_type(AR_U.rstrip(), np.char.chararray[Any, np.dtype[np.str_]]) -assert_type(AR_S.rstrip(chars=b"_"), np.char.chararray[Any, np.dtype[np.bytes_]]) -assert_type(AR_U.strip(), np.char.chararray[Any, np.dtype[np.str_]]) -assert_type(AR_S.strip(chars=b"_"), np.char.chararray[Any, np.dtype[np.bytes_]]) +assert_type(AR_U.lstrip(), _StrCharArray) +assert_type(AR_S.lstrip(chars=b"_"), _BytesCharArray) +assert_type(AR_U.rstrip(), _StrCharArray) +assert_type(AR_S.rstrip(chars=b"_"), _BytesCharArray) +assert_type(AR_U.strip(), _StrCharArray) +assert_type(AR_S.strip(chars=b"_"), _BytesCharArray) -assert_type(AR_U.partition("\n"), np.char.chararray[Any, np.dtype[np.str_]]) -assert_type(AR_S.partition([b"a", b"b", b"c"]), np.char.chararray[Any, np.dtype[np.bytes_]]) -assert_type(AR_U.rpartition("\n"), np.char.chararray[Any, np.dtype[np.str_]]) -assert_type(AR_S.rpartition([b"a", b"b", b"c"]), np.char.chararray[Any, np.dtype[np.bytes_]]) +assert_type(AR_U.partition("\n"), _StrCharArray) +assert_type(AR_S.partition([b"a", b"b", b"c"]), _BytesCharArray) +assert_type(AR_U.rpartition("\n"), _StrCharArray) +assert_type(AR_S.rpartition([b"a", b"b", b"c"]), _BytesCharArray) -assert_type(AR_U.replace("_", "-"), np.char.chararray[Any, np.dtype[np.str_]]) -assert_type(AR_S.replace([b"_", b""], [b"a", b"b"]), 
np.char.chararray[Any, np.dtype[np.bytes_]]) +assert_type(AR_U.replace("_", "-"), _StrCharArray) +assert_type(AR_S.replace([b"_", b""], [b"a", b"b"]), _BytesCharArray) assert_type(AR_U.split("_"), npt.NDArray[np.object_]) assert_type(AR_S.split(maxsplit=[1, 2, 3]), npt.NDArray[np.object_]) @@ -79,17 +77,17 @@ assert_type(AR_S.rsplit(maxsplit=[1, 2, 3]), npt.NDArray[np.object_]) assert_type(AR_U.splitlines(), npt.NDArray[np.object_]) assert_type(AR_S.splitlines(keepends=[True, True, False]), npt.NDArray[np.object_]) -assert_type(AR_U.swapcase(), np.char.chararray[Any, np.dtype[np.str_]]) -assert_type(AR_S.swapcase(), np.char.chararray[Any, np.dtype[np.bytes_]]) +assert_type(AR_U.swapcase(), _StrCharArray) +assert_type(AR_S.swapcase(), _BytesCharArray) -assert_type(AR_U.title(), np.char.chararray[Any, np.dtype[np.str_]]) -assert_type(AR_S.title(), np.char.chararray[Any, np.dtype[np.bytes_]]) +assert_type(AR_U.title(), _StrCharArray) +assert_type(AR_S.title(), _BytesCharArray) -assert_type(AR_U.upper(), np.char.chararray[Any, np.dtype[np.str_]]) -assert_type(AR_S.upper(), np.char.chararray[Any, np.dtype[np.bytes_]]) +assert_type(AR_U.upper(), _StrCharArray) +assert_type(AR_S.upper(), _BytesCharArray) -assert_type(AR_U.zfill(5), np.char.chararray[Any, np.dtype[np.str_]]) -assert_type(AR_S.zfill([2, 3, 4]), np.char.chararray[Any, np.dtype[np.bytes_]]) +assert_type(AR_U.zfill(5), _StrCharArray) +assert_type(AR_S.zfill([2, 3, 4]), _BytesCharArray) assert_type(AR_U.count("a", start=[1, 2, 3]), npt.NDArray[np.int_]) assert_type(AR_S.count([b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) diff --git a/numpy/typing/tests/data/reveal/comparisons.pyi b/numpy/typing/tests/data/reveal/comparisons.pyi index 78c6a8e207fe..6df5a3d94314 100644 --- a/numpy/typing/tests/data/reveal/comparisons.pyi +++ b/numpy/typing/tests/data/reveal/comparisons.pyi @@ -1,16 +1,10 @@ -import sys -import fractions import decimal -from typing import Any +import fractions +from typing import assert_type 
import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - c16 = np.complex128() f8 = np.float64() i8 = np.int64() @@ -26,10 +20,10 @@ td = np.timedelta64(0, "D") b_ = np.bool() -b = bool() +b = False c = complex() -f = float() -i = int() +f = 0.0 +i = 0 AR = np.array([0], dtype=np.int64) AR.setflags(write=False) @@ -38,10 +32,10 @@ SEQ = (0, 1, 2, 3, 4) # object-like comparisons -assert_type(i8 > fractions.Fraction(1, 5), Any) -assert_type(i8 > [fractions.Fraction(1, 5)], Any) -assert_type(i8 > decimal.Decimal("1.5"), Any) -assert_type(i8 > [decimal.Decimal("1.5")], Any) +assert_type(i8 > fractions.Fraction(1, 5), np.bool) +assert_type(i8 > [fractions.Fraction(1, 5)], npt.NDArray[np.bool]) +assert_type(i8 > decimal.Decimal("1.5"), np.bool) +assert_type(i8 > [decimal.Decimal("1.5")], npt.NDArray[np.bool]) # Time structures diff --git a/numpy/typing/tests/data/reveal/constants.pyi b/numpy/typing/tests/data/reveal/constants.pyi index 5166d4f26d76..d4474f46ce7e 100644 --- a/numpy/typing/tests/data/reveal/constants.pyi +++ b/numpy/typing/tests/data/reveal/constants.pyi @@ -1,12 +1,7 @@ -import sys +from typing import Literal, assert_type import numpy as np -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - assert_type(np.e, float) assert_type(np.euler_gamma, float) assert_type(np.inf, float) @@ -14,6 +9,6 @@ assert_type(np.nan, float) assert_type(np.pi, float) assert_type(np.little_endian, bool) -assert_type(np.True_, np.bool) -assert_type(np.False_, np.bool) +assert_type(np.True_, np.bool[Literal[True]]) +assert_type(np.False_, np.bool[Literal[False]]) diff --git a/numpy/typing/tests/data/reveal/ctypeslib.pyi b/numpy/typing/tests/data/reveal/ctypeslib.pyi index 992eb4bb43b9..e3558925e4d0 100644 --- a/numpy/typing/tests/data/reveal/ctypeslib.pyi +++ b/numpy/typing/tests/data/reveal/ctypeslib.pyi @@ 
-1,30 +1,22 @@ -import sys import ctypes as ct -from typing import Any +from typing import Any, assert_type import numpy as np import numpy.typing as npt from numpy import ctypeslib -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - AR_bool: npt.NDArray[np.bool] -AR_ubyte: npt.NDArray[np.ubyte] -AR_ushort: npt.NDArray[np.ushort] -AR_uintc: npt.NDArray[np.uintc] -AR_ulong: npt.NDArray[np.ulong] -AR_ulonglong: npt.NDArray[np.ulonglong] -AR_byte: npt.NDArray[np.byte] -AR_short: npt.NDArray[np.short] -AR_intc: npt.NDArray[np.intc] -AR_long: npt.NDArray[np.long] -AR_longlong: npt.NDArray[np.longlong] -AR_single: npt.NDArray[np.single] -AR_double: npt.NDArray[np.double] -AR_longdouble: npt.NDArray[np.longdouble] +AR_i8: npt.NDArray[np.int8] +AR_u8: npt.NDArray[np.uint8] +AR_i16: npt.NDArray[np.int16] +AR_u16: npt.NDArray[np.uint16] +AR_i32: npt.NDArray[np.int32] +AR_u32: npt.NDArray[np.uint32] +AR_i64: npt.NDArray[np.int64] +AR_u64: npt.NDArray[np.uint64] +AR_f32: npt.NDArray[np.float32] +AR_f64: npt.NDArray[np.float64] +AR_f80: npt.NDArray[np.longdouble] AR_void: npt.NDArray[np.void] pointer: ct._Pointer[Any] @@ -33,64 +25,62 @@ assert_type(np.ctypeslib.c_intp(), ctypeslib.c_intp) assert_type(np.ctypeslib.ndpointer(), type[ctypeslib._ndptr[None]]) assert_type(np.ctypeslib.ndpointer(dtype=np.float64), type[ctypeslib._ndptr[np.dtype[np.float64]]]) -assert_type(np.ctypeslib.ndpointer(dtype=float), type[ctypeslib._ndptr[np.dtype[Any]]]) +assert_type(np.ctypeslib.ndpointer(dtype=float), type[ctypeslib._ndptr[np.dtype]]) assert_type(np.ctypeslib.ndpointer(shape=(10, 3)), type[ctypeslib._ndptr[None]]) assert_type(np.ctypeslib.ndpointer(np.int64, shape=(10, 3)), type[ctypeslib._concrete_ndptr[np.dtype[np.int64]]]) -assert_type(np.ctypeslib.ndpointer(int, shape=(1,)), type[np.ctypeslib._concrete_ndptr[np.dtype[Any]]]) +assert_type(np.ctypeslib.ndpointer(int, shape=(1,)), 
type[np.ctypeslib._concrete_ndptr[np.dtype]]) assert_type(np.ctypeslib.as_ctypes_type(np.bool), type[ct.c_bool]) -assert_type(np.ctypeslib.as_ctypes_type(np.ubyte), type[ct.c_ubyte]) -assert_type(np.ctypeslib.as_ctypes_type(np.ushort), type[ct.c_ushort]) -assert_type(np.ctypeslib.as_ctypes_type(np.uintc), type[ct.c_uint]) -assert_type(np.ctypeslib.as_ctypes_type(np.byte), type[ct.c_byte]) -assert_type(np.ctypeslib.as_ctypes_type(np.short), type[ct.c_short]) -assert_type(np.ctypeslib.as_ctypes_type(np.intc), type[ct.c_int]) -assert_type(np.ctypeslib.as_ctypes_type(np.single), type[ct.c_float]) -assert_type(np.ctypeslib.as_ctypes_type(np.double), type[ct.c_double]) -assert_type(np.ctypeslib.as_ctypes_type(ct.c_double), type[ct.c_double]) +assert_type(np.ctypeslib.as_ctypes_type(np.int8), type[ct.c_int8]) +assert_type(np.ctypeslib.as_ctypes_type(np.uint8), type[ct.c_uint8]) +assert_type(np.ctypeslib.as_ctypes_type(np.int16), type[ct.c_int16]) +assert_type(np.ctypeslib.as_ctypes_type(np.uint16), type[ct.c_uint16]) +assert_type(np.ctypeslib.as_ctypes_type(np.int32), type[ct.c_int32]) +assert_type(np.ctypeslib.as_ctypes_type(np.uint32), type[ct.c_uint32]) +assert_type(np.ctypeslib.as_ctypes_type(np.int64), type[ct.c_int64]) +assert_type(np.ctypeslib.as_ctypes_type(np.uint64), type[ct.c_uint64]) +assert_type(np.ctypeslib.as_ctypes_type(np.float32), type[ct.c_float]) +assert_type(np.ctypeslib.as_ctypes_type(np.float64), type[ct.c_double]) +assert_type(np.ctypeslib.as_ctypes_type(np.longdouble), type[ct.c_longdouble]) +assert_type(np.ctypeslib.as_ctypes_type("?"), type[ct.c_bool]) +assert_type(np.ctypeslib.as_ctypes_type("intp"), type[ct.c_ssize_t]) assert_type(np.ctypeslib.as_ctypes_type("q"), type[ct.c_longlong]) +assert_type(np.ctypeslib.as_ctypes_type("i8"), type[ct.c_int64]) +assert_type(np.ctypeslib.as_ctypes_type("f8"), type[ct.c_double]) assert_type(np.ctypeslib.as_ctypes_type([("i8", np.int64), ("f8", np.float64)]), type[Any]) 
-assert_type(np.ctypeslib.as_ctypes_type("i8"), type[Any]) -assert_type(np.ctypeslib.as_ctypes_type("f8"), type[Any]) assert_type(np.ctypeslib.as_ctypes(AR_bool.take(0)), ct.c_bool) -assert_type(np.ctypeslib.as_ctypes(AR_ubyte.take(0)), ct.c_ubyte) -assert_type(np.ctypeslib.as_ctypes(AR_ushort.take(0)), ct.c_ushort) -assert_type(np.ctypeslib.as_ctypes(AR_uintc.take(0)), ct.c_uint) +assert_type(np.ctypeslib.as_ctypes(AR_u8.take(0)), ct.c_uint8) +assert_type(np.ctypeslib.as_ctypes(AR_u16.take(0)), ct.c_uint16) +assert_type(np.ctypeslib.as_ctypes(AR_u32.take(0)), ct.c_uint32) -assert_type(np.ctypeslib.as_ctypes(AR_byte.take(0)), ct.c_byte) -assert_type(np.ctypeslib.as_ctypes(AR_short.take(0)), ct.c_short) -assert_type(np.ctypeslib.as_ctypes(AR_intc.take(0)), ct.c_int) -assert_type(np.ctypeslib.as_ctypes(AR_single.take(0)), ct.c_float) -assert_type(np.ctypeslib.as_ctypes(AR_double.take(0)), ct.c_double) -assert_type(np.ctypeslib.as_ctypes(AR_void.take(0)), Any) +assert_type(np.ctypeslib.as_ctypes(np.bool()), ct.c_bool) +assert_type(np.ctypeslib.as_ctypes(np.int8()), ct.c_int8) +assert_type(np.ctypeslib.as_ctypes(np.uint8()), ct.c_uint8) +assert_type(np.ctypeslib.as_ctypes(np.int16()), ct.c_int16) +assert_type(np.ctypeslib.as_ctypes(np.uint16()), ct.c_uint16) +assert_type(np.ctypeslib.as_ctypes(np.int32()), ct.c_int32) +assert_type(np.ctypeslib.as_ctypes(np.uint32()), ct.c_uint32) +assert_type(np.ctypeslib.as_ctypes(np.int64()), ct.c_int64) +assert_type(np.ctypeslib.as_ctypes(np.uint64()), ct.c_uint64) +assert_type(np.ctypeslib.as_ctypes(np.float32()), ct.c_float) +assert_type(np.ctypeslib.as_ctypes(np.float64()), ct.c_double) +assert_type(np.ctypeslib.as_ctypes(np.longdouble()), ct.c_longdouble) +assert_type(np.ctypeslib.as_ctypes(np.void(b"")), Any) assert_type(np.ctypeslib.as_ctypes(AR_bool), ct.Array[ct.c_bool]) -assert_type(np.ctypeslib.as_ctypes(AR_ubyte), ct.Array[ct.c_ubyte]) -assert_type(np.ctypeslib.as_ctypes(AR_ushort), ct.Array[ct.c_ushort]) 
-assert_type(np.ctypeslib.as_ctypes(AR_uintc), ct.Array[ct.c_uint]) -assert_type(np.ctypeslib.as_ctypes(AR_byte), ct.Array[ct.c_byte]) -assert_type(np.ctypeslib.as_ctypes(AR_short), ct.Array[ct.c_short]) -assert_type(np.ctypeslib.as_ctypes(AR_intc), ct.Array[ct.c_int]) -assert_type(np.ctypeslib.as_ctypes(AR_single), ct.Array[ct.c_float]) -assert_type(np.ctypeslib.as_ctypes(AR_double), ct.Array[ct.c_double]) +assert_type(np.ctypeslib.as_ctypes(AR_i8), ct.Array[ct.c_int8]) +assert_type(np.ctypeslib.as_ctypes(AR_u8), ct.Array[ct.c_uint8]) +assert_type(np.ctypeslib.as_ctypes(AR_i16), ct.Array[ct.c_int16]) +assert_type(np.ctypeslib.as_ctypes(AR_u16), ct.Array[ct.c_uint16]) +assert_type(np.ctypeslib.as_ctypes(AR_i32), ct.Array[ct.c_int32]) +assert_type(np.ctypeslib.as_ctypes(AR_u32), ct.Array[ct.c_uint32]) +assert_type(np.ctypeslib.as_ctypes(AR_i64), ct.Array[ct.c_int64]) +assert_type(np.ctypeslib.as_ctypes(AR_u64), ct.Array[ct.c_uint64]) +assert_type(np.ctypeslib.as_ctypes(AR_f32), ct.Array[ct.c_float]) +assert_type(np.ctypeslib.as_ctypes(AR_f64), ct.Array[ct.c_double]) +assert_type(np.ctypeslib.as_ctypes(AR_f80), ct.Array[ct.c_longdouble]) assert_type(np.ctypeslib.as_ctypes(AR_void), ct.Array[Any]) -assert_type(np.ctypeslib.as_array(AR_ubyte), npt.NDArray[np.ubyte]) +assert_type(np.ctypeslib.as_array(AR_u8), npt.NDArray[np.ubyte]) assert_type(np.ctypeslib.as_array(1), npt.NDArray[Any]) assert_type(np.ctypeslib.as_array(pointer), npt.NDArray[Any]) - -if sys.platform == "win32": - # Mainly on windows int is the same size as long but gets picked first: - assert_type(np.ctypeslib.as_ctypes_type(np.long), type[ct.c_int]) - assert_type(np.ctypeslib.as_ctypes_type(np.ulong), type[ct.c_uint]) - assert_type(np.ctypeslib.as_ctypes(AR_ulong), ct.Array[ct.c_uint]) - assert_type(np.ctypeslib.as_ctypes(AR_long), ct.Array[ct.c_int]) - assert_type(np.ctypeslib.as_ctypes(AR_long.take(0)), ct.c_int) - assert_type(np.ctypeslib.as_ctypes(AR_ulong.take(0)), ct.c_uint) -else: - 
assert_type(np.ctypeslib.as_ctypes_type(np.long), type[ct.c_long]) - assert_type(np.ctypeslib.as_ctypes_type(np.ulong), type[ct.c_ulong]) - assert_type(np.ctypeslib.as_ctypes(AR_ulong), ct.Array[ct.c_ulong]) - assert_type(np.ctypeslib.as_ctypes(AR_long), ct.Array[ct.c_long]) - assert_type(np.ctypeslib.as_ctypes(AR_long.take(0)), ct.c_long) - assert_type(np.ctypeslib.as_ctypes(AR_ulong.take(0)), ct.c_ulong) diff --git a/numpy/typing/tests/data/reveal/datasource.pyi b/numpy/typing/tests/data/reveal/datasource.pyi index cc5a84852a0f..9f017911a1ff 100644 --- a/numpy/typing/tests/data/reveal/datasource.pyi +++ b/numpy/typing/tests/data/reveal/datasource.pyi @@ -1,14 +1,8 @@ -import sys from pathlib import Path -from typing import IO, Any +from typing import IO, Any, assert_type import numpy as np -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - path1: Path path2: str diff --git a/numpy/typing/tests/data/reveal/dtype.pyi b/numpy/typing/tests/data/reveal/dtype.pyi index 10f6ccd05a41..c8c9e393f76e 100644 --- a/numpy/typing/tests/data/reveal/dtype.pyi +++ b/numpy/typing/tests/data/reveal/dtype.pyi @@ -1,18 +1,34 @@ -import sys import ctypes as ct -from typing import Any +import datetime as dt +from decimal import Decimal +from fractions import Fraction +from typing import Any, Literal, LiteralString, assert_type import numpy as np +from numpy.dtypes import StringDType -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +# a combination of likely `object` dtype-like candidates (no `_co`) +type _PyObjectLike = Decimal | Fraction | dt.datetime | dt.timedelta dtype_U: np.dtype[np.str_] dtype_V: np.dtype[np.void] dtype_i8: np.dtype[np.int64] +py_object: type[_PyObjectLike] +py_character: type[str | bytes] + +ct_floating: type[ct.c_float | ct.c_double | ct.c_longdouble] +ct_number: type[ct.c_uint8 | ct.c_float] +ct_generic: type[ct.c_bool | 
ct.c_char] + +cs_integer: Literal["u1", "V", "S"] +cs_generic: Literal["H", "U", "h", "|M8[Y]", "?"] + +dt_inexact: np.dtype[np.inexact] +dt_string: StringDType + assert_type(np.dtype(np.float64), np.dtype[np.float64]) assert_type(np.dtype(np.float64, metadata={"test": "test"}), np.dtype[np.float64]) assert_type(np.dtype(np.int64), np.dtype[np.int64]) @@ -27,16 +43,68 @@ assert_type(np.dtype("bytes"), np.dtype[np.bytes_]) assert_type(np.dtype("str"), np.dtype[np.str_]) # Python types -assert_type(np.dtype(complex), np.dtype[np.cdouble]) -assert_type(np.dtype(float), np.dtype[np.double]) -assert_type(np.dtype(int), np.dtype[np.int_]) assert_type(np.dtype(bool), np.dtype[np.bool]) +assert_type(np.dtype(int), np.dtype[np.int_ | Any]) +assert_type(np.dtype(float), np.dtype[np.float64 | Any]) +assert_type(np.dtype(complex), np.dtype[np.complex128 | Any]) +assert_type(np.dtype(py_object), np.dtype[np.object_]) assert_type(np.dtype(str), np.dtype[np.str_]) assert_type(np.dtype(bytes), np.dtype[np.bytes_]) -assert_type(np.dtype(object), np.dtype[np.object_]) +assert_type(np.dtype(memoryview), np.dtype[np.void]) +assert_type(np.dtype(py_character), np.dtype[np.character]) + +# object types +assert_type(np.dtype(list), np.dtype[np.object_]) +assert_type(np.dtype(dt.datetime), np.dtype[np.object_]) +assert_type(np.dtype(dt.timedelta), np.dtype[np.object_]) +assert_type(np.dtype(Decimal), np.dtype[np.object_]) +assert_type(np.dtype(Fraction), np.dtype[np.object_]) + +# char-codes +assert_type(np.dtype("?"), np.dtype[np.bool]) +assert_type(np.dtype("|b1"), np.dtype[np.bool]) +assert_type(np.dtype("u1"), np.dtype[np.uint8]) +assert_type(np.dtype("l"), np.dtype[np.int32 | np.int64]) +assert_type(np.dtype("longlong"), np.dtype[np.longlong]) +assert_type(np.dtype(">g"), np.dtype[np.longdouble]) +assert_type(np.dtype(cs_integer), np.dtype[np.integer]) +# char-codes - datetime64 +assert_type(np.dtype("datetime64[Y]"), np.dtype[np.datetime64[dt.date]]) 
+assert_type(np.dtype("datetime64[M]"), np.dtype[np.datetime64[dt.date]]) +assert_type(np.dtype("datetime64[W]"), np.dtype[np.datetime64[dt.date]]) +assert_type(np.dtype("datetime64[D]"), np.dtype[np.datetime64[dt.date]]) +assert_type(np.dtype("datetime64[h]"), np.dtype[np.datetime64[dt.datetime]]) +assert_type(np.dtype("datetime64[m]"), np.dtype[np.datetime64[dt.datetime]]) +assert_type(np.dtype("datetime64[s]"), np.dtype[np.datetime64[dt.datetime]]) +assert_type(np.dtype("datetime64[ms]"), np.dtype[np.datetime64[dt.datetime]]) +assert_type(np.dtype("datetime64[us]"), np.dtype[np.datetime64[dt.datetime]]) +assert_type(np.dtype("datetime64[ns]"), np.dtype[np.datetime64[int]]) +assert_type(np.dtype("datetime64[ps]"), np.dtype[np.datetime64[int]]) +assert_type(np.dtype("datetime64[fs]"), np.dtype[np.datetime64[int]]) +assert_type(np.dtype("datetime64[as]"), np.dtype[np.datetime64[int]]) +assert_type(np.dtype("datetime64"), np.dtype[np.datetime64]) +assert_type(np.dtype("M8"), np.dtype[np.datetime64]) +assert_type(np.dtype("M"), np.dtype[np.datetime64]) +# char-codes - timedelta64 +assert_type(np.dtype("timedelta64[Y]"), np.dtype[np.timedelta64[int]]) +assert_type(np.dtype("timedelta64[M]"), np.dtype[np.timedelta64[int]]) +assert_type(np.dtype("timedelta64[W]"), np.dtype[np.timedelta64[dt.timedelta]]) +assert_type(np.dtype("timedelta64[D]"), np.dtype[np.timedelta64[dt.timedelta]]) +assert_type(np.dtype("timedelta64[h]"), np.dtype[np.timedelta64[dt.timedelta]]) +assert_type(np.dtype("timedelta64[m]"), np.dtype[np.timedelta64[dt.timedelta]]) +assert_type(np.dtype("timedelta64[s]"), np.dtype[np.timedelta64[dt.timedelta]]) +assert_type(np.dtype("timedelta64[ms]"), np.dtype[np.timedelta64[dt.timedelta]]) +assert_type(np.dtype("timedelta64[us]"), np.dtype[np.timedelta64[dt.timedelta]]) +assert_type(np.dtype("timedelta64[ns]"), np.dtype[np.timedelta64[int]]) +assert_type(np.dtype("timedelta64[ps]"), np.dtype[np.timedelta64[int]]) +assert_type(np.dtype("timedelta64[fs]"), 
np.dtype[np.timedelta64[int]]) +assert_type(np.dtype("timedelta64[as]"), np.dtype[np.timedelta64[int]]) +assert_type(np.dtype("timedelta64"), np.dtype[np.timedelta64]) +assert_type(np.dtype("m8"), np.dtype[np.timedelta64]) +assert_type(np.dtype("m"), np.dtype[np.timedelta64]) # ctypes -assert_type(np.dtype(ct.c_double), np.dtype[np.double]) +assert_type(np.dtype(ct.c_double), np.dtype[np.float64]) # see numpy/numpy#29155 assert_type(np.dtype(ct.c_longlong), np.dtype[np.longlong]) assert_type(np.dtype(ct.c_uint32), np.dtype[np.uint32]) assert_type(np.dtype(ct.c_bool), np.dtype[np.bool]) @@ -44,24 +112,32 @@ assert_type(np.dtype(ct.c_char), np.dtype[np.bytes_]) assert_type(np.dtype(ct.py_object), np.dtype[np.object_]) # Special case for None -assert_type(np.dtype(None), np.dtype[np.double]) +assert_type(np.dtype(None), np.dtype[np.float64]) -# Dtypes of dtypes +# dtypes of dtypes assert_type(np.dtype(np.dtype(np.float64)), np.dtype[np.float64]) +assert_type(np.dtype(dt_inexact), np.dtype[np.inexact]) # Parameterized dtypes -assert_type(np.dtype("S8"), np.dtype[Any]) +assert_type(np.dtype("S8"), np.dtype) # Void assert_type(np.dtype(("U", 10)), np.dtype[np.void]) +assert_type(np.dtype({"formats": (int, "u8"), "names": ("n", "B")}), np.dtype[np.void]) + +# StringDType +assert_type(np.dtype(dt_string), StringDType) +assert_type(np.dtype("T"), StringDType) +assert_type(np.dtype("=T"), StringDType) +assert_type(np.dtype("|T"), StringDType) # Methods and attributes -assert_type(dtype_U.base, np.dtype[Any]) -assert_type(dtype_U.subdtype, None | tuple[np.dtype[Any], tuple[int, ...]]) +assert_type(dtype_U.base, np.dtype) +assert_type(dtype_U.subdtype, tuple[np.dtype, tuple[Any, ...]] | None) assert_type(dtype_U.newbyteorder(), np.dtype[np.str_]) assert_type(dtype_U.type, type[np.str_]) -assert_type(dtype_U.name, str) -assert_type(dtype_U.names, None | tuple[str, ...]) +assert_type(dtype_U.name, LiteralString) +assert_type(dtype_U.names, tuple[str, ...] 
| None) assert_type(dtype_U * 0, np.dtype[np.str_]) assert_type(dtype_U * 1, np.dtype[np.str_]) @@ -75,11 +151,16 @@ assert_type(0 * dtype_U, np.dtype[np.str_]) assert_type(1 * dtype_U, np.dtype[np.str_]) assert_type(2 * dtype_U, np.dtype[np.str_]) -assert_type(0 * dtype_i8, np.dtype[Any]) -assert_type(1 * dtype_i8, np.dtype[Any]) -assert_type(2 * dtype_i8, np.dtype[Any]) +assert_type(0 * dtype_i8, np.dtype) +assert_type(1 * dtype_i8, np.dtype) +assert_type(2 * dtype_i8, np.dtype) -assert_type(dtype_V["f0"], np.dtype[Any]) -assert_type(dtype_V[0], np.dtype[Any]) +assert_type(dtype_V["f0"], np.dtype) +assert_type(dtype_V[0], np.dtype) assert_type(dtype_V[["f0", "f1"]], np.dtype[np.void]) assert_type(dtype_V[["f0"]], np.dtype[np.void]) + +class _D: + __numpy_dtype__: np.dtype[np.int8] + +assert_type(np.dtype(_D()), np.dtype[np.int8]) diff --git a/numpy/typing/tests/data/reveal/einsumfunc.pyi b/numpy/typing/tests/data/reveal/einsumfunc.pyi index 645aaad31cf1..cc58f006e249 100644 --- a/numpy/typing/tests/data/reveal/einsumfunc.pyi +++ b/numpy/typing/tests/data/reveal/einsumfunc.pyi @@ -1,14 +1,8 @@ -import sys -from typing import Any +from typing import Any, assert_type import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - AR_LIKE_b: list[bool] AR_LIKE_u: list[np.uint32] AR_LIKE_i: list[int] diff --git a/numpy/typing/tests/data/reveal/emath.pyi b/numpy/typing/tests/data/reveal/emath.pyi index d1027bf48d50..1d7bff893e73 100644 --- a/numpy/typing/tests/data/reveal/emath.pyi +++ b/numpy/typing/tests/data/reveal/emath.pyi @@ -1,14 +1,8 @@ -import sys -from typing import Any +from typing import Any, assert_type import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - AR_f8: npt.NDArray[np.float64] AR_c16: npt.NDArray[np.complex128] f8: np.float64 @@ -16,45 
+10,45 @@ c16: np.complex128 assert_type(np.emath.sqrt(f8), Any) assert_type(np.emath.sqrt(AR_f8), npt.NDArray[Any]) -assert_type(np.emath.sqrt(c16), np.complexfloating[Any, Any]) -assert_type(np.emath.sqrt(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.emath.sqrt(c16), np.complexfloating) +assert_type(np.emath.sqrt(AR_c16), npt.NDArray[np.complexfloating]) assert_type(np.emath.log(f8), Any) assert_type(np.emath.log(AR_f8), npt.NDArray[Any]) -assert_type(np.emath.log(c16), np.complexfloating[Any, Any]) -assert_type(np.emath.log(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.emath.log(c16), np.complexfloating) +assert_type(np.emath.log(AR_c16), npt.NDArray[np.complexfloating]) assert_type(np.emath.log10(f8), Any) assert_type(np.emath.log10(AR_f8), npt.NDArray[Any]) -assert_type(np.emath.log10(c16), np.complexfloating[Any, Any]) -assert_type(np.emath.log10(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.emath.log10(c16), np.complexfloating) +assert_type(np.emath.log10(AR_c16), npt.NDArray[np.complexfloating]) assert_type(np.emath.log2(f8), Any) assert_type(np.emath.log2(AR_f8), npt.NDArray[Any]) -assert_type(np.emath.log2(c16), np.complexfloating[Any, Any]) -assert_type(np.emath.log2(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.emath.log2(c16), np.complexfloating) +assert_type(np.emath.log2(AR_c16), npt.NDArray[np.complexfloating]) assert_type(np.emath.logn(f8, 2), Any) assert_type(np.emath.logn(AR_f8, 4), npt.NDArray[Any]) -assert_type(np.emath.logn(f8, 1j), np.complexfloating[Any, Any]) -assert_type(np.emath.logn(AR_c16, 1.5), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.emath.logn(f8, 1j), np.complexfloating) +assert_type(np.emath.logn(AR_c16, 1.5), npt.NDArray[np.complexfloating]) assert_type(np.emath.power(f8, 2), Any) assert_type(np.emath.power(AR_f8, 4), npt.NDArray[Any]) -assert_type(np.emath.power(f8, 2j), np.complexfloating[Any, Any]) 
-assert_type(np.emath.power(AR_c16, 1.5), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.emath.power(f8, 2j), np.complexfloating) +assert_type(np.emath.power(AR_c16, 1.5), npt.NDArray[np.complexfloating]) assert_type(np.emath.arccos(f8), Any) assert_type(np.emath.arccos(AR_f8), npt.NDArray[Any]) -assert_type(np.emath.arccos(c16), np.complexfloating[Any, Any]) -assert_type(np.emath.arccos(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.emath.arccos(c16), np.complexfloating) +assert_type(np.emath.arccos(AR_c16), npt.NDArray[np.complexfloating]) assert_type(np.emath.arcsin(f8), Any) assert_type(np.emath.arcsin(AR_f8), npt.NDArray[Any]) -assert_type(np.emath.arcsin(c16), np.complexfloating[Any, Any]) -assert_type(np.emath.arcsin(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.emath.arcsin(c16), np.complexfloating) +assert_type(np.emath.arcsin(AR_c16), npt.NDArray[np.complexfloating]) assert_type(np.emath.arctanh(f8), Any) assert_type(np.emath.arctanh(AR_f8), npt.NDArray[Any]) -assert_type(np.emath.arctanh(c16), np.complexfloating[Any, Any]) -assert_type(np.emath.arctanh(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.emath.arctanh(c16), np.complexfloating) +assert_type(np.emath.arctanh(AR_c16), npt.NDArray[np.complexfloating]) diff --git a/numpy/typing/tests/data/reveal/false_positives.pyi b/numpy/typing/tests/data/reveal/false_positives.pyi deleted file mode 100644 index 7a2e016245a6..000000000000 --- a/numpy/typing/tests/data/reveal/false_positives.pyi +++ /dev/null @@ -1,18 +0,0 @@ -import sys -from typing import Any - -import numpy as np -import numpy.typing as npt - -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - -AR_Any: npt.NDArray[Any] - -# Mypy bug where overload ambiguity is ignored for `Any`-parametrized types; -# xref numpy/numpy#20099 and python/mypy#11347 -# -# The expected output would be something akin to 
`npt.NDArray[Any]` -assert_type(AR_Any + 2, npt.NDArray[np.signedinteger[Any]]) diff --git a/numpy/typing/tests/data/reveal/fft.pyi b/numpy/typing/tests/data/reveal/fft.pyi index d6e9ba756d97..38a3d2f8e692 100644 --- a/numpy/typing/tests/data/reveal/fft.pyi +++ b/numpy/typing/tests/data/reveal/fft.pyi @@ -1,43 +1,115 @@ -import sys -from typing import Any +from typing import Any, assert_type import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +### -AR_f8: npt.NDArray[np.float64] -AR_c16: npt.NDArray[np.complex128] -AR_LIKE_f8: list[float] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] -assert_type(np.fft.fftshift(AR_f8), npt.NDArray[np.float64]) -assert_type(np.fft.fftshift(AR_LIKE_f8, axes=0), npt.NDArray[Any]) +_f64_nd: npt.NDArray[np.float64] +_c128_nd: npt.NDArray[np.complex128] +_py_float_1d: list[float] -assert_type(np.fft.ifftshift(AR_f8), npt.NDArray[np.float64]) -assert_type(np.fft.ifftshift(AR_LIKE_f8, axes=0), npt.NDArray[Any]) +_i64: np.int64 +_f32: np.float16 +_f80: np.longdouble +_c64: np.complex64 +_c160: np.clongdouble -assert_type(np.fft.fftfreq(5, AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.fft.fftfreq(np.int64(), AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +_i64_2d: _Array2D[np.int64] +_f32_2d: _Array2D[np.float16] +_f80_2d: _Array2D[np.longdouble] +_c64_2d: _Array2D[np.complex64] +_c160_2d: _Array2D[np.clongdouble] -assert_type(np.fft.fftfreq(5, AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.fft.fftfreq(np.int64(), AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +_i64_nd: npt.NDArray[np.int64] +_f32_nd: npt.NDArray[np.float16] +_f80_nd: npt.NDArray[np.longdouble] +_c64_nd: npt.NDArray[np.complex64] +_c160_nd: npt.NDArray[np.clongdouble] -assert_type(np.fft.fft(AR_f8), 
npt.NDArray[np.complex128]) -assert_type(np.fft.ifft(AR_f8, axis=1), npt.NDArray[np.complex128]) -assert_type(np.fft.rfft(AR_f8, n=None), npt.NDArray[np.complex128]) -assert_type(np.fft.irfft(AR_f8, norm="ortho"), npt.NDArray[np.float64]) -assert_type(np.fft.hfft(AR_f8, n=2), npt.NDArray[np.float64]) -assert_type(np.fft.ihfft(AR_f8), npt.NDArray[np.complex128]) +### -assert_type(np.fft.fftn(AR_f8), npt.NDArray[np.complex128]) -assert_type(np.fft.ifftn(AR_f8), npt.NDArray[np.complex128]) -assert_type(np.fft.rfftn(AR_f8), npt.NDArray[np.complex128]) -assert_type(np.fft.irfftn(AR_f8), npt.NDArray[np.float64]) +# fftshift -assert_type(np.fft.rfft2(AR_f8), npt.NDArray[np.complex128]) -assert_type(np.fft.ifft2(AR_f8), npt.NDArray[np.complex128]) -assert_type(np.fft.fft2(AR_f8), npt.NDArray[np.complex128]) -assert_type(np.fft.irfft2(AR_f8), npt.NDArray[np.float64]) +assert_type(np.fft.fftshift(_f64_nd), npt.NDArray[np.float64]) +assert_type(np.fft.fftshift(_py_float_1d, axes=0), npt.NDArray[Any]) + +# ifftshift + +assert_type(np.fft.ifftshift(_f64_nd), npt.NDArray[np.float64]) +assert_type(np.fft.ifftshift(_py_float_1d, axes=0), npt.NDArray[Any]) + +# fftfreq + +assert_type(np.fft.fftfreq(5), _Array1D[np.float64]) +assert_type(np.fft.fftfreq(5, True), _Array1D[np.float64]) +assert_type(np.fft.fftfreq(5, 1), _Array1D[np.float64]) +assert_type(np.fft.fftfreq(5, 1.0), _Array1D[np.float64]) +assert_type(np.fft.fftfreq(5, 1j), _Array1D[np.complex128 | Any]) + +assert_type(np.fft.fftfreq(5, _i64), _Array1D[np.float64]) +assert_type(np.fft.fftfreq(5, _f32), _Array1D[np.float64]) +assert_type(np.fft.fftfreq(5, _f80), _Array1D[np.longdouble]) +assert_type(np.fft.fftfreq(5, _c64), _Array1D[np.complex128]) +assert_type(np.fft.fftfreq(5, _c160), _Array1D[np.clongdouble]) + +assert_type(np.fft.fftfreq(5, _i64_2d), _Array2D[np.float64]) +assert_type(np.fft.fftfreq(5, _f32_2d), _Array2D[np.float64]) +assert_type(np.fft.fftfreq(5, _f80_2d), _Array2D[np.longdouble]) 
+assert_type(np.fft.fftfreq(5, _c64_2d), _Array2D[np.complex128]) +assert_type(np.fft.fftfreq(5, _c160_2d), _Array2D[np.clongdouble]) + +assert_type(np.fft.fftfreq(5, _i64_nd), npt.NDArray[np.float64]) +assert_type(np.fft.fftfreq(5, _f32_nd), npt.NDArray[np.float64]) +assert_type(np.fft.fftfreq(5, _f80_nd), npt.NDArray[np.longdouble]) +assert_type(np.fft.fftfreq(5, _c64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.fftfreq(5, _c160_nd), npt.NDArray[np.clongdouble]) + +# rfftfreq (same as fftfreq) + +assert_type(np.fft.rfftfreq(5), _Array1D[np.float64]) +assert_type(np.fft.rfftfreq(5, True), _Array1D[np.float64]) +assert_type(np.fft.rfftfreq(5, 1), _Array1D[np.float64]) +assert_type(np.fft.rfftfreq(5, 1.0), _Array1D[np.float64]) +assert_type(np.fft.rfftfreq(5, 1j), _Array1D[np.complex128 | Any]) + +assert_type(np.fft.rfftfreq(5, _i64), _Array1D[np.float64]) +assert_type(np.fft.rfftfreq(5, _f32), _Array1D[np.float64]) +assert_type(np.fft.rfftfreq(5, _f80), _Array1D[np.longdouble]) +assert_type(np.fft.rfftfreq(5, _c64), _Array1D[np.complex128]) +assert_type(np.fft.rfftfreq(5, _c160), _Array1D[np.clongdouble]) + +assert_type(np.fft.rfftfreq(5, _i64_2d), _Array2D[np.float64]) +assert_type(np.fft.rfftfreq(5, _f32_2d), _Array2D[np.float64]) +assert_type(np.fft.rfftfreq(5, _f80_2d), _Array2D[np.longdouble]) +assert_type(np.fft.rfftfreq(5, _c64_2d), _Array2D[np.complex128]) +assert_type(np.fft.rfftfreq(5, _c160_2d), _Array2D[np.clongdouble]) + +assert_type(np.fft.rfftfreq(5, _i64_nd), npt.NDArray[np.float64]) +assert_type(np.fft.rfftfreq(5, _f32_nd), npt.NDArray[np.float64]) +assert_type(np.fft.rfftfreq(5, _f80_nd), npt.NDArray[np.longdouble]) +assert_type(np.fft.rfftfreq(5, _c64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.rfftfreq(5, _c160_nd), npt.NDArray[np.clongdouble]) +... 
+ +# the other fft functions + +assert_type(np.fft.fft(_f64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.ifft(_f64_nd, axis=1), npt.NDArray[np.complex128]) +assert_type(np.fft.rfft(_f64_nd, n=None), npt.NDArray[np.complex128]) +assert_type(np.fft.irfft(_f64_nd, norm="ortho"), npt.NDArray[np.float64]) +assert_type(np.fft.hfft(_f64_nd, n=2), npt.NDArray[np.float64]) +assert_type(np.fft.ihfft(_f64_nd), npt.NDArray[np.complex128]) + +assert_type(np.fft.fftn(_f64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.ifftn(_f64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.rfftn(_f64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.irfftn(_f64_nd), npt.NDArray[np.float64]) + +assert_type(np.fft.rfft2(_f64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.ifft2(_f64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.fft2(_f64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.irfft2(_f64_nd), npt.NDArray[np.float64]) diff --git a/numpy/typing/tests/data/reveal/flatiter.pyi b/numpy/typing/tests/data/reveal/flatiter.pyi index 84d3b03b7d37..4907f8464cf2 100644 --- a/numpy/typing/tests/data/reveal/flatiter.pyi +++ b/numpy/typing/tests/data/reveal/flatiter.pyi @@ -1,31 +1,86 @@ -import sys -from typing import Any +from typing import Any, assert_type import numpy as np -import numpy.typing as npt - -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - -a: np.flatiter[npt.NDArray[np.str_]] - -assert_type(a.base, npt.NDArray[np.str_]) -assert_type(a.copy(), npt.NDArray[np.str_]) -assert_type(a.coords, tuple[int, ...]) -assert_type(a.index, int) -assert_type(iter(a), np.flatiter[npt.NDArray[np.str_]]) -assert_type(next(a), np.str_) -assert_type(a[0], np.str_) -assert_type(a[[0, 1, 2]], npt.NDArray[np.str_]) -assert_type(a[...], npt.NDArray[np.str_]) -assert_type(a[:], npt.NDArray[np.str_]) -assert_type(a[(...,)], npt.NDArray[np.str_]) -assert_type(a[(0,)], np.str_) -assert_type(a.__array__(), 
npt.NDArray[np.str_]) -assert_type(a.__array__(np.dtype(np.float64)), npt.NDArray[np.float64]) -a[0] = "a" -a[:5] = "a" -a[...] = "a" -a[(...,)] = "a" + +type _ArrayND = np.ndarray[tuple[Any, ...], np.dtypes.StrDType] +type _Array1D = np.ndarray[tuple[int], np.dtypes.BytesDType] +type _Array2D = np.ndarray[tuple[int, int], np.dtypes.Int8DType] + +_a_nd: np.flatiter[_ArrayND] +_a_1d: np.flatiter[_Array1D] +_a_2d: np.flatiter[_Array2D] + +### + +# .base +assert_type(_a_nd.base, _ArrayND) +assert_type(_a_1d.base, _Array1D) +assert_type(_a_2d.base, _Array2D) + +# .coords +assert_type(_a_nd.coords, tuple[Any, ...]) +assert_type(_a_1d.coords, tuple[int]) +assert_type(_a_2d.coords, tuple[int, int]) + +# .index +assert_type(_a_nd.index, int) +assert_type(_a_1d.index, int) +assert_type(_a_2d.index, int) + +# .__len__() +assert_type(len(_a_nd), int) +assert_type(len(_a_1d), int) +assert_type(len(_a_2d), int) + +# .__iter__() +assert_type(iter(_a_nd), np.flatiter[_ArrayND]) +assert_type(iter(_a_1d), np.flatiter[_Array1D]) +assert_type(iter(_a_2d), np.flatiter[_Array2D]) + +# .__next__() +assert_type(next(_a_nd), np.str_) +assert_type(next(_a_1d), np.bytes_) +assert_type(next(_a_2d), np.int8) + +# .__getitem__(()) +assert_type(_a_nd[()], _ArrayND) +assert_type(_a_1d[()], _Array1D) +assert_type(_a_2d[()], _Array2D) +# .__getitem__(int) +assert_type(_a_nd[0], np.str_) +assert_type(_a_1d[0], np.bytes_) +assert_type(_a_2d[0], np.int8) +# .__getitem__(slice) +assert_type(_a_nd[::], np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d[::], np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d[::], np.ndarray[tuple[int], np.dtypes.Int8DType]) +# .__getitem__(EllipsisType) +assert_type(_a_nd[...], np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d[...], np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d[...], np.ndarray[tuple[int], np.dtypes.Int8DType]) +# .__getitem__(list[!]) +assert_type(_a_nd[[]], np.ndarray[tuple[int], 
np.dtypes.StrDType]) +assert_type(_a_1d[[]], np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d[[]], np.ndarray[tuple[int], np.dtypes.Int8DType]) +# .__getitem__(list[int]) +assert_type(_a_nd[[0]], np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d[[0]], np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d[[0]], np.ndarray[tuple[int], np.dtypes.Int8DType]) +# .__getitem__(list[list[int]]) +assert_type(_a_nd[[[0]]], np.ndarray[tuple[int, int], np.dtypes.StrDType]) +assert_type(_a_1d[[[0]]], np.ndarray[tuple[int, int], np.dtypes.BytesDType]) +assert_type(_a_2d[[[0]]], np.ndarray[tuple[int, int], np.dtypes.Int8DType]) +# .__getitem__(list[list[list[list[int]]]]) +assert_type(_a_nd[[[[[0]]]]], np.ndarray[tuple[Any, ...], np.dtypes.StrDType]) +assert_type(_a_1d[[[[[0]]]]], np.ndarray[tuple[Any, ...], np.dtypes.BytesDType]) +assert_type(_a_2d[[[[[0]]]]], np.ndarray[tuple[Any, ...], np.dtypes.Int8DType]) + +# __array__() +assert_type(_a_nd.__array__(), np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d.__array__(), np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d.__array__(), np.ndarray[tuple[int], np.dtypes.Int8DType]) + +# .copy() +assert_type(_a_nd.copy(), np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d.copy(), np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d.copy(), np.ndarray[tuple[int], np.dtypes.Int8DType]) diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index 7fa2260bc312..4474f375b716 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -1,18 +1,11 @@ """Tests for :mod:`_core.fromnumeric`.""" -import sys -from typing import Any +from typing import Any, assert_type import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - -class 
NDArraySubclass(npt.NDArray[np.complex128]): - ... +class NDArraySubclass(np.ndarray[tuple[Any, ...], np.dtype[np.complex128]]): ... AR_b: npt.NDArray[np.bool] AR_f4: npt.NDArray[np.float32] @@ -21,12 +14,20 @@ AR_u8: npt.NDArray[np.uint64] AR_i8: npt.NDArray[np.int64] AR_O: npt.NDArray[np.object_] AR_subclass: NDArraySubclass +AR_m: npt.NDArray[np.timedelta64] +AR_0d: np.ndarray[tuple[()]] +AR_1d: np.ndarray[tuple[int]] +AR_nd: np.ndarray b: np.bool f4: np.float32 i8: np.int64 f: float +# integer‑dtype subclass for argmin/argmax +class NDArrayIntSubclass(np.ndarray[tuple[Any, ...], np.dtype[np.intp]]): ... +AR_sub_i: NDArrayIntSubclass + assert_type(np.take(b, 0), np.bool) assert_type(np.take(f4, 0), np.float32) assert_type(np.take(f, 0), Any) @@ -37,22 +38,24 @@ assert_type(np.take(AR_f4, [0]), npt.NDArray[np.float32]) assert_type(np.take([1], [0]), npt.NDArray[Any]) assert_type(np.take(AR_f4, [0], out=AR_subclass), NDArraySubclass) -assert_type(np.reshape(b, 1), npt.NDArray[np.bool]) -assert_type(np.reshape(f4, 1), npt.NDArray[np.float32]) -assert_type(np.reshape(f, 1), npt.NDArray[Any]) -assert_type(np.reshape(AR_b, 1), npt.NDArray[np.bool]) -assert_type(np.reshape(AR_f4, 1), npt.NDArray[np.float32]) +assert_type(np.reshape(b, 1), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.reshape(f4, 1), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.reshape(f, 1), np.ndarray[tuple[int], np.dtype]) +assert_type(np.reshape(AR_b, 1), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.reshape(AR_f4, 1), np.ndarray[tuple[int], np.dtype[np.float32]]) assert_type(np.choose(1, [True, True]), Any) assert_type(np.choose([1], [True, True]), npt.NDArray[Any]) assert_type(np.choose([1], AR_b), npt.NDArray[np.bool]) assert_type(np.choose([1], AR_b, out=AR_f4), npt.NDArray[np.float32]) -assert_type(np.repeat(b, 1), npt.NDArray[np.bool]) -assert_type(np.repeat(f4, 1), npt.NDArray[np.float32]) -assert_type(np.repeat(f, 1), npt.NDArray[Any]) 
-assert_type(np.repeat(AR_b, 1), npt.NDArray[np.bool]) -assert_type(np.repeat(AR_f4, 1), npt.NDArray[np.float32]) +assert_type(np.repeat(b, 1), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.repeat(b, 1, axis=0), npt.NDArray[np.bool]) +assert_type(np.repeat(f4, 1), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.repeat(f, 1), np.ndarray[tuple[int], np.dtype[Any]]) +assert_type(np.repeat(AR_b, 1), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.repeat(AR_f4, 1), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.repeat(AR_f4, 1, axis=0), npt.NDArray[np.float32]) # TODO: array_bdd tests for np.put() @@ -77,6 +80,11 @@ assert_type(np.argpartition(f4, 0), npt.NDArray[np.intp]) assert_type(np.argpartition(f, 0), npt.NDArray[np.intp]) assert_type(np.argpartition(AR_b, 0), npt.NDArray[np.intp]) assert_type(np.argpartition(AR_f4, 0), npt.NDArray[np.intp]) +assert_type(np.argpartition(b, 0, axis=None), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(np.argpartition(f4, 0, axis=None), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(np.argpartition(f, 0, axis=None), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(np.argpartition(AR_b, 0, axis=None), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(np.argpartition(AR_f4, 0, axis=None), np.ndarray[tuple[int], np.dtype[np.intp]]) assert_type(np.sort([2, 1], 0), npt.NDArray[Any]) assert_type(np.sort(AR_b, 0), npt.NDArray[np.bool]) @@ -89,24 +97,24 @@ assert_type(np.argmax(AR_b), np.intp) assert_type(np.argmax(AR_f4), np.intp) assert_type(np.argmax(AR_b, axis=0), Any) assert_type(np.argmax(AR_f4, axis=0), Any) -assert_type(np.argmax(AR_f4, out=AR_subclass), NDArraySubclass) +assert_type(np.argmax(AR_f4, out=AR_sub_i), NDArrayIntSubclass) assert_type(np.argmin(AR_b), np.intp) assert_type(np.argmin(AR_f4), np.intp) assert_type(np.argmin(AR_b, axis=0), Any) assert_type(np.argmin(AR_f4, axis=0), Any) -assert_type(np.argmin(AR_f4, out=AR_subclass), 
NDArraySubclass) +assert_type(np.argmin(AR_f4, out=AR_sub_i), NDArrayIntSubclass) assert_type(np.searchsorted(AR_b[0], 0), np.intp) assert_type(np.searchsorted(AR_f4[0], 0), np.intp) assert_type(np.searchsorted(AR_b[0], [0]), npt.NDArray[np.intp]) assert_type(np.searchsorted(AR_f4[0], [0]), npt.NDArray[np.intp]) -assert_type(np.resize(b, (5, 5)), npt.NDArray[np.bool]) -assert_type(np.resize(f4, (5, 5)), npt.NDArray[np.float32]) -assert_type(np.resize(f, (5, 5)), npt.NDArray[Any]) -assert_type(np.resize(AR_b, (5, 5)), npt.NDArray[np.bool]) -assert_type(np.resize(AR_f4, (5, 5)), npt.NDArray[np.float32]) +assert_type(np.resize(b, (5, 5)), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.resize(f4, (5, 5)), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(np.resize(f, (5, 5)), np.ndarray[tuple[int, int], np.dtype]) +assert_type(np.resize(AR_b, (5, 5)), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.resize(AR_f4, (5, 5)), np.ndarray[tuple[int, int], np.dtype[np.float32]]) assert_type(np.squeeze(b), np.bool) assert_type(np.squeeze(f4), np.float32) @@ -120,24 +128,30 @@ assert_type(np.diagonal(AR_f4), npt.NDArray[np.float32]) assert_type(np.trace(AR_b), Any) assert_type(np.trace(AR_f4), Any) assert_type(np.trace(AR_f4, out=AR_subclass), NDArraySubclass) - -assert_type(np.ravel(b), npt.NDArray[np.bool]) -assert_type(np.ravel(f4), npt.NDArray[np.float32]) -assert_type(np.ravel(f), npt.NDArray[Any]) -assert_type(np.ravel(AR_b), npt.NDArray[np.bool]) -assert_type(np.ravel(AR_f4), npt.NDArray[np.float32]) - -assert_type(np.nonzero(b), tuple[npt.NDArray[np.intp], ...]) -assert_type(np.nonzero(f4), tuple[npt.NDArray[np.intp], ...]) -assert_type(np.nonzero(f), tuple[npt.NDArray[np.intp], ...]) -assert_type(np.nonzero(AR_b), tuple[npt.NDArray[np.intp], ...]) -assert_type(np.nonzero(AR_f4), tuple[npt.NDArray[np.intp], ...]) - -assert_type(np.shape(b), tuple[int, ...]) -assert_type(np.shape(f4), tuple[int, ...]) 
-assert_type(np.shape(f), tuple[int, ...]) -assert_type(np.shape(AR_b), tuple[int, ...]) -assert_type(np.shape(AR_f4), tuple[int, ...]) +assert_type(np.trace(AR_f4, out=AR_subclass, dtype=None), NDArraySubclass) + +assert_type(np.ravel(b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.ravel(f4), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.ravel(f), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.ravel(AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.ravel(AR_f4), np.ndarray[tuple[int], np.dtype[np.float32]]) + +assert_type(np.nonzero(AR_b), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) +assert_type(np.nonzero(AR_f4), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) +assert_type(np.nonzero(AR_1d), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) +assert_type(np.nonzero(AR_nd), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) + +assert_type(np.shape(b), tuple[()]) +assert_type(np.shape(f), tuple[()]) +assert_type(np.shape([1]), tuple[int]) +assert_type(np.shape([[2]]), tuple[int, int]) +assert_type(np.shape([[[3]]]), tuple[Any, ...]) +assert_type(np.shape(AR_b), tuple[Any, ...]) +assert_type(np.shape(AR_nd), tuple[Any, ...]) +# these fail on mypy, but it works as expected with pyright/pylance +# assert_type(np.shape(AR_0d), tuple[()]) +# assert_type(np.shape(AR_1d), tuple[int]) +# assert_type(np.shape(AR_2d), tuple[int, int]) assert_type(np.compress([True], b), npt.NDArray[np.bool]) assert_type(np.compress([True], f4), npt.NDArray[np.float32]) @@ -161,6 +175,12 @@ assert_type(np.sum(AR_f4), np.float32) assert_type(np.sum(AR_b, axis=0), Any) assert_type(np.sum(AR_f4, axis=0), Any) assert_type(np.sum(AR_f4, out=AR_subclass), NDArraySubclass) +assert_type(np.sum(AR_f4, dtype=np.float64), np.float64) +assert_type(np.sum(AR_f4, None, np.float64), np.float64) +assert_type(np.sum(AR_f4, dtype=np.float64, keepdims=False), np.float64) +assert_type(np.sum(AR_f4, None, np.float64, 
keepdims=False), np.float64) +assert_type(np.sum(AR_f4, dtype=np.float64, keepdims=True), np.float64 | npt.NDArray[np.float64]) +assert_type(np.sum(AR_f4, None, np.float64, keepdims=True), np.float64 | npt.NDArray[np.float64]) assert_type(np.all(b), np.bool) assert_type(np.all(f4), np.bool) @@ -193,6 +213,15 @@ assert_type(np.cumsum(f, dtype=float), npt.NDArray[Any]) assert_type(np.cumsum(f, dtype=np.float64), npt.NDArray[np.float64]) assert_type(np.cumsum(AR_f4, out=AR_subclass), NDArraySubclass) +assert_type(np.cumulative_sum(b), npt.NDArray[np.bool]) +assert_type(np.cumulative_sum(f4), npt.NDArray[np.float32]) +assert_type(np.cumulative_sum(f), npt.NDArray[Any]) +assert_type(np.cumulative_sum(AR_b), npt.NDArray[np.bool]) +assert_type(np.cumulative_sum(AR_f4), npt.NDArray[np.float32]) +assert_type(np.cumulative_sum(f, dtype=float), npt.NDArray[Any]) +assert_type(np.cumulative_sum(f, dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.cumulative_sum(AR_f4, out=AR_subclass), NDArraySubclass) + assert_type(np.ptp(b), np.bool) assert_type(np.ptp(f4), np.float32) assert_type(np.ptp(f), Any) @@ -229,8 +258,8 @@ assert_type(np.amin(AR_f4, out=AR_subclass), NDArraySubclass) assert_type(np.prod(AR_b), np.int_) assert_type(np.prod(AR_u8), np.uint64) assert_type(np.prod(AR_i8), np.int64) -assert_type(np.prod(AR_f4), np.floating[Any]) -assert_type(np.prod(AR_c16), np.complexfloating[Any, Any]) +assert_type(np.prod(AR_f4), np.floating) +assert_type(np.prod(AR_c16), np.complexfloating) assert_type(np.prod(AR_O), Any) assert_type(np.prod(AR_f4, axis=0), Any) assert_type(np.prod(AR_f4, keepdims=True), Any) @@ -241,14 +270,25 @@ assert_type(np.prod(AR_f4, out=AR_subclass), NDArraySubclass) assert_type(np.cumprod(AR_b), npt.NDArray[np.int_]) assert_type(np.cumprod(AR_u8), npt.NDArray[np.uint64]) assert_type(np.cumprod(AR_i8), npt.NDArray[np.int64]) -assert_type(np.cumprod(AR_f4), npt.NDArray[np.floating[Any]]) -assert_type(np.cumprod(AR_c16), 
npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.cumprod(AR_f4), npt.NDArray[np.floating]) +assert_type(np.cumprod(AR_c16), npt.NDArray[np.complexfloating]) assert_type(np.cumprod(AR_O), npt.NDArray[np.object_]) -assert_type(np.cumprod(AR_f4, axis=0), npt.NDArray[np.floating[Any]]) +assert_type(np.cumprod(AR_f4, axis=0), npt.NDArray[np.floating]) assert_type(np.cumprod(AR_f4, dtype=np.float64), npt.NDArray[np.float64]) assert_type(np.cumprod(AR_f4, dtype=float), npt.NDArray[Any]) assert_type(np.cumprod(AR_f4, out=AR_subclass), NDArraySubclass) +assert_type(np.cumulative_prod(AR_b), npt.NDArray[np.int_]) +assert_type(np.cumulative_prod(AR_u8), npt.NDArray[np.uint64]) +assert_type(np.cumulative_prod(AR_i8), npt.NDArray[np.int64]) +assert_type(np.cumulative_prod(AR_f4), npt.NDArray[np.floating]) +assert_type(np.cumulative_prod(AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.cumulative_prod(AR_O), npt.NDArray[np.object_]) +assert_type(np.cumulative_prod(AR_f4, axis=0), npt.NDArray[np.floating]) +assert_type(np.cumulative_prod(AR_f4, dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.cumulative_prod(AR_f4, dtype=float), npt.NDArray[Any]) +assert_type(np.cumulative_prod(AR_f4, out=AR_subclass), NDArraySubclass) + assert_type(np.ndim(b), int) assert_type(np.ndim(f4), int) assert_type(np.ndim(f), int) @@ -271,21 +311,28 @@ assert_type(np.around(AR_f4), npt.NDArray[np.float32]) assert_type(np.around([1.5]), npt.NDArray[Any]) assert_type(np.around(AR_f4, out=AR_subclass), NDArraySubclass) -assert_type(np.mean(AR_b), np.floating[Any]) -assert_type(np.mean(AR_i8), np.floating[Any]) -assert_type(np.mean(AR_f4), np.floating[Any]) -assert_type(np.mean(AR_c16), np.complexfloating[Any, Any]) +assert_type(np.mean(AR_b), np.floating) +assert_type(np.mean(AR_i8), np.floating) +assert_type(np.mean(AR_f4), np.floating) +assert_type(np.mean(AR_m), np.timedelta64) +assert_type(np.mean(AR_c16), np.complexfloating) assert_type(np.mean(AR_O), Any) 
assert_type(np.mean(AR_f4, axis=0), Any) assert_type(np.mean(AR_f4, keepdims=True), Any) assert_type(np.mean(AR_f4, dtype=float), Any) assert_type(np.mean(AR_f4, dtype=np.float64), np.float64) assert_type(np.mean(AR_f4, out=AR_subclass), NDArraySubclass) - -assert_type(np.std(AR_b), np.floating[Any]) -assert_type(np.std(AR_i8), np.floating[Any]) -assert_type(np.std(AR_f4), np.floating[Any]) -assert_type(np.std(AR_c16), np.floating[Any]) +assert_type(np.mean(AR_f4, dtype=np.float64), np.float64) +assert_type(np.mean(AR_f4, None, np.float64), np.float64) +assert_type(np.mean(AR_f4, dtype=np.float64, keepdims=False), np.float64) +assert_type(np.mean(AR_f4, None, np.float64, keepdims=False), np.float64) +assert_type(np.mean(AR_f4, dtype=np.float64, keepdims=True), np.float64 | npt.NDArray[np.float64]) +assert_type(np.mean(AR_f4, None, np.float64, keepdims=True), np.float64 | npt.NDArray[np.float64]) + +assert_type(np.std(AR_b), np.floating) +assert_type(np.std(AR_i8), np.floating) +assert_type(np.std(AR_f4), np.floating) +assert_type(np.std(AR_c16), np.floating) assert_type(np.std(AR_O), Any) assert_type(np.std(AR_f4, axis=0), Any) assert_type(np.std(AR_f4, keepdims=True), Any) @@ -293,10 +340,10 @@ assert_type(np.std(AR_f4, dtype=float), Any) assert_type(np.std(AR_f4, dtype=np.float64), np.float64) assert_type(np.std(AR_f4, out=AR_subclass), NDArraySubclass) -assert_type(np.var(AR_b), np.floating[Any]) -assert_type(np.var(AR_i8), np.floating[Any]) -assert_type(np.var(AR_f4), np.floating[Any]) -assert_type(np.var(AR_c16), np.floating[Any]) +assert_type(np.var(AR_b), np.floating) +assert_type(np.var(AR_i8), np.floating) +assert_type(np.var(AR_f4), np.floating) +assert_type(np.var(AR_c16), np.floating) assert_type(np.var(AR_O), Any) assert_type(np.var(AR_f4, axis=0), Any) assert_type(np.var(AR_f4, keepdims=True), Any) diff --git a/numpy/typing/tests/data/reveal/getlimits.pyi b/numpy/typing/tests/data/reveal/getlimits.pyi index f53fdf48824e..cc964d753055 100644 --- 
a/numpy/typing/tests/data/reveal/getlimits.pyi +++ b/numpy/typing/tests/data/reveal/getlimits.pyi @@ -1,16 +1,11 @@ -import sys -from typing import Any +from typing import assert_type import numpy as np -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - f: float f8: np.float64 c8: np.complex64 +c16: np.complex128 i: int i8: np.int64 @@ -19,10 +14,11 @@ u4: np.uint32 finfo_f8: np.finfo[np.float64] iinfo_i8: np.iinfo[np.int64] -assert_type(np.finfo(f), np.finfo[np.double]) +assert_type(np.finfo(f), np.finfo[np.float64]) assert_type(np.finfo(f8), np.finfo[np.float64]) assert_type(np.finfo(c8), np.finfo[np.float32]) -assert_type(np.finfo('f2'), np.finfo[np.floating[Any]]) +assert_type(np.finfo(c16), np.finfo[np.float64]) +assert_type(np.finfo("f2"), np.finfo[np.float16]) assert_type(finfo_f8.dtype, np.dtype[np.float64]) assert_type(finfo_f8.bits, int) @@ -46,7 +42,8 @@ assert_type(finfo_f8.smallest_subnormal, np.float64) assert_type(np.iinfo(i), np.iinfo[np.int_]) assert_type(np.iinfo(i8), np.iinfo[np.int64]) assert_type(np.iinfo(u4), np.iinfo[np.uint32]) -assert_type(np.iinfo('i2'), np.iinfo[Any]) +assert_type(np.iinfo("i2"), np.iinfo[np.int16]) +assert_type(np.iinfo("u2"), np.iinfo[np.uint16]) assert_type(iinfo_i8.dtype, np.dtype[np.int64]) assert_type(iinfo_i8.kind, str) diff --git a/numpy/typing/tests/data/reveal/histograms.pyi b/numpy/typing/tests/data/reveal/histograms.pyi index 67067eb7d63f..278961247698 100644 --- a/numpy/typing/tests/data/reveal/histograms.pyi +++ b/numpy/typing/tests/data/reveal/histograms.pyi @@ -1,31 +1,69 @@ -import sys -from typing import Any +from typing import Any, assert_type import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +AR_i4: npt.NDArray[np.int32] AR_i8: 
npt.NDArray[np.int64] +AR_f4: npt.NDArray[np.float32] AR_f8: npt.NDArray[np.float64] +AR_c8: npt.NDArray[np.complex64] +AR_c16: npt.NDArray[np.complex128] -assert_type(np.histogram_bin_edges(AR_i8, bins="auto"), npt.NDArray[Any]) -assert_type(np.histogram_bin_edges(AR_i8, bins="rice", range=(0, 3)), npt.NDArray[Any]) -assert_type(np.histogram_bin_edges(AR_i8, bins="scott", weights=AR_f8), npt.NDArray[Any]) - -assert_type(np.histogram(AR_i8, bins="auto"), tuple[npt.NDArray[Any], npt.NDArray[Any]]) -assert_type(np.histogram(AR_i8, bins="rice", range=(0, 3)), tuple[npt.NDArray[Any], npt.NDArray[Any]]) -assert_type(np.histogram(AR_i8, bins="scott", weights=AR_f8), tuple[npt.NDArray[Any], npt.NDArray[Any]]) -assert_type(np.histogram(AR_f8, bins=1, density=True), tuple[npt.NDArray[Any], npt.NDArray[Any]]) - -assert_type(np.histogramdd(AR_i8, bins=[1]), - tuple[npt.NDArray[Any], tuple[npt.NDArray[Any], ...]]) -assert_type(np.histogramdd(AR_i8, range=[(0, 3)]), - tuple[npt.NDArray[Any], tuple[npt.NDArray[Any], ...]]) -assert_type(np.histogramdd(AR_i8, weights=AR_f8), - tuple[npt.NDArray[Any], tuple[npt.NDArray[Any], ...]]) -assert_type(np.histogramdd(AR_f8, density=True), - tuple[npt.NDArray[Any], tuple[npt.NDArray[Any], ...]]) +list_i: list[int] +list_f: list[float] +list_c: list[complex] + +### + +assert_type(np.histogram_bin_edges(AR_i8, bins="auto"), _Array1D[np.float64]) +assert_type(np.histogram_bin_edges(AR_i8, bins="rice", range=(0, 3)), _Array1D[np.float64]) +assert_type(np.histogram_bin_edges(AR_i8, bins="scott", weights=AR_f8), _Array1D[np.float64]) +assert_type(np.histogram_bin_edges(AR_f4), _Array1D[np.float32]) +assert_type(np.histogram_bin_edges(AR_f8), _Array1D[np.float64]) +assert_type(np.histogram_bin_edges(AR_c8), _Array1D[np.complex64]) +assert_type(np.histogram_bin_edges(AR_c16), _Array1D[np.complex128]) +assert_type(np.histogram_bin_edges(list_i), _Array1D[np.float64]) +assert_type(np.histogram_bin_edges(list_f), _Array1D[np.float64]) 
+assert_type(np.histogram_bin_edges(list_c), _Array1D[np.complex128]) + +assert_type(np.histogram(AR_i8, bins="auto"), tuple[_Array1D[np.intp], _Array1D[np.float64]]) +assert_type(np.histogram(AR_i8, bins="rice", range=(0, 3)), tuple[_Array1D[np.intp], _Array1D[np.float64]]) +assert_type(np.histogram(AR_i8, bins="scott", weights=AR_f8), tuple[_Array1D[np.float64], _Array1D[np.float64]]) +assert_type(np.histogram(AR_f8, bins=1, density=True), tuple[_Array1D[np.float64], _Array1D[np.float64]]) +assert_type(np.histogram(AR_f4), tuple[_Array1D[np.intp], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f8), tuple[_Array1D[np.intp], _Array1D[np.float64]]) +assert_type(np.histogram(AR_c8), tuple[_Array1D[np.intp], _Array1D[np.complex64]]) +assert_type(np.histogram(AR_c16), tuple[_Array1D[np.intp], _Array1D[np.complex128]]) +assert_type(np.histogram(list_i), tuple[_Array1D[np.intp], _Array1D[np.float64]]) +assert_type(np.histogram(list_f), tuple[_Array1D[np.intp], _Array1D[np.float64]]) +assert_type(np.histogram(list_c), tuple[_Array1D[np.intp], _Array1D[np.complex128]]) +assert_type(np.histogram(AR_f4, density=True), tuple[_Array1D[np.float64], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, density=True, weights=AR_i4), tuple[_Array1D[np.float64], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, density=True, weights=AR_f4), tuple[_Array1D[np.float64], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, density=True, weights=AR_f8), tuple[_Array1D[np.float64], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, density=True, weights=AR_c8), tuple[_Array1D[np.complex128], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, density=True, weights=AR_c16), tuple[_Array1D[np.complex128], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, density=True, weights=list_i), tuple[_Array1D[np.float64], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, density=True, weights=list_f), tuple[_Array1D[np.float64], _Array1D[np.float32]]) 
+assert_type(np.histogram(AR_f4, density=True, weights=list_c), tuple[_Array1D[np.complex128], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, weights=AR_i4), tuple[_Array1D[np.int32], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, weights=AR_f4), tuple[_Array1D[np.float32], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, weights=AR_f8), tuple[_Array1D[np.float64], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, weights=AR_c8), tuple[_Array1D[np.complex64], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, weights=AR_c16), tuple[_Array1D[np.complex128], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, weights=list_i), tuple[_Array1D[np.intp], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, weights=list_f), tuple[_Array1D[Any], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, weights=list_c), tuple[_Array1D[Any], _Array1D[np.float32]]) + +assert_type(np.histogramdd(AR_i8, bins=[1]), tuple[npt.NDArray[np.float64], tuple[_Array1D[np.float64], ...]]) +assert_type(np.histogramdd(AR_i8, range=[(0, 3)]), tuple[npt.NDArray[np.float64], tuple[_Array1D[np.float64], ...]]) +assert_type(np.histogramdd(AR_i8, weights=AR_f8), tuple[npt.NDArray[np.float64], tuple[_Array1D[np.float64], ...]]) +assert_type(np.histogramdd(AR_f8, density=True), tuple[npt.NDArray[np.float64], tuple[_Array1D[np.float64], ...]]) +assert_type(np.histogramdd(AR_i4), tuple[npt.NDArray[np.float64], tuple[_Array1D[np.float64], ...]]) +assert_type(np.histogramdd(AR_i8), tuple[npt.NDArray[np.float64], tuple[_Array1D[np.float64], ...]]) +assert_type(np.histogramdd(AR_f4), tuple[npt.NDArray[np.float64], tuple[_Array1D[np.float32], ...]]) +assert_type(np.histogramdd(AR_c8), tuple[npt.NDArray[np.float64], tuple[_Array1D[np.complex64], ...]]) +assert_type(np.histogramdd(AR_c16), tuple[npt.NDArray[np.float64], tuple[_Array1D[np.complex128], ...]]) diff --git a/numpy/typing/tests/data/reveal/index_tricks.pyi b/numpy/typing/tests/data/reveal/index_tricks.pyi 
index 029c8228cae7..f6067c3bed6b 100644 --- a/numpy/typing/tests/data/reveal/index_tricks.pyi +++ b/numpy/typing/tests/data/reveal/index_tricks.pyi @@ -1,41 +1,37 @@ -import sys -from typing import Any, Literal +from types import EllipsisType +from typing import Any, Literal, assert_type import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - AR_LIKE_b: list[bool] AR_LIKE_i: list[int] AR_LIKE_f: list[float] AR_LIKE_U: list[str] +AR_LIKE_O: list[object] AR_i8: npt.NDArray[np.int64] +AR_O: npt.NDArray[np.object_] assert_type(np.ndenumerate(AR_i8), np.ndenumerate[np.int64]) assert_type(np.ndenumerate(AR_LIKE_f), np.ndenumerate[np.float64]) assert_type(np.ndenumerate(AR_LIKE_U), np.ndenumerate[np.str_]) +assert_type(np.ndenumerate(AR_LIKE_O), np.ndenumerate[Any]) -assert_type(np.ndenumerate(AR_i8).iter, np.flatiter[npt.NDArray[np.int64]]) -assert_type(np.ndenumerate(AR_LIKE_f).iter, np.flatiter[npt.NDArray[np.float64]]) -assert_type(np.ndenumerate(AR_LIKE_U).iter, np.flatiter[npt.NDArray[np.str_]]) - -assert_type(next(np.ndenumerate(AR_i8)), tuple[tuple[int, ...], np.int64]) -assert_type(next(np.ndenumerate(AR_LIKE_f)), tuple[tuple[int, ...], np.float64]) -assert_type(next(np.ndenumerate(AR_LIKE_U)), tuple[tuple[int, ...], np.str_]) +assert_type(next(np.ndenumerate(AR_i8)), tuple[tuple[Any, ...], np.int64]) +assert_type(next(np.ndenumerate(AR_LIKE_f)), tuple[tuple[Any, ...], np.float64]) +assert_type(next(np.ndenumerate(AR_LIKE_U)), tuple[tuple[Any, ...], np.str_]) +assert_type(next(np.ndenumerate(AR_LIKE_O)), tuple[tuple[Any, ...], Any]) assert_type(iter(np.ndenumerate(AR_i8)), np.ndenumerate[np.int64]) assert_type(iter(np.ndenumerate(AR_LIKE_f)), np.ndenumerate[np.float64]) assert_type(iter(np.ndenumerate(AR_LIKE_U)), np.ndenumerate[np.str_]) +assert_type(iter(np.ndenumerate(AR_LIKE_O)), np.ndenumerate[Any]) assert_type(np.ndindex(1, 2, 3), np.ndindex) 
assert_type(np.ndindex((1, 2, 3)), np.ndindex) assert_type(iter(np.ndindex(1, 2, 3)), np.ndindex) -assert_type(next(np.ndindex(1, 2, 3)), tuple[int, ...]) +assert_type(next(np.ndindex(1, 2, 3)), tuple[Any, ...]) assert_type(np.unravel_index([22, 41, 37], (7, 6)), tuple[npt.NDArray[np.intp], ...]) assert_type(np.unravel_index([31, 41, 13], (7, 6), order="F"), tuple[npt.NDArray[np.intp], ...]) @@ -54,13 +50,13 @@ assert_type(np.mgrid[1:1:2, None:10], npt.NDArray[Any]) assert_type(np.ogrid[1:1:2], tuple[npt.NDArray[Any], ...]) assert_type(np.ogrid[1:1:2, None:10], tuple[npt.NDArray[Any], ...]) -assert_type(np.index_exp[0:1], tuple[slice]) -assert_type(np.index_exp[0:1, None:3], tuple[slice, slice]) -assert_type(np.index_exp[0, 0:1, ..., [0, 1, 3]], tuple[Literal[0], slice, ellipsis, list[int]]) +assert_type(np.index_exp[0:1], tuple[slice[int, int, None]]) +assert_type(np.index_exp[0:1, None:3], tuple[slice[int, int, None], slice[None, int, None]]) +assert_type(np.index_exp[0, 0:1, ..., [0, 1, 3]], tuple[Literal[0], slice[int, int, None], EllipsisType, list[int]]) -assert_type(np.s_[0:1], slice) -assert_type(np.s_[0:1, None:3], tuple[slice, slice]) -assert_type(np.s_[0, 0:1, ..., [0, 1, 3]], tuple[Literal[0], slice, ellipsis, list[int]]) +assert_type(np.s_[0:1], slice[int, int, None]) +assert_type(np.s_[0:1, None:3], tuple[slice[int, int, None], slice[None, int, None]]) +assert_type(np.s_[0, 0:1, ..., [0, 1, 3]], tuple[Literal[0], slice[int, int, None], EllipsisType, list[int]]) assert_type(np.ix_(AR_LIKE_b), tuple[npt.NDArray[np.bool], ...]) assert_type(np.ix_(AR_LIKE_i, AR_LIKE_f), tuple[npt.NDArray[np.float64], ...]) diff --git a/numpy/typing/tests/data/reveal/lib_function_base.pyi b/numpy/typing/tests/data/reveal/lib_function_base.pyi index 72974dce64bf..bab43106d1c5 100644 --- a/numpy/typing/tests/data/reveal/lib_function_base.pyi +++ b/numpy/typing/tests/data/reveal/lib_function_base.pyi @@ -1,38 +1,55 @@ -import sys -from typing import Any from collections.abc 
import Callable +from fractions import Fraction +from typing import Any, LiteralString, assert_type, type_check_only import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - -vectorized_func: np.vectorize - f8: np.float64 +AR_LIKE_b: list[bool] +AR_LIKE_i8: list[int] AR_LIKE_f8: list[float] +AR_LIKE_c16: list[complex] +AR_LIKE_O: list[Fraction] +AR_u1: npt.NDArray[np.uint8] AR_i8: npt.NDArray[np.int64] +AR_f2: npt.NDArray[np.float16] +AR_f4: npt.NDArray[np.float32] AR_f8: npt.NDArray[np.float64] +AR_f10: npt.NDArray[np.longdouble] +AR_c8: npt.NDArray[np.complex64] AR_c16: npt.NDArray[np.complex128] +AR_c20: npt.NDArray[np.clongdouble] AR_m: npt.NDArray[np.timedelta64] AR_M: npt.NDArray[np.datetime64] AR_O: npt.NDArray[np.object_] AR_b: npt.NDArray[np.bool] AR_U: npt.NDArray[np.str_] -CHAR_AR_U: np.char.chararray[Any, np.dtype[np.str_]] +CHAR_AR_U: np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] # type: ignore[deprecated] + +AR_f8_1d: np.ndarray[tuple[int], np.dtype[np.float64]] +AR_f8_2d: np.ndarray[tuple[int, int], np.dtype[np.float64]] +AR_f8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float64]] +AR_c16_1d: np.ndarray[tuple[int], np.dtype[np.complex128]] + +AR_b_list: list[npt.NDArray[np.bool]] -def func(*args: Any, **kwargs: Any) -> Any: ... +@type_check_only +def func(a: np.ndarray, posarg: bool = ..., /, arg: int = ..., *, kwarg: str = ...) -> np.ndarray: ... +@type_check_only +def func_f8(a: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: ... 
+### + +# vectorize +vectorized_func: np.vectorize assert_type(vectorized_func.pyfunc, Callable[..., Any]) assert_type(vectorized_func.cache, bool) -assert_type(vectorized_func.signature, None | str) -assert_type(vectorized_func.otypes, None | str) +assert_type(vectorized_func.signature, LiteralString | None) +assert_type(vectorized_func.otypes, LiteralString | None) assert_type(vectorized_func.excluded, set[int | str]) -assert_type(vectorized_func.__doc__, None | str) +assert_type(vectorized_func.__doc__, str | None) assert_type(vectorized_func([1]), Any) assert_type(np.vectorize(int), np.vectorize) assert_type( @@ -40,136 +57,355 @@ assert_type( np.vectorize, ) +# rot90 +assert_type(np.rot90(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.rot90(AR_f8, k=2), npt.NDArray[np.float64]) -assert_type(np.rot90(AR_LIKE_f8, axes=(0, 1)), npt.NDArray[Any]) +assert_type(np.rot90(AR_LIKE_f8, axes=(0, 1)), np.ndarray) -assert_type(np.flip(f8), np.float64) -assert_type(np.flip(1.0), Any) +# flip +assert_type(np.flip(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.flip(AR_f8, axis=(0, 1)), npt.NDArray[np.float64]) -assert_type(np.flip(AR_LIKE_f8, axis=0), npt.NDArray[Any]) +assert_type(np.flip(AR_LIKE_f8, axis=0), np.ndarray) +# iterable assert_type(np.iterable(1), bool) assert_type(np.iterable([1]), bool) -assert_type(np.average(AR_f8), np.floating[Any]) -assert_type(np.average(AR_f8, weights=AR_c16), np.complexfloating[Any, Any]) +# average +assert_type(np.average(AR_f8_2d), np.float64) +assert_type(np.average(AR_f8_2d, axis=1), npt.NDArray[np.float64]) +assert_type(np.average(AR_f8_2d, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.average(AR_f8), np.float64) +assert_type(np.average(AR_f8, axis=1), npt.NDArray[np.float64]) +assert_type(np.average(AR_f8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.average(AR_f8, returned=True), tuple[np.float64, np.float64]) 
+assert_type(np.average(AR_f8, axis=1, returned=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.average(AR_f8, keepdims=True, returned=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.average(AR_LIKE_f8), np.float64) +assert_type(np.average(AR_LIKE_f8, weights=AR_f8), np.float64) +assert_type(np.average(AR_LIKE_f8, axis=1), npt.NDArray[np.float64]) +assert_type(np.average(AR_LIKE_f8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.average(AR_LIKE_f8, returned=True), tuple[np.float64, np.float64]) +assert_type(np.average(AR_LIKE_f8, axis=1, returned=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.average(AR_LIKE_f8, keepdims=True, returned=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) assert_type(np.average(AR_O), Any) -assert_type(np.average(AR_f8, returned=True), tuple[np.floating[Any], np.floating[Any]]) -assert_type(np.average(AR_f8, weights=AR_c16, returned=True), tuple[np.complexfloating[Any, Any], np.complexfloating[Any, Any]]) +assert_type(np.average(AR_O, axis=1), np.ndarray) +assert_type(np.average(AR_O, keepdims=True), np.ndarray) assert_type(np.average(AR_O, returned=True), tuple[Any, Any]) -assert_type(np.average(AR_f8, axis=0), Any) -assert_type(np.average(AR_f8, axis=0, returned=True), tuple[Any, Any]) +assert_type(np.average(AR_O, axis=1, returned=True), tuple[np.ndarray, np.ndarray]) +assert_type(np.average(AR_O, keepdims=True, returned=True), tuple[np.ndarray, np.ndarray]) +# asarray_chkfinite +assert_type(np.asarray_chkfinite(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.asarray_chkfinite(AR_f8), npt.NDArray[np.float64]) -assert_type(np.asarray_chkfinite(AR_LIKE_f8), npt.NDArray[Any]) +assert_type(np.asarray_chkfinite(AR_LIKE_f8), np.ndarray) assert_type(np.asarray_chkfinite(AR_f8, dtype=np.float64), npt.NDArray[np.float64]) -assert_type(np.asarray_chkfinite(AR_f8, dtype=float), npt.NDArray[Any]) 
+assert_type(np.asarray_chkfinite(AR_f8, dtype=float), np.ndarray) +# piecewise +assert_type(np.piecewise(AR_f8_1d, AR_b, [func]), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.piecewise(AR_f8, AR_b, [func]), npt.NDArray[np.float64]) -assert_type(np.piecewise(AR_LIKE_f8, AR_b, [func]), npt.NDArray[Any]) - -assert_type(np.select([AR_f8], [AR_f8]), npt.NDArray[Any]) +assert_type(np.piecewise(AR_f8, AR_b, [func_f8]), npt.NDArray[np.float64]) +assert_type(np.piecewise(AR_f8, AR_b_list, [func]), npt.NDArray[np.float64]) +assert_type(np.piecewise(AR_f8, AR_b_list, [func_f8]), npt.NDArray[np.float64]) +assert_type(np.piecewise(AR_f8, AR_b_list, [func], True, -1, kwarg=""), npt.NDArray[np.float64]) +assert_type(np.piecewise(AR_f8, AR_b_list, [func], True, arg=-1, kwarg=""), npt.NDArray[np.float64]) +assert_type(np.piecewise(AR_LIKE_f8, AR_b_list, [func]), np.ndarray) +assert_type(np.piecewise(AR_LIKE_f8, AR_b_list, [func_f8]), npt.NDArray[np.float64]) + +# extract +assert_type(np.extract(AR_i8, AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.extract(AR_i8, AR_LIKE_b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.extract(AR_i8, AR_LIKE_i8), np.ndarray[tuple[int], np.dtype[np.int_]]) +assert_type(np.extract(AR_i8, AR_LIKE_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.extract(AR_i8, AR_LIKE_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) + +# select +assert_type(np.select([AR_b], [AR_f8_1d]), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.select([AR_b], [AR_f8]), npt.NDArray[np.float64]) + +# places +assert_type(np.place(AR_f8, mask=AR_i8, vals=5.0), None) -assert_type(np.copy(AR_LIKE_f8), npt.NDArray[Any]) +# copy +assert_type(np.copy(AR_LIKE_f8), np.ndarray) assert_type(np.copy(AR_U), npt.NDArray[np.str_]) -assert_type(np.copy(CHAR_AR_U), np.ndarray[Any, Any]) -assert_type(np.copy(CHAR_AR_U, "K", subok=True), np.char.chararray[Any, np.dtype[np.str_]]) 
-assert_type(np.copy(CHAR_AR_U, subok=True), np.char.chararray[Any, np.dtype[np.str_]]) +assert_type(np.copy(CHAR_AR_U, "K", subok=True), np.char.chararray[tuple[Any, ...], np.dtype[np.str_]]) # type: ignore[deprecated] +assert_type(np.copy(CHAR_AR_U, subok=True), np.char.chararray[tuple[Any, ...], np.dtype[np.str_]]) # type: ignore[deprecated] +# pyright correctly infers `NDArray[str_]` here +assert_type(np.copy(CHAR_AR_U), np.ndarray[Any, Any]) # pyright: ignore[reportAssertTypeFailure] -assert_type(np.gradient(AR_f8, axis=None), Any) -assert_type(np.gradient(AR_LIKE_f8, edge_order=2), Any) - -assert_type(np.diff("bob", n=0), str) -assert_type(np.diff(AR_f8, axis=0), npt.NDArray[Any]) -assert_type(np.diff(AR_LIKE_f8, prepend=1.5), npt.NDArray[Any]) - -assert_type(np.angle(f8), np.floating[Any]) -assert_type(np.angle(AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.angle(AR_c16, deg=True), npt.NDArray[np.floating[Any]]) -assert_type(np.angle(AR_O), npt.NDArray[np.object_]) - -assert_type(np.unwrap(AR_f8), npt.NDArray[np.floating[Any]]) +# gradient +assert_type(np.gradient(AR_f8_1d, 1), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type( + np.gradient(AR_f8_2d, [1, 2], [2, 3.5, 4]), + tuple[ + np.ndarray[tuple[int, int], np.dtype[np.float64]], + np.ndarray[tuple[int, int], np.dtype[np.float64]], + ], +) +assert_type( + np.gradient(AR_f8_3d), + tuple[ + np.ndarray[tuple[int, int, int], np.dtype[np.float64]], + np.ndarray[tuple[int, int, int], np.dtype[np.float64]], + np.ndarray[tuple[int, int, int], np.dtype[np.float64]], + ], +) +assert_type(np.gradient(AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]] | Any) +assert_type(np.gradient(AR_LIKE_f8, edge_order=2), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.gradient(AR_LIKE_c16, axis=0), np.ndarray[tuple[int], np.dtype[np.complex128]]) + +# diff +assert_type(np.diff("git", n=0), str) +assert_type(np.diff(AR_f8), npt.NDArray[np.float64]) +assert_type(np.diff(AR_f8_1d, axis=0), 
np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.diff(AR_f8_2d, axis=0), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.diff(AR_LIKE_f8, prepend=1.5), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.diff(AR_c16), npt.NDArray[np.complex128]) +assert_type(np.diff(AR_c16_1d), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(np.diff(AR_LIKE_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) + +# interp +assert_type(np.interp(1, [1], AR_f8), np.float64) +assert_type(np.interp(1, [1], [1]), np.float64) +assert_type(np.interp(1, [1], AR_c16), np.complex128) +assert_type(np.interp(1, [1], [1j]), np.complex128) +assert_type(np.interp([1], [1], AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.interp([1], [1], [1]), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.interp([1], [1], AR_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(np.interp([1], [1], [1j]), np.ndarray[tuple[int], np.dtype[np.complex128]]) + +# angle +assert_type(np.angle(1), np.float64) +assert_type(np.angle(1, deg=True), np.float64) +assert_type(np.angle(1j), np.float64) +assert_type(np.angle(f8), np.float64) +assert_type(np.angle(AR_b), npt.NDArray[np.float64]) +assert_type(np.angle(AR_u1), npt.NDArray[np.float64]) +assert_type(np.angle(AR_i8), npt.NDArray[np.float64]) +assert_type(np.angle(AR_f2), npt.NDArray[np.float16]) +assert_type(np.angle(AR_f4), npt.NDArray[np.float32]) +assert_type(np.angle(AR_c8), npt.NDArray[np.float32]) +assert_type(np.angle(AR_f8), npt.NDArray[np.float64]) +assert_type(np.angle(AR_c16), npt.NDArray[np.float64]) +assert_type(np.angle(AR_f10), npt.NDArray[np.longdouble]) +assert_type(np.angle(AR_c20), npt.NDArray[np.longdouble]) +assert_type(np.angle(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.angle(AR_c16_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.angle(AR_LIKE_b), np.ndarray[tuple[int], np.dtype[np.float64]]) 
+assert_type(np.angle(AR_LIKE_i8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.angle(AR_LIKE_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.angle(AR_LIKE_c16), np.ndarray[tuple[int], np.dtype[np.float64]]) + +# unwrap +assert_type(np.unwrap(AR_f2), npt.NDArray[np.float16]) +assert_type(np.unwrap(AR_f8), npt.NDArray[np.float64]) +assert_type(np.unwrap(AR_f10), npt.NDArray[np.longdouble]) assert_type(np.unwrap(AR_O), npt.NDArray[np.object_]) - -assert_type(np.sort_complex(AR_f8), npt.NDArray[np.complexfloating[Any, Any]]) - +assert_type(np.unwrap(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.unwrap(AR_f8_2d), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.unwrap(AR_f8_3d), np.ndarray[tuple[int, int, int], np.dtype[np.float64]]) +assert_type(np.unwrap(AR_LIKE_b), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.unwrap(AR_LIKE_i8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.unwrap(AR_LIKE_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) + +# sort_complex +assert_type(np.sort_complex(AR_u1), npt.NDArray[np.complex64]) +assert_type(np.sort_complex(AR_f8), npt.NDArray[np.complex128]) +assert_type(np.sort_complex(AR_f10), npt.NDArray[np.clongdouble]) +assert_type(np.sort_complex(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(np.sort_complex(AR_c16_1d), np.ndarray[tuple[int], np.dtype[np.complex128]]) + +# trim_zeros assert_type(np.trim_zeros(AR_f8), npt.NDArray[np.float64]) assert_type(np.trim_zeros(AR_LIKE_f8), list[float]) -assert_type(np.extract(AR_i8, AR_f8), npt.NDArray[np.float64]) -assert_type(np.extract(AR_i8, AR_LIKE_f8), npt.NDArray[Any]) - -assert_type(np.place(AR_f8, mask=AR_i8, vals=5.0), None) - -assert_type(np.cov(AR_f8, bias=True), npt.NDArray[np.floating[Any]]) -assert_type(np.cov(AR_f8, AR_c16, ddof=1), npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(np.cov(AR_f8, aweights=AR_f8, dtype=np.float32), 
npt.NDArray[np.float32]) -assert_type(np.cov(AR_f8, fweights=AR_f8, dtype=float), npt.NDArray[Any]) - -assert_type(np.corrcoef(AR_f8, rowvar=True), npt.NDArray[np.floating[Any]]) -assert_type(np.corrcoef(AR_f8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(np.corrcoef(AR_f8, dtype=np.float32), npt.NDArray[np.float32]) -assert_type(np.corrcoef(AR_f8, dtype=float), npt.NDArray[Any]) - -assert_type(np.blackman(5), npt.NDArray[np.floating[Any]]) -assert_type(np.bartlett(6), npt.NDArray[np.floating[Any]]) -assert_type(np.hanning(4.5), npt.NDArray[np.floating[Any]]) -assert_type(np.hamming(0), npt.NDArray[np.floating[Any]]) -assert_type(np.i0(AR_i8), npt.NDArray[np.floating[Any]]) -assert_type(np.kaiser(4, 5.9), npt.NDArray[np.floating[Any]]) - -assert_type(np.sinc(1.0), np.floating[Any]) -assert_type(np.sinc(1j), np.complexfloating[Any, Any]) -assert_type(np.sinc(AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.sinc(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) - -assert_type(np.median(AR_f8, keepdims=False), np.floating[Any]) -assert_type(np.median(AR_c16, overwrite_input=True), np.complexfloating[Any, Any]) -assert_type(np.median(AR_m), np.timedelta64) +# cov +assert_type(np.cov(AR_f8_1d), np.ndarray[tuple[()], np.dtype[np.float64]]) +assert_type(np.cov(AR_f8_2d), npt.NDArray[np.float64]) +assert_type(np.cov(AR_f8), npt.NDArray[np.float64]) +assert_type(np.cov(AR_f8, AR_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.cov(AR_c16, AR_c16), np.ndarray[tuple[int, int], np.dtype[np.complex128]]) +assert_type(np.cov(AR_LIKE_f8), np.ndarray[tuple[()], np.dtype[np.float64]]) +assert_type(np.cov(AR_LIKE_f8, AR_LIKE_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.cov(AR_LIKE_f8, dtype=np.float16), np.ndarray[tuple[()], np.dtype[np.float16]]) +assert_type(np.cov(AR_LIKE_f8, AR_LIKE_f8, dtype=np.float32), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(np.cov(AR_f8, AR_f8, dtype=float), 
np.ndarray[tuple[int, int]]) +assert_type(np.cov(AR_LIKE_f8, dtype=float), np.ndarray[tuple[()]]) +assert_type(np.cov(AR_LIKE_f8, AR_LIKE_f8, dtype=float), np.ndarray[tuple[int, int]]) + +# corrcoef +assert_type(np.corrcoef(AR_f8_1d), np.float64) +assert_type(np.corrcoef(AR_f8_2d), np.ndarray[tuple[int, int], np.dtype[np.float64]] | np.float64) +assert_type(np.corrcoef(AR_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]] | np.float64) +assert_type(np.corrcoef(AR_f8, AR_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.corrcoef(AR_c16, AR_c16), np.ndarray[tuple[int, int], np.dtype[np.complex128]]) +assert_type(np.corrcoef(AR_LIKE_f8), np.float64) +assert_type(np.corrcoef(AR_LIKE_f8, AR_LIKE_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.corrcoef(AR_LIKE_f8, dtype=np.float16), np.float16) +assert_type(np.corrcoef(AR_LIKE_f8, AR_LIKE_f8, dtype=np.float32), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(np.corrcoef(AR_f8, AR_f8, dtype=float), np.ndarray[tuple[int, int]]) +assert_type(np.corrcoef(AR_LIKE_f8, dtype=float), Any) +assert_type(np.corrcoef(AR_LIKE_f8, AR_LIKE_f8, dtype=float), np.ndarray[tuple[int, int]]) + +# window functions +assert_type(np.blackman(5), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.bartlett(6), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.hanning(4.5), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.hamming(0), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.kaiser(4, 5.9), np.ndarray[tuple[int], np.dtype[np.float64]]) + +# i0 (bessel function) +assert_type(np.i0(AR_i8), npt.NDArray[np.float64]) + +# sinc (cardinal sine function) +assert_type(np.sinc(1.0), np.float64) +assert_type(np.sinc(1j), np.complex128 | Any) +assert_type(np.sinc(AR_f8), npt.NDArray[np.float64]) +assert_type(np.sinc(AR_c16), npt.NDArray[np.complex128]) +assert_type(np.sinc(AR_LIKE_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) 
+assert_type(np.sinc(AR_LIKE_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) + +# median +assert_type(np.median(AR_f8, keepdims=False), np.float64) +assert_type(np.median(AR_c16, overwrite_input=True), np.complex128) +# NOTE: Mypy incorrectly infers `Any`, but pyright behaves correctly. +assert_type(np.median(AR_m), np.timedelta64) # type: ignore[assert-type] assert_type(np.median(AR_O), Any) -assert_type(np.median(AR_f8, keepdims=True), Any) -assert_type(np.median(AR_c16, axis=0), Any) +assert_type(np.median(AR_f8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.median(AR_f8, axis=0), npt.NDArray[np.float64]) +assert_type(np.median(AR_c16, keepdims=True), npt.NDArray[np.complex128]) +assert_type(np.median(AR_c16, axis=0), npt.NDArray[np.complex128]) +assert_type(np.median(AR_LIKE_f8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.median(AR_LIKE_c16, keepdims=True), npt.NDArray[np.complex128]) assert_type(np.median(AR_LIKE_f8, out=AR_c16), npt.NDArray[np.complex128]) -assert_type(np.percentile(AR_f8, 50), np.floating[Any]) -assert_type(np.percentile(AR_c16, 50), np.complexfloating[Any, Any]) +# percentile +assert_type(np.percentile(AR_f8, 50), np.float64) +assert_type(np.percentile(AR_f8, 50, axis=1), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_f8, 50, axis=(1, 0)), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_f8, 50, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_f8, 50, axis=0, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_c16, 50), np.complex128) assert_type(np.percentile(AR_m, 50), np.timedelta64) assert_type(np.percentile(AR_M, 50, overwrite_input=True), np.datetime64) assert_type(np.percentile(AR_O, 50), Any) -assert_type(np.percentile(AR_f8, [50]), npt.NDArray[np.floating[Any]]) -assert_type(np.percentile(AR_c16, [50]), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.percentile(AR_f8, [50]), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_f8, 
[50], axis=1), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_f8, [50], keepdims=True), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_c16, [50]), npt.NDArray[np.complex128]) assert_type(np.percentile(AR_m, [50]), npt.NDArray[np.timedelta64]) assert_type(np.percentile(AR_M, [50], method="nearest"), npt.NDArray[np.datetime64]) assert_type(np.percentile(AR_O, [50]), npt.NDArray[np.object_]) -assert_type(np.percentile(AR_f8, [50], keepdims=True), Any) -assert_type(np.percentile(AR_f8, [50], axis=[1]), Any) +assert_type(np.percentile(AR_f8, [50], keepdims=True), npt.NDArray[np.float64]) assert_type(np.percentile(AR_f8, [50], out=AR_c16), npt.NDArray[np.complex128]) -assert_type(np.quantile(AR_f8, 0.5), np.floating[Any]) -assert_type(np.quantile(AR_c16, 0.5), np.complexfloating[Any, Any]) -assert_type(np.quantile(AR_m, 0.5), np.timedelta64) -assert_type(np.quantile(AR_M, 0.5, overwrite_input=True), np.datetime64) -assert_type(np.quantile(AR_O, 0.5), Any) -assert_type(np.quantile(AR_f8, [0.5]), npt.NDArray[np.floating[Any]]) -assert_type(np.quantile(AR_c16, [0.5]), npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(np.quantile(AR_m, [0.5]), npt.NDArray[np.timedelta64]) -assert_type(np.quantile(AR_M, [0.5], method="nearest"), npt.NDArray[np.datetime64]) -assert_type(np.quantile(AR_O, [0.5]), npt.NDArray[np.object_]) -assert_type(np.quantile(AR_f8, [0.5], keepdims=True), Any) -assert_type(np.quantile(AR_f8, [0.5], axis=[1]), Any) -assert_type(np.quantile(AR_f8, [0.5], out=AR_c16), npt.NDArray[np.complex128]) - -assert_type(np.meshgrid(AR_f8, AR_i8, copy=False), tuple[npt.NDArray[Any], ...]) -assert_type(np.meshgrid(AR_f8, AR_i8, AR_c16, indexing="ij"), tuple[npt.NDArray[Any], ...]) +# quantile +assert_type(np.quantile(AR_f8, 0.50), np.float64) +assert_type(np.quantile(AR_f8, 0.50, axis=1), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, 0.50, axis=(1, 0)), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, 0.50, keepdims=True), 
npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, 0.50, axis=0, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_c16, 0.50), np.complex128) +assert_type(np.quantile(AR_m, 0.50), np.timedelta64) +assert_type(np.quantile(AR_M, 0.50, overwrite_input=True), np.datetime64) +assert_type(np.quantile(AR_O, 0.50), Any) +assert_type(np.quantile(AR_f8, [0.50]), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, [0.50], axis=1), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, [0.50], keepdims=True), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_c16, [0.50]), npt.NDArray[np.complex128]) +assert_type(np.quantile(AR_m, [0.50]), npt.NDArray[np.timedelta64]) +assert_type(np.quantile(AR_M, [0.50], method="nearest"), npt.NDArray[np.datetime64]) +assert_type(np.quantile(AR_O, [0.50]), npt.NDArray[np.object_]) +assert_type(np.quantile(AR_f8, [0.50], keepdims=True), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, [0.50], out=AR_c16), npt.NDArray[np.complex128]) + +# trapezoid +assert_type(np.trapezoid(AR_LIKE_f8), np.float64) +assert_type(np.trapezoid(AR_LIKE_f8, AR_LIKE_f8), np.float64) +assert_type(np.trapezoid(AR_LIKE_c16), np.complex128) +assert_type(np.trapezoid(AR_LIKE_c16, AR_LIKE_f8), np.complex128) +assert_type(np.trapezoid(AR_LIKE_f8, AR_LIKE_c16), np.complex128) +assert_type(np.trapezoid(AR_LIKE_O), float) +assert_type(np.trapezoid(AR_LIKE_O, AR_LIKE_f8), float) +assert_type(np.trapezoid(AR_f8), np.float64 | npt.NDArray[np.float64]) +assert_type(np.trapezoid(AR_f8, AR_f8), np.float64 | npt.NDArray[np.float64]) +assert_type(np.trapezoid(AR_c16), np.complex128 | npt.NDArray[np.complex128]) +assert_type(np.trapezoid(AR_c16, AR_c16), np.complex128 | npt.NDArray[np.complex128]) +# NOTE: Mypy incorrectly infers `Any`, but pyright behaves correctly. 
+assert_type(np.trapezoid(AR_m), np.timedelta64 | npt.NDArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(np.trapezoid(AR_O), npt.NDArray[np.object_] | Any) +assert_type(np.trapezoid(AR_O, AR_LIKE_f8), npt.NDArray[np.object_] | Any) + +# meshgrid +assert_type(np.meshgrid(), tuple[()]) +assert_type( + np.meshgrid(AR_f8), + tuple[ + np.ndarray[tuple[int], np.dtype[np.float64]], + ], +) +assert_type( + np.meshgrid(AR_c16, indexing="ij"), + tuple[ + np.ndarray[tuple[int], np.dtype[np.complex128]], + ], +) +assert_type( + np.meshgrid(AR_i8, AR_f8, copy=False), + tuple[ + np.ndarray[tuple[int, int], np.dtype[np.int64]], + np.ndarray[tuple[int, int], np.dtype[np.float64]], + ], +) +assert_type( + np.meshgrid(AR_LIKE_f8, AR_f8), + tuple[ + np.ndarray[tuple[int, int]], + np.ndarray[tuple[int, int], np.dtype[np.float64]], + ], +) +assert_type( + np.meshgrid(AR_f8, AR_LIKE_f8), + tuple[ + np.ndarray[tuple[int, int], np.dtype[np.float64]], + np.ndarray[tuple[int, int]], + ], +) +assert_type( + np.meshgrid(AR_LIKE_f8, AR_LIKE_f8), + tuple[ + np.ndarray[tuple[int, int]], + np.ndarray[tuple[int, int]], + ], +) +assert_type( + np.meshgrid(AR_f8, AR_i8, AR_c16), + tuple[ + np.ndarray[tuple[int, int, int], np.dtype[np.float64]], + np.ndarray[tuple[int, int, int], np.dtype[np.int64]], + np.ndarray[tuple[int, int, int], np.dtype[np.complex128]], + ], +) +assert_type(np.meshgrid(AR_f8, AR_f8, AR_f8, AR_f8), tuple[npt.NDArray[np.float64], ...]) +assert_type(np.meshgrid(AR_f8, AR_f8, AR_f8, AR_LIKE_f8), tuple[np.ndarray, ...]) +assert_type(np.meshgrid(*AR_LIKE_f8), tuple[np.ndarray, ...]) -assert_type(np.delete(AR_f8, np.s_[:5]), npt.NDArray[np.float64]) -assert_type(np.delete(AR_LIKE_f8, [0, 4, 9], axis=0), npt.NDArray[Any]) +# delete +assert_type(np.delete(AR_f8, np.s_[:5]), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.delete(AR_LIKE_f8, [0, 4, 9], axis=0), np.ndarray) -assert_type(np.insert(AR_f8, np.s_[:5], 5), npt.NDArray[np.float64]) 
-assert_type(np.insert(AR_LIKE_f8, [0, 4, 9], [0.5, 9.2, 7], axis=0), npt.NDArray[Any]) +# insert +assert_type(np.insert(AR_f8, np.s_[:5], 5), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.insert(AR_LIKE_f8, [0, 4, 9], [0.5, 9.2, 7], axis=0), np.ndarray) -assert_type(np.append(AR_f8, 5), npt.NDArray[Any]) -assert_type(np.append(AR_LIKE_f8, 1j, axis=0), npt.NDArray[Any]) +# append +assert_type(np.append(f8, f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.append(AR_f8, AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.append(AR_LIKE_f8, AR_LIKE_c16, axis=0), np.ndarray) +assert_type(np.append(AR_f8, AR_LIKE_f8, axis=0), np.ndarray) +# digitize assert_type(np.digitize(4.5, [1]), np.intp) assert_type(np.digitize(AR_f8, [1, 2, 3]), npt.NDArray[np.intp]) diff --git a/numpy/typing/tests/data/reveal/lib_polynomial.pyi b/numpy/typing/tests/data/reveal/lib_polynomial.pyi index 885b40ee80a4..b7cbadefc610 100644 --- a/numpy/typing/tests/data/reveal/lib_polynomial.pyi +++ b/numpy/typing/tests/data/reveal/lib_polynomial.pyi @@ -1,15 +1,9 @@ -import sys -from typing import Any, NoReturn from collections.abc import Iterator +from typing import Any, NoReturn, assert_type import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - AR_b: npt.NDArray[np.bool] AR_u4: npt.NDArray[np.uint32] AR_i8: npt.NDArray[np.int64] @@ -55,18 +49,18 @@ assert_type(iter(poly_obj), Iterator[Any]) assert_type(poly_obj.deriv(), np.poly1d) assert_type(poly_obj.integ(), np.poly1d) -assert_type(np.poly(poly_obj), npt.NDArray[np.floating[Any]]) -assert_type(np.poly(AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.poly(AR_c16), npt.NDArray[np.floating[Any]]) +assert_type(np.poly(poly_obj), npt.NDArray[np.floating]) +assert_type(np.poly(AR_f8), npt.NDArray[np.floating]) +assert_type(np.poly(AR_c16), npt.NDArray[np.floating]) 
assert_type(np.polyint(poly_obj), np.poly1d) -assert_type(np.polyint(AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.polyint(AR_f8, k=AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.polyint(AR_f8), npt.NDArray[np.floating]) +assert_type(np.polyint(AR_f8, k=AR_c16), npt.NDArray[np.complexfloating]) assert_type(np.polyint(AR_O, m=2), npt.NDArray[np.object_]) assert_type(np.polyder(poly_obj), np.poly1d) -assert_type(np.polyder(AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.polyder(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.polyder(AR_f8), npt.NDArray[np.floating]) +assert_type(np.polyder(AR_c16), npt.NDArray[np.complexfloating]) assert_type(np.polyder(AR_O, m=2), npt.NDArray[np.object_]) assert_type(np.polyfit(AR_f8, AR_f8, 2), npt.NDArray[np.float64]) @@ -107,44 +101,47 @@ assert_type( ) assert_type(np.polyval(AR_b, AR_b), npt.NDArray[np.int64]) -assert_type(np.polyval(AR_u4, AR_b), npt.NDArray[np.unsignedinteger[Any]]) -assert_type(np.polyval(AR_i8, AR_i8), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.polyval(AR_f8, AR_i8), npt.NDArray[np.floating[Any]]) -assert_type(np.polyval(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.polyval(AR_u4, AR_b), npt.NDArray[np.unsignedinteger]) +assert_type(np.polyval(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.polyval(AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(np.polyval(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) assert_type(np.polyval(AR_O, AR_O), npt.NDArray[np.object_]) assert_type(np.polyadd(poly_obj, AR_i8), np.poly1d) assert_type(np.polyadd(AR_f8, poly_obj), np.poly1d) assert_type(np.polyadd(AR_b, AR_b), npt.NDArray[np.bool]) -assert_type(np.polyadd(AR_u4, AR_b), npt.NDArray[np.unsignedinteger[Any]]) -assert_type(np.polyadd(AR_i8, AR_i8), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.polyadd(AR_f8, AR_i8), npt.NDArray[np.floating[Any]]) -assert_type(np.polyadd(AR_i8, AR_c16), 
npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.polyadd(AR_u4, AR_b), npt.NDArray[np.unsignedinteger]) +assert_type(np.polyadd(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.polyadd(AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(np.polyadd(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) assert_type(np.polyadd(AR_O, AR_O), npt.NDArray[np.object_]) assert_type(np.polysub(poly_obj, AR_i8), np.poly1d) assert_type(np.polysub(AR_f8, poly_obj), np.poly1d) -assert_type(np.polysub(AR_b, AR_b), NoReturn) -assert_type(np.polysub(AR_u4, AR_b), npt.NDArray[np.unsignedinteger[Any]]) -assert_type(np.polysub(AR_i8, AR_i8), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.polysub(AR_f8, AR_i8), npt.NDArray[np.floating[Any]]) -assert_type(np.polysub(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) + +def test_invalid_polysub() -> None: + assert_type(np.polysub(AR_b, AR_b), NoReturn) + +assert_type(np.polysub(AR_u4, AR_b), npt.NDArray[np.unsignedinteger]) +assert_type(np.polysub(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.polysub(AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(np.polysub(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) assert_type(np.polysub(AR_O, AR_O), npt.NDArray[np.object_]) assert_type(np.polymul(poly_obj, AR_i8), np.poly1d) assert_type(np.polymul(AR_f8, poly_obj), np.poly1d) assert_type(np.polymul(AR_b, AR_b), npt.NDArray[np.bool]) -assert_type(np.polymul(AR_u4, AR_b), npt.NDArray[np.unsignedinteger[Any]]) -assert_type(np.polymul(AR_i8, AR_i8), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.polymul(AR_f8, AR_i8), npt.NDArray[np.floating[Any]]) -assert_type(np.polymul(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.polymul(AR_u4, AR_b), npt.NDArray[np.unsignedinteger]) +assert_type(np.polymul(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.polymul(AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(np.polymul(AR_i8, AR_c16), 
npt.NDArray[np.complexfloating]) assert_type(np.polymul(AR_O, AR_O), npt.NDArray[np.object_]) assert_type(np.polydiv(poly_obj, AR_i8), tuple[np.poly1d, np.poly1d]) assert_type(np.polydiv(AR_f8, poly_obj), tuple[np.poly1d, np.poly1d]) -assert_type(np.polydiv(AR_b, AR_b), tuple[npt.NDArray[np.floating[Any]], npt.NDArray[np.floating[Any]]]) -assert_type(np.polydiv(AR_u4, AR_b), tuple[npt.NDArray[np.floating[Any]], npt.NDArray[np.floating[Any]]]) -assert_type(np.polydiv(AR_i8, AR_i8), tuple[npt.NDArray[np.floating[Any]], npt.NDArray[np.floating[Any]]]) -assert_type(np.polydiv(AR_f8, AR_i8), tuple[npt.NDArray[np.floating[Any]], npt.NDArray[np.floating[Any]]]) -assert_type(np.polydiv(AR_i8, AR_c16), tuple[npt.NDArray[np.complexfloating[Any, Any]], npt.NDArray[np.complexfloating[Any, Any]]]) +assert_type(np.polydiv(AR_b, AR_b), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) +assert_type(np.polydiv(AR_u4, AR_b), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) +assert_type(np.polydiv(AR_i8, AR_i8), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) +assert_type(np.polydiv(AR_f8, AR_i8), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) +assert_type(np.polydiv(AR_i8, AR_c16), tuple[npt.NDArray[np.complexfloating], npt.NDArray[np.complexfloating]]) assert_type(np.polydiv(AR_O, AR_O), tuple[npt.NDArray[Any], npt.NDArray[Any]]) diff --git a/numpy/typing/tests/data/reveal/lib_utils.pyi b/numpy/typing/tests/data/reveal/lib_utils.pyi index 094b60140833..c9470e00a359 100644 --- a/numpy/typing/tests/data/reveal/lib_utils.pyi +++ b/numpy/typing/tests/data/reveal/lib_utils.pyi @@ -1,14 +1,9 @@ -import sys from io import StringIO +from typing import assert_type import numpy as np -import numpy.typing as npt import numpy.lib.array_utils as array_utils - -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +import numpy.typing as npt AR: npt.NDArray[np.float64] AR_DICT: dict[str, 
npt.NDArray[np.float64]] diff --git a/numpy/typing/tests/data/reveal/lib_version.pyi b/numpy/typing/tests/data/reveal/lib_version.pyi index 142d88bdbb8a..03735375ae3e 100644 --- a/numpy/typing/tests/data/reveal/lib_version.pyi +++ b/numpy/typing/tests/data/reveal/lib_version.pyi @@ -1,12 +1,7 @@ -import sys +from typing import assert_type from numpy.lib import NumpyVersion -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - version = NumpyVersion("1.8.0") assert_type(version.vstring, str) diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 8d594d42c3c1..a1155e2bb5ed 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -1,134 +1,509 @@ -import sys -from typing import Any +from typing import Any, Literal, assert_type import numpy as np import numpy.typing as npt from numpy.linalg._linalg import ( - QRResult, EigResult, EighResult, SVDResult, SlogdetResult + EighResult, + EigResult, + QRResult, + SlogdetResult, + SVDResult, ) -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] +type _Array3D[ScalarT: np.generic] = np.ndarray[tuple[int, int, int], np.dtype[ScalarT]] +bool_list_1d: list[bool] +bool_list_2d: list[list[bool]] +int_list_1d: list[int] +int_list_2d: list[list[int]] +float_list_1d: list[float] +float_list_2d: list[list[float]] +float_list_3d: list[list[list[float]]] +float_list_4d: list[list[list[list[float]]]] +complex_list_1d: list[complex] +complex_list_2d: list[list[complex]] +complex_list_3d: list[list[list[complex]]] +bytes_list_2d: list[list[bytes]] +str_list_2d: list[list[str]] + +AR_any: np.ndarray +AR_f_: npt.NDArray[np.floating] +AR_c_: 
npt.NDArray[np.complexfloating] AR_i8: npt.NDArray[np.int64] +AR_f2: npt.NDArray[np.float16] +AR_f4: npt.NDArray[np.float32] AR_f8: npt.NDArray[np.float64] +AR_f10: npt.NDArray[np.longdouble] +AR_c8: npt.NDArray[np.complex64] AR_c16: npt.NDArray[np.complex128] +AR_c20: npt.NDArray[np.clongdouble] AR_O: npt.NDArray[np.object_] +AR_M: npt.NDArray[np.datetime64] AR_m: npt.NDArray[np.timedelta64] -AR_S: npt.NDArray[np.str_] +AR_S: npt.NDArray[np.bytes_] +AR_U: npt.NDArray[np.str_] AR_b: npt.NDArray[np.bool] +AR_b_1d: np.ndarray[tuple[int], np.dtype[np.bool]] +AR_b_2d: np.ndarray[tuple[int, int], np.dtype[np.bool]] + +AR_i8_1d: np.ndarray[tuple[int], np.dtype[np.int64]] +AR_i8_2d: np.ndarray[tuple[int, int], np.dtype[np.int64]] + +SC_f8: np.float64 +AR_f8_0d: np.ndarray[tuple[()], np.dtype[np.float64]] +AR_f8_1d: _Array1D[np.float64] +AR_f8_2d: _Array2D[np.float64] +AR_f8_3d: _Array3D[np.float64] +AR_f8_4d: np.ndarray[tuple[int, int, int, int], np.dtype[np.float64]] + +AR_f2_2d: _Array2D[np.float16] +AR_f4_1d: _Array1D[np.float32] +AR_f4_2d: _Array2D[np.float32] +AR_f4_3d: _Array3D[np.float32] +AR_f10_2d: _Array2D[np.longdouble] +AR_f10_3d: _Array3D[np.longdouble] + +AR_c16_1d: np.ndarray[tuple[int], np.dtype[np.complex128]] +AR_c16_2d: np.ndarray[tuple[int, int], np.dtype[np.complex128]] + +### + assert_type(np.linalg.tensorsolve(AR_i8, AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.tensorsolve(AR_i8, AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.linalg.tensorsolve(AR_c16, AR_f8), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.linalg.tensorsolve(AR_i8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.tensorsolve(AR_f4, AR_f4), npt.NDArray[np.float32]) +assert_type(np.linalg.tensorsolve(AR_c16, AR_f8), npt.NDArray[np.complex128]) +assert_type(np.linalg.tensorsolve(AR_c8, AR_f4), npt.NDArray[np.complex64]) +assert_type(np.linalg.tensorsolve(AR_f4, AR_c8), npt.NDArray[np.complex64]) assert_type(np.linalg.solve(AR_i8, AR_i8), 
npt.NDArray[np.float64]) -assert_type(np.linalg.solve(AR_i8, AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.linalg.solve(AR_c16, AR_f8), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.linalg.solve(AR_i8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.solve(AR_f4, AR_f4), npt.NDArray[np.float32]) +assert_type(np.linalg.solve(AR_c16, AR_f8), npt.NDArray[np.complex128]) +assert_type(np.linalg.solve(AR_c8, AR_f4), npt.NDArray[np.complex64]) +assert_type(np.linalg.solve(AR_f4, AR_c8), npt.NDArray[np.complex64]) assert_type(np.linalg.tensorinv(AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.tensorinv(AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.linalg.tensorinv(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.linalg.tensorinv(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.tensorinv(AR_c16), npt.NDArray[np.complex128]) assert_type(np.linalg.inv(AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.inv(AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.linalg.inv(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.linalg.inv(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.inv(AR_c16), npt.NDArray[np.complex128]) -assert_type(np.linalg.matrix_power(AR_i8, -1), npt.NDArray[Any]) -assert_type(np.linalg.matrix_power(AR_f8, 0), npt.NDArray[Any]) -assert_type(np.linalg.matrix_power(AR_c16, 1), npt.NDArray[Any]) -assert_type(np.linalg.matrix_power(AR_O, 2), npt.NDArray[Any]) +assert_type(np.linalg.pinv(AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.pinv(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.pinv(AR_c16), npt.NDArray[np.complex128]) -assert_type(np.linalg.cholesky(AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.cholesky(AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.linalg.cholesky(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.linalg.matrix_power(AR_i8, -1), npt.NDArray[np.float64]) 
+assert_type(np.linalg.matrix_power(AR_i8, 1), npt.NDArray[np.int64]) +assert_type(np.linalg.matrix_power(AR_f8, 0), npt.NDArray[np.float64]) +assert_type(np.linalg.matrix_power(AR_c16, 1), npt.NDArray[np.complex128]) +assert_type(np.linalg.matrix_power(AR_O, 2), npt.NDArray[np.object_]) -assert_type(np.linalg.outer(AR_i8, AR_i8), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.linalg.outer(AR_f8, AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.linalg.outer(AR_c16, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(np.linalg.outer(AR_b, AR_b), npt.NDArray[np.bool]) -assert_type(np.linalg.outer(AR_O, AR_O), npt.NDArray[np.object_]) -assert_type(np.linalg.outer(AR_i8, AR_m), npt.NDArray[np.timedelta64]) +assert_type(np.linalg.cholesky(AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.cholesky(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.cholesky(AR_c16), npt.NDArray[np.complex128]) -assert_type(np.linalg.qr(AR_i8), QRResult) -assert_type(np.linalg.qr(AR_f8), QRResult) -assert_type(np.linalg.qr(AR_c16), QRResult) +assert_type(np.linalg.qr(AR_i8), QRResult[np.float64]) +assert_type(np.linalg.qr(AR_i8, "r"), npt.NDArray[np.float64]) +assert_type(np.linalg.qr(AR_i8, "raw"), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.linalg.qr(AR_f4), QRResult[np.float32]) +assert_type(np.linalg.qr(AR_f4, "r"), npt.NDArray[np.float32]) +assert_type(np.linalg.qr(AR_f4, "raw"), tuple[npt.NDArray[np.float32], npt.NDArray[np.float32]]) +assert_type(np.linalg.qr(AR_f8), QRResult[np.float64]) +assert_type(np.linalg.qr(AR_f8, "r"), npt.NDArray[np.float64]) +assert_type(np.linalg.qr(AR_f8, "raw"), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.linalg.qr(AR_c8), QRResult[np.complex64]) +assert_type(np.linalg.qr(AR_c8, "r"), npt.NDArray[np.complex64]) +assert_type(np.linalg.qr(AR_c8, "raw"), tuple[npt.NDArray[np.complex64], npt.NDArray[np.complex64]]) +assert_type(np.linalg.qr(AR_c16), 
QRResult[np.complex128]) +assert_type(np.linalg.qr(AR_c16, "r"), npt.NDArray[np.complex128]) +assert_type(np.linalg.qr(AR_c16, "raw"), tuple[npt.NDArray[np.complex128], npt.NDArray[np.complex128]]) +# Mypy bug: `Expression is of type "QRResult[Any]", not "QRResult[Any]"` +assert_type(np.linalg.qr(AR_any), QRResult[Any]) # type: ignore[assert-type] +# Mypy bug: `Expression is of type "ndarray[Any, Any]", not "ndarray[tuple[Any, ...], dtype[Any]]"` +assert_type(np.linalg.qr(AR_any, "r"), npt.NDArray[Any]) # type: ignore[assert-type] +# Mypy bug: `Expression is of type "tuple[Any, ...]", <--snip-->"` +assert_type(np.linalg.qr(AR_any, "raw"), tuple[npt.NDArray[Any], npt.NDArray[Any]]) # type: ignore[assert-type] assert_type(np.linalg.eigvals(AR_i8), npt.NDArray[np.float64] | npt.NDArray[np.complex128]) -assert_type(np.linalg.eigvals(AR_f8), npt.NDArray[np.floating[Any]] | npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(np.linalg.eigvals(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.linalg.eigvals(AR_f8), npt.NDArray[np.float64] | npt.NDArray[np.complex128]) +assert_type(np.linalg.eigvals(AR_c16), npt.NDArray[np.complex128]) assert_type(np.linalg.eigvalsh(AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.eigvalsh(AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.linalg.eigvalsh(AR_c16), npt.NDArray[np.floating[Any]]) +assert_type(np.linalg.eigvalsh(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.eigvalsh(AR_c16), npt.NDArray[np.float64]) -assert_type(np.linalg.eig(AR_i8), EigResult) -assert_type(np.linalg.eig(AR_f8), EigResult) -assert_type(np.linalg.eig(AR_c16), EigResult) +assert_type(np.linalg.eig(AR_i8), EigResult[np.complex128]) +assert_type(np.linalg.eig(AR_f4), EigResult[np.complex64]) +assert_type(np.linalg.eig(AR_f8), EigResult[np.complex128]) +assert_type(np.linalg.eig(AR_c8), EigResult[np.complex64]) +assert_type(np.linalg.eig(AR_c16), EigResult[np.complex128]) +# Mypy bug: `Expression is of type 
"EigResult[Any]", not "EigResult[Any]"` +assert_type(np.linalg.eig(AR_f_), EigResult[Any]) # type: ignore[assert-type] +assert_type(np.linalg.eig(AR_c_), EigResult[Any]) # type: ignore[assert-type] +assert_type(np.linalg.eig(AR_any), EigResult[Any]) # type: ignore[assert-type] -assert_type(np.linalg.eigh(AR_i8), EighResult) -assert_type(np.linalg.eigh(AR_f8), EighResult) -assert_type(np.linalg.eigh(AR_c16), EighResult) +assert_type(np.linalg.eigh(AR_i8), EighResult[np.float64, np.float64]) +assert_type(np.linalg.eigh(AR_f4), EighResult[np.float32, np.float32]) +assert_type(np.linalg.eigh(AR_f8), EighResult[np.float64, np.float64]) +assert_type(np.linalg.eigh(AR_c8), EighResult[np.float32, np.complex64]) +assert_type(np.linalg.eigh(AR_c16), EighResult[np.float64, np.complex128]) +# Mypy bug: `Expression is of type "EighResult[Any, Any]", not "EighResult[Any, Any]"` +assert_type(np.linalg.eigh(AR_any), EighResult[Any, Any]) # type: ignore[assert-type] -assert_type(np.linalg.svd(AR_i8), SVDResult) -assert_type(np.linalg.svd(AR_f8), SVDResult) -assert_type(np.linalg.svd(AR_c16), SVDResult) +assert_type(np.linalg.svd(AR_i8), SVDResult[np.float64, np.float64]) assert_type(np.linalg.svd(AR_i8, compute_uv=False), npt.NDArray[np.float64]) -assert_type(np.linalg.svd(AR_f8, compute_uv=False), npt.NDArray[np.floating[Any]]) -assert_type(np.linalg.svd(AR_c16, compute_uv=False), npt.NDArray[np.floating[Any]]) +assert_type(np.linalg.svd(AR_f4), SVDResult[np.float32, np.float32]) +assert_type(np.linalg.svd(AR_f4, compute_uv=False), npt.NDArray[np.float32]) +assert_type(np.linalg.svd(AR_f8), SVDResult[np.float64, np.float64]) +assert_type(np.linalg.svd(AR_f8, compute_uv=False), npt.NDArray[np.float64]) +assert_type(np.linalg.svd(AR_c8), SVDResult[np.float32, np.complex64]) +assert_type(np.linalg.svd(AR_c8, compute_uv=False), npt.NDArray[np.float32]) +assert_type(np.linalg.svd(AR_c16), SVDResult[np.float64, np.complex128]) +assert_type(np.linalg.svd(AR_c16, compute_uv=False), 
npt.NDArray[np.float64]) +assert_type(np.linalg.svd(int_list_2d), SVDResult[np.float64, np.float64]) +assert_type(np.linalg.svd(int_list_2d, compute_uv=False), npt.NDArray[np.float64]) +assert_type(np.linalg.svd(float_list_2d), SVDResult[np.float64, np.float64]) +assert_type(np.linalg.svd(float_list_2d, compute_uv=False), npt.NDArray[np.float64]) +assert_type(np.linalg.svd(complex_list_2d), SVDResult[np.float64, np.complex128]) +assert_type(np.linalg.svd(complex_list_2d, compute_uv=False), npt.NDArray[np.float64]) +# Mypy bug: `Expression is of type "SVDResult[Any, Any]", not "SVDResult[Any, Any]"` +assert_type(np.linalg.svd(AR_any), SVDResult[Any, Any]) # type: ignore[assert-type] +# Mypy bug: `Expression is of type "ndarray[Any, Any]", not "ndarray[tuple[Any, ...], dtype[Any]]"` +assert_type(np.linalg.svd(AR_any, compute_uv=False), npt.NDArray[Any]) # type: ignore[assert-type] -assert_type(np.linalg.cond(AR_i8), Any) -assert_type(np.linalg.cond(AR_f8), Any) -assert_type(np.linalg.cond(AR_c16), Any) +assert_type(np.linalg.svdvals(AR_b), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(AR_f4), npt.NDArray[np.float32]) +assert_type(np.linalg.svdvals(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(AR_c8), npt.NDArray[np.float32]) +assert_type(np.linalg.svdvals(AR_c16), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(int_list_2d), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(float_list_2d), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(complex_list_2d), npt.NDArray[np.float64]) assert_type(np.linalg.matrix_rank(AR_i8), Any) assert_type(np.linalg.matrix_rank(AR_f8), Any) assert_type(np.linalg.matrix_rank(AR_c16), Any) +assert_type(np.linalg.matrix_rank(SC_f8), Literal[0, 1]) +assert_type(np.linalg.matrix_rank(AR_f8_1d), Literal[0, 1]) +assert_type(np.linalg.matrix_rank(float_list_1d), Literal[0, 1]) +assert_type(np.linalg.matrix_rank(AR_f8_2d), np.int_) 
+assert_type(np.linalg.matrix_rank(float_list_2d), np.int_) +assert_type(np.linalg.matrix_rank(AR_f8_3d), _Array1D[np.int_]) +assert_type(np.linalg.matrix_rank(float_list_3d), _Array1D[np.int_]) +assert_type(np.linalg.matrix_rank(AR_f8_4d), npt.NDArray[np.int_]) +assert_type(np.linalg.matrix_rank(float_list_4d), npt.NDArray[np.int_]) -assert_type(np.linalg.pinv(AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.pinv(AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.linalg.pinv(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.linalg.cond(AR_i8), Any) +assert_type(np.linalg.cond(AR_f8), Any) +assert_type(np.linalg.cond(AR_c16), Any) +assert_type(np.linalg.cond(AR_f4_2d), np.float32) +assert_type(np.linalg.cond(AR_f8_2d), np.float64) +assert_type(np.linalg.cond(AR_f4_3d), npt.NDArray[np.float32]) +assert_type(np.linalg.cond(AR_f8_3d), npt.NDArray[np.float64]) assert_type(np.linalg.slogdet(AR_i8), SlogdetResult) assert_type(np.linalg.slogdet(AR_f8), SlogdetResult) assert_type(np.linalg.slogdet(AR_c16), SlogdetResult) +assert_type(np.linalg.slogdet(AR_f4_2d), SlogdetResult[np.float32, np.float32]) +assert_type(np.linalg.slogdet(AR_f8_2d), SlogdetResult[np.float64, np.float64]) +assert_type(np.linalg.slogdet(AR_f4_3d), SlogdetResult[npt.NDArray[np.float32], npt.NDArray[np.float32]]) +assert_type(np.linalg.slogdet(AR_f8_3d), SlogdetResult[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.linalg.slogdet(complex_list_2d), SlogdetResult[np.float64, np.complex128]) +assert_type(np.linalg.slogdet(complex_list_3d), SlogdetResult[npt.NDArray[np.float64], npt.NDArray[np.complex128]]) assert_type(np.linalg.det(AR_i8), Any) assert_type(np.linalg.det(AR_f8), Any) assert_type(np.linalg.det(AR_c16), Any) +assert_type(np.linalg.det(AR_f4_2d), np.float32) +assert_type(np.linalg.det(AR_f8_2d), np.float64) +assert_type(np.linalg.det(AR_f4_3d), npt.NDArray[np.float32]) +assert_type(np.linalg.det(AR_f8_3d), npt.NDArray[np.float64]) 
+assert_type(np.linalg.det(complex_list_2d), np.complex128) +assert_type(np.linalg.det(complex_list_3d), npt.NDArray[np.complex128]) + +assert_type( + np.linalg.lstsq(AR_i8, AR_i8), + tuple[npt.NDArray[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f4, AR_f4), + tuple[npt.NDArray[np.float32], _Array1D[np.float32], np.int32, _Array1D[np.float32]], +) +assert_type( + np.linalg.lstsq(AR_i8, AR_f8), + tuple[npt.NDArray[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f4, AR_f8), + tuple[npt.NDArray[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f8, AR_i8), + tuple[npt.NDArray[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f8, AR_f4), + tuple[npt.NDArray[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_c8, AR_c8), + tuple[npt.NDArray[np.complex64], _Array1D[np.float32], np.int32, _Array1D[np.float32]], +) +assert_type( + np.linalg.lstsq(AR_c8, AR_c16), + tuple[npt.NDArray[np.complex128], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_c16, AR_c8), + tuple[npt.NDArray[np.complex128], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f8, AR_f8_1d), + tuple[_Array1D[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f4, AR_f4_1d), + tuple[_Array1D[np.float32], _Array1D[np.float32], np.int32, _Array1D[np.float32]], +) +assert_type( + np.linalg.lstsq(AR_f8, AR_f8_2d), + tuple[_Array2D[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f4, AR_f4_2d), + tuple[_Array2D[np.float32], _Array1D[np.float32], np.int32, _Array1D[np.float32]], +) + +assert_type(np.linalg.norm(AR_i8), np.float64) 
+assert_type(np.linalg.norm(AR_f8), np.float64) +assert_type(np.linalg.norm(AR_c16), np.float64) +# Mypy incorrectly infers `Any` for datetime64 and timedelta64, but pyright behaves correctly. +assert_type(np.linalg.norm(AR_M), np.float64) # type: ignore[assert-type] +assert_type(np.linalg.norm(AR_m), np.float64) # type: ignore[assert-type] +assert_type(np.linalg.norm(AR_U), np.float64) +assert_type(np.linalg.norm(AR_S), np.float64) +assert_type(np.linalg.norm(AR_f8, 0, 1), npt.NDArray[np.float64]) +assert_type(np.linalg.norm(AR_f8, axis=0), npt.NDArray[np.float64]) +assert_type(np.linalg.norm(AR_f8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.linalg.norm(AR_f8_2d, keepdims=True), _Array2D[np.float64]) +assert_type(np.linalg.norm(AR_f2), np.float16) +assert_type(np.linalg.norm(AR_f2, 0, 1), npt.NDArray[np.float16]) +assert_type(np.linalg.norm(AR_f2, axis=1), npt.NDArray[np.float16]) +assert_type(np.linalg.norm(AR_f2, keepdims=True), npt.NDArray[np.float16]) +assert_type(np.linalg.norm(AR_f2_2d, keepdims=True), _Array2D[np.float16]) +assert_type(np.linalg.norm(AR_f4), np.float32) +assert_type(np.linalg.norm(AR_c8), np.float32) +assert_type(np.linalg.norm(AR_f4, 0, 1), npt.NDArray[np.float32]) +assert_type(np.linalg.norm(AR_f4, axis=1), npt.NDArray[np.float32]) +assert_type(np.linalg.norm(AR_f4, keepdims=True), npt.NDArray[np.float32]) +assert_type(np.linalg.norm(AR_f4_2d, keepdims=True), _Array2D[np.float32]) +assert_type(np.linalg.norm(AR_f10), np.longdouble) +assert_type(np.linalg.norm(AR_c20), np.longdouble) +assert_type(np.linalg.norm(AR_f10, 0, 1), npt.NDArray[np.longdouble]) +assert_type(np.linalg.norm(AR_f10, axis=1), npt.NDArray[np.longdouble]) +assert_type(np.linalg.norm(AR_f10, keepdims=True), npt.NDArray[np.longdouble]) +assert_type(np.linalg.norm(AR_f10_2d, keepdims=True), _Array2D[np.longdouble]) + +assert_type(np.linalg.matrix_norm(AR_i8), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matrix_norm(AR_f8), npt.NDArray[np.float64] | 
Any) +assert_type(np.linalg.matrix_norm(AR_c16), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matrix_norm(AR_U), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matrix_norm(AR_S), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matrix_norm(AR_f8_2d), np.float64) +assert_type(np.linalg.matrix_norm(AR_f8_3d), npt.NDArray[np.float64]) +assert_type(np.linalg.matrix_norm(AR_f8_2d, keepdims=True), _Array2D[np.float64]) +assert_type(np.linalg.matrix_norm(AR_f4), npt.NDArray[np.float32] | Any) +assert_type(np.linalg.matrix_norm(AR_c8), npt.NDArray[np.float32] | Any) +assert_type(np.linalg.matrix_norm(AR_f4_2d), np.float32) +assert_type(np.linalg.matrix_norm(AR_f4_3d), npt.NDArray[np.float32]) +assert_type(np.linalg.matrix_norm(AR_f4_2d, keepdims=True), _Array2D[np.float32]) +assert_type(np.linalg.matrix_norm(AR_f10), npt.NDArray[np.longdouble] | Any) +assert_type(np.linalg.matrix_norm(AR_c20), npt.NDArray[np.longdouble] | Any) +assert_type(np.linalg.matrix_norm(AR_f10_2d), np.longdouble) +assert_type(np.linalg.matrix_norm(AR_f10_3d), npt.NDArray[np.longdouble]) +assert_type(np.linalg.matrix_norm(AR_f10_2d, keepdims=True), _Array2D[np.longdouble]) +assert_type(np.linalg.matrix_norm(complex_list_2d), np.float64) +assert_type(np.linalg.matrix_norm(complex_list_3d), npt.NDArray[np.float64]) +assert_type(np.linalg.matrix_norm(complex_list_2d, keepdims=True), npt.NDArray[np.float64]) + +assert_type(np.linalg.vector_norm(AR_i8), np.float64) +assert_type(np.linalg.vector_norm(AR_f8), np.float64) +assert_type(np.linalg.vector_norm(AR_c16), np.float64) +# Mypy incorrectly infers `Any` for datetime64 and timedelta64, but pyright behaves correctly. 
+assert_type(np.linalg.vector_norm(AR_M), np.float64) # type: ignore[assert-type] +assert_type(np.linalg.vector_norm(AR_m), np.float64) # type: ignore[assert-type] +assert_type(np.linalg.vector_norm(AR_U), np.float64) +assert_type(np.linalg.vector_norm(AR_S), np.float64) +assert_type(np.linalg.vector_norm(AR_f8, axis=0), npt.NDArray[np.float64]) +assert_type(np.linalg.vector_norm(AR_f8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.linalg.vector_norm(AR_f8_2d, keepdims=True), _Array2D[np.float64]) +assert_type(np.linalg.vector_norm(AR_f2), np.float16) +assert_type(np.linalg.vector_norm(AR_f2, axis=1), npt.NDArray[np.float16]) +assert_type(np.linalg.vector_norm(AR_f2, keepdims=True), npt.NDArray[np.float16]) +assert_type(np.linalg.vector_norm(AR_f2_2d, keepdims=True), _Array2D[np.float16]) +assert_type(np.linalg.vector_norm(AR_f4), np.float32) +assert_type(np.linalg.vector_norm(AR_c8), np.float32) +assert_type(np.linalg.vector_norm(AR_f4, axis=1), npt.NDArray[np.float32]) +assert_type(np.linalg.vector_norm(AR_f4, keepdims=True), npt.NDArray[np.float32]) +assert_type(np.linalg.vector_norm(AR_f4_2d, keepdims=True), _Array2D[np.float32]) +assert_type(np.linalg.vector_norm(AR_f10), np.longdouble) +assert_type(np.linalg.vector_norm(AR_c20), np.longdouble) +assert_type(np.linalg.vector_norm(AR_f10, axis=1), npt.NDArray[np.longdouble]) +assert_type(np.linalg.vector_norm(AR_f10, keepdims=True), npt.NDArray[np.longdouble]) +assert_type(np.linalg.vector_norm(AR_f10_2d, keepdims=True), _Array2D[np.longdouble]) + +assert_type(np.linalg.tensordot(AR_b, AR_b), npt.NDArray[np.bool]) +assert_type(np.linalg.tensordot(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.linalg.tensordot(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.tensordot(AR_c16, AR_c16), npt.NDArray[np.complex128]) +assert_type(np.linalg.tensordot(AR_m, AR_m), npt.NDArray[np.timedelta64]) +assert_type(np.linalg.tensordot(AR_O, AR_O), npt.NDArray[np.object_]) + 
+assert_type(np.linalg.multi_dot([AR_i8, AR_i8]), npt.NDArray[np.int64]) +assert_type(np.linalg.multi_dot([AR_f8, AR_f8]), npt.NDArray[np.float64]) +assert_type(np.linalg.multi_dot([AR_c16, AR_c16]), npt.NDArray[np.complex128]) +assert_type(np.linalg.multi_dot([AR_O, AR_O]), npt.NDArray[np.object_]) +# Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. +assert_type(np.linalg.multi_dot([AR_i8, AR_f8]), npt.NDArray[np.float64 | Any]) # type: ignore[assert-type] +assert_type(np.linalg.multi_dot([AR_f8, AR_c16]), npt.NDArray[np.complex128 | Any]) # type: ignore[assert-type] +assert_type(np.linalg.multi_dot([AR_m, AR_m]), npt.NDArray[np.timedelta64]) # type: ignore[assert-type] + +# Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. +assert_type(np.linalg.diagonal(AR_any), np.ndarray) # type: ignore[assert-type] +assert_type(np.linalg.diagonal(AR_f4), npt.NDArray[np.float32]) +assert_type(np.linalg.diagonal(AR_f4_2d), _Array1D[np.float32]) +assert_type(np.linalg.diagonal(AR_f8_2d), _Array1D[np.float64]) +assert_type(np.linalg.diagonal(bool_list_2d), npt.NDArray[np.bool]) +assert_type(np.linalg.diagonal(int_list_2d), npt.NDArray[np.int_]) +assert_type(np.linalg.diagonal(float_list_2d), npt.NDArray[np.float64]) +assert_type(np.linalg.diagonal(complex_list_2d), npt.NDArray[np.complex128]) +assert_type(np.linalg.diagonal(bytes_list_2d), npt.NDArray[np.bytes_]) +assert_type(np.linalg.diagonal(str_list_2d), npt.NDArray[np.str_]) + +assert_type(np.linalg.trace(AR_any), Any) +assert_type(np.linalg.trace(AR_f4), Any) +assert_type(np.linalg.trace(AR_f4_2d), np.float32) +assert_type(np.linalg.trace(AR_f8_2d), np.float64) +assert_type(np.linalg.trace(AR_f4_3d), _Array1D[np.float32]) +assert_type(np.linalg.trace(AR_f8_3d), _Array1D[np.float64]) +assert_type(np.linalg.trace(AR_f8_4d), np.ndarray[tuple[int, *tuple[Any, ...]], np.dtype[np.float64]]) +assert_type(np.linalg.trace(bool_list_2d), np.bool) 
+assert_type(np.linalg.trace(int_list_2d), np.int_) +assert_type(np.linalg.trace(float_list_2d), np.float64) +assert_type(np.linalg.trace(complex_list_2d), np.complex128) +assert_type(np.linalg.trace(float_list_3d), npt.NDArray[np.float64]) + +assert_type(np.linalg.outer(bool_list_1d, bool_list_1d), _Array2D[np.bool]) +assert_type(np.linalg.outer(int_list_1d, int_list_1d), _Array2D[np.int64]) +assert_type(np.linalg.outer(float_list_1d, float_list_1d), _Array2D[np.float64]) +assert_type(np.linalg.outer(complex_list_1d, complex_list_1d), _Array2D[np.complex128]) +assert_type(np.linalg.outer(AR_i8, AR_i8), _Array2D[np.int64]) +assert_type(np.linalg.outer(AR_f8, AR_f8), _Array2D[np.float64]) +assert_type(np.linalg.outer(AR_c16, AR_c16), _Array2D[np.complex128]) +assert_type(np.linalg.outer(AR_b, AR_b), _Array2D[np.bool]) +assert_type(np.linalg.outer(AR_O, AR_O), _Array2D[np.object_]) +assert_type(np.linalg.outer(AR_i8, AR_m), _Array2D[np.timedelta64]) + +assert_type(np.linalg.cross(int_list_1d, int_list_1d), npt.NDArray[np.int64]) +assert_type(np.linalg.cross(float_list_1d, int_list_1d), npt.NDArray[np.float64]) +assert_type(np.linalg.cross(float_list_1d, complex_list_1d), npt.NDArray[np.complex128]) +assert_type(np.linalg.cross(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.linalg.cross(AR_f8, AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.cross(AR_f2, AR_f2), npt.NDArray[np.float16]) +assert_type(np.linalg.cross(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.cross(AR_c16, AR_i8), npt.NDArray[np.complex128]) +assert_type(np.linalg.cross(AR_c16, AR_f8), npt.NDArray[np.complex128]) +assert_type(np.linalg.cross(AR_c16, AR_c16), npt.NDArray[np.complex128]) +assert_type(np.linalg.cross(AR_O, AR_f8), npt.NDArray[np.object_]) +assert_type(np.linalg.cross(AR_f8, AR_O), npt.NDArray[np.object_]) +assert_type(np.linalg.cross(AR_O, AR_O), npt.NDArray[np.object_]) -assert_type(np.linalg.lstsq(AR_i8, AR_i8), tuple[npt.NDArray[np.float64], 
npt.NDArray[np.float64], np.int32, npt.NDArray[np.float64]]) -assert_type(np.linalg.lstsq(AR_i8, AR_f8), tuple[npt.NDArray[np.floating[Any]], npt.NDArray[np.floating[Any]], np.int32, npt.NDArray[np.floating[Any]]]) -assert_type(np.linalg.lstsq(AR_f8, AR_c16), tuple[npt.NDArray[np.complexfloating[Any, Any]], npt.NDArray[np.floating[Any]], np.int32, npt.NDArray[np.floating[Any]]]) - -assert_type(np.linalg.norm(AR_i8), np.floating[Any]) -assert_type(np.linalg.norm(AR_f8), np.floating[Any]) -assert_type(np.linalg.norm(AR_c16), np.floating[Any]) -assert_type(np.linalg.norm(AR_S), np.floating[Any]) -assert_type(np.linalg.norm(AR_f8, axis=0), Any) - -assert_type(np.linalg.matrix_norm(AR_i8), np.floating[Any]) -assert_type(np.linalg.matrix_norm(AR_f8), np.floating[Any]) -assert_type(np.linalg.matrix_norm(AR_c16), np.floating[Any]) -assert_type(np.linalg.matrix_norm(AR_S), np.floating[Any]) - -assert_type(np.linalg.vector_norm(AR_i8), np.floating[Any]) -assert_type(np.linalg.vector_norm(AR_f8), np.floating[Any]) -assert_type(np.linalg.vector_norm(AR_c16), np.floating[Any]) -assert_type(np.linalg.vector_norm(AR_S), np.floating[Any]) - -assert_type(np.linalg.multi_dot([AR_i8, AR_i8]), Any) -assert_type(np.linalg.multi_dot([AR_i8, AR_f8]), Any) -assert_type(np.linalg.multi_dot([AR_f8, AR_c16]), Any) -assert_type(np.linalg.multi_dot([AR_O, AR_O]), Any) -assert_type(np.linalg.multi_dot([AR_m, AR_m]), Any) - -assert_type(np.linalg.cross(AR_i8, AR_i8), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.linalg.cross(AR_f8, AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.linalg.cross(AR_c16, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) - -assert_type(np.linalg.matmul(AR_i8, AR_i8), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.linalg.matmul(AR_f8, AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.linalg.matmul(AR_c16, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.linalg.matmul(AR_b, AR_b), npt.NDArray[np.bool] | Any) 
+assert_type(np.linalg.matmul(AR_i8, AR_b), npt.NDArray[np.int64] | Any) +assert_type(np.linalg.matmul(AR_b, AR_i8), npt.NDArray[np.int64] | Any) +assert_type(np.linalg.matmul(AR_i8, AR_i8), npt.NDArray[np.int64] | Any) +assert_type(np.linalg.matmul(AR_f4, AR_f4), npt.NDArray[np.float32] | Any) +assert_type(np.linalg.matmul(AR_f8, AR_i8), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matmul(AR_i8, AR_f8), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matmul(AR_f8, AR_f8), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matmul(AR_c16, AR_i8), npt.NDArray[np.complex128] | Any) +assert_type(np.linalg.matmul(AR_f8, AR_c16), npt.NDArray[np.complex128] | Any) +assert_type(np.linalg.matmul(AR_c16, AR_c16), npt.NDArray[np.complex128] | Any) +# same as the block above, but for the 1d x 1d case +assert_type(np.linalg.matmul(AR_b_1d, AR_b_1d), np.bool) +assert_type(np.linalg.matmul(AR_i8_1d, AR_b_1d), np.int64) +assert_type(np.linalg.matmul(AR_b_1d, AR_i8_1d), np.int64) +assert_type(np.linalg.matmul(AR_i8_1d, AR_i8_1d), np.int64) +assert_type(np.linalg.matmul(AR_f4_1d, AR_f4_1d), np.float32) +assert_type(np.linalg.matmul(AR_f8_1d, AR_i8_1d), np.float64) +assert_type(np.linalg.matmul(AR_i8_1d, AR_f8_1d), np.float64) +assert_type(np.linalg.matmul(AR_f8_1d, AR_f8_1d), np.float64) +assert_type(np.linalg.matmul(AR_c16_1d, AR_i8_1d), np.complex128) +assert_type(np.linalg.matmul(AR_f8_1d, AR_c16_1d), np.complex128) +assert_type(np.linalg.matmul(AR_c16_1d, AR_c16_1d), np.complex128) +# 1d x 2d +assert_type(np.linalg.matmul(AR_b_1d, AR_b_2d), npt.NDArray[np.bool]) +assert_type(np.linalg.matmul(AR_i8_1d, AR_b_2d), npt.NDArray[np.int64]) +assert_type(np.linalg.matmul(AR_b_1d, AR_i8_2d), npt.NDArray[np.int64]) +assert_type(np.linalg.matmul(AR_i8_1d, AR_i8_2d), npt.NDArray[np.int64]) +assert_type(np.linalg.matmul(AR_f4_1d, AR_f4_2d), npt.NDArray[np.float32]) +assert_type(np.linalg.matmul(AR_f8_1d, AR_i8_2d), npt.NDArray[np.float64]) 
+assert_type(np.linalg.matmul(AR_i8_1d, AR_f8_2d), npt.NDArray[np.float64]) +assert_type(np.linalg.matmul(AR_f8_1d, AR_f8_2d), npt.NDArray[np.float64]) +assert_type(np.linalg.matmul(AR_c16_1d, AR_i8_2d), npt.NDArray[np.complex128]) +assert_type(np.linalg.matmul(AR_f8_1d, AR_c16_2d), npt.NDArray[np.complex128]) +assert_type(np.linalg.matmul(AR_c16_1d, AR_c16_2d), npt.NDArray[np.complex128]) +# 1d x ?d +assert_type(np.linalg.matmul(AR_b_1d, AR_b), npt.NDArray[np.bool] | Any) +assert_type(np.linalg.matmul(AR_i8_1d, AR_b), npt.NDArray[np.int64] | Any) +assert_type(np.linalg.matmul(AR_b_1d, AR_i8), npt.NDArray[np.int64] | Any) +assert_type(np.linalg.matmul(AR_i8_1d, AR_i8), npt.NDArray[np.int64] | Any) +assert_type(np.linalg.matmul(AR_f4_1d, AR_f4), npt.NDArray[np.float32] | Any) +assert_type(np.linalg.matmul(AR_f8_1d, AR_i8), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matmul(AR_i8_1d, AR_f8), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matmul(AR_f8_1d, AR_f8), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matmul(AR_c16_1d, AR_i8), npt.NDArray[np.complex128] | Any) +assert_type(np.linalg.matmul(AR_f8_1d, AR_c16), npt.NDArray[np.complex128] | Any) +assert_type(np.linalg.matmul(AR_c16_1d, AR_c16), npt.NDArray[np.complex128] | Any) +# 2d x 1d +assert_type(np.linalg.matmul(AR_b_2d, AR_b_1d), npt.NDArray[np.bool]) +assert_type(np.linalg.matmul(AR_i8_2d, AR_b_1d), npt.NDArray[np.int64]) +assert_type(np.linalg.matmul(AR_b_2d, AR_i8_1d), npt.NDArray[np.int64]) +assert_type(np.linalg.matmul(AR_i8_2d, AR_i8_1d), npt.NDArray[np.int64]) +assert_type(np.linalg.matmul(AR_f4_2d, AR_f4_1d), npt.NDArray[np.float32]) +assert_type(np.linalg.matmul(AR_f8_2d, AR_i8_1d), npt.NDArray[np.float64]) +assert_type(np.linalg.matmul(AR_i8_2d, AR_f8_1d), npt.NDArray[np.float64]) +assert_type(np.linalg.matmul(AR_f8_2d, AR_f8_1d), npt.NDArray[np.float64]) +assert_type(np.linalg.matmul(AR_c16_2d, AR_i8_1d), npt.NDArray[np.complex128]) +assert_type(np.linalg.matmul(AR_f8_2d, 
AR_c16_1d), npt.NDArray[np.complex128]) +assert_type(np.linalg.matmul(AR_c16_2d, AR_c16_1d), npt.NDArray[np.complex128]) +# 2d x ?d +assert_type(np.linalg.matmul(AR_b_2d, AR_b), npt.NDArray[np.bool]) +assert_type(np.linalg.matmul(AR_i8_2d, AR_b), npt.NDArray[np.int64]) +assert_type(np.linalg.matmul(AR_b_2d, AR_i8), npt.NDArray[np.int64]) +assert_type(np.linalg.matmul(AR_i8_2d, AR_i8), npt.NDArray[np.int64]) +assert_type(np.linalg.matmul(AR_f4_2d, AR_f4), npt.NDArray[np.float32]) +assert_type(np.linalg.matmul(AR_f8_2d, AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.matmul(AR_i8_2d, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.matmul(AR_f8_2d, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.matmul(AR_c16_2d, AR_i8), npt.NDArray[np.complex128]) +assert_type(np.linalg.matmul(AR_f8_2d, AR_c16), npt.NDArray[np.complex128]) +assert_type(np.linalg.matmul(AR_c16_2d, AR_c16), npt.NDArray[np.complex128]) +# ?d x 1d +assert_type(np.linalg.matmul(AR_b, AR_b_1d), npt.NDArray[np.bool] | Any) +assert_type(np.linalg.matmul(AR_i8, AR_b_1d), npt.NDArray[np.int64] | Any) +assert_type(np.linalg.matmul(AR_b, AR_i8_1d), npt.NDArray[np.int64] | Any) +assert_type(np.linalg.matmul(AR_i8, AR_i8_1d), npt.NDArray[np.int64] | Any) +assert_type(np.linalg.matmul(AR_f4, AR_f4_1d), npt.NDArray[np.float32] | Any) +assert_type(np.linalg.matmul(AR_f8, AR_i8_1d), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matmul(AR_i8, AR_f8_1d), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matmul(AR_f8, AR_f8_1d), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matmul(AR_c16, AR_i8_1d), npt.NDArray[np.complex128] | Any) +assert_type(np.linalg.matmul(AR_f8, AR_c16_1d), npt.NDArray[np.complex128] | Any) +assert_type(np.linalg.matmul(AR_c16, AR_c16_1d), npt.NDArray[np.complex128] | Any) diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi new file mode 100644 index 000000000000..944983b97fa1 --- /dev/null +++ 
b/numpy/typing/tests/data/reveal/ma.pyi @@ -0,0 +1,1099 @@ +from typing import Any, Literal, NoReturn, assert_type + +import numpy as np +from numpy._typing import NDArray, _AnyShape + +type MaskedArray[ScalarT: np.generic] = np.ma.MaskedArray[_AnyShape, np.dtype[ScalarT]] +type _NoMaskType = np.bool[Literal[False]] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] + +### + +class MaskedArraySubclass[ScalarT: np.generic](np.ma.MaskedArray[_AnyShape, np.dtype[ScalarT]]): ... + +class IntoMaskedArraySubClass[ScalarT: np.generic]: + def __array__(self) -> MaskedArraySubclass[ScalarT]: ... + +type MaskedArraySubclassC = MaskedArraySubclass[np.complex128] + +AR_b: NDArray[np.bool] +AR_f4: NDArray[np.float32] +AR_i8: NDArray[np.int64] +AR_u4: NDArray[np.uint32] +AR_dt64: NDArray[np.datetime64] +AR_td64: NDArray[np.timedelta64] +AR_o: NDArray[np.object_] + +AR_LIKE_b: list[bool] +AR_LIKE_u: list[np.uint32] +AR_LIKE_i: list[int] +AR_LIKE_f: list[float] +AR_LIKE_c: list[complex] +AR_LIKE_td64: list[np.timedelta64] +AR_LIKE_dt64: list[np.datetime64] +AR_LIKE_o: list[np.object_] +AR_number: NDArray[np.number] + +MAR_c8: MaskedArray[np.complex64] +MAR_c16: MaskedArray[np.complex128] +MAR_b: MaskedArray[np.bool] +MAR_f4: MaskedArray[np.float32] +MAR_f8: MaskedArray[np.float64] +MAR_i8: MaskedArray[np.int64] +MAR_u4: MaskedArray[np.uint32] +MAR_dt64: MaskedArray[np.datetime64] +MAR_td64: MaskedArray[np.timedelta64] +MAR_o: MaskedArray[np.object_] +MAR_s: MaskedArray[np.str_] +MAR_byte: MaskedArray[np.bytes_] +MAR_V: MaskedArray[np.void] +MAR_floating: MaskedArray[np.floating] +MAR_number: MaskedArray[np.number] + +MAR_subclass: MaskedArraySubclassC +MAR_into_subclass: IntoMaskedArraySubClass[np.float32] + +MAR_1d: np.ma.MaskedArray[tuple[int], np.dtype] +MAR_2d_f4: np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]] +MAR_2d_V: np.ma.MaskedArray[tuple[int, int], np.dtype[np.void]] + +b: np.bool +f4: np.float32 +f: float +i: int + 
+assert_type(MAR_1d.shape, tuple[int]) + +assert_type(MAR_f4.dtype, np.dtype[np.float32]) + +assert_type(int(MAR_i8), int) +assert_type(float(MAR_f4), float) + +assert_type(np.ma.min(MAR_b), np.bool) +assert_type(np.ma.min(MAR_f4), np.float32) +assert_type(np.ma.min(MAR_b, axis=0), Any) +assert_type(np.ma.min(MAR_f4, axis=0), Any) +assert_type(np.ma.min(MAR_b, keepdims=True), Any) +assert_type(np.ma.min(MAR_f4, keepdims=True), Any) +assert_type(np.ma.min(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.min(MAR_f4, 0, MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.min(MAR_f4, None, MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_b.min(), np.bool) +assert_type(MAR_f4.min(), np.float32) +assert_type(MAR_b.min(axis=0), Any) +assert_type(MAR_f4.min(axis=0), Any) +assert_type(MAR_b.min(keepdims=True), Any) +assert_type(MAR_f4.min(keepdims=True), Any) +assert_type(MAR_f4.min(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.min(0, MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.min(None, MAR_subclass), MaskedArraySubclassC) + +assert_type(np.ma.max(MAR_b), np.bool) +assert_type(np.ma.max(MAR_f4), np.float32) +assert_type(np.ma.max(MAR_b, axis=0), Any) +assert_type(np.ma.max(MAR_f4, axis=0), Any) +assert_type(np.ma.max(MAR_b, keepdims=True), Any) +assert_type(np.ma.max(MAR_f4, keepdims=True), Any) +assert_type(np.ma.max(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.max(MAR_f4, 0, MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.max(MAR_f4, None, MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_b.max(), np.bool) +assert_type(MAR_f4.max(), np.float32) +assert_type(MAR_b.max(axis=0), Any) +assert_type(MAR_f4.max(axis=0), Any) +assert_type(MAR_b.max(keepdims=True), Any) +assert_type(MAR_f4.max(keepdims=True), Any) +assert_type(MAR_f4.max(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.max(0, MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.max(None, MAR_subclass), 
MaskedArraySubclassC) + +assert_type(np.ma.ptp(MAR_b), np.bool) +assert_type(np.ma.ptp(MAR_f4), np.float32) +assert_type(np.ma.ptp(MAR_b, axis=0), Any) +assert_type(np.ma.ptp(MAR_f4, axis=0), Any) +assert_type(np.ma.ptp(MAR_b, keepdims=True), Any) +assert_type(np.ma.ptp(MAR_f4, keepdims=True), Any) +assert_type(np.ma.ptp(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.ptp(MAR_f4, 0, MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.ptp(MAR_f4, None, MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_b.ptp(), np.bool) +assert_type(MAR_f4.ptp(), np.float32) +assert_type(MAR_b.ptp(axis=0), Any) +assert_type(MAR_f4.ptp(axis=0), Any) +assert_type(MAR_b.ptp(keepdims=True), Any) +assert_type(MAR_f4.ptp(keepdims=True), Any) +assert_type(MAR_f4.ptp(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.ptp(0, MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.ptp(None, MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_b.argmin(), np.intp) +assert_type(MAR_f4.argmin(), np.intp) +assert_type(MAR_f4.argmax(fill_value=6.28318, keepdims=False), np.intp) +assert_type(MAR_b.argmin(axis=0), Any) +assert_type(MAR_f4.argmin(axis=0), Any) +assert_type(MAR_b.argmin(keepdims=True), Any) +assert_type(MAR_f4.argmin(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.argmin(None, None, out=MAR_subclass), MaskedArraySubclassC) + +assert_type(np.ma.argmin(MAR_b), np.intp) +assert_type(np.ma.argmin(MAR_f4), np.intp) +assert_type(np.ma.argmin(MAR_f4, fill_value=6.28318, keepdims=False), np.intp) +assert_type(np.ma.argmin(MAR_b, axis=0), Any) +assert_type(np.ma.argmin(MAR_f4, axis=0), Any) +assert_type(np.ma.argmin(MAR_b, keepdims=True), Any) +assert_type(np.ma.argmin(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.argmin(MAR_f4, None, None, out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_b.argmax(), np.intp) +assert_type(MAR_f4.argmax(), np.intp) +assert_type(MAR_f4.argmax(fill_value=6.28318, 
keepdims=False), np.intp) +assert_type(MAR_b.argmax(axis=0), Any) +assert_type(MAR_f4.argmax(axis=0), Any) +assert_type(MAR_b.argmax(keepdims=True), Any) +assert_type(MAR_f4.argmax(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.argmax(None, None, out=MAR_subclass), MaskedArraySubclassC) + +assert_type(np.ma.argmax(MAR_b), np.intp) +assert_type(np.ma.argmax(MAR_f4), np.intp) +assert_type(np.ma.argmax(MAR_f4, fill_value=6.28318, keepdims=False), np.intp) +assert_type(np.ma.argmax(MAR_b, axis=0), Any) +assert_type(np.ma.argmax(MAR_f4, axis=0), Any) +assert_type(np.ma.argmax(MAR_b, keepdims=True), Any) +assert_type(np.ma.argmax(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.argmax(MAR_f4, None, None, out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_b.all(), np.bool) +assert_type(MAR_f4.all(), np.bool) +assert_type(MAR_f4.all(keepdims=False), np.bool) +assert_type(MAR_b.all(axis=0), np.bool | MaskedArray[np.bool]) +assert_type(MAR_b.all(axis=0, keepdims=True), MaskedArray[np.bool]) +assert_type(MAR_b.all(0, None, True), MaskedArray[np.bool]) +assert_type(MAR_f4.all(axis=0), np.bool | MaskedArray[np.bool]) +assert_type(MAR_b.all(keepdims=True), MaskedArray[np.bool]) +assert_type(MAR_f4.all(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.all(None, out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_b.any(), np.bool) +assert_type(MAR_f4.any(), np.bool) +assert_type(MAR_f4.any(keepdims=False), np.bool) +assert_type(MAR_b.any(axis=0), np.bool | MaskedArray[np.bool]) +assert_type(MAR_b.any(axis=0, keepdims=True), MaskedArray[np.bool]) +assert_type(MAR_b.any(0, None, True), MaskedArray[np.bool]) +assert_type(MAR_f4.any(axis=0), np.bool | MaskedArray[np.bool]) +assert_type(MAR_b.any(keepdims=True), MaskedArray[np.bool]) +assert_type(MAR_f4.any(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.any(None, out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_f4.sort(), None) 
+assert_type(MAR_f4.sort(axis=0, kind="quicksort", order="K", endwith=False, fill_value=42., stable=False), None) + +assert_type(np.ma.sort(MAR_f4), MaskedArray[np.float32]) +assert_type(np.ma.sort(MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.sort([[0, 1], [2, 3]]), NDArray[Any]) +assert_type(np.ma.sort(AR_f4), NDArray[np.float32]) + +assert_type(MAR_f8.take(0), np.float64) +assert_type(MAR_1d.take(0), Any) +assert_type(MAR_f8.take([0]), MaskedArray[np.float64]) +assert_type(MAR_f8.take(0, out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f8.take([0], out=MAR_subclass), MaskedArraySubclassC) + +assert_type(np.ma.take(f, 0), Any) +assert_type(np.ma.take(f4, 0), np.float32) +assert_type(np.ma.take(MAR_f8, 0), np.float64) +assert_type(np.ma.take(AR_f4, 0), np.float32) +assert_type(np.ma.take(MAR_1d, 0), Any) +assert_type(np.ma.take(MAR_f8, [0]), MaskedArray[np.float64]) +assert_type(np.ma.take(AR_f4, [0]), MaskedArray[np.float32]) +assert_type(np.ma.take(MAR_f8, 0, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.take(MAR_f8, [0], out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.take([1], [0]), MaskedArray[Any]) +assert_type(np.ma.take(np.eye(2), 1, axis=0), MaskedArray[np.float64]) + +assert_type(MAR_f4.partition(1), None) +assert_type(MAR_V.partition(1, axis=0, kind="introselect", order="K"), None) + +assert_type(MAR_f4.argpartition(1), MaskedArray[np.intp]) +assert_type( + MAR_1d.argpartition(1, axis=0, kind="introselect", order="K"), + np.ma.MaskedArray[tuple[int], np.dtype[np.intp]], +) + +assert_type(np.ma.ndim(f4), int) +assert_type(np.ma.ndim(MAR_b), int) +assert_type(np.ma.ndim(AR_f4), int) + +assert_type(np.ma.size(b), int) +assert_type(np.ma.size(MAR_f4, axis=0), int) +assert_type(np.ma.size(AR_f4), int) + +assert_type(np.ma.is_masked(MAR_f4), bool) + +assert_type(MAR_f4.ids(), tuple[int, int]) + +assert_type(MAR_f4.iscontiguous(), bool) + +assert_type(MAR_f4 >= 3, MaskedArray[np.bool]) +assert_type(MAR_i8 >= 
AR_td64, MaskedArray[np.bool]) +assert_type(MAR_b >= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_td64 >= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_dt64 >= AR_dt64, MaskedArray[np.bool]) +assert_type(MAR_o >= AR_o, MaskedArray[np.bool]) +assert_type(MAR_1d >= 0, MaskedArray[np.bool]) +assert_type(MAR_s >= MAR_s, MaskedArray[np.bool]) +assert_type(MAR_byte >= MAR_byte, MaskedArray[np.bool]) + +assert_type(MAR_f4 > 3, MaskedArray[np.bool]) +assert_type(MAR_i8 > AR_td64, MaskedArray[np.bool]) +assert_type(MAR_b > AR_td64, MaskedArray[np.bool]) +assert_type(MAR_td64 > AR_td64, MaskedArray[np.bool]) +assert_type(MAR_dt64 > AR_dt64, MaskedArray[np.bool]) +assert_type(MAR_o > AR_o, MaskedArray[np.bool]) +assert_type(MAR_1d > 0, MaskedArray[np.bool]) +assert_type(MAR_s > MAR_s, MaskedArray[np.bool]) +assert_type(MAR_byte > MAR_byte, MaskedArray[np.bool]) + +assert_type(MAR_f4 <= 3, MaskedArray[np.bool]) +assert_type(MAR_i8 <= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_b <= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_td64 <= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_dt64 <= AR_dt64, MaskedArray[np.bool]) +assert_type(MAR_o <= AR_o, MaskedArray[np.bool]) +assert_type(MAR_1d <= 0, MaskedArray[np.bool]) +assert_type(MAR_s <= MAR_s, MaskedArray[np.bool]) +assert_type(MAR_byte <= MAR_byte, MaskedArray[np.bool]) + +assert_type(MAR_f4 < 3, MaskedArray[np.bool]) +assert_type(MAR_i8 < AR_td64, MaskedArray[np.bool]) +assert_type(MAR_b < AR_td64, MaskedArray[np.bool]) +assert_type(MAR_td64 < AR_td64, MaskedArray[np.bool]) +assert_type(MAR_dt64 < AR_dt64, MaskedArray[np.bool]) +assert_type(MAR_o < AR_o, MaskedArray[np.bool]) +assert_type(MAR_1d < 0, MaskedArray[np.bool]) +assert_type(MAR_s < MAR_s, MaskedArray[np.bool]) +assert_type(MAR_byte < MAR_byte, MaskedArray[np.bool]) + +assert_type(MAR_f4 <= 3, MaskedArray[np.bool]) +assert_type(MAR_i8 <= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_b <= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_td64 <= AR_td64, 
MaskedArray[np.bool]) +assert_type(MAR_dt64 <= AR_dt64, MaskedArray[np.bool]) +assert_type(MAR_o <= AR_o, MaskedArray[np.bool]) +assert_type(MAR_1d <= 0, MaskedArray[np.bool]) +assert_type(MAR_s <= MAR_s, MaskedArray[np.bool]) +assert_type(MAR_byte <= MAR_byte, MaskedArray[np.bool]) + +assert_type(MAR_byte.count(), int) +assert_type(MAR_f4.count(axis=None), int) +assert_type(MAR_f4.count(axis=0), NDArray[np.int_]) +assert_type(MAR_b.count(axis=(0, 1)), NDArray[np.int_]) +assert_type(MAR_o.count(keepdims=True), NDArray[np.int_]) +assert_type(MAR_o.count(axis=None, keepdims=True), NDArray[np.int_]) +assert_type(MAR_o.count(None, True), NDArray[np.int_]) + +assert_type(np.ma.count(MAR_byte), int) +assert_type(np.ma.count(MAR_byte, axis=None), int) +assert_type(np.ma.count(MAR_f4, axis=0), NDArray[np.int_]) +assert_type(np.ma.count(MAR_b, axis=(0, 1)), NDArray[np.int_]) +assert_type(np.ma.count(MAR_o, keepdims=True), NDArray[np.int_]) +assert_type(np.ma.count(MAR_o, axis=None, keepdims=True), NDArray[np.int_]) +assert_type(np.ma.count(MAR_o, None, True), NDArray[np.int_]) + +assert_type(MAR_f4.compressed(), np.ndarray[tuple[int], np.dtype[np.float32]]) + +assert_type(MAR_f4.compress([True, False]), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) +assert_type(MAR_f4.compress([True, False], axis=0), MaskedArray[np.float32]) +assert_type(MAR_f4.compress([True, False], axis=0, out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.compress([True, False], 0, MAR_subclass), MaskedArraySubclassC) + +assert_type(np.ma.compressed(MAR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.ma.compressed([[1, 2, 3]]), np.ndarray[tuple[int], np.dtype]) + +assert_type(MAR_f4.put([0, 4, 8], [10, 20, 30]), None) +assert_type(MAR_f4.put(4, 999), None) +assert_type(MAR_f4.put(4, 999, mode="clip"), None) + +assert_type(MAR_c8.__array_wrap__(AR_b), MaskedArray[np.bool]) + +assert_type(np.ma.put(MAR_f4, [0, 4, 8], [10, 20, 30]), None) +assert_type(np.ma.put(MAR_f4, 4, 
999), None) +assert_type(np.ma.put(MAR_f4, 4, 999, mode="clip"), None) + +assert_type(np.ma.putmask(MAR_f4, [True, False], [0, 1]), None) +assert_type(np.ma.putmask(MAR_f4, np.False_, [0, 1]), None) + +assert_type(MAR_f4.filled(float("nan")), NDArray[np.float32]) +assert_type(MAR_i8.filled(), NDArray[np.int64]) +assert_type(MAR_1d.filled(), np.ndarray[tuple[int], np.dtype]) + +assert_type(np.ma.filled(MAR_f4, float("nan")), NDArray[np.float32]) +assert_type(np.ma.filled([[1, 2, 3]]), NDArray[Any]) +# PyRight detects this one correctly, but mypy doesn't. +# https://github.com/numpy/numpy/pull/28742#discussion_r2048968375 +assert_type(np.ma.filled(MAR_1d), np.ndarray[tuple[int], np.dtype]) # type: ignore[assert-type] + +assert_type(MAR_b.repeat(3), np.ma.MaskedArray[tuple[int], np.dtype[np.bool]]) +assert_type(MAR_2d_f4.repeat(MAR_i8), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.repeat(MAR_i8, axis=None), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.repeat(MAR_i8, axis=0), MaskedArray[np.float32]) + +assert_type(np.ma.allequal(AR_f4, MAR_f4), bool) +assert_type(np.ma.allequal(AR_f4, MAR_f4, fill_value=False), bool) + +assert_type(np.ma.allclose(AR_f4, MAR_f4), bool) +assert_type(np.ma.allclose(AR_f4, MAR_f4, masked_equal=False), bool) +assert_type(np.ma.allclose(AR_f4, MAR_f4, rtol=.4, atol=.3), bool) + +assert_type(MAR_2d_f4.ravel(), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) +assert_type(MAR_1d.ravel(order="A"), np.ma.MaskedArray[tuple[int], np.dtype[Any]]) + +assert_type(np.ma.getmask(MAR_f4), NDArray[np.bool] | _NoMaskType) +# PyRight detects this one correctly, but mypy doesn't: +# `Revealed type is "Union[numpy.ndarray[Any, Any], numpy.bool[Any]]"` +assert_type(np.ma.getmask(MAR_1d), np.ndarray[tuple[int], np.dtype[np.bool]] | np.bool) # type: ignore[assert-type] +assert_type(np.ma.getmask(MAR_2d_f4), np.ndarray[tuple[int, int], np.dtype[np.bool]] | _NoMaskType) 
+assert_type(np.ma.getmask([1, 2]), NDArray[np.bool] | _NoMaskType) +assert_type(np.ma.getmask(np.int64(1)), _NoMaskType) + +assert_type(np.ma.is_mask(MAR_1d), bool) +assert_type(np.ma.is_mask(AR_b), bool) + +def func(x: object) -> None: + if np.ma.is_mask(x): + assert_type(x, NDArray[np.bool]) + else: + assert_type(x, object) + +assert_type(MAR_2d_f4.mT, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) + +assert_type(MAR_c16.real, MaskedArray[np.float64]) +assert_type(MAR_c16.imag, MaskedArray[np.float64]) + +assert_type(MAR_2d_f4.baseclass, type[NDArray[Any]]) + +assert_type(MAR_b.swapaxes(0, 1), MaskedArray[np.bool]) +assert_type(MAR_2d_f4.swapaxes(1, 0), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) + +assert_type(MAR_2d_f4[AR_i8], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[[1, 2, 3]], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[1:], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[:], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[0, 0], Any) +assert_type(MAR_2d_f4[:, np.newaxis], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[..., -1], MaskedArray[np.float32]) +assert_type(MAR_2d_V["field_0"], np.ma.MaskedArray[tuple[int, int], np.dtype]) +assert_type(MAR_2d_V[["field_0", "field_1"]], np.ma.MaskedArray[tuple[int, int], np.dtype[np.void]]) + +assert_type(np.ma.nomask, np.bool[Literal[False]]) +assert_type(np.ma.MaskType, type[np.bool]) + +assert_type(MAR_1d.__setmask__([True, False]), None) +assert_type(MAR_1d.__setmask__(np.False_), None) + +assert_type(MAR_2d_f4.harden_mask(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_i8.harden_mask(), MaskedArray[np.int64]) +assert_type(MAR_2d_f4.soften_mask(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_i8.soften_mask(), MaskedArray[np.int64]) +assert_type(MAR_f4.unshare_mask(), MaskedArray[np.float32]) +assert_type(MAR_b.shrink_mask(), MaskedArray[np.bool_]) + +assert_type(MAR_i8.hardmask, bool) +assert_type(MAR_i8.sharedmask, 
bool) + +assert_type(MAR_i8.recordmask, np.ma.MaskType | NDArray[np.ma.MaskType]) +assert_type(MAR_2d_f4.recordmask, np.ma.MaskType | np.ndarray[tuple[int, int], np.dtype[np.ma.MaskType]]) + +assert_type(MAR_2d_f4.anom(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.anom(axis=0, dtype=np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype]) +assert_type(MAR_2d_f4.anom(0, np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype]) +assert_type(MAR_2d_f4.anom(0, "float16"), np.ma.MaskedArray[tuple[int, int], np.dtype]) + +assert_type(MAR_i8.fill_value, np.int64) + +assert_type(MAR_b.transpose(), MaskedArray[np.bool]) +assert_type(MAR_2d_f4.transpose(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.transpose(1, 0), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.transpose((1, 0)), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_b.T, MaskedArray[np.bool]) +assert_type(MAR_2d_f4.T, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) + +assert_type(MAR_2d_f4.dot(1), MaskedArray[Any]) +assert_type(MAR_2d_f4.dot([1]), MaskedArray[Any]) +assert_type(MAR_2d_f4.dot(1, out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], ...]) +assert_type(MAR_2d_f4.nonzero()[0], _Array1D[np.intp]) + +assert_type(MAR_f8.trace(), Any) +assert_type(MAR_f8.trace(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f8.trace(out=MAR_subclass, dtype=None), MaskedArraySubclassC) + +assert_type(MAR_f8.round(), MaskedArray[np.float64]) +assert_type(MAR_f8.round(out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_i8.reshape(None), MaskedArray[np.int64]) +assert_type(MAR_f8.reshape(-1), np.ma.MaskedArray[tuple[int], np.dtype[np.float64]]) +assert_type(MAR_c8.reshape(2, 3, 4, 5), np.ma.MaskedArray[tuple[int, int, int, int], np.dtype[np.complex64]]) +assert_type(MAR_td64.reshape(()), 
np.ma.MaskedArray[tuple[()], np.dtype[np.timedelta64]]) +assert_type(MAR_s.reshape([]), np.ma.MaskedArray[tuple[()], np.dtype[np.str_]]) +assert_type(MAR_V.reshape((480, 720, 4)), np.ma.MaskedArray[tuple[int, int, int], np.dtype[np.void]]) + +assert_type(MAR_f8.cumprod(), MaskedArray[Any]) +assert_type(MAR_f8.cumprod(out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_f8.cumsum(), MaskedArray[Any]) +assert_type(MAR_f8.cumsum(out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_f8.view(), MaskedArray[np.float64]) +assert_type(MAR_f8.view(dtype=np.float32), MaskedArray[np.float32]) +assert_type(MAR_f8.view(dtype=np.dtype(np.float32)), MaskedArray[np.float32]) +assert_type(MAR_f8.view(dtype=np.float32, fill_value=0), MaskedArray[np.float32]) +assert_type(MAR_f8.view(type=np.ndarray), np.ndarray[Any, Any]) +assert_type(MAR_f8.view(None, np.ndarray), np.ndarray[Any, Any]) +assert_type(MAR_f8.view(dtype=np.ndarray), np.ndarray[Any, Any]) +assert_type(MAR_f8.view(dtype="float32"), MaskedArray[Any]) +assert_type(MAR_f8.view(dtype="float32", type=np.ndarray), np.ndarray[Any, Any]) +assert_type(MAR_2d_f4.view(dtype=np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float16]]) +assert_type(MAR_2d_f4.view(dtype=np.dtype(np.float16)), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float16]]) + +assert_type(MAR_f8.__deepcopy__(), MaskedArray[np.float64]) + +assert_type(MAR_f8.argsort(), MaskedArray[np.intp]) +assert_type(MAR_f8.argsort(axis=0, kind="heap", order=("x", "y")), MaskedArray[np.intp]) +assert_type(MAR_f8.argsort(endwith=True, fill_value=1.5, stable=False), MaskedArray[np.intp]) + +assert_type(MAR_2d_f4.flat, np.ma.core.MaskedIterator[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.flat.ma, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.flat[AR_i8], MaskedArray[np.float32]) +assert_type(MAR_2d_f4.flat[[1, 2, 3]], MaskedArray[np.float32]) +assert_type(MAR_2d_f4.flat[1:], MaskedArray[np.float32]) 
+assert_type(MAR_2d_f4.flat[:], MaskedArray[np.float32]) +assert_type(MAR_2d_f4.flat[0, 0], Any) +assert_type(MAR_2d_f4.flat[:, np.newaxis], MaskedArray[np.float32]) +assert_type(MAR_2d_f4.flat[..., -1], MaskedArray[np.float32]) + +def invalid_resize() -> None: + assert_type(MAR_f8.resize((1, 1)), NoReturn) # type: ignore[arg-type] + +assert_type(np.ma.MaskedArray(AR_f4), MaskedArray[np.float32]) +assert_type(np.ma.MaskedArray(np.array([1, 2, 3]), [True, True, False], np.float16), MaskedArray[np.float16]) +assert_type(np.ma.MaskedArray(np.array([1, 2, 3]), dtype=np.float16), MaskedArray[np.float16]) +assert_type(np.ma.MaskedArray(np.array([1, 2, 3]), copy=True), MaskedArray[Any]) +# TODO: This one could be made more precise, the return type could be `MaskedArraySubclassC` +assert_type(np.ma.MaskedArray(MAR_subclass), MaskedArray[np.complex128]) +# TODO: This one could be made more precise, the return type could be `MaskedArraySubclass[np.float32]` +assert_type(np.ma.MaskedArray(MAR_into_subclass), MaskedArray[np.float32]) + +# Masked Array addition + +assert_type(MAR_b + AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b + AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b + AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b + AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_b + AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_b + AR_LIKE_o, Any) + +assert_type(AR_LIKE_u + MAR_b, MaskedArray[np.uint32]) +assert_type(AR_LIKE_i + MAR_b, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f + MAR_b, MaskedArray[np.floating]) +assert_type(AR_LIKE_c + MAR_b, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 + MAR_b, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 + MAR_b, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o + MAR_b, Any) + +assert_type(MAR_u4 + AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 + AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 + AR_LIKE_i, 
MaskedArray[np.signedinteger]) +assert_type(MAR_u4 + AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 + AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_u4 + AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_u4 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_u4, MaskedArray[np.uint32]) +assert_type(AR_LIKE_u + MAR_u4, MaskedArray[np.unsignedinteger]) +assert_type(AR_LIKE_i + MAR_u4, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f + MAR_u4, MaskedArray[np.floating]) +assert_type(AR_LIKE_c + MAR_u4, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 + MAR_u4, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 + MAR_u4, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o + MAR_u4, Any) + +assert_type(MAR_i8 + AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 + AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 + AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 + AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_i8 + AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_i8 + AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_i8 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_i8, MaskedArray[np.int64]) +assert_type(AR_LIKE_u + MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_i + MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f + MAR_i8, MaskedArray[np.floating]) +assert_type(AR_LIKE_c + MAR_i8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 + MAR_i8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 + MAR_i8, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o + MAR_i8, Any) + +assert_type(MAR_f8 + AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 + AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 + AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 + AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 + AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_f8 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + 
MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u + MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i + MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f + MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_c + MAR_f8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o + MAR_f8, Any) + +assert_type(MAR_c16 + AR_LIKE_b, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_u, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_i, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_f, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_c, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_u + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_i + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_f + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_c + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_o + MAR_c16, Any) + +assert_type(MAR_td64 + AR_LIKE_b, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 + AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 + AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 + AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_u + MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_i + MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_td64 + MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 + MAR_td64, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o + MAR_td64, Any) + +assert_type(MAR_dt64 + AR_LIKE_b, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 + AR_LIKE_u, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 + AR_LIKE_i, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 + AR_LIKE_td64, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 + AR_LIKE_o, Any) + 
+assert_type(AR_LIKE_o + MAR_dt64, Any) + +assert_type(MAR_o + AR_LIKE_b, Any) +assert_type(MAR_o + AR_LIKE_u, Any) +assert_type(MAR_o + AR_LIKE_i, Any) +assert_type(MAR_o + AR_LIKE_f, Any) +assert_type(MAR_o + AR_LIKE_c, Any) +assert_type(MAR_o + AR_LIKE_td64, Any) +assert_type(MAR_o + AR_LIKE_dt64, Any) +assert_type(MAR_o + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_o, Any) +assert_type(AR_LIKE_u + MAR_o, Any) +assert_type(AR_LIKE_i + MAR_o, Any) +assert_type(AR_LIKE_f + MAR_o, Any) +assert_type(AR_LIKE_c + MAR_o, Any) +assert_type(AR_LIKE_td64 + MAR_o, Any) +assert_type(AR_LIKE_dt64 + MAR_o, Any) +assert_type(AR_LIKE_o + MAR_o, Any) + +# Masked Array subtraction +# Keep in sync with numpy/typing/tests/data/reveal/arithmetic.pyi + +assert_type(MAR_number - AR_number, MaskedArray[np.number]) + +assert_type(MAR_b - AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b - AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b - AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b - AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_b - AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_b - AR_LIKE_o, Any) + +assert_type(AR_LIKE_u - MAR_b, MaskedArray[np.uint32]) +assert_type(AR_LIKE_i - MAR_b, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f - MAR_b, MaskedArray[np.floating]) +assert_type(AR_LIKE_c - MAR_b, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 - MAR_b, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 - MAR_b, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o - MAR_b, Any) + +assert_type(MAR_u4 - AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 - AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 - AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 - AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 - AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_u4 - AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_u4 - AR_LIKE_o, Any) + 
+assert_type(AR_LIKE_b - MAR_u4, MaskedArray[np.uint32]) +assert_type(AR_LIKE_u - MAR_u4, MaskedArray[np.unsignedinteger]) +assert_type(AR_LIKE_i - MAR_u4, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f - MAR_u4, MaskedArray[np.floating]) +assert_type(AR_LIKE_c - MAR_u4, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 - MAR_u4, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 - MAR_u4, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o - MAR_u4, Any) + +assert_type(MAR_i8 - AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 - AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 - AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 - AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_i8 - AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_i8 - AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_i8 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_i8, MaskedArray[np.int64]) +assert_type(AR_LIKE_u - MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_i - MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f - MAR_i8, MaskedArray[np.floating]) +assert_type(AR_LIKE_c - MAR_i8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 - MAR_i8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 - MAR_i8, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o - MAR_i8, Any) + +assert_type(MAR_f8 - AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 - AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 - AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 - AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 - AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_f8 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u - MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i - MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f - MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_c - MAR_f8, 
MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o - MAR_f8, Any) + +assert_type(MAR_c16 - AR_LIKE_b, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_u, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_i, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_f, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_c, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_u - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_i - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_f - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_c - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_o - MAR_c16, Any) + +assert_type(MAR_td64 - AR_LIKE_b, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 - AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 - AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 - AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_u - MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_i - MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_td64 - MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 - MAR_td64, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o - MAR_td64, Any) + +assert_type(MAR_dt64 - AR_LIKE_b, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 - AR_LIKE_u, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 - AR_LIKE_i, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 - AR_LIKE_td64, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 - AR_LIKE_dt64, MaskedArray[np.timedelta64]) +assert_type(MAR_dt64 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_dt64 - MAR_dt64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o - MAR_dt64, Any) + +assert_type(MAR_o - AR_LIKE_b, Any) +assert_type(MAR_o - AR_LIKE_u, Any) 
+assert_type(MAR_o - AR_LIKE_i, Any) +assert_type(MAR_o - AR_LIKE_f, Any) +assert_type(MAR_o - AR_LIKE_c, Any) +assert_type(MAR_o - AR_LIKE_td64, Any) +assert_type(MAR_o - AR_LIKE_dt64, Any) +assert_type(MAR_o - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_o, Any) +assert_type(AR_LIKE_u - MAR_o, Any) +assert_type(AR_LIKE_i - MAR_o, Any) +assert_type(AR_LIKE_f - MAR_o, Any) +assert_type(AR_LIKE_c - MAR_o, Any) +assert_type(AR_LIKE_td64 - MAR_o, Any) +assert_type(AR_LIKE_dt64 - MAR_o, Any) +assert_type(AR_LIKE_o - MAR_o, Any) + +# Masked Array multiplication + +assert_type(MAR_b * AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b * AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b * AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b * AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_b * AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_b * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_u * MAR_b, MaskedArray[np.uint32]) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_b, MaskedArray[np.signedinteger]) # type: ignore[assert-type] +assert_type(AR_LIKE_f * MAR_b, MaskedArray[np.floating]) # type: ignore[assert-type] +assert_type(AR_LIKE_c * MAR_b, MaskedArray[np.complexfloating]) # type: ignore[assert-type] +assert_type(AR_LIKE_td64 * MAR_b, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_b, Any) # type: ignore[assert-type] + +assert_type(MAR_u4 * AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 * AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 * AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 * AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 * AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_u4 * AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_u4 * AR_LIKE_o, Any) + +assert_type(MAR_i8 * AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 * AR_LIKE_u, 
MaskedArray[np.signedinteger]) +assert_type(MAR_i8 * AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 * AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_i8 * AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_i8 * AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_i8 * AR_LIKE_o, Any) + +assert_type(MAR_f8 * AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 * AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 * AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 * AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 * AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_f8 * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_b * MAR_f8, MaskedArray[np.float64]) # type: ignore[assert-type] +assert_type(AR_LIKE_u * MAR_f8, MaskedArray[np.float64]) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_f8, MaskedArray[np.float64]) # type: ignore[assert-type] +assert_type(AR_LIKE_f * MAR_f8, MaskedArray[np.float64]) # type: ignore[assert-type] +assert_type(AR_LIKE_c * MAR_f8, MaskedArray[np.complexfloating]) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_f8, Any) # type: ignore[assert-type] + +assert_type(MAR_c16 * AR_LIKE_b, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_u, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_i, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_f, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_c, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_b * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_u * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_f * MAR_c16, MaskedArray[np.complex128]) # type: 
ignore[assert-type] +assert_type(AR_LIKE_c * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_c16, Any) # type: ignore[assert-type] + +assert_type(MAR_td64 * AR_LIKE_b, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 * AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 * AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_b * MAR_td64, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_u * MAR_td64, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_td64, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_td64 * MAR_td64, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_dt64 * MAR_td64, MaskedArray[np.datetime64]) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_td64, Any) # type: ignore[assert-type] + +assert_type(AR_LIKE_o * MAR_dt64, Any) # type: ignore[assert-type] + +assert_type(MAR_o * AR_LIKE_b, Any) +assert_type(MAR_o * AR_LIKE_u, Any) +assert_type(MAR_o * AR_LIKE_i, Any) +assert_type(MAR_o * AR_LIKE_f, Any) +assert_type(MAR_o * AR_LIKE_c, Any) +assert_type(MAR_o * AR_LIKE_td64, Any) +assert_type(MAR_o * AR_LIKE_dt64, Any) +assert_type(MAR_o * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_b * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_u * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_f * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_c * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_td64 * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_dt64 * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_o, Any) # type: ignore[assert-type] + +assert_type(MAR_f8.sum(), 
Any) +assert_type(MAR_f8.sum(axis=0), Any) +assert_type(MAR_f8.sum(keepdims=True), Any) +assert_type(MAR_f8.sum(out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_f8.std(), Any) +assert_type(MAR_f8.std(axis=0), Any) +assert_type(MAR_f8.std(keepdims=True, mean=0.), Any) +assert_type(MAR_f8.std(out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_f8.var(), Any) +assert_type(MAR_f8.var(axis=0), Any) +assert_type(MAR_f8.var(keepdims=True, mean=0.), Any) +assert_type(MAR_f8.var(out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_f8.mean(), Any) +assert_type(MAR_f8.mean(axis=0), Any) +assert_type(MAR_f8.mean(keepdims=True), Any) +assert_type(MAR_f8.mean(out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_f8.prod(), Any) +assert_type(MAR_f8.prod(axis=0), Any) +assert_type(MAR_f8.prod(keepdims=True), Any) +assert_type(MAR_f8.prod(out=MAR_subclass), MaskedArraySubclassC) + +# MaskedArray "true" division + +assert_type(MAR_f8 / b, MaskedArray[np.float64]) +assert_type(MAR_f8 / i, MaskedArray[np.float64]) +assert_type(MAR_f8 / f, MaskedArray[np.float64]) + +assert_type(b / MAR_f8, MaskedArray[np.float64]) +assert_type(i / MAR_f8, MaskedArray[np.float64]) +assert_type(f / MAR_f8, MaskedArray[np.float64]) + +assert_type(MAR_b / AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_b / AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_b / AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_b / AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_b / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_b, MaskedArray[np.float64]) +assert_type(AR_LIKE_u / MAR_b, MaskedArray[np.float64]) +assert_type(AR_LIKE_i / MAR_b, MaskedArray[np.float64]) +assert_type(AR_LIKE_f / MAR_b, MaskedArray[np.float64]) +assert_type(AR_LIKE_o / MAR_b, Any) + +assert_type(MAR_u4 / AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_u4 / AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_u4 / AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_u4 / AR_LIKE_f, 
MaskedArray[np.float64]) +assert_type(MAR_u4 / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_u4, MaskedArray[np.float64]) +assert_type(AR_LIKE_u / MAR_u4, MaskedArray[np.float64]) +assert_type(AR_LIKE_i / MAR_u4, MaskedArray[np.float64]) +assert_type(AR_LIKE_f / MAR_u4, MaskedArray[np.float64]) +assert_type(AR_LIKE_td64 / MAR_u4, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o / MAR_u4, Any) + +assert_type(MAR_i8 / AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_i8 / AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_i8 / AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_i8 / AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_i8 / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_i8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u / MAR_i8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i / MAR_i8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f / MAR_i8, MaskedArray[np.float64]) +assert_type(AR_LIKE_td64 / MAR_i8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o / MAR_i8, Any) + +assert_type(MAR_f8 / AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 / AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 / AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 / AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u / MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i / MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f / MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_td64 / MAR_f8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o / MAR_f8, Any) + +assert_type(MAR_td64 / AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 / AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 / AR_LIKE_f, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 / AR_LIKE_td64, MaskedArray[np.float64]) +assert_type(MAR_td64 / AR_LIKE_o, Any) + +assert_type(AR_LIKE_td64 / MAR_td64, MaskedArray[np.float64]) 
+assert_type(AR_LIKE_o / MAR_td64, Any) + +assert_type(MAR_o / AR_LIKE_b, Any) +assert_type(MAR_o / AR_LIKE_u, Any) +assert_type(MAR_o / AR_LIKE_i, Any) +assert_type(MAR_o / AR_LIKE_f, Any) +assert_type(MAR_o / AR_LIKE_td64, Any) +assert_type(MAR_o / AR_LIKE_dt64, Any) +assert_type(MAR_o / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_o, Any) +assert_type(AR_LIKE_u / MAR_o, Any) +assert_type(AR_LIKE_i / MAR_o, Any) +assert_type(AR_LIKE_f / MAR_o, Any) +assert_type(AR_LIKE_td64 / MAR_o, Any) +assert_type(AR_LIKE_dt64 / MAR_o, Any) +assert_type(AR_LIKE_o / MAR_o, Any) + +# MaskedArray floor division + +assert_type(MAR_b // AR_LIKE_b, MaskedArray[np.int8]) +assert_type(MAR_b // AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b // AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b // AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_b, MaskedArray[np.int8]) +assert_type(AR_LIKE_u // MAR_b, MaskedArray[np.uint32]) +assert_type(AR_LIKE_i // MAR_b, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f // MAR_b, MaskedArray[np.floating]) +assert_type(AR_LIKE_o // MAR_b, Any) + +assert_type(MAR_u4 // AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 // AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 // AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 // AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_u4, MaskedArray[np.uint32]) +assert_type(AR_LIKE_u // MAR_u4, MaskedArray[np.unsignedinteger]) +assert_type(AR_LIKE_i // MAR_u4, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f // MAR_u4, MaskedArray[np.floating]) +assert_type(AR_LIKE_td64 // MAR_u4, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o // MAR_u4, Any) + +assert_type(MAR_i8 // AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 // AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 // AR_LIKE_i, MaskedArray[np.signedinteger]) 
+assert_type(MAR_i8 // AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_i8 // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_i8, MaskedArray[np.int64]) +assert_type(AR_LIKE_u // MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_i // MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f // MAR_i8, MaskedArray[np.floating]) +assert_type(AR_LIKE_td64 // MAR_i8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o // MAR_i8, Any) + +assert_type(MAR_f8 // AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 // AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 // AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 // AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u // MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i // MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f // MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_td64 // MAR_f8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o // MAR_f8, Any) + +assert_type(MAR_td64 // AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 // AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 // AR_LIKE_f, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 // AR_LIKE_td64, MaskedArray[np.int64]) +assert_type(MAR_td64 // AR_LIKE_o, Any) + +assert_type(AR_LIKE_td64 // MAR_td64, MaskedArray[np.int64]) +assert_type(AR_LIKE_o // MAR_td64, Any) + +assert_type(MAR_o // AR_LIKE_b, Any) +assert_type(MAR_o // AR_LIKE_u, Any) +assert_type(MAR_o // AR_LIKE_i, Any) +assert_type(MAR_o // AR_LIKE_f, Any) +assert_type(MAR_o // AR_LIKE_td64, Any) +assert_type(MAR_o // AR_LIKE_dt64, Any) +assert_type(MAR_o // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_o, Any) +assert_type(AR_LIKE_u // MAR_o, Any) +assert_type(AR_LIKE_i // MAR_o, Any) +assert_type(AR_LIKE_f // MAR_o, Any) +assert_type(AR_LIKE_td64 // MAR_o, Any) +assert_type(AR_LIKE_dt64 // MAR_o, Any) 
+assert_type(AR_LIKE_o // MAR_o, Any) + +# Masked Array power + +assert_type(MAR_b ** AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b ** AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b ** AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b ** AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_b ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_u ** MAR_b, MaskedArray[np.uint32]) +assert_type(AR_LIKE_i ** MAR_b, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f ** MAR_b, MaskedArray[np.floating]) +assert_type(AR_LIKE_c ** MAR_b, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_b, Any) + +assert_type(MAR_u4 ** AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 ** AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 ** AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 ** AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 ** AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_u4 ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_b ** MAR_u4, MaskedArray[np.uint32]) +assert_type(AR_LIKE_u ** MAR_u4, MaskedArray[np.unsignedinteger]) +assert_type(AR_LIKE_i ** MAR_u4, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f ** MAR_u4, MaskedArray[np.floating]) +assert_type(AR_LIKE_c ** MAR_u4, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_u4, Any) + +assert_type(MAR_i8 ** AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 ** AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 ** AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 ** AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_i8 ** AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_i8 ** AR_LIKE_o, Any) +assert_type(MAR_i8 ** AR_LIKE_b, MaskedArray[np.int64]) + +assert_type(AR_LIKE_u ** MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_i ** MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f ** MAR_i8, MaskedArray[np.floating]) +assert_type(AR_LIKE_c ** MAR_i8, 
MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_i8, Any) + +assert_type(MAR_f8 ** AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 ** AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 ** AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 ** AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 ** AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_f8 ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_b ** MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u ** MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i ** MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f ** MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_c ** MAR_f8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_f8, Any) + +assert_type(MAR_c16 ** AR_LIKE_b, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_u, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_i, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_f, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_c, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_b ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_u ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_i ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_f ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_c ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_o ** MAR_c16, Any) + +assert_type(MAR_o ** AR_LIKE_b, Any) +assert_type(MAR_o ** AR_LIKE_u, Any) +assert_type(MAR_o ** AR_LIKE_i, Any) +assert_type(MAR_o ** AR_LIKE_f, Any) +assert_type(MAR_o ** AR_LIKE_c, Any) +assert_type(MAR_o ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_b ** MAR_o, Any) +assert_type(AR_LIKE_u ** MAR_o, Any) +assert_type(AR_LIKE_i ** MAR_o, Any) +assert_type(AR_LIKE_f ** MAR_o, Any) +assert_type(AR_LIKE_c ** MAR_o, Any) +assert_type(AR_LIKE_o ** MAR_o, Any) diff --git a/numpy/typing/tests/data/reveal/matrix.pyi 
b/numpy/typing/tests/data/reveal/matrix.pyi index 1a0aa4e3c7b4..b76760d547b9 100644 --- a/numpy/typing/tests/data/reveal/matrix.pyi +++ b/numpy/typing/tests/data/reveal/matrix.pyi @@ -1,22 +1,19 @@ -import sys -from typing import Any +from typing import Any, assert_type import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +type _Shape2D = tuple[int, int] -mat: np.matrix[Any, np.dtype[np.int64]] +mat: np.matrix[_Shape2D, np.dtype[np.int64]] ar_f8: npt.NDArray[np.float64] +ar_ip: npt.NDArray[np.intp] -assert_type(mat * 5, np.matrix[Any, Any]) -assert_type(5 * mat, np.matrix[Any, Any]) +assert_type(mat * 5, np.matrix) +assert_type(5 * mat, np.matrix) mat *= 5 -assert_type(mat**5, np.matrix[Any, Any]) +assert_type(mat**5, np.matrix) mat **= 5 assert_type(mat.sum(), Any) @@ -32,18 +29,18 @@ assert_type(mat.argmax(), np.intp) assert_type(mat.argmin(), np.intp) assert_type(mat.ptp(), np.int64) -assert_type(mat.sum(axis=0), np.matrix[Any, Any]) -assert_type(mat.mean(axis=0), np.matrix[Any, Any]) -assert_type(mat.std(axis=0), np.matrix[Any, Any]) -assert_type(mat.var(axis=0), np.matrix[Any, Any]) -assert_type(mat.prod(axis=0), np.matrix[Any, Any]) -assert_type(mat.any(axis=0), np.matrix[Any, np.dtype[np.bool]]) -assert_type(mat.all(axis=0), np.matrix[Any, np.dtype[np.bool]]) -assert_type(mat.max(axis=0), np.matrix[Any, np.dtype[np.int64]]) -assert_type(mat.min(axis=0), np.matrix[Any, np.dtype[np.int64]]) -assert_type(mat.argmax(axis=0), np.matrix[Any, np.dtype[np.intp]]) -assert_type(mat.argmin(axis=0), np.matrix[Any, np.dtype[np.intp]]) -assert_type(mat.ptp(axis=0), np.matrix[Any, np.dtype[np.int64]]) +assert_type(mat.sum(axis=0), np.matrix) +assert_type(mat.mean(axis=0), np.matrix) +assert_type(mat.std(axis=0), np.matrix) +assert_type(mat.var(axis=0), np.matrix) +assert_type(mat.prod(axis=0), np.matrix) +assert_type(mat.any(axis=0), np.matrix[_Shape2D, 
np.dtype[np.bool]]) +assert_type(mat.all(axis=0), np.matrix[_Shape2D, np.dtype[np.bool]]) +assert_type(mat.max(axis=0), np.matrix[_Shape2D, np.dtype[np.int64]]) +assert_type(mat.min(axis=0), np.matrix[_Shape2D, np.dtype[np.int64]]) +assert_type(mat.argmax(axis=0), np.matrix[_Shape2D, np.dtype[np.intp]]) +assert_type(mat.argmin(axis=0), np.matrix[_Shape2D, np.dtype[np.intp]]) +assert_type(mat.ptp(axis=0), np.matrix[_Shape2D, np.dtype[np.int64]]) assert_type(mat.sum(out=ar_f8), npt.NDArray[np.float64]) assert_type(mat.mean(out=ar_f8), npt.NDArray[np.float64]) @@ -54,23 +51,23 @@ assert_type(mat.any(out=ar_f8), npt.NDArray[np.float64]) assert_type(mat.all(out=ar_f8), npt.NDArray[np.float64]) assert_type(mat.max(out=ar_f8), npt.NDArray[np.float64]) assert_type(mat.min(out=ar_f8), npt.NDArray[np.float64]) -assert_type(mat.argmax(out=ar_f8), npt.NDArray[np.float64]) -assert_type(mat.argmin(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.argmax(out=ar_ip), npt.NDArray[np.intp]) +assert_type(mat.argmin(out=ar_ip), npt.NDArray[np.intp]) assert_type(mat.ptp(out=ar_f8), npt.NDArray[np.float64]) -assert_type(mat.T, np.matrix[Any, np.dtype[np.int64]]) -assert_type(mat.I, np.matrix[Any, Any]) -assert_type(mat.A, npt.NDArray[np.int64]) +assert_type(mat.T, np.matrix[_Shape2D, np.dtype[np.int64]]) +assert_type(mat.I, np.matrix) +assert_type(mat.A, np.ndarray[_Shape2D, np.dtype[np.int64]]) assert_type(mat.A1, npt.NDArray[np.int64]) -assert_type(mat.H, np.matrix[Any, np.dtype[np.int64]]) -assert_type(mat.getT(), np.matrix[Any, np.dtype[np.int64]]) -assert_type(mat.getI(), np.matrix[Any, Any]) -assert_type(mat.getA(), npt.NDArray[np.int64]) +assert_type(mat.H, np.matrix[_Shape2D, np.dtype[np.int64]]) +assert_type(mat.getT(), np.matrix[_Shape2D, np.dtype[np.int64]]) +assert_type(mat.getI(), np.matrix) +assert_type(mat.getA(), np.ndarray[_Shape2D, np.dtype[np.int64]]) assert_type(mat.getA1(), npt.NDArray[np.int64]) -assert_type(mat.getH(), np.matrix[Any, np.dtype[np.int64]]) 
+assert_type(mat.getH(), np.matrix[_Shape2D, np.dtype[np.int64]]) -assert_type(np.bmat(ar_f8), np.matrix[Any, Any]) -assert_type(np.bmat([[0, 1, 2]]), np.matrix[Any, Any]) -assert_type(np.bmat("mat"), np.matrix[Any, Any]) +assert_type(np.bmat(ar_f8), np.matrix) +assert_type(np.bmat([[0, 1, 2]]), np.matrix) +assert_type(np.bmat("mat"), np.matrix) -assert_type(np.asmatrix(ar_f8, dtype=np.int64), np.matrix[Any, Any]) +assert_type(np.asmatrix(ar_f8, dtype=np.int64), np.matrix) diff --git a/numpy/typing/tests/data/reveal/memmap.pyi b/numpy/typing/tests/data/reveal/memmap.pyi index 53278ff1122b..0babdeefb6f1 100644 --- a/numpy/typing/tests/data/reveal/memmap.pyi +++ b/numpy/typing/tests/data/reveal/memmap.pyi @@ -1,25 +1,21 @@ -import sys -from typing import Any +from typing import Any, Literal, assert_type import numpy as np -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +type _Memmap[ScalarT: np.generic] = np.memmap[tuple[Any, ...], np.dtype[ScalarT]] -memmap_obj: np.memmap[Any, np.dtype[np.str_]] +memmap_obj: _Memmap[np.str_] assert_type(np.memmap.__array_priority__, float) assert_type(memmap_obj.__array_priority__, float) assert_type(memmap_obj.filename, str | None) assert_type(memmap_obj.offset, int) -assert_type(memmap_obj.mode, str) +assert_type(memmap_obj.mode, Literal["r", "r+", "w+", "c"]) assert_type(memmap_obj.flush(), None) -assert_type(np.memmap("file.txt", offset=5), np.memmap[Any, np.dtype[np.uint8]]) -assert_type(np.memmap(b"file.txt", dtype=np.float64, shape=(10, 3)), np.memmap[Any, np.dtype[np.float64]]) +assert_type(np.memmap("file.txt", offset=5), _Memmap[np.uint8]) +assert_type(np.memmap(b"file.txt", dtype=np.float64, shape=(10, 3)), _Memmap[np.float64]) with open("file.txt", "rb") as f: - assert_type(np.memmap(f, dtype=float, order="K"), np.memmap[Any, np.dtype[Any]]) + assert_type(np.memmap(f, dtype=float, order="K"), np.memmap) 
assert_type(memmap_obj.__array_finalize__(object()), None) diff --git a/numpy/typing/tests/data/reveal/mod.pyi b/numpy/typing/tests/data/reveal/mod.pyi index 11cdeb2a4273..131e9259b6b5 100644 --- a/numpy/typing/tests/data/reveal/mod.pyi +++ b/numpy/typing/tests/data/reveal/mod.pyi @@ -1,42 +1,73 @@ -import sys -from typing import Any +import datetime as dt +from typing import Literal as L, assert_type import numpy as np import numpy.typing as npt -from numpy._typing import _32Bit, _64Bit -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +f8: np.float64 +i8: np.int64 +u8: np.uint64 -f8 = np.float64() -i8 = np.int64() -u8 = np.uint64() +f4: np.float32 +i4: np.int32 +u4: np.uint32 -f4 = np.float32() -i4 = np.int32() -u4 = np.uint32() +m: np.timedelta64 +m_nat: np.timedelta64[None] +m_int0: np.timedelta64[L[0]] +m_int: np.timedelta64[int] +m_td: np.timedelta64[dt.timedelta] -td = np.timedelta64(0, "D") -b_ = np.bool() +b_: np.bool -b = bool() -f = float() -i = int() +b: bool +i: int +f: float AR_b: npt.NDArray[np.bool] AR_m: npt.NDArray[np.timedelta64] -# Time structures +# NOTE: the __divmod__ calls are workarounds for https://github.com/microsoft/pyright/issues/9663 -assert_type(td % td, np.timedelta64) -assert_type(AR_m % td, npt.NDArray[np.timedelta64]) -assert_type(td % AR_m, npt.NDArray[np.timedelta64]) +# Time structures -assert_type(divmod(td, td), tuple[np.int64, np.timedelta64]) -assert_type(divmod(AR_m, td), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) -assert_type(divmod(td, AR_m), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) +assert_type(m % m, np.timedelta64) +assert_type(m % m_nat, np.timedelta64[None]) +assert_type(m % m_int0, np.timedelta64[None]) +assert_type(m % m_int, np.timedelta64[int | None]) +# NOTE: Mypy incorrectly infers `timedelta64[Any]`, but pyright behaves correctly. 
+assert_type(m_nat % m, np.timedelta64[None]) # type: ignore[assert-type] +assert_type(m_int % m_nat, np.timedelta64[None]) +assert_type(m_int % m_int0, np.timedelta64[None]) +assert_type(m_int % m_int, np.timedelta64[int | None]) +assert_type(m_int % m_td, np.timedelta64[int | None]) +assert_type(m_td % m_nat, np.timedelta64[None]) +assert_type(m_td % m_int0, np.timedelta64[None]) +assert_type(m_td % m_int, np.timedelta64[int | None]) +assert_type(m_td % m_td, np.timedelta64[dt.timedelta | None]) + +assert_type(AR_m % m, npt.NDArray[np.timedelta64]) +assert_type(m % AR_m, npt.NDArray[np.timedelta64]) + +# +# NOTE: Mypy incorrectly infers `tuple[Any, ...]`, but pyright behaves correctly. +assert_type(m.__divmod__(m), tuple[np.int64, np.timedelta64]) # type: ignore[assert-type] +assert_type(m.__divmod__(m_nat), tuple[np.int64, np.timedelta64[None]]) +assert_type(m.__divmod__(m_int0), tuple[np.int64, np.timedelta64[None]]) +assert_type(m.__divmod__(m_int), tuple[np.int64, np.timedelta64[int | None]]) +# NOTE: Mypy incorrectly infers `tuple[Any, ...]`, but pyright behaves correctly. 
+assert_type(m_nat.__divmod__(m), tuple[np.int64, np.timedelta64[None]]) # type: ignore[assert-type] +assert_type(m_int.__divmod__(m_nat), tuple[np.int64, np.timedelta64[None]]) +assert_type(m_int.__divmod__(m_int0), tuple[np.int64, np.timedelta64[None]]) +assert_type(m_int.__divmod__(m_int), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(m_int.__divmod__(m_td), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(m_td.__divmod__(m_nat), tuple[np.int64, np.timedelta64[None]]) +assert_type(m_td.__divmod__(m_int0), tuple[np.int64, np.timedelta64[None]]) +assert_type(m_td.__divmod__(m_int), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(m_td.__divmod__(m_td), tuple[np.int64, np.timedelta64[dt.timedelta | None]]) + +assert_type(divmod(AR_m, m), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) +assert_type(divmod(m, AR_m), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) # Bool @@ -50,11 +81,11 @@ assert_type(b_ % f8, np.float64) assert_type(b_ % AR_b, npt.NDArray[np.int8]) assert_type(divmod(b_, b), tuple[np.int8, np.int8]) -assert_type(divmod(b_, i), tuple[np.int_, np.int_]) -assert_type(divmod(b_, f), tuple[np.float64, np.float64]) assert_type(divmod(b_, b_), tuple[np.int8, np.int8]) -assert_type(divmod(b_, i8), tuple[np.int64, np.int64]) -assert_type(divmod(b_, u8), tuple[np.uint64, np.uint64]) +assert_type(b_.__divmod__(i), tuple[np.int_, np.int_]) +assert_type(b_.__divmod__(f), tuple[np.float64, np.float64]) +assert_type(b_.__divmod__(i8), tuple[np.int64, np.int64]) +assert_type(b_.__divmod__(u8), tuple[np.uint64, np.uint64]) assert_type(divmod(b_, f8), tuple[np.float64, np.float64]) assert_type(divmod(b_, AR_b), tuple[npt.NDArray[np.int8], npt.NDArray[np.int8]]) @@ -79,70 +110,70 @@ assert_type(divmod(AR_b, b_), tuple[npt.NDArray[np.int8], npt.NDArray[np.int8]]) # int assert_type(i8 % b, np.int64) -assert_type(i8 % f, np.float64) assert_type(i8 % i8, np.int64) +assert_type(i8 % f, np.float64) assert_type(i8 % f8, 
np.float64) -assert_type(i4 % i8, np.signedinteger[_32Bit | _64Bit]) -assert_type(i4 % f8, np.floating[_32Bit | _64Bit]) +assert_type(i4 % i8, np.signedinteger) +assert_type(i4 % f8, np.float64) assert_type(i4 % i4, np.int32) -assert_type(i4 % f4, np.float32) -assert_type(i8 % AR_b, npt.NDArray[np.signedinteger[Any]]) +assert_type(i4 % f4, np.floating) +assert_type(i8 % AR_b, npt.NDArray[np.int64]) assert_type(divmod(i8, b), tuple[np.int64, np.int64]) -assert_type(divmod(i8, f), tuple[np.float64, np.float64]) +assert_type(divmod(i8, i4), tuple[np.signedinteger, np.signedinteger]) assert_type(divmod(i8, i8), tuple[np.int64, np.int64]) -assert_type(divmod(i8, f8), tuple[np.float64, np.float64]) -assert_type(divmod(i8, i4), tuple[np.signedinteger[_32Bit | _64Bit], np.signedinteger[_32Bit | _64Bit]]) -assert_type(divmod(i8, f4), tuple[np.floating[_32Bit | _64Bit], np.floating[_32Bit | _64Bit]]) +assert_type(i8.__divmod__(f), tuple[np.float64, np.float64]) +assert_type(i8.__divmod__(f8), tuple[np.float64, np.float64]) +assert_type(divmod(i8, f4), tuple[np.floating, np.floating]) assert_type(divmod(i4, i4), tuple[np.int32, np.int32]) -assert_type(divmod(i4, f4), tuple[np.float32, np.float32]) -assert_type(divmod(i8, AR_b), tuple[npt.NDArray[np.signedinteger[Any]], npt.NDArray[np.signedinteger[Any]]]) +assert_type(divmod(i4, f4), tuple[np.floating, np.floating]) +assert_type(divmod(i8, AR_b), tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]) assert_type(b % i8, np.int64) assert_type(f % i8, np.float64) assert_type(i8 % i8, np.int64) assert_type(f8 % i8, np.float64) -assert_type(i8 % i4, np.signedinteger[_32Bit | _64Bit]) -assert_type(f8 % i4, np.floating[_32Bit | _64Bit]) +assert_type(i8 % i4, np.signedinteger) +assert_type(f8 % i4, np.float64) assert_type(i4 % i4, np.int32) -assert_type(f4 % i4, np.float32) -assert_type(AR_b % i8, npt.NDArray[np.signedinteger[Any]]) +assert_type(f4 % i4, np.floating) +assert_type(AR_b % i8, npt.NDArray[np.int64]) assert_type(divmod(b, 
i8), tuple[np.int64, np.int64]) assert_type(divmod(f, i8), tuple[np.float64, np.float64]) assert_type(divmod(i8, i8), tuple[np.int64, np.int64]) assert_type(divmod(f8, i8), tuple[np.float64, np.float64]) -assert_type(divmod(i4, i8), tuple[np.signedinteger[_32Bit | _64Bit], np.signedinteger[_32Bit | _64Bit]]) -assert_type(divmod(f4, i8), tuple[np.floating[_32Bit | _64Bit], np.floating[_32Bit | _64Bit]]) +assert_type(divmod(i4, i8), tuple[np.signedinteger, np.signedinteger]) assert_type(divmod(i4, i4), tuple[np.int32, np.int32]) -assert_type(divmod(f4, i4), tuple[np.float32, np.float32]) -assert_type(divmod(AR_b, i8), tuple[npt.NDArray[np.signedinteger[Any]], npt.NDArray[np.signedinteger[Any]]]) +assert_type(f4.__divmod__(i8), tuple[np.floating, np.floating]) +assert_type(f4.__divmod__(i4), tuple[np.floating, np.floating]) +assert_type(AR_b.__divmod__(i8), tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]) # float assert_type(f8 % b, np.float64) assert_type(f8 % f, np.float64) -assert_type(i8 % f4, np.floating[_32Bit | _64Bit]) +assert_type(i8 % f4, np.floating) assert_type(f4 % f4, np.float32) -assert_type(f8 % AR_b, npt.NDArray[np.floating[Any]]) +assert_type(f8 % AR_b, npt.NDArray[np.float64]) assert_type(divmod(f8, b), tuple[np.float64, np.float64]) assert_type(divmod(f8, f), tuple[np.float64, np.float64]) assert_type(divmod(f8, f8), tuple[np.float64, np.float64]) -assert_type(divmod(f8, f4), tuple[np.floating[_32Bit | _64Bit], np.floating[_32Bit | _64Bit]]) +assert_type(divmod(f8, f4), tuple[np.float64, np.float64]) assert_type(divmod(f4, f4), tuple[np.float32, np.float32]) -assert_type(divmod(f8, AR_b), tuple[npt.NDArray[np.floating[Any]], npt.NDArray[np.floating[Any]]]) +assert_type(divmod(f8, AR_b), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) assert_type(b % f8, np.float64) -assert_type(f % f8, np.float64) +assert_type(f % f8, np.float64) # pyright: ignore[reportAssertTypeFailure] # pyright incorrectly infers `builtins.float` assert_type(f8 % 
f8, np.float64) assert_type(f8 % f8, np.float64) assert_type(f4 % f4, np.float32) -assert_type(AR_b % f8, npt.NDArray[np.floating[Any]]) +assert_type(AR_b % f8, npt.NDArray[np.float64]) assert_type(divmod(b, f8), tuple[np.float64, np.float64]) -assert_type(divmod(f, f8), tuple[np.float64, np.float64]) assert_type(divmod(f8, f8), tuple[np.float64, np.float64]) -assert_type(divmod(f4, f8), tuple[np.floating[_32Bit | _64Bit], np.floating[_32Bit | _64Bit]]) assert_type(divmod(f4, f4), tuple[np.float32, np.float32]) -assert_type(divmod(AR_b, f8), tuple[npt.NDArray[np.floating[Any]], npt.NDArray[np.floating[Any]]]) +assert_type(f8.__rdivmod__(f), tuple[np.float64, np.float64]) +assert_type(f8.__rdivmod__(f4), tuple[np.float64, np.float64]) +assert_type(AR_b.__divmod__(f8), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) diff --git a/numpy/typing/tests/data/reveal/modules.pyi b/numpy/typing/tests/data/reveal/modules.pyi index 1ab01cd079c2..628fb500bfda 100644 --- a/numpy/typing/tests/data/reveal/modules.pyi +++ b/numpy/typing/tests/data/reveal/modules.pyi @@ -1,14 +1,9 @@ -import sys import types +from typing import assert_type import numpy as np from numpy import f2py -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - assert_type(np, types.ModuleType) assert_type(np.char, types.ModuleType) diff --git a/numpy/typing/tests/data/reveal/multiarray.pyi b/numpy/typing/tests/data/reveal/multiarray.pyi index 085c5ff568be..fefb6e2fbb5d 100644 --- a/numpy/typing/tests/data/reveal/multiarray.pyi +++ b/numpy/typing/tests/data/reveal/multiarray.pyi @@ -1,18 +1,10 @@ -import sys import datetime as dt -from typing import Any, TypeVar +from typing import Any, Literal, assert_type import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - -_SCT = TypeVar("_SCT", bound=np.generic, covariant=True) - -class 
SubClass(npt.NDArray[_SCT]): ... +class SubClass[ScalarT: np.generic](np.ndarray[tuple[Any, ...], np.dtype[ScalarT]]): ... subclass: SubClass[np.float64] @@ -37,7 +29,15 @@ date_scalar: dt.date date_seq: list[dt.date] timedelta_seq: list[dt.timedelta] -def func(a: int) -> bool: ... +n1: Literal[1] +n2: Literal[2] +n3: Literal[3] + +f8: np.float64 + +def func11(a: int) -> bool: ... +def func21(a: int, b: int) -> int: ... +def func12(a: int) -> tuple[complex, bool]: ... assert_type(next(b_f8), tuple[Any, ...]) assert_type(b_f8.reset(), None) @@ -46,7 +46,7 @@ assert_type(b_f8.iters, tuple[np.flatiter[Any], ...]) assert_type(b_f8.nd, int) assert_type(b_f8.ndim, int) assert_type(b_f8.numiter, int) -assert_type(b_f8.shape, tuple[int, ...]) +assert_type(b_f8.shape, tuple[Any, ...]) assert_type(b_f8.size, int) assert_type(next(b_i8_f8_f8), tuple[Any, ...]) @@ -56,7 +56,7 @@ assert_type(b_i8_f8_f8.iters, tuple[np.flatiter[Any], ...]) assert_type(b_i8_f8_f8.nd, int) assert_type(b_i8_f8_f8.ndim, int) assert_type(b_i8_f8_f8.numiter, int) -assert_type(b_i8_f8_f8.shape, tuple[int, ...]) +assert_type(b_i8_f8_f8.shape, tuple[Any, ...]) assert_type(b_i8_f8_f8.size, int) assert_type(np.inner(AR_f8, AR_i8), Any) @@ -64,49 +64,101 @@ assert_type(np.inner(AR_f8, AR_i8), Any) assert_type(np.where([True, True, False]), tuple[npt.NDArray[np.intp], ...]) assert_type(np.where([True, True, False], 1, 0), npt.NDArray[Any]) -assert_type(np.lexsort([0, 1, 2]), Any) +assert_type(np.lexsort([0, 1, 2]), npt.NDArray[np.intp]) assert_type(np.can_cast(np.dtype("i8"), int), bool) assert_type(np.can_cast(AR_f8, "f8"), bool) assert_type(np.can_cast(AR_f8, np.complex128, casting="unsafe"), bool) -assert_type(np.min_scalar_type([1]), np.dtype[Any]) -assert_type(np.min_scalar_type(AR_f8), np.dtype[Any]) +assert_type(np.min_scalar_type([1]), np.dtype) +assert_type(np.min_scalar_type(AR_f8), np.dtype) -assert_type(np.result_type(int, [1]), np.dtype[Any]) -assert_type(np.result_type(AR_f8, AR_u1), 
np.dtype[Any]) -assert_type(np.result_type(AR_f8, np.complex128), np.dtype[Any]) +assert_type(np.result_type(int, [1]), np.dtype) +assert_type(np.result_type(AR_f8, AR_u1), np.dtype) +assert_type(np.result_type(AR_f8, np.complex128), np.dtype) assert_type(np.dot(AR_LIKE_f, AR_i8), Any) assert_type(np.dot(AR_u1, 1), Any) assert_type(np.dot(1.5j, 1), Any) assert_type(np.dot(AR_u1, 1, out=AR_f8), npt.NDArray[np.float64]) -assert_type(np.vdot(AR_LIKE_f, AR_i8), np.floating[Any]) -assert_type(np.vdot(AR_u1, 1), np.signedinteger[Any]) -assert_type(np.vdot(1.5j, 1), np.complexfloating[Any, Any]) +assert_type(np.vdot(AR_LIKE_f, AR_i8), np.floating) +assert_type(np.vdot(AR_u1, 1), np.signedinteger) +assert_type(np.vdot(1.5j, 1), np.complexfloating) -assert_type(np.bincount(AR_i8), npt.NDArray[np.intp]) +assert_type(np.bincount(AR_i8), np.ndarray[tuple[int], np.dtype[np.intp]]) assert_type(np.copyto(AR_f8, [1., 1.5, 1.6]), None) assert_type(np.putmask(AR_f8, [True, True, False], 1.5), None) -assert_type(np.packbits(AR_i8), npt.NDArray[np.uint8]) -assert_type(np.packbits(AR_u1), npt.NDArray[np.uint8]) +assert_type(np.packbits(AR_i8), np.ndarray[tuple[int], np.dtype[np.uint8]]) +assert_type(np.packbits(AR_u1), np.ndarray[tuple[int], np.dtype[np.uint8]]) +assert_type(np.packbits(AR_i8, axis=1), npt.NDArray[np.uint8]) +assert_type(np.packbits(AR_u1, axis=1), npt.NDArray[np.uint8]) -assert_type(np.unpackbits(AR_u1), npt.NDArray[np.uint8]) +assert_type(np.unpackbits(AR_u1), np.ndarray[tuple[int], np.dtype[np.uint8]]) +assert_type(np.unpackbits(AR_u1, axis=1), npt.NDArray[np.uint8]) assert_type(np.shares_memory(1, 2), bool) -assert_type(np.shares_memory(AR_f8, AR_f8, max_work=1), bool) +assert_type(np.shares_memory(AR_f8, AR_f8, max_work=-1), bool) assert_type(np.may_share_memory(1, 2), bool) -assert_type(np.may_share_memory(AR_f8, AR_f8, max_work=1), bool) - -assert_type(np.promote_types(np.int32, np.int64), np.dtype[Any]) -assert_type(np.promote_types("f4", float), np.dtype[Any]) 
- -assert_type(np.frompyfunc(func, 1, 1, identity=None), np.ufunc) +assert_type(np.may_share_memory(AR_f8, AR_f8, max_work=0), bool) + +assert_type(np.promote_types(np.int32, np.int64), np.dtype) +assert_type(np.promote_types("f4", float), np.dtype) + +assert_type(np.frompyfunc(func11, n1, n1).nin, Literal[1]) +assert_type(np.frompyfunc(func11, n1, n1).nout, Literal[1]) +assert_type(np.frompyfunc(func11, n1, n1).nargs, Literal[2]) +assert_type(np.frompyfunc(func11, n1, n1).ntypes, Literal[1]) +assert_type(np.frompyfunc(func11, n1, n1).identity, None) +assert_type(np.frompyfunc(func11, n1, n1).signature, None) +assert_type(np.frompyfunc(func11, n1, n1)(f8), bool) +assert_type(np.frompyfunc(func11, n1, n1)(AR_f8), bool | npt.NDArray[np.object_]) +assert_type(np.frompyfunc(func11, n1, n1).at(AR_f8, AR_i8), None) + +assert_type(np.frompyfunc(func21, n2, n1).nin, Literal[2]) +assert_type(np.frompyfunc(func21, n2, n1).nout, Literal[1]) +assert_type(np.frompyfunc(func21, n2, n1).nargs, Literal[3]) +assert_type(np.frompyfunc(func21, n2, n1).ntypes, Literal[1]) +assert_type(np.frompyfunc(func21, n2, n1).identity, None) +assert_type(np.frompyfunc(func21, n2, n1).signature, None) +assert_type(np.frompyfunc(func21, n2, n1)(f8, f8), int) +assert_type(np.frompyfunc(func21, n2, n1)(AR_f8, f8), int | npt.NDArray[np.object_]) +assert_type(np.frompyfunc(func21, n2, n1)(f8, AR_f8), int | npt.NDArray[np.object_]) +assert_type(np.frompyfunc(func21, n2, n1).reduce(AR_f8, axis=0), int | npt.NDArray[np.object_]) +assert_type(np.frompyfunc(func21, n2, n1).accumulate(AR_f8), npt.NDArray[np.object_]) +assert_type(np.frompyfunc(func21, n2, n1).reduceat(AR_f8, AR_i8), npt.NDArray[np.object_]) +assert_type(np.frompyfunc(func21, n2, n1).outer(f8, f8), int) +assert_type(np.frompyfunc(func21, n2, n1).outer(AR_f8, f8), int | npt.NDArray[np.object_]) + +assert_type(np.frompyfunc(func21, n2, n1, identity=0).nin, Literal[2]) +assert_type(np.frompyfunc(func21, n2, n1, identity=0).nout, Literal[1]) 
+assert_type(np.frompyfunc(func21, n2, n1, identity=0).nargs, Literal[3]) +assert_type(np.frompyfunc(func21, n2, n1, identity=0).ntypes, Literal[1]) +assert_type(np.frompyfunc(func21, n2, n1, identity=0).identity, int) +assert_type(np.frompyfunc(func21, n2, n1, identity=0).signature, None) + +assert_type(np.frompyfunc(func12, n1, n2).nin, Literal[1]) +assert_type(np.frompyfunc(func12, n1, n2).nout, Literal[2]) +assert_type(np.frompyfunc(func12, n1, n2).nargs, int) +assert_type(np.frompyfunc(func12, n1, n2).ntypes, Literal[1]) +assert_type(np.frompyfunc(func12, n1, n2).identity, None) +assert_type(np.frompyfunc(func12, n1, n2).signature, None) +assert_type( + np.frompyfunc(func12, n2, n2)(f8, f8), + tuple[complex, complex, *tuple[complex, ...]], +) +assert_type( + np.frompyfunc(func12, n2, n2)(AR_f8, f8), + tuple[ + complex | npt.NDArray[np.object_], + complex | npt.NDArray[np.object_], + *tuple[complex | npt.NDArray[np.object_], ...], + ], +) assert_type(np.datetime_data("m8[D]"), tuple[str, int]) assert_type(np.datetime_data(np.datetime64), tuple[str, int]) @@ -116,9 +168,10 @@ assert_type(np.busday_count("2011-01", "2011-02"), np.int_) assert_type(np.busday_count(["2011-01"], "2011-02"), npt.NDArray[np.int_]) assert_type(np.busday_count(["2011-01"], date_scalar), npt.NDArray[np.int_]) -assert_type(np.busday_offset(M, m), np.datetime64) +# NOTE: Mypy incorrectly infers `Any`, but pyright behaves correctly. 
+assert_type(np.busday_offset(M, m), np.datetime64) # type: ignore[assert-type] +assert_type(np.busday_offset(M, 5), np.datetime64) # type: ignore[assert-type] assert_type(np.busday_offset(date_scalar, m), np.datetime64) -assert_type(np.busday_offset(M, 5), np.datetime64) assert_type(np.busday_offset(AR_M, m), npt.NDArray[np.datetime64]) assert_type(np.busday_offset(M, timedelta_seq), npt.NDArray[np.datetime64]) assert_type(np.busday_offset("2011-01", "2011-02", roll="forward"), np.datetime64) @@ -128,7 +181,8 @@ assert_type(np.is_busday("2012"), np.bool) assert_type(np.is_busday(date_scalar), np.bool) assert_type(np.is_busday(["2012"]), npt.NDArray[np.bool]) -assert_type(np.datetime_as_string(M), np.str_) +# NOTE: Mypy incorrectly infers `Any`, but pyright behaves correctly. +assert_type(np.datetime_as_string(M), np.str_) # type: ignore[assert-type] assert_type(np.datetime_as_string(AR_M), npt.NDArray[np.str_]) assert_type(np.busdaycalendar(holidays=date_seq), np.busdaycalendar) diff --git a/numpy/typing/tests/data/reveal/nbit_base_example.pyi b/numpy/typing/tests/data/reveal/nbit_base_example.pyi index ac2eb1d25323..c6e931eaca84 100644 --- a/numpy/typing/tests/data/reveal/nbit_base_example.pyi +++ b/numpy/typing/tests/data/reveal/nbit_base_example.pyi @@ -1,27 +1,17 @@ -import sys -from typing import TypeVar +from typing import assert_type import numpy as np import numpy.typing as npt -from numpy._typing import _64Bit, _32Bit +from numpy._typing import _32Bit, _64Bit -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - -T1 = TypeVar("T1", bound=npt.NBitBase) -T2 = TypeVar("T2", bound=npt.NBitBase) - -def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]: - return a + b +def add[T1: npt.NBitBase, T2: npt.NBitBase](a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]: ... 
# type: ignore[deprecated] i8: np.int64 i4: np.int32 f8: np.float64 f4: np.float32 -assert_type(add(f8, i8), np.float64) +assert_type(add(f8, i8), np.floating[_64Bit]) assert_type(add(f4, i8), np.floating[_32Bit | _64Bit]) assert_type(add(f8, i4), np.floating[_32Bit | _64Bit]) -assert_type(add(f4, i4), np.float32) +assert_type(add(f4, i4), np.floating[_32Bit]) diff --git a/numpy/typing/tests/data/reveal/ndarray_assignability.pyi b/numpy/typing/tests/data/reveal/ndarray_assignability.pyi new file mode 100644 index 000000000000..e8ccc573d642 --- /dev/null +++ b/numpy/typing/tests/data/reveal/ndarray_assignability.pyi @@ -0,0 +1,79 @@ +from typing import Any, Protocol, assert_type + +import numpy as np +from numpy._typing import _64Bit + +class CanAbs[T](Protocol): + def __abs__(self, /) -> T: ... + +class CanInvert[T](Protocol): + def __invert__(self, /) -> T: ... + +class CanNeg[T](Protocol): + def __neg__(self, /) -> T: ... + +class CanPos[T](Protocol): + def __pos__(self, /) -> T: ... + +def do_abs[T](x: CanAbs[T]) -> T: ... +def do_invert[T](x: CanInvert[T]) -> T: ... +def do_neg[T](x: CanNeg[T]) -> T: ... +def do_pos[T](x: CanPos[T]) -> T: ... 
+ +type _Bool_1d = np.ndarray[tuple[int], np.dtype[np.bool]] +type _UInt8_1d = np.ndarray[tuple[int], np.dtype[np.uint8]] +type _Int16_1d = np.ndarray[tuple[int], np.dtype[np.int16]] +type _LongLong_1d = np.ndarray[tuple[int], np.dtype[np.longlong]] +type _Float32_1d = np.ndarray[tuple[int], np.dtype[np.float32]] +type _Float64_1d = np.ndarray[tuple[int], np.dtype[np.float64]] +type _LongDouble_1d = np.ndarray[tuple[int], np.dtype[np.longdouble]] +type _Complex64_1d = np.ndarray[tuple[int], np.dtype[np.complex64]] +type _Complex128_1d = np.ndarray[tuple[int], np.dtype[np.complex128]] +type _CLongDouble_1d = np.ndarray[tuple[int], np.dtype[np.clongdouble]] +type _Void_1d = np.ndarray[tuple[int], np.dtype[np.void]] + +b1_1d: _Bool_1d +u1_1d: _UInt8_1d +i2_1d: _Int16_1d +q_1d: _LongLong_1d +f4_1d: _Float32_1d +f8_1d: _Float64_1d +g_1d: _LongDouble_1d +c8_1d: _Complex64_1d +c16_1d: _Complex128_1d +G_1d: _CLongDouble_1d +V_1d: _Void_1d + +assert_type(do_abs(b1_1d), _Bool_1d) +assert_type(do_abs(u1_1d), _UInt8_1d) +assert_type(do_abs(i2_1d), _Int16_1d) +assert_type(do_abs(q_1d), _LongLong_1d) +assert_type(do_abs(f4_1d), _Float32_1d) +assert_type(do_abs(f8_1d), _Float64_1d) +assert_type(do_abs(g_1d), _LongDouble_1d) + +assert_type(do_abs(c8_1d), _Float32_1d) +# NOTE: Unfortunately it's not possible to have this return a `float64` sctype, see +# https://github.com/python/mypy/issues/14070 +assert_type(do_abs(c16_1d), np.ndarray[tuple[int], np.dtype[np.floating[_64Bit]]]) +assert_type(do_abs(G_1d), _LongDouble_1d) + +assert_type(do_invert(b1_1d), _Bool_1d) +assert_type(do_invert(u1_1d), _UInt8_1d) +assert_type(do_invert(i2_1d), _Int16_1d) +assert_type(do_invert(q_1d), _LongLong_1d) + +assert_type(do_neg(u1_1d), _UInt8_1d) +assert_type(do_neg(i2_1d), _Int16_1d) +assert_type(do_neg(q_1d), _LongLong_1d) +assert_type(do_neg(f4_1d), _Float32_1d) +assert_type(do_neg(c16_1d), _Complex128_1d) + +assert_type(do_pos(u1_1d), _UInt8_1d) +assert_type(do_pos(i2_1d), _Int16_1d) 
+assert_type(do_pos(q_1d), _LongLong_1d) +assert_type(do_pos(f4_1d), _Float32_1d) +assert_type(do_pos(c16_1d), _Complex128_1d) + +# this shape is effectively equivalent to `tuple[int, *tuple[Any, ...]]`, i.e. ndim >= 1 +assert_type(V_1d["field"], np.ndarray[tuple[int] | tuple[Any, ...]]) diff --git a/numpy/typing/tests/data/reveal/ndarray_conversion.pyi b/numpy/typing/tests/data/reveal/ndarray_conversion.pyi index a5495b55b030..2af616440c5e 100644 --- a/numpy/typing/tests/data/reveal/ndarray_conversion.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_conversion.pyi @@ -1,61 +1,83 @@ -import sys -from typing import Any +from typing import Any, assert_type import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - -nd: npt.NDArray[np.int_] +b1_0d: np.ndarray[tuple[()], np.dtype[np.bool]] +u2_1d: np.ndarray[tuple[int], np.dtype[np.uint16]] +i4_2d: np.ndarray[tuple[int, int], np.dtype[np.int32]] +f8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float64]] +cG_4d: np.ndarray[tuple[int, int, int, int], np.dtype[np.clongdouble]] +i0_nd: npt.NDArray[np.int_] +uncertain_dtype: np.int32 | np.float64 | np.str_ # item -assert_type(nd.item(), int) -assert_type(nd.item(1), int) -assert_type(nd.item(0, 1), int) -assert_type(nd.item((0, 1)), int) +assert_type(i0_nd.item(), int) +assert_type(i0_nd.item(1), int) +assert_type(i0_nd.item(0, 1), int) +assert_type(i0_nd.item((0, 1)), int) + +assert_type(b1_0d.item(()), bool) +assert_type(u2_1d.item((0,)), int) +assert_type(i4_2d.item(-1, 2), int) +assert_type(f8_3d.item(2, 1, -1), float) +assert_type(cG_4d.item(-0xEd_fed_Deb_a_dead_bee), complex) # c'mon Ed, we talked about this... 
# tolist -assert_type(nd.tolist(), Any) +assert_type(b1_0d.tolist(), bool) +assert_type(u2_1d.tolist(), list[int]) +assert_type(i4_2d.tolist(), list[list[int]]) +assert_type(f8_3d.tolist(), list[list[list[float]]]) +assert_type(cG_4d.tolist(), Any) +assert_type(i0_nd.tolist(), Any) + +# regression tests for numpy/numpy#27944 +any_dtype: np.ndarray[Any, Any] +any_sctype: np.ndarray[Any, Any] +assert_type(any_dtype.tolist(), Any) +assert_type(any_sctype.tolist(), Any) -# itemset does not return a value -# tostring is pretty simple # tobytes is pretty simple # tofile does not return a value # dump does not return a value # dumps is pretty simple # astype -assert_type(nd.astype("float"), npt.NDArray[Any]) -assert_type(nd.astype(float), npt.NDArray[Any]) -assert_type(nd.astype(np.float64), npt.NDArray[np.float64]) -assert_type(nd.astype(np.float64, "K"), npt.NDArray[np.float64]) -assert_type(nd.astype(np.float64, "K", "unsafe"), npt.NDArray[np.float64]) -assert_type(nd.astype(np.float64, "K", "unsafe", True), npt.NDArray[np.float64]) -assert_type(nd.astype(np.float64, "K", "unsafe", True, True), npt.NDArray[np.float64]) +assert_type(i0_nd.astype("float"), npt.NDArray[Any]) +assert_type(i0_nd.astype(float), npt.NDArray[Any]) +assert_type(i0_nd.astype(np.float64), npt.NDArray[np.float64]) +assert_type(i0_nd.astype(np.float64, "K"), npt.NDArray[np.float64]) +assert_type(i0_nd.astype(np.float64, "K", "unsafe"), npt.NDArray[np.float64]) +assert_type(i0_nd.astype(np.float64, "K", "unsafe", True), npt.NDArray[np.float64]) +assert_type(i0_nd.astype(np.float64, "K", "unsafe", True, True), npt.NDArray[np.float64]) + +assert_type(np.astype(i0_nd, np.float64), npt.NDArray[np.float64]) -assert_type(np.astype(nd, np.float64), npt.NDArray[np.float64]) +assert_type(i4_2d.astype(np.uint16), np.ndarray[tuple[int, int], np.dtype[np.uint16]]) +assert_type(np.astype(i4_2d, np.uint16), np.ndarray[tuple[int, int], np.dtype[np.uint16]]) +assert_type(f8_3d.astype(np.int16), 
np.ndarray[tuple[int, int, int], np.dtype[np.int16]]) +assert_type(np.astype(f8_3d, np.int16), np.ndarray[tuple[int, int, int], np.dtype[np.int16]]) +assert_type(i4_2d.astype(uncertain_dtype), np.ndarray[tuple[int, int], np.dtype[np.generic]]) +assert_type(np.astype(i4_2d, uncertain_dtype), np.ndarray[tuple[int, int], np.dtype]) # byteswap -assert_type(nd.byteswap(), npt.NDArray[np.int_]) -assert_type(nd.byteswap(True), npt.NDArray[np.int_]) +assert_type(i0_nd.byteswap(), npt.NDArray[np.int_]) +assert_type(i0_nd.byteswap(True), npt.NDArray[np.int_]) # copy -assert_type(nd.copy(), npt.NDArray[np.int_]) -assert_type(nd.copy("C"), npt.NDArray[np.int_]) +assert_type(i0_nd.copy(), npt.NDArray[np.int_]) +assert_type(i0_nd.copy("C"), npt.NDArray[np.int_]) -assert_type(nd.view(), npt.NDArray[np.int_]) -assert_type(nd.view(np.float64), npt.NDArray[np.float64]) -assert_type(nd.view(float), npt.NDArray[Any]) -assert_type(nd.view(np.float64, np.matrix), np.matrix[Any, Any]) +assert_type(i0_nd.view(), npt.NDArray[np.int_]) +assert_type(i0_nd.view(np.float64), npt.NDArray[np.float64]) +assert_type(i0_nd.view(float), npt.NDArray[Any]) +assert_type(i0_nd.view(np.float64, np.matrix), np.matrix) # getfield -assert_type(nd.getfield("float"), npt.NDArray[Any]) -assert_type(nd.getfield(float), npt.NDArray[Any]) -assert_type(nd.getfield(np.float64), npt.NDArray[np.float64]) -assert_type(nd.getfield(np.float64, 8), npt.NDArray[np.float64]) +assert_type(i0_nd.getfield("float"), npt.NDArray[Any]) +assert_type(i0_nd.getfield(float), npt.NDArray[Any]) +assert_type(i0_nd.getfield(np.float64), npt.NDArray[np.float64]) +assert_type(i0_nd.getfield(np.float64, 8), npt.NDArray[np.float64]) # setflags does not return a value # fill does not return a value diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index 5f3526a72d45..fa2c6020919f 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ 
b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -6,32 +6,37 @@ function-based counterpart in `../from_numeric.py`. """ -import sys -import operator import ctypes as ct -from typing import Any, Literal +import operator +from collections.abc import Iterator +from types import ModuleType +from typing import Any, Literal, assert_type +from typing_extensions import CapsuleType import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - -class SubClass(npt.NDArray[np.object_]): ... +class SubClass(np.ndarray[tuple[Any, ...], np.dtype[np.object_]]): ... f8: np.float64 +i8: np.int64 B: SubClass AR_f8: npt.NDArray[np.float64] AR_i8: npt.NDArray[np.int64] +AR_u1: npt.NDArray[np.uint8] +AR_c8: npt.NDArray[np.complex64] +AR_m: npt.NDArray[np.timedelta64] AR_U: npt.NDArray[np.str_] AR_V: npt.NDArray[np.void] +AR_f8_1d: np.ndarray[tuple[int], np.dtype[np.float64]] +AR_f8_2d: np.ndarray[tuple[int, int], np.dtype[np.float64]] +AR_f8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float64]] + ctypes_obj = AR_f8.ctypes -assert_type(AR_f8.__dlpack__(), Any) -assert_type(AR_f8.__dlpack_device__(), tuple[int, Literal[0]]) +assert_type(AR_f8.__dlpack__(), CapsuleType) +assert_type(AR_f8.__dlpack_device__(), tuple[Literal[1], Literal[0]]) assert_type(ctypes_obj.data, int) assert_type(ctypes_obj.shape, ct.Array[np.ctypeslib.c_intp]) @@ -44,28 +49,28 @@ assert_type(ctypes_obj.strides_as(ct.c_ubyte), ct.Array[ct.c_ubyte]) assert_type(f8.all(), np.bool) assert_type(AR_f8.all(), np.bool) -assert_type(AR_f8.all(axis=0), Any) -assert_type(AR_f8.all(keepdims=True), Any) +assert_type(AR_f8.all(axis=0), np.bool | npt.NDArray[np.bool]) +assert_type(AR_f8.all(keepdims=True), np.bool | npt.NDArray[np.bool]) assert_type(AR_f8.all(out=B), SubClass) assert_type(f8.any(), np.bool) assert_type(AR_f8.any(), np.bool) -assert_type(AR_f8.any(axis=0), Any) -assert_type(AR_f8.any(keepdims=True), Any) 
+assert_type(AR_f8.any(axis=0), np.bool | npt.NDArray[np.bool]) +assert_type(AR_f8.any(keepdims=True), np.bool | npt.NDArray[np.bool]) assert_type(AR_f8.any(out=B), SubClass) assert_type(f8.argmax(), np.intp) assert_type(AR_f8.argmax(), np.intp) assert_type(AR_f8.argmax(axis=0), Any) -assert_type(AR_f8.argmax(out=B), SubClass) +assert_type(AR_f8.argmax(out=AR_i8), npt.NDArray[np.int64]) assert_type(f8.argmin(), np.intp) assert_type(AR_f8.argmin(), np.intp) assert_type(AR_f8.argmin(axis=0), Any) -assert_type(AR_f8.argmin(out=B), SubClass) +assert_type(AR_f8.argmin(out=AR_i8), npt.NDArray[np.int64]) -assert_type(f8.argsort(), npt.NDArray[Any]) -assert_type(AR_f8.argsort(), npt.NDArray[Any]) +assert_type(f8.argsort(), npt.NDArray[np.intp]) +assert_type(AR_f8.argsort(), npt.NDArray[np.intp]) assert_type(f8.astype(np.int64).choose([()]), npt.NDArray[Any]) assert_type(AR_f8.choose([0]), npt.NDArray[Any]) @@ -125,9 +130,12 @@ assert_type(f8.round(), np.float64) assert_type(AR_f8.round(), npt.NDArray[np.float64]) assert_type(AR_f8.round(out=B), SubClass) -assert_type(f8.repeat(1), npt.NDArray[np.float64]) -assert_type(AR_f8.repeat(1), npt.NDArray[np.float64]) -assert_type(B.repeat(1), npt.NDArray[np.object_]) +assert_type(f8.repeat(1), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(f8.repeat(1, axis=0), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_f8.repeat(1), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_f8.repeat(1, axis=0), npt.NDArray[np.float64]) +assert_type(B.repeat(1), np.ndarray[tuple[int], np.dtype[np.object_]]) +assert_type(B.repeat(1, axis=0), npt.NDArray[np.object_]) assert_type(f8.std(), Any) assert_type(AR_f8.std(), Any) @@ -153,7 +161,9 @@ assert_type(AR_f8.var(axis=0), Any) assert_type(AR_f8.var(keepdims=True), Any) assert_type(AR_f8.var(out=B), SubClass) +assert_type(AR_f8.argpartition(0), npt.NDArray[np.intp]) assert_type(AR_f8.argpartition([0]), npt.NDArray[np.intp]) +assert_type(AR_f8.argpartition(0, 
axis=None), np.ndarray[tuple[int], np.dtype[np.intp]]) assert_type(AR_f8.diagonal(), npt.NDArray[np.float64]) @@ -161,7 +171,7 @@ assert_type(AR_f8.dot(1), npt.NDArray[Any]) assert_type(AR_f8.dot([1]), Any) assert_type(AR_f8.dot(1, out=B), SubClass) -assert_type(AR_f8.nonzero(), tuple[npt.NDArray[np.intp], ...]) +assert_type(AR_f8.nonzero(), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) assert_type(AR_f8.searchsorted(1), np.intp) assert_type(AR_f8.searchsorted([1]), npt.NDArray[np.intp]) @@ -172,14 +182,18 @@ assert_type(AR_f8.trace(out=B), SubClass) assert_type(AR_f8.item(), float) assert_type(AR_U.item(), str) -assert_type(AR_f8.ravel(), npt.NDArray[np.float64]) -assert_type(AR_U.ravel(), npt.NDArray[np.str_]) +assert_type(AR_f8.ravel(), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_U.ravel(), np.ndarray[tuple[int], np.dtype[np.str_]]) -assert_type(AR_f8.flatten(), npt.NDArray[np.float64]) -assert_type(AR_U.flatten(), npt.NDArray[np.str_]) +assert_type(AR_f8.flatten(), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_U.flatten(), np.ndarray[tuple[int], np.dtype[np.str_]]) -assert_type(AR_f8.reshape(1), npt.NDArray[np.float64]) -assert_type(AR_U.reshape(1), npt.NDArray[np.str_]) +assert_type(AR_i8.reshape(None), npt.NDArray[np.int64]) +assert_type(AR_f8.reshape(-1), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_c8.reshape(2, 3, 4, 5), np.ndarray[tuple[int, int, int, int], np.dtype[np.complex64]]) +assert_type(AR_m.reshape(()), np.ndarray[tuple[()], np.dtype[np.timedelta64]]) +assert_type(AR_U.reshape([]), np.ndarray[tuple[()], np.dtype[np.str_]]) +assert_type(AR_V.reshape((480, 720, 4)), np.ndarray[tuple[int, int, int], np.dtype[np.void]]) assert_type(int(AR_f8), int) assert_type(int(AR_U), int) @@ -213,3 +227,22 @@ with open("test_file", "wb") as f: assert_type(AR_f8.__array_finalize__(None), None) assert_type(AR_f8.__array_finalize__(B), None) assert_type(AR_f8.__array_finalize__(AR_f8), None) + 
+assert_type(f8.device, Literal["cpu"]) +assert_type(AR_f8.device, Literal["cpu"]) + +assert_type(f8.to_device("cpu"), np.float64) +assert_type(i8.to_device("cpu"), np.int64) +assert_type(AR_f8.to_device("cpu"), npt.NDArray[np.float64]) +assert_type(AR_i8.to_device("cpu"), npt.NDArray[np.int64]) +assert_type(AR_u1.to_device("cpu"), npt.NDArray[np.uint8]) +assert_type(AR_c8.to_device("cpu"), npt.NDArray[np.complex64]) +assert_type(AR_m.to_device("cpu"), npt.NDArray[np.timedelta64]) + +assert_type(f8.__array_namespace__(), ModuleType) +assert_type(AR_f8.__array_namespace__(), ModuleType) + +assert_type(iter(AR_f8), Iterator[Any]) # any-D +assert_type(iter(AR_f8_1d), Iterator[np.float64]) # 1-D +assert_type(iter(AR_f8_2d), Iterator[npt.NDArray[np.float64]]) # 2-D +assert_type(iter(AR_f8_3d), Iterator[npt.NDArray[np.float64]]) # 3-D diff --git a/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi b/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi index 9a41a90f1ee9..6bbe057ff5b7 100644 --- a/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi @@ -1,44 +1,47 @@ -import sys -from typing import Any +from typing import assert_type import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +type _ArrayND = npt.NDArray[np.int64] +type _Array2D = np.ndarray[tuple[int, int], np.dtype[np.int8]] +type _Array3D = np.ndarray[tuple[int, int, int], np.dtype[np.bool]] -nd: npt.NDArray[np.int64] +_nd: _ArrayND +_2d: _Array2D +_3d: _Array3D # reshape -assert_type(nd.reshape(), npt.NDArray[np.int64]) -assert_type(nd.reshape(4), npt.NDArray[np.int64]) -assert_type(nd.reshape(2, 2), npt.NDArray[np.int64]) -assert_type(nd.reshape((2, 2)), npt.NDArray[np.int64]) +assert_type(_nd.reshape(None), npt.NDArray[np.int64]) +assert_type(_nd.reshape(4), np.ndarray[tuple[int], np.dtype[np.int64]]) 
+assert_type(_nd.reshape((4,)), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.reshape(2, 2), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(_nd.reshape((2, 2)), np.ndarray[tuple[int, int], np.dtype[np.int64]]) -assert_type(nd.reshape((2, 2), order="C"), npt.NDArray[np.int64]) -assert_type(nd.reshape(4, order="C"), npt.NDArray[np.int64]) +assert_type(_nd.reshape((2, 2), order="C"), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(_nd.reshape(4, order="C"), np.ndarray[tuple[int], np.dtype[np.int64]]) # resize does not return a value # transpose -assert_type(nd.transpose(), npt.NDArray[np.int64]) -assert_type(nd.transpose(1, 0), npt.NDArray[np.int64]) -assert_type(nd.transpose((1, 0)), npt.NDArray[np.int64]) +assert_type(_nd.transpose(), npt.NDArray[np.int64]) +assert_type(_nd.transpose(1, 0), npt.NDArray[np.int64]) +assert_type(_nd.transpose((1, 0)), npt.NDArray[np.int64]) # swapaxes -assert_type(nd.swapaxes(0, 1), npt.NDArray[np.int64]) +assert_type(_nd.swapaxes(0, 1), _ArrayND) +assert_type(_2d.swapaxes(0, 1), _Array2D) +assert_type(_3d.swapaxes(0, 1), _Array3D) # flatten -assert_type(nd.flatten(), npt.NDArray[np.int64]) -assert_type(nd.flatten("C"), npt.NDArray[np.int64]) +assert_type(_nd.flatten(), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.flatten("C"), np.ndarray[tuple[int], np.dtype[np.int64]]) # ravel -assert_type(nd.ravel(), npt.NDArray[np.int64]) -assert_type(nd.ravel("C"), npt.NDArray[np.int64]) +assert_type(_nd.ravel(), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.ravel("C"), np.ndarray[tuple[int], np.dtype[np.int64]]) # squeeze -assert_type(nd.squeeze(), npt.NDArray[np.int64]) -assert_type(nd.squeeze(0), npt.NDArray[np.int64]) -assert_type(nd.squeeze((0, 2)), npt.NDArray[np.int64]) +assert_type(_nd.squeeze(), npt.NDArray[np.int64]) +assert_type(_nd.squeeze(0), npt.NDArray[np.int64]) +assert_type(_nd.squeeze((0, 2)), npt.NDArray[np.int64]) diff --git 
a/numpy/typing/tests/data/reveal/nditer.pyi b/numpy/typing/tests/data/reveal/nditer.pyi index 589453e777f2..8965f3c03e6d 100644 --- a/numpy/typing/tests/data/reveal/nditer.pyi +++ b/numpy/typing/tests/data/reveal/nditer.pyi @@ -1,14 +1,8 @@ -import sys -from typing import Any +from typing import Any, assert_type import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - nditer_obj: np.nditer assert_type(np.nditer([0, 1], flags=["c_index"]), np.nditer) @@ -16,7 +10,7 @@ assert_type(np.nditer([0, 1], op_flags=[["readonly", "readonly"]]), np.nditer) assert_type(np.nditer([0, 1], op_dtypes=np.int_), np.nditer) assert_type(np.nditer([0, 1], order="C", casting="no"), np.nditer) -assert_type(nditer_obj.dtypes, tuple[np.dtype[Any], ...]) +assert_type(nditer_obj.dtypes, tuple[np.dtype, ...]) assert_type(nditer_obj.finished, bool) assert_type(nditer_obj.has_delayed_bufalloc, bool) assert_type(nditer_obj.has_index, bool) diff --git a/numpy/typing/tests/data/reveal/nested_sequence.pyi b/numpy/typing/tests/data/reveal/nested_sequence.pyi index 3ca23d6875e8..b4f98b79c333 100644 --- a/numpy/typing/tests/data/reveal/nested_sequence.pyi +++ b/numpy/typing/tests/data/reveal/nested_sequence.pyi @@ -1,14 +1,8 @@ -import sys from collections.abc import Sequence -from typing import Any +from typing import Any, assert_type from numpy._typing import _NestedSequence -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - a: Sequence[int] b: Sequence[Sequence[int]] c: Sequence[Sequence[Sequence[int]]] @@ -18,8 +12,7 @@ f: tuple[int, ...] g: list[int] h: Sequence[Any] -def func(a: _NestedSequence[int]) -> None: - ... +def func(a: _NestedSequence[int]) -> None: ... 
assert_type(func(a), None) assert_type(func(b), None) diff --git a/numpy/typing/tests/data/reveal/npyio.pyi b/numpy/typing/tests/data/reveal/npyio.pyi index 1267b2811c68..e3eaa45a5fa1 100644 --- a/numpy/typing/tests/data/reveal/npyio.pyi +++ b/numpy/typing/tests/data/reveal/npyio.pyi @@ -1,19 +1,13 @@ +import pathlib import re -import sys import zipfile -import pathlib -from typing import IO, Any from collections.abc import Mapping +from typing import IO, Any, assert_type -import numpy.typing as npt import numpy as np +import numpy.typing as npt from numpy.lib._npyio_impl import BagObj -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - str_path: str pathlib_path: pathlib.Path str_file: IO[str] @@ -34,11 +28,11 @@ class BytesReader: bytes_writer: BytesWriter bytes_reader: BytesReader -assert_type(npz_file.zip, zipfile.ZipFile) -assert_type(npz_file.fid, None | IO[str]) +assert_type(npz_file.zip, zipfile.ZipFile | None) +assert_type(npz_file.fid, IO[str] | None) assert_type(npz_file.files, list[str]) assert_type(npz_file.allow_pickle, bool) -assert_type(npz_file.pickle_kwargs, None | Mapping[str, Any]) +assert_type(npz_file.pickle_kwargs, Mapping[str, Any] | None) assert_type(npz_file.f, BagObj[np.lib.npyio.NpzFile]) assert_type(npz_file["test"], npt.NDArray[Any]) assert_type(len(npz_file), int) diff --git a/numpy/typing/tests/data/reveal/numeric.pyi b/numpy/typing/tests/data/reveal/numeric.pyi index 1f0a8b36fff8..7b3abc2d6761 100644 --- a/numpy/typing/tests/data/reveal/numeric.pyi +++ b/numpy/typing/tests/data/reveal/numeric.pyi @@ -5,19 +5,12 @@ Does not include tests which fall under ``array_constructors``. """ -import sys -from typing import Any +from typing import Any, assert_type import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - -class SubClass(npt.NDArray[np.int64]): - ... 
+class SubClass(np.ndarray[tuple[Any, ...], np.dtype[np.int64]]): ... i8: np.int64 @@ -29,89 +22,127 @@ AR_c16: npt.NDArray[np.complex128] AR_m: npt.NDArray[np.timedelta64] AR_O: npt.NDArray[np.object_] -B: list[int] -C: SubClass +_sub_nd_i8: SubClass + +_to_1d_bool: list[bool] +_to_1d_int: list[int] +_to_1d_float: list[float] +_to_1d_complex: list[complex] + +### -assert_type(np.count_nonzero(i8), int) -assert_type(np.count_nonzero(AR_i8), int) -assert_type(np.count_nonzero(B), int) -assert_type(np.count_nonzero(AR_i8, keepdims=True), Any) +assert_type(np.count_nonzero(i8), np.intp) +assert_type(np.count_nonzero(AR_i8), np.intp) +assert_type(np.count_nonzero(_to_1d_int), np.intp) +assert_type(np.count_nonzero(AR_i8, keepdims=True), npt.NDArray[np.intp]) assert_type(np.count_nonzero(AR_i8, axis=0), Any) assert_type(np.isfortran(i8), bool) assert_type(np.isfortran(AR_i8), bool) -assert_type(np.argwhere(i8), npt.NDArray[np.intp]) -assert_type(np.argwhere(AR_i8), npt.NDArray[np.intp]) - -assert_type(np.flatnonzero(i8), npt.NDArray[np.intp]) -assert_type(np.flatnonzero(AR_i8), npt.NDArray[np.intp]) - -assert_type(np.correlate(B, AR_i8, mode="valid"), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.correlate(AR_i8, AR_i8, mode="same"), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.correlate(AR_b, AR_b), npt.NDArray[np.bool]) -assert_type(np.correlate(AR_b, AR_u8), npt.NDArray[np.unsignedinteger[Any]]) -assert_type(np.correlate(AR_i8, AR_b), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.correlate(AR_i8, AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.correlate(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(np.correlate(AR_i8, AR_m), npt.NDArray[np.timedelta64]) -assert_type(np.correlate(AR_O, AR_O), npt.NDArray[np.object_]) - -assert_type(np.convolve(B, AR_i8, mode="valid"), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.convolve(AR_i8, AR_i8, mode="same"), npt.NDArray[np.signedinteger[Any]]) 
-assert_type(np.convolve(AR_b, AR_b), npt.NDArray[np.bool]) -assert_type(np.convolve(AR_b, AR_u8), npt.NDArray[np.unsignedinteger[Any]]) -assert_type(np.convolve(AR_i8, AR_b), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.convolve(AR_i8, AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.convolve(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(np.convolve(AR_i8, AR_m), npt.NDArray[np.timedelta64]) -assert_type(np.convolve(AR_O, AR_O), npt.NDArray[np.object_]) - -assert_type(np.outer(i8, AR_i8), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.outer(B, AR_i8), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.outer(AR_i8, AR_i8), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.outer(AR_i8, AR_i8, out=C), SubClass) -assert_type(np.outer(AR_b, AR_b), npt.NDArray[np.bool]) -assert_type(np.outer(AR_b, AR_u8), npt.NDArray[np.unsignedinteger[Any]]) -assert_type(np.outer(AR_i8, AR_b), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.convolve(AR_i8, AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.outer(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(np.outer(AR_i8, AR_m), npt.NDArray[np.timedelta64]) -assert_type(np.outer(AR_O, AR_O), npt.NDArray[np.object_]) - -assert_type(np.tensordot(B, AR_i8), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.tensordot(AR_i8, AR_i8), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.tensordot(AR_i8, AR_i8, axes=0), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.tensordot(AR_i8, AR_i8, axes=(0, 1)), npt.NDArray[np.signedinteger[Any]]) +assert_type(np.argwhere(i8), np.ndarray[tuple[int, int], np.dtype[np.intp]]) +assert_type(np.argwhere(AR_i8), np.ndarray[tuple[int, int], np.dtype[np.intp]]) + +assert_type(np.flatnonzero(i8), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(np.flatnonzero(AR_i8), np.ndarray[tuple[int], np.dtype[np.intp]]) + +# NOTE: Mypy incorrectly infers `np.ndarray[Any, Any]` for timedelta64 + +# correlate 
+assert_type(np.correlate(AR_i8, AR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.correlate(AR_b, AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.correlate(AR_u8, AR_u8), np.ndarray[tuple[int], np.dtype[np.uint64]]) +assert_type(np.correlate(AR_i8, AR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.correlate(AR_f8, AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.correlate(AR_f8, AR_i8), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.correlate(AR_c16, AR_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(np.correlate(AR_c16, AR_f8), np.ndarray[tuple[int], np.dtype[np.complex128 | Any]]) +assert_type(np.correlate(AR_m, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) # type: ignore[assert-type] +assert_type(np.correlate(AR_i8, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64 | Any]]) +assert_type(np.correlate(AR_O, AR_O), np.ndarray[tuple[int], np.dtype[np.object_]]) +assert_type(np.correlate(_to_1d_bool, _to_1d_bool), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.correlate(_to_1d_int, _to_1d_int), np.ndarray[tuple[int], np.dtype[np.int_ | Any]]) +assert_type(np.correlate(_to_1d_float, _to_1d_float), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.correlate(_to_1d_complex, _to_1d_complex), np.ndarray[tuple[int], np.dtype[np.complex128 | Any]]) + +# convolve (same as correlate) +assert_type(np.convolve(AR_i8, AR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.convolve(AR_b, AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.convolve(AR_u8, AR_u8), np.ndarray[tuple[int], np.dtype[np.uint64]]) +assert_type(np.convolve(AR_i8, AR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.convolve(AR_f8, AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.convolve(AR_f8, AR_i8), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) 
+assert_type(np.convolve(AR_c16, AR_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(np.convolve(AR_c16, AR_f8), np.ndarray[tuple[int], np.dtype[np.complex128 | Any]]) +assert_type(np.convolve(AR_m, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) # type: ignore[assert-type] +assert_type(np.convolve(AR_i8, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64 | Any]]) +assert_type(np.convolve(AR_O, AR_O), np.ndarray[tuple[int], np.dtype[np.object_]]) +assert_type(np.convolve(_to_1d_bool, _to_1d_bool), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.convolve(_to_1d_int, _to_1d_int), np.ndarray[tuple[int], np.dtype[np.int_ | Any]]) +assert_type(np.convolve(_to_1d_float, _to_1d_float), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.convolve(_to_1d_complex, _to_1d_complex), np.ndarray[tuple[int], np.dtype[np.complex128 | Any]]) + +# outer (very similar to above, but 2D output) +assert_type(np.outer(AR_i8, AR_i8), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(np.outer(AR_b, AR_b), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.outer(AR_u8, AR_u8), np.ndarray[tuple[int, int], np.dtype[np.uint64]]) +assert_type(np.outer(AR_i8, AR_i8), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(np.outer(AR_f8, AR_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.outer(AR_f8, AR_i8), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.outer(AR_c16, AR_c16), np.ndarray[tuple[int, int], np.dtype[np.complex128]]) +assert_type(np.outer(AR_c16, AR_f8), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) +assert_type(np.outer(AR_m, AR_m), np.ndarray[tuple[int, int], np.dtype[np.timedelta64]]) # type: ignore[assert-type] +assert_type(np.outer(AR_i8, AR_m), np.ndarray[tuple[int, int], np.dtype[np.timedelta64 | Any]]) +assert_type(np.outer(AR_O, AR_O), np.ndarray[tuple[int, int], np.dtype[np.object_]]) +assert_type(np.outer(AR_i8, AR_i8, 
out=_sub_nd_i8), SubClass) +assert_type(np.outer(_to_1d_bool, _to_1d_bool), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.outer(_to_1d_int, _to_1d_int), np.ndarray[tuple[int, int], np.dtype[np.int_ | Any]]) +assert_type(np.outer(_to_1d_float, _to_1d_float), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.outer(_to_1d_complex, _to_1d_complex), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) + +# tensordot +assert_type(np.tensordot(AR_i8, AR_i8), npt.NDArray[np.int64]) assert_type(np.tensordot(AR_b, AR_b), npt.NDArray[np.bool]) -assert_type(np.tensordot(AR_b, AR_u8), npt.NDArray[np.unsignedinteger[Any]]) -assert_type(np.tensordot(AR_i8, AR_b), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.tensordot(AR_i8, AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.tensordot(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(np.tensordot(AR_i8, AR_m), npt.NDArray[np.timedelta64]) +assert_type(np.tensordot(AR_u8, AR_u8), npt.NDArray[np.uint64]) +assert_type(np.tensordot(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.tensordot(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.tensordot(AR_f8, AR_i8), npt.NDArray[np.float64 | Any]) +assert_type(np.tensordot(AR_c16, AR_c16), npt.NDArray[np.complex128]) +assert_type(np.tensordot(AR_c16, AR_f8), npt.NDArray[np.complex128 | Any]) +assert_type(np.tensordot(AR_m, AR_m), npt.NDArray[np.timedelta64]) assert_type(np.tensordot(AR_O, AR_O), npt.NDArray[np.object_]) +assert_type(np.tensordot(_to_1d_bool, _to_1d_bool), npt.NDArray[np.bool]) +assert_type(np.tensordot(_to_1d_int, _to_1d_int), npt.NDArray[np.int_ | Any]) +assert_type(np.tensordot(_to_1d_float, _to_1d_float), npt.NDArray[np.float64 | Any]) +assert_type(np.tensordot(_to_1d_complex, _to_1d_complex), npt.NDArray[np.complex128 | Any]) + +# cross +assert_type(np.cross(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.cross(AR_u8, AR_u8), npt.NDArray[np.uint64]) 
+assert_type(np.cross(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.cross(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.cross(AR_f8, AR_i8), npt.NDArray[np.float64 | Any]) +assert_type(np.cross(AR_c16, AR_c16), npt.NDArray[np.complex128]) +assert_type(np.cross(AR_c16, AR_f8), npt.NDArray[np.complex128 | Any]) +assert_type(np.cross(AR_m, AR_m), npt.NDArray[np.timedelta64]) +assert_type(np.cross(AR_O, AR_O), npt.NDArray[np.object_]) +assert_type(np.cross(_to_1d_int, _to_1d_int), npt.NDArray[np.int_ | Any]) +assert_type(np.cross(_to_1d_float, _to_1d_float), npt.NDArray[np.float64 | Any]) +assert_type(np.cross(_to_1d_complex, _to_1d_complex), npt.NDArray[np.complex128 | Any]) assert_type(np.isscalar(i8), bool) assert_type(np.isscalar(AR_i8), bool) -assert_type(np.isscalar(B), bool) +assert_type(np.isscalar(_to_1d_int), bool) assert_type(np.roll(AR_i8, 1), npt.NDArray[np.int64]) assert_type(np.roll(AR_i8, (1, 2)), npt.NDArray[np.int64]) -assert_type(np.roll(B, 1), npt.NDArray[Any]) +assert_type(np.roll(_to_1d_int, 1), npt.NDArray[Any]) assert_type(np.rollaxis(AR_i8, 0, 1), npt.NDArray[np.int64]) assert_type(np.moveaxis(AR_i8, 0, 1), npt.NDArray[np.int64]) assert_type(np.moveaxis(AR_i8, (0, 1), (1, 2)), npt.NDArray[np.int64]) -assert_type(np.cross(B, AR_i8), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.cross(AR_i8, AR_i8), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.cross(AR_b, AR_u8), npt.NDArray[np.unsignedinteger[Any]]) -assert_type(np.cross(AR_i8, AR_b), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.cross(AR_i8, AR_f8), npt.NDArray[np.floating[Any]]) -assert_type(np.cross(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(np.cross(AR_O, AR_O), npt.NDArray[np.object_]) - assert_type(np.indices([0, 1, 2]), npt.NDArray[np.int_]) assert_type(np.indices([0, 1, 2], sparse=True), tuple[npt.NDArray[np.int_], ...]) assert_type(np.indices([0, 1, 2], dtype=np.float64), npt.NDArray[np.float64]) @@ -124,18 +155,18 @@ 
assert_type(np.binary_repr(1), str) assert_type(np.base_repr(1), str) assert_type(np.allclose(i8, AR_i8), bool) -assert_type(np.allclose(B, AR_i8), bool) +assert_type(np.allclose(_to_1d_int, AR_i8), bool) assert_type(np.allclose(AR_i8, AR_i8), bool) assert_type(np.isclose(i8, i8), np.bool) assert_type(np.isclose(i8, AR_i8), npt.NDArray[np.bool]) -assert_type(np.isclose(B, AR_i8), npt.NDArray[np.bool]) +assert_type(np.isclose(_to_1d_int, _to_1d_int), np.ndarray[tuple[int], np.dtype[np.bool]]) assert_type(np.isclose(AR_i8, AR_i8), npt.NDArray[np.bool]) assert_type(np.array_equal(i8, AR_i8), bool) -assert_type(np.array_equal(B, AR_i8), bool) +assert_type(np.array_equal(_to_1d_int, AR_i8), bool) assert_type(np.array_equal(AR_i8, AR_i8), bool) assert_type(np.array_equiv(i8, AR_i8), bool) -assert_type(np.array_equiv(B, AR_i8), bool) +assert_type(np.array_equiv(_to_1d_int, AR_i8), bool) assert_type(np.array_equiv(AR_i8, AR_i8), bool) diff --git a/numpy/typing/tests/data/reveal/numerictypes.pyi b/numpy/typing/tests/data/reveal/numerictypes.pyi index 9f094ba72e3c..75d108ce5a0f 100644 --- a/numpy/typing/tests/data/reveal/numerictypes.pyi +++ b/numpy/typing/tests/data/reveal/numerictypes.pyi @@ -1,55 +1,16 @@ -import sys -from typing import Literal +from typing import Literal, assert_type import numpy as np -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - -assert_type( - np.ScalarType, - tuple[ - type[int], - type[float], - type[complex], - type[bool], - type[bytes], - type[str], - type[memoryview], - type[np.bool], - type[np.csingle], - type[np.cdouble], - type[np.clongdouble], - type[np.half], - type[np.single], - type[np.double], - type[np.longdouble], - type[np.byte], - type[np.short], - type[np.intc], - type[np.long], - type[np.longlong], - type[np.timedelta64], - type[np.datetime64], - type[np.object_], - type[np.bytes_], - type[np.str_], - type[np.ubyte], - type[np.ushort], - type[np.uintc], - 
type[np.ulong], - type[np.ulonglong], - type[np.void], - ], -) assert_type(np.ScalarType[0], type[int]) assert_type(np.ScalarType[3], type[bool]) -assert_type(np.ScalarType[8], type[np.csingle]) -assert_type(np.ScalarType[10], type[np.clongdouble]) -assert_type(np.bool_, type[np.bool]) +assert_type(np.ScalarType[8], type[np.complex64]) +assert_type(np.ScalarType[9], type[np.complex128]) +assert_type(np.ScalarType[-1], type[np.void]) +assert_type(np.bool_(object()), np.bool) assert_type(np.typecodes["Character"], Literal["c"]) assert_type(np.typecodes["Complex"], Literal["FDG"]) -assert_type(np.typecodes["All"], Literal["?bhilqpBHILQPefdgFDGSUVOMm"]) +assert_type(np.typecodes["All"], Literal["?bhilqnpBHILQNPefdgFDGSUVOMm"]) + +assert_type(np.sctypeDict["uint8"], type[np.generic]) diff --git a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi new file mode 100644 index 000000000000..faba91273c91 --- /dev/null +++ b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi @@ -0,0 +1,216 @@ +from collections.abc import Sequence +from decimal import Decimal +from typing import Any, Literal as L, assert_type + +import numpy as np +import numpy.polynomial as npp +import numpy.typing as npt + +type _Ar_x = npt.NDArray[np.inexact | np.object_] +type _Ar_f = npt.NDArray[np.floating] +type _Ar_c = npt.NDArray[np.complexfloating] +type _Ar_O = npt.NDArray[np.object_] + +type _Ar_x_n = np.ndarray[tuple[int], np.dtype[np.inexact | np.object_]] +type _Ar_f_n = np.ndarray[tuple[int], np.dtype[np.floating]] +type _Ar_c_n = np.ndarray[tuple[int], np.dtype[np.complexfloating]] +type _Ar_O_n = np.ndarray[tuple[int], np.dtype[np.object_]] + +type _Ar_x_2 = np.ndarray[tuple[L[2]], np.dtype[np.float64 | Any]] +type _Ar_f_2 = np.ndarray[tuple[L[2]], np.dtype[np.floating]] +type _Ar_c_2 = np.ndarray[tuple[L[2]], np.dtype[np.complexfloating]] +type _Ar_O_2 = np.ndarray[tuple[L[2]], np.dtype[np.object_]] + +type _Ar_1d[ScalarT: 
np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] + +type _BasisName = L["X"] + +SC_i: np.int_ +SC_i_co: int | np.int_ +SC_f: np.float64 +SC_f_co: float | np.float64 | np.int_ +SC_c: np.complex128 +SC_c_co: complex | np.complex128 +SC_O: Decimal + +AR_i: npt.NDArray[np.int_] +AR_f: npt.NDArray[np.float64] +AR_f_co: npt.NDArray[np.float64] | npt.NDArray[np.int_] +AR_c: npt.NDArray[np.complex128] +AR_c_co: npt.NDArray[np.complex128] | npt.NDArray[np.float64] | npt.NDArray[np.int_] +AR_O: npt.NDArray[np.object_] +AR_O_co: npt.NDArray[np.object_ | np.number] + +SQ_i: Sequence[int] +SQ_f: Sequence[float] +SQ_c: Sequence[complex] +SQ_O: Sequence[Decimal] + +PS_poly: npp.Polynomial +PS_cheb: npp.Chebyshev +PS_herm: npp.Hermite +PS_herme: npp.HermiteE +PS_lag: npp.Laguerre +PS_leg: npp.Legendre +PS_all: ( + npp.Polynomial + | npp.Chebyshev + | npp.Hermite + | npp.HermiteE + | npp.Laguerre + | npp.Legendre +) + +# static- and classmethods + +assert_type(type(PS_poly).basis_name, None) +assert_type(type(PS_cheb).basis_name, L["T"]) +assert_type(type(PS_herm).basis_name, L["H"]) +assert_type(type(PS_herme).basis_name, L["He"]) +assert_type(type(PS_lag).basis_name, L["L"]) +assert_type(type(PS_leg).basis_name, L["P"]) + +assert_type(type(PS_all).__hash__, None) +assert_type(type(PS_all).__array_ufunc__, None) +assert_type(type(PS_all).maxpower, L[100]) + +assert_type(type(PS_poly).fromroots(SC_i), npp.Polynomial) +assert_type(type(PS_poly).fromroots(SQ_i), npp.Polynomial) +assert_type(type(PS_poly).fromroots(AR_i), npp.Polynomial) +assert_type(type(PS_cheb).fromroots(SC_f), npp.Chebyshev) +assert_type(type(PS_cheb).fromroots(SQ_f), npp.Chebyshev) +assert_type(type(PS_cheb).fromroots(AR_f_co), npp.Chebyshev) +assert_type(type(PS_herm).fromroots(SC_c), npp.Hermite) +assert_type(type(PS_herm).fromroots(SQ_c), npp.Hermite) +assert_type(type(PS_herm).fromroots(AR_c_co), npp.Hermite) +assert_type(type(PS_leg).fromroots(SC_O), npp.Legendre) 
+assert_type(type(PS_leg).fromroots(SQ_O), npp.Legendre) +assert_type(type(PS_leg).fromroots(AR_O_co), npp.Legendre) + +assert_type(type(PS_poly).identity(), npp.Polynomial) +assert_type(type(PS_cheb).identity(symbol="z"), npp.Chebyshev) + +assert_type(type(PS_lag).basis(SC_i), npp.Laguerre) +assert_type(type(PS_leg).basis(32, symbol="u"), npp.Legendre) + +assert_type(type(PS_herm).cast(PS_poly), npp.Hermite) +assert_type(type(PS_herme).cast(PS_leg), npp.HermiteE) + +# attributes / properties + +assert_type(PS_all.coef, _Ar_x_n) +assert_type(PS_all.domain, _Ar_x_2) +assert_type(PS_all.window, _Ar_x_2) +assert_type(PS_all.symbol, str) + +# instance methods + +assert_type(PS_all.has_samecoef(PS_all), bool) +assert_type(PS_all.has_samedomain(PS_all), bool) +assert_type(PS_all.has_samewindow(PS_all), bool) +assert_type(PS_all.has_sametype(PS_all), bool) +assert_type(PS_poly.has_sametype(PS_poly), bool) +assert_type(PS_poly.has_sametype(PS_leg), bool) +assert_type(PS_poly.has_sametype(NotADirectoryError), bool) + +assert_type(PS_poly.copy(), npp.Polynomial) +assert_type(PS_cheb.copy(), npp.Chebyshev) +assert_type(PS_herm.copy(), npp.Hermite) +assert_type(PS_herme.copy(), npp.HermiteE) +assert_type(PS_lag.copy(), npp.Laguerre) +assert_type(PS_leg.copy(), npp.Legendre) + +assert_type(PS_leg.cutdeg(3), npp.Legendre) +assert_type(PS_leg.trim(), npp.Legendre) +assert_type(PS_leg.trim(tol=SC_f_co), npp.Legendre) +assert_type(PS_leg.truncate(SC_i_co), npp.Legendre) + +assert_type(PS_all.convert(None, npp.Chebyshev), npp.Chebyshev) +assert_type(PS_all.convert((0, 1), npp.Laguerre), npp.Laguerre) +assert_type(PS_all.convert([0, 1], npp.Hermite, [-1, 1]), npp.Hermite) + +assert_type(PS_all.degree(), int) +assert_type(PS_all.mapparms(), tuple[Any, Any]) + +assert_type(PS_poly.integ(), npp.Polynomial) +assert_type(PS_herme.integ(SC_i_co), npp.HermiteE) +assert_type(PS_lag.integ(SC_i_co, SC_f_co), npp.Laguerre) +assert_type(PS_poly.deriv(), npp.Polynomial) 
+assert_type(PS_herm.deriv(SC_i_co), npp.Hermite) + +assert_type(PS_poly.roots(), _Ar_x_n) + +assert_type( + PS_poly.linspace(), + tuple[_Ar_1d[np.float64 | np.complex128], _Ar_1d[np.float64 | np.complex128]], +) + +assert_type( + PS_poly.linspace(9), + tuple[_Ar_1d[np.float64 | np.complex128], _Ar_1d[np.float64 | np.complex128]], +) + +assert_type(PS_cheb.fit(AR_c_co, AR_c_co, SC_i_co), npp.Chebyshev) +assert_type(PS_leg.fit(AR_c_co, AR_c_co, AR_i), npp.Legendre) +assert_type(PS_herm.fit(AR_c_co, AR_c_co, SQ_i), npp.Hermite) +assert_type(PS_poly.fit(AR_c_co, SQ_c, SQ_i), npp.Polynomial) +assert_type(PS_lag.fit(SQ_c, SQ_c, SQ_i, full=False), npp.Laguerre) +assert_type( + PS_herme.fit(SQ_c, AR_c_co, SC_i_co, full=True), + tuple[npp.HermiteE, Sequence[np.inexact | np.int32]], +) + +# custom operations + +assert_type(PS_all.__hash__, None) +assert_type(PS_all.__array_ufunc__, None) + +assert_type(str(PS_all), str) +assert_type(repr(PS_all), str) +assert_type(format(PS_all), str) + +assert_type(len(PS_all), int) +assert_type(next(iter(PS_all)), np.float64 | Any) + +assert_type(PS_all(SC_f_co), np.float64 | Any) +assert_type(PS_all(SC_c_co), np.complex128 | Any) +assert_type(PS_all(Decimal()), np.float64 | Any) +assert_type(PS_poly(SQ_f), npt.NDArray[np.float64 | Any]) +assert_type(PS_poly(SQ_c), npt.NDArray[np.complex128 | Any]) +assert_type(PS_poly(SQ_O), npt.NDArray[np.object_]) +assert_type(PS_poly(AR_f), npt.NDArray[np.float64 | Any]) +assert_type(PS_poly(AR_c), npt.NDArray[np.complex128 | Any]) +assert_type(PS_poly(AR_O), npt.NDArray[np.object_]) +assert_type(PS_all(PS_poly), npp.Polynomial) + +assert_type(PS_poly == PS_poly, bool) +assert_type(PS_poly != PS_poly, bool) + +assert_type(-PS_poly, npp.Polynomial) +assert_type(+PS_poly, npp.Polynomial) + +assert_type(PS_poly + 5, npp.Polynomial) +assert_type(PS_poly - 5, npp.Polynomial) +assert_type(PS_poly * 5, npp.Polynomial) +assert_type(PS_poly / 5, npp.Polynomial) +assert_type(PS_poly // 5, npp.Polynomial) 
+assert_type(PS_poly % 5, npp.Polynomial) + +assert_type(PS_poly + PS_leg, npp.Polynomial) +assert_type(PS_poly - PS_leg, npp.Polynomial) +assert_type(PS_poly * PS_leg, npp.Polynomial) +assert_type(PS_poly / PS_leg, npp.Polynomial) +assert_type(PS_poly // PS_leg, npp.Polynomial) +assert_type(PS_poly % PS_leg, npp.Polynomial) + +assert_type(5 + PS_poly, npp.Polynomial) +assert_type(5 - PS_poly, npp.Polynomial) +assert_type(5 * PS_poly, npp.Polynomial) +assert_type(5 / PS_poly, npp.Polynomial) +assert_type(5 // PS_poly, npp.Polynomial) +assert_type(5 % PS_poly, npp.Polynomial) +assert_type(divmod(PS_poly, 5), tuple[npp.Polynomial, npp.Polynomial]) +assert_type(divmod(5, PS_poly), tuple[npp.Polynomial, npp.Polynomial]) + +assert_type(PS_poly**1, npp.Polynomial) +assert_type(PS_poly**1.0, npp.Polynomial) diff --git a/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi b/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi new file mode 100644 index 000000000000..9c5aff1117dc --- /dev/null +++ b/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi @@ -0,0 +1,218 @@ +from collections.abc import Sequence +from decimal import Decimal +from fractions import Fraction +from typing import Literal as L, assert_type + +import numpy as np +import numpy.polynomial.polyutils as pu +import numpy.typing as npt +from numpy.polynomial._polytypes import _Tuple2 + +type _ArrFloat1D = np.ndarray[tuple[int], np.dtype[np.floating]] +type _ArrComplex1D = np.ndarray[tuple[int], np.dtype[np.complexfloating]] +type _ArrObject1D = np.ndarray[tuple[int], np.dtype[np.object_]] + +type _ArrFloat1D_2 = np.ndarray[tuple[L[2]], np.dtype[np.float64]] +type _ArrComplex1D_2 = np.ndarray[tuple[L[2]], np.dtype[np.complex128]] +type _ArrObject1D_2 = np.ndarray[tuple[L[2]], np.dtype[np.object_]] + +num_int: int +num_float: float +num_complex: complex +# will result in an `object_` dtype +num_object: Decimal | Fraction + +sct_int: np.int_ +sct_float: np.float64 +sct_complex: np.complex128 
+sct_object: np.object_ # doesn't exist at runtime + +arr_int: npt.NDArray[np.int_] +arr_float: npt.NDArray[np.float64] +arr_complex: npt.NDArray[np.complex128] +arr_object: npt.NDArray[np.object_] + +seq_num_int: Sequence[int] +seq_num_float: Sequence[float] +seq_num_complex: Sequence[complex] +seq_num_object: Sequence[Decimal | Fraction] + +seq_sct_int: Sequence[np.int_] +seq_sct_float: Sequence[np.float64] +seq_sct_complex: Sequence[np.complex128] +seq_sct_object: Sequence[np.object_] + +seq_arr_int: Sequence[npt.NDArray[np.int_]] +seq_arr_float: Sequence[npt.NDArray[np.float64]] +seq_arr_complex: Sequence[npt.NDArray[np.complex128]] +seq_arr_object: Sequence[npt.NDArray[np.object_]] + +seq_seq_num_int: Sequence[Sequence[int]] +seq_seq_num_float: Sequence[Sequence[float]] +seq_seq_num_complex: Sequence[Sequence[complex]] +seq_seq_num_object: Sequence[Sequence[Decimal | Fraction]] + +seq_seq_sct_int: Sequence[Sequence[np.int_]] +seq_seq_sct_float: Sequence[Sequence[np.float64]] +seq_seq_sct_complex: Sequence[Sequence[np.complex128]] +seq_seq_sct_object: Sequence[Sequence[np.object_]] # doesn't exist at runtime + +# as_series + +assert_type(pu.as_series(arr_int), list[_ArrFloat1D]) +assert_type(pu.as_series(arr_float), list[_ArrFloat1D]) +assert_type(pu.as_series(arr_complex), list[_ArrComplex1D]) +assert_type(pu.as_series(arr_object), list[_ArrObject1D]) + +assert_type(pu.as_series(seq_num_int), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_num_float), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_num_complex), list[_ArrComplex1D]) +assert_type(pu.as_series(seq_num_object), list[_ArrObject1D]) + +assert_type(pu.as_series(seq_sct_int), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_sct_float), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_sct_complex), list[_ArrComplex1D]) +assert_type(pu.as_series(seq_sct_object), list[_ArrObject1D]) + +assert_type(pu.as_series(seq_arr_int), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_arr_float), 
list[_ArrFloat1D]) +assert_type(pu.as_series(seq_arr_complex), list[_ArrComplex1D]) +assert_type(pu.as_series(seq_arr_object), list[_ArrObject1D]) + +assert_type(pu.as_series(seq_seq_num_int), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_seq_num_float), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_seq_num_complex), list[_ArrComplex1D]) +assert_type(pu.as_series(seq_seq_num_object), list[_ArrObject1D]) + +assert_type(pu.as_series(seq_seq_sct_int), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_seq_sct_float), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_seq_sct_complex), list[_ArrComplex1D]) +assert_type(pu.as_series(seq_seq_sct_object), list[_ArrObject1D]) + +# trimcoef + +assert_type(pu.trimcoef(num_int), _ArrFloat1D) +assert_type(pu.trimcoef(num_float), _ArrFloat1D) +assert_type(pu.trimcoef(num_complex), _ArrComplex1D) +assert_type(pu.trimcoef(num_object), _ArrObject1D) +assert_type(pu.trimcoef(num_object), _ArrObject1D) + +assert_type(pu.trimcoef(sct_int), _ArrFloat1D) +assert_type(pu.trimcoef(sct_float), _ArrFloat1D) +assert_type(pu.trimcoef(sct_complex), _ArrComplex1D) +assert_type(pu.trimcoef(sct_object), _ArrObject1D) + +assert_type(pu.trimcoef(arr_int), _ArrFloat1D) +assert_type(pu.trimcoef(arr_float), _ArrFloat1D) +assert_type(pu.trimcoef(arr_complex), _ArrComplex1D) +assert_type(pu.trimcoef(arr_object), _ArrObject1D) + +assert_type(pu.trimcoef(seq_num_int), _ArrFloat1D) +assert_type(pu.trimcoef(seq_num_float), _ArrFloat1D) +assert_type(pu.trimcoef(seq_num_complex), _ArrComplex1D) +assert_type(pu.trimcoef(seq_num_object), _ArrObject1D) + +assert_type(pu.trimcoef(seq_sct_int), _ArrFloat1D) +assert_type(pu.trimcoef(seq_sct_float), _ArrFloat1D) +assert_type(pu.trimcoef(seq_sct_complex), _ArrComplex1D) +assert_type(pu.trimcoef(seq_sct_object), _ArrObject1D) + +# getdomain + +assert_type(pu.getdomain(num_int), _ArrFloat1D_2) +assert_type(pu.getdomain(num_float), _ArrFloat1D_2) +assert_type(pu.getdomain(num_complex), _ArrComplex1D_2) 
+assert_type(pu.getdomain(num_object), _ArrObject1D_2) +assert_type(pu.getdomain(num_object), _ArrObject1D_2) + +assert_type(pu.getdomain(sct_int), _ArrFloat1D_2) +assert_type(pu.getdomain(sct_float), _ArrFloat1D_2) +assert_type(pu.getdomain(sct_complex), _ArrComplex1D_2) +assert_type(pu.getdomain(sct_object), _ArrObject1D_2) + +assert_type(pu.getdomain(arr_int), _ArrFloat1D_2) +assert_type(pu.getdomain(arr_float), _ArrFloat1D_2) +assert_type(pu.getdomain(arr_complex), _ArrComplex1D_2) +assert_type(pu.getdomain(arr_object), _ArrObject1D_2) + +assert_type(pu.getdomain(seq_num_int), _ArrFloat1D_2) +assert_type(pu.getdomain(seq_num_float), _ArrFloat1D_2) +assert_type(pu.getdomain(seq_num_complex), _ArrComplex1D_2) +assert_type(pu.getdomain(seq_num_object), _ArrObject1D_2) + +assert_type(pu.getdomain(seq_sct_int), _ArrFloat1D_2) +assert_type(pu.getdomain(seq_sct_float), _ArrFloat1D_2) +assert_type(pu.getdomain(seq_sct_complex), _ArrComplex1D_2) +assert_type(pu.getdomain(seq_sct_object), _ArrObject1D_2) + +# mapparms + +assert_type(pu.mapparms(seq_num_int, seq_num_int), _Tuple2[float]) +assert_type(pu.mapparms(seq_num_int, seq_num_float), _Tuple2[float]) +assert_type(pu.mapparms(seq_num_float, seq_num_float), _Tuple2[float]) +assert_type(pu.mapparms(seq_num_float, seq_num_complex), _Tuple2[complex]) +assert_type(pu.mapparms(seq_num_complex, seq_num_complex), _Tuple2[complex]) +assert_type(pu.mapparms(seq_num_complex, seq_num_object), _Tuple2[object]) +assert_type(pu.mapparms(seq_num_object, seq_num_object), _Tuple2[object]) + +assert_type(pu.mapparms(seq_sct_int, seq_sct_int), _Tuple2[np.floating]) +assert_type(pu.mapparms(seq_sct_int, seq_sct_float), _Tuple2[np.floating]) +assert_type(pu.mapparms(seq_sct_float, seq_sct_float), _Tuple2[float]) +assert_type(pu.mapparms(seq_sct_float, seq_sct_complex), _Tuple2[complex]) +assert_type(pu.mapparms(seq_sct_complex, seq_sct_complex), _Tuple2[complex]) +assert_type(pu.mapparms(seq_sct_complex, seq_sct_object), _Tuple2[object]) 
+assert_type(pu.mapparms(seq_sct_object, seq_sct_object), _Tuple2[object]) + +assert_type(pu.mapparms(arr_int, arr_int), _Tuple2[np.floating]) +assert_type(pu.mapparms(arr_int, arr_float), _Tuple2[np.floating]) +assert_type(pu.mapparms(arr_float, arr_float), _Tuple2[np.floating]) +assert_type(pu.mapparms(arr_float, arr_complex), _Tuple2[np.complexfloating]) +assert_type(pu.mapparms(arr_complex, arr_complex), _Tuple2[np.complexfloating]) +assert_type(pu.mapparms(arr_complex, arr_object), _Tuple2[object]) +assert_type(pu.mapparms(arr_object, arr_object), _Tuple2[object]) + +# mapdomain + +assert_type(pu.mapdomain(num_int, seq_num_int, seq_num_int), np.floating) +assert_type(pu.mapdomain(num_int, seq_num_int, seq_num_float), np.floating) +assert_type(pu.mapdomain(num_int, seq_num_float, seq_num_float), np.floating) +assert_type(pu.mapdomain(num_float, seq_num_float, seq_num_float), np.floating) +assert_type(pu.mapdomain(num_float, seq_num_float, seq_num_complex), np.complexfloating) +assert_type(pu.mapdomain(num_float, seq_num_complex, seq_num_complex), np.complexfloating) +assert_type(pu.mapdomain(num_complex, seq_num_complex, seq_num_complex), np.complexfloating) +assert_type(pu.mapdomain(num_complex, seq_num_complex, seq_num_object), object) +assert_type(pu.mapdomain(num_complex, seq_num_object, seq_num_object), object) +assert_type(pu.mapdomain(num_object, seq_num_object, seq_num_object), object) + +assert_type(pu.mapdomain(seq_num_int, seq_num_int, seq_num_int), _ArrFloat1D) +assert_type(pu.mapdomain(seq_num_int, seq_num_int, seq_num_float), _ArrFloat1D) +assert_type(pu.mapdomain(seq_num_int, seq_num_float, seq_num_float), _ArrFloat1D) +assert_type(pu.mapdomain(seq_num_float, seq_num_float, seq_num_float), _ArrFloat1D) +assert_type(pu.mapdomain(seq_num_float, seq_num_float, seq_num_complex), _ArrComplex1D) +assert_type(pu.mapdomain(seq_num_float, seq_num_complex, seq_num_complex), _ArrComplex1D) +assert_type(pu.mapdomain(seq_num_complex, seq_num_complex, 
seq_num_complex), _ArrComplex1D) +assert_type(pu.mapdomain(seq_num_complex, seq_num_complex, seq_num_object), _ArrObject1D) +assert_type(pu.mapdomain(seq_num_complex, seq_num_object, seq_num_object), _ArrObject1D) +assert_type(pu.mapdomain(seq_num_object, seq_num_object, seq_num_object), _ArrObject1D) + +assert_type(pu.mapdomain(seq_sct_int, seq_sct_int, seq_sct_int), _ArrFloat1D) +assert_type(pu.mapdomain(seq_sct_int, seq_sct_int, seq_sct_float), _ArrFloat1D) +assert_type(pu.mapdomain(seq_sct_int, seq_sct_float, seq_sct_float), _ArrFloat1D) +assert_type(pu.mapdomain(seq_sct_float, seq_sct_float, seq_sct_float), _ArrFloat1D) +assert_type(pu.mapdomain(seq_sct_float, seq_sct_float, seq_sct_complex), _ArrComplex1D) +assert_type(pu.mapdomain(seq_sct_float, seq_sct_complex, seq_sct_complex), _ArrComplex1D) +assert_type(pu.mapdomain(seq_sct_complex, seq_sct_complex, seq_sct_complex), _ArrComplex1D) +assert_type(pu.mapdomain(seq_sct_complex, seq_sct_complex, seq_sct_object), _ArrObject1D) +assert_type(pu.mapdomain(seq_sct_complex, seq_sct_object, seq_sct_object), _ArrObject1D) +assert_type(pu.mapdomain(seq_sct_object, seq_sct_object, seq_sct_object), _ArrObject1D) + +assert_type(pu.mapdomain(arr_int, arr_int, arr_int), _ArrFloat1D) +assert_type(pu.mapdomain(arr_int, arr_int, arr_float), _ArrFloat1D) +assert_type(pu.mapdomain(arr_int, arr_float, arr_float), _ArrFloat1D) +assert_type(pu.mapdomain(arr_float, arr_float, arr_float), _ArrFloat1D) +assert_type(pu.mapdomain(arr_float, arr_float, arr_complex), _ArrComplex1D) +assert_type(pu.mapdomain(arr_float, arr_complex, arr_complex), _ArrComplex1D) +assert_type(pu.mapdomain(arr_complex, arr_complex, arr_complex), _ArrComplex1D) +assert_type(pu.mapdomain(arr_complex, arr_complex, arr_object), _ArrObject1D) +assert_type(pu.mapdomain(arr_complex, arr_object, arr_object), _ArrObject1D) +assert_type(pu.mapdomain(arr_object, arr_object, arr_object), _ArrObject1D) diff --git a/numpy/typing/tests/data/reveal/polynomial_series.pyi 
b/numpy/typing/tests/data/reveal/polynomial_series.pyi new file mode 100644 index 000000000000..b87ba4fb2677 --- /dev/null +++ b/numpy/typing/tests/data/reveal/polynomial_series.pyi @@ -0,0 +1,138 @@ +from collections.abc import Sequence +from typing import Any, assert_type + +import numpy as np +import numpy.polynomial as npp +import numpy.typing as npt + +type _ArrFloat1D = np.ndarray[tuple[int], np.dtype[np.floating]] +type _ArrFloat1D64 = np.ndarray[tuple[int], np.dtype[np.float64]] +type _ArrComplex1D = np.ndarray[tuple[int], np.dtype[np.complexfloating]] +type _ArrComplex1D128 = np.ndarray[tuple[int], np.dtype[np.complex128]] +type _ArrObject1D = np.ndarray[tuple[int], np.dtype[np.object_]] + +AR_b: npt.NDArray[np.bool] +AR_u4: npt.NDArray[np.uint32] +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] +AR_c16: npt.NDArray[np.complex128] +AR_O: npt.NDArray[np.object_] + +PS_poly: npp.Polynomial +PS_cheb: npp.Chebyshev + +assert_type(npp.polynomial.polyroots(AR_f8), _ArrFloat1D64) +assert_type(npp.polynomial.polyroots(AR_c16), _ArrComplex1D128) +assert_type(npp.polynomial.polyroots(AR_O), _ArrObject1D) + +assert_type(npp.polynomial.polyfromroots(AR_f8), _ArrFloat1D) +assert_type(npp.polynomial.polyfromroots(AR_c16), _ArrComplex1D) +assert_type(npp.polynomial.polyfromroots(AR_O), _ArrObject1D) + +# assert_type(npp.polynomial.polyadd(AR_b, AR_b), NoReturn) +assert_type(npp.polynomial.polyadd(AR_u4, AR_b), _ArrFloat1D) +assert_type(npp.polynomial.polyadd(AR_i8, AR_i8), _ArrFloat1D) +assert_type(npp.polynomial.polyadd(AR_f8, AR_i8), _ArrFloat1D) +assert_type(npp.polynomial.polyadd(AR_i8, AR_c16), _ArrComplex1D) +assert_type(npp.polynomial.polyadd(AR_O, AR_O), _ArrObject1D) + +assert_type(npp.polynomial.polymulx(AR_u4), _ArrFloat1D) +assert_type(npp.polynomial.polymulx(AR_i8), _ArrFloat1D) +assert_type(npp.polynomial.polymulx(AR_f8), _ArrFloat1D) +assert_type(npp.polynomial.polymulx(AR_c16), _ArrComplex1D) +assert_type(npp.polynomial.polymulx(AR_O), 
_ArrObject1D) + +assert_type(npp.polynomial.polypow(AR_u4, 2), _ArrFloat1D) +assert_type(npp.polynomial.polypow(AR_i8, 2), _ArrFloat1D) +assert_type(npp.polynomial.polypow(AR_f8, 2), _ArrFloat1D) +assert_type(npp.polynomial.polypow(AR_c16, 2), _ArrComplex1D) +assert_type(npp.polynomial.polypow(AR_O, 2), _ArrObject1D) + +# assert_type(npp.polynomial.polyder(PS_poly), npt.NDArray[np.object_]) +assert_type(npp.polynomial.polyder(AR_f8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyder(AR_c16), npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyder(AR_O, m=2), npt.NDArray[np.object_]) + +# assert_type(npp.polynomial.polyint(PS_poly), npt.NDArray[np.object_]) +assert_type(npp.polynomial.polyint(AR_f8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyint(AR_f8, k=AR_c16), npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyint(AR_O, m=2), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyval(AR_b, AR_b), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval(AR_u4, AR_b), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval(AR_i8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval(AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyval(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyval2d(AR_b, AR_b, AR_b), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval2d(AR_u4, AR_u4, AR_b), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval2d(AR_i8, AR_i8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval2d(AR_f8, AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval2d(AR_i8, AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyval2d(AR_O, AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyval3d(AR_b, AR_b, AR_b, AR_b), npt.NDArray[np.floating]) 
+assert_type(npp.polynomial.polyval3d(AR_u4, AR_u4, AR_u4, AR_b), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval3d(AR_i8, AR_i8, AR_i8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval3d(AR_f8, AR_f8, AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval3d(AR_i8, AR_i8, AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyval3d(AR_O, AR_O, AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyvalfromroots(AR_b, AR_b), npt.NDArray[np.float64 | Any]) +assert_type(npp.polynomial.polyvalfromroots(AR_u4, AR_b), npt.NDArray[np.float64 | Any]) +assert_type(npp.polynomial.polyvalfromroots(AR_i8, AR_i8), npt.NDArray[np.float64 | Any]) +assert_type(npp.polynomial.polyvalfromroots(AR_f8, AR_i8), npt.NDArray[np.float64 | Any]) +assert_type(npp.polynomial.polyvalfromroots(AR_i8, AR_c16), npt.NDArray[np.complex128 | Any]) +assert_type(npp.polynomial.polyvalfromroots(AR_O, AR_O), npt.NDArray[np.object_ | Any]) + +assert_type(npp.polynomial.polyvander(AR_f8, 3), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyvander(AR_c16, 3), npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyvander(AR_O, 3), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyvander2d(AR_f8, AR_f8, [4, 2]), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyvander2d(AR_c16, AR_c16, [4, 2]), npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyvander2d(AR_O, AR_O, [4, 2]), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyvander3d(AR_f8, AR_f8, AR_f8, [4, 3, 2]), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyvander3d(AR_c16, AR_c16, AR_c16, [4, 3, 2]), npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyvander3d(AR_O, AR_O, AR_O, [4, 3, 2]), npt.NDArray[np.object_]) + +assert_type( + npp.polynomial.polyfit(AR_f8, AR_f8, 2), + npt.NDArray[np.floating], +) +assert_type( + npp.polynomial.polyfit(AR_f8, AR_i8, 1, full=True), + 
tuple[npt.NDArray[np.floating], Sequence[np.inexact | np.int32]], +) +assert_type( + npp.polynomial.polyfit(AR_c16, AR_f8, 2), + npt.NDArray[np.complexfloating], +) +assert_type( + npp.polynomial.polyfit(AR_f8, AR_c16, 1, full=True)[0], + npt.NDArray[np.complexfloating], +) + +assert_type(npp.chebyshev.chebgauss(2), tuple[_ArrFloat1D64, _ArrFloat1D64]) + +assert_type(npp.chebyshev.chebweight(AR_f8), npt.NDArray[np.float64]) +assert_type(npp.chebyshev.chebweight(AR_c16), npt.NDArray[np.complex128]) +assert_type(npp.chebyshev.chebweight(AR_O), npt.NDArray[np.object_]) + +assert_type(npp.chebyshev.poly2cheb(AR_f8), _ArrFloat1D) +assert_type(npp.chebyshev.poly2cheb(AR_c16), _ArrComplex1D) +assert_type(npp.chebyshev.poly2cheb(AR_O), _ArrObject1D) + +assert_type(npp.chebyshev.cheb2poly(AR_f8), _ArrFloat1D) +assert_type(npp.chebyshev.cheb2poly(AR_c16), _ArrComplex1D) +assert_type(npp.chebyshev.cheb2poly(AR_O), _ArrObject1D) + +assert_type(npp.chebyshev.chebpts1(6), _ArrFloat1D64) +assert_type(npp.chebyshev.chebpts2(6), _ArrFloat1D64) + +assert_type( + npp.chebyshev.chebinterpolate(np.tanh, 3), + npt.NDArray[np.float64 | np.complex128 | np.object_], +) diff --git a/numpy/typing/tests/data/reveal/random.pyi b/numpy/typing/tests/data/reveal/random.pyi index b31b4b56f870..72f8c62f79e0 100644 --- a/numpy/typing/tests/data/reveal/random.pyi +++ b/numpy/typing/tests/data/reveal/random.pyi @@ -1,21 +1,15 @@ -import sys import threading -from typing import Any from collections.abc import Sequence +from typing import Any, assert_type import numpy as np import numpy.typing as npt from numpy.random._generator import Generator from numpy.random._mt19937 import MT19937 from numpy.random._pcg64 import PCG64 -from numpy.random._sfc64 import SFC64 from numpy.random._philox import Philox -from numpy.random.bit_generator import SeedSequence, SeedlessSeedSequence - -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +from 
numpy.random._sfc64 import SFC64 +from numpy.random.bit_generator import SeedlessSeedSequence, SeedSequence def_rng = np.random.default_rng() seed_seq = np.random.SeedSequence() @@ -78,12 +72,11 @@ assert_type(sfc64_raw_arr, npt.NDArray[np.uint64]) assert_type(sfc64.lock, threading.Lock) assert_type(seed_seq.pool, npt.NDArray[np.uint32]) -assert_type(seed_seq.entropy, None | int | Sequence[int]) +assert_type(seed_seq.entropy, int | Sequence[int] | None) assert_type(seed_seq.spawn(1), list[np.random.SeedSequence]) assert_type(seed_seq.generate_state(8, "uint32"), npt.NDArray[np.uint32 | np.uint64]) assert_type(seed_seq.generate_state(8, "uint64"), npt.NDArray[np.uint32 | np.uint64]) - def_gen: np.random.Generator = np.random.default_rng() D_arr_0p1: npt.NDArray[np.float64] = np.array([0.1]) @@ -356,11 +349,11 @@ assert_type(def_gen.gumbel(0.5, 0.5), float) assert_type(def_gen.gumbel(0.5, 0.5, size=None), float) assert_type(def_gen.gumbel(0.5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.gumbel(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.gumbel(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.gumbel(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.gumbel(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.gumbel(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.gumbel(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.gumbel(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) assert_type(def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) assert_type(def_gen.gumbel(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) @@ -370,11 +363,11 @@ assert_type(def_gen.laplace(0.5, 0.5), float) assert_type(def_gen.laplace(0.5, 0.5, size=None), float) assert_type(def_gen.laplace(0.5, 0.5, 
size=1), npt.NDArray[np.float64]) assert_type(def_gen.laplace(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.laplace(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.laplace(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.laplace(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.laplace(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.laplace(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.laplace(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) assert_type(def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) assert_type(def_gen.laplace(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) @@ -384,11 +377,11 @@ assert_type(def_gen.logistic(0.5, 0.5), float) assert_type(def_gen.logistic(0.5, 0.5, size=None), float) assert_type(def_gen.logistic(0.5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.logistic(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.logistic(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.logistic(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.logistic(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.logistic(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.logistic(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.logistic(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) assert_type(def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) assert_type(def_gen.logistic(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) @@ -426,11 +419,11 @@ 
assert_type(def_gen.normal(0.5, 0.5), float) assert_type(def_gen.normal(0.5, 0.5, size=None), float) assert_type(def_gen.normal(0.5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.normal(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.normal(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.normal(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.normal(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.normal(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.normal(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.normal(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.normal(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.normal(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) assert_type(def_gen.normal(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) assert_type(def_gen.normal(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) @@ -495,23 +488,23 @@ assert_type(def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1), np assert_type(def_gen.hypergeometric(20, 20, 10), int) assert_type(def_gen.hypergeometric(20, 20, 10, size=None), int) assert_type(def_gen.hypergeometric(20, 20, 10, size=1), npt.NDArray[np.int64]) -assert_type(def_gen.hypergeometric(I_arr_20, 20, 10), npt.NDArray[np.int64]) -assert_type(def_gen.hypergeometric(20, I_arr_20, 10), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(I_arr_20, 20, 10), npt.NDArray[np.int64] | Any) +assert_type(def_gen.hypergeometric(20, I_arr_20, 10), npt.NDArray[np.int64] | Any) assert_type(def_gen.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1), npt.NDArray[np.int64]) assert_type(def_gen.hypergeometric(20, I_arr_20, 10, size=1), npt.NDArray[np.int64]) -assert_type(def_gen.hypergeometric(I_arr_like_20, 20, I_arr_10), npt.NDArray[np.int64]) -assert_type(def_gen.hypergeometric(20, I_arr_like_20, 10), npt.NDArray[np.int64]) 
-assert_type(def_gen.hypergeometric(I_arr_20, I_arr_20, 10), npt.NDArray[np.int64]) -assert_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, 10), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(I_arr_like_20, 20, I_arr_10), npt.NDArray[np.int64] | Any) +assert_type(def_gen.hypergeometric(20, I_arr_like_20, 10), npt.NDArray[np.int64] | Any) +assert_type(def_gen.hypergeometric(I_arr_20, I_arr_20, 10), npt.NDArray[np.int64] | Any) +assert_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, 10), npt.NDArray[np.int64] | Any) assert_type(def_gen.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1), npt.NDArray[np.int64]) assert_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1), npt.NDArray[np.int64]) I_int64_100: npt.NDArray[np.int64] = np.array([100], dtype=np.int64) -assert_type(def_gen.integers(0, 100), int) -assert_type(def_gen.integers(100), int) -assert_type(def_gen.integers([100]), npt.NDArray[np.int64]) -assert_type(def_gen.integers(0, [100]), npt.NDArray[np.int64]) +assert_type(def_gen.integers(0, 100), np.int64) +assert_type(def_gen.integers(100), np.int64) +assert_type(def_gen.integers([100]), npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(0, [100]), npt.NDArray[np.int64] | Any) I_bool_low: npt.NDArray[np.bool] = np.array([0], dtype=np.bool) I_bool_low_like: list[int] = [0] @@ -522,107 +515,59 @@ assert_type(def_gen.integers(2, dtype=bool), bool) assert_type(def_gen.integers(0, 2, dtype=bool), bool) assert_type(def_gen.integers(1, dtype=bool, endpoint=True), bool) assert_type(def_gen.integers(0, 1, dtype=bool, endpoint=True), bool) -assert_type(def_gen.integers(I_bool_low_like, 1, dtype=bool, endpoint=True), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) -assert_type(def_gen.integers(0, I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) 
-assert_type(def_gen.integers(I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[np.bool]) -assert_type(def_gen.integers(0, I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[np.bool]) +assert_type(def_gen.integers(I_bool_low_like, 1, dtype=bool, endpoint=True), npt.NDArray[Any] | Any) +assert_type(def_gen.integers(I_bool_high_open, dtype=bool), npt.NDArray[Any] | Any) +assert_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=bool), npt.NDArray[Any] | Any) +assert_type(def_gen.integers(0, I_bool_high_open, dtype=bool), npt.NDArray[Any] | Any) +assert_type(def_gen.integers(I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[Any] | Any) +assert_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[Any] | Any) +assert_type(def_gen.integers(0, I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[Any] | Any) assert_type(def_gen.integers(2, dtype=np.bool), np.bool) assert_type(def_gen.integers(0, 2, dtype=np.bool), np.bool) assert_type(def_gen.integers(1, dtype=np.bool, endpoint=True), np.bool) assert_type(def_gen.integers(0, 1, dtype=np.bool, endpoint=True), np.bool) -assert_type(def_gen.integers(I_bool_low_like, 1, dtype=np.bool, endpoint=True), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) -assert_type(def_gen.integers(0, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool]) -assert_type(def_gen.integers(0, I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool]) 
+assert_type(def_gen.integers(I_bool_low_like, 1, dtype=np.bool, endpoint=True), npt.NDArray[np.bool] | Any) +assert_type(def_gen.integers(I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool] | Any) +assert_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool] | Any) +assert_type(def_gen.integers(0, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool] | Any) +assert_type(def_gen.integers(I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool] | Any) +assert_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool] | Any) +assert_type(def_gen.integers(0, I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool] | Any) I_u1_low: npt.NDArray[np.uint8] = np.array([0], dtype=np.uint8) I_u1_low_like: list[int] = [0] I_u1_high_open: npt.NDArray[np.uint8] = np.array([255], dtype=np.uint8) I_u1_high_closed: npt.NDArray[np.uint8] = np.array([255], dtype=np.uint8) -assert_type(def_gen.integers(256, dtype="u1"), np.uint8) -assert_type(def_gen.integers(0, 256, dtype="u1"), np.uint8) -assert_type(def_gen.integers(255, dtype="u1", endpoint=True), np.uint8) -assert_type(def_gen.integers(0, 255, dtype="u1", endpoint=True), np.uint8) -assert_type(def_gen.integers(I_u1_low_like, 255, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(0, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_high_closed, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(0, I_u1_high_closed, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) - -assert_type(def_gen.integers(256, dtype="uint8"), np.uint8) 
-assert_type(def_gen.integers(0, 256, dtype="uint8"), np.uint8) -assert_type(def_gen.integers(255, dtype="uint8", endpoint=True), np.uint8) -assert_type(def_gen.integers(0, 255, dtype="uint8", endpoint=True), np.uint8) -assert_type(def_gen.integers(I_u1_low_like, 255, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(0, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_high_closed, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(0, I_u1_high_closed, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) - assert_type(def_gen.integers(256, dtype=np.uint8), np.uint8) assert_type(def_gen.integers(0, 256, dtype=np.uint8), np.uint8) assert_type(def_gen.integers(255, dtype=np.uint8, endpoint=True), np.uint8) assert_type(def_gen.integers(0, 255, dtype=np.uint8, endpoint=True), np.uint8) -assert_type(def_gen.integers(I_u1_low_like, 255, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(0, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(0, I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_low_like, 255, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8] | 
Any) +assert_type(def_gen.integers(I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8] | Any) +assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8] | Any) +assert_type(def_gen.integers(0, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8] | Any) +assert_type(def_gen.integers(I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8] | Any) +assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8] | Any) +assert_type(def_gen.integers(0, I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8] | Any) I_u2_low: npt.NDArray[np.uint16] = np.array([0], dtype=np.uint16) I_u2_low_like: list[int] = [0] I_u2_high_open: npt.NDArray[np.uint16] = np.array([65535], dtype=np.uint16) I_u2_high_closed: npt.NDArray[np.uint16] = np.array([65535], dtype=np.uint16) -assert_type(def_gen.integers(65536, dtype="u2"), np.uint16) -assert_type(def_gen.integers(0, 65536, dtype="u2"), np.uint16) -assert_type(def_gen.integers(65535, dtype="u2", endpoint=True), np.uint16) -assert_type(def_gen.integers(0, 65535, dtype="u2", endpoint=True), np.uint16) -assert_type(def_gen.integers(I_u2_low_like, 65535, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(0, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_high_closed, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(0, I_u2_high_closed, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) - -assert_type(def_gen.integers(65536, dtype="uint16"), np.uint16) -assert_type(def_gen.integers(0, 65536, dtype="uint16"), np.uint16) 
-assert_type(def_gen.integers(65535, dtype="uint16", endpoint=True), np.uint16) -assert_type(def_gen.integers(0, 65535, dtype="uint16", endpoint=True), np.uint16) -assert_type(def_gen.integers(I_u2_low_like, 65535, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(0, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_high_closed, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(0, I_u2_high_closed, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) - assert_type(def_gen.integers(65536, dtype=np.uint16), np.uint16) assert_type(def_gen.integers(0, 65536, dtype=np.uint16), np.uint16) assert_type(def_gen.integers(65535, dtype=np.uint16, endpoint=True), np.uint16) assert_type(def_gen.integers(0, 65535, dtype=np.uint16, endpoint=True), np.uint16) -assert_type(def_gen.integers(I_u2_low_like, 65535, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(0, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(0, I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_low_like, 65535, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16] | Any) 
+assert_type(def_gen.integers(I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16] | Any) +assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16] | Any) +assert_type(def_gen.integers(0, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16] | Any) +assert_type(def_gen.integers(I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16] | Any) +assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16] | Any) +assert_type(def_gen.integers(0, I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16] | Any) I_u4_low: npt.NDArray[np.uint32] = np.array([0], dtype=np.uint32) I_u4_low_like: list[int] = [0] @@ -633,268 +578,122 @@ assert_type(def_gen.integers(4294967296, dtype=np.int_), np.int_) assert_type(def_gen.integers(0, 4294967296, dtype=np.int_), np.int_) assert_type(def_gen.integers(4294967295, dtype=np.int_, endpoint=True), np.int_) assert_type(def_gen.integers(0, 4294967295, dtype=np.int_, endpoint=True), np.int_) -assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) -assert_type(def_gen.integers(I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_]) -assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_]) -assert_type(def_gen.integers(I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) -assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) - - -assert_type(def_gen.integers(4294967296, dtype="u4"), np.uint32) -assert_type(def_gen.integers(0, 4294967296, dtype="u4"), np.uint32) -assert_type(def_gen.integers(4294967295, dtype="u4", endpoint=True), np.uint32) 
-assert_type(def_gen.integers(0, 4294967295, dtype="u4", endpoint=True), np.uint32) -assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(0, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_high_closed, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(0, I_u4_high_closed, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) - -assert_type(def_gen.integers(4294967296, dtype="uint32"), np.uint32) -assert_type(def_gen.integers(0, 4294967296, dtype="uint32"), np.uint32) -assert_type(def_gen.integers(4294967295, dtype="uint32", endpoint=True), np.uint32) -assert_type(def_gen.integers(0, 4294967295, dtype="uint32", endpoint=True), np.uint32) -assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(0, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_high_closed, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(0, I_u4_high_closed, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.int_, endpoint=True), npt.NDArray[np.int_] | Any) +assert_type(def_gen.integers(I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_] | 
Any) +assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_] | Any) +assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_] | Any) +assert_type(def_gen.integers(I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_] | Any) +assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_] | Any) +assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_] | Any) assert_type(def_gen.integers(4294967296, dtype=np.uint32), np.uint32) assert_type(def_gen.integers(0, 4294967296, dtype=np.uint32), np.uint32) assert_type(def_gen.integers(4294967295, dtype=np.uint32, endpoint=True), np.uint32) assert_type(def_gen.integers(0, 4294967295, dtype=np.uint32, endpoint=True), np.uint32) -assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32] | Any) +assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32] | Any) +assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32] | Any) +assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32] | Any) 
+assert_type(def_gen.integers(I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32] | Any) +assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32] | Any) +assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32] | Any) assert_type(def_gen.integers(4294967296, dtype=np.uint), np.uint) assert_type(def_gen.integers(0, 4294967296, dtype=np.uint), np.uint) assert_type(def_gen.integers(4294967295, dtype=np.uint, endpoint=True), np.uint) assert_type(def_gen.integers(0, 4294967295, dtype=np.uint, endpoint=True), np.uint) -assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) -assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) -assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) -assert_type(def_gen.integers(I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) -assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) +assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint, endpoint=True), npt.NDArray[np.uint] | Any) +assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint] | Any) +assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint] | Any) +assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint] | Any) +assert_type(def_gen.integers(I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint] | Any) +assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint] | Any) +assert_type(def_gen.integers(0, 
I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint] | Any) I_u8_low: npt.NDArray[np.uint64] = np.array([0], dtype=np.uint64) I_u8_low_like: list[int] = [0] I_u8_high_open: npt.NDArray[np.uint64] = np.array([18446744073709551615], dtype=np.uint64) I_u8_high_closed: npt.NDArray[np.uint64] = np.array([18446744073709551615], dtype=np.uint64) -assert_type(def_gen.integers(18446744073709551616, dtype="u8"), np.uint64) -assert_type(def_gen.integers(0, 18446744073709551616, dtype="u8"), np.uint64) -assert_type(def_gen.integers(18446744073709551615, dtype="u8", endpoint=True), np.uint64) -assert_type(def_gen.integers(0, 18446744073709551615, dtype="u8", endpoint=True), np.uint64) -assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(0, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_high_closed, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(0, I_u8_high_closed, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) - -assert_type(def_gen.integers(18446744073709551616, dtype="uint64"), np.uint64) -assert_type(def_gen.integers(0, 18446744073709551616, dtype="uint64"), np.uint64) -assert_type(def_gen.integers(18446744073709551615, dtype="uint64", endpoint=True), np.uint64) -assert_type(def_gen.integers(0, 18446744073709551615, dtype="uint64", endpoint=True), np.uint64) -assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_low, 
I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(0, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_high_closed, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(0, I_u8_high_closed, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) - assert_type(def_gen.integers(18446744073709551616, dtype=np.uint64), np.uint64) assert_type(def_gen.integers(0, 18446744073709551616, dtype=np.uint64), np.uint64) assert_type(def_gen.integers(18446744073709551615, dtype=np.uint64, endpoint=True), np.uint64) assert_type(def_gen.integers(0, 18446744073709551615, dtype=np.uint64, endpoint=True), np.uint64) -assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(0, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(0, I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64] | Any) +assert_type(def_gen.integers(I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64] | Any) +assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64] | Any) +assert_type(def_gen.integers(0, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64] | Any) 
+assert_type(def_gen.integers(I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64] | Any) +assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64] | Any) +assert_type(def_gen.integers(0, I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64] | Any) I_i1_low: npt.NDArray[np.int8] = np.array([-128], dtype=np.int8) I_i1_low_like: list[int] = [-128] I_i1_high_open: npt.NDArray[np.int8] = np.array([127], dtype=np.int8) I_i1_high_closed: npt.NDArray[np.int8] = np.array([127], dtype=np.int8) -assert_type(def_gen.integers(128, dtype="i1"), np.int8) -assert_type(def_gen.integers(-128, 128, dtype="i1"), np.int8) -assert_type(def_gen.integers(127, dtype="i1", endpoint=True), np.int8) -assert_type(def_gen.integers(-128, 127, dtype="i1", endpoint=True), np.int8) -assert_type(def_gen.integers(I_i1_low_like, 127, dtype="i1", endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) -assert_type(def_gen.integers(-128, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_high_closed, dtype="i1", endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="i1", endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(-128, I_i1_high_closed, dtype="i1", endpoint=True), npt.NDArray[np.int8]) - -assert_type(def_gen.integers(128, dtype="int8"), np.int8) -assert_type(def_gen.integers(-128, 128, dtype="int8"), np.int8) -assert_type(def_gen.integers(127, dtype="int8", endpoint=True), np.int8) -assert_type(def_gen.integers(-128, 127, dtype="int8", endpoint=True), np.int8) -assert_type(def_gen.integers(I_i1_low_like, 127, dtype="int8", endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) 
-assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) -assert_type(def_gen.integers(-128, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_high_closed, dtype="int8", endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="int8", endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(-128, I_i1_high_closed, dtype="int8", endpoint=True), npt.NDArray[np.int8]) - assert_type(def_gen.integers(128, dtype=np.int8), np.int8) assert_type(def_gen.integers(-128, 128, dtype=np.int8), np.int8) assert_type(def_gen.integers(127, dtype=np.int8, endpoint=True), np.int8) assert_type(def_gen.integers(-128, 127, dtype=np.int8, endpoint=True), np.int8) -assert_type(def_gen.integers(I_i1_low_like, 127, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) -assert_type(def_gen.integers(-128, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(-128, I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_low_like, 127, dtype=np.int8, endpoint=True), npt.NDArray[np.int8] | Any) +assert_type(def_gen.integers(I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8] | Any) +assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8] | Any) +assert_type(def_gen.integers(-128, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8] | Any) +assert_type(def_gen.integers(I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8] | Any) 
+assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8] | Any) +assert_type(def_gen.integers(-128, I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8] | Any) I_i2_low: npt.NDArray[np.int16] = np.array([-32768], dtype=np.int16) I_i2_low_like: list[int] = [-32768] I_i2_high_open: npt.NDArray[np.int16] = np.array([32767], dtype=np.int16) I_i2_high_closed: npt.NDArray[np.int16] = np.array([32767], dtype=np.int16) -assert_type(def_gen.integers(32768, dtype="i2"), np.int16) -assert_type(def_gen.integers(-32768, 32768, dtype="i2"), np.int16) -assert_type(def_gen.integers(32767, dtype="i2", endpoint=True), np.int16) -assert_type(def_gen.integers(-32768, 32767, dtype="i2", endpoint=True), np.int16) -assert_type(def_gen.integers(I_i2_low_like, 32767, dtype="i2", endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) -assert_type(def_gen.integers(-32768, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_high_closed, dtype="i2", endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="i2", endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype="i2", endpoint=True), npt.NDArray[np.int16]) - -assert_type(def_gen.integers(32768, dtype="int16"), np.int16) -assert_type(def_gen.integers(-32768, 32768, dtype="int16"), np.int16) -assert_type(def_gen.integers(32767, dtype="int16", endpoint=True), np.int16) -assert_type(def_gen.integers(-32768, 32767, dtype="int16", endpoint=True), np.int16) -assert_type(def_gen.integers(I_i2_low_like, 32767, dtype="int16", endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_low, I_i2_high_open, 
dtype="int16"), npt.NDArray[np.int16]) -assert_type(def_gen.integers(-32768, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_high_closed, dtype="int16", endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="int16", endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype="int16", endpoint=True), npt.NDArray[np.int16]) - assert_type(def_gen.integers(32768, dtype=np.int16), np.int16) assert_type(def_gen.integers(-32768, 32768, dtype=np.int16), np.int16) assert_type(def_gen.integers(32767, dtype=np.int16, endpoint=True), np.int16) assert_type(def_gen.integers(-32768, 32767, dtype=np.int16, endpoint=True), np.int16) -assert_type(def_gen.integers(I_i2_low_like, 32767, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) -assert_type(def_gen.integers(-32768, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_low_like, 32767, dtype=np.int16, endpoint=True), npt.NDArray[np.int16] | Any) +assert_type(def_gen.integers(I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16] | Any) +assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16] | Any) +assert_type(def_gen.integers(-32768, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16] | Any) +assert_type(def_gen.integers(I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16] | Any) 
+assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16] | Any) +assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16] | Any) I_i4_low: npt.NDArray[np.int32] = np.array([-2147483648], dtype=np.int32) I_i4_low_like: list[int] = [-2147483648] I_i4_high_open: npt.NDArray[np.int32] = np.array([2147483647], dtype=np.int32) I_i4_high_closed: npt.NDArray[np.int32] = np.array([2147483647], dtype=np.int32) -assert_type(def_gen.integers(2147483648, dtype="i4"), np.int32) -assert_type(def_gen.integers(-2147483648, 2147483648, dtype="i4"), np.int32) -assert_type(def_gen.integers(2147483647, dtype="i4", endpoint=True), np.int32) -assert_type(def_gen.integers(-2147483648, 2147483647, dtype="i4", endpoint=True), np.int32) -assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="i4", endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) -assert_type(def_gen.integers(-2147483648, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_high_closed, dtype="i4", endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="i4", endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="i4", endpoint=True), npt.NDArray[np.int32]) - -assert_type(def_gen.integers(2147483648, dtype="int32"), np.int32) -assert_type(def_gen.integers(-2147483648, 2147483648, dtype="int32"), np.int32) -assert_type(def_gen.integers(2147483647, dtype="int32", endpoint=True), np.int32) -assert_type(def_gen.integers(-2147483648, 2147483647, dtype="int32", endpoint=True), np.int32) -assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="int32", endpoint=True), npt.NDArray[np.int32]) 
-assert_type(def_gen.integers(I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) -assert_type(def_gen.integers(-2147483648, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_high_closed, dtype="int32", endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="int32", endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="int32", endpoint=True), npt.NDArray[np.int32]) - assert_type(def_gen.integers(2147483648, dtype=np.int32), np.int32) assert_type(def_gen.integers(-2147483648, 2147483648, dtype=np.int32), np.int32) assert_type(def_gen.integers(2147483647, dtype=np.int32, endpoint=True), np.int32) assert_type(def_gen.integers(-2147483648, 2147483647, dtype=np.int32, endpoint=True), np.int32) -assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) -assert_type(def_gen.integers(-2147483648, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype=np.int32, endpoint=True), npt.NDArray[np.int32] | Any) +assert_type(def_gen.integers(I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32] | Any) +assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32] | Any) 
+assert_type(def_gen.integers(-2147483648, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32] | Any) +assert_type(def_gen.integers(I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32] | Any) +assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32] | Any) +assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32] | Any) I_i8_low: npt.NDArray[np.int64] = np.array([-9223372036854775808], dtype=np.int64) I_i8_low_like: list[int] = [-9223372036854775808] I_i8_high_open: npt.NDArray[np.int64] = np.array([9223372036854775807], dtype=np.int64) I_i8_high_closed: npt.NDArray[np.int64] = np.array([9223372036854775807], dtype=np.int64) -assert_type(def_gen.integers(9223372036854775808, dtype="i8"), np.int64) -assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="i8"), np.int64) -assert_type(def_gen.integers(9223372036854775807, dtype="i8", endpoint=True), np.int64) -assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="i8", endpoint=True), np.int64) -assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="i8", endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) -assert_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_high_closed, dtype="i8", endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="i8", endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="i8", endpoint=True), npt.NDArray[np.int64]) - -assert_type(def_gen.integers(9223372036854775808, dtype="int64"), np.int64) -assert_type(def_gen.integers(-9223372036854775808, 
9223372036854775808, dtype="int64"), np.int64) -assert_type(def_gen.integers(9223372036854775807, dtype="int64", endpoint=True), np.int64) -assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="int64", endpoint=True), np.int64) -assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="int64", endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) -assert_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_high_closed, dtype="int64", endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="int64", endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="int64", endpoint=True), npt.NDArray[np.int64]) - assert_type(def_gen.integers(9223372036854775808, dtype=np.int64), np.int64) assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype=np.int64), np.int64) assert_type(def_gen.integers(9223372036854775807, dtype=np.int64, endpoint=True), np.int64) assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype=np.int64, endpoint=True), np.int64) -assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype=np.int64, endpoint=True), 
npt.NDArray[np.int64]) -assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) - +assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype=np.int64, endpoint=True), npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64] | Any) assert_type(def_gen.bit_generator, np.random.BitGenerator) @@ -906,11 +705,12 @@ assert_type(def_gen.choice(5, 3, replace=True), npt.NDArray[np.int64]) assert_type(def_gen.choice(5, 3, p=[1 / 5] * 5), npt.NDArray[np.int64]) assert_type(def_gen.choice(5, 3, p=[1 / 5] * 5, replace=False), npt.NDArray[np.int64]) -assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"]), Any) -assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3), npt.NDArray[Any]) -assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4), npt.NDArray[Any]) -assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True), npt.NDArray[Any]) -assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4])), npt.NDArray[Any]) +str_list: list[str] +assert_type(def_gen.choice(str_list), Any) +assert_type(def_gen.choice(str_list, 3), npt.NDArray[Any]) +assert_type(def_gen.choice(str_list, 3, p=[1 / 4] * 4), npt.NDArray[Any]) 
+assert_type(def_gen.choice(str_list, 3, replace=True), npt.NDArray[Any]) +assert_type(def_gen.choice(str_list, 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4])), npt.NDArray[Any]) assert_type(def_gen.dirichlet([0.5, 0.5]), npt.NDArray[np.float64]) assert_type(def_gen.dirichlet(np.array([0.5, 0.5])), npt.NDArray[np.float64]) @@ -938,13 +738,13 @@ assert_type(def_gen.permutation(10), npt.NDArray[np.int64]) assert_type(def_gen.permutation([1, 2, 3, 4]), npt.NDArray[Any]) assert_type(def_gen.permutation(np.array([1, 2, 3, 4])), npt.NDArray[Any]) assert_type(def_gen.permutation(D_2D, axis=1), npt.NDArray[Any]) -assert_type(def_gen.permuted(D_2D), npt.NDArray[Any]) +assert_type(def_gen.permuted(D_2D), npt.NDArray[np.float64]) assert_type(def_gen.permuted(D_2D_like), npt.NDArray[Any]) -assert_type(def_gen.permuted(D_2D, axis=1), npt.NDArray[Any]) -assert_type(def_gen.permuted(D_2D, out=D_2D), npt.NDArray[Any]) -assert_type(def_gen.permuted(D_2D_like, out=D_2D), npt.NDArray[Any]) -assert_type(def_gen.permuted(D_2D_like, out=D_2D), npt.NDArray[Any]) -assert_type(def_gen.permuted(D_2D, axis=1, out=D_2D), npt.NDArray[Any]) +assert_type(def_gen.permuted(D_2D, axis=1), npt.NDArray[np.float64]) +assert_type(def_gen.permuted(D_2D, out=D_2D), npt.NDArray[np.float64]) +assert_type(def_gen.permuted(D_2D_like, out=D_2D), npt.NDArray[np.float64]) +assert_type(def_gen.permuted(D_2D_like, out=D_2D), npt.NDArray[np.float64]) +assert_type(def_gen.permuted(D_2D, axis=1, out=D_2D), npt.NDArray[np.float64]) assert_type(def_gen.shuffle(np.arange(10)), None) assert_type(def_gen.shuffle([1, 2, 3, 4, 5]), None) @@ -977,510 +777,511 @@ assert_type(random_st.standard_exponential(size=1), npt.NDArray[np.float64]) assert_type(random_st.zipf(1.5), int) assert_type(random_st.zipf(1.5, size=None), int) assert_type(random_st.zipf(1.5, size=1), npt.NDArray[np.long]) -assert_type(random_st.zipf(D_arr_1p5), npt.NDArray[np.long]) +assert_type(random_st.zipf(D_arr_1p5), npt.NDArray[np.long] | Any) 
assert_type(random_st.zipf(D_arr_1p5, size=1), npt.NDArray[np.long]) -assert_type(random_st.zipf(D_arr_like_1p5), npt.NDArray[np.long]) +assert_type(random_st.zipf(D_arr_like_1p5), npt.NDArray[np.long] | Any) assert_type(random_st.zipf(D_arr_like_1p5, size=1), npt.NDArray[np.long]) assert_type(random_st.weibull(0.5), float) assert_type(random_st.weibull(0.5, size=None), float) assert_type(random_st.weibull(0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.weibull(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.weibull(D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.weibull(D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.weibull(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.weibull(D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.weibull(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.standard_t(0.5), float) assert_type(random_st.standard_t(0.5, size=None), float) assert_type(random_st.standard_t(0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.standard_t(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.standard_t(D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.standard_t(D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.standard_t(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.standard_t(D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.standard_t(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.poisson(0.5), int) assert_type(random_st.poisson(0.5, size=None), int) assert_type(random_st.poisson(0.5, size=1), npt.NDArray[np.long]) -assert_type(random_st.poisson(D_arr_0p5), npt.NDArray[np.long]) +assert_type(random_st.poisson(D_arr_0p5), npt.NDArray[np.long] | Any) assert_type(random_st.poisson(D_arr_0p5, size=1), npt.NDArray[np.long]) -assert_type(random_st.poisson(D_arr_like_0p5), npt.NDArray[np.long]) 
+assert_type(random_st.poisson(D_arr_like_0p5), npt.NDArray[np.long] | Any) assert_type(random_st.poisson(D_arr_like_0p5, size=1), npt.NDArray[np.long]) assert_type(random_st.power(0.5), float) assert_type(random_st.power(0.5, size=None), float) assert_type(random_st.power(0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.power(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.power(D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.power(D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.power(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.power(D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.power(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.pareto(0.5), float) assert_type(random_st.pareto(0.5, size=None), float) assert_type(random_st.pareto(0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.pareto(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.pareto(D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.pareto(D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.pareto(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.pareto(D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.pareto(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.chisquare(0.5), float) assert_type(random_st.chisquare(0.5, size=None), float) assert_type(random_st.chisquare(0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.chisquare(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.chisquare(D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.chisquare(D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.chisquare(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.chisquare(D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.chisquare(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) 
assert_type(random_st.exponential(0.5), float) assert_type(random_st.exponential(0.5, size=None), float) assert_type(random_st.exponential(0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.exponential(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.exponential(D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.exponential(D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.exponential(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.exponential(D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.exponential(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.geometric(0.5), int) assert_type(random_st.geometric(0.5, size=None), int) assert_type(random_st.geometric(0.5, size=1), npt.NDArray[np.long]) -assert_type(random_st.geometric(D_arr_0p5), npt.NDArray[np.long]) +assert_type(random_st.geometric(D_arr_0p5), npt.NDArray[np.long] | Any) assert_type(random_st.geometric(D_arr_0p5, size=1), npt.NDArray[np.long]) -assert_type(random_st.geometric(D_arr_like_0p5), npt.NDArray[np.long]) +assert_type(random_st.geometric(D_arr_like_0p5), npt.NDArray[np.long] | Any) assert_type(random_st.geometric(D_arr_like_0p5, size=1), npt.NDArray[np.long]) assert_type(random_st.logseries(0.5), int) assert_type(random_st.logseries(0.5, size=None), int) assert_type(random_st.logseries(0.5, size=1), npt.NDArray[np.long]) -assert_type(random_st.logseries(D_arr_0p5), npt.NDArray[np.long]) +assert_type(random_st.logseries(D_arr_0p5), npt.NDArray[np.long] | Any) assert_type(random_st.logseries(D_arr_0p5, size=1), npt.NDArray[np.long]) -assert_type(random_st.logseries(D_arr_like_0p5), npt.NDArray[np.long]) +assert_type(random_st.logseries(D_arr_like_0p5), npt.NDArray[np.long] | Any) assert_type(random_st.logseries(D_arr_like_0p5, size=1), npt.NDArray[np.long]) assert_type(random_st.rayleigh(0.5), float) assert_type(random_st.rayleigh(0.5, size=None), float) 
assert_type(random_st.rayleigh(0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.rayleigh(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.rayleigh(D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.rayleigh(D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.rayleigh(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.rayleigh(D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.rayleigh(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.standard_gamma(0.5), float) assert_type(random_st.standard_gamma(0.5, size=None), float) assert_type(random_st.standard_gamma(0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.standard_gamma(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.standard_gamma(D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.standard_gamma(D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.standard_gamma(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.standard_gamma(D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.standard_gamma(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.standard_gamma(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.vonmises(0.5, 0.5), float) assert_type(random_st.vonmises(0.5, 0.5, size=None), float) assert_type(random_st.vonmises(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.vonmises(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.vonmises(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.vonmises(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.vonmises(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(random_st.vonmises(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.vonmises(D_arr_like_0p5, 0.5), 
npt.NDArray[np.float64]) -assert_type(random_st.vonmises(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.vonmises(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) -assert_type(random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.vonmises(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.vonmises(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.vonmises(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.wald(0.5, 0.5), float) assert_type(random_st.wald(0.5, 0.5, size=None), float) assert_type(random_st.wald(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.wald(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.wald(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.wald(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.wald(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.wald(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(random_st.wald(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.wald(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.wald(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.wald(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) -assert_type(random_st.wald(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.wald(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.wald(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.wald(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) 
+assert_type(random_st.wald(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.wald(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.wald(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.uniform(0.5, 0.5), float) assert_type(random_st.uniform(0.5, 0.5, size=None), float) assert_type(random_st.uniform(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.uniform(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.uniform(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.uniform(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.uniform(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.uniform(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(random_st.uniform(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.uniform(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.uniform(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.uniform(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) -assert_type(random_st.uniform(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.uniform(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.uniform(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.uniform(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.uniform(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.uniform(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.beta(0.5, 0.5), float) assert_type(random_st.beta(0.5, 0.5, size=None), float) assert_type(random_st.beta(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.beta(D_arr_0p5, 0.5), 
npt.NDArray[np.float64]) -assert_type(random_st.beta(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.beta(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.beta(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.beta(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(random_st.beta(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.beta(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.beta(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.beta(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) -assert_type(random_st.beta(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.beta(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.beta(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.beta(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.beta(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.beta(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.beta(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.f(0.5, 0.5), float) assert_type(random_st.f(0.5, 0.5, size=None), float) assert_type(random_st.f(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.f(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.f(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.f(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.f(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.f(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(random_st.f(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.f(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.f(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.f(D_arr_0p5, 
D_arr_0p5), npt.NDArray[np.float64]) -assert_type(random_st.f(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.f(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.f(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.f(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.f(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.f(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.f(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.gamma(0.5, 0.5), float) assert_type(random_st.gamma(0.5, 0.5, size=None), float) assert_type(random_st.gamma(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.gamma(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.gamma(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gamma(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.gamma(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.gamma(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(random_st.gamma(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.gamma(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.gamma(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.gamma(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) -assert_type(random_st.gamma(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gamma(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.gamma(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.gamma(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.gamma(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.gamma(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) 
assert_type(random_st.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.gumbel(0.5, 0.5), float) assert_type(random_st.gumbel(0.5, 0.5, size=None), float) assert_type(random_st.gumbel(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.gumbel(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.gumbel(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.gumbel(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.gumbel(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(random_st.gumbel(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.gumbel(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.gumbel(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.gumbel(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) -assert_type(random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.gumbel(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.gumbel(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.gumbel(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.laplace(0.5, 0.5), float) assert_type(random_st.laplace(0.5, 0.5, size=None), float) assert_type(random_st.laplace(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.laplace(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.laplace(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.laplace(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) 
+assert_type(random_st.laplace(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.laplace(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(random_st.laplace(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.laplace(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.laplace(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.laplace(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) -assert_type(random_st.laplace(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.laplace(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.laplace(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.laplace(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.laplace(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.laplace(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.logistic(0.5, 0.5), float) assert_type(random_st.logistic(0.5, 0.5, size=None), float) assert_type(random_st.logistic(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.logistic(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.logistic(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.logistic(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.logistic(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.logistic(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(random_st.logistic(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.logistic(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.logistic(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.logistic(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) 
-assert_type(random_st.logistic(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.logistic(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.logistic(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.logistic(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.logistic(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.logistic(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.lognormal(0.5, 0.5), float) assert_type(random_st.lognormal(0.5, 0.5, size=None), float) assert_type(random_st.lognormal(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.lognormal(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.lognormal(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.lognormal(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.lognormal(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(random_st.lognormal(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.lognormal(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.lognormal(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.lognormal(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) -assert_type(random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.lognormal(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.lognormal(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) 
assert_type(random_st.lognormal(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.noncentral_chisquare(0.5, 0.5), float) assert_type(random_st.noncentral_chisquare(0.5, 0.5, size=None), float) assert_type(random_st.noncentral_chisquare(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_chisquare(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_chisquare(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.noncentral_chisquare(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.noncentral_chisquare(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(random_st.noncentral_chisquare(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_chisquare(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_chisquare(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.noncentral_chisquare(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.normal(0.5, 0.5), float) 
assert_type(random_st.normal(0.5, 0.5, size=None), float) assert_type(random_st.normal(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.normal(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.normal(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.normal(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.normal(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.normal(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(random_st.normal(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.normal(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.normal(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.normal(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) -assert_type(random_st.normal(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.normal(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.normal(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.normal(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.normal(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.normal(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.normal(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.triangular(0.1, 0.5, 0.9), float) assert_type(random_st.triangular(0.1, 0.5, 0.9, size=None), float) assert_type(random_st.triangular(0.1, 0.5, 0.9, size=1), npt.NDArray[np.float64]) -assert_type(random_st.triangular(D_arr_0p1, 0.5, 0.9), npt.NDArray[np.float64]) -assert_type(random_st.triangular(0.1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_0p1, 0.5, 0.9), npt.NDArray[np.float64] | Any) +assert_type(random_st.triangular(0.1, D_arr_0p5, 0.9), npt.NDArray[np.float64] | Any) 
assert_type(random_st.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) assert_type(random_st.triangular(0.1, D_arr_0p5, 0.9, size=1), npt.NDArray[np.float64]) -assert_type(random_st.triangular(D_arr_like_0p1, 0.5, D_arr_0p9), npt.NDArray[np.float64]) -assert_type(random_st.triangular(0.5, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) -assert_type(random_st.triangular(D_arr_0p1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) -assert_type(random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_like_0p1, 0.5, D_arr_0p9), npt.NDArray[np.float64] | Any) +assert_type(random_st.triangular(0.5, D_arr_like_0p5, 0.9), npt.NDArray[np.float64] | Any) +assert_type(random_st.triangular(D_arr_0p1, D_arr_0p5, 0.9), npt.NDArray[np.float64] | Any) +assert_type(random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9), npt.NDArray[np.float64] | Any) assert_type(random_st.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1), npt.NDArray[np.float64]) assert_type(random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) assert_type(random_st.noncentral_f(0.1, 0.5, 0.9), float) assert_type(random_st.noncentral_f(0.1, 0.5, 0.9, size=None), float) assert_type(random_st.noncentral_f(0.1, 0.5, 0.9, size=1), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_f(D_arr_0p1, 0.5, 0.9), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_f(0.1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_0p1, 0.5, 0.9), npt.NDArray[np.float64] | Any) +assert_type(random_st.noncentral_f(0.1, D_arr_0p5, 0.9), npt.NDArray[np.float64] | Any) assert_type(random_st.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) assert_type(random_st.noncentral_f(0.1, D_arr_0p5, 0.9, size=1), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9), npt.NDArray[np.float64]) 
-assert_type(random_st.noncentral_f(0.5, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9), npt.NDArray[np.float64] | Any) +assert_type(random_st.noncentral_f(0.5, D_arr_like_0p5, 0.9), npt.NDArray[np.float64] | Any) +assert_type(random_st.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9), npt.NDArray[np.float64] | Any) +assert_type(random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9), npt.NDArray[np.float64] | Any) assert_type(random_st.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1), npt.NDArray[np.float64]) assert_type(random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) assert_type(random_st.binomial(10, 0.5), int) assert_type(random_st.binomial(10, 0.5, size=None), int) assert_type(random_st.binomial(10, 0.5, size=1), npt.NDArray[np.long]) -assert_type(random_st.binomial(I_arr_10, 0.5), npt.NDArray[np.long]) -assert_type(random_st.binomial(10, D_arr_0p5), npt.NDArray[np.long]) +assert_type(random_st.binomial(I_arr_10, 0.5), npt.NDArray[np.long] | Any) +assert_type(random_st.binomial(10, D_arr_0p5), npt.NDArray[np.long] | Any) assert_type(random_st.binomial(I_arr_10, 0.5, size=1), npt.NDArray[np.long]) assert_type(random_st.binomial(10, D_arr_0p5, size=1), npt.NDArray[np.long]) -assert_type(random_st.binomial(I_arr_like_10, 0.5), npt.NDArray[np.long]) -assert_type(random_st.binomial(10, D_arr_like_0p5), npt.NDArray[np.long]) -assert_type(random_st.binomial(I_arr_10, D_arr_0p5), npt.NDArray[np.long]) -assert_type(random_st.binomial(I_arr_like_10, D_arr_like_0p5), npt.NDArray[np.long]) +assert_type(random_st.binomial(I_arr_like_10, 0.5), npt.NDArray[np.long] | Any) +assert_type(random_st.binomial(10, D_arr_like_0p5), npt.NDArray[np.long] | Any) 
+assert_type(random_st.binomial(I_arr_10, D_arr_0p5), npt.NDArray[np.long] | Any) +assert_type(random_st.binomial(I_arr_like_10, D_arr_like_0p5), npt.NDArray[np.long] | Any) assert_type(random_st.binomial(I_arr_10, D_arr_0p5, size=1), npt.NDArray[np.long]) assert_type(random_st.binomial(I_arr_like_10, D_arr_like_0p5, size=1), npt.NDArray[np.long]) assert_type(random_st.negative_binomial(10, 0.5), int) assert_type(random_st.negative_binomial(10, 0.5, size=None), int) assert_type(random_st.negative_binomial(10, 0.5, size=1), npt.NDArray[np.long]) -assert_type(random_st.negative_binomial(I_arr_10, 0.5), npt.NDArray[np.long]) -assert_type(random_st.negative_binomial(10, D_arr_0p5), npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(I_arr_10, 0.5), npt.NDArray[np.long] | Any) +assert_type(random_st.negative_binomial(10, D_arr_0p5), npt.NDArray[np.long] | Any) assert_type(random_st.negative_binomial(I_arr_10, 0.5, size=1), npt.NDArray[np.long]) assert_type(random_st.negative_binomial(10, D_arr_0p5, size=1), npt.NDArray[np.long]) -assert_type(random_st.negative_binomial(I_arr_like_10, 0.5), npt.NDArray[np.long]) -assert_type(random_st.negative_binomial(10, D_arr_like_0p5), npt.NDArray[np.long]) -assert_type(random_st.negative_binomial(I_arr_10, D_arr_0p5), npt.NDArray[np.long]) -assert_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5), npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(I_arr_like_10, 0.5), npt.NDArray[np.long] | Any) +assert_type(random_st.negative_binomial(10, D_arr_like_0p5), npt.NDArray[np.long] | Any) +assert_type(random_st.negative_binomial(I_arr_10, D_arr_0p5), npt.NDArray[np.long] | Any) +assert_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5), npt.NDArray[np.long] | Any) +assert_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5), npt.NDArray[np.long] | Any) assert_type(random_st.negative_binomial(I_arr_10, D_arr_0p5, size=1), npt.NDArray[np.long]) 
assert_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1), npt.NDArray[np.long]) assert_type(random_st.hypergeometric(20, 20, 10), int) assert_type(random_st.hypergeometric(20, 20, 10, size=None), int) assert_type(random_st.hypergeometric(20, 20, 10, size=1), npt.NDArray[np.long]) -assert_type(random_st.hypergeometric(I_arr_20, 20, 10), npt.NDArray[np.long]) -assert_type(random_st.hypergeometric(20, I_arr_20, 10), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(I_arr_20, 20, 10), npt.NDArray[np.long] | Any) +assert_type(random_st.hypergeometric(20, I_arr_20, 10), npt.NDArray[np.long] | Any) assert_type(random_st.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1), npt.NDArray[np.long]) assert_type(random_st.hypergeometric(20, I_arr_20, 10, size=1), npt.NDArray[np.long]) -assert_type(random_st.hypergeometric(I_arr_like_20, 20, I_arr_10), npt.NDArray[np.long]) -assert_type(random_st.hypergeometric(20, I_arr_like_20, 10), npt.NDArray[np.long]) -assert_type(random_st.hypergeometric(I_arr_20, I_arr_20, 10), npt.NDArray[np.long]) -assert_type(random_st.hypergeometric(I_arr_like_20, I_arr_like_20, 10), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(I_arr_like_20, 20, I_arr_10), npt.NDArray[np.long] | Any) +assert_type(random_st.hypergeometric(20, I_arr_like_20, 10), npt.NDArray[np.long] | Any) +assert_type(random_st.hypergeometric(I_arr_20, I_arr_20, 10), npt.NDArray[np.long] | Any) +assert_type(random_st.hypergeometric(I_arr_like_20, I_arr_like_20, 10), npt.NDArray[np.long] | Any) assert_type(random_st.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1), npt.NDArray[np.long]) assert_type(random_st.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1), npt.NDArray[np.long]) assert_type(random_st.randint(0, 100), int) assert_type(random_st.randint(100), int) -assert_type(random_st.randint([100]), npt.NDArray[np.long]) -assert_type(random_st.randint(0, [100]), npt.NDArray[np.long]) 
+assert_type(random_st.randint([100]), npt.NDArray[np.long] | Any) +assert_type(random_st.randint(0, [100]), npt.NDArray[np.long] | Any) assert_type(random_st.randint(2, dtype=bool), bool) assert_type(random_st.randint(0, 2, dtype=bool), bool) -assert_type(random_st.randint(I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) -assert_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) -assert_type(random_st.randint(0, I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) +assert_type(random_st.randint(I_bool_high_open, dtype=bool), npt.NDArray[np.bool] | Any) +assert_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=bool), npt.NDArray[np.bool] | Any) +assert_type(random_st.randint(0, I_bool_high_open, dtype=bool), npt.NDArray[np.bool] | Any) assert_type(random_st.randint(2, dtype=np.bool), np.bool) assert_type(random_st.randint(0, 2, dtype=np.bool), np.bool) -assert_type(random_st.randint(I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) -assert_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) -assert_type(random_st.randint(0, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) +assert_type(random_st.randint(I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool] | Any) +assert_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool] | Any) +assert_type(random_st.randint(0, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool] | Any) assert_type(random_st.randint(256, dtype="u1"), np.uint8) assert_type(random_st.randint(0, 256, dtype="u1"), np.uint8) -assert_type(random_st.randint(I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) -assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) -assert_type(random_st.randint(0, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) +assert_type(random_st.randint(I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8] | Any) +assert_type(random_st.randint(I_u1_low, 
I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8] | Any) +assert_type(random_st.randint(0, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8] | Any) assert_type(random_st.randint(256, dtype="uint8"), np.uint8) assert_type(random_st.randint(0, 256, dtype="uint8"), np.uint8) -assert_type(random_st.randint(I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) -assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) -assert_type(random_st.randint(0, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) +assert_type(random_st.randint(I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8] | Any) +assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8] | Any) +assert_type(random_st.randint(0, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8] | Any) assert_type(random_st.randint(256, dtype=np.uint8), np.uint8) assert_type(random_st.randint(0, 256, dtype=np.uint8), np.uint8) -assert_type(random_st.randint(I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) -assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) -assert_type(random_st.randint(0, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) +assert_type(random_st.randint(I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8] | Any) +assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8] | Any) +assert_type(random_st.randint(0, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8] | Any) assert_type(random_st.randint(65536, dtype="u2"), np.uint16) assert_type(random_st.randint(0, 65536, dtype="u2"), np.uint16) -assert_type(random_st.randint(I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) -assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) -assert_type(random_st.randint(0, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) +assert_type(random_st.randint(I_u2_high_open, dtype="u2"), 
npt.NDArray[np.uint16] | Any) +assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16] | Any) +assert_type(random_st.randint(0, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16] | Any) assert_type(random_st.randint(65536, dtype="uint16"), np.uint16) assert_type(random_st.randint(0, 65536, dtype="uint16"), np.uint16) -assert_type(random_st.randint(I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) -assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) -assert_type(random_st.randint(0, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) +assert_type(random_st.randint(I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16] | Any) +assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16] | Any) +assert_type(random_st.randint(0, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16] | Any) assert_type(random_st.randint(65536, dtype=np.uint16), np.uint16) assert_type(random_st.randint(0, 65536, dtype=np.uint16), np.uint16) -assert_type(random_st.randint(I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) -assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) -assert_type(random_st.randint(0, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) +assert_type(random_st.randint(I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16] | Any) +assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16] | Any) +assert_type(random_st.randint(0, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16] | Any) assert_type(random_st.randint(4294967296, dtype="u4"), np.uint32) assert_type(random_st.randint(0, 4294967296, dtype="u4"), np.uint32) -assert_type(random_st.randint(I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) -assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) -assert_type(random_st.randint(0, 
I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) +assert_type(random_st.randint(I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32] | Any) +assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32] | Any) +assert_type(random_st.randint(0, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32] | Any) assert_type(random_st.randint(4294967296, dtype="uint32"), np.uint32) assert_type(random_st.randint(0, 4294967296, dtype="uint32"), np.uint32) -assert_type(random_st.randint(I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) -assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) -assert_type(random_st.randint(0, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) +assert_type(random_st.randint(I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32] | Any) +assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32] | Any) +assert_type(random_st.randint(0, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32] | Any) assert_type(random_st.randint(4294967296, dtype=np.uint32), np.uint32) assert_type(random_st.randint(0, 4294967296, dtype=np.uint32), np.uint32) -assert_type(random_st.randint(I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) -assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) -assert_type(random_st.randint(0, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) +assert_type(random_st.randint(I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32] | Any) +assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32] | Any) +assert_type(random_st.randint(0, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32] | Any) assert_type(random_st.randint(4294967296, dtype=np.uint), np.uint) assert_type(random_st.randint(0, 4294967296, dtype=np.uint), np.uint) -assert_type(random_st.randint(I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) 
-assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) -assert_type(random_st.randint(0, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) +assert_type(random_st.randint(I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint] | Any) +assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint] | Any) +assert_type(random_st.randint(0, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint] | Any) assert_type(random_st.randint(18446744073709551616, dtype="u8"), np.uint64) assert_type(random_st.randint(0, 18446744073709551616, dtype="u8"), np.uint64) -assert_type(random_st.randint(I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) -assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) -assert_type(random_st.randint(0, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) +assert_type(random_st.randint(I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64] | Any) +assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64] | Any) +assert_type(random_st.randint(0, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64] | Any) assert_type(random_st.randint(18446744073709551616, dtype="uint64"), np.uint64) assert_type(random_st.randint(0, 18446744073709551616, dtype="uint64"), np.uint64) -assert_type(random_st.randint(I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) -assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) -assert_type(random_st.randint(0, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) +assert_type(random_st.randint(I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64] | Any) +assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64] | Any) +assert_type(random_st.randint(0, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64] | Any) assert_type(random_st.randint(18446744073709551616, dtype=np.uint64), np.uint64) 
assert_type(random_st.randint(0, 18446744073709551616, dtype=np.uint64), np.uint64) -assert_type(random_st.randint(I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) -assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) -assert_type(random_st.randint(0, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) +assert_type(random_st.randint(I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64] | Any) +assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64] | Any) +assert_type(random_st.randint(0, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64] | Any) assert_type(random_st.randint(128, dtype="i1"), np.int8) assert_type(random_st.randint(-128, 128, dtype="i1"), np.int8) -assert_type(random_st.randint(I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) -assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) -assert_type(random_st.randint(-128, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) +assert_type(random_st.randint(I_i1_high_open, dtype="i1"), npt.NDArray[np.int8] | Any) +assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8] | Any) +assert_type(random_st.randint(-128, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8] | Any) assert_type(random_st.randint(128, dtype="int8"), np.int8) assert_type(random_st.randint(-128, 128, dtype="int8"), np.int8) -assert_type(random_st.randint(I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) -assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) -assert_type(random_st.randint(-128, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) +assert_type(random_st.randint(I_i1_high_open, dtype="int8"), npt.NDArray[np.int8] | Any) +assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8] | Any) +assert_type(random_st.randint(-128, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8] | 
Any) assert_type(random_st.randint(128, dtype=np.int8), np.int8) assert_type(random_st.randint(-128, 128, dtype=np.int8), np.int8) -assert_type(random_st.randint(I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) -assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) -assert_type(random_st.randint(-128, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) +assert_type(random_st.randint(I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8] | Any) +assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8] | Any) +assert_type(random_st.randint(-128, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8] | Any) assert_type(random_st.randint(32768, dtype="i2"), np.int16) assert_type(random_st.randint(-32768, 32768, dtype="i2"), np.int16) -assert_type(random_st.randint(I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) -assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) -assert_type(random_st.randint(-32768, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) +assert_type(random_st.randint(I_i2_high_open, dtype="i2"), npt.NDArray[np.int16] | Any) +assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16] | Any) +assert_type(random_st.randint(-32768, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16] | Any) assert_type(random_st.randint(32768, dtype="int16"), np.int16) assert_type(random_st.randint(-32768, 32768, dtype="int16"), np.int16) -assert_type(random_st.randint(I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) -assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) -assert_type(random_st.randint(-32768, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) +assert_type(random_st.randint(I_i2_high_open, dtype="int16"), npt.NDArray[np.int16] | Any) +assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16] | Any) 
+assert_type(random_st.randint(-32768, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16] | Any) assert_type(random_st.randint(32768, dtype=np.int16), np.int16) assert_type(random_st.randint(-32768, 32768, dtype=np.int16), np.int16) -assert_type(random_st.randint(I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) -assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) -assert_type(random_st.randint(-32768, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) +assert_type(random_st.randint(I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16] | Any) +assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16] | Any) +assert_type(random_st.randint(-32768, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16] | Any) assert_type(random_st.randint(2147483648, dtype="i4"), np.int32) assert_type(random_st.randint(-2147483648, 2147483648, dtype="i4"), np.int32) -assert_type(random_st.randint(I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) -assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) -assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) +assert_type(random_st.randint(I_i4_high_open, dtype="i4"), npt.NDArray[np.int32] | Any) +assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32] | Any) +assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32] | Any) assert_type(random_st.randint(2147483648, dtype="int32"), np.int32) assert_type(random_st.randint(-2147483648, 2147483648, dtype="int32"), np.int32) -assert_type(random_st.randint(I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) -assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) -assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) +assert_type(random_st.randint(I_i4_high_open, 
dtype="int32"), npt.NDArray[np.int32] | Any) +assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32] | Any) +assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32] | Any) assert_type(random_st.randint(2147483648, dtype=np.int32), np.int32) assert_type(random_st.randint(-2147483648, 2147483648, dtype=np.int32), np.int32) -assert_type(random_st.randint(I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) -assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) -assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) +assert_type(random_st.randint(I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32] | Any) +assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32] | Any) +assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32] | Any) assert_type(random_st.randint(2147483648, dtype=np.int_), np.int_) assert_type(random_st.randint(-2147483648, 2147483648, dtype=np.int_), np.int_) -assert_type(random_st.randint(I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_]) -assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_]) -assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_]) +assert_type(random_st.randint(I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_] | Any) +assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_] | Any) +assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_] | Any) assert_type(random_st.randint(9223372036854775808, dtype="i8"), np.int64) assert_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype="i8"), np.int64) -assert_type(random_st.randint(I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) 
-assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) -assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) +assert_type(random_st.randint(I_i8_high_open, dtype="i8"), npt.NDArray[np.int64] | Any) +assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64] | Any) +assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64] | Any) assert_type(random_st.randint(9223372036854775808, dtype="int64"), np.int64) assert_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype="int64"), np.int64) -assert_type(random_st.randint(I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) -assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) -assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) +assert_type(random_st.randint(I_i8_high_open, dtype="int64"), npt.NDArray[np.int64] | Any) +assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64] | Any) +assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64] | Any) assert_type(random_st.randint(9223372036854775808, dtype=np.int64), np.int64) assert_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype=np.int64), np.int64) -assert_type(random_st.randint(I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(random_st.randint(I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64] | Any) +assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64] | Any) +assert_type(random_st.randint(-9223372036854775808, 
I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64] | Any) assert_type(random_st._bit_generator, np.random.BitGenerator) @@ -1551,5 +1352,5 @@ assert_type(random_st.tomaxint(), int) assert_type(random_st.tomaxint(1), npt.NDArray[np.int64]) assert_type(random_st.tomaxint((1,)), npt.NDArray[np.int64]) -assert_type(np.random.set_bit_generator(pcg64), None) -assert_type(np.random.get_bit_generator(), np.random.BitGenerator) +assert_type(np.random.mtrand.set_bit_generator(pcg64), None) +assert_type(np.random.mtrand.get_bit_generator(), np.random.BitGenerator) diff --git a/numpy/typing/tests/data/reveal/rec.pyi b/numpy/typing/tests/data/reveal/rec.pyi index f2ae0891b485..da66ab003078 100644 --- a/numpy/typing/tests/data/reveal/rec.pyi +++ b/numpy/typing/tests/data/reveal/rec.pyi @@ -1,17 +1,13 @@ import io -import sys -from typing import Any +from typing import Any, assert_type import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +type _RecArray = np.recarray[tuple[Any, ...], np.dtype[np.record]] AR_i8: npt.NDArray[np.int64] -REC_AR_V: np.recarray[Any, np.dtype[np.record]] +REC_AR_V: _RecArray AR_LIST: list[npt.NDArray[np.int64]] record: np.record @@ -47,7 +43,7 @@ assert_type( order="K", byteorder="|", ), - np.recarray[Any, np.dtype[np.record]], + _RecArray, ) assert_type( @@ -56,13 +52,13 @@ assert_type( dtype=[("f8", np.float64), ("i8", np.int64)], strides=(5, 5), ), - np.recarray[Any, np.dtype[Any]], + np.recarray, ) -assert_type(np.rec.fromarrays(AR_LIST), np.recarray[Any, np.dtype[Any]]) +assert_type(np.rec.fromarrays(AR_LIST), np.recarray) assert_type( np.rec.fromarrays(AR_LIST, dtype=np.int64), - np.recarray[Any, np.dtype[Any]], + np.recarray, ) assert_type( np.rec.fromarrays( @@ -70,12 +66,12 @@ assert_type( formats=[np.int64, np.float64], names=["i8", "f8"] ), - np.recarray[Any, np.dtype[np.record]], + _RecArray, ) assert_type( - 
np.rec.fromrecords((1, 1.5)), - np.recarray[Any, np.dtype[np.record]] + np.rec.fromrecords((1, 1.5)), + _RecArray ) assert_type( @@ -83,7 +79,7 @@ assert_type( [(1, 1.5)], dtype=[("i8", np.int64), ("f8", np.float64)], ), - np.recarray[Any, np.dtype[np.record]], + _RecArray, ) assert_type( @@ -92,7 +88,7 @@ assert_type( formats=[np.int64, np.float64], names=["i8", "f8"] ), - np.recarray[Any, np.dtype[np.record]], + _RecArray, ) assert_type( @@ -100,7 +96,7 @@ assert_type( b"(1, 1.5)", dtype=[("i8", np.int64), ("f8", np.float64)], ), - np.recarray[Any, np.dtype[np.record]], + _RecArray, ) assert_type( @@ -109,13 +105,16 @@ assert_type( formats=[np.int64, np.float64], names=["i8", "f8"] ), - np.recarray[Any, np.dtype[np.record]], + _RecArray, ) -assert_type(np.rec.fromfile( - "test_file.txt", - dtype=[("i8", np.int64), ("f8", np.float64)], -), np.recarray[Any, np.dtype[Any]]) +assert_type( + np.rec.fromfile( + "test_file.txt", + dtype=[("i8", np.int64), ("f8", np.float64)], + ), + np.recarray, +) assert_type( np.rec.fromfile( @@ -123,14 +122,14 @@ assert_type( formats=[np.int64, np.float64], names=["i8", "f8"] ), - np.recarray[Any, np.dtype[np.record]], + _RecArray, ) -assert_type(np.rec.array(AR_i8), np.recarray[Any, np.dtype[np.int64]]) +assert_type(np.rec.array(AR_i8), np.recarray[tuple[Any, ...], np.dtype[np.int64]]) assert_type( np.rec.array([(1, 1.5)], dtype=[("i8", np.int64), ("f8", np.float64)]), - np.recarray[Any, np.dtype[Any]], + np.recarray, ) assert_type( @@ -139,7 +138,7 @@ assert_type( formats=[np.int64, np.float64], names=["i8", "f8"] ), - np.recarray[Any, np.dtype[np.record]], + _RecArray, ) assert_type( @@ -148,7 +147,7 @@ assert_type( dtype=np.float64, shape=(10, 3), ), - np.recarray[Any, np.dtype[Any]], + np.recarray, ) assert_type( @@ -158,15 +157,15 @@ assert_type( names=["i8", "f8"], shape=(10, 3), ), - np.recarray[Any, np.dtype[np.record]], + _RecArray, ) assert_type( np.rec.array(file_obj, dtype=np.float64), - np.recarray[Any, np.dtype[Any]], 
+ np.recarray, ) assert_type( np.rec.array(file_obj, formats=[np.int64, np.float64], names=["i8", "f8"]), - np.recarray[Any, np.dtype[np.record]], + _RecArray, ) diff --git a/numpy/typing/tests/data/reveal/scalars.pyi b/numpy/typing/tests/data/reveal/scalars.pyi index 95775e9a8dbe..c56c8e88092c 100644 --- a/numpy/typing/tests/data/reveal/scalars.pyi +++ b/numpy/typing/tests/data/reveal/scalars.pyi @@ -1,13 +1,6 @@ -import sys -from typing import Any, Literal +from typing import Any, Literal, assert_type import numpy as np -import numpy.typing as npt - -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type b: np.bool u8: np.uint64 @@ -19,6 +12,11 @@ m: np.timedelta64 U: np.str_ S: np.bytes_ V: np.void +O: np.object_ # cannot exists at runtime + +array_nd: np.ndarray[Any, Any] +array_0d: np.ndarray[tuple[()], Any] +array_2d_2x2: np.ndarray[tuple[Literal[2], Literal[2]], Any] assert_type(c8.real, np.float32) assert_type(c8.imag, np.float32) @@ -42,15 +40,103 @@ assert_type(c8.dtype, np.dtype[np.complex64]) assert_type(c8.real, np.float32) assert_type(c16.imag, np.float64) -assert_type(np.str_('foo'), np.str_) +assert_type(np.str_("foo"), np.str_) + +# Indexing +assert_type(b[()], np.bool) +assert_type(i8[()], np.int64) +assert_type(u8[()], np.uint64) +assert_type(f8[()], np.float64) +assert_type(c8[()], np.complex64) +assert_type(c16[()], np.complex128) +assert_type(U[()], np.str_) +assert_type(S[()], np.bytes_) +assert_type(V[()], np.void) + +assert_type(b[...], np.ndarray[tuple[()], np.dtype[np.bool]]) +assert_type(b[(...,)], np.ndarray[tuple[()], np.dtype[np.bool]]) +assert_type(i8[...], np.ndarray[tuple[()], np.dtype[np.int64]]) +assert_type(i8[(...,)], np.ndarray[tuple[()], np.dtype[np.int64]]) +assert_type(u8[...], np.ndarray[tuple[()], np.dtype[np.uint64]]) +assert_type(u8[(...,)], np.ndarray[tuple[()], np.dtype[np.uint64]]) +assert_type(f8[...], np.ndarray[tuple[()], np.dtype[np.float64]]) 
+assert_type(f8[(...,)], np.ndarray[tuple[()], np.dtype[np.float64]]) +assert_type(c8[...], np.ndarray[tuple[()], np.dtype[np.complex64]]) +assert_type(c8[(...,)], np.ndarray[tuple[()], np.dtype[np.complex64]]) +assert_type(c16[...], np.ndarray[tuple[()], np.dtype[np.complex128]]) +assert_type(c16[(...,)], np.ndarray[tuple[()], np.dtype[np.complex128]]) +assert_type(U[...], np.ndarray[tuple[()], np.dtype[np.str_]]) +assert_type(U[(...,)], np.ndarray[tuple[()], np.dtype[np.str_]]) +assert_type(S[...], np.ndarray[tuple[()], np.dtype[np.bytes_]]) +assert_type(S[(...,)], np.ndarray[tuple[()], np.dtype[np.bytes_]]) +assert_type(V[...], np.ndarray[tuple[()], np.dtype[np.void]]) +assert_type(V[(...,)], np.ndarray[tuple[()], np.dtype[np.void]]) + +None1 = (None,) +None2 = (None, None) +None3 = (None, None, None) +None4 = (None, None, None, None) + +assert_type(b[None], np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(b[None1], np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(b[None2], np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(b[None3], np.ndarray[tuple[int, int, int], np.dtype[np.bool]]) +assert_type(b[None4], np.ndarray[tuple[Any, ...], np.dtype[np.bool]]) + +assert_type(u8[None], np.ndarray[tuple[int], np.dtype[np.uint64]]) +assert_type(u8[None1], np.ndarray[tuple[int], np.dtype[np.uint64]]) +assert_type(u8[None2], np.ndarray[tuple[int, int], np.dtype[np.uint64]]) +assert_type(u8[None3], np.ndarray[tuple[int, int, int], np.dtype[np.uint64]]) +assert_type(u8[None4], np.ndarray[tuple[Any, ...], np.dtype[np.uint64]]) + +assert_type(i8[None], np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(i8[None1], np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(i8[None2], np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(i8[None3], np.ndarray[tuple[int, int, int], np.dtype[np.int64]]) +assert_type(i8[None4], np.ndarray[tuple[Any, ...], np.dtype[np.int64]]) + +assert_type(f8[None], np.ndarray[tuple[int], np.dtype[np.float64]]) 
+assert_type(f8[None1], np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(f8[None2], np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(f8[None3], np.ndarray[tuple[int, int, int], np.dtype[np.float64]]) +assert_type(f8[None4], np.ndarray[tuple[Any, ...], np.dtype[np.float64]]) +assert_type(c8[None], np.ndarray[tuple[int], np.dtype[np.complex64]]) +assert_type(c8[None1], np.ndarray[tuple[int], np.dtype[np.complex64]]) +assert_type(c8[None2], np.ndarray[tuple[int, int], np.dtype[np.complex64]]) +assert_type(c8[None3], np.ndarray[tuple[int, int, int], np.dtype[np.complex64]]) +assert_type(c8[None4], np.ndarray[tuple[Any, ...], np.dtype[np.complex64]]) + +assert_type(c16[None], np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(c16[None1], np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(c16[None2], np.ndarray[tuple[int, int], np.dtype[np.complex128]]) +assert_type(c16[None3], np.ndarray[tuple[int, int, int], np.dtype[np.complex128]]) +assert_type(c16[None4], np.ndarray[tuple[Any, ...], np.dtype[np.complex128]]) + +assert_type(U[None], np.ndarray[tuple[int], np.dtype[np.str_]]) +assert_type(U[None1], np.ndarray[tuple[int], np.dtype[np.str_]]) +assert_type(U[None2], np.ndarray[tuple[int, int], np.dtype[np.str_]]) +assert_type(U[None3], np.ndarray[tuple[int, int, int], np.dtype[np.str_]]) +assert_type(U[None4], np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) + +assert_type(S[None], np.ndarray[tuple[int], np.dtype[np.bytes_]]) +assert_type(S[None1], np.ndarray[tuple[int], np.dtype[np.bytes_]]) +assert_type(S[None2], np.ndarray[tuple[int, int], np.dtype[np.bytes_]]) +assert_type(S[None3], np.ndarray[tuple[int, int, int], np.dtype[np.bytes_]]) +assert_type(S[None4], np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) + +assert_type(V[None], np.ndarray[tuple[int], np.dtype[np.void]]) +assert_type(V[None1], np.ndarray[tuple[int], np.dtype[np.void]]) +assert_type(V[None2], np.ndarray[tuple[int, int], np.dtype[np.void]]) 
+assert_type(V[None3], np.ndarray[tuple[int, int, int], np.dtype[np.void]]) +assert_type(V[None4], np.ndarray[tuple[Any, ...], np.dtype[np.void]]) assert_type(V[0], Any) assert_type(V["field1"], Any) assert_type(V[["field1", "field2"]], np.void) V[0] = 5 # Aliases -assert_type(np.bool_(), np.bool) +assert_type(np.bool_(), np.bool[Literal[False]]) assert_type(np.byte(), np.byte) assert_type(np.short(), np.short) assert_type(np.intc(), np.intc) @@ -92,29 +178,38 @@ assert_type(c16.tolist(), complex) assert_type(U.tolist(), str) assert_type(S.tolist(), bytes) -assert_type(b.ravel(), npt.NDArray[np.bool]) -assert_type(i8.ravel(), npt.NDArray[np.int64]) -assert_type(u8.ravel(), npt.NDArray[np.uint64]) -assert_type(f8.ravel(), npt.NDArray[np.float64]) -assert_type(c16.ravel(), npt.NDArray[np.complex128]) -assert_type(U.ravel(), npt.NDArray[np.str_]) -assert_type(S.ravel(), npt.NDArray[np.bytes_]) - -assert_type(b.flatten(), npt.NDArray[np.bool]) -assert_type(i8.flatten(), npt.NDArray[np.int64]) -assert_type(u8.flatten(), npt.NDArray[np.uint64]) -assert_type(f8.flatten(), npt.NDArray[np.float64]) -assert_type(c16.flatten(), npt.NDArray[np.complex128]) -assert_type(U.flatten(), npt.NDArray[np.str_]) -assert_type(S.flatten(), npt.NDArray[np.bytes_]) - -assert_type(b.reshape(1), npt.NDArray[np.bool]) -assert_type(i8.reshape(1), npt.NDArray[np.int64]) -assert_type(u8.reshape(1), npt.NDArray[np.uint64]) -assert_type(f8.reshape(1), npt.NDArray[np.float64]) -assert_type(c16.reshape(1), npt.NDArray[np.complex128]) -assert_type(U.reshape(1), npt.NDArray[np.str_]) -assert_type(S.reshape(1), npt.NDArray[np.bytes_]) +assert_type(b.ravel(), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(i8.ravel(), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(u8.ravel(), np.ndarray[tuple[int], np.dtype[np.uint64]]) +assert_type(f8.ravel(), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(c16.ravel(), np.ndarray[tuple[int], np.dtype[np.complex128]]) 
+assert_type(U.ravel(), np.ndarray[tuple[int], np.dtype[np.str_]]) +assert_type(S.ravel(), np.ndarray[tuple[int], np.dtype[np.bytes_]]) + +assert_type(b.flatten(), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(i8.flatten(), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(u8.flatten(), np.ndarray[tuple[int], np.dtype[np.uint64]]) +assert_type(f8.flatten(), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(c16.flatten(), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(U.flatten(), np.ndarray[tuple[int], np.dtype[np.str_]]) +assert_type(S.flatten(), np.ndarray[tuple[int], np.dtype[np.bytes_]]) + +assert_type(b.reshape(()), np.bool) +assert_type(i8.reshape([]), np.int64) +assert_type(b.reshape(1), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(i8.reshape(-1), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(u8.reshape(1, 1), np.ndarray[tuple[int, int], np.dtype[np.uint64]]) +assert_type(f8.reshape(1, -1), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(c16.reshape(1, 1, 1), np.ndarray[tuple[int, int, int], np.dtype[np.complex128]]) +assert_type(U.reshape(1, 1, 1, 1), np.ndarray[tuple[int, int, int, int], np.dtype[np.str_]]) +assert_type( + S.reshape(1, 1, 1, 1, 1), + np.ndarray[ + # len(shape) >= 5 + tuple[int, int, int, int, int, *tuple[int, ...]], + np.dtype[np.bytes_], + ], +) assert_type(i8.astype(float), Any) assert_type(i8.astype(np.float64), np.float64) @@ -156,3 +251,27 @@ assert_type(f8.__ceil__(), int) assert_type(f8.__floor__(), int) assert_type(i8.is_integer(), Literal[True]) + +assert_type(O.real, np.object_) +assert_type(O.imag, np.object_) +assert_type(int(O), int) +assert_type(float(O), float) +assert_type(complex(O), complex) + +# These fail fail because of a mypy __new__ bug: +# https://github.com/python/mypy/issues/15182 +# According to the typing spec, the following statements are valid, see +# https://typing.readthedocs.io/en/latest/spec/constructors.html#new-method + +# 
assert_type(np.object_(), None) +# assert_type(np.object_(None), None) +# assert_type(np.object_(array_nd), np.ndarray[Any, np.dtype[np.object_]]) +# assert_type(np.object_([]), npt.NDArray[np.object_]) +# assert_type(np.object_(()), npt.NDArray[np.object_]) +# assert_type(np.object_(range(4)), npt.NDArray[np.object_]) +# assert_type(np.object_(+42), int) +# assert_type(np.object_(1 / 137), float) +# assert_type(np.object_('Developers! ' * (1 << 6)), str) +# assert_type(np.object_(object()), object) +# assert_type(np.object_({False, True, NotADirectoryError}), set[Any]) +# assert_type(np.object_({'spam': 'food', 'ham': 'food'}), dict[str, str]) diff --git a/numpy/typing/tests/data/reveal/shape.pyi b/numpy/typing/tests/data/reveal/shape.pyi new file mode 100644 index 000000000000..2406a39f9682 --- /dev/null +++ b/numpy/typing/tests/data/reveal/shape.pyi @@ -0,0 +1,13 @@ +from typing import Any, NamedTuple, assert_type + +import numpy as np + +# Subtype of tuple[int, int] +class XYGrid(NamedTuple): + x_axis: int + y_axis: int + +arr: np.ndarray[XYGrid, Any] + +# Test shape property matches shape typevar +assert_type(arr.shape, XYGrid) diff --git a/numpy/typing/tests/data/reveal/shape_base.pyi b/numpy/typing/tests/data/reveal/shape_base.pyi index 69940cc1ac2c..ce033e97d070 100644 --- a/numpy/typing/tests/data/reveal/shape_base.pyi +++ b/numpy/typing/tests/data/reveal/shape_base.pyi @@ -1,14 +1,8 @@ -import sys -from typing import Any +from typing import Any, Self, assert_type import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - i8: np.int64 f8: np.float64 @@ -18,6 +12,15 @@ AR_f8: npt.NDArray[np.float64] AR_LIKE_f8: list[float] +# Duck-typed class implementing _SupportsSplitOps protocol for testing +class _SplitableArray: + shape: tuple[int, ...] + ndim: int + def swapaxes(self, axis1: int, axis2: int, /) -> Self: ... 
+ def __getitem__(self, key: Any, /) -> Self: ... + +splitable: _SplitableArray + assert_type(np.take_along_axis(AR_f8, AR_i8, axis=1), npt.NDArray[np.float64]) assert_type(np.take_along_axis(f8, AR_i8, axis=None), npt.NDArray[np.float64]) @@ -34,22 +37,30 @@ assert_type(np.dstack([AR_LIKE_f8]), npt.NDArray[Any]) assert_type(np.array_split(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) assert_type(np.array_split(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) +assert_type(np.array_split(splitable, 2), list[_SplitableArray]) assert_type(np.split(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) assert_type(np.split(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) +assert_type(np.split(splitable, 2), list[_SplitableArray]) assert_type(np.hsplit(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) assert_type(np.hsplit(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) +assert_type(np.hsplit(splitable, 2), list[_SplitableArray]) assert_type(np.vsplit(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) assert_type(np.vsplit(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) +assert_type(np.vsplit(splitable, 2), list[_SplitableArray]) assert_type(np.dsplit(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) assert_type(np.dsplit(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) +assert_type(np.dsplit(splitable, 2), list[_SplitableArray]) assert_type(np.kron(AR_b, AR_b), npt.NDArray[np.bool]) -assert_type(np.kron(AR_b, AR_i8), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.kron(AR_f8, AR_f8), npt.NDArray[np.floating[Any]]) +assert_type(np.kron(AR_b, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.kron(AR_f8, AR_f8), npt.NDArray[np.floating]) assert_type(np.tile(AR_i8, 5), npt.NDArray[np.int64]) assert_type(np.tile(AR_LIKE_f8, [2, 2]), npt.NDArray[Any]) + +assert_type(np.unstack(AR_i8, axis=0), tuple[npt.NDArray[np.int64], ...]) +assert_type(np.unstack(AR_LIKE_f8, axis=0), tuple[npt.NDArray[Any], ...]) diff --git 
a/numpy/typing/tests/data/reveal/stride_tricks.pyi b/numpy/typing/tests/data/reveal/stride_tricks.pyi index 893e1bc314bc..8fde9b8ae30d 100644 --- a/numpy/typing/tests/data/reveal/stride_tricks.pyi +++ b/numpy/typing/tests/data/reveal/stride_tricks.pyi @@ -1,14 +1,8 @@ -import sys -from typing import Any +from typing import Any, assert_type import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - AR_f8: npt.NDArray[np.float64] AR_LIKE_f: list[float] interface_dict: dict[str, Any] @@ -26,8 +20,8 @@ assert_type(np.broadcast_to(AR_f8, 5), npt.NDArray[np.float64]) assert_type(np.broadcast_to(AR_LIKE_f, (1, 5)), npt.NDArray[Any]) assert_type(np.broadcast_to(AR_f8, [4, 6], subok=True), npt.NDArray[np.float64]) -assert_type(np.broadcast_shapes((1, 2), [3, 1], (3, 2)), tuple[int, ...]) -assert_type(np.broadcast_shapes((6, 7), (5, 6, 1), 7, (5, 1, 7)), tuple[int, ...]) +assert_type(np.broadcast_shapes((1, 2), [3, 1], (3, 2)), tuple[Any, ...]) +assert_type(np.broadcast_shapes((6, 7), (5, 6, 1), 7, (5, 1, 7)), tuple[Any, ...]) assert_type(np.broadcast_arrays(AR_f8, AR_f8), tuple[npt.NDArray[Any], ...]) assert_type(np.broadcast_arrays(AR_f8, AR_LIKE_f), tuple[npt.NDArray[Any], ...]) diff --git a/numpy/typing/tests/data/reveal/strings.pyi b/numpy/typing/tests/data/reveal/strings.pyi index 500a250b055a..166481d80922 100644 --- a/numpy/typing/tests/data/reveal/strings.pyi +++ b/numpy/typing/tests/data/reveal/strings.pyi @@ -1,137 +1,196 @@ -import sys +from typing import assert_type import numpy as np +import numpy._typing as np_t import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +type AR_T_alias = np.ndarray[np_t._AnyShape, np.dtypes.StringDType] +type AR_TU_alias = AR_T_alias | npt.NDArray[np.str_] AR_U: npt.NDArray[np.str_] AR_S: npt.NDArray[np.bytes_] +AR_T: AR_T_alias 
assert_type(np.strings.equal(AR_U, AR_U), npt.NDArray[np.bool]) assert_type(np.strings.equal(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.equal(AR_T, AR_T), npt.NDArray[np.bool]) assert_type(np.strings.not_equal(AR_U, AR_U), npt.NDArray[np.bool]) assert_type(np.strings.not_equal(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.not_equal(AR_T, AR_T), npt.NDArray[np.bool]) assert_type(np.strings.greater_equal(AR_U, AR_U), npt.NDArray[np.bool]) assert_type(np.strings.greater_equal(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.greater_equal(AR_T, AR_T), npt.NDArray[np.bool]) assert_type(np.strings.less_equal(AR_U, AR_U), npt.NDArray[np.bool]) assert_type(np.strings.less_equal(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.less_equal(AR_T, AR_T), npt.NDArray[np.bool]) assert_type(np.strings.greater(AR_U, AR_U), npt.NDArray[np.bool]) assert_type(np.strings.greater(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.greater(AR_T, AR_T), npt.NDArray[np.bool]) assert_type(np.strings.less(AR_U, AR_U), npt.NDArray[np.bool]) assert_type(np.strings.less(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.less(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.add(AR_U, AR_U), npt.NDArray[np.str_]) +assert_type(np.strings.add(AR_S, AR_S), npt.NDArray[np.bytes_]) +assert_type(np.strings.add(AR_T, AR_T), AR_T_alias) assert_type(np.strings.multiply(AR_U, 5), npt.NDArray[np.str_]) assert_type(np.strings.multiply(AR_S, [5, 4, 3]), npt.NDArray[np.bytes_]) +assert_type(np.strings.multiply(AR_T, 5), AR_T_alias) assert_type(np.strings.mod(AR_U, "test"), npt.NDArray[np.str_]) assert_type(np.strings.mod(AR_S, "test"), npt.NDArray[np.bytes_]) +assert_type(np.strings.mod(AR_T, "test"), AR_T_alias) assert_type(np.strings.capitalize(AR_U), npt.NDArray[np.str_]) assert_type(np.strings.capitalize(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.strings.capitalize(AR_T), AR_T_alias) assert_type(np.strings.center(AR_U, 5), 
npt.NDArray[np.str_]) assert_type(np.strings.center(AR_S, [2, 3, 4], b"a"), npt.NDArray[np.bytes_]) +assert_type(np.strings.center(AR_T, 5), AR_T_alias) assert_type(np.strings.encode(AR_U), npt.NDArray[np.bytes_]) +assert_type(np.strings.encode(AR_T), npt.NDArray[np.bytes_]) assert_type(np.strings.decode(AR_S), npt.NDArray[np.str_]) assert_type(np.strings.expandtabs(AR_U), npt.NDArray[np.str_]) assert_type(np.strings.expandtabs(AR_S, tabsize=4), npt.NDArray[np.bytes_]) - -assert_type(np.strings.join(AR_U, "_"), npt.NDArray[np.str_]) -assert_type(np.strings.join(AR_S, [b"_", b""]), npt.NDArray[np.bytes_]) +assert_type(np.strings.expandtabs(AR_T), AR_T_alias) assert_type(np.strings.ljust(AR_U, 5), npt.NDArray[np.str_]) assert_type(np.strings.ljust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.strings.ljust(AR_T, 5), AR_T_alias) +assert_type(np.strings.ljust(AR_T, [4, 2, 1], fillchar=["a", "b", "c"]), AR_T_alias) + assert_type(np.strings.rjust(AR_U, 5), npt.NDArray[np.str_]) assert_type(np.strings.rjust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.strings.rjust(AR_T, 5), AR_T_alias) +assert_type(np.strings.rjust(AR_T, [4, 2, 1], fillchar=["a", "b", "c"]), AR_T_alias) assert_type(np.strings.lstrip(AR_U), npt.NDArray[np.str_]) assert_type(np.strings.lstrip(AR_S, b"_"), npt.NDArray[np.bytes_]) +assert_type(np.strings.lstrip(AR_T), AR_T_alias) +assert_type(np.strings.lstrip(AR_T, "_"), AR_T_alias) + assert_type(np.strings.rstrip(AR_U), npt.NDArray[np.str_]) assert_type(np.strings.rstrip(AR_S, b"_"), npt.NDArray[np.bytes_]) +assert_type(np.strings.rstrip(AR_T), AR_T_alias) +assert_type(np.strings.rstrip(AR_T, "_"), AR_T_alias) + assert_type(np.strings.strip(AR_U), npt.NDArray[np.str_]) assert_type(np.strings.strip(AR_S, b"_"), npt.NDArray[np.bytes_]) +assert_type(np.strings.strip(AR_T), AR_T_alias) +assert_type(np.strings.strip(AR_T, "_"), AR_T_alias) assert_type(np.strings.count(AR_U, "a", 
start=[1, 2, 3]), npt.NDArray[np.int_]) assert_type(np.strings.count(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.strings.count(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.strings.count(AR_T, ["a", "b", "c"], end=9), npt.NDArray[np.int_]) assert_type(np.strings.partition(AR_U, "\n"), npt.NDArray[np.str_]) assert_type(np.strings.partition(AR_S, [b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.strings.partition(AR_T, "\n"), AR_TU_alias) + assert_type(np.strings.rpartition(AR_U, "\n"), npt.NDArray[np.str_]) assert_type(np.strings.rpartition(AR_S, [b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.strings.rpartition(AR_T, "\n"), AR_TU_alias) assert_type(np.strings.replace(AR_U, "_", "-"), npt.NDArray[np.str_]) assert_type(np.strings.replace(AR_S, [b"_", b""], [b"a", b"b"]), npt.NDArray[np.bytes_]) +assert_type(np.strings.replace(AR_T, "_", "_"), AR_TU_alias) -assert_type(np.strings.split(AR_U, "_"), npt.NDArray[np.object_]) -assert_type(np.strings.split(AR_S, maxsplit=[1, 2, 3]), npt.NDArray[np.object_]) -assert_type(np.strings.rsplit(AR_U, "_"), npt.NDArray[np.object_]) -assert_type(np.strings.rsplit(AR_S, maxsplit=[1, 2, 3]), npt.NDArray[np.object_]) +assert_type(np.strings.lower(AR_U), npt.NDArray[np.str_]) +assert_type(np.strings.lower(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.strings.lower(AR_T), AR_T_alias) -assert_type(np.strings.splitlines(AR_U), npt.NDArray[np.object_]) -assert_type(np.strings.splitlines(AR_S, keepends=[True, True, False]), npt.NDArray[np.object_]) +assert_type(np.strings.upper(AR_U), npt.NDArray[np.str_]) +assert_type(np.strings.upper(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.strings.upper(AR_T), AR_T_alias) assert_type(np.strings.swapcase(AR_U), npt.NDArray[np.str_]) assert_type(np.strings.swapcase(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.strings.swapcase(AR_T), AR_T_alias) assert_type(np.strings.title(AR_U), npt.NDArray[np.str_]) 
assert_type(np.strings.title(AR_S), npt.NDArray[np.bytes_]) - -assert_type(np.strings.upper(AR_U), npt.NDArray[np.str_]) -assert_type(np.strings.upper(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.strings.title(AR_T), AR_T_alias) assert_type(np.strings.zfill(AR_U, 5), npt.NDArray[np.str_]) assert_type(np.strings.zfill(AR_S, [2, 3, 4]), npt.NDArray[np.bytes_]) +assert_type(np.strings.zfill(AR_T, 5), AR_T_alias) assert_type(np.strings.endswith(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.bool]) assert_type(np.strings.endswith(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.bool]) +assert_type(np.strings.endswith(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.bool]) + assert_type(np.strings.startswith(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.bool]) assert_type(np.strings.startswith(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.bool]) +assert_type(np.strings.startswith(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.bool]) assert_type(np.strings.find(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) assert_type(np.strings.find(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.strings.find(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) + assert_type(np.strings.rfind(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) assert_type(np.strings.rfind(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.strings.rfind(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) assert_type(np.strings.index(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) assert_type(np.strings.index(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.strings.index(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) + assert_type(np.strings.rindex(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) assert_type(np.strings.rindex(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.strings.rindex(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) assert_type(np.strings.isalpha(AR_U), npt.NDArray[np.bool]) 
assert_type(np.strings.isalpha(AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.isalpha(AR_T), npt.NDArray[np.bool]) assert_type(np.strings.isalnum(AR_U), npt.NDArray[np.bool]) assert_type(np.strings.isalnum(AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.isalnum(AR_T), npt.NDArray[np.bool]) assert_type(np.strings.isdecimal(AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.isdecimal(AR_T), npt.NDArray[np.bool]) assert_type(np.strings.isdigit(AR_U), npt.NDArray[np.bool]) assert_type(np.strings.isdigit(AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.isdigit(AR_T), npt.NDArray[np.bool]) assert_type(np.strings.islower(AR_U), npt.NDArray[np.bool]) assert_type(np.strings.islower(AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.islower(AR_T), npt.NDArray[np.bool]) assert_type(np.strings.isnumeric(AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.isnumeric(AR_T), npt.NDArray[np.bool]) assert_type(np.strings.isspace(AR_U), npt.NDArray[np.bool]) assert_type(np.strings.isspace(AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.isspace(AR_T), npt.NDArray[np.bool]) assert_type(np.strings.istitle(AR_U), npt.NDArray[np.bool]) assert_type(np.strings.istitle(AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.istitle(AR_T), npt.NDArray[np.bool]) assert_type(np.strings.isupper(AR_U), npt.NDArray[np.bool]) assert_type(np.strings.isupper(AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.isupper(AR_T), npt.NDArray[np.bool]) assert_type(np.strings.str_len(AR_U), npt.NDArray[np.int_]) assert_type(np.strings.str_len(AR_S), npt.NDArray[np.int_]) +assert_type(np.strings.str_len(AR_T), npt.NDArray[np.int_]) + +assert_type(np.strings.translate(AR_U, ""), npt.NDArray[np.str_]) +assert_type(np.strings.translate(AR_S, ""), npt.NDArray[np.bytes_]) +assert_type(np.strings.translate(AR_T, ""), AR_T_alias) + +assert_type(np.strings.slice(AR_U, 1, 5, 2), npt.NDArray[np.str_]) +assert_type(np.strings.slice(AR_S, 1, 5, 2), npt.NDArray[np.bytes_]) 
+assert_type(np.strings.slice(AR_T, 1, 5, 2), AR_T_alias) diff --git a/numpy/typing/tests/data/reveal/testing.pyi b/numpy/typing/tests/data/reveal/testing.pyi index 2a0d83493f6e..0361f635a848 100644 --- a/numpy/typing/tests/data/reveal/testing.pyi +++ b/numpy/typing/tests/data/reveal/testing.pyi @@ -1,33 +1,27 @@ +import contextlib import re import sys -import warnings import types import unittest -import contextlib +import warnings from collections.abc import Callable -from typing import Any, TypeVar from pathlib import Path +from typing import Any, assert_type import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - AR_f8: npt.NDArray[np.float64] AR_i8: npt.NDArray[np.int64] bool_obj: bool -suppress_obj: np.testing.suppress_warnings -FT = TypeVar("FT", bound=Callable[..., Any]) +suppress_obj: np.testing.suppress_warnings # type: ignore[deprecated] # pyright: ignore[reportDeprecated] def func() -> int: ... def func2( - x: npt.NDArray[np.number[Any]], - y: npt.NDArray[np.number[Any]], + x: npt.NDArray[np.number], + y: npt.NDArray[np.number], ) -> npt.NDArray[np.bool]: ... 
assert_type(np.testing.KnownFailureException(), np.testing.KnownFailureException) @@ -35,15 +29,15 @@ assert_type(np.testing.IgnoreException(), np.testing.IgnoreException) assert_type( np.testing.clear_and_catch_warnings(modules=[np.testing]), - np.testing._private.utils._clear_and_catch_warnings_without_records, + np.testing.clear_and_catch_warnings[None], ) assert_type( np.testing.clear_and_catch_warnings(True), - np.testing._private.utils._clear_and_catch_warnings_with_records, + np.testing.clear_and_catch_warnings[list[warnings.WarningMessage]], ) assert_type( np.testing.clear_and_catch_warnings(False), - np.testing._private.utils._clear_and_catch_warnings_without_records, + np.testing.clear_and_catch_warnings[None], ) assert_type( np.testing.clear_and_catch_warnings(bool_obj), @@ -63,15 +57,14 @@ with np.testing.clear_and_catch_warnings(True) as c1: with np.testing.clear_and_catch_warnings() as c2: assert_type(c2, None) -assert_type(np.testing.suppress_warnings("once"), np.testing.suppress_warnings) -assert_type(np.testing.suppress_warnings()(func), Callable[[], int]) +assert_type(np.testing.suppress_warnings("once"), np.testing.suppress_warnings) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] +assert_type(np.testing.suppress_warnings()(func), Callable[[], int]) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] assert_type(suppress_obj.filter(RuntimeWarning), None) assert_type(suppress_obj.record(RuntimeWarning), list[warnings.WarningMessage]) with suppress_obj as c3: - assert_type(c3, np.testing.suppress_warnings) + assert_type(c3, np.testing.suppress_warnings) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] assert_type(np.testing.verbose, int) -assert_type(np.testing.IS_PYPY, bool) assert_type(np.testing.HAS_REFCOUNT, bool) assert_type(np.testing.HAS_LAPACK64, bool) @@ -81,7 +74,7 @@ assert_type(np.testing.assert_(2, msg=lambda: "test"), None) if sys.platform == "win32" or sys.platform == "cygwin": 
assert_type(np.testing.memusage(), int) elif sys.platform == "linux": - assert_type(np.testing.memusage(), None | int) + assert_type(np.testing.memusage(), int | None) assert_type(np.testing.jiffies(), int) @@ -95,7 +88,7 @@ assert_type(np.testing.assert_equal({1}, {1}), None) assert_type(np.testing.assert_equal([1, 2, 3], [1, 2, 3], err_msg="fail"), None) assert_type(np.testing.assert_equal(1, 1.0, verbose=True), None) -assert_type(np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1]), None) +assert_type(np.testing.print_assert_equal("Test XYZ of func xyz", [0, 1], [0, 1]), None) assert_type(np.testing.assert_almost_equal(1.0, 1.1), None) assert_type(np.testing.assert_almost_equal([1, 2, 3], [1, 2, 3], err_msg="fail"), None) @@ -153,7 +146,7 @@ assert_type(np.testing.assert_raises_regex(RuntimeWarning, re.compile(b"test"), class Test: ... -def decorate(a: FT) -> FT: +def decorate[FT: Callable[..., Any]](a: FT) -> FT: return a assert_type(np.testing.decorate_methods(Test, decorate), None) @@ -177,8 +170,8 @@ assert_type(np.testing.assert_array_almost_equal_nulp(AR_i8, AR_f8, nulp=2), Non assert_type(np.testing.assert_array_max_ulp(AR_i8, AR_f8, maxulp=2), npt.NDArray[Any]) assert_type(np.testing.assert_array_max_ulp(AR_i8, AR_f8, dtype=np.float32), npt.NDArray[Any]) -assert_type(np.testing.assert_warns(RuntimeWarning), contextlib._GeneratorContextManager[None]) -assert_type(np.testing.assert_warns(RuntimeWarning, func3, 5), bool) +assert_type(np.testing.assert_warns(RuntimeWarning), contextlib._GeneratorContextManager[None]) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] +assert_type(np.testing.assert_warns(RuntimeWarning, func3, 5), bool) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] def func4(a: int, b: str) -> bool: ... 
diff --git a/numpy/typing/tests/data/reveal/twodim_base.pyi b/numpy/typing/tests/data/reveal/twodim_base.pyi index 9d808dbb1e0d..d8c45afe44ab 100644 --- a/numpy/typing/tests/data/reveal/twodim_base.pyi +++ b/numpy/typing/tests/data/reveal/twodim_base.pyi @@ -1,99 +1,223 @@ -import sys -from typing import Any, TypeVar +from typing import Any, assert_type, type_check_only import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - -_SCT = TypeVar("_SCT", bound=np.generic) - - -def func1(ar: npt.NDArray[_SCT], a: int) -> npt.NDArray[_SCT]: - pass - - -def func2(ar: npt.NDArray[np.number[Any]], a: str) -> npt.NDArray[np.float64]: - pass - - -AR_b: npt.NDArray[np.bool] -AR_u: npt.NDArray[np.uint64] -AR_i: npt.NDArray[np.int64] -AR_f: npt.NDArray[np.float64] -AR_c: npt.NDArray[np.complex128] -AR_O: npt.NDArray[np.object_] - -AR_LIKE_b: list[bool] - -assert_type(np.fliplr(AR_b), npt.NDArray[np.bool]) -assert_type(np.fliplr(AR_LIKE_b), npt.NDArray[Any]) - -assert_type(np.flipud(AR_b), npt.NDArray[np.bool]) -assert_type(np.flipud(AR_LIKE_b), npt.NDArray[Any]) - -assert_type(np.eye(10), npt.NDArray[np.float64]) -assert_type(np.eye(10, M=20, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.eye(10, k=2, dtype=int), npt.NDArray[Any]) - -assert_type(np.diag(AR_b), npt.NDArray[np.bool]) -assert_type(np.diag(AR_LIKE_b, k=0), npt.NDArray[Any]) - -assert_type(np.diagflat(AR_b), npt.NDArray[np.bool]) -assert_type(np.diagflat(AR_LIKE_b, k=0), npt.NDArray[Any]) - -assert_type(np.tri(10), npt.NDArray[np.float64]) -assert_type(np.tri(10, M=20, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.tri(10, k=2, dtype=int), npt.NDArray[Any]) - -assert_type(np.tril(AR_b), npt.NDArray[np.bool]) -assert_type(np.tril(AR_LIKE_b, k=0), npt.NDArray[Any]) - -assert_type(np.triu(AR_b), npt.NDArray[np.bool]) -assert_type(np.triu(AR_LIKE_b, k=0), npt.NDArray[Any]) - 
-assert_type(np.vander(AR_b), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.vander(AR_u), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.vander(AR_i, N=2), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.vander(AR_f, increasing=True), npt.NDArray[np.floating[Any]]) -assert_type(np.vander(AR_c), npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(np.vander(AR_O), npt.NDArray[np.object_]) - +type _1D = tuple[int] +type _2D = tuple[int, int] +type _ND = tuple[Any, ...] + +type _Indices2D = tuple[ + np.ndarray[_1D, np.dtype[np.intp]], + np.ndarray[_1D, np.dtype[np.intp]], +] + +### + +_nd_bool: np.ndarray[_ND, np.dtype[np.bool]] +_1d_bool: np.ndarray[_1D, np.dtype[np.bool]] +_2d_bool: np.ndarray[_2D, np.dtype[np.bool]] +_nd_u64: np.ndarray[_ND, np.dtype[np.uint64]] +_nd_i64: np.ndarray[_ND, np.dtype[np.int64]] +_nd_f64: np.ndarray[_ND, np.dtype[np.float64]] +_nd_c128: np.ndarray[_ND, np.dtype[np.complex128]] +_nd_obj: np.ndarray[_ND, np.dtype[np.object_]] + +_to_nd_bool: list[bool] | list[list[bool]] +_to_1d_bool: list[bool] +_to_2d_bool: list[list[bool]] + +_to_1d_f64: list[float] +_to_1d_c128: list[complex] + +@type_check_only +def func1[ScalarT: np.generic](ar: npt.NDArray[ScalarT], a: int) -> npt.NDArray[ScalarT]: ... +@type_check_only +def func2(ar: npt.NDArray[np.number], a: str) -> npt.NDArray[np.float64]: ... 
+ +@type_check_only +class _Cube: + shape = 3, 4 + ndim = 2 + +### + +# fliplr +assert_type(np.fliplr(_nd_bool), np.ndarray[_ND, np.dtype[np.bool]]) +assert_type(np.fliplr(_1d_bool), np.ndarray[_1D, np.dtype[np.bool]]) +assert_type(np.fliplr(_2d_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.fliplr(_to_nd_bool), np.ndarray) +assert_type(np.fliplr(_to_1d_bool), np.ndarray) +assert_type(np.fliplr(_to_2d_bool), np.ndarray) + +# flipud +assert_type(np.flipud(_nd_bool), np.ndarray[_ND, np.dtype[np.bool]]) +assert_type(np.flipud(_1d_bool), np.ndarray[_1D, np.dtype[np.bool]]) +assert_type(np.flipud(_2d_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.flipud(_to_nd_bool), np.ndarray) +assert_type(np.flipud(_to_1d_bool), np.ndarray) +assert_type(np.flipud(_to_2d_bool), np.ndarray) + +# eye +assert_type(np.eye(10), np.ndarray[_2D, np.dtype[np.float64]]) +assert_type(np.eye(10, M=20, dtype=np.int64), np.ndarray[_2D, np.dtype[np.int64]]) +assert_type(np.eye(10, k=2, dtype=int), np.ndarray[_2D]) + +# diag +assert_type(np.diag(_nd_bool), np.ndarray[_ND, np.dtype[np.bool]]) +assert_type(np.diag(_1d_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.diag(_2d_bool), np.ndarray[_1D, np.dtype[np.bool]]) +assert_type(np.diag(_to_nd_bool, k=0), np.ndarray) +assert_type(np.diag(_to_1d_bool, k=0), np.ndarray[_2D]) +assert_type(np.diag(_to_2d_bool, k=0), np.ndarray[_1D]) + +# diagflat +assert_type(np.diagflat(_nd_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.diagflat(_1d_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.diagflat(_2d_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.diagflat(_to_nd_bool, k=0), np.ndarray[_2D]) +assert_type(np.diagflat(_to_1d_bool, k=0), np.ndarray[_2D]) +assert_type(np.diagflat(_to_2d_bool, k=0), np.ndarray[_2D]) + +# tri +assert_type(np.tri(10), np.ndarray[_2D, np.dtype[np.float64]]) +assert_type(np.tri(10, M=20, dtype=np.int64), np.ndarray[_2D, np.dtype[np.int64]]) +assert_type(np.tri(10, k=2, 
dtype=int), np.ndarray[_2D]) + +# tril +assert_type(np.tril(_nd_bool), np.ndarray[_ND, np.dtype[np.bool]]) +assert_type(np.tril(_to_nd_bool, k=0), np.ndarray) +assert_type(np.tril(_to_1d_bool, k=0), np.ndarray) +assert_type(np.tril(_to_2d_bool, k=0), np.ndarray) + +# triu +assert_type(np.triu(_nd_bool), np.ndarray[_ND, np.dtype[np.bool]]) +assert_type(np.triu(_to_nd_bool, k=0), np.ndarray) +assert_type(np.triu(_to_1d_bool, k=0), np.ndarray) +assert_type(np.triu(_to_2d_bool, k=0), np.ndarray) + +# vander +assert_type(np.vander(_nd_bool), np.ndarray[_2D, np.dtype[np.int_]]) +assert_type(np.vander(_nd_u64), np.ndarray[_2D, np.dtype[np.uint64]]) +assert_type(np.vander(_nd_i64, N=2), np.ndarray[_2D, np.dtype[np.int64]]) +assert_type(np.vander(_nd_f64, increasing=True), np.ndarray[_2D, np.dtype[np.float64]]) +assert_type(np.vander(_nd_c128), np.ndarray[_2D, np.dtype[np.complex128]]) +assert_type(np.vander(_nd_obj), np.ndarray[_2D, np.dtype[np.object_]]) + +# histogram2d assert_type( - np.histogram2d(AR_i, AR_b), + np.histogram2d(_to_1d_f64, _to_1d_f64), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.floating[Any]], - npt.NDArray[np.floating[Any]], + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], ], ) assert_type( - np.histogram2d(AR_f, AR_f), + np.histogram2d(_to_1d_c128, _to_1d_c128), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.floating[Any]], - npt.NDArray[np.floating[Any]], + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128 | Any]], + np.ndarray[_1D, np.dtype[np.complex128 | Any]], ], ) assert_type( - np.histogram2d(AR_f, AR_c, weights=AR_LIKE_b), + np.histogram2d(_nd_i64, _nd_bool), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.complexfloating[Any, Any]], - npt.NDArray[np.complexfloating[Any, Any]], + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], + ], +) +assert_type( + 
np.histogram2d(_nd_f64, _nd_i64), + tuple[ + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], + ], +) +assert_type( + np.histogram2d(_nd_i64, _nd_f64), + tuple[ + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], + ], +) +assert_type( + np.histogram2d(_nd_f64, _nd_c128, weights=_to_1d_bool), + tuple[ + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128]], + np.ndarray[_1D, np.dtype[np.complex128]], + ], +) +assert_type( + np.histogram2d(_nd_f64, _nd_c128, bins=8), + tuple[ + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128]], + np.ndarray[_1D, np.dtype[np.complex128]], + ], +) +assert_type( + np.histogram2d(_nd_c128, _nd_f64, bins=(8, 5)), + tuple[ + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128]], + np.ndarray[_1D, np.dtype[np.complex128]], + ], +) +assert_type( + np.histogram2d(_nd_c128, _nd_i64, bins=_nd_u64), + tuple[ + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.uint64]], + np.ndarray[_1D, np.dtype[np.uint64]], + ], +) +assert_type( + np.histogram2d(_nd_c128, _nd_c128, bins=(_nd_u64, _nd_u64)), + tuple[ + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.uint64]], + np.ndarray[_1D, np.dtype[np.uint64]], + ], +) +assert_type( + np.histogram2d(_nd_c128, _nd_c128, bins=(_nd_bool, 8)), + tuple[ + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128 | np.bool]], + np.ndarray[_1D, np.dtype[np.complex128 | np.bool]], + ], +) +assert_type( + np.histogram2d(_nd_c128, _nd_c128, bins=(_to_1d_f64, 8)), + tuple[ + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128 | Any]], + np.ndarray[_1D, np.dtype[np.complex128 | Any]], ], ) -assert_type(np.mask_indices(10, func1), tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]) 
-assert_type(np.mask_indices(8, func2, "0"), tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]) - -assert_type(np.tril_indices(10), tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]) - -assert_type(np.tril_indices_from(AR_b), tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]) - -assert_type(np.triu_indices(10), tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]) - -assert_type(np.triu_indices_from(AR_b), tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]) +# mask_indices +assert_type(np.mask_indices(10, func1), _Indices2D) +assert_type(np.mask_indices(8, func2, "0"), _Indices2D) + +# tril_indices +assert_type(np.tril_indices(3), _Indices2D) +assert_type(np.tril_indices(3, 1), _Indices2D) +assert_type(np.tril_indices(3, 1, 2), _Indices2D) +# tril_indices +assert_type(np.triu_indices(3), _Indices2D) +assert_type(np.triu_indices(3, 1), _Indices2D) +assert_type(np.triu_indices(3, 1, 2), _Indices2D) + +# tril_indices_from +assert_type(np.tril_indices_from(_2d_bool), _Indices2D) +assert_type(np.tril_indices_from(_Cube()), _Indices2D) +# triu_indices_from +assert_type(np.triu_indices_from(_2d_bool), _Indices2D) +assert_type(np.triu_indices_from(_Cube()), _Indices2D) diff --git a/numpy/typing/tests/data/reveal/type_check.pyi b/numpy/typing/tests/data/reveal/type_check.pyi index 6d357278762b..22eed7493689 100644 --- a/numpy/typing/tests/data/reveal/type_check.pyi +++ b/numpy/typing/tests/data/reveal/type_check.pyi @@ -1,14 +1,7 @@ -import sys -from typing import Any, Literal +from typing import Any, Literal, assert_type import numpy as np import numpy.typing as npt -from numpy._typing import _16Bit, _32Bit, _64Bit, _128Bit - -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type f8: np.float64 f: float @@ -18,26 +11,24 @@ AR_i8: npt.NDArray[np.int64] AR_i4: npt.NDArray[np.int32] AR_f2: npt.NDArray[np.float16] AR_f8: npt.NDArray[np.float64] -AR_f16: npt.NDArray[np.floating[_128Bit]] +AR_f16: 
npt.NDArray[np.longdouble] AR_c8: npt.NDArray[np.complex64] AR_c16: npt.NDArray[np.complex128] AR_LIKE_f: list[float] -class RealObj: +class ComplexObj: real: slice - -class ImagObj: imag: slice assert_type(np.mintypecode(["f8"], typeset="qfQF"), str) -assert_type(np.real(RealObj()), slice) +assert_type(np.real(ComplexObj()), slice) assert_type(np.real(AR_f8), npt.NDArray[np.float64]) assert_type(np.real(AR_c16), npt.NDArray[np.float64]) assert_type(np.real(AR_LIKE_f), npt.NDArray[Any]) -assert_type(np.imag(ImagObj()), slice) +assert_type(np.imag(ComplexObj()), slice) assert_type(np.imag(AR_f8), npt.NDArray[np.float64]) assert_type(np.imag(AR_c16), npt.NDArray[np.float64]) assert_type(np.imag(AR_LIKE_f), npt.NDArray[Any]) @@ -59,24 +50,18 @@ assert_type(np.nan_to_num(AR_f8, nan=1.5), npt.NDArray[np.float64]) assert_type(np.nan_to_num(AR_LIKE_f, posinf=9999), npt.NDArray[Any]) assert_type(np.real_if_close(AR_f8), npt.NDArray[np.float64]) -assert_type(np.real_if_close(AR_c16), npt.NDArray[np.float64] | npt.NDArray[np.complex128]) -assert_type(np.real_if_close(AR_c8), npt.NDArray[np.float32] | npt.NDArray[np.complex64]) +assert_type(np.real_if_close(AR_c16), npt.NDArray[np.float64 | np.complex128]) +assert_type(np.real_if_close(AR_c8), npt.NDArray[np.float32 | np.complex64]) assert_type(np.real_if_close(AR_LIKE_f), npt.NDArray[Any]) -assert_type(np.typename("h"), Literal["short"]) -assert_type(np.typename("B"), Literal["unsigned char"]) -assert_type(np.typename("V"), Literal["void"]) -assert_type(np.typename("S1"), Literal["character"]) +assert_type(np.typename("h"), Literal["short"]) # type: ignore[deprecated] +assert_type(np.typename("B"), Literal["unsigned char"]) # type: ignore[deprecated] +assert_type(np.typename("V"), Literal["void"]) # type: ignore[deprecated] +assert_type(np.typename("S1"), Literal["character"]) # type: ignore[deprecated] assert_type(np.common_type(AR_i4), type[np.float64]) assert_type(np.common_type(AR_f2), type[np.float16]) 
-assert_type(np.common_type(AR_f2, AR_i4), type[np.floating[_16Bit | _64Bit]]) -assert_type(np.common_type(AR_f16, AR_i4), type[np.floating[_64Bit | _128Bit]]) -assert_type( - np.common_type(AR_c8, AR_f2), - type[np.complexfloating[_16Bit | _32Bit, _16Bit | _32Bit]], -) -assert_type( - np.common_type(AR_f2, AR_c8, AR_i4), - type[np.complexfloating[_16Bit | _32Bit | _64Bit, _16Bit | _32Bit | _64Bit]], -) +assert_type(np.common_type(AR_f2, AR_i4), type[np.float64]) +assert_type(np.common_type(AR_f16, AR_i4), type[np.longdouble]) +assert_type(np.common_type(AR_c8, AR_f2), type[np.complex64]) +assert_type(np.common_type(AR_f2, AR_c8, AR_i4), type[np.complexfloating]) diff --git a/numpy/typing/tests/data/reveal/ufunc_config.pyi b/numpy/typing/tests/data/reveal/ufunc_config.pyi index 9d74abf42322..f205b82b4f75 100644 --- a/numpy/typing/tests/data/reveal/ufunc_config.pyi +++ b/numpy/typing/tests/data/reveal/ufunc_config.pyi @@ -1,37 +1,30 @@ """Typing tests for `_core._ufunc_config`.""" -import sys -from typing import Any, Protocol +from _typeshed import SupportsWrite from collections.abc import Callable +from typing import Any, assert_type import numpy as np - -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +from numpy._core._ufunc_config import _ErrDict def func(a: str, b: int) -> None: ... class Write: def write(self, value: str) -> None: ... -class SupportsWrite(Protocol): - def write(self, s: str, /) -> object: ... 
- -assert_type(np.seterr(all=None), np._core._ufunc_config._ErrDict) -assert_type(np.seterr(divide="ignore"), np._core._ufunc_config._ErrDict) -assert_type(np.seterr(over="warn"), np._core._ufunc_config._ErrDict) -assert_type(np.seterr(under="call"), np._core._ufunc_config._ErrDict) -assert_type(np.seterr(invalid="raise"), np._core._ufunc_config._ErrDict) -assert_type(np.geterr(), np._core._ufunc_config._ErrDict) +assert_type(np.seterr(all=None), _ErrDict) +assert_type(np.seterr(divide="ignore"), _ErrDict) +assert_type(np.seterr(over="warn"), _ErrDict) +assert_type(np.seterr(under="call"), _ErrDict) +assert_type(np.seterr(invalid="raise"), _ErrDict) +assert_type(np.geterr(), _ErrDict) assert_type(np.setbufsize(4096), int) assert_type(np.getbufsize(), int) -assert_type(np.seterrcall(func), Callable[[str, int], Any] | None | SupportsWrite) -assert_type(np.seterrcall(Write()), Callable[[str, int], Any] | None | SupportsWrite) -assert_type(np.geterrcall(), Callable[[str, int], Any] | None | SupportsWrite) +assert_type(np.seterrcall(func), Callable[[str, int], Any] | SupportsWrite[str] | None) +assert_type(np.seterrcall(Write()), Callable[[str, int], Any] | SupportsWrite[str] | None) +assert_type(np.geterrcall(), Callable[[str, int], Any] | SupportsWrite[str] | None) assert_type(np.errstate(call=func, all="call"), np.errstate) assert_type(np.errstate(call=Write(), divide="log", over="log"), np.errstate) diff --git a/numpy/typing/tests/data/reveal/ufunclike.pyi b/numpy/typing/tests/data/reveal/ufunclike.pyi index e29e76ed14e4..c679b82d2836 100644 --- a/numpy/typing/tests/data/reveal/ufunclike.pyi +++ b/numpy/typing/tests/data/reveal/ufunclike.pyi @@ -1,14 +1,8 @@ -import sys -from typing import Any +from typing import assert_type import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - AR_LIKE_b: list[bool] AR_LIKE_u: list[np.uint32] AR_LIKE_i: list[int] @@ -17,12 
+11,12 @@ AR_LIKE_O: list[np.object_] AR_U: npt.NDArray[np.str_] -assert_type(np.fix(AR_LIKE_b), npt.NDArray[np.floating[Any]]) -assert_type(np.fix(AR_LIKE_u), npt.NDArray[np.floating[Any]]) -assert_type(np.fix(AR_LIKE_i), npt.NDArray[np.floating[Any]]) -assert_type(np.fix(AR_LIKE_f), npt.NDArray[np.floating[Any]]) -assert_type(np.fix(AR_LIKE_O), npt.NDArray[np.object_]) -assert_type(np.fix(AR_LIKE_f, out=AR_U), npt.NDArray[np.str_]) +assert_type(np.fix(AR_LIKE_b), npt.NDArray[np.floating]) # type: ignore[deprecated] +assert_type(np.fix(AR_LIKE_u), npt.NDArray[np.floating]) # type: ignore[deprecated] +assert_type(np.fix(AR_LIKE_i), npt.NDArray[np.floating]) # type: ignore[deprecated] +assert_type(np.fix(AR_LIKE_f), npt.NDArray[np.floating]) # type: ignore[deprecated] +assert_type(np.fix(AR_LIKE_O), npt.NDArray[np.object_]) # type: ignore[deprecated] +assert_type(np.fix(AR_LIKE_f, out=AR_U), npt.NDArray[np.str_]) # type: ignore[deprecated] assert_type(np.isposinf(AR_LIKE_b), npt.NDArray[np.bool]) assert_type(np.isposinf(AR_LIKE_u), npt.NDArray[np.bool]) diff --git a/numpy/typing/tests/data/reveal/ufuncs.pyi b/numpy/typing/tests/data/reveal/ufuncs.pyi index 859c202c3766..eda92f2117c6 100644 --- a/numpy/typing/tests/data/reveal/ufuncs.pyi +++ b/numpy/typing/tests/data/reveal/ufuncs.pyi @@ -1,14 +1,8 @@ -import sys -from typing import Literal, Any +from typing import Any, Literal, NoReturn, assert_type import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - i8: np.int64 f8: np.float64 AR_f8: npt.NDArray[np.float64] @@ -18,6 +12,7 @@ assert_type(np.absolute.__doc__, str) assert_type(np.absolute.types, list[str]) assert_type(np.absolute.__name__, Literal["absolute"]) +assert_type(np.absolute.__qualname__, Literal["absolute"]) assert_type(np.absolute.ntypes, Literal[20]) assert_type(np.absolute.identity, None) assert_type(np.absolute.nin, Literal[1]) @@ -30,6 +25,7 
@@ assert_type(np.absolute(AR_f8), npt.NDArray[Any]) assert_type(np.absolute.at(AR_f8, AR_i8), None) assert_type(np.add.__name__, Literal["add"]) +assert_type(np.add.__qualname__, Literal["add"]) assert_type(np.add.ntypes, Literal[22]) assert_type(np.add.identity, Literal[0]) assert_type(np.add.nin, Literal[2]) @@ -46,6 +42,7 @@ assert_type(np.add.outer(f8, f8), Any) assert_type(np.add.outer(AR_f8, f8), npt.NDArray[Any]) assert_type(np.frexp.__name__, Literal["frexp"]) +assert_type(np.frexp.__qualname__, Literal["frexp"]) assert_type(np.frexp.ntypes, Literal[4]) assert_type(np.frexp.identity, None) assert_type(np.frexp.nin, Literal[1]) @@ -56,6 +53,7 @@ assert_type(np.frexp(f8), tuple[Any, Any]) assert_type(np.frexp(AR_f8), tuple[npt.NDArray[Any], npt.NDArray[Any]]) assert_type(np.divmod.__name__, Literal["divmod"]) +assert_type(np.divmod.__qualname__, Literal["divmod"]) assert_type(np.divmod.ntypes, Literal[15]) assert_type(np.divmod.identity, None) assert_type(np.divmod.nin, Literal[2]) @@ -66,6 +64,7 @@ assert_type(np.divmod(f8, f8), tuple[Any, Any]) assert_type(np.divmod(AR_f8, f8), tuple[npt.NDArray[Any], npt.NDArray[Any]]) assert_type(np.matmul.__name__, Literal["matmul"]) +assert_type(np.matmul.__qualname__, Literal["matmul"]) assert_type(np.matmul.ntypes, Literal[19]) assert_type(np.matmul.identity, None) assert_type(np.matmul.nin, Literal[2]) @@ -77,6 +76,7 @@ assert_type(np.matmul(AR_f8, AR_f8), Any) assert_type(np.matmul(AR_f8, AR_f8, axes=[(0, 1), (0, 1), (0, 1)]), Any) assert_type(np.vecdot.__name__, Literal["vecdot"]) +assert_type(np.vecdot.__qualname__, Literal["vecdot"]) assert_type(np.vecdot.ntypes, Literal[19]) assert_type(np.vecdot.identity, None) assert_type(np.vecdot.nin, Literal[2]) @@ -86,7 +86,8 @@ assert_type(np.vecdot.signature, Literal["(n),(n)->()"]) assert_type(np.vecdot.identity, None) assert_type(np.vecdot(AR_f8, AR_f8), Any) -assert_type(np.bitwise_count.__name__, Literal['bitwise_count']) +assert_type(np.bitwise_count.__name__, 
Literal["bitwise_count"]) +assert_type(np.bitwise_count.__qualname__, Literal["bitwise_count"]) assert_type(np.bitwise_count.ntypes, Literal[11]) assert_type(np.bitwise_count.identity, None) assert_type(np.bitwise_count.nin, Literal[1]) @@ -96,3 +97,46 @@ assert_type(np.bitwise_count.signature, None) assert_type(np.bitwise_count.identity, None) assert_type(np.bitwise_count(i8), Any) assert_type(np.bitwise_count(AR_i8), npt.NDArray[Any]) + +def test_absolute_outer_invalid() -> None: + assert_type(np.absolute.outer(AR_f8, AR_f8), NoReturn) # type: ignore[arg-type] +def test_frexp_outer_invalid() -> None: + assert_type(np.frexp.outer(AR_f8, AR_f8), NoReturn) # type: ignore[arg-type] +def test_divmod_outer_invalid() -> None: + assert_type(np.divmod.outer(AR_f8, AR_f8), NoReturn) # type: ignore[arg-type] +def test_matmul_outer_invalid() -> None: + assert_type(np.matmul.outer(AR_f8, AR_f8), NoReturn) # type: ignore[arg-type] + +def test_absolute_reduceat_invalid() -> None: + assert_type(np.absolute.reduceat(AR_f8, AR_i8), NoReturn) # type: ignore[arg-type] +def test_frexp_reduceat_invalid() -> None: + assert_type(np.frexp.reduceat(AR_f8, AR_i8), NoReturn) # type: ignore[arg-type] +def test_divmod_reduceat_invalid() -> None: + assert_type(np.divmod.reduceat(AR_f8, AR_i8), NoReturn) # type: ignore[arg-type] +def test_matmul_reduceat_invalid() -> None: + assert_type(np.matmul.reduceat(AR_f8, AR_i8), NoReturn) # type: ignore[arg-type] + +def test_absolute_reduce_invalid() -> None: + assert_type(np.absolute.reduce(AR_f8), NoReturn) # type: ignore[arg-type] +def test_frexp_reduce_invalid() -> None: + assert_type(np.frexp.reduce(AR_f8), NoReturn) # type: ignore[arg-type] +def test_divmod_reduce_invalid() -> None: + assert_type(np.divmod.reduce(AR_f8), NoReturn) # type: ignore[arg-type] +def test_matmul_reduce_invalid() -> None: + assert_type(np.matmul.reduce(AR_f8), NoReturn) # type: ignore[arg-type] + +def test_absolute_accumulate_invalid() -> None: + 
assert_type(np.absolute.accumulate(AR_f8), NoReturn) # type: ignore[arg-type] +def test_frexp_accumulate_invalid() -> None: + assert_type(np.frexp.accumulate(AR_f8), NoReturn) # type: ignore[arg-type] +def test_divmod_accumulate_invalid() -> None: + assert_type(np.divmod.accumulate(AR_f8), NoReturn) # type: ignore[arg-type] +def test_matmul_accumulate_invalid() -> None: + assert_type(np.matmul.accumulate(AR_f8), NoReturn) # type: ignore[arg-type] + +def test_frexp_at_invalid() -> None: + assert_type(np.frexp.at(AR_f8, i8), NoReturn) # type: ignore[arg-type] +def test_divmod_at_invalid() -> None: + assert_type(np.divmod.at(AR_f8, i8, AR_f8), NoReturn) # type: ignore[arg-type] +def test_matmul_at_invalid() -> None: + assert_type(np.matmul.at(AR_f8, i8, AR_f8), NoReturn) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/reveal/warnings_and_errors.pyi b/numpy/typing/tests/data/reveal/warnings_and_errors.pyi index e498fee0d3cc..f756a8e45d46 100644 --- a/numpy/typing/tests/data/reveal/warnings_and_errors.pyi +++ b/numpy/typing/tests/data/reveal/warnings_and_errors.pyi @@ -1,12 +1,7 @@ -import sys +from typing import assert_type import numpy.exceptions as ex -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type - assert_type(ex.ModuleDeprecationWarning(), ex.ModuleDeprecationWarning) assert_type(ex.VisibleDeprecationWarning(), ex.VisibleDeprecationWarning) assert_type(ex.ComplexWarning(), ex.ComplexWarning) diff --git a/numpy/typing/tests/test_isfile.py b/numpy/typing/tests/test_isfile.py index e77b560f8c76..250686a98ee8 100644 --- a/numpy/typing/tests/test_isfile.py +++ b/numpy/typing/tests/test_isfile.py @@ -1,7 +1,8 @@ import os -import sys from pathlib import Path +import pytest + import numpy as np from numpy.testing import assert_ @@ -9,7 +10,7 @@ FILES = [ ROOT / "py.typed", ROOT / "__init__.pyi", - ROOT / "ctypeslib.pyi", + ROOT / "ctypeslib" / "__init__.pyi", ROOT / "_core" / 
"__init__.pyi", ROOT / "f2py" / "__init__.pyi", ROOT / "fft" / "__init__.pyi", @@ -21,10 +22,12 @@ ROOT / "random" / "__init__.pyi", ROOT / "testing" / "__init__.pyi", ] -if sys.version_info < (3, 12): - FILES += [ROOT / "distutils" / "__init__.pyi"] +@pytest.mark.thread_unsafe( + reason="os.path has a thread-safety bug (python/cpython#140054). " + "Expected to only be a problem in 3.14.0" +) class TestIsFile: def test_isfile(self): """Test if all ``.pyi`` files are properly installed.""" diff --git a/numpy/typing/tests/test_runtime.py b/numpy/typing/tests/test_runtime.py index c32c5db3266a..9db74c8ddc28 100644 --- a/numpy/typing/tests/test_runtime.py +++ b/numpy/typing/tests/test_runtime.py @@ -1,35 +1,40 @@ """Test the runtime usage of `numpy.typing`.""" -from __future__ import annotations - from typing import ( - get_type_hints, - Union, + Any, NamedTuple, + Self, + TypeAliasType, get_args, get_origin, - Any, + get_type_hints, ) import pytest + import numpy as np -import numpy.typing as npt import numpy._typing as _npt +import numpy.typing as npt class TypeTup(NamedTuple): - typ: type - args: tuple[type, ...] - origin: None | type + typ: type # type expression + args: tuple[type, ...] # generic type parameters or arguments + origin: type | None # e.g. `UnionType` or `GenericAlias` + @classmethod + def from_type_alias(cls, alias: TypeAliasType, /) -> Self: + # PEP 695 `type _ = ...` aliases wrap the type expression as a + # `types.TypeAliasType` instance with a `__value__` attribute. 
+ tp = alias.__value__ + return cls(typ=tp, args=get_args(tp), origin=get_origin(tp)) -NDArrayTup = TypeTup(npt.NDArray, npt.NDArray.__args__, np.ndarray) TYPES = { - "ArrayLike": TypeTup(npt.ArrayLike, npt.ArrayLike.__args__, Union), - "DTypeLike": TypeTup(npt.DTypeLike, npt.DTypeLike.__args__, Union), - "NBitBase": TypeTup(npt.NBitBase, (), None), - "NDArray": NDArrayTup, + "ArrayLike": TypeTup.from_type_alias(npt.ArrayLike), + "DTypeLike": TypeTup.from_type_alias(npt.DTypeLike), + "NBitBase": TypeTup(npt.NBitBase, (), None), # type: ignore[deprecated] # pyright: ignore[reportDeprecated] + "NDArray": TypeTup.from_type_alias(npt.NDArray), } @@ -54,10 +59,7 @@ def test_get_type_hints(name: type, tup: TypeTup) -> None: """Test `typing.get_type_hints`.""" typ = tup.typ - # Explicitly set `__annotations__` in order to circumvent the - # stringification performed by `from __future__ import annotations` - def func(a): pass - func.__annotations__ = {"a": typ, "return": None} + def func(a: typ) -> None: pass out = get_type_hints(func) ref = {"a": typ, "return": type(None)} @@ -69,13 +71,10 @@ def test_get_type_hints_str(name: type, tup: TypeTup) -> None: """Test `typing.get_type_hints` with string-representation of types.""" typ_str, typ = f"npt.{name}", tup.typ - # Explicitly set `__annotations__` in order to circumvent the - # stringification performed by `from __future__ import annotations` - def func(a): pass - func.__annotations__ = {"a": typ_str, "return": None} + def func(a: typ_str) -> None: pass out = get_type_hints(func) - ref = {"a": typ, "return": type(None)} + ref = {"a": getattr(npt, str(name)), "return": type(None)} assert out == ref @@ -87,7 +86,6 @@ def test_keys() -> None: PROTOCOLS: dict[str, tuple[type[Any], object]] = { - "_SupportsDType": (_npt._SupportsDType, np.int64(1)), "_SupportsArray": (_npt._SupportsArray, np.arange(10)), "_SupportsArrayFunc": (_npt._SupportsArrayFunc, np.arange(10)), "_NestedSequence": (_npt._NestedSequence, [1]), @@ -101,9 
+99,5 @@ def test_isinstance(self, cls: type[Any], obj: object) -> None: assert not isinstance(None, cls) def test_issubclass(self, cls: type[Any], obj: object) -> None: - if cls is _npt._SupportsDType: - pytest.xfail( - "Protocols with non-method members don't support issubclass()" - ) assert issubclass(type(obj), cls) assert not issubclass(type(None), cls) diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py index dc65a51a2027..dbe16a37ada4 100644 --- a/numpy/typing/tests/test_typing.py +++ b/numpy/typing/tests/test_typing.py @@ -1,16 +1,12 @@ -from __future__ import annotations - import importlib.util import os import re import shutil +import textwrap from collections import defaultdict -from collections.abc import Iterator from typing import TYPE_CHECKING import pytest -from numpy.typing.mypy_plugin import _EXTENDED_PRECISION_LIST - # Only trigger a full `mypy` run if this environment variable is set # Note that these tests tend to take over a minute even on a macOS M1 CPU, @@ -34,6 +30,8 @@ NO_MYPY = False if TYPE_CHECKING: + from collections.abc import Iterator + # We need this as annotation, but it's located in a private namespace. 
# As a compromise, do *not* import it during runtime from _pytest.mark.structures import ParameterSet @@ -99,9 +97,9 @@ def run_mypy() -> None: directory, ]) if stderr: - pytest.fail(f"Unexpected mypy standard error\n\n{stderr}") + pytest.fail(f"Unexpected mypy standard error\n\n{stderr}", False) elif exit_code not in {0, 1}: - pytest.fail(f"Unexpected mypy exit code: {exit_code}\n\n{stdout}") + pytest.fail(f"Unexpected mypy exit code: {exit_code}\n\n{stdout}", False) str_concat = "" filename: str | None = None @@ -118,98 +116,47 @@ def run_mypy() -> None: filename = None -def get_test_cases(directory: str) -> Iterator[ParameterSet]: - for root, _, files in os.walk(directory): - for fname in files: - short_fname, ext = os.path.splitext(fname) - if ext in (".pyi", ".py"): +def get_test_cases(*directories: str) -> "Iterator[ParameterSet]": + for directory in directories: + for root, _, files in os.walk(directory): + for fname in files: + short_fname, ext = os.path.splitext(fname) + if ext not in (".pyi", ".py"): + continue + fullpath = os.path.join(root, fname) yield pytest.param(fullpath, id=short_fname) -@pytest.mark.slow -@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") -@pytest.mark.parametrize("path", get_test_cases(PASS_DIR)) -def test_success(path) -> None: - # Alias `OUTPUT_MYPY` so that it appears in the local namespace - output_mypy = OUTPUT_MYPY - if path in output_mypy: - msg = "Unexpected mypy output\n\n" - msg += "\n".join(_strip_filename(v)[1] for v in output_mypy[path]) - raise AssertionError(msg) - +_FAIL_INDENT = " " * 4 +_FAIL_SEP = "\n" + "_" * 79 + "\n\n" -@pytest.mark.slow -@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") -@pytest.mark.parametrize("path", get_test_cases(FAIL_DIR)) -def test_fail(path: str) -> None: - __tracebackhide__ = True +_FAIL_MSG_REVEAL = """{}:{} - reveal mismatch: - with open(path) as fin: - lines = fin.readlines() +{}""" - errors = defaultdict(lambda: "") +@pytest.mark.slow 
+@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") +@pytest.mark.parametrize("path", get_test_cases(PASS_DIR, FAIL_DIR)) +def test_pass(path) -> None: + # Alias `OUTPUT_MYPY` so that it appears in the local namespace output_mypy = OUTPUT_MYPY - assert path in output_mypy - - for error_line in output_mypy[path]: - lineno, error_line = _strip_filename(error_line) - errors[lineno] += f'{error_line}\n' - - for i, line in enumerate(lines): - lineno = i + 1 - if ( - line.startswith('#') - or (" E:" not in line and lineno not in errors) - ): - continue - - target_line = lines[lineno - 1] - if "# E:" in target_line: - expression, _, marker = target_line.partition(" # E: ") - expected_error = errors[lineno].strip() - marker = marker.strip() - _test_fail(path, expression, marker, expected_error, lineno) - else: - pytest.fail( - f"Unexpected mypy output at line {lineno}\n\n{errors[lineno]}" - ) + if path not in output_mypy: + return -_FAIL_MSG1 = """Extra error at line {} - -Expression: {} -Extra error: {!r} -""" - -_FAIL_MSG2 = """Error mismatch at line {} - -Expression: {} -Expected error: {} -Observed error: {!r} -""" - - -def _test_fail( - path: str, - expression: str, - error: str, - expected_error: None | str, - lineno: int, -) -> None: - if expected_error is None: - raise AssertionError(_FAIL_MSG1.format(lineno, expression, error)) - elif error not in expected_error: - raise AssertionError(_FAIL_MSG2.format( - lineno, expression, expected_error, error - )) - + relpath = os.path.relpath(path) -_REVEAL_MSG = """Reveal mismatch at line {} + # collect any reported errors, and clean up the output + messages = [] + for message in output_mypy[path]: + lineno, content = _strip_filename(message) + content = content.removeprefix("error:").lstrip() + messages.append(f"{relpath}:{lineno} - {content}") -{} -""" + if messages: + pytest.fail("\n".join(messages), pytrace=False) @pytest.mark.slow @@ -225,13 +172,24 @@ def test_reveal(path: str) -> None: if path not in 
output_mypy: return + relpath = os.path.relpath(path) + + # collect any reported errors, and clean up the output + failures = [] for error_line in output_mypy[path]: - lineno, error_line = _strip_filename(error_line) - raise AssertionError(_REVEAL_MSG.format(lineno, error_line)) + lineno, error_msg = _strip_filename(error_line) + error_msg = textwrap.indent(error_msg, _FAIL_INDENT) + reason = _FAIL_MSG_REVEAL.format(relpath, lineno, error_msg) + failures.append(reason) + + if failures: + reasons = _FAIL_SEP.join(failures) + pytest.fail(reasons, pytrace=False) @pytest.mark.slow @pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") +@pytest.mark.filterwarnings("ignore:numpy.fix is deprecated:DeprecationWarning") @pytest.mark.parametrize("path", get_test_cases(PASS_DIR)) def test_code_runs(path: str) -> None: """Validate that the code in `path` properly during runtime.""" @@ -246,41 +204,3 @@ def test_code_runs(path: str) -> None: test_module = importlib.util.module_from_spec(spec) spec.loader.exec_module(test_module) - - -LINENO_MAPPING = { - 11: "uint128", - 12: "uint256", - 14: "int128", - 15: "int256", - 17: "float80", - 18: "float96", - 19: "float128", - 20: "float256", - 22: "complex160", - 23: "complex192", - 24: "complex256", - 25: "complex512", -} - - -@pytest.mark.slow -@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") -def test_extended_precision() -> None: - path = os.path.join(MISC_DIR, "extended_precision.pyi") - output_mypy = OUTPUT_MYPY - assert path in output_mypy - - with open(path) as f: - expression_list = f.readlines() - - for _msg in output_mypy[path]: - lineno, msg = _strip_filename(_msg) - expression = expression_list[lineno - 1].rstrip("\n") - - if LINENO_MAPPING[lineno] in _EXTENDED_PRECISION_LIST: - raise AssertionError(_REVEAL_MSG.format(lineno, msg)) - elif "error" not in msg: - _test_fail( - path, expression, msg, 'Expression is of type "Any"', lineno - ) diff --git a/numpy/version.pyi b/numpy/version.pyi index 
2c305466a7e0..073885c017c2 100644 --- a/numpy/version.pyi +++ b/numpy/version.pyi @@ -1,7 +1,9 @@ -version: str -__version__: str -full_version: str +from typing import Final, LiteralString -git_revision: str -release: bool -short_version: str +version: Final[LiteralString] = ... +__version__: Final[LiteralString] = ... +full_version: Final[LiteralString] = ... + +git_revision: Final[LiteralString] = ... +release: Final[bool] = ... +short_version: Final[LiteralString] = ... diff --git a/pavement.py b/pavement.py deleted file mode 100644 index 43dc28675eb9..000000000000 --- a/pavement.py +++ /dev/null @@ -1,187 +0,0 @@ -r""" -This paver file is intended to help with the release process as much as -possible. It relies on virtualenv to generate 'bootstrap' environments as -independent from the user system as possible (e.g. to make sure the sphinx doc -is built against the built numpy, not an installed one). - -Building changelog + notes -========================== - -Assumes you have git and the binaries/tarballs in installers/:: - - paver write_release - paver write_note - -This automatically put the checksum into README.rst, and writes the Changelog. 
- -TODO -==== - - the script is messy, lots of global variables - - make it more easily customizable (through command line args) - - missing targets: install & test, sdist test, debian packaging - - fix bdist_mpkg: we build the same source twice -> how to make sure we use - the same underlying python for egg install in venv and for bdist_mpkg -""" -import os -import sys -import shutil -import hashlib -import textwrap - -# The paver package needs to be installed to run tasks -import paver -from paver.easy import Bunch, options, task, sh - - -#----------------------------------- -# Things to be changed for a release -#----------------------------------- - -# Path to the release notes -RELEASE_NOTES = 'doc/source/release/2.1.0-notes.rst' - - -#------------------------------------------------------- -# Hardcoded build/install dirs, virtualenv options, etc. -#------------------------------------------------------- - -# Where to put the release installers -options(installers=Bunch(releasedir="release", - installersdir=os.path.join("release", "installers")),) - - -#------------- -# README stuff -#------------- - -def _compute_hash(idirs, hashfunc): - """Hash files using given hashfunc. - - Parameters - ---------- - idirs : directory path - Directory containing files to be hashed. - hashfunc : hash function - Function to be used to hash the files. - - """ - released = paver.path.path(idirs).listdir() - checksums = [] - for fpath in sorted(released): - with open(fpath, 'rb') as fin: - fhash = hashfunc(fin.read()) - checksums.append( - '%s %s' % (fhash.hexdigest(), os.path.basename(fpath))) - return checksums - - -def compute_md5(idirs): - """Compute md5 hash of files in idirs. - - Parameters - ---------- - idirs : directory path - Directory containing files to be hashed. - - """ - return _compute_hash(idirs, hashlib.md5) - - -def compute_sha256(idirs): - """Compute sha256 hash of files in idirs. 
- - Parameters - ---------- - idirs : directory path - Directory containing files to be hashed. - - """ - # better checksum so gpg signed README.rst containing the sums can be used - # to verify the binaries instead of signing all binaries - return _compute_hash(idirs, hashlib.sha256) - - -def write_release_task(options, filename='README'): - """Append hashes of release files to release notes. - - This appends file hashes to the release notes and creates - four README files of the result in various formats: - - - README.rst - - README.rst.gpg - - README.md - - README.md.gpg - - The md file are created using `pandoc` so that the links are - properly updated. The gpg files are kept separate, so that - the unsigned files may be edited before signing if needed. - - Parameters - ---------- - options : - Set by ``task`` decorator. - filename : str - Filename of the modified notes. The file is written - in the release directory. - - """ - idirs = options.installers.installersdir - notes = paver.path.path(RELEASE_NOTES) - rst_readme = paver.path.path(filename + '.rst') - md_readme = paver.path.path(filename + '.md') - - # append hashes - with open(rst_readme, 'w') as freadme: - with open(notes) as fnotes: - freadme.write(fnotes.read()) - - freadme.writelines(textwrap.dedent( - """ - Checksums - ========= - - MD5 - --- - :: - - """)) - freadme.writelines([f' {c}\n' for c in compute_md5(idirs)]) - - freadme.writelines(textwrap.dedent( - """ - SHA256 - ------ - :: - - """)) - freadme.writelines([f' {c}\n' for c in compute_sha256(idirs)]) - - # generate md file using pandoc before signing - sh(f"pandoc -s -o {md_readme} {rst_readme}") - - # Sign files - if hasattr(options, 'gpg_key'): - cmd = f'gpg --clearsign --armor --default_key {options.gpg_key}' - else: - cmd = 'gpg --clearsign --armor' - - sh(cmd + f' --output {rst_readme}.gpg {rst_readme}') - sh(cmd + f' --output {md_readme}.gpg {md_readme}') - - -@task -def write_release(options): - """Write the README files. 
- - Two README files are generated from the release notes, one in ``rst`` - markup for the general release, the other in ``md`` markup for the github - release notes. - - Parameters - ---------- - options : - Set by ``task`` decorator. - - """ - rdir = options.installers.releasedir - write_release_task(options, os.path.join(rdir, 'README')) diff --git a/pixi-packages/README.md b/pixi-packages/README.md new file mode 100644 index 000000000000..9f1fed5fdb2c --- /dev/null +++ b/pixi-packages/README.md @@ -0,0 +1,37 @@ +# NumPy Pixi packages + +This directory contains definitions for [Pixi packages](https://pixi.sh/latest/reference/pixi_manifest/#the-package-section) +which can be built from the NumPy source code. + +Downstream developers can make use of these packages by adding them as Git dependencies in a +[Pixi workspace](https://pixi.sh/latest/first_workspace/), like: + +```toml +[dependencies] +numpy = { git = "https://github.com/numpy/numpy", subdirectory = "pixi-packages/asan" } +``` + +This is particularly useful when developers need to build NumPy from source +(for example, for an ASan-instrumented build), as it does not require any manual +clone or build steps. Instead, Pixi will automatically handle both the build +and installation of the package. + +See https://github.com/scipy/scipy/pull/24066 for a full example of downstream use. + +Each package definition is contained in a subdirectory. 
+Currently defined package variants: + +- `default` +- `asan`: ASan-instrumented build with `-Db_sanitize=address` + +## Maintenance + +- Keep host dependency requirements up to date +- For dependencies on upstream CPython Pixi packages, keep the git revision at a compatible version + +## Opportunities for future improvement + +- More package variants (such as TSan, UBSan) +- Support for Windows +- Using a single `pixi.toml` for all package variants is blocked on https://github.com/prefix-dev/pixi/issues/2813 +- Consider pinning dependency versions to guard against upstream breakages over time diff --git a/pixi-packages/asan/pixi.toml b/pixi-packages/asan/pixi.toml new file mode 100644 index 000000000000..ce25939a0fcb --- /dev/null +++ b/pixi-packages/asan/pixi.toml @@ -0,0 +1,27 @@ +[workspace] +channels = ["https://prefix.dev/conda-forge"] +platforms = ["osx-arm64", "linux-64"] +preview = ["pixi-build"] + +[package.build] +source.path = "../.." + +[package.build.backend] +name = "pixi-build-python" +version = "*" + +[package.build.config] +extra-input-globs = ["**/*.c.src"] +compilers = ["c", "cxx"] +env.ASAN_OPTIONS = "detect_leaks=0:symbolize=1:strict_init_order=true:allocator_may_return_null=1:use_sigaltstack=0" +extra-args = ["-Csetup-args=-Db_sanitize=address", "-Csetup-args=-Dbuildtype=debug"] + +[package.host-dependencies] +python.git = "https://github.com/python/cpython" +python.subdirectory = "Tools/pixi-packages/asan" +# v3.15.0a6 +python.rev = "15b216f30d0445469ec31bc7509fcc55a216ef7c" + +meson-python = "*" +cython = "*" +uv = "*" # used to invoke the wheel build diff --git a/pixi-packages/default/pixi.toml b/pixi-packages/default/pixi.toml new file mode 100644 index 000000000000..0b5d53e41ef2 --- /dev/null +++ b/pixi-packages/default/pixi.toml @@ -0,0 +1,21 @@ +[workspace] +channels = ["https://prefix.dev/conda-forge"] +platforms = ["osx-arm64", "linux-64"] +preview = ["pixi-build"] + +[package.build] +source.path = "../.." 
+ +[package.build.backend] +name = "pixi-build-python" +version = "*" + +[package.build.config] +extra-input-globs = ["**/*.c.src"] +compilers = ["c", "cxx"] + +[package.host-dependencies] +python = "*" +meson-python = "*" +cython = "*" +uv = "*" # used to invoke the wheel build diff --git a/pyproject.toml b/pyproject.toml index b4df3c36d71f..732ab2741993 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,34 +1,30 @@ [build-system] build-backend = "mesonpy" requires = [ - "meson-python>=0.15.0", + "meson-python>=0.18.0", "Cython>=3.0.6", # keep in sync with version check in meson.build ] [project] name = "numpy" -version = "2.1.0.dev0" -# TODO: add `license-files` once PEP 639 is accepted (see meson-python#88) -license = {file = "LICENSE.txt"} - +version = "2.5.0.dev0" description = "Fundamental package for array computing in Python" authors = [{name = "Travis E. Oliphant et al."}] maintainers = [ {name = "NumPy Developers", email="numpy-discussion@python.org"}, ] -requires-python = ">=3.10" +requires-python = ">=3.12" readme = "README.md" classifiers = [ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Science/Research', 'Intended Audience :: Developers', - 'License :: OSI Approved :: BSD License', 'Programming Language :: C', 'Programming Language :: Python', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.10', - 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', + 'Programming Language :: Python :: 3.13', + 'Programming Language :: Python :: 3.14', 'Programming Language :: Python :: 3 :: Only', 'Programming Language :: Python :: Implementation :: CPython', 'Topic :: Software Development', @@ -39,11 +35,60 @@ classifiers = [ 'Operating System :: Unix', 'Operating System :: MacOS', ] +# License info: +# - The main NumPy project license is BSD-3-Clause. 
+# - The SPDX license expression below reflects installed numpy packages when +# built from source (e.g., with `python -m build --wheel`), with no vendoring. +# - That SPDX expression is therefore incomplete for: +# (a) sdists - see the comment below `license-files` for other licenses +# included in the sdist +# (b) wheels on PyPI - most wheels include vendored libraries with additional licenses: +# - libopenblas : BSD-3-Clause AND BSD-3-Clause-Attribution (all except arm64 macOS>=14) +# - libgfortran : GPL-3.0-with-GCC-exception (all except arm64 macOS>=14) +# - libquadmath : LGPL-2.1-or-later (all except arm64 macOS>=14 and Windows) +# The licenses for these vendored components are dynamically included +# in the build process for PyPI wheels. +license = 'BSD-3-Clause AND 0BSD AND MIT AND Zlib AND CC0-1.0' +license-files = [ + 'LICENSE.txt', # BSD-3-Clause + 'numpy/_core/include/numpy/libdivide/LICENSE.txt', # Zlib + 'numpy/_core/src/common/pythoncapi-compat/COPYING', # 0BSD + 'numpy/_core/src/highway/LICENSE', # Dual-licensed: Apache 2.0 or BSD 3-Clause + 'numpy/_core/src/multiarray/dragon4_LICENSE.txt', # MIT + 'numpy/_core/src/npysort/x86-simd-sort/LICENSE.md', # BSD-3-Clause + 'numpy/_core/src/umath/svml/LICENSE', # BSD-3-Clause + 'numpy/fft/pocketfft/LICENSE.md', # BSD-3-Clause + 'numpy/ma/LICENSE', # BSD-3-Clause + 'numpy/linalg/lapack_lite/LICENSE.txt', # BSD-3-Clause + 'numpy/random/LICENSE.md', # BSD-3-Clause + 'numpy/random/src/distributions/LICENSE.md', # BSD-3-Clause AND MIT + 'numpy/random/src/mt19937/LICENSE.md', # BSD-3-Clause AND MIT + 'numpy/random/src/pcg64/LICENSE.md', # MIT + 'numpy/random/src/philox/LICENSE.md', # BSD-3-Clause + 'numpy/random/src/sfc64/LICENSE.md', # MIT + 'numpy/random/src/splitmix64/LICENSE.md', # CC0-1.0 +] +# The license files below apply only to files in the repo and sdist, not to +# installed `numpy` packages or wheels (build/doc tools don't affect the +# license of the installed package). 
We have to make a choice whether to add +# those to the SPDX expression above since PEP 639 is unclear on the +# differences; we choose to make the SPDX expression reflect *a wheel built +# from the sources*. +# '.spin/LICENSE', # BSD-3-Clause +# 'doc/source/_static/scipy-mathjax/LICENSE', # Apache-2.0 +# 'numpy/_build_utils/tempita/LICENSE.txt', # BSD-3-Clause +# 'vendored-meson/meson/COPYING', # Apache-2.0 +# Note that the commented out license files are still included in the sdist, +# just not in Core Metadata and in the .dist-info directory. + [project.scripts] f2py = 'numpy.f2py.f2py2e:main' numpy-config = 'numpy._configtool:main' +[project.entry-points.pkg_config] +numpy = 'numpy._core.lib.pkgconfig' + [project.entry-points.array_api] numpy = 'numpy' @@ -128,6 +173,11 @@ tracker = "https://github.com/numpy/numpy/issues" name = "Performance improvements and changes" showcontent = true + [[tool.towncrier.type]] + directory = "typing" + name = "Typing improvements and changes" + showcontent = true + [[tool.towncrier.type]] directory = "change" name = "Changes" @@ -136,56 +186,60 @@ tracker = "https://github.com/numpy/numpy/issues" [tool.cibuildwheel] # Note: the below skip command doesn't do much currently, the platforms to -# build wheels for in CI are controlled in `.github/workflows/wheels.yml` and -# `tools/ci/cirrus_wheels.yml`. -build-frontend = "build" -skip = "cp36-* cp37-* cp-38* pp37-* *-manylinux_i686 *_ppc64le *_s390x *_universal2" +# build wheels for in CI are controlled in `.github/workflows/wheels.yml`. 
+# universal2 wheels are not supported (see gh-21233), use `delocate-fuse` if you need them +skip = ["*_i686", "*_ppc64le", "*_s390x", "*_universal2"] before-build = "bash {project}/tools/wheels/cibw_before_build.sh {project}" -# The build will use openblas64 everywhere, except on arm64 macOS >=14.0 (uses Accelerate) -config-settings = "setup-args=-Duse-ilp64=true setup-args=-Dallow-noblas=false build-dir=build" before-test = "pip install -r {project}/requirements/test_requirements.txt" test-command = "bash {project}/tools/wheels/cibw_test_command.sh {project}" +enable = ["cpython-freethreading", "cpython-prerelease"] + +# The build will use openblas64 everywhere, except on arm64 macOS >=14.0 (uses Accelerate) +[tool.cibuildwheel.config-settings] +setup-args = ["-Duse-ilp64=true", "-Dallow-noblas=false"] +build-dir = "build" [tool.cibuildwheel.linux] -manylinux-x86_64-image = "manylinux2014" -manylinux-aarch64-image = "manylinux2014" -musllinux-x86_64-image = "musllinux_1_1" +manylinux-x86_64-image = "manylinux_2_28" +manylinux-aarch64-image = "manylinux_2_28" +musllinux-x86_64-image = "musllinux_1_2" +musllinux-aarch64-image = "musllinux_1_2" [tool.cibuildwheel.linux.environment] -# RUNNER_OS is a GitHub Actions specific env var; define it here so it works on Cirrus CI too +# RUNNER_OS is a GitHub Actions specific env var; define it here so it's +# defined when running cibuildwheel locally RUNNER_OS="Linux" # /project will be the $PWD equivalent inside the docker used to build the wheel PKG_CONFIG_PATH="/project/.openblas" -LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/project/.openblas/lib" - -[tool.cibuildwheel.macos] -# universal2 wheels are not supported (see gh-21233), use `delocate-fuse` if you need them -# note that universal2 wheels are not built, they're listed in the tool.cibuildwheel.skip -# section -# Not clear why the DYLD_LIBRARY_PATH is not passed through from the environment -repair-wheel-command = [ - "export DYLD_LIBRARY_PATH=$PWD/.openblas/lib", - "echo 
DYLD_LIBRARY_PATH $DYLD_LIBRARY_PATH", - "delocate-wheel --require-archs {delocate_archs} -w {dest_dir} -v {wheel}", -] [tool.cibuildwheel.windows] -# This does not work, use CIBW_ENVIRONMENT_WINDOWS -environment = {PKG_CONFIG_PATH="./.openblas"} -config-settings = "setup-args=--vsenv setup-args=-Dallow-noblas=false build-dir=build" +config-settings = {setup-args = ["--vsenv", "-Dallow-noblas=false"], build-dir="build"} repair-wheel-command = "bash -el ./tools/wheels/repair_windows.sh {wheel} {dest_dir}" [[tool.cibuildwheel.overrides]] -select = "*-win32" -config-settings = "setup-args=--vsenv setup-args=-Dallow-noblas=true build-dir=build" +select = ["*-win32"] +config-settings = {setup-args = ["--vsenv", "-Dallow-noblas=true"], build-dir="build"} repair-wheel-command = "" +[tool.cibuildwheel.pyodide] +before-test = "pip install -r {project}/requirements/emscripten_test_requirements.txt" +# Pyodide ensures that the wheels are already repaired by auditwheel-emscripten +repair-wheel-command = "" +test-command = "python -m pytest --pyargs numpy -m 'not slow'" + +[tool.cibuildwheel.pyodide.config-settings] +build-dir = "build" +setup-args = ["--cross-file=$PWD/tools/ci/emscripten/emscripten.meson.cross", "-Dblas=none", "-Dlapack=none"] + + + [tool.meson-python] meson = 'vendored-meson/meson/meson.py' [tool.meson-python.args] install = ['--tags=runtime,python-runtime,tests,devel'] + [tool.spin] package = 'numpy' @@ -197,6 +251,8 @@ cli = 'vendored-meson/meson/meson.py' ".spin/cmds.py:build", ".spin/cmds.py:test", ".spin/cmds.py:mypy", + ".spin/cmds.py:pyrefly", + ".spin/cmds.py:stubtest", ".spin/cmds.py:config_openblas", ".spin/cmds.py:lint", ] @@ -211,5 +267,33 @@ cli = 'vendored-meson/meson/meson.py' ".spin/cmds.py:docs", ".spin/cmds.py:changelog", ".spin/cmds.py:notes", + ".spin/cmds.py:check_docs", + ".spin/cmds.py:check_tutorials", ] "Metrics" = [".spin/cmds.py:bench"] + + +[tool.pyrefly] +project-includes = ["numpy/**/*.pyi"] +project-excludes = 
["numpy/typing/tests/**"] + +[tool.pyrefly.errors] +implicit-any = "error" +unannotated-parameter = "error" +unannotated-return = "error" + +[[tool.pyrefly.sub-config]] +matches = "numpy/__init__.pyi" +errors = { bad-override = "ignore" } + +[[tool.pyrefly.sub-config]] +matches = "numpy/_typing/_nbit_base.pyi" +errors = { invalid-inheritance = "ignore" } + +[[tool.pyrefly.sub-config]] +matches = "numpy/ma/core.pyi" +errors = { bad-override = "ignore" } + +[[tool.pyrefly.sub-config]] +matches = "numpy/matrixlib/defmatrix.pyi" +errors = { bad-override = "ignore" } diff --git a/pytest.ini b/pytest.ini index 71542643e170..532095ab9aa7 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,5 +1,5 @@ [pytest] -addopts = -l +addopts = -l -ra --strict-markers --strict-config norecursedirs = doc tools numpy/linalg/lapack_lite numpy/_core/code_generators numpy/_core/src/common/pythoncapi-compat doctest_optionflags = NORMALIZE_WHITESPACE ELLIPSIS ALLOW_UNICODE ALLOW_BYTES junit_family=xunit2 @@ -22,6 +22,8 @@ filterwarnings = ignore:The numpy.array_api submodule is still experimental. See NEP 47. 
# ignore matplotlib headless warning for pyplot ignore:Matplotlib is currently using agg, which is a non-GUI backend, so cannot show the figure.:UserWarning -# Ignore DeprecationWarnings from distutils - ignore::DeprecationWarning:.*distutils - ignore:\n\n `numpy.distutils`:DeprecationWarning +# Ignore DeprecationWarning from typing.mypy_plugin + ignore:`numpy.typing.mypy_plugin` is deprecated:DeprecationWarning +# Ignore DeprecationWarning from struct module +# see https://github.com/numpy/numpy/issues/28926 + ignore:Due to \'_pack_\', the diff --git a/requirements/all_requirements.txt b/requirements/all_requirements.txt index 2e457cb0bdbe..ad15045c15e2 100644 --- a/requirements/all_requirements.txt +++ b/requirements/all_requirements.txt @@ -3,4 +3,5 @@ -r linter_requirements.txt -r release_requirements.txt -r test_requirements.txt +-r typing_requirements.txt -r ci_requirements.txt diff --git a/requirements/build_requirements.txt b/requirements/build_requirements.txt index 701867b64465..18db99508a09 100644 --- a/requirements/build_requirements.txt +++ b/requirements/build_requirements.txt @@ -1,5 +1,5 @@ meson-python>=0.13.1 Cython>=3.0.6 ninja -spin==0.8 +spin build diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index e134b0dae82e..da8c8141917f 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,3 +1,3 @@ spin # Keep this in sync with ci_requirements.txt -scipy-openblas32==0.3.27.44.3 +scipy-openblas32==0.3.31.22.1 diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index f688bfb6eb3a..6a63af65d96c 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,4 @@ spin # Keep this in sync with ci32_requirements.txt -scipy-openblas32==0.3.27.44.3 -scipy-openblas64==0.3.27.44.3 +scipy-openblas32==0.3.31.22.1 +scipy-openblas64==0.3.31.22.1 diff --git a/requirements/doc_requirements.txt b/requirements/doc_requirements.txt 
index 7dfb228c83f1..ea75103117a3 100644 --- a/requirements/doc_requirements.txt +++ b/requirements/doc_requirements.txt @@ -1,10 +1,11 @@ # doxygen required, use apt-get or dnf sphinx==7.2.6 -numpydoc==1.4 +numpydoc==1.10.0 pydata-sphinx-theme>=0.15.2 +sphinx-copybutton sphinx-design scipy -matplotlib +matplotlib!=3.10.6 pandas breathe>4.33.0 ipython!=8.1.0 @@ -15,3 +16,11 @@ pickleshare # needed to build release notes towncrier toml + +scipy-doctest>=1.8.0 + +# interactive documentation utilities +# see https://github.com/jupyterlite/pyodide-kernel#compatibility +jupyterlite-sphinx>=0.18.0 +# Works with Pyodide 0.27.1 +jupyterlite-pyodide-kernel==0.5.2 diff --git a/requirements/emscripten_test_requirements.txt b/requirements/emscripten_test_requirements.txt index 18cfb219034d..71e736ceed90 100644 --- a/requirements/emscripten_test_requirements.txt +++ b/requirements/emscripten_test_requirements.txt @@ -1,4 +1,4 @@ -hypothesis==6.81.1 -pytest==7.4.0 -pytz==2023.3.post1 +hypothesis==6.151.9 +pytest==9.0.2 +tzdata pytest-xdist diff --git a/requirements/linter_requirements.txt b/requirements/linter_requirements.txt index 2e0298baed52..cffefb4b0183 100644 --- a/requirements/linter_requirements.txt +++ b/requirements/linter_requirements.txt @@ -1,2 +1,5 @@ -pycodestyle==2.8.0 +# keep in sync with `environment.yml` +cython-lint +ruff==0.15.2 GitPython>=3.1.30 +spin diff --git a/requirements/release_requirements.txt b/requirements/release_requirements.txt index d23e69fa1fa8..55079d795ed9 100644 --- a/requirements/release_requirements.txt +++ b/requirements/release_requirements.txt @@ -1,19 +1,9 @@ # These packages are needed for a release in addition to those needed # for building, testing, and the creation of documentation. 
-# download-wheels.py -urllib3 -beautifulsoup4 - # changelog.py pygithub gitpython>=3.1.30 -# uploading wheels -twine - -# building and notes -Paver - # uploading release documentation packaging diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 4e53f86d355c..f704fab81ede 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -1,21 +1,11 @@ Cython -wheel==0.38.1 -#setuptools==65.5.1 ; python_version < '3.12' -#setuptools ; python_version >= '3.12' -setuptools -hypothesis==6.81.1 -pytest==7.4.0 -pytz==2023.3.post1 -pytest-cov==4.1.0 +hypothesis==6.151.9 +pytest==9.0.2 +pytest-cov==7.0.0 meson ninja; sys_platform != "emscripten" pytest-xdist -# for numpy.random.test.test_extending -cffi; python_version < '3.10' -# For testing types. Notes on the restrictions: -# - Mypy relies on C API features not present in PyPy -# NOTE: Keep mypy in sync with environment.yml -mypy==1.10.0; platform_python_implementation != "PyPy" -typing_extensions>=4.2.0 +pytest-timeout # for optional f2py encoding detection charset-normalizer +tzdata diff --git a/requirements/typing_requirements.txt b/requirements/typing_requirements.txt new file mode 100644 index 000000000000..5b5d3c5b4cab --- /dev/null +++ b/requirements/typing_requirements.txt @@ -0,0 +1,6 @@ +# static typing requirements that are not needed for runtime tests + +-r test_requirements.txt + +mypy==1.19.1 +pyrefly==0.53.0 diff --git a/ruff.toml b/ruff.toml new file mode 100644 index 000000000000..f204c77545c0 --- /dev/null +++ b/ruff.toml @@ -0,0 +1,121 @@ +extend-exclude = [ + "numpy/__config__.py", + "numpy/typing/_char_codes.py", + "spin/cmds.py", + # Submodules. 
+ "doc/source/_static/scipy-mathjax", + "vendored-meson/meson", + "numpy/fft/pocketfft", + "numpy/_core/src/umath/svml", + "numpy/_core/src/npysort/x86-simd-sort", + "numpy/_core/src/highway", + "numpy/_core/src/common/pythoncapi-compat", +] + +line-length = 88 + +[format] +line-ending = "lf" + +[lint] +preview = true +select = [ + "B", # flake8-bugbear + "C4", # flake8-comprehensions + "ISC", # flake8-implicit-str-concat + "LOG", # flake8-logging + "G", # flake8-logging-format + "PIE", # flake8-pie + "Q", # flake8-quotes + "TID", # flake8-tidy-imports + "FLY", # flynt + "I", # isort + "PD", # pandas-vet + "PERF", # perflint + "E", # pycodestyle/error + "W", # pycodestyle/warning + "F", # pyflakes + "PGH", # pygrep-hooks + "PLE", # pylint/error + "UP", # pyupgrade + "RUF012", # ruff: mutable-class-default + "RUF100", # ruff: unused-noqa +] +ignore = [ + # flake8-bugbear + "B006", # Do not use mutable data structures for argument defaults + "B007", # Loop control variable not used within loop body + "B011", # Do not `assert False` (`python -O` removes these calls), raise `AssertionError()` + "B023", # Function definition does not bind loop variable + "B028", # No explicit `stacklevel` keyword argument found + "B904", # Within an `except` clause distinguish raised exceptions from errors in exception handling + "B905", #`zip()` without an explicit `strict=` parameter + # flake8-comprehensions + "C408", # Unnecessary `dict()` call (rewrite as a literal) + # flake8-implicit-str-concat + "ISC002", # Implicitly concatenated string literals over multiple lines + # flake8-pie + "PIE790", # Unnecessary `pass` statement + # perflint + "PERF401", # Use a list comprehension to create a transformed list + # pycodestyle/error + "E241", # Multiple spaces after comma + "E265", # Block comment should start with `# ` + "E266", # Too many leading `#` before block comment + "E302", # TODO: Expected 2 blank lines, found 1 + "E402", # Module level import not at top of file + "E712", # 
Avoid equality comparisons to `True` or `False` + "E731", # Do not assign a `lambda` expression, use a `def` + "E741", # Ambiguous variable name + # pyflakes + "F403", # `from ... import *` used; unable to detect undefined names + "F405", # may be undefined, or defined from star imports + "F821", # Undefined name + "F841", # Local variable is assigned to but never used + # pyupgrade + "UP015" , # Unnecessary mode argument + "UP031", # TODO: Use format specifiers instead of percent format +] + +[lint.per-file-ignores] +"_tempita.py" = ["B909", "RUF012"] +"bench_*.py" = ["B015", "B018", "RUF012"] +"test*.py" = ["B015", "B018", "E201", "E714", "RUF012"] + +"numpy/_core/_add_newdocs.py" = ["E501"] +"numpy/_core/_add_newdocs_scalars.py" = ["E501"] +"numpy/lib/tests/test_format.py" = ["E501"] +"numpy/linalg/tests/test_linalg.py" = ["E501"] +"numpy/f2py/*py" = ["E501"] +"numpy*pyi" = ["E501"] +# "useless assignments" aren't so useless when you're testing that they don't make type checkers scream +"numpy/typing/tests/data/*" = ["B015", "B018", "E501"] +# too disruptive to enable all at once +"**/*.py" = ["Q"] + +"__init__.py" = ["F401", "F403", "F405"] +"__init__.pyi" = ["F401"] +"numpy/_core/defchararray.py" = ["F403", "F405"] +"numpy/_core/multiarray.py" = ["F405"] +"numpy/_core/numeric.py" = ["F403", "F405"] +"numpy/_core/umath.py" = ["F401", "F403", "F405"] +"numpy/f2py/capi_maps.py" = ["F403", "F405"] +"numpy/f2py/crackfortran.py" = ["F403", "F405"] +"numpy/f2py/f90mod_rules.py" = ["F403", "F405"] +"numpy/ma/core.pyi" = ["F403", "F405"] +"numpy/matlib.py" = ["F405"] +"numpy/matlib.pyi" = ["F811"] + +[lint.flake8-builtins] +builtins-allowed-modules = ["random", "typing"] + +[lint.flake8-import-conventions.extend-aliases] +"numpy" = "np" +"numpy.typing" = "npt" + +[lint.isort] +# these are treated as stdlib within .pyi stubs +extra-standard-library = ["_typeshed", "typing_extensions"] +known-first-party = ["numpy"] +combine-as-imports = true +split-on-trailing-comma = 
false diff --git a/tools/c_coverage/c_coverage_report.py b/tools/c_coverage/c_coverage_report.py index bd3eeaee9776..d188b3280e95 100755 --- a/tools/c_coverage/c_coverage_report.py +++ b/tools/c_coverage/c_coverage_report.py @@ -7,15 +7,15 @@ import os import re import sys -from xml.sax.saxutils import quoteattr, escape +from xml.sax.saxutils import escape, quoteattr try: import pygments - if tuple([int(x) for x in pygments.__version__.split('.')]) < (0, 11): - raise ImportError() + if tuple(int(x) for x in pygments.__version__.split('.')) < (0, 11): + raise ImportError from pygments import highlight - from pygments.lexers import CLexer from pygments.formatters import HtmlFormatter + from pygments.lexers import CLexer has_pygments = True except ImportError: print("This script requires pygments 0.11 or greater to generate HTML") @@ -30,7 +30,7 @@ def __init__(self, lines, **kwargs): def wrap(self, source, outfile): for i, (c, t) in enumerate(HtmlFormatter.wrap(self, source, outfile)): - as_functions = self.lines.get(i-1, None) + as_functions = self.lines.get(i - 1, None) if as_functions is not None: yield 0, ('
<div title=%s style="background: #ccffcc">
[%2d]' % (quoteattr('as ' + ', '.join(as_functions)), @@ -54,25 +54,23 @@ def mark_line(self, lineno, as_func=None): line.add(as_func) def write_text(self, fd): - source = open(self.path, "r") - for i, line in enumerate(source): - if i + 1 in self.lines: - fd.write("> ") - else: - fd.write("! ") - fd.write(line) - source.close() + with open(self.path, "r") as source: + for i, line in enumerate(source): + if i + 1 in self.lines: + fd.write("> ") + else: + fd.write("! ") + fd.write(line) def write_html(self, fd): - source = open(self.path, 'r') - code = source.read() - lexer = CLexer() - formatter = FunctionHtmlFormatter( - self.lines, - full=True, - linenos='inline') - fd.write(highlight(code, lexer, formatter)) - source.close() + with open(self.path, 'r') as source: + code = source.read() + lexer = CLexer() + formatter = FunctionHtmlFormatter( + self.lines, + full=True, + linenos='inline') + fd.write(highlight(code, lexer, formatter)) class SourceFiles: @@ -86,7 +84,7 @@ def get_file(self, path): if self.prefix is None: self.prefix = path else: - self.prefix = os.path.commonprefix([self.prefix, path]) + self.prefix = os.path.commonpath([self.prefix, path]) return self.files[path] def clean_path(self, path): @@ -95,24 +93,24 @@ def clean_path(self, path): def write_text(self, root): for path, source in self.files.items(): - fd = open(os.path.join(root, self.clean_path(path)), "w") - source.write_text(fd) - fd.close() + with open(os.path.join(root, self.clean_path(path)), "w") as fd: + source.write_text(fd) def write_html(self, root): for path, source in self.files.items(): - fd = open(os.path.join(root, self.clean_path(path) + ".html"), "w") - source.write_html(fd) - fd.close() + with open( + os.path.join(root, self.clean_path(path) + ".html"), "w" + ) as fd: + source.write_html(fd) - fd = open(os.path.join(root, 'index.html'), 'w') - fd.write("") - paths = sorted(self.files.keys()) - for path in paths: - fd.write('
<p><a href="%s.html">%s</a></p>
' % - (self.clean_path(path), escape(path[len(self.prefix):]))) - fd.write("") - fd.close() + with open(os.path.join(root, 'index.html'), 'w') as fd: + fd.write("") + paths = sorted(self.files.keys()) + for path in paths: + fd.write('
<p><a href="%s.html">%s</a></p>
' % + (self.clean_path(path), + escape(path[len(self.prefix):]))) + fd.write("") def collect_stats(files, fd, pattern): @@ -124,14 +122,14 @@ def collect_stats(files, fd, pattern): current_file = None current_function = None - for i, line in enumerate(fd): - if re.match("f[lie]=.+", line): + for line in fd: + if re.match(r"f[lie]=.+", line): path = line.split('=', 2)[1].strip() if os.path.exists(path) and re.search(pattern, path): current_file = files.get_file(path) else: current_file = None - elif re.match("fn=.+", line): + elif re.match(r"fn=.+", line): current_function = line.split('=', 2)[1].strip() elif current_file is not None: for regex in line_regexs: @@ -164,9 +162,8 @@ def collect_stats(files, fd, pattern): files = SourceFiles() for log_file in args.callgrind_file: - log_fd = open(log_file, 'r') - collect_stats(files, log_fd, args.pattern) - log_fd.close() + with open(log_file, 'r') as log_fd: + collect_stats(files, log_fd, args.pattern) if not os.path.exists(args.directory): os.makedirs(args.directory) diff --git a/tools/changelog.py b/tools/changelog.py index 7b7e66ddb511..6013d70adfbc 100755 --- a/tools/changelog.py +++ b/tools/changelog.py @@ -34,8 +34,8 @@ """ import os -import sys import re + from git import Repo from github import Github @@ -75,7 +75,7 @@ def get_authors(revision_range): # Append '+' to new authors. 
authors_new = [s + ' +' for s in authors_cur - authors_pre] - authors_old = [s for s in authors_cur & authors_pre] + authors_old = list(authors_cur & authors_pre) authors = authors_new + authors_old authors.sort() return authors @@ -117,7 +117,7 @@ def main(token, revision_range): heading = "Contributors" print() print(heading) - print("="*len(heading)) + print("=" * len(heading)) print(author_msg % len(authors)) for s in authors: @@ -130,17 +130,50 @@ def main(token, revision_range): print() print(heading) - print("="*len(heading)) + print("=" * len(heading)) print(pull_request_msg % len(pull_requests)) + def backtick_repl(matchobj): + """repl to add an escaped space following a code block if needed""" + if matchobj.group(2) != ' ': + post = r'\ ' + matchobj.group(2) + else: + post = matchobj.group(2) + return '``' + matchobj.group(1) + '``' + post + for pull in pull_requests: + # sanitize whitespace title = re.sub(r"\s+", " ", pull.title.strip()) + + # substitute any single backtick not adjacent to a backtick + # for a double backtick + title = re.sub( + r"(?P
<pre>(?:^|(?<=[^`])))`(?P<post>(?=[^`]|$))",
+            r"\g
``\g",
+            title
+        )
+        # add an escaped space if code block is not followed by a space
+        title = re.sub(r"``(.*?)``(.)", backtick_repl, title)
+
+        # sanitize asterisks
+        title = title.replace('*', '\\*')
+
         if len(title) > 60:
             remainder = re.sub(r"\s.*$", "...", title[60:])
             if len(remainder) > 20:
-                remainder = title[:80] + "..."
+                # just use the first 80 characters, with ellipses.
+                # note: this was previously bugged,
+                # assigning to `remainder` rather than `title`
+                title = title[:80] + "..."
             else:
+                # use the first 60 characters and the next word
                 title = title[:60] + remainder
+
+            if title.count('`') % 4 != 0:
+                # ellipses have cut in the middle of a code block,
+                # so finish the code block before the ellipses
+                title = title[:-3] + '``...'
+
         print(pull_msg.format(pull.number, pull.html_url, title))
 
 
diff --git a/tools/check_installed_files.py b/tools/check_installed_files.py
index c45a046b1ca2..fd66b68a43fc 100644
--- a/tools/check_installed_files.py
+++ b/tools/check_installed_files.py
@@ -18,11 +18,10 @@
 
 """
 
-import os
 import glob
-import sys
 import json
-
+import os
+import sys
 
 CUR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__)))
 ROOT_DIR = os.path.dirname(CUR_DIR)
@@ -55,7 +54,7 @@ def main(install_dir, tests_check):
         for test_file in numpy_test_files.keys():
             if test_file not in installed_test_files.keys():
                 raise Exception(
-                    "%s is not installed" % numpy_test_files[test_file]
+                    f"{numpy_test_files[test_file]} is not installed"
                 )
 
         print("----------- All the test files were installed --------------")
@@ -69,14 +68,14 @@ def main(install_dir, tests_check):
             if (tests_check == "--no-tests" and
                     "tests" in numpy_pyi_files[pyi_file]):
                 continue
-            raise Exception("%s is not installed" % numpy_pyi_files[pyi_file])
+            raise Exception(f"{numpy_pyi_files[pyi_file]} is not installed")
 
     print("----------- All the necessary .pyi files "
           "were installed --------------")
 
 
 def get_files(dir_to_check, kind='test'):
-    files = dict()
+    files = {}
     patterns = {
         'test': f'{dir_to_check}/**/test_*.py',
         'stub': f'{dir_to_check}/**/*.pyi',
@@ -85,11 +84,6 @@ def get_files(dir_to_check, kind='test'):
         relpath = os.path.relpath(path, dir_to_check)
         files[relpath] = path
 
-    if sys.version_info >= (3, 12):
-        files = {
-            k: v for k, v in files.items() if not k.startswith('distutils')
-        }
-
     # ignore python files in vendored pythoncapi-compat submodule
     files = {
         k: v for k, v in files.items() if 'pythoncapi-compat' not in k
@@ -117,8 +111,8 @@ def get_files(dir_to_check, kind='test'):
 
     for key in targets.keys():
         for values in list(targets[key].values()):
-            if not values['tag'] in all_tags:
+            if values['tag'] not in all_tags:
                 all_tags.add(values['tag'])
 
-    if all_tags != set(['runtime', 'python-runtime', 'devel', 'tests']):
+    if all_tags != {'runtime', 'python-runtime', 'devel', 'tests'}:
         raise AssertionError(f"Found unexpected install tag: {all_tags}")
diff --git a/tools/check_openblas_version.py b/tools/check_openblas_version.py
index b51e68047fd4..085308b71171 100644
--- a/tools/check_openblas_version.py
+++ b/tools/check_openblas_version.py
@@ -1,19 +1,107 @@
 """
-usage: check_openblas_version.py 
+Checks related to the OpenBLAS version used in CI.
 
-Check the blas version is blas from scipy-openblas and is higher than
-min_version
-example: check_openblas_version.py 0.3.26
+Options:
+1. Check that the BLAS used at build time is (a) scipy-openblas, and (b) its version is
+   higher than a given minimum version. Note: this method only seems to give
+   the first 3 version components, so 0.3.30.0.7 gets translated to 0.3.30 when reading
+   it back out from `scipy.show_config()`.
+2. Check requirements files in the main numpy repo and compare with the numpy-release
+   repo. Goal is to ensure that `numpy-release` is not behind.
+
+Both of these checks are primarily useful in a CI job.
+
+Examples:
+
+    # Requires installed numpy
+    $ python check_openblas_version.py --min-version 0.3.30
+
+    # Only needs the requirements files
+    $ python check_openblas_version.py --req-files \
+            ../numpy-release/requirements/openblas_requirements.txt
 """
 
-import numpy
+import argparse
+import os.path
 import pprint
-import sys
-
-version = sys.argv[1]
-deps = numpy.show_config('dicts')['Build Dependencies']
-assert "blas" in deps
-print("Build Dependencies: blas")
-pprint.pprint(deps["blas"])
-assert deps["blas"]["version"].split(".") >= version.split(".")
-assert deps["blas"]["name"] == "scipy-openblas"
+
+
+def check_built_version(min_version):
+    import numpy
+    deps = numpy.show_config('dicts')['Build Dependencies']
+    assert "blas" in deps
+    print("Build Dependencies: blas")
+    pprint.pprint(deps["blas"])
+    assert deps["blas"]["version"].split(".") >= min_version.split(".")
+    assert deps["blas"]["name"] == "scipy-openblas"
+
+
+def check_requirements_files(reqfile):
+    if not os.path.exists(reqfile):
+        print(f"Path does not exist: {reqfile}")
+
+    def get_version(line):
+        req = line.split(";")[0].split("==")[1].split(".")[:5]
+        return tuple(int(s) for s in req)
+
+    def parse_reqs(reqfile):
+        with open(reqfile) as f:
+            lines = f.readlines()
+
+        v32 = None
+        v64 = None
+        for line in lines:
+            if "scipy-openblas32" in line:
+                v32 = get_version(line)
+            if "scipy-openblas64" in line:
+                v64 = get_version(line)
+        if v32 is None or v64 is None:
+            raise AssertionError("Expected `scipy-openblas32` and "
+                                 "`scipy-openblas64` in `ci_requirements.txt`, "
+                                 f"got:\n  {'  '.join(lines)}")
+        return v32, v64
+
+    this_dir = os.path.abspath(os.path.dirname(__file__))
+    reqfile_thisrepo = os.path.join(this_dir, '..', 'requirements',
+                                    'ci_requirements.txt')
+
+    v32_thisrepo, v64_thisrepo = parse_reqs(reqfile_thisrepo)
+    v32_rel, v64_rel = parse_reqs(reqfile)
+
+    def compare_versions(v_rel, v_thisrepo, bits):
+        if not v_rel >= v_thisrepo:
+            raise AssertionError(f"`numpy-release` version of scipy-openblas{bits} "
+                                 f"{v_rel} is behind this repo: {v_thisrepo}")
+
+    compare_versions(v64_rel, v64_thisrepo, "64")
+    compare_versions(v32_rel, v32_thisrepo, "32")
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--req-files",
+        type=str,
+        help="Path to the requirements file to compare with the one in this repo"
+    )
+    parser.add_argument(
+        "--min-version",
+        type=str,
+        help="The minimum version that should have been used at build time for "
+             "installed `numpy` package"
+    )
+    args = parser.parse_args()
+
+    if args.min_version is None and args.req_files is None:
+        raise ValueError("One of `--req-files` or `--min-version` needs to be "
+                         "specified")
+
+    if args.min_version:
+        check_built_version(args.min_version)
+
+    if args.req_files:
+        check_requirements_files(args.req_files)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/check_python_h_first.py b/tools/check_python_h_first.py
new file mode 100755
index 000000000000..c0d44ad635f4
--- /dev/null
+++ b/tools/check_python_h_first.py
@@ -0,0 +1,254 @@
+#!/usr/bin/env python
+"""Check that Python.h is included before any stdlib headers.
+
+May be a bit overzealous, but it should get the job done.
+"""
+import argparse
+import fnmatch
+import os.path
+import re
+import subprocess
+import sys
+
+from get_submodule_paths import get_submodule_paths
+
+HEADER_PATTERN = re.compile(
+    r'^\s*#\s*include\s*[<"]((?:\w+/)*\w+(?:\.h[hp+]{0,2})?)[>"]\s*$'
+)
+
+PYTHON_INCLUDING_HEADERS = [
+    "Python.h",
+    # This isn't all of Python.h, but it is the visibility macros
+    "pyconfig.h",
+    "numpy/npy_common.h",
+    "numpy/npy_math.h",
+    "numpy/arrayobject.h",
+    "numpy/ndarrayobject.h",
+    "numpy/ndarraytypes.h",
+    "numpy/random/distributions.h",
+    "npy_sort.h",
+    "npy_config.h",
+    "common.h",
+    "npy_cpu_features.h",
+    # Boost::Python
+    "boost/python.hpp",
+]
+LEAF_HEADERS = [
+    "numpy/numpyconfig.h",
+    "numpy/npy_os.h",
+    "numpy/npy_cpu.h",
+    "numpy/utils.h",
+]
+
+C_CPP_EXTENSIONS = (".c", ".h", ".cpp", ".hpp", ".cc", ".hh", ".cxx", ".hxx")
+# check against list in diff_files
+
+PARSER = argparse.ArgumentParser(description=__doc__)
+PARSER.add_argument(
+    "files",
+    nargs="*",
+    help="Lint these files or directories; use **/*.c to lint all files\n"
+    "Expects relative paths",
+)
+
+
+def check_python_h_included_first(name_to_check: str) -> int:
+    """Check that the passed file includes Python.h first if it does at all.
+
+    Perhaps overzealous, but that should work around concerns with
+    recursion.
+
+    Parameters
+    ----------
+    name_to_check : str
+        The name of the file to check.
+
+    Returns
+    -------
+    int
+        The number of headers before Python.h
+    """
+    included_python = False
+    included_non_python_header = []
+    warned_python_construct = False
+    basename_to_check = os.path.basename(name_to_check)
+    in_comment = False
+    includes_headers = False
+    with open(name_to_check) as in_file:
+        for i, line in enumerate(in_file, 1):
+            # Very basic comment parsing
+            # Assumes /*...*/ comments are on their own lines
+            if "/*" in line:
+                if "*/" not in line:
+                    in_comment = True
+                # else-branch could use regex to remove comment and continue
+                continue
+            if in_comment:
+                if "*/" in line:
+                    in_comment = False
+                continue
+            line = line.split("//", 1)[0].strip()
+            match = HEADER_PATTERN.match(line)
+            if match:
+                includes_headers = True
+                this_header = match.group(1)
+                if this_header in PYTHON_INCLUDING_HEADERS:
+                    if included_non_python_header and not included_python:
+                        # Headers before python-including header
+                        print(
+                            f"Header before Python.h in file {name_to_check:s}\n"
+                            f"Python.h on line {i:d}, other header(s) on line(s)"
+                            f" {included_non_python_header}",
+                            file=sys.stderr,
+                        )
+                    # else:  # no headers before python-including header
+                    included_python = True
+                    PYTHON_INCLUDING_HEADERS.append(basename_to_check)
+                    if os.path.dirname(name_to_check).endswith("include/numpy"):
+                        PYTHON_INCLUDING_HEADERS.append(f"numpy/{basename_to_check:s}")
+                    # We just found out where Python.h comes in this file
+                    break
+                elif this_header in LEAF_HEADERS:
+                    # This header is just defines, so it won't include
+                    # the system headers that cause problems
+                    continue
+                elif not included_python and (
+                    "numpy/" in this_header
+                    and this_header not in LEAF_HEADERS
+                    or "python" in this_header.lower()
+                ):
+                    print(
+                        f"Python.h not included before python-including header "
+                        f"in file {name_to_check:s}\n"
+                        f"{this_header:s} on line {i:d}",
+                        file=sys.stderr,
+                    )
+                    included_python = True
+                    PYTHON_INCLUDING_HEADERS.append(basename_to_check)
+                elif not included_python and this_header not in LEAF_HEADERS:
+                    included_non_python_header.append(i)
+            elif (
+                not included_python
+                and not warned_python_construct
+                and ".h" not in basename_to_check
+            ) and ("py::" in line or "PYBIND11_" in line):
+                print(
+                    "Python-including header not used before python constructs "
+                    f"in file {name_to_check:s}\nConstruct on line {i:d}",
+                    file=sys.stderr,
+                )
+                warned_python_construct = True
+    if not includes_headers:
+        LEAF_HEADERS.append(basename_to_check)
+    return included_python and len(included_non_python_header)
+
+
+def sort_order(path: str) -> tuple[int, str]:
+    if "include/numpy" in path:
+        # Want to process numpy/*.h first, to work out which of those
+        # include Python.h directly
+        priority = 0x00
+    elif "h" in os.path.splitext(path)[1].lower():
+        # Then other headers, which tend to include numpy/*.h
+        priority = 0x10
+    else:
+        # Source files after headers, to give the best chance of
+        # properly checking whether they include Python.h
+        priority = 0x20
+    if "common" in path:
+        priority -= 8
+    path_basename = os.path.basename(path)
+    if path_basename.startswith("npy_"):
+        priority -= 4
+    elif path_basename.startswith("npy"):
+        priority -= 3
+    elif path_basename.startswith("np"):
+        priority -= 2
+    if "config" in path_basename:
+        priority -= 1
+    return priority, path
+
+
+def process_files(file_list: list[str]) -> int:
+    n_out_of_order = 0
+    submodule_paths = get_submodule_paths()
+    root_directory = os.path.dirname(os.path.dirname(__file__))
+    for name_to_check in sorted(file_list, key=sort_order):
+        name_to_check = os.path.join(root_directory, name_to_check)
+        if any(submodule_path in name_to_check for submodule_path in submodule_paths):
+            continue
+        if ".dispatch." in name_to_check:
+            continue
+        try:
+            n_out_of_order += check_python_h_included_first(name_to_check)
+        except UnicodeDecodeError:
+            print(f"File {name_to_check:s} not utf-8", file=sys.stderr)
+    return n_out_of_order
+
+
+def find_c_cpp_files(root: str) -> list[str]:
+
+    result = []
+
+    for dirpath, dirnames, filenames in os.walk(root):
+        # I'm assuming other people have checked boost
+        for name in ("build", ".git", "boost"):
+            try:
+                dirnames.remove(name)
+            except ValueError:
+                pass
+        for name in fnmatch.filter(dirnames, "*.p"):
+            dirnames.remove(name)
+        result.extend(
+            [
+                os.path.join(dirpath, name)
+                for name in filenames
+                if os.path.splitext(name)[1].lower() in C_CPP_EXTENSIONS
+            ]
+        )
+    # Check the headers before the source files
+    result.sort(key=lambda path: "h" in os.path.splitext(path)[1], reverse=True)
+    return result
+
+
+def diff_files(sha: str) -> list[str]:
+    """Find the diff since the given SHA.
+
+    Adapted from lint.py
+    """
+    res = subprocess.run(
+        [
+            "git",
+            "diff",
+            "--name-only",
+            "--diff-filter=ACMR",
+            "-z",
+            sha,
+            "--",
+            # Check against C_CPP_EXTENSIONS
+            "*.[chCH]",
+            "*.[ch]pp",
+            "*.[ch]xx",
+            "*.cc",
+            "*.hh",
+        ],
+        stdout=subprocess.PIPE,
+        encoding="utf-8",
+    )
+    res.check_returncode()
+    return [f for f in res.stdout.split("\0") if f]
+
+
+if __name__ == "__main__":
+    args = PARSER.parse_args()
+
+    if len(args.files) == 0:
+        files = find_c_cpp_files("numpy")
+    else:
+        files = args.files
+        if len(files) == 1 and os.path.isdir(files[0]):
+            files = find_c_cpp_files(files[0])
+
+    # See which of the headers include Python.h and add them to the list
+    n_out_of_order = process_files(files)
+    sys.exit(n_out_of_order)
diff --git a/tools/ci/array-api-skips.txt b/tools/ci/array-api-skips.txt
deleted file mode 100644
index 74c4b49c5dfc..000000000000
--- a/tools/ci/array-api-skips.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-# finfo return type misalignment
-array_api_tests/test_data_type_functions.py::test_finfo[float32]
-
-# out.dtype=float32, but should be int16
-# dtype('float16') not found
-array_api_tests/test_operators_and_elementwise_functions.py::test_ceil
-array_api_tests/test_operators_and_elementwise_functions.py::test_floor
-array_api_tests/test_operators_and_elementwise_functions.py::test_trunc
-
-# 'shape' arg is present. 'newshape' is retained for backward compat.
-array_api_tests/test_signatures.py::test_func_signature[reshape]
-
-# missing 'descending' keyword arguments
-array_api_tests/test_signatures.py::test_func_signature[argsort]
-array_api_tests/test_signatures.py::test_func_signature[sort]
-
-# ufuncs signature on linux is always <Signature (*args, **kwargs)>
-# np.vecdot is the only ufunc with a keyword argument which causes a failure
-array_api_tests/test_signatures.py::test_func_signature[vecdot]
diff --git a/tools/ci/array-api-xfails.txt b/tools/ci/array-api-xfails.txt
new file mode 100644
index 000000000000..8370099015c5
--- /dev/null
+++ b/tools/ci/array-api-xfails.txt
@@ -0,0 +1,65 @@
+# finfo return type misalignment
+array_api_tests/test_data_type_functions.py::test_finfo[float32]
+array_api_tests/test_data_type_functions.py::test_finfo[complex64]
+
+# finfo: data type <class 'numpy.float64'> not inexact
+array_api_tests/test_data_type_functions.py::test_finfo[float64]
+array_api_tests/test_data_type_functions.py::test_finfo[complex128]
+
+# iinfo: Invalid integer data type 'O'
+array_api_tests/test_data_type_functions.py::test_iinfo[int8]
+array_api_tests/test_data_type_functions.py::test_iinfo[uint8]
+array_api_tests/test_data_type_functions.py::test_iinfo[int16]
+array_api_tests/test_data_type_functions.py::test_iinfo[uint16]
+array_api_tests/test_data_type_functions.py::test_iinfo[int32]
+array_api_tests/test_data_type_functions.py::test_iinfo[uint32]
+array_api_tests/test_data_type_functions.py::test_iinfo[int64]
+array_api_tests/test_data_type_functions.py::test_iinfo[uint64]
+
+# 'shape' arg is present. 'newshape' is retained for backward compat.
+array_api_tests/test_signatures.py::test_func_signature[reshape]
+
+# 'min/max' args are present. 'a_min/a_max' are retained for backward compat.
+array_api_tests/test_signatures.py::test_func_signature[clip]
+
+# missing 'descending' keyword argument
+array_api_tests/test_signatures.py::test_func_signature[argsort]
+array_api_tests/test_signatures.py::test_func_signature[sort]
+
+# missing 'descending' keyword argument
+array_api_tests/test_sorting_functions.py::test_argsort
+array_api_tests/test_sorting_functions.py::test_sort
+
+# ufuncs signature on linux is always <Signature (*args, **kwargs)>
+# np.vecdot is the only ufunc with a keyword argument which causes a failure
+array_api_tests/test_signatures.py::test_func_signature[vecdot]
+
+# input is cast to min/max's dtype if they're different
+array_api_tests/test_operators_and_elementwise_functions.py::test_clip
+
+# missing 'dtype' keyword argument
+array_api_tests/test_signatures.py::test_extension_func_signature[fft.fftfreq]
+array_api_tests/test_signatures.py::test_extension_func_signature[fft.rfftfreq]
+
+# fails on np.repeat(np.array([]), np.array([])) test case
+array_api_tests/test_manipulation_functions.py::test_repeat
+
+# NumPy matches Python behavior and it returns NaN and -1 in these cases
+array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is +infinity and isfinite(x2_i) and x2_i > 0) -> +infinity]
+array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is +infinity and isfinite(x2_i) and x2_i < 0) -> -infinity]
+array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is -infinity and isfinite(x2_i) and x2_i > 0) -> -infinity]
+array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity]
+array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i > 0) -> +infinity]
+array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i < 0) -> -infinity]
+array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i > 0) -> -infinity]
+array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity]
+array_api_tests/test_special_cases.py::test_binary[floor_divide(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0]
+array_api_tests/test_special_cases.py::test_binary[floor_divide(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> -0]
+array_api_tests/test_special_cases.py::test_binary[__floordiv__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0]
+array_api_tests/test_special_cases.py::test_binary[__floordiv__(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> -0]
+array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i > 0) -> +infinity]
+array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i < 0) -> -infinity]
+array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i > 0) -> -infinity]
+array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity]
+array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0]
+array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> -0]
diff --git a/tools/ci/check_c_api_usage.py b/tools/ci/check_c_api_usage.py
new file mode 100644
index 000000000000..49c317a1259c
--- /dev/null
+++ b/tools/ci/check_c_api_usage.py
@@ -0,0 +1,265 @@
+#!/usr/bin/env python3
+from __future__ import annotations
+
+import argparse
+import os
+import re
+import sys
+import tempfile
+from concurrent.futures import ThreadPoolExecutor, as_completed
+from pathlib import Path
+from re import Pattern
+
+"""
+Borrow-ref C API linter (Python version).
+
+- Recursively scans source files under --root (default: numpy)
+- Matches suspicious CPython C-API calls as whole identifiers
+- Skips:
+  - lines with '// noqa: borrowed-ref OK' or
+    '// noqa: borrowed-ref - manual fix needed'
+  - line comments (// ...)
+  - block comments (/* ... */), even when they span lines
+- Prints findings and exits 1 if any issues found, else 0
+"""
+
+def strip_comments(line: str, in_block: bool) -> tuple[str, bool]:
+    """
+    Return (code_without_comments, updated_in_block).
+    Removes // line comments and /* ... */ block comments (non-nesting, C-style).
+    """
+    i = 0
+    out_parts: list[str] = []
+    n = len(line)
+
+    while i < n:
+        if in_block:
+            end = line.find("*/", i)
+            if end == -1:
+                # Entire remainder is inside a block comment.
+                return ("".join(out_parts), True)
+            i = end + 2
+            in_block = False
+            continue
+
+        # Not in block: look for next // or /* from current i
+        sl = line.find("//", i)
+        bl = line.find("/*", i)
+
+        if sl != -1 and (bl == -1 or sl < bl):
+            # Line comment starts first: take code up to '//' and stop
+            out_parts.append(line[i:sl])
+            return ("".join(out_parts), in_block)
+
+        if bl != -1:
+            # Block comment starts: take code up to '/*', then enter block
+            out_parts.append(line[i:bl])
+            i = bl + 2
+            in_block = True
+            continue
+
+        # No more comments
+        out_parts.append(line[i:])
+        break
+
+    return ("".join(out_parts), in_block)
+
+def iter_source_files(root: Path, exts: set[str], excludes: set[str]) -> list[Path]:
+    """
+    Return a list of source files under 'root', where filenames end with any of the
+    extensions in 'exts' (e.g., '.c.src', '.c', '.h').
+    Excludes directories whose names are in 'excludes'.
+    """
+    results: list[Path] = []
+
+    for dirpath, dirnames, filenames in os.walk(root):
+        # Prune excluded directories
+        dirnames[:] = [d for d in dirnames if d not in excludes]
+        for fn in filenames:
+            # endswith handles multi-suffix patterns, e.g., .c.src
+            if any(fn.endswith(ext) for ext in exts):
+                results.append(Path(dirpath) / fn)
+    return results
+
+def build_func_rx(funcs: tuple[str, ...]) -> Pattern[str]:
+    # Match any suspicious function name as a whole identifier, i.e. not
+    # preceded or followed by an identifier character.
+    joined = "|".join(map(re.escape, funcs))
+    return re.compile(r"(?<![A-Za-z0-9_])(" + joined + r")(?![A-Za-z0-9_])")
+
+
+def scan_file(
+    path: Path, func_rx: Pattern[str], noqa_markers: tuple[str, ...]
+) -> list[tuple[str, int, str, str]]:
+    """
+    Scan a single file.
+    Returns list of (func_name, line_number, path_str, raw_line_str).
+    """
+    hits: list[tuple[str, int, str, str]] = []
+    in_block = False
+    noqa_set = set(noqa_markers)
+
+    try:
+        with path.open("r", encoding="utf-8", errors="ignore") as f:
+            for lineno, raw in enumerate(f, 1):
+                # Skip if approved by noqa markers
+                if any(mark in raw for mark in noqa_set):
+                    continue
+
+                # Remove comments; if nothing remains, skip
+                code, in_block = strip_comments(raw.rstrip("\n"), in_block)
+                if not code.strip():
+                    continue
+
+                # Find all suspicious calls in non-comment code
+                for m in func_rx.finditer(code):
+                    hits.append((m.group(0), lineno, str(path), raw.rstrip("\n")))
+    except FileNotFoundError:
+        # File may have disappeared; ignore gracefully
+        pass
+    return hits
+
+
+def main(argv: list[str] | None = None) -> int:
+    # List of suspicious function calls:
+    suspicious_funcs: tuple[str, ...] = (
+        "PyList_GetItem",
+        "PyDict_GetItem",
+        "PyDict_GetItemWithError",
+        "PyDict_GetItemString",
+        "PyDict_SetDefault",
+        "PyDict_Next",
+        "PyWeakref_GetObject",
+        "PyWeakref_GET_OBJECT",
+        "PyList_GET_ITEM",
+        "_PyDict_GetItemStringWithError",
+        "PySequence_Fast"
+    )
+    func_rx = build_func_rx(suspicious_funcs)
+    noqa_markers = (
+        "noqa: borrowed-ref OK",
+        "noqa: borrowed-ref - manual fix needed"
+        )
+    default_exts = {".c", ".h", ".c.src", ".cpp"}
+    default_excludes = {"pythoncapi-compat"}
+
+    ap = argparse.ArgumentParser(description="Borrow-ref C API linter (Python).")
+    ap.add_argument(
+        "--quiet",
+        action="store_true",
+        help="Suppress normal output; exit status alone indicates result (useful\
+              for CI).",
+    )
+    ap.add_argument(
+        "-j", "--jobs",
+        type=int,
+        default=0,
+        help="Number of worker threads (0=auto, 1=sequential).",
+    )
+    ap.add_argument(
+        "--root",
+        default="numpy",
+        type=str,
+        help="Root directory to scan (default: numpy)"
+        )
+    ap.add_argument(
+        "--ext",
+        action="append",
+        default=None,
+        help=f"File extension(s) to include (repeatable). Defaults to {default_exts}",
+    )
+    ap.add_argument(
+        "--exclude",
+        action="append",
+        default=None,
+        help=f"Directory name(s) to exclude (repeatable). Default: {default_excludes}",
+    )
+    args = ap.parse_args(argv)
+
+    if args.ext:
+        exts = {e if e.startswith(".") else f".{e}" for e in args.ext}
+    else:
+        exts = set(default_exts)
+    excludes = set(args.exclude) if args.exclude else set(default_excludes)
+
+    root = Path(args.root)
+    if not root.exists():
+        print(f"error: root '{root}' does not exist", file=sys.stderr)
+        return 2
+
+    files = sorted(iter_source_files(root, exts, excludes), key=str)
+
+    # Determine concurrency: auto picks a reasonable cap for I/O-bound work
+    if args.jobs is None or args.jobs <= 0:
+        max_workers = min(32, (os.cpu_count() or 1) * 5)
+    else:
+        max_workers = max(1, args.jobs)
+    print(f'Scanning {len(files)} C/C++ source files...\n')
+
+    # Output file (mirrors your shell behavior)
+    tmpdir = Path(".tmp")
+    tmpdir.mkdir(exist_ok=True)
+
+    findings = 0
+
+    # Run the scanning in parallel; only the main thread writes the report
+    all_hits: list[tuple[str, int, str, str]] = []
+    if max_workers == 1:
+        for p in files:
+            all_hits.extend(scan_file(p, func_rx, noqa_markers))
+    else:
+        with ThreadPoolExecutor(max_workers=max_workers) as ex:
+            fut_to_file = {ex.submit(scan_file, p, func_rx, noqa_markers):
+                           p for p in files}
+            for fut in as_completed(fut_to_file):
+                try:
+                    all_hits.extend(fut.result())
+                except Exception as e:
+                    print(f'Failed to scan {fut_to_file[fut]}: {e}')
+
+    # Sort for deterministic output: by path, then line number
+    all_hits.sort(key=lambda t: (t[2], t[1]))
+
+    # There are no hits, the linter passed
+    if not all_hits:
+        if not args.quiet:
+            print("All checks passed! C API borrow-ref linter found no issues.\n")
+        return 0
+
+    # There are some linter failures: create a log file
+    with tempfile.NamedTemporaryFile(
+        prefix="c_api_usage_report.",
+        suffix=".txt",
+        dir=tmpdir,
+        mode="w+",
+        encoding="utf-8",
+        delete=False,
+        ) as out:
+        report_path = Path(out.name)
+        out.write("Running Suspicious C API usage report workflow...\n\n")
+        for func, lineo, pstr, raw in all_hits:
+            findings += 1
+            out.write(f"Found suspicious call to {func} in file: {pstr}\n")
+            out.write(f" -> {pstr}:{lineo}:{raw}\n")
+            out.write("Recommendation:\n")
+            out.write(
+                "If this use is intentional and safe, add "
+                "'// noqa: borrowed-ref OK' on the same line "
+                "to silence this warning.\n"
+            )
+            out.write(
+                "Otherwise, consider replacing the call "
+                "with a thread-safe API function.\n\n"
+            )
+
+        out.flush()
+        if not args.quiet:
+            out.seek(0)
+            sys.stdout.write(out.read())
+            print(f"Report written to: {report_path}\n\n\
+C API borrow-ref linter FAILED.")
+
+    return 1
+
+
+if __name__ == "__main__":
+
+    sys.exit(main())
diff --git a/tools/ci/cirrus_arm.yml b/tools/ci/cirrus_arm.yml
index 3b48089dcc08..977921d8236d 100644
--- a/tools/ci/cirrus_arm.yml
+++ b/tools/ci/cirrus_arm.yml
@@ -9,7 +9,7 @@ modified_clone: &MODIFIED_CLONE
       # it's a PR so clone the main branch then merge the changes from the PR
       git clone https://x-access-token:${CIRRUS_REPO_CLONE_TOKEN}@github.com/${CIRRUS_REPO_FULL_NAME}.git $CIRRUS_WORKING_DIR
       git fetch origin pull/$CIRRUS_PR/head:pull/$CIRRUS_PR
-    
+
       # CIRRUS_BASE_BRANCH will probably be `main` for the majority of the time
       # However, if you do a PR against a maintenance branch we will want to
       # merge the PR into the maintenance branch, not main
@@ -21,60 +21,18 @@ modified_clone: &MODIFIED_CLONE
     fi
 
 
-linux_aarch64_test_task:
-  use_compute_credits: $CIRRUS_USER_COLLABORATOR == 'true'
-  compute_engine_instance:
-    image_project: cirrus-images
-    image: family/docker-builder-arm64
-    architecture: arm64
-    platform: linux
-    cpu: 1
-    memory: 4G
-
-  <<: *MODIFIED_CLONE
-
-  ccache_cache:
-    folder: .ccache
-    populate_script:
-      - mkdir -p .ccache
-    fingerprint_key: ccache-linux_aarch64
-
-  prepare_env_script: |
-    apt-get update
-    apt-get install -y --no-install-recommends software-properties-common gcc g++ gfortran pkg-config ccache
-    apt-get install -y --no-install-recommends python3.10 python3.10-venv libopenblas-dev libatlas-base-dev liblapack-dev
-
-    # python3.10 -m ensurepip --default-pip --user
-    ln -s $(which python3.10) python
-
-    # put ccache and python on PATH
-    export PATH=/usr/lib/ccache:$PWD:$PATH
-    echo "PATH=$PATH" >> $CIRRUS_ENV
-    echo "CCACHE_DIR=$PWD/.ccache" >> $CIRRUS_ENV
-
-    pip install -r requirements/build_requirements.txt
-    pip install -r requirements/test_requirements.txt
-
-  build_script: |
-    spin build -- -Dallow-noblas=true
-
-  test_script: |
-    spin test -j 1
-    ccache -s
-
-
 freebsd_test_task:
   use_compute_credits: $CIRRUS_USER_COLLABORATOR == 'true'
   compute_engine_instance:
     image_project: freebsd-org-cloud-dev
-    image: family/freebsd-13-2
+    image: family/freebsd-14-3
     platform: freebsd
     cpu: 1
     memory: 4G
 
   install_devtools_script: |
     pkg install -y git bash ninja ccache blas cblas lapack pkgconf
-    pkg install -y python311
+    pkg install -y python312
 
   <<: *MODIFIED_CLONE
 
@@ -87,22 +45,22 @@ freebsd_test_task:
   prepare_env_script: |
     # Create a venv (the `source` command needs bash, not the default sh shell)
     chsh -s /usr/local/bin/bash
-    python3.11 -m venv .venv
+    python3.12 -m venv .venv
     source .venv/bin/activate
     # Minimal build and test requirements
-    python3.11 -m pip install -U pip
-    python3.11 -m pip install meson-python Cython pytest hypothesis
+    python3.12 -m pip install -U pip
+    python3.12 -m pip install meson-python Cython pytest hypothesis
 
   build_script: |
     chsh -s /usr/local/bin/bash
     source .venv/bin/activate
-    python3.11 -m pip install . --no-build-isolation -v -Csetup-args="-Dallow-noblas=false"
+    python3.12 -m pip install . --no-build-isolation -v -Csetup-args="-Dallow-noblas=false"
 
   test_script: |
     chsh -s /usr/local/bin/bash
     source .venv/bin/activate
     cd tools
-    python3.11 -m pytest --pyargs numpy -m "not slow"
+    python3.12 -m pytest --pyargs numpy -m "not slow"
     ccache -s
 
   on_failure:
diff --git a/tools/ci/cirrus_wheels.yml b/tools/ci/cirrus_wheels.yml
deleted file mode 100644
index 8705bd9b9cbd..000000000000
--- a/tools/ci/cirrus_wheels.yml
+++ /dev/null
@@ -1,166 +0,0 @@
-build_and_store_wheels: &BUILD_AND_STORE_WHEELS
-  install_cibuildwheel_script:
-    - python -m pip install cibuildwheel
-  cibuildwheel_script:
-    - cibuildwheel
-  wheels_artifacts:
-    path: "wheelhouse/*"
-
-######################################################################
-# Build linux_aarch64 natively
-######################################################################
-
-linux_aarch64_task:
-  use_compute_credits: $CIRRUS_USER_COLLABORATOR == 'true'
-  compute_engine_instance:
-    image_project: cirrus-images
-    image: family/docker-builder-arm64
-    architecture: arm64
-    platform: linux
-    cpu: 1
-    memory: 4G
-  matrix:
-    # build in a matrix because building and testing all four wheels in a
-    # single task takes longer than 60 mins (the default time limit for a
-    # cirrus-ci task).
-    - env:
-        CIRRUS_CLONE_SUBMODULES: true
-        CIBW_BUILD: cp310-*
-    - env:
-        CIRRUS_CLONE_SUBMODULES: true
-        CIBW_BUILD: cp311-*
-    - env:
-        CIRRUS_CLONE_SUBMODULES: true
-        CIBW_BUILD: cp312-*
-
-  initial_setup_script: |
-    apt update
-    apt install -y python3-venv python-is-python3 gfortran libatlas-base-dev libgfortran5 eatmydata
-    git fetch origin
-    bash ./tools/wheels/cibw_before_build.sh ${PWD}
-    which python
-    echo $CIRRUS_CHANGE_MESSAGE
-  <<: *BUILD_AND_STORE_WHEELS
-
-
-######################################################################
-# Build macosx_arm64 natively
-#
-# macosx_arm64 for macos >= 14 used to be built here, but are now
-# built on GHA.
-######################################################################
-
-macosx_arm64_task:
-  use_compute_credits: $CIRRUS_USER_COLLABORATOR == 'true'
-  macos_instance:
-    matrix:
-      image: ghcr.io/cirruslabs/macos-monterey-xcode
-
-  matrix:
-    - env:
-        CIRRUS_CLONE_SUBMODULES: true
-        CIBW_BUILD: cp310-* cp311
-    - env:
-        CIRRUS_CLONE_SUBMODULES: true
-        CIBW_BUILD: cp312-*
-  env:
-    PATH: /usr/local/lib:/usr/local/include:$PATH
-    CIBW_ARCHS: arm64
-
-  build_script: |
-    brew install micromamba gfortran
-    micromamba shell init -s bash -p ~/micromamba
-    source ~/.bash_profile
-    
-    micromamba create -n numpydev
-    micromamba activate numpydev
-    micromamba install -y -c conda-forge python=3.11 2>/dev/null
-    
-    # Use scipy-openblas wheels
-    export INSTALL_OPENBLAS=true
-    export CIBW_ENVIRONMENT_MACOS="MACOSX_DEPLOYMENT_TARGET='11.0' INSTALL_OPENBLAS=true RUNNER_OS=macOS PKG_CONFIG_PATH=$PWD/.openblas"
-
-    # needed for submodules
-    git submodule update --init
-    # need to obtain all the tags so setup.py can determine FULLVERSION
-    git fetch origin
-    uname -m
-    python -c "import platform;print(platform.python_version());print(platform.system());print(platform.machine())"
-    clang --version
-
-    python -m pip install cibuildwheel
-    cibuildwheel
-
-  wheels_artifacts:
-    path: "wheelhouse/*"
-
-######################################################################
-# Upload all wheels
-######################################################################
-
-wheels_upload_task:
-  use_compute_credits: $CIRRUS_USER_COLLABORATOR == 'true'
-  # Artifacts don't seem to be persistent from task to task.
-  # Rather than upload wheels at the end of each cibuildwheel run we do a
-  # final upload here. This is because a run may be on different OS for
-  # which bash, etc, may not be present.
-  depends_on:
-    - linux_aarch64
-    - macosx_arm64
-  compute_engine_instance:
-    image_project: cirrus-images
-    image: family/docker-builder
-    platform: linux
-    cpu: 1
-
-  env:
-    NUMPY_STAGING_UPLOAD_TOKEN: ENCRYPTED[!5a69522ae0c2af9edb2bc1cdfeaca6292fb3666d9ecd82dca0615921834a6ce3b702352835d8bde4ea2a9ed5ef8424ac!]
-    NUMPY_NIGHTLY_UPLOAD_TOKEN: ENCRYPTED[ef04347663cfcb58d121385707e55951dc8e03b009edeed988aa4a33ba8205c54ca9980ac4da88e1adfdebff8b9d7ed4]
-
-  upload_script: |
-    apt-get update
-    apt-get install -y curl wget
-    export IS_SCHEDULE_DISPATCH="false"
-    export IS_PUSH="false"
-
-    # cron job
-    if [[ "$CIRRUS_CRON" == "nightly" ]]; then
-      export IS_SCHEDULE_DISPATCH="true"
-    fi
-
-    # a manual build was started
-    if [[ "$CIRRUS_BUILD_SOURCE" == "api" && "$CIRRUS_COMMIT_MESSAGE" == "API build for null" ]]; then
-      export IS_SCHEDULE_DISPATCH="true"
-    fi
-
-    # only upload wheels to staging if it's a tag beginning with 'v' and you're
-    # on a maintenance branch
-    if [[ "$CIRRUS_TAG" == v* ]] && [[ $CIRRUS_TAG != *"dev0"* ]]; then
-      export IS_PUSH="true"
-    fi
-
-    if [[ $IS_PUSH == "true" ]] || [[ $IS_SCHEDULE_DISPATCH == "true" ]]; then
-        # install miniconda in the home directory. For some reason HOME isn't set by Cirrus
-        export HOME=$PWD
-
-        # install miniconda for uploading to anaconda
-        wget -q https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh
-        bash miniconda.sh -b -p $HOME/miniconda3
-        $HOME/miniconda3/bin/conda init bash
-        source $HOME/miniconda3/bin/activate
-        conda install -y anaconda-client
-
-        # The name of the zip file is derived from the `wheels_artifact` line.
-        # If you change the artifact line to `myfile_artifact` then it would be
-        # called myfile.zip
-
-        curl https://api.cirrus-ci.com/v1/artifact/build/$CIRRUS_BUILD_ID/wheels.zip --output wheels.zip
-        unzip wheels.zip
-
-        source ./tools/wheels/upload_wheels.sh
-        # IS_PUSH takes precedence over IS_SCHEDULE_DISPATCH
-        set_upload_vars
-
-        # Will be skipped if not a push/tag/scheduled build
-        upload_wheels
-    fi
diff --git a/tools/ci/lsan_suppressions.txt b/tools/ci/lsan_suppressions.txt
new file mode 100644
index 000000000000..74e2b335f575
--- /dev/null
+++ b/tools/ci/lsan_suppressions.txt
@@ -0,0 +1,33 @@
+# This file contains suppressions for the LSAN tool
+#
+# Reference: https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer#suppressions
+
+# 2 leaks when importing "numpy.exceptions" in initialize_static_globals
+# (check the duplicate frame number for the second leak)
+#0 0xffffb64476d0 in malloc ../../../../src/libsanitizer/asan/asan_malloc_linux.cpp:69
+#1 0xffffb598e8d0 in PyFloat_FromDouble Objects/floatobject.c:128
+#2 0xffffb5dac0fc in fill_time Modules/posixmodule.c:2622
+#3 0xffffb5dc137c in _pystat_fromstructstat Modules/posixmodule.c:2740
+#3 0xffffb5dc13b4 in _pystat_fromstructstat Modules/posixmodule.c:2743
+#4 0xffffb5dc2d2c in posix_do_stat Modules/posixmodule.c:2868
+#5 0xffffb5dc331c in os_stat_impl Modules/posixmodule.c:3235
+#6 0xffffb5dc331c in os_stat Modules/clinic/posixmodule.c.h:105
+#7 0xffffb58484d8 in _PyEval_EvalFrameDefault Python/generated_cases.c.h:2383
+#8 0xffffb5c18174 in _PyEval_EvalFrame Include/internal/pycore_ceval.h:121
+#9 0xffffb5c18174 in _PyEval_Vector Python/ceval.c:2083
+#10 0xffffb593ccf4 in _PyObject_VectorcallTstate Include/internal/pycore_call.h:169
+#11 0xffffb593ccf4 in object_vacall Objects/call.c:819
+#12 0xffffb593d168 in PyObject_CallMethodObjArgs Objects/call.c:880
+#13 0xffffb5cbc6f0 in import_find_and_load Python/import.c:3737
+#14 0xffffb5cbc6f0 in PyImport_ImportModuleLevelObject Python/import.c:3819
+#15 0xffffb5bffca0 in builtin___import___impl Python/bltinmodule.c:285
+#16 0xffffb5bffca0 in builtin___import__ Python/clinic/bltinmodule.c.h:110
+#17 0xffffb593db9c in _PyObject_VectorcallTstate Include/internal/pycore_call.h:169
+#18 0xffffb593db9c in _PyObject_CallFunctionVa Objects/call.c:552
+#19 0xffffb593df38 in PyObject_CallFunction Objects/call.c:574
+#20 0xffffb5cbdb5c in PyImport_Import Python/import.c:4011
+#21 0xffffb5cbe17c in PyImport_ImportModule Python/import.c:3434
+#22 0xffffb1c61b44 in npy_import ../numpy/_core/src/common/npy_import.h:71
+#23 0xffffb1c61b44 in initialize_static_globals ../numpy/_core/src/multiarray/npy_static_data.c:124
+
+leak:initialize_static_globals
diff --git a/tools/ci/push_docs_to_repo.py b/tools/ci/push_docs_to_repo.py
index 0471e38246e3..801454792304 100755
--- a/tools/ci/push_docs_to_repo.py
+++ b/tools/ci/push_docs_to_repo.py
@@ -1,12 +1,11 @@
 #!/usr/bin/env python3
 
 import argparse
-import subprocess
-import tempfile
 import os
-import sys
 import shutil
-
+import subprocess
+import sys
+import tempfile
 
 parser = argparse.ArgumentParser(
     description='Upload files to a remote repo, replacing existing content'
@@ -33,7 +32,8 @@
     print('Content directory does not exist')
     sys.exit(1)
 
-count = len([name for name in os.listdir(args.dir) if os.path.isfile(os.path.join(args.dir, name))])
+count = len([name for name in os.listdir(args.dir)
+             if os.path.isfile(os.path.join(args.dir, name))])
 
 if count < args.count:
     print(f"Expected {args.count} top-directory files to upload, got {count}")
@@ -44,7 +44,7 @@ def run(cmd, stdout=True):
     try:
         subprocess.check_call(cmd, stdout=pipe, stderr=pipe)
     except subprocess.CalledProcessError:
-        print("\n! Error executing: `%s;` aborting" % ' '.join(cmd))
+        print(f"\n! Error executing: `{' '.join(cmd)};` aborting")
         sys.exit(1)
 
 
@@ -55,16 +55,16 @@ def run(cmd, stdout=True):
 # ensure the working branch is called "main"
 # (`--initial-branch=main` appeared to have failed on older git versions):
 run(['git', 'checkout', '-b', 'main'])
-run(['git', 'remote', 'add', 'origin',  args.remote])
+run(['git', 'remote', 'add', 'origin', args.remote])
 run(['git', 'config', '--local', 'user.name', args.committer])
 run(['git', 'config', '--local', 'user.email', args.email])
 
-print('- committing new content: "%s"' % args.message)
+print(f'- committing new content: "{args.message}"')
 run(['cp', '-R', os.path.join(args.dir, '.'), '.'])
 run(['git', 'add', '.'], stdout=False)
 run(['git', 'commit', '--allow-empty', '-m', args.message], stdout=False)
 
-print('- uploading as %s <%s>' % (args.committer, args.email))
+print(f'- uploading as {args.committer} <{args.email}>')
 if args.force:
     run(['git', 'push', 'origin', 'main', '--force'])
 else:
diff --git a/tools/ci/run_32_bit_linux_docker.sh b/tools/ci/run_32_bit_linux_docker.sh
deleted file mode 100644
index 5e5e8bae4f96..000000000000
--- a/tools/ci/run_32_bit_linux_docker.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-set -xe
-
-git config --global --add safe.directory /numpy
-cd /numpy
-/opt/python/cp310-cp310/bin/python -mvenv venv
-source venv/bin/activate
-pip install -r requirements/ci32_requirements.txt
-python3 -m pip install -r requirements/test_requirements.txt
-echo CFLAGS \$CFLAGS
-spin config-openblas --with-scipy-openblas=32
-export PKG_CONFIG_PATH=/numpy/.openblas
-python3 -m pip install .
-cd tools
-python3 -m pytest --pyargs numpy
diff --git a/tools/ci/test_all_newsfragments_used.py b/tools/ci/test_all_newsfragments_used.py
index 1df58791ad82..25b5103cb153 100755
--- a/tools/ci/test_all_newsfragments_used.py
+++ b/tools/ci/test_all_newsfragments_used.py
@@ -1,8 +1,10 @@
 #!/usr/bin/env python3
 
+import os
 import sys
+
 import toml
-import os
+
 
 def main():
     path = toml.load("pyproject.toml")["tool"]["towncrier"]["directory"]
diff --git a/tools/ci/tsan_suppressions.txt b/tools/ci/tsan_suppressions.txt
new file mode 100644
index 000000000000..0745debd8e5f
--- /dev/null
+++ b/tools/ci/tsan_suppressions.txt
@@ -0,0 +1,11 @@
+# This file contains suppressions for the TSAN tool
+#
+# Reference: https://github.com/google/sanitizers/wiki/ThreadSanitizerSuppressions
+
+# For np.nonzero, see gh-28361
+race:PyArray_Nonzero
+race:count_nonzero_int
+race:count_nonzero_bool
+race:count_nonzero_float
+race:DOUBLE_nonzero
+
diff --git a/tools/ci/ubsan_suppressions_arm64.txt b/tools/ci/ubsan_suppressions_arm64.txt
new file mode 100644
index 000000000000..69de4a4c425f
--- /dev/null
+++ b/tools/ci/ubsan_suppressions_arm64.txt
@@ -0,0 +1,51 @@
+# This file contains suppressions for the default (with GIL) build to prevent runtime errors
+# when numpy is built with -Db_sanitize=undefined for arm64 architecture
+#
+# reference: https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html#available-checks
+
+# Per this prior discussion, integer overflow is not a concern
+# https://github.com/numpy/numpy/issues/24209#issuecomment-2160154181
+signed-integer-overflow:*
+
+# all alignment runtime errors are ignored, since alignment is tracked via TypeSanitizer;
+# otherwise ubsan may detect system-file alignment errors outside numpy
+alignment:*
+
+# suggested fix for runtime error: use INT_MIN constant
+shift-base:_core/src/umath/_rational_tests.c
+# suggested fix for runtime error: check for overflow if signed
+shift-base:_core/src/npymath/npy_math_internal.h
+
+# suggested fix for runtime error: null check before loop
+pointer-overflow:_core/src/common/simd/neon/memory.h
+pointer-overflow:_core/src/multiarray/datetime_busdaycal.c
+pointer-overflow:_core/src/multiarray/nditer_templ.c
+pointer-overflow:_core/src/multiarray/nditer_constr.c
+pointer-overflow:_core/src/umath/loops_arithm_fp.dispatch.c.src
+pointer-overflow:_core/src/umath/loops_unary.dispatch.c.src
+pointer-overflow:_core/src/umath/loops_unary_complex.dispatch.c.src
+pointer-overflow:_core/src/umath/loops_unary_fp_le.dispatch.c.src
+pointer-overflow:_core/src/umath/string_buffer.h
+pointer-overflow:linalg/umath_linalg.cpp
+pointer-overflow:numpy/random/bit_generator.pyx.c
+
+float-cast-overflow:_core/src/multiarray/lowlevel_strided_loops.c.src
+
+# flagged in CI - call to function through pointer to incorrect function type
+# Many functions in the modules/files listed below cause undefined behavior in CI.
+# Generally disable this check until further investigation, but keep the specific
+# files below as a starting point for resolving the checks later.
+function:_core/src/*
+function:numpy/random/*
+# function:_core/src/common/cblasfunc.c
+# function:_core/src/common/npy_argparse.c
+# function:_core/src/multiarray/number.c
+# function:_core/src/multiarray/ctors.c
+# function:_core/src/multiarray/convert_datatype.c
+# function:_core/src/multiarray/dtype_transfer.c
+# function:_core/src/multiarray/dtype_traversal.c
+# function:_core/src/multiarray/getset.c
+# function:_core/src/multiarray/scalarapi.c
+# function:_core/src/multiarray/scalartypes.c.src
+# function:_core/src/umath/*
+# function:numpy/random/*
diff --git a/tools/ci/ubsan_suppressions_x86_64.txt b/tools/ci/ubsan_suppressions_x86_64.txt
new file mode 100644
index 000000000000..5e4316ce3715
--- /dev/null
+++ b/tools/ci/ubsan_suppressions_x86_64.txt
@@ -0,0 +1,28 @@
+# This file contains suppressions for the default (with GIL) build to prevent runtime errors
+# when numpy is built with -Db_sanitize=undefined for x86_64 architecture
+#
+# reference: https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html#available-checks
+
+# Per this prior discussion, integer overflow is not a concern
+# https://github.com/numpy/numpy/issues/24209#issuecomment-2160154181
+signed-integer-overflow:*
+
+# all alignment runtime errors are ignored, since alignment is tracked via TypeSanitizer;
+# otherwise ubsan may detect system-file alignment errors outside numpy
+alignment:*
+
+# suggested fix for runtime error: use INT_MIN constant
+shift-base:_core/src/umath/_rational_tests.c
+# suggested fix for runtime error: check for overflow if signed
+shift-base:_core/src/npymath/npy_math_internal.h
+
+
+# suggested fix for runtime error: check that pointer is not null before calling function
+nonnull-attribute:_core/src/multiarray/array_coercion.c
+nonnull-attribute:_core/src/multiarray/ctors.c
+nonnull-attribute:_core/src/multiarray/datetime_busdaycal.c
+nonnull-attribute:_core/src/multiarray/scalarapi.c
+nonnull-attribute:_core/src/multiarray/calculation.c
+
+# suggested fix for runtime error: null check before loop
+pointer-overflow:_core/src/multiarray/nditer_templ.c
diff --git a/tools/commitstats.py b/tools/commitstats.py
deleted file mode 100644
index 534f0a1b8416..000000000000
--- a/tools/commitstats.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Run svn log -l 
-
-import re
-import numpy as np
-import os
-
-names = re.compile(r'r\d+\s\|\s(.*)\s\|\s200')
-
-def get_count(filename, repo):
-    mystr = open(filename).read()
-    result = names.findall(mystr)
-    u = np.unique(result)
-    count = [(x, result.count(x), repo) for x in u]
-    return count
-
-
-command = 'svn log -l 2300 > output.txt'
-os.chdir('..')
-os.system(command)
-
-count = get_count('output.txt', 'NumPy')
-
-
-os.chdir('../scipy')
-os.system(command)
-
-count.extend(get_count('output.txt', 'SciPy'))
-
-os.chdir('../scikits')
-os.system(command)
-count.extend(get_count('output.txt', 'SciKits'))
-count.sort()
-
-
-
-print("** SciPy and NumPy **")
-print("=====================")
-for val in count:
-    print(val)
diff --git a/tools/download-wheels.py b/tools/download-wheels.py
deleted file mode 100644
index e5753eb2148c..000000000000
--- a/tools/download-wheels.py
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/usr/bin/env python3
-"""
-Script to download NumPy wheels from the Anaconda staging area.
-
-Usage::
-
-    $ ./tools/download-wheels.py  -w 
-
-The default wheelhouse is ``release/installers``.
-
-Dependencies
-------------
-
-- beautifulsoup4
-- urllib3
-
-Examples
---------
-
-While in the repository root::
-
-    $ python tools/download-wheels.py 1.19.0
-    $ python tools/download-wheels.py 1.19.0 -w ~/wheelhouse
-
-"""
-import os
-import re
-import shutil
-import argparse
-
-import urllib3
-from bs4 import BeautifulSoup
-
-__version__ = "0.1"
-
-# Edit these for other projects.
-STAGING_URL = "https://anaconda.org/multibuild-wheels-staging/numpy"
-PREFIX = "numpy"
-
-# Name endings of the files to download.
-WHL = r"-.*\.whl$"
-ZIP = r"\.zip$"
-GZIP = r"\.tar\.gz$"
-SUFFIX = rf"({WHL}|{GZIP}|{ZIP})"
-
-
-def get_wheel_names(version):
-    """ Get wheel names from Anaconda HTML directory.
-
-    This looks in the Anaconda multibuild-wheels-staging page and
-    parses the HTML to get all the wheel names for a release version.
-
-    Parameters
-    ----------
-    version : str
-        The release version. For instance, "1.18.3".
-
-    """
-    http = urllib3.PoolManager(cert_reqs="CERT_REQUIRED")
-    tmpl = re.compile(rf"^.*{PREFIX}-{version}{SUFFIX}")
-    index_url = f"{STAGING_URL}/files"
-    index_html = http.request("GET", index_url)
-    soup = BeautifulSoup(index_html.data, "html.parser")
-    return soup.find_all(string=tmpl)
-
-
-def download_wheels(version, wheelhouse):
-    """Download release wheels.
-
-    The release wheels for the given NumPy version are downloaded
-    into the given directory.
-
-    Parameters
-    ----------
-    version : str
-        The release version. For instance, "1.18.3".
-    wheelhouse : str
-        Directory in which to download the wheels.
-
-    """
-    http = urllib3.PoolManager(cert_reqs="CERT_REQUIRED")
-    wheel_names = get_wheel_names(version)
-
-    for i, wheel_name in enumerate(wheel_names):
-        wheel_url = f"{STAGING_URL}/{version}/download/{wheel_name}"
-        wheel_path = os.path.join(wheelhouse, wheel_name)
-        with open(wheel_path, "wb") as f:
-            with http.request("GET", wheel_url, preload_content=False,) as r:
-                print(f"{i + 1:<4}{wheel_name}")
-                shutil.copyfileobj(r, f)
-    print(f"\nTotal files downloaded: {len(wheel_names)}")
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()
-    parser.add_argument(
-        "version",
-        help="NumPy version to download.")
-    parser.add_argument(
-        "-w", "--wheelhouse",
-        default=os.path.join(os.getcwd(), "release", "installers"),
-        help="Directory in which to store downloaded wheels\n"
-             "[defaults to /release/installers]")
-
-    args = parser.parse_args()
-
-    wheelhouse = os.path.expanduser(args.wheelhouse)
-    if not os.path.isdir(wheelhouse):
-        raise RuntimeError(
-            f"{wheelhouse} wheelhouse directory is not present."
-            " Perhaps you need to use the '-w' flag to specify one.")
-
-    download_wheels(args.version, wheelhouse)
diff --git a/tools/find_deprecated_escaped_characters.py b/tools/find_deprecated_escaped_characters.py
deleted file mode 100644
index d7225b8e85f6..000000000000
--- a/tools/find_deprecated_escaped_characters.py
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/usr/bin/env python3
-r"""
-Look for escape sequences deprecated in Python 3.6.
-
-Python 3.6 deprecates a number of non-escape sequences starting with '\' that
-were accepted before. For instance, '\(' was previously accepted but must now
-be written as '\\(' or r'\('.
-
-"""
-
-
-def main(root):
-    """Find deprecated escape sequences.
-
-    Checks for deprecated escape sequences in ``*.py files``. If `root` is a
-    file, that file is checked, if `root` is a directory all ``*.py`` files
-    found in a recursive descent are checked.
-
-    If a deprecated escape sequence is found, the file and line where found is
-    printed. Note that for multiline strings the line where the string ends is
-    printed and the error(s) are somewhere in the body of the string.
-
-    Parameters
-    ----------
-    root : str
-        File or directory to check.
-    Returns
-    -------
-    None
-
-    """
-    import ast
-    import tokenize
-    import warnings
-    from pathlib import Path
-
-    count = 0
-    base = Path(root)
-    paths = base.rglob("*.py") if base.is_dir() else [base]
-    for path in paths:
-        # use tokenize to auto-detect encoding on systems where no
-        # default encoding is defined (e.g. LANG='C')
-        with tokenize.open(str(path)) as f:
-            with warnings.catch_warnings(record=True) as w:
-                warnings.simplefilter('always')
-                tree = ast.parse(f.read())
-            if w:
-                print("file: ", str(path))
-                for e in w:
-                    print('line: ', e.lineno, ': ', e.message)
-                print()
-                count += len(w)
-    print("Errors Found", count)
-
-
-if __name__ == "__main__":
-    from argparse import ArgumentParser
-
-    parser = ArgumentParser(description="Find deprecated escaped characters")
-    parser.add_argument('root', help='directory or file to be checked')
-    args = parser.parse_args()
-    main(args.root)
diff --git a/tools/functions_missing_types.py b/tools/functions_missing_types.py
index dc161621b1b0..9362c1478bd0 100755
--- a/tools/functions_missing_types.py
+++ b/tools/functions_missing_types.py
@@ -33,11 +33,9 @@
         # Accidentally public, deprecated, or shouldn't be used
         "Tester",
         "_core",
-        "get_array_wrap",
         "int_asbuffer",
         "numarray",
         "oldnumeric",
-        "safe_eval",
         "test",
         "typeDict",
         # Builtins
@@ -82,7 +80,6 @@ def visit_FunctionDef(self, node):
     def visit_ClassDef(self, node):
         if not node.name.startswith("_"):
             self.attributes.add(node.name)
-        return
 
     def visit_AnnAssign(self, node):
         self.attributes.add(node.target.id)
diff --git a/tools/get_submodule_paths.py b/tools/get_submodule_paths.py
new file mode 100644
index 000000000000..abab86140712
--- /dev/null
+++ b/tools/get_submodule_paths.py
@@ -0,0 +1,31 @@
+import glob
+import os.path
+
+
+def get_submodule_paths():
+    '''
+    Get paths to submodules so that we can exclude them from things like
+    check_test_name.py, check_unicode.py, etc.
+    '''
+    root_directory = os.path.dirname(os.path.dirname(__file__))
+    gitmodule_file = os.path.join(root_directory, '.gitmodules')
+    with open(gitmodule_file) as gitmodules:
+        data = gitmodules.read().split('\n')
+        submodule_paths = [datum.split(' = ')[1] for datum in data if
+                        datum.startswith('\tpath = ')]
+        submodule_paths = [os.path.join(root_directory, path) for path in
+                           submodule_paths]
+    # vendored with a script rather than via gitmodules
+    with open(
+            os.path.join(root_directory, ".gitattributes"), "r"
+    ) as attr_file:
+        for line in attr_file:
+            if "vendored" in line:
+                pattern = line.split(" ", 1)[0]
+                submodule_paths.extend(glob.glob(pattern))
+
+    return submodule_paths
+
+
+if __name__ == "__main__":
+    print('\n'.join(get_submodule_paths()))
diff --git a/tools/lint_diff.ini b/tools/lint_diff.ini
deleted file mode 100644
index dbebe483b4ab..000000000000
--- a/tools/lint_diff.ini
+++ /dev/null
@@ -1,5 +0,0 @@
-[pycodestyle]
-max_line_length = 79
-statistics = True
-ignore = E121,E122,E123,E125,E126,E127,E128,E226,E241,E251,E265,E266,E302,E402,E704,E712,E721,E731,E741,W291,W293,W391,W503,W504
-exclude = numpy/__config__.py,numpy/typing/tests/data,.spin/cmds.py
diff --git a/tools/linter.py b/tools/linter.py
index 0031ff83a479..4e9aed85054a 100644
--- a/tools/linter.py
+++ b/tools/linter.py
@@ -1,85 +1,91 @@
 import os
-import sys
 import subprocess
+import sys
 from argparse import ArgumentParser
-from git import Repo, exc
-
-CONFIG = os.path.join(
-         os.path.abspath(os.path.dirname(__file__)),
-         'lint_diff.ini',
-)
 
-# NOTE: The `diff` and `exclude` options of pycodestyle seem to be
-# incompatible, so instead just exclude the necessary files when
-# computing the diff itself.
-EXCLUDE = (
-    "numpy/typing/tests/data/",
-    "numpy/typing/_char_codes.py",
-    "numpy/__config__.py",
-    "numpy/f2py",
-)
+CWD = os.path.abspath(os.path.dirname(__file__))
 
 
 class DiffLinter:
-    def __init__(self, branch):
-        self.branch = branch
-        self.repo = Repo('.')
-        self.head = self.repo.head.commit
+    def __init__(self) -> None:
+        self.repository_root = os.path.realpath(os.path.join(CWD, ".."))
 
-    def get_branch_diff(self, uncommitted = False):
+    def run_ruff(self, fix: bool) -> tuple[int, str]:
         """
-            Determine the first common ancestor commit.
-            Find diff between branch and FCA commit.
-            Note: if `uncommitted` is set, check only
-                  uncommitted changes
+        Original Author: Josh Wilson (@person142)
+        Source:
+            https://github.com/scipy/scipy/blob/main/tools/lint_diff.py
+        Unlike pycodestyle, ruff by itself is not capable of limiting
+        its output to the given diff.
         """
-        try:
-            commit = self.repo.merge_base(self.branch, self.head)[0]
-        except exc.GitCommandError:
-            print(f"Branch with name `{self.branch}` does not exist")
-            sys.exit(1)
-
-        exclude = [f':(exclude){i}' for i in EXCLUDE]
-        if uncommitted:
-            diff = self.repo.git.diff(
-                self.head, '--unified=0', '***.py', *exclude
-            )
-        else:
-            diff = self.repo.git.diff(
-                commit, self.head, '--unified=0', '***.py', *exclude
-            )
-        return diff
+        print("Running Ruff Check...")
+        command = ["ruff", "check"]
+        if fix:
+            command.append("--fix")
+
+        res = subprocess.run(
+            command,
+            stdout=subprocess.PIPE,
+            cwd=self.repository_root,
+            encoding="utf-8",
+        )
+        return res.returncode, res.stdout
+
+    def run_cython_lint(self) -> tuple[int, str]:
+        print("Running cython-lint...")
+        command = ["cython-lint", "--no-pycodestyle", "numpy"]
 
-    def run_pycodestyle(self, diff):
-        """
-            Original Author: Josh Wilson (@person142)
-            Source:
-              https://github.com/scipy/scipy/blob/main/tools/lint_diff.py
-            Run pycodestyle on the given diff.
-        """
         res = subprocess.run(
-            ['pycodestyle', '--diff', '--config', CONFIG],
-            input=diff,
+            command,
             stdout=subprocess.PIPE,
-            encoding='utf-8',
+            cwd=self.repository_root,
+            encoding="utf-8",
         )
         return res.returncode, res.stdout
 
-    def run_lint(self, uncommitted):
-        diff = self.get_branch_diff(uncommitted)
-        retcode, errors = self.run_pycodestyle(diff)
+    def run_lint(self, fix: bool) -> None:
+
+        # Ruff Linter
+        retcode, ruff_errors = self.run_ruff(fix)
+        ruff_errors and print(ruff_errors)
+
+        if retcode:
+            sys.exit(retcode)
+
+        # C API Borrowed-ref Linter
+        retcode, c_API_errors = self.run_check_c_api()
+        c_API_errors and print(c_API_errors)
+
+        if retcode:
+            sys.exit(retcode)
 
-        errors and print(errors)
+        # Cython Linter
+        retcode, cython_errors = self.run_cython_lint()
+        cython_errors and print(cython_errors)
 
         sys.exit(retcode)
 
+    def run_check_c_api(self) -> tuple[int, str]:
+        """Run C-API borrowed-ref checker"""
+        print("Running C API borrow-reference linter...")
+        borrowed_ref_script = os.path.join(
+            self.repository_root, "tools", "ci", "check_c_api_usage.py"
+            )
+        borrowed_res = subprocess.run(
+            [sys.executable, borrowed_ref_script],
+            cwd=self.repository_root,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+            text=True,
+            check=False,
+        )
+
+        # Exit with non-zero if C API Check fails
+        return borrowed_res.returncode, borrowed_res.stdout
+
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     parser = ArgumentParser()
-    parser.add_argument("--branch", type=str, default='main',
-                        help="The branch to diff against")
-    parser.add_argument("--uncommitted", action='store_true',
-                        help="Check only uncommitted changes")
     args = parser.parse_args()
 
-    DiffLinter(args.branch).run_lint(args.uncommitted)
+    DiffLinter().run_lint(fix=False)
diff --git a/tools/pyright_completeness.py b/tools/pyright_completeness.py
new file mode 100644
index 000000000000..f1c52913a9c5
--- /dev/null
+++ b/tools/pyright_completeness.py
@@ -0,0 +1,77 @@
+"""
+Run PyRight's `--verifytypes` and check that its reported type completeness is above
+a minimum threshold.
+
+Requires `basedpyright` to be installed in the environment.
+
+Example usage:
+
+    spin run python tools/pyright_completeness.py --verifytypes numpy --ignoreexternal \
+        --exclude-like '*.tests.*' '*.conftest.*'
+
+We use `--ignoreexternal` to avoid "partially unknown" reports coming from the stdlib
+`numbers` module, see https://github.com/microsoft/pyright/discussions/9911.
+"""
+
+import argparse
+import fnmatch
+import json
+import subprocess
+import sys
+from collections.abc import Sequence
+
+
+def main(argv: Sequence[str] | None = None) -> int:
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--exclude-like",
+        required=False,
+        nargs="*",
+        type=str,
+        help="Exclude symbols whose names matches this glob pattern",
+    )
+    args, unknownargs = parser.parse_known_args(argv)
+    pyright_args = list(unknownargs)
+    if "--outputjson" not in pyright_args:
+        pyright_args.append("--outputjson")
+    return run_pyright_with_coverage(pyright_args, args.exclude_like)
+
+
+def run_pyright_with_coverage(
+    pyright_args: list[str],
+    exclude_like: Sequence[str],
+) -> int:
+    result = subprocess.run(
+        ["basedpyright", *pyright_args],
+        capture_output=True,
+        text=True,
+    )
+
+    try:
+        data = json.loads(result.stdout)
+    except json.decoder.JSONDecodeError:
+        sys.stdout.write(result.stdout)
+        sys.stderr.write(result.stderr)
+        return 1
+
+    if exclude_like:
+        symbols = data["typeCompleteness"]["symbols"]
+        matched_symbols = [
+            x
+            for x in symbols
+            if not any(fnmatch.fnmatch(x["name"], pattern) for pattern in exclude_like)
+            and x["isExported"]
+        ]
+        covered = sum(x["isTypeKnown"] for x in matched_symbols) / len(matched_symbols)
+    else:
+        covered = data["typeCompleteness"]["completenessScore"]
+    sys.stderr.write(result.stderr)
+    if covered < 1:
+        sys.stdout.write(f"Coverage {covered:.1%} is below minimum required 100%\n")
+        return 1
+    sys.stdout.write("Coverage is at 100%\n")
+    return 0
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/tools/refguide_check.py b/tools/refguide_check.py
index 6e63ffccf7cc..486789414373 100644
--- a/tools/refguide_check.py
+++ b/tools/refguide_check.py
@@ -27,32 +27,23 @@
 
 """
 import copy
-import doctest
 import inspect
 import io
 import os
 import re
-import shutil
 import sys
-import tempfile
 import warnings
-import docutils.core
 from argparse import ArgumentParser
-from contextlib import contextmanager, redirect_stderr
-from doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL
 
+import docutils.core
 from docutils.parsers.rst import directives
 
-import sphinx
-import numpy as np
-
 sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc', 'sphinxext'))
 from numpydoc.docscrape_sphinx import get_doc_object
 
-SKIPBLOCK = doctest.register_optionflag('SKIPBLOCK')
-
 # Enable specific Sphinx directives
-from sphinx.directives.other import SeeAlso, Only
+from sphinx.directives.other import Only, SeeAlso
+
 directives.register_directive('seealso', SeeAlso)
 directives.register_directive('only', Only)
 
@@ -97,52 +88,6 @@
     'io.arff': 'io',
 }
 
-# these names are known to fail doctesting and we like to keep it that way
-# e.g. sometimes pseudocode is acceptable etc
-#
-# Optionally, a subset of methods can be skipped by setting dict-values
-# to a container of method-names
-DOCTEST_SKIPDICT = {
-    # cases where NumPy docstrings import things from SciPy:
-    'numpy.lib.vectorize': None,
-    'numpy.random.standard_gamma': None,
-    'numpy.random.gamma': None,
-    'numpy.random.vonmises': None,
-    'numpy.random.power': None,
-    'numpy.random.zipf': None,
-    # cases where NumPy docstrings import things from other 3'rd party libs:
-    'numpy._core.from_dlpack': None,
-    # remote / local file IO with DataSource is problematic in doctest:
-    'numpy.lib.npyio.DataSource': None,
-    'numpy.lib.Repository': None,
-}
-
-# Skip non-numpy RST files, historical release notes
-# Any single-directory exact match will skip the directory and all subdirs.
-# Any exact match (like 'doc/release') will scan subdirs but skip files in
-# the matched directory.
-# Any filename will skip that file
-RST_SKIPLIST = [
-    'scipy-sphinx-theme',
-    'sphinxext',
-    'neps',
-    'changelog',
-    'doc/release',
-    'doc/source/release',
-    'doc/release/upcoming_changes',
-    'c-info.ufunc-tutorial.rst',
-    'c-info.python-as-glue.rst',
-    'f2py.getting-started.rst',
-    'f2py-examples.rst',
-    'arrays.nditer.cython.rst',
-    'how-to-verify-bug.rst',
-    # See PR 17222, these should be fixed
-    'basics.dispatch.rst',
-    'basics.subclassing.rst',
-    'basics.interoperability.rst',
-    'misc.rst',
-    'TESTS.rst'
-]
 
 # these names are not required to be present in ALL despite being in
 # autosummary:: listing
@@ -161,14 +106,6 @@
     # priority -- focus on just getting docstrings executed / correct
     r'numpy\.*',
 ]
-# deprecated windows in scipy.signal namespace
-for name in ('barthann', 'bartlett', 'blackmanharris', 'blackman', 'bohman',
-             'boxcar', 'chebwin', 'cosine', 'exponential', 'flattop',
-             'gaussian', 'general_gaussian', 'hamming', 'hann', 'hanning',
-             'kaiser', 'nuttall', 'parzen', 'slepian', 'triang', 'tukey'):
-    REFGUIDE_AUTOSUMMARY_SKIPLIST.append(r'scipy\.signal\.' + name)
-
-HAVE_MATPLOTLIB = False
 
 
 def short_path(path, cwd=None):
@@ -235,7 +172,8 @@ def find_names(module, names_dict):
     module_name = module.__name__
 
     for line in module.__doc__.splitlines():
-        res = re.search(r"^\s*\.\. (?:currentmodule|module):: ([a-z0-9A-Z_.]+)\s*$", line)
+        res = re.search(r"^\s*\.\. (?:currentmodule|module):: ([a-z0-9A-Z_.]+)\s*$",
+                        line)
         if res:
             module_name = res.group(1)
             continue
@@ -244,7 +182,7 @@ def find_names(module, names_dict):
             res = re.match(pattern, line)
             if res is not None:
                 name = res.group(1)
-                entry = '.'.join([module_name, name])
+                entry = f'{module_name}.{name}'
                 names_dict.setdefault(module_name, set()).add(name)
                 break
 
@@ -296,7 +234,7 @@ def get_all_dict(module):
         else:
             not_deprecated.append(name)
 
-    others = set(dir(module)).difference(set(deprecated)).difference(set(not_deprecated))
+    others = set(dir(module)).difference(set(deprecated)).difference(set(not_deprecated))  # noqa: E501
 
     return not_deprecated, deprecated, others
 
@@ -365,7 +303,7 @@ def is_deprecated(f):
     with warnings.catch_warnings(record=True) as w:
         warnings.simplefilter("error")
         try:
-            f(**{"not a kwarg":None})
+            f(**{"not a kwarg": None})
         except DeprecationWarning:
             return True
         except Exception:
@@ -403,8 +341,8 @@ def check_items(all_dict, names, deprecated, others, module_name, dots=True):
 
     output = ""
 
-    output += "Non-deprecated objects in __all__: %i\n" % num_all
-    output += "Objects in refguide: %i\n\n" % num_ref
+    output += f"Non-deprecated objects in __all__: {num_all}\n"
+    output += f"Objects in refguide: {num_ref}\n\n"
 
     only_all, only_ref, missing = compare(all_dict, others, names, module_name)
     dep_in_ref = only_ref.intersection(deprecated)
@@ -421,7 +359,7 @@ def check_items(all_dict, names, deprecated, others, module_name, dots=True):
         return [(None, True, output)]
     else:
         if len(only_all) > 0:
-            output += "ERROR: objects in %s.__all__ but not in refguide::\n\n" % module_name
+            output += f"ERROR: objects in {module_name}.__all__ but not in refguide::\n\n"  # noqa: E501
             for name in sorted(only_all):
                 output += "    " + name + "\n"
 
@@ -429,7 +367,7 @@ def check_items(all_dict, names, deprecated, others, module_name, dots=True):
             output += "the function listing in __init__.py for this module\n"
 
         if len(only_ref) > 0:
-            output += "ERROR: objects in refguide but not in %s.__all__::\n\n" % module_name
+            output += f"ERROR: objects in refguide but not in {module_name}.__all__::\n\n"  # noqa: E501
             for name in sorted(only_ref):
                 output += "    " + name + "\n"
 
@@ -466,14 +404,14 @@ def validate_rst_syntax(text, name, dots=True):
     if text is None:
         if dots:
             output_dot('E')
-        return False, "ERROR: %s: no documentation" % (name,)
+        return False, f"ERROR: {name}: no documentation"
 
-    ok_unknown_items = set([
+    ok_unknown_items = {
         'mod', 'doc', 'currentmodule', 'autosummary', 'data', 'attr',
         'obj', 'versionadded', 'versionchanged', 'module', 'class',
         'ref', 'func', 'toctree', 'moduleauthor', 'term', 'c:member',
         'sectionauthor', 'codeauthor', 'eq', 'doi', 'DOI', 'arXiv', 'arxiv'
-    ])
+    }
 
     # Run through docutils
     error_stream = io.StringIO()
@@ -485,16 +423,16 @@ def resolve(name, is_label=False):
 
     docutils.core.publish_doctree(
         text, token,
-        settings_overrides = dict(halt_level=5,
-                                  traceback=True,
-                                  default_reference_context='title-reference',
-                                  default_role='emphasis',
-                                  link_base='',
-                                  resolve_name=resolve,
-                                  stylesheet_path='',
-                                  raw_enabled=0,
-                                  file_insertion_enabled=0,
-                                  warning_stream=error_stream))
+        settings_overrides={'halt_level': 5,
+                            'traceback': True,
+                            'default_reference_context': 'title-reference',
+                            'default_role': 'emphasis',
+                            'link_base': '',
+                            'resolve_name': resolve,
+                            'stylesheet_path': '',
+                            'raw_enabled': 0,
+                            'file_insertion_enabled': 0,
+                            'warning_stream': error_stream})
 
     # Print errors, disregarding unimportant ones
     error_msg = error_stream.getvalue()
@@ -507,23 +445,23 @@ def resolve(name, is_label=False):
         if not lines:
             continue
 
-        m = re.match(r'.*Unknown (?:interpreted text role|directive type) "(.*)".*$', lines[0])
+        m = re.match(r'.*Unknown (?:interpreted text role|directive type) "(.*)".*$', lines[0])  # noqa: E501
         if m:
             if m.group(1) in ok_unknown_items:
                 continue
 
-        m = re.match(r'.*Error in "math" directive:.*unknown option: "label"', " ".join(lines), re.S)
+        m = re.match(r'.*Error in "math" directive:.*unknown option: "label"', " ".join(lines), re.S)  # noqa: E501
         if m:
             continue
 
-        output += name + lines[0] + "::\n    " + "\n    ".join(lines[1:]).rstrip() + "\n"
+        output += name + lines[0] + "::\n    " + "\n    ".join(lines[1:]).rstrip() + "\n"  # noqa: E501
         success = False
 
     if not success:
-        output += "    " + "-"*72 + "\n"
+        output += "    " + "-" * 72 + "\n"
         for lineno, line in enumerate(text.splitlines()):
-            output += "    %-4d    %s\n" % (lineno+1, line)
-        output += "    " + "-"*72 + "\n\n"
+            output += "    %-4d    %s\n" % (lineno + 1, line)
+        output += "    " + "-" * 72 + "\n\n"
 
     if dots:
         output_dot('.' if success else 'F')
@@ -551,12 +489,7 @@ def check_rest(module, names, dots=True):
         List of [(module_name, success_flag, output),...]
     """
 
-    try:
-        skip_types = (dict, str, unicode, float, int)
-    except NameError:
-        # python 3
-        skip_types = (dict, str, float, int)
-
+    skip_types = (dict, str, float, int)
 
     results = []
 
@@ -570,7 +503,7 @@ def check_rest(module, names, dots=True):
         obj = getattr(module, name, None)
 
         if obj is None:
-            results.append((full_name, False, "%s has no docstring" % (full_name,)))
+            results.append((full_name, False, f"{full_name} has no docstring"))
             continue
         elif isinstance(obj, skip_types):
             continue
@@ -604,526 +537,12 @@ def check_rest(module, names, dots=True):
         else:
             file_full_name = full_name
 
-        results.append((full_name,) + validate_rst_syntax(text, file_full_name, dots=dots))
-
-    return results
-
-
-### Doctest helpers ####
-
-# the namespace to run examples in
-DEFAULT_NAMESPACE = {'np': np}
-
-# the namespace to do checks in
-CHECK_NAMESPACE = {
-      'np': np,
-      'numpy': np,
-      'assert_allclose': np.testing.assert_allclose,
-      'assert_equal': np.testing.assert_equal,
-      # recognize numpy repr's
-      'array': np.array,
-      'matrix': np.matrix,
-      'int64': np.int64,
-      'uint64': np.uint64,
-      'int8': np.int8,
-      'int32': np.int32,
-      'float32': np.float32,
-      'float64': np.float64,
-      'dtype': np.dtype,
-      'nan': np.nan,
-      'inf': np.inf,
-      'StringIO': io.StringIO,
-}
-
-
-class DTRunner(doctest.DocTestRunner):
-    """
-    The doctest runner
-    """
-    DIVIDER = "\n"
-
-    def __init__(self, item_name, checker=None, verbose=None, optionflags=0):
-        self._item_name = item_name
-        doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose,
-                                       optionflags=optionflags)
-
-    def _report_item_name(self, out, new_line=False):
-        if self._item_name is not None:
-            if new_line:
-                out("\n")
-            self._item_name = None
-
-    def report_start(self, out, test, example):
-        self._checker._source = example.source
-        return doctest.DocTestRunner.report_start(self, out, test, example)
-
-    def report_success(self, out, test, example, got):
-        if self._verbose:
-            self._report_item_name(out, new_line=True)
-        return doctest.DocTestRunner.report_success(self, out, test, example, got)
-
-    def report_unexpected_exception(self, out, test, example, exc_info):
-        self._report_item_name(out)
-        return doctest.DocTestRunner.report_unexpected_exception(
-            self, out, test, example, exc_info)
-
-    def report_failure(self, out, test, example, got):
-        self._report_item_name(out)
-        return doctest.DocTestRunner.report_failure(self, out, test,
-                                                    example, got)
-
-class Checker(doctest.OutputChecker):
-    """
-    Check the docstrings
-    """
-    obj_pattern = re.compile('at 0x[0-9a-fA-F]+>')
-    vanilla = doctest.OutputChecker()
-    rndm_markers = {'# random', '# Random', '#random', '#Random', "# may vary",
-                    "# uninitialized", "#uninitialized", "# uninit"}
-    stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',
-                 'set_title', 'imshow', 'plt.show', '.axis(', '.plot(',
-                 '.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim', 'set_xlim',
-                 '# reformatted', '.set_xlabel(', '.set_ylabel(', '.set_zlabel(',
-                 '.set(xlim=', '.set(ylim=', '.set(xlabel=', '.set(ylabel='}
-
-    def __init__(self, parse_namedtuples=True, ns=None, atol=1e-8, rtol=1e-2):
-        self.parse_namedtuples = parse_namedtuples
-        self.atol, self.rtol = atol, rtol
-        if ns is None:
-            self.ns = CHECK_NAMESPACE
-        else:
-            self.ns = ns
-
-    def check_output(self, want, got, optionflags):
-        # cut it short if they are equal
-        if want == got:
-            return True
-
-        # skip stopwords in source
-        if any(word in self._source for word in self.stopwords):
-            return True
-
-        # skip random stuff
-        if any(word in want for word in self.rndm_markers):
-            return True
-
-        # skip function/object addresses
-        if self.obj_pattern.search(got):
-            return True
-
-        # ignore comments (e.g. signal.freqresp)
-        if want.lstrip().startswith("#"):
-            return True
-
-        # try the standard doctest
-        try:
-            if self.vanilla.check_output(want, got, optionflags):
-                return True
-        except Exception:
-            pass
-
-        # OK then, convert strings to objects
-        try:
-            a_want = eval(want, dict(self.ns))
-            a_got = eval(got, dict(self.ns))
-        except Exception:
-            # Maybe we're printing a numpy array? This produces invalid python
-            # code: `print(np.arange(3))` produces "[0 1 2]" w/o commas between
-            # values. So, reinsert commas and retry.
-            # TODO: handle (1) abbreviation (`print(np.arange(10000))`), and
-            #              (2) n-dim arrays with n > 1
-            s_want = want.strip()
-            s_got = got.strip()
-            cond = (s_want.startswith("[") and s_want.endswith("]") and
-                    s_got.startswith("[") and s_got.endswith("]"))
-            if cond:
-                s_want = ", ".join(s_want[1:-1].split())
-                s_got = ", ".join(s_got[1:-1].split())
-                return self.check_output(s_want, s_got, optionflags)
-
-            if not self.parse_namedtuples:
-                return False
-            # suppose that "want"  is a tuple, and "got" is smth like
-            # MoodResult(statistic=10, pvalue=0.1).
-            # Then convert the latter to the tuple (10, 0.1),
-            # and then compare the tuples.
-            try:
-                num = len(a_want)
-                regex = (r'[\w\d_]+\(' +
-                         ', '.join([r'[\w\d_]+=(.+)']*num) +
-                         r'\)')
-                grp = re.findall(regex, got.replace('\n', ' '))
-                if len(grp) > 1:  # no more than one for now
-                    return False
-                # fold it back to a tuple
-                got_again = '(' + ', '.join(grp[0]) + ')'
-                return self.check_output(want, got_again, optionflags)
-            except Exception:
-                return False
-
-        # ... and defer to numpy
-        try:
-            return self._do_check(a_want, a_got)
-        except Exception:
-            # heterog tuple, eg (1, np.array([1., 2.]))
-           try:
-                return all(self._do_check(w, g) for w, g in zip(a_want, a_got))
-           except (TypeError, ValueError):
-                return False
-
-    def _do_check(self, want, got):
-        # This should be done exactly as written to correctly handle all of
-        # numpy-comparable objects, strings, and heterogeneous tuples
-        try:
-            if want == got:
-                return True
-        except Exception:
-            pass
-        return np.allclose(want, got, atol=self.atol, rtol=self.rtol)
-
-
-def _run_doctests(tests, full_name, verbose, doctest_warnings):
-    """
-    Run modified doctests for the set of `tests`.
-
-    Parameters
-    ----------
-    tests : list
-
-    full_name : str
-
-    verbose : bool
-    doctest_warnings : bool
-
-    Returns
-    -------
-    tuple(bool, list)
-        Tuple of (success, output)
-    """
-    flags = NORMALIZE_WHITESPACE | ELLIPSIS
-    runner = DTRunner(full_name, checker=Checker(), optionflags=flags,
-                      verbose=verbose)
-
-    output = io.StringIO(newline='')
-    success = True
-
-    # Redirect stderr to the stdout or output
-    tmp_stderr = sys.stdout if doctest_warnings else output
-
-    @contextmanager
-    def temp_cwd():
-        cwd = os.getcwd()
-        tmpdir = tempfile.mkdtemp()
-        try:
-            os.chdir(tmpdir)
-            yield tmpdir
-        finally:
-            os.chdir(cwd)
-            shutil.rmtree(tmpdir)
-
-    # Run tests, trying to restore global state afterward
-    cwd = os.getcwd()
-    with np.errstate(), np.printoptions(), temp_cwd() as tmpdir, \
-            redirect_stderr(tmp_stderr):
-        # try to ensure random seed is NOT reproducible
-        np.random.seed(None)
-
-        ns = {}
-        for t in tests:
-            # We broke the tests up into chunks to try to avoid PSEUDOCODE
-            # This has the unfortunate side effect of restarting the global
-            # namespace for each test chunk, so variables will be "lost" after
-            # a chunk. Chain the globals to avoid this
-            t.globs.update(ns)
-            t.filename = short_path(t.filename, cwd)
-            # Process our options
-            if any([SKIPBLOCK in ex.options for ex in t.examples]):
-                continue
-            fails, successes = runner.run(t, out=output.write, clear_globs=False)
-            if fails > 0:
-                success = False
-            ns = t.globs
-
-    output.seek(0)
-    return success, output.read()
-
-
-def check_doctests(module, verbose, ns=None,
-                   dots=True, doctest_warnings=False):
-    """
-    Check code in docstrings of the module's public symbols.
-
-    Parameters
-    ----------
-    module : ModuleType
-        Name of module
-    verbose : bool
-        Should the result be verbose
-    ns : dict
-        Name space of module
-    dots : bool
-
-    doctest_warnings : bool
-
-    Returns
-    -------
-    results : list
-        List of [(item_name, success_flag, output), ...]
-    """
-    if ns is None:
-        ns = dict(DEFAULT_NAMESPACE)
-
-    # Loop over non-deprecated items
-    results = []
-
-    for name in get_all_dict(module)[0]:
-        full_name = module.__name__ + '.' + name
-
-        if full_name in DOCTEST_SKIPDICT:
-            skip_methods = DOCTEST_SKIPDICT[full_name]
-            if skip_methods is None:
-                continue
-        else:
-            skip_methods = None
-
-        try:
-            obj = getattr(module, name)
-        except AttributeError:
-            import traceback
-            results.append((full_name, False,
-                            "Missing item!\n" +
-                            traceback.format_exc()))
-            continue
-
-        finder = doctest.DocTestFinder()
-        try:
-            tests = finder.find(obj, name, globs=dict(ns))
-        except Exception:
-            import traceback
-            results.append((full_name, False,
-                            "Failed to get doctests!\n" +
-                            traceback.format_exc()))
-            continue
-
-        if skip_methods is not None:
-            tests = [i for i in tests if
-                     i.name.partition(".")[2] not in skip_methods]
-
-        success, output = _run_doctests(tests, full_name, verbose,
-                                        doctest_warnings)
-
-        if dots:
-            output_dot('.' if success else 'F')
-
-        results.append((full_name, success, output))
-
-        if HAVE_MATPLOTLIB:
-            import matplotlib.pyplot as plt
-            plt.close('all')
+        results.append((full_name,) +
+                       validate_rst_syntax(text, file_full_name, dots=dots))
 
     return results
 
 
-def check_doctests_testfile(fname, verbose, ns=None,
-                   dots=True, doctest_warnings=False):
-    """
-    Check code in a text file.
-
-    Mimic `check_doctests` above, differing mostly in test discovery.
-    (which is borrowed from stdlib's doctest.testfile here,
-     https://github.com/python-git/python/blob/master/Lib/doctest.py)
-
-    Parameters
-    ----------
-    fname : str
-        File name
-    verbose : bool
-
-    ns : dict
-        Name space
-
-    dots : bool
-
-    doctest_warnings : bool
-
-    Returns
-    -------
-    list
-        List of [(item_name, success_flag, output), ...]
-
-    Notes
-    -----
-
-    refguide can be signalled to skip testing code by adding
-    ``#doctest: +SKIP`` to the end of the line. If the output varies or is
-    random, add ``# may vary`` or ``# random`` to the comment. for example
-
-    >>> plt.plot(...)  # doctest: +SKIP
-    >>> random.randint(0,10)
-    5 # random
-
-    We also try to weed out pseudocode:
-    * We maintain a list of exceptions which signal pseudocode,
-    * We split the text file into "blocks" of code separated by empty lines
-      and/or intervening text.
-    * If a block contains a marker, the whole block is then assumed to be
-      pseudocode. It is then not being doctested.
-
-    The rationale is that typically, the text looks like this:
-
-    blah
-    
-    >>> from numpy import some_module   # pseudocode!
-    >>> func = some_module.some_function
-    >>> func(42)                  # still pseudocode
-    146
-    
-    blah
-    
-    >>> 2 + 3        # real code, doctest it
-    5
-
-    """
-    if ns is None:
-        ns = CHECK_NAMESPACE
-    results = []
-
-    _, short_name = os.path.split(fname)
-    if short_name in DOCTEST_SKIPDICT:
-        return results
-
-    full_name = fname
-    with open(fname, encoding='utf-8') as f:
-        text = f.read()
-
-    PSEUDOCODE = set(['some_function', 'some_module', 'import example',
-                      'ctypes.CDLL',     # likely need compiling, skip it
-                      'integrate.nquad(func,'  # ctypes integrate tutotial
-    ])
-
-    # split the text into "blocks" and try to detect and omit pseudocode blocks.
-    parser = doctest.DocTestParser()
-    good_parts = []
-    base_line_no = 0
-    for part in text.split('\n\n'):
-        try:
-            tests = parser.get_doctest(part, ns, fname, fname, base_line_no)
-        except ValueError as e:
-            if e.args[0].startswith('line '):
-                # fix line number since `parser.get_doctest` does not increment
-                # the reported line number by base_line_no in the error message
-                parts = e.args[0].split()
-                parts[1] = str(int(parts[1]) + base_line_no)
-                e.args = (' '.join(parts),) + e.args[1:]
-            raise
-        if any(word in ex.source for word in PSEUDOCODE
-                                 for ex in tests.examples):
-            # omit it
-            pass
-        else:
-            # `part` looks like a good code, let's doctest it
-            good_parts.append((part, base_line_no))
-        base_line_no += part.count('\n') + 2
-
-    # Reassemble the good bits and doctest them:
-    tests = []
-    for good_text, line_no in good_parts:
-        tests.append(parser.get_doctest(good_text, ns, fname, fname, line_no))
-    success, output = _run_doctests(tests, full_name, verbose,
-                                    doctest_warnings)
-
-    if dots:
-        output_dot('.' if success else 'F')
-
-    results.append((full_name, success, output))
-
-    if HAVE_MATPLOTLIB:
-        import matplotlib.pyplot as plt
-        plt.close('all')
-
-    return results
-
-
-def iter_included_files(base_path, verbose=0, suffixes=('.rst',)):
-    """
-    Generator function to walk `base_path` and its subdirectories, skipping
-    files or directories in RST_SKIPLIST, and yield each file with a suffix in
-    `suffixes`
-
-    Parameters
-    ----------
-    base_path : str
-        Base path of the directory to be processed
-    verbose : int
-
-    suffixes : tuple
-
-    Yields
-    ------
-    path
-        Path of the directory and its sub directories
-    """
-    if os.path.exists(base_path) and os.path.isfile(base_path):
-        yield base_path
-    for dir_name, subdirs, files in os.walk(base_path, topdown=True):
-        if dir_name in RST_SKIPLIST:
-            if verbose > 0:
-                sys.stderr.write('skipping files in %s' % dir_name)
-            files = []
-        for p in RST_SKIPLIST:
-            if p in subdirs:
-                if verbose > 0:
-                    sys.stderr.write('skipping %s and subdirs' % p)
-                subdirs.remove(p)
-        for f in files:
-            if (os.path.splitext(f)[1] in suffixes and
-                    f not in RST_SKIPLIST):
-                yield os.path.join(dir_name, f)
-
-
-def check_documentation(base_path, results, args, dots):
-    """
-    Check examples in any *.rst located inside `base_path`.
-    Add the output to `results`.
-
-    See Also
-    --------
-    check_doctests_testfile
-    """
-    for filename in iter_included_files(base_path, args.verbose):
-        if dots:
-            sys.stderr.write(filename + ' ')
-            sys.stderr.flush()
-
-        tut_results = check_doctests_testfile(
-            filename,
-            (args.verbose >= 2), dots=dots,
-            doctest_warnings=args.doctest_warnings)
-
-        # stub out a "module" which is needed when reporting the result
-        def scratch():
-            pass
-        scratch.__name__ = filename
-        results.append((scratch, tut_results))
-        if dots:
-            sys.stderr.write('\n')
-            sys.stderr.flush()
-
-
-def init_matplotlib():
-    """
-    Check feasibility of matplotlib initialization.
-    """
-    global HAVE_MATPLOTLIB
-
-    try:
-        import matplotlib
-        matplotlib.use('Agg')
-        HAVE_MATPLOTLIB = True
-    except ImportError:
-        HAVE_MATPLOTLIB = False
-
-
 def main(argv):
     """
     Validates the docstrings of all the pre decided set of
@@ -1132,15 +551,7 @@ def main(argv):
     parser = ArgumentParser(usage=__doc__.lstrip())
     parser.add_argument("module_names", metavar="SUBMODULES", default=[],
                         nargs='*', help="Submodules to check (default: all public)")
-    parser.add_argument("--doctests", action="store_true",
-                        help="Run also doctests on ")
     parser.add_argument("-v", "--verbose", action="count", default=0)
-    parser.add_argument("--doctest-warnings", action="store_true",
-                        help="Enforce warning checking for doctests")
-    parser.add_argument("--rst", nargs='?', const='doc', default=None,
-                        help=("Run also examples from *rst files "
-                              "discovered walking the directory(s) specified, "
-                              "defaults to 'doc'"))
     args = parser.parse_args(argv)
 
     modules = []
@@ -1149,24 +560,19 @@ def main(argv):
     if not args.module_names:
         args.module_names = list(PUBLIC_SUBMODULES) + [BASE_MODULE]
 
-    os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true'
-
-    module_names = list(args.module_names)
-    for name in module_names:
-        if name in OTHER_MODULE_DOCS:
-            name = OTHER_MODULE_DOCS[name]
-            if name not in module_names:
-                module_names.append(name)
+    module_names = args.module_names + [
+        OTHER_MODULE_DOCS[name]
+        for name in args.module_names
+        if name in OTHER_MODULE_DOCS
+    ]
+    # remove duplicates while maintaining order
+    module_names = list(dict.fromkeys(module_names))
 
     dots = True
     success = True
     results = []
     errormsgs = []
 
-
-    if args.doctests or args.rst:
-        init_matplotlib()
-
     for submodule_name in module_names:
         prefix = BASE_MODULE + '.'
         if not (
@@ -1186,8 +592,8 @@ def main(argv):
         if submodule_name in args.module_names:
             modules.append(module)
 
-    if args.doctests or not args.rst:
-        print("Running checks for %d modules:" % (len(modules),))
+    if modules:
+        print(f"Running checks for {len(modules)} modules:")
         for module in modules:
             if dots:
                 sys.stderr.write(module.__name__ + ' ')
@@ -1201,9 +607,6 @@ def main(argv):
                                        module.__name__)
             mod_results += check_rest(module, set(names).difference(deprecated),
                                       dots=dots)
-            if args.doctests:
-                mod_results += check_doctests(module, (args.verbose >= 2), dots=dots,
-                                              doctest_warnings=args.doctest_warnings)
 
             for v in mod_results:
                 assert isinstance(v, tuple), v
@@ -1214,19 +617,6 @@ def main(argv):
                 sys.stderr.write('\n')
                 sys.stderr.flush()
 
-    if args.rst:
-        base_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
-        rst_path = os.path.relpath(os.path.join(base_dir, args.rst))
-        if os.path.exists(rst_path):
-            print('\nChecking files in %s:' % rst_path)
-            check_documentation(rst_path, results, args, dots)
-        else:
-            sys.stderr.write(f'\ninvalid --rst argument "{args.rst}"')
-            errormsgs.append('invalid directory argument to --rst')
-        if dots:
-            sys.stderr.write("\n")
-            sys.stderr.flush()
-
     # Report results
     for module, mod_results in results:
         success = all(x[1] for x in mod_results)
@@ -1249,7 +639,7 @@ def main(argv):
                     print("")
             elif not success or (args.verbose >= 2 and output.strip()):
                 print(name)
-                print("-"*len(name))
+                print("-" * len(name))
                 print("")
                 print(output.strip())
                 print("")
diff --git a/tools/stubtest/allowlist.txt b/tools/stubtest/allowlist.txt
new file mode 100644
index 000000000000..bafd2c795f88
--- /dev/null
+++ b/tools/stubtest/allowlist.txt
@@ -0,0 +1,184 @@
+# intentional type-check-only deviations from runtime
+numpy\._typing.*
+numpy\.polynomial\._polytypes
+
+# internal testing code
+numpy\.conftest.*
+numpy\.random\._generator\.__test__
+numpy(\.\w+)?\.tests.*
+
+# system-dependent extended precision types
+numpy(\..+)?\.float(96|128)
+numpy(\..+)?\.complex(192|256)
+
+# system-dependent SIMD constants
+numpy\._core\._simd\.\w+
+
+# these are always either float96/complex192 or float128/complex256
+numpy\.__all__
+numpy\._core\.__all__
+numpy\._?core\.numeric\.__all__
+numpy\._?core\.numerictypes\.__all__
+numpy\.matlib\.__all__
+
+# requires numpy/_core/code_generators to be on the PYTHONPATH when running stubtest
+numpy\._core\.cversions
+
+# raises SystemExit on import
+numpy\.f2py\.__main__
+
+# inexpressible: the `dtype.type` class-attribute is `None` unless instantiated
+numpy(\..+)?\.dtype\.type
+
+# import errors
+numpy\._pyinstaller\..*
+
+# known minor deviations from runtime
+numpy\.(\w+\.)*integer\.__index__
+numpy\.(\w+\.)*integer\.bit_count
+numpy\.(\w+\.)*floating\.as_integer_ratio
+numpy\.(\w+\.)*floating\.is_integer
+numpy\.(\w+\.)*complexfloating\.__complex__
+# https://github.com/numpy/numpy/issues/30445#issuecomment-3665484402
+numpy\.(\w+\.)*generic\.__hash__
+
+# intentionally missing deprecated module stubs
+numpy\.typing\.mypy_plugin
+
+# false positive "... is not a Union" errors
+numpy\.typing\.ArrayLike
+numpy\.typing\.DTypeLike
+numpy\.typing\.NDArray
+
+# ufuncs, see https://github.com/python/mypy/issues/20223
+numpy\.(\w+\.)*abs
+numpy\.(\w+\.)*absolute
+numpy\.(\w+\.)*acos
+numpy\.(\w+\.)*acosh
+numpy\.(\w+\.)*add
+numpy\.(\w+\.)*arccos
+numpy\.(\w+\.)*arccosh
+numpy\.(\w+\.)*arcsin
+numpy\.(\w+\.)*arcsinh
+numpy\.(\w+\.)*arctan
+numpy\.(\w+\.)*arctan2
+numpy\.(\w+\.)*arctanh
+numpy\.(\w+\.)*asin
+numpy\.(\w+\.)*asinh
+numpy\.(\w+\.)*atan
+numpy\.(\w+\.)*atan2
+numpy\.(\w+\.)*atanh
+numpy\.(\w+\.)*bitwise_and
+numpy\.(\w+\.)*bitwise_count
+numpy\.(\w+\.)*bitwise_invert
+numpy\.(\w+\.)*bitwise_left_shift
+numpy\.(\w+\.)*bitwise_not
+numpy\.(\w+\.)*bitwise_or
+numpy\.(\w+\.)*bitwise_right_shift
+numpy\.(\w+\.)*bitwise_xor
+numpy\.(\w+\.)*cbrt
+numpy\.(\w+\.)*ceil
+numpy\.(\w+\.)*conj
+numpy\.(\w+\.)*conjugate
+numpy\.(\w+\.)*copysign
+numpy\.(\w+\.)*cos
+numpy\.(\w+\.)*cosh
+numpy\.(\w+\.)*deg2rad
+numpy\.(\w+\.)*degrees
+numpy\.(\w+\.)*divide
+numpy\.(\w+\.)*divmod
+numpy\.(\w+\.)*equal
+numpy\.(\w+\.)*exp
+numpy\.(\w+\.)*exp2
+numpy\.(\w+\.)*expm1
+numpy\.(\w+\.)*fabs
+numpy\.(\w+\.)*float_power
+numpy\.(\w+\.)*floor
+numpy\.(\w+\.)*floor_divide
+numpy\.(\w+\.)*fmax
+numpy\.(\w+\.)*fmin
+numpy\.(\w+\.)*fmod
+numpy\.(\w+\.)*frexp
+numpy\.(\w+\.)*gcd
+numpy\.(\w+\.)*greater
+numpy\.(\w+\.)*greater_equal
+numpy\.(\w+\.)*heaviside
+numpy\.(\w+\.)*hypot
+numpy\.(\w+\.)*invert
+numpy\.(\w+\.)*isfinite
+numpy\.(\w+\.)*isinf
+numpy\.(\w+\.)*isnan
+numpy\.(\w+\.)*isnat
+numpy\.(\w+\.)*lcm
+numpy\.(\w+\.)*ldexp
+numpy\.(\w+\.)*left_shift
+numpy\.(\w+\.)*less
+numpy\.(\w+\.)*less_equal
+numpy\.(\w+\.)*log
+numpy\.(\w+\.)*log10
+numpy\.(\w+\.)*log1p
+numpy\.(\w+\.)*log2
+numpy\.(\w+\.)*logaddexp
+numpy\.(\w+\.)*logaddexp2
+numpy\.(\w+\.)*logical_and
+numpy\.(\w+\.)*logical_not
+numpy\.(\w+\.)*logical_or
+numpy\.(\w+\.)*logical_xor
+numpy\.(\w+\.)*matmul
+numpy\.(\w+\.)*matvec
+numpy\.(\w+\.)*maximum
+numpy\.(\w+\.)*minimum
+numpy\.(\w+\.)*mod
+numpy\.(\w+\.)*modf
+numpy\.(\w+\.)*multiply
+numpy\.(\w+\.)*negative
+numpy\.(\w+\.)*nextafter
+numpy\.(\w+\.)*not_equal
+numpy\.(\w+\.)*positive
+numpy\.(\w+\.)*pow
+numpy\.(\w+\.)*power
+numpy\.(\w+\.)*rad2deg
+numpy\.(\w+\.)*radians
+numpy\.(\w+\.)*reciprocal
+numpy\.(\w+\.)*remainder
+numpy\.(\w+\.)*right_shift
+numpy\.(\w+\.)*rint
+numpy\.(\w+\.)*sign
+numpy\.(\w+\.)*signbit
+numpy\.(\w+\.)*sin
+numpy\.(\w+\.)*sinh
+numpy\.(\w+\.)*spacing
+numpy\.(\w+\.)*sqrt
+numpy\.(\w+\.)*square
+numpy\.(\w+\.)*subtract
+numpy\.(\w+\.)*tan
+numpy\.(\w+\.)*tanh
+numpy\.(\w+\.)*true_divide
+numpy\.(\w+\.)*trunc
+numpy\.(\w+\.)*vecdot
+numpy\.(\w+\.)*vecmat
+numpy\.(\w+\.)*isalnum
+numpy\.(\w+\.)*isalpha
+numpy\.(\w+\.)*isdecimal
+numpy\.(\w+\.)*isdigit
+numpy\.(\w+\.)*islower
+numpy\.(\w+\.)*isnumeric
+numpy\.(\w+\.)*isspace
+numpy\.(\w+\.)*istitle
+numpy\.(\w+\.)*isupper
+numpy\.(\w+\.)*str_len
+numpy\._core\._methods\.umr_bitwise_count
+numpy\._core\._umath_tests\.always_error
+numpy\._core\._umath_tests\.always_error_gufunc
+numpy\._core\._umath_tests\.always_error_unary
+numpy\._core\._umath_tests\.conv1d_full
+numpy\._core\._umath_tests\.cross1d
+numpy\._core\._umath_tests\.euclidean_pdist
+numpy\._core\._umath_tests\.indexed_negative
+numpy\._core\._umath_tests\.inner1d
+numpy\._core\._umath_tests\.inner1d_no_doc
+numpy\._core\._umath_tests\.matrix_multiply
+numpy\.linalg\._umath_linalg\.qr_complete
+numpy\.linalg\._umath_linalg\.qr_reduced
+numpy\.linalg\._umath_linalg\.solve
+numpy\.linalg\._umath_linalg\.solve1
diff --git a/tools/stubtest/mypy.ini b/tools/stubtest/mypy.ini
new file mode 100644
index 000000000000..4c75171acffe
--- /dev/null
+++ b/tools/stubtest/mypy.ini
@@ -0,0 +1,24 @@
+[mypy]
+files = numpy/**/*.pyi
+exclude = (?x)(
+    ^numpy/(
+      .+\.py$
+      | _build_utils/
+      | _core/code_generators/
+    )
+  )
+namespace_packages = False
+
+enable_error_code = deprecated, ignore-without-code, redundant-expr, truthy-bool
+warn_unreachable = False
+strict = True
+strict_bytes = True
+allow_redefinition_new = True
+local_partial_types = True
+
+; Stubtest requires mypy to pass before running, but mypy currently fails
+; on numerous stubs. To allow running stubtest independently, we temporarily
+; ignore these errors here. The goal is to eventually fix these mypy errors
+; and remove the ignores.
+; See also https://github.com/numpy/numpy/issues/27032
+disable_error_code = overload-overlap
diff --git a/tools/swig/README b/tools/swig/README
index c539c597f8c6..876d6a698034 100644
--- a/tools/swig/README
+++ b/tools/swig/README
@@ -3,9 +3,7 @@ Notes for the numpy/tools/swig directory
 
 This set of files is for developing and testing file numpy.i, which is
 intended to be a set of typemaps for helping SWIG interface between C
-and C++ code that uses C arrays and the python module NumPy.  It is
-ultimately hoped that numpy.i will be included as part of the SWIG
-distribution.
+and C++ code that uses C arrays and the python module NumPy.
 
 Documentation
 -------------
diff --git a/tools/swig/numpy.i b/tools/swig/numpy.i
index c8c26cbcd3d6..747446648c8b 100644
--- a/tools/swig/numpy.i
+++ b/tools/swig/numpy.i
@@ -1989,7 +1989,7 @@ void free_cap(PyObject * cap)
 %typemap(argout)
   (DATA_TYPE ARGOUT_ARRAY1[ANY])
 {
-  $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum);
+  $result = SWIG_AppendOutput($result,(PyObject*)array$argnum);
 }
 
 /* Typemap suite for (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1)
@@ -2018,7 +2018,7 @@ void free_cap(PyObject * cap)
 %typemap(argout)
   (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1)
 {
-  $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum);
+  $result = SWIG_AppendOutput($result,(PyObject*)array$argnum);
 }
 
 /* Typemap suite for (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1)
@@ -2047,7 +2047,7 @@ void free_cap(PyObject * cap)
 %typemap(argout)
   (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1)
 {
-  $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum);
+  $result = SWIG_AppendOutput($result,(PyObject*)array$argnum);
 }
 
 /* Typemap suite for (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY])
@@ -2065,7 +2065,7 @@ void free_cap(PyObject * cap)
 %typemap(argout)
   (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY])
 {
-  $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum);
+  $result = SWIG_AppendOutput($result,(PyObject*)array$argnum);
 }
 
 /* Typemap suite for (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY])
@@ -2083,7 +2083,7 @@ void free_cap(PyObject * cap)
 %typemap(argout)
   (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY])
 {
-  $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum);
+  $result = SWIG_AppendOutput($result,(PyObject*)array$argnum);
 }
 
 /* Typemap suite for (DATA_TYPE ARGOUT_ARRAY4[ANY][ANY][ANY][ANY])
@@ -2101,7 +2101,7 @@ void free_cap(PyObject * cap)
 %typemap(argout)
   (DATA_TYPE ARGOUT_ARRAY4[ANY][ANY][ANY][ANY])
 {
-  $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum);
+  $result = SWIG_AppendOutput($result,(PyObject*)array$argnum);
 }
 
 /*****************************/
@@ -2126,7 +2126,7 @@ void free_cap(PyObject * cap)
   PyArrayObject* array = (PyArrayObject*) obj;
 
   if (!array) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1)
@@ -2147,7 +2147,7 @@ void free_cap(PyObject * cap)
   PyArrayObject* array = (PyArrayObject*) obj;
 
   if (!array) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2)
@@ -2169,7 +2169,7 @@ void free_cap(PyObject * cap)
   PyArrayObject* array = (PyArrayObject*) obj;
 
   if (!array) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2)
@@ -2191,7 +2191,7 @@ void free_cap(PyObject * cap)
   PyArrayObject* array = (PyArrayObject*) obj;
 
   if (!array) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2)
@@ -2213,7 +2213,7 @@ void free_cap(PyObject * cap)
   PyArrayObject* array = (PyArrayObject*) obj;
 
   if (!array || !require_fortran(array)) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2)
@@ -2235,7 +2235,7 @@ void free_cap(PyObject * cap)
   PyArrayObject* array = (PyArrayObject*) obj;
 
   if (!array || !require_fortran(array)) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
@@ -2259,7 +2259,7 @@ void free_cap(PyObject * cap)
   PyArrayObject* array = (PyArrayObject*) obj;
 
   if (!array) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3,
@@ -2283,7 +2283,7 @@ void free_cap(PyObject * cap)
   PyArrayObject* array = (PyArrayObject*) obj;
 
   if (!array) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
@@ -2307,7 +2307,7 @@ void free_cap(PyObject * cap)
   PyArrayObject* array = (PyArrayObject*) obj;
 
   if (!array || !require_fortran(array)) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3,
@@ -2331,7 +2331,7 @@ void free_cap(PyObject * cap)
   PyArrayObject* array = (PyArrayObject*) obj;
 
   if (!array || !require_fortran(array)) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
@@ -2356,7 +2356,7 @@ void free_cap(PyObject * cap)
   PyArrayObject* array = (PyArrayObject*) obj;
 
   if (!array) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4,
@@ -2381,7 +2381,7 @@ void free_cap(PyObject * cap)
   PyArrayObject* array = (PyArrayObject*) obj;
 
   if (!array) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DATA_TYPE** ARGOUTVIEW_FARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
@@ -2406,7 +2406,7 @@ void free_cap(PyObject * cap)
   PyArrayObject* array = (PyArrayObject*) obj;
 
   if (!array || !require_fortran(array)) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4,
@@ -2431,7 +2431,7 @@ void free_cap(PyObject * cap)
   PyArrayObject* array = (PyArrayObject*) obj;
 
   if (!array || !require_fortran(array)) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /*************************************/
@@ -2465,7 +2465,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
   PyArray_SetBaseObject(array,cap);
 %#endif
 
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEWM_ARRAY1)
@@ -2495,7 +2495,7 @@ PyObject* cap = PyCapsule_New((void*)(*$2), SWIGPY_CAPSULE_NAME, free_cap);
   PyArray_SetBaseObject(array,cap);
 %#endif
 
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2)
@@ -2526,7 +2526,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
   PyArray_SetBaseObject(array,cap);
 %#endif
 
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEWM_ARRAY2)
@@ -2557,7 +2557,7 @@ PyObject* cap = PyCapsule_New((void*)(*$3), SWIGPY_CAPSULE_NAME, free_cap);
   PyArray_SetBaseObject(array,cap);
 %#endif
 
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2)
@@ -2588,7 +2588,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
   PyArray_SetBaseObject(array,cap);
 %#endif
 
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEWM_FARRAY2)
@@ -2619,7 +2619,7 @@ PyObject* cap = PyCapsule_New((void*)(*$3), SWIGPY_CAPSULE_NAME, free_cap);
   PyArray_SetBaseObject(array,cap);
 %#endif
 
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
@@ -2652,7 +2652,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
   PyArray_SetBaseObject(array,cap);
 %#endif
 
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3,
@@ -2685,7 +2685,7 @@ PyObject* cap = PyCapsule_New((void*)(*$4), SWIGPY_CAPSULE_NAME, free_cap);
   PyArray_SetBaseObject(array,cap);
 %#endif
 
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
@@ -2718,7 +2718,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
   PyArray_SetBaseObject(array,cap);
 %#endif
 
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3,
@@ -2751,7 +2751,7 @@ PyObject* cap = PyCapsule_New((void*)(*$4), SWIGPY_CAPSULE_NAME, free_cap);
   PyArray_SetBaseObject(array,cap);
 %#endif
 
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_ARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
@@ -2785,7 +2785,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
   PyArray_SetBaseObject(array,cap);
 %#endif
 
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4,
@@ -2819,7 +2819,7 @@ PyObject* cap = PyCapsule_New((void*)(*$5), SWIGPY_CAPSULE_NAME, free_cap);
   PyArray_SetBaseObject(array,cap);
 %#endif
 
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_FARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
@@ -2853,7 +2853,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
   PyArray_SetBaseObject(array,cap);
 %#endif
 
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4,
@@ -2887,7 +2887,7 @@ PyObject* cap = PyCapsule_New((void*)(*$5), SWIGPY_CAPSULE_NAME, free_cap);
   PyArray_SetBaseObject(array,cap);
 %#endif
 
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /**************************************/
diff --git a/tools/swig/test/Array2.cxx b/tools/swig/test/Array2.cxx
index 2da61f728569..11b523523617 100644
--- a/tools/swig/test/Array2.cxx
+++ b/tools/swig/test/Array2.cxx
@@ -160,7 +160,7 @@ void Array2::allocateRows()
 
 void Array2::deallocateMemory()
 {
-  if (_ownData && _nrows*_ncols && _buffer)
+  if (_ownData && _nrows && _ncols && _buffer)
   {
     delete [] _rows;
     delete [] _buffer;
diff --git a/tools/swig/test/setup.py b/tools/swig/test/setup.py
index 71830fd2cc53..98ba239942bd 100755
--- a/tools/swig/test/setup.py
+++ b/tools/swig/test/setup.py
@@ -1,8 +1,6 @@
 #!/usr/bin/env python3
-# System imports
 from distutils.core import Extension, setup
 
-# Third-party modules - we depend on numpy for everything
 import numpy
 
 # Obtain the numpy include directory.
@@ -14,55 +12,55 @@
                     "Array1.cxx",
                     "Array2.cxx",
                     "ArrayZ.cxx"],
-                   include_dirs = [numpy_include],
+                   include_dirs=[numpy_include],
                    )
 
 # Farray extension module
 _Farray = Extension("_Farray",
                     ["Farray_wrap.cxx",
                      "Farray.cxx"],
-                    include_dirs = [numpy_include],
+                    include_dirs=[numpy_include],
                     )
 
 # _Vector extension module
 _Vector = Extension("_Vector",
                     ["Vector_wrap.cxx",
                      "Vector.cxx"],
-                    include_dirs = [numpy_include],
+                    include_dirs=[numpy_include],
                     )
 
 # _Matrix extension module
 _Matrix = Extension("_Matrix",
                     ["Matrix_wrap.cxx",
                      "Matrix.cxx"],
-                    include_dirs = [numpy_include],
+                    include_dirs=[numpy_include],
                     )
 
 # _Tensor extension module
 _Tensor = Extension("_Tensor",
                     ["Tensor_wrap.cxx",
                      "Tensor.cxx"],
-                    include_dirs = [numpy_include],
+                    include_dirs=[numpy_include],
                     )
 
 _Fortran = Extension("_Fortran",
-                    ["Fortran_wrap.cxx",
-                     "Fortran.cxx"],
-                    include_dirs = [numpy_include],
-                    )
+                     ["Fortran_wrap.cxx",
+                      "Fortran.cxx"],
+                     include_dirs=[numpy_include],
+                     )
 
 _Flat = Extension("_Flat",
-                    ["Flat_wrap.cxx",
-                     "Flat.cxx"],
-                    include_dirs = [numpy_include],
-                    )
+                  ["Flat_wrap.cxx",
+                   "Flat.cxx"],
+                  include_dirs=[numpy_include],
+                  )
 
 # NumyTypemapTests setup
-setup(name        = "NumpyTypemapTests",
-      description = "Functions that work on arrays",
-      author      = "Bill Spotz",
-      py_modules  = ["Array", "Farray", "Vector", "Matrix", "Tensor",
-                     "Fortran", "Flat"],
-      ext_modules = [_Array, _Farray, _Vector, _Matrix, _Tensor,
+setup(name="NumpyTypemapTests",
+      description="Functions that work on arrays",
+      author="Bill Spotz",
+      py_modules=["Array", "Farray", "Vector", "Matrix", "Tensor",
+                  "Fortran", "Flat"],
+      ext_modules=[_Array, _Farray, _Vector, _Matrix, _Tensor,
                      _Fortran, _Flat]
       )
diff --git a/tools/swig/test/testArray.py b/tools/swig/test/testArray.py
index 49011bb13304..b9b4af3319ae 100755
--- a/tools/swig/test/testArray.py
+++ b/tools/swig/test/testArray.py
@@ -1,11 +1,10 @@
 #!/usr/bin/env python3
-# System imports
 import sys
 import unittest
 
-# Import NumPy
 import numpy as np
-major, minor = [ int(d) for d in np.__version__.split(".")[:2] ]
+
+major, minor = [int(d) for d in np.__version__.split(".")[:2]]
 if major == 0:
     BadListError = TypeError
 else:
@@ -39,7 +38,8 @@ def testConstructor2(self):
 
     def testConstructor3(self):
         "Test Array1 copy constructor"
-        for i in range(self.array1.length()): self.array1[i] = i
+        for i in range(self.array1.length()):
+            self.array1[i] = i
         arrayCopy = Array.Array1(self.array1)
         self.assertTrue(arrayCopy == self.array1)
 
@@ -63,7 +63,7 @@ def testResize0(self):
 
     def testResize1(self):
         "Test Array1 resize method, array"
-        a = np.zeros((2*self.length,), dtype='l')
+        a = np.zeros((2 * self.length,), dtype='l')
         self.array1.resize(a)
         self.assertTrue(len(self.array1) == a.size)
 
@@ -75,9 +75,9 @@ def testSetGet(self):
         "Test Array1 __setitem__, __getitem__ methods"
         n = self.length
         for i in range(n):
-            self.array1[i] = i*i
+            self.array1[i] = i * i
         for i in range(n):
-            self.assertTrue(self.array1[i] == i*i)
+            self.assertTrue(self.array1[i] == i * i)
 
     def testSetBad1(self):
         "Test Array1 __setitem__ method, negative index"
@@ -85,7 +85,7 @@ def testSetBad1(self):
 
     def testSetBad2(self):
         "Test Array1 __setitem__ method, out-of-range index"
-        self.assertRaises(IndexError, self.array1.__setitem__, self.length+1, 0)
+        self.assertRaises(IndexError, self.array1.__setitem__, self.length + 1, 0)
 
     def testGetBad1(self):
         "Test Array1 __getitem__ method, negative index"
@@ -93,21 +93,24 @@ def testGetBad1(self):
 
     def testGetBad2(self):
         "Test Array1 __getitem__ method, out-of-range index"
-        self.assertRaises(IndexError, self.array1.__getitem__, self.length+1)
+        self.assertRaises(IndexError, self.array1.__getitem__, self.length + 1)
 
     def testAsString(self):
         "Test Array1 asString method"
-        for i in range(self.array1.length()): self.array1[i] = i+1
+        for i in range(self.array1.length()):
+            self.array1[i] = i + 1
         self.assertTrue(self.array1.asString() == "[ 1, 2, 3, 4, 5 ]")
 
     def testStr(self):
         "Test Array1 __str__ method"
-        for i in range(self.array1.length()): self.array1[i] = i-2
+        for i in range(self.array1.length()):
+            self.array1[i] = i - 2
         self.assertTrue(str(self.array1) == "[ -2, -1, 0, 1, 2 ]")
 
     def testView(self):
         "Test Array1 view method"
-        for i in range(self.array1.length()): self.array1[i] = i+1
+        for i in range(self.array1.length()):
+            self.array1[i] = i + 1
         a = self.array1.view()
         self.assertTrue(isinstance(a, np.ndarray))
         self.assertTrue(len(a) == self.length)
@@ -164,7 +167,7 @@ def testNcols(self):
 
     def testLen(self):
         "Test Array2 __len__ method"
-        self.assertTrue(len(self.array2) == self.nrows*self.ncols)
+        self.assertTrue(len(self.array2) == self.nrows * self.ncols)
 
     def testResize0(self):
         "Test Array2 resize method, size"
@@ -175,7 +178,7 @@ def testResize0(self):
 
     def testResize1(self):
         "Test Array2 resize method, array"
-        a = np.zeros((2*self.nrows, 2*self.ncols), dtype='l')
+        a = np.zeros((2 * self.nrows, 2 * self.ncols), dtype='l')
         self.array2.resize(a)
         self.assertTrue(len(self.array2) == a.size)
 
@@ -191,10 +194,10 @@ def testSetGet1(self):
         "Test Array2 __setitem__, __getitem__ methods"
         m = self.nrows
         n = self.ncols
-        array1 = [ ]
+        array1 = []
         a = np.arange(n, dtype="l")
         for i in range(m):
-            array1.append(Array.Array1(i*a))
+            array1.append(Array.Array1(i * a))
         for i in range(m):
             self.array2[i] = array1[i]
         for i in range(m):
@@ -206,10 +209,10 @@ def testSetGet2(self):
         n = self.ncols
         for i in range(m):
             for j in range(n):
-                self.array2[i][j] = i*j
+                self.array2[i][j] = i * j
         for i in range(m):
             for j in range(n):
-                self.assertTrue(self.array2[i][j] == i*j)
+                self.assertTrue(self.array2[i][j] == i * j)
 
     def testSetBad1(self):
         "Test Array2 __setitem__ method, negative index"
@@ -219,7 +222,7 @@ def testSetBad1(self):
     def testSetBad2(self):
         "Test Array2 __setitem__ method, out-of-range index"
         a = Array.Array1(self.ncols)
-        self.assertRaises(IndexError, self.array2.__setitem__, self.nrows+1, a)
+        self.assertRaises(IndexError, self.array2.__setitem__, self.nrows + 1, a)
 
     def testGetBad1(self):
         "Test Array2 __getitem__ method, negative index"
@@ -227,7 +230,7 @@ def testGetBad1(self):
 
     def testGetBad2(self):
         "Test Array2 __getitem__ method, out-of-range index"
-        self.assertRaises(IndexError, self.array2.__getitem__, self.nrows+1)
+        self.assertRaises(IndexError, self.array2.__getitem__, self.nrows + 1)
 
     def testAsString(self):
         "Test Array2 asString method"
@@ -240,7 +243,7 @@ def testAsString(self):
 """
         for i in range(self.nrows):
             for j in range(self.ncols):
-                self.array2[i][j] = i+j
+                self.array2[i][j] = i + j
         self.assertTrue(self.array2.asString() == result)
 
     def testStr(self):
@@ -254,7 +257,7 @@ def testStr(self):
 """
         for i in range(self.nrows):
             for j in range(self.ncols):
-                self.array2[i][j] = i-j
+                self.array2[i][j] = i - j
         self.assertTrue(str(self.array2) == result)
 
     def testView(self):
@@ -289,7 +292,8 @@ def testConstructor2(self):
 
     def testConstructor3(self):
         "Test ArrayZ copy constructor"
-        for i in range(self.array3.length()): self.array3[i] = complex(i,-i)
+        for i in range(self.array3.length()):
+            self.array3[i] = complex(i, -i)
         arrayCopy = Array.ArrayZ(self.array3)
         self.assertTrue(arrayCopy == self.array3)
 
@@ -313,7 +317,7 @@ def testResize0(self):
 
     def testResize1(self):
         "Test ArrayZ resize method, array"
-        a = np.zeros((2*self.length,), dtype=np.complex128)
+        a = np.zeros((2 * self.length,), dtype=np.complex128)
         self.array3.resize(a)
         self.assertTrue(len(self.array3) == a.size)
 
@@ -325,9 +329,9 @@ def testSetGet(self):
         "Test ArrayZ __setitem__, __getitem__ methods"
         n = self.length
         for i in range(n):
-            self.array3[i] = i*i
+            self.array3[i] = i * i
         for i in range(n):
-            self.assertTrue(self.array3[i] == i*i)
+            self.assertTrue(self.array3[i] == i * i)
 
     def testSetBad1(self):
         "Test ArrayZ __setitem__ method, negative index"
@@ -335,7 +339,7 @@ def testSetBad1(self):
 
     def testSetBad2(self):
         "Test ArrayZ __setitem__ method, out-of-range index"
-        self.assertRaises(IndexError, self.array3.__setitem__, self.length+1, 0)
+        self.assertRaises(IndexError, self.array3.__setitem__, self.length + 1, 0)
 
     def testGetBad1(self):
         "Test ArrayZ __getitem__ method, negative index"
@@ -343,35 +347,40 @@ def testGetBad1(self):
 
     def testGetBad2(self):
         "Test ArrayZ __getitem__ method, out-of-range index"
-        self.assertRaises(IndexError, self.array3.__getitem__, self.length+1)
+        self.assertRaises(IndexError, self.array3.__getitem__, self.length + 1)
 
     def testAsString(self):
         "Test ArrayZ asString method"
-        for i in range(self.array3.length()): self.array3[i] = complex(i+1,-i-1)
-        self.assertTrue(self.array3.asString() == "[ (1,-1), (2,-2), (3,-3), (4,-4), (5,-5) ]")
+        for i in range(self.array3.length()):
+            self.array3[i] = complex(i + 1, -i - 1)
+        self.assertTrue(self.array3.asString() ==
+                        "[ (1,-1), (2,-2), (3,-3), (4,-4), (5,-5) ]")
 
     def testStr(self):
         "Test ArrayZ __str__ method"
-        for i in range(self.array3.length()): self.array3[i] = complex(i-2,(i-2)*2)
+        for i in range(self.array3.length()):
+            self.array3[i] = complex(i - 2, (i - 2) * 2)
         self.assertTrue(str(self.array3) == "[ (-2,-4), (-1,-2), (0,0), (1,2), (2,4) ]")
 
     def testView(self):
         "Test ArrayZ view method"
-        for i in range(self.array3.length()): self.array3[i] = complex(i+1,i+2)
+        for i in range(self.array3.length()):
+            self.array3[i] = complex(i + 1, i + 2)
         a = self.array3.view()
         self.assertTrue(isinstance(a, np.ndarray))
         self.assertTrue(len(a) == self.length)
-        self.assertTrue((a == [1+2j, 2+3j, 3+4j, 4+5j, 5+6j]).all())
+        self.assertTrue((a == [1 + 2j, 2 + 3j, 3 + 4j, 4 + 5j, 5 + 6j]).all())
 
 ######################################################################
 
+
 if __name__ == "__main__":
 
     # Build the test suite
     suite = unittest.TestSuite()
-    suite.addTest(unittest.makeSuite(Array1TestCase))
-    suite.addTest(unittest.makeSuite(Array2TestCase))
-    suite.addTest(unittest.makeSuite(ArrayZTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(Array1TestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(Array2TestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ArrayZTestCase))
 
     # Execute the test suite
     print("Testing Classes of Module Array")
diff --git a/tools/swig/test/testFarray.py b/tools/swig/test/testFarray.py
index 29bf96fe2f68..3798029dbe4b 100755
--- a/tools/swig/test/testFarray.py
+++ b/tools/swig/test/testFarray.py
@@ -1,19 +1,21 @@
 #!/usr/bin/env python3
-# System imports
-from   distutils.util import get_platform
 import os
 import sys
 import unittest
 
-# Import NumPy
+from distutils.util import get_platform
+
 import numpy as np
-major, minor = [ int(d) for d in np.__version__.split(".")[:2] ]
-if major == 0: BadListError = TypeError
-else:          BadListError = ValueError
+
+major, minor = [int(d) for d in np.__version__.split(".")[:2]]
+if major == 0:
+    BadListError = TypeError
+else:
+    BadListError = ValueError
 
 # Add the distutils-generated build directory to the python search path and then
 # import the extension module
-libDir = "lib.{}-{}.{}".format(get_platform(), *sys.version_info[:2])
+libDir = f"lib.{get_platform()}-{sys.version_info[0]}.{sys.version_info[1]}"
 sys.path.insert(0, os.path.join("build", libDir))
 import Farray
 
@@ -56,7 +58,7 @@ def testNcols(self):
 
     def testLen(self):
         "Test Farray __len__ method"
-        self.assertTrue(len(self.array) == self.nrows*self.ncols)
+        self.assertTrue(len(self.array) == self.nrows * self.ncols)
 
     def testSetGet(self):
         "Test Farray __setitem__, __getitem__ methods"
@@ -64,10 +66,10 @@ def testSetGet(self):
         n = self.ncols
         for i in range(m):
             for j in range(n):
-                self.array[i, j] = i*j
+                self.array[i, j] = i * j
         for i in range(m):
             for j in range(n):
-                self.assertTrue(self.array[i, j] == i*j)
+                self.assertTrue(self.array[i, j] == i * j)
 
     def testSetBad1(self):
         "Test Farray __setitem__ method, negative row"
@@ -79,11 +81,11 @@ def testSetBad2(self):
 
     def testSetBad3(self):
         "Test Farray __setitem__ method, out-of-range row"
-        self.assertRaises(IndexError, self.array.__setitem__, (self.nrows+1, 0), 0)
+        self.assertRaises(IndexError, self.array.__setitem__, (self.nrows + 1, 0), 0)
 
     def testSetBad4(self):
         "Test Farray __setitem__ method, out-of-range col"
-        self.assertRaises(IndexError, self.array.__setitem__, (0, self.ncols+1), 0)
+        self.assertRaises(IndexError, self.array.__setitem__, (0, self.ncols + 1), 0)
 
     def testGetBad1(self):
         "Test Farray __getitem__ method, negative row"
@@ -95,11 +97,11 @@ def testGetBad2(self):
 
     def testGetBad3(self):
         "Test Farray __getitem__ method, out-of-range row"
-        self.assertRaises(IndexError, self.array.__getitem__, (self.nrows+1, 0))
+        self.assertRaises(IndexError, self.array.__getitem__, (self.nrows + 1, 0))
 
     def testGetBad4(self):
         "Test Farray __getitem__ method, out-of-range col"
-        self.assertRaises(IndexError, self.array.__getitem__, (0, self.ncols+1))
+        self.assertRaises(IndexError, self.array.__getitem__, (0, self.ncols + 1))
 
     def testAsString(self):
         "Test Farray asString method"
@@ -112,7 +114,7 @@ def testAsString(self):
 """
         for i in range(self.nrows):
             for j in range(self.ncols):
-                self.array[i, j] = i+j
+                self.array[i, j] = i + j
         self.assertTrue(self.array.asString() == result)
 
     def testStr(self):
@@ -126,28 +128,29 @@ def testStr(self):
 """
         for i in range(self.nrows):
             for j in range(self.ncols):
-                self.array[i, j] = i-j
+                self.array[i, j] = i - j
         self.assertTrue(str(self.array) == result)
 
     def testView(self):
         "Test Farray view method"
         for i in range(self.nrows):
             for j in range(self.ncols):
-                self.array[i, j] = i+j
+                self.array[i, j] = i + j
         a = self.array.view()
         self.assertTrue(isinstance(a, np.ndarray))
         self.assertTrue(a.flags.f_contiguous)
         for i in range(self.nrows):
             for j in range(self.ncols):
-                self.assertTrue(a[i, j] == i+j)
+                self.assertTrue(a[i, j] == i + j)
 
 ######################################################################
 
+
 if __name__ == "__main__":
 
     # Build the test suite
     suite = unittest.TestSuite()
-    suite.addTest(unittest.makeSuite(FarrayTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(FarrayTestCase))
 
     # Execute the test suite
     print("Testing Classes of Module Farray")
diff --git a/tools/swig/test/testFlat.py b/tools/swig/test/testFlat.py
index e3e456a56415..43ed84bcfa06 100755
--- a/tools/swig/test/testFlat.py
+++ b/tools/swig/test/testFlat.py
@@ -1,15 +1,15 @@
 #!/usr/bin/env python3
-# System imports
+import struct
 import sys
 import unittest
 
-import struct
-
-# Import NumPy
 import numpy as np
-major, minor = [ int(d) for d in np.__version__.split(".")[:2] ]
-if major == 0: BadListError = TypeError
-else:          BadListError = ValueError
+
+major, minor = [int(d) for d in np.__version__.split(".")[:2]]
+if major == 0:
+    BadListError = TypeError
+else:
+    BadListError = ValueError
 
 import Flat
 
@@ -19,7 +19,7 @@ class FlatTestCase(unittest.TestCase):
 
     def __init__(self, methodName="runTest"):
         unittest.TestCase.__init__(self, methodName)
-        self.typeStr  = "double"
+        self.typeStr = "double"
         self.typeCode = "d"
 
     # Test the (type* INPLACE_ARRAY_FLAT, int DIM_FLAT) typemap
@@ -29,11 +29,11 @@ def testProcess1D(self):
         process = Flat.__dict__[self.typeStr + "Process"]
         pack_output = b''
         for i in range(10):
-            pack_output += struct.pack(self.typeCode,i)
+            pack_output += struct.pack(self.typeCode, i)
         x = np.frombuffer(pack_output, dtype=self.typeCode)
         y = x.copy()
         process(y)
-        self.assertEqual(np.all((x+1)==y),True)
+        self.assertEqual(np.all((x + 1) == y), True)
 
     def testProcess3D(self):
         "Test Process function 3D array"
@@ -41,12 +41,12 @@ def testProcess3D(self):
         process = Flat.__dict__[self.typeStr + "Process"]
         pack_output = b''
         for i in range(24):
-            pack_output += struct.pack(self.typeCode,i)
+            pack_output += struct.pack(self.typeCode, i)
         x = np.frombuffer(pack_output, dtype=self.typeCode)
-        x.shape = (2,3,4)
+        x = x.reshape((2, 3, 4))
         y = x.copy()
         process(y)
-        self.assertEqual(np.all((x+1)==y),True)
+        self.assertEqual(np.all((x + 1) == y), True)
 
     def testProcess3DTranspose(self):
         "Test Process function 3D array, FORTRAN order"
@@ -54,12 +54,12 @@ def testProcess3DTranspose(self):
         process = Flat.__dict__[self.typeStr + "Process"]
         pack_output = b''
         for i in range(24):
-            pack_output += struct.pack(self.typeCode,i)
+            pack_output += struct.pack(self.typeCode, i)
         x = np.frombuffer(pack_output, dtype=self.typeCode)
-        x.shape = (2,3,4)
+        x = x.reshape((2, 3, 4))
         y = x.copy()
         process(y.T)
-        self.assertEqual(np.all((x.T+1)==y.T),True)
+        self.assertEqual(np.all((x.T + 1) == y.T), True)
 
     def testProcessNoncontiguous(self):
         "Test Process function with non-contiguous array, which should raise an error"
@@ -67,10 +67,10 @@ def testProcessNoncontiguous(self):
         process = Flat.__dict__[self.typeStr + "Process"]
         pack_output = b''
         for i in range(24):
-            pack_output += struct.pack(self.typeCode,i)
+            pack_output += struct.pack(self.typeCode, i)
         x = np.frombuffer(pack_output, dtype=self.typeCode)
-        x.shape = (2,3,4)
-        self.assertRaises(TypeError, process, x[:,:,0])
+        x = x.reshape((2, 3, 4))
+        self.assertRaises(TypeError, process, x[:, :, 0])
 
 
 ######################################################################
@@ -78,7 +78,7 @@ def testProcessNoncontiguous(self):
 class scharTestCase(FlatTestCase):
     def __init__(self, methodName="runTest"):
         FlatTestCase.__init__(self, methodName)
-        self.typeStr  = "schar"
+        self.typeStr = "schar"
         self.typeCode = "b"
 
 ######################################################################
@@ -86,7 +86,7 @@ def __init__(self, methodName="runTest"):
 class ucharTestCase(FlatTestCase):
     def __init__(self, methodName="runTest"):
         FlatTestCase.__init__(self, methodName)
-        self.typeStr  = "uchar"
+        self.typeStr = "uchar"
         self.typeCode = "B"
 
 ######################################################################
@@ -94,7 +94,7 @@ def __init__(self, methodName="runTest"):
 class shortTestCase(FlatTestCase):
     def __init__(self, methodName="runTest"):
         FlatTestCase.__init__(self, methodName)
-        self.typeStr  = "short"
+        self.typeStr = "short"
         self.typeCode = "h"
 
 ######################################################################
@@ -102,7 +102,7 @@ def __init__(self, methodName="runTest"):
 class ushortTestCase(FlatTestCase):
     def __init__(self, methodName="runTest"):
         FlatTestCase.__init__(self, methodName)
-        self.typeStr  = "ushort"
+        self.typeStr = "ushort"
         self.typeCode = "H"
 
 ######################################################################
@@ -110,7 +110,7 @@ def __init__(self, methodName="runTest"):
 class intTestCase(FlatTestCase):
     def __init__(self, methodName="runTest"):
         FlatTestCase.__init__(self, methodName)
-        self.typeStr  = "int"
+        self.typeStr = "int"
         self.typeCode = "i"
 
 ######################################################################
@@ -118,7 +118,7 @@ def __init__(self, methodName="runTest"):
 class uintTestCase(FlatTestCase):
     def __init__(self, methodName="runTest"):
         FlatTestCase.__init__(self, methodName)
-        self.typeStr  = "uint"
+        self.typeStr = "uint"
         self.typeCode = "I"
 
 ######################################################################
@@ -126,7 +126,7 @@ def __init__(self, methodName="runTest"):
 class longTestCase(FlatTestCase):
     def __init__(self, methodName="runTest"):
         FlatTestCase.__init__(self, methodName)
-        self.typeStr  = "long"
+        self.typeStr = "long"
         self.typeCode = "l"
 
 ######################################################################
@@ -134,7 +134,7 @@ def __init__(self, methodName="runTest"):
 class ulongTestCase(FlatTestCase):
     def __init__(self, methodName="runTest"):
         FlatTestCase.__init__(self, methodName)
-        self.typeStr  = "ulong"
+        self.typeStr = "ulong"
         self.typeCode = "L"
 
 ######################################################################
@@ -142,7 +142,7 @@ def __init__(self, methodName="runTest"):
 class longLongTestCase(FlatTestCase):
     def __init__(self, methodName="runTest"):
         FlatTestCase.__init__(self, methodName)
-        self.typeStr  = "longLong"
+        self.typeStr = "longLong"
         self.typeCode = "q"
 
 ######################################################################
@@ -150,7 +150,7 @@ def __init__(self, methodName="runTest"):
 class ulongLongTestCase(FlatTestCase):
     def __init__(self, methodName="runTest"):
         FlatTestCase.__init__(self, methodName)
-        self.typeStr  = "ulongLong"
+        self.typeStr = "ulongLong"
         self.typeCode = "Q"
 
 ######################################################################
@@ -158,7 +158,7 @@ def __init__(self, methodName="runTest"):
 class floatTestCase(FlatTestCase):
     def __init__(self, methodName="runTest"):
         FlatTestCase.__init__(self, methodName)
-        self.typeStr  = "float"
+        self.typeStr = "float"
         self.typeCode = "f"
 
 ######################################################################
@@ -166,27 +166,28 @@ def __init__(self, methodName="runTest"):
 class doubleTestCase(FlatTestCase):
     def __init__(self, methodName="runTest"):
         FlatTestCase.__init__(self, methodName)
-        self.typeStr  = "double"
+        self.typeStr = "double"
         self.typeCode = "d"
 
 ######################################################################
 
+
 if __name__ == "__main__":
 
     # Build the test suite
     suite = unittest.TestSuite()
-    suite.addTest(unittest.makeSuite(    scharTestCase))
-    suite.addTest(unittest.makeSuite(    ucharTestCase))
-    suite.addTest(unittest.makeSuite(    shortTestCase))
-    suite.addTest(unittest.makeSuite(   ushortTestCase))
-    suite.addTest(unittest.makeSuite(      intTestCase))
-    suite.addTest(unittest.makeSuite(     uintTestCase))
-    suite.addTest(unittest.makeSuite(     longTestCase))
-    suite.addTest(unittest.makeSuite(    ulongTestCase))
-    suite.addTest(unittest.makeSuite( longLongTestCase))
-    suite.addTest(unittest.makeSuite(ulongLongTestCase))
-    suite.addTest(unittest.makeSuite(    floatTestCase))
-    suite.addTest(unittest.makeSuite(   doubleTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(    scharTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(    ucharTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(    shortTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(   ushortTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(      intTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(     uintTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(     longTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(    ulongTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longLongTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ulongLongTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(    floatTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(   doubleTestCase))
 
     # Execute the test suite
     print("Testing 1D Functions of Module Flat")
diff --git a/tools/swig/test/testFortran.py b/tools/swig/test/testFortran.py
index 348355afcba8..8b23af610481 100644
--- a/tools/swig/test/testFortran.py
+++ b/tools/swig/test/testFortran.py
@@ -1,13 +1,14 @@
 #!/usr/bin/env python3
-# System imports
 import sys
 import unittest
 
-# Import NumPy
 import numpy as np
-major, minor = [ int(d) for d in np.__version__.split(".")[:2] ]
-if major == 0: BadListError = TypeError
-else:          BadListError = ValueError
+
+major, minor = [int(d) for d in np.__version__.split(".")[:2]]
+if major == 0:
+    BadListError = TypeError
+else:
+    BadListError = ValueError
 
 import Fortran
 
@@ -17,7 +18,7 @@ class FortranTestCase(unittest.TestCase):
 
     def __init__(self, methodName="runTests"):
         unittest.TestCase.__init__(self, methodName)
-        self.typeStr  = "double"
+        self.typeStr = "double"
         self.typeCode = "d"
 
     # Test (type* IN_FARRAY2, int DIM1, int DIM2) typemap
@@ -41,7 +42,7 @@ def testSecondElementObject(self):
 class scharTestCase(FortranTestCase):
     def __init__(self, methodName="runTest"):
         FortranTestCase.__init__(self, methodName)
-        self.typeStr  = "schar"
+        self.typeStr = "schar"
         self.typeCode = "b"
 
 ######################################################################
@@ -49,7 +50,7 @@ def __init__(self, methodName="runTest"):
 class ucharTestCase(FortranTestCase):
     def __init__(self, methodName="runTest"):
         FortranTestCase.__init__(self, methodName)
-        self.typeStr  = "uchar"
+        self.typeStr = "uchar"
         self.typeCode = "B"
 
 ######################################################################
@@ -57,7 +58,7 @@ def __init__(self, methodName="runTest"):
 class shortTestCase(FortranTestCase):
     def __init__(self, methodName="runTest"):
         FortranTestCase.__init__(self, methodName)
-        self.typeStr  = "short"
+        self.typeStr = "short"
         self.typeCode = "h"
 
 ######################################################################
@@ -65,7 +66,7 @@ def __init__(self, methodName="runTest"):
 class ushortTestCase(FortranTestCase):
     def __init__(self, methodName="runTest"):
         FortranTestCase.__init__(self, methodName)
-        self.typeStr  = "ushort"
+        self.typeStr = "ushort"
         self.typeCode = "H"
 
 ######################################################################
@@ -73,7 +74,7 @@ def __init__(self, methodName="runTest"):
 class intTestCase(FortranTestCase):
     def __init__(self, methodName="runTest"):
         FortranTestCase.__init__(self, methodName)
-        self.typeStr  = "int"
+        self.typeStr = "int"
         self.typeCode = "i"
 
 ######################################################################
@@ -81,7 +82,7 @@ def __init__(self, methodName="runTest"):
 class uintTestCase(FortranTestCase):
     def __init__(self, methodName="runTest"):
         FortranTestCase.__init__(self, methodName)
-        self.typeStr  = "uint"
+        self.typeStr = "uint"
         self.typeCode = "I"
 
 ######################################################################
@@ -89,7 +90,7 @@ def __init__(self, methodName="runTest"):
 class longTestCase(FortranTestCase):
     def __init__(self, methodName="runTest"):
         FortranTestCase.__init__(self, methodName)
-        self.typeStr  = "long"
+        self.typeStr = "long"
         self.typeCode = "l"
 
 ######################################################################
@@ -97,7 +98,7 @@ def __init__(self, methodName="runTest"):
 class ulongTestCase(FortranTestCase):
     def __init__(self, methodName="runTest"):
         FortranTestCase.__init__(self, methodName)
-        self.typeStr  = "ulong"
+        self.typeStr = "ulong"
         self.typeCode = "L"
 
 ######################################################################
@@ -105,7 +106,7 @@ def __init__(self, methodName="runTest"):
 class longLongTestCase(FortranTestCase):
     def __init__(self, methodName="runTest"):
         FortranTestCase.__init__(self, methodName)
-        self.typeStr  = "longLong"
+        self.typeStr = "longLong"
         self.typeCode = "q"
 
 ######################################################################
@@ -113,7 +114,7 @@ def __init__(self, methodName="runTest"):
 class ulongLongTestCase(FortranTestCase):
     def __init__(self, methodName="runTest"):
         FortranTestCase.__init__(self, methodName)
-        self.typeStr  = "ulongLong"
+        self.typeStr = "ulongLong"
         self.typeCode = "Q"
 
 ######################################################################
@@ -121,7 +122,7 @@ def __init__(self, methodName="runTest"):
 class floatTestCase(FortranTestCase):
     def __init__(self, methodName="runTest"):
         FortranTestCase.__init__(self, methodName)
-        self.typeStr  = "float"
+        self.typeStr = "float"
         self.typeCode = "f"
 
 ######################################################################
@@ -129,27 +130,28 @@ def __init__(self, methodName="runTest"):
 class doubleTestCase(FortranTestCase):
     def __init__(self, methodName="runTest"):
         FortranTestCase.__init__(self, methodName)
-        self.typeStr  = "double"
+        self.typeStr = "double"
         self.typeCode = "d"
 
 ######################################################################
 
+
 if __name__ == "__main__":
 
     # Build the test suite
     suite = unittest.TestSuite()
-    suite.addTest(unittest.makeSuite(    scharTestCase))
-    suite.addTest(unittest.makeSuite(    ucharTestCase))
-    suite.addTest(unittest.makeSuite(    shortTestCase))
-    suite.addTest(unittest.makeSuite(   ushortTestCase))
-    suite.addTest(unittest.makeSuite(      intTestCase))
-    suite.addTest(unittest.makeSuite(     uintTestCase))
-    suite.addTest(unittest.makeSuite(     longTestCase))
-    suite.addTest(unittest.makeSuite(    ulongTestCase))
-    suite.addTest(unittest.makeSuite( longLongTestCase))
-    suite.addTest(unittest.makeSuite(ulongLongTestCase))
-    suite.addTest(unittest.makeSuite(    floatTestCase))
-    suite.addTest(unittest.makeSuite(   doubleTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(    scharTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(    ucharTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(    shortTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(   ushortTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(      intTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(     uintTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(     longTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(    ulongTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longLongTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ulongLongTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(    floatTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(   doubleTestCase))
 
     # Execute the test suite
     print("Testing 2D Functions of Module Matrix")
diff --git a/tools/swig/test/testMatrix.py b/tools/swig/test/testMatrix.py
index 814c0d578039..d3151a0fb857 100755
--- a/tools/swig/test/testMatrix.py
+++ b/tools/swig/test/testMatrix.py
@@ -1,13 +1,14 @@
 #!/usr/bin/env python3
-# System imports
 import sys
 import unittest
 
-# Import NumPy
 import numpy as np
-major, minor = [ int(d) for d in np.__version__.split(".")[:2] ]
-if major == 0: BadListError = TypeError
-else:          BadListError = ValueError
+
+major, minor = [int(d) for d in np.__version__.split(".")[:2]]
+if major == 0:
+    BadListError = TypeError
+else:
+    BadListError = ValueError
 
 import Matrix
 
@@ -17,7 +18,7 @@ class MatrixTestCase(unittest.TestCase):
 
     def __init__(self, methodName="runTests"):
         unittest.TestCase.__init__(self, methodName)
-        self.typeStr  = "double"
+        self.typeStr = "double"
         self.typeCode = "d"
 
     # Test (type IN_ARRAY2[ANY][ANY]) typemap
@@ -240,7 +241,7 @@ def testLUSplit(self):
 class scharTestCase(MatrixTestCase):
     def __init__(self, methodName="runTest"):
         MatrixTestCase.__init__(self, methodName)
-        self.typeStr  = "schar"
+        self.typeStr = "schar"
         self.typeCode = "b"
 
 ######################################################################
@@ -248,7 +249,7 @@ def __init__(self, methodName="runTest"):
 class ucharTestCase(MatrixTestCase):
     def __init__(self, methodName="runTest"):
         MatrixTestCase.__init__(self, methodName)
-        self.typeStr  = "uchar"
+        self.typeStr = "uchar"
         self.typeCode = "B"
 
 ######################################################################
@@ -256,7 +257,7 @@ def __init__(self, methodName="runTest"):
 class shortTestCase(MatrixTestCase):
     def __init__(self, methodName="runTest"):
         MatrixTestCase.__init__(self, methodName)
-        self.typeStr  = "short"
+        self.typeStr = "short"
         self.typeCode = "h"
 
 ######################################################################
@@ -264,7 +265,7 @@ def __init__(self, methodName="runTest"):
 class ushortTestCase(MatrixTestCase):
     def __init__(self, methodName="runTest"):
         MatrixTestCase.__init__(self, methodName)
-        self.typeStr  = "ushort"
+        self.typeStr = "ushort"
         self.typeCode = "H"
 
 ######################################################################
@@ -272,7 +273,7 @@ def __init__(self, methodName="runTest"):
 class intTestCase(MatrixTestCase):
     def __init__(self, methodName="runTest"):
         MatrixTestCase.__init__(self, methodName)
-        self.typeStr  = "int"
+        self.typeStr = "int"
         self.typeCode = "i"
 
 ######################################################################
@@ -280,7 +281,7 @@ def __init__(self, methodName="runTest"):
 class uintTestCase(MatrixTestCase):
     def __init__(self, methodName="runTest"):
         MatrixTestCase.__init__(self, methodName)
-        self.typeStr  = "uint"
+        self.typeStr = "uint"
         self.typeCode = "I"
 
 ######################################################################
@@ -288,7 +289,7 @@ def __init__(self, methodName="runTest"):
 class longTestCase(MatrixTestCase):
     def __init__(self, methodName="runTest"):
         MatrixTestCase.__init__(self, methodName)
-        self.typeStr  = "long"
+        self.typeStr = "long"
         self.typeCode = "l"
 
 ######################################################################
@@ -296,7 +297,7 @@ def __init__(self, methodName="runTest"):
 class ulongTestCase(MatrixTestCase):
     def __init__(self, methodName="runTest"):
         MatrixTestCase.__init__(self, methodName)
-        self.typeStr  = "ulong"
+        self.typeStr = "ulong"
         self.typeCode = "L"
 
 ######################################################################
@@ -304,7 +305,7 @@ def __init__(self, methodName="runTest"):
 class longLongTestCase(MatrixTestCase):
     def __init__(self, methodName="runTest"):
         MatrixTestCase.__init__(self, methodName)
-        self.typeStr  = "longLong"
+        self.typeStr = "longLong"
         self.typeCode = "q"
 
 ######################################################################
@@ -312,7 +313,7 @@ def __init__(self, methodName="runTest"):
 class ulongLongTestCase(MatrixTestCase):
     def __init__(self, methodName="runTest"):
         MatrixTestCase.__init__(self, methodName)
-        self.typeStr  = "ulongLong"
+        self.typeStr = "ulongLong"
         self.typeCode = "Q"
 
 ######################################################################
@@ -320,7 +321,7 @@ def __init__(self, methodName="runTest"):
 class floatTestCase(MatrixTestCase):
     def __init__(self, methodName="runTest"):
         MatrixTestCase.__init__(self, methodName)
-        self.typeStr  = "float"
+        self.typeStr = "float"
         self.typeCode = "f"
 
 ######################################################################
@@ -328,27 +329,28 @@ def __init__(self, methodName="runTest"):
 class doubleTestCase(MatrixTestCase):
     def __init__(self, methodName="runTest"):
         MatrixTestCase.__init__(self, methodName)
-        self.typeStr  = "double"
+        self.typeStr = "double"
         self.typeCode = "d"
 
 ######################################################################
 
+
 if __name__ == "__main__":
 
     # Build the test suite
     suite = unittest.TestSuite()
-    suite.addTest(unittest.makeSuite(    scharTestCase))
-    suite.addTest(unittest.makeSuite(    ucharTestCase))
-    suite.addTest(unittest.makeSuite(    shortTestCase))
-    suite.addTest(unittest.makeSuite(   ushortTestCase))
-    suite.addTest(unittest.makeSuite(      intTestCase))
-    suite.addTest(unittest.makeSuite(     uintTestCase))
-    suite.addTest(unittest.makeSuite(     longTestCase))
-    suite.addTest(unittest.makeSuite(    ulongTestCase))
-    suite.addTest(unittest.makeSuite( longLongTestCase))
-    suite.addTest(unittest.makeSuite(ulongLongTestCase))
-    suite.addTest(unittest.makeSuite(    floatTestCase))
-    suite.addTest(unittest.makeSuite(   doubleTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(    scharTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(    ucharTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(    shortTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(   ushortTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(      intTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(     uintTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(     longTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(    ulongTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longLongTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ulongLongTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(    floatTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(   doubleTestCase))
 
     # Execute the test suite
     print("Testing 2D Functions of Module Matrix")
diff --git a/tools/swig/test/testSuperTensor.py b/tools/swig/test/testSuperTensor.py
index 121c4a405805..f49a0aa07a90 100644
--- a/tools/swig/test/testSuperTensor.py
+++ b/tools/swig/test/testSuperTensor.py
@@ -1,13 +1,14 @@
 #!/usr/bin/env python3
-# System imports
 import sys
 import unittest
 
-# Import NumPy
 import numpy as np
-major, minor = [ int(d) for d in np.__version__.split(".")[:2] ]
-if major == 0: BadListError = TypeError
-else:          BadListError = ValueError
+
+major, minor = [int(d) for d in np.__version__.split(".")[:2]]
+if major == 0:
+    BadListError = TypeError
+else:
+    BadListError = ValueError
 
 import SuperTensor
 
@@ -17,7 +18,7 @@ class SuperTensorTestCase(unittest.TestCase):
 
     def __init__(self, methodName="runTests"):
         unittest.TestCase.__init__(self, methodName)
-        self.typeStr  = "double"
+        self.typeStr = "double"
         self.typeCode = "d"
 
     # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
@@ -25,10 +26,11 @@ def testNorm(self):
         "Test norm function"
         print(self.typeStr, "... ", file=sys.stderr)
         norm = SuperTensor.__dict__[self.typeStr + "Norm"]
-        supertensor = np.arange(2*2*2*2, dtype=self.typeCode).reshape((2, 2, 2, 2))
-        #Note: cludge to get an answer of the same type as supertensor.
-        #Answer is simply sqrt(sum(supertensor*supertensor)/16)
-        answer = np.array([np.sqrt(np.sum(supertensor.astype('d')*supertensor)/16.)], dtype=self.typeCode)[0]
+        supertensor = np.arange(2 * 2 * 2 * 2,
+                                dtype=self.typeCode).reshape((2, 2, 2, 2))
+        # Note: cludge to get an answer of the same type as supertensor.
+        # Answer is simply sqrt(sum(supertensor*supertensor)/16)
+        answer = np.array([np.sqrt(np.sum(supertensor.astype('d') * supertensor) / 16.)], dtype=self.typeCode)[0]  # noqa: E501
         self.assertAlmostEqual(norm(supertensor), answer, 6)
 
     # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
@@ -36,7 +38,8 @@ def testNormBadList(self):
         "Test norm function with bad list"
         print(self.typeStr, "... ", file=sys.stderr)
         norm = SuperTensor.__dict__[self.typeStr + "Norm"]
-        supertensor = [[[[0, "one"], [2, 3]], [[3, "two"], [1, 0]]], [[[0, "one"], [2, 3]], [[3, "two"], [1, 0]]]]
+        supertensor = [[[[0, "one"], [2, 3]], [[3, "two"], [1, 0]]],
+                       [[[0, "one"], [2, 3]], [[3, "two"], [1, 0]]]]
         self.assertRaises(BadListError, norm, supertensor)
 
     # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
@@ -44,7 +47,7 @@ def testNormWrongDim(self):
         "Test norm function with wrong dimensions"
         print(self.typeStr, "... ", file=sys.stderr)
         norm = SuperTensor.__dict__[self.typeStr + "Norm"]
-        supertensor = np.arange(2*2*2, dtype=self.typeCode).reshape((2, 2, 2))
+        supertensor = np.arange(2 * 2 * 2, dtype=self.typeCode).reshape((2, 2, 2))
         self.assertRaises(TypeError, norm, supertensor)
 
     # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
@@ -52,7 +55,7 @@ def testNormWrongSize(self):
         "Test norm function with wrong size"
         print(self.typeStr, "... ", file=sys.stderr)
         norm = SuperTensor.__dict__[self.typeStr + "Norm"]
-        supertensor = np.arange(3*2*2, dtype=self.typeCode).reshape((3, 2, 2))
+        supertensor = np.arange(3 * 2 * 2, dtype=self.typeCode).reshape((3, 2, 2))
         self.assertRaises(TypeError, norm, supertensor)
 
     # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
@@ -67,7 +70,8 @@ def testMax(self):
         "Test max function"
         print(self.typeStr, "... ", file=sys.stderr)
         max = SuperTensor.__dict__[self.typeStr + "Max"]
-        supertensor = [[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]]
+        supertensor = [[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
+                       [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]]
         self.assertEqual(max(supertensor), 8)
 
     # Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
@@ -75,7 +79,8 @@ def testMaxBadList(self):
         "Test max function with bad list"
         print(self.typeStr, "... ", file=sys.stderr)
         max = SuperTensor.__dict__[self.typeStr + "Max"]
-        supertensor = [[[[1, "two"], [3, 4]], [[5, "six"], [7, 8]]], [[[1, "two"], [3, 4]], [[5, "six"], [7, 8]]]]
+        supertensor = [[[[1, "two"], [3, 4]], [[5, "six"], [7, 8]]],
+                       [[[1, "two"], [3, 4]], [[5, "six"], [7, 8]]]]
         self.assertRaises(BadListError, max, supertensor)
 
     # Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
@@ -97,7 +102,8 @@ def testMin(self):
         "Test min function"
         print(self.typeStr, "... ", file=sys.stderr)
         min = SuperTensor.__dict__[self.typeStr + "Min"]
-        supertensor = [[[[9, 8], [7, 6]], [[5, 4], [3, 2]]], [[[9, 8], [7, 6]], [[5, 4], [3, 2]]]]
+        supertensor = [[[[9, 8], [7, 6]], [[5, 4], [3, 2]]],
+                       [[[9, 8], [7, 6]], [[5, 4], [3, 2]]]]
         self.assertEqual(min(supertensor), 2)
 
     # Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap
@@ -105,7 +111,8 @@ def testMinBadList(self):
         "Test min function with bad list"
         print(self.typeStr, "... ", file=sys.stderr)
         min = SuperTensor.__dict__[self.typeStr + "Min"]
-        supertensor = [[[["nine", 8], [7, 6]], [["five", 4], [3, 2]]], [[["nine", 8], [7, 6]], [["five", 4], [3, 2]]]]
+        supertensor = [[[["nine", 8], [7, 6]], [["five", 4], [3, 2]]],
+                       [[["nine", 8], [7, 6]], [["five", 4], [3, 2]]]]
         self.assertRaises(BadListError, min, supertensor)
 
     # Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap
@@ -127,8 +134,9 @@ def testScale(self):
         "Test scale function"
         print(self.typeStr, "... ", file=sys.stderr)
         scale = SuperTensor.__dict__[self.typeStr + "Scale"]
-        supertensor = np.arange(3*3*3*3, dtype=self.typeCode).reshape((3, 3, 3, 3))
-        answer = supertensor.copy()*4
+        supertensor = np.arange(3 * 3 * 3 * 3,
+                                dtype=self.typeCode).reshape((3, 3, 3, 3))
+        answer = supertensor.copy() * 4
         scale(supertensor, 4)
         self.assertEqual((supertensor == answer).all(), True)
 
@@ -172,7 +180,8 @@ def testScaleNonArray(self):
     def testFloor(self):
         "Test floor function"
         print(self.typeStr, "... ", file=sys.stderr)
-        supertensor = np.arange(2*2*2*2, dtype=self.typeCode).reshape((2, 2, 2, 2))
+        supertensor = np.arange(2 * 2 * 2 * 2,
+                                dtype=self.typeCode).reshape((2, 2, 2, 2))
         answer = supertensor.copy()
         answer[answer < 4] = 4
 
@@ -185,7 +194,7 @@ def testFloorWrongType(self):
         "Test floor function with wrong type"
         print(self.typeStr, "... ", file=sys.stderr)
         floor = SuperTensor.__dict__[self.typeStr + "Floor"]
-        supertensor = np.ones(2*2*2*2, dtype='c').reshape((2, 2, 2, 2))
+        supertensor = np.ones(2 * 2 * 2 * 2, dtype='c').reshape((2, 2, 2, 2))
         self.assertRaises(TypeError, floor, supertensor)
 
     # Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
@@ -193,7 +202,7 @@ def testFloorWrongDim(self):
         "Test floor function with wrong type"
         print(self.typeStr, "... ", file=sys.stderr)
         floor = SuperTensor.__dict__[self.typeStr + "Floor"]
-        supertensor = np.arange(2*2*2, dtype=self.typeCode).reshape((2, 2, 2))
+        supertensor = np.arange(2 * 2 * 2, dtype=self.typeCode).reshape((2, 2, 2))
         self.assertRaises(TypeError, floor, supertensor)
 
     # Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
@@ -207,7 +216,8 @@ def testFloorNonArray(self):
     def testCeil(self):
         "Test ceil function"
         print(self.typeStr, "... ", file=sys.stderr)
-        supertensor = np.arange(2*2*2*2, dtype=self.typeCode).reshape((2, 2, 2, 2))
+        supertensor = np.arange(2 * 2 * 2 * 2,
+                                dtype=self.typeCode).reshape((2, 2, 2, 2))
         answer = supertensor.copy()
         answer[answer > 5] = 5
         ceil = SuperTensor.__dict__[self.typeStr + "Ceil"]
@@ -219,7 +229,7 @@ def testCeilWrongType(self):
         "Test ceil function with wrong type"
         print(self.typeStr, "... ", file=sys.stderr)
         ceil = SuperTensor.__dict__[self.typeStr + "Ceil"]
-        supertensor = np.ones(2*2*2*2, 'c').reshape((2, 2, 2, 2))
+        supertensor = np.ones(2 * 2 * 2 * 2, 'c').reshape((2, 2, 2, 2))
         self.assertRaises(TypeError, ceil, supertensor)
 
     # Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap
@@ -227,7 +237,7 @@ def testCeilWrongDim(self):
         "Test ceil function with wrong dimensions"
         print(self.typeStr, "... ", file=sys.stderr)
         ceil = SuperTensor.__dict__[self.typeStr + "Ceil"]
-        supertensor = np.arange(2*2*2, dtype=self.typeCode).reshape((2, 2, 2))
+        supertensor = np.arange(2 * 2 * 2, dtype=self.typeCode).reshape((2, 2, 2))
         self.assertRaises(TypeError, ceil, supertensor)
 
     # Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap
@@ -235,7 +245,8 @@ def testCeilNonArray(self):
         "Test ceil function with non-array"
         print(self.typeStr, "... ", file=sys.stderr)
         ceil = SuperTensor.__dict__[self.typeStr + "Ceil"]
-        supertensor = np.arange(2*2*2*2, dtype=self.typeCode).reshape((2, 2, 2, 2)).tolist()
+        supertensor = np.arange(2 * 2 * 2 * 2,
+                                dtype=self.typeCode).reshape((2, 2, 2, 2)).tolist()
         self.assertRaises(TypeError, ceil, supertensor)
 
     # Test (type ARGOUT_ARRAY3[ANY][ANY][ANY]) typemap
@@ -243,9 +254,9 @@ def testLUSplit(self):
         "Test luSplit function"
         print(self.typeStr, "... ", file=sys.stderr)
         luSplit = SuperTensor.__dict__[self.typeStr + "LUSplit"]
-        supertensor = np.ones(2*2*2*2, dtype=self.typeCode).reshape((2, 2, 2, 2))
-        answer_upper = [[[[0, 0], [0, 1]], [[0, 1], [1, 1]]], [[[0, 1], [1, 1]], [[1, 1], [1, 1]]]]
-        answer_lower = [[[[1, 1], [1, 0]], [[1, 0], [0, 0]]], [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]]
+        supertensor = np.ones(2 * 2 * 2 * 2, dtype=self.typeCode).reshape((2, 2, 2, 2))
+        answer_upper = [[[[0, 0], [0, 1]], [[0, 1], [1, 1]]], [[[0, 1], [1, 1]], [[1, 1], [1, 1]]]]  # noqa: E501
+        answer_lower = [[[[1, 1], [1, 0]], [[1, 0], [0, 0]]], [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]]  # noqa: E501
         lower, upper = luSplit(supertensor)
         self.assertEqual((lower == answer_lower).all(), True)
         self.assertEqual((upper == answer_upper).all(), True)
@@ -255,7 +266,7 @@ def testLUSplit(self):
 class scharTestCase(SuperTensorTestCase):
     def __init__(self, methodName="runTest"):
         SuperTensorTestCase.__init__(self, methodName)
-        self.typeStr  = "schar"
+        self.typeStr = "schar"
         self.typeCode = "b"
         #self.result   = int(self.result)
 
@@ -264,7 +275,7 @@ def __init__(self, methodName="runTest"):
 class ucharTestCase(SuperTensorTestCase):
     def __init__(self, methodName="runTest"):
         SuperTensorTestCase.__init__(self, methodName)
-        self.typeStr  = "uchar"
+        self.typeStr = "uchar"
         self.typeCode = "B"
         #self.result   = int(self.result)
 
@@ -273,7 +284,7 @@ def __init__(self, methodName="runTest"):
 class shortTestCase(SuperTensorTestCase):
     def __init__(self, methodName="runTest"):
         SuperTensorTestCase.__init__(self, methodName)
-        self.typeStr  = "short"
+        self.typeStr = "short"
         self.typeCode = "h"
         #self.result   = int(self.result)
 
@@ -282,7 +293,7 @@ def __init__(self, methodName="runTest"):
 class ushortTestCase(SuperTensorTestCase):
     def __init__(self, methodName="runTest"):
         SuperTensorTestCase.__init__(self, methodName)
-        self.typeStr  = "ushort"
+        self.typeStr = "ushort"
         self.typeCode = "H"
         #self.result   = int(self.result)
 
@@ -291,7 +302,7 @@ def __init__(self, methodName="runTest"):
 class intTestCase(SuperTensorTestCase):
     def __init__(self, methodName="runTest"):
         SuperTensorTestCase.__init__(self, methodName)
-        self.typeStr  = "int"
+        self.typeStr = "int"
         self.typeCode = "i"
         #self.result   = int(self.result)
 
@@ -300,7 +311,7 @@ def __init__(self, methodName="runTest"):
 class uintTestCase(SuperTensorTestCase):
     def __init__(self, methodName="runTest"):
         SuperTensorTestCase.__init__(self, methodName)
-        self.typeStr  = "uint"
+        self.typeStr = "uint"
         self.typeCode = "I"
         #self.result   = int(self.result)
 
@@ -309,7 +320,7 @@ def __init__(self, methodName="runTest"):
 class longTestCase(SuperTensorTestCase):
     def __init__(self, methodName="runTest"):
         SuperTensorTestCase.__init__(self, methodName)
-        self.typeStr  = "long"
+        self.typeStr = "long"
         self.typeCode = "l"
         #self.result   = int(self.result)
 
@@ -318,7 +329,7 @@ def __init__(self, methodName="runTest"):
 class ulongTestCase(SuperTensorTestCase):
     def __init__(self, methodName="runTest"):
         SuperTensorTestCase.__init__(self, methodName)
-        self.typeStr  = "ulong"
+        self.typeStr = "ulong"
         self.typeCode = "L"
         #self.result   = int(self.result)
 
@@ -327,7 +338,7 @@ def __init__(self, methodName="runTest"):
 class longLongTestCase(SuperTensorTestCase):
     def __init__(self, methodName="runTest"):
         SuperTensorTestCase.__init__(self, methodName)
-        self.typeStr  = "longLong"
+        self.typeStr = "longLong"
         self.typeCode = "q"
         #self.result   = int(self.result)
 
@@ -336,7 +347,7 @@ def __init__(self, methodName="runTest"):
 class ulongLongTestCase(SuperTensorTestCase):
     def __init__(self, methodName="runTest"):
         SuperTensorTestCase.__init__(self, methodName)
-        self.typeStr  = "ulongLong"
+        self.typeStr = "ulongLong"
         self.typeCode = "Q"
         #self.result   = int(self.result)
 
@@ -345,7 +356,7 @@ def __init__(self, methodName="runTest"):
 class floatTestCase(SuperTensorTestCase):
     def __init__(self, methodName="runTest"):
         SuperTensorTestCase.__init__(self, methodName)
-        self.typeStr  = "float"
+        self.typeStr = "float"
         self.typeCode = "f"
 
 ######################################################################
@@ -353,27 +364,28 @@ def __init__(self, methodName="runTest"):
 class doubleTestCase(SuperTensorTestCase):
     def __init__(self, methodName="runTest"):
         SuperTensorTestCase.__init__(self, methodName)
-        self.typeStr  = "double"
+        self.typeStr = "double"
         self.typeCode = "d"
 
 ######################################################################
 
+
 if __name__ == "__main__":
 
     # Build the test suite
     suite = unittest.TestSuite()
-    suite.addTest(unittest.makeSuite(    scharTestCase))
-    suite.addTest(unittest.makeSuite(    ucharTestCase))
-    suite.addTest(unittest.makeSuite(    shortTestCase))
-    suite.addTest(unittest.makeSuite(   ushortTestCase))
-    suite.addTest(unittest.makeSuite(      intTestCase))
-    suite.addTest(unittest.makeSuite(     uintTestCase))
-    suite.addTest(unittest.makeSuite(     longTestCase))
-    suite.addTest(unittest.makeSuite(    ulongTestCase))
-    suite.addTest(unittest.makeSuite( longLongTestCase))
-    suite.addTest(unittest.makeSuite(ulongLongTestCase))
-    suite.addTest(unittest.makeSuite(    floatTestCase))
-    suite.addTest(unittest.makeSuite(   doubleTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(    scharTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(    ucharTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(    shortTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(   ushortTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(      intTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(     uintTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(     longTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(    ulongTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longLongTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ulongLongTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(    floatTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(   doubleTestCase))
 
     # Execute the test suite
     print("Testing 4D Functions of Module SuperTensor")
diff --git a/tools/swig/test/testTensor.py b/tools/swig/test/testTensor.py
index 164ceb2d5626..536d848e6135 100755
--- a/tools/swig/test/testTensor.py
+++ b/tools/swig/test/testTensor.py
@@ -1,14 +1,15 @@
 #!/usr/bin/env python3
-# System imports
-from   math           import sqrt
 import sys
 import unittest
+from math import sqrt
 
-# Import NumPy
 import numpy as np
-major, minor = [ int(d) for d in np.__version__.split(".")[:2] ]
-if major == 0: BadListError = TypeError
-else:          BadListError = ValueError
+
+major, minor = [int(d) for d in np.__version__.split(".")[:2]]
+if major == 0:
+    BadListError = TypeError
+else:
+    BadListError = ValueError
 
 import Tensor
 
@@ -18,9 +19,9 @@ class TensorTestCase(unittest.TestCase):
 
     def __init__(self, methodName="runTests"):
         unittest.TestCase.__init__(self, methodName)
-        self.typeStr  = "double"
+        self.typeStr = "double"
         self.typeCode = "d"
-        self.result   = sqrt(28.0/8)
+        self.result = sqrt(28.0 / 8)
 
     # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
     def testNorm(self):
@@ -98,7 +99,7 @@ def testMaxWrongDim(self):
         "Test max function with wrong dimensions"
         print(self.typeStr, "... ", end=' ', file=sys.stderr)
         max = Tensor.__dict__[self.typeStr + "Max"]
-        self.assertRaises(TypeError, max, [0, -1, 2, -3])
+        self.assertRaises(TypeError, max, [0, 1, 2, 3])
 
     # Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap
     def testMin(self):
@@ -270,97 +271,97 @@ def testLUSplit(self):
 class scharTestCase(TensorTestCase):
     def __init__(self, methodName="runTest"):
         TensorTestCase.__init__(self, methodName)
-        self.typeStr  = "schar"
+        self.typeStr = "schar"
         self.typeCode = "b"
-        self.result   = int(self.result)
+        self.result = int(self.result)
 
 ######################################################################
 
 class ucharTestCase(TensorTestCase):
     def __init__(self, methodName="runTest"):
         TensorTestCase.__init__(self, methodName)
-        self.typeStr  = "uchar"
+        self.typeStr = "uchar"
         self.typeCode = "B"
-        self.result   = int(self.result)
+        self.result = int(self.result)
 
 ######################################################################
 
 class shortTestCase(TensorTestCase):
     def __init__(self, methodName="runTest"):
         TensorTestCase.__init__(self, methodName)
-        self.typeStr  = "short"
+        self.typeStr = "short"
         self.typeCode = "h"
-        self.result   = int(self.result)
+        self.result = int(self.result)
 
 ######################################################################
 
 class ushortTestCase(TensorTestCase):
     def __init__(self, methodName="runTest"):
         TensorTestCase.__init__(self, methodName)
-        self.typeStr  = "ushort"
+        self.typeStr = "ushort"
         self.typeCode = "H"
-        self.result   = int(self.result)
+        self.result = int(self.result)
 
 ######################################################################
 
 class intTestCase(TensorTestCase):
     def __init__(self, methodName="runTest"):
         TensorTestCase.__init__(self, methodName)
-        self.typeStr  = "int"
+        self.typeStr = "int"
         self.typeCode = "i"
-        self.result   = int(self.result)
+        self.result = int(self.result)
 
 ######################################################################
 
 class uintTestCase(TensorTestCase):
     def __init__(self, methodName="runTest"):
         TensorTestCase.__init__(self, methodName)
-        self.typeStr  = "uint"
+        self.typeStr = "uint"
         self.typeCode = "I"
-        self.result   = int(self.result)
+        self.result = int(self.result)
 
 ######################################################################
 
 class longTestCase(TensorTestCase):
     def __init__(self, methodName="runTest"):
         TensorTestCase.__init__(self, methodName)
-        self.typeStr  = "long"
+        self.typeStr = "long"
         self.typeCode = "l"
-        self.result   = int(self.result)
+        self.result = int(self.result)
 
 ######################################################################
 
 class ulongTestCase(TensorTestCase):
     def __init__(self, methodName="runTest"):
         TensorTestCase.__init__(self, methodName)
-        self.typeStr  = "ulong"
+        self.typeStr = "ulong"
         self.typeCode = "L"
-        self.result   = int(self.result)
+        self.result = int(self.result)
 
 ######################################################################
 
 class longLongTestCase(TensorTestCase):
     def __init__(self, methodName="runTest"):
         TensorTestCase.__init__(self, methodName)
-        self.typeStr  = "longLong"
+        self.typeStr = "longLong"
         self.typeCode = "q"
-        self.result   = int(self.result)
+        self.result = int(self.result)
 
 ######################################################################
 
 class ulongLongTestCase(TensorTestCase):
     def __init__(self, methodName="runTest"):
         TensorTestCase.__init__(self, methodName)
-        self.typeStr  = "ulongLong"
+        self.typeStr = "ulongLong"
         self.typeCode = "Q"
-        self.result   = int(self.result)
+        self.result = int(self.result)
 
 ######################################################################
 
 class floatTestCase(TensorTestCase):
     def __init__(self, methodName="runTest"):
         TensorTestCase.__init__(self, methodName)
-        self.typeStr  = "float"
+        self.typeStr = "float"
         self.typeCode = "f"
 
 ######################################################################
@@ -368,27 +369,28 @@ def __init__(self, methodName="runTest"):
 class doubleTestCase(TensorTestCase):
     def __init__(self, methodName="runTest"):
         TensorTestCase.__init__(self, methodName)
-        self.typeStr  = "double"
+        self.typeStr = "double"
         self.typeCode = "d"
 
 ######################################################################
 
+
 if __name__ == "__main__":
 
     # Build the test suite
     suite = unittest.TestSuite()
-    suite.addTest(unittest.makeSuite(    scharTestCase))
-    suite.addTest(unittest.makeSuite(    ucharTestCase))
-    suite.addTest(unittest.makeSuite(    shortTestCase))
-    suite.addTest(unittest.makeSuite(   ushortTestCase))
-    suite.addTest(unittest.makeSuite(      intTestCase))
-    suite.addTest(unittest.makeSuite(     uintTestCase))
-    suite.addTest(unittest.makeSuite(     longTestCase))
-    suite.addTest(unittest.makeSuite(    ulongTestCase))
-    suite.addTest(unittest.makeSuite( longLongTestCase))
-    suite.addTest(unittest.makeSuite(ulongLongTestCase))
-    suite.addTest(unittest.makeSuite(    floatTestCase))
-    suite.addTest(unittest.makeSuite(   doubleTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(    scharTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(    ucharTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(    shortTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(   ushortTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(      intTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(     uintTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(     longTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(    ulongTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longLongTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ulongLongTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(    floatTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(   doubleTestCase))
 
     # Execute the test suite
     print("Testing 3D Functions of Module Tensor")
diff --git a/tools/swig/test/testVector.py b/tools/swig/test/testVector.py
index 1a663d1db83b..15ad96da4503 100755
--- a/tools/swig/test/testVector.py
+++ b/tools/swig/test/testVector.py
@@ -1,13 +1,14 @@
 #!/usr/bin/env python3
-# System imports
 import sys
 import unittest
 
-# Import NumPy
 import numpy as np
-major, minor = [ int(d) for d in np.__version__.split(".")[:2] ]
-if major == 0: BadListError = TypeError
-else:          BadListError = ValueError
+
+major, minor = [int(d) for d in np.__version__.split(".")[:2]]
+if major == 0:
+    BadListError = TypeError
+else:
+    BadListError = ValueError
 
 import Vector
 
@@ -17,7 +18,7 @@ class VectorTestCase(unittest.TestCase):
 
     def __init__(self, methodName="runTest"):
         unittest.TestCase.__init__(self, methodName)
-        self.typeStr  = "double"
+        self.typeStr = "double"
         self.typeCode = "d"
 
     # Test the (type IN_ARRAY1[ANY]) typemap
@@ -222,7 +223,7 @@ def testEOSplit(self):
         eoSplit = Vector.__dict__[self.typeStr + "EOSplit"]
         even, odd = eoSplit([1, 2, 3])
         self.assertEqual((even == [1, 0, 3]).all(), True)
-        self.assertEqual((odd  == [0, 2, 0]).all(), True)
+        self.assertEqual((odd == [0, 2, 0]).all(), True)
 
     # Test the (type* ARGOUT_ARRAY1, int DIM1) typemap
     def testTwos(self):
@@ -259,7 +260,7 @@ def testThreesNonInt(self):
 class scharTestCase(VectorTestCase):
     def __init__(self, methodName="runTest"):
         VectorTestCase.__init__(self, methodName)
-        self.typeStr  = "schar"
+        self.typeStr = "schar"
         self.typeCode = "b"
 
 ######################################################################
@@ -267,7 +268,7 @@ def __init__(self, methodName="runTest"):
 class ucharTestCase(VectorTestCase):
     def __init__(self, methodName="runTest"):
         VectorTestCase.__init__(self, methodName)
-        self.typeStr  = "uchar"
+        self.typeStr = "uchar"
         self.typeCode = "B"
 
 ######################################################################
@@ -275,7 +276,7 @@ def __init__(self, methodName="runTest"):
 class shortTestCase(VectorTestCase):
     def __init__(self, methodName="runTest"):
         VectorTestCase.__init__(self, methodName)
-        self.typeStr  = "short"
+        self.typeStr = "short"
         self.typeCode = "h"
 
 ######################################################################
@@ -283,7 +284,7 @@ def __init__(self, methodName="runTest"):
 class ushortTestCase(VectorTestCase):
     def __init__(self, methodName="runTest"):
         VectorTestCase.__init__(self, methodName)
-        self.typeStr  = "ushort"
+        self.typeStr = "ushort"
         self.typeCode = "H"
 
 ######################################################################
@@ -291,7 +292,7 @@ def __init__(self, methodName="runTest"):
 class intTestCase(VectorTestCase):
     def __init__(self, methodName="runTest"):
         VectorTestCase.__init__(self, methodName)
-        self.typeStr  = "int"
+        self.typeStr = "int"
         self.typeCode = "i"
 
 ######################################################################
@@ -299,7 +300,7 @@ def __init__(self, methodName="runTest"):
 class uintTestCase(VectorTestCase):
     def __init__(self, methodName="runTest"):
         VectorTestCase.__init__(self, methodName)
-        self.typeStr  = "uint"
+        self.typeStr = "uint"
         self.typeCode = "I"
 
 ######################################################################
@@ -307,7 +308,7 @@ def __init__(self, methodName="runTest"):
 class longTestCase(VectorTestCase):
     def __init__(self, methodName="runTest"):
         VectorTestCase.__init__(self, methodName)
-        self.typeStr  = "long"
+        self.typeStr = "long"
         self.typeCode = "l"
 
 ######################################################################
@@ -315,7 +316,7 @@ def __init__(self, methodName="runTest"):
 class ulongTestCase(VectorTestCase):
     def __init__(self, methodName="runTest"):
         VectorTestCase.__init__(self, methodName)
-        self.typeStr  = "ulong"
+        self.typeStr = "ulong"
         self.typeCode = "L"
 
 ######################################################################
@@ -323,7 +324,7 @@ def __init__(self, methodName="runTest"):
 class longLongTestCase(VectorTestCase):
     def __init__(self, methodName="runTest"):
         VectorTestCase.__init__(self, methodName)
-        self.typeStr  = "longLong"
+        self.typeStr = "longLong"
         self.typeCode = "q"
 
 ######################################################################
@@ -331,7 +332,7 @@ def __init__(self, methodName="runTest"):
 class ulongLongTestCase(VectorTestCase):
     def __init__(self, methodName="runTest"):
         VectorTestCase.__init__(self, methodName)
-        self.typeStr  = "ulongLong"
+        self.typeStr = "ulongLong"
         self.typeCode = "Q"
 
 ######################################################################
@@ -339,7 +340,7 @@ def __init__(self, methodName="runTest"):
 class floatTestCase(VectorTestCase):
     def __init__(self, methodName="runTest"):
         VectorTestCase.__init__(self, methodName)
-        self.typeStr  = "float"
+        self.typeStr = "float"
         self.typeCode = "f"
 
 ######################################################################
@@ -347,27 +348,28 @@ def __init__(self, methodName="runTest"):
 class doubleTestCase(VectorTestCase):
     def __init__(self, methodName="runTest"):
         VectorTestCase.__init__(self, methodName)
-        self.typeStr  = "double"
+        self.typeStr = "double"
         self.typeCode = "d"
 
 ######################################################################
 
+
 if __name__ == "__main__":
 
     # Build the test suite
     suite = unittest.TestSuite()
-    suite.addTest(unittest.makeSuite(    scharTestCase))
-    suite.addTest(unittest.makeSuite(    ucharTestCase))
-    suite.addTest(unittest.makeSuite(    shortTestCase))
-    suite.addTest(unittest.makeSuite(   ushortTestCase))
-    suite.addTest(unittest.makeSuite(      intTestCase))
-    suite.addTest(unittest.makeSuite(     uintTestCase))
-    suite.addTest(unittest.makeSuite(     longTestCase))
-    suite.addTest(unittest.makeSuite(    ulongTestCase))
-    suite.addTest(unittest.makeSuite( longLongTestCase))
-    suite.addTest(unittest.makeSuite(ulongLongTestCase))
-    suite.addTest(unittest.makeSuite(    floatTestCase))
-    suite.addTest(unittest.makeSuite(   doubleTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(    scharTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(    ucharTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(    shortTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(   ushortTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(      intTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(     uintTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(     longTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(    ulongTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longLongTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ulongLongTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(    floatTestCase))
+    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(   doubleTestCase))
 
     # Execute the test suite
     print("Testing 1D Functions of Module Vector")
diff --git a/tools/wheels/LICENSE_linux.txt b/tools/wheels/LICENSE_linux.txt
deleted file mode 100644
index a5b5ae5c22e6..000000000000
--- a/tools/wheels/LICENSE_linux.txt
+++ /dev/null
@@ -1,902 +0,0 @@
-
-----
-
-This binary distribution of NumPy also bundles the following software:
-
-
-Name: OpenBLAS
-Files: numpy.libs/libopenblas*.so
-Description: bundled as a dynamically linked library
-Availability: https://github.com/OpenMathLib/OpenBLAS/
-License: BSD-3-Clause
-  Copyright (c) 2011-2014, The OpenBLAS Project
-  All rights reserved.
-
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions are
-  met:
-
-     1. Redistributions of source code must retain the above copyright
-        notice, this list of conditions and the following disclaimer.
-
-     2. Redistributions in binary form must reproduce the above copyright
-        notice, this list of conditions and the following disclaimer in
-        the documentation and/or other materials provided with the
-        distribution.
-     3. Neither the name of the OpenBLAS project nor the names of
-        its contributors may be used to endorse or promote products
-        derived from this software without specific prior written
-        permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-  SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-  OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-Name: LAPACK
-Files: numpy.libs/libopenblas*.so
-Description: bundled in OpenBLAS
-Availability: https://github.com/OpenMathLib/OpenBLAS/
-License: BSD-3-Clause-Attribution
-  Copyright (c) 1992-2013 The University of Tennessee and The University
-                          of Tennessee Research Foundation.  All rights
-                          reserved.
-  Copyright (c) 2000-2013 The University of California Berkeley. All
-                          rights reserved.
-  Copyright (c) 2006-2013 The University of Colorado Denver.  All rights
-                          reserved.
-
-  $COPYRIGHT$
-
-  Additional copyrights may follow
-
-  $HEADER$
-
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions are
-  met:
-
-  - Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-
-  - Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer listed
-    in this license in the documentation and/or other materials
-    provided with the distribution.
-
-  - Neither the name of the copyright holders nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
-  The copyright holders provide no reassurances that the source code
-  provided does not infringe any patent, copyright, or any other
-  intellectual property rights of third parties.  The copyright holders
-  disclaim any liability to any recipient for claims brought against
-  recipient by any third party for infringement of that parties
-  intellectual property rights.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-Name: GCC runtime library
-Files: numpy.libs/libgfortran*.so
-Description: dynamically linked to files compiled with gcc
-Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran
-License: GPL-3.0-with-GCC-exception
-  Copyright (C) 2002-2017 Free Software Foundation, Inc.
-
-  Libgfortran is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3, or (at your option)
-  any later version.
-
-  Libgfortran is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  Under Section 7 of GPL version 3, you are granted additional
-  permissions described in the GCC Runtime Library Exception, version
-  3.1, as published by the Free Software Foundation.
-
-  You should have received a copy of the GNU General Public License and
-  a copy of the GCC Runtime Library Exception along with this program;
-  see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
-  .
-
-----
-
-Full text of license texts referred to above follows (that they are
-listed below does not necessarily imply the conditions apply to the
-present binary release):
-
-----
-
-GCC RUNTIME LIBRARY EXCEPTION
-
-Version 3.1, 31 March 2009
-
-Copyright (C) 2009 Free Software Foundation, Inc. 
-
-Everyone is permitted to copy and distribute verbatim copies of this
-license document, but changing it is not allowed.
-
-This GCC Runtime Library Exception ("Exception") is an additional
-permission under section 7 of the GNU General Public License, version
-3 ("GPLv3"). It applies to a given file (the "Runtime Library") that
-bears a notice placed by the copyright holder of the file stating that
-the file is governed by GPLv3 along with this Exception.
-
-When you use GCC to compile a program, GCC may combine portions of
-certain GCC header files and runtime libraries with the compiled
-program. The purpose of this Exception is to allow compilation of
-non-GPL (including proprietary) programs to use, in this way, the
-header files and runtime libraries covered by this Exception.
-
-0. Definitions.
-
-A file is an "Independent Module" if it either requires the Runtime
-Library for execution after a Compilation Process, or makes use of an
-interface provided by the Runtime Library, but is not otherwise based
-on the Runtime Library.
-
-"GCC" means a version of the GNU Compiler Collection, with or without
-modifications, governed by version 3 (or a specified later version) of
-the GNU General Public License (GPL) with the option of using any
-subsequent versions published by the FSF.
-
-"GPL-compatible Software" is software whose conditions of propagation,
-modification and use would permit combination with GCC in accord with
-the license of GCC.
-
-"Target Code" refers to output from any compiler for a real or virtual
-target processor architecture, in executable form or suitable for
-input to an assembler, loader, linker and/or execution
-phase. Notwithstanding that, Target Code does not include data in any
-format that is used as a compiler intermediate representation, or used
-for producing a compiler intermediate representation.
-
-The "Compilation Process" transforms code entirely represented in
-non-intermediate languages designed for human-written code, and/or in
-Java Virtual Machine byte code, into Target Code. Thus, for example,
-use of source code generators and preprocessors need not be considered
-part of the Compilation Process, since the Compilation Process can be
-understood as starting with the output of the generators or
-preprocessors.
-
-A Compilation Process is "Eligible" if it is done using GCC, alone or
-with other GPL-compatible software, or if it is done without using any
-work based on GCC. For example, using non-GPL-compatible Software to
-optimize any GCC intermediate representations would not qualify as an
-Eligible Compilation Process.
-
-1. Grant of Additional Permission.
-
-You have permission to propagate a work of Target Code formed by
-combining the Runtime Library with Independent Modules, even if such
-propagation would otherwise violate the terms of GPLv3, provided that
-all Target Code was generated by Eligible Compilation Processes. You
-may then convey such a combination under terms of your choice,
-consistent with the licensing of the Independent Modules.
-
-2. No Weakening of GCC Copyleft.
-
-The availability of this Exception does not imply any general
-presumption that third-party software is unaffected by the copyleft
-requirements of the license of GCC.
-
-----
-
-                    GNU GENERAL PUBLIC LICENSE
-                       Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. 
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-                            Preamble
-
-  The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
-  The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works.  By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users.  We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors.  You can apply it to
-your programs, too.
-
-  When we speak of free software, we are referring to freedom, not
-price.  Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
-  To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights.  Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
-  For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received.  You must make sure that they, too, receive
-or can get the source code.  And you must show them these terms so they
-know their rights.
-
-  Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
-  For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software.  For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
-  Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so.  This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software.  The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable.  Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products.  If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
-  Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary.  To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
-  The precise terms and conditions for copying, distribution and
-modification follow.
-
-                       TERMS AND CONDITIONS
-
-  0. Definitions.
-
-  "This License" refers to version 3 of the GNU General Public License.
-
-  "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
-  "The Program" refers to any copyrightable work licensed under this
-License.  Each licensee is addressed as "you".  "Licensees" and
-"recipients" may be individuals or organizations.
-
-  To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy.  The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
-  A "covered work" means either the unmodified Program or a work based
-on the Program.
-
-  To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy.  Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
-  To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies.  Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
-  An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License.  If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
-  1. Source Code.
-
-  The "source code" for a work means the preferred form of the work
-for making modifications to it.  "Object code" means any non-source
-form of a work.
-
-  A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
-  The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form.  A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
-  The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities.  However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work.  For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
-  The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
-  The Corresponding Source for a work in source code form is that
-same work.
-
-  2. Basic Permissions.
-
-  All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met.  This License explicitly affirms your unlimited
-permission to run the unmodified Program.  The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work.  This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
-  You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force.  You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright.  Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
-  Conveying under any other circumstances is permitted solely under
-the conditions stated below.  Sublicensing is not allowed; section 10
-makes it unnecessary.
-
-  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
-  No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
-  When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
-  4. Conveying Verbatim Copies.
-
-  You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
-  You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
-  5. Conveying Modified Source Versions.
-
-  You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
-    a) The work must carry prominent notices stating that you modified
-    it, and giving a relevant date.
-
-    b) The work must carry prominent notices stating that it is
-    released under this License and any conditions added under section
-    7.  This requirement modifies the requirement in section 4 to
-    "keep intact all notices".
-
-    c) You must license the entire work, as a whole, under this
-    License to anyone who comes into possession of a copy.  This
-    License will therefore apply, along with any applicable section 7
-    additional terms, to the whole of the work, and all its parts,
-    regardless of how they are packaged.  This License gives no
-    permission to license the work in any other way, but it does not
-    invalidate such permission if you have separately received it.
-
-    d) If the work has interactive user interfaces, each must display
-    Appropriate Legal Notices; however, if the Program has interactive
-    interfaces that do not display Appropriate Legal Notices, your
-    work need not make them do so.
-
-  A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit.  Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
-  6. Conveying Non-Source Forms.
-
-  You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
-    a) Convey the object code in, or embodied in, a physical product
-    (including a physical distribution medium), accompanied by the
-    Corresponding Source fixed on a durable physical medium
-    customarily used for software interchange.
-
-    b) Convey the object code in, or embodied in, a physical product
-    (including a physical distribution medium), accompanied by a
-    written offer, valid for at least three years and valid for as
-    long as you offer spare parts or customer support for that product
-    model, to give anyone who possesses the object code either (1) a
-    copy of the Corresponding Source for all the software in the
-    product that is covered by this License, on a durable physical
-    medium customarily used for software interchange, for a price no
-    more than your reasonable cost of physically performing this
-    conveying of source, or (2) access to copy the
-    Corresponding Source from a network server at no charge.
-
-    c) Convey individual copies of the object code with a copy of the
-    written offer to provide the Corresponding Source.  This
-    alternative is allowed only occasionally and noncommercially, and
-    only if you received the object code with such an offer, in accord
-    with subsection 6b.
-
-    d) Convey the object code by offering access from a designated
-    place (gratis or for a charge), and offer equivalent access to the
-    Corresponding Source in the same way through the same place at no
-    further charge.  You need not require recipients to copy the
-    Corresponding Source along with the object code.  If the place to
-    copy the object code is a network server, the Corresponding Source
-    may be on a different server (operated by you or a third party)
-    that supports equivalent copying facilities, provided you maintain
-    clear directions next to the object code saying where to find the
-    Corresponding Source.  Regardless of what server hosts the
-    Corresponding Source, you remain obligated to ensure that it is
-    available for as long as needed to satisfy these requirements.
-
-    e) Convey the object code using peer-to-peer transmission, provided
-    you inform other peers where the object code and Corresponding
-    Source of the work are being offered to the general public at no
-    charge under subsection 6d.
-
-  A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
-  A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling.  In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage.  For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product.  A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
-  "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source.  The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
-  If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information.  But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
-  The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed.  Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
-  Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
-  7. Additional Terms.
-
-  "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law.  If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
-  When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it.  (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.)  You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
-  Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
-    a) Disclaiming warranty or limiting liability differently from the
-    terms of sections 15 and 16 of this License; or
-
-    b) Requiring preservation of specified reasonable legal notices or
-    author attributions in that material or in the Appropriate Legal
-    Notices displayed by works containing it; or
-
-    c) Prohibiting misrepresentation of the origin of that material, or
-    requiring that modified versions of such material be marked in
-    reasonable ways as different from the original version; or
-
-    d) Limiting the use for publicity purposes of names of licensors or
-    authors of the material; or
-
-    e) Declining to grant rights under trademark law for use of some
-    trade names, trademarks, or service marks; or
-
-    f) Requiring indemnification of licensors and authors of that
-    material by anyone who conveys the material (or modified versions of
-    it) with contractual assumptions of liability to the recipient, for
-    any liability that these contractual assumptions directly impose on
-    those licensors and authors.
-
-  All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10.  If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term.  If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
-  If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
-  Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
-  8. Termination.
-
-  You may not propagate or modify a covered work except as expressly
-provided under this License.  Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
-  However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
-  Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
-  Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License.  If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
-  9. Acceptance Not Required for Having Copies.
-
-  You are not required to accept this License in order to receive or
-run a copy of the Program.  Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance.  However,
-nothing other than this License grants you permission to propagate or
-modify any covered work.  These actions infringe copyright if you do
-not accept this License.  Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
-  10. Automatic Licensing of Downstream Recipients.
-
-  Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License.  You are not responsible
-for enforcing compliance by third parties with this License.
-
-  An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations.  If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
-  You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License.  For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
-  11. Patents.
-
-  A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based.  The
-work thus licensed is called the contributor's "contributor version".
-
-  A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version.  For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
-  Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
-  In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement).  To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
-  If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients.  "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
-  If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
-  A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License.  You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
-  Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
-  12. No Surrender of Others' Freedom.
-
-  If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License.  If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all.  For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
-  13. Use with the GNU Affero General Public License.
-
-  Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work.  The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
-  14. Revised Versions of this License.
-
-  The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time.  Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
-  Each version is given a distinguishing version number.  If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation.  If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
-  If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
-  Later license versions may give you additional or different
-permissions.  However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
-  15. Disclaimer of Warranty.
-
-  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
-  16. Limitation of Liability.
-
-  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
-  17. Interpretation of Sections 15 and 16.
-
-  If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
-                     END OF TERMS AND CONDITIONS
-
-            How to Apply These Terms to Your New Programs
-
-  If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
-  To do so, attach the following notices to the program.  It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-    
-    Copyright (C)   
-
-    This program is free software: you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation, either version 3 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program.  If not, see .
-
-Also add information on how to contact you by electronic and paper mail.
-
-  If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
-      Copyright (C)   
-    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
-    This is free software, and you are welcome to redistribute it
-    under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License.  Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
-  You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-.
-
-  The GNU General Public License does not permit incorporating your program
-into proprietary programs.  If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library.  If this is what you want to do, use the GNU Lesser General
-Public License instead of this License.  But first, please read
-.
-
-Name: libquadmath
-Files: numpy.libs/libquadmath*.so
-Description: dynamically linked to files compiled with gcc
-Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libquadmath
-License: LGPL-2.1-or-later
-
-    GCC Quad-Precision Math Library
-    Copyright (C) 2010-2019 Free Software Foundation, Inc.
-    Written by Francois-Xavier Coudert  
-
-    This file is part of the libquadmath library.
-    Libquadmath is free software; you can redistribute it and/or
-    modify it under the terms of the GNU Library General Public
-    License as published by the Free Software Foundation; either
-    version 2.1 of the License, or (at your option) any later version.
-
-    Libquadmath is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-    Lesser General Public License for more details.
-    https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html
diff --git a/tools/wheels/LICENSE_osx.txt b/tools/wheels/LICENSE_osx.txt
deleted file mode 100644
index 1ebd5663d02c..000000000000
--- a/tools/wheels/LICENSE_osx.txt
+++ /dev/null
@@ -1,901 +0,0 @@
-
-----
-
-This binary distribution of NumPy also bundles the following software:
-
-Name: OpenBLAS
-Files: numpy/.dylibs/libopenblas*.so
-Description: bundled as a dynamically linked library
-Availability: https://github.com/OpenMathLib/OpenBLAS/
-License: BSD-3-Clause
-  Copyright (c) 2011-2014, The OpenBLAS Project
-  All rights reserved.
-
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions are
-  met:
-
-     1. Redistributions of source code must retain the above copyright
-        notice, this list of conditions and the following disclaimer.
-
-     2. Redistributions in binary form must reproduce the above copyright
-        notice, this list of conditions and the following disclaimer in
-        the documentation and/or other materials provided with the
-        distribution.
-     3. Neither the name of the OpenBLAS project nor the names of
-        its contributors may be used to endorse or promote products
-        derived from this software without specific prior written
-        permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-  SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-  OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-Name: LAPACK
-Files: numpy/.dylibs/libopenblas*.so
-Description: bundled in OpenBLAS
-Availability: https://github.com/OpenMathLib/OpenBLAS/
-License: BSD-3-Clause-Attribution
-  Copyright (c) 1992-2013 The University of Tennessee and The University
-                          of Tennessee Research Foundation.  All rights
-                          reserved.
-  Copyright (c) 2000-2013 The University of California Berkeley. All
-                          rights reserved.
-  Copyright (c) 2006-2013 The University of Colorado Denver.  All rights
-                          reserved.
-
-  $COPYRIGHT$
-
-  Additional copyrights may follow
-
-  $HEADER$
-
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions are
-  met:
-
-  - Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-
-  - Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer listed
-    in this license in the documentation and/or other materials
-    provided with the distribution.
-
-  - Neither the name of the copyright holders nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
-  The copyright holders provide no reassurances that the source code
-  provided does not infringe any patent, copyright, or any other
-  intellectual property rights of third parties.  The copyright holders
-  disclaim any liability to any recipient for claims brought against
-  recipient by any third party for infringement of that parties
-  intellectual property rights.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-Name: GCC runtime library
-Files: numpy/.dylibs/libgfortran*, numpy/.dylibs/libgcc*
-Description: dynamically linked to files compiled with gcc
-Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran
-License: GPL-3.0-with-GCC-exception
-  Copyright (C) 2002-2017 Free Software Foundation, Inc.
-
-  Libgfortran is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3, or (at your option)
-  any later version.
-
-  Libgfortran is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  Under Section 7 of GPL version 3, you are granted additional
-  permissions described in the GCC Runtime Library Exception, version
-  3.1, as published by the Free Software Foundation.
-
-  You should have received a copy of the GNU General Public License and
-  a copy of the GCC Runtime Library Exception along with this program;
-  see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
-  .
-
-----
-
-Full text of license texts referred to above follows (that they are
-listed below does not necessarily imply the conditions apply to the
-present binary release):
-
-----
-
-GCC RUNTIME LIBRARY EXCEPTION
-
-Version 3.1, 31 March 2009
-
-Copyright (C) 2009 Free Software Foundation, Inc. 
-
-Everyone is permitted to copy and distribute verbatim copies of this
-license document, but changing it is not allowed.
-
-This GCC Runtime Library Exception ("Exception") is an additional
-permission under section 7 of the GNU General Public License, version
-3 ("GPLv3"). It applies to a given file (the "Runtime Library") that
-bears a notice placed by the copyright holder of the file stating that
-the file is governed by GPLv3 along with this Exception.
-
-When you use GCC to compile a program, GCC may combine portions of
-certain GCC header files and runtime libraries with the compiled
-program. The purpose of this Exception is to allow compilation of
-non-GPL (including proprietary) programs to use, in this way, the
-header files and runtime libraries covered by this Exception.
-
-0. Definitions.
-
-A file is an "Independent Module" if it either requires the Runtime
-Library for execution after a Compilation Process, or makes use of an
-interface provided by the Runtime Library, but is not otherwise based
-on the Runtime Library.
-
-"GCC" means a version of the GNU Compiler Collection, with or without
-modifications, governed by version 3 (or a specified later version) of
-the GNU General Public License (GPL) with the option of using any
-subsequent versions published by the FSF.
-
-"GPL-compatible Software" is software whose conditions of propagation,
-modification and use would permit combination with GCC in accord with
-the license of GCC.
-
-"Target Code" refers to output from any compiler for a real or virtual
-target processor architecture, in executable form or suitable for
-input to an assembler, loader, linker and/or execution
-phase. Notwithstanding that, Target Code does not include data in any
-format that is used as a compiler intermediate representation, or used
-for producing a compiler intermediate representation.
-
-The "Compilation Process" transforms code entirely represented in
-non-intermediate languages designed for human-written code, and/or in
-Java Virtual Machine byte code, into Target Code. Thus, for example,
-use of source code generators and preprocessors need not be considered
-part of the Compilation Process, since the Compilation Process can be
-understood as starting with the output of the generators or
-preprocessors.
-
-A Compilation Process is "Eligible" if it is done using GCC, alone or
-with other GPL-compatible software, or if it is done without using any
-work based on GCC. For example, using non-GPL-compatible Software to
-optimize any GCC intermediate representations would not qualify as an
-Eligible Compilation Process.
-
-1. Grant of Additional Permission.
-
-You have permission to propagate a work of Target Code formed by
-combining the Runtime Library with Independent Modules, even if such
-propagation would otherwise violate the terms of GPLv3, provided that
-all Target Code was generated by Eligible Compilation Processes. You
-may then convey such a combination under terms of your choice,
-consistent with the licensing of the Independent Modules.
-
-2. No Weakening of GCC Copyleft.
-
-The availability of this Exception does not imply any general
-presumption that third-party software is unaffected by the copyleft
-requirements of the license of GCC.
-
-----
-
-                    GNU GENERAL PUBLIC LICENSE
-                       Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. 
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-                            Preamble
-
-  The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
-  The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works.  By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users.  We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors.  You can apply it to
-your programs, too.
-
-  When we speak of free software, we are referring to freedom, not
-price.  Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
-  To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights.  Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
-  For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received.  You must make sure that they, too, receive
-or can get the source code.  And you must show them these terms so they
-know their rights.
-
-  Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
-  For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software.  For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
-  Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so.  This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software.  The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable.  Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products.  If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
-  Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary.  To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
-  The precise terms and conditions for copying, distribution and
-modification follow.
-
-                       TERMS AND CONDITIONS
-
-  0. Definitions.
-
-  "This License" refers to version 3 of the GNU General Public License.
-
-  "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
-  "The Program" refers to any copyrightable work licensed under this
-License.  Each licensee is addressed as "you".  "Licensees" and
-"recipients" may be individuals or organizations.
-
-  To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy.  The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
-  A "covered work" means either the unmodified Program or a work based
-on the Program.
-
-  To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy.  Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
-  To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies.  Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
-  An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License.  If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
-  1. Source Code.
-
-  The "source code" for a work means the preferred form of the work
-for making modifications to it.  "Object code" means any non-source
-form of a work.
-
-  A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
-  The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form.  A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
-  The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities.  However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work.  For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
-  The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
-  The Corresponding Source for a work in source code form is that
-same work.
-
-  2. Basic Permissions.
-
-  All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met.  This License explicitly affirms your unlimited
-permission to run the unmodified Program.  The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work.  This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
-  You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force.  You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright.  Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
-  Conveying under any other circumstances is permitted solely under
-the conditions stated below.  Sublicensing is not allowed; section 10
-makes it unnecessary.
-
-  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
-  No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
-  When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
-  4. Conveying Verbatim Copies.
-
-  You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
-  You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
-  5. Conveying Modified Source Versions.
-
-  You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
-    a) The work must carry prominent notices stating that you modified
-    it, and giving a relevant date.
-
-    b) The work must carry prominent notices stating that it is
-    released under this License and any conditions added under section
-    7.  This requirement modifies the requirement in section 4 to
-    "keep intact all notices".
-
-    c) You must license the entire work, as a whole, under this
-    License to anyone who comes into possession of a copy.  This
-    License will therefore apply, along with any applicable section 7
-    additional terms, to the whole of the work, and all its parts,
-    regardless of how they are packaged.  This License gives no
-    permission to license the work in any other way, but it does not
-    invalidate such permission if you have separately received it.
-
-    d) If the work has interactive user interfaces, each must display
-    Appropriate Legal Notices; however, if the Program has interactive
-    interfaces that do not display Appropriate Legal Notices, your
-    work need not make them do so.
-
-  A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit.  Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
-  6. Conveying Non-Source Forms.
-
-  You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
-    a) Convey the object code in, or embodied in, a physical product
-    (including a physical distribution medium), accompanied by the
-    Corresponding Source fixed on a durable physical medium
-    customarily used for software interchange.
-
-    b) Convey the object code in, or embodied in, a physical product
-    (including a physical distribution medium), accompanied by a
-    written offer, valid for at least three years and valid for as
-    long as you offer spare parts or customer support for that product
-    model, to give anyone who possesses the object code either (1) a
-    copy of the Corresponding Source for all the software in the
-    product that is covered by this License, on a durable physical
-    medium customarily used for software interchange, for a price no
-    more than your reasonable cost of physically performing this
-    conveying of source, or (2) access to copy the
-    Corresponding Source from a network server at no charge.
-
-    c) Convey individual copies of the object code with a copy of the
-    written offer to provide the Corresponding Source.  This
-    alternative is allowed only occasionally and noncommercially, and
-    only if you received the object code with such an offer, in accord
-    with subsection 6b.
-
-    d) Convey the object code by offering access from a designated
-    place (gratis or for a charge), and offer equivalent access to the
-    Corresponding Source in the same way through the same place at no
-    further charge.  You need not require recipients to copy the
-    Corresponding Source along with the object code.  If the place to
-    copy the object code is a network server, the Corresponding Source
-    may be on a different server (operated by you or a third party)
-    that supports equivalent copying facilities, provided you maintain
-    clear directions next to the object code saying where to find the
-    Corresponding Source.  Regardless of what server hosts the
-    Corresponding Source, you remain obligated to ensure that it is
-    available for as long as needed to satisfy these requirements.
-
-    e) Convey the object code using peer-to-peer transmission, provided
-    you inform other peers where the object code and Corresponding
-    Source of the work are being offered to the general public at no
-    charge under subsection 6d.
-
-  A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
-  A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling.  In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage.  For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product.  A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
-  "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source.  The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
-  If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information.  But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
-  The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed.  Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
-  Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
-  7. Additional Terms.
-
-  "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law.  If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
-  When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it.  (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.)  You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
-  Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
-    a) Disclaiming warranty or limiting liability differently from the
-    terms of sections 15 and 16 of this License; or
-
-    b) Requiring preservation of specified reasonable legal notices or
-    author attributions in that material or in the Appropriate Legal
-    Notices displayed by works containing it; or
-
-    c) Prohibiting misrepresentation of the origin of that material, or
-    requiring that modified versions of such material be marked in
-    reasonable ways as different from the original version; or
-
-    d) Limiting the use for publicity purposes of names of licensors or
-    authors of the material; or
-
-    e) Declining to grant rights under trademark law for use of some
-    trade names, trademarks, or service marks; or
-
-    f) Requiring indemnification of licensors and authors of that
-    material by anyone who conveys the material (or modified versions of
-    it) with contractual assumptions of liability to the recipient, for
-    any liability that these contractual assumptions directly impose on
-    those licensors and authors.
-
-  All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10.  If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term.  If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
-  If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
-  Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
-  8. Termination.
-
-  You may not propagate or modify a covered work except as expressly
-provided under this License.  Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
-  However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
-  Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
-  Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License.  If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
-  9. Acceptance Not Required for Having Copies.
-
-  You are not required to accept this License in order to receive or
-run a copy of the Program.  Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance.  However,
-nothing other than this License grants you permission to propagate or
-modify any covered work.  These actions infringe copyright if you do
-not accept this License.  Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
-  10. Automatic Licensing of Downstream Recipients.
-
-  Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License.  You are not responsible
-for enforcing compliance by third parties with this License.
-
-  An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations.  If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
-  You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License.  For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
-  11. Patents.
-
-  A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based.  The
-work thus licensed is called the contributor's "contributor version".
-
-  A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version.  For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
-  Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
-  In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement).  To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
-  If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients.  "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
-  If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
-  A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License.  You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
-  Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
-  12. No Surrender of Others' Freedom.
-
-  If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License.  If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all.  For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
-  13. Use with the GNU Affero General Public License.
-
-  Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work.  The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
-  14. Revised Versions of this License.
-
-  The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time.  Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
-  Each version is given a distinguishing version number.  If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation.  If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
-  If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
-  Later license versions may give you additional or different
-permissions.  However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
-  15. Disclaimer of Warranty.
-
-  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
-  16. Limitation of Liability.
-
-  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
-  17. Interpretation of Sections 15 and 16.
-
-  If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
-                     END OF TERMS AND CONDITIONS
-
-            How to Apply These Terms to Your New Programs
-
-  If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
-  To do so, attach the following notices to the program.  It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-    
-    Copyright (C)   
-
-    This program is free software: you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation, either version 3 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program.  If not, see .
-
-Also add information on how to contact you by electronic and paper mail.
-
-  If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
-      Copyright (C)   
-    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
-    This is free software, and you are welcome to redistribute it
-    under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License.  Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
-  You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-.
-
-  The GNU General Public License does not permit incorporating your program
-into proprietary programs.  If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library.  If this is what you want to do, use the GNU Lesser General
-Public License instead of this License.  But first, please read
-.
-
-Name: libquadmath
-Files: numpy/.dylibs/libquadmath*.so
-Description: dynamically linked to files compiled with gcc
-Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libquadmath
-License: LGPL-2.1-or-later
-
-    GCC Quad-Precision Math Library
-    Copyright (C) 2010-2019 Free Software Foundation, Inc.
-    Written by Francois-Xavier Coudert  
-
-    This file is part of the libquadmath library.
-    Libquadmath is free software; you can redistribute it and/or
-    modify it under the terms of the GNU Library General Public
-    License as published by the Free Software Foundation; either
-    version 2.1 of the License, or (at your option) any later version.
-
-    Libquadmath is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-    Lesser General Public License for more details.
-    https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html
diff --git a/tools/wheels/LICENSE_win32.txt b/tools/wheels/LICENSE_win32.txt
deleted file mode 100644
index f8eaaf1cae25..000000000000
--- a/tools/wheels/LICENSE_win32.txt
+++ /dev/null
@@ -1,902 +0,0 @@
-
-----
-
-This binary distribution of NumPy also bundles the following software:
-
-
-Name: OpenBLAS
-Files: numpy.libs\libopenblas*.dll
-Description: bundled as a dynamically linked library
-Availability: https://github.com/OpenMathLib/OpenBLAS/
-License: BSD-3-Clause
-  Copyright (c) 2011-2014, The OpenBLAS Project
-  All rights reserved.
-
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions are
-  met:
-
-     1. Redistributions of source code must retain the above copyright
-        notice, this list of conditions and the following disclaimer.
-
-     2. Redistributions in binary form must reproduce the above copyright
-        notice, this list of conditions and the following disclaimer in
-        the documentation and/or other materials provided with the
-        distribution.
-     3. Neither the name of the OpenBLAS project nor the names of
-        its contributors may be used to endorse or promote products
-        derived from this software without specific prior written
-        permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-  SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-  OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-Name: LAPACK
-Files: numpy.libs\libopenblas*.dll
-Description: bundled in OpenBLAS
-Availability: https://github.com/OpenMathLib/OpenBLAS/
-License: BSD-3-Clause-Attribution
-  Copyright (c) 1992-2013 The University of Tennessee and The University
-                          of Tennessee Research Foundation.  All rights
-                          reserved.
-  Copyright (c) 2000-2013 The University of California Berkeley. All
-                          rights reserved.
-  Copyright (c) 2006-2013 The University of Colorado Denver.  All rights
-                          reserved.
-
-  $COPYRIGHT$
-
-  Additional copyrights may follow
-
-  $HEADER$
-
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions are
-  met:
-
-  - Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-
-  - Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer listed
-    in this license in the documentation and/or other materials
-    provided with the distribution.
-
-  - Neither the name of the copyright holders nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
-  The copyright holders provide no reassurances that the source code
-  provided does not infringe any patent, copyright, or any other
-  intellectual property rights of third parties.  The copyright holders
-  disclaim any liability to any recipient for claims brought against
-  recipient by any third party for infringement of that parties
-  intellectual property rights.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-Name: GCC runtime library
-Files: numpy.libs\libgfortran*.dll
-Description: statically linked to files compiled with gcc
-Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran
-License: GPL-3.0-with-GCC-exception
-  Copyright (C) 2002-2017 Free Software Foundation, Inc.
-
-  Libgfortran is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3, or (at your option)
-  any later version.
-
-  Libgfortran is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  Under Section 7 of GPL version 3, you are granted additional
-  permissions described in the GCC Runtime Library Exception, version
-  3.1, as published by the Free Software Foundation.
-
-  You should have received a copy of the GNU General Public License and
-  a copy of the GCC Runtime Library Exception along with this program;
-  see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
-  .
-
-----
-
-Full text of license texts referred to above follows (that they are
-listed below does not necessarily imply the conditions apply to the
-present binary release):
-
-----
-
-GCC RUNTIME LIBRARY EXCEPTION
-
-Version 3.1, 31 March 2009
-
-Copyright (C) 2009 Free Software Foundation, Inc. 
-
-Everyone is permitted to copy and distribute verbatim copies of this
-license document, but changing it is not allowed.
-
-This GCC Runtime Library Exception ("Exception") is an additional
-permission under section 7 of the GNU General Public License, version
-3 ("GPLv3"). It applies to a given file (the "Runtime Library") that
-bears a notice placed by the copyright holder of the file stating that
-the file is governed by GPLv3 along with this Exception.
-
-When you use GCC to compile a program, GCC may combine portions of
-certain GCC header files and runtime libraries with the compiled
-program. The purpose of this Exception is to allow compilation of
-non-GPL (including proprietary) programs to use, in this way, the
-header files and runtime libraries covered by this Exception.
-
-0. Definitions.
-
-A file is an "Independent Module" if it either requires the Runtime
-Library for execution after a Compilation Process, or makes use of an
-interface provided by the Runtime Library, but is not otherwise based
-on the Runtime Library.
-
-"GCC" means a version of the GNU Compiler Collection, with or without
-modifications, governed by version 3 (or a specified later version) of
-the GNU General Public License (GPL) with the option of using any
-subsequent versions published by the FSF.
-
-"GPL-compatible Software" is software whose conditions of propagation,
-modification and use would permit combination with GCC in accord with
-the license of GCC.
-
-"Target Code" refers to output from any compiler for a real or virtual
-target processor architecture, in executable form or suitable for
-input to an assembler, loader, linker and/or execution
-phase. Notwithstanding that, Target Code does not include data in any
-format that is used as a compiler intermediate representation, or used
-for producing a compiler intermediate representation.
-
-The "Compilation Process" transforms code entirely represented in
-non-intermediate languages designed for human-written code, and/or in
-Java Virtual Machine byte code, into Target Code. Thus, for example,
-use of source code generators and preprocessors need not be considered
-part of the Compilation Process, since the Compilation Process can be
-understood as starting with the output of the generators or
-preprocessors.
-
-A Compilation Process is "Eligible" if it is done using GCC, alone or
-with other GPL-compatible software, or if it is done without using any
-work based on GCC. For example, using non-GPL-compatible Software to
-optimize any GCC intermediate representations would not qualify as an
-Eligible Compilation Process.
-
-1. Grant of Additional Permission.
-
-You have permission to propagate a work of Target Code formed by
-combining the Runtime Library with Independent Modules, even if such
-propagation would otherwise violate the terms of GPLv3, provided that
-all Target Code was generated by Eligible Compilation Processes. You
-may then convey such a combination under terms of your choice,
-consistent with the licensing of the Independent Modules.
-
-2. No Weakening of GCC Copyleft.
-
-The availability of this Exception does not imply any general
-presumption that third-party software is unaffected by the copyleft
-requirements of the license of GCC.
-
-----
-
-                    GNU GENERAL PUBLIC LICENSE
-                       Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. 
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-                            Preamble
-
-  The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
-  The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works.  By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users.  We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors.  You can apply it to
-your programs, too.
-
-  When we speak of free software, we are referring to freedom, not
-price.  Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
-  To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights.  Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
-  For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received.  You must make sure that they, too, receive
-or can get the source code.  And you must show them these terms so they
-know their rights.
-
-  Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
-  For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software.  For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
-  Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so.  This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software.  The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable.  Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products.  If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
-  Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary.  To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
-  The precise terms and conditions for copying, distribution and
-modification follow.
-
-                       TERMS AND CONDITIONS
-
-  0. Definitions.
-
-  "This License" refers to version 3 of the GNU General Public License.
-
-  "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
-  "The Program" refers to any copyrightable work licensed under this
-License.  Each licensee is addressed as "you".  "Licensees" and
-"recipients" may be individuals or organizations.
-
-  To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy.  The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
-  A "covered work" means either the unmodified Program or a work based
-on the Program.
-
-  To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy.  Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
-  To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies.  Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
-  An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License.  If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
-  1. Source Code.
-
-  The "source code" for a work means the preferred form of the work
-for making modifications to it.  "Object code" means any non-source
-form of a work.
-
-  A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
-  The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form.  A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
-  The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities.  However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work.  For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
-  The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
-  The Corresponding Source for a work in source code form is that
-same work.
-
-  2. Basic Permissions.
-
-  All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met.  This License explicitly affirms your unlimited
-permission to run the unmodified Program.  The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work.  This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
-  You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force.  You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright.  Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
-  Conveying under any other circumstances is permitted solely under
-the conditions stated below.  Sublicensing is not allowed; section 10
-makes it unnecessary.
-
-  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
-  No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
-  When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
-  4. Conveying Verbatim Copies.
-
-  You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
-  You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
-  5. Conveying Modified Source Versions.
-
-  You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
-    a) The work must carry prominent notices stating that you modified
-    it, and giving a relevant date.
-
-    b) The work must carry prominent notices stating that it is
-    released under this License and any conditions added under section
-    7.  This requirement modifies the requirement in section 4 to
-    "keep intact all notices".
-
-    c) You must license the entire work, as a whole, under this
-    License to anyone who comes into possession of a copy.  This
-    License will therefore apply, along with any applicable section 7
-    additional terms, to the whole of the work, and all its parts,
-    regardless of how they are packaged.  This License gives no
-    permission to license the work in any other way, but it does not
-    invalidate such permission if you have separately received it.
-
-    d) If the work has interactive user interfaces, each must display
-    Appropriate Legal Notices; however, if the Program has interactive
-    interfaces that do not display Appropriate Legal Notices, your
-    work need not make them do so.
-
-  A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit.  Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
-  6. Conveying Non-Source Forms.
-
-  You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
-    a) Convey the object code in, or embodied in, a physical product
-    (including a physical distribution medium), accompanied by the
-    Corresponding Source fixed on a durable physical medium
-    customarily used for software interchange.
-
-    b) Convey the object code in, or embodied in, a physical product
-    (including a physical distribution medium), accompanied by a
-    written offer, valid for at least three years and valid for as
-    long as you offer spare parts or customer support for that product
-    model, to give anyone who possesses the object code either (1) a
-    copy of the Corresponding Source for all the software in the
-    product that is covered by this License, on a durable physical
-    medium customarily used for software interchange, for a price no
-    more than your reasonable cost of physically performing this
-    conveying of source, or (2) access to copy the
-    Corresponding Source from a network server at no charge.
-
-    c) Convey individual copies of the object code with a copy of the
-    written offer to provide the Corresponding Source.  This
-    alternative is allowed only occasionally and noncommercially, and
-    only if you received the object code with such an offer, in accord
-    with subsection 6b.
-
-    d) Convey the object code by offering access from a designated
-    place (gratis or for a charge), and offer equivalent access to the
-    Corresponding Source in the same way through the same place at no
-    further charge.  You need not require recipients to copy the
-    Corresponding Source along with the object code.  If the place to
-    copy the object code is a network server, the Corresponding Source
-    may be on a different server (operated by you or a third party)
-    that supports equivalent copying facilities, provided you maintain
-    clear directions next to the object code saying where to find the
-    Corresponding Source.  Regardless of what server hosts the
-    Corresponding Source, you remain obligated to ensure that it is
-    available for as long as needed to satisfy these requirements.
-
-    e) Convey the object code using peer-to-peer transmission, provided
-    you inform other peers where the object code and Corresponding
-    Source of the work are being offered to the general public at no
-    charge under subsection 6d.
-
-  A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
-  A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling.  In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage.  For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product.  A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
-  "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source.  The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
-  If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information.  But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
-  The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed.  Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
-  Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
-  7. Additional Terms.
-
-  "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law.  If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
-  When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it.  (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.)  You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
-  Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
-    a) Disclaiming warranty or limiting liability differently from the
-    terms of sections 15 and 16 of this License; or
-
-    b) Requiring preservation of specified reasonable legal notices or
-    author attributions in that material or in the Appropriate Legal
-    Notices displayed by works containing it; or
-
-    c) Prohibiting misrepresentation of the origin of that material, or
-    requiring that modified versions of such material be marked in
-    reasonable ways as different from the original version; or
-
-    d) Limiting the use for publicity purposes of names of licensors or
-    authors of the material; or
-
-    e) Declining to grant rights under trademark law for use of some
-    trade names, trademarks, or service marks; or
-
-    f) Requiring indemnification of licensors and authors of that
-    material by anyone who conveys the material (or modified versions of
-    it) with contractual assumptions of liability to the recipient, for
-    any liability that these contractual assumptions directly impose on
-    those licensors and authors.
-
-  All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10.  If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term.  If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
-  If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
-  Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
-  8. Termination.
-
-  You may not propagate or modify a covered work except as expressly
-provided under this License.  Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
-  However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
-  Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
-  Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License.  If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
-  9. Acceptance Not Required for Having Copies.
-
-  You are not required to accept this License in order to receive or
-run a copy of the Program.  Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance.  However,
-nothing other than this License grants you permission to propagate or
-modify any covered work.  These actions infringe copyright if you do
-not accept this License.  Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
-  10. Automatic Licensing of Downstream Recipients.
-
-  Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License.  You are not responsible
-for enforcing compliance by third parties with this License.
-
-  An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations.  If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
-  You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License.  For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
-  11. Patents.
-
-  A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based.  The
-work thus licensed is called the contributor's "contributor version".
-
-  A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version.  For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
-  Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
-  In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement).  To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
-  If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients.  "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
-  If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
-  A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License.  You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
-  Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
-  12. No Surrender of Others' Freedom.
-
-  If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License.  If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all.  For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
-  13. Use with the GNU Affero General Public License.
-
-  Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work.  The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
-  14. Revised Versions of this License.
-
-  The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time.  Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
-  Each version is given a distinguishing version number.  If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation.  If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
-  If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
-  Later license versions may give you additional or different
-permissions.  However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
-  15. Disclaimer of Warranty.
-
-  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
-  16. Limitation of Liability.
-
-  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
-  17. Interpretation of Sections 15 and 16.
-
-  If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
-                     END OF TERMS AND CONDITIONS
-
-            How to Apply These Terms to Your New Programs
-
-  If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
-  To do so, attach the following notices to the program.  It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-    
-    Copyright (C)   
-
-    This program is free software: you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation, either version 3 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program.  If not, see .
-
-Also add information on how to contact you by electronic and paper mail.
-
-  If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
-      Copyright (C)   
-    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
-    This is free software, and you are welcome to redistribute it
-    under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License.  Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
-  You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-.
-
-  The GNU General Public License does not permit incorporating your program
-into proprietary programs.  If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library.  If this is what you want to do, use the GNU Lesser General
-Public License instead of this License.  But first, please read
-.
-
-Name: libquadmath
-Files: numpy.libs\libopenb*.dll
-Description: statically linked to files compiled with gcc
-Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libquadmath
-License: LGPL-2.1-or-later
-
-    GCC Quad-Precision Math Library
-    Copyright (C) 2010-2019 Free Software Foundation, Inc.
-    Written by Francois-Xavier Coudert  
-
-    This file is part of the libquadmath library.
-    Libquadmath is free software; you can redistribute it and/or
-    modify it under the terms of the GNU Library General Public
-    License as published by the Free Software Foundation; either
-    version 2.1 of the License, or (at your option) any later version.
-
-    Libquadmath is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-    Lesser General Public License for more details.
-    https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html
diff --git a/tools/wheels/check_license.py b/tools/wheels/check_license.py
deleted file mode 100644
index 7d0ef7921a4e..000000000000
--- a/tools/wheels/check_license.py
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/usr/bin/env python
-"""
-check_license.py [MODULE]
-
-Check the presence of a LICENSE.txt in the installed module directory,
-and that it appears to contain text prevalent for a NumPy binary
-distribution.
-
-"""
-import sys
-import re
-import argparse
-import pathlib
-
-
-def check_text(text):
-    ok = "Copyright (c)" in text and re.search(
-        r"This binary distribution of \w+ also bundles the following software",
-        text,
-    )
-    return ok
-
-
-def main():
-    p = argparse.ArgumentParser(usage=__doc__.rstrip())
-    p.add_argument("module", nargs="?", default="numpy")
-    args = p.parse_args()
-
-    # Drop '' from sys.path
-    sys.path.pop(0)
-
-    # Find module path
-    __import__(args.module)
-    mod = sys.modules[args.module]
-
-    # LICENSE.txt is installed in the .dist-info directory, so find it there
-    sitepkgs = pathlib.Path(mod.__file__).parent.parent
-    distinfo_path = [s for s in sitepkgs.glob("numpy-*.dist-info")][0]
-
-    # Check license text
-    license_txt = distinfo_path / "LICENSE.txt"
-    with open(license_txt, encoding="utf-8") as f:
-        text = f.read()
-
-    ok = check_text(text)
-    if not ok:
-        print(
-            "ERROR: License text {} does not contain expected "
-            "text fragments\n".format(license_txt)
-        )
-        print(text)
-        sys.exit(1)
-
-    sys.exit(0)
-
-
-if __name__ == "__main__":
-    main()
diff --git a/tools/wheels/cibw_before_build.sh b/tools/wheels/cibw_before_build.sh
index 2fc5fa144d26..381c329a5372 100644
--- a/tools/wheels/cibw_before_build.sh
+++ b/tools/wheels/cibw_before_build.sh
@@ -2,23 +2,9 @@ set -xe
 
 PROJECT_DIR="${1:-$PWD}"
 
-
 # remove any cruft from a previous run
 rm -rf build
 
-# Update license
-echo "" >> $PROJECT_DIR/LICENSE.txt
-echo "----" >> $PROJECT_DIR/LICENSE.txt
-echo "" >> $PROJECT_DIR/LICENSE.txt
-cat $PROJECT_DIR/LICENSES_bundled.txt >> $PROJECT_DIR/LICENSE.txt
-if [[ $RUNNER_OS == "Linux" ]] ; then
-    cat $PROJECT_DIR/tools/wheels/LICENSE_linux.txt >> $PROJECT_DIR/LICENSE.txt
-elif [[ $RUNNER_OS == "macOS" ]]; then
-    cat $PROJECT_DIR/tools/wheels/LICENSE_osx.txt >> $PROJECT_DIR/LICENSE.txt
-elif [[ $RUNNER_OS == "Windows" ]]; then
-    cat $PROJECT_DIR/tools/wheels/LICENSE_win32.txt >> $PROJECT_DIR/LICENSE.txt
-fi
-
 if [[ $(python -c"import sys; print(sys.maxsize)") < $(python -c"import sys; print(2**33)") ]]; then
     echo "No BLAS used for 32-bit wheels"
     export INSTALL_OPENBLAS=false
@@ -27,36 +13,41 @@ elif [ -z $INSTALL_OPENBLAS ]; then
     export INSTALL_OPENBLAS=true
 fi
 
-# Install Openblas from scipy-openblas64
+# Install OpenBLAS from scipy-openblas32|64
 if [[ "$INSTALL_OPENBLAS" = "true" ]] ; then
-    echo PKG_CONFIG_PATH $PKG_CONFIG_PATH
-    PKG_CONFIG_PATH=$PROJECT_DIR/.openblas
-    rm -rf $PKG_CONFIG_PATH
-    mkdir -p $PKG_CONFIG_PATH
-    python -m pip install -r requirements/ci_requirements.txt
-    python -c "import scipy_openblas64; print(scipy_openblas64.get_pkg_config())" > $PKG_CONFIG_PATH/scipy-openblas.pc
-    # Copy the shared objects to a path under $PKG_CONFIG_PATH, the build
-    # will point $LD_LIBRARY_PATH there and then auditwheel/delocate-wheel will
-    # pull these into the wheel. Use python to avoid windows/posix problems
-    python < $pkgconf_path/scipy-openblas.pc
+
+    # Copy scipy-openblas DLL's to a fixed location so we can point delvewheel
+    # at it in `repair_windows.sh` (needed only on Windows because of the lack
+    # of RPATH support).
+    if [[ $RUNNER_OS == "Windows" ]]; then
+        python <&1) == *"The global interpreter lock (GIL) has been enabled"* ]]; then
+        echo "Error: Importing NumPy re-enables the GIL in the free-threaded build"
+        exit 1
+    fi
 fi
 
 # Run full tests with -n=auto. This makes pytest-xdist distribute tests across
-# the available N CPU cores: 2 by default for Linux instances and 4 for macOS arm64
-python -c "import sys; import numpy; sys.exit(not numpy.test(label='full', extra_argv=['-n=auto']))"
-python $PROJECT_DIR/tools/wheels/check_license.py
+# the available N CPU cores. Also print the durations for the 10 slowest tests
+# to help with debugging slow or hanging tests
+python -c "import sys; import numpy; sys.exit(not numpy.test(label='full', extra_argv=['-n=auto', '--durations=10']))"
diff --git a/tools/wheels/gfortran_utils.sh b/tools/wheels/gfortran_utils.sh
deleted file mode 100644
index 52d5a6573a70..000000000000
--- a/tools/wheels/gfortran_utils.sh
+++ /dev/null
@@ -1,188 +0,0 @@
-# This file is vendored from github.com/MacPython/gfortran-install It is
-# licensed under BSD-2 which is copied as a comment below
-
-# Copyright 2016-2021 Matthew Brett, Isuru Fernando, Matti Picus
-
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-
-# Redistributions of source code must retain the above copyright notice, this
-# list of conditions and the following disclaimer.
-
-# Redistributions in binary form must reproduce the above copyright notice, this
-# list of conditions and the following disclaimer in the documentation and/or
-# other materials provided with the distribution.
-
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# Bash utilities for use with gfortran
-
-ARCHIVE_SDIR="${ARCHIVE_SDIR:-archives}"
-
-GF_UTIL_DIR=$(dirname "${BASH_SOURCE[0]}")
-
-function get_distutils_platform {
-    # Report platform as in form of distutils get_platform.
-    # This is like the platform tag that pip will use.
-    # Modify fat architecture tags on macOS to reflect compiled architecture
-
-    # Deprecate this function once get_distutils_platform_ex is used in all
-    # downstream projects
-    local plat=$1
-    case $plat in
-        i686|x86_64|arm64|universal2|intel|aarch64|s390x|ppc64le) ;;
-        *) echo Did not recognize plat $plat; return 1 ;;
-    esac
-    local uname=${2:-$(uname)}
-    if [ "$uname" != "Darwin" ]; then
-        if [ "$plat" == "intel" ]; then
-            echo plat=intel not allowed for Manylinux
-            return 1
-        fi
-        echo "manylinux1_$plat"
-        return
-    fi
-    # The gfortran downloads build for macos 10.9
-    local target="10_9"
-    echo "macosx_${target}_${plat}"
-}
-
-function get_distutils_platform_ex {
-    # Report platform as in form of distutils get_platform.
-    # This is like the platform tag that pip will use.
-    # Modify fat architecture tags on macOS to reflect compiled architecture
-    # For non-darwin, report manylinux version
-    local plat=$1
-    local mb_ml_ver=${MB_ML_VER:-1}
-    case $plat in
-        i686|x86_64|arm64|universal2|intel|aarch64|s390x|ppc64le) ;;
-        *) echo Did not recognize plat $plat; return 1 ;;
-    esac
-    local uname=${2:-$(uname)}
-    if [ "$uname" != "Darwin" ]; then
-        if [ "$plat" == "intel" ]; then
-            echo plat=intel not allowed for Manylinux
-            return 1
-        fi
-        echo "manylinux${mb_ml_ver}_${plat}"
-        return
-    fi
-    # The gfortran downloads build for macos 10.9
-    local target="10_9"
-    echo "macosx_${target}_${plat}"
-}
-
-function get_macosx_target {
-    # Report MACOSX_DEPLOYMENT_TARGET as given by distutils get_platform.
-    python3 -c "import sysconfig as s; print(s.get_config_vars()['MACOSX_DEPLOYMENT_TARGET'])"
-}
-
-function check_gfortran {
-    # Check that gfortran exists on the path
-    if [ -z "$(which gfortran)" ]; then
-        echo Missing gfortran
-        exit 1
-    fi
-}
-
-function get_gf_lib_for_suf {
-    local suffix=$1
-    local prefix=$2
-    local plat=${3:-$PLAT}
-    local uname=${4:-$(uname)}
-    if [ -z "$prefix" ]; then echo Prefix not defined; exit 1; fi
-    local plat_tag=$(get_distutils_platform_ex $plat $uname)
-    if [ -n "$suffix" ]; then suffix="-$suffix"; fi
-    local fname="$prefix-${plat_tag}${suffix}.tar.gz"
-    local out_fname="${ARCHIVE_SDIR}/$fname"
-    [ -s $out_fname ] || (echo "$out_fname is empty"; exit 24)
-    echo "$out_fname"
-}
-
-if [ "$(uname)" == "Darwin" ]; then
-    mac_target=${MACOSX_DEPLOYMENT_TARGET:-$(get_macosx_target)}
-    export MACOSX_DEPLOYMENT_TARGET=$mac_target
-    # Keep this for now as some builds might depend on this being
-    # available before install_gfortran is called
-    export GFORTRAN_SHA=c469a420d2d003112749dcdcbe3c684eef42127e
-    # Set SDKROOT env variable if not set
-    export SDKROOT=${SDKROOT:-$(xcrun --show-sdk-path)}
-
-    function download_and_unpack_gfortran {
-	local arch=$1
-	local type=$2
-        curl -L -O https://github.com/isuruf/gcc/releases/download/gcc-11.3.0-2/gfortran-darwin-${arch}-${type}.tar.gz
-	case ${arch}-${type} in
-	    arm64-native)
-	        export GFORTRAN_SHA=0d5c118e5966d0fb9e7ddb49321f63cac1397ce8
-		;;
-	    arm64-cross)
-		export GFORTRAN_SHA=527232845abc5af21f21ceacc46fb19c190fe804
-		;;
-	    x86_64-native)
-		export GFORTRAN_SHA=c469a420d2d003112749dcdcbe3c684eef42127e
-		;;
-	    x86_64-cross)
-		export GFORTRAN_SHA=107604e57db97a0ae3e7ca7f5dd722959752f0b3
-		;;
-	esac
-        if [[ "$(shasum gfortran-darwin-${arch}-${type}.tar.gz)" != "${GFORTRAN_SHA}  gfortran-darwin-${arch}-${type}.tar.gz" ]]; then
-            echo "shasum mismatch for gfortran-darwin-${arch}-${type}"
-            exit 1
-        fi
-        sudo mkdir -p /opt/
-        sudo cp "gfortran-darwin-${arch}-${type}.tar.gz" /opt/gfortran-darwin-${arch}-${type}.tar.gz
-        pushd /opt
-            sudo tar -xvf gfortran-darwin-${arch}-${type}.tar.gz
-            sudo rm gfortran-darwin-${arch}-${type}.tar.gz
-        popd
-	if [[ "${type}" == "native" ]]; then
-	    # Link these into /usr/local so that there's no need to add rpath or -L
-	    for f in libgfortran.dylib libgfortran.5.dylib libgcc_s.1.dylib libgcc_s.1.1.dylib libquadmath.dylib libquadmath.0.dylib; do
-                sudo ln -sf /opt/gfortran-darwin-${arch}-${type}/lib/$f /usr/local/lib/$f
-            done
-	    # Add it to PATH
-	    sudo ln -sf /opt/gfortran-darwin-${arch}-${type}/bin/gfortran /usr/local/bin/gfortran
-	fi
-    }
-
-    function install_arm64_cross_gfortran {
-	download_and_unpack_gfortran arm64 cross
-        export FC_ARM64="$(find /opt/gfortran-darwin-arm64-cross/bin -name "*-gfortran")"
-        local libgfortran="$(find /opt/gfortran-darwin-arm64-cross/lib -name libgfortran.dylib)"
-        local libdir=$(dirname $libgfortran)
-
-        export FC_ARM64_LDFLAGS="-L$libdir -Wl,-rpath,$libdir"
-        if [[ "${PLAT:-}" == "arm64" ]]; then
-            export FC=$FC_ARM64
-        fi
-    }
-    function install_gfortran {
-        download_and_unpack_gfortran $(uname -m) native
-        check_gfortran
-    }
-
-    function get_gf_lib {
-        # Get lib with gfortran suffix
-        get_gf_lib_for_suf "gf_${GFORTRAN_SHA:0:7}" $@
-    }
-else
-    function install_gfortran {
-        # No-op - already installed on manylinux image
-        check_gfortran
-    }
-
-    function get_gf_lib {
-        # Get library with no suffix
-        get_gf_lib_for_suf "" $@
-    }
-fi
diff --git a/tools/wheels/repair_windows.sh b/tools/wheels/repair_windows.sh
index 79b3f90f1af6..db9905f99059 100644
--- a/tools/wheels/repair_windows.sh
+++ b/tools/wheels/repair_windows.sh
@@ -3,31 +3,8 @@ set -xe
 WHEEL="$1"
 DEST_DIR="$2"
 
-# create a temporary directory in the destination folder and unpack the wheel
-# into there
 cwd=$PWD
-
-pushd $DEST_DIR
-mkdir -p tmp
-pushd tmp
-wheel unpack $WHEEL
-pushd numpy*
-
-# To avoid DLL hell, the file name of libopenblas that's being vendored with
-# the wheel has to be name-mangled. delvewheel is unable to name-mangle PYD
-# containing extra data at the end of the binary, which frequently occurs when
-# building with mingw.
-# We therefore find each PYD in the directory structure and strip them.
-
-for f in $(find ./numpy* -name '*.pyd'); do strip $f; done
-
-
-# now repack the wheel and overwrite the original
-wheel pack .
-mv -fv *.whl $WHEEL
-
 cd $DEST_DIR
-rm -rf tmp
 
 # the libopenblas.dll is placed into this directory in the cibw_before_build
 # script.
diff --git a/tools/wheels/upload_wheels.sh b/tools/wheels/upload_wheels.sh
deleted file mode 100644
index ccd713c907a2..000000000000
--- a/tools/wheels/upload_wheels.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-set_travis_vars() {
-    # Set env vars
-    echo "TRAVIS_EVENT_TYPE is $TRAVIS_EVENT_TYPE"
-    echo "TRAVIS_TAG is $TRAVIS_TAG"
-    if [[ "$TRAVIS_EVENT_TYPE" == "push" && "$TRAVIS_TAG" == v* ]]; then
-      IS_PUSH="true"
-    else
-      IS_PUSH="false"
-    fi
-    if [[ "$TRAVIS_EVENT_TYPE" == "cron" ]]; then
-      IS_SCHEDULE_DISPATCH="true"
-    else
-      IS_SCHEDULE_DISPATCH="false"
-    fi
-}
-set_upload_vars() {
-    echo "IS_PUSH is $IS_PUSH"
-    echo "IS_SCHEDULE_DISPATCH is $IS_SCHEDULE_DISPATCH"
-    if [[ "$IS_PUSH" == "true" ]]; then
-        echo push and tag event
-        export ANACONDA_ORG="multibuild-wheels-staging"
-        export TOKEN="$NUMPY_STAGING_UPLOAD_TOKEN"
-        export ANACONDA_UPLOAD="true"
-    elif [[ "$IS_SCHEDULE_DISPATCH" == "true" ]]; then
-        echo scheduled or dispatched event
-        export ANACONDA_ORG="scientific-python-nightly-wheels"
-        export TOKEN="$NUMPY_NIGHTLY_UPLOAD_TOKEN"
-        export ANACONDA_UPLOAD="true"
-    else
-        echo non-dispatch event
-        export ANACONDA_UPLOAD="false"
-    fi
-}
-upload_wheels() {
-    echo ${PWD}
-    if [[ ${ANACONDA_UPLOAD} == true ]]; then
-        if [[ -z ${TOKEN} ]]; then
-            echo no token set, not uploading
-        else
-            # sdists are located under dist folder when built through setup.py
-            if compgen -G "./dist/*.gz"; then
-                echo "Found sdist"
-                anaconda -q -t ${TOKEN} upload --force -u ${ANACONDA_ORG} ./dist/*.gz
-            elif compgen -G "./wheelhouse/*.whl"; then
-                echo "Found wheel"
-                anaconda -q -t ${TOKEN} upload --force -u ${ANACONDA_ORG} ./wheelhouse/*.whl
-            else
-                echo "Files do not exist"
-                return 1
-            fi
-            echo "PyPI-style index: https://pypi.anaconda.org/$ANACONDA_ORG/simple"
-        fi
-    fi
-}
diff --git a/tools/write_release.py b/tools/write_release.py
new file mode 100644
index 000000000000..cdb5235f0bd0
--- /dev/null
+++ b/tools/write_release.py
@@ -0,0 +1,68 @@
+"""
+Standalone script for writing release doc::
+
+    python tools/write_release.py <version>
+
+Example::
+
+    python tools/write_release.py 1.7.0
+
+Needs to be run from the root of the repository and assumes
+that the output is in `release` and wheels and sdist in
+`release/installers`.
+
+Translation from rst to markdown requires Pandoc; you
+will need to rely on your distribution to provide it.
+
+"""
+import argparse
+import os
+import subprocess
+from pathlib import Path
+
+# Name of the notes directory
+NOTES_DIR = "doc/source/release"
+# Name of the output directory
+OUTPUT_DIR = "release"
+# Output base name, `.rst` or `.md` will be appended
+OUTPUT_FILE = "README"
+
+def write_release(version):
+    """
+    Copy the -notes.rst file to the OUTPUT_DIR and use
+    pandoc to translate it to markdown. That results in both
+    README.rst and README.md files that can be used
+    on GitHub for the release.
+
+    Parameters
+    ----------
+    version: str
+       Release version, e.g., '2.3.2', etc.
+
+    Returns
+    -------
+    None.
+
+    """
+    notes = Path(NOTES_DIR) / f"{version}-notes.rst"
+    outdir = Path(OUTPUT_DIR)
+    outdir.mkdir(exist_ok=True)
+    target_md = outdir / f"{OUTPUT_FILE}.md"
+    target_rst = outdir / f"{OUTPUT_FILE}.rst"
+
+    # translate README.rst to md for posting on GitHub
+    os.system(f"cp {notes} {target_rst}")
+    subprocess.run(
+        ["pandoc", "-s", "-o", str(target_md), str(target_rst), "--wrap=preserve"],
+        check=True,
+    )
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "version",
+        help="NumPy version of the release, e.g. 2.3.2, etc.")
+
+    args = parser.parse_args()
+    write_release(args.version)
diff --git a/vendored-meson/meson b/vendored-meson/meson
index 31161eef3fc8..5d5a3d478da1 160000
--- a/vendored-meson/meson
+++ b/vendored-meson/meson
@@ -1 +1 @@
-Subproject commit 31161eef3fc8cf0bf834edc1dd29e490fc6d7713
+Subproject commit 5d5a3d478da115c812be77afa651db2492d52171